haifa-sched.c (ISSUE_POINTS): Remove.
[gcc.git] / gcc / haifa-sched.c
1 /* Instruction scheduling pass.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
6 and currently maintained by, Jim Wilson (wilson@cygnus.com)
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 /* Instruction scheduling pass. This file, along with sched-deps.c,
25 contains the generic parts. The actual entry point is found for
26 the normal instruction scheduling pass is found in sched-rgn.c.
27
28 We compute insn priorities based on data dependencies. Flow
29 analysis only creates a fraction of the data-dependencies we must
30 observe: namely, only those dependencies which the combiner can be
31 expected to use. For this pass, we must therefore create the
32 remaining dependencies we need to observe: register dependencies,
33 memory dependencies, dependencies to keep function calls in order,
34 and the dependence between a conditional branch and the setting of
35 condition codes are all dealt with here.
36
37 The scheduler first traverses the data flow graph, starting with
38 the last instruction, and proceeding to the first, assigning values
39 to insn_priority as it goes. This sorts the instructions
40 topologically by data dependence.
41
42 Once priorities have been established, we order the insns using
43 list scheduling. This works as follows: starting with a list of
44 all the ready insns, and sorted according to priority number, we
45 schedule the insn from the end of the list by placing its
46 predecessors in the list according to their priority order. We
47 consider this insn scheduled by setting the pointer to the "end" of
48 the list to point to the previous insn. When an insn has no
49 predecessors, we either queue it until sufficient time has elapsed
50 or add it to the ready list. As the instructions are scheduled or
51 when stalls are introduced, the queue advances and dumps insns into
52 the ready list. When all insns down to the lowest priority have
53 been scheduled, the critical path of the basic block has been made
54 as short as possible. The remaining insns are then scheduled in
55 remaining slots.
56
57 The following list shows the order in which we want to break ties
58 among insns in the ready list:
59
60 1. choose insn with the longest path to end of bb, ties
61 broken by
62 2. choose insn with least contribution to register pressure,
63 ties broken by
64 3. prefer in-block upon interblock motion, ties broken by
65 4. prefer useful upon speculative motion, ties broken by
66 5. choose insn with largest control flow probability, ties
67 broken by
68 6. choose insn with the least dependences upon the previously
69 scheduled insn, or finally
70 7 choose the insn which has the most insns dependent on it.
71 8. choose insn with lowest UID.
72
73 Memory references complicate matters. Only if we can be certain
74 that memory references are not part of the data dependency graph
75 (via true, anti, or output dependence), can we move operations past
76 memory references. To first approximation, reads can be done
77 independently, while writes introduce dependencies. Better
78 approximations will yield fewer dependencies.
79
80 Before reload, an extended analysis of interblock data dependences
81 is required for interblock scheduling. This is performed in
82 compute_block_backward_dependences ().
83
84 Dependencies set up by memory references are treated in exactly the
85 same way as other dependencies, by using insn backward dependences
86 INSN_BACK_DEPS. INSN_BACK_DEPS are translated into forward dependences
87 INSN_FORW_DEPS the purpose of forward list scheduling.
88
89 Having optimized the critical path, we may have also unduly
90 extended the lifetimes of some registers. If an operation requires
91 that constants be loaded into registers, it is certainly desirable
92 to load those constants as early as necessary, but no earlier.
93 I.e., it will not do to load up a bunch of registers at the
94 beginning of a basic block only to use them at the end, if they
95 could be loaded later, since this may result in excessive register
96 utilization.
97
98 Note that since branches are never in basic blocks, but only end
99 basic blocks, this pass will not move branches. But that is ok,
100 since we can use GNU's delayed branch scheduling pass to take care
101 of this case.
102
103 Also note that no further optimizations based on algebraic
104 identities are performed, so this pass would be a good one to
105 perform instruction splitting, such as breaking up a multiply
106 instruction into shifts and adds where that is profitable.
107
108 Given the memory aliasing analysis that this pass should perform,
109 it should be possible to remove redundant stores to memory, and to
110 load values from registers instead of hitting memory.
111
112 Before reload, speculative insns are moved only if a 'proof' exists
113 that no exception will be caused by this, and if no live registers
114 exist that inhibit the motion (live registers constraints are not
115 represented by data dependence edges).
116
117 This pass must update information that subsequent passes expect to
118 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
119 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
120
121 The information in the line number notes is carefully retained by
122 this pass. Notes that refer to the starting and ending of
123 exception regions are also carefully retained by this pass. All
124 other NOTE insns are grouped in their same relative order at the
125 beginning of basic blocks and regions that have been scheduled. */
126 \f
127 #include "config.h"
128 #include "system.h"
129 #include "coretypes.h"
130 #include "tm.h"
131 #include "diagnostic-core.h"
132 #include "toplev.h"
133 #include "rtl.h"
134 #include "tm_p.h"
135 #include "hard-reg-set.h"
136 #include "regs.h"
137 #include "function.h"
138 #include "flags.h"
139 #include "insn-config.h"
140 #include "insn-attr.h"
141 #include "except.h"
142 #include "recog.h"
143 #include "sched-int.h"
144 #include "target.h"
145 #include "output.h"
146 #include "params.h"
147 #include "vecprim.h"
148 #include "dbgcnt.h"
149 #include "cfgloop.h"
150 #include "ira.h"
151 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
152
153 #ifdef INSN_SCHEDULING
154
155 /* issue_rate is the number of insns that can be scheduled in the same
156 machine cycle. It can be defined in the config/mach/mach.h file,
157 otherwise we set it to 1. */
158
159 int issue_rate;
160
161 /* sched-verbose controls the amount of debugging output the
162 scheduler prints. It is controlled by -fsched-verbose=N:
163 N>0 and no -DSR : the output is directed to stderr.
164 N>=10 will direct the printouts to stderr (regardless of -dSR).
165 N=1: same as -dSR.
166 N=2: bb's probabilities, detailed ready list info, unit/insn info.
167 N=3: rtl at abort point, control-flow, regions info.
168 N=5: dependences info. */
169
170 static int sched_verbose_param = 0;
171 int sched_verbose = 0;
172
173 /* Debugging file. All printouts are sent to dump, which is always set,
174 either to stderr, or to the dump listing file (-dRS). */
175 FILE *sched_dump = 0;
176
177 /* fix_sched_param() is called from toplev.c upon detection
178 of the -fsched-verbose=N option. */
179
180 void
181 fix_sched_param (const char *param, const char *val)
182 {
183 if (!strcmp (param, "verbose"))
184 sched_verbose_param = atoi (val);
185 else
186 warning (0, "fix_sched_param: unknown param: %s", param);
187 }
188
189 /* This is a placeholder for the scheduler parameters common
190 to all schedulers. */
191 struct common_sched_info_def *common_sched_info;
192
193 #define INSN_TICK(INSN) (HID (INSN)->tick)
194 #define INTER_TICK(INSN) (HID (INSN)->inter_tick)
195
196 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
197 then it should be recalculated from scratch. */
198 #define INVALID_TICK (-(max_insn_queue_index + 1))
199 /* The minimal value of the INSN_TICK of an instruction. */
200 #define MIN_TICK (-max_insn_queue_index)
201
202 /* List of important notes we must keep around. This is a pointer to the
203 last element in the list. */
204 rtx note_list;
205
206 static struct spec_info_def spec_info_var;
207 /* Description of the speculative part of the scheduling.
208 If NULL - no speculation. */
209 spec_info_t spec_info = NULL;
210
211 /* True, if recovery block was added during scheduling of current block.
212 Used to determine, if we need to fix INSN_TICKs. */
213 static bool haifa_recovery_bb_recently_added_p;
214
215 /* True, if recovery block was added during this scheduling pass.
216 Used to determine if we should have empty memory pools of dependencies
217 after finishing current region. */
218 bool haifa_recovery_bb_ever_added_p;
219
220 /* Counters of different types of speculative instructions. */
221 static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
222
223 /* Array used in {unlink, restore}_bb_notes. */
224 static rtx *bb_header = 0;
225
226 /* Basic block after which recovery blocks will be created. */
227 static basic_block before_recovery;
228
229 /* Basic block just before the EXIT_BLOCK and after recovery, if we have
230 created it. */
231 basic_block after_recovery;
232
233 /* FALSE if we add bb to another region, so we don't need to initialize it. */
234 bool adding_bb_to_current_region_p = true;
235
236 /* Queues, etc. */
237
238 /* An instruction is ready to be scheduled when all insns preceding it
239 have already been scheduled. It is important to ensure that all
240 insns which use its result will not be executed until its result
241 has been computed. An insn is maintained in one of four structures:
242
243 (P) the "Pending" set of insns which cannot be scheduled until
244 their dependencies have been satisfied.
245 (Q) the "Queued" set of insns that can be scheduled when sufficient
246 time has passed.
247 (R) the "Ready" list of unscheduled, uncommitted insns.
248 (S) the "Scheduled" list of insns.
249
250 Initially, all insns are either "Pending" or "Ready" depending on
251 whether their dependencies are satisfied.
252
253 Insns move from the "Ready" list to the "Scheduled" list as they
254 are committed to the schedule. As this occurs, the insns in the
255 "Pending" list have their dependencies satisfied and move to either
256 the "Ready" list or the "Queued" set depending on whether
257 sufficient time has passed to make them ready. As time passes,
258 insns move from the "Queued" set to the "Ready" list.
259
260 The "Pending" list (P) are the insns in the INSN_FORW_DEPS of the
261 unscheduled insns, i.e., those that are ready, queued, and pending.
262 The "Queued" set (Q) is implemented by the variable `insn_queue'.
263 The "Ready" list (R) is implemented by the variables `ready' and
264 `n_ready'.
265 The "Scheduled" list (S) is the new insn chain built by this pass.
266
267 The transition (R->S) is implemented in the scheduling loop in
268 `schedule_block' when the best insn to schedule is chosen.
269 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
270 insns move from the ready list to the scheduled list.
271 The transition (Q->R) is implemented in 'queue_to_insn' as time
272 passes or stalls are introduced. */
273
274 /* Implement a circular buffer to delay instructions until sufficient
275 time has passed. For the new pipeline description interface,
276 MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
277 than maximal time of instruction execution computed by genattr.c on
278 the base maximal time of functional unit reservations and getting a
279 result. This is the longest time an insn may be queued. */
280
281 static rtx *insn_queue;
282 static int q_ptr = 0;
283 static int q_size = 0;
284 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
285 #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
286
287 #define QUEUE_SCHEDULED (-3)
288 #define QUEUE_NOWHERE (-2)
289 #define QUEUE_READY (-1)
290 /* QUEUE_SCHEDULED - INSN is scheduled.
291 QUEUE_NOWHERE - INSN isn't scheduled yet and is neither in
292 queue or ready list.
293 QUEUE_READY - INSN is in ready list.
294 N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
295
296 #define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
297
298 /* The following variable value refers for all current and future
299 reservations of the processor units. */
300 state_t curr_state;
301
302 /* The following variable value is size of memory representing all
303 current and future reservations of the processor units. */
304 size_t dfa_state_size;
305
306 /* The following array is used to find the best insn from ready when
307 the automaton pipeline interface is used. */
308 char *ready_try = NULL;
309
310 /* The ready list. */
311 struct ready_list ready = {NULL, 0, 0, 0, 0};
312
313 /* The pointer to the ready list (to be removed). */
314 static struct ready_list *readyp = &ready;
315
316 /* Scheduling clock. */
317 static int clock_var;
318
319 static int may_trap_exp (const_rtx, int);
320
321 /* Nonzero iff the address is comprised from at most 1 register. */
322 #define CONST_BASED_ADDRESS_P(x) \
323 (REG_P (x) \
324 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
325 || (GET_CODE (x) == LO_SUM)) \
326 && (CONSTANT_P (XEXP (x, 0)) \
327 || CONSTANT_P (XEXP (x, 1)))))
328
329 /* Returns a class that insn with GET_DEST(insn)=x may belong to,
330 as found by analyzing insn's expression. */
331
332 \f
333 static int haifa_luid_for_non_insn (rtx x);
334
335 /* Haifa version of sched_info hooks common to all headers. */
336 const struct common_sched_info_def haifa_common_sched_info =
337 {
338 NULL, /* fix_recovery_cfg */
339 NULL, /* add_block */
340 NULL, /* estimate_number_of_insns */
341 haifa_luid_for_non_insn, /* luid_for_non_insn */
342 SCHED_PASS_UNKNOWN /* sched_pass_id */
343 };
344
345 const struct sched_scan_info_def *sched_scan_info;
346
347 /* Mapping from instruction UID to its Logical UID. */
348 VEC (int, heap) *sched_luids = NULL;
349
350 /* Next LUID to assign to an instruction. */
351 int sched_max_luid = 1;
352
353 /* Haifa Instruction Data. */
354 VEC (haifa_insn_data_def, heap) *h_i_d = NULL;
355
356 void (* sched_init_only_bb) (basic_block, basic_block);
357
358 /* Split block function. Different schedulers might use different functions
359 to handle their internal data consistent. */
360 basic_block (* sched_split_block) (basic_block, rtx);
361
362 /* Create empty basic block after the specified block. */
363 basic_block (* sched_create_empty_bb) (basic_block);
364
365 static int
366 may_trap_exp (const_rtx x, int is_store)
367 {
368 enum rtx_code code;
369
370 if (x == 0)
371 return TRAP_FREE;
372 code = GET_CODE (x);
373 if (is_store)
374 {
375 if (code == MEM && may_trap_p (x))
376 return TRAP_RISKY;
377 else
378 return TRAP_FREE;
379 }
380 if (code == MEM)
381 {
382 /* The insn uses memory: a volatile load. */
383 if (MEM_VOLATILE_P (x))
384 return IRISKY;
385 /* An exception-free load. */
386 if (!may_trap_p (x))
387 return IFREE;
388 /* A load with 1 base register, to be further checked. */
389 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
390 return PFREE_CANDIDATE;
391 /* No info on the load, to be further checked. */
392 return PRISKY_CANDIDATE;
393 }
394 else
395 {
396 const char *fmt;
397 int i, insn_class = TRAP_FREE;
398
399 /* Neither store nor load, check if it may cause a trap. */
400 if (may_trap_p (x))
401 return TRAP_RISKY;
402 /* Recursive step: walk the insn... */
403 fmt = GET_RTX_FORMAT (code);
404 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
405 {
406 if (fmt[i] == 'e')
407 {
408 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
409 insn_class = WORST_CLASS (insn_class, tmp_class);
410 }
411 else if (fmt[i] == 'E')
412 {
413 int j;
414 for (j = 0; j < XVECLEN (x, i); j++)
415 {
416 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
417 insn_class = WORST_CLASS (insn_class, tmp_class);
418 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
419 break;
420 }
421 }
422 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
423 break;
424 }
425 return insn_class;
426 }
427 }
428
429 /* Classifies rtx X of an insn for the purpose of verifying that X can be
430 executed speculatively (and consequently the insn can be moved
431 speculatively), by examining X, returning:
432 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
433 TRAP_FREE: non-load insn.
434 IFREE: load from a globally safe location.
435 IRISKY: volatile load.
436 PFREE_CANDIDATE, PRISKY_CANDIDATE: load that need to be checked for
437 being either PFREE or PRISKY. */
438
439 static int
440 haifa_classify_rtx (const_rtx x)
441 {
442 int tmp_class = TRAP_FREE;
443 int insn_class = TRAP_FREE;
444 enum rtx_code code;
445
446 if (GET_CODE (x) == PARALLEL)
447 {
448 int i, len = XVECLEN (x, 0);
449
450 for (i = len - 1; i >= 0; i--)
451 {
452 tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
453 insn_class = WORST_CLASS (insn_class, tmp_class);
454 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
455 break;
456 }
457 }
458 else
459 {
460 code = GET_CODE (x);
461 switch (code)
462 {
463 case CLOBBER:
464 /* Test if it is a 'store'. */
465 tmp_class = may_trap_exp (XEXP (x, 0), 1);
466 break;
467 case SET:
468 /* Test if it is a store. */
469 tmp_class = may_trap_exp (SET_DEST (x), 1);
470 if (tmp_class == TRAP_RISKY)
471 break;
472 /* Test if it is a load. */
473 tmp_class =
474 WORST_CLASS (tmp_class,
475 may_trap_exp (SET_SRC (x), 0));
476 break;
477 case COND_EXEC:
478 tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
479 if (tmp_class == TRAP_RISKY)
480 break;
481 tmp_class = WORST_CLASS (tmp_class,
482 may_trap_exp (COND_EXEC_TEST (x), 0));
483 break;
484 case TRAP_IF:
485 tmp_class = TRAP_RISKY;
486 break;
487 default:;
488 }
489 insn_class = tmp_class;
490 }
491
492 return insn_class;
493 }
494
495 int
496 haifa_classify_insn (const_rtx insn)
497 {
498 return haifa_classify_rtx (PATTERN (insn));
499 }
500
501 /* Forward declarations. */
502
503 static int priority (rtx);
504 static int rank_for_schedule (const void *, const void *);
505 static void swap_sort (rtx *, int);
506 static void queue_insn (rtx, int);
507 static int schedule_insn (rtx);
508 static void adjust_priority (rtx);
509 static void advance_one_cycle (void);
510 static void extend_h_i_d (void);
511
512
513 /* Notes handling mechanism:
514 =========================
515 Generally, NOTES are saved before scheduling and restored after scheduling.
516 The scheduler distinguishes between two types of notes:
517
518 (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
519 Before scheduling a region, a pointer to the note is added to the insn
520 that follows or precedes it. (This happens as part of the data dependence
521 computation). After scheduling an insn, the pointer contained in it is
522 used for regenerating the corresponding note (in reemit_notes).
523
524 (2) All other notes (e.g. INSN_DELETED): Before scheduling a block,
525 these notes are put in a list (in rm_other_notes() and
526 unlink_other_notes ()). After scheduling the block, these notes are
527 inserted at the beginning of the block (in schedule_block()). */
528
529 static void ready_add (struct ready_list *, rtx, bool);
530 static rtx ready_remove_first (struct ready_list *);
531 static rtx ready_remove_first_dispatch (struct ready_list *ready);
532
533 static void queue_to_ready (struct ready_list *);
534 static int early_queue_to_ready (state_t, struct ready_list *);
535
536 static void debug_ready_list (struct ready_list *);
537
538 /* The following functions are used to implement multi-pass scheduling
539 on the first cycle. */
540 static rtx ready_remove (struct ready_list *, int);
541 static void ready_remove_insn (rtx);
542
543 static int choose_ready (struct ready_list *, rtx *);
544
545 static void fix_inter_tick (rtx, rtx);
546 static int fix_tick_ready (rtx);
547 static void change_queue_index (rtx, int);
548
549 /* The following functions are used to implement scheduling of data/control
550 speculative instructions. */
551
552 static void extend_h_i_d (void);
553 static void init_h_i_d (rtx);
554 static void generate_recovery_code (rtx);
555 static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
556 static void begin_speculative_block (rtx);
557 static void add_to_speculative_block (rtx);
558 static void init_before_recovery (basic_block *);
559 static void create_check_block_twin (rtx, bool);
560 static void fix_recovery_deps (basic_block);
561 static void haifa_change_pattern (rtx, rtx);
562 static void dump_new_block_header (int, basic_block, rtx, rtx);
563 static void restore_bb_notes (basic_block);
564 static void fix_jump_move (rtx);
565 static void move_block_after_check (rtx);
566 static void move_succs (VEC(edge,gc) **, basic_block);
567 static void sched_remove_insn (rtx);
568 static void clear_priorities (rtx, rtx_vec_t *);
569 static void calc_priorities (rtx_vec_t);
570 static void add_jump_dependencies (rtx, rtx);
571 #ifdef ENABLE_CHECKING
572 static int has_edge_p (VEC(edge,gc) *, int);
573 static void check_cfg (rtx, rtx);
574 #endif
575
576 #endif /* INSN_SCHEDULING */
577 \f
578 /* Point to state used for the current scheduling pass. */
579 struct haifa_sched_info *current_sched_info;
580 \f
581 #ifndef INSN_SCHEDULING
582 void
583 schedule_insns (void)
584 {
585 }
586 #else
587
588 /* Do register pressure sensitive insn scheduling if the flag is set
589 up. */
590 bool sched_pressure_p;
591
592 /* Map regno -> its cover class. The map defined only when
593 SCHED_PRESSURE_P is true. */
594 enum reg_class *sched_regno_cover_class;
595
596 /* The current register pressure. Only elements corresponding cover
597 classes are defined. */
598 static int curr_reg_pressure[N_REG_CLASSES];
599
600 /* Saved value of the previous array. */
601 static int saved_reg_pressure[N_REG_CLASSES];
602
603 /* Register living at given scheduling point. */
604 static bitmap curr_reg_live;
605
606 /* Saved value of the previous array. */
607 static bitmap saved_reg_live;
608
609 /* Registers mentioned in the current region. */
610 static bitmap region_ref_regs;
611
612 /* Initiate register pressure relative info for scheduling the current
613 region. Currently it is only clearing register mentioned in the
614 current region. */
615 void
616 sched_init_region_reg_pressure_info (void)
617 {
618 bitmap_clear (region_ref_regs);
619 }
620
621 /* Update current register pressure related info after birth (if
622 BIRTH_P) or death of register REGNO. */
623 static void
624 mark_regno_birth_or_death (int regno, bool birth_p)
625 {
626 enum reg_class cover_class;
627
628 cover_class = sched_regno_cover_class[regno];
629 if (regno >= FIRST_PSEUDO_REGISTER)
630 {
631 if (cover_class != NO_REGS)
632 {
633 if (birth_p)
634 {
635 bitmap_set_bit (curr_reg_live, regno);
636 curr_reg_pressure[cover_class]
637 += ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)];
638 }
639 else
640 {
641 bitmap_clear_bit (curr_reg_live, regno);
642 curr_reg_pressure[cover_class]
643 -= ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)];
644 }
645 }
646 }
647 else if (cover_class != NO_REGS
648 && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
649 {
650 if (birth_p)
651 {
652 bitmap_set_bit (curr_reg_live, regno);
653 curr_reg_pressure[cover_class]++;
654 }
655 else
656 {
657 bitmap_clear_bit (curr_reg_live, regno);
658 curr_reg_pressure[cover_class]--;
659 }
660 }
661 }
662
663 /* Initiate current register pressure related info from living
664 registers given by LIVE. */
665 static void
666 initiate_reg_pressure_info (bitmap live)
667 {
668 int i;
669 unsigned int j;
670 bitmap_iterator bi;
671
672 for (i = 0; i < ira_reg_class_cover_size; i++)
673 curr_reg_pressure[ira_reg_class_cover[i]] = 0;
674 bitmap_clear (curr_reg_live);
675 EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
676 if (current_nr_blocks == 1 || bitmap_bit_p (region_ref_regs, j))
677 mark_regno_birth_or_death (j, true);
678 }
679
680 /* Mark registers in X as mentioned in the current region. */
681 static void
682 setup_ref_regs (rtx x)
683 {
684 int i, j, regno;
685 const RTX_CODE code = GET_CODE (x);
686 const char *fmt;
687
688 if (REG_P (x))
689 {
690 regno = REGNO (x);
691 if (regno >= FIRST_PSEUDO_REGISTER)
692 bitmap_set_bit (region_ref_regs, REGNO (x));
693 else
694 for (i = hard_regno_nregs[regno][GET_MODE (x)] - 1; i >= 0; i--)
695 bitmap_set_bit (region_ref_regs, regno + i);
696 return;
697 }
698 fmt = GET_RTX_FORMAT (code);
699 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
700 if (fmt[i] == 'e')
701 setup_ref_regs (XEXP (x, i));
702 else if (fmt[i] == 'E')
703 {
704 for (j = 0; j < XVECLEN (x, i); j++)
705 setup_ref_regs (XVECEXP (x, i, j));
706 }
707 }
708
709 /* Initiate current register pressure related info at the start of
710 basic block BB. */
711 static void
712 initiate_bb_reg_pressure_info (basic_block bb)
713 {
714 unsigned int i;
715 rtx insn;
716
717 if (current_nr_blocks > 1)
718 FOR_BB_INSNS (bb, insn)
719 if (NONDEBUG_INSN_P (insn))
720 setup_ref_regs (PATTERN (insn));
721 initiate_reg_pressure_info (df_get_live_in (bb));
722 #ifdef EH_RETURN_DATA_REGNO
723 if (bb_has_eh_pred (bb))
724 for (i = 0; ; ++i)
725 {
726 unsigned int regno = EH_RETURN_DATA_REGNO (i);
727
728 if (regno == INVALID_REGNUM)
729 break;
730 if (! bitmap_bit_p (df_get_live_in (bb), regno))
731 mark_regno_birth_or_death (regno, true);
732 }
733 #endif
734 }
735
736 /* Save current register pressure related info. */
737 static void
738 save_reg_pressure (void)
739 {
740 int i;
741
742 for (i = 0; i < ira_reg_class_cover_size; i++)
743 saved_reg_pressure[ira_reg_class_cover[i]]
744 = curr_reg_pressure[ira_reg_class_cover[i]];
745 bitmap_copy (saved_reg_live, curr_reg_live);
746 }
747
748 /* Restore saved register pressure related info. */
749 static void
750 restore_reg_pressure (void)
751 {
752 int i;
753
754 for (i = 0; i < ira_reg_class_cover_size; i++)
755 curr_reg_pressure[ira_reg_class_cover[i]]
756 = saved_reg_pressure[ira_reg_class_cover[i]];
757 bitmap_copy (curr_reg_live, saved_reg_live);
758 }
759
760 /* Return TRUE if the register is dying after its USE. */
761 static bool
762 dying_use_p (struct reg_use_data *use)
763 {
764 struct reg_use_data *next;
765
766 for (next = use->next_regno_use; next != use; next = next->next_regno_use)
767 if (NONDEBUG_INSN_P (next->insn)
768 && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
769 return false;
770 return true;
771 }
772
773 /* Print info about the current register pressure and its excess for
774 each cover class. */
775 static void
776 print_curr_reg_pressure (void)
777 {
778 int i;
779 enum reg_class cl;
780
781 fprintf (sched_dump, ";;\t");
782 for (i = 0; i < ira_reg_class_cover_size; i++)
783 {
784 cl = ira_reg_class_cover[i];
785 gcc_assert (curr_reg_pressure[cl] >= 0);
786 fprintf (sched_dump, " %s:%d(%d)", reg_class_names[cl],
787 curr_reg_pressure[cl],
788 curr_reg_pressure[cl] - ira_available_class_regs[cl]);
789 }
790 fprintf (sched_dump, "\n");
791 }
792
793 /* Pointer to the last instruction scheduled. Used by rank_for_schedule,
794 so that insns independent of the last scheduled insn will be preferred
795 over dependent instructions. */
796
797 static rtx last_scheduled_insn;
798
799 /* Cached cost of the instruction. Use below function to get cost of the
800 insn. -1 here means that the field is not initialized. */
801 #define INSN_COST(INSN) (HID (INSN)->cost)
802
803 /* Compute cost of executing INSN.
804 This is the number of cycles between instruction issue and
805 instruction results. */
806 int
807 insn_cost (rtx insn)
808 {
809 int cost;
810
811 if (sel_sched_p ())
812 {
813 if (recog_memoized (insn) < 0)
814 return 0;
815
816 cost = insn_default_latency (insn);
817 if (cost < 0)
818 cost = 0;
819
820 return cost;
821 }
822
823 cost = INSN_COST (insn);
824
825 if (cost < 0)
826 {
827 /* A USE insn, or something else we don't need to
828 understand. We can't pass these directly to
829 result_ready_cost or insn_default_latency because it will
830 trigger a fatal error for unrecognizable insns. */
831 if (recog_memoized (insn) < 0)
832 {
833 INSN_COST (insn) = 0;
834 return 0;
835 }
836 else
837 {
838 cost = insn_default_latency (insn);
839 if (cost < 0)
840 cost = 0;
841
842 INSN_COST (insn) = cost;
843 }
844 }
845
846 return cost;
847 }
848
849 /* Compute cost of dependence LINK.
850 This is the number of cycles between instruction issue and
851 instruction results.
852 ??? We also use this function to call recog_memoized on all insns. */
853 int
854 dep_cost_1 (dep_t link, dw_t dw)
855 {
856 rtx insn = DEP_PRO (link);
857 rtx used = DEP_CON (link);
858 int cost;
859
860 /* A USE insn should never require the value used to be computed.
861 This allows the computation of a function's result and parameter
862 values to overlap the return and call. We don't care about the
863 the dependence cost when only decreasing register pressure. */
864 if (recog_memoized (used) < 0)
865 {
866 cost = 0;
867 recog_memoized (insn);
868 }
869 else
870 {
871 enum reg_note dep_type = DEP_TYPE (link);
872
873 cost = insn_cost (insn);
874
875 if (INSN_CODE (insn) >= 0)
876 {
877 if (dep_type == REG_DEP_ANTI)
878 cost = 0;
879 else if (dep_type == REG_DEP_OUTPUT)
880 {
881 cost = (insn_default_latency (insn)
882 - insn_default_latency (used));
883 if (cost <= 0)
884 cost = 1;
885 }
886 else if (bypass_p (insn))
887 cost = insn_latency (insn, used);
888 }
889
890
891 if (targetm.sched.adjust_cost_2)
892 cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
893 dw);
894 else if (targetm.sched.adjust_cost != NULL)
895 {
896 /* This variable is used for backward compatibility with the
897 targets. */
898 rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);
899
900 /* Make it self-cycled, so that if some tries to walk over this
901 incomplete list he/she will be caught in an endless loop. */
902 XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;
903
904 /* Targets use only REG_NOTE_KIND of the link. */
905 PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));
906
907 cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
908 insn, cost);
909
910 free_INSN_LIST_node (dep_cost_rtx_link);
911 }
912
913 if (cost < 0)
914 cost = 0;
915 }
916
917 return cost;
918 }
919
920 /* Compute cost of dependence LINK.
921 This is the number of cycles between instruction issue and
922 instruction results. */
923 int
924 dep_cost (dep_t link)
925 {
926 return dep_cost_1 (link, 0);
927 }
928
929 /* Use this sel-sched.c friendly function in reorder2 instead of increasing
930 INSN_PRIORITY explicitly. */
931 void
932 increase_insn_priority (rtx insn, int amount)
933 {
934 if (!sel_sched_p ())
935 {
936 /* We're dealing with haifa-sched.c INSN_PRIORITY. */
937 if (INSN_PRIORITY_KNOWN (insn))
938 INSN_PRIORITY (insn) += amount;
939 }
940 else
941 {
942 /* In sel-sched.c INSN_PRIORITY is not kept up to date.
943 Use EXPR_PRIORITY instead. */
944 sel_add_to_insn_priority (insn, amount);
945 }
946 }
947
948 /* Return 'true' if DEP should be included in priority calculations. */
949 static bool
950 contributes_to_priority_p (dep_t dep)
951 {
952 if (DEBUG_INSN_P (DEP_CON (dep))
953 || DEBUG_INSN_P (DEP_PRO (dep)))
954 return false;
955
956 /* Critical path is meaningful in block boundaries only. */
957 if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
958 DEP_PRO (dep)))
959 return false;
960
961 /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
962 then speculative instructions will less likely be
963 scheduled. That is because the priority of
964 their producers will increase, and, thus, the
965 producers will more likely be scheduled, thus,
966 resolving the dependence. */
967 if (sched_deps_info->generate_spec_deps
968 && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
969 && (DEP_STATUS (dep) & SPECULATIVE))
970 return false;
971
972 return true;
973 }
974
975 /* Compute the number of nondebug forward deps of an insn. */
976
977 static int
978 dep_list_size (rtx insn)
979 {
980 sd_iterator_def sd_it;
981 dep_t dep;
982 int dbgcount = 0, nodbgcount = 0;
983
984 if (!MAY_HAVE_DEBUG_INSNS)
985 return sd_lists_size (insn, SD_LIST_FORW);
986
987 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
988 {
989 if (DEBUG_INSN_P (DEP_CON (dep)))
990 dbgcount++;
991 else if (!DEBUG_INSN_P (DEP_PRO (dep)))
992 nodbgcount++;
993 }
994
995 gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, SD_LIST_FORW));
996
997 return nodbgcount;
998 }
999
1000 /* Compute the priority number for INSN. */
1001 static int
1002 priority (rtx insn)
1003 {
1004 if (! INSN_P (insn))
1005 return 0;
1006
1007 /* We should not be interested in priority of an already scheduled insn. */
1008 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
1009
1010 if (!INSN_PRIORITY_KNOWN (insn))
1011 {
1012 int this_priority = -1;
1013
1014 if (dep_list_size (insn) == 0)
1015 /* ??? We should set INSN_PRIORITY to insn_cost when and insn has
1016 some forward deps but all of them are ignored by
1017 contributes_to_priority hook. At the moment we set priority of
1018 such insn to 0. */
1019 this_priority = insn_cost (insn);
1020 else
1021 {
1022 rtx prev_first, twin;
1023 basic_block rec;
1024
1025 /* For recovery check instructions we calculate priority slightly
1026 different than that of normal instructions. Instead of walking
1027 through INSN_FORW_DEPS (check) list, we walk through
1028 INSN_FORW_DEPS list of each instruction in the corresponding
1029 recovery block. */
1030
1031 /* Selective scheduling does not define RECOVERY_BLOCK macro. */
1032 rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
1033 if (!rec || rec == EXIT_BLOCK_PTR)
1034 {
1035 prev_first = PREV_INSN (insn);
1036 twin = insn;
1037 }
1038 else
1039 {
1040 prev_first = NEXT_INSN (BB_HEAD (rec));
1041 twin = PREV_INSN (BB_END (rec));
1042 }
1043
1044 do
1045 {
1046 sd_iterator_def sd_it;
1047 dep_t dep;
1048
1049 FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
1050 {
1051 rtx next;
1052 int next_priority;
1053
1054 next = DEP_CON (dep);
1055
1056 if (BLOCK_FOR_INSN (next) != rec)
1057 {
1058 int cost;
1059
1060 if (!contributes_to_priority_p (dep))
1061 continue;
1062
1063 if (twin == insn)
1064 cost = dep_cost (dep);
1065 else
1066 {
1067 struct _dep _dep1, *dep1 = &_dep1;
1068
1069 init_dep (dep1, insn, next, REG_DEP_ANTI);
1070
1071 cost = dep_cost (dep1);
1072 }
1073
1074 next_priority = cost + priority (next);
1075
1076 if (next_priority > this_priority)
1077 this_priority = next_priority;
1078 }
1079 }
1080
1081 twin = PREV_INSN (twin);
1082 }
1083 while (twin != prev_first);
1084 }
1085
1086 if (this_priority < 0)
1087 {
1088 gcc_assert (this_priority == -1);
1089
1090 this_priority = insn_cost (insn);
1091 }
1092
1093 INSN_PRIORITY (insn) = this_priority;
1094 INSN_PRIORITY_STATUS (insn) = 1;
1095 }
1096
1097 return INSN_PRIORITY (insn);
1098 }
1099 \f
1100 /* Macros and functions for keeping the priority queue sorted, and
1101 dealing with queuing and dequeuing of instructions. */
1102
1103 #define SCHED_SORT(READY, N_READY) \
1104 do { if ((N_READY) == 2) \
1105 swap_sort (READY, N_READY); \
1106 else if ((N_READY) > 2) \
1107 qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \
1108 while (0)
1109
1110 /* Setup info about the current register pressure impact of scheduling
1111 INSN at the current scheduling point. */
1112 static void
1113 setup_insn_reg_pressure_info (rtx insn)
1114 {
1115 int i, change, before, after, hard_regno;
1116 int excess_cost_change;
1117 enum machine_mode mode;
1118 enum reg_class cl;
1119 struct reg_pressure_data *pressure_info;
1120 int *max_reg_pressure;
1121 struct reg_use_data *use;
1122 static int death[N_REG_CLASSES];
1123
1124 gcc_checking_assert (!DEBUG_INSN_P (insn));
1125
1126 excess_cost_change = 0;
1127 for (i = 0; i < ira_reg_class_cover_size; i++)
1128 death[ira_reg_class_cover[i]] = 0;
1129 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1130 if (dying_use_p (use))
1131 {
1132 cl = sched_regno_cover_class[use->regno];
1133 if (use->regno < FIRST_PSEUDO_REGISTER)
1134 death[cl]++;
1135 else
1136 death[cl] += ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (use->regno)];
1137 }
1138 pressure_info = INSN_REG_PRESSURE (insn);
1139 max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
1140 gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
1141 for (i = 0; i < ira_reg_class_cover_size; i++)
1142 {
1143 cl = ira_reg_class_cover[i];
1144 gcc_assert (curr_reg_pressure[cl] >= 0);
1145 change = (int) pressure_info[i].set_increase - death[cl];
1146 before = MAX (0, max_reg_pressure[i] - ira_available_class_regs[cl]);
1147 after = MAX (0, max_reg_pressure[i] + change
1148 - ira_available_class_regs[cl]);
1149 hard_regno = ira_class_hard_regs[cl][0];
1150 gcc_assert (hard_regno >= 0);
1151 mode = reg_raw_mode[hard_regno];
1152 excess_cost_change += ((after - before)
1153 * (ira_memory_move_cost[mode][cl][0]
1154 + ira_memory_move_cost[mode][cl][1]));
1155 }
1156 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
1157 }
1158
1159 /* Returns a positive value if x is preferred; returns a negative value if
1160 y is preferred. Should never return 0, since that will make the sort
1161 unstable. */
1162
1163 static int
1164 rank_for_schedule (const void *x, const void *y)
1165 {
1166 rtx tmp = *(const rtx *) y;
1167 rtx tmp2 = *(const rtx *) x;
1168 rtx last;
1169 int tmp_class, tmp2_class;
1170 int val, priority_val, info_val;
1171
1172 if (MAY_HAVE_DEBUG_INSNS)
1173 {
1174 /* Schedule debug insns as early as possible. */
1175 if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
1176 return -1;
1177 else if (DEBUG_INSN_P (tmp2))
1178 return 1;
1179 }
1180
1181 /* The insn in a schedule group should be issued the first. */
1182 if (flag_sched_group_heuristic &&
1183 SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
1184 return SCHED_GROUP_P (tmp2) ? 1 : -1;
1185
1186 /* Make sure that priority of TMP and TMP2 are initialized. */
1187 gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
1188
1189 if (sched_pressure_p)
1190 {
1191 int diff;
1192
1193 /* Prefer insn whose scheduling results in the smallest register
1194 pressure excess. */
1195 if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
1196 + (INSN_TICK (tmp) > clock_var
1197 ? INSN_TICK (tmp) - clock_var : 0)
1198 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
1199 - (INSN_TICK (tmp2) > clock_var
1200 ? INSN_TICK (tmp2) - clock_var : 0))) != 0)
1201 return diff;
1202 }
1203
1204
1205 if (sched_pressure_p
1206 && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var))
1207 {
1208 if (INSN_TICK (tmp) <= clock_var)
1209 return -1;
1210 else if (INSN_TICK (tmp2) <= clock_var)
1211 return 1;
1212 else
1213 return INSN_TICK (tmp) - INSN_TICK (tmp2);
1214 }
1215 /* Prefer insn with higher priority. */
1216 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
1217
1218 if (flag_sched_critical_path_heuristic && priority_val)
1219 return priority_val;
1220
1221 /* Prefer speculative insn with greater dependencies weakness. */
1222 if (flag_sched_spec_insn_heuristic && spec_info)
1223 {
1224 ds_t ds1, ds2;
1225 dw_t dw1, dw2;
1226 int dw;
1227
1228 ds1 = TODO_SPEC (tmp) & SPECULATIVE;
1229 if (ds1)
1230 dw1 = ds_weak (ds1);
1231 else
1232 dw1 = NO_DEP_WEAK;
1233
1234 ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
1235 if (ds2)
1236 dw2 = ds_weak (ds2);
1237 else
1238 dw2 = NO_DEP_WEAK;
1239
1240 dw = dw2 - dw1;
1241 if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
1242 return dw;
1243 }
1244
1245 info_val = (*current_sched_info->rank) (tmp, tmp2);
1246 if(flag_sched_rank_heuristic && info_val)
1247 return info_val;
1248
1249 if (flag_sched_last_insn_heuristic)
1250 {
1251 last = last_scheduled_insn;
1252
1253 if (DEBUG_INSN_P (last) && last != current_sched_info->prev_head)
1254 do
1255 last = PREV_INSN (last);
1256 while (!NONDEBUG_INSN_P (last)
1257 && last != current_sched_info->prev_head);
1258 }
1259
1260 /* Compare insns based on their relation to the last scheduled
1261 non-debug insn. */
1262 if (flag_sched_last_insn_heuristic && NONDEBUG_INSN_P (last))
1263 {
1264 dep_t dep1;
1265 dep_t dep2;
1266
1267 /* Classify the instructions into three classes:
1268 1) Data dependent on last schedule insn.
1269 2) Anti/Output dependent on last scheduled insn.
1270 3) Independent of last scheduled insn, or has latency of one.
1271 Choose the insn from the highest numbered class if different. */
1272 dep1 = sd_find_dep_between (last, tmp, true);
1273
1274 if (dep1 == NULL || dep_cost (dep1) == 1)
1275 tmp_class = 3;
1276 else if (/* Data dependence. */
1277 DEP_TYPE (dep1) == REG_DEP_TRUE)
1278 tmp_class = 1;
1279 else
1280 tmp_class = 2;
1281
1282 dep2 = sd_find_dep_between (last, tmp2, true);
1283
1284 if (dep2 == NULL || dep_cost (dep2) == 1)
1285 tmp2_class = 3;
1286 else if (/* Data dependence. */
1287 DEP_TYPE (dep2) == REG_DEP_TRUE)
1288 tmp2_class = 1;
1289 else
1290 tmp2_class = 2;
1291
1292 if ((val = tmp2_class - tmp_class))
1293 return val;
1294 }
1295
1296 /* Prefer the insn which has more later insns that depend on it.
1297 This gives the scheduler more freedom when scheduling later
1298 instructions at the expense of added register pressure. */
1299
1300 val = (dep_list_size (tmp2) - dep_list_size (tmp));
1301
1302 if (flag_sched_dep_count_heuristic && val != 0)
1303 return val;
1304
1305 /* If insns are equally good, sort by INSN_LUID (original insn order),
1306 so that we make the sort stable. This minimizes instruction movement,
1307 thus minimizing sched's effect on debugging and cross-jumping. */
1308 return INSN_LUID (tmp) - INSN_LUID (tmp2);
1309 }
1310
1311 /* Resort the array A in which only element at index N may be out of order. */
1312
1313 HAIFA_INLINE static void
1314 swap_sort (rtx *a, int n)
1315 {
1316 rtx insn = a[n - 1];
1317 int i = n - 2;
1318
1319 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
1320 {
1321 a[i + 1] = a[i];
1322 i -= 1;
1323 }
1324 a[i + 1] = insn;
1325 }
1326
1327 /* Add INSN to the insn queue so that it can be executed at least
1328 N_CYCLES after the currently executing insn. Preserve insns
1329 chain for debugging purposes. */
1330
1331 HAIFA_INLINE static void
1332 queue_insn (rtx insn, int n_cycles)
1333 {
1334 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
1335 rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
1336
1337 gcc_assert (n_cycles <= max_insn_queue_index);
1338 gcc_assert (!DEBUG_INSN_P (insn));
1339
1340 insn_queue[next_q] = link;
1341 q_size += 1;
1342
1343 if (sched_verbose >= 2)
1344 {
1345 fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
1346 (*current_sched_info->print_insn) (insn, 0));
1347
1348 fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
1349 }
1350
1351 QUEUE_INDEX (insn) = next_q;
1352 }
1353
1354 /* Remove INSN from queue. */
1355 static void
1356 queue_remove (rtx insn)
1357 {
1358 gcc_assert (QUEUE_INDEX (insn) >= 0);
1359 remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
1360 q_size--;
1361 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
1362 }
1363
1364 /* Return a pointer to the bottom of the ready list, i.e. the insn
1365 with the lowest priority. */
1366
1367 rtx *
1368 ready_lastpos (struct ready_list *ready)
1369 {
1370 gcc_assert (ready->n_ready >= 1);
1371 return ready->vec + ready->first - ready->n_ready + 1;
1372 }
1373
1374 /* Add an element INSN to the ready list so that it ends up with the
1375 lowest/highest priority depending on FIRST_P. */
1376
1377 HAIFA_INLINE static void
1378 ready_add (struct ready_list *ready, rtx insn, bool first_p)
1379 {
1380 if (!first_p)
1381 {
1382 if (ready->first == ready->n_ready)
1383 {
1384 memmove (ready->vec + ready->veclen - ready->n_ready,
1385 ready_lastpos (ready),
1386 ready->n_ready * sizeof (rtx));
1387 ready->first = ready->veclen - 1;
1388 }
1389 ready->vec[ready->first - ready->n_ready] = insn;
1390 }
1391 else
1392 {
1393 if (ready->first == ready->veclen - 1)
1394 {
1395 if (ready->n_ready)
1396 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
1397 memmove (ready->vec + ready->veclen - ready->n_ready - 1,
1398 ready_lastpos (ready),
1399 ready->n_ready * sizeof (rtx));
1400 ready->first = ready->veclen - 2;
1401 }
1402 ready->vec[++(ready->first)] = insn;
1403 }
1404
1405 ready->n_ready++;
1406 if (DEBUG_INSN_P (insn))
1407 ready->n_debug++;
1408
1409 gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
1410 QUEUE_INDEX (insn) = QUEUE_READY;
1411 }
1412
1413 /* Remove the element with the highest priority from the ready list and
1414 return it. */
1415
1416 HAIFA_INLINE static rtx
1417 ready_remove_first (struct ready_list *ready)
1418 {
1419 rtx t;
1420
1421 gcc_assert (ready->n_ready);
1422 t = ready->vec[ready->first--];
1423 ready->n_ready--;
1424 if (DEBUG_INSN_P (t))
1425 ready->n_debug--;
1426 /* If the queue becomes empty, reset it. */
1427 if (ready->n_ready == 0)
1428 ready->first = ready->veclen - 1;
1429
1430 gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
1431 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1432
1433 return t;
1434 }
1435
1436 /* The following code implements multi-pass scheduling for the first
1437 cycle. In other words, we will try to choose ready insn which
1438 permits to start maximum number of insns on the same cycle. */
1439
1440 /* Return a pointer to the element INDEX from the ready. INDEX for
1441 insn with the highest priority is 0, and the lowest priority has
1442 N_READY - 1. */
1443
1444 rtx
1445 ready_element (struct ready_list *ready, int index)
1446 {
1447 gcc_assert (ready->n_ready && index < ready->n_ready);
1448
1449 return ready->vec[ready->first - index];
1450 }
1451
1452 /* Remove the element INDEX from the ready list and return it. INDEX
1453 for insn with the highest priority is 0, and the lowest priority
1454 has N_READY - 1. */
1455
1456 HAIFA_INLINE static rtx
1457 ready_remove (struct ready_list *ready, int index)
1458 {
1459 rtx t;
1460 int i;
1461
1462 if (index == 0)
1463 return ready_remove_first (ready);
1464 gcc_assert (ready->n_ready && index < ready->n_ready);
1465 t = ready->vec[ready->first - index];
1466 ready->n_ready--;
1467 if (DEBUG_INSN_P (t))
1468 ready->n_debug--;
1469 for (i = index; i < ready->n_ready; i++)
1470 ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
1471 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1472 return t;
1473 }
1474
1475 /* Remove INSN from the ready list. */
1476 static void
1477 ready_remove_insn (rtx insn)
1478 {
1479 int i;
1480
1481 for (i = 0; i < readyp->n_ready; i++)
1482 if (ready_element (readyp, i) == insn)
1483 {
1484 ready_remove (readyp, i);
1485 return;
1486 }
1487 gcc_unreachable ();
1488 }
1489
1490 /* Sort the ready list READY by ascending priority, using the SCHED_SORT
1491 macro. */
1492
1493 void
1494 ready_sort (struct ready_list *ready)
1495 {
1496 int i;
1497 rtx *first = ready_lastpos (ready);
1498
1499 if (sched_pressure_p)
1500 {
1501 for (i = 0; i < ready->n_ready; i++)
1502 if (!DEBUG_INSN_P (first[i]))
1503 setup_insn_reg_pressure_info (first[i]);
1504 }
1505 SCHED_SORT (first, ready->n_ready);
1506 }
1507
1508 /* PREV is an insn that is ready to execute. Adjust its priority if that
1509 will help shorten or lengthen register lifetimes as appropriate. Also
1510 provide a hook for the target to tweak itself. */
1511
1512 HAIFA_INLINE static void
1513 adjust_priority (rtx prev)
1514 {
1515 /* ??? There used to be code here to try and estimate how an insn
1516 affected register lifetimes, but it did it by looking at REG_DEAD
1517 notes, which we removed in schedule_region. Nor did it try to
1518 take into account register pressure or anything useful like that.
1519
1520 Revisit when we have a machine model to work with and not before. */
1521
1522 if (targetm.sched.adjust_priority)
1523 INSN_PRIORITY (prev) =
1524 targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
1525 }
1526
1527 /* Advance DFA state STATE on one cycle. */
1528 void
1529 advance_state (state_t state)
1530 {
1531 if (targetm.sched.dfa_pre_advance_cycle)
1532 targetm.sched.dfa_pre_advance_cycle ();
1533
1534 if (targetm.sched.dfa_pre_cycle_insn)
1535 state_transition (state,
1536 targetm.sched.dfa_pre_cycle_insn ());
1537
1538 state_transition (state, NULL);
1539
1540 if (targetm.sched.dfa_post_cycle_insn)
1541 state_transition (state,
1542 targetm.sched.dfa_post_cycle_insn ());
1543
1544 if (targetm.sched.dfa_post_advance_cycle)
1545 targetm.sched.dfa_post_advance_cycle ();
1546 }
1547
1548 /* Advance time on one cycle. */
1549 HAIFA_INLINE static void
1550 advance_one_cycle (void)
1551 {
1552 advance_state (curr_state);
1553 if (sched_verbose >= 6)
1554 fprintf (sched_dump, ";;\tAdvanced a state.\n");
1555 }
1556
1557 /* Clock at which the previous instruction was issued. */
1558 static int last_clock_var;
1559
1560 /* Update register pressure after scheduling INSN. */
1561 static void
1562 update_register_pressure (rtx insn)
1563 {
1564 struct reg_use_data *use;
1565 struct reg_set_data *set;
1566
1567 gcc_checking_assert (!DEBUG_INSN_P (insn));
1568
1569 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1570 if (dying_use_p (use) && bitmap_bit_p (curr_reg_live, use->regno))
1571 mark_regno_birth_or_death (use->regno, false);
1572 for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
1573 mark_regno_birth_or_death (set->regno, true);
1574 }
1575
1576 /* Set up or update (if UPDATE_P) max register pressure (see its
1577 meaning in sched-int.h::_haifa_insn_data) for all current BB insns
1578 after insn AFTER. */
1579 static void
1580 setup_insn_max_reg_pressure (rtx after, bool update_p)
1581 {
1582 int i, p;
1583 bool eq_p;
1584 rtx insn;
1585 static int max_reg_pressure[N_REG_CLASSES];
1586
1587 save_reg_pressure ();
1588 for (i = 0; i < ira_reg_class_cover_size; i++)
1589 max_reg_pressure[ira_reg_class_cover[i]]
1590 = curr_reg_pressure[ira_reg_class_cover[i]];
1591 for (insn = NEXT_INSN (after);
1592 insn != NULL_RTX && ! BARRIER_P (insn)
1593 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
1594 insn = NEXT_INSN (insn))
1595 if (NONDEBUG_INSN_P (insn))
1596 {
1597 eq_p = true;
1598 for (i = 0; i < ira_reg_class_cover_size; i++)
1599 {
1600 p = max_reg_pressure[ira_reg_class_cover[i]];
1601 if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
1602 {
1603 eq_p = false;
1604 INSN_MAX_REG_PRESSURE (insn)[i]
1605 = max_reg_pressure[ira_reg_class_cover[i]];
1606 }
1607 }
1608 if (update_p && eq_p)
1609 break;
1610 update_register_pressure (insn);
1611 for (i = 0; i < ira_reg_class_cover_size; i++)
1612 if (max_reg_pressure[ira_reg_class_cover[i]]
1613 < curr_reg_pressure[ira_reg_class_cover[i]])
1614 max_reg_pressure[ira_reg_class_cover[i]]
1615 = curr_reg_pressure[ira_reg_class_cover[i]];
1616 }
1617 restore_reg_pressure ();
1618 }
1619
1620 /* Update the current register pressure after scheduling INSN. Update
1621 also max register pressure for unscheduled insns of the current
1622 BB. */
1623 static void
1624 update_reg_and_insn_max_reg_pressure (rtx insn)
1625 {
1626 int i;
1627 int before[N_REG_CLASSES];
1628
1629 for (i = 0; i < ira_reg_class_cover_size; i++)
1630 before[i] = curr_reg_pressure[ira_reg_class_cover[i]];
1631 update_register_pressure (insn);
1632 for (i = 0; i < ira_reg_class_cover_size; i++)
1633 if (curr_reg_pressure[ira_reg_class_cover[i]] != before[i])
1634 break;
1635 if (i < ira_reg_class_cover_size)
1636 setup_insn_max_reg_pressure (insn, true);
1637 }
1638
1639 /* Set up register pressure at the beginning of basic block BB whose
1640 insns starting after insn AFTER. Set up also max register pressure
1641 for all insns of the basic block. */
1642 void
1643 sched_setup_bb_reg_pressure_info (basic_block bb, rtx after)
1644 {
1645 gcc_assert (sched_pressure_p);
1646 initiate_bb_reg_pressure_info (bb);
1647 setup_insn_max_reg_pressure (after, false);
1648 }
1649
1650 /* INSN is the "currently executing insn". Launch each insn which was
1651 waiting on INSN. READY is the ready list which contains the insns
1652 that are ready to fire. CLOCK is the current cycle. The function
1653 returns necessary cycle advance after issuing the insn (it is not
1654 zero for insns in a schedule group). */
1655
1656 static int
1657 schedule_insn (rtx insn)
1658 {
1659 sd_iterator_def sd_it;
1660 dep_t dep;
1661 int i;
1662 int advance = 0;
1663
1664 if (sched_verbose >= 1)
1665 {
1666 struct reg_pressure_data *pressure_info;
1667 char buf[2048];
1668
1669 print_insn (buf, insn, 0);
1670 buf[40] = 0;
1671 fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);
1672
1673 if (recog_memoized (insn) < 0)
1674 fprintf (sched_dump, "nothing");
1675 else
1676 print_reservation (sched_dump, insn);
1677 pressure_info = INSN_REG_PRESSURE (insn);
1678 if (pressure_info != NULL)
1679 {
1680 fputc (':', sched_dump);
1681 for (i = 0; i < ira_reg_class_cover_size; i++)
1682 fprintf (sched_dump, "%s%+d(%d)",
1683 reg_class_names[ira_reg_class_cover[i]],
1684 pressure_info[i].set_increase, pressure_info[i].change);
1685 }
1686 fputc ('\n', sched_dump);
1687 }
1688
1689 if (sched_pressure_p && !DEBUG_INSN_P (insn))
1690 update_reg_and_insn_max_reg_pressure (insn);
1691
1692 /* Scheduling instruction should have all its dependencies resolved and
1693 should have been removed from the ready list. */
1694 gcc_assert (sd_lists_empty_p (insn, SD_LIST_BACK));
1695
1696 /* Reset debug insns invalidated by moving this insn. */
1697 if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
1698 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1699 sd_iterator_cond (&sd_it, &dep);)
1700 {
1701 rtx dbg = DEP_PRO (dep);
1702 struct reg_use_data *use, *next;
1703
1704 gcc_assert (DEBUG_INSN_P (dbg));
1705
1706 if (sched_verbose >= 6)
1707 fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
1708 INSN_UID (dbg));
1709
1710 /* ??? Rather than resetting the debug insn, we might be able
1711 to emit a debug temp before the just-scheduled insn, but
1712 this would involve checking that the expression at the
1713 point of the debug insn is equivalent to the expression
1714 before the just-scheduled insn. They might not be: the
1715 expression in the debug insn may depend on other insns not
1716 yet scheduled that set MEMs, REGs or even other debug
1717 insns. It's not clear that attempting to preserve debug
1718 information in these cases is worth the effort, given how
1719 uncommon these resets are and the likelihood that the debug
1720 temps introduced won't survive the schedule change. */
1721 INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
1722 df_insn_rescan (dbg);
1723
1724 /* Unknown location doesn't use any registers. */
1725 for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
1726 {
1727 struct reg_use_data *prev = use;
1728
1729 /* Remove use from the cyclic next_regno_use chain first. */
1730 while (prev->next_regno_use != use)
1731 prev = prev->next_regno_use;
1732 prev->next_regno_use = use->next_regno_use;
1733 next = use->next_insn_use;
1734 free (use);
1735 }
1736 INSN_REG_USE_LIST (dbg) = NULL;
1737
1738 /* We delete rather than resolve these deps, otherwise we
1739 crash in sched_free_deps(), because forward deps are
1740 expected to be released before backward deps. */
1741 sd_delete_dep (sd_it);
1742 }
1743
1744 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
1745 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
1746
1747 gcc_assert (INSN_TICK (insn) >= MIN_TICK);
1748 if (INSN_TICK (insn) > clock_var)
1749 /* INSN has been prematurely moved from the queue to the ready list.
1750 This is possible only if following flag is set. */
1751 gcc_assert (flag_sched_stalled_insns);
1752
1753 /* ??? Probably, if INSN is scheduled prematurely, we should leave
1754 INSN_TICK untouched. This is a machine-dependent issue, actually. */
1755 INSN_TICK (insn) = clock_var;
1756
1757 /* Update dependent instructions. */
1758 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
1759 sd_iterator_cond (&sd_it, &dep);)
1760 {
1761 rtx next = DEP_CON (dep);
1762
1763 /* Resolve the dependence between INSN and NEXT.
1764 sd_resolve_dep () moves current dep to another list thus
1765 advancing the iterator. */
1766 sd_resolve_dep (sd_it);
1767
1768 /* Don't bother trying to mark next as ready if insn is a debug
1769 insn. If insn is the last hard dependency, it will have
1770 already been discounted. */
1771 if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
1772 continue;
1773
1774 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
1775 {
1776 int effective_cost;
1777
1778 effective_cost = try_ready (next);
1779
1780 if (effective_cost >= 0
1781 && SCHED_GROUP_P (next)
1782 && advance < effective_cost)
1783 advance = effective_cost;
1784 }
1785 else
1786 /* Check always has only one forward dependence (to the first insn in
1787 the recovery block), therefore, this will be executed only once. */
1788 {
1789 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
1790 fix_recovery_deps (RECOVERY_BLOCK (insn));
1791 }
1792 }
1793
1794 /* This is the place where scheduler doesn't *basically* need backward and
1795 forward dependencies for INSN anymore. Nevertheless they are used in
1796 heuristics in rank_for_schedule (), early_queue_to_ready () and in
1797 some targets (e.g. rs6000). Thus the earliest place where we *can*
1798 remove dependencies is after targetm.sched.finish () call in
1799 schedule_block (). But, on the other side, the safest place to remove
1800 dependencies is when we are finishing scheduling entire region. As we
1801 don't generate [many] dependencies during scheduling itself, we won't
1802 need memory until beginning of next region.
1803 Bottom line: Dependencies are removed for all insns in the end of
1804 scheduling the region. */
1805
1806 /* Annotate the instruction with issue information -- TImode
1807 indicates that the instruction is expected not to be able
1808 to issue on the same cycle as the previous insn. A machine
1809 may use this information to decide how the instruction should
1810 be aligned. */
1811 if (issue_rate > 1
1812 && GET_CODE (PATTERN (insn)) != USE
1813 && GET_CODE (PATTERN (insn)) != CLOBBER
1814 && !DEBUG_INSN_P (insn))
1815 {
1816 if (reload_completed)
1817 PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
1818 last_clock_var = clock_var;
1819 }
1820
1821 return advance;
1822 }
1823
1824 /* Functions for handling of notes. */
1825
1826 /* Add note list that ends on FROM_END to the end of TO_ENDP. */
1827 void
1828 concat_note_lists (rtx from_end, rtx *to_endp)
1829 {
1830 rtx from_start;
1831
1832 /* It's easy when have nothing to concat. */
1833 if (from_end == NULL)
1834 return;
1835
1836 /* It's also easy when destination is empty. */
1837 if (*to_endp == NULL)
1838 {
1839 *to_endp = from_end;
1840 return;
1841 }
1842
1843 from_start = from_end;
1844 while (PREV_INSN (from_start) != NULL)
1845 from_start = PREV_INSN (from_start);
1846
1847 PREV_INSN (from_start) = *to_endp;
1848 NEXT_INSN (*to_endp) = from_start;
1849 *to_endp = from_end;
1850 }
1851
1852 /* Delete notes between HEAD and TAIL and put them in the chain
1853 of notes ended by NOTE_LIST. */
1854 void
1855 remove_notes (rtx head, rtx tail)
1856 {
1857 rtx next_tail, insn, next;
1858
1859 note_list = 0;
1860 if (head == tail && !INSN_P (head))
1861 return;
1862
1863 next_tail = NEXT_INSN (tail);
1864 for (insn = head; insn != next_tail; insn = next)
1865 {
1866 next = NEXT_INSN (insn);
1867 if (!NOTE_P (insn))
1868 continue;
1869
1870 switch (NOTE_KIND (insn))
1871 {
1872 case NOTE_INSN_BASIC_BLOCK:
1873 continue;
1874
1875 case NOTE_INSN_EPILOGUE_BEG:
1876 if (insn != tail)
1877 {
1878 remove_insn (insn);
1879 add_reg_note (next, REG_SAVE_NOTE,
1880 GEN_INT (NOTE_INSN_EPILOGUE_BEG));
1881 break;
1882 }
1883 /* FALLTHRU */
1884
1885 default:
1886 remove_insn (insn);
1887
1888 /* Add the note to list that ends at NOTE_LIST. */
1889 PREV_INSN (insn) = note_list;
1890 NEXT_INSN (insn) = NULL_RTX;
1891 if (note_list)
1892 NEXT_INSN (note_list) = insn;
1893 note_list = insn;
1894 break;
1895 }
1896
1897 gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
1898 }
1899 }
1900
1901
1902 /* Return the head and tail pointers of ebb starting at BEG and ending
1903 at END. */
1904 void
1905 get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
1906 {
1907 rtx beg_head = BB_HEAD (beg);
1908 rtx beg_tail = BB_END (beg);
1909 rtx end_head = BB_HEAD (end);
1910 rtx end_tail = BB_END (end);
1911
1912 /* Don't include any notes or labels at the beginning of the BEG
1913 basic block, or notes at the end of the END basic blocks. */
1914
1915 if (LABEL_P (beg_head))
1916 beg_head = NEXT_INSN (beg_head);
1917
1918 while (beg_head != beg_tail)
1919 if (NOTE_P (beg_head) || BOUNDARY_DEBUG_INSN_P (beg_head))
1920 beg_head = NEXT_INSN (beg_head);
1921 else
1922 break;
1923
1924 *headp = beg_head;
1925
1926 if (beg == end)
1927 end_head = beg_head;
1928 else if (LABEL_P (end_head))
1929 end_head = NEXT_INSN (end_head);
1930
1931 while (end_head != end_tail)
1932 if (NOTE_P (end_tail) || BOUNDARY_DEBUG_INSN_P (end_tail))
1933 end_tail = PREV_INSN (end_tail);
1934 else
1935 break;
1936
1937 *tailp = end_tail;
1938 }
1939
1940 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
1941
1942 int
1943 no_real_insns_p (const_rtx head, const_rtx tail)
1944 {
1945 while (head != NEXT_INSN (tail))
1946 {
1947 if (!NOTE_P (head) && !LABEL_P (head)
1948 && !BOUNDARY_DEBUG_INSN_P (head))
1949 return 0;
1950 head = NEXT_INSN (head);
1951 }
1952 return 1;
1953 }
1954
1955 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
1956 previously found among the insns. Insert them just before HEAD. */
1957 rtx
1958 restore_other_notes (rtx head, basic_block head_bb)
1959 {
1960 if (note_list != 0)
1961 {
1962 rtx note_head = note_list;
1963
1964 if (head)
1965 head_bb = BLOCK_FOR_INSN (head);
1966 else
1967 head = NEXT_INSN (bb_note (head_bb));
1968
1969 while (PREV_INSN (note_head))
1970 {
1971 set_block_for_insn (note_head, head_bb);
1972 note_head = PREV_INSN (note_head);
1973 }
1974 /* In the above cycle we've missed this note. */
1975 set_block_for_insn (note_head, head_bb);
1976
1977 PREV_INSN (note_head) = PREV_INSN (head);
1978 NEXT_INSN (PREV_INSN (head)) = note_head;
1979 PREV_INSN (head) = note_list;
1980 NEXT_INSN (note_list) = head;
1981
1982 if (BLOCK_FOR_INSN (head) != head_bb)
1983 BB_END (head_bb) = note_list;
1984
1985 head = note_head;
1986 }
1987
1988 return head;
1989 }
1990
1991 /* Move insns that became ready to fire from queue to ready list. */
1992
1993 static void
1994 queue_to_ready (struct ready_list *ready)
1995 {
1996 rtx insn;
1997 rtx link;
1998 rtx skip_insn;
1999
2000 q_ptr = NEXT_Q (q_ptr);
2001
2002 if (dbg_cnt (sched_insn) == false)
2003 /* If debug counter is activated do not requeue insn next after
2004 last_scheduled_insn. */
2005 skip_insn = next_nonnote_nondebug_insn (last_scheduled_insn);
2006 else
2007 skip_insn = NULL_RTX;
2008
2009 /* Add all pending insns that can be scheduled without stalls to the
2010 ready list. */
2011 for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
2012 {
2013 insn = XEXP (link, 0);
2014 q_size -= 1;
2015
2016 if (sched_verbose >= 2)
2017 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
2018 (*current_sched_info->print_insn) (insn, 0));
2019
2020 /* If the ready list is full, delay the insn for 1 cycle.
2021 See the comment in schedule_block for the rationale. */
2022 if (!reload_completed
2023 && ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
2024 && !SCHED_GROUP_P (insn)
2025 && insn != skip_insn)
2026 {
2027 if (sched_verbose >= 2)
2028 fprintf (sched_dump, "requeued because ready full\n");
2029 queue_insn (insn, 1);
2030 }
2031 else
2032 {
2033 ready_add (ready, insn, false);
2034 if (sched_verbose >= 2)
2035 fprintf (sched_dump, "moving to ready without stalls\n");
2036 }
2037 }
2038 free_INSN_LIST_list (&insn_queue[q_ptr]);
2039
2040 /* If there are no ready insns, stall until one is ready and add all
2041 of the pending insns at that point to the ready list. */
2042 if (ready->n_ready == 0)
2043 {
2044 int stalls;
2045
2046 for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
2047 {
2048 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
2049 {
2050 for (; link; link = XEXP (link, 1))
2051 {
2052 insn = XEXP (link, 0);
2053 q_size -= 1;
2054
2055 if (sched_verbose >= 2)
2056 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
2057 (*current_sched_info->print_insn) (insn, 0));
2058
2059 ready_add (ready, insn, false);
2060 if (sched_verbose >= 2)
2061 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
2062 }
2063 free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
2064
2065 advance_one_cycle ();
2066
2067 break;
2068 }
2069
2070 advance_one_cycle ();
2071 }
2072
2073 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
2074 clock_var += stalls;
2075 }
2076 }
2077
2078 /* Used by early_queue_to_ready. Determines whether it is "ok" to
2079 prematurely move INSN from the queue to the ready list. Currently,
2080 if a target defines the hook 'is_costly_dependence', this function
2081 uses the hook to check whether there exist any dependences which are
2082 considered costly by the target, between INSN and other insns that
2083 have already been scheduled. Dependences are checked up to Y cycles
2084 back, with default Y=1; The flag -fsched-stalled-insns-dep=Y allows
2085 controlling this value.
2086 (Other considerations could be taken into account instead (or in
2087 addition) depending on user flags and target hooks. */
2088
2089 static bool
2090 ok_for_early_queue_removal (rtx insn)
2091 {
2092 int n_cycles;
2093 rtx prev_insn = last_scheduled_insn;
2094
2095 if (targetm.sched.is_costly_dependence)
2096 {
2097 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
2098 {
2099 for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
2100 {
2101 int cost;
2102
2103 if (prev_insn == current_sched_info->prev_head)
2104 {
2105 prev_insn = NULL;
2106 break;
2107 }
2108
2109 if (!NOTE_P (prev_insn))
2110 {
2111 dep_t dep;
2112
2113 dep = sd_find_dep_between (prev_insn, insn, true);
2114
2115 if (dep != NULL)
2116 {
2117 cost = dep_cost (dep);
2118
2119 if (targetm.sched.is_costly_dependence (dep, cost,
2120 flag_sched_stalled_insns_dep - n_cycles))
2121 return false;
2122 }
2123 }
2124
2125 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
2126 break;
2127 }
2128
2129 if (!prev_insn)
2130 break;
2131 prev_insn = PREV_INSN (prev_insn);
2132 }
2133 }
2134
2135 return true;
2136 }
2137
2138
2139 /* Remove insns from the queue, before they become "ready" with respect
2140 to FU latency considerations. */
2141
2142 static int
2143 early_queue_to_ready (state_t state, struct ready_list *ready)
2144 {
2145 rtx insn;
2146 rtx link;
2147 rtx next_link;
2148 rtx prev_link;
2149 bool move_to_ready;
2150 int cost;
2151 state_t temp_state = alloca (dfa_state_size);
2152 int stalls;
2153 int insns_removed = 0;
2154
2155 /*
2156 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
2157 function:
2158
2159 X == 0: There is no limit on how many queued insns can be removed
2160 prematurely. (flag_sched_stalled_insns = -1).
2161
2162 X >= 1: Only X queued insns can be removed prematurely in each
2163 invocation. (flag_sched_stalled_insns = X).
2164
2165 Otherwise: Early queue removal is disabled.
2166 (flag_sched_stalled_insns = 0)
2167 */
2168
2169 if (! flag_sched_stalled_insns)
2170 return 0;
2171
2172 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
2173 {
2174 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
2175 {
2176 if (sched_verbose > 6)
2177 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
2178
2179 prev_link = 0;
2180 while (link)
2181 {
2182 next_link = XEXP (link, 1);
2183 insn = XEXP (link, 0);
2184 if (insn && sched_verbose > 6)
2185 print_rtl_single (sched_dump, insn);
2186
2187 memcpy (temp_state, state, dfa_state_size);
2188 if (recog_memoized (insn) < 0)
2189 /* non-negative to indicate that it's not ready
2190 to avoid infinite Q->R->Q->R... */
2191 cost = 0;
2192 else
2193 cost = state_transition (temp_state, insn);
2194
2195 if (sched_verbose >= 6)
2196 fprintf (sched_dump, "transition cost = %d\n", cost);
2197
2198 move_to_ready = false;
2199 if (cost < 0)
2200 {
2201 move_to_ready = ok_for_early_queue_removal (insn);
2202 if (move_to_ready == true)
2203 {
2204 /* move from Q to R */
2205 q_size -= 1;
2206 ready_add (ready, insn, false);
2207
2208 if (prev_link)
2209 XEXP (prev_link, 1) = next_link;
2210 else
2211 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
2212
2213 free_INSN_LIST_node (link);
2214
2215 if (sched_verbose >= 2)
2216 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
2217 (*current_sched_info->print_insn) (insn, 0));
2218
2219 insns_removed++;
2220 if (insns_removed == flag_sched_stalled_insns)
2221 /* Remove no more than flag_sched_stalled_insns insns
2222 from Q at a time. */
2223 return insns_removed;
2224 }
2225 }
2226
2227 if (move_to_ready == false)
2228 prev_link = link;
2229
2230 link = next_link;
2231 } /* while link */
2232 } /* if link */
2233
2234 } /* for stalls.. */
2235
2236 return insns_removed;
2237 }
2238
2239
2240 /* Print the ready list for debugging purposes. Callable from debugger. */
2241
2242 static void
2243 debug_ready_list (struct ready_list *ready)
2244 {
2245 rtx *p;
2246 int i;
2247
2248 if (ready->n_ready == 0)
2249 {
2250 fprintf (sched_dump, "\n");
2251 return;
2252 }
2253
2254 p = ready_lastpos (ready);
2255 for (i = 0; i < ready->n_ready; i++)
2256 {
2257 fprintf (sched_dump, " %s:%d",
2258 (*current_sched_info->print_insn) (p[i], 0),
2259 INSN_LUID (p[i]));
2260 if (sched_pressure_p)
2261 fprintf (sched_dump, "(cost=%d",
2262 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
2263 if (INSN_TICK (p[i]) > clock_var)
2264 fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
2265 if (sched_pressure_p)
2266 fprintf (sched_dump, ")");
2267 }
2268 fprintf (sched_dump, "\n");
2269 }
2270
2271 /* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
2272 NOTEs. This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
2273 replaces the epilogue note in the correct basic block. */
2274 void
2275 reemit_notes (rtx insn)
2276 {
2277 rtx note, last = insn;
2278
2279 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2280 {
2281 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
2282 {
2283 enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
2284
2285 last = emit_note_before (note_type, last);
2286 remove_note (insn, note);
2287 }
2288 }
2289 }
2290
2291 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
2292 static void
2293 move_insn (rtx insn, rtx last, rtx nt)
2294 {
2295 if (PREV_INSN (insn) != last)
2296 {
2297 basic_block bb;
2298 rtx note;
2299 int jump_p = 0;
2300
2301 bb = BLOCK_FOR_INSN (insn);
2302
2303 /* BB_HEAD is either LABEL or NOTE. */
2304 gcc_assert (BB_HEAD (bb) != insn);
2305
2306 if (BB_END (bb) == insn)
2307 /* If this is last instruction in BB, move end marker one
2308 instruction up. */
2309 {
2310 /* Jumps are always placed at the end of basic block. */
2311 jump_p = control_flow_insn_p (insn);
2312
2313 gcc_assert (!jump_p
2314 || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
2315 && IS_SPECULATION_BRANCHY_CHECK_P (insn))
2316 || (common_sched_info->sched_pass_id
2317 == SCHED_EBB_PASS));
2318
2319 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
2320
2321 BB_END (bb) = PREV_INSN (insn);
2322 }
2323
2324 gcc_assert (BB_END (bb) != last);
2325
2326 if (jump_p)
2327 /* We move the block note along with jump. */
2328 {
2329 gcc_assert (nt);
2330
2331 note = NEXT_INSN (insn);
2332 while (NOTE_NOT_BB_P (note) && note != nt)
2333 note = NEXT_INSN (note);
2334
2335 if (note != nt
2336 && (LABEL_P (note)
2337 || BARRIER_P (note)))
2338 note = NEXT_INSN (note);
2339
2340 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
2341 }
2342 else
2343 note = insn;
2344
2345 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
2346 PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
2347
2348 NEXT_INSN (note) = NEXT_INSN (last);
2349 PREV_INSN (NEXT_INSN (last)) = note;
2350
2351 NEXT_INSN (last) = insn;
2352 PREV_INSN (insn) = last;
2353
2354 bb = BLOCK_FOR_INSN (last);
2355
2356 if (jump_p)
2357 {
2358 fix_jump_move (insn);
2359
2360 if (BLOCK_FOR_INSN (insn) != bb)
2361 move_block_after_check (insn);
2362
2363 gcc_assert (BB_END (bb) == last);
2364 }
2365
2366 df_insn_change_bb (insn, bb);
2367
2368 /* Update BB_END, if needed. */
2369 if (BB_END (bb) == last)
2370 BB_END (bb) = insn;
2371 }
2372
2373 SCHED_GROUP_P (insn) = 0;
2374 }
2375
2376 /* Return true if scheduling INSN will finish current clock cycle. */
2377 static bool
2378 insn_finishes_cycle_p (rtx insn)
2379 {
2380 if (SCHED_GROUP_P (insn))
2381 /* After issuing INSN, rest of the sched_group will be forced to issue
2382 in order. Don't make any plans for the rest of cycle. */
2383 return true;
2384
2385 /* Finishing the block will, apparently, finish the cycle. */
2386 if (current_sched_info->insn_finishes_block_p
2387 && current_sched_info->insn_finishes_block_p (insn))
2388 return true;
2389
2390 return false;
2391 }
2392
2393 /* The following structure describe an entry of the stack of choices. */
2394 struct choice_entry
2395 {
2396 /* Ordinal number of the issued insn in the ready queue. */
2397 int index;
2398 /* The number of the rest insns whose issues we should try. */
2399 int rest;
2400 /* The number of issued essential insns. */
2401 int n;
2402 /* State after issuing the insn. */
2403 state_t state;
2404 };
2405
2406 /* The following array is used to implement a stack of choices used in
2407 function max_issue. */
2408 static struct choice_entry *choice_stack;
2409
2410 /* The following variable value is number of essential insns issued on
2411 the current cycle. An insn is essential one if it changes the
2412 processors state. */
2413 int cycle_issued_insns;
2414
2415 /* This holds the value of the target dfa_lookahead hook. */
2416 int dfa_lookahead;
2417
2418 /* The following variable value is maximal number of tries of issuing
2419 insns for the first cycle multipass insn scheduling. We define
2420 this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE). We would not
2421 need this constraint if all real insns (with non-negative codes)
2422 had reservations because in this case the algorithm complexity is
2423 O(DFA_LOOKAHEAD**ISSUE_RATE). Unfortunately, the dfa descriptions
2424 might be incomplete and such insn might occur. For such
2425 descriptions, the complexity of algorithm (without the constraint)
2426 could achieve DFA_LOOKAHEAD ** N , where N is the queue length. */
2427 static int max_lookahead_tries;
2428
2429 /* The following value is value of hook
2430 `first_cycle_multipass_dfa_lookahead' at the last call of
2431 `max_issue'. */
2432 static int cached_first_cycle_multipass_dfa_lookahead = 0;
2433
2434 /* The following value is value of `issue_rate' at the last call of
2435 `sched_init'. */
2436 static int cached_issue_rate = 0;
2437
2438 /* The following function returns maximal (or close to maximal) number
2439 of insns which can be issued on the same cycle and one of which
2440 insns is insns with the best rank (the first insn in READY). To
2441 make this function tries different samples of ready insns. READY
2442 is current queue `ready'. Global array READY_TRY reflects what
2443 insns are already issued in this try. The function stops immediately,
2444 if it reached the such a solution, that all instruction can be issued.
2445 INDEX will contain index of the best insn in READY. The following
2446 function is used only for first cycle multipass scheduling.
2447
2448 PRIVILEGED_N >= 0
2449
2450 This function expects recognized insns only. All USEs,
2451 CLOBBERs, etc must be filtered elsewhere. */
2452 int
2453 max_issue (struct ready_list *ready, int privileged_n, state_t state,
2454 int *index)
2455 {
2456 int n, i, all, n_ready, best, delay, tries_num;
2457 int more_issue;
2458 struct choice_entry *top;
2459 rtx insn;
2460
2461 n_ready = ready->n_ready;
2462 gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
2463 && privileged_n <= n_ready);
2464
2465 /* Init MAX_LOOKAHEAD_TRIES. */
2466 if (cached_first_cycle_multipass_dfa_lookahead != dfa_lookahead)
2467 {
2468 cached_first_cycle_multipass_dfa_lookahead = dfa_lookahead;
2469 max_lookahead_tries = 100;
2470 for (i = 0; i < issue_rate; i++)
2471 max_lookahead_tries *= dfa_lookahead;
2472 }
2473
2474 /* Init max_points. */
2475 more_issue = issue_rate - cycle_issued_insns;
2476 gcc_assert (more_issue >= 0);
2477
2478 /* The number of the issued insns in the best solution. */
2479 best = 0;
2480
2481 top = choice_stack;
2482
2483 /* Set initial state of the search. */
2484 memcpy (top->state, state, dfa_state_size);
2485 top->rest = dfa_lookahead;
2486 top->n = 0;
2487
2488 /* Count the number of the insns to search among. */
2489 for (all = i = 0; i < n_ready; i++)
2490 if (!ready_try [i])
2491 all++;
2492
2493 /* I is the index of the insn to try next. */
2494 i = 0;
2495 tries_num = 0;
2496 for (;;)
2497 {
2498 if (/* If we've reached a dead end or searched enough of what we have
2499 been asked... */
2500 top->rest == 0
2501 /* or have nothing else to try... */
2502 || i >= n_ready
2503 /* or should not issue more. */
2504 || top->n >= more_issue)
2505 {
2506 /* ??? (... || i == n_ready). */
2507 gcc_assert (i <= n_ready);
2508
2509 /* We should not issue more than issue_rate instructions. */
2510 gcc_assert (top->n <= more_issue);
2511
2512 if (top == choice_stack)
2513 break;
2514
2515 if (best < top - choice_stack)
2516 {
2517 if (privileged_n)
2518 {
2519 n = privileged_n;
2520 /* Try to find issued privileged insn. */
2521 while (n && !ready_try[--n]);
2522 }
2523
2524 if (/* If all insns are equally good... */
2525 privileged_n == 0
2526 /* Or a privileged insn will be issued. */
2527 || ready_try[n])
2528 /* Then we have a solution. */
2529 {
2530 best = top - choice_stack;
2531 /* This is the index of the insn issued first in this
2532 solution. */
2533 *index = choice_stack [1].index;
2534 if (top->n == more_issue || best == all)
2535 break;
2536 }
2537 }
2538
2539 /* Set ready-list index to point to the last insn
2540 ('i++' below will advance it to the next insn). */
2541 i = top->index;
2542
2543 /* Backtrack. */
2544 ready_try [i] = 0;
2545 top--;
2546 memcpy (state, top->state, dfa_state_size);
2547 }
2548 else if (!ready_try [i])
2549 {
2550 tries_num++;
2551 if (tries_num > max_lookahead_tries)
2552 break;
2553 insn = ready_element (ready, i);
2554 delay = state_transition (state, insn);
2555 if (delay < 0)
2556 {
2557 if (state_dead_lock_p (state)
2558 || insn_finishes_cycle_p (insn))
2559 /* We won't issue any more instructions in the next
2560 choice_state. */
2561 top->rest = 0;
2562 else
2563 top->rest--;
2564
2565 n = top->n;
2566 if (memcmp (top->state, state, dfa_state_size) != 0)
2567 n++;
2568
2569 /* Advance to the next choice_entry. */
2570 top++;
2571 /* Initialize it. */
2572 top->rest = dfa_lookahead;
2573 top->index = i;
2574 top->n = n;
2575 memcpy (top->state, state, dfa_state_size);
2576
2577 ready_try [i] = 1;
2578 i = -1;
2579 }
2580 }
2581
2582 /* Increase ready-list index. */
2583 i++;
2584 }
2585
2586 /* Restore the original state of the DFA. */
2587 memcpy (state, choice_stack->state, dfa_state_size);
2588
2589 return best;
2590 }
2591
2592 /* The following function chooses insn from READY and modifies
2593 READY. The following function is used only for first
2594 cycle multipass scheduling.
2595 Return:
2596 -1 if cycle should be advanced,
2597 0 if INSN_PTR is set to point to the desirable insn,
2598 1 if choose_ready () should be restarted without advancing the cycle. */
2599 static int
2600 choose_ready (struct ready_list *ready, rtx *insn_ptr)
2601 {
2602 int lookahead;
2603
2604 if (dbg_cnt (sched_insn) == false)
2605 {
2606 rtx insn;
2607
2608 insn = next_nonnote_insn (last_scheduled_insn);
2609
2610 if (QUEUE_INDEX (insn) == QUEUE_READY)
2611 /* INSN is in the ready_list. */
2612 {
2613 ready_remove_insn (insn);
2614 *insn_ptr = insn;
2615 return 0;
2616 }
2617
2618 /* INSN is in the queue. Advance cycle to move it to the ready list. */
2619 return -1;
2620 }
2621
2622 lookahead = 0;
2623
2624 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
2625 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
2626 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
2627 || DEBUG_INSN_P (ready_element (ready, 0)))
2628 {
2629 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
2630 *insn_ptr = ready_remove_first_dispatch (ready);
2631 else
2632 *insn_ptr = ready_remove_first (ready);
2633
2634 return 0;
2635 }
2636 else
2637 {
2638 /* Try to choose the better insn. */
2639 int index = 0, i, n;
2640 rtx insn;
2641 int try_data = 1, try_control = 1;
2642 ds_t ts;
2643
2644 insn = ready_element (ready, 0);
2645 if (INSN_CODE (insn) < 0)
2646 {
2647 *insn_ptr = ready_remove_first (ready);
2648 return 0;
2649 }
2650
2651 if (spec_info
2652 && spec_info->flags & (PREFER_NON_DATA_SPEC
2653 | PREFER_NON_CONTROL_SPEC))
2654 {
2655 for (i = 0, n = ready->n_ready; i < n; i++)
2656 {
2657 rtx x;
2658 ds_t s;
2659
2660 x = ready_element (ready, i);
2661 s = TODO_SPEC (x);
2662
2663 if (spec_info->flags & PREFER_NON_DATA_SPEC
2664 && !(s & DATA_SPEC))
2665 {
2666 try_data = 0;
2667 if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
2668 || !try_control)
2669 break;
2670 }
2671
2672 if (spec_info->flags & PREFER_NON_CONTROL_SPEC
2673 && !(s & CONTROL_SPEC))
2674 {
2675 try_control = 0;
2676 if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
2677 break;
2678 }
2679 }
2680 }
2681
2682 ts = TODO_SPEC (insn);
2683 if ((ts & SPECULATIVE)
2684 && (((!try_data && (ts & DATA_SPEC))
2685 || (!try_control && (ts & CONTROL_SPEC)))
2686 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2687 && !targetm.sched
2688 .first_cycle_multipass_dfa_lookahead_guard_spec (insn))))
2689 /* Discard speculative instruction that stands first in the ready
2690 list. */
2691 {
2692 change_queue_index (insn, 1);
2693 return 1;
2694 }
2695
2696 ready_try[0] = 0;
2697
2698 for (i = 1; i < ready->n_ready; i++)
2699 {
2700 insn = ready_element (ready, i);
2701
2702 ready_try [i]
2703 = ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2704 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC)));
2705 }
2706
2707 /* Let the target filter the search space. */
2708 for (i = 1; i < ready->n_ready; i++)
2709 if (!ready_try[i])
2710 {
2711 insn = ready_element (ready, i);
2712
2713 /* If this insn is recognizable we should have already
2714 recognized it earlier.
2715 ??? Not very clear where this is supposed to be done.
2716 See dep_cost_1. */
2717 gcc_checking_assert (INSN_CODE (insn) >= 0
2718 || recog_memoized (insn) < 0);
2719
2720 ready_try [i]
2721 = (/* INSN_CODE check can be omitted here as it is also done later
2722 in max_issue (). */
2723 INSN_CODE (insn) < 0
2724 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2725 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2726 (insn)));
2727 }
2728
2729 if (max_issue (ready, 1, curr_state, &index) == 0)
2730 {
2731 *insn_ptr = ready_remove_first (ready);
2732 if (sched_verbose >= 4)
2733 fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
2734 (*current_sched_info->print_insn) (*insn_ptr, 0));
2735 return 0;
2736 }
2737 else
2738 {
2739 if (sched_verbose >= 4)
2740 fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
2741 (*current_sched_info->print_insn)
2742 (ready_element (ready, index), 0));
2743
2744 *insn_ptr = ready_remove (ready, index);
2745 return 0;
2746 }
2747 }
2748 }
2749
2750 /* Use forward list scheduling to rearrange insns of block pointed to by
2751 TARGET_BB, possibly bringing insns from subsequent blocks in the same
2752 region. */
2753
2754 void
2755 schedule_block (basic_block *target_bb)
2756 {
2757 int i, first_cycle_insn_p;
2758 int can_issue_more;
2759 state_t temp_state = NULL; /* It is used for multipass scheduling. */
2760 int sort_p, advance, start_clock_var;
2761
2762 /* Head/tail info for this block. */
2763 rtx prev_head = current_sched_info->prev_head;
2764 rtx next_tail = current_sched_info->next_tail;
2765 rtx head = NEXT_INSN (prev_head);
2766 rtx tail = PREV_INSN (next_tail);
2767
2768 /* We used to have code to avoid getting parameters moved from hard
2769 argument registers into pseudos.
2770
2771 However, it was removed when it proved to be of marginal benefit
2772 and caused problems because schedule_block and compute_forward_dependences
2773 had different notions of what the "head" insn was. */
2774
2775 gcc_assert (head != tail || INSN_P (head));
2776
2777 haifa_recovery_bb_recently_added_p = false;
2778
2779 /* Debug info. */
2780 if (sched_verbose)
2781 dump_new_block_header (0, *target_bb, head, tail);
2782
2783 state_reset (curr_state);
2784
2785 /* Clear the ready list. */
2786 ready.first = ready.veclen - 1;
2787 ready.n_ready = 0;
2788 ready.n_debug = 0;
2789
2790 /* It is used for first cycle multipass scheduling. */
2791 temp_state = alloca (dfa_state_size);
2792
2793 if (targetm.sched.init)
2794 targetm.sched.init (sched_dump, sched_verbose, ready.veclen);
2795
2796 /* We start inserting insns after PREV_HEAD. */
2797 last_scheduled_insn = prev_head;
2798
2799 gcc_assert ((NOTE_P (last_scheduled_insn)
2800 || BOUNDARY_DEBUG_INSN_P (last_scheduled_insn))
2801 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
2802
2803 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
2804 queue. */
2805 q_ptr = 0;
2806 q_size = 0;
2807
2808 insn_queue = XALLOCAVEC (rtx, max_insn_queue_index + 1);
2809 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
2810
2811 /* Start just before the beginning of time. */
2812 clock_var = -1;
2813
2814 /* We need queue and ready lists and clock_var be initialized
2815 in try_ready () (which is called through init_ready_list ()). */
2816 (*current_sched_info->init_ready_list) ();
2817
2818 /* The algorithm is O(n^2) in the number of ready insns at any given
2819 time in the worst case. Before reload we are more likely to have
2820 big lists so truncate them to a reasonable size. */
2821 if (!reload_completed
2822 && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
2823 {
2824 ready_sort (&ready);
2825
2826 /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
2827 If there are debug insns, we know they're first. */
2828 for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
2829 if (!SCHED_GROUP_P (ready_element (&ready, i)))
2830 break;
2831
2832 if (sched_verbose >= 2)
2833 {
2834 fprintf (sched_dump,
2835 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
2836 fprintf (sched_dump,
2837 ";;\t\t before reload => truncated to %d insns\n", i);
2838 }
2839
2840 /* Delay all insns past it for 1 cycle. If debug counter is
2841 activated make an exception for the insn right after
2842 last_scheduled_insn. */
2843 {
2844 rtx skip_insn;
2845
2846 if (dbg_cnt (sched_insn) == false)
2847 skip_insn = next_nonnote_insn (last_scheduled_insn);
2848 else
2849 skip_insn = NULL_RTX;
2850
2851 while (i < ready.n_ready)
2852 {
2853 rtx insn;
2854
2855 insn = ready_remove (&ready, i);
2856
2857 if (insn != skip_insn)
2858 queue_insn (insn, 1);
2859 }
2860 }
2861 }
2862
2863 /* Now we can restore basic block notes and maintain precise cfg. */
2864 restore_bb_notes (*target_bb);
2865
2866 last_clock_var = -1;
2867
2868 advance = 0;
2869
2870 sort_p = TRUE;
2871 /* Loop until all the insns in BB are scheduled. */
2872 while ((*current_sched_info->schedule_more_p) ())
2873 {
2874 do
2875 {
2876 start_clock_var = clock_var;
2877
2878 clock_var++;
2879
2880 advance_one_cycle ();
2881
2882 /* Add to the ready list all pending insns that can be issued now.
2883 If there are no ready insns, increment clock until one
2884 is ready and add all pending insns at that point to the ready
2885 list. */
2886 queue_to_ready (&ready);
2887
2888 gcc_assert (ready.n_ready);
2889
2890 if (sched_verbose >= 2)
2891 {
2892 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
2893 debug_ready_list (&ready);
2894 }
2895 advance -= clock_var - start_clock_var;
2896 }
2897 while (advance > 0);
2898
2899 if (sort_p)
2900 {
2901 /* Sort the ready list based on priority. */
2902 ready_sort (&ready);
2903
2904 if (sched_verbose >= 2)
2905 {
2906 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
2907 debug_ready_list (&ready);
2908 }
2909 }
2910
2911 /* We don't want md sched reorder to even see debug isns, so put
2912 them out right away. */
2913 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
2914 {
2915 if (control_flow_insn_p (last_scheduled_insn))
2916 {
2917 *target_bb = current_sched_info->advance_target_bb
2918 (*target_bb, 0);
2919
2920 if (sched_verbose)
2921 {
2922 rtx x;
2923
2924 x = next_real_insn (last_scheduled_insn);
2925 gcc_assert (x);
2926 dump_new_block_header (1, *target_bb, x, tail);
2927 }
2928
2929 last_scheduled_insn = bb_note (*target_bb);
2930 }
2931
2932 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
2933 {
2934 rtx insn = ready_remove_first (&ready);
2935 gcc_assert (DEBUG_INSN_P (insn));
2936 (*current_sched_info->begin_schedule_ready) (insn,
2937 last_scheduled_insn);
2938 move_insn (insn, last_scheduled_insn,
2939 current_sched_info->next_tail);
2940 last_scheduled_insn = insn;
2941 advance = schedule_insn (insn);
2942 gcc_assert (advance == 0);
2943 if (ready.n_ready > 0)
2944 ready_sort (&ready);
2945 }
2946
2947 if (!ready.n_ready)
2948 continue;
2949 }
2950
2951 /* Allow the target to reorder the list, typically for
2952 better instruction bundling. */
2953 if (sort_p && targetm.sched.reorder
2954 && (ready.n_ready == 0
2955 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2956 can_issue_more =
2957 targetm.sched.reorder (sched_dump, sched_verbose,
2958 ready_lastpos (&ready),
2959 &ready.n_ready, clock_var);
2960 else
2961 can_issue_more = issue_rate;
2962
2963 first_cycle_insn_p = 1;
2964 cycle_issued_insns = 0;
2965 for (;;)
2966 {
2967 rtx insn;
2968 int cost;
2969 bool asm_p = false;
2970
2971 if (sched_verbose >= 2)
2972 {
2973 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
2974 clock_var);
2975 debug_ready_list (&ready);
2976 if (sched_pressure_p)
2977 print_curr_reg_pressure ();
2978 }
2979
2980 if (ready.n_ready == 0
2981 && can_issue_more
2982 && reload_completed)
2983 {
2984 /* Allow scheduling insns directly from the queue in case
2985 there's nothing better to do (ready list is empty) but
2986 there are still vacant dispatch slots in the current cycle. */
2987 if (sched_verbose >= 6)
2988 fprintf (sched_dump,";;\t\tSecond chance\n");
2989 memcpy (temp_state, curr_state, dfa_state_size);
2990 if (early_queue_to_ready (temp_state, &ready))
2991 ready_sort (&ready);
2992 }
2993
2994 if (ready.n_ready == 0
2995 || !can_issue_more
2996 || state_dead_lock_p (curr_state)
2997 || !(*current_sched_info->schedule_more_p) ())
2998 break;
2999
3000 /* Select and remove the insn from the ready list. */
3001 if (sort_p)
3002 {
3003 int res;
3004
3005 insn = NULL_RTX;
3006 res = choose_ready (&ready, &insn);
3007
3008 if (res < 0)
3009 /* Finish cycle. */
3010 break;
3011 if (res > 0)
3012 /* Restart choose_ready (). */
3013 continue;
3014
3015 gcc_assert (insn != NULL_RTX);
3016 }
3017 else
3018 insn = ready_remove_first (&ready);
3019
3020 if (sched_pressure_p && INSN_TICK (insn) > clock_var)
3021 {
3022 ready_add (&ready, insn, true);
3023 advance = 1;
3024 break;
3025 }
3026
3027 if (targetm.sched.dfa_new_cycle
3028 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
3029 insn, last_clock_var,
3030 clock_var, &sort_p))
3031 /* SORT_P is used by the target to override sorting
3032 of the ready list. This is needed when the target
3033 has modified its internal structures expecting that
3034 the insn will be issued next. As we need the insn
3035 to have the highest priority (so it will be returned by
3036 the ready_remove_first call above), we invoke
3037 ready_add (&ready, insn, true).
3038 But, still, there is one issue: INSN can be later
3039 discarded by scheduler's front end through
3040 current_sched_info->can_schedule_ready_p, hence, won't
3041 be issued next. */
3042 {
3043 ready_add (&ready, insn, true);
3044 break;
3045 }
3046
3047 sort_p = TRUE;
3048 memcpy (temp_state, curr_state, dfa_state_size);
3049 if (recog_memoized (insn) < 0)
3050 {
3051 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
3052 || asm_noperands (PATTERN (insn)) >= 0);
3053 if (!first_cycle_insn_p && asm_p)
3054 /* This is asm insn which is tried to be issued on the
3055 cycle not first. Issue it on the next cycle. */
3056 cost = 1;
3057 else
3058 /* A USE insn, or something else we don't need to
3059 understand. We can't pass these directly to
3060 state_transition because it will trigger a
3061 fatal error for unrecognizable insns. */
3062 cost = 0;
3063 }
3064 else if (sched_pressure_p)
3065 cost = 0;
3066 else
3067 {
3068 cost = state_transition (temp_state, insn);
3069 if (cost < 0)
3070 cost = 0;
3071 else if (cost == 0)
3072 cost = 1;
3073 }
3074
3075 if (cost >= 1)
3076 {
3077 queue_insn (insn, cost);
3078 if (SCHED_GROUP_P (insn))
3079 {
3080 advance = cost;
3081 break;
3082 }
3083
3084 continue;
3085 }
3086
3087 if (current_sched_info->can_schedule_ready_p
3088 && ! (*current_sched_info->can_schedule_ready_p) (insn))
3089 /* We normally get here only if we don't want to move
3090 insn from the split block. */
3091 {
3092 TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
3093 continue;
3094 }
3095
3096 /* DECISION is made. */
3097
3098 if (TODO_SPEC (insn) & SPECULATIVE)
3099 generate_recovery_code (insn);
3100
3101 if (control_flow_insn_p (last_scheduled_insn)
3102 /* This is used to switch basic blocks by request
3103 from scheduler front-end (actually, sched-ebb.c only).
3104 This is used to process blocks with single fallthru
3105 edge. If succeeding block has jump, it [jump] will try
3106 move at the end of current bb, thus corrupting CFG. */
3107 || current_sched_info->advance_target_bb (*target_bb, insn))
3108 {
3109 *target_bb = current_sched_info->advance_target_bb
3110 (*target_bb, 0);
3111
3112 if (sched_verbose)
3113 {
3114 rtx x;
3115
3116 x = next_real_insn (last_scheduled_insn);
3117 gcc_assert (x);
3118 dump_new_block_header (1, *target_bb, x, tail);
3119 }
3120
3121 last_scheduled_insn = bb_note (*target_bb);
3122 }
3123
3124 /* Update counters, etc in the scheduler's front end. */
3125 (*current_sched_info->begin_schedule_ready) (insn,
3126 last_scheduled_insn);
3127
3128 move_insn (insn, last_scheduled_insn, current_sched_info->next_tail);
3129
3130 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
3131 targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);
3132
3133 reemit_notes (insn);
3134 last_scheduled_insn = insn;
3135
3136 if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
3137 {
3138 cycle_issued_insns++;
3139 memcpy (curr_state, temp_state, dfa_state_size);
3140 }
3141
3142 if (targetm.sched.variable_issue)
3143 can_issue_more =
3144 targetm.sched.variable_issue (sched_dump, sched_verbose,
3145 insn, can_issue_more);
3146 /* A naked CLOBBER or USE generates no instruction, so do
3147 not count them against the issue rate. */
3148 else if (GET_CODE (PATTERN (insn)) != USE
3149 && GET_CODE (PATTERN (insn)) != CLOBBER)
3150 can_issue_more--;
3151 advance = schedule_insn (insn);
3152
3153 /* After issuing an asm insn we should start a new cycle. */
3154 if (advance == 0 && asm_p)
3155 advance = 1;
3156 if (advance != 0)
3157 break;
3158
3159 first_cycle_insn_p = 0;
3160
3161 /* Sort the ready list based on priority. This must be
3162 redone here, as schedule_insn may have readied additional
3163 insns that will not be sorted correctly. */
3164 if (ready.n_ready > 0)
3165 ready_sort (&ready);
3166
3167 /* Quickly go through debug insns such that md sched
3168 reorder2 doesn't have to deal with debug insns. */
3169 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
3170 && (*current_sched_info->schedule_more_p) ())
3171 {
3172 if (control_flow_insn_p (last_scheduled_insn))
3173 {
3174 *target_bb = current_sched_info->advance_target_bb
3175 (*target_bb, 0);
3176
3177 if (sched_verbose)
3178 {
3179 rtx x;
3180
3181 x = next_real_insn (last_scheduled_insn);
3182 gcc_assert (x);
3183 dump_new_block_header (1, *target_bb, x, tail);
3184 }
3185
3186 last_scheduled_insn = bb_note (*target_bb);
3187 }
3188
3189 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
3190 {
3191 insn = ready_remove_first (&ready);
3192 gcc_assert (DEBUG_INSN_P (insn));
3193 (*current_sched_info->begin_schedule_ready)
3194 (insn, last_scheduled_insn);
3195 move_insn (insn, last_scheduled_insn,
3196 current_sched_info->next_tail);
3197 advance = schedule_insn (insn);
3198 last_scheduled_insn = insn;
3199 gcc_assert (advance == 0);
3200 if (ready.n_ready > 0)
3201 ready_sort (&ready);
3202 }
3203 }
3204
3205 if (targetm.sched.reorder2
3206 && (ready.n_ready == 0
3207 || !SCHED_GROUP_P (ready_element (&ready, 0))))
3208 {
3209 can_issue_more =
3210 targetm.sched.reorder2 (sched_dump, sched_verbose,
3211 ready.n_ready
3212 ? ready_lastpos (&ready) : NULL,
3213 &ready.n_ready, clock_var);
3214 }
3215 }
3216 }
3217
3218 /* Debug info. */
3219 if (sched_verbose)
3220 {
3221 fprintf (sched_dump, ";;\tReady list (final): ");
3222 debug_ready_list (&ready);
3223 }
3224
3225 if (current_sched_info->queue_must_finish_empty)
3226 /* Sanity check -- queue must be empty now. Meaningless if region has
3227 multiple bbs. */
3228 gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
3229 else
3230 {
3231 /* We must maintain QUEUE_INDEX between blocks in region. */
3232 for (i = ready.n_ready - 1; i >= 0; i--)
3233 {
3234 rtx x;
3235
3236 x = ready_element (&ready, i);
3237 QUEUE_INDEX (x) = QUEUE_NOWHERE;
3238 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
3239 }
3240
3241 if (q_size)
3242 for (i = 0; i <= max_insn_queue_index; i++)
3243 {
3244 rtx link;
3245 for (link = insn_queue[i]; link; link = XEXP (link, 1))
3246 {
3247 rtx x;
3248
3249 x = XEXP (link, 0);
3250 QUEUE_INDEX (x) = QUEUE_NOWHERE;
3251 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
3252 }
3253 free_INSN_LIST_list (&insn_queue[i]);
3254 }
3255 }
3256
3257 if (sched_verbose)
3258 fprintf (sched_dump, ";; total time = %d\n", clock_var);
3259
3260 if (!current_sched_info->queue_must_finish_empty
3261 || haifa_recovery_bb_recently_added_p)
3262 {
3263 /* INSN_TICK (minimum clock tick at which the insn becomes
3264 ready) may be not correct for the insn in the subsequent
3265 blocks of the region. We should use a correct value of
3266 `clock_var' or modify INSN_TICK. It is better to keep
3267 clock_var value equal to 0 at the start of a basic block.
3268 Therefore we modify INSN_TICK here. */
3269 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
3270 }
3271
3272 if (targetm.sched.finish)
3273 {
3274 targetm.sched.finish (sched_dump, sched_verbose);
3275 /* Target might have added some instructions to the scheduled block
3276 in its md_finish () hook. These new insns don't have any data
3277 initialized and to identify them we extend h_i_d so that they'll
3278 get zero luids. */
3279 sched_init_luids (NULL, NULL, NULL, NULL);
3280 }
3281
3282 if (sched_verbose)
3283 fprintf (sched_dump, ";; new head = %d\n;; new tail = %d\n\n",
3284 INSN_UID (head), INSN_UID (tail));
3285
3286 /* Update head/tail boundaries. */
3287 head = NEXT_INSN (prev_head);
3288 tail = last_scheduled_insn;
3289
3290 head = restore_other_notes (head, NULL);
3291
3292 current_sched_info->head = head;
3293 current_sched_info->tail = tail;
3294 }
3295 \f
3296 /* Set_priorities: compute priority of each insn in the block. */
3297
3298 int
3299 set_priorities (rtx head, rtx tail)
3300 {
3301 rtx insn;
3302 int n_insn;
3303 int sched_max_insns_priority =
3304 current_sched_info->sched_max_insns_priority;
3305 rtx prev_head;
3306
3307 if (head == tail && (! INSN_P (head) || BOUNDARY_DEBUG_INSN_P (head)))
3308 gcc_unreachable ();
3309
3310 n_insn = 0;
3311
3312 prev_head = PREV_INSN (head);
3313 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
3314 {
3315 if (!INSN_P (insn))
3316 continue;
3317
3318 n_insn++;
3319 (void) priority (insn);
3320
3321 gcc_assert (INSN_PRIORITY_KNOWN (insn));
3322
3323 sched_max_insns_priority = MAX (sched_max_insns_priority,
3324 INSN_PRIORITY (insn));
3325 }
3326
3327 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
3328
3329 return n_insn;
3330 }
3331
3332 /* Set dump and sched_verbose for the desired debugging output. If no
3333 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
3334 For -fsched-verbose=N, N>=10, print everything to stderr. */
3335 void
3336 setup_sched_dump (void)
3337 {
3338 sched_verbose = sched_verbose_param;
3339 if (sched_verbose_param == 0 && dump_file)
3340 sched_verbose = 1;
3341 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
3342 ? stderr : dump_file);
3343 }
3344
3345 /* Initialize some global state for the scheduler. This function works
3346 with the common data shared between all the schedulers. It is called
3347 from the scheduler specific initialization routine. */
3348
3349 void
3350 sched_init (void)
3351 {
3352 /* Disable speculative loads in their presence if cc0 defined. */
3353 #ifdef HAVE_cc0
3354 flag_schedule_speculative_load = 0;
3355 #endif
3356
3357 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
3358 targetm.sched.dispatch_do (NULL_RTX, DISPATCH_INIT);
3359
3360 sched_pressure_p = (flag_sched_pressure && ! reload_completed
3361 && common_sched_info->sched_pass_id == SCHED_RGN_PASS);
3362
3363 if (sched_pressure_p)
3364 ira_setup_eliminable_regset ();
3365
3366 /* Initialize SPEC_INFO. */
3367 if (targetm.sched.set_sched_flags)
3368 {
3369 spec_info = &spec_info_var;
3370 targetm.sched.set_sched_flags (spec_info);
3371
3372 if (spec_info->mask != 0)
3373 {
3374 spec_info->data_weakness_cutoff =
3375 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
3376 spec_info->control_weakness_cutoff =
3377 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
3378 * REG_BR_PROB_BASE) / 100;
3379 }
3380 else
3381 /* So we won't read anything accidentally. */
3382 spec_info = NULL;
3383
3384 }
3385 else
3386 /* So we won't read anything accidentally. */
3387 spec_info = 0;
3388
3389 /* Initialize issue_rate. */
3390 if (targetm.sched.issue_rate)
3391 issue_rate = targetm.sched.issue_rate ();
3392 else
3393 issue_rate = 1;
3394
3395 if (cached_issue_rate != issue_rate)
3396 {
3397 cached_issue_rate = issue_rate;
3398 /* To invalidate max_lookahead_tries: */
3399 cached_first_cycle_multipass_dfa_lookahead = 0;
3400 }
3401
3402 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
3403 dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
3404 else
3405 dfa_lookahead = 0;
3406
3407 if (targetm.sched.init_dfa_pre_cycle_insn)
3408 targetm.sched.init_dfa_pre_cycle_insn ();
3409
3410 if (targetm.sched.init_dfa_post_cycle_insn)
3411 targetm.sched.init_dfa_post_cycle_insn ();
3412
3413 dfa_start ();
3414 dfa_state_size = state_size ();
3415
3416 init_alias_analysis ();
3417
3418 df_set_flags (DF_LR_RUN_DCE);
3419 df_note_add_problem ();
3420
3421 /* More problems needed for interloop dep calculation in SMS. */
3422 if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
3423 {
3424 df_rd_add_problem ();
3425 df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
3426 }
3427
3428 df_analyze ();
3429
3430 /* Do not run DCE after reload, as this can kill nops inserted
3431 by bundling. */
3432 if (reload_completed)
3433 df_clear_flags (DF_LR_RUN_DCE);
3434
3435 regstat_compute_calls_crossed ();
3436
3437 if (targetm.sched.init_global)
3438 targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);
3439
3440 if (sched_pressure_p)
3441 {
3442 int i, max_regno = max_reg_num ();
3443
3444 ira_set_pseudo_classes (sched_verbose ? sched_dump : NULL);
3445 sched_regno_cover_class
3446 = (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
3447 for (i = 0; i < max_regno; i++)
3448 sched_regno_cover_class[i]
3449 = (i < FIRST_PSEUDO_REGISTER
3450 ? ira_class_translate[REGNO_REG_CLASS (i)]
3451 : reg_cover_class (i));
3452 curr_reg_live = BITMAP_ALLOC (NULL);
3453 saved_reg_live = BITMAP_ALLOC (NULL);
3454 region_ref_regs = BITMAP_ALLOC (NULL);
3455 }
3456
3457 curr_state = xmalloc (dfa_state_size);
3458 }
3459
3460 static void haifa_init_only_bb (basic_block, basic_block);
3461
3462 /* Initialize data structures specific to the Haifa scheduler. */
3463 void
3464 haifa_sched_init (void)
3465 {
3466 setup_sched_dump ();
3467 sched_init ();
3468
3469 if (spec_info != NULL)
3470 {
3471 sched_deps_info->use_deps_list = 1;
3472 sched_deps_info->generate_spec_deps = 1;
3473 }
3474
3475 /* Initialize luids, dependency caches, target and h_i_d for the
3476 whole function. */
3477 {
3478 bb_vec_t bbs = VEC_alloc (basic_block, heap, n_basic_blocks);
3479 basic_block bb;
3480
3481 sched_init_bbs ();
3482
3483 FOR_EACH_BB (bb)
3484 VEC_quick_push (basic_block, bbs, bb);
3485 sched_init_luids (bbs, NULL, NULL, NULL);
3486 sched_deps_init (true);
3487 sched_extend_target ();
3488 haifa_init_h_i_d (bbs, NULL, NULL, NULL);
3489
3490 VEC_free (basic_block, heap, bbs);
3491 }
3492
3493 sched_init_only_bb = haifa_init_only_bb;
3494 sched_split_block = sched_split_block_1;
3495 sched_create_empty_bb = sched_create_empty_bb_1;
3496 haifa_recovery_bb_ever_added_p = false;
3497
3498 #ifdef ENABLE_CHECKING
3499 /* This is used preferably for finding bugs in check_cfg () itself.
3500 We must call sched_bbs_init () before check_cfg () because check_cfg ()
3501 assumes that the last insn in the last bb has a non-null successor. */
3502 check_cfg (0, 0);
3503 #endif
3504
3505 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
3506 before_recovery = 0;
3507 after_recovery = 0;
3508 }
3509
3510 /* Finish work with the data specific to the Haifa scheduler. */
3511 void
3512 haifa_sched_finish (void)
3513 {
3514 sched_create_empty_bb = NULL;
3515 sched_split_block = NULL;
3516 sched_init_only_bb = NULL;
3517
3518 if (spec_info && spec_info->dump)
3519 {
3520 char c = reload_completed ? 'a' : 'b';
3521
3522 fprintf (spec_info->dump,
3523 ";; %s:\n", current_function_name ());
3524
3525 fprintf (spec_info->dump,
3526 ";; Procedure %cr-begin-data-spec motions == %d\n",
3527 c, nr_begin_data);
3528 fprintf (spec_info->dump,
3529 ";; Procedure %cr-be-in-data-spec motions == %d\n",
3530 c, nr_be_in_data);
3531 fprintf (spec_info->dump,
3532 ";; Procedure %cr-begin-control-spec motions == %d\n",
3533 c, nr_begin_control);
3534 fprintf (spec_info->dump,
3535 ";; Procedure %cr-be-in-control-spec motions == %d\n",
3536 c, nr_be_in_control);
3537 }
3538
3539 /* Finalize h_i_d, dependency caches, and luids for the whole
3540 function. Target will be finalized in md_global_finish (). */
3541 sched_deps_finish ();
3542 sched_finish_luids ();
3543 current_sched_info = NULL;
3544 sched_finish ();
3545 }
3546
3547 /* Free global data used during insn scheduling. This function works with
3548 the common data shared between the schedulers. */
3549
3550 void
3551 sched_finish (void)
3552 {
3553 haifa_finish_h_i_d ();
3554 if (sched_pressure_p)
3555 {
3556 free (sched_regno_cover_class);
3557 BITMAP_FREE (region_ref_regs);
3558 BITMAP_FREE (saved_reg_live);
3559 BITMAP_FREE (curr_reg_live);
3560 }
3561 free (curr_state);
3562
3563 if (targetm.sched.finish_global)
3564 targetm.sched.finish_global (sched_dump, sched_verbose);
3565
3566 end_alias_analysis ();
3567
3568 regstat_free_calls_crossed ();
3569
3570 dfa_finish ();
3571
3572 #ifdef ENABLE_CHECKING
3573 /* After reload ia64 backend clobbers CFG, so can't check anything. */
3574 if (!reload_completed)
3575 check_cfg (0, 0);
3576 #endif
3577 }
3578
3579 /* Fix INSN_TICKs of the instructions in the current block as well as
3580 INSN_TICKs of their dependents.
3581 HEAD and TAIL are the begin and the end of the current scheduled block. */
3582 static void
3583 fix_inter_tick (rtx head, rtx tail)
3584 {
3585 /* Set of instructions with corrected INSN_TICK. */
3586 bitmap_head processed;
3587 /* ??? It is doubtful if we should assume that cycle advance happens on
3588 basic block boundaries. Basically insns that are unconditionally ready
3589 on the start of the block are more preferable then those which have
3590 a one cycle dependency over insn from the previous block. */
3591 int next_clock = clock_var + 1;
3592
3593 bitmap_initialize (&processed, 0);
3594
3595 /* Iterates over scheduled instructions and fix their INSN_TICKs and
3596 INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
3597 across different blocks. */
3598 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
3599 {
3600 if (INSN_P (head))
3601 {
3602 int tick;
3603 sd_iterator_def sd_it;
3604 dep_t dep;
3605
3606 tick = INSN_TICK (head);
3607 gcc_assert (tick >= MIN_TICK);
3608
3609 /* Fix INSN_TICK of instruction from just scheduled block. */
3610 if (bitmap_set_bit (&processed, INSN_LUID (head)))
3611 {
3612 tick -= next_clock;
3613
3614 if (tick < MIN_TICK)
3615 tick = MIN_TICK;
3616
3617 INSN_TICK (head) = tick;
3618 }
3619
3620 FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
3621 {
3622 rtx next;
3623
3624 next = DEP_CON (dep);
3625 tick = INSN_TICK (next);
3626
3627 if (tick != INVALID_TICK
3628 /* If NEXT has its INSN_TICK calculated, fix it.
3629 If not - it will be properly calculated from
3630 scratch later in fix_tick_ready. */
3631 && bitmap_set_bit (&processed, INSN_LUID (next)))
3632 {
3633 tick -= next_clock;
3634
3635 if (tick < MIN_TICK)
3636 tick = MIN_TICK;
3637
3638 if (tick > INTER_TICK (next))
3639 INTER_TICK (next) = tick;
3640 else
3641 tick = INTER_TICK (next);
3642
3643 INSN_TICK (next) = tick;
3644 }
3645 }
3646 }
3647 }
3648 bitmap_clear (&processed);
3649 }
3650
3651 static int haifa_speculate_insn (rtx, ds_t, rtx *);
3652
3653 /* Check if NEXT is ready to be added to the ready or queue list.
3654 If "yes", add it to the proper list.
3655 Returns:
3656 -1 - is not ready yet,
3657 0 - added to the ready list,
3658 0 < N - queued for N cycles. */
3659 int
3660 try_ready (rtx next)
3661 {
3662 ds_t old_ts, *ts;
3663
3664 ts = &TODO_SPEC (next);
3665 old_ts = *ts;
3666
3667 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
3668 && ((old_ts & HARD_DEP)
3669 || (old_ts & SPECULATIVE)));
3670
3671 if (sd_lists_empty_p (next, SD_LIST_BACK))
3672 /* NEXT has all its dependencies resolved. */
3673 {
3674 /* Remove HARD_DEP bit from NEXT's status. */
3675 *ts &= ~HARD_DEP;
3676
3677 if (current_sched_info->flags & DO_SPECULATION)
3678 /* Remove all speculative bits from NEXT's status. */
3679 *ts &= ~SPECULATIVE;
3680 }
3681 else
3682 {
3683 /* One of the NEXT's dependencies has been resolved.
3684 Recalculate NEXT's status. */
3685
3686 *ts &= ~SPECULATIVE & ~HARD_DEP;
3687
3688 if (sd_lists_empty_p (next, SD_LIST_HARD_BACK))
3689 /* Now we've got NEXT with speculative deps only.
3690 1. Look at the deps to see what we have to do.
3691 2. Check if we can do 'todo'. */
3692 {
3693 sd_iterator_def sd_it;
3694 dep_t dep;
3695 bool first_p = true;
3696
3697 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
3698 {
3699 ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
3700
3701 if (DEBUG_INSN_P (DEP_PRO (dep))
3702 && !DEBUG_INSN_P (next))
3703 continue;
3704
3705 if (first_p)
3706 {
3707 first_p = false;
3708
3709 *ts = ds;
3710 }
3711 else
3712 *ts = ds_merge (*ts, ds);
3713 }
3714
3715 if (ds_weak (*ts) < spec_info->data_weakness_cutoff)
3716 /* Too few points. */
3717 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3718 }
3719 else
3720 *ts |= HARD_DEP;
3721 }
3722
3723 if (*ts & HARD_DEP)
3724 gcc_assert (*ts == old_ts
3725 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
3726 else if (current_sched_info->new_ready)
3727 *ts = current_sched_info->new_ready (next, *ts);
3728
3729 /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
3730 have its original pattern or changed (speculative) one. This is due
3731 to changing ebb in region scheduling.
3732 * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
3733 has speculative pattern.
3734
3735 We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
3736 control-speculative NEXT could have been discarded by sched-rgn.c
3737 (the same case as when discarded by can_schedule_ready_p ()). */
3738
3739 if ((*ts & SPECULATIVE)
3740 /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
3741 need to change anything. */
3742 && *ts != old_ts)
3743 {
3744 int res;
3745 rtx new_pat;
3746
3747 gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
3748
3749 res = haifa_speculate_insn (next, *ts, &new_pat);
3750
3751 switch (res)
3752 {
3753 case -1:
3754 /* It would be nice to change DEP_STATUS of all dependences,
3755 which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
3756 so we won't reanalyze anything. */
3757 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3758 break;
3759
3760 case 0:
3761 /* We follow the rule, that every speculative insn
3762 has non-null ORIG_PAT. */
3763 if (!ORIG_PAT (next))
3764 ORIG_PAT (next) = PATTERN (next);
3765 break;
3766
3767 case 1:
3768 if (!ORIG_PAT (next))
3769 /* If we gonna to overwrite the original pattern of insn,
3770 save it. */
3771 ORIG_PAT (next) = PATTERN (next);
3772
3773 haifa_change_pattern (next, new_pat);
3774 break;
3775
3776 default:
3777 gcc_unreachable ();
3778 }
3779 }
3780
3781 /* We need to restore pattern only if (*ts == 0), because otherwise it is
3782 either correct (*ts & SPECULATIVE),
3783 or we simply don't care (*ts & HARD_DEP). */
3784
3785 gcc_assert (!ORIG_PAT (next)
3786 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
3787
3788 if (*ts & HARD_DEP)
3789 {
3790 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
3791 control-speculative NEXT could have been discarded by sched-rgn.c
3792 (the same case as when discarded by can_schedule_ready_p ()). */
3793 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
3794
3795 change_queue_index (next, QUEUE_NOWHERE);
3796 return -1;
3797 }
3798 else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
3799 /* We should change pattern of every previously speculative
3800 instruction - and we determine if NEXT was speculative by using
3801 ORIG_PAT field. Except one case - speculation checks have ORIG_PAT
3802 pat too, so skip them. */
3803 {
3804 haifa_change_pattern (next, ORIG_PAT (next));
3805 ORIG_PAT (next) = 0;
3806 }
3807
3808 if (sched_verbose >= 2)
3809 {
3810 int s = TODO_SPEC (next);
3811
3812 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
3813 (*current_sched_info->print_insn) (next, 0));
3814
3815 if (spec_info && spec_info->dump)
3816 {
3817 if (s & BEGIN_DATA)
3818 fprintf (spec_info->dump, "; data-spec;");
3819 if (s & BEGIN_CONTROL)
3820 fprintf (spec_info->dump, "; control-spec;");
3821 if (s & BE_IN_CONTROL)
3822 fprintf (spec_info->dump, "; in-control-spec;");
3823 }
3824
3825 fprintf (sched_dump, "\n");
3826 }
3827
3828 adjust_priority (next);
3829
3830 return fix_tick_ready (next);
3831 }
3832
3833 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
3834 static int
3835 fix_tick_ready (rtx next)
3836 {
3837 int tick, delay;
3838
3839 if (!sd_lists_empty_p (next, SD_LIST_RES_BACK))
3840 {
3841 int full_p;
3842 sd_iterator_def sd_it;
3843 dep_t dep;
3844
3845 tick = INSN_TICK (next);
3846 /* if tick is not equal to INVALID_TICK, then update
3847 INSN_TICK of NEXT with the most recent resolved dependence
3848 cost. Otherwise, recalculate from scratch. */
3849 full_p = (tick == INVALID_TICK);
3850
3851 FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
3852 {
3853 rtx pro = DEP_PRO (dep);
3854 int tick1;
3855
3856 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
3857
3858 tick1 = INSN_TICK (pro) + dep_cost (dep);
3859 if (tick1 > tick)
3860 tick = tick1;
3861
3862 if (!full_p)
3863 break;
3864 }
3865 }
3866 else
3867 tick = -1;
3868
3869 INSN_TICK (next) = tick;
3870
3871 delay = tick - clock_var;
3872 if (delay <= 0 || sched_pressure_p)
3873 delay = QUEUE_READY;
3874
3875 change_queue_index (next, delay);
3876
3877 return delay;
3878 }
3879
3880 /* Move NEXT to the proper queue list with (DELAY >= 1),
3881 or add it to the ready list (DELAY == QUEUE_READY),
3882 or remove it from ready and queue lists at all (DELAY == QUEUE_NOWHERE). */
3883 static void
3884 change_queue_index (rtx next, int delay)
3885 {
3886 int i = QUEUE_INDEX (next);
3887
3888 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
3889 && delay != 0);
3890 gcc_assert (i != QUEUE_SCHEDULED);
3891
3892 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
3893 || (delay < 0 && delay == i))
3894 /* We have nothing to do. */
3895 return;
3896
3897 /* Remove NEXT from wherever it is now. */
3898 if (i == QUEUE_READY)
3899 ready_remove_insn (next);
3900 else if (i >= 0)
3901 queue_remove (next);
3902
3903 /* Add it to the proper place. */
3904 if (delay == QUEUE_READY)
3905 ready_add (readyp, next, false);
3906 else if (delay >= 1)
3907 queue_insn (next, delay);
3908
3909 if (sched_verbose >= 2)
3910 {
3911 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
3912 (*current_sched_info->print_insn) (next, 0));
3913
3914 if (delay == QUEUE_READY)
3915 fprintf (sched_dump, " into ready\n");
3916 else if (delay >= 1)
3917 fprintf (sched_dump, " into queue with cost=%d\n", delay);
3918 else
3919 fprintf (sched_dump, " removed from ready or queue lists\n");
3920 }
3921 }
3922
3923 static int sched_ready_n_insns = -1;
3924
3925 /* Initialize per region data structures. */
3926 void
3927 sched_extend_ready_list (int new_sched_ready_n_insns)
3928 {
3929 int i;
3930
3931 if (sched_ready_n_insns == -1)
3932 /* At the first call we need to initialize one more choice_stack
3933 entry. */
3934 {
3935 i = 0;
3936 sched_ready_n_insns = 0;
3937 }
3938 else
3939 i = sched_ready_n_insns + 1;
3940
3941 ready.veclen = new_sched_ready_n_insns + issue_rate;
3942 ready.vec = XRESIZEVEC (rtx, ready.vec, ready.veclen);
3943
3944 gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
3945
3946 ready_try = (char *) xrecalloc (ready_try, new_sched_ready_n_insns,
3947 sched_ready_n_insns, sizeof (*ready_try));
3948
3949 /* We allocate +1 element to save initial state in the choice_stack[0]
3950 entry. */
3951 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
3952 new_sched_ready_n_insns + 1);
3953
3954 for (; i <= new_sched_ready_n_insns; i++)
3955 choice_stack[i].state = xmalloc (dfa_state_size);
3956
3957 sched_ready_n_insns = new_sched_ready_n_insns;
3958 }
3959
3960 /* Free per region data structures. */
3961 void
3962 sched_finish_ready_list (void)
3963 {
3964 int i;
3965
3966 free (ready.vec);
3967 ready.vec = NULL;
3968 ready.veclen = 0;
3969
3970 free (ready_try);
3971 ready_try = NULL;
3972
3973 for (i = 0; i <= sched_ready_n_insns; i++)
3974 free (choice_stack [i].state);
3975 free (choice_stack);
3976 choice_stack = NULL;
3977
3978 sched_ready_n_insns = -1;
3979 }
3980
3981 static int
3982 haifa_luid_for_non_insn (rtx x)
3983 {
3984 gcc_assert (NOTE_P (x) || LABEL_P (x));
3985
3986 return 0;
3987 }
3988
3989 /* Generates recovery code for INSN. */
3990 static void
3991 generate_recovery_code (rtx insn)
3992 {
3993 if (TODO_SPEC (insn) & BEGIN_SPEC)
3994 begin_speculative_block (insn);
3995
3996 /* Here we have insn with no dependencies to
3997 instructions other then CHECK_SPEC ones. */
3998
3999 if (TODO_SPEC (insn) & BE_IN_SPEC)
4000 add_to_speculative_block (insn);
4001 }
4002
4003 /* Helper function.
4004 Tries to add speculative dependencies of type FS between instructions
4005 in deps_list L and TWIN. */
4006 static void
4007 process_insn_forw_deps_be_in_spec (rtx insn, rtx twin, ds_t fs)
4008 {
4009 sd_iterator_def sd_it;
4010 dep_t dep;
4011
4012 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
4013 {
4014 ds_t ds;
4015 rtx consumer;
4016
4017 consumer = DEP_CON (dep);
4018
4019 ds = DEP_STATUS (dep);
4020
4021 if (/* If we want to create speculative dep. */
4022 fs
4023 /* And we can do that because this is a true dep. */
4024 && (ds & DEP_TYPES) == DEP_TRUE)
4025 {
4026 gcc_assert (!(ds & BE_IN_SPEC));
4027
4028 if (/* If this dep can be overcome with 'begin speculation'. */
4029 ds & BEGIN_SPEC)
4030 /* Then we have a choice: keep the dep 'begin speculative'
4031 or transform it into 'be in speculative'. */
4032 {
4033 if (/* In try_ready we assert that if insn once became ready
4034 it can be removed from the ready (or queue) list only
4035 due to backend decision. Hence we can't let the
4036 probability of the speculative dep to decrease. */
4037 ds_weak (ds) <= ds_weak (fs))
4038 {
4039 ds_t new_ds;
4040
4041 new_ds = (ds & ~BEGIN_SPEC) | fs;
4042
4043 if (/* consumer can 'be in speculative'. */
4044 sched_insn_is_legitimate_for_speculation_p (consumer,
4045 new_ds))
4046 /* Transform it to be in speculative. */
4047 ds = new_ds;
4048 }
4049 }
4050 else
4051 /* Mark the dep as 'be in speculative'. */
4052 ds |= fs;
4053 }
4054
4055 {
4056 dep_def _new_dep, *new_dep = &_new_dep;
4057
4058 init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
4059 sd_add_dep (new_dep, false);
4060 }
4061 }
4062 }
4063
4064 /* Generates recovery code for BEGIN speculative INSN. */
4065 static void
4066 begin_speculative_block (rtx insn)
4067 {
4068 if (TODO_SPEC (insn) & BEGIN_DATA)
4069 nr_begin_data++;
4070 if (TODO_SPEC (insn) & BEGIN_CONTROL)
4071 nr_begin_control++;
4072
4073 create_check_block_twin (insn, false);
4074
4075 TODO_SPEC (insn) &= ~BEGIN_SPEC;
4076 }
4077
4078 static void haifa_init_insn (rtx);
4079
4080 /* Generates recovery code for BE_IN speculative INSN. */
4081 static void
4082 add_to_speculative_block (rtx insn)
4083 {
4084 ds_t ts;
4085 sd_iterator_def sd_it;
4086 dep_t dep;
4087 rtx twins = NULL;
4088 rtx_vec_t priorities_roots;
4089
4090 ts = TODO_SPEC (insn);
4091 gcc_assert (!(ts & ~BE_IN_SPEC));
4092
4093 if (ts & BE_IN_DATA)
4094 nr_be_in_data++;
4095 if (ts & BE_IN_CONTROL)
4096 nr_be_in_control++;
4097
4098 TODO_SPEC (insn) &= ~BE_IN_SPEC;
4099 gcc_assert (!TODO_SPEC (insn));
4100
4101 DONE_SPEC (insn) |= ts;
4102
4103 /* First we convert all simple checks to branchy. */
4104 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4105 sd_iterator_cond (&sd_it, &dep);)
4106 {
4107 rtx check = DEP_PRO (dep);
4108
4109 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
4110 {
4111 create_check_block_twin (check, true);
4112
4113 /* Restart search. */
4114 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4115 }
4116 else
4117 /* Continue search. */
4118 sd_iterator_next (&sd_it);
4119 }
4120
4121 priorities_roots = NULL;
4122 clear_priorities (insn, &priorities_roots);
4123
4124 while (1)
4125 {
4126 rtx check, twin;
4127 basic_block rec;
4128
4129 /* Get the first backward dependency of INSN. */
4130 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4131 if (!sd_iterator_cond (&sd_it, &dep))
4132 /* INSN has no backward dependencies left. */
4133 break;
4134
4135 gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
4136 && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
4137 && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
4138
4139 check = DEP_PRO (dep);
4140
4141 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
4142 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
4143
4144 rec = BLOCK_FOR_INSN (check);
4145
4146 twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
4147 haifa_init_insn (twin);
4148
4149 sd_copy_back_deps (twin, insn, true);
4150
4151 if (sched_verbose && spec_info->dump)
4152 /* INSN_BB (insn) isn't determined for twin insns yet.
4153 So we can't use current_sched_info->print_insn. */
4154 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
4155 INSN_UID (twin), rec->index);
4156
4157 twins = alloc_INSN_LIST (twin, twins);
4158
4159 /* Add dependences between TWIN and all appropriate
4160 instructions from REC. */
4161 FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
4162 {
4163 rtx pro = DEP_PRO (dep);
4164
4165 gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
4166
4167 /* INSN might have dependencies from the instructions from
4168 several recovery blocks. At this iteration we process those
4169 producers that reside in REC. */
4170 if (BLOCK_FOR_INSN (pro) == rec)
4171 {
4172 dep_def _new_dep, *new_dep = &_new_dep;
4173
4174 init_dep (new_dep, pro, twin, REG_DEP_TRUE);
4175 sd_add_dep (new_dep, false);
4176 }
4177 }
4178
4179 process_insn_forw_deps_be_in_spec (insn, twin, ts);
4180
4181 /* Remove all dependencies between INSN and insns in REC. */
4182 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4183 sd_iterator_cond (&sd_it, &dep);)
4184 {
4185 rtx pro = DEP_PRO (dep);
4186
4187 if (BLOCK_FOR_INSN (pro) == rec)
4188 sd_delete_dep (sd_it);
4189 else
4190 sd_iterator_next (&sd_it);
4191 }
4192 }
4193
4194 /* We couldn't have added the dependencies between INSN and TWINS earlier
4195 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
4196 while (twins)
4197 {
4198 rtx twin;
4199
4200 twin = XEXP (twins, 0);
4201
4202 {
4203 dep_def _new_dep, *new_dep = &_new_dep;
4204
4205 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
4206 sd_add_dep (new_dep, false);
4207 }
4208
4209 twin = XEXP (twins, 1);
4210 free_INSN_LIST_node (twins);
4211 twins = twin;
4212 }
4213
4214 calc_priorities (priorities_roots);
4215 VEC_free (rtx, heap, priorities_roots);
4216 }
4217
4218 /* Extends and fills with zeros (only the new part) array pointed to by P. */
4219 void *
4220 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
4221 {
4222 gcc_assert (new_nmemb >= old_nmemb);
4223 p = XRESIZEVAR (void, p, new_nmemb * size);
4224 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
4225 return p;
4226 }
4227
4228 /* Helper function.
4229 Find fallthru edge from PRED. */
4230 edge
4231 find_fallthru_edge_from (basic_block pred)
4232 {
4233 edge e;
4234 basic_block succ;
4235
4236 succ = pred->next_bb;
4237 gcc_assert (succ->prev_bb == pred);
4238
4239 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
4240 {
4241 e = find_fallthru_edge (pred->succs);
4242
4243 if (e)
4244 {
4245 gcc_assert (e->dest == succ);
4246 return e;
4247 }
4248 }
4249 else
4250 {
4251 e = find_fallthru_edge (succ->preds);
4252
4253 if (e)
4254 {
4255 gcc_assert (e->src == pred);
4256 return e;
4257 }
4258 }
4259
4260 return NULL;
4261 }
4262
4263 /* Extend per basic block data structures. */
4264 static void
4265 sched_extend_bb (void)
4266 {
4267 rtx insn;
4268
4269 /* The following is done to keep current_sched_info->next_tail non null. */
4270 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
4271 if (NEXT_INSN (insn) == 0
4272 || (!NOTE_P (insn)
4273 && !LABEL_P (insn)
4274 /* Don't emit a NOTE if it would end up before a BARRIER. */
4275 && !BARRIER_P (NEXT_INSN (insn))))
4276 {
4277 rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
4278 /* Make insn appear outside BB. */
4279 set_block_for_insn (note, NULL);
4280 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
4281 }
4282 }
4283
4284 /* Init per basic block data structures. */
4285 void
4286 sched_init_bbs (void)
4287 {
4288 sched_extend_bb ();
4289 }
4290
4291 /* Initialize BEFORE_RECOVERY variable. */
4292 static void
4293 init_before_recovery (basic_block *before_recovery_ptr)
4294 {
4295 basic_block last;
4296 edge e;
4297
4298 last = EXIT_BLOCK_PTR->prev_bb;
4299 e = find_fallthru_edge_from (last);
4300
4301 if (e)
4302 {
4303 /* We create two basic blocks:
4304 1. Single instruction block is inserted right after E->SRC
4305 and has jump to
4306 2. Empty block right before EXIT_BLOCK.
4307 Between these two blocks recovery blocks will be emitted. */
4308
4309 basic_block single, empty;
4310 rtx x, label;
4311
4312 /* If the fallthrough edge to exit we've found is from the block we've
4313 created before, don't do anything more. */
4314 if (last == after_recovery)
4315 return;
4316
4317 adding_bb_to_current_region_p = false;
4318
4319 single = sched_create_empty_bb (last);
4320 empty = sched_create_empty_bb (single);
4321
4322 /* Add new blocks to the root loop. */
4323 if (current_loops != NULL)
4324 {
4325 add_bb_to_loop (single, VEC_index (loop_p, current_loops->larray, 0));
4326 add_bb_to_loop (empty, VEC_index (loop_p, current_loops->larray, 0));
4327 }
4328
4329 single->count = last->count;
4330 empty->count = last->count;
4331 single->frequency = last->frequency;
4332 empty->frequency = last->frequency;
4333 BB_COPY_PARTITION (single, last);
4334 BB_COPY_PARTITION (empty, last);
4335
4336 redirect_edge_succ (e, single);
4337 make_single_succ_edge (single, empty, 0);
4338 make_single_succ_edge (empty, EXIT_BLOCK_PTR,
4339 EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
4340
4341 label = block_label (empty);
4342 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
4343 JUMP_LABEL (x) = label;
4344 LABEL_NUSES (label)++;
4345 haifa_init_insn (x);
4346
4347 emit_barrier_after (x);
4348
4349 sched_init_only_bb (empty, NULL);
4350 sched_init_only_bb (single, NULL);
4351 sched_extend_bb ();
4352
4353 adding_bb_to_current_region_p = true;
4354 before_recovery = single;
4355 after_recovery = empty;
4356
4357 if (before_recovery_ptr)
4358 *before_recovery_ptr = before_recovery;
4359
4360 if (sched_verbose >= 2 && spec_info->dump)
4361 fprintf (spec_info->dump,
4362 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
4363 last->index, single->index, empty->index);
4364 }
4365 else
4366 before_recovery = last;
4367 }
4368
4369 /* Returns new recovery block. */
4370 basic_block
4371 sched_create_recovery_block (basic_block *before_recovery_ptr)
4372 {
4373 rtx label;
4374 rtx barrier;
4375 basic_block rec;
4376
4377 haifa_recovery_bb_recently_added_p = true;
4378 haifa_recovery_bb_ever_added_p = true;
4379
4380 init_before_recovery (before_recovery_ptr);
4381
4382 barrier = get_last_bb_insn (before_recovery);
4383 gcc_assert (BARRIER_P (barrier));
4384
4385 label = emit_label_after (gen_label_rtx (), barrier);
4386
4387 rec = create_basic_block (label, label, before_recovery);
4388
4389 /* A recovery block always ends with an unconditional jump. */
4390 emit_barrier_after (BB_END (rec));
4391
4392 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
4393 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
4394
4395 if (sched_verbose && spec_info->dump)
4396 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
4397 rec->index);
4398
4399 return rec;
4400 }
4401
4402 /* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
4403 and emit necessary jumps. */
4404 void
4405 sched_create_recovery_edges (basic_block first_bb, basic_block rec,
4406 basic_block second_bb)
4407 {
4408 rtx label;
4409 rtx jump;
4410 int edge_flags;
4411
4412 /* This is fixing of incoming edge. */
4413 /* ??? Which other flags should be specified? */
4414 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
4415 /* Partition type is the same, if it is "unpartitioned". */
4416 edge_flags = EDGE_CROSSING;
4417 else
4418 edge_flags = 0;
4419
4420 make_edge (first_bb, rec, edge_flags);
4421 label = block_label (second_bb);
4422 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
4423 JUMP_LABEL (jump) = label;
4424 LABEL_NUSES (label)++;
4425
4426 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
4427 /* Partition type is the same, if it is "unpartitioned". */
4428 {
4429 /* Rewritten from cfgrtl.c. */
4430 if (flag_reorder_blocks_and_partition
4431 && targetm.have_named_sections)
4432 {
4433 /* We don't need the same note for the check because
4434 any_condjump_p (check) == true. */
4435 add_reg_note (jump, REG_CROSSING_JUMP, NULL_RTX);
4436 }
4437 edge_flags = EDGE_CROSSING;
4438 }
4439 else
4440 edge_flags = 0;
4441
4442 make_single_succ_edge (rec, second_bb, edge_flags);
4443 }
4444
4445 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
4446 INSN is a simple check, that should be converted to branchy one. */
4447 static void
4448 create_check_block_twin (rtx insn, bool mutate_p)
4449 {
4450 basic_block rec;
4451 rtx label, check, twin;
4452 ds_t fs;
4453 sd_iterator_def sd_it;
4454 dep_t dep;
4455 dep_def _new_dep, *new_dep = &_new_dep;
4456 ds_t todo_spec;
4457
4458 gcc_assert (ORIG_PAT (insn) != NULL_RTX);
4459
4460 if (!mutate_p)
4461 todo_spec = TODO_SPEC (insn);
4462 else
4463 {
4464 gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
4465 && (TODO_SPEC (insn) & SPECULATIVE) == 0);
4466
4467 todo_spec = CHECK_SPEC (insn);
4468 }
4469
4470 todo_spec &= SPECULATIVE;
4471
4472 /* Create recovery block. */
4473 if (mutate_p || targetm.sched.needs_block_p (todo_spec))
4474 {
4475 rec = sched_create_recovery_block (NULL);
4476 label = BB_HEAD (rec);
4477 }
4478 else
4479 {
4480 rec = EXIT_BLOCK_PTR;
4481 label = NULL_RTX;
4482 }
4483
4484 /* Emit CHECK. */
4485 check = targetm.sched.gen_spec_check (insn, label, todo_spec);
4486
4487 if (rec != EXIT_BLOCK_PTR)
4488 {
4489 /* To have mem_reg alive at the beginning of second_bb,
4490 we emit check BEFORE insn, so insn after splitting
4491 insn will be at the beginning of second_bb, which will
4492 provide us with the correct life information. */
4493 check = emit_jump_insn_before (check, insn);
4494 JUMP_LABEL (check) = label;
4495 LABEL_NUSES (label)++;
4496 }
4497 else
4498 check = emit_insn_before (check, insn);
4499
4500 /* Extend data structures. */
4501 haifa_init_insn (check);
4502
4503 /* CHECK is being added to current region. Extend ready list. */
4504 gcc_assert (sched_ready_n_insns != -1);
4505 sched_extend_ready_list (sched_ready_n_insns + 1);
4506
4507 if (current_sched_info->add_remove_insn)
4508 current_sched_info->add_remove_insn (insn, 0);
4509
4510 RECOVERY_BLOCK (check) = rec;
4511
4512 if (sched_verbose && spec_info->dump)
4513 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
4514 (*current_sched_info->print_insn) (check, 0));
4515
4516 gcc_assert (ORIG_PAT (insn));
4517
4518 /* Initialize TWIN (twin is a duplicate of original instruction
4519 in the recovery block). */
4520 if (rec != EXIT_BLOCK_PTR)
4521 {
4522 sd_iterator_def sd_it;
4523 dep_t dep;
4524
4525 FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
4526 if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
4527 {
4528 struct _dep _dep2, *dep2 = &_dep2;
4529
4530 init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);
4531
4532 sd_add_dep (dep2, true);
4533 }
4534
4535 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
4536 haifa_init_insn (twin);
4537
4538 if (sched_verbose && spec_info->dump)
4539 /* INSN_BB (insn) isn't determined for twin insns yet.
4540 So we can't use current_sched_info->print_insn. */
4541 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
4542 INSN_UID (twin), rec->index);
4543 }
4544 else
4545 {
4546 ORIG_PAT (check) = ORIG_PAT (insn);
4547 HAS_INTERNAL_DEP (check) = 1;
4548 twin = check;
4549 /* ??? We probably should change all OUTPUT dependencies to
4550 (TRUE | OUTPUT). */
4551 }
4552
4553 /* Copy all resolved back dependencies of INSN to TWIN. This will
4554 provide correct value for INSN_TICK (TWIN). */
4555 sd_copy_back_deps (twin, insn, true);
4556
4557 if (rec != EXIT_BLOCK_PTR)
4558 /* In case of branchy check, fix CFG. */
4559 {
4560 basic_block first_bb, second_bb;
4561 rtx jump;
4562
4563 first_bb = BLOCK_FOR_INSN (check);
4564 second_bb = sched_split_block (first_bb, check);
4565
4566 sched_create_recovery_edges (first_bb, rec, second_bb);
4567
4568 sched_init_only_bb (second_bb, first_bb);
4569 sched_init_only_bb (rec, EXIT_BLOCK_PTR);
4570
4571 jump = BB_END (rec);
4572 haifa_init_insn (jump);
4573 }
4574
4575 /* Move backward dependences from INSN to CHECK and
4576 move forward dependences from INSN to TWIN. */
4577
4578 /* First, create dependencies between INSN's producers and CHECK & TWIN. */
4579 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4580 {
4581 rtx pro = DEP_PRO (dep);
4582 ds_t ds;
4583
4584 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
4585 check --TRUE--> producer ??? or ANTI ???
4586 twin --TRUE--> producer
4587 twin --ANTI--> check
4588
4589 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
4590 check --ANTI--> producer
4591 twin --ANTI--> producer
4592 twin --ANTI--> check
4593
4594 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
4595 check ~~TRUE~~> producer
4596 twin ~~TRUE~~> producer
4597 twin --ANTI--> check */
4598
4599 ds = DEP_STATUS (dep);
4600
4601 if (ds & BEGIN_SPEC)
4602 {
4603 gcc_assert (!mutate_p);
4604 ds &= ~BEGIN_SPEC;
4605 }
4606
4607 init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
4608 sd_add_dep (new_dep, false);
4609
4610 if (rec != EXIT_BLOCK_PTR)
4611 {
4612 DEP_CON (new_dep) = twin;
4613 sd_add_dep (new_dep, false);
4614 }
4615 }
4616
4617 /* Second, remove backward dependencies of INSN. */
4618 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4619 sd_iterator_cond (&sd_it, &dep);)
4620 {
4621 if ((DEP_STATUS (dep) & BEGIN_SPEC)
4622 || mutate_p)
4623 /* We can delete this dep because we overcome it with
4624 BEGIN_SPECULATION. */
4625 sd_delete_dep (sd_it);
4626 else
4627 sd_iterator_next (&sd_it);
4628 }
4629
4630 /* Future Speculations. Determine what BE_IN speculations will be like. */
4631 fs = 0;
4632
4633 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
4634 here. */
4635
4636 gcc_assert (!DONE_SPEC (insn));
4637
4638 if (!mutate_p)
4639 {
4640 ds_t ts = TODO_SPEC (insn);
4641
4642 DONE_SPEC (insn) = ts & BEGIN_SPEC;
4643 CHECK_SPEC (check) = ts & BEGIN_SPEC;
4644
4645 /* Luckiness of future speculations solely depends upon initial
4646 BEGIN speculation. */
4647 if (ts & BEGIN_DATA)
4648 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
4649 if (ts & BEGIN_CONTROL)
4650 fs = set_dep_weak (fs, BE_IN_CONTROL,
4651 get_dep_weak (ts, BEGIN_CONTROL));
4652 }
4653 else
4654 CHECK_SPEC (check) = CHECK_SPEC (insn);
4655
4656 /* Future speculations: call the helper. */
4657 process_insn_forw_deps_be_in_spec (insn, twin, fs);
4658
4659 if (rec != EXIT_BLOCK_PTR)
4660 {
4661 /* Which types of dependencies should we use here is,
4662 generally, machine-dependent question... But, for now,
4663 it is not. */
4664
4665 if (!mutate_p)
4666 {
4667 init_dep (new_dep, insn, check, REG_DEP_TRUE);
4668 sd_add_dep (new_dep, false);
4669
4670 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
4671 sd_add_dep (new_dep, false);
4672 }
4673 else
4674 {
4675 if (spec_info->dump)
4676 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
4677 (*current_sched_info->print_insn) (insn, 0));
4678
4679 /* Remove all dependencies of the INSN. */
4680 {
4681 sd_it = sd_iterator_start (insn, (SD_LIST_FORW
4682 | SD_LIST_BACK
4683 | SD_LIST_RES_BACK));
4684 while (sd_iterator_cond (&sd_it, &dep))
4685 sd_delete_dep (sd_it);
4686 }
4687
4688 /* If former check (INSN) already was moved to the ready (or queue)
4689 list, add new check (CHECK) there too. */
4690 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
4691 try_ready (check);
4692
4693 /* Remove old check from instruction stream and free its
4694 data. */
4695 sched_remove_insn (insn);
4696 }
4697
4698 init_dep (new_dep, check, twin, REG_DEP_ANTI);
4699 sd_add_dep (new_dep, false);
4700 }
4701 else
4702 {
4703 init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
4704 sd_add_dep (new_dep, false);
4705 }
4706
4707 if (!mutate_p)
4708 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
4709 because it'll be done later in add_to_speculative_block. */
4710 {
4711 rtx_vec_t priorities_roots = NULL;
4712
4713 clear_priorities (twin, &priorities_roots);
4714 calc_priorities (priorities_roots);
4715 VEC_free (rtx, heap, priorities_roots);
4716 }
4717 }
4718
4719 /* Removes dependency between instructions in the recovery block REC
4720 and usual region instructions. It keeps inner dependences so it
4721 won't be necessary to recompute them. */
4722 static void
4723 fix_recovery_deps (basic_block rec)
4724 {
4725 rtx note, insn, jump, ready_list = 0;
4726 bitmap_head in_ready;
4727 rtx link;
4728
4729 bitmap_initialize (&in_ready, 0);
4730
4731 /* NOTE - a basic block note. */
4732 note = NEXT_INSN (BB_HEAD (rec));
4733 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4734 insn = BB_END (rec);
4735 gcc_assert (JUMP_P (insn));
4736 insn = PREV_INSN (insn);
4737
4738 do
4739 {
4740 sd_iterator_def sd_it;
4741 dep_t dep;
4742
4743 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4744 sd_iterator_cond (&sd_it, &dep);)
4745 {
4746 rtx consumer = DEP_CON (dep);
4747
4748 if (BLOCK_FOR_INSN (consumer) != rec)
4749 {
4750 sd_delete_dep (sd_it);
4751
4752 if (bitmap_set_bit (&in_ready, INSN_LUID (consumer)))
4753 ready_list = alloc_INSN_LIST (consumer, ready_list);
4754 }
4755 else
4756 {
4757 gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
4758
4759 sd_iterator_next (&sd_it);
4760 }
4761 }
4762
4763 insn = PREV_INSN (insn);
4764 }
4765 while (insn != note);
4766
4767 bitmap_clear (&in_ready);
4768
4769 /* Try to add instructions to the ready or queue list. */
4770 for (link = ready_list; link; link = XEXP (link, 1))
4771 try_ready (XEXP (link, 0));
4772 free_INSN_LIST_list (&ready_list);
4773
4774 /* Fixing jump's dependences. */
4775 insn = BB_HEAD (rec);
4776 jump = BB_END (rec);
4777
4778 gcc_assert (LABEL_P (insn));
4779 insn = NEXT_INSN (insn);
4780
4781 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4782 add_jump_dependencies (insn, jump);
4783 }
4784
4785 /* Change pattern of INSN to NEW_PAT. */
4786 void
4787 sched_change_pattern (rtx insn, rtx new_pat)
4788 {
4789 int t;
4790
4791 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
4792 gcc_assert (t);
4793 dfa_clear_single_insn_cache (insn);
4794 }
4795
4796 /* Change pattern of INSN to NEW_PAT. Invalidate cached haifa
4797 instruction data. */
4798 static void
4799 haifa_change_pattern (rtx insn, rtx new_pat)
4800 {
4801 sched_change_pattern (insn, new_pat);
4802
4803 /* Invalidate INSN_COST, so it'll be recalculated. */
4804 INSN_COST (insn) = -1;
4805 /* Invalidate INSN_TICK, so it'll be recalculated. */
4806 INSN_TICK (insn) = INVALID_TICK;
4807 }
4808
4809 /* -1 - can't speculate,
4810 0 - for speculation with REQUEST mode it is OK to use
4811 current instruction pattern,
4812 1 - need to change pattern for *NEW_PAT to be speculative. */
4813 int
4814 sched_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4815 {
4816 gcc_assert (current_sched_info->flags & DO_SPECULATION
4817 && (request & SPECULATIVE)
4818 && sched_insn_is_legitimate_for_speculation_p (insn, request));
4819
4820 if ((request & spec_info->mask) != request)
4821 return -1;
4822
4823 if (request & BE_IN_SPEC
4824 && !(request & BEGIN_SPEC))
4825 return 0;
4826
4827 return targetm.sched.speculate_insn (insn, request, new_pat);
4828 }
4829
4830 static int
4831 haifa_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4832 {
4833 gcc_assert (sched_deps_info->generate_spec_deps
4834 && !IS_SPECULATION_CHECK_P (insn));
4835
4836 if (HAS_INTERNAL_DEP (insn)
4837 || SCHED_GROUP_P (insn))
4838 return -1;
4839
4840 return sched_speculate_insn (insn, request, new_pat);
4841 }
4842
4843 /* Print some information about block BB, which starts with HEAD and
4844 ends with TAIL, before scheduling it.
4845 I is zero, if scheduler is about to start with the fresh ebb. */
4846 static void
4847 dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
4848 {
4849 if (!i)
4850 fprintf (sched_dump,
4851 ";; ======================================================\n");
4852 else
4853 fprintf (sched_dump,
4854 ";; =====================ADVANCING TO=====================\n");
4855 fprintf (sched_dump,
4856 ";; -- basic block %d from %d to %d -- %s reload\n",
4857 bb->index, INSN_UID (head), INSN_UID (tail),
4858 (reload_completed ? "after" : "before"));
4859 fprintf (sched_dump,
4860 ";; ======================================================\n");
4861 fprintf (sched_dump, "\n");
4862 }
4863
4864 /* Unlink basic block notes and labels and saves them, so they
4865 can be easily restored. We unlink basic block notes in EBB to
4866 provide back-compatibility with the previous code, as target backends
4867 assume, that there'll be only instructions between
4868 current_sched_info->{head and tail}. We restore these notes as soon
4869 as we can.
4870 FIRST (LAST) is the first (last) basic block in the ebb.
4871 NB: In usual case (FIRST == LAST) nothing is really done. */
4872 void
4873 unlink_bb_notes (basic_block first, basic_block last)
4874 {
4875 /* We DON'T unlink basic block notes of the first block in the ebb. */
4876 if (first == last)
4877 return;
4878
4879 bb_header = XNEWVEC (rtx, last_basic_block);
4880
4881 /* Make a sentinel. */
4882 if (last->next_bb != EXIT_BLOCK_PTR)
4883 bb_header[last->next_bb->index] = 0;
4884
4885 first = first->next_bb;
4886 do
4887 {
4888 rtx prev, label, note, next;
4889
4890 label = BB_HEAD (last);
4891 if (LABEL_P (label))
4892 note = NEXT_INSN (label);
4893 else
4894 note = label;
4895 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4896
4897 prev = PREV_INSN (label);
4898 next = NEXT_INSN (note);
4899 gcc_assert (prev && next);
4900
4901 NEXT_INSN (prev) = next;
4902 PREV_INSN (next) = prev;
4903
4904 bb_header[last->index] = label;
4905
4906 if (last == first)
4907 break;
4908
4909 last = last->prev_bb;
4910 }
4911 while (1);
4912 }
4913
4914 /* Restore basic block notes.
4915 FIRST is the first basic block in the ebb. */
4916 static void
4917 restore_bb_notes (basic_block first)
4918 {
4919 if (!bb_header)
4920 return;
4921
4922 /* We DON'T unlink basic block notes of the first block in the ebb. */
4923 first = first->next_bb;
4924 /* Remember: FIRST is actually a second basic block in the ebb. */
4925
4926 while (first != EXIT_BLOCK_PTR
4927 && bb_header[first->index])
4928 {
4929 rtx prev, label, note, next;
4930
4931 label = bb_header[first->index];
4932 prev = PREV_INSN (label);
4933 next = NEXT_INSN (prev);
4934
4935 if (LABEL_P (label))
4936 note = NEXT_INSN (label);
4937 else
4938 note = label;
4939 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4940
4941 bb_header[first->index] = 0;
4942
4943 NEXT_INSN (prev) = label;
4944 NEXT_INSN (note) = next;
4945 PREV_INSN (next) = note;
4946
4947 first = first->next_bb;
4948 }
4949
4950 free (bb_header);
4951 bb_header = 0;
4952 }
4953
4954 /* Helper function.
4955 Fix CFG after both in- and inter-block movement of
4956 control_flow_insn_p JUMP. */
4957 static void
4958 fix_jump_move (rtx jump)
4959 {
4960 basic_block bb, jump_bb, jump_bb_next;
4961
4962 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4963 jump_bb = BLOCK_FOR_INSN (jump);
4964 jump_bb_next = jump_bb->next_bb;
4965
4966 gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
4967 || IS_SPECULATION_BRANCHY_CHECK_P (jump));
4968
4969 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
4970 /* if jump_bb_next is not empty. */
4971 BB_END (jump_bb) = BB_END (jump_bb_next);
4972
4973 if (BB_END (bb) != PREV_INSN (jump))
4974 /* Then there are instruction after jump that should be placed
4975 to jump_bb_next. */
4976 BB_END (jump_bb_next) = BB_END (bb);
4977 else
4978 /* Otherwise jump_bb_next is empty. */
4979 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
4980
4981 /* To make assertion in move_insn happy. */
4982 BB_END (bb) = PREV_INSN (jump);
4983
4984 update_bb_for_insn (jump_bb_next);
4985 }
4986
4987 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
4988 static void
4989 move_block_after_check (rtx jump)
4990 {
4991 basic_block bb, jump_bb, jump_bb_next;
4992 VEC(edge,gc) *t;
4993
4994 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4995 jump_bb = BLOCK_FOR_INSN (jump);
4996 jump_bb_next = jump_bb->next_bb;
4997
4998 update_bb_for_insn (jump_bb);
4999
5000 gcc_assert (IS_SPECULATION_CHECK_P (jump)
5001 || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
5002
5003 unlink_block (jump_bb_next);
5004 link_block (jump_bb_next, bb);
5005
5006 t = bb->succs;
5007 bb->succs = 0;
5008 move_succs (&(jump_bb->succs), bb);
5009 move_succs (&(jump_bb_next->succs), jump_bb);
5010 move_succs (&t, jump_bb_next);
5011
5012 df_mark_solutions_dirty ();
5013
5014 common_sched_info->fix_recovery_cfg
5015 (bb->index, jump_bb->index, jump_bb_next->index);
5016 }
5017
5018 /* Helper function for move_block_after_check.
5019 This functions attaches edge vector pointed to by SUCCSP to
5020 block TO. */
5021 static void
5022 move_succs (VEC(edge,gc) **succsp, basic_block to)
5023 {
5024 edge e;
5025 edge_iterator ei;
5026
5027 gcc_assert (to->succs == 0);
5028
5029 to->succs = *succsp;
5030
5031 FOR_EACH_EDGE (e, ei, to->succs)
5032 e->src = to;
5033
5034 *succsp = 0;
5035 }
5036
5037 /* Remove INSN from the instruction stream.
5038 INSN should have any dependencies. */
5039 static void
5040 sched_remove_insn (rtx insn)
5041 {
5042 sd_finish_insn (insn);
5043
5044 change_queue_index (insn, QUEUE_NOWHERE);
5045 current_sched_info->add_remove_insn (insn, 1);
5046 remove_insn (insn);
5047 }
5048
5049 /* Clear priorities of all instructions, that are forward dependent on INSN.
5050 Store in vector pointed to by ROOTS_PTR insns on which priority () should
5051 be invoked to initialize all cleared priorities. */
5052 static void
5053 clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
5054 {
5055 sd_iterator_def sd_it;
5056 dep_t dep;
5057 bool insn_is_root_p = true;
5058
5059 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
5060
5061 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
5062 {
5063 rtx pro = DEP_PRO (dep);
5064
5065 if (INSN_PRIORITY_STATUS (pro) >= 0
5066 && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
5067 {
5068 /* If DEP doesn't contribute to priority then INSN itself should
5069 be added to priority roots. */
5070 if (contributes_to_priority_p (dep))
5071 insn_is_root_p = false;
5072
5073 INSN_PRIORITY_STATUS (pro) = -1;
5074 clear_priorities (pro, roots_ptr);
5075 }
5076 }
5077
5078 if (insn_is_root_p)
5079 VEC_safe_push (rtx, heap, *roots_ptr, insn);
5080 }
5081
5082 /* Recompute priorities of instructions, whose priorities might have been
5083 changed. ROOTS is a vector of instructions whose priority computation will
5084 trigger initialization of all cleared priorities. */
5085 static void
5086 calc_priorities (rtx_vec_t roots)
5087 {
5088 int i;
5089 rtx insn;
5090
5091 FOR_EACH_VEC_ELT (rtx, roots, i, insn)
5092 priority (insn);
5093 }
5094
5095
5096 /* Add dependences between JUMP and other instructions in the recovery
5097 block. INSN is the first insn the recovery block. */
5098 static void
5099 add_jump_dependencies (rtx insn, rtx jump)
5100 {
5101 do
5102 {
5103 insn = NEXT_INSN (insn);
5104 if (insn == jump)
5105 break;
5106
5107 if (dep_list_size (insn) == 0)
5108 {
5109 dep_def _new_dep, *new_dep = &_new_dep;
5110
5111 init_dep (new_dep, insn, jump, REG_DEP_ANTI);
5112 sd_add_dep (new_dep, false);
5113 }
5114 }
5115 while (1);
5116
5117 gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
5118 }
5119
5120 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
5121 rtx
5122 bb_note (basic_block bb)
5123 {
5124 rtx note;
5125
5126 note = BB_HEAD (bb);
5127 if (LABEL_P (note))
5128 note = NEXT_INSN (note);
5129
5130 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5131 return note;
5132 }
5133
5134 #ifdef ENABLE_CHECKING
5135 /* Helper function for check_cfg.
5136 Return nonzero, if edge vector pointed to by EL has edge with TYPE in
5137 its flags. */
5138 static int
5139 has_edge_p (VEC(edge,gc) *el, int type)
5140 {
5141 edge e;
5142 edge_iterator ei;
5143
5144 FOR_EACH_EDGE (e, ei, el)
5145 if (e->flags & type)
5146 return 1;
5147 return 0;
5148 }
5149
5150 /* Search back, starting at INSN, for an insn that is not a
5151 NOTE_INSN_VAR_LOCATION. Don't search beyond HEAD, and return it if
5152 no such insn can be found. */
5153 static inline rtx
5154 prev_non_location_insn (rtx insn, rtx head)
5155 {
5156 while (insn != head && NOTE_P (insn)
5157 && NOTE_KIND (insn) == NOTE_INSN_VAR_LOCATION)
5158 insn = PREV_INSN (insn);
5159
5160 return insn;
5161 }
5162
5163 /* Check few properties of CFG between HEAD and TAIL.
5164 If HEAD (TAIL) is NULL check from the beginning (till the end) of the
5165 instruction stream. */
5166 static void
5167 check_cfg (rtx head, rtx tail)
5168 {
5169 rtx next_tail;
5170 basic_block bb = 0;
5171 int not_first = 0, not_last;
5172
5173 if (head == NULL)
5174 head = get_insns ();
5175 if (tail == NULL)
5176 tail = get_last_insn ();
5177 next_tail = NEXT_INSN (tail);
5178
5179 do
5180 {
5181 not_last = head != tail;
5182
5183 if (not_first)
5184 gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
5185 if (not_last)
5186 gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
5187
5188 if (LABEL_P (head)
5189 || (NOTE_INSN_BASIC_BLOCK_P (head)
5190 && (!not_first
5191 || (not_first && !LABEL_P (PREV_INSN (head))))))
5192 {
5193 gcc_assert (bb == 0);
5194 bb = BLOCK_FOR_INSN (head);
5195 if (bb != 0)
5196 gcc_assert (BB_HEAD (bb) == head);
5197 else
5198 /* This is the case of jump table. See inside_basic_block_p (). */
5199 gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
5200 }
5201
5202 if (bb == 0)
5203 {
5204 gcc_assert (!inside_basic_block_p (head));
5205 head = NEXT_INSN (head);
5206 }
5207 else
5208 {
5209 gcc_assert (inside_basic_block_p (head)
5210 || NOTE_P (head));
5211 gcc_assert (BLOCK_FOR_INSN (head) == bb);
5212
5213 if (LABEL_P (head))
5214 {
5215 head = NEXT_INSN (head);
5216 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
5217 }
5218 else
5219 {
5220 if (control_flow_insn_p (head))
5221 {
5222 gcc_assert (prev_non_location_insn (BB_END (bb), head)
5223 == head);
5224
5225 if (any_uncondjump_p (head))
5226 gcc_assert (EDGE_COUNT (bb->succs) == 1
5227 && BARRIER_P (NEXT_INSN (head)));
5228 else if (any_condjump_p (head))
5229 gcc_assert (/* Usual case. */
5230 (EDGE_COUNT (bb->succs) > 1
5231 && !BARRIER_P (NEXT_INSN (head)))
5232 /* Or jump to the next instruction. */
5233 || (EDGE_COUNT (bb->succs) == 1
5234 && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
5235 == JUMP_LABEL (head))));
5236 }
5237 if (BB_END (bb) == head)
5238 {
5239 if (EDGE_COUNT (bb->succs) > 1)
5240 gcc_assert (control_flow_insn_p (prev_non_location_insn
5241 (head, BB_HEAD (bb)))
5242 || has_edge_p (bb->succs, EDGE_COMPLEX));
5243 bb = 0;
5244 }
5245
5246 head = NEXT_INSN (head);
5247 }
5248 }
5249
5250 not_first = 1;
5251 }
5252 while (head != next_tail);
5253
5254 gcc_assert (bb == 0);
5255 }
5256
5257 #endif /* ENABLE_CHECKING */
5258
5259 /* Extend per basic block data structures. */
5260 static void
5261 extend_bb (void)
5262 {
5263 if (sched_scan_info->extend_bb)
5264 sched_scan_info->extend_bb ();
5265 }
5266
5267 /* Init data for BB. */
5268 static void
5269 init_bb (basic_block bb)
5270 {
5271 if (sched_scan_info->init_bb)
5272 sched_scan_info->init_bb (bb);
5273 }
5274
5275 /* Extend per insn data structures. */
5276 static void
5277 extend_insn (void)
5278 {
5279 if (sched_scan_info->extend_insn)
5280 sched_scan_info->extend_insn ();
5281 }
5282
5283 /* Init data structures for INSN. */
5284 static void
5285 init_insn (rtx insn)
5286 {
5287 if (sched_scan_info->init_insn)
5288 sched_scan_info->init_insn (insn);
5289 }
5290
5291 /* Init all insns in BB. */
5292 static void
5293 init_insns_in_bb (basic_block bb)
5294 {
5295 rtx insn;
5296
5297 FOR_BB_INSNS (bb, insn)
5298 init_insn (insn);
5299 }
5300
5301 /* A driver function to add a set of basic blocks (BBS),
5302 a single basic block (BB), a set of insns (INSNS) or a single insn (INSN)
5303 to the scheduling region. */
5304 void
5305 sched_scan (const struct sched_scan_info_def *ssi,
5306 bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
5307 {
5308 sched_scan_info = ssi;
5309
5310 if (bbs != NULL || bb != NULL)
5311 {
5312 extend_bb ();
5313
5314 if (bbs != NULL)
5315 {
5316 unsigned i;
5317 basic_block x;
5318
5319 FOR_EACH_VEC_ELT (basic_block, bbs, i, x)
5320 init_bb (x);
5321 }
5322
5323 if (bb != NULL)
5324 init_bb (bb);
5325 }
5326
5327 extend_insn ();
5328
5329 if (bbs != NULL)
5330 {
5331 unsigned i;
5332 basic_block x;
5333
5334 FOR_EACH_VEC_ELT (basic_block, bbs, i, x)
5335 init_insns_in_bb (x);
5336 }
5337
5338 if (bb != NULL)
5339 init_insns_in_bb (bb);
5340
5341 if (insns != NULL)
5342 {
5343 unsigned i;
5344 rtx x;
5345
5346 FOR_EACH_VEC_ELT (rtx, insns, i, x)
5347 init_insn (x);
5348 }
5349
5350 if (insn != NULL)
5351 init_insn (insn);
5352 }
5353
5354
5355 /* Extend data structures for logical insn UID. */
5356 static void
5357 luids_extend_insn (void)
5358 {
5359 int new_luids_max_uid = get_max_uid () + 1;
5360
5361 VEC_safe_grow_cleared (int, heap, sched_luids, new_luids_max_uid);
5362 }
5363
5364 /* Initialize LUID for INSN. */
5365 static void
5366 luids_init_insn (rtx insn)
5367 {
5368 int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
5369 int luid;
5370
5371 if (i >= 0)
5372 {
5373 luid = sched_max_luid;
5374 sched_max_luid += i;
5375 }
5376 else
5377 luid = -1;
5378
5379 SET_INSN_LUID (insn, luid);
5380 }
5381
5382 /* Initialize luids for BBS, BB, INSNS and INSN.
5383 The hook common_sched_info->luid_for_non_insn () is used to determine
5384 if notes, labels, etc. need luids. */
5385 void
5386 sched_init_luids (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
5387 {
5388 const struct sched_scan_info_def ssi =
5389 {
5390 NULL, /* extend_bb */
5391 NULL, /* init_bb */
5392 luids_extend_insn, /* extend_insn */
5393 luids_init_insn /* init_insn */
5394 };
5395
5396 sched_scan (&ssi, bbs, bb, insns, insn);
5397 }
5398
5399 /* Free LUIDs. */
5400 void
5401 sched_finish_luids (void)
5402 {
5403 VEC_free (int, heap, sched_luids);
5404 sched_max_luid = 1;
5405 }
5406
5407 /* Return logical uid of INSN. Helpful while debugging. */
5408 int
5409 insn_luid (rtx insn)
5410 {
5411 return INSN_LUID (insn);
5412 }
5413
5414 /* Extend per insn data in the target. */
5415 void
5416 sched_extend_target (void)
5417 {
5418 if (targetm.sched.h_i_d_extended)
5419 targetm.sched.h_i_d_extended ();
5420 }
5421
5422 /* Extend global scheduler structures (those, that live across calls to
5423 schedule_block) to include information about just emitted INSN. */
5424 static void
5425 extend_h_i_d (void)
5426 {
5427 int reserve = (get_max_uid () + 1
5428 - VEC_length (haifa_insn_data_def, h_i_d));
5429 if (reserve > 0
5430 && ! VEC_space (haifa_insn_data_def, h_i_d, reserve))
5431 {
5432 VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d,
5433 3 * get_max_uid () / 2);
5434 sched_extend_target ();
5435 }
5436 }
5437
5438 /* Initialize h_i_d entry of the INSN with default values.
5439 Values, that are not explicitly initialized here, hold zero. */
5440 static void
5441 init_h_i_d (rtx insn)
5442 {
5443 if (INSN_LUID (insn) > 0)
5444 {
5445 INSN_COST (insn) = -1;
5446 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
5447 INSN_TICK (insn) = INVALID_TICK;
5448 INTER_TICK (insn) = INVALID_TICK;
5449 TODO_SPEC (insn) = HARD_DEP;
5450 }
5451 }
5452
5453 /* Initialize haifa_insn_data for BBS, BB, INSNS and INSN. */
5454 void
5455 haifa_init_h_i_d (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
5456 {
5457 const struct sched_scan_info_def ssi =
5458 {
5459 NULL, /* extend_bb */
5460 NULL, /* init_bb */
5461 extend_h_i_d, /* extend_insn */
5462 init_h_i_d /* init_insn */
5463 };
5464
5465 sched_scan (&ssi, bbs, bb, insns, insn);
5466 }
5467
5468 /* Finalize haifa_insn_data. */
5469 void
5470 haifa_finish_h_i_d (void)
5471 {
5472 int i;
5473 haifa_insn_data_t data;
5474 struct reg_use_data *use, *next;
5475
5476 FOR_EACH_VEC_ELT (haifa_insn_data_def, h_i_d, i, data)
5477 {
5478 if (data->reg_pressure != NULL)
5479 free (data->reg_pressure);
5480 for (use = data->reg_use_list; use != NULL; use = next)
5481 {
5482 next = use->next_insn_use;
5483 free (use);
5484 }
5485 }
5486 VEC_free (haifa_insn_data_def, heap, h_i_d);
5487 }
5488
5489 /* Init data for the new insn INSN. */
5490 static void
5491 haifa_init_insn (rtx insn)
5492 {
5493 gcc_assert (insn != NULL);
5494
5495 sched_init_luids (NULL, NULL, NULL, insn);
5496 sched_extend_target ();
5497 sched_deps_init (false);
5498 haifa_init_h_i_d (NULL, NULL, NULL, insn);
5499
5500 if (adding_bb_to_current_region_p)
5501 {
5502 sd_init_insn (insn);
5503
5504 /* Extend dependency caches by one element. */
5505 extend_dependency_caches (1, false);
5506 }
5507 }
5508
5509 /* Init data for the new basic block BB which comes after AFTER. */
5510 static void
5511 haifa_init_only_bb (basic_block bb, basic_block after)
5512 {
5513 gcc_assert (bb != NULL);
5514
5515 sched_init_bbs ();
5516
5517 if (common_sched_info->add_block)
5518 /* This changes only data structures of the front-end. */
5519 common_sched_info->add_block (bb, after);
5520 }
5521
5522 /* A generic version of sched_split_block (). */
5523 basic_block
5524 sched_split_block_1 (basic_block first_bb, rtx after)
5525 {
5526 edge e;
5527
5528 e = split_block (first_bb, after);
5529 gcc_assert (e->src == first_bb);
5530
5531 /* sched_split_block emits note if *check == BB_END. Probably it
5532 is better to rip that note off. */
5533
5534 return e->dest;
5535 }
5536
5537 /* A generic version of sched_create_empty_bb (). */
5538 basic_block
5539 sched_create_empty_bb_1 (basic_block after)
5540 {
5541 return create_empty_bb (after);
5542 }
5543
5544 /* Insert PAT as an INSN into the schedule and update the necessary data
5545 structures to account for it. */
5546 rtx
5547 sched_emit_insn (rtx pat)
5548 {
5549 rtx insn = emit_insn_after (pat, last_scheduled_insn);
5550 last_scheduled_insn = insn;
5551 haifa_init_insn (insn);
5552 return insn;
5553 }
5554
5555 /* This function returns a candidate satisfying dispatch constraints from
5556 the ready list. */
5557
5558 static rtx
5559 ready_remove_first_dispatch (struct ready_list *ready)
5560 {
5561 int i;
5562 rtx insn = ready_element (ready, 0);
5563
5564 if (ready->n_ready == 1
5565 || INSN_CODE (insn) < 0
5566 || !INSN_P (insn)
5567 || !active_insn_p (insn)
5568 || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
5569 return ready_remove_first (ready);
5570
5571 for (i = 1; i < ready->n_ready; i++)
5572 {
5573 insn = ready_element (ready, i);
5574
5575 if (INSN_CODE (insn) < 0
5576 || !INSN_P (insn)
5577 || !active_insn_p (insn))
5578 continue;
5579
5580 if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
5581 {
5582 /* Return ith element of ready. */
5583 insn = ready_remove (ready, i);
5584 return insn;
5585 }
5586 }
5587
5588 if (targetm.sched.dispatch (NULL_RTX, DISPATCH_VIOLATION))
5589 return ready_remove_first (ready);
5590
5591 for (i = 1; i < ready->n_ready; i++)
5592 {
5593 insn = ready_element (ready, i);
5594
5595 if (INSN_CODE (insn) < 0
5596 || !INSN_P (insn)
5597 || !active_insn_p (insn))
5598 continue;
5599
5600 /* Return i-th element of ready. */
5601 if (targetm.sched.dispatch (insn, IS_CMP))
5602 return ready_remove (ready, i);
5603 }
5604
5605 return ready_remove_first (ready);
5606 }
5607
5608 /* Get number of ready insn in the ready list. */
5609
5610 int
5611 number_in_ready (void)
5612 {
5613 return ready.n_ready;
5614 }
5615
5616 /* Get number of ready's in the ready list. */
5617
5618 rtx
5619 get_ready_element (int i)
5620 {
5621 return ready_element (&ready, i);
5622 }
5623
5624 #endif /* INSN_SCHEDULING */