gcc/haifa-sched.c
1 /* Instruction scheduling pass.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
6 and currently maintained by, Jim Wilson (wilson@cygnus.com)
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 /* Instruction scheduling pass. This file, along with sched-deps.c,
25 contains the generic parts.  The actual entry point for the
26 normal instruction scheduling pass is found in sched-rgn.c.
27
28 We compute insn priorities based on data dependencies. Flow
29 analysis only creates a fraction of the data-dependencies we must
30 observe: namely, only those dependencies which the combiner can be
31 expected to use. For this pass, we must therefore create the
32 remaining dependencies we need to observe: register dependencies,
33 memory dependencies, dependencies to keep function calls in order,
34 and the dependence between a conditional branch and the setting of
35 condition codes are all dealt with here.
36
37 The scheduler first traverses the data flow graph, starting with
38 the last instruction, and proceeding to the first, assigning values
39 to insn_priority as it goes. This sorts the instructions
40 topologically by data dependence.
41
42 Once priorities have been established, we order the insns using
43 list scheduling. This works as follows: starting with a list of
44 all the ready insns, and sorted according to priority number, we
45 schedule the insn from the end of the list by placing its
46 predecessors in the list according to their priority order. We
47 consider this insn scheduled by setting the pointer to the "end" of
48 the list to point to the previous insn. When an insn has no
49 predecessors, we either queue it until sufficient time has elapsed
50 or add it to the ready list. As the instructions are scheduled or
51 when stalls are introduced, the queue advances and dumps insns into
52 the ready list. When all insns down to the lowest priority have
53 been scheduled, the critical path of the basic block has been made
54 as short as possible. The remaining insns are then scheduled in
55 remaining slots.
56
57 The following list shows the order in which we want to break ties
58 among insns in the ready list:
59
60 1. choose insn with the longest path to end of bb, ties
61 broken by
62 2. choose insn with least contribution to register pressure,
63 ties broken by
64 3. prefer in-block over interblock motion, ties broken by
65 4. prefer useful over speculative motion, ties broken by
66 5. choose insn with largest control flow probability, ties
67 broken by
68 6. choose insn with the least dependences upon the previously
69 scheduled insn, ties broken by
70 7. choose the insn which has the most insns dependent on it, or finally
71 8. choose insn with lowest UID.
72
73 Memory references complicate matters. Only if we can be certain
74 that memory references are not part of the data dependency graph
75 (via true, anti, or output dependence), can we move operations past
76 memory references. To first approximation, reads can be done
77 independently, while writes introduce dependencies. Better
78 approximations will yield fewer dependencies.
79
80 Before reload, an extended analysis of interblock data dependences
81 is required for interblock scheduling. This is performed in
82 compute_block_backward_dependences ().
83
84 Dependencies set up by memory references are treated in exactly the
85 same way as other dependencies, by using insn backward dependences
86 INSN_BACK_DEPS. INSN_BACK_DEPS are translated into forward dependences
87 INSN_FORW_DEPS for the purpose of forward list scheduling.
88
89 Having optimized the critical path, we may have also unduly
90 extended the lifetimes of some registers. If an operation requires
91 that constants be loaded into registers, it is certainly desirable
92 to load those constants as early as necessary, but no earlier.
93 I.e., it will not do to load up a bunch of registers at the
94 beginning of a basic block only to use them at the end, if they
95 could be loaded later, since this may result in excessive register
96 utilization.
97
98 Note that since branches are never in basic blocks, but only end
99 basic blocks, this pass will not move branches. But that is ok,
100 since we can use GNU's delayed branch scheduling pass to take care
101 of this case.
102
103 Also note that no further optimizations based on algebraic
104 identities are performed, so this pass would be a good one to
105 perform instruction splitting, such as breaking up a multiply
106 instruction into shifts and adds where that is profitable.
107
108 Given the memory aliasing analysis that this pass should perform,
109 it should be possible to remove redundant stores to memory, and to
110 load values from registers instead of hitting memory.
111
112 Before reload, speculative insns are moved only if a 'proof' exists
113 that no exception will be caused by this, and if no live registers
114 exist that inhibit the motion (live registers constraints are not
115 represented by data dependence edges).
116
117 This pass must update information that subsequent passes expect to
118 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
119 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
120
121 The information in the line number notes is carefully retained by
122 this pass. Notes that refer to the starting and ending of
123 exception regions are also carefully retained by this pass. All
124 other NOTE insns are grouped in their same relative order at the
125 beginning of basic blocks and regions that have been scheduled. */
126 \f
127 #include "config.h"
128 #include "system.h"
129 #include "coretypes.h"
130 #include "tm.h"
131 #include "diagnostic-core.h"
132 #include "toplev.h"
133 #include "rtl.h"
134 #include "tm_p.h"
135 #include "hard-reg-set.h"
136 #include "regs.h"
137 #include "function.h"
138 #include "flags.h"
139 #include "insn-config.h"
140 #include "insn-attr.h"
141 #include "except.h"
142 #include "recog.h"
143 #include "sched-int.h"
144 #include "target.h"
145 #include "output.h"
146 #include "params.h"
147 #include "vecprim.h"
148 #include "dbgcnt.h"
149 #include "cfgloop.h"
150 #include "ira.h"
151 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
152
153 #ifdef INSN_SCHEDULING
154
155 /* issue_rate is the number of insns that can be scheduled in the same
156 machine cycle. It can be defined in the config/mach/mach.h file,
157 otherwise we set it to 1. */
158
159 int issue_rate;
160
161 /* sched-verbose controls the amount of debugging output the
162 scheduler prints. It is controlled by -fsched-verbose=N:
163 N>0 and no -DSR : the output is directed to stderr.
164 N>=10 will direct the printouts to stderr (regardless of -dSR).
165 N=1: same as -dSR.
166 N=2: bb's probabilities, detailed ready list info, unit/insn info.
167 N=3: rtl at abort point, control-flow, regions info.
168 N=5: dependences info. */
169
170 int sched_verbose = 0;
171
172 /* Debugging file.  All printouts are sent to sched_dump, which is always
173 set, either to stderr or to the dump listing file (-dRS). */
174 FILE *sched_dump = 0;
175
176 /* This is a placeholder for the scheduler parameters common
177 to all schedulers. */
178 struct common_sched_info_def *common_sched_info;
179
180 #define INSN_TICK(INSN) (HID (INSN)->tick)
181 #define INTER_TICK(INSN) (HID (INSN)->inter_tick)
182
183 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
184 then it should be recalculated from scratch. */
185 #define INVALID_TICK (-(max_insn_queue_index + 1))
186 /* The minimal value of the INSN_TICK of an instruction. */
187 #define MIN_TICK (-max_insn_queue_index)
188
189 /* List of important notes we must keep around. This is a pointer to the
190 last element in the list. */
191 rtx note_list;
192
193 static struct spec_info_def spec_info_var;
194 /* Description of the speculative part of the scheduling.
195 If NULL - no speculation. */
196 spec_info_t spec_info = NULL;
197
198 /* True if a recovery block was added during scheduling of the current
199 block.  Used to determine whether we need to fix INSN_TICKs. */
200 static bool haifa_recovery_bb_recently_added_p;
201
202 /* True if a recovery block was added during this scheduling pass.
203 Used to determine whether the memory pools of dependencies should be
204 empty after finishing the current region. */
205 bool haifa_recovery_bb_ever_added_p;
206
207 /* Counters of different types of speculative instructions. */
208 static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
209
210 /* Array used in {unlink, restore}_bb_notes. */
211 static rtx *bb_header = 0;
212
213 /* Basic block after which recovery blocks will be created. */
214 static basic_block before_recovery;
215
216 /* Basic block just before the EXIT_BLOCK and after recovery, if we have
217 created it. */
218 basic_block after_recovery;
219
220 /* FALSE if we add bb to another region, so we don't need to initialize it. */
221 bool adding_bb_to_current_region_p = true;
222
223 /* Queues, etc. */
224
225 /* An instruction is ready to be scheduled when all insns preceding it
226 have already been scheduled. It is important to ensure that all
227 insns which use its result will not be executed until its result
228 has been computed. An insn is maintained in one of four structures:
229
230 (P) the "Pending" set of insns which cannot be scheduled until
231 their dependencies have been satisfied.
232 (Q) the "Queued" set of insns that can be scheduled when sufficient
233 time has passed.
234 (R) the "Ready" list of unscheduled, uncommitted insns.
235 (S) the "Scheduled" list of insns.
236
237 Initially, all insns are either "Pending" or "Ready" depending on
238 whether their dependencies are satisfied.
239
240 Insns move from the "Ready" list to the "Scheduled" list as they
241 are committed to the schedule. As this occurs, the insns in the
242 "Pending" list have their dependencies satisfied and move to either
243 the "Ready" list or the "Queued" set depending on whether
244 sufficient time has passed to make them ready. As time passes,
245 insns move from the "Queued" set to the "Ready" list.
246
247 The "Pending" list (P) are the insns in the INSN_FORW_DEPS of the
248 unscheduled insns, i.e., those that are ready, queued, and pending.
249 The "Queued" set (Q) is implemented by the variable `insn_queue'.
250 The "Ready" list (R) is implemented by the variables `ready' and
251 `n_ready'.
252 The "Scheduled" list (S) is the new insn chain built by this pass.
253
254 The transition (R->S) is implemented in the scheduling loop in
255 `schedule_block' when the best insn to schedule is chosen.
256 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
257 insns move from the ready list to the scheduled list.
258 The transition (Q->R) is implemented in `queue_to_ready' as time
259 passes or stalls are introduced. */
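
/* For illustration, the life of a typical insn expressed in terms of the
   QUEUE_INDEX values defined just below:

     QUEUE_NOWHERE    -- still has unresolved dependencies (P)
     N >= 0           -- sitting in slot N of the insn queue until its
                         cycle arrives (Q)
     QUEUE_READY      -- in the ready list, may be issued (R)
     QUEUE_SCHEDULED  -- committed to the new insn chain (S)

   An insn whose last dependence is resolved with no remaining latency
   skips the queue and goes directly from P to R.  */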
260
261 /* Implement a circular buffer to delay instructions until sufficient
262 time has passed. For the new pipeline description interface,
263 MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
264 than the maximal time of instruction execution computed by genattr.c
265 from the maximal time of functional unit reservations and of getting
266 a result.  This is the longest time an insn may be queued. */
267
268 static rtx *insn_queue;
269 static int q_ptr = 0;
270 static int q_size = 0;
271 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
272 #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
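
/* For example (with hypothetical values): if max_insn_queue_index == 31
   and q_ptr == 30, an insn delayed for 3 cycles lands in slot
   NEXT_Q_AFTER (q_ptr, 3) == (30 + 3) & 31 == 1, i.e. the index simply
   wraps around the circular buffer.  */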
273
274 #define QUEUE_SCHEDULED (-3)
275 #define QUEUE_NOWHERE (-2)
276 #define QUEUE_READY (-1)
277 /* QUEUE_SCHEDULED - INSN is scheduled.
278 QUEUE_NOWHERE - INSN isn't scheduled yet and is neither in
279 queue or ready list.
280 QUEUE_READY - INSN is in ready list.
281 N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
282
283 #define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
284
285 /* The following variable holds the state of all current and future
286 reservations of the processor units. */
287 state_t curr_state;
288
289 /* The following variable is the size of the memory representing all
290 current and future reservations of the processor units. */
291 size_t dfa_state_size;
292
293 /* The following array is used to find the best insn from ready when
294 the automaton pipeline interface is used. */
295 char *ready_try = NULL;
296
297 /* The ready list. */
298 struct ready_list ready = {NULL, 0, 0, 0, 0};
299
300 /* The pointer to the ready list (to be removed). */
301 static struct ready_list *readyp = &ready;
302
303 /* Scheduling clock. */
304 static int clock_var;
305
306 static int may_trap_exp (const_rtx, int);
307
308 /* Nonzero iff the address is composed of at most 1 register. */
309 #define CONST_BASED_ADDRESS_P(x) \
310 (REG_P (x) \
311 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
312 || (GET_CODE (x) == LO_SUM)) \
313 && (CONSTANT_P (XEXP (x, 0)) \
314 || CONSTANT_P (XEXP (x, 1)))))
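
/* For example, (reg A), (plus (reg A) (const_int 4)) and
   (lo_sum (reg A) (symbol_ref "x")) all satisfy CONST_BASED_ADDRESS_P,
   whereas (plus (reg A) (reg B)) does not, since neither operand of the
   PLUS is constant.  */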
315
316 /* Returns a class that insn with GET_DEST(insn)=x may belong to,
317 as found by analyzing insn's expression. */
318
319 \f
320 static int haifa_luid_for_non_insn (rtx x);
321
322 /* Haifa version of sched_info hooks common to all headers. */
323 const struct common_sched_info_def haifa_common_sched_info =
324 {
325 NULL, /* fix_recovery_cfg */
326 NULL, /* add_block */
327 NULL, /* estimate_number_of_insns */
328 haifa_luid_for_non_insn, /* luid_for_non_insn */
329 SCHED_PASS_UNKNOWN /* sched_pass_id */
330 };
331
332 const struct sched_scan_info_def *sched_scan_info;
333
334 /* Mapping from instruction UID to its Logical UID. */
335 VEC (int, heap) *sched_luids = NULL;
336
337 /* Next LUID to assign to an instruction. */
338 int sched_max_luid = 1;
339
340 /* Haifa Instruction Data. */
341 VEC (haifa_insn_data_def, heap) *h_i_d = NULL;
342
343 void (* sched_init_only_bb) (basic_block, basic_block);
344
345 /* Split block function.  Different schedulers might use different
346 functions to keep their internal data consistent. */
347 basic_block (* sched_split_block) (basic_block, rtx);
348
349 /* Create empty basic block after the specified block. */
350 basic_block (* sched_create_empty_bb) (basic_block);
351
352 static int
353 may_trap_exp (const_rtx x, int is_store)
354 {
355 enum rtx_code code;
356
357 if (x == 0)
358 return TRAP_FREE;
359 code = GET_CODE (x);
360 if (is_store)
361 {
362 if (code == MEM && may_trap_p (x))
363 return TRAP_RISKY;
364 else
365 return TRAP_FREE;
366 }
367 if (code == MEM)
368 {
369 /* The insn uses memory: a volatile load. */
370 if (MEM_VOLATILE_P (x))
371 return IRISKY;
372 /* An exception-free load. */
373 if (!may_trap_p (x))
374 return IFREE;
375 /* A load with 1 base register, to be further checked. */
376 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
377 return PFREE_CANDIDATE;
378 /* No info on the load, to be further checked. */
379 return PRISKY_CANDIDATE;
380 }
381 else
382 {
383 const char *fmt;
384 int i, insn_class = TRAP_FREE;
385
386 /* Neither store nor load, check if it may cause a trap. */
387 if (may_trap_p (x))
388 return TRAP_RISKY;
389 /* Recursive step: walk the insn... */
390 fmt = GET_RTX_FORMAT (code);
391 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
392 {
393 if (fmt[i] == 'e')
394 {
395 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
396 insn_class = WORST_CLASS (insn_class, tmp_class);
397 }
398 else if (fmt[i] == 'E')
399 {
400 int j;
401 for (j = 0; j < XVECLEN (x, i); j++)
402 {
403 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
404 insn_class = WORST_CLASS (insn_class, tmp_class);
405 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
406 break;
407 }
408 }
409 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
410 break;
411 }
412 return insn_class;
413 }
414 }
415
416 /* Classifies rtx X of an insn for the purpose of verifying that X can be
417 executed speculatively (and consequently the insn can be moved
418 speculatively), by examining X, returning:
419 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
420 TRAP_FREE: non-load insn.
421 IFREE: load from a globally safe location.
422 IRISKY: volatile load.
423 PFREE_CANDIDATE, PRISKY_CANDIDATE: load that needs to be checked for
424 being either PFREE or PRISKY. */
425
426 static int
427 haifa_classify_rtx (const_rtx x)
428 {
429 int tmp_class = TRAP_FREE;
430 int insn_class = TRAP_FREE;
431 enum rtx_code code;
432
433 if (GET_CODE (x) == PARALLEL)
434 {
435 int i, len = XVECLEN (x, 0);
436
437 for (i = len - 1; i >= 0; i--)
438 {
439 tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
440 insn_class = WORST_CLASS (insn_class, tmp_class);
441 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
442 break;
443 }
444 }
445 else
446 {
447 code = GET_CODE (x);
448 switch (code)
449 {
450 case CLOBBER:
451 /* Test if it is a 'store'. */
452 tmp_class = may_trap_exp (XEXP (x, 0), 1);
453 break;
454 case SET:
455 /* Test if it is a store. */
456 tmp_class = may_trap_exp (SET_DEST (x), 1);
457 if (tmp_class == TRAP_RISKY)
458 break;
459 /* Test if it is a load. */
460 tmp_class =
461 WORST_CLASS (tmp_class,
462 may_trap_exp (SET_SRC (x), 0));
463 break;
464 case COND_EXEC:
465 tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
466 if (tmp_class == TRAP_RISKY)
467 break;
468 tmp_class = WORST_CLASS (tmp_class,
469 may_trap_exp (COND_EXEC_TEST (x), 0));
470 break;
471 case TRAP_IF:
472 tmp_class = TRAP_RISKY;
473 break;
474 default:;
475 }
476 insn_class = tmp_class;
477 }
478
479 return insn_class;
480 }
481
482 int
483 haifa_classify_insn (const_rtx insn)
484 {
485 return haifa_classify_rtx (PATTERN (insn));
486 }
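
/* A few illustrative classifications following the rules above: a SET
   whose destination is a MEM that may trap is TRAP_RISKY; a SET whose
   source is a volatile MEM is IRISKY; a load through a single base
   register that is not known to be trap-free, e.g.
   (set (reg A) (mem (reg B))), is PFREE_CANDIDATE and is checked
   further; a simple register-to-register move is TRAP_FREE.  */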
487
488 /* Forward declarations. */
489
490 static int priority (rtx);
491 static int rank_for_schedule (const void *, const void *);
492 static void swap_sort (rtx *, int);
493 static void queue_insn (rtx, int);
494 static int schedule_insn (rtx);
495 static void adjust_priority (rtx);
496 static void advance_one_cycle (void);
497 static void extend_h_i_d (void);
498
499
500 /* Notes handling mechanism:
501 =========================
502 Generally, NOTES are saved before scheduling and restored after scheduling.
503 The scheduler distinguishes between two types of notes:
504
505 (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
506 Before scheduling a region, a pointer to the note is added to the insn
507 that follows or precedes it. (This happens as part of the data dependence
508 computation). After scheduling an insn, the pointer contained in it is
509 used for regenerating the corresponding note (in reemit_notes).
510
511 (2) All other notes (e.g. INSN_DELETED): Before scheduling a block,
512 these notes are put in a list (in rm_other_notes() and
513 unlink_other_notes ()). After scheduling the block, these notes are
514 inserted at the beginning of the block (in schedule_block()). */
515
516 static void ready_add (struct ready_list *, rtx, bool);
517 static rtx ready_remove_first (struct ready_list *);
518 static rtx ready_remove_first_dispatch (struct ready_list *ready);
519
520 static void queue_to_ready (struct ready_list *);
521 static int early_queue_to_ready (state_t, struct ready_list *);
522
523 static void debug_ready_list (struct ready_list *);
524
525 /* The following functions are used to implement multi-pass scheduling
526 on the first cycle. */
527 static rtx ready_remove (struct ready_list *, int);
528 static void ready_remove_insn (rtx);
529
530 static void fix_inter_tick (rtx, rtx);
531 static int fix_tick_ready (rtx);
532 static void change_queue_index (rtx, int);
533
534 /* The following functions are used to implement scheduling of data/control
535 speculative instructions. */
536
537 static void extend_h_i_d (void);
538 static void init_h_i_d (rtx);
539 static void generate_recovery_code (rtx);
540 static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
541 static void begin_speculative_block (rtx);
542 static void add_to_speculative_block (rtx);
543 static void init_before_recovery (basic_block *);
544 static void create_check_block_twin (rtx, bool);
545 static void fix_recovery_deps (basic_block);
546 static void haifa_change_pattern (rtx, rtx);
547 static void dump_new_block_header (int, basic_block, rtx, rtx);
548 static void restore_bb_notes (basic_block);
549 static void fix_jump_move (rtx);
550 static void move_block_after_check (rtx);
551 static void move_succs (VEC(edge,gc) **, basic_block);
552 static void sched_remove_insn (rtx);
553 static void clear_priorities (rtx, rtx_vec_t *);
554 static void calc_priorities (rtx_vec_t);
555 static void add_jump_dependencies (rtx, rtx);
556 #ifdef ENABLE_CHECKING
557 static int has_edge_p (VEC(edge,gc) *, int);
558 static void check_cfg (rtx, rtx);
559 #endif
560
561 #endif /* INSN_SCHEDULING */
562 \f
563 /* Point to state used for the current scheduling pass. */
564 struct haifa_sched_info *current_sched_info;
565 \f
566 #ifndef INSN_SCHEDULING
567 void
568 schedule_insns (void)
569 {
570 }
571 #else
572
573 /* Do register pressure sensitive insn scheduling if the flag is set
574 up. */
575 bool sched_pressure_p;
576
577 /* Map regno -> its cover class.  The map is defined only when
578 SCHED_PRESSURE_P is true. */
579 enum reg_class *sched_regno_cover_class;
580
581 /* The current register pressure.  Only the elements corresponding to
582 cover classes are defined. */
583 static int curr_reg_pressure[N_REG_CLASSES];
584
585 /* Saved value of the previous array. */
586 static int saved_reg_pressure[N_REG_CLASSES];
587
588 /* Registers live at the given scheduling point. */
589 static bitmap curr_reg_live;
590
591 /* Saved value of the previous array. */
592 static bitmap saved_reg_live;
593
594 /* Registers mentioned in the current region. */
595 static bitmap region_ref_regs;
596
597 /* Initiate register pressure related info for scheduling the current
598 region.  Currently it only clears the registers mentioned in the
599 current region. */
600 void
601 sched_init_region_reg_pressure_info (void)
602 {
603 bitmap_clear (region_ref_regs);
604 }
605
606 /* Update current register pressure related info after birth (if
607 BIRTH_P) or death of register REGNO. */
608 static void
609 mark_regno_birth_or_death (int regno, bool birth_p)
610 {
611 enum reg_class cover_class;
612
613 cover_class = sched_regno_cover_class[regno];
614 if (regno >= FIRST_PSEUDO_REGISTER)
615 {
616 if (cover_class != NO_REGS)
617 {
618 if (birth_p)
619 {
620 bitmap_set_bit (curr_reg_live, regno);
621 curr_reg_pressure[cover_class]
622 += ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)];
623 }
624 else
625 {
626 bitmap_clear_bit (curr_reg_live, regno);
627 curr_reg_pressure[cover_class]
628 -= ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)];
629 }
630 }
631 }
632 else if (cover_class != NO_REGS
633 && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
634 {
635 if (birth_p)
636 {
637 bitmap_set_bit (curr_reg_live, regno);
638 curr_reg_pressure[cover_class]++;
639 }
640 else
641 {
642 bitmap_clear_bit (curr_reg_live, regno);
643 curr_reg_pressure[cover_class]--;
644 }
645 }
646 }
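
/* For example (the class name is only illustrative): the birth of a
   pseudo whose cover class is GENERAL_REGS and whose mode occupies two
   hard registers raises curr_reg_pressure[GENERAL_REGS] by 2, per
   ira_reg_class_nregs above, while the birth of an allocatable hard
   register always contributes exactly 1.  */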
647
648 /* Initiate current register pressure related info from living
649 registers given by LIVE. */
650 static void
651 initiate_reg_pressure_info (bitmap live)
652 {
653 int i;
654 unsigned int j;
655 bitmap_iterator bi;
656
657 for (i = 0; i < ira_reg_class_cover_size; i++)
658 curr_reg_pressure[ira_reg_class_cover[i]] = 0;
659 bitmap_clear (curr_reg_live);
660 EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
661 if (current_nr_blocks == 1 || bitmap_bit_p (region_ref_regs, j))
662 mark_regno_birth_or_death (j, true);
663 }
664
665 /* Mark registers in X as mentioned in the current region. */
666 static void
667 setup_ref_regs (rtx x)
668 {
669 int i, j, regno;
670 const RTX_CODE code = GET_CODE (x);
671 const char *fmt;
672
673 if (REG_P (x))
674 {
675 regno = REGNO (x);
676 if (regno >= FIRST_PSEUDO_REGISTER)
677 bitmap_set_bit (region_ref_regs, REGNO (x));
678 else
679 for (i = hard_regno_nregs[regno][GET_MODE (x)] - 1; i >= 0; i--)
680 bitmap_set_bit (region_ref_regs, regno + i);
681 return;
682 }
683 fmt = GET_RTX_FORMAT (code);
684 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
685 if (fmt[i] == 'e')
686 setup_ref_regs (XEXP (x, i));
687 else if (fmt[i] == 'E')
688 {
689 for (j = 0; j < XVECLEN (x, i); j++)
690 setup_ref_regs (XVECEXP (x, i, j));
691 }
692 }
693
694 /* Initiate current register pressure related info at the start of
695 basic block BB. */
696 static void
697 initiate_bb_reg_pressure_info (basic_block bb)
698 {
699 unsigned int i ATTRIBUTE_UNUSED;
700 rtx insn;
701
702 if (current_nr_blocks > 1)
703 FOR_BB_INSNS (bb, insn)
704 if (NONDEBUG_INSN_P (insn))
705 setup_ref_regs (PATTERN (insn));
706 initiate_reg_pressure_info (df_get_live_in (bb));
707 #ifdef EH_RETURN_DATA_REGNO
708 if (bb_has_eh_pred (bb))
709 for (i = 0; ; ++i)
710 {
711 unsigned int regno = EH_RETURN_DATA_REGNO (i);
712
713 if (regno == INVALID_REGNUM)
714 break;
715 if (! bitmap_bit_p (df_get_live_in (bb), regno))
716 mark_regno_birth_or_death (regno, true);
717 }
718 #endif
719 }
720
721 /* Save current register pressure related info. */
722 static void
723 save_reg_pressure (void)
724 {
725 int i;
726
727 for (i = 0; i < ira_reg_class_cover_size; i++)
728 saved_reg_pressure[ira_reg_class_cover[i]]
729 = curr_reg_pressure[ira_reg_class_cover[i]];
730 bitmap_copy (saved_reg_live, curr_reg_live);
731 }
732
733 /* Restore saved register pressure related info. */
734 static void
735 restore_reg_pressure (void)
736 {
737 int i;
738
739 for (i = 0; i < ira_reg_class_cover_size; i++)
740 curr_reg_pressure[ira_reg_class_cover[i]]
741 = saved_reg_pressure[ira_reg_class_cover[i]];
742 bitmap_copy (curr_reg_live, saved_reg_live);
743 }
744
745 /* Return TRUE if the register is dying after its USE. */
746 static bool
747 dying_use_p (struct reg_use_data *use)
748 {
749 struct reg_use_data *next;
750
751 for (next = use->next_regno_use; next != use; next = next->next_regno_use)
752 if (NONDEBUG_INSN_P (next->insn)
753 && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
754 return false;
755 return true;
756 }
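
/* Illustration: if pseudo 100 is used only by insns A and B, then while
   B is still unscheduled dying_use_p returns false for A's use of it;
   once B has been scheduled, A is the last remaining user and the
   function returns true, i.e. the register dies after A in the new
   insn order.  */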
757
758 /* Print info about the current register pressure and its excess for
759 each cover class. */
760 static void
761 print_curr_reg_pressure (void)
762 {
763 int i;
764 enum reg_class cl;
765
766 fprintf (sched_dump, ";;\t");
767 for (i = 0; i < ira_reg_class_cover_size; i++)
768 {
769 cl = ira_reg_class_cover[i];
770 gcc_assert (curr_reg_pressure[cl] >= 0);
771 fprintf (sched_dump, " %s:%d(%d)", reg_class_names[cl],
772 curr_reg_pressure[cl],
773 curr_reg_pressure[cl] - ira_available_class_regs[cl]);
774 }
775 fprintf (sched_dump, "\n");
776 }
777
778 /* Pointer to the last instruction scheduled. Used by rank_for_schedule,
779 so that insns independent of the last scheduled insn will be preferred
780 over dependent instructions. */
781
782 static rtx last_scheduled_insn;
783
784 /* Cached cost of the instruction.  Use the function below to get the
785 cost of the insn.  -1 here means that the field is not initialized. */
786 #define INSN_COST(INSN) (HID (INSN)->cost)
787
788 /* Compute cost of executing INSN.
789 This is the number of cycles between instruction issue and
790 instruction results. */
791 int
792 insn_cost (rtx insn)
793 {
794 int cost;
795
796 if (sel_sched_p ())
797 {
798 if (recog_memoized (insn) < 0)
799 return 0;
800
801 cost = insn_default_latency (insn);
802 if (cost < 0)
803 cost = 0;
804
805 return cost;
806 }
807
808 cost = INSN_COST (insn);
809
810 if (cost < 0)
811 {
812 /* A USE insn, or something else we don't need to
813 understand. We can't pass these directly to
814 result_ready_cost or insn_default_latency because it will
815 trigger a fatal error for unrecognizable insns. */
816 if (recog_memoized (insn) < 0)
817 {
818 INSN_COST (insn) = 0;
819 return 0;
820 }
821 else
822 {
823 cost = insn_default_latency (insn);
824 if (cost < 0)
825 cost = 0;
826
827 INSN_COST (insn) = cost;
828 }
829 }
830
831 return cost;
832 }
833
834 /* Compute cost of dependence LINK.
835 This is the number of cycles between instruction issue and
836 instruction results.
837 ??? We also use this function to call recog_memoized on all insns. */
838 int
839 dep_cost_1 (dep_t link, dw_t dw)
840 {
841 rtx insn = DEP_PRO (link);
842 rtx used = DEP_CON (link);
843 int cost;
844
845 /* A USE insn should never require the value used to be computed.
846 This allows the computation of a function's result and parameter
847 values to overlap the return and call.  We don't care about
848 the dependence cost when only decreasing register pressure. */
849 if (recog_memoized (used) < 0)
850 {
851 cost = 0;
852 recog_memoized (insn);
853 }
854 else
855 {
856 enum reg_note dep_type = DEP_TYPE (link);
857
858 cost = insn_cost (insn);
859
860 if (INSN_CODE (insn) >= 0)
861 {
862 if (dep_type == REG_DEP_ANTI)
863 cost = 0;
864 else if (dep_type == REG_DEP_OUTPUT)
865 {
866 cost = (insn_default_latency (insn)
867 - insn_default_latency (used));
868 if (cost <= 0)
869 cost = 1;
870 }
871 else if (bypass_p (insn))
872 cost = insn_latency (insn, used);
873 }
874
875
876 if (targetm.sched.adjust_cost_2)
877 cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
878 dw);
879 else if (targetm.sched.adjust_cost != NULL)
880 {
881 /* This variable is used for backward compatibility with the
882 targets. */
883 rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);
884
885 /* Make it self-cycled, so that if someone tries to walk over this
886 incomplete list they will be caught in an endless loop. */
887 XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;
888
889 /* Targets use only REG_NOTE_KIND of the link. */
890 PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));
891
892 cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
893 insn, cost);
894
895 free_INSN_LIST_node (dep_cost_rtx_link);
896 }
897
898 if (cost < 0)
899 cost = 0;
900 }
901
902 return cost;
903 }
904
905 /* Compute cost of dependence LINK.
906 This is the number of cycles between instruction issue and
907 instruction results. */
908 int
909 dep_cost (dep_t link)
910 {
911 return dep_cost_1 (link, 0);
912 }
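
/* Some illustrative costs, assuming a producer whose default latency is
   3, no bypass, and no target cost hooks: a true dependence costs 3
   (the consumer must wait for the result), an anti dependence costs 0,
   and an output dependence costs the difference between the producer's
   and the consumer's default latencies, but never less than 1.  */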
913
914 /* Use this sel-sched.c friendly function in reorder2 instead of increasing
915 INSN_PRIORITY explicitly. */
916 void
917 increase_insn_priority (rtx insn, int amount)
918 {
919 if (!sel_sched_p ())
920 {
921 /* We're dealing with haifa-sched.c INSN_PRIORITY. */
922 if (INSN_PRIORITY_KNOWN (insn))
923 INSN_PRIORITY (insn) += amount;
924 }
925 else
926 {
927 /* In sel-sched.c INSN_PRIORITY is not kept up to date.
928 Use EXPR_PRIORITY instead. */
929 sel_add_to_insn_priority (insn, amount);
930 }
931 }
932
933 /* Return 'true' if DEP should be included in priority calculations. */
934 static bool
935 contributes_to_priority_p (dep_t dep)
936 {
937 if (DEBUG_INSN_P (DEP_CON (dep))
938 || DEBUG_INSN_P (DEP_PRO (dep)))
939 return false;
940
941 /* Critical path is meaningful in block boundaries only. */
942 if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
943 DEP_PRO (dep)))
944 return false;
945
946 /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
947 then speculative instructions will less likely be
948 scheduled. That is because the priority of
949 their producers will increase, and, thus, the
950 producers will more likely be scheduled, thus,
951 resolving the dependence. */
952 if (sched_deps_info->generate_spec_deps
953 && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
954 && (DEP_STATUS (dep) & SPECULATIVE))
955 return false;
956
957 return true;
958 }
959
960 /* Compute the number of nondebug forward deps of an insn. */
961
962 static int
963 dep_list_size (rtx insn)
964 {
965 sd_iterator_def sd_it;
966 dep_t dep;
967 int dbgcount = 0, nodbgcount = 0;
968
969 if (!MAY_HAVE_DEBUG_INSNS)
970 return sd_lists_size (insn, SD_LIST_FORW);
971
972 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
973 {
974 if (DEBUG_INSN_P (DEP_CON (dep)))
975 dbgcount++;
976 else if (!DEBUG_INSN_P (DEP_PRO (dep)))
977 nodbgcount++;
978 }
979
980 gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, SD_LIST_FORW));
981
982 return nodbgcount;
983 }
984
985 /* Compute the priority number for INSN. */
986 static int
987 priority (rtx insn)
988 {
989 if (! INSN_P (insn))
990 return 0;
991
992 /* We should not be interested in priority of an already scheduled insn. */
993 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
994
995 if (!INSN_PRIORITY_KNOWN (insn))
996 {
997 int this_priority = -1;
998
999 if (dep_list_size (insn) == 0)
1000 /* ??? We should set INSN_PRIORITY to insn_cost when an insn has
1001 some forward deps but all of them are ignored by
1002 contributes_to_priority hook. At the moment we set priority of
1003 such insn to 0. */
1004 this_priority = insn_cost (insn);
1005 else
1006 {
1007 rtx prev_first, twin;
1008 basic_block rec;
1009
1010 /* For recovery check instructions we calculate priority slightly
1011 differently than for normal instructions.  Instead of walking
1012 through INSN_FORW_DEPS (check) list, we walk through
1013 INSN_FORW_DEPS list of each instruction in the corresponding
1014 recovery block. */
1015
1016 /* Selective scheduling does not define RECOVERY_BLOCK macro. */
1017 rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
1018 if (!rec || rec == EXIT_BLOCK_PTR)
1019 {
1020 prev_first = PREV_INSN (insn);
1021 twin = insn;
1022 }
1023 else
1024 {
1025 prev_first = NEXT_INSN (BB_HEAD (rec));
1026 twin = PREV_INSN (BB_END (rec));
1027 }
1028
1029 do
1030 {
1031 sd_iterator_def sd_it;
1032 dep_t dep;
1033
1034 FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
1035 {
1036 rtx next;
1037 int next_priority;
1038
1039 next = DEP_CON (dep);
1040
1041 if (BLOCK_FOR_INSN (next) != rec)
1042 {
1043 int cost;
1044
1045 if (!contributes_to_priority_p (dep))
1046 continue;
1047
1048 if (twin == insn)
1049 cost = dep_cost (dep);
1050 else
1051 {
1052 struct _dep _dep1, *dep1 = &_dep1;
1053
1054 init_dep (dep1, insn, next, REG_DEP_ANTI);
1055
1056 cost = dep_cost (dep1);
1057 }
1058
1059 next_priority = cost + priority (next);
1060
1061 if (next_priority > this_priority)
1062 this_priority = next_priority;
1063 }
1064 }
1065
1066 twin = PREV_INSN (twin);
1067 }
1068 while (twin != prev_first);
1069 }
1070
1071 if (this_priority < 0)
1072 {
1073 gcc_assert (this_priority == -1);
1074
1075 this_priority = insn_cost (insn);
1076 }
1077
1078 INSN_PRIORITY (insn) = this_priority;
1079 INSN_PRIORITY_STATUS (insn) = 1;
1080 }
1081
1082 return INSN_PRIORITY (insn);
1083 }
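
/* A small worked example with hypothetical latencies: for a chain
   A -> B -> C of true dependences with insn_cost (A) == insn_cost (B) == 2
   and insn_cost (C) == 1 (and no target cost hooks), priority (C) == 1
   since C has no forward deps, priority (B) == dep_cost (B->C)
   + priority (C) == 2 + 1 == 3, and priority (A) == 2 + 3 == 5, i.e. the
   length of the critical path from each insn to the end of the block.  */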
1084 \f
1085 /* Macros and functions for keeping the priority queue sorted, and
1086 dealing with queuing and dequeuing of instructions. */
1087
1088 #define SCHED_SORT(READY, N_READY) \
1089 do { if ((N_READY) == 2) \
1090 swap_sort (READY, N_READY); \
1091 else if ((N_READY) > 2) \
1092 qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \
1093 while (0)
1094
1095 /* Setup info about the current register pressure impact of scheduling
1096 INSN at the current scheduling point. */
1097 static void
1098 setup_insn_reg_pressure_info (rtx insn)
1099 {
1100 int i, change, before, after, hard_regno;
1101 int excess_cost_change;
1102 enum machine_mode mode;
1103 enum reg_class cl;
1104 struct reg_pressure_data *pressure_info;
1105 int *max_reg_pressure;
1106 struct reg_use_data *use;
1107 static int death[N_REG_CLASSES];
1108
1109 gcc_checking_assert (!DEBUG_INSN_P (insn));
1110
1111 excess_cost_change = 0;
1112 for (i = 0; i < ira_reg_class_cover_size; i++)
1113 death[ira_reg_class_cover[i]] = 0;
1114 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1115 if (dying_use_p (use))
1116 {
1117 cl = sched_regno_cover_class[use->regno];
1118 if (use->regno < FIRST_PSEUDO_REGISTER)
1119 death[cl]++;
1120 else
1121 death[cl] += ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (use->regno)];
1122 }
1123 pressure_info = INSN_REG_PRESSURE (insn);
1124 max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
1125 gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
1126 for (i = 0; i < ira_reg_class_cover_size; i++)
1127 {
1128 cl = ira_reg_class_cover[i];
1129 gcc_assert (curr_reg_pressure[cl] >= 0);
1130 change = (int) pressure_info[i].set_increase - death[cl];
1131 before = MAX (0, max_reg_pressure[i] - ira_available_class_regs[cl]);
1132 after = MAX (0, max_reg_pressure[i] + change
1133 - ira_available_class_regs[cl]);
1134 hard_regno = ira_class_hard_regs[cl][0];
1135 gcc_assert (hard_regno >= 0);
1136 mode = reg_raw_mode[hard_regno];
1137 excess_cost_change += ((after - before)
1138 * (ira_memory_move_cost[mode][cl][0]
1139 + ira_memory_move_cost[mode][cl][1]));
1140 }
1141 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
1142 }
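
/* A worked example with made-up numbers: suppose that for one cover
   class ira_available_class_regs is 8, the insn's recorded max pressure
   for that class is also 8, the insn sets two new registers of the class
   and none of its uses die here.  Then change == +2, before == MAX (0,
   8 - 8) == 0, after == MAX (0, 10 - 8) == 2, and the insn is charged
   2 * (the sum of that class's two memory move costs) in
   INSN_REG_PRESSURE_EXCESS_COST_CHANGE.  */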
1143
1144 /* Returns a positive value if x is preferred; returns a negative value if
1145 y is preferred. Should never return 0, since that will make the sort
1146 unstable. */
1147
1148 static int
1149 rank_for_schedule (const void *x, const void *y)
1150 {
1151 rtx tmp = *(const rtx *) y;
1152 rtx tmp2 = *(const rtx *) x;
1153 rtx last;
1154 int tmp_class, tmp2_class;
1155 int val, priority_val, info_val;
1156
1157 if (MAY_HAVE_DEBUG_INSNS)
1158 {
1159 /* Schedule debug insns as early as possible. */
1160 if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
1161 return -1;
1162 else if (DEBUG_INSN_P (tmp2))
1163 return 1;
1164 }
1165
1166 /* The insn in a schedule group should be issued first. */
1167 if (flag_sched_group_heuristic &&
1168 SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
1169 return SCHED_GROUP_P (tmp2) ? 1 : -1;
1170
1171 /* Make sure that priority of TMP and TMP2 are initialized. */
1172 gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
1173
1174 if (sched_pressure_p)
1175 {
1176 int diff;
1177
1178 /* Prefer insn whose scheduling results in the smallest register
1179 pressure excess. */
1180 if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
1181 + (INSN_TICK (tmp) > clock_var
1182 ? INSN_TICK (tmp) - clock_var : 0)
1183 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
1184 - (INSN_TICK (tmp2) > clock_var
1185 ? INSN_TICK (tmp2) - clock_var : 0))) != 0)
1186 return diff;
1187 }
1188
1189
1190 if (sched_pressure_p
1191 && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var))
1192 {
1193 if (INSN_TICK (tmp) <= clock_var)
1194 return -1;
1195 else if (INSN_TICK (tmp2) <= clock_var)
1196 return 1;
1197 else
1198 return INSN_TICK (tmp) - INSN_TICK (tmp2);
1199 }
1200 /* Prefer insn with higher priority. */
1201 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
1202
1203 if (flag_sched_critical_path_heuristic && priority_val)
1204 return priority_val;
1205
1206 /* Prefer speculative insn with greater dependencies weakness. */
1207 if (flag_sched_spec_insn_heuristic && spec_info)
1208 {
1209 ds_t ds1, ds2;
1210 dw_t dw1, dw2;
1211 int dw;
1212
1213 ds1 = TODO_SPEC (tmp) & SPECULATIVE;
1214 if (ds1)
1215 dw1 = ds_weak (ds1);
1216 else
1217 dw1 = NO_DEP_WEAK;
1218
1219 ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
1220 if (ds2)
1221 dw2 = ds_weak (ds2);
1222 else
1223 dw2 = NO_DEP_WEAK;
1224
1225 dw = dw2 - dw1;
1226 if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
1227 return dw;
1228 }
1229
1230 info_val = (*current_sched_info->rank) (tmp, tmp2);
1231 if (flag_sched_rank_heuristic && info_val)
1232 return info_val;
1233
1234 if (flag_sched_last_insn_heuristic)
1235 {
1236 last = last_scheduled_insn;
1237
1238 if (DEBUG_INSN_P (last) && last != current_sched_info->prev_head)
1239 do
1240 last = PREV_INSN (last);
1241 while (!NONDEBUG_INSN_P (last)
1242 && last != current_sched_info->prev_head);
1243 }
1244
1245 /* Compare insns based on their relation to the last scheduled
1246 non-debug insn. */
1247 if (flag_sched_last_insn_heuristic && NONDEBUG_INSN_P (last))
1248 {
1249 dep_t dep1;
1250 dep_t dep2;
1251
1252 /* Classify the instructions into three classes:
1253 1) Data dependent on last scheduled insn.
1254 2) Anti/Output dependent on last scheduled insn.
1255 3) Independent of last scheduled insn, or has latency of one.
1256 Choose the insn from the highest numbered class if different. */
1257 dep1 = sd_find_dep_between (last, tmp, true);
1258
1259 if (dep1 == NULL || dep_cost (dep1) == 1)
1260 tmp_class = 3;
1261 else if (/* Data dependence. */
1262 DEP_TYPE (dep1) == REG_DEP_TRUE)
1263 tmp_class = 1;
1264 else
1265 tmp_class = 2;
1266
1267 dep2 = sd_find_dep_between (last, tmp2, true);
1268
1269 if (dep2 == NULL || dep_cost (dep2) == 1)
1270 tmp2_class = 3;
1271 else if (/* Data dependence. */
1272 DEP_TYPE (dep2) == REG_DEP_TRUE)
1273 tmp2_class = 1;
1274 else
1275 tmp2_class = 2;
1276
1277 if ((val = tmp2_class - tmp_class))
1278 return val;
1279 }
1280
1281 /* Prefer the insn which has more later insns that depend on it.
1282 This gives the scheduler more freedom when scheduling later
1283 instructions at the expense of added register pressure. */
1284
1285 val = (dep_list_size (tmp2) - dep_list_size (tmp));
1286
1287 if (flag_sched_dep_count_heuristic && val != 0)
1288 return val;
1289
1290 /* If insns are equally good, sort by INSN_LUID (original insn order),
1291 so that we make the sort stable. This minimizes instruction movement,
1292 thus minimizing sched's effect on debugging and cross-jumping. */
1293 return INSN_LUID (tmp) - INSN_LUID (tmp2);
1294 }
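
/* A small example of how the pieces fit together: suppose register
   pressure scheduling is off and three ready insns differ only in
   INSN_PRIORITY, say 5, 3 and 7, with the critical path heuristic
   enabled.  rank_for_schedule then orders them by priority, SCHED_SORT
   places the priority-7 insn at ready->vec[ready->first], and
   ready_remove_first hands it out first.  */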
1295
1296 /* Resort the array A of length N, in which only the last element may be out of order. */
1297
1298 HAIFA_INLINE static void
1299 swap_sort (rtx *a, int n)
1300 {
1301 rtx insn = a[n - 1];
1302 int i = n - 2;
1303
1304 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
1305 {
1306 a[i + 1] = a[i];
1307 i -= 1;
1308 }
1309 a[i + 1] = insn;
1310 }
1311
1312 /* Add INSN to the insn queue so that it can be executed at least
1313 N_CYCLES after the currently executing insn. Preserve insns
1314 chain for debugging purposes. */
1315
1316 HAIFA_INLINE static void
1317 queue_insn (rtx insn, int n_cycles)
1318 {
1319 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
1320 rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
1321
1322 gcc_assert (n_cycles <= max_insn_queue_index);
1323 gcc_assert (!DEBUG_INSN_P (insn));
1324
1325 insn_queue[next_q] = link;
1326 q_size += 1;
1327
1328 if (sched_verbose >= 2)
1329 {
1330 fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
1331 (*current_sched_info->print_insn) (insn, 0));
1332
1333 fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
1334 }
1335
1336 QUEUE_INDEX (insn) = next_q;
1337 }
1338
1339 /* Remove INSN from queue. */
1340 static void
1341 queue_remove (rtx insn)
1342 {
1343 gcc_assert (QUEUE_INDEX (insn) >= 0);
1344 remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
1345 q_size--;
1346 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
1347 }
1348
1349 /* Return a pointer to the bottom of the ready list, i.e. the insn
1350 with the lowest priority. */
1351
1352 rtx *
1353 ready_lastpos (struct ready_list *ready)
1354 {
1355 gcc_assert (ready->n_ready >= 1);
1356 return ready->vec + ready->first - ready->n_ready + 1;
1357 }
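
/* Layout example with hypothetical values: if ready->first == 9 and
   ready->n_ready == 3, the highest-priority insn is ready->vec[9]
   (ready_element index 0), the next one is ready->vec[8], and
   ready_lastpos returns &ready->vec[7], the lowest-priority slot.  */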
1358
1359 /* Add an element INSN to the ready list so that it ends up with the
1360 lowest/highest priority depending on FIRST_P. */
1361
1362 HAIFA_INLINE static void
1363 ready_add (struct ready_list *ready, rtx insn, bool first_p)
1364 {
1365 if (!first_p)
1366 {
1367 if (ready->first == ready->n_ready)
1368 {
1369 memmove (ready->vec + ready->veclen - ready->n_ready,
1370 ready_lastpos (ready),
1371 ready->n_ready * sizeof (rtx));
1372 ready->first = ready->veclen - 1;
1373 }
1374 ready->vec[ready->first - ready->n_ready] = insn;
1375 }
1376 else
1377 {
1378 if (ready->first == ready->veclen - 1)
1379 {
1380 if (ready->n_ready)
1381 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
1382 memmove (ready->vec + ready->veclen - ready->n_ready - 1,
1383 ready_lastpos (ready),
1384 ready->n_ready * sizeof (rtx));
1385 ready->first = ready->veclen - 2;
1386 }
1387 ready->vec[++(ready->first)] = insn;
1388 }
1389
1390 ready->n_ready++;
1391 if (DEBUG_INSN_P (insn))
1392 ready->n_debug++;
1393
1394 gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
1395 QUEUE_INDEX (insn) = QUEUE_READY;
1396 }
1397
1398 /* Remove the element with the highest priority from the ready list and
1399 return it. */
1400
1401 HAIFA_INLINE static rtx
1402 ready_remove_first (struct ready_list *ready)
1403 {
1404 rtx t;
1405
1406 gcc_assert (ready->n_ready);
1407 t = ready->vec[ready->first--];
1408 ready->n_ready--;
1409 if (DEBUG_INSN_P (t))
1410 ready->n_debug--;
1411 /* If the queue becomes empty, reset it. */
1412 if (ready->n_ready == 0)
1413 ready->first = ready->veclen - 1;
1414
1415 gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
1416 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1417
1418 return t;
1419 }
1420
1421 /* The following code implements multi-pass scheduling for the first
1422 cycle.  In other words, we will try to choose the ready insn which
1423 permits starting the maximum number of insns on the same cycle. */
1424
1425 /* Return a pointer to the element INDEX from the ready list.  INDEX
1426 for the insn with the highest priority is 0, and the lowest priority
1427 has N_READY - 1. */
1428
1429 rtx
1430 ready_element (struct ready_list *ready, int index)
1431 {
1432 gcc_assert (ready->n_ready && index < ready->n_ready);
1433
1434 return ready->vec[ready->first - index];
1435 }
1436
1437 /* Remove the element INDEX from the ready list and return it. INDEX
1438 for insn with the highest priority is 0, and the lowest priority
1439 has N_READY - 1. */
1440
1441 HAIFA_INLINE static rtx
1442 ready_remove (struct ready_list *ready, int index)
1443 {
1444 rtx t;
1445 int i;
1446
1447 if (index == 0)
1448 return ready_remove_first (ready);
1449 gcc_assert (ready->n_ready && index < ready->n_ready);
1450 t = ready->vec[ready->first - index];
1451 ready->n_ready--;
1452 if (DEBUG_INSN_P (t))
1453 ready->n_debug--;
1454 for (i = index; i < ready->n_ready; i++)
1455 ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
1456 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1457 return t;
1458 }
1459
1460 /* Remove INSN from the ready list. */
1461 static void
1462 ready_remove_insn (rtx insn)
1463 {
1464 int i;
1465
1466 for (i = 0; i < readyp->n_ready; i++)
1467 if (ready_element (readyp, i) == insn)
1468 {
1469 ready_remove (readyp, i);
1470 return;
1471 }
1472 gcc_unreachable ();
1473 }
1474
1475 /* Sort the ready list READY by ascending priority, using the SCHED_SORT
1476 macro. */
1477
1478 void
1479 ready_sort (struct ready_list *ready)
1480 {
1481 int i;
1482 rtx *first = ready_lastpos (ready);
1483
1484 if (sched_pressure_p)
1485 {
1486 for (i = 0; i < ready->n_ready; i++)
1487 if (!DEBUG_INSN_P (first[i]))
1488 setup_insn_reg_pressure_info (first[i]);
1489 }
1490 SCHED_SORT (first, ready->n_ready);
1491 }
1492
1493 /* PREV is an insn that is ready to execute. Adjust its priority if that
1494 will help shorten or lengthen register lifetimes as appropriate. Also
1495 provide a hook for the target to tweak itself. */
1496
1497 HAIFA_INLINE static void
1498 adjust_priority (rtx prev)
1499 {
1500 /* ??? There used to be code here to try and estimate how an insn
1501 affected register lifetimes, but it did it by looking at REG_DEAD
1502 notes, which we removed in schedule_region. Nor did it try to
1503 take into account register pressure or anything useful like that.
1504
1505 Revisit when we have a machine model to work with and not before. */
1506
1507 if (targetm.sched.adjust_priority)
1508 INSN_PRIORITY (prev) =
1509 targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
1510 }
1511
1512 /* Advance DFA state STATE on one cycle. */
1513 void
1514 advance_state (state_t state)
1515 {
1516 if (targetm.sched.dfa_pre_advance_cycle)
1517 targetm.sched.dfa_pre_advance_cycle ();
1518
1519 if (targetm.sched.dfa_pre_cycle_insn)
1520 state_transition (state,
1521 targetm.sched.dfa_pre_cycle_insn ());
1522
1523 state_transition (state, NULL);
1524
1525 if (targetm.sched.dfa_post_cycle_insn)
1526 state_transition (state,
1527 targetm.sched.dfa_post_cycle_insn ());
1528
1529 if (targetm.sched.dfa_post_advance_cycle)
1530 targetm.sched.dfa_post_advance_cycle ();
1531 }
1532
1533 /* Advance time on one cycle. */
1534 HAIFA_INLINE static void
1535 advance_one_cycle (void)
1536 {
1537 advance_state (curr_state);
1538 if (sched_verbose >= 6)
1539 fprintf (sched_dump, ";;\tAdvanced a state.\n");
1540 }
1541
1542 /* Clock at which the previous instruction was issued. */
1543 static int last_clock_var;
1544
1545 /* Update register pressure after scheduling INSN. */
1546 static void
1547 update_register_pressure (rtx insn)
1548 {
1549 struct reg_use_data *use;
1550 struct reg_set_data *set;
1551
1552 gcc_checking_assert (!DEBUG_INSN_P (insn));
1553
1554 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1555 if (dying_use_p (use) && bitmap_bit_p (curr_reg_live, use->regno))
1556 mark_regno_birth_or_death (use->regno, false);
1557 for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
1558 mark_regno_birth_or_death (set->regno, true);
1559 }
1560
1561 /* Set up or update (if UPDATE_P) max register pressure (see its
1562 meaning in sched-int.h::_haifa_insn_data) for all current BB insns
1563 after insn AFTER. */
1564 static void
1565 setup_insn_max_reg_pressure (rtx after, bool update_p)
1566 {
1567 int i, p;
1568 bool eq_p;
1569 rtx insn;
1570 static int max_reg_pressure[N_REG_CLASSES];
1571
1572 save_reg_pressure ();
1573 for (i = 0; i < ira_reg_class_cover_size; i++)
1574 max_reg_pressure[ira_reg_class_cover[i]]
1575 = curr_reg_pressure[ira_reg_class_cover[i]];
1576 for (insn = NEXT_INSN (after);
1577 insn != NULL_RTX && ! BARRIER_P (insn)
1578 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
1579 insn = NEXT_INSN (insn))
1580 if (NONDEBUG_INSN_P (insn))
1581 {
1582 eq_p = true;
1583 for (i = 0; i < ira_reg_class_cover_size; i++)
1584 {
1585 p = max_reg_pressure[ira_reg_class_cover[i]];
1586 if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
1587 {
1588 eq_p = false;
1589 INSN_MAX_REG_PRESSURE (insn)[i]
1590 = max_reg_pressure[ira_reg_class_cover[i]];
1591 }
1592 }
1593 if (update_p && eq_p)
1594 break;
1595 update_register_pressure (insn);
1596 for (i = 0; i < ira_reg_class_cover_size; i++)
1597 if (max_reg_pressure[ira_reg_class_cover[i]]
1598 < curr_reg_pressure[ira_reg_class_cover[i]])
1599 max_reg_pressure[ira_reg_class_cover[i]]
1600 = curr_reg_pressure[ira_reg_class_cover[i]];
1601 }
1602 restore_reg_pressure ();
1603 }
1604
1605 /* Update the current register pressure after scheduling INSN. Update
1606 also max register pressure for unscheduled insns of the current
1607 BB. */
1608 static void
1609 update_reg_and_insn_max_reg_pressure (rtx insn)
1610 {
1611 int i;
1612 int before[N_REG_CLASSES];
1613
1614 for (i = 0; i < ira_reg_class_cover_size; i++)
1615 before[i] = curr_reg_pressure[ira_reg_class_cover[i]];
1616 update_register_pressure (insn);
1617 for (i = 0; i < ira_reg_class_cover_size; i++)
1618 if (curr_reg_pressure[ira_reg_class_cover[i]] != before[i])
1619 break;
1620 if (i < ira_reg_class_cover_size)
1621 setup_insn_max_reg_pressure (insn, true);
1622 }
1623
1624 /* Set up register pressure at the beginning of basic block BB whose
1625 insns start after insn AFTER.  Also set up max register pressure
1626 for all insns of the basic block. */
1627 void
1628 sched_setup_bb_reg_pressure_info (basic_block bb, rtx after)
1629 {
1630 gcc_assert (sched_pressure_p);
1631 initiate_bb_reg_pressure_info (bb);
1632 setup_insn_max_reg_pressure (after, false);
1633 }
1634
1635 /* INSN is the "currently executing insn".  Launch each insn which was
1636 waiting on INSN.  (The ready list and the current cycle are kept in
1637 the file-scope variables `ready' and `clock_var'.)  The function
1638 returns the necessary cycle advance after issuing the insn (it is not
1639 zero for insns in a schedule group). */
1640
1641 static int
1642 schedule_insn (rtx insn)
1643 {
1644 sd_iterator_def sd_it;
1645 dep_t dep;
1646 int i;
1647 int advance = 0;
1648
1649 if (sched_verbose >= 1)
1650 {
1651 struct reg_pressure_data *pressure_info;
1652 char buf[2048];
1653
1654 print_insn (buf, insn, 0);
1655 buf[40] = 0;
1656 fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);
1657
1658 if (recog_memoized (insn) < 0)
1659 fprintf (sched_dump, "nothing");
1660 else
1661 print_reservation (sched_dump, insn);
1662 pressure_info = INSN_REG_PRESSURE (insn);
1663 if (pressure_info != NULL)
1664 {
1665 fputc (':', sched_dump);
1666 for (i = 0; i < ira_reg_class_cover_size; i++)
1667 fprintf (sched_dump, "%s%+d(%d)",
1668 reg_class_names[ira_reg_class_cover[i]],
1669 pressure_info[i].set_increase, pressure_info[i].change);
1670 }
1671 fputc ('\n', sched_dump);
1672 }
1673
1674 if (sched_pressure_p && !DEBUG_INSN_P (insn))
1675 update_reg_and_insn_max_reg_pressure (insn);
1676
1677 /* The instruction being scheduled should have all its dependencies
1678 resolved and should have been removed from the ready list. */
1679 gcc_assert (sd_lists_empty_p (insn, SD_LIST_BACK));
1680
1681 /* Reset debug insns invalidated by moving this insn. */
1682 if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
1683 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1684 sd_iterator_cond (&sd_it, &dep);)
1685 {
1686 rtx dbg = DEP_PRO (dep);
1687 struct reg_use_data *use, *next;
1688
1689 gcc_assert (DEBUG_INSN_P (dbg));
1690
1691 if (sched_verbose >= 6)
1692 fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
1693 INSN_UID (dbg));
1694
1695 /* ??? Rather than resetting the debug insn, we might be able
1696 to emit a debug temp before the just-scheduled insn, but
1697 this would involve checking that the expression at the
1698 point of the debug insn is equivalent to the expression
1699 before the just-scheduled insn. They might not be: the
1700 expression in the debug insn may depend on other insns not
1701 yet scheduled that set MEMs, REGs or even other debug
1702 insns. It's not clear that attempting to preserve debug
1703 information in these cases is worth the effort, given how
1704 uncommon these resets are and the likelihood that the debug
1705 temps introduced won't survive the schedule change. */
1706 INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
1707 df_insn_rescan (dbg);
1708
1709 /* Unknown location doesn't use any registers. */
1710 for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
1711 {
1712 struct reg_use_data *prev = use;
1713
1714 /* Remove use from the cyclic next_regno_use chain first. */
1715 while (prev->next_regno_use != use)
1716 prev = prev->next_regno_use;
1717 prev->next_regno_use = use->next_regno_use;
1718 next = use->next_insn_use;
1719 free (use);
1720 }
1721 INSN_REG_USE_LIST (dbg) = NULL;
1722
1723 /* We delete rather than resolve these deps, otherwise we
1724 crash in sched_free_deps(), because forward deps are
1725 expected to be released before backward deps. */
1726 sd_delete_dep (sd_it);
1727 }
1728
1729 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
1730 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
1731
1732 gcc_assert (INSN_TICK (insn) >= MIN_TICK);
1733 if (INSN_TICK (insn) > clock_var)
1734 /* INSN has been prematurely moved from the queue to the ready list.
1735 This is possible only if the following flag is set. */
1736 gcc_assert (flag_sched_stalled_insns);
1737
1738 /* ??? Probably, if INSN is scheduled prematurely, we should leave
1739 INSN_TICK untouched. This is a machine-dependent issue, actually. */
1740 INSN_TICK (insn) = clock_var;
1741
1742 /* Update dependent instructions. */
1743 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
1744 sd_iterator_cond (&sd_it, &dep);)
1745 {
1746 rtx next = DEP_CON (dep);
1747
1748 /* Resolve the dependence between INSN and NEXT.
1749 sd_resolve_dep () moves current dep to another list thus
1750 advancing the iterator. */
1751 sd_resolve_dep (sd_it);
1752
1753 /* Don't bother trying to mark next as ready if insn is a debug
1754 insn. If insn is the last hard dependency, it will have
1755 already been discounted. */
1756 if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
1757 continue;
1758
1759 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
1760 {
1761 int effective_cost;
1762
1763 effective_cost = try_ready (next);
1764
1765 if (effective_cost >= 0
1766 && SCHED_GROUP_P (next)
1767 && advance < effective_cost)
1768 advance = effective_cost;
1769 }
1770 else
1771 /* A check insn always has only one forward dependence (to the first insn
1772 in the recovery block), so this will be executed only once. */
1773 {
1774 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
1775 fix_recovery_deps (RECOVERY_BLOCK (insn));
1776 }
1777 }
1778
1779 /* This is the place where the scheduler basically no longer needs the
1780 backward and forward dependencies for INSN. Nevertheless they are
1781 still used by heuristics in rank_for_schedule (), early_queue_to_ready ()
1782 and by some targets (e.g. rs6000). Thus the earliest place where we
1783 *can* remove dependencies is after the targetm.sched.finish () call in
1784 schedule_block (). On the other hand, the safest place to remove
1785 dependencies is when we finish scheduling the entire region. As we
1786 don't generate [many] dependencies during scheduling itself, we won't
1787 need the memory until the beginning of the next region.
1788 Bottom line: dependencies are removed for all insns at the end of
1789 scheduling the region. */
1790
1791 /* Annotate the instruction with issue information -- TImode
1792 indicates that the instruction is expected not to be able
1793 to issue on the same cycle as the previous insn. A machine
1794 may use this information to decide how the instruction should
1795 be aligned. */
1796 if (issue_rate > 1
1797 && GET_CODE (PATTERN (insn)) != USE
1798 && GET_CODE (PATTERN (insn)) != CLOBBER
1799 && !DEBUG_INSN_P (insn))
1800 {
1801 if (reload_completed)
1802 PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
1803 last_clock_var = clock_var;
1804 }
1805
1806 return advance;
1807 }
1808
1809 /* Functions for handling of notes. */
1810
1811 /* Add the note list that ends at FROM_END to the end of the list *TO_ENDP. */
1812 void
1813 concat_note_lists (rtx from_end, rtx *to_endp)
1814 {
1815 rtx from_start;
1816
1817 /* It's easy when we have nothing to concat. */
1818 if (from_end == NULL)
1819 return;
1820
1821 /* It's also easy when destination is empty. */
1822 if (*to_endp == NULL)
1823 {
1824 *to_endp = from_end;
1825 return;
1826 }
1827
1828 from_start = from_end;
1829 while (PREV_INSN (from_start) != NULL)
1830 from_start = PREV_INSN (from_start);
1831
1832 PREV_INSN (from_start) = *to_endp;
1833 NEXT_INSN (*to_endp) = from_start;
1834 *to_endp = from_end;
1835 }
1836
1837 /* Delete notes between HEAD and TAIL and put them in the chain
1838 of notes ended by NOTE_LIST. */
1839 void
1840 remove_notes (rtx head, rtx tail)
1841 {
1842 rtx next_tail, insn, next;
1843
1844 note_list = 0;
1845 if (head == tail && !INSN_P (head))
1846 return;
1847
1848 next_tail = NEXT_INSN (tail);
1849 for (insn = head; insn != next_tail; insn = next)
1850 {
1851 next = NEXT_INSN (insn);
1852 if (!NOTE_P (insn))
1853 continue;
1854
1855 switch (NOTE_KIND (insn))
1856 {
1857 case NOTE_INSN_BASIC_BLOCK:
1858 continue;
1859
1860 case NOTE_INSN_EPILOGUE_BEG:
1861 if (insn != tail)
1862 {
1863 remove_insn (insn);
1864 add_reg_note (next, REG_SAVE_NOTE,
1865 GEN_INT (NOTE_INSN_EPILOGUE_BEG));
1866 break;
1867 }
1868 /* FALLTHRU */
1869
1870 default:
1871 remove_insn (insn);
1872
1873 /* Add the note to list that ends at NOTE_LIST. */
1874 PREV_INSN (insn) = note_list;
1875 NEXT_INSN (insn) = NULL_RTX;
1876 if (note_list)
1877 NEXT_INSN (note_list) = insn;
1878 note_list = insn;
1879 break;
1880 }
1881
1882 gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
1883 }
1884 }
1885
1886
1887 /* Return the head and tail pointers of ebb starting at BEG and ending
1888 at END. */
1889 void
1890 get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
1891 {
1892 rtx beg_head = BB_HEAD (beg);
1893 rtx beg_tail = BB_END (beg);
1894 rtx end_head = BB_HEAD (end);
1895 rtx end_tail = BB_END (end);
1896
1897 /* Don't include any notes or labels at the beginning of the BEG
1898 basic block, or notes at the end of the END basic block. */
1899
1900 if (LABEL_P (beg_head))
1901 beg_head = NEXT_INSN (beg_head);
1902
1903 while (beg_head != beg_tail)
1904 if (NOTE_P (beg_head) || BOUNDARY_DEBUG_INSN_P (beg_head))
1905 beg_head = NEXT_INSN (beg_head);
1906 else
1907 break;
1908
1909 *headp = beg_head;
1910
1911 if (beg == end)
1912 end_head = beg_head;
1913 else if (LABEL_P (end_head))
1914 end_head = NEXT_INSN (end_head);
1915
1916 while (end_head != end_tail)
1917 if (NOTE_P (end_tail) || BOUNDARY_DEBUG_INSN_P (end_tail))
1918 end_tail = PREV_INSN (end_tail);
1919 else
1920 break;
1921
1922 *tailp = end_tail;
1923 }
1924
1925 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
1926
1927 int
1928 no_real_insns_p (const_rtx head, const_rtx tail)
1929 {
1930 while (head != NEXT_INSN (tail))
1931 {
1932 if (!NOTE_P (head) && !LABEL_P (head)
1933 && !BOUNDARY_DEBUG_INSN_P (head))
1934 return 0;
1935 head = NEXT_INSN (head);
1936 }
1937 return 1;
1938 }
1939
1940 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
1941 previously found among the insns. Insert them just before HEAD. */
1942 rtx
1943 restore_other_notes (rtx head, basic_block head_bb)
1944 {
1945 if (note_list != 0)
1946 {
1947 rtx note_head = note_list;
1948
1949 if (head)
1950 head_bb = BLOCK_FOR_INSN (head);
1951 else
1952 head = NEXT_INSN (bb_note (head_bb));
1953
1954 while (PREV_INSN (note_head))
1955 {
1956 set_block_for_insn (note_head, head_bb);
1957 note_head = PREV_INSN (note_head);
1958 }
1959 /* In the loop above we've missed this note. */
1960 set_block_for_insn (note_head, head_bb);
1961
1962 PREV_INSN (note_head) = PREV_INSN (head);
1963 NEXT_INSN (PREV_INSN (head)) = note_head;
1964 PREV_INSN (head) = note_list;
1965 NEXT_INSN (note_list) = head;
1966
1967 if (BLOCK_FOR_INSN (head) != head_bb)
1968 BB_END (head_bb) = note_list;
1969
1970 head = note_head;
1971 }
1972
1973 return head;
1974 }
1975
1976 /* Move insns that became ready to fire from queue to ready list. */
1977
1978 static void
1979 queue_to_ready (struct ready_list *ready)
1980 {
1981 rtx insn;
1982 rtx link;
1983 rtx skip_insn;
1984
1985 q_ptr = NEXT_Q (q_ptr);
1986
1987 if (dbg_cnt (sched_insn) == false)
1988 /* If the debug counter is activated, do not requeue the insn right
1989 after last_scheduled_insn. */
1990 skip_insn = next_nonnote_nondebug_insn (last_scheduled_insn);
1991 else
1992 skip_insn = NULL_RTX;
1993
1994 /* Add all pending insns that can be scheduled without stalls to the
1995 ready list. */
1996 for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
1997 {
1998 insn = XEXP (link, 0);
1999 q_size -= 1;
2000
2001 if (sched_verbose >= 2)
2002 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
2003 (*current_sched_info->print_insn) (insn, 0));
2004
2005 /* If the ready list is full, delay the insn for 1 cycle.
2006 See the comment in schedule_block for the rationale. */
2007 if (!reload_completed
2008 && ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
2009 && !SCHED_GROUP_P (insn)
2010 && insn != skip_insn)
2011 {
2012 if (sched_verbose >= 2)
2013 fprintf (sched_dump, "requeued because ready full\n");
2014 queue_insn (insn, 1);
2015 }
2016 else
2017 {
2018 ready_add (ready, insn, false);
2019 if (sched_verbose >= 2)
2020 fprintf (sched_dump, "moving to ready without stalls\n");
2021 }
2022 }
2023 free_INSN_LIST_list (&insn_queue[q_ptr]);
2024
2025 /* If there are no ready insns, stall until one is ready and add all
2026 of the pending insns at that point to the ready list. */
2027 if (ready->n_ready == 0)
2028 {
2029 int stalls;
2030
2031 for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
2032 {
2033 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
2034 {
2035 for (; link; link = XEXP (link, 1))
2036 {
2037 insn = XEXP (link, 0);
2038 q_size -= 1;
2039
2040 if (sched_verbose >= 2)
2041 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
2042 (*current_sched_info->print_insn) (insn, 0));
2043
2044 ready_add (ready, insn, false);
2045 if (sched_verbose >= 2)
2046 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
2047 }
2048 free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
2049
2050 advance_one_cycle ();
2051
2052 break;
2053 }
2054
2055 advance_one_cycle ();
2056 }
2057
2058 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
2059 clock_var += stalls;
2060 }
2061 }
2062
2063 /* Used by early_queue_to_ready. Determines whether it is "ok" to
2064 prematurely move INSN from the queue to the ready list. Currently,
2065 if a target defines the hook 'is_costly_dependence', this function
2066 uses the hook to check whether there exist any dependences which are
2067 considered costly by the target, between INSN and other insns that
2068 have already been scheduled. Dependences are checked up to Y cycles
2069 back, with default Y=1. The flag -fsched-stalled-insns-dep=Y allows
2070 controlling this value.
2071 (Other considerations could be taken into account instead, or in
2072 addition, depending on user flags and target hooks.) */
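/* For example, with a hypothetical -fsched-stalled-insns-dep=3, the loop
   below walks back over at most three dispatch groups (insns in TImode
   mark the group boundaries) and rejects INSN as soon as the target
   reports a costly dependence against an already-scheduled insn in that
   window. */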
2073
2074 static bool
2075 ok_for_early_queue_removal (rtx insn)
2076 {
2077 int n_cycles;
2078 rtx prev_insn = last_scheduled_insn;
2079
2080 if (targetm.sched.is_costly_dependence)
2081 {
2082 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
2083 {
2084 for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
2085 {
2086 int cost;
2087
2088 if (prev_insn == current_sched_info->prev_head)
2089 {
2090 prev_insn = NULL;
2091 break;
2092 }
2093
2094 if (!NOTE_P (prev_insn))
2095 {
2096 dep_t dep;
2097
2098 dep = sd_find_dep_between (prev_insn, insn, true);
2099
2100 if (dep != NULL)
2101 {
2102 cost = dep_cost (dep);
2103
2104 if (targetm.sched.is_costly_dependence (dep, cost,
2105 flag_sched_stalled_insns_dep - n_cycles))
2106 return false;
2107 }
2108 }
2109
2110 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
2111 break;
2112 }
2113
2114 if (!prev_insn)
2115 break;
2116 prev_insn = PREV_INSN (prev_insn);
2117 }
2118 }
2119
2120 return true;
2121 }
2122
2123
2124 /* Remove insns from the queue, before they become "ready" with respect
2125 to FU latency considerations. */
2126
2127 static int
2128 early_queue_to_ready (state_t state, struct ready_list *ready)
2129 {
2130 rtx insn;
2131 rtx link;
2132 rtx next_link;
2133 rtx prev_link;
2134 bool move_to_ready;
2135 int cost;
2136 state_t temp_state = alloca (dfa_state_size);
2137 int stalls;
2138 int insns_removed = 0;
2139
2140 /*
2141 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
2142 function:
2143
2144 X == 0: There is no limit on how many queued insns can be removed
2145 prematurely. (flag_sched_stalled_insns = -1).
2146
2147 X >= 1: Only X queued insns can be removed prematurely in each
2148 invocation. (flag_sched_stalled_insns = X).
2149
2150 Otherwise: Early queue removal is disabled.
2151 (flag_sched_stalled_insns = 0)
2152 */
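/* For instance (values chosen only for illustration): with
   -fsched-stalled-insns=2, at most two insns are moved from Q to R per
   call, because INSNS_REMOVED is checked against FLAG_SCHED_STALLED_INSNS
   below; with X == 0 (flag_sched_stalled_insns == -1) that limit is
   never reached. */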
2153
2154 if (! flag_sched_stalled_insns)
2155 return 0;
2156
2157 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
2158 {
2159 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
2160 {
2161 if (sched_verbose > 6)
2162 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
2163
2164 prev_link = 0;
2165 while (link)
2166 {
2167 next_link = XEXP (link, 1);
2168 insn = XEXP (link, 0);
2169 if (insn && sched_verbose > 6)
2170 print_rtl_single (sched_dump, insn);
2171
2172 memcpy (temp_state, state, dfa_state_size);
2173 if (recog_memoized (insn) < 0)
2174 /* Use a non-negative cost to indicate that the insn is not ready,
2175 to avoid an infinite Q->R->Q->R... cycle. */
2176 cost = 0;
2177 else
2178 cost = state_transition (temp_state, insn);
2179
2180 if (sched_verbose >= 6)
2181 fprintf (sched_dump, "transition cost = %d\n", cost);
2182
2183 move_to_ready = false;
2184 if (cost < 0)
2185 {
2186 move_to_ready = ok_for_early_queue_removal (insn);
2187 if (move_to_ready == true)
2188 {
2189 /* move from Q to R */
2190 q_size -= 1;
2191 ready_add (ready, insn, false);
2192
2193 if (prev_link)
2194 XEXP (prev_link, 1) = next_link;
2195 else
2196 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
2197
2198 free_INSN_LIST_node (link);
2199
2200 if (sched_verbose >= 2)
2201 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
2202 (*current_sched_info->print_insn) (insn, 0));
2203
2204 insns_removed++;
2205 if (insns_removed == flag_sched_stalled_insns)
2206 /* Remove no more than flag_sched_stalled_insns insns
2207 from Q at a time. */
2208 return insns_removed;
2209 }
2210 }
2211
2212 if (move_to_ready == false)
2213 prev_link = link;
2214
2215 link = next_link;
2216 } /* while link */
2217 } /* if link */
2218
2219 } /* for stalls.. */
2220
2221 return insns_removed;
2222 }
2223
2224
2225 /* Print the ready list for debugging purposes. Callable from debugger. */
2226
2227 static void
2228 debug_ready_list (struct ready_list *ready)
2229 {
2230 rtx *p;
2231 int i;
2232
2233 if (ready->n_ready == 0)
2234 {
2235 fprintf (sched_dump, "\n");
2236 return;
2237 }
2238
2239 p = ready_lastpos (ready);
2240 for (i = 0; i < ready->n_ready; i++)
2241 {
2242 fprintf (sched_dump, " %s:%d",
2243 (*current_sched_info->print_insn) (p[i], 0),
2244 INSN_LUID (p[i]));
2245 if (sched_pressure_p)
2246 fprintf (sched_dump, "(cost=%d",
2247 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
2248 if (INSN_TICK (p[i]) > clock_var)
2249 fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
2250 if (sched_pressure_p)
2251 fprintf (sched_dump, ")");
2252 }
2253 fprintf (sched_dump, "\n");
2254 }
2255
2256 /* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
2257 NOTEs. This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
2258 replaces the epilogue note in the correct basic block. */
2259 void
2260 reemit_notes (rtx insn)
2261 {
2262 rtx note, last = insn;
2263
2264 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2265 {
2266 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
2267 {
2268 enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
2269
2270 last = emit_note_before (note_type, last);
2271 remove_note (insn, note);
2272 }
2273 }
2274 }
2275
2276 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
2277 static void
2278 move_insn (rtx insn, rtx last, rtx nt)
2279 {
2280 if (PREV_INSN (insn) != last)
2281 {
2282 basic_block bb;
2283 rtx note;
2284 int jump_p = 0;
2285
2286 bb = BLOCK_FOR_INSN (insn);
2287
2288 /* BB_HEAD is either LABEL or NOTE. */
2289 gcc_assert (BB_HEAD (bb) != insn);
2290
2291 if (BB_END (bb) == insn)
2292 /* If this is the last instruction in the BB, move the end marker
2293 one instruction up. */
2294 {
2295 /* Jumps are always placed at the end of basic block. */
2296 jump_p = control_flow_insn_p (insn);
2297
2298 gcc_assert (!jump_p
2299 || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
2300 && IS_SPECULATION_BRANCHY_CHECK_P (insn))
2301 || (common_sched_info->sched_pass_id
2302 == SCHED_EBB_PASS));
2303
2304 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
2305
2306 BB_END (bb) = PREV_INSN (insn);
2307 }
2308
2309 gcc_assert (BB_END (bb) != last);
2310
2311 if (jump_p)
2312 /* We move the block note along with jump. */
2313 {
2314 gcc_assert (nt);
2315
2316 note = NEXT_INSN (insn);
2317 while (NOTE_NOT_BB_P (note) && note != nt)
2318 note = NEXT_INSN (note);
2319
2320 if (note != nt
2321 && (LABEL_P (note)
2322 || BARRIER_P (note)))
2323 note = NEXT_INSN (note);
2324
2325 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
2326 }
2327 else
2328 note = insn;
2329
2330 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
2331 PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
2332
2333 NEXT_INSN (note) = NEXT_INSN (last);
2334 PREV_INSN (NEXT_INSN (last)) = note;
2335
2336 NEXT_INSN (last) = insn;
2337 PREV_INSN (insn) = last;
2338
2339 bb = BLOCK_FOR_INSN (last);
2340
2341 if (jump_p)
2342 {
2343 fix_jump_move (insn);
2344
2345 if (BLOCK_FOR_INSN (insn) != bb)
2346 move_block_after_check (insn);
2347
2348 gcc_assert (BB_END (bb) == last);
2349 }
2350
2351 df_insn_change_bb (insn, bb);
2352
2353 /* Update BB_END, if needed. */
2354 if (BB_END (bb) == last)
2355 BB_END (bb) = insn;
2356 }
2357
2358 SCHED_GROUP_P (insn) = 0;
2359 }
2360
2361 /* Return true if scheduling INSN will finish current clock cycle. */
2362 static bool
2363 insn_finishes_cycle_p (rtx insn)
2364 {
2365 if (SCHED_GROUP_P (insn))
2366 /* After issuing INSN, the rest of the sched_group will be forced to
2367 issue in order. Don't make any plans for the rest of the cycle. */
2368 return true;
2369
2370 /* Finishing the block will, apparently, finish the cycle. */
2371 if (current_sched_info->insn_finishes_block_p
2372 && current_sched_info->insn_finishes_block_p (insn))
2373 return true;
2374
2375 return false;
2376 }
2377
2378 /* Define type for target data used in multipass scheduling. */
2379 #ifndef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T
2380 # define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T int
2381 #endif
2382 typedef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T first_cycle_multipass_data_t;
2383
2384 /* The following structure describes an entry of the stack of choices. */
2385 struct choice_entry
2386 {
2387 /* Ordinal number of the issued insn in the ready queue. */
2388 int index;
2389 /* The number of remaining insns whose issue we should still try. */
2390 int rest;
2391 /* The number of issued essential insns. */
2392 int n;
2393 /* State after issuing the insn. */
2394 state_t state;
2395 /* Target-specific data. */
2396 first_cycle_multipass_data_t target_data;
2397 };
2398
2399 /* The following array is used to implement a stack of choices used in
2400 function max_issue. */
2401 static struct choice_entry *choice_stack;
2402
2403 /* The following variable holds the number of essential insns issued on
2404 the current cycle. An insn is essential if it changes the
2405 processor's state. */
2406 int cycle_issued_insns;
2407
2408 /* This holds the value of the target dfa_lookahead hook. */
2409 int dfa_lookahead;
2410
2411 /* The following variable holds the maximal number of tries of issuing
2412 insns for the first-cycle multipass insn scheduling. We define
2413 this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE). We would not
2414 need this constraint if all real insns (with non-negative codes)
2415 had reservations, because in that case the algorithm complexity is
2416 O(DFA_LOOKAHEAD**ISSUE_RATE). Unfortunately, the DFA descriptions
2417 might be incomplete and such insns might occur. For such
2418 descriptions, the complexity of the algorithm (without the constraint)
2419 could reach DFA_LOOKAHEAD ** N, where N is the queue length. */
2420 static int max_lookahead_tries;
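/* For instance, with dfa_lookahead == 4 and issue_rate == 2 (hypothetical
   values, for illustration only), max_issue () below sets
   max_lookahead_tries to 100 * 4 * 4 == 1600 tries per call. */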
2421
2422 /* The following value is the value of the hook
2423 `first_cycle_multipass_dfa_lookahead' at the last call of
2424 `max_issue'. */
2425 static int cached_first_cycle_multipass_dfa_lookahead = 0;
2426
2427 /* The following value is the value of `issue_rate' at the last call of
2428 `sched_init'. */
2429 static int cached_issue_rate = 0;
2430
2431 /* The following function returns the maximal (or close to maximal)
2432 number of insns which can be issued on the same cycle, one of which
2433 is the insn with the best rank (the first insn in READY). To do
2434 this, the function tries different samples of ready insns. READY
2435 is the current queue `ready'. The global array READY_TRY reflects
2436 which insns are already issued in this try. The function stops
2437 immediately if it finds a solution in which all insns can be issued.
2438 INDEX will contain the index of the best insn in READY. The
2439 function is used only for first-cycle multipass scheduling.
2440
2441 PRIVILEGED_N >= 0
2442
2443 This function expects recognized insns only. All USEs,
2444 CLOBBERs, etc must be filtered elsewhere. */
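/* The return value is the number of insns issued in the best solution
   found; zero means that no acceptable solution was found for this
   cycle. */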
2445 int
2446 max_issue (struct ready_list *ready, int privileged_n, state_t state,
2447 bool first_cycle_insn_p, int *index)
2448 {
2449 int n, i, all, n_ready, best, delay, tries_num;
2450 int more_issue;
2451 struct choice_entry *top;
2452 rtx insn;
2453
2454 n_ready = ready->n_ready;
2455 gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
2456 && privileged_n <= n_ready);
2457
2458 /* Init MAX_LOOKAHEAD_TRIES. */
2459 if (cached_first_cycle_multipass_dfa_lookahead != dfa_lookahead)
2460 {
2461 cached_first_cycle_multipass_dfa_lookahead = dfa_lookahead;
2462 max_lookahead_tries = 100;
2463 for (i = 0; i < issue_rate; i++)
2464 max_lookahead_tries *= dfa_lookahead;
2465 }
2466
2467 /* Init MORE_ISSUE -- the number of insns we can still issue this cycle. */
2468 more_issue = issue_rate - cycle_issued_insns;
2469 gcc_assert (more_issue >= 0);
2470
2471 /* The number of insns issued in the best solution found so far. */
2472 best = 0;
2473
2474 top = choice_stack;
2475
2476 /* Set initial state of the search. */
2477 memcpy (top->state, state, dfa_state_size);
2478 top->rest = dfa_lookahead;
2479 top->n = 0;
2480 if (targetm.sched.first_cycle_multipass_begin)
2481 targetm.sched.first_cycle_multipass_begin (&top->target_data,
2482 ready_try, n_ready,
2483 first_cycle_insn_p);
2484
2485 /* Count the number of the insns to search among. */
2486 for (all = i = 0; i < n_ready; i++)
2487 if (!ready_try [i])
2488 all++;
2489
2490 /* I is the index of the insn to try next. */
2491 i = 0;
2492 tries_num = 0;
2493 for (;;)
2494 {
2495 if (/* If we've reached a dead end or searched enough of what we have
2496 been asked... */
2497 top->rest == 0
2498 /* or have nothing else to try... */
2499 || i >= n_ready
2500 /* or should not issue more. */
2501 || top->n >= more_issue)
2502 {
2503 /* ??? (... || i == n_ready). */
2504 gcc_assert (i <= n_ready);
2505
2506 /* We should not issue more than issue_rate instructions. */
2507 gcc_assert (top->n <= more_issue);
2508
2509 if (top == choice_stack)
2510 break;
2511
2512 if (best < top - choice_stack)
2513 {
2514 if (privileged_n)
2515 {
2516 n = privileged_n;
2517 /* Try to find issued privileged insn. */
2518 while (n && !ready_try[--n]);
2519 }
2520
2521 if (/* If all insns are equally good... */
2522 privileged_n == 0
2523 /* Or a privileged insn will be issued. */
2524 || ready_try[n])
2525 /* Then we have a solution. */
2526 {
2527 best = top - choice_stack;
2528 /* This is the index of the insn issued first in this
2529 solution. */
2530 *index = choice_stack [1].index;
2531 if (top->n == more_issue || best == all)
2532 break;
2533 }
2534 }
2535
2536 /* Set ready-list index to point to the last insn
2537 ('i++' below will advance it to the next insn). */
2538 i = top->index;
2539
2540 /* Backtrack. */
2541 ready_try [i] = 0;
2542
2543 if (targetm.sched.first_cycle_multipass_backtrack)
2544 targetm.sched.first_cycle_multipass_backtrack (&top->target_data,
2545 ready_try, n_ready);
2546
2547 top--;
2548 memcpy (state, top->state, dfa_state_size);
2549 }
2550 else if (!ready_try [i])
2551 {
2552 tries_num++;
2553 if (tries_num > max_lookahead_tries)
2554 break;
2555 insn = ready_element (ready, i);
2556 delay = state_transition (state, insn);
2557 if (delay < 0)
2558 {
2559 if (state_dead_lock_p (state)
2560 || insn_finishes_cycle_p (insn))
2561 /* We won't issue any more instructions in the next
2562 choice_state. */
2563 top->rest = 0;
2564 else
2565 top->rest--;
2566
2567 n = top->n;
2568 if (memcmp (top->state, state, dfa_state_size) != 0)
2569 n++;
2570
2571 /* Advance to the next choice_entry. */
2572 top++;
2573 /* Initialize it. */
2574 top->rest = dfa_lookahead;
2575 top->index = i;
2576 top->n = n;
2577 memcpy (top->state, state, dfa_state_size);
2578 ready_try [i] = 1;
2579
2580 if (targetm.sched.first_cycle_multipass_issue)
2581 targetm.sched.first_cycle_multipass_issue (&top->target_data,
2582 ready_try, n_ready,
2583 insn,
2584 &((top - 1)
2585 ->target_data));
2586
2587 i = -1;
2588 }
2589 }
2590
2591 /* Increase ready-list index. */
2592 i++;
2593 }
2594
2595 if (targetm.sched.first_cycle_multipass_end)
2596 targetm.sched.first_cycle_multipass_end (best != 0
2597 ? &choice_stack[1].target_data
2598 : NULL);
2599
2600 /* Restore the original state of the DFA. */
2601 memcpy (state, choice_stack->state, dfa_state_size);
2602
2603 return best;
2604 }
2605
2606 /* The following function chooses an insn from READY and modifies
2607 READY. It is used only for first-cycle multipass
2608 scheduling.
2609 Return:
2610 -1 if the cycle should be advanced,
2611 0 if INSN_PTR is set to point to the desirable insn,
2612 1 if choose_ready () should be restarted without advancing the cycle. */
2613 static int
2614 choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
2615 rtx *insn_ptr)
2616 {
2617 int lookahead;
2618
2619 if (dbg_cnt (sched_insn) == false)
2620 {
2621 rtx insn;
2622
2623 insn = next_nonnote_insn (last_scheduled_insn);
2624
2625 if (QUEUE_INDEX (insn) == QUEUE_READY)
2626 /* INSN is in the ready_list. */
2627 {
2628 ready_remove_insn (insn);
2629 *insn_ptr = insn;
2630 return 0;
2631 }
2632
2633 /* INSN is in the queue. Advance cycle to move it to the ready list. */
2634 return -1;
2635 }
2636
2637 lookahead = 0;
2638
2639 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
2640 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
2641 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
2642 || DEBUG_INSN_P (ready_element (ready, 0)))
2643 {
2644 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
2645 *insn_ptr = ready_remove_first_dispatch (ready);
2646 else
2647 *insn_ptr = ready_remove_first (ready);
2648
2649 return 0;
2650 }
2651 else
2652 {
2653 /* Try to choose the better insn. */
2654 int index = 0, i, n;
2655 rtx insn;
2656 int try_data = 1, try_control = 1;
2657 ds_t ts;
2658
2659 insn = ready_element (ready, 0);
2660 if (INSN_CODE (insn) < 0)
2661 {
2662 *insn_ptr = ready_remove_first (ready);
2663 return 0;
2664 }
2665
2666 if (spec_info
2667 && spec_info->flags & (PREFER_NON_DATA_SPEC
2668 | PREFER_NON_CONTROL_SPEC))
2669 {
2670 for (i = 0, n = ready->n_ready; i < n; i++)
2671 {
2672 rtx x;
2673 ds_t s;
2674
2675 x = ready_element (ready, i);
2676 s = TODO_SPEC (x);
2677
2678 if (spec_info->flags & PREFER_NON_DATA_SPEC
2679 && !(s & DATA_SPEC))
2680 {
2681 try_data = 0;
2682 if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
2683 || !try_control)
2684 break;
2685 }
2686
2687 if (spec_info->flags & PREFER_NON_CONTROL_SPEC
2688 && !(s & CONTROL_SPEC))
2689 {
2690 try_control = 0;
2691 if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
2692 break;
2693 }
2694 }
2695 }
2696
2697 ts = TODO_SPEC (insn);
2698 if ((ts & SPECULATIVE)
2699 && (((!try_data && (ts & DATA_SPEC))
2700 || (!try_control && (ts & CONTROL_SPEC)))
2701 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2702 && !targetm.sched
2703 .first_cycle_multipass_dfa_lookahead_guard_spec (insn))))
2704 /* Discard speculative instruction that stands first in the ready
2705 list. */
2706 {
2707 change_queue_index (insn, 1);
2708 return 1;
2709 }
2710
2711 ready_try[0] = 0;
2712
2713 for (i = 1; i < ready->n_ready; i++)
2714 {
2715 insn = ready_element (ready, i);
2716
2717 ready_try [i]
2718 = ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2719 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC)));
2720 }
2721
2722 /* Let the target filter the search space. */
2723 for (i = 1; i < ready->n_ready; i++)
2724 if (!ready_try[i])
2725 {
2726 insn = ready_element (ready, i);
2727
2728 /* If this insn is recognizable we should have already
2729 recognized it earlier.
2730 ??? Not very clear where this is supposed to be done.
2731 See dep_cost_1. */
2732 gcc_checking_assert (INSN_CODE (insn) >= 0
2733 || recog_memoized (insn) < 0);
2734
2735 ready_try [i]
2736 = (/* INSN_CODE check can be omitted here as it is also done later
2737 in max_issue (). */
2738 INSN_CODE (insn) < 0
2739 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2740 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2741 (insn)));
2742 }
2743
2744 if (max_issue (ready, 1, curr_state, first_cycle_insn_p, &index) == 0)
2745 {
2746 *insn_ptr = ready_remove_first (ready);
2747 if (sched_verbose >= 4)
2748 fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
2749 (*current_sched_info->print_insn) (*insn_ptr, 0));
2750 return 0;
2751 }
2752 else
2753 {
2754 if (sched_verbose >= 4)
2755 fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
2756 (*current_sched_info->print_insn)
2757 (ready_element (ready, index), 0));
2758
2759 *insn_ptr = ready_remove (ready, index);
2760 return 0;
2761 }
2762 }
2763 }
2764
2765 /* Use forward list scheduling to rearrange insns of block pointed to by
2766 TARGET_BB, possibly bringing insns from subsequent blocks in the same
2767 region. */
2768
2769 void
2770 schedule_block (basic_block *target_bb)
2771 {
2772 int i;
2773 bool first_cycle_insn_p;
2774 int can_issue_more;
2775 state_t temp_state = NULL; /* It is used for multipass scheduling. */
2776 int sort_p, advance, start_clock_var;
2777
2778 /* Head/tail info for this block. */
2779 rtx prev_head = current_sched_info->prev_head;
2780 rtx next_tail = current_sched_info->next_tail;
2781 rtx head = NEXT_INSN (prev_head);
2782 rtx tail = PREV_INSN (next_tail);
2783
2784 /* We used to have code to avoid getting parameters moved from hard
2785 argument registers into pseudos.
2786
2787 However, it was removed when it proved to be of marginal benefit
2788 and caused problems because schedule_block and compute_forward_dependences
2789 had different notions of what the "head" insn was. */
2790
2791 gcc_assert (head != tail || INSN_P (head));
2792
2793 haifa_recovery_bb_recently_added_p = false;
2794
2795 /* Debug info. */
2796 if (sched_verbose)
2797 dump_new_block_header (0, *target_bb, head, tail);
2798
2799 state_reset (curr_state);
2800
2801 /* Clear the ready list. */
2802 ready.first = ready.veclen - 1;
2803 ready.n_ready = 0;
2804 ready.n_debug = 0;
2805
2806 /* It is used for first cycle multipass scheduling. */
2807 temp_state = alloca (dfa_state_size);
2808
2809 if (targetm.sched.init)
2810 targetm.sched.init (sched_dump, sched_verbose, ready.veclen);
2811
2812 /* We start inserting insns after PREV_HEAD. */
2813 last_scheduled_insn = prev_head;
2814
2815 gcc_assert ((NOTE_P (last_scheduled_insn)
2816 || BOUNDARY_DEBUG_INSN_P (last_scheduled_insn))
2817 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
2818
2819 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
2820 queue. */
2821 q_ptr = 0;
2822 q_size = 0;
2823
2824 insn_queue = XALLOCAVEC (rtx, max_insn_queue_index + 1);
2825 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
2826
2827 /* Start just before the beginning of time. */
2828 clock_var = -1;
2829
2830 /* We need the queue and ready lists and clock_var to be initialized
2831 in try_ready () (which is called through init_ready_list ()). */
2832 (*current_sched_info->init_ready_list) ();
2833
2834 /* The algorithm is O(n^2) in the number of ready insns at any given
2835 time in the worst case. Before reload we are more likely to have
2836 big lists so truncate them to a reasonable size. */
2837 if (!reload_completed
2838 && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
2839 {
2840 ready_sort (&ready);
2841
2842 /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
2843 If there are debug insns, we know they're first. */
2844 for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
2845 if (!SCHED_GROUP_P (ready_element (&ready, i)))
2846 break;
2847
2848 if (sched_verbose >= 2)
2849 {
2850 fprintf (sched_dump,
2851 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
2852 fprintf (sched_dump,
2853 ";;\t\t before reload => truncated to %d insns\n", i);
2854 }
2855
2856 /* Delay all insns past it for 1 cycle. If debug counter is
2857 activated make an exception for the insn right after
2858 last_scheduled_insn. */
2859 {
2860 rtx skip_insn;
2861
2862 if (dbg_cnt (sched_insn) == false)
2863 skip_insn = next_nonnote_insn (last_scheduled_insn);
2864 else
2865 skip_insn = NULL_RTX;
2866
2867 while (i < ready.n_ready)
2868 {
2869 rtx insn;
2870
2871 insn = ready_remove (&ready, i);
2872
2873 if (insn != skip_insn)
2874 queue_insn (insn, 1);
2875 }
2876 }
2877 }
2878
2879 /* Now we can restore basic block notes and maintain precise cfg. */
2880 restore_bb_notes (*target_bb);
2881
2882 last_clock_var = -1;
2883
2884 advance = 0;
2885
2886 sort_p = TRUE;
2887 /* Loop until all the insns in BB are scheduled. */
2888 while ((*current_sched_info->schedule_more_p) ())
2889 {
2890 do
2891 {
2892 start_clock_var = clock_var;
2893
2894 clock_var++;
2895
2896 advance_one_cycle ();
2897
2898 /* Add to the ready list all pending insns that can be issued now.
2899 If there are no ready insns, increment clock until one
2900 is ready and add all pending insns at that point to the ready
2901 list. */
2902 queue_to_ready (&ready);
2903
2904 gcc_assert (ready.n_ready);
2905
2906 if (sched_verbose >= 2)
2907 {
2908 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
2909 debug_ready_list (&ready);
2910 }
2911 advance -= clock_var - start_clock_var;
2912 }
2913 while (advance > 0);
2914
2915 if (sort_p)
2916 {
2917 /* Sort the ready list based on priority. */
2918 ready_sort (&ready);
2919
2920 if (sched_verbose >= 2)
2921 {
2922 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
2923 debug_ready_list (&ready);
2924 }
2925 }
2926
2927 /* We don't want md sched reorder to even see debug insns, so put
2928 them out right away. */
2929 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
2930 {
2931 if (control_flow_insn_p (last_scheduled_insn))
2932 {
2933 *target_bb = current_sched_info->advance_target_bb
2934 (*target_bb, 0);
2935
2936 if (sched_verbose)
2937 {
2938 rtx x;
2939
2940 x = next_real_insn (last_scheduled_insn);
2941 gcc_assert (x);
2942 dump_new_block_header (1, *target_bb, x, tail);
2943 }
2944
2945 last_scheduled_insn = bb_note (*target_bb);
2946 }
2947
2948 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
2949 {
2950 rtx insn = ready_remove_first (&ready);
2951 gcc_assert (DEBUG_INSN_P (insn));
2952 (*current_sched_info->begin_schedule_ready) (insn,
2953 last_scheduled_insn);
2954 move_insn (insn, last_scheduled_insn,
2955 current_sched_info->next_tail);
2956 last_scheduled_insn = insn;
2957 advance = schedule_insn (insn);
2958 gcc_assert (advance == 0);
2959 if (ready.n_ready > 0)
2960 ready_sort (&ready);
2961 }
2962
2963 if (!ready.n_ready)
2964 continue;
2965 }
2966
2967 /* Allow the target to reorder the list, typically for
2968 better instruction bundling. */
2969 if (sort_p && targetm.sched.reorder
2970 && (ready.n_ready == 0
2971 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2972 can_issue_more =
2973 targetm.sched.reorder (sched_dump, sched_verbose,
2974 ready_lastpos (&ready),
2975 &ready.n_ready, clock_var);
2976 else
2977 can_issue_more = issue_rate;
2978
2979 first_cycle_insn_p = true;
2980 cycle_issued_insns = 0;
2981 for (;;)
2982 {
2983 rtx insn;
2984 int cost;
2985 bool asm_p = false;
2986
2987 if (sched_verbose >= 2)
2988 {
2989 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
2990 clock_var);
2991 debug_ready_list (&ready);
2992 if (sched_pressure_p)
2993 print_curr_reg_pressure ();
2994 }
2995
2996 if (ready.n_ready == 0
2997 && can_issue_more
2998 && reload_completed)
2999 {
3000 /* Allow scheduling insns directly from the queue in case
3001 there's nothing better to do (ready list is empty) but
3002 there are still vacant dispatch slots in the current cycle. */
3003 if (sched_verbose >= 6)
3004 fprintf (sched_dump,";;\t\tSecond chance\n");
3005 memcpy (temp_state, curr_state, dfa_state_size);
3006 if (early_queue_to_ready (temp_state, &ready))
3007 ready_sort (&ready);
3008 }
3009
3010 if (ready.n_ready == 0
3011 || !can_issue_more
3012 || state_dead_lock_p (curr_state)
3013 || !(*current_sched_info->schedule_more_p) ())
3014 break;
3015
3016 /* Select and remove the insn from the ready list. */
3017 if (sort_p)
3018 {
3019 int res;
3020
3021 insn = NULL_RTX;
3022 res = choose_ready (&ready, first_cycle_insn_p, &insn);
3023
3024 if (res < 0)
3025 /* Finish cycle. */
3026 break;
3027 if (res > 0)
3028 /* Restart choose_ready (). */
3029 continue;
3030
3031 gcc_assert (insn != NULL_RTX);
3032 }
3033 else
3034 insn = ready_remove_first (&ready);
3035
3036 if (sched_pressure_p && INSN_TICK (insn) > clock_var)
3037 {
3038 ready_add (&ready, insn, true);
3039 advance = 1;
3040 break;
3041 }
3042
3043 if (targetm.sched.dfa_new_cycle
3044 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
3045 insn, last_clock_var,
3046 clock_var, &sort_p))
3047 /* SORT_P is used by the target to override sorting
3048 of the ready list. This is needed when the target
3049 has modified its internal structures expecting that
3050 the insn will be issued next. As we need the insn
3051 to have the highest priority (so it will be returned by
3052 the ready_remove_first call above), we invoke
3053 ready_add (&ready, insn, true).
3054 But, still, there is one issue: INSN can be later
3055 discarded by scheduler's front end through
3056 current_sched_info->can_schedule_ready_p, hence, won't
3057 be issued next. */
3058 {
3059 ready_add (&ready, insn, true);
3060 break;
3061 }
3062
3063 sort_p = TRUE;
3064 memcpy (temp_state, curr_state, dfa_state_size);
3065 if (recog_memoized (insn) < 0)
3066 {
3067 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
3068 || asm_noperands (PATTERN (insn)) >= 0);
3069 if (!first_cycle_insn_p && asm_p)
3070 /* This is an asm insn that we are trying to issue on a cycle
3071 other than the first. Issue it on the next cycle. */
3072 cost = 1;
3073 else
3074 /* A USE insn, or something else we don't need to
3075 understand. We can't pass these directly to
3076 state_transition because it will trigger a
3077 fatal error for unrecognizable insns. */
3078 cost = 0;
3079 }
3080 else if (sched_pressure_p)
3081 cost = 0;
3082 else
3083 {
3084 cost = state_transition (temp_state, insn);
3085 if (cost < 0)
3086 cost = 0;
3087 else if (cost == 0)
3088 cost = 1;
3089 }
3090
3091 if (cost >= 1)
3092 {
3093 queue_insn (insn, cost);
3094 if (SCHED_GROUP_P (insn))
3095 {
3096 advance = cost;
3097 break;
3098 }
3099
3100 continue;
3101 }
3102
3103 if (current_sched_info->can_schedule_ready_p
3104 && ! (*current_sched_info->can_schedule_ready_p) (insn))
3105 /* We normally get here only if we don't want to move
3106 insn from the split block. */
3107 {
3108 TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
3109 continue;
3110 }
3111
3112 /* DECISION is made. */
3113
3114 if (TODO_SPEC (insn) & SPECULATIVE)
3115 generate_recovery_code (insn);
3116
3117 if (control_flow_insn_p (last_scheduled_insn)
3118 /* This is used to switch basic blocks by request
3119 from the scheduler front-end (actually, sched-ebb.c only),
3120 to process blocks with a single fallthru
3121 edge. If the succeeding block has a jump, that jump would
3122 try to move to the end of the current bb, thus corrupting the CFG. */
3123 || current_sched_info->advance_target_bb (*target_bb, insn))
3124 {
3125 *target_bb = current_sched_info->advance_target_bb
3126 (*target_bb, 0);
3127
3128 if (sched_verbose)
3129 {
3130 rtx x;
3131
3132 x = next_real_insn (last_scheduled_insn);
3133 gcc_assert (x);
3134 dump_new_block_header (1, *target_bb, x, tail);
3135 }
3136
3137 last_scheduled_insn = bb_note (*target_bb);
3138 }
3139
3140 /* Update counters, etc in the scheduler's front end. */
3141 (*current_sched_info->begin_schedule_ready) (insn,
3142 last_scheduled_insn);
3143
3144 move_insn (insn, last_scheduled_insn, current_sched_info->next_tail);
3145
3146 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
3147 targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);
3148
3149 reemit_notes (insn);
3150 last_scheduled_insn = insn;
3151
3152 if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
3153 {
3154 cycle_issued_insns++;
3155 memcpy (curr_state, temp_state, dfa_state_size);
3156 }
3157
3158 if (targetm.sched.variable_issue)
3159 can_issue_more =
3160 targetm.sched.variable_issue (sched_dump, sched_verbose,
3161 insn, can_issue_more);
3162 /* A naked CLOBBER or USE generates no instruction, so do
3163 not count them against the issue rate. */
3164 else if (GET_CODE (PATTERN (insn)) != USE
3165 && GET_CODE (PATTERN (insn)) != CLOBBER)
3166 can_issue_more--;
3167 advance = schedule_insn (insn);
3168
3169 /* After issuing an asm insn we should start a new cycle. */
3170 if (advance == 0 && asm_p)
3171 advance = 1;
3172 if (advance != 0)
3173 break;
3174
3175 first_cycle_insn_p = false;
3176
3177 /* Sort the ready list based on priority. This must be
3178 redone here, as schedule_insn may have readied additional
3179 insns that will not be sorted correctly. */
3180 if (ready.n_ready > 0)
3181 ready_sort (&ready);
3182
3183 /* Quickly go through the debug insns so that md sched
3184 reorder2 doesn't have to deal with them. */
3185 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
3186 && (*current_sched_info->schedule_more_p) ())
3187 {
3188 if (control_flow_insn_p (last_scheduled_insn))
3189 {
3190 *target_bb = current_sched_info->advance_target_bb
3191 (*target_bb, 0);
3192
3193 if (sched_verbose)
3194 {
3195 rtx x;
3196
3197 x = next_real_insn (last_scheduled_insn);
3198 gcc_assert (x);
3199 dump_new_block_header (1, *target_bb, x, tail);
3200 }
3201
3202 last_scheduled_insn = bb_note (*target_bb);
3203 }
3204
3205 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
3206 {
3207 insn = ready_remove_first (&ready);
3208 gcc_assert (DEBUG_INSN_P (insn));
3209 (*current_sched_info->begin_schedule_ready)
3210 (insn, last_scheduled_insn);
3211 move_insn (insn, last_scheduled_insn,
3212 current_sched_info->next_tail);
3213 advance = schedule_insn (insn);
3214 last_scheduled_insn = insn;
3215 gcc_assert (advance == 0);
3216 if (ready.n_ready > 0)
3217 ready_sort (&ready);
3218 }
3219 }
3220
3221 if (targetm.sched.reorder2
3222 && (ready.n_ready == 0
3223 || !SCHED_GROUP_P (ready_element (&ready, 0))))
3224 {
3225 can_issue_more =
3226 targetm.sched.reorder2 (sched_dump, sched_verbose,
3227 ready.n_ready
3228 ? ready_lastpos (&ready) : NULL,
3229 &ready.n_ready, clock_var);
3230 }
3231 }
3232 }
3233
3234 /* Debug info. */
3235 if (sched_verbose)
3236 {
3237 fprintf (sched_dump, ";;\tReady list (final): ");
3238 debug_ready_list (&ready);
3239 }
3240
3241 if (current_sched_info->queue_must_finish_empty)
3242 /* Sanity check -- queue must be empty now. Meaningless if region has
3243 multiple bbs. */
3244 gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
3245 else
3246 {
3247 /* We must maintain QUEUE_INDEX between blocks in region. */
3248 for (i = ready.n_ready - 1; i >= 0; i--)
3249 {
3250 rtx x;
3251
3252 x = ready_element (&ready, i);
3253 QUEUE_INDEX (x) = QUEUE_NOWHERE;
3254 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
3255 }
3256
3257 if (q_size)
3258 for (i = 0; i <= max_insn_queue_index; i++)
3259 {
3260 rtx link;
3261 for (link = insn_queue[i]; link; link = XEXP (link, 1))
3262 {
3263 rtx x;
3264
3265 x = XEXP (link, 0);
3266 QUEUE_INDEX (x) = QUEUE_NOWHERE;
3267 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
3268 }
3269 free_INSN_LIST_list (&insn_queue[i]);
3270 }
3271 }
3272
3273 if (sched_verbose)
3274 fprintf (sched_dump, ";; total time = %d\n", clock_var);
3275
3276 if (!current_sched_info->queue_must_finish_empty
3277 || haifa_recovery_bb_recently_added_p)
3278 {
3279 /* INSN_TICK (the minimum clock tick at which an insn becomes
3280 ready) may not be correct for the insns in the subsequent
3281 blocks of the region. We should either use a correct value of
3282 `clock_var' or modify INSN_TICK. It is better to keep the
3283 clock_var value equal to 0 at the start of a basic block.
3284 Therefore we modify INSN_TICK here. */
3285 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
3286 }
3287
3288 if (targetm.sched.finish)
3289 {
3290 targetm.sched.finish (sched_dump, sched_verbose);
3291 /* Target might have added some instructions to the scheduled block
3292 in its md_finish () hook. These new insns don't have any data
3293 initialized and to identify them we extend h_i_d so that they'll
3294 get zero luids. */
3295 sched_init_luids (NULL, NULL, NULL, NULL);
3296 }
3297
3298 if (sched_verbose)
3299 fprintf (sched_dump, ";; new head = %d\n;; new tail = %d\n\n",
3300 INSN_UID (head), INSN_UID (tail));
3301
3302 /* Update head/tail boundaries. */
3303 head = NEXT_INSN (prev_head);
3304 tail = last_scheduled_insn;
3305
3306 head = restore_other_notes (head, NULL);
3307
3308 current_sched_info->head = head;
3309 current_sched_info->tail = tail;
3310 }
3311 \f
3312 /* set_priorities: compute the priority of each insn in the block. */
3313
3314 int
3315 set_priorities (rtx head, rtx tail)
3316 {
3317 rtx insn;
3318 int n_insn;
3319 int sched_max_insns_priority =
3320 current_sched_info->sched_max_insns_priority;
3321 rtx prev_head;
3322
3323 if (head == tail && (! INSN_P (head) || BOUNDARY_DEBUG_INSN_P (head)))
3324 gcc_unreachable ();
3325
3326 n_insn = 0;
3327
3328 prev_head = PREV_INSN (head);
3329 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
3330 {
3331 if (!INSN_P (insn))
3332 continue;
3333
3334 n_insn++;
3335 (void) priority (insn);
3336
3337 gcc_assert (INSN_PRIORITY_KNOWN (insn));
3338
3339 sched_max_insns_priority = MAX (sched_max_insns_priority,
3340 INSN_PRIORITY (insn));
3341 }
3342
3343 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
3344
3345 return n_insn;
3346 }
3347
3348 /* Set dump and sched_verbose for the desired debugging output. If no
3349 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
3350 For -fsched-verbose=N, N>=10, print everything to stderr. */
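/* For example (illustrative only): -fsched-verbose=2 together with a
   scheduler dump file sends the output to that dump file, whereas
   -fsched-verbose=12 sends everything to stderr. */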
3351 void
3352 setup_sched_dump (void)
3353 {
3354 sched_verbose = sched_verbose_param;
3355 if (sched_verbose_param == 0 && dump_file)
3356 sched_verbose = 1;
3357 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
3358 ? stderr : dump_file);
3359 }
3360
3361 /* Initialize some global state for the scheduler. This function works
3362 with the common data shared between all the schedulers. It is called
3363 from the scheduler specific initialization routine. */
3364
3365 void
3366 sched_init (void)
3367 {
3368 /* Disable speculative loads in the presence of cc0. */
3369 #ifdef HAVE_cc0
3370 flag_schedule_speculative_load = 0;
3371 #endif
3372
3373 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
3374 targetm.sched.dispatch_do (NULL_RTX, DISPATCH_INIT);
3375
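/* Register-pressure-sensitive scheduling (-fsched-pressure) is only done
   during the first (region) scheduling pass, before register allocation. */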
3376 sched_pressure_p = (flag_sched_pressure && ! reload_completed
3377 && common_sched_info->sched_pass_id == SCHED_RGN_PASS);
3378
3379 if (sched_pressure_p)
3380 ira_setup_eliminable_regset ();
3381
3382 /* Initialize SPEC_INFO. */
3383 if (targetm.sched.set_sched_flags)
3384 {
3385 spec_info = &spec_info_var;
3386 targetm.sched.set_sched_flags (spec_info);
3387
3388 if (spec_info->mask != 0)
3389 {
3390 spec_info->data_weakness_cutoff =
3391 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
3392 spec_info->control_weakness_cutoff =
3393 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
3394 * REG_BR_PROB_BASE) / 100;
3395 }
3396 else
3397 /* So we won't read anything accidentally. */
3398 spec_info = NULL;
3399
3400 }
3401 else
3402 /* So we won't read anything accidentally. */
3403 spec_info = 0;
3404
3405 /* Initialize issue_rate. */
3406 if (targetm.sched.issue_rate)
3407 issue_rate = targetm.sched.issue_rate ();
3408 else
3409 issue_rate = 1;
3410
3411 if (cached_issue_rate != issue_rate)
3412 {
3413 cached_issue_rate = issue_rate;
3414 /* To invalidate max_lookahead_tries: */
3415 cached_first_cycle_multipass_dfa_lookahead = 0;
3416 }
3417
3418 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
3419 dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
3420 else
3421 dfa_lookahead = 0;
3422
3423 if (targetm.sched.init_dfa_pre_cycle_insn)
3424 targetm.sched.init_dfa_pre_cycle_insn ();
3425
3426 if (targetm.sched.init_dfa_post_cycle_insn)
3427 targetm.sched.init_dfa_post_cycle_insn ();
3428
3429 dfa_start ();
3430 dfa_state_size = state_size ();
3431
3432 init_alias_analysis ();
3433
3434 df_set_flags (DF_LR_RUN_DCE);
3435 df_note_add_problem ();
3436
3437 /* More problems needed for interloop dep calculation in SMS. */
3438 if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
3439 {
3440 df_rd_add_problem ();
3441 df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
3442 }
3443
3444 df_analyze ();
3445
3446 /* Do not run DCE after reload, as this can kill nops inserted
3447 by bundling. */
3448 if (reload_completed)
3449 df_clear_flags (DF_LR_RUN_DCE);
3450
3451 regstat_compute_calls_crossed ();
3452
3453 if (targetm.sched.init_global)
3454 targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);
3455
3456 if (sched_pressure_p)
3457 {
3458 int i, max_regno = max_reg_num ();
3459
3460 ira_set_pseudo_classes (sched_verbose ? sched_dump : NULL);
3461 sched_regno_cover_class
3462 = (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
3463 for (i = 0; i < max_regno; i++)
3464 sched_regno_cover_class[i]
3465 = (i < FIRST_PSEUDO_REGISTER
3466 ? ira_class_translate[REGNO_REG_CLASS (i)]
3467 : reg_cover_class (i));
3468 curr_reg_live = BITMAP_ALLOC (NULL);
3469 saved_reg_live = BITMAP_ALLOC (NULL);
3470 region_ref_regs = BITMAP_ALLOC (NULL);
3471 }
3472
3473 curr_state = xmalloc (dfa_state_size);
3474 }
3475
3476 static void haifa_init_only_bb (basic_block, basic_block);
3477
3478 /* Initialize data structures specific to the Haifa scheduler. */
3479 void
3480 haifa_sched_init (void)
3481 {
3482 setup_sched_dump ();
3483 sched_init ();
3484
3485 if (spec_info != NULL)
3486 {
3487 sched_deps_info->use_deps_list = 1;
3488 sched_deps_info->generate_spec_deps = 1;
3489 }
3490
3491 /* Initialize luids, dependency caches, target and h_i_d for the
3492 whole function. */
3493 {
3494 bb_vec_t bbs = VEC_alloc (basic_block, heap, n_basic_blocks);
3495 basic_block bb;
3496
3497 sched_init_bbs ();
3498
3499 FOR_EACH_BB (bb)
3500 VEC_quick_push (basic_block, bbs, bb);
3501 sched_init_luids (bbs, NULL, NULL, NULL);
3502 sched_deps_init (true);
3503 sched_extend_target ();
3504 haifa_init_h_i_d (bbs, NULL, NULL, NULL);
3505
3506 VEC_free (basic_block, heap, bbs);
3507 }
3508
3509 sched_init_only_bb = haifa_init_only_bb;
3510 sched_split_block = sched_split_block_1;
3511 sched_create_empty_bb = sched_create_empty_bb_1;
3512 haifa_recovery_bb_ever_added_p = false;
3513
3514 #ifdef ENABLE_CHECKING
3515 /* This is mostly useful for finding bugs in check_cfg () itself.
3516 We must call sched_bbs_init () before check_cfg () because check_cfg ()
3517 assumes that the last insn in the last bb has a non-null successor. */
3518 check_cfg (0, 0);
3519 #endif
3520
3521 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
3522 before_recovery = 0;
3523 after_recovery = 0;
3524 }
3525
3526 /* Finish work with the data specific to the Haifa scheduler. */
3527 void
3528 haifa_sched_finish (void)
3529 {
3530 sched_create_empty_bb = NULL;
3531 sched_split_block = NULL;
3532 sched_init_only_bb = NULL;
3533
3534 if (spec_info && spec_info->dump)
3535 {
3536 char c = reload_completed ? 'a' : 'b';
3537
3538 fprintf (spec_info->dump,
3539 ";; %s:\n", current_function_name ());
3540
3541 fprintf (spec_info->dump,
3542 ";; Procedure %cr-begin-data-spec motions == %d\n",
3543 c, nr_begin_data);
3544 fprintf (spec_info->dump,
3545 ";; Procedure %cr-be-in-data-spec motions == %d\n",
3546 c, nr_be_in_data);
3547 fprintf (spec_info->dump,
3548 ";; Procedure %cr-begin-control-spec motions == %d\n",
3549 c, nr_begin_control);
3550 fprintf (spec_info->dump,
3551 ";; Procedure %cr-be-in-control-spec motions == %d\n",
3552 c, nr_be_in_control);
3553 }
3554
3555 /* Finalize h_i_d, dependency caches, and luids for the whole
3556 function. Target will be finalized in md_global_finish (). */
3557 sched_deps_finish ();
3558 sched_finish_luids ();
3559 current_sched_info = NULL;
3560 sched_finish ();
3561 }
3562
3563 /* Free global data used during insn scheduling. This function works with
3564 the common data shared between the schedulers. */
3565
3566 void
3567 sched_finish (void)
3568 {
3569 haifa_finish_h_i_d ();
3570 if (sched_pressure_p)
3571 {
3572 free (sched_regno_cover_class);
3573 BITMAP_FREE (region_ref_regs);
3574 BITMAP_FREE (saved_reg_live);
3575 BITMAP_FREE (curr_reg_live);
3576 }
3577 free (curr_state);
3578
3579 if (targetm.sched.finish_global)
3580 targetm.sched.finish_global (sched_dump, sched_verbose);
3581
3582 end_alias_analysis ();
3583
3584 regstat_free_calls_crossed ();
3585
3586 dfa_finish ();
3587
3588 #ifdef ENABLE_CHECKING
3589 /* After reload the ia64 backend clobbers the CFG, so we can't check anything. */
3590 if (!reload_completed)
3591 check_cfg (0, 0);
3592 #endif
3593 }
3594
3595 /* Fix INSN_TICKs of the instructions in the current block as well as
3596 INSN_TICKs of their dependents.
3597 HEAD and TAIL are the begin and the end of the current scheduled block. */
3598 static void
3599 fix_inter_tick (rtx head, rtx tail)
3600 {
3601 /* Set of instructions with corrected INSN_TICK. */
3602 bitmap_head processed;
3603 /* ??? It is doubtful if we should assume that cycle advance happens on
3604 basic block boundaries. Basically insns that are unconditionally ready
3605 at the start of the block are preferable to those which have
3606 a one cycle dependency on an insn from the previous block. */
3607 int next_clock = clock_var + 1;
3608
3609 bitmap_initialize (&processed, 0);
3610
3611 /* Iterate over the scheduled instructions and fix their INSN_TICKs and
3612 the INSN_TICKs of dependent instructions, so that INSN_TICKs are
3613 consistent across different blocks. */
3614 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
3615 {
3616 if (INSN_P (head))
3617 {
3618 int tick;
3619 sd_iterator_def sd_it;
3620 dep_t dep;
3621
3622 tick = INSN_TICK (head);
3623 gcc_assert (tick >= MIN_TICK);
3624
3625 /* Fix INSN_TICK of instruction from just scheduled block. */
3626 if (bitmap_set_bit (&processed, INSN_LUID (head)))
3627 {
3628 tick -= next_clock;
3629
3630 if (tick < MIN_TICK)
3631 tick = MIN_TICK;
3632
3633 INSN_TICK (head) = tick;
3634 }
3635
3636 FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
3637 {
3638 rtx next;
3639
3640 next = DEP_CON (dep);
3641 tick = INSN_TICK (next);
3642
3643 if (tick != INVALID_TICK
3644 /* If NEXT has its INSN_TICK calculated, fix it.
3645 If not - it will be properly calculated from
3646 scratch later in fix_tick_ready. */
3647 && bitmap_set_bit (&processed, INSN_LUID (next)))
3648 {
3649 tick -= next_clock;
3650
3651 if (tick < MIN_TICK)
3652 tick = MIN_TICK;
3653
3654 if (tick > INTER_TICK (next))
3655 INTER_TICK (next) = tick;
3656 else
3657 tick = INTER_TICK (next);
3658
3659 INSN_TICK (next) = tick;
3660 }
3661 }
3662 }
3663 }
3664 bitmap_clear (&processed);
3665 }
3666
3667 static int haifa_speculate_insn (rtx, ds_t, rtx *);
3668
3669 /* Check if NEXT is ready to be added to the ready or queue list.
3670 If "yes", add it to the proper list.
3671 Returns:
3672 -1 - is not ready yet,
3673 0 - added to the ready list,
3674 0 < N - queued for N cycles. */
3675 int
3676 try_ready (rtx next)
3677 {
3678 ds_t old_ts, *ts;
3679
3680 ts = &TODO_SPEC (next);
3681 old_ts = *ts;
3682
3683 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
3684 && ((old_ts & HARD_DEP)
3685 || (old_ts & SPECULATIVE)));
3686
3687 if (sd_lists_empty_p (next, SD_LIST_BACK))
3688 /* NEXT has all its dependencies resolved. */
3689 {
3690 /* Remove HARD_DEP bit from NEXT's status. */
3691 *ts &= ~HARD_DEP;
3692
3693 if (current_sched_info->flags & DO_SPECULATION)
3694 /* Remove all speculative bits from NEXT's status. */
3695 *ts &= ~SPECULATIVE;
3696 }
3697 else
3698 {
3699 /* One of NEXT's dependencies has been resolved.
3700 Recalculate NEXT's status. */
3701
3702 *ts &= ~SPECULATIVE & ~HARD_DEP;
3703
3704 if (sd_lists_empty_p (next, SD_LIST_HARD_BACK))
3705 /* Now we've got NEXT with speculative deps only.
3706 1. Look at the deps to see what we have to do.
3707 2. Check if we can do 'todo'. */
3708 {
3709 sd_iterator_def sd_it;
3710 dep_t dep;
3711 bool first_p = true;
3712
3713 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
3714 {
3715 ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
3716
3717 if (DEBUG_INSN_P (DEP_PRO (dep))
3718 && !DEBUG_INSN_P (next))
3719 continue;
3720
3721 if (first_p)
3722 {
3723 first_p = false;
3724
3725 *ts = ds;
3726 }
3727 else
3728 *ts = ds_merge (*ts, ds);
3729 }
3730
3731 if (ds_weak (*ts) < spec_info->data_weakness_cutoff)
3732 /* The merged speculation is too weak (below the cutoff) - treat the dep as hard. */
3733 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3734 }
3735 else
3736 *ts |= HARD_DEP;
3737 }
3738
3739 if (*ts & HARD_DEP)
3740 gcc_assert (*ts == old_ts
3741 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
3742 else if (current_sched_info->new_ready)
3743 *ts = current_sched_info->new_ready (next, *ts);
3744
3745 /* * If !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
3746 have its original pattern or a changed (speculative) one. This is due
3747 to the ebb changing in region scheduling.
3748 * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
3749 has a speculative pattern.
3750
3751 We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
3752 control-speculative NEXT could have been discarded by sched-rgn.c
3753 (the same case as when discarded by can_schedule_ready_p ()). */
3754
3755 if ((*ts & SPECULATIVE)
3756 /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
3757 need to change anything. */
3758 && *ts != old_ts)
3759 {
3760 int res;
3761 rtx new_pat;
3762
3763 gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
3764
3765 res = haifa_speculate_insn (next, *ts, &new_pat);
3766
3767 switch (res)
3768 {
3769 case -1:
3770 /* It would be nice to change DEP_STATUS of all dependences,
3771 which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
3772 so we won't reanalyze anything. */
3773 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3774 break;
3775
3776 case 0:
3777 /* We follow the rule that every speculative insn
3778 has a non-null ORIG_PAT. */
3779 if (!ORIG_PAT (next))
3780 ORIG_PAT (next) = PATTERN (next);
3781 break;
3782
3783 case 1:
3784 if (!ORIG_PAT (next))
3785 /* If we are going to overwrite the original pattern of insn,
3786 save it. */
3787 ORIG_PAT (next) = PATTERN (next);
3788
3789 haifa_change_pattern (next, new_pat);
3790 break;
3791
3792 default:
3793 gcc_unreachable ();
3794 }
3795 }
3796
3797 /* We need to restore pattern only if (*ts == 0), because otherwise it is
3798 either correct (*ts & SPECULATIVE),
3799 or we simply don't care (*ts & HARD_DEP). */
3800
3801 gcc_assert (!ORIG_PAT (next)
3802 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
3803
3804 if (*ts & HARD_DEP)
3805 {
3806 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
3807 control-speculative NEXT could have been discarded by sched-rgn.c
3808 (the same case as when discarded by can_schedule_ready_p ()). */
3809 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
3810
3811 change_queue_index (next, QUEUE_NOWHERE);
3812 return -1;
3813 }
3814 else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
3815 /* We should change the pattern of every previously speculative
3816 instruction - and we determine whether NEXT was speculative by checking
3817 its ORIG_PAT field. The one exception is speculation checks, which
3818 also have ORIG_PAT set, so skip them. */
3819 {
3820 haifa_change_pattern (next, ORIG_PAT (next));
3821 ORIG_PAT (next) = 0;
3822 }
3823
3824 if (sched_verbose >= 2)
3825 {
3826 int s = TODO_SPEC (next);
3827
3828 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
3829 (*current_sched_info->print_insn) (next, 0));
3830
3831 if (spec_info && spec_info->dump)
3832 {
3833 if (s & BEGIN_DATA)
3834 fprintf (spec_info->dump, "; data-spec;");
3835 if (s & BEGIN_CONTROL)
3836 fprintf (spec_info->dump, "; control-spec;");
3837 if (s & BE_IN_CONTROL)
3838 fprintf (spec_info->dump, "; in-control-spec;");
3839 }
3840
3841 fprintf (sched_dump, "\n");
3842 }
3843
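/* Give the target a chance to adjust NEXT's priority before it is
placed on the ready or queue list. */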
3844 adjust_priority (next);
3845
3846 return fix_tick_ready (next);
3847 }
3848
3849 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
3850 static int
3851 fix_tick_ready (rtx next)
3852 {
3853 int tick, delay;
3854
3855 if (!sd_lists_empty_p (next, SD_LIST_RES_BACK))
3856 {
3857 int full_p;
3858 sd_iterator_def sd_it;
3859 dep_t dep;
3860
3861 tick = INSN_TICK (next);
3862 /* If tick is not equal to INVALID_TICK, then update
3863 INSN_TICK of NEXT with the cost of the most recently resolved
3864 dependence. Otherwise, recalculate it from scratch. */
3865 full_p = (tick == INVALID_TICK);
3866
3867 FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
3868 {
3869 rtx pro = DEP_PRO (dep);
3870 int tick1;
3871
3872 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
3873
3874 tick1 = INSN_TICK (pro) + dep_cost (dep);
3875 if (tick1 > tick)
3876 tick = tick1;
3877
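/* When not recomputing from scratch, the first dependence in
SD_LIST_RES_BACK - the most recently resolved one, per the comment
above - is all we need. */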
3878 if (!full_p)
3879 break;
3880 }
3881 }
3882 else
3883 tick = -1;
3884
3885 INSN_TICK (next) = tick;
3886
3887 delay = tick - clock_var;
3888 if (delay <= 0 || sched_pressure_p)
3889 delay = QUEUE_READY;
3890
3891 change_queue_index (next, delay);
3892
3893 return delay;
3894 }
3895
3896 /* Move NEXT to the proper queue list with (DELAY >= 1),
3897 or add it to the ready list (DELAY == QUEUE_READY),
3898 or remove it from the ready and queue lists altogether (DELAY == QUEUE_NOWHERE). */
3899 static void
3900 change_queue_index (rtx next, int delay)
3901 {
3902 int i = QUEUE_INDEX (next);
3903
3904 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
3905 && delay != 0);
3906 gcc_assert (i != QUEUE_SCHEDULED);
3907
3908 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
3909 || (delay < 0 && delay == i))
3910 /* We have nothing to do. */
3911 return;
3912
3913 /* Remove NEXT from wherever it is now. */
3914 if (i == QUEUE_READY)
3915 ready_remove_insn (next);
3916 else if (i >= 0)
3917 queue_remove (next);
3918
3919 /* Add it to the proper place. */
3920 if (delay == QUEUE_READY)
3921 ready_add (readyp, next, false);
3922 else if (delay >= 1)
3923 queue_insn (next, delay);
3924
3925 if (sched_verbose >= 2)
3926 {
3927 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
3928 (*current_sched_info->print_insn) (next, 0));
3929
3930 if (delay == QUEUE_READY)
3931 fprintf (sched_dump, " into ready\n");
3932 else if (delay >= 1)
3933 fprintf (sched_dump, " into queue with cost=%d\n", delay);
3934 else
3935 fprintf (sched_dump, " removed from ready or queue lists\n");
3936 }
3937 }
3938
3939 static int sched_ready_n_insns = -1;
3940
3941 /* Initialize per region data structures. */
3942 void
3943 sched_extend_ready_list (int new_sched_ready_n_insns)
3944 {
3945 int i;
3946
3947 if (sched_ready_n_insns == -1)
3948 /* At the first call we need to initialize one more choice_stack
3949 entry. */
3950 {
3951 i = 0;
3952 sched_ready_n_insns = 0;
3953 }
3954 else
3955 i = sched_ready_n_insns + 1;
3956
3957 ready.veclen = new_sched_ready_n_insns + issue_rate;
3958 ready.vec = XRESIZEVEC (rtx, ready.vec, ready.veclen);
3959
3960 gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
3961
3962 ready_try = (char *) xrecalloc (ready_try, new_sched_ready_n_insns,
3963 sched_ready_n_insns, sizeof (*ready_try));
3964
3965 /* We allocate +1 element to save initial state in the choice_stack[0]
3966 entry. */
3967 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
3968 new_sched_ready_n_insns + 1);
3969
3970 for (; i <= new_sched_ready_n_insns; i++)
3971 {
3972 choice_stack[i].state = xmalloc (dfa_state_size);
3973
3974 if (targetm.sched.first_cycle_multipass_init)
3975 targetm.sched.first_cycle_multipass_init (&(choice_stack[i]
3976 .target_data));
3977 }
3978
3979 sched_ready_n_insns = new_sched_ready_n_insns;
3980 }
3981
3982 /* Free per region data structures. */
3983 void
3984 sched_finish_ready_list (void)
3985 {
3986 int i;
3987
3988 free (ready.vec);
3989 ready.vec = NULL;
3990 ready.veclen = 0;
3991
3992 free (ready_try);
3993 ready_try = NULL;
3994
3995 for (i = 0; i <= sched_ready_n_insns; i++)
3996 {
3997 if (targetm.sched.first_cycle_multipass_fini)
3998 targetm.sched.first_cycle_multipass_fini (&(choice_stack[i]
3999 .target_data));
4000
4001 free (choice_stack [i].state);
4002 }
4003 free (choice_stack);
4004 choice_stack = NULL;
4005
4006 sched_ready_n_insns = -1;
4007 }
4008
4009 static int
4010 haifa_luid_for_non_insn (rtx x)
4011 {
4012 gcc_assert (NOTE_P (x) || LABEL_P (x));
4013
4014 return 0;
4015 }
4016
4017 /* Generates recovery code for INSN. */
4018 static void
4019 generate_recovery_code (rtx insn)
4020 {
4021 if (TODO_SPEC (insn) & BEGIN_SPEC)
4022 begin_speculative_block (insn);
4023
4024 /* Here we have insn with no dependencies on
4025 instructions other than CHECK_SPEC ones. */
4026
4027 if (TODO_SPEC (insn) & BE_IN_SPEC)
4028 add_to_speculative_block (insn);
4029 }
4030
4031 /* Helper function.
4032 Tries to add speculative dependencies of type FS between TWIN
4033 and the consumers of INSN's forward dependencies. */
4034 static void
4035 process_insn_forw_deps_be_in_spec (rtx insn, rtx twin, ds_t fs)
4036 {
4037 sd_iterator_def sd_it;
4038 dep_t dep;
4039
4040 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
4041 {
4042 ds_t ds;
4043 rtx consumer;
4044
4045 consumer = DEP_CON (dep);
4046
4047 ds = DEP_STATUS (dep);
4048
4049 if (/* If we want to create speculative dep. */
4050 fs
4051 /* And we can do that because this is a true dep. */
4052 && (ds & DEP_TYPES) == DEP_TRUE)
4053 {
4054 gcc_assert (!(ds & BE_IN_SPEC));
4055
4056 if (/* If this dep can be overcome with 'begin speculation'. */
4057 ds & BEGIN_SPEC)
4058 /* Then we have a choice: keep the dep 'begin speculative'
4059 or transform it into 'be in speculative'. */
4060 {
4061 if (/* In try_ready we assert that if insn once became ready
4062 it can be removed from the ready (or queue) list only
4063 due to a backend decision. Hence we can't let the
4064 probability of the speculative dep decrease. */
4065 ds_weak (ds) <= ds_weak (fs))
4066 {
4067 ds_t new_ds;
4068
4069 new_ds = (ds & ~BEGIN_SPEC) | fs;
4070
4071 if (/* consumer can 'be in speculative'. */
4072 sched_insn_is_legitimate_for_speculation_p (consumer,
4073 new_ds))
4074 /* Transform it to be in speculative. */
4075 ds = new_ds;
4076 }
4077 }
4078 else
4079 /* Mark the dep as 'be in speculative'. */
4080 ds |= fs;
4081 }
4082
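/* Add a dependence of the same type from TWIN to the consumer,
carrying the (possibly transformed) status DS. */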
4083 {
4084 dep_def _new_dep, *new_dep = &_new_dep;
4085
4086 init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
4087 sd_add_dep (new_dep, false);
4088 }
4089 }
4090 }
4091
4092 /* Generates recovery code for BEGIN speculative INSN. */
4093 static void
4094 begin_speculative_block (rtx insn)
4095 {
4096 if (TODO_SPEC (insn) & BEGIN_DATA)
4097 nr_begin_data++;
4098 if (TODO_SPEC (insn) & BEGIN_CONTROL)
4099 nr_begin_control++;
4100
4101 create_check_block_twin (insn, false);
4102
4103 TODO_SPEC (insn) &= ~BEGIN_SPEC;
4104 }
4105
4106 static void haifa_init_insn (rtx);
4107
4108 /* Generates recovery code for BE_IN speculative INSN. */
4109 static void
4110 add_to_speculative_block (rtx insn)
4111 {
4112 ds_t ts;
4113 sd_iterator_def sd_it;
4114 dep_t dep;
4115 rtx twins = NULL;
4116 rtx_vec_t priorities_roots;
4117
4118 ts = TODO_SPEC (insn);
4119 gcc_assert (!(ts & ~BE_IN_SPEC));
4120
4121 if (ts & BE_IN_DATA)
4122 nr_be_in_data++;
4123 if (ts & BE_IN_CONTROL)
4124 nr_be_in_control++;
4125
4126 TODO_SPEC (insn) &= ~BE_IN_SPEC;
4127 gcc_assert (!TODO_SPEC (insn));
4128
4129 DONE_SPEC (insn) |= ts;
4130
4131 /* First we convert all simple checks to branchy ones. */
4132 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4133 sd_iterator_cond (&sd_it, &dep);)
4134 {
4135 rtx check = DEP_PRO (dep);
4136
4137 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
4138 {
4139 create_check_block_twin (check, true);
4140
4141 /* Restart search. */
4142 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4143 }
4144 else
4145 /* Continue search. */
4146 sd_iterator_next (&sd_it);
4147 }
4148
4149 priorities_roots = NULL;
4150 clear_priorities (insn, &priorities_roots);
4151
4152 while (1)
4153 {
4154 rtx check, twin;
4155 basic_block rec;
4156
4157 /* Get the first backward dependency of INSN. */
4158 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4159 if (!sd_iterator_cond (&sd_it, &dep))
4160 /* INSN has no backward dependencies left. */
4161 break;
4162
4163 gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
4164 && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
4165 && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
4166
4167 check = DEP_PRO (dep);
4168
4169 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
4170 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
4171
4172 rec = BLOCK_FOR_INSN (check);
4173
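/* Emit a copy of INSN at the end of the recovery block REC, right
before its closing jump. */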
4174 twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
4175 haifa_init_insn (twin);
4176
4177 sd_copy_back_deps (twin, insn, true);
4178
4179 if (sched_verbose && spec_info->dump)
4180 /* INSN_BB (insn) isn't determined for twin insns yet.
4181 So we can't use current_sched_info->print_insn. */
4182 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
4183 INSN_UID (twin), rec->index);
4184
4185 twins = alloc_INSN_LIST (twin, twins);
4186
4187 /* Add dependences between TWIN and all appropriate
4188 instructions from REC. */
4189 FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
4190 {
4191 rtx pro = DEP_PRO (dep);
4192
4193 gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
4194
4195 /* INSN might have dependencies on the instructions from
4196 several recovery blocks. In this iteration we process those
4197 producers that reside in REC. */
4198 if (BLOCK_FOR_INSN (pro) == rec)
4199 {
4200 dep_def _new_dep, *new_dep = &_new_dep;
4201
4202 init_dep (new_dep, pro, twin, REG_DEP_TRUE);
4203 sd_add_dep (new_dep, false);
4204 }
4205 }
4206
4207 process_insn_forw_deps_be_in_spec (insn, twin, ts);
4208
4209 /* Remove all dependencies between INSN and insns in REC. */
4210 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4211 sd_iterator_cond (&sd_it, &dep);)
4212 {
4213 rtx pro = DEP_PRO (dep);
4214
4215 if (BLOCK_FOR_INSN (pro) == rec)
4216 sd_delete_dep (sd_it);
4217 else
4218 sd_iterator_next (&sd_it);
4219 }
4220 }
4221
4222 /* We couldn't have added the dependencies between INSN and TWINS earlier
4223 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
4224 while (twins)
4225 {
4226 rtx twin;
4227
4228 twin = XEXP (twins, 0);
4229
4230 {
4231 dep_def _new_dep, *new_dep = &_new_dep;
4232
4233 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
4234 sd_add_dep (new_dep, false);
4235 }
4236
4237 twin = XEXP (twins, 1);
4238 free_INSN_LIST_node (twins);
4239 twins = twin;
4240 }
4241
4242 calc_priorities (priorities_roots);
4243 VEC_free (rtx, heap, priorities_roots);
4244 }
4245
4246 /* Extend the array pointed to by P and fill only the new part with zeros. */
4247 void *
4248 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
4249 {
4250 gcc_assert (new_nmemb >= old_nmemb);
4251 p = XRESIZEVAR (void, p, new_nmemb * size);
4252 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
4253 return p;
4254 }
4255
4256 /* Helper function.
4257 Find fallthru edge from PRED. */
4258 edge
4259 find_fallthru_edge_from (basic_block pred)
4260 {
4261 edge e;
4262 basic_block succ;
4263
4264 succ = pred->next_bb;
4265 gcc_assert (succ->prev_bb == pred);
4266
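/* Search whichever of the two edge vectors is shorter for the
fallthru edge between PRED and SUCC. */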
4267 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
4268 {
4269 e = find_fallthru_edge (pred->succs);
4270
4271 if (e)
4272 {
4273 gcc_assert (e->dest == succ);
4274 return e;
4275 }
4276 }
4277 else
4278 {
4279 e = find_fallthru_edge (succ->preds);
4280
4281 if (e)
4282 {
4283 gcc_assert (e->src == pred);
4284 return e;
4285 }
4286 }
4287
4288 return NULL;
4289 }
4290
4291 /* Extend per basic block data structures. */
4292 static void
4293 sched_extend_bb (void)
4294 {
4295 rtx insn;
4296
4297 /* The following is done to keep current_sched_info->next_tail non-null. */
4298 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
4299 if (NEXT_INSN (insn) == 0
4300 || (!NOTE_P (insn)
4301 && !LABEL_P (insn)
4302 /* Don't emit a NOTE if it would end up before a BARRIER. */
4303 && !BARRIER_P (NEXT_INSN (insn))))
4304 {
4305 rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
4306 /* Make the note appear outside the BB. */
4307 set_block_for_insn (note, NULL);
4308 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
4309 }
4310 }
4311
4312 /* Init per basic block data structures. */
4313 void
4314 sched_init_bbs (void)
4315 {
4316 sched_extend_bb ();
4317 }
4318
4319 /* Initialize BEFORE_RECOVERY variable. */
4320 static void
4321 init_before_recovery (basic_block *before_recovery_ptr)
4322 {
4323 basic_block last;
4324 edge e;
4325
4326 last = EXIT_BLOCK_PTR->prev_bb;
4327 e = find_fallthru_edge_from (last);
4328
4329 if (e)
4330 {
4331 /* We create two basic blocks:
4332 1. A single-instruction block inserted right after E->SRC,
4333 which jumps to
4334 2. An empty block placed right before EXIT_BLOCK.
4335 Recovery blocks will be emitted between these two blocks. */
4336
4337 basic_block single, empty;
4338 rtx x, label;
4339
4340 /* If the fallthru edge to the exit that we've found comes from the
4341 block we created before, don't do anything more. */
4342 if (last == after_recovery)
4343 return;
4344
4345 adding_bb_to_current_region_p = false;
4346
4347 single = sched_create_empty_bb (last);
4348 empty = sched_create_empty_bb (single);
4349
4350 /* Add new blocks to the root loop. */
4351 if (current_loops != NULL)
4352 {
4353 add_bb_to_loop (single, VEC_index (loop_p, current_loops->larray, 0));
4354 add_bb_to_loop (empty, VEC_index (loop_p, current_loops->larray, 0));
4355 }
4356
4357 single->count = last->count;
4358 empty->count = last->count;
4359 single->frequency = last->frequency;
4360 empty->frequency = last->frequency;
4361 BB_COPY_PARTITION (single, last);
4362 BB_COPY_PARTITION (empty, last);
4363
4364 redirect_edge_succ (e, single);
4365 make_single_succ_edge (single, empty, 0);
4366 make_single_succ_edge (empty, EXIT_BLOCK_PTR,
4367 EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
4368
4369 label = block_label (empty);
4370 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
4371 JUMP_LABEL (x) = label;
4372 LABEL_NUSES (label)++;
4373 haifa_init_insn (x);
4374
4375 emit_barrier_after (x);
4376
4377 sched_init_only_bb (empty, NULL);
4378 sched_init_only_bb (single, NULL);
4379 sched_extend_bb ();
4380
4381 adding_bb_to_current_region_p = true;
4382 before_recovery = single;
4383 after_recovery = empty;
4384
4385 if (before_recovery_ptr)
4386 *before_recovery_ptr = before_recovery;
4387
4388 if (sched_verbose >= 2 && spec_info->dump)
4389 fprintf (spec_info->dump,
4390 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
4391 last->index, single->index, empty->index);
4392 }
4393 else
4394 before_recovery = last;
4395 }
4396
4397 /* Returns new recovery block. */
4398 basic_block
4399 sched_create_recovery_block (basic_block *before_recovery_ptr)
4400 {
4401 rtx label;
4402 rtx barrier;
4403 basic_block rec;
4404
4405 haifa_recovery_bb_recently_added_p = true;
4406 haifa_recovery_bb_ever_added_p = true;
4407
4408 init_before_recovery (before_recovery_ptr);
4409
4410 barrier = get_last_bb_insn (before_recovery);
4411 gcc_assert (BARRIER_P (barrier));
4412
4413 label = emit_label_after (gen_label_rtx (), barrier);
4414
4415 rec = create_basic_block (label, label, before_recovery);
4416
4417 /* A recovery block always ends with an unconditional jump. */
4418 emit_barrier_after (BB_END (rec));
4419
4420 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
4421 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
4422
4423 if (sched_verbose && spec_info->dump)
4424 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
4425 rec->index);
4426
4427 return rec;
4428 }
4429
4430 /* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
4431 and emit necessary jumps. */
4432 void
4433 sched_create_recovery_edges (basic_block first_bb, basic_block rec,
4434 basic_block second_bb)
4435 {
4436 rtx label;
4437 rtx jump;
4438 int edge_flags;
4439
4440 /* Fix the incoming edge. */
4441 /* ??? Which other flags should be specified? */
4442 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
4443 /* The partition types compare equal when both are "unpartitioned". */
4444 edge_flags = EDGE_CROSSING;
4445 else
4446 edge_flags = 0;
4447
4448 make_edge (first_bb, rec, edge_flags);
4449 label = block_label (second_bb);
4450 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
4451 JUMP_LABEL (jump) = label;
4452 LABEL_NUSES (label)++;
4453
4454 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
4455 /* The partition types compare equal when both are "unpartitioned". */
4456 {
4457 /* Rewritten from cfgrtl.c. */
4458 if (flag_reorder_blocks_and_partition
4459 && targetm.have_named_sections)
4460 {
4461 /* We don't need the same note for the check because
4462 any_condjump_p (check) == true. */
4463 add_reg_note (jump, REG_CROSSING_JUMP, NULL_RTX);
4464 }
4465 edge_flags = EDGE_CROSSING;
4466 }
4467 else
4468 edge_flags = 0;
4469
4470 make_single_succ_edge (rec, second_bb, edge_flags);
4471 }
4472
4473 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
4474 INSN is a simple check that should be converted to a branchy one. */
4475 static void
4476 create_check_block_twin (rtx insn, bool mutate_p)
4477 {
4478 basic_block rec;
4479 rtx label, check, twin;
4480 ds_t fs;
4481 sd_iterator_def sd_it;
4482 dep_t dep;
4483 dep_def _new_dep, *new_dep = &_new_dep;
4484 ds_t todo_spec;
4485
4486 gcc_assert (ORIG_PAT (insn) != NULL_RTX);
4487
4488 if (!mutate_p)
4489 todo_spec = TODO_SPEC (insn);
4490 else
4491 {
4492 gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
4493 && (TODO_SPEC (insn) & SPECULATIVE) == 0);
4494
4495 todo_spec = CHECK_SPEC (insn);
4496 }
4497
4498 todo_spec &= SPECULATIVE;
4499
4500 /* Create recovery block. */
4501 if (mutate_p || targetm.sched.needs_block_p (todo_spec))
4502 {
4503 rec = sched_create_recovery_block (NULL);
4504 label = BB_HEAD (rec);
4505 }
4506 else
4507 {
4508 rec = EXIT_BLOCK_PTR;
4509 label = NULL_RTX;
4510 }
4511
4512 /* Emit CHECK. */
4513 check = targetm.sched.gen_spec_check (insn, label, todo_spec);
4514
4515 if (rec != EXIT_BLOCK_PTR)
4516 {
4517 /* To have mem_reg alive at the beginning of second_bb,
4518 we emit the check BEFORE insn, so that after splitting,
4519 insn will be at the beginning of second_bb, which will
4520 provide us with the correct life information. */
4521 check = emit_jump_insn_before (check, insn);
4522 JUMP_LABEL (check) = label;
4523 LABEL_NUSES (label)++;
4524 }
4525 else
4526 check = emit_insn_before (check, insn);
4527
4528 /* Extend data structures. */
4529 haifa_init_insn (check);
4530
4531 /* CHECK is being added to current region. Extend ready list. */
4532 gcc_assert (sched_ready_n_insns != -1);
4533 sched_extend_ready_list (sched_ready_n_insns + 1);
4534
4535 if (current_sched_info->add_remove_insn)
4536 current_sched_info->add_remove_insn (insn, 0);
4537
4538 RECOVERY_BLOCK (check) = rec;
4539
4540 if (sched_verbose && spec_info->dump)
4541 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
4542 (*current_sched_info->print_insn) (check, 0));
4543
4544 gcc_assert (ORIG_PAT (insn));
4545
4546 /* Initialize TWIN (twin is a duplicate of the original instruction
4547 in the recovery block). */
4548 if (rec != EXIT_BLOCK_PTR)
4549 {
4550 sd_iterator_def sd_it;
4551 dep_t dep;
4552
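/* For each resolved producer on which INSN had an output dependence,
add a resolved true dependence from that producer to CHECK -
apparently so that CHECK's tick accounts for those producers too. */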
4553 FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
4554 if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
4555 {
4556 struct _dep _dep2, *dep2 = &_dep2;
4557
4558 init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);
4559
4560 sd_add_dep (dep2, true);
4561 }
4562
4563 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
4564 haifa_init_insn (twin);
4565
4566 if (sched_verbose && spec_info->dump)
4567 /* INSN_BB (insn) isn't determined for twin insns yet.
4568 So we can't use current_sched_info->print_insn. */
4569 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
4570 INSN_UID (twin), rec->index);
4571 }
4572 else
4573 {
4574 ORIG_PAT (check) = ORIG_PAT (insn);
4575 HAS_INTERNAL_DEP (check) = 1;
4576 twin = check;
4577 /* ??? We probably should change all OUTPUT dependencies to
4578 (TRUE | OUTPUT). */
4579 }
4580
4581 /* Copy all resolved back dependencies of INSN to TWIN. This will
4582 provide correct value for INSN_TICK (TWIN). */
4583 sd_copy_back_deps (twin, insn, true);
4584
4585 if (rec != EXIT_BLOCK_PTR)
4586 /* In case of branchy check, fix CFG. */
4587 {
4588 basic_block first_bb, second_bb;
4589 rtx jump;
4590
4591 first_bb = BLOCK_FOR_INSN (check);
4592 second_bb = sched_split_block (first_bb, check);
4593
4594 sched_create_recovery_edges (first_bb, rec, second_bb);
4595
4596 sched_init_only_bb (second_bb, first_bb);
4597 sched_init_only_bb (rec, EXIT_BLOCK_PTR);
4598
4599 jump = BB_END (rec);
4600 haifa_init_insn (jump);
4601 }
4602
4603 /* Move backward dependences from INSN to CHECK and
4604 move forward dependences from INSN to TWIN. */
4605
4606 /* First, create dependencies between INSN's producers and CHECK & TWIN. */
4607 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4608 {
4609 rtx pro = DEP_PRO (dep);
4610 ds_t ds;
4611
4612 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
4613 check --TRUE--> producer ??? or ANTI ???
4614 twin --TRUE--> producer
4615 twin --ANTI--> check
4616
4617 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
4618 check --ANTI--> producer
4619 twin --ANTI--> producer
4620 twin --ANTI--> check
4621
4622 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
4623 check ~~TRUE~~> producer
4624 twin ~~TRUE~~> producer
4625 twin --ANTI--> check */
4626
4627 ds = DEP_STATUS (dep);
4628
4629 if (ds & BEGIN_SPEC)
4630 {
4631 gcc_assert (!mutate_p);
4632 ds &= ~BEGIN_SPEC;
4633 }
4634
4635 init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
4636 sd_add_dep (new_dep, false);
4637
4638 if (rec != EXIT_BLOCK_PTR)
4639 {
4640 DEP_CON (new_dep) = twin;
4641 sd_add_dep (new_dep, false);
4642 }
4643 }
4644
4645 /* Second, remove backward dependencies of INSN. */
4646 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4647 sd_iterator_cond (&sd_it, &dep);)
4648 {
4649 if ((DEP_STATUS (dep) & BEGIN_SPEC)
4650 || mutate_p)
4651 /* We can delete this dep because we overcome it with
4652 BEGIN_SPECULATION. */
4653 sd_delete_dep (sd_it);
4654 else
4655 sd_iterator_next (&sd_it);
4656 }
4657
4658 /* Future Speculations. Determine what BE_IN speculations will be like. */
4659 fs = 0;
4660
4661 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
4662 here. */
4663
4664 gcc_assert (!DONE_SPEC (insn));
4665
4666 if (!mutate_p)
4667 {
4668 ds_t ts = TODO_SPEC (insn);
4669
4670 DONE_SPEC (insn) = ts & BEGIN_SPEC;
4671 CHECK_SPEC (check) = ts & BEGIN_SPEC;
4672
4673 /* The weakness of future BE_IN speculations is inherited solely from
4674 the initial BEGIN speculation. */
4675 if (ts & BEGIN_DATA)
4676 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
4677 if (ts & BEGIN_CONTROL)
4678 fs = set_dep_weak (fs, BE_IN_CONTROL,
4679 get_dep_weak (ts, BEGIN_CONTROL));
4680 }
4681 else
4682 CHECK_SPEC (check) = CHECK_SPEC (insn);
4683
4684 /* Future speculations: call the helper. */
4685 process_insn_forw_deps_be_in_spec (insn, twin, fs);
4686
4687 if (rec != EXIT_BLOCK_PTR)
4688 {
4689 /* Which types of dependencies to use here is, in general,
4690 a machine-dependent question... But, for now,
4691 it is not. */
4692
4693 if (!mutate_p)
4694 {
4695 init_dep (new_dep, insn, check, REG_DEP_TRUE);
4696 sd_add_dep (new_dep, false);
4697
4698 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
4699 sd_add_dep (new_dep, false);
4700 }
4701 else
4702 {
4703 if (spec_info->dump)
4704 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
4705 (*current_sched_info->print_insn) (insn, 0));
4706
4707 /* Remove all dependencies of the INSN. */
4708 {
4709 sd_it = sd_iterator_start (insn, (SD_LIST_FORW
4710 | SD_LIST_BACK
4711 | SD_LIST_RES_BACK));
4712 while (sd_iterator_cond (&sd_it, &dep))
4713 sd_delete_dep (sd_it);
4714 }
4715
4716 /* If the former check (INSN) was already moved to the ready (or queue)
4717 list, add the new check (CHECK) there too. */
4718 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
4719 try_ready (check);
4720
4721 /* Remove old check from instruction stream and free its
4722 data. */
4723 sched_remove_insn (insn);
4724 }
4725
4726 init_dep (new_dep, check, twin, REG_DEP_ANTI);
4727 sd_add_dep (new_dep, false);
4728 }
4729 else
4730 {
4731 init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
4732 sd_add_dep (new_dep, false);
4733 }
4734
4735 if (!mutate_p)
4736 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
4737 because it'll be done later in add_to_speculative_block. */
4738 {
4739 rtx_vec_t priorities_roots = NULL;
4740
4741 clear_priorities (twin, &priorities_roots);
4742 calc_priorities (priorities_roots);
4743 VEC_free (rtx, heap, priorities_roots);
4744 }
4745 }
4746
4747 /* Remove dependencies between instructions in the recovery block REC
4748 and the usual region instructions. Keep the inner dependencies so it
4749 won't be necessary to recompute them. */
4750 static void
4751 fix_recovery_deps (basic_block rec)
4752 {
4753 rtx note, insn, jump, ready_list = 0;
4754 bitmap_head in_ready;
4755 rtx link;
4756
4757 bitmap_initialize (&in_ready, 0);
4758
4759 /* NOTE - a basic block note. */
4760 note = NEXT_INSN (BB_HEAD (rec));
4761 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4762 insn = BB_END (rec);
4763 gcc_assert (JUMP_P (insn));
4764 insn = PREV_INSN (insn);
4765
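/* Walk the recovery block backwards, from the insn just before the
closing jump down to the basic block note, deleting forward
dependencies that leave REC and collecting their consumers. */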
4766 do
4767 {
4768 sd_iterator_def sd_it;
4769 dep_t dep;
4770
4771 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4772 sd_iterator_cond (&sd_it, &dep);)
4773 {
4774 rtx consumer = DEP_CON (dep);
4775
4776 if (BLOCK_FOR_INSN (consumer) != rec)
4777 {
4778 sd_delete_dep (sd_it);
4779
4780 if (bitmap_set_bit (&in_ready, INSN_LUID (consumer)))
4781 ready_list = alloc_INSN_LIST (consumer, ready_list);
4782 }
4783 else
4784 {
4785 gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
4786
4787 sd_iterator_next (&sd_it);
4788 }
4789 }
4790
4791 insn = PREV_INSN (insn);
4792 }
4793 while (insn != note);
4794
4795 bitmap_clear (&in_ready);
4796
4797 /* Try to add instructions to the ready or queue list. */
4798 for (link = ready_list; link; link = XEXP (link, 1))
4799 try_ready (XEXP (link, 0));
4800 free_INSN_LIST_list (&ready_list);
4801
4802 /* Fix the jump's dependencies. */
4803 insn = BB_HEAD (rec);
4804 jump = BB_END (rec);
4805
4806 gcc_assert (LABEL_P (insn));
4807 insn = NEXT_INSN (insn);
4808
4809 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4810 add_jump_dependencies (insn, jump);
4811 }
4812
4813 /* Change pattern of INSN to NEW_PAT. */
4814 void
4815 sched_change_pattern (rtx insn, rtx new_pat)
4816 {
4817 int t;
4818
4819 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
4820 gcc_assert (t);
4821 dfa_clear_single_insn_cache (insn);
4822 }
4823
4824 /* Change pattern of INSN to NEW_PAT. Invalidate cached haifa
4825 instruction data. */
4826 static void
4827 haifa_change_pattern (rtx insn, rtx new_pat)
4828 {
4829 sched_change_pattern (insn, new_pat);
4830
4831 /* Invalidate INSN_COST, so it'll be recalculated. */
4832 INSN_COST (insn) = -1;
4833 /* Invalidate INSN_TICK, so it'll be recalculated. */
4834 INSN_TICK (insn) = INVALID_TICK;
4835 }
4836
4837 /* -1 - can't speculate,
4838 0 - for speculation with REQUEST mode it is OK to use
4839 current instruction pattern,
4840 1 - need to change pattern for *NEW_PAT to be speculative. */
4841 int
4842 sched_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4843 {
4844 gcc_assert (current_sched_info->flags & DO_SPECULATION
4845 && (request & SPECULATIVE)
4846 && sched_insn_is_legitimate_for_speculation_p (insn, request));
4847
4848 if ((request & spec_info->mask) != request)
4849 return -1;
4850
4851 if (request & BE_IN_SPEC
4852 && !(request & BEGIN_SPEC))
4853 return 0;
4854
4855 return targetm.sched.speculate_insn (insn, request, new_pat);
4856 }
4857
4858 static int
4859 haifa_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4860 {
4861 gcc_assert (sched_deps_info->generate_spec_deps
4862 && !IS_SPECULATION_CHECK_P (insn));
4863
4864 if (HAS_INTERNAL_DEP (insn)
4865 || SCHED_GROUP_P (insn))
4866 return -1;
4867
4868 return sched_speculate_insn (insn, request, new_pat);
4869 }
4870
4871 /* Print some information about block BB, which starts with HEAD and
4872 ends with TAIL, before scheduling it.
4873 I is zero if the scheduler is about to start with a fresh ebb. */
4874 static void
4875 dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
4876 {
4877 if (!i)
4878 fprintf (sched_dump,
4879 ";; ======================================================\n");
4880 else
4881 fprintf (sched_dump,
4882 ";; =====================ADVANCING TO=====================\n");
4883 fprintf (sched_dump,
4884 ";; -- basic block %d from %d to %d -- %s reload\n",
4885 bb->index, INSN_UID (head), INSN_UID (tail),
4886 (reload_completed ? "after" : "before"));
4887 fprintf (sched_dump,
4888 ";; ======================================================\n");
4889 fprintf (sched_dump, "\n");
4890 }
4891
4892 /* Unlink basic block notes and labels and save them, so they
4893 can be easily restored. We unlink basic block notes in the EBB to
4894 provide backward compatibility with the previous code, as target backends
4895 assume that there will be only instructions between
4896 current_sched_info->{head and tail}. We restore these notes as soon
4897 as we can.
4898 FIRST (LAST) is the first (last) basic block in the ebb.
4899 NB: In the usual case (FIRST == LAST) nothing is really done. */
4900 void
4901 unlink_bb_notes (basic_block first, basic_block last)
4902 {
4903 /* We DON'T unlink basic block notes of the first block in the ebb. */
4904 if (first == last)
4905 return;
4906
4907 bb_header = XNEWVEC (rtx, last_basic_block);
4908
4909 /* Make a sentinel. */
4910 if (last->next_bb != EXIT_BLOCK_PTR)
4911 bb_header[last->next_bb->index] = 0;
4912
4913 first = first->next_bb;
4914 do
4915 {
4916 rtx prev, label, note, next;
4917
4918 label = BB_HEAD (last);
4919 if (LABEL_P (label))
4920 note = NEXT_INSN (label);
4921 else
4922 note = label;
4923 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4924
4925 prev = PREV_INSN (label);
4926 next = NEXT_INSN (note);
4927 gcc_assert (prev && next);
4928
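/* Splice the [label, bb note] pair out of the insn chain; it is
remembered in bb_header below so restore_bb_notes can put it back. */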
4929 NEXT_INSN (prev) = next;
4930 PREV_INSN (next) = prev;
4931
4932 bb_header[last->index] = label;
4933
4934 if (last == first)
4935 break;
4936
4937 last = last->prev_bb;
4938 }
4939 while (1);
4940 }
4941
4942 /* Restore basic block notes.
4943 FIRST is the first basic block in the ebb. */
4944 static void
4945 restore_bb_notes (basic_block first)
4946 {
4947 if (!bb_header)
4948 return;
4949
4950 /* We DON'T unlink basic block notes of the first block in the ebb. */
4951 first = first->next_bb;
4952 /* Remember: FIRST is actually the second basic block in the ebb. */
4953
4954 while (first != EXIT_BLOCK_PTR
4955 && bb_header[first->index])
4956 {
4957 rtx prev, label, note, next;
4958
4959 label = bb_header[first->index];
4960 prev = PREV_INSN (label);
4961 next = NEXT_INSN (prev);
4962
4963 if (LABEL_P (label))
4964 note = NEXT_INSN (label);
4965 else
4966 note = label;
4967 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4968
4969 bb_header[first->index] = 0;
4970
4971 NEXT_INSN (prev) = label;
4972 NEXT_INSN (note) = next;
4973 PREV_INSN (next) = note;
4974
4975 first = first->next_bb;
4976 }
4977
4978 free (bb_header);
4979 bb_header = 0;
4980 }
4981
4982 /* Helper function.
4983 Fix CFG after both in- and inter-block movement of
4984 control_flow_insn_p JUMP. */
4985 static void
4986 fix_jump_move (rtx jump)
4987 {
4988 basic_block bb, jump_bb, jump_bb_next;
4989
4990 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4991 jump_bb = BLOCK_FOR_INSN (jump);
4992 jump_bb_next = jump_bb->next_bb;
4993
4994 gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
4995 || IS_SPECULATION_BRANCHY_CHECK_P (jump));
4996
4997 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
4998 /* if jump_bb_next is not empty. */
4999 BB_END (jump_bb) = BB_END (jump_bb_next);
5000
5001 if (BB_END (bb) != PREV_INSN (jump))
5002 /* Then there are instructions after jump that should be placed
5003 into jump_bb_next. */
5004 BB_END (jump_bb_next) = BB_END (bb);
5005 else
5006 /* Otherwise jump_bb_next is empty. */
5007 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
5008
5009 /* To make the assertion in move_insn happy. */
5010 BB_END (bb) = PREV_INSN (jump);
5011
5012 update_bb_for_insn (jump_bb_next);
5013 }
5014
5015 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
5016 static void
5017 move_block_after_check (rtx jump)
5018 {
5019 basic_block bb, jump_bb, jump_bb_next;
5020 VEC(edge,gc) *t;
5021
5022 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
5023 jump_bb = BLOCK_FOR_INSN (jump);
5024 jump_bb_next = jump_bb->next_bb;
5025
5026 update_bb_for_insn (jump_bb);
5027
5028 gcc_assert (IS_SPECULATION_CHECK_P (jump)
5029 || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
5030
5031 unlink_block (jump_bb_next);
5032 link_block (jump_bb_next, bb);
5033
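/* Rotate the successor vectors: BB takes JUMP_BB's successors,
JUMP_BB takes JUMP_BB_NEXT's, and JUMP_BB_NEXT takes BB's original
ones (saved in T). */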
5034 t = bb->succs;
5035 bb->succs = 0;
5036 move_succs (&(jump_bb->succs), bb);
5037 move_succs (&(jump_bb_next->succs), jump_bb);
5038 move_succs (&t, jump_bb_next);
5039
5040 df_mark_solutions_dirty ();
5041
5042 common_sched_info->fix_recovery_cfg
5043 (bb->index, jump_bb->index, jump_bb_next->index);
5044 }
5045
5046 /* Helper function for move_block_after_check.
5047 This function attaches the edge vector pointed to by SUCCSP to
5048 block TO. */
5049 static void
5050 move_succs (VEC(edge,gc) **succsp, basic_block to)
5051 {
5052 edge e;
5053 edge_iterator ei;
5054
5055 gcc_assert (to->succs == 0);
5056
5057 to->succs = *succsp;
5058
5059 FOR_EACH_EDGE (e, ei, to->succs)
5060 e->src = to;
5061
5062 *succsp = 0;
5063 }
5064
5065 /* Remove INSN from the instruction stream.
5066 INSN should not have any dependencies. */
5067 static void
5068 sched_remove_insn (rtx insn)
5069 {
5070 sd_finish_insn (insn);
5071
5072 change_queue_index (insn, QUEUE_NOWHERE);
5073 current_sched_info->add_remove_insn (insn, 1);
5074 remove_insn (insn);
5075 }
5076
5077 /* Clear the priorities of all instructions that are forward dependent
5078 on INSN. Store in the vector pointed to by ROOTS_PTR the insns on which
5079 priority () should be invoked to initialize all cleared priorities. */
5080 static void
5081 clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
5082 {
5083 sd_iterator_def sd_it;
5084 dep_t dep;
5085 bool insn_is_root_p = true;
5086
5087 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
5088
5089 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
5090 {
5091 rtx pro = DEP_PRO (dep);
5092
5093 if (INSN_PRIORITY_STATUS (pro) >= 0
5094 && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
5095 {
5096 /* If DEP doesn't contribute to priority then INSN itself should
5097 be added to priority roots. */
5098 if (contributes_to_priority_p (dep))
5099 insn_is_root_p = false;
5100
5101 INSN_PRIORITY_STATUS (pro) = -1;
5102 clear_priorities (pro, roots_ptr);
5103 }
5104 }
5105
5106 if (insn_is_root_p)
5107 VEC_safe_push (rtx, heap, *roots_ptr, insn);
5108 }
5109
5110 /* Recompute priorities of instructions whose priorities might have been
5111 changed. ROOTS is a vector of instructions whose priority computation will
5112 trigger initialization of all cleared priorities. */
5113 static void
5114 calc_priorities (rtx_vec_t roots)
5115 {
5116 int i;
5117 rtx insn;
5118
5119 FOR_EACH_VEC_ELT (rtx, roots, i, insn)
5120 priority (insn);
5121 }
5122
5123
5124 /* Add dependences between JUMP and other instructions in the recovery
5125 block. INSN is the first insn in the recovery block. */
5126 static void
5127 add_jump_dependencies (rtx insn, rtx jump)
5128 {
5129 do
5130 {
5131 insn = NEXT_INSN (insn);
5132 if (insn == jump)
5133 break;
5134
5135 if (dep_list_size (insn) == 0)
5136 {
5137 dep_def _new_dep, *new_dep = &_new_dep;
5138
5139 init_dep (new_dep, insn, jump, REG_DEP_ANTI);
5140 sd_add_dep (new_dep, false);
5141 }
5142 }
5143 while (1);
5144
5145 gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
5146 }
5147
5148 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
5149 rtx
5150 bb_note (basic_block bb)
5151 {
5152 rtx note;
5153
5154 note = BB_HEAD (bb);
5155 if (LABEL_P (note))
5156 note = NEXT_INSN (note);
5157
5158 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5159 return note;
5160 }
5161
5162 #ifdef ENABLE_CHECKING
5163 /* Helper function for check_cfg.
5164 Return nonzero, if edge vector pointed to by EL has edge with TYPE in
5165 its flags. */
5166 static int
5167 has_edge_p (VEC(edge,gc) *el, int type)
5168 {
5169 edge e;
5170 edge_iterator ei;
5171
5172 FOR_EACH_EDGE (e, ei, el)
5173 if (e->flags & type)
5174 return 1;
5175 return 0;
5176 }
5177
5178 /* Search back, starting at INSN, for an insn that is not a
5179 NOTE_INSN_VAR_LOCATION. Don't search beyond HEAD, and return HEAD if
5180 no such insn can be found. */
5181 static inline rtx
5182 prev_non_location_insn (rtx insn, rtx head)
5183 {
5184 while (insn != head && NOTE_P (insn)
5185 && NOTE_KIND (insn) == NOTE_INSN_VAR_LOCATION)
5186 insn = PREV_INSN (insn);
5187
5188 return insn;
5189 }
5190
5191 /* Check a few properties of the CFG between HEAD and TAIL.
5192 If HEAD (TAIL) is NULL, check from the beginning (to the end) of the
5193 instruction stream. */
5194 static void
5195 check_cfg (rtx head, rtx tail)
5196 {
5197 rtx next_tail;
5198 basic_block bb = 0;
5199 int not_first = 0, not_last;
5200
5201 if (head == NULL)
5202 head = get_insns ();
5203 if (tail == NULL)
5204 tail = get_last_insn ();
5205 next_tail = NEXT_INSN (tail);
5206
5207 do
5208 {
5209 not_last = head != tail;
5210
5211 if (not_first)
5212 gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
5213 if (not_last)
5214 gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
5215
5216 if (LABEL_P (head)
5217 || (NOTE_INSN_BASIC_BLOCK_P (head)
5218 && (!not_first
5219 || (not_first && !LABEL_P (PREV_INSN (head))))))
5220 {
5221 gcc_assert (bb == 0);
5222 bb = BLOCK_FOR_INSN (head);
5223 if (bb != 0)
5224 gcc_assert (BB_HEAD (bb) == head);
5225 else
5226 /* This is the case of a jump table. See inside_basic_block_p (). */
5227 gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
5228 }
5229
5230 if (bb == 0)
5231 {
5232 gcc_assert (!inside_basic_block_p (head));
5233 head = NEXT_INSN (head);
5234 }
5235 else
5236 {
5237 gcc_assert (inside_basic_block_p (head)
5238 || NOTE_P (head));
5239 gcc_assert (BLOCK_FOR_INSN (head) == bb);
5240
5241 if (LABEL_P (head))
5242 {
5243 head = NEXT_INSN (head);
5244 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
5245 }
5246 else
5247 {
5248 if (control_flow_insn_p (head))
5249 {
5250 gcc_assert (prev_non_location_insn (BB_END (bb), head)
5251 == head);
5252
5253 if (any_uncondjump_p (head))
5254 gcc_assert (EDGE_COUNT (bb->succs) == 1
5255 && BARRIER_P (NEXT_INSN (head)));
5256 else if (any_condjump_p (head))
5257 gcc_assert (/* Usual case. */
5258 (EDGE_COUNT (bb->succs) > 1
5259 && !BARRIER_P (NEXT_INSN (head)))
5260 /* Or jump to the next instruction. */
5261 || (EDGE_COUNT (bb->succs) == 1
5262 && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
5263 == JUMP_LABEL (head))));
5264 }
5265 if (BB_END (bb) == head)
5266 {
5267 if (EDGE_COUNT (bb->succs) > 1)
5268 gcc_assert (control_flow_insn_p (prev_non_location_insn
5269 (head, BB_HEAD (bb)))
5270 || has_edge_p (bb->succs, EDGE_COMPLEX));
5271 bb = 0;
5272 }
5273
5274 head = NEXT_INSN (head);
5275 }
5276 }
5277
5278 not_first = 1;
5279 }
5280 while (head != next_tail);
5281
5282 gcc_assert (bb == 0);
5283 }
5284
5285 #endif /* ENABLE_CHECKING */
5286
5287 /* Extend per basic block data structures. */
5288 static void
5289 extend_bb (void)
5290 {
5291 if (sched_scan_info->extend_bb)
5292 sched_scan_info->extend_bb ();
5293 }
5294
5295 /* Init data for BB. */
5296 static void
5297 init_bb (basic_block bb)
5298 {
5299 if (sched_scan_info->init_bb)
5300 sched_scan_info->init_bb (bb);
5301 }
5302
5303 /* Extend per insn data structures. */
5304 static void
5305 extend_insn (void)
5306 {
5307 if (sched_scan_info->extend_insn)
5308 sched_scan_info->extend_insn ();
5309 }
5310
5311 /* Init data structures for INSN. */
5312 static void
5313 init_insn (rtx insn)
5314 {
5315 if (sched_scan_info->init_insn)
5316 sched_scan_info->init_insn (insn);
5317 }
5318
5319 /* Init all insns in BB. */
5320 static void
5321 init_insns_in_bb (basic_block bb)
5322 {
5323 rtx insn;
5324
5325 FOR_BB_INSNS (bb, insn)
5326 init_insn (insn);
5327 }
5328
5329 /* A driver function to add a set of basic blocks (BBS),
5330 a single basic block (BB), a set of insns (INSNS) or a single insn (INSN)
5331 to the scheduling region. */
5332 void
5333 sched_scan (const struct sched_scan_info_def *ssi,
5334 bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
5335 {
5336 sched_scan_info = ssi;
5337
5338 if (bbs != NULL || bb != NULL)
5339 {
5340 extend_bb ();
5341
5342 if (bbs != NULL)
5343 {
5344 unsigned i;
5345 basic_block x;
5346
5347 FOR_EACH_VEC_ELT (basic_block, bbs, i, x)
5348 init_bb (x);
5349 }
5350
5351 if (bb != NULL)
5352 init_bb (bb);
5353 }
5354
5355 extend_insn ();
5356
5357 if (bbs != NULL)
5358 {
5359 unsigned i;
5360 basic_block x;
5361
5362 FOR_EACH_VEC_ELT (basic_block, bbs, i, x)
5363 init_insns_in_bb (x);
5364 }
5365
5366 if (bb != NULL)
5367 init_insns_in_bb (bb);
5368
5369 if (insns != NULL)
5370 {
5371 unsigned i;
5372 rtx x;
5373
5374 FOR_EACH_VEC_ELT (rtx, insns, i, x)
5375 init_insn (x);
5376 }
5377
5378 if (insn != NULL)
5379 init_insn (insn);
5380 }
5381
5382
5383 /* Extend data structures for logical insn UID. */
5384 static void
5385 luids_extend_insn (void)
5386 {
5387 int new_luids_max_uid = get_max_uid () + 1;
5388
5389 VEC_safe_grow_cleared (int, heap, sched_luids, new_luids_max_uid);
5390 }
5391
5392 /* Initialize LUID for INSN. */
5393 static void
5394 luids_init_insn (rtx insn)
5395 {
5396 int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
5397 int luid;
5398
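/* A nonnegative I is the number of luids to allocate for this insn;
a negative value means the insn gets no luid at all. */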
5399 if (i >= 0)
5400 {
5401 luid = sched_max_luid;
5402 sched_max_luid += i;
5403 }
5404 else
5405 luid = -1;
5406
5407 SET_INSN_LUID (insn, luid);
5408 }
5409
5410 /* Initialize luids for BBS, BB, INSNS and INSN.
5411 The hook common_sched_info->luid_for_non_insn () is used to determine
5412 if notes, labels, etc. need luids. */
5413 void
5414 sched_init_luids (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
5415 {
5416 const struct sched_scan_info_def ssi =
5417 {
5418 NULL, /* extend_bb */
5419 NULL, /* init_bb */
5420 luids_extend_insn, /* extend_insn */
5421 luids_init_insn /* init_insn */
5422 };
5423
5424 sched_scan (&ssi, bbs, bb, insns, insn);
5425 }
5426
5427 /* Free LUIDs. */
5428 void
5429 sched_finish_luids (void)
5430 {
5431 VEC_free (int, heap, sched_luids);
5432 sched_max_luid = 1;
5433 }
5434
5435 /* Return logical uid of INSN. Helpful while debugging. */
5436 int
5437 insn_luid (rtx insn)
5438 {
5439 return INSN_LUID (insn);
5440 }
5441
5442 /* Extend per insn data in the target. */
5443 void
5444 sched_extend_target (void)
5445 {
5446 if (targetm.sched.h_i_d_extended)
5447 targetm.sched.h_i_d_extended ();
5448 }
5449
5450 /* Extend global scheduler structures (those that live across calls to
5451 schedule_block) to include information about the just-emitted INSN. */
5452 static void
5453 extend_h_i_d (void)
5454 {
5455 int reserve = (get_max_uid () + 1
5456 - VEC_length (haifa_insn_data_def, h_i_d));
5457 if (reserve > 0
5458 && ! VEC_space (haifa_insn_data_def, h_i_d, reserve))
5459 {
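/* Grow with slack - to 3/2 of the current maximum uid - presumably so
that repeated calls need not reallocate for every newly emitted insn. */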
5460 VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d,
5461 3 * get_max_uid () / 2);
5462 sched_extend_target ();
5463 }
5464 }
5465
5466 /* Initialize h_i_d entry of the INSN with default values.
5467 Values that are not explicitly initialized here hold zero. */
5468 static void
5469 init_h_i_d (rtx insn)
5470 {
5471 if (INSN_LUID (insn) > 0)
5472 {
5473 INSN_COST (insn) = -1;
5474 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
5475 INSN_TICK (insn) = INVALID_TICK;
5476 INTER_TICK (insn) = INVALID_TICK;
5477 TODO_SPEC (insn) = HARD_DEP;
5478 }
5479 }
5480
5481 /* Initialize haifa_insn_data for BBS, BB, INSNS and INSN. */
5482 void
5483 haifa_init_h_i_d (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
5484 {
5485 const struct sched_scan_info_def ssi =
5486 {
5487 NULL, /* extend_bb */
5488 NULL, /* init_bb */
5489 extend_h_i_d, /* extend_insn */
5490 init_h_i_d /* init_insn */
5491 };
5492
5493 sched_scan (&ssi, bbs, bb, insns, insn);
5494 }
5495
5496 /* Finalize haifa_insn_data. */
5497 void
5498 haifa_finish_h_i_d (void)
5499 {
5500 int i;
5501 haifa_insn_data_t data;
5502 struct reg_use_data *use, *next;
5503
5504 FOR_EACH_VEC_ELT (haifa_insn_data_def, h_i_d, i, data)
5505 {
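/* Free the register pressure information and the register use list
recorded for this insn, if any. */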
5506 if (data->reg_pressure != NULL)
5507 free (data->reg_pressure);
5508 for (use = data->reg_use_list; use != NULL; use = next)
5509 {
5510 next = use->next_insn_use;
5511 free (use);
5512 }
5513 }
5514 VEC_free (haifa_insn_data_def, heap, h_i_d);
5515 }
5516
5517 /* Init data for the new insn INSN. */
5518 static void
5519 haifa_init_insn (rtx insn)
5520 {
5521 gcc_assert (insn != NULL);
5522
5523 sched_init_luids (NULL, NULL, NULL, insn);
5524 sched_extend_target ();
5525 sched_deps_init (false);
5526 haifa_init_h_i_d (NULL, NULL, NULL, insn);
5527
5528 if (adding_bb_to_current_region_p)
5529 {
5530 sd_init_insn (insn);
5531
5532 /* Extend dependency caches by one element. */
5533 extend_dependency_caches (1, false);
5534 }
5535 }
5536
5537 /* Init data for the new basic block BB which comes after AFTER. */
5538 static void
5539 haifa_init_only_bb (basic_block bb, basic_block after)
5540 {
5541 gcc_assert (bb != NULL);
5542
5543 sched_init_bbs ();
5544
5545 if (common_sched_info->add_block)
5546 /* This changes only data structures of the front-end. */
5547 common_sched_info->add_block (bb, after);
5548 }
5549
5550 /* A generic version of sched_split_block (). */
5551 basic_block
5552 sched_split_block_1 (basic_block first_bb, rtx after)
5553 {
5554 edge e;
5555
5556 e = split_block (first_bb, after);
5557 gcc_assert (e->src == first_bb);
5558
5559 /* sched_split_block emits a note if *check == BB_END. It would probably
5560 be better to rip that note off. */
5561
5562 return e->dest;
5563 }
5564
5565 /* A generic version of sched_create_empty_bb (). */
5566 basic_block
5567 sched_create_empty_bb_1 (basic_block after)
5568 {
5569 return create_empty_bb (after);
5570 }
5571
5572 /* Insert PAT as an INSN into the schedule and update the necessary data
5573 structures to account for it. */
5574 rtx
5575 sched_emit_insn (rtx pat)
5576 {
5577 rtx insn = emit_insn_after (pat, last_scheduled_insn);
5578 last_scheduled_insn = insn;
5579 haifa_init_insn (insn);
5580 return insn;
5581 }
5582
5583 /* This function returns a candidate satisfying dispatch constraints from
5584 the ready list. */
5585
5586 static rtx
5587 ready_remove_first_dispatch (struct ready_list *ready)
5588 {
5589 int i;
5590 rtx insn = ready_element (ready, 0);
5591
5592 if (ready->n_ready == 1
5593 || INSN_CODE (insn) < 0
5594 || !INSN_P (insn)
5595 || !active_insn_p (insn)
5596 || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
5597 return ready_remove_first (ready);
5598
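/* The first ready insn does not fit the dispatch window; scan the
rest of the ready list for one that does. */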
5599 for (i = 1; i < ready->n_ready; i++)
5600 {
5601 insn = ready_element (ready, i);
5602
5603 if (INSN_CODE (insn) < 0
5604 || !INSN_P (insn)
5605 || !active_insn_p (insn))
5606 continue;
5607
5608 if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
5609 {
5610 /* Return ith element of ready. */
5611 insn = ready_remove (ready, i);
5612 return insn;
5613 }
5614 }
5615
5616 if (targetm.sched.dispatch (NULL_RTX, DISPATCH_VIOLATION))
5617 return ready_remove_first (ready);
5618
5619 for (i = 1; i < ready->n_ready; i++)
5620 {
5621 insn = ready_element (ready, i);
5622
5623 if (INSN_CODE (insn) < 0
5624 || !INSN_P (insn)
5625 || !active_insn_p (insn))
5626 continue;
5627
5628 /* Return i-th element of ready. */
5629 if (targetm.sched.dispatch (insn, IS_CMP))
5630 return ready_remove (ready, i);
5631 }
5632
5633 return ready_remove_first (ready);
5634 }
5635
5636 /* Get the number of ready insns in the ready list. */
5637
5638 int
5639 number_in_ready (void)
5640 {
5641 return ready.n_ready;
5642 }
5643
5644 /* Return the I-th element of the ready list. */
5645
5646 rtx
5647 get_ready_element (int i)
5648 {
5649 return ready_element (&ready, i);
5650 }
5651
5652 #endif /* INSN_SCHEDULING */