re PR bootstrap/48403 (bootstrap comparison failure)
[gcc.git] / gcc / haifa-sched.c
1 /* Instruction scheduling pass.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
6 and currently maintained by, Jim Wilson (wilson@cygnus.com)
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 /* Instruction scheduling pass. This file, along with sched-deps.c,
25 contains the generic parts. The actual entry point is found for
26 the normal instruction scheduling pass is found in sched-rgn.c.
27
28 We compute insn priorities based on data dependencies. Flow
29 analysis only creates a fraction of the data-dependencies we must
30 observe: namely, only those dependencies which the combiner can be
31 expected to use. For this pass, we must therefore create the
32 remaining dependencies we need to observe: register dependencies,
33 memory dependencies, dependencies to keep function calls in order,
34 and the dependence between a conditional branch and the setting of
35 condition codes are all dealt with here.
36
37 The scheduler first traverses the data flow graph, starting with
38 the last instruction, and proceeding to the first, assigning values
39 to insn_priority as it goes. This sorts the instructions
40 topologically by data dependence.
41
42 Once priorities have been established, we order the insns using
43 list scheduling. This works as follows: starting with a list of
44 all the ready insns, and sorted according to priority number, we
45 schedule the insn from the end of the list by placing its
46 predecessors in the list according to their priority order. We
47 consider this insn scheduled by setting the pointer to the "end" of
48 the list to point to the previous insn. When an insn has no
49 predecessors, we either queue it until sufficient time has elapsed
50 or add it to the ready list. As the instructions are scheduled or
51 when stalls are introduced, the queue advances and dumps insns into
52 the ready list. When all insns down to the lowest priority have
53 been scheduled, the critical path of the basic block has been made
54 as short as possible. The remaining insns are then scheduled in
55 remaining slots.
56
57 The following list shows the order in which we want to break ties
58 among insns in the ready list:
59
60 1. choose insn with the longest path to end of bb, ties
61 broken by
62 2. choose insn with least contribution to register pressure,
63 ties broken by
64 3. prefer in-block upon interblock motion, ties broken by
65 4. prefer useful upon speculative motion, ties broken by
66 5. choose insn with largest control flow probability, ties
67 broken by
68 6. choose insn with the least dependences upon the previously
69 scheduled insn, or finally
70 7 choose the insn which has the most insns dependent on it.
71 8. choose insn with lowest UID.
72
73 Memory references complicate matters. Only if we can be certain
74 that memory references are not part of the data dependency graph
75 (via true, anti, or output dependence), can we move operations past
76 memory references. To first approximation, reads can be done
77 independently, while writes introduce dependencies. Better
78 approximations will yield fewer dependencies.
79
80 Before reload, an extended analysis of interblock data dependences
81 is required for interblock scheduling. This is performed in
82 compute_block_backward_dependences ().
83
84 Dependencies set up by memory references are treated in exactly the
85 same way as other dependencies, by using insn backward dependences
86 INSN_BACK_DEPS. INSN_BACK_DEPS are translated into forward dependences
87 INSN_FORW_DEPS the purpose of forward list scheduling.
88
89 Having optimized the critical path, we may have also unduly
90 extended the lifetimes of some registers. If an operation requires
91 that constants be loaded into registers, it is certainly desirable
92 to load those constants as early as necessary, but no earlier.
93 I.e., it will not do to load up a bunch of registers at the
94 beginning of a basic block only to use them at the end, if they
95 could be loaded later, since this may result in excessive register
96 utilization.
97
98 Note that since branches are never in basic blocks, but only end
99 basic blocks, this pass will not move branches. But that is ok,
100 since we can use GNU's delayed branch scheduling pass to take care
101 of this case.
102
103 Also note that no further optimizations based on algebraic
104 identities are performed, so this pass would be a good one to
105 perform instruction splitting, such as breaking up a multiply
106 instruction into shifts and adds where that is profitable.
107
108 Given the memory aliasing analysis that this pass should perform,
109 it should be possible to remove redundant stores to memory, and to
110 load values from registers instead of hitting memory.
111
112 Before reload, speculative insns are moved only if a 'proof' exists
113 that no exception will be caused by this, and if no live registers
114 exist that inhibit the motion (live registers constraints are not
115 represented by data dependence edges).
116
117 This pass must update information that subsequent passes expect to
118 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
119 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
120
121 The information in the line number notes is carefully retained by
122 this pass. Notes that refer to the starting and ending of
123 exception regions are also carefully retained by this pass. All
124 other NOTE insns are grouped in their same relative order at the
125 beginning of basic blocks and regions that have been scheduled. */
126 \f
127 #include "config.h"
128 #include "system.h"
129 #include "coretypes.h"
130 #include "tm.h"
131 #include "diagnostic-core.h"
132 #include "rtl.h"
133 #include "tm_p.h"
134 #include "hard-reg-set.h"
135 #include "regs.h"
136 #include "function.h"
137 #include "flags.h"
138 #include "insn-config.h"
139 #include "insn-attr.h"
140 #include "except.h"
141 #include "recog.h"
142 #include "sched-int.h"
143 #include "target.h"
144 #include "output.h"
145 #include "params.h"
146 #include "vecprim.h"
147 #include "dbgcnt.h"
148 #include "cfgloop.h"
149 #include "ira.h"
150 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
151
152 #ifdef INSN_SCHEDULING
153
154 /* issue_rate is the number of insns that can be scheduled in the same
155 machine cycle. It can be defined in the config/mach/mach.h file,
156 otherwise we set it to 1. */
157
158 int issue_rate;
159
160 /* sched-verbose controls the amount of debugging output the
161 scheduler prints. It is controlled by -fsched-verbose=N:
162 N>0 and no -DSR : the output is directed to stderr.
163 N>=10 will direct the printouts to stderr (regardless of -dSR).
164 N=1: same as -dSR.
165 N=2: bb's probabilities, detailed ready list info, unit/insn info.
166 N=3: rtl at abort point, control-flow, regions info.
167 N=5: dependences info. */
168
169 int sched_verbose = 0;
170
171 /* Debugging file. All printouts are sent to dump, which is always set,
172 either to stderr, or to the dump listing file (-dRS). */
173 FILE *sched_dump = 0;
174
175 /* This is a placeholder for the scheduler parameters common
176 to all schedulers. */
177 struct common_sched_info_def *common_sched_info;
178
179 #define INSN_TICK(INSN) (HID (INSN)->tick)
180 #define INTER_TICK(INSN) (HID (INSN)->inter_tick)
181
182 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
183 then it should be recalculated from scratch. */
184 #define INVALID_TICK (-(max_insn_queue_index + 1))
185 /* The minimal value of the INSN_TICK of an instruction. */
186 #define MIN_TICK (-max_insn_queue_index)
187
188 /* List of important notes we must keep around. This is a pointer to the
189 last element in the list. */
190 rtx note_list;
191
192 static struct spec_info_def spec_info_var;
193 /* Description of the speculative part of the scheduling.
194 If NULL - no speculation. */
195 spec_info_t spec_info = NULL;
196
197 /* True, if recovery block was added during scheduling of current block.
198 Used to determine, if we need to fix INSN_TICKs. */
199 static bool haifa_recovery_bb_recently_added_p;
200
201 /* True, if recovery block was added during this scheduling pass.
202 Used to determine if we should have empty memory pools of dependencies
203 after finishing current region. */
204 bool haifa_recovery_bb_ever_added_p;
205
206 /* Counters of different types of speculative instructions. */
207 static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
208
209 /* Array used in {unlink, restore}_bb_notes. */
210 static rtx *bb_header = 0;
211
212 /* Basic block after which recovery blocks will be created. */
213 static basic_block before_recovery;
214
215 /* Basic block just before the EXIT_BLOCK and after recovery, if we have
216 created it. */
217 basic_block after_recovery;
218
219 /* FALSE if we add bb to another region, so we don't need to initialize it. */
220 bool adding_bb_to_current_region_p = true;
221
222 /* Queues, etc. */
223
224 /* An instruction is ready to be scheduled when all insns preceding it
225 have already been scheduled. It is important to ensure that all
226 insns which use its result will not be executed until its result
227 has been computed. An insn is maintained in one of four structures:
228
229 (P) the "Pending" set of insns which cannot be scheduled until
230 their dependencies have been satisfied.
231 (Q) the "Queued" set of insns that can be scheduled when sufficient
232 time has passed.
233 (R) the "Ready" list of unscheduled, uncommitted insns.
234 (S) the "Scheduled" list of insns.
235
236 Initially, all insns are either "Pending" or "Ready" depending on
237 whether their dependencies are satisfied.
238
239 Insns move from the "Ready" list to the "Scheduled" list as they
240 are committed to the schedule. As this occurs, the insns in the
241 "Pending" list have their dependencies satisfied and move to either
242 the "Ready" list or the "Queued" set depending on whether
243 sufficient time has passed to make them ready. As time passes,
244 insns move from the "Queued" set to the "Ready" list.
245
246 The "Pending" list (P) are the insns in the INSN_FORW_DEPS of the
247 unscheduled insns, i.e., those that are ready, queued, and pending.
248 The "Queued" set (Q) is implemented by the variable `insn_queue'.
249 The "Ready" list (R) is implemented by the variables `ready' and
250 `n_ready'.
251 The "Scheduled" list (S) is the new insn chain built by this pass.
252
253 The transition (R->S) is implemented in the scheduling loop in
254 `schedule_block' when the best insn to schedule is chosen.
255 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
256 insns move from the ready list to the scheduled list.
257 The transition (Q->R) is implemented in 'queue_to_insn' as time
258 passes or stalls are introduced. */
259
260 /* Implement a circular buffer to delay instructions until sufficient
261 time has passed. For the new pipeline description interface,
262 MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
263 than maximal time of instruction execution computed by genattr.c on
264 the base maximal time of functional unit reservations and getting a
265 result. This is the longest time an insn may be queued. */
266
267 static rtx *insn_queue;
268 static int q_ptr = 0;
269 static int q_size = 0;
270 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
271 #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
272
273 #define QUEUE_SCHEDULED (-3)
274 #define QUEUE_NOWHERE (-2)
275 #define QUEUE_READY (-1)
276 /* QUEUE_SCHEDULED - INSN is scheduled.
277 QUEUE_NOWHERE - INSN isn't scheduled yet and is neither in
278 queue or ready list.
279 QUEUE_READY - INSN is in ready list.
280 N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
281
282 #define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
283
284 /* The following variable value refers for all current and future
285 reservations of the processor units. */
286 state_t curr_state;
287
288 /* The following variable value is size of memory representing all
289 current and future reservations of the processor units. */
290 size_t dfa_state_size;
291
292 /* The following array is used to find the best insn from ready when
293 the automaton pipeline interface is used. */
294 char *ready_try = NULL;
295
296 /* The ready list. */
297 struct ready_list ready = {NULL, 0, 0, 0, 0};
298
299 /* The pointer to the ready list (to be removed). */
300 static struct ready_list *readyp = &ready;
301
302 /* Scheduling clock. */
303 static int clock_var;
304
305 /* This records the actual schedule. It is built up during the main phase
306 of schedule_block, and afterwards used to reorder the insns in the RTL. */
307 static VEC(rtx, heap) *scheduled_insns;
308
309 static int may_trap_exp (const_rtx, int);
310
311 /* Nonzero iff the address is comprised from at most 1 register. */
312 #define CONST_BASED_ADDRESS_P(x) \
313 (REG_P (x) \
314 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
315 || (GET_CODE (x) == LO_SUM)) \
316 && (CONSTANT_P (XEXP (x, 0)) \
317 || CONSTANT_P (XEXP (x, 1)))))
318
319 /* Returns a class that insn with GET_DEST(insn)=x may belong to,
320 as found by analyzing insn's expression. */
321
322 \f
323 static int haifa_luid_for_non_insn (rtx x);
324
325 /* Haifa version of sched_info hooks common to all headers. */
326 const struct common_sched_info_def haifa_common_sched_info =
327 {
328 NULL, /* fix_recovery_cfg */
329 NULL, /* add_block */
330 NULL, /* estimate_number_of_insns */
331 haifa_luid_for_non_insn, /* luid_for_non_insn */
332 SCHED_PASS_UNKNOWN /* sched_pass_id */
333 };
334
335 const struct sched_scan_info_def *sched_scan_info;
336
337 /* Mapping from instruction UID to its Logical UID. */
338 VEC (int, heap) *sched_luids = NULL;
339
340 /* Next LUID to assign to an instruction. */
341 int sched_max_luid = 1;
342
343 /* Haifa Instruction Data. */
344 VEC (haifa_insn_data_def, heap) *h_i_d = NULL;
345
346 void (* sched_init_only_bb) (basic_block, basic_block);
347
348 /* Split block function. Different schedulers might use different functions
349 to handle their internal data consistent. */
350 basic_block (* sched_split_block) (basic_block, rtx);
351
352 /* Create empty basic block after the specified block. */
353 basic_block (* sched_create_empty_bb) (basic_block);
354
355 static int
356 may_trap_exp (const_rtx x, int is_store)
357 {
358 enum rtx_code code;
359
360 if (x == 0)
361 return TRAP_FREE;
362 code = GET_CODE (x);
363 if (is_store)
364 {
365 if (code == MEM && may_trap_p (x))
366 return TRAP_RISKY;
367 else
368 return TRAP_FREE;
369 }
370 if (code == MEM)
371 {
372 /* The insn uses memory: a volatile load. */
373 if (MEM_VOLATILE_P (x))
374 return IRISKY;
375 /* An exception-free load. */
376 if (!may_trap_p (x))
377 return IFREE;
378 /* A load with 1 base register, to be further checked. */
379 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
380 return PFREE_CANDIDATE;
381 /* No info on the load, to be further checked. */
382 return PRISKY_CANDIDATE;
383 }
384 else
385 {
386 const char *fmt;
387 int i, insn_class = TRAP_FREE;
388
389 /* Neither store nor load, check if it may cause a trap. */
390 if (may_trap_p (x))
391 return TRAP_RISKY;
392 /* Recursive step: walk the insn... */
393 fmt = GET_RTX_FORMAT (code);
394 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
395 {
396 if (fmt[i] == 'e')
397 {
398 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
399 insn_class = WORST_CLASS (insn_class, tmp_class);
400 }
401 else if (fmt[i] == 'E')
402 {
403 int j;
404 for (j = 0; j < XVECLEN (x, i); j++)
405 {
406 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
407 insn_class = WORST_CLASS (insn_class, tmp_class);
408 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
409 break;
410 }
411 }
412 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
413 break;
414 }
415 return insn_class;
416 }
417 }
418
419 /* Classifies rtx X of an insn for the purpose of verifying that X can be
420 executed speculatively (and consequently the insn can be moved
421 speculatively), by examining X, returning:
422 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
423 TRAP_FREE: non-load insn.
424 IFREE: load from a globally safe location.
425 IRISKY: volatile load.
426 PFREE_CANDIDATE, PRISKY_CANDIDATE: load that need to be checked for
427 being either PFREE or PRISKY. */
428
429 static int
430 haifa_classify_rtx (const_rtx x)
431 {
432 int tmp_class = TRAP_FREE;
433 int insn_class = TRAP_FREE;
434 enum rtx_code code;
435
436 if (GET_CODE (x) == PARALLEL)
437 {
438 int i, len = XVECLEN (x, 0);
439
440 for (i = len - 1; i >= 0; i--)
441 {
442 tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
443 insn_class = WORST_CLASS (insn_class, tmp_class);
444 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
445 break;
446 }
447 }
448 else
449 {
450 code = GET_CODE (x);
451 switch (code)
452 {
453 case CLOBBER:
454 /* Test if it is a 'store'. */
455 tmp_class = may_trap_exp (XEXP (x, 0), 1);
456 break;
457 case SET:
458 /* Test if it is a store. */
459 tmp_class = may_trap_exp (SET_DEST (x), 1);
460 if (tmp_class == TRAP_RISKY)
461 break;
462 /* Test if it is a load. */
463 tmp_class =
464 WORST_CLASS (tmp_class,
465 may_trap_exp (SET_SRC (x), 0));
466 break;
467 case COND_EXEC:
468 tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
469 if (tmp_class == TRAP_RISKY)
470 break;
471 tmp_class = WORST_CLASS (tmp_class,
472 may_trap_exp (COND_EXEC_TEST (x), 0));
473 break;
474 case TRAP_IF:
475 tmp_class = TRAP_RISKY;
476 break;
477 default:;
478 }
479 insn_class = tmp_class;
480 }
481
482 return insn_class;
483 }
484
485 int
486 haifa_classify_insn (const_rtx insn)
487 {
488 return haifa_classify_rtx (PATTERN (insn));
489 }
490
491 /* Forward declarations. */
492
493 static int priority (rtx);
494 static int rank_for_schedule (const void *, const void *);
495 static void swap_sort (rtx *, int);
496 static void queue_insn (rtx, int, const char *);
497 static int schedule_insn (rtx);
498 static void adjust_priority (rtx);
499 static void advance_one_cycle (void);
500 static void extend_h_i_d (void);
501
502
503 /* Notes handling mechanism:
504 =========================
505 Generally, NOTES are saved before scheduling and restored after scheduling.
506 The scheduler distinguishes between two types of notes:
507
508 (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
509 Before scheduling a region, a pointer to the note is added to the insn
510 that follows or precedes it. (This happens as part of the data dependence
511 computation). After scheduling an insn, the pointer contained in it is
512 used for regenerating the corresponding note (in reemit_notes).
513
514 (2) All other notes (e.g. INSN_DELETED): Before scheduling a block,
515 these notes are put in a list (in rm_other_notes() and
516 unlink_other_notes ()). After scheduling the block, these notes are
517 inserted at the beginning of the block (in schedule_block()). */
518
519 static void ready_add (struct ready_list *, rtx, bool);
520 static rtx ready_remove_first (struct ready_list *);
521 static rtx ready_remove_first_dispatch (struct ready_list *ready);
522
523 static void queue_to_ready (struct ready_list *);
524 static int early_queue_to_ready (state_t, struct ready_list *);
525
526 static void debug_ready_list (struct ready_list *);
527
528 /* The following functions are used to implement multi-pass scheduling
529 on the first cycle. */
530 static rtx ready_remove (struct ready_list *, int);
531 static void ready_remove_insn (rtx);
532
533 static void fix_inter_tick (rtx, rtx);
534 static int fix_tick_ready (rtx);
535 static void change_queue_index (rtx, int);
536
537 /* The following functions are used to implement scheduling of data/control
538 speculative instructions. */
539
540 static void extend_h_i_d (void);
541 static void init_h_i_d (rtx);
542 static void generate_recovery_code (rtx);
543 static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
544 static void begin_speculative_block (rtx);
545 static void add_to_speculative_block (rtx);
546 static void init_before_recovery (basic_block *);
547 static void create_check_block_twin (rtx, bool);
548 static void fix_recovery_deps (basic_block);
549 static void haifa_change_pattern (rtx, rtx);
550 static void dump_new_block_header (int, basic_block, rtx, rtx);
551 static void restore_bb_notes (basic_block);
552 static void fix_jump_move (rtx);
553 static void move_block_after_check (rtx);
554 static void move_succs (VEC(edge,gc) **, basic_block);
555 static void sched_remove_insn (rtx);
556 static void clear_priorities (rtx, rtx_vec_t *);
557 static void calc_priorities (rtx_vec_t);
558 static void add_jump_dependencies (rtx, rtx);
559 #ifdef ENABLE_CHECKING
560 static int has_edge_p (VEC(edge,gc) *, int);
561 static void check_cfg (rtx, rtx);
562 #endif
563
564 #endif /* INSN_SCHEDULING */
565 \f
566 /* Point to state used for the current scheduling pass. */
567 struct haifa_sched_info *current_sched_info;
568 \f
569 #ifndef INSN_SCHEDULING
570 void
571 schedule_insns (void)
572 {
573 }
574 #else
575
576 /* Do register pressure sensitive insn scheduling if the flag is set
577 up. */
578 bool sched_pressure_p;
579
580 /* Map regno -> its pressure class. The map defined only when
581 SCHED_PRESSURE_P is true. */
582 enum reg_class *sched_regno_pressure_class;
583
584 /* The current register pressure. Only elements corresponding pressure
585 classes are defined. */
586 static int curr_reg_pressure[N_REG_CLASSES];
587
588 /* Saved value of the previous array. */
589 static int saved_reg_pressure[N_REG_CLASSES];
590
591 /* Register living at given scheduling point. */
592 static bitmap curr_reg_live;
593
594 /* Saved value of the previous array. */
595 static bitmap saved_reg_live;
596
597 /* Registers mentioned in the current region. */
598 static bitmap region_ref_regs;
599
600 /* Initiate register pressure relative info for scheduling the current
601 region. Currently it is only clearing register mentioned in the
602 current region. */
603 void
604 sched_init_region_reg_pressure_info (void)
605 {
606 bitmap_clear (region_ref_regs);
607 }
608
609 /* Update current register pressure related info after birth (if
610 BIRTH_P) or death of register REGNO. */
611 static void
612 mark_regno_birth_or_death (int regno, bool birth_p)
613 {
614 enum reg_class pressure_class;
615
616 pressure_class = sched_regno_pressure_class[regno];
617 if (regno >= FIRST_PSEUDO_REGISTER)
618 {
619 if (pressure_class != NO_REGS)
620 {
621 if (birth_p)
622 {
623 bitmap_set_bit (curr_reg_live, regno);
624 curr_reg_pressure[pressure_class]
625 += (ira_reg_class_max_nregs
626 [pressure_class][PSEUDO_REGNO_MODE (regno)]);
627 }
628 else
629 {
630 bitmap_clear_bit (curr_reg_live, regno);
631 curr_reg_pressure[pressure_class]
632 -= (ira_reg_class_max_nregs
633 [pressure_class][PSEUDO_REGNO_MODE (regno)]);
634 }
635 }
636 }
637 else if (pressure_class != NO_REGS
638 && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
639 {
640 if (birth_p)
641 {
642 bitmap_set_bit (curr_reg_live, regno);
643 curr_reg_pressure[pressure_class]++;
644 }
645 else
646 {
647 bitmap_clear_bit (curr_reg_live, regno);
648 curr_reg_pressure[pressure_class]--;
649 }
650 }
651 }
652
653 /* Initiate current register pressure related info from living
654 registers given by LIVE. */
655 static void
656 initiate_reg_pressure_info (bitmap live)
657 {
658 int i;
659 unsigned int j;
660 bitmap_iterator bi;
661
662 for (i = 0; i < ira_pressure_classes_num; i++)
663 curr_reg_pressure[ira_pressure_classes[i]] = 0;
664 bitmap_clear (curr_reg_live);
665 EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
666 if (current_nr_blocks == 1 || bitmap_bit_p (region_ref_regs, j))
667 mark_regno_birth_or_death (j, true);
668 }
669
670 /* Mark registers in X as mentioned in the current region. */
671 static void
672 setup_ref_regs (rtx x)
673 {
674 int i, j, regno;
675 const RTX_CODE code = GET_CODE (x);
676 const char *fmt;
677
678 if (REG_P (x))
679 {
680 regno = REGNO (x);
681 if (HARD_REGISTER_NUM_P (regno))
682 bitmap_set_range (region_ref_regs, regno,
683 hard_regno_nregs[regno][GET_MODE (x)]);
684 else
685 bitmap_set_bit (region_ref_regs, REGNO (x));
686 return;
687 }
688 fmt = GET_RTX_FORMAT (code);
689 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
690 if (fmt[i] == 'e')
691 setup_ref_regs (XEXP (x, i));
692 else if (fmt[i] == 'E')
693 {
694 for (j = 0; j < XVECLEN (x, i); j++)
695 setup_ref_regs (XVECEXP (x, i, j));
696 }
697 }
698
699 /* Initiate current register pressure related info at the start of
700 basic block BB. */
701 static void
702 initiate_bb_reg_pressure_info (basic_block bb)
703 {
704 unsigned int i ATTRIBUTE_UNUSED;
705 rtx insn;
706
707 if (current_nr_blocks > 1)
708 FOR_BB_INSNS (bb, insn)
709 if (NONDEBUG_INSN_P (insn))
710 setup_ref_regs (PATTERN (insn));
711 initiate_reg_pressure_info (df_get_live_in (bb));
712 #ifdef EH_RETURN_DATA_REGNO
713 if (bb_has_eh_pred (bb))
714 for (i = 0; ; ++i)
715 {
716 unsigned int regno = EH_RETURN_DATA_REGNO (i);
717
718 if (regno == INVALID_REGNUM)
719 break;
720 if (! bitmap_bit_p (df_get_live_in (bb), regno))
721 mark_regno_birth_or_death (regno, true);
722 }
723 #endif
724 }
725
726 /* Save current register pressure related info. */
727 static void
728 save_reg_pressure (void)
729 {
730 int i;
731
732 for (i = 0; i < ira_pressure_classes_num; i++)
733 saved_reg_pressure[ira_pressure_classes[i]]
734 = curr_reg_pressure[ira_pressure_classes[i]];
735 bitmap_copy (saved_reg_live, curr_reg_live);
736 }
737
738 /* Restore saved register pressure related info. */
739 static void
740 restore_reg_pressure (void)
741 {
742 int i;
743
744 for (i = 0; i < ira_pressure_classes_num; i++)
745 curr_reg_pressure[ira_pressure_classes[i]]
746 = saved_reg_pressure[ira_pressure_classes[i]];
747 bitmap_copy (curr_reg_live, saved_reg_live);
748 }
749
750 /* Return TRUE if the register is dying after its USE. */
751 static bool
752 dying_use_p (struct reg_use_data *use)
753 {
754 struct reg_use_data *next;
755
756 for (next = use->next_regno_use; next != use; next = next->next_regno_use)
757 if (NONDEBUG_INSN_P (next->insn)
758 && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
759 return false;
760 return true;
761 }
762
763 /* Print info about the current register pressure and its excess for
764 each pressure class. */
765 static void
766 print_curr_reg_pressure (void)
767 {
768 int i;
769 enum reg_class cl;
770
771 fprintf (sched_dump, ";;\t");
772 for (i = 0; i < ira_pressure_classes_num; i++)
773 {
774 cl = ira_pressure_classes[i];
775 gcc_assert (curr_reg_pressure[cl] >= 0);
776 fprintf (sched_dump, " %s:%d(%d)", reg_class_names[cl],
777 curr_reg_pressure[cl],
778 curr_reg_pressure[cl] - ira_available_class_regs[cl]);
779 }
780 fprintf (sched_dump, "\n");
781 }
782
783 /* Pointer to the last instruction scheduled. */
784 static rtx last_scheduled_insn;
785
786 /* Pointer that iterates through the list of unscheduled insns if we
787 have a dbg_cnt enabled. It always points at an insn prior to the
788 first unscheduled one. */
789 static rtx nonscheduled_insns_begin;
790
791 /* Cached cost of the instruction. Use below function to get cost of the
792 insn. -1 here means that the field is not initialized. */
793 #define INSN_COST(INSN) (HID (INSN)->cost)
794
795 /* Compute cost of executing INSN.
796 This is the number of cycles between instruction issue and
797 instruction results. */
798 int
799 insn_cost (rtx insn)
800 {
801 int cost;
802
803 if (sel_sched_p ())
804 {
805 if (recog_memoized (insn) < 0)
806 return 0;
807
808 cost = insn_default_latency (insn);
809 if (cost < 0)
810 cost = 0;
811
812 return cost;
813 }
814
815 cost = INSN_COST (insn);
816
817 if (cost < 0)
818 {
819 /* A USE insn, or something else we don't need to
820 understand. We can't pass these directly to
821 result_ready_cost or insn_default_latency because it will
822 trigger a fatal error for unrecognizable insns. */
823 if (recog_memoized (insn) < 0)
824 {
825 INSN_COST (insn) = 0;
826 return 0;
827 }
828 else
829 {
830 cost = insn_default_latency (insn);
831 if (cost < 0)
832 cost = 0;
833
834 INSN_COST (insn) = cost;
835 }
836 }
837
838 return cost;
839 }
840
841 /* Compute cost of dependence LINK.
842 This is the number of cycles between instruction issue and
843 instruction results.
844 ??? We also use this function to call recog_memoized on all insns. */
845 int
846 dep_cost_1 (dep_t link, dw_t dw)
847 {
848 rtx insn = DEP_PRO (link);
849 rtx used = DEP_CON (link);
850 int cost;
851
852 /* A USE insn should never require the value used to be computed.
853 This allows the computation of a function's result and parameter
854 values to overlap the return and call. We don't care about the
855 the dependence cost when only decreasing register pressure. */
856 if (recog_memoized (used) < 0)
857 {
858 cost = 0;
859 recog_memoized (insn);
860 }
861 else
862 {
863 enum reg_note dep_type = DEP_TYPE (link);
864
865 cost = insn_cost (insn);
866
867 if (INSN_CODE (insn) >= 0)
868 {
869 if (dep_type == REG_DEP_ANTI)
870 cost = 0;
871 else if (dep_type == REG_DEP_OUTPUT)
872 {
873 cost = (insn_default_latency (insn)
874 - insn_default_latency (used));
875 if (cost <= 0)
876 cost = 1;
877 }
878 else if (bypass_p (insn))
879 cost = insn_latency (insn, used);
880 }
881
882
883 if (targetm.sched.adjust_cost_2)
884 cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
885 dw);
886 else if (targetm.sched.adjust_cost != NULL)
887 {
888 /* This variable is used for backward compatibility with the
889 targets. */
890 rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);
891
892 /* Make it self-cycled, so that if some tries to walk over this
893 incomplete list he/she will be caught in an endless loop. */
894 XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;
895
896 /* Targets use only REG_NOTE_KIND of the link. */
897 PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));
898
899 cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
900 insn, cost);
901
902 free_INSN_LIST_node (dep_cost_rtx_link);
903 }
904
905 if (cost < 0)
906 cost = 0;
907 }
908
909 return cost;
910 }
911
912 /* Compute cost of dependence LINK.
913 This is the number of cycles between instruction issue and
914 instruction results. */
915 int
916 dep_cost (dep_t link)
917 {
918 return dep_cost_1 (link, 0);
919 }
920
921 /* Use this sel-sched.c friendly function in reorder2 instead of increasing
922 INSN_PRIORITY explicitly. */
923 void
924 increase_insn_priority (rtx insn, int amount)
925 {
926 if (!sel_sched_p ())
927 {
928 /* We're dealing with haifa-sched.c INSN_PRIORITY. */
929 if (INSN_PRIORITY_KNOWN (insn))
930 INSN_PRIORITY (insn) += amount;
931 }
932 else
933 {
934 /* In sel-sched.c INSN_PRIORITY is not kept up to date.
935 Use EXPR_PRIORITY instead. */
936 sel_add_to_insn_priority (insn, amount);
937 }
938 }
939
940 /* Return 'true' if DEP should be included in priority calculations. */
941 static bool
942 contributes_to_priority_p (dep_t dep)
943 {
944 if (DEBUG_INSN_P (DEP_CON (dep))
945 || DEBUG_INSN_P (DEP_PRO (dep)))
946 return false;
947
948 /* Critical path is meaningful in block boundaries only. */
949 if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
950 DEP_PRO (dep)))
951 return false;
952
953 /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
954 then speculative instructions will less likely be
955 scheduled. That is because the priority of
956 their producers will increase, and, thus, the
957 producers will more likely be scheduled, thus,
958 resolving the dependence. */
959 if (sched_deps_info->generate_spec_deps
960 && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
961 && (DEP_STATUS (dep) & SPECULATIVE))
962 return false;
963
964 return true;
965 }
966
967 /* Compute the number of nondebug forward deps of an insn. */
968
969 static int
970 dep_list_size (rtx insn)
971 {
972 sd_iterator_def sd_it;
973 dep_t dep;
974 int dbgcount = 0, nodbgcount = 0;
975
976 if (!MAY_HAVE_DEBUG_INSNS)
977 return sd_lists_size (insn, SD_LIST_FORW);
978
979 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
980 {
981 if (DEBUG_INSN_P (DEP_CON (dep)))
982 dbgcount++;
983 else if (!DEBUG_INSN_P (DEP_PRO (dep)))
984 nodbgcount++;
985 }
986
987 gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, SD_LIST_FORW));
988
989 return nodbgcount;
990 }
991
992 /* Compute the priority number for INSN. */
993 static int
994 priority (rtx insn)
995 {
996 if (! INSN_P (insn))
997 return 0;
998
999 /* We should not be interested in priority of an already scheduled insn. */
1000 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
1001
1002 if (!INSN_PRIORITY_KNOWN (insn))
1003 {
1004 int this_priority = -1;
1005
1006 if (dep_list_size (insn) == 0)
1007 /* ??? We should set INSN_PRIORITY to insn_cost when and insn has
1008 some forward deps but all of them are ignored by
1009 contributes_to_priority hook. At the moment we set priority of
1010 such insn to 0. */
1011 this_priority = insn_cost (insn);
1012 else
1013 {
1014 rtx prev_first, twin;
1015 basic_block rec;
1016
1017 /* For recovery check instructions we calculate priority slightly
1018 different than that of normal instructions. Instead of walking
1019 through INSN_FORW_DEPS (check) list, we walk through
1020 INSN_FORW_DEPS list of each instruction in the corresponding
1021 recovery block. */
1022
1023 /* Selective scheduling does not define RECOVERY_BLOCK macro. */
1024 rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
1025 if (!rec || rec == EXIT_BLOCK_PTR)
1026 {
1027 prev_first = PREV_INSN (insn);
1028 twin = insn;
1029 }
1030 else
1031 {
1032 prev_first = NEXT_INSN (BB_HEAD (rec));
1033 twin = PREV_INSN (BB_END (rec));
1034 }
1035
1036 do
1037 {
1038 sd_iterator_def sd_it;
1039 dep_t dep;
1040
1041 FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
1042 {
1043 rtx next;
1044 int next_priority;
1045
1046 next = DEP_CON (dep);
1047
1048 if (BLOCK_FOR_INSN (next) != rec)
1049 {
1050 int cost;
1051
1052 if (!contributes_to_priority_p (dep))
1053 continue;
1054
1055 if (twin == insn)
1056 cost = dep_cost (dep);
1057 else
1058 {
1059 struct _dep _dep1, *dep1 = &_dep1;
1060
1061 init_dep (dep1, insn, next, REG_DEP_ANTI);
1062
1063 cost = dep_cost (dep1);
1064 }
1065
1066 next_priority = cost + priority (next);
1067
1068 if (next_priority > this_priority)
1069 this_priority = next_priority;
1070 }
1071 }
1072
1073 twin = PREV_INSN (twin);
1074 }
1075 while (twin != prev_first);
1076 }
1077
1078 if (this_priority < 0)
1079 {
1080 gcc_assert (this_priority == -1);
1081
1082 this_priority = insn_cost (insn);
1083 }
1084
1085 INSN_PRIORITY (insn) = this_priority;
1086 INSN_PRIORITY_STATUS (insn) = 1;
1087 }
1088
1089 return INSN_PRIORITY (insn);
1090 }
1091 \f
1092 /* Macros and functions for keeping the priority queue sorted, and
1093 dealing with queuing and dequeuing of instructions. */
1094
1095 #define SCHED_SORT(READY, N_READY) \
1096 do { if ((N_READY) == 2) \
1097 swap_sort (READY, N_READY); \
1098 else if ((N_READY) > 2) \
1099 qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \
1100 while (0)
1101
1102 /* Setup info about the current register pressure impact of scheduling
1103 INSN at the current scheduling point. */
1104 static void
1105 setup_insn_reg_pressure_info (rtx insn)
1106 {
1107 int i, change, before, after, hard_regno;
1108 int excess_cost_change;
1109 enum machine_mode mode;
1110 enum reg_class cl;
1111 struct reg_pressure_data *pressure_info;
1112 int *max_reg_pressure;
1113 struct reg_use_data *use;
1114 static int death[N_REG_CLASSES];
1115
1116 gcc_checking_assert (!DEBUG_INSN_P (insn));
1117
1118 excess_cost_change = 0;
1119 for (i = 0; i < ira_pressure_classes_num; i++)
1120 death[ira_pressure_classes[i]] = 0;
1121 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1122 if (dying_use_p (use))
1123 {
1124 cl = sched_regno_pressure_class[use->regno];
1125 if (use->regno < FIRST_PSEUDO_REGISTER)
1126 death[cl]++;
1127 else
1128 death[cl]
1129 += ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (use->regno)];
1130 }
1131 pressure_info = INSN_REG_PRESSURE (insn);
1132 max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
1133 gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
1134 for (i = 0; i < ira_pressure_classes_num; i++)
1135 {
1136 cl = ira_pressure_classes[i];
1137 gcc_assert (curr_reg_pressure[cl] >= 0);
1138 change = (int) pressure_info[i].set_increase - death[cl];
1139 before = MAX (0, max_reg_pressure[i] - ira_available_class_regs[cl]);
1140 after = MAX (0, max_reg_pressure[i] + change
1141 - ira_available_class_regs[cl]);
1142 hard_regno = ira_class_hard_regs[cl][0];
1143 gcc_assert (hard_regno >= 0);
1144 mode = reg_raw_mode[hard_regno];
1145 excess_cost_change += ((after - before)
1146 * (ira_memory_move_cost[mode][cl][0]
1147 + ira_memory_move_cost[mode][cl][1]));
1148 }
1149 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
1150 }
1151
1152 /* Returns a positive value if x is preferred; returns a negative value if
1153 y is preferred. Should never return 0, since that will make the sort
1154 unstable. */
1155
1156 static int
1157 rank_for_schedule (const void *x, const void *y)
1158 {
1159 rtx tmp = *(const rtx *) y;
1160 rtx tmp2 = *(const rtx *) x;
1161 rtx last;
1162 int tmp_class, tmp2_class;
1163 int val, priority_val, info_val;
1164
1165 if (MAY_HAVE_DEBUG_INSNS)
1166 {
1167 /* Schedule debug insns as early as possible. */
1168 if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
1169 return -1;
1170 else if (DEBUG_INSN_P (tmp2))
1171 return 1;
1172 }
1173
1174 /* The insn in a schedule group should be issued the first. */
1175 if (flag_sched_group_heuristic &&
1176 SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
1177 return SCHED_GROUP_P (tmp2) ? 1 : -1;
1178
1179 /* Make sure that priority of TMP and TMP2 are initialized. */
1180 gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
1181
1182 if (sched_pressure_p)
1183 {
1184 int diff;
1185
1186 /* Prefer insn whose scheduling results in the smallest register
1187 pressure excess. */
1188 if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
1189 + (INSN_TICK (tmp) > clock_var
1190 ? INSN_TICK (tmp) - clock_var : 0)
1191 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
1192 - (INSN_TICK (tmp2) > clock_var
1193 ? INSN_TICK (tmp2) - clock_var : 0))) != 0)
1194 return diff;
1195 }
1196
1197
1198 if (sched_pressure_p
1199 && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var))
1200 {
1201 if (INSN_TICK (tmp) <= clock_var)
1202 return -1;
1203 else if (INSN_TICK (tmp2) <= clock_var)
1204 return 1;
1205 else
1206 return INSN_TICK (tmp) - INSN_TICK (tmp2);
1207 }
1208 /* Prefer insn with higher priority. */
1209 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
1210
1211 if (flag_sched_critical_path_heuristic && priority_val)
1212 return priority_val;
1213
1214 /* Prefer speculative insn with greater dependencies weakness. */
1215 if (flag_sched_spec_insn_heuristic && spec_info)
1216 {
1217 ds_t ds1, ds2;
1218 dw_t dw1, dw2;
1219 int dw;
1220
1221 ds1 = TODO_SPEC (tmp) & SPECULATIVE;
1222 if (ds1)
1223 dw1 = ds_weak (ds1);
1224 else
1225 dw1 = NO_DEP_WEAK;
1226
1227 ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
1228 if (ds2)
1229 dw2 = ds_weak (ds2);
1230 else
1231 dw2 = NO_DEP_WEAK;
1232
1233 dw = dw2 - dw1;
1234 if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
1235 return dw;
1236 }
1237
1238 info_val = (*current_sched_info->rank) (tmp, tmp2);
1239 if(flag_sched_rank_heuristic && info_val)
1240 return info_val;
1241
1242 if (flag_sched_last_insn_heuristic)
1243 {
1244 int i = VEC_length (rtx, scheduled_insns);
1245 last = NULL_RTX;
1246 while (i-- > 0)
1247 {
1248 last = VEC_index (rtx, scheduled_insns, i);
1249 if (NONDEBUG_INSN_P (last))
1250 break;
1251 }
1252 }
1253
1254 /* Compare insns based on their relation to the last scheduled
1255 non-debug insn. */
1256 if (flag_sched_last_insn_heuristic && last && NONDEBUG_INSN_P (last))
1257 {
1258 dep_t dep1;
1259 dep_t dep2;
1260
1261 /* Classify the instructions into three classes:
1262 1) Data dependent on last schedule insn.
1263 2) Anti/Output dependent on last scheduled insn.
1264 3) Independent of last scheduled insn, or has latency of one.
1265 Choose the insn from the highest numbered class if different. */
1266 dep1 = sd_find_dep_between (last, tmp, true);
1267
1268 if (dep1 == NULL || dep_cost (dep1) == 1)
1269 tmp_class = 3;
1270 else if (/* Data dependence. */
1271 DEP_TYPE (dep1) == REG_DEP_TRUE)
1272 tmp_class = 1;
1273 else
1274 tmp_class = 2;
1275
1276 dep2 = sd_find_dep_between (last, tmp2, true);
1277
1278 if (dep2 == NULL || dep_cost (dep2) == 1)
1279 tmp2_class = 3;
1280 else if (/* Data dependence. */
1281 DEP_TYPE (dep2) == REG_DEP_TRUE)
1282 tmp2_class = 1;
1283 else
1284 tmp2_class = 2;
1285
1286 if ((val = tmp2_class - tmp_class))
1287 return val;
1288 }
1289
1290 /* Prefer the insn which has more later insns that depend on it.
1291 This gives the scheduler more freedom when scheduling later
1292 instructions at the expense of added register pressure. */
1293
1294 val = (dep_list_size (tmp2) - dep_list_size (tmp));
1295
1296 if (flag_sched_dep_count_heuristic && val != 0)
1297 return val;
1298
1299 /* If insns are equally good, sort by INSN_LUID (original insn order),
1300 so that we make the sort stable. This minimizes instruction movement,
1301 thus minimizing sched's effect on debugging and cross-jumping. */
1302 return INSN_LUID (tmp) - INSN_LUID (tmp2);
1303 }
1304
1305 /* Resort the array A in which only element at index N may be out of order. */
1306
1307 HAIFA_INLINE static void
1308 swap_sort (rtx *a, int n)
1309 {
1310 rtx insn = a[n - 1];
1311 int i = n - 2;
1312
1313 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
1314 {
1315 a[i + 1] = a[i];
1316 i -= 1;
1317 }
1318 a[i + 1] = insn;
1319 }
1320
1321 /* Add INSN to the insn queue so that it can be executed at least
1322 N_CYCLES after the currently executing insn. Preserve insns
1323 chain for debugging purposes. REASON will be printed in debugging
1324 output. */
1325
1326 HAIFA_INLINE static void
1327 queue_insn (rtx insn, int n_cycles, const char *reason)
1328 {
1329 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
1330 rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
1331
1332 gcc_assert (n_cycles <= max_insn_queue_index);
1333 gcc_assert (!DEBUG_INSN_P (insn));
1334
1335 insn_queue[next_q] = link;
1336 q_size += 1;
1337
1338 if (sched_verbose >= 2)
1339 {
1340 fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
1341 (*current_sched_info->print_insn) (insn, 0));
1342
1343 fprintf (sched_dump, "queued for %d cycles (%s).\n", n_cycles, reason);
1344 }
1345
1346 QUEUE_INDEX (insn) = next_q;
1347 }
1348
1349 /* Remove INSN from queue. */
1350 static void
1351 queue_remove (rtx insn)
1352 {
1353 gcc_assert (QUEUE_INDEX (insn) >= 0);
1354 remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
1355 q_size--;
1356 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
1357 }
1358
1359 /* Return a pointer to the bottom of the ready list, i.e. the insn
1360 with the lowest priority. */
1361
1362 rtx *
1363 ready_lastpos (struct ready_list *ready)
1364 {
1365 gcc_assert (ready->n_ready >= 1);
1366 return ready->vec + ready->first - ready->n_ready + 1;
1367 }
1368
1369 /* Add an element INSN to the ready list so that it ends up with the
1370 lowest/highest priority depending on FIRST_P. */
1371
1372 HAIFA_INLINE static void
1373 ready_add (struct ready_list *ready, rtx insn, bool first_p)
1374 {
1375 if (!first_p)
1376 {
1377 if (ready->first == ready->n_ready)
1378 {
1379 memmove (ready->vec + ready->veclen - ready->n_ready,
1380 ready_lastpos (ready),
1381 ready->n_ready * sizeof (rtx));
1382 ready->first = ready->veclen - 1;
1383 }
1384 ready->vec[ready->first - ready->n_ready] = insn;
1385 }
1386 else
1387 {
1388 if (ready->first == ready->veclen - 1)
1389 {
1390 if (ready->n_ready)
1391 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
1392 memmove (ready->vec + ready->veclen - ready->n_ready - 1,
1393 ready_lastpos (ready),
1394 ready->n_ready * sizeof (rtx));
1395 ready->first = ready->veclen - 2;
1396 }
1397 ready->vec[++(ready->first)] = insn;
1398 }
1399
1400 ready->n_ready++;
1401 if (DEBUG_INSN_P (insn))
1402 ready->n_debug++;
1403
1404 gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
1405 QUEUE_INDEX (insn) = QUEUE_READY;
1406 }
1407
1408 /* Remove the element with the highest priority from the ready list and
1409 return it. */
1410
1411 HAIFA_INLINE static rtx
1412 ready_remove_first (struct ready_list *ready)
1413 {
1414 rtx t;
1415
1416 gcc_assert (ready->n_ready);
1417 t = ready->vec[ready->first--];
1418 ready->n_ready--;
1419 if (DEBUG_INSN_P (t))
1420 ready->n_debug--;
1421 /* If the queue becomes empty, reset it. */
1422 if (ready->n_ready == 0)
1423 ready->first = ready->veclen - 1;
1424
1425 gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
1426 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1427
1428 return t;
1429 }
1430
1431 /* The following code implements multi-pass scheduling for the first
1432 cycle. In other words, we will try to choose ready insn which
1433 permits to start maximum number of insns on the same cycle. */
1434
1435 /* Return a pointer to the element INDEX from the ready. INDEX for
1436 insn with the highest priority is 0, and the lowest priority has
1437 N_READY - 1. */
1438
1439 rtx
1440 ready_element (struct ready_list *ready, int index)
1441 {
1442 gcc_assert (ready->n_ready && index < ready->n_ready);
1443
1444 return ready->vec[ready->first - index];
1445 }
1446
1447 /* Remove the element INDEX from the ready list and return it. INDEX
1448 for insn with the highest priority is 0, and the lowest priority
1449 has N_READY - 1. */
1450
1451 HAIFA_INLINE static rtx
1452 ready_remove (struct ready_list *ready, int index)
1453 {
1454 rtx t;
1455 int i;
1456
1457 if (index == 0)
1458 return ready_remove_first (ready);
1459 gcc_assert (ready->n_ready && index < ready->n_ready);
1460 t = ready->vec[ready->first - index];
1461 ready->n_ready--;
1462 if (DEBUG_INSN_P (t))
1463 ready->n_debug--;
1464 for (i = index; i < ready->n_ready; i++)
1465 ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
1466 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1467 return t;
1468 }
1469
1470 /* Remove INSN from the ready list. */
1471 static void
1472 ready_remove_insn (rtx insn)
1473 {
1474 int i;
1475
1476 for (i = 0; i < readyp->n_ready; i++)
1477 if (ready_element (readyp, i) == insn)
1478 {
1479 ready_remove (readyp, i);
1480 return;
1481 }
1482 gcc_unreachable ();
1483 }
1484
1485 /* Sort the ready list READY by ascending priority, using the SCHED_SORT
1486 macro. */
1487
1488 void
1489 ready_sort (struct ready_list *ready)
1490 {
1491 int i;
1492 rtx *first = ready_lastpos (ready);
1493
1494 if (sched_pressure_p)
1495 {
1496 for (i = 0; i < ready->n_ready; i++)
1497 if (!DEBUG_INSN_P (first[i]))
1498 setup_insn_reg_pressure_info (first[i]);
1499 }
1500 SCHED_SORT (first, ready->n_ready);
1501 }
1502
1503 /* PREV is an insn that is ready to execute. Adjust its priority if that
1504 will help shorten or lengthen register lifetimes as appropriate. Also
1505 provide a hook for the target to tweak itself. */
1506
1507 HAIFA_INLINE static void
1508 adjust_priority (rtx prev)
1509 {
1510 /* ??? There used to be code here to try and estimate how an insn
1511 affected register lifetimes, but it did it by looking at REG_DEAD
1512 notes, which we removed in schedule_region. Nor did it try to
1513 take into account register pressure or anything useful like that.
1514
1515 Revisit when we have a machine model to work with and not before. */
1516
1517 if (targetm.sched.adjust_priority)
1518 INSN_PRIORITY (prev) =
1519 targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
1520 }
1521
1522 /* Advance DFA state STATE on one cycle. */
1523 void
1524 advance_state (state_t state)
1525 {
1526 if (targetm.sched.dfa_pre_advance_cycle)
1527 targetm.sched.dfa_pre_advance_cycle ();
1528
1529 if (targetm.sched.dfa_pre_cycle_insn)
1530 state_transition (state,
1531 targetm.sched.dfa_pre_cycle_insn ());
1532
1533 state_transition (state, NULL);
1534
1535 if (targetm.sched.dfa_post_cycle_insn)
1536 state_transition (state,
1537 targetm.sched.dfa_post_cycle_insn ());
1538
1539 if (targetm.sched.dfa_post_advance_cycle)
1540 targetm.sched.dfa_post_advance_cycle ();
1541 }
1542
1543 /* Advance time on one cycle. */
1544 HAIFA_INLINE static void
1545 advance_one_cycle (void)
1546 {
1547 advance_state (curr_state);
1548 if (sched_verbose >= 6)
1549 fprintf (sched_dump, ";;\tAdvanced a state.\n");
1550 }
1551
1552 /* Clock at which the previous instruction was issued. */
1553 static int last_clock_var;
1554
1555 /* Update register pressure after scheduling INSN. */
1556 static void
1557 update_register_pressure (rtx insn)
1558 {
1559 struct reg_use_data *use;
1560 struct reg_set_data *set;
1561
1562 gcc_checking_assert (!DEBUG_INSN_P (insn));
1563
1564 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1565 if (dying_use_p (use) && bitmap_bit_p (curr_reg_live, use->regno))
1566 mark_regno_birth_or_death (use->regno, false);
1567 for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
1568 mark_regno_birth_or_death (set->regno, true);
1569 }
1570
1571 /* Set up or update (if UPDATE_P) max register pressure (see its
1572 meaning in sched-int.h::_haifa_insn_data) for all current BB insns
1573 after insn AFTER. */
1574 static void
1575 setup_insn_max_reg_pressure (rtx after, bool update_p)
1576 {
1577 int i, p;
1578 bool eq_p;
1579 rtx insn;
1580 static int max_reg_pressure[N_REG_CLASSES];
1581
1582 save_reg_pressure ();
1583 for (i = 0; i < ira_pressure_classes_num; i++)
1584 max_reg_pressure[ira_pressure_classes[i]]
1585 = curr_reg_pressure[ira_pressure_classes[i]];
1586 for (insn = NEXT_INSN (after);
1587 insn != NULL_RTX && ! BARRIER_P (insn)
1588 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
1589 insn = NEXT_INSN (insn))
1590 if (NONDEBUG_INSN_P (insn))
1591 {
1592 eq_p = true;
1593 for (i = 0; i < ira_pressure_classes_num; i++)
1594 {
1595 p = max_reg_pressure[ira_pressure_classes[i]];
1596 if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
1597 {
1598 eq_p = false;
1599 INSN_MAX_REG_PRESSURE (insn)[i]
1600 = max_reg_pressure[ira_pressure_classes[i]];
1601 }
1602 }
1603 if (update_p && eq_p)
1604 break;
1605 update_register_pressure (insn);
1606 for (i = 0; i < ira_pressure_classes_num; i++)
1607 if (max_reg_pressure[ira_pressure_classes[i]]
1608 < curr_reg_pressure[ira_pressure_classes[i]])
1609 max_reg_pressure[ira_pressure_classes[i]]
1610 = curr_reg_pressure[ira_pressure_classes[i]];
1611 }
1612 restore_reg_pressure ();
1613 }
1614
1615 /* Update the current register pressure after scheduling INSN. Update
1616 also max register pressure for unscheduled insns of the current
1617 BB. */
1618 static void
1619 update_reg_and_insn_max_reg_pressure (rtx insn)
1620 {
1621 int i;
1622 int before[N_REG_CLASSES];
1623
1624 for (i = 0; i < ira_pressure_classes_num; i++)
1625 before[i] = curr_reg_pressure[ira_pressure_classes[i]];
1626 update_register_pressure (insn);
1627 for (i = 0; i < ira_pressure_classes_num; i++)
1628 if (curr_reg_pressure[ira_pressure_classes[i]] != before[i])
1629 break;
1630 if (i < ira_pressure_classes_num)
1631 setup_insn_max_reg_pressure (insn, true);
1632 }
1633
1634 /* Set up register pressure at the beginning of basic block BB whose
1635 insns starting after insn AFTER. Set up also max register pressure
1636 for all insns of the basic block. */
1637 void
1638 sched_setup_bb_reg_pressure_info (basic_block bb, rtx after)
1639 {
1640 gcc_assert (sched_pressure_p);
1641 initiate_bb_reg_pressure_info (bb);
1642 setup_insn_max_reg_pressure (after, false);
1643 }
1644
1645 /* INSN is the "currently executing insn". Launch each insn which was
1646 waiting on INSN. READY is the ready list which contains the insns
1647 that are ready to fire. CLOCK is the current cycle. The function
1648 returns necessary cycle advance after issuing the insn (it is not
1649 zero for insns in a schedule group). */
1650
1651 static int
1652 schedule_insn (rtx insn)
1653 {
1654 sd_iterator_def sd_it;
1655 dep_t dep;
1656 int i;
1657 int advance = 0;
1658
1659 if (sched_verbose >= 1)
1660 {
1661 struct reg_pressure_data *pressure_info;
1662 char buf[2048];
1663
1664 print_insn (buf, insn, 0);
1665 buf[40] = 0;
1666 fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);
1667
1668 if (recog_memoized (insn) < 0)
1669 fprintf (sched_dump, "nothing");
1670 else
1671 print_reservation (sched_dump, insn);
1672 pressure_info = INSN_REG_PRESSURE (insn);
1673 if (pressure_info != NULL)
1674 {
1675 fputc (':', sched_dump);
1676 for (i = 0; i < ira_pressure_classes_num; i++)
1677 fprintf (sched_dump, "%s%+d(%d)",
1678 reg_class_names[ira_pressure_classes[i]],
1679 pressure_info[i].set_increase, pressure_info[i].change);
1680 }
1681 fputc ('\n', sched_dump);
1682 }
1683
1684 if (sched_pressure_p && !DEBUG_INSN_P (insn))
1685 update_reg_and_insn_max_reg_pressure (insn);
1686
1687 /* Scheduling instruction should have all its dependencies resolved and
1688 should have been removed from the ready list. */
1689 gcc_assert (sd_lists_empty_p (insn, SD_LIST_BACK));
1690
1691 /* Reset debug insns invalidated by moving this insn. */
1692 if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
1693 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1694 sd_iterator_cond (&sd_it, &dep);)
1695 {
1696 rtx dbg = DEP_PRO (dep);
1697 struct reg_use_data *use, *next;
1698
1699 gcc_assert (DEBUG_INSN_P (dbg));
1700
1701 if (sched_verbose >= 6)
1702 fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
1703 INSN_UID (dbg));
1704
1705 /* ??? Rather than resetting the debug insn, we might be able
1706 to emit a debug temp before the just-scheduled insn, but
1707 this would involve checking that the expression at the
1708 point of the debug insn is equivalent to the expression
1709 before the just-scheduled insn. They might not be: the
1710 expression in the debug insn may depend on other insns not
1711 yet scheduled that set MEMs, REGs or even other debug
1712 insns. It's not clear that attempting to preserve debug
1713 information in these cases is worth the effort, given how
1714 uncommon these resets are and the likelihood that the debug
1715 temps introduced won't survive the schedule change. */
1716 INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
1717 df_insn_rescan (dbg);
1718
1719 /* Unknown location doesn't use any registers. */
1720 for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
1721 {
1722 struct reg_use_data *prev = use;
1723
1724 /* Remove use from the cyclic next_regno_use chain first. */
1725 while (prev->next_regno_use != use)
1726 prev = prev->next_regno_use;
1727 prev->next_regno_use = use->next_regno_use;
1728 next = use->next_insn_use;
1729 free (use);
1730 }
1731 INSN_REG_USE_LIST (dbg) = NULL;
1732
1733 /* We delete rather than resolve these deps, otherwise we
1734 crash in sched_free_deps(), because forward deps are
1735 expected to be released before backward deps. */
1736 sd_delete_dep (sd_it);
1737 }
1738
1739 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
1740 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
1741
1742 gcc_assert (INSN_TICK (insn) >= MIN_TICK);
1743 if (INSN_TICK (insn) > clock_var)
1744 /* INSN has been prematurely moved from the queue to the ready list.
1745 This is possible only if following flag is set. */
1746 gcc_assert (flag_sched_stalled_insns);
1747
1748 /* ??? Probably, if INSN is scheduled prematurely, we should leave
1749 INSN_TICK untouched. This is a machine-dependent issue, actually. */
1750 INSN_TICK (insn) = clock_var;
1751
1752 /* Update dependent instructions. */
1753 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
1754 sd_iterator_cond (&sd_it, &dep);)
1755 {
1756 rtx next = DEP_CON (dep);
1757
1758 /* Resolve the dependence between INSN and NEXT.
1759 sd_resolve_dep () moves current dep to another list thus
1760 advancing the iterator. */
1761 sd_resolve_dep (sd_it);
1762
1763 /* Don't bother trying to mark next as ready if insn is a debug
1764 insn. If insn is the last hard dependency, it will have
1765 already been discounted. */
1766 if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
1767 continue;
1768
1769 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
1770 {
1771 int effective_cost;
1772
1773 effective_cost = try_ready (next);
1774
1775 if (effective_cost >= 0
1776 && SCHED_GROUP_P (next)
1777 && advance < effective_cost)
1778 advance = effective_cost;
1779 }
1780 else
1781 /* Check always has only one forward dependence (to the first insn in
1782 the recovery block), therefore, this will be executed only once. */
1783 {
1784 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
1785 fix_recovery_deps (RECOVERY_BLOCK (insn));
1786 }
1787 }
1788
1789 /* This is the place where scheduler doesn't *basically* need backward and
1790 forward dependencies for INSN anymore. Nevertheless they are used in
1791 heuristics in rank_for_schedule (), early_queue_to_ready () and in
1792 some targets (e.g. rs6000). Thus the earliest place where we *can*
1793 remove dependencies is after targetm.sched.finish () call in
1794 schedule_block (). But, on the other side, the safest place to remove
1795 dependencies is when we are finishing scheduling entire region. As we
1796 don't generate [many] dependencies during scheduling itself, we won't
1797 need memory until beginning of next region.
1798 Bottom line: Dependencies are removed for all insns in the end of
1799 scheduling the region. */
1800
1801 /* Annotate the instruction with issue information -- TImode
1802 indicates that the instruction is expected not to be able
1803 to issue on the same cycle as the previous insn. A machine
1804 may use this information to decide how the instruction should
1805 be aligned. */
1806 if (issue_rate > 1
1807 && GET_CODE (PATTERN (insn)) != USE
1808 && GET_CODE (PATTERN (insn)) != CLOBBER
1809 && !DEBUG_INSN_P (insn))
1810 {
1811 if (reload_completed)
1812 PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
1813 last_clock_var = clock_var;
1814 }
1815
1816 return advance;
1817 }
1818
1819 /* Functions for handling of notes. */
1820
1821 /* Add note list that ends on FROM_END to the end of TO_ENDP. */
1822 void
1823 concat_note_lists (rtx from_end, rtx *to_endp)
1824 {
1825 rtx from_start;
1826
1827 /* It's easy when have nothing to concat. */
1828 if (from_end == NULL)
1829 return;
1830
1831 /* It's also easy when destination is empty. */
1832 if (*to_endp == NULL)
1833 {
1834 *to_endp = from_end;
1835 return;
1836 }
1837
1838 from_start = from_end;
1839 while (PREV_INSN (from_start) != NULL)
1840 from_start = PREV_INSN (from_start);
1841
1842 PREV_INSN (from_start) = *to_endp;
1843 NEXT_INSN (*to_endp) = from_start;
1844 *to_endp = from_end;
1845 }
1846
1847 /* Delete notes between HEAD and TAIL and put them in the chain
1848 of notes ended by NOTE_LIST. */
1849 void
1850 remove_notes (rtx head, rtx tail)
1851 {
1852 rtx next_tail, insn, next;
1853
1854 note_list = 0;
1855 if (head == tail && !INSN_P (head))
1856 return;
1857
1858 next_tail = NEXT_INSN (tail);
1859 for (insn = head; insn != next_tail; insn = next)
1860 {
1861 next = NEXT_INSN (insn);
1862 if (!NOTE_P (insn))
1863 continue;
1864
1865 switch (NOTE_KIND (insn))
1866 {
1867 case NOTE_INSN_BASIC_BLOCK:
1868 continue;
1869
1870 case NOTE_INSN_EPILOGUE_BEG:
1871 if (insn != tail)
1872 {
1873 remove_insn (insn);
1874 add_reg_note (next, REG_SAVE_NOTE,
1875 GEN_INT (NOTE_INSN_EPILOGUE_BEG));
1876 break;
1877 }
1878 /* FALLTHRU */
1879
1880 default:
1881 remove_insn (insn);
1882
1883 /* Add the note to list that ends at NOTE_LIST. */
1884 PREV_INSN (insn) = note_list;
1885 NEXT_INSN (insn) = NULL_RTX;
1886 if (note_list)
1887 NEXT_INSN (note_list) = insn;
1888 note_list = insn;
1889 break;
1890 }
1891
1892 gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
1893 }
1894 }
1895
1896
1897 /* Return the head and tail pointers of ebb starting at BEG and ending
1898 at END. */
1899 void
1900 get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
1901 {
1902 rtx beg_head = BB_HEAD (beg);
1903 rtx beg_tail = BB_END (beg);
1904 rtx end_head = BB_HEAD (end);
1905 rtx end_tail = BB_END (end);
1906
1907 /* Don't include any notes or labels at the beginning of the BEG
1908 basic block, or notes at the end of the END basic blocks. */
1909
1910 if (LABEL_P (beg_head))
1911 beg_head = NEXT_INSN (beg_head);
1912
1913 while (beg_head != beg_tail)
1914 if (NOTE_P (beg_head))
1915 beg_head = NEXT_INSN (beg_head);
1916 else if (DEBUG_INSN_P (beg_head))
1917 {
1918 rtx note, next;
1919
1920 for (note = NEXT_INSN (beg_head);
1921 note != beg_tail;
1922 note = next)
1923 {
1924 next = NEXT_INSN (note);
1925 if (NOTE_P (note))
1926 {
1927 if (sched_verbose >= 9)
1928 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
1929
1930 reorder_insns_nobb (note, note, PREV_INSN (beg_head));
1931
1932 if (BLOCK_FOR_INSN (note) != beg)
1933 df_insn_change_bb (note, beg);
1934 }
1935 else if (!DEBUG_INSN_P (note))
1936 break;
1937 }
1938
1939 break;
1940 }
1941 else
1942 break;
1943
1944 *headp = beg_head;
1945
1946 if (beg == end)
1947 end_head = beg_head;
1948 else if (LABEL_P (end_head))
1949 end_head = NEXT_INSN (end_head);
1950
1951 while (end_head != end_tail)
1952 if (NOTE_P (end_tail))
1953 end_tail = PREV_INSN (end_tail);
1954 else if (DEBUG_INSN_P (end_tail))
1955 {
1956 rtx note, prev;
1957
1958 for (note = PREV_INSN (end_tail);
1959 note != end_head;
1960 note = prev)
1961 {
1962 prev = PREV_INSN (note);
1963 if (NOTE_P (note))
1964 {
1965 if (sched_verbose >= 9)
1966 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
1967
1968 reorder_insns_nobb (note, note, end_tail);
1969
1970 if (end_tail == BB_END (end))
1971 BB_END (end) = note;
1972
1973 if (BLOCK_FOR_INSN (note) != end)
1974 df_insn_change_bb (note, end);
1975 }
1976 else if (!DEBUG_INSN_P (note))
1977 break;
1978 }
1979
1980 break;
1981 }
1982 else
1983 break;
1984
1985 *tailp = end_tail;
1986 }
1987
1988 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
1989
1990 int
1991 no_real_insns_p (const_rtx head, const_rtx tail)
1992 {
1993 while (head != NEXT_INSN (tail))
1994 {
1995 if (!NOTE_P (head) && !LABEL_P (head))
1996 return 0;
1997 head = NEXT_INSN (head);
1998 }
1999 return 1;
2000 }
2001
2002 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
2003 previously found among the insns. Insert them just before HEAD. */
2004 rtx
2005 restore_other_notes (rtx head, basic_block head_bb)
2006 {
2007 if (note_list != 0)
2008 {
2009 rtx note_head = note_list;
2010
2011 if (head)
2012 head_bb = BLOCK_FOR_INSN (head);
2013 else
2014 head = NEXT_INSN (bb_note (head_bb));
2015
2016 while (PREV_INSN (note_head))
2017 {
2018 set_block_for_insn (note_head, head_bb);
2019 note_head = PREV_INSN (note_head);
2020 }
2021 /* In the above cycle we've missed this note. */
2022 set_block_for_insn (note_head, head_bb);
2023
2024 PREV_INSN (note_head) = PREV_INSN (head);
2025 NEXT_INSN (PREV_INSN (head)) = note_head;
2026 PREV_INSN (head) = note_list;
2027 NEXT_INSN (note_list) = head;
2028
2029 if (BLOCK_FOR_INSN (head) != head_bb)
2030 BB_END (head_bb) = note_list;
2031
2032 head = note_head;
2033 }
2034
2035 return head;
2036 }
2037
2038 /* Move insns that became ready to fire from queue to ready list. */
2039
2040 static void
2041 queue_to_ready (struct ready_list *ready)
2042 {
2043 rtx insn;
2044 rtx link;
2045 rtx skip_insn;
2046
2047 q_ptr = NEXT_Q (q_ptr);
2048
2049 if (dbg_cnt (sched_insn) == false)
2050 {
2051 /* If debug counter is activated do not requeue the first
2052 nonscheduled insn. */
2053 skip_insn = nonscheduled_insns_begin;
2054 do
2055 {
2056 skip_insn = next_nonnote_nondebug_insn (skip_insn);
2057 }
2058 while (QUEUE_INDEX (skip_insn) == QUEUE_SCHEDULED);
2059 }
2060 else
2061 skip_insn = NULL_RTX;
2062
2063 /* Add all pending insns that can be scheduled without stalls to the
2064 ready list. */
2065 for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
2066 {
2067 insn = XEXP (link, 0);
2068 q_size -= 1;
2069
2070 if (sched_verbose >= 2)
2071 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
2072 (*current_sched_info->print_insn) (insn, 0));
2073
2074 /* If the ready list is full, delay the insn for 1 cycle.
2075 See the comment in schedule_block for the rationale. */
2076 if (!reload_completed
2077 && ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
2078 && !SCHED_GROUP_P (insn)
2079 && insn != skip_insn)
2080 queue_insn (insn, 1, "ready full");
2081 else
2082 {
2083 ready_add (ready, insn, false);
2084 if (sched_verbose >= 2)
2085 fprintf (sched_dump, "moving to ready without stalls\n");
2086 }
2087 }
2088 free_INSN_LIST_list (&insn_queue[q_ptr]);
2089
2090 /* If there are no ready insns, stall until one is ready and add all
2091 of the pending insns at that point to the ready list. */
2092 if (ready->n_ready == 0)
2093 {
2094 int stalls;
2095
2096 for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
2097 {
2098 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
2099 {
2100 for (; link; link = XEXP (link, 1))
2101 {
2102 insn = XEXP (link, 0);
2103 q_size -= 1;
2104
2105 if (sched_verbose >= 2)
2106 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
2107 (*current_sched_info->print_insn) (insn, 0));
2108
2109 ready_add (ready, insn, false);
2110 if (sched_verbose >= 2)
2111 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
2112 }
2113 free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
2114
2115 advance_one_cycle ();
2116
2117 break;
2118 }
2119
2120 advance_one_cycle ();
2121 }
2122
2123 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
2124 clock_var += stalls;
2125 }
2126 }
2127
2128 /* Used by early_queue_to_ready. Determines whether it is "ok" to
2129 prematurely move INSN from the queue to the ready list. Currently,
2130 if a target defines the hook 'is_costly_dependence', this function
2131 uses the hook to check whether there exist any dependences which are
2132 considered costly by the target, between INSN and other insns that
2133 have already been scheduled. Dependences are checked up to Y cycles
2134 back, with default Y=1; The flag -fsched-stalled-insns-dep=Y allows
2135 controlling this value.
2136 (Other considerations could be taken into account instead (or in
2137 addition) depending on user flags and target hooks. */
2138
2139 static bool
2140 ok_for_early_queue_removal (rtx insn)
2141 {
2142 if (targetm.sched.is_costly_dependence)
2143 {
2144 rtx prev_insn;
2145 int n_cycles;
2146 int i = VEC_length (rtx, scheduled_insns);
2147 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
2148 {
2149 while (i-- > 0)
2150 {
2151 int cost;
2152
2153 prev_insn = VEC_index (rtx, scheduled_insns, i);
2154
2155 if (!NOTE_P (prev_insn))
2156 {
2157 dep_t dep;
2158
2159 dep = sd_find_dep_between (prev_insn, insn, true);
2160
2161 if (dep != NULL)
2162 {
2163 cost = dep_cost (dep);
2164
2165 if (targetm.sched.is_costly_dependence (dep, cost,
2166 flag_sched_stalled_insns_dep - n_cycles))
2167 return false;
2168 }
2169 }
2170
2171 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
2172 break;
2173 }
2174
2175 if (i == 0)
2176 break;
2177 }
2178 }
2179
2180 return true;
2181 }
2182
2183
2184 /* Remove insns from the queue, before they become "ready" with respect
2185 to FU latency considerations. */
2186
2187 static int
2188 early_queue_to_ready (state_t state, struct ready_list *ready)
2189 {
2190 rtx insn;
2191 rtx link;
2192 rtx next_link;
2193 rtx prev_link;
2194 bool move_to_ready;
2195 int cost;
2196 state_t temp_state = alloca (dfa_state_size);
2197 int stalls;
2198 int insns_removed = 0;
2199
2200 /*
2201 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
2202 function:
2203
2204 X == 0: There is no limit on how many queued insns can be removed
2205 prematurely. (flag_sched_stalled_insns = -1).
2206
2207 X >= 1: Only X queued insns can be removed prematurely in each
2208 invocation. (flag_sched_stalled_insns = X).
2209
2210 Otherwise: Early queue removal is disabled.
2211 (flag_sched_stalled_insns = 0)
2212 */
2213
2214 if (! flag_sched_stalled_insns)
2215 return 0;
2216
2217 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
2218 {
2219 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
2220 {
2221 if (sched_verbose > 6)
2222 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
2223
2224 prev_link = 0;
2225 while (link)
2226 {
2227 next_link = XEXP (link, 1);
2228 insn = XEXP (link, 0);
2229 if (insn && sched_verbose > 6)
2230 print_rtl_single (sched_dump, insn);
2231
2232 memcpy (temp_state, state, dfa_state_size);
2233 if (recog_memoized (insn) < 0)
2234 /* non-negative to indicate that it's not ready
2235 to avoid infinite Q->R->Q->R... */
2236 cost = 0;
2237 else
2238 cost = state_transition (temp_state, insn);
2239
2240 if (sched_verbose >= 6)
2241 fprintf (sched_dump, "transition cost = %d\n", cost);
2242
2243 move_to_ready = false;
2244 if (cost < 0)
2245 {
2246 move_to_ready = ok_for_early_queue_removal (insn);
2247 if (move_to_ready == true)
2248 {
2249 /* move from Q to R */
2250 q_size -= 1;
2251 ready_add (ready, insn, false);
2252
2253 if (prev_link)
2254 XEXP (prev_link, 1) = next_link;
2255 else
2256 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
2257
2258 free_INSN_LIST_node (link);
2259
2260 if (sched_verbose >= 2)
2261 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
2262 (*current_sched_info->print_insn) (insn, 0));
2263
2264 insns_removed++;
2265 if (insns_removed == flag_sched_stalled_insns)
2266 /* Remove no more than flag_sched_stalled_insns insns
2267 from Q at a time. */
2268 return insns_removed;
2269 }
2270 }
2271
2272 if (move_to_ready == false)
2273 prev_link = link;
2274
2275 link = next_link;
2276 } /* while link */
2277 } /* if link */
2278
2279 } /* for stalls.. */
2280
2281 return insns_removed;
2282 }
2283
2284
2285 /* Print the ready list for debugging purposes. Callable from debugger. */
2286
2287 static void
2288 debug_ready_list (struct ready_list *ready)
2289 {
2290 rtx *p;
2291 int i;
2292
2293 if (ready->n_ready == 0)
2294 {
2295 fprintf (sched_dump, "\n");
2296 return;
2297 }
2298
2299 p = ready_lastpos (ready);
2300 for (i = 0; i < ready->n_ready; i++)
2301 {
2302 fprintf (sched_dump, " %s:%d",
2303 (*current_sched_info->print_insn) (p[i], 0),
2304 INSN_LUID (p[i]));
2305 if (sched_pressure_p)
2306 fprintf (sched_dump, "(cost=%d",
2307 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
2308 if (INSN_TICK (p[i]) > clock_var)
2309 fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
2310 if (sched_pressure_p)
2311 fprintf (sched_dump, ")");
2312 }
2313 fprintf (sched_dump, "\n");
2314 }
2315
2316 /* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
2317 NOTEs. This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
2318 replaces the epilogue note in the correct basic block. */
2319 void
2320 reemit_notes (rtx insn)
2321 {
2322 rtx note, last = insn;
2323
2324 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2325 {
2326 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
2327 {
2328 enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
2329
2330 last = emit_note_before (note_type, last);
2331 remove_note (insn, note);
2332 }
2333 }
2334 }
2335
2336 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
2337 static void
2338 move_insn (rtx insn, rtx last, rtx nt)
2339 {
2340 if (PREV_INSN (insn) != last)
2341 {
2342 basic_block bb;
2343 rtx note;
2344 int jump_p = 0;
2345
2346 bb = BLOCK_FOR_INSN (insn);
2347
2348 /* BB_HEAD is either LABEL or NOTE. */
2349 gcc_assert (BB_HEAD (bb) != insn);
2350
2351 if (BB_END (bb) == insn)
2352 /* If this is last instruction in BB, move end marker one
2353 instruction up. */
2354 {
2355 /* Jumps are always placed at the end of basic block. */
2356 jump_p = control_flow_insn_p (insn);
2357
2358 gcc_assert (!jump_p
2359 || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
2360 && IS_SPECULATION_BRANCHY_CHECK_P (insn))
2361 || (common_sched_info->sched_pass_id
2362 == SCHED_EBB_PASS));
2363
2364 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
2365
2366 BB_END (bb) = PREV_INSN (insn);
2367 }
2368
2369 gcc_assert (BB_END (bb) != last);
2370
2371 if (jump_p)
2372 /* We move the block note along with jump. */
2373 {
2374 gcc_assert (nt);
2375
2376 note = NEXT_INSN (insn);
2377 while (NOTE_NOT_BB_P (note) && note != nt)
2378 note = NEXT_INSN (note);
2379
2380 if (note != nt
2381 && (LABEL_P (note)
2382 || BARRIER_P (note)))
2383 note = NEXT_INSN (note);
2384
2385 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
2386 }
2387 else
2388 note = insn;
2389
2390 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
2391 PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
2392
2393 NEXT_INSN (note) = NEXT_INSN (last);
2394 PREV_INSN (NEXT_INSN (last)) = note;
2395
2396 NEXT_INSN (last) = insn;
2397 PREV_INSN (insn) = last;
2398
2399 bb = BLOCK_FOR_INSN (last);
2400
2401 if (jump_p)
2402 {
2403 fix_jump_move (insn);
2404
2405 if (BLOCK_FOR_INSN (insn) != bb)
2406 move_block_after_check (insn);
2407
2408 gcc_assert (BB_END (bb) == last);
2409 }
2410
2411 df_insn_change_bb (insn, bb);
2412
2413 /* Update BB_END, if needed. */
2414 if (BB_END (bb) == last)
2415 BB_END (bb) = insn;
2416 }
2417
2418 SCHED_GROUP_P (insn) = 0;
2419 }
2420
2421 /* Return true if scheduling INSN will finish current clock cycle. */
2422 static bool
2423 insn_finishes_cycle_p (rtx insn)
2424 {
2425 if (SCHED_GROUP_P (insn))
2426 /* After issuing INSN, rest of the sched_group will be forced to issue
2427 in order. Don't make any plans for the rest of cycle. */
2428 return true;
2429
2430 /* Finishing the block will, apparently, finish the cycle. */
2431 if (current_sched_info->insn_finishes_block_p
2432 && current_sched_info->insn_finishes_block_p (insn))
2433 return true;
2434
2435 return false;
2436 }
2437
2438 /* Define type for target data used in multipass scheduling. */
2439 #ifndef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T
2440 # define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T int
2441 #endif
2442 typedef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T first_cycle_multipass_data_t;
2443
2444 /* The following structure describe an entry of the stack of choices. */
2445 struct choice_entry
2446 {
2447 /* Ordinal number of the issued insn in the ready queue. */
2448 int index;
2449 /* The number of the rest insns whose issues we should try. */
2450 int rest;
2451 /* The number of issued essential insns. */
2452 int n;
2453 /* State after issuing the insn. */
2454 state_t state;
2455 /* Target-specific data. */
2456 first_cycle_multipass_data_t target_data;
2457 };
2458
2459 /* The following array is used to implement a stack of choices used in
2460 function max_issue. */
2461 static struct choice_entry *choice_stack;
2462
2463 /* The following variable value is number of essential insns issued on
2464 the current cycle. An insn is essential one if it changes the
2465 processors state. */
2466 int cycle_issued_insns;
2467
2468 /* This holds the value of the target dfa_lookahead hook. */
2469 int dfa_lookahead;
2470
2471 /* The following variable value is maximal number of tries of issuing
2472 insns for the first cycle multipass insn scheduling. We define
2473 this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE). We would not
2474 need this constraint if all real insns (with non-negative codes)
2475 had reservations because in this case the algorithm complexity is
2476 O(DFA_LOOKAHEAD**ISSUE_RATE). Unfortunately, the dfa descriptions
2477 might be incomplete and such insn might occur. For such
2478 descriptions, the complexity of algorithm (without the constraint)
2479 could achieve DFA_LOOKAHEAD ** N , where N is the queue length. */
2480 static int max_lookahead_tries;
2481
2482 /* The following value is value of hook
2483 `first_cycle_multipass_dfa_lookahead' at the last call of
2484 `max_issue'. */
2485 static int cached_first_cycle_multipass_dfa_lookahead = 0;
2486
2487 /* The following value is value of `issue_rate' at the last call of
2488 `sched_init'. */
2489 static int cached_issue_rate = 0;
2490
2491 /* The following function returns maximal (or close to maximal) number
2492 of insns which can be issued on the same cycle and one of which
2493 insns is insns with the best rank (the first insn in READY). To
2494 make this function tries different samples of ready insns. READY
2495 is current queue `ready'. Global array READY_TRY reflects what
2496 insns are already issued in this try. The function stops immediately,
2497 if it reached the such a solution, that all instruction can be issued.
2498 INDEX will contain index of the best insn in READY. The following
2499 function is used only for first cycle multipass scheduling.
2500
2501 PRIVILEGED_N >= 0
2502
2503 This function expects recognized insns only. All USEs,
2504 CLOBBERs, etc must be filtered elsewhere. */
2505 int
2506 max_issue (struct ready_list *ready, int privileged_n, state_t state,
2507 bool first_cycle_insn_p, int *index)
2508 {
2509 int n, i, all, n_ready, best, delay, tries_num;
2510 int more_issue;
2511 struct choice_entry *top;
2512 rtx insn;
2513
2514 n_ready = ready->n_ready;
2515 gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
2516 && privileged_n <= n_ready);
2517
2518 /* Init MAX_LOOKAHEAD_TRIES. */
2519 if (cached_first_cycle_multipass_dfa_lookahead != dfa_lookahead)
2520 {
2521 cached_first_cycle_multipass_dfa_lookahead = dfa_lookahead;
2522 max_lookahead_tries = 100;
2523 for (i = 0; i < issue_rate; i++)
2524 max_lookahead_tries *= dfa_lookahead;
2525 }
2526
2527 /* Init max_points. */
2528 more_issue = issue_rate - cycle_issued_insns;
2529 gcc_assert (more_issue >= 0);
2530
2531 /* The number of the issued insns in the best solution. */
2532 best = 0;
2533
2534 top = choice_stack;
2535
2536 /* Set initial state of the search. */
2537 memcpy (top->state, state, dfa_state_size);
2538 top->rest = dfa_lookahead;
2539 top->n = 0;
2540 if (targetm.sched.first_cycle_multipass_begin)
2541 targetm.sched.first_cycle_multipass_begin (&top->target_data,
2542 ready_try, n_ready,
2543 first_cycle_insn_p);
2544
2545 /* Count the number of the insns to search among. */
2546 for (all = i = 0; i < n_ready; i++)
2547 if (!ready_try [i])
2548 all++;
2549
2550 /* I is the index of the insn to try next. */
2551 i = 0;
2552 tries_num = 0;
2553 for (;;)
2554 {
2555 if (/* If we've reached a dead end or searched enough of what we have
2556 been asked... */
2557 top->rest == 0
2558 /* or have nothing else to try... */
2559 || i >= n_ready
2560 /* or should not issue more. */
2561 || top->n >= more_issue)
2562 {
2563 /* ??? (... || i == n_ready). */
2564 gcc_assert (i <= n_ready);
2565
2566 /* We should not issue more than issue_rate instructions. */
2567 gcc_assert (top->n <= more_issue);
2568
2569 if (top == choice_stack)
2570 break;
2571
2572 if (best < top - choice_stack)
2573 {
2574 if (privileged_n)
2575 {
2576 n = privileged_n;
2577 /* Try to find issued privileged insn. */
2578 while (n && !ready_try[--n]);
2579 }
2580
2581 if (/* If all insns are equally good... */
2582 privileged_n == 0
2583 /* Or a privileged insn will be issued. */
2584 || ready_try[n])
2585 /* Then we have a solution. */
2586 {
2587 best = top - choice_stack;
2588 /* This is the index of the insn issued first in this
2589 solution. */
2590 *index = choice_stack [1].index;
2591 if (top->n == more_issue || best == all)
2592 break;
2593 }
2594 }
2595
2596 /* Set ready-list index to point to the last insn
2597 ('i++' below will advance it to the next insn). */
2598 i = top->index;
2599
2600 /* Backtrack. */
2601 ready_try [i] = 0;
2602
2603 if (targetm.sched.first_cycle_multipass_backtrack)
2604 targetm.sched.first_cycle_multipass_backtrack (&top->target_data,
2605 ready_try, n_ready);
2606
2607 top--;
2608 memcpy (state, top->state, dfa_state_size);
2609 }
2610 else if (!ready_try [i])
2611 {
2612 tries_num++;
2613 if (tries_num > max_lookahead_tries)
2614 break;
2615 insn = ready_element (ready, i);
2616 delay = state_transition (state, insn);
2617 if (delay < 0)
2618 {
2619 if (state_dead_lock_p (state)
2620 || insn_finishes_cycle_p (insn))
2621 /* We won't issue any more instructions in the next
2622 choice_state. */
2623 top->rest = 0;
2624 else
2625 top->rest--;
2626
2627 n = top->n;
2628 if (memcmp (top->state, state, dfa_state_size) != 0)
2629 n++;
2630
2631 /* Advance to the next choice_entry. */
2632 top++;
2633 /* Initialize it. */
2634 top->rest = dfa_lookahead;
2635 top->index = i;
2636 top->n = n;
2637 memcpy (top->state, state, dfa_state_size);
2638 ready_try [i] = 1;
2639
2640 if (targetm.sched.first_cycle_multipass_issue)
2641 targetm.sched.first_cycle_multipass_issue (&top->target_data,
2642 ready_try, n_ready,
2643 insn,
2644 &((top - 1)
2645 ->target_data));
2646
2647 i = -1;
2648 }
2649 }
2650
2651 /* Increase ready-list index. */
2652 i++;
2653 }
2654
2655 if (targetm.sched.first_cycle_multipass_end)
2656 targetm.sched.first_cycle_multipass_end (best != 0
2657 ? &choice_stack[1].target_data
2658 : NULL);
2659
2660 /* Restore the original state of the DFA. */
2661 memcpy (state, choice_stack->state, dfa_state_size);
2662
2663 return best;
2664 }
2665
2666 /* The following function chooses insn from READY and modifies
2667 READY. The following function is used only for first
2668 cycle multipass scheduling.
2669 Return:
2670 -1 if cycle should be advanced,
2671 0 if INSN_PTR is set to point to the desirable insn,
2672 1 if choose_ready () should be restarted without advancing the cycle. */
2673 static int
2674 choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
2675 rtx *insn_ptr)
2676 {
2677 int lookahead;
2678
2679 if (dbg_cnt (sched_insn) == false)
2680 {
2681 rtx insn = nonscheduled_insns_begin;
2682 do
2683 {
2684 insn = next_nonnote_insn (insn);
2685 }
2686 while (QUEUE_INDEX (insn) == QUEUE_SCHEDULED);
2687
2688 if (QUEUE_INDEX (insn) == QUEUE_READY)
2689 /* INSN is in the ready_list. */
2690 {
2691 nonscheduled_insns_begin = insn;
2692 ready_remove_insn (insn);
2693 *insn_ptr = insn;
2694 return 0;
2695 }
2696
2697 /* INSN is in the queue. Advance cycle to move it to the ready list. */
2698 return -1;
2699 }
2700
2701 lookahead = 0;
2702
2703 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
2704 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
2705 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
2706 || DEBUG_INSN_P (ready_element (ready, 0)))
2707 {
2708 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
2709 *insn_ptr = ready_remove_first_dispatch (ready);
2710 else
2711 *insn_ptr = ready_remove_first (ready);
2712
2713 return 0;
2714 }
2715 else
2716 {
2717 /* Try to choose the better insn. */
2718 int index = 0, i, n;
2719 rtx insn;
2720 int try_data = 1, try_control = 1;
2721 ds_t ts;
2722
2723 insn = ready_element (ready, 0);
2724 if (INSN_CODE (insn) < 0)
2725 {
2726 *insn_ptr = ready_remove_first (ready);
2727 return 0;
2728 }
2729
2730 if (spec_info
2731 && spec_info->flags & (PREFER_NON_DATA_SPEC
2732 | PREFER_NON_CONTROL_SPEC))
2733 {
2734 for (i = 0, n = ready->n_ready; i < n; i++)
2735 {
2736 rtx x;
2737 ds_t s;
2738
2739 x = ready_element (ready, i);
2740 s = TODO_SPEC (x);
2741
2742 if (spec_info->flags & PREFER_NON_DATA_SPEC
2743 && !(s & DATA_SPEC))
2744 {
2745 try_data = 0;
2746 if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
2747 || !try_control)
2748 break;
2749 }
2750
2751 if (spec_info->flags & PREFER_NON_CONTROL_SPEC
2752 && !(s & CONTROL_SPEC))
2753 {
2754 try_control = 0;
2755 if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
2756 break;
2757 }
2758 }
2759 }
2760
2761 ts = TODO_SPEC (insn);
2762 if ((ts & SPECULATIVE)
2763 && (((!try_data && (ts & DATA_SPEC))
2764 || (!try_control && (ts & CONTROL_SPEC)))
2765 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2766 && !targetm.sched
2767 .first_cycle_multipass_dfa_lookahead_guard_spec (insn))))
2768 /* Discard speculative instruction that stands first in the ready
2769 list. */
2770 {
2771 change_queue_index (insn, 1);
2772 return 1;
2773 }
2774
2775 ready_try[0] = 0;
2776
2777 for (i = 1; i < ready->n_ready; i++)
2778 {
2779 insn = ready_element (ready, i);
2780
2781 ready_try [i]
2782 = ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2783 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC)));
2784 }
2785
2786 /* Let the target filter the search space. */
2787 for (i = 1; i < ready->n_ready; i++)
2788 if (!ready_try[i])
2789 {
2790 insn = ready_element (ready, i);
2791
2792 /* If this insn is recognizable we should have already
2793 recognized it earlier.
2794 ??? Not very clear where this is supposed to be done.
2795 See dep_cost_1. */
2796 gcc_checking_assert (INSN_CODE (insn) >= 0
2797 || recog_memoized (insn) < 0);
2798
2799 ready_try [i]
2800 = (/* INSN_CODE check can be omitted here as it is also done later
2801 in max_issue (). */
2802 INSN_CODE (insn) < 0
2803 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2804 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2805 (insn)));
2806 }
2807
2808 if (max_issue (ready, 1, curr_state, first_cycle_insn_p, &index) == 0)
2809 {
2810 *insn_ptr = ready_remove_first (ready);
2811 if (sched_verbose >= 4)
2812 fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
2813 (*current_sched_info->print_insn) (*insn_ptr, 0));
2814 return 0;
2815 }
2816 else
2817 {
2818 if (sched_verbose >= 4)
2819 fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
2820 (*current_sched_info->print_insn)
2821 (ready_element (ready, index), 0));
2822
2823 *insn_ptr = ready_remove (ready, index);
2824 return 0;
2825 }
2826 }
2827 }
2828
2829 /* This function is called when we have successfully scheduled a
2830 block. It uses the schedule stored in the scheduled_insns vector
2831 to rearrange the RTL. PREV_HEAD is used as the anchor to which we
2832 append the scheduled insns; TAIL is the insn after the scheduled
2833 block. TARGET_BB is the argument passed to schedule_block. */
2834
2835 static void
2836 commit_schedule (rtx prev_head, rtx tail, basic_block *target_bb)
2837 {
2838 unsigned int i;
2839 rtx insn;
2840
2841 last_scheduled_insn = prev_head;
2842 for (i = 0;
2843 VEC_iterate (rtx, scheduled_insns, i, insn);
2844 i++)
2845 {
2846 if (control_flow_insn_p (last_scheduled_insn)
2847 || current_sched_info->advance_target_bb (*target_bb, insn))
2848 {
2849 *target_bb = current_sched_info->advance_target_bb (*target_bb, 0);
2850
2851 if (sched_verbose)
2852 {
2853 rtx x;
2854
2855 x = next_real_insn (last_scheduled_insn);
2856 gcc_assert (x);
2857 dump_new_block_header (1, *target_bb, x, tail);
2858 }
2859
2860 last_scheduled_insn = bb_note (*target_bb);
2861 }
2862
2863 if (current_sched_info->begin_move_insn)
2864 (*current_sched_info->begin_move_insn) (insn, last_scheduled_insn);
2865 move_insn (insn, last_scheduled_insn,
2866 current_sched_info->next_tail);
2867 if (!DEBUG_INSN_P (insn))
2868 reemit_notes (insn);
2869 last_scheduled_insn = insn;
2870 }
2871
2872 VEC_truncate (rtx, scheduled_insns, 0);
2873 }
2874
2875 /* Examine all insns on the ready list and queue those which can't be
2876 issued in this cycle. TEMP_STATE is temporary scheduler state we
2877 can use as scratch space. If FIRST_CYCLE_INSN_P is true, no insns
2878 have been issued for the current cycle, which means it is valid to
2879 issue an asm statement. */
2880
2881 static void
2882 prune_ready_list (state_t temp_state, bool first_cycle_insn_p)
2883 {
2884 int i;
2885
2886 restart:
2887 for (i = 0; i < ready.n_ready; i++)
2888 {
2889 rtx insn = ready_element (&ready, i);
2890 int cost = 0;
2891 const char *reason = "resource conflict";
2892
2893 if (recog_memoized (insn) < 0)
2894 {
2895 if (!first_cycle_insn_p
2896 && (GET_CODE (PATTERN (insn)) == ASM_INPUT
2897 || asm_noperands (PATTERN (insn)) >= 0))
2898 cost = 1;
2899 reason = "asm";
2900 }
2901 else if (flag_sched_pressure)
2902 cost = 0;
2903 else
2904 {
2905 memcpy (temp_state, curr_state, dfa_state_size);
2906 cost = state_transition (temp_state, insn);
2907 if (cost < 0)
2908 cost = 0;
2909 else if (cost == 0)
2910 cost = 1;
2911 }
2912 if (cost >= 1)
2913 {
2914 ready_remove (&ready, i);
2915 queue_insn (insn, cost, reason);
2916 goto restart;
2917 }
2918 }
2919 }
2920
2921 /* Use forward list scheduling to rearrange insns of block pointed to by
2922 TARGET_BB, possibly bringing insns from subsequent blocks in the same
2923 region. */
2924
2925 void
2926 schedule_block (basic_block *target_bb)
2927 {
2928 int i;
2929 bool first_cycle_insn_p;
2930 int can_issue_more;
2931 state_t temp_state = NULL; /* It is used for multipass scheduling. */
2932 int sort_p, advance, start_clock_var;
2933
2934 /* Head/tail info for this block. */
2935 rtx prev_head = current_sched_info->prev_head;
2936 rtx next_tail = current_sched_info->next_tail;
2937 rtx head = NEXT_INSN (prev_head);
2938 rtx tail = PREV_INSN (next_tail);
2939
2940 /* We used to have code to avoid getting parameters moved from hard
2941 argument registers into pseudos.
2942
2943 However, it was removed when it proved to be of marginal benefit
2944 and caused problems because schedule_block and compute_forward_dependences
2945 had different notions of what the "head" insn was. */
2946
2947 gcc_assert (head != tail || INSN_P (head));
2948
2949 haifa_recovery_bb_recently_added_p = false;
2950
2951 /* Debug info. */
2952 if (sched_verbose)
2953 dump_new_block_header (0, *target_bb, head, tail);
2954
2955 state_reset (curr_state);
2956
2957 /* Clear the ready list. */
2958 ready.first = ready.veclen - 1;
2959 ready.n_ready = 0;
2960 ready.n_debug = 0;
2961
2962 /* It is used for first cycle multipass scheduling. */
2963 temp_state = alloca (dfa_state_size);
2964
2965 if (targetm.sched.init)
2966 targetm.sched.init (sched_dump, sched_verbose, ready.veclen);
2967
2968 /* We start inserting insns after PREV_HEAD. */
2969 last_scheduled_insn = nonscheduled_insns_begin = prev_head;
2970
2971 gcc_assert ((NOTE_P (last_scheduled_insn)
2972 || DEBUG_INSN_P (last_scheduled_insn))
2973 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
2974
2975 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
2976 queue. */
2977 q_ptr = 0;
2978 q_size = 0;
2979
2980 insn_queue = XALLOCAVEC (rtx, max_insn_queue_index + 1);
2981 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
2982
2983 /* Start just before the beginning of time. */
2984 clock_var = -1;
2985
2986 /* We need queue and ready lists and clock_var be initialized
2987 in try_ready () (which is called through init_ready_list ()). */
2988 (*current_sched_info->init_ready_list) ();
2989
2990 /* The algorithm is O(n^2) in the number of ready insns at any given
2991 time in the worst case. Before reload we are more likely to have
2992 big lists so truncate them to a reasonable size. */
2993 if (!reload_completed
2994 && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
2995 {
2996 ready_sort (&ready);
2997
2998 /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
2999 If there are debug insns, we know they're first. */
3000 for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
3001 if (!SCHED_GROUP_P (ready_element (&ready, i)))
3002 break;
3003
3004 if (sched_verbose >= 2)
3005 {
3006 fprintf (sched_dump,
3007 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
3008 fprintf (sched_dump,
3009 ";;\t\t before reload => truncated to %d insns\n", i);
3010 }
3011
3012 /* Delay all insns past it for 1 cycle. If debug counter is
3013 activated make an exception for the insn right after
3014 nonscheduled_insns_begin. */
3015 {
3016 rtx skip_insn;
3017
3018 if (dbg_cnt (sched_insn) == false)
3019 skip_insn = next_nonnote_insn (nonscheduled_insns_begin);
3020 else
3021 skip_insn = NULL_RTX;
3022
3023 while (i < ready.n_ready)
3024 {
3025 rtx insn;
3026
3027 insn = ready_remove (&ready, i);
3028
3029 if (insn != skip_insn)
3030 queue_insn (insn, 1, "list truncated");
3031 }
3032 if (skip_insn)
3033 ready_add (&ready, skip_insn, true);
3034 }
3035 }
3036
3037 /* Now we can restore basic block notes and maintain precise cfg. */
3038 restore_bb_notes (*target_bb);
3039
3040 last_clock_var = -1;
3041
3042 advance = 0;
3043
3044 gcc_assert (VEC_length (rtx, scheduled_insns) == 0);
3045 sort_p = TRUE;
3046 /* Loop until all the insns in BB are scheduled. */
3047 while ((*current_sched_info->schedule_more_p) ())
3048 {
3049 do
3050 {
3051 start_clock_var = clock_var;
3052
3053 clock_var++;
3054
3055 advance_one_cycle ();
3056
3057 /* Add to the ready list all pending insns that can be issued now.
3058 If there are no ready insns, increment clock until one
3059 is ready and add all pending insns at that point to the ready
3060 list. */
3061 queue_to_ready (&ready);
3062
3063 gcc_assert (ready.n_ready);
3064
3065 if (sched_verbose >= 2)
3066 {
3067 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
3068 debug_ready_list (&ready);
3069 }
3070 advance -= clock_var - start_clock_var;
3071 }
3072 while (advance > 0);
3073
3074 prune_ready_list (temp_state, true);
3075 if (ready.n_ready == 0)
3076 continue;
3077
3078 if (sort_p)
3079 {
3080 /* Sort the ready list based on priority. */
3081 ready_sort (&ready);
3082
3083 if (sched_verbose >= 2)
3084 {
3085 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
3086 debug_ready_list (&ready);
3087 }
3088 }
3089
3090 /* We don't want md sched reorder to even see debug isns, so put
3091 them out right away. */
3092 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
3093 {
3094 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
3095 {
3096 rtx insn = ready_remove_first (&ready);
3097 gcc_assert (DEBUG_INSN_P (insn));
3098 (*current_sched_info->begin_schedule_ready) (insn);
3099 VEC_safe_push (rtx, heap, scheduled_insns, insn);
3100 last_scheduled_insn = insn;
3101 advance = schedule_insn (insn);
3102 gcc_assert (advance == 0);
3103 if (ready.n_ready > 0)
3104 ready_sort (&ready);
3105 }
3106
3107 if (!ready.n_ready)
3108 continue;
3109 }
3110
3111 /* Allow the target to reorder the list, typically for
3112 better instruction bundling. */
3113 if (sort_p && targetm.sched.reorder
3114 && (ready.n_ready == 0
3115 || !SCHED_GROUP_P (ready_element (&ready, 0))))
3116 can_issue_more =
3117 targetm.sched.reorder (sched_dump, sched_verbose,
3118 ready_lastpos (&ready),
3119 &ready.n_ready, clock_var);
3120 else
3121 can_issue_more = issue_rate;
3122
3123 first_cycle_insn_p = true;
3124 cycle_issued_insns = 0;
3125 for (;;)
3126 {
3127 rtx insn;
3128 int cost;
3129 bool asm_p = false;
3130
3131 if (sched_verbose >= 2)
3132 {
3133 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
3134 clock_var);
3135 debug_ready_list (&ready);
3136 if (sched_pressure_p)
3137 print_curr_reg_pressure ();
3138 }
3139
3140 if (ready.n_ready == 0
3141 && can_issue_more
3142 && reload_completed)
3143 {
3144 /* Allow scheduling insns directly from the queue in case
3145 there's nothing better to do (ready list is empty) but
3146 there are still vacant dispatch slots in the current cycle. */
3147 if (sched_verbose >= 6)
3148 fprintf (sched_dump,";;\t\tSecond chance\n");
3149 memcpy (temp_state, curr_state, dfa_state_size);
3150 if (early_queue_to_ready (temp_state, &ready))
3151 ready_sort (&ready);
3152 }
3153
3154 if (ready.n_ready == 0
3155 || !can_issue_more
3156 || state_dead_lock_p (curr_state)
3157 || !(*current_sched_info->schedule_more_p) ())
3158 break;
3159
3160 /* Select and remove the insn from the ready list. */
3161 if (sort_p)
3162 {
3163 int res;
3164
3165 insn = NULL_RTX;
3166 res = choose_ready (&ready, first_cycle_insn_p, &insn);
3167
3168 if (res < 0)
3169 /* Finish cycle. */
3170 break;
3171 if (res > 0)
3172 /* Restart choose_ready (). */
3173 continue;
3174
3175 gcc_assert (insn != NULL_RTX);
3176 }
3177 else
3178 insn = ready_remove_first (&ready);
3179
3180 if (sched_pressure_p && INSN_TICK (insn) > clock_var)
3181 {
3182 ready_add (&ready, insn, true);
3183 advance = 1;
3184 break;
3185 }
3186
3187 if (targetm.sched.dfa_new_cycle
3188 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
3189 insn, last_clock_var,
3190 clock_var, &sort_p))
3191 /* SORT_P is used by the target to override sorting
3192 of the ready list. This is needed when the target
3193 has modified its internal structures expecting that
3194 the insn will be issued next. As we need the insn
3195 to have the highest priority (so it will be returned by
3196 the ready_remove_first call above), we invoke
3197 ready_add (&ready, insn, true).
3198 But, still, there is one issue: INSN can be later
3199 discarded by scheduler's front end through
3200 current_sched_info->can_schedule_ready_p, hence, won't
3201 be issued next. */
3202 {
3203 ready_add (&ready, insn, true);
3204 break;
3205 }
3206
3207 sort_p = TRUE;
3208
3209 if (current_sched_info->can_schedule_ready_p
3210 && ! (*current_sched_info->can_schedule_ready_p) (insn))
3211 /* We normally get here only if we don't want to move
3212 insn from the split block. */
3213 {
3214 TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
3215 continue;
3216 }
3217
3218 /* DECISION is made. */
3219
3220 if (TODO_SPEC (insn) & SPECULATIVE)
3221 generate_recovery_code (insn);
3222
3223 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
3224 targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);
3225
3226 /* Update counters, etc in the scheduler's front end. */
3227 (*current_sched_info->begin_schedule_ready) (insn);
3228 VEC_safe_push (rtx, heap, scheduled_insns, insn);
3229 last_scheduled_insn = insn;
3230
3231 if (recog_memoized (insn) >= 0)
3232 {
3233 cost = state_transition (curr_state, insn);
3234 if (!flag_sched_pressure)
3235 gcc_assert (cost < 0);
3236 cycle_issued_insns++;
3237 asm_p = false;
3238 }
3239 else
3240 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
3241 || asm_noperands (PATTERN (insn)) >= 0);
3242
3243 if (targetm.sched.variable_issue)
3244 can_issue_more =
3245 targetm.sched.variable_issue (sched_dump, sched_verbose,
3246 insn, can_issue_more);
3247 /* A naked CLOBBER or USE generates no instruction, so do
3248 not count them against the issue rate. */
3249 else if (GET_CODE (PATTERN (insn)) != USE
3250 && GET_CODE (PATTERN (insn)) != CLOBBER)
3251 can_issue_more--;
3252 advance = schedule_insn (insn);
3253
3254 /* After issuing an asm insn we should start a new cycle. */
3255 if (advance == 0 && asm_p)
3256 advance = 1;
3257 if (advance != 0)
3258 break;
3259
3260 first_cycle_insn_p = false;
3261
3262 if (ready.n_ready > 0)
3263 prune_ready_list (temp_state, false);
3264
3265 /* Sort the ready list based on priority. This must be
3266 redone here, as schedule_insn may have readied additional
3267 insns that will not be sorted correctly. */
3268 if (ready.n_ready > 0)
3269 ready_sort (&ready);
3270
3271 /* Quickly go through debug insns such that md sched
3272 reorder2 doesn't have to deal with debug insns. */
3273 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
3274 && (*current_sched_info->schedule_more_p) ())
3275 {
3276 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
3277 {
3278 insn = ready_remove_first (&ready);
3279 gcc_assert (DEBUG_INSN_P (insn));
3280 (*current_sched_info->begin_schedule_ready) (insn);
3281 VEC_safe_push (rtx, heap, scheduled_insns, insn);
3282 advance = schedule_insn (insn);
3283 last_scheduled_insn = insn;
3284 gcc_assert (advance == 0);
3285 if (ready.n_ready > 0)
3286 ready_sort (&ready);
3287 }
3288 }
3289
3290 if (targetm.sched.reorder2
3291 && (ready.n_ready == 0
3292 || !SCHED_GROUP_P (ready_element (&ready, 0))))
3293 {
3294 can_issue_more =
3295 targetm.sched.reorder2 (sched_dump, sched_verbose,
3296 ready.n_ready
3297 ? ready_lastpos (&ready) : NULL,
3298 &ready.n_ready, clock_var);
3299 }
3300 }
3301 }
3302
3303 /* Debug info. */
3304 if (sched_verbose)
3305 {
3306 fprintf (sched_dump, ";;\tReady list (final): ");
3307 debug_ready_list (&ready);
3308 }
3309
3310 if (current_sched_info->queue_must_finish_empty)
3311 /* Sanity check -- queue must be empty now. Meaningless if region has
3312 multiple bbs. */
3313 gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
3314 else
3315 {
3316 /* We must maintain QUEUE_INDEX between blocks in region. */
3317 for (i = ready.n_ready - 1; i >= 0; i--)
3318 {
3319 rtx x;
3320
3321 x = ready_element (&ready, i);
3322 QUEUE_INDEX (x) = QUEUE_NOWHERE;
3323 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
3324 }
3325
3326 if (q_size)
3327 for (i = 0; i <= max_insn_queue_index; i++)
3328 {
3329 rtx link;
3330 for (link = insn_queue[i]; link; link = XEXP (link, 1))
3331 {
3332 rtx x;
3333
3334 x = XEXP (link, 0);
3335 QUEUE_INDEX (x) = QUEUE_NOWHERE;
3336 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
3337 }
3338 free_INSN_LIST_list (&insn_queue[i]);
3339 }
3340 }
3341
3342 commit_schedule (prev_head, tail, target_bb);
3343 if (sched_verbose)
3344 fprintf (sched_dump, ";; total time = %d\n", clock_var);
3345
3346 if (!current_sched_info->queue_must_finish_empty
3347 || haifa_recovery_bb_recently_added_p)
3348 {
3349 /* INSN_TICK (minimum clock tick at which the insn becomes
3350 ready) may be not correct for the insn in the subsequent
3351 blocks of the region. We should use a correct value of
3352 `clock_var' or modify INSN_TICK. It is better to keep
3353 clock_var value equal to 0 at the start of a basic block.
3354 Therefore we modify INSN_TICK here. */
3355 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
3356 }
3357
3358 if (targetm.sched.finish)
3359 {
3360 targetm.sched.finish (sched_dump, sched_verbose);
3361 /* Target might have added some instructions to the scheduled block
3362 in its md_finish () hook. These new insns don't have any data
3363 initialized and to identify them we extend h_i_d so that they'll
3364 get zero luids. */
3365 sched_init_luids (NULL, NULL, NULL, NULL);
3366 }
3367
3368 if (sched_verbose)
3369 fprintf (sched_dump, ";; new head = %d\n;; new tail = %d\n\n",
3370 INSN_UID (head), INSN_UID (tail));
3371
3372 /* Update head/tail boundaries. */
3373 head = NEXT_INSN (prev_head);
3374 tail = last_scheduled_insn;
3375
3376 head = restore_other_notes (head, NULL);
3377
3378 current_sched_info->head = head;
3379 current_sched_info->tail = tail;
3380 }
3381 \f
3382 /* Set_priorities: compute priority of each insn in the block. */
3383
3384 int
3385 set_priorities (rtx head, rtx tail)
3386 {
3387 rtx insn;
3388 int n_insn;
3389 int sched_max_insns_priority =
3390 current_sched_info->sched_max_insns_priority;
3391 rtx prev_head;
3392
3393 if (head == tail && ! INSN_P (head))
3394 gcc_unreachable ();
3395
3396 n_insn = 0;
3397
3398 prev_head = PREV_INSN (head);
3399 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
3400 {
3401 if (!INSN_P (insn))
3402 continue;
3403
3404 n_insn++;
3405 (void) priority (insn);
3406
3407 gcc_assert (INSN_PRIORITY_KNOWN (insn));
3408
3409 sched_max_insns_priority = MAX (sched_max_insns_priority,
3410 INSN_PRIORITY (insn));
3411 }
3412
3413 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
3414
3415 return n_insn;
3416 }
3417
3418 /* Set dump and sched_verbose for the desired debugging output. If no
3419 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
3420 For -fsched-verbose=N, N>=10, print everything to stderr. */
3421 void
3422 setup_sched_dump (void)
3423 {
3424 sched_verbose = sched_verbose_param;
3425 if (sched_verbose_param == 0 && dump_file)
3426 sched_verbose = 1;
3427 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
3428 ? stderr : dump_file);
3429 }
3430
3431 /* Initialize some global state for the scheduler. This function works
3432 with the common data shared between all the schedulers. It is called
3433 from the scheduler specific initialization routine. */
3434
3435 void
3436 sched_init (void)
3437 {
3438 /* Disable speculative loads in their presence if cc0 defined. */
3439 #ifdef HAVE_cc0
3440 flag_schedule_speculative_load = 0;
3441 #endif
3442
3443 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
3444 targetm.sched.dispatch_do (NULL_RTX, DISPATCH_INIT);
3445
3446 sched_pressure_p = (flag_sched_pressure && ! reload_completed
3447 && common_sched_info->sched_pass_id == SCHED_RGN_PASS);
3448
3449 if (sched_pressure_p)
3450 ira_setup_eliminable_regset ();
3451
3452 /* Initialize SPEC_INFO. */
3453 if (targetm.sched.set_sched_flags)
3454 {
3455 spec_info = &spec_info_var;
3456 targetm.sched.set_sched_flags (spec_info);
3457
3458 if (spec_info->mask != 0)
3459 {
3460 spec_info->data_weakness_cutoff =
3461 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
3462 spec_info->control_weakness_cutoff =
3463 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
3464 * REG_BR_PROB_BASE) / 100;
3465 }
3466 else
3467 /* So we won't read anything accidentally. */
3468 spec_info = NULL;
3469
3470 }
3471 else
3472 /* So we won't read anything accidentally. */
3473 spec_info = 0;
3474
3475 /* Initialize issue_rate. */
3476 if (targetm.sched.issue_rate)
3477 issue_rate = targetm.sched.issue_rate ();
3478 else
3479 issue_rate = 1;
3480
3481 if (cached_issue_rate != issue_rate)
3482 {
3483 cached_issue_rate = issue_rate;
3484 /* To invalidate max_lookahead_tries: */
3485 cached_first_cycle_multipass_dfa_lookahead = 0;
3486 }
3487
3488 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
3489 dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
3490 else
3491 dfa_lookahead = 0;
3492
3493 if (targetm.sched.init_dfa_pre_cycle_insn)
3494 targetm.sched.init_dfa_pre_cycle_insn ();
3495
3496 if (targetm.sched.init_dfa_post_cycle_insn)
3497 targetm.sched.init_dfa_post_cycle_insn ();
3498
3499 dfa_start ();
3500 dfa_state_size = state_size ();
3501
3502 init_alias_analysis ();
3503
3504 df_set_flags (DF_LR_RUN_DCE);
3505 df_note_add_problem ();
3506
3507 /* More problems needed for interloop dep calculation in SMS. */
3508 if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
3509 {
3510 df_rd_add_problem ();
3511 df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
3512 }
3513
3514 df_analyze ();
3515
3516 /* Do not run DCE after reload, as this can kill nops inserted
3517 by bundling. */
3518 if (reload_completed)
3519 df_clear_flags (DF_LR_RUN_DCE);
3520
3521 regstat_compute_calls_crossed ();
3522
3523 if (targetm.sched.init_global)
3524 targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);
3525
3526 if (sched_pressure_p)
3527 {
3528 int i, max_regno = max_reg_num ();
3529
3530 ira_set_pseudo_classes (sched_verbose ? sched_dump : NULL);
3531 sched_regno_pressure_class
3532 = (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
3533 for (i = 0; i < max_regno; i++)
3534 sched_regno_pressure_class[i]
3535 = (i < FIRST_PSEUDO_REGISTER
3536 ? ira_pressure_class_translate[REGNO_REG_CLASS (i)]
3537 : ira_pressure_class_translate[reg_allocno_class (i)]);
3538 curr_reg_live = BITMAP_ALLOC (NULL);
3539 saved_reg_live = BITMAP_ALLOC (NULL);
3540 region_ref_regs = BITMAP_ALLOC (NULL);
3541 }
3542
3543 curr_state = xmalloc (dfa_state_size);
3544 }
3545
3546 static void haifa_init_only_bb (basic_block, basic_block);
3547
3548 /* Initialize data structures specific to the Haifa scheduler. */
3549 void
3550 haifa_sched_init (void)
3551 {
3552 setup_sched_dump ();
3553 sched_init ();
3554
3555 scheduled_insns = VEC_alloc (rtx, heap, 0);
3556
3557 if (spec_info != NULL)
3558 {
3559 sched_deps_info->use_deps_list = 1;
3560 sched_deps_info->generate_spec_deps = 1;
3561 }
3562
3563 /* Initialize luids, dependency caches, target and h_i_d for the
3564 whole function. */
3565 {
3566 bb_vec_t bbs = VEC_alloc (basic_block, heap, n_basic_blocks);
3567 basic_block bb;
3568
3569 sched_init_bbs ();
3570
3571 FOR_EACH_BB (bb)
3572 VEC_quick_push (basic_block, bbs, bb);
3573 sched_init_luids (bbs, NULL, NULL, NULL);
3574 sched_deps_init (true);
3575 sched_extend_target ();
3576 haifa_init_h_i_d (bbs, NULL, NULL, NULL);
3577
3578 VEC_free (basic_block, heap, bbs);
3579 }
3580
3581 sched_init_only_bb = haifa_init_only_bb;
3582 sched_split_block = sched_split_block_1;
3583 sched_create_empty_bb = sched_create_empty_bb_1;
3584 haifa_recovery_bb_ever_added_p = false;
3585
3586 #ifdef ENABLE_CHECKING
3587 /* This is used preferably for finding bugs in check_cfg () itself.
3588 We must call sched_bbs_init () before check_cfg () because check_cfg ()
3589 assumes that the last insn in the last bb has a non-null successor. */
3590 check_cfg (0, 0);
3591 #endif
3592
3593 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
3594 before_recovery = 0;
3595 after_recovery = 0;
3596 }
3597
3598 /* Finish work with the data specific to the Haifa scheduler. */
3599 void
3600 haifa_sched_finish (void)
3601 {
3602 sched_create_empty_bb = NULL;
3603 sched_split_block = NULL;
3604 sched_init_only_bb = NULL;
3605
3606 if (spec_info && spec_info->dump)
3607 {
3608 char c = reload_completed ? 'a' : 'b';
3609
3610 fprintf (spec_info->dump,
3611 ";; %s:\n", current_function_name ());
3612
3613 fprintf (spec_info->dump,
3614 ";; Procedure %cr-begin-data-spec motions == %d\n",
3615 c, nr_begin_data);
3616 fprintf (spec_info->dump,
3617 ";; Procedure %cr-be-in-data-spec motions == %d\n",
3618 c, nr_be_in_data);
3619 fprintf (spec_info->dump,
3620 ";; Procedure %cr-begin-control-spec motions == %d\n",
3621 c, nr_begin_control);
3622 fprintf (spec_info->dump,
3623 ";; Procedure %cr-be-in-control-spec motions == %d\n",
3624 c, nr_be_in_control);
3625 }
3626
3627 VEC_free (rtx, heap, scheduled_insns);
3628
3629 /* Finalize h_i_d, dependency caches, and luids for the whole
3630 function. Target will be finalized in md_global_finish (). */
3631 sched_deps_finish ();
3632 sched_finish_luids ();
3633 current_sched_info = NULL;
3634 sched_finish ();
3635 }
3636
3637 /* Free global data used during insn scheduling. This function works with
3638 the common data shared between the schedulers. */
3639
3640 void
3641 sched_finish (void)
3642 {
3643 haifa_finish_h_i_d ();
3644 if (sched_pressure_p)
3645 {
3646 free (sched_regno_pressure_class);
3647 BITMAP_FREE (region_ref_regs);
3648 BITMAP_FREE (saved_reg_live);
3649 BITMAP_FREE (curr_reg_live);
3650 }
3651 free (curr_state);
3652
3653 if (targetm.sched.finish_global)
3654 targetm.sched.finish_global (sched_dump, sched_verbose);
3655
3656 end_alias_analysis ();
3657
3658 regstat_free_calls_crossed ();
3659
3660 dfa_finish ();
3661
3662 #ifdef ENABLE_CHECKING
3663 /* After reload ia64 backend clobbers CFG, so can't check anything. */
3664 if (!reload_completed)
3665 check_cfg (0, 0);
3666 #endif
3667 }
3668
3669 /* Fix INSN_TICKs of the instructions in the current block as well as
3670 INSN_TICKs of their dependents.
3671 HEAD and TAIL are the begin and the end of the current scheduled block. */
3672 static void
3673 fix_inter_tick (rtx head, rtx tail)
3674 {
3675 /* Set of instructions with corrected INSN_TICK. */
3676 bitmap_head processed;
3677 /* ??? It is doubtful if we should assume that cycle advance happens on
3678 basic block boundaries. Basically insns that are unconditionally ready
3679 on the start of the block are more preferable then those which have
3680 a one cycle dependency over insn from the previous block. */
3681 int next_clock = clock_var + 1;
3682
3683 bitmap_initialize (&processed, 0);
3684
3685 /* Iterates over scheduled instructions and fix their INSN_TICKs and
3686 INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
3687 across different blocks. */
3688 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
3689 {
3690 if (INSN_P (head))
3691 {
3692 int tick;
3693 sd_iterator_def sd_it;
3694 dep_t dep;
3695
3696 tick = INSN_TICK (head);
3697 gcc_assert (tick >= MIN_TICK);
3698
3699 /* Fix INSN_TICK of instruction from just scheduled block. */
3700 if (bitmap_set_bit (&processed, INSN_LUID (head)))
3701 {
3702 tick -= next_clock;
3703
3704 if (tick < MIN_TICK)
3705 tick = MIN_TICK;
3706
3707 INSN_TICK (head) = tick;
3708 }
3709
3710 FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
3711 {
3712 rtx next;
3713
3714 next = DEP_CON (dep);
3715 tick = INSN_TICK (next);
3716
3717 if (tick != INVALID_TICK
3718 /* If NEXT has its INSN_TICK calculated, fix it.
3719 If not - it will be properly calculated from
3720 scratch later in fix_tick_ready. */
3721 && bitmap_set_bit (&processed, INSN_LUID (next)))
3722 {
3723 tick -= next_clock;
3724
3725 if (tick < MIN_TICK)
3726 tick = MIN_TICK;
3727
3728 if (tick > INTER_TICK (next))
3729 INTER_TICK (next) = tick;
3730 else
3731 tick = INTER_TICK (next);
3732
3733 INSN_TICK (next) = tick;
3734 }
3735 }
3736 }
3737 }
3738 bitmap_clear (&processed);
3739 }
3740
3741 static int haifa_speculate_insn (rtx, ds_t, rtx *);
3742
3743 /* Check if NEXT is ready to be added to the ready or queue list.
3744 If "yes", add it to the proper list.
3745 Returns:
3746 -1 - is not ready yet,
3747 0 - added to the ready list,
3748 0 < N - queued for N cycles. */
3749 int
3750 try_ready (rtx next)
3751 {
3752 ds_t old_ts, *ts;
3753
3754 ts = &TODO_SPEC (next);
3755 old_ts = *ts;
3756
3757 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
3758 && ((old_ts & HARD_DEP)
3759 || (old_ts & SPECULATIVE)));
3760
3761 if (sd_lists_empty_p (next, SD_LIST_BACK))
3762 /* NEXT has all its dependencies resolved. */
3763 {
3764 /* Remove HARD_DEP bit from NEXT's status. */
3765 *ts &= ~HARD_DEP;
3766
3767 if (current_sched_info->flags & DO_SPECULATION)
3768 /* Remove all speculative bits from NEXT's status. */
3769 *ts &= ~SPECULATIVE;
3770 }
3771 else
3772 {
3773 /* One of the NEXT's dependencies has been resolved.
3774 Recalculate NEXT's status. */
3775
3776 *ts &= ~SPECULATIVE & ~HARD_DEP;
3777
3778 if (sd_lists_empty_p (next, SD_LIST_HARD_BACK))
3779 /* Now we've got NEXT with speculative deps only.
3780 1. Look at the deps to see what we have to do.
3781 2. Check if we can do 'todo'. */
3782 {
3783 sd_iterator_def sd_it;
3784 dep_t dep;
3785 bool first_p = true;
3786
3787 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
3788 {
3789 ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
3790
3791 if (DEBUG_INSN_P (DEP_PRO (dep))
3792 && !DEBUG_INSN_P (next))
3793 continue;
3794
3795 if (first_p)
3796 {
3797 first_p = false;
3798
3799 *ts = ds;
3800 }
3801 else
3802 *ts = ds_merge (*ts, ds);
3803 }
3804
3805 if (ds_weak (*ts) < spec_info->data_weakness_cutoff)
3806 /* Too few points. */
3807 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3808 }
3809 else
3810 *ts |= HARD_DEP;
3811 }
3812
3813 if (*ts & HARD_DEP)
3814 gcc_assert (*ts == old_ts
3815 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
3816 else if (current_sched_info->new_ready)
3817 *ts = current_sched_info->new_ready (next, *ts);
3818
3819 /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
3820 have its original pattern or changed (speculative) one. This is due
3821 to changing ebb in region scheduling.
3822 * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
3823 has speculative pattern.
3824
3825 We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
3826 control-speculative NEXT could have been discarded by sched-rgn.c
3827 (the same case as when discarded by can_schedule_ready_p ()). */
3828
3829 if ((*ts & SPECULATIVE)
3830 /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
3831 need to change anything. */
3832 && *ts != old_ts)
3833 {
3834 int res;
3835 rtx new_pat;
3836
3837 gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
3838
3839 res = haifa_speculate_insn (next, *ts, &new_pat);
3840
3841 switch (res)
3842 {
3843 case -1:
3844 /* It would be nice to change DEP_STATUS of all dependences,
3845 which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
3846 so we won't reanalyze anything. */
3847 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3848 break;
3849
3850 case 0:
3851 /* We follow the rule, that every speculative insn
3852 has non-null ORIG_PAT. */
3853 if (!ORIG_PAT (next))
3854 ORIG_PAT (next) = PATTERN (next);
3855 break;
3856
3857 case 1:
3858 if (!ORIG_PAT (next))
3859 /* If we gonna to overwrite the original pattern of insn,
3860 save it. */
3861 ORIG_PAT (next) = PATTERN (next);
3862
3863 haifa_change_pattern (next, new_pat);
3864 break;
3865
3866 default:
3867 gcc_unreachable ();
3868 }
3869 }
3870
3871 /* We need to restore pattern only if (*ts == 0), because otherwise it is
3872 either correct (*ts & SPECULATIVE),
3873 or we simply don't care (*ts & HARD_DEP). */
3874
3875 gcc_assert (!ORIG_PAT (next)
3876 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
3877
3878 if (*ts & HARD_DEP)
3879 {
3880 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
3881 control-speculative NEXT could have been discarded by sched-rgn.c
3882 (the same case as when discarded by can_schedule_ready_p ()). */
3883 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
3884
3885 change_queue_index (next, QUEUE_NOWHERE);
3886 return -1;
3887 }
3888 else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
3889 /* We should change pattern of every previously speculative
3890 instruction - and we determine if NEXT was speculative by using
3891 ORIG_PAT field. Except one case - speculation checks have ORIG_PAT
3892 pat too, so skip them. */
3893 {
3894 haifa_change_pattern (next, ORIG_PAT (next));
3895 ORIG_PAT (next) = 0;
3896 }
3897
3898 if (sched_verbose >= 2)
3899 {
3900 int s = TODO_SPEC (next);
3901
3902 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
3903 (*current_sched_info->print_insn) (next, 0));
3904
3905 if (spec_info && spec_info->dump)
3906 {
3907 if (s & BEGIN_DATA)
3908 fprintf (spec_info->dump, "; data-spec;");
3909 if (s & BEGIN_CONTROL)
3910 fprintf (spec_info->dump, "; control-spec;");
3911 if (s & BE_IN_CONTROL)
3912 fprintf (spec_info->dump, "; in-control-spec;");
3913 }
3914
3915 fprintf (sched_dump, "\n");
3916 }
3917
3918 adjust_priority (next);
3919
3920 return fix_tick_ready (next);
3921 }
3922
3923 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
3924 static int
3925 fix_tick_ready (rtx next)
3926 {
3927 int tick, delay;
3928
3929 if (!DEBUG_INSN_P (next) && !sd_lists_empty_p (next, SD_LIST_RES_BACK))
3930 {
3931 int full_p;
3932 sd_iterator_def sd_it;
3933 dep_t dep;
3934
3935 tick = INSN_TICK (next);
3936 /* if tick is not equal to INVALID_TICK, then update
3937 INSN_TICK of NEXT with the most recent resolved dependence
3938 cost. Otherwise, recalculate from scratch. */
3939 full_p = (tick == INVALID_TICK);
3940
3941 FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
3942 {
3943 rtx pro = DEP_PRO (dep);
3944 int tick1;
3945
3946 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
3947
3948 tick1 = INSN_TICK (pro) + dep_cost (dep);
3949 if (tick1 > tick)
3950 tick = tick1;
3951
3952 if (!full_p)
3953 break;
3954 }
3955 }
3956 else
3957 tick = -1;
3958
3959 INSN_TICK (next) = tick;
3960
3961 delay = tick - clock_var;
3962 if (delay <= 0 || sched_pressure_p)
3963 delay = QUEUE_READY;
3964
3965 change_queue_index (next, delay);
3966
3967 return delay;
3968 }
3969
3970 /* Move NEXT to the proper queue list with (DELAY >= 1),
3971 or add it to the ready list (DELAY == QUEUE_READY),
3972 or remove it from ready and queue lists at all (DELAY == QUEUE_NOWHERE). */
3973 static void
3974 change_queue_index (rtx next, int delay)
3975 {
3976 int i = QUEUE_INDEX (next);
3977
3978 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
3979 && delay != 0);
3980 gcc_assert (i != QUEUE_SCHEDULED);
3981
3982 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
3983 || (delay < 0 && delay == i))
3984 /* We have nothing to do. */
3985 return;
3986
3987 /* Remove NEXT from wherever it is now. */
3988 if (i == QUEUE_READY)
3989 ready_remove_insn (next);
3990 else if (i >= 0)
3991 queue_remove (next);
3992
3993 /* Add it to the proper place. */
3994 if (delay == QUEUE_READY)
3995 ready_add (readyp, next, false);
3996 else if (delay >= 1)
3997 queue_insn (next, delay, "change queue index");
3998
3999 if (sched_verbose >= 2)
4000 {
4001 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
4002 (*current_sched_info->print_insn) (next, 0));
4003
4004 if (delay == QUEUE_READY)
4005 fprintf (sched_dump, " into ready\n");
4006 else if (delay >= 1)
4007 fprintf (sched_dump, " into queue with cost=%d\n", delay);
4008 else
4009 fprintf (sched_dump, " removed from ready or queue lists\n");
4010 }
4011 }
4012
4013 static int sched_ready_n_insns = -1;
4014
4015 /* Initialize per region data structures. */
4016 void
4017 sched_extend_ready_list (int new_sched_ready_n_insns)
4018 {
4019 int i;
4020
4021 if (sched_ready_n_insns == -1)
4022 /* At the first call we need to initialize one more choice_stack
4023 entry. */
4024 {
4025 i = 0;
4026 sched_ready_n_insns = 0;
4027 VEC_reserve (rtx, heap, scheduled_insns, new_sched_ready_n_insns);
4028 }
4029 else
4030 i = sched_ready_n_insns + 1;
4031
4032 ready.veclen = new_sched_ready_n_insns + issue_rate;
4033 ready.vec = XRESIZEVEC (rtx, ready.vec, ready.veclen);
4034
4035 gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
4036
4037 ready_try = (char *) xrecalloc (ready_try, new_sched_ready_n_insns,
4038 sched_ready_n_insns, sizeof (*ready_try));
4039
4040 /* We allocate +1 element to save initial state in the choice_stack[0]
4041 entry. */
4042 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
4043 new_sched_ready_n_insns + 1);
4044
4045 for (; i <= new_sched_ready_n_insns; i++)
4046 {
4047 choice_stack[i].state = xmalloc (dfa_state_size);
4048
4049 if (targetm.sched.first_cycle_multipass_init)
4050 targetm.sched.first_cycle_multipass_init (&(choice_stack[i]
4051 .target_data));
4052 }
4053
4054 sched_ready_n_insns = new_sched_ready_n_insns;
4055 }
4056
4057 /* Free per region data structures. */
4058 void
4059 sched_finish_ready_list (void)
4060 {
4061 int i;
4062
4063 free (ready.vec);
4064 ready.vec = NULL;
4065 ready.veclen = 0;
4066
4067 free (ready_try);
4068 ready_try = NULL;
4069
4070 for (i = 0; i <= sched_ready_n_insns; i++)
4071 {
4072 if (targetm.sched.first_cycle_multipass_fini)
4073 targetm.sched.first_cycle_multipass_fini (&(choice_stack[i]
4074 .target_data));
4075
4076 free (choice_stack [i].state);
4077 }
4078 free (choice_stack);
4079 choice_stack = NULL;
4080
4081 sched_ready_n_insns = -1;
4082 }
4083
4084 static int
4085 haifa_luid_for_non_insn (rtx x)
4086 {
4087 gcc_assert (NOTE_P (x) || LABEL_P (x));
4088
4089 return 0;
4090 }
4091
4092 /* Generates recovery code for INSN. */
4093 static void
4094 generate_recovery_code (rtx insn)
4095 {
4096 if (TODO_SPEC (insn) & BEGIN_SPEC)
4097 begin_speculative_block (insn);
4098
4099 /* Here we have insn with no dependencies to
4100 instructions other then CHECK_SPEC ones. */
4101
4102 if (TODO_SPEC (insn) & BE_IN_SPEC)
4103 add_to_speculative_block (insn);
4104 }
4105
4106 /* Helper function.
4107 Tries to add speculative dependencies of type FS between instructions
4108 in deps_list L and TWIN. */
4109 static void
4110 process_insn_forw_deps_be_in_spec (rtx insn, rtx twin, ds_t fs)
4111 {
4112 sd_iterator_def sd_it;
4113 dep_t dep;
4114
4115 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
4116 {
4117 ds_t ds;
4118 rtx consumer;
4119
4120 consumer = DEP_CON (dep);
4121
4122 ds = DEP_STATUS (dep);
4123
4124 if (/* If we want to create speculative dep. */
4125 fs
4126 /* And we can do that because this is a true dep. */
4127 && (ds & DEP_TYPES) == DEP_TRUE)
4128 {
4129 gcc_assert (!(ds & BE_IN_SPEC));
4130
4131 if (/* If this dep can be overcome with 'begin speculation'. */
4132 ds & BEGIN_SPEC)
4133 /* Then we have a choice: keep the dep 'begin speculative'
4134 or transform it into 'be in speculative'. */
4135 {
4136 if (/* In try_ready we assert that if insn once became ready
4137 it can be removed from the ready (or queue) list only
4138 due to backend decision. Hence we can't let the
4139 probability of the speculative dep to decrease. */
4140 ds_weak (ds) <= ds_weak (fs))
4141 {
4142 ds_t new_ds;
4143
4144 new_ds = (ds & ~BEGIN_SPEC) | fs;
4145
4146 if (/* consumer can 'be in speculative'. */
4147 sched_insn_is_legitimate_for_speculation_p (consumer,
4148 new_ds))
4149 /* Transform it to be in speculative. */
4150 ds = new_ds;
4151 }
4152 }
4153 else
4154 /* Mark the dep as 'be in speculative'. */
4155 ds |= fs;
4156 }
4157
4158 {
4159 dep_def _new_dep, *new_dep = &_new_dep;
4160
4161 init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
4162 sd_add_dep (new_dep, false);
4163 }
4164 }
4165 }
4166
4167 /* Generates recovery code for BEGIN speculative INSN. */
4168 static void
4169 begin_speculative_block (rtx insn)
4170 {
4171 if (TODO_SPEC (insn) & BEGIN_DATA)
4172 nr_begin_data++;
4173 if (TODO_SPEC (insn) & BEGIN_CONTROL)
4174 nr_begin_control++;
4175
4176 create_check_block_twin (insn, false);
4177
4178 TODO_SPEC (insn) &= ~BEGIN_SPEC;
4179 }
4180
4181 static void haifa_init_insn (rtx);
4182
4183 /* Generates recovery code for BE_IN speculative INSN. */
4184 static void
4185 add_to_speculative_block (rtx insn)
4186 {
4187 ds_t ts;
4188 sd_iterator_def sd_it;
4189 dep_t dep;
4190 rtx twins = NULL;
4191 rtx_vec_t priorities_roots;
4192
4193 ts = TODO_SPEC (insn);
4194 gcc_assert (!(ts & ~BE_IN_SPEC));
4195
4196 if (ts & BE_IN_DATA)
4197 nr_be_in_data++;
4198 if (ts & BE_IN_CONTROL)
4199 nr_be_in_control++;
4200
4201 TODO_SPEC (insn) &= ~BE_IN_SPEC;
4202 gcc_assert (!TODO_SPEC (insn));
4203
4204 DONE_SPEC (insn) |= ts;
4205
4206 /* First we convert all simple checks to branchy. */
4207 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4208 sd_iterator_cond (&sd_it, &dep);)
4209 {
4210 rtx check = DEP_PRO (dep);
4211
4212 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
4213 {
4214 create_check_block_twin (check, true);
4215
4216 /* Restart search. */
4217 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4218 }
4219 else
4220 /* Continue search. */
4221 sd_iterator_next (&sd_it);
4222 }
4223
4224 priorities_roots = NULL;
4225 clear_priorities (insn, &priorities_roots);
4226
4227 while (1)
4228 {
4229 rtx check, twin;
4230 basic_block rec;
4231
4232 /* Get the first backward dependency of INSN. */
4233 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4234 if (!sd_iterator_cond (&sd_it, &dep))
4235 /* INSN has no backward dependencies left. */
4236 break;
4237
4238 gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
4239 && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
4240 && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
4241
4242 check = DEP_PRO (dep);
4243
4244 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
4245 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
4246
4247 rec = BLOCK_FOR_INSN (check);
4248
4249 twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
4250 haifa_init_insn (twin);
4251
4252 sd_copy_back_deps (twin, insn, true);
4253
4254 if (sched_verbose && spec_info->dump)
4255 /* INSN_BB (insn) isn't determined for twin insns yet.
4256 So we can't use current_sched_info->print_insn. */
4257 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
4258 INSN_UID (twin), rec->index);
4259
4260 twins = alloc_INSN_LIST (twin, twins);
4261
4262 /* Add dependences between TWIN and all appropriate
4263 instructions from REC. */
4264 FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
4265 {
4266 rtx pro = DEP_PRO (dep);
4267
4268 gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
4269
4270 /* INSN might have dependencies from the instructions from
4271 several recovery blocks. At this iteration we process those
4272 producers that reside in REC. */
4273 if (BLOCK_FOR_INSN (pro) == rec)
4274 {
4275 dep_def _new_dep, *new_dep = &_new_dep;
4276
4277 init_dep (new_dep, pro, twin, REG_DEP_TRUE);
4278 sd_add_dep (new_dep, false);
4279 }
4280 }
4281
4282 process_insn_forw_deps_be_in_spec (insn, twin, ts);
4283
4284 /* Remove all dependencies between INSN and insns in REC. */
4285 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4286 sd_iterator_cond (&sd_it, &dep);)
4287 {
4288 rtx pro = DEP_PRO (dep);
4289
4290 if (BLOCK_FOR_INSN (pro) == rec)
4291 sd_delete_dep (sd_it);
4292 else
4293 sd_iterator_next (&sd_it);
4294 }
4295 }
4296
4297 /* We couldn't have added the dependencies between INSN and TWINS earlier
4298 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
4299 while (twins)
4300 {
4301 rtx twin;
4302
4303 twin = XEXP (twins, 0);
4304
4305 {
4306 dep_def _new_dep, *new_dep = &_new_dep;
4307
4308 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
4309 sd_add_dep (new_dep, false);
4310 }
4311
4312 twin = XEXP (twins, 1);
4313 free_INSN_LIST_node (twins);
4314 twins = twin;
4315 }
4316
4317 calc_priorities (priorities_roots);
4318 VEC_free (rtx, heap, priorities_roots);
4319 }
4320
4321 /* Extends and fills with zeros (only the new part) array pointed to by P. */
4322 void *
4323 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
4324 {
4325 gcc_assert (new_nmemb >= old_nmemb);
4326 p = XRESIZEVAR (void, p, new_nmemb * size);
4327 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
4328 return p;
4329 }
4330
4331 /* Helper function.
4332 Find fallthru edge from PRED. */
4333 edge
4334 find_fallthru_edge_from (basic_block pred)
4335 {
4336 edge e;
4337 basic_block succ;
4338
4339 succ = pred->next_bb;
4340 gcc_assert (succ->prev_bb == pred);
4341
4342 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
4343 {
4344 e = find_fallthru_edge (pred->succs);
4345
4346 if (e)
4347 {
4348 gcc_assert (e->dest == succ);
4349 return e;
4350 }
4351 }
4352 else
4353 {
4354 e = find_fallthru_edge (succ->preds);
4355
4356 if (e)
4357 {
4358 gcc_assert (e->src == pred);
4359 return e;
4360 }
4361 }
4362
4363 return NULL;
4364 }
4365
4366 /* Extend per basic block data structures. */
4367 static void
4368 sched_extend_bb (void)
4369 {
4370 rtx insn;
4371
4372 /* The following is done to keep current_sched_info->next_tail non null. */
4373 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
4374 if (NEXT_INSN (insn) == 0
4375 || (!NOTE_P (insn)
4376 && !LABEL_P (insn)
4377 /* Don't emit a NOTE if it would end up before a BARRIER. */
4378 && !BARRIER_P (NEXT_INSN (insn))))
4379 {
4380 rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
4381 /* Make insn appear outside BB. */
4382 set_block_for_insn (note, NULL);
4383 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
4384 }
4385 }
4386
4387 /* Init per basic block data structures. */
4388 void
4389 sched_init_bbs (void)
4390 {
4391 sched_extend_bb ();
4392 }
4393
4394 /* Initialize BEFORE_RECOVERY variable. */
4395 static void
4396 init_before_recovery (basic_block *before_recovery_ptr)
4397 {
4398 basic_block last;
4399 edge e;
4400
4401 last = EXIT_BLOCK_PTR->prev_bb;
4402 e = find_fallthru_edge_from (last);
4403
4404 if (e)
4405 {
4406 /* We create two basic blocks:
4407 1. Single instruction block is inserted right after E->SRC
4408 and has jump to
4409 2. Empty block right before EXIT_BLOCK.
4410 Between these two blocks recovery blocks will be emitted. */
4411
4412 basic_block single, empty;
4413 rtx x, label;
4414
4415 /* If the fallthrough edge to exit we've found is from the block we've
4416 created before, don't do anything more. */
4417 if (last == after_recovery)
4418 return;
4419
4420 adding_bb_to_current_region_p = false;
4421
4422 single = sched_create_empty_bb (last);
4423 empty = sched_create_empty_bb (single);
4424
4425 /* Add new blocks to the root loop. */
4426 if (current_loops != NULL)
4427 {
4428 add_bb_to_loop (single, VEC_index (loop_p, current_loops->larray, 0));
4429 add_bb_to_loop (empty, VEC_index (loop_p, current_loops->larray, 0));
4430 }
4431
4432 single->count = last->count;
4433 empty->count = last->count;
4434 single->frequency = last->frequency;
4435 empty->frequency = last->frequency;
4436 BB_COPY_PARTITION (single, last);
4437 BB_COPY_PARTITION (empty, last);
4438
4439 redirect_edge_succ (e, single);
4440 make_single_succ_edge (single, empty, 0);
4441 make_single_succ_edge (empty, EXIT_BLOCK_PTR,
4442 EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
4443
4444 label = block_label (empty);
4445 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
4446 JUMP_LABEL (x) = label;
4447 LABEL_NUSES (label)++;
4448 haifa_init_insn (x);
4449
4450 emit_barrier_after (x);
4451
4452 sched_init_only_bb (empty, NULL);
4453 sched_init_only_bb (single, NULL);
4454 sched_extend_bb ();
4455
4456 adding_bb_to_current_region_p = true;
4457 before_recovery = single;
4458 after_recovery = empty;
4459
4460 if (before_recovery_ptr)
4461 *before_recovery_ptr = before_recovery;
4462
4463 if (sched_verbose >= 2 && spec_info->dump)
4464 fprintf (spec_info->dump,
4465 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
4466 last->index, single->index, empty->index);
4467 }
4468 else
4469 before_recovery = last;
4470 }
4471
4472 /* Returns new recovery block. */
4473 basic_block
4474 sched_create_recovery_block (basic_block *before_recovery_ptr)
4475 {
4476 rtx label;
4477 rtx barrier;
4478 basic_block rec;
4479
4480 haifa_recovery_bb_recently_added_p = true;
4481 haifa_recovery_bb_ever_added_p = true;
4482
4483 init_before_recovery (before_recovery_ptr);
4484
4485 barrier = get_last_bb_insn (before_recovery);
4486 gcc_assert (BARRIER_P (barrier));
4487
4488 label = emit_label_after (gen_label_rtx (), barrier);
4489
4490 rec = create_basic_block (label, label, before_recovery);
4491
4492 /* A recovery block always ends with an unconditional jump. */
4493 emit_barrier_after (BB_END (rec));
4494
4495 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
4496 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
4497
4498 if (sched_verbose && spec_info->dump)
4499 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
4500 rec->index);
4501
4502 return rec;
4503 }
4504
4505 /* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
4506 and emit necessary jumps. */
4507 void
4508 sched_create_recovery_edges (basic_block first_bb, basic_block rec,
4509 basic_block second_bb)
4510 {
4511 rtx label;
4512 rtx jump;
4513 int edge_flags;
4514
4515 /* This is fixing of incoming edge. */
4516 /* ??? Which other flags should be specified? */
4517 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
4518 /* Partition type is the same, if it is "unpartitioned". */
4519 edge_flags = EDGE_CROSSING;
4520 else
4521 edge_flags = 0;
4522
4523 make_edge (first_bb, rec, edge_flags);
4524 label = block_label (second_bb);
4525 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
4526 JUMP_LABEL (jump) = label;
4527 LABEL_NUSES (label)++;
4528
4529 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
4530 /* Partition type is the same, if it is "unpartitioned". */
4531 {
4532 /* Rewritten from cfgrtl.c. */
4533 if (flag_reorder_blocks_and_partition
4534 && targetm.have_named_sections)
4535 {
4536 /* We don't need the same note for the check because
4537 any_condjump_p (check) == true. */
4538 add_reg_note (jump, REG_CROSSING_JUMP, NULL_RTX);
4539 }
4540 edge_flags = EDGE_CROSSING;
4541 }
4542 else
4543 edge_flags = 0;
4544
4545 make_single_succ_edge (rec, second_bb, edge_flags);
4546 if (dom_info_available_p (CDI_DOMINATORS))
4547 set_immediate_dominator (CDI_DOMINATORS, rec, first_bb);
4548 }
4549
4550 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
4551 INSN is a simple check, that should be converted to branchy one. */
4552 static void
4553 create_check_block_twin (rtx insn, bool mutate_p)
4554 {
4555 basic_block rec;
4556 rtx label, check, twin;
4557 ds_t fs;
4558 sd_iterator_def sd_it;
4559 dep_t dep;
4560 dep_def _new_dep, *new_dep = &_new_dep;
4561 ds_t todo_spec;
4562
4563 gcc_assert (ORIG_PAT (insn) != NULL_RTX);
4564
4565 if (!mutate_p)
4566 todo_spec = TODO_SPEC (insn);
4567 else
4568 {
4569 gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
4570 && (TODO_SPEC (insn) & SPECULATIVE) == 0);
4571
4572 todo_spec = CHECK_SPEC (insn);
4573 }
4574
4575 todo_spec &= SPECULATIVE;
4576
4577 /* Create recovery block. */
4578 if (mutate_p || targetm.sched.needs_block_p (todo_spec))
4579 {
4580 rec = sched_create_recovery_block (NULL);
4581 label = BB_HEAD (rec);
4582 }
4583 else
4584 {
4585 rec = EXIT_BLOCK_PTR;
4586 label = NULL_RTX;
4587 }
4588
4589 /* Emit CHECK. */
4590 check = targetm.sched.gen_spec_check (insn, label, todo_spec);
4591
4592 if (rec != EXIT_BLOCK_PTR)
4593 {
4594 /* To have mem_reg alive at the beginning of second_bb,
4595 we emit check BEFORE insn, so insn after splitting
4596 insn will be at the beginning of second_bb, which will
4597 provide us with the correct life information. */
4598 check = emit_jump_insn_before (check, insn);
4599 JUMP_LABEL (check) = label;
4600 LABEL_NUSES (label)++;
4601 }
4602 else
4603 check = emit_insn_before (check, insn);
4604
4605 /* Extend data structures. */
4606 haifa_init_insn (check);
4607
4608 /* CHECK is being added to current region. Extend ready list. */
4609 gcc_assert (sched_ready_n_insns != -1);
4610 sched_extend_ready_list (sched_ready_n_insns + 1);
4611
4612 if (current_sched_info->add_remove_insn)
4613 current_sched_info->add_remove_insn (insn, 0);
4614
4615 RECOVERY_BLOCK (check) = rec;
4616
4617 if (sched_verbose && spec_info->dump)
4618 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
4619 (*current_sched_info->print_insn) (check, 0));
4620
4621 gcc_assert (ORIG_PAT (insn));
4622
4623 /* Initialize TWIN (twin is a duplicate of original instruction
4624 in the recovery block). */
4625 if (rec != EXIT_BLOCK_PTR)
4626 {
4627 sd_iterator_def sd_it;
4628 dep_t dep;
4629
4630 FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
4631 if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
4632 {
4633 struct _dep _dep2, *dep2 = &_dep2;
4634
4635 init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);
4636
4637 sd_add_dep (dep2, true);
4638 }
4639
4640 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
4641 haifa_init_insn (twin);
4642
4643 if (sched_verbose && spec_info->dump)
4644 /* INSN_BB (insn) isn't determined for twin insns yet.
4645 So we can't use current_sched_info->print_insn. */
4646 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
4647 INSN_UID (twin), rec->index);
4648 }
4649 else
4650 {
4651 ORIG_PAT (check) = ORIG_PAT (insn);
4652 HAS_INTERNAL_DEP (check) = 1;
4653 twin = check;
4654 /* ??? We probably should change all OUTPUT dependencies to
4655 (TRUE | OUTPUT). */
4656 }
4657
4658 /* Copy all resolved back dependencies of INSN to TWIN. This will
4659 provide correct value for INSN_TICK (TWIN). */
4660 sd_copy_back_deps (twin, insn, true);
4661
4662 if (rec != EXIT_BLOCK_PTR)
4663 /* In case of branchy check, fix CFG. */
4664 {
4665 basic_block first_bb, second_bb;
4666 rtx jump;
4667
4668 first_bb = BLOCK_FOR_INSN (check);
4669 second_bb = sched_split_block (first_bb, check);
4670
4671 sched_create_recovery_edges (first_bb, rec, second_bb);
4672
4673 sched_init_only_bb (second_bb, first_bb);
4674 sched_init_only_bb (rec, EXIT_BLOCK_PTR);
4675
4676 jump = BB_END (rec);
4677 haifa_init_insn (jump);
4678 }
4679
4680 /* Move backward dependences from INSN to CHECK and
4681 move forward dependences from INSN to TWIN. */
4682
4683 /* First, create dependencies between INSN's producers and CHECK & TWIN. */
4684 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4685 {
4686 rtx pro = DEP_PRO (dep);
4687 ds_t ds;
4688
4689 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
4690 check --TRUE--> producer ??? or ANTI ???
4691 twin --TRUE--> producer
4692 twin --ANTI--> check
4693
4694 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
4695 check --ANTI--> producer
4696 twin --ANTI--> producer
4697 twin --ANTI--> check
4698
4699 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
4700 check ~~TRUE~~> producer
4701 twin ~~TRUE~~> producer
4702 twin --ANTI--> check */
4703
4704 ds = DEP_STATUS (dep);
4705
4706 if (ds & BEGIN_SPEC)
4707 {
4708 gcc_assert (!mutate_p);
4709 ds &= ~BEGIN_SPEC;
4710 }
4711
4712 init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
4713 sd_add_dep (new_dep, false);
4714
4715 if (rec != EXIT_BLOCK_PTR)
4716 {
4717 DEP_CON (new_dep) = twin;
4718 sd_add_dep (new_dep, false);
4719 }
4720 }
4721
4722 /* Second, remove backward dependencies of INSN. */
4723 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4724 sd_iterator_cond (&sd_it, &dep);)
4725 {
4726 if ((DEP_STATUS (dep) & BEGIN_SPEC)
4727 || mutate_p)
4728 /* We can delete this dep because we overcome it with
4729 BEGIN_SPECULATION. */
4730 sd_delete_dep (sd_it);
4731 else
4732 sd_iterator_next (&sd_it);
4733 }
4734
4735 /* Future Speculations. Determine what BE_IN speculations will be like. */
4736 fs = 0;
4737
4738 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
4739 here. */
4740
4741 gcc_assert (!DONE_SPEC (insn));
4742
4743 if (!mutate_p)
4744 {
4745 ds_t ts = TODO_SPEC (insn);
4746
4747 DONE_SPEC (insn) = ts & BEGIN_SPEC;
4748 CHECK_SPEC (check) = ts & BEGIN_SPEC;
4749
4750 /* Luckiness of future speculations solely depends upon initial
4751 BEGIN speculation. */
4752 if (ts & BEGIN_DATA)
4753 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
4754 if (ts & BEGIN_CONTROL)
4755 fs = set_dep_weak (fs, BE_IN_CONTROL,
4756 get_dep_weak (ts, BEGIN_CONTROL));
4757 }
4758 else
4759 CHECK_SPEC (check) = CHECK_SPEC (insn);
4760
4761 /* Future speculations: call the helper. */
4762 process_insn_forw_deps_be_in_spec (insn, twin, fs);
4763
4764 if (rec != EXIT_BLOCK_PTR)
4765 {
4766 /* Which types of dependencies should we use here is,
4767 generally, machine-dependent question... But, for now,
4768 it is not. */
4769
4770 if (!mutate_p)
4771 {
4772 init_dep (new_dep, insn, check, REG_DEP_TRUE);
4773 sd_add_dep (new_dep, false);
4774
4775 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
4776 sd_add_dep (new_dep, false);
4777 }
4778 else
4779 {
4780 if (spec_info->dump)
4781 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
4782 (*current_sched_info->print_insn) (insn, 0));
4783
4784 /* Remove all dependencies of the INSN. */
4785 {
4786 sd_it = sd_iterator_start (insn, (SD_LIST_FORW
4787 | SD_LIST_BACK
4788 | SD_LIST_RES_BACK));
4789 while (sd_iterator_cond (&sd_it, &dep))
4790 sd_delete_dep (sd_it);
4791 }
4792
4793 /* If former check (INSN) already was moved to the ready (or queue)
4794 list, add new check (CHECK) there too. */
4795 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
4796 try_ready (check);
4797
4798 /* Remove old check from instruction stream and free its
4799 data. */
4800 sched_remove_insn (insn);
4801 }
4802
4803 init_dep (new_dep, check, twin, REG_DEP_ANTI);
4804 sd_add_dep (new_dep, false);
4805 }
4806 else
4807 {
4808 init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
4809 sd_add_dep (new_dep, false);
4810 }
4811
4812 if (!mutate_p)
4813 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
4814 because it'll be done later in add_to_speculative_block. */
4815 {
4816 rtx_vec_t priorities_roots = NULL;
4817
4818 clear_priorities (twin, &priorities_roots);
4819 calc_priorities (priorities_roots);
4820 VEC_free (rtx, heap, priorities_roots);
4821 }
4822 }
4823
4824 /* Removes dependency between instructions in the recovery block REC
4825 and usual region instructions. It keeps inner dependences so it
4826 won't be necessary to recompute them. */
4827 static void
4828 fix_recovery_deps (basic_block rec)
4829 {
4830 rtx note, insn, jump, ready_list = 0;
4831 bitmap_head in_ready;
4832 rtx link;
4833
4834 bitmap_initialize (&in_ready, 0);
4835
4836 /* NOTE - a basic block note. */
4837 note = NEXT_INSN (BB_HEAD (rec));
4838 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4839 insn = BB_END (rec);
4840 gcc_assert (JUMP_P (insn));
4841 insn = PREV_INSN (insn);
4842
4843 do
4844 {
4845 sd_iterator_def sd_it;
4846 dep_t dep;
4847
4848 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4849 sd_iterator_cond (&sd_it, &dep);)
4850 {
4851 rtx consumer = DEP_CON (dep);
4852
4853 if (BLOCK_FOR_INSN (consumer) != rec)
4854 {
4855 sd_delete_dep (sd_it);
4856
4857 if (bitmap_set_bit (&in_ready, INSN_LUID (consumer)))
4858 ready_list = alloc_INSN_LIST (consumer, ready_list);
4859 }
4860 else
4861 {
4862 gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
4863
4864 sd_iterator_next (&sd_it);
4865 }
4866 }
4867
4868 insn = PREV_INSN (insn);
4869 }
4870 while (insn != note);
4871
4872 bitmap_clear (&in_ready);
4873
4874 /* Try to add instructions to the ready or queue list. */
4875 for (link = ready_list; link; link = XEXP (link, 1))
4876 try_ready (XEXP (link, 0));
4877 free_INSN_LIST_list (&ready_list);
4878
4879 /* Fixing jump's dependences. */
4880 insn = BB_HEAD (rec);
4881 jump = BB_END (rec);
4882
4883 gcc_assert (LABEL_P (insn));
4884 insn = NEXT_INSN (insn);
4885
4886 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4887 add_jump_dependencies (insn, jump);
4888 }
4889
4890 /* Change pattern of INSN to NEW_PAT. */
4891 void
4892 sched_change_pattern (rtx insn, rtx new_pat)
4893 {
4894 int t;
4895
4896 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
4897 gcc_assert (t);
4898 dfa_clear_single_insn_cache (insn);
4899 }
4900
4901 /* Change pattern of INSN to NEW_PAT. Invalidate cached haifa
4902 instruction data. */
4903 static void
4904 haifa_change_pattern (rtx insn, rtx new_pat)
4905 {
4906 sched_change_pattern (insn, new_pat);
4907
4908 /* Invalidate INSN_COST, so it'll be recalculated. */
4909 INSN_COST (insn) = -1;
4910 /* Invalidate INSN_TICK, so it'll be recalculated. */
4911 INSN_TICK (insn) = INVALID_TICK;
4912 }
4913
4914 /* -1 - can't speculate,
4915 0 - for speculation with REQUEST mode it is OK to use
4916 current instruction pattern,
4917 1 - need to change pattern for *NEW_PAT to be speculative. */
4918 int
4919 sched_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4920 {
4921 gcc_assert (current_sched_info->flags & DO_SPECULATION
4922 && (request & SPECULATIVE)
4923 && sched_insn_is_legitimate_for_speculation_p (insn, request));
4924
4925 if ((request & spec_info->mask) != request)
4926 return -1;
4927
4928 if (request & BE_IN_SPEC
4929 && !(request & BEGIN_SPEC))
4930 return 0;
4931
4932 return targetm.sched.speculate_insn (insn, request, new_pat);
4933 }
4934
4935 static int
4936 haifa_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4937 {
4938 gcc_assert (sched_deps_info->generate_spec_deps
4939 && !IS_SPECULATION_CHECK_P (insn));
4940
4941 if (HAS_INTERNAL_DEP (insn)
4942 || SCHED_GROUP_P (insn))
4943 return -1;
4944
4945 return sched_speculate_insn (insn, request, new_pat);
4946 }
4947
4948 /* Print some information about block BB, which starts with HEAD and
4949 ends with TAIL, before scheduling it.
4950 I is zero, if scheduler is about to start with the fresh ebb. */
4951 static void
4952 dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
4953 {
4954 if (!i)
4955 fprintf (sched_dump,
4956 ";; ======================================================\n");
4957 else
4958 fprintf (sched_dump,
4959 ";; =====================ADVANCING TO=====================\n");
4960 fprintf (sched_dump,
4961 ";; -- basic block %d from %d to %d -- %s reload\n",
4962 bb->index, INSN_UID (head), INSN_UID (tail),
4963 (reload_completed ? "after" : "before"));
4964 fprintf (sched_dump,
4965 ";; ======================================================\n");
4966 fprintf (sched_dump, "\n");
4967 }
4968
4969 /* Unlink basic block notes and labels and saves them, so they
4970 can be easily restored. We unlink basic block notes in EBB to
4971 provide back-compatibility with the previous code, as target backends
4972 assume, that there'll be only instructions between
4973 current_sched_info->{head and tail}. We restore these notes as soon
4974 as we can.
4975 FIRST (LAST) is the first (last) basic block in the ebb.
4976 NB: In usual case (FIRST == LAST) nothing is really done. */
4977 void
4978 unlink_bb_notes (basic_block first, basic_block last)
4979 {
4980 /* We DON'T unlink basic block notes of the first block in the ebb. */
4981 if (first == last)
4982 return;
4983
4984 bb_header = XNEWVEC (rtx, last_basic_block);
4985
4986 /* Make a sentinel. */
4987 if (last->next_bb != EXIT_BLOCK_PTR)
4988 bb_header[last->next_bb->index] = 0;
4989
4990 first = first->next_bb;
4991 do
4992 {
4993 rtx prev, label, note, next;
4994
4995 label = BB_HEAD (last);
4996 if (LABEL_P (label))
4997 note = NEXT_INSN (label);
4998 else
4999 note = label;
5000 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5001
5002 prev = PREV_INSN (label);
5003 next = NEXT_INSN (note);
5004 gcc_assert (prev && next);
5005
5006 NEXT_INSN (prev) = next;
5007 PREV_INSN (next) = prev;
5008
5009 bb_header[last->index] = label;
5010
5011 if (last == first)
5012 break;
5013
5014 last = last->prev_bb;
5015 }
5016 while (1);
5017 }
5018
5019 /* Restore basic block notes.
5020 FIRST is the first basic block in the ebb. */
5021 static void
5022 restore_bb_notes (basic_block first)
5023 {
5024 if (!bb_header)
5025 return;
5026
5027 /* We DON'T unlink basic block notes of the first block in the ebb. */
5028 first = first->next_bb;
5029 /* Remember: FIRST is actually a second basic block in the ebb. */
5030
5031 while (first != EXIT_BLOCK_PTR
5032 && bb_header[first->index])
5033 {
5034 rtx prev, label, note, next;
5035
5036 label = bb_header[first->index];
5037 prev = PREV_INSN (label);
5038 next = NEXT_INSN (prev);
5039
5040 if (LABEL_P (label))
5041 note = NEXT_INSN (label);
5042 else
5043 note = label;
5044 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5045
5046 bb_header[first->index] = 0;
5047
5048 NEXT_INSN (prev) = label;
5049 NEXT_INSN (note) = next;
5050 PREV_INSN (next) = note;
5051
5052 first = first->next_bb;
5053 }
5054
5055 free (bb_header);
5056 bb_header = 0;
5057 }
5058
5059 /* Helper function.
5060 Fix CFG after both in- and inter-block movement of
5061 control_flow_insn_p JUMP. */
5062 static void
5063 fix_jump_move (rtx jump)
5064 {
5065 basic_block bb, jump_bb, jump_bb_next;
5066
5067 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
5068 jump_bb = BLOCK_FOR_INSN (jump);
5069 jump_bb_next = jump_bb->next_bb;
5070
5071 gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
5072 || IS_SPECULATION_BRANCHY_CHECK_P (jump));
5073
5074 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
5075 /* if jump_bb_next is not empty. */
5076 BB_END (jump_bb) = BB_END (jump_bb_next);
5077
5078 if (BB_END (bb) != PREV_INSN (jump))
5079 /* Then there are instruction after jump that should be placed
5080 to jump_bb_next. */
5081 BB_END (jump_bb_next) = BB_END (bb);
5082 else
5083 /* Otherwise jump_bb_next is empty. */
5084 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
5085
5086 /* To make assertion in move_insn happy. */
5087 BB_END (bb) = PREV_INSN (jump);
5088
5089 update_bb_for_insn (jump_bb_next);
5090 }
5091
5092 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
5093 static void
5094 move_block_after_check (rtx jump)
5095 {
5096 basic_block bb, jump_bb, jump_bb_next;
5097 VEC(edge,gc) *t;
5098
5099 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
5100 jump_bb = BLOCK_FOR_INSN (jump);
5101 jump_bb_next = jump_bb->next_bb;
5102
5103 update_bb_for_insn (jump_bb);
5104
5105 gcc_assert (IS_SPECULATION_CHECK_P (jump)
5106 || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
5107
5108 unlink_block (jump_bb_next);
5109 link_block (jump_bb_next, bb);
5110
5111 t = bb->succs;
5112 bb->succs = 0;
5113 move_succs (&(jump_bb->succs), bb);
5114 move_succs (&(jump_bb_next->succs), jump_bb);
5115 move_succs (&t, jump_bb_next);
5116
5117 df_mark_solutions_dirty ();
5118
5119 common_sched_info->fix_recovery_cfg
5120 (bb->index, jump_bb->index, jump_bb_next->index);
5121 }
5122
5123 /* Helper function for move_block_after_check.
5124 This functions attaches edge vector pointed to by SUCCSP to
5125 block TO. */
5126 static void
5127 move_succs (VEC(edge,gc) **succsp, basic_block to)
5128 {
5129 edge e;
5130 edge_iterator ei;
5131
5132 gcc_assert (to->succs == 0);
5133
5134 to->succs = *succsp;
5135
5136 FOR_EACH_EDGE (e, ei, to->succs)
5137 e->src = to;
5138
5139 *succsp = 0;
5140 }
5141
5142 /* Remove INSN from the instruction stream.
5143 INSN should have any dependencies. */
5144 static void
5145 sched_remove_insn (rtx insn)
5146 {
5147 sd_finish_insn (insn);
5148
5149 change_queue_index (insn, QUEUE_NOWHERE);
5150 current_sched_info->add_remove_insn (insn, 1);
5151 remove_insn (insn);
5152 }
5153
5154 /* Clear priorities of all instructions, that are forward dependent on INSN.
5155 Store in vector pointed to by ROOTS_PTR insns on which priority () should
5156 be invoked to initialize all cleared priorities. */
5157 static void
5158 clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
5159 {
5160 sd_iterator_def sd_it;
5161 dep_t dep;
5162 bool insn_is_root_p = true;
5163
5164 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
5165
5166 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
5167 {
5168 rtx pro = DEP_PRO (dep);
5169
5170 if (INSN_PRIORITY_STATUS (pro) >= 0
5171 && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
5172 {
5173 /* If DEP doesn't contribute to priority then INSN itself should
5174 be added to priority roots. */
5175 if (contributes_to_priority_p (dep))
5176 insn_is_root_p = false;
5177
5178 INSN_PRIORITY_STATUS (pro) = -1;
5179 clear_priorities (pro, roots_ptr);
5180 }
5181 }
5182
5183 if (insn_is_root_p)
5184 VEC_safe_push (rtx, heap, *roots_ptr, insn);
5185 }
5186
5187 /* Recompute priorities of instructions, whose priorities might have been
5188 changed. ROOTS is a vector of instructions whose priority computation will
5189 trigger initialization of all cleared priorities. */
5190 static void
5191 calc_priorities (rtx_vec_t roots)
5192 {
5193 int i;
5194 rtx insn;
5195
5196 FOR_EACH_VEC_ELT (rtx, roots, i, insn)
5197 priority (insn);
5198 }
5199
5200
5201 /* Add dependences between JUMP and other instructions in the recovery
5202 block. INSN is the first insn the recovery block. */
5203 static void
5204 add_jump_dependencies (rtx insn, rtx jump)
5205 {
5206 do
5207 {
5208 insn = NEXT_INSN (insn);
5209 if (insn == jump)
5210 break;
5211
5212 if (dep_list_size (insn) == 0)
5213 {
5214 dep_def _new_dep, *new_dep = &_new_dep;
5215
5216 init_dep (new_dep, insn, jump, REG_DEP_ANTI);
5217 sd_add_dep (new_dep, false);
5218 }
5219 }
5220 while (1);
5221
5222 gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
5223 }
5224
5225 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
5226 rtx
5227 bb_note (basic_block bb)
5228 {
5229 rtx note;
5230
5231 note = BB_HEAD (bb);
5232 if (LABEL_P (note))
5233 note = NEXT_INSN (note);
5234
5235 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5236 return note;
5237 }
5238
5239 #ifdef ENABLE_CHECKING
5240 /* Helper function for check_cfg.
5241 Return nonzero, if edge vector pointed to by EL has edge with TYPE in
5242 its flags. */
5243 static int
5244 has_edge_p (VEC(edge,gc) *el, int type)
5245 {
5246 edge e;
5247 edge_iterator ei;
5248
5249 FOR_EACH_EDGE (e, ei, el)
5250 if (e->flags & type)
5251 return 1;
5252 return 0;
5253 }
5254
5255 /* Search back, starting at INSN, for an insn that is not a
5256 NOTE_INSN_VAR_LOCATION. Don't search beyond HEAD, and return it if
5257 no such insn can be found. */
5258 static inline rtx
5259 prev_non_location_insn (rtx insn, rtx head)
5260 {
5261 while (insn != head && NOTE_P (insn)
5262 && NOTE_KIND (insn) == NOTE_INSN_VAR_LOCATION)
5263 insn = PREV_INSN (insn);
5264
5265 return insn;
5266 }
5267
5268 /* Check few properties of CFG between HEAD and TAIL.
5269 If HEAD (TAIL) is NULL check from the beginning (till the end) of the
5270 instruction stream. */
5271 static void
5272 check_cfg (rtx head, rtx tail)
5273 {
5274 rtx next_tail;
5275 basic_block bb = 0;
5276 int not_first = 0, not_last;
5277
5278 if (head == NULL)
5279 head = get_insns ();
5280 if (tail == NULL)
5281 tail = get_last_insn ();
5282 next_tail = NEXT_INSN (tail);
5283
5284 do
5285 {
5286 not_last = head != tail;
5287
5288 if (not_first)
5289 gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
5290 if (not_last)
5291 gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
5292
5293 if (LABEL_P (head)
5294 || (NOTE_INSN_BASIC_BLOCK_P (head)
5295 && (!not_first
5296 || (not_first && !LABEL_P (PREV_INSN (head))))))
5297 {
5298 gcc_assert (bb == 0);
5299 bb = BLOCK_FOR_INSN (head);
5300 if (bb != 0)
5301 gcc_assert (BB_HEAD (bb) == head);
5302 else
5303 /* This is the case of jump table. See inside_basic_block_p (). */
5304 gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
5305 }
5306
5307 if (bb == 0)
5308 {
5309 gcc_assert (!inside_basic_block_p (head));
5310 head = NEXT_INSN (head);
5311 }
5312 else
5313 {
5314 gcc_assert (inside_basic_block_p (head)
5315 || NOTE_P (head));
5316 gcc_assert (BLOCK_FOR_INSN (head) == bb);
5317
5318 if (LABEL_P (head))
5319 {
5320 head = NEXT_INSN (head);
5321 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
5322 }
5323 else
5324 {
5325 if (control_flow_insn_p (head))
5326 {
5327 gcc_assert (prev_non_location_insn (BB_END (bb), head)
5328 == head);
5329
5330 if (any_uncondjump_p (head))
5331 gcc_assert (EDGE_COUNT (bb->succs) == 1
5332 && BARRIER_P (NEXT_INSN (head)));
5333 else if (any_condjump_p (head))
5334 gcc_assert (/* Usual case. */
5335 (EDGE_COUNT (bb->succs) > 1
5336 && !BARRIER_P (NEXT_INSN (head)))
5337 /* Or jump to the next instruction. */
5338 || (EDGE_COUNT (bb->succs) == 1
5339 && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
5340 == JUMP_LABEL (head))));
5341 }
5342 if (BB_END (bb) == head)
5343 {
5344 if (EDGE_COUNT (bb->succs) > 1)
5345 gcc_assert (control_flow_insn_p (prev_non_location_insn
5346 (head, BB_HEAD (bb)))
5347 || has_edge_p (bb->succs, EDGE_COMPLEX));
5348 bb = 0;
5349 }
5350
5351 head = NEXT_INSN (head);
5352 }
5353 }
5354
5355 not_first = 1;
5356 }
5357 while (head != next_tail);
5358
5359 gcc_assert (bb == 0);
5360 }
5361
5362 #endif /* ENABLE_CHECKING */
5363
5364 /* Extend per basic block data structures. */
5365 static void
5366 extend_bb (void)
5367 {
5368 if (sched_scan_info->extend_bb)
5369 sched_scan_info->extend_bb ();
5370 }
5371
5372 /* Init data for BB. */
5373 static void
5374 init_bb (basic_block bb)
5375 {
5376 if (sched_scan_info->init_bb)
5377 sched_scan_info->init_bb (bb);
5378 }
5379
5380 /* Extend per insn data structures. */
5381 static void
5382 extend_insn (void)
5383 {
5384 if (sched_scan_info->extend_insn)
5385 sched_scan_info->extend_insn ();
5386 }
5387
5388 /* Init data structures for INSN. */
5389 static void
5390 init_insn (rtx insn)
5391 {
5392 if (sched_scan_info->init_insn)
5393 sched_scan_info->init_insn (insn);
5394 }
5395
5396 /* Init all insns in BB. */
5397 static void
5398 init_insns_in_bb (basic_block bb)
5399 {
5400 rtx insn;
5401
5402 FOR_BB_INSNS (bb, insn)
5403 init_insn (insn);
5404 }
5405
5406 /* A driver function to add a set of basic blocks (BBS),
5407 a single basic block (BB), a set of insns (INSNS) or a single insn (INSN)
5408 to the scheduling region. */
5409 void
5410 sched_scan (const struct sched_scan_info_def *ssi,
5411 bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
5412 {
5413 sched_scan_info = ssi;
5414
5415 if (bbs != NULL || bb != NULL)
5416 {
5417 extend_bb ();
5418
5419 if (bbs != NULL)
5420 {
5421 unsigned i;
5422 basic_block x;
5423
5424 FOR_EACH_VEC_ELT (basic_block, bbs, i, x)
5425 init_bb (x);
5426 }
5427
5428 if (bb != NULL)
5429 init_bb (bb);
5430 }
5431
5432 extend_insn ();
5433
5434 if (bbs != NULL)
5435 {
5436 unsigned i;
5437 basic_block x;
5438
5439 FOR_EACH_VEC_ELT (basic_block, bbs, i, x)
5440 init_insns_in_bb (x);
5441 }
5442
5443 if (bb != NULL)
5444 init_insns_in_bb (bb);
5445
5446 if (insns != NULL)
5447 {
5448 unsigned i;
5449 rtx x;
5450
5451 FOR_EACH_VEC_ELT (rtx, insns, i, x)
5452 init_insn (x);
5453 }
5454
5455 if (insn != NULL)
5456 init_insn (insn);
5457 }
5458
5459
5460 /* Extend data structures for logical insn UID. */
5461 static void
5462 luids_extend_insn (void)
5463 {
5464 int new_luids_max_uid = get_max_uid () + 1;
5465
5466 VEC_safe_grow_cleared (int, heap, sched_luids, new_luids_max_uid);
5467 }
5468
5469 /* Initialize LUID for INSN. */
5470 static void
5471 luids_init_insn (rtx insn)
5472 {
5473 int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
5474 int luid;
5475
5476 if (i >= 0)
5477 {
5478 luid = sched_max_luid;
5479 sched_max_luid += i;
5480 }
5481 else
5482 luid = -1;
5483
5484 SET_INSN_LUID (insn, luid);
5485 }
5486
5487 /* Initialize luids for BBS, BB, INSNS and INSN.
5488 The hook common_sched_info->luid_for_non_insn () is used to determine
5489 if notes, labels, etc. need luids. */
5490 void
5491 sched_init_luids (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
5492 {
5493 const struct sched_scan_info_def ssi =
5494 {
5495 NULL, /* extend_bb */
5496 NULL, /* init_bb */
5497 luids_extend_insn, /* extend_insn */
5498 luids_init_insn /* init_insn */
5499 };
5500
5501 sched_scan (&ssi, bbs, bb, insns, insn);
5502 }
5503
5504 /* Free LUIDs. */
5505 void
5506 sched_finish_luids (void)
5507 {
5508 VEC_free (int, heap, sched_luids);
5509 sched_max_luid = 1;
5510 }
5511
5512 /* Return logical uid of INSN. Helpful while debugging. */
5513 int
5514 insn_luid (rtx insn)
5515 {
5516 return INSN_LUID (insn);
5517 }
5518
5519 /* Extend per insn data in the target. */
5520 void
5521 sched_extend_target (void)
5522 {
5523 if (targetm.sched.h_i_d_extended)
5524 targetm.sched.h_i_d_extended ();
5525 }
5526
5527 /* Extend global scheduler structures (those, that live across calls to
5528 schedule_block) to include information about just emitted INSN. */
5529 static void
5530 extend_h_i_d (void)
5531 {
5532 int reserve = (get_max_uid () + 1
5533 - VEC_length (haifa_insn_data_def, h_i_d));
5534 if (reserve > 0
5535 && ! VEC_space (haifa_insn_data_def, h_i_d, reserve))
5536 {
5537 VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d,
5538 3 * get_max_uid () / 2);
5539 sched_extend_target ();
5540 }
5541 }
5542
5543 /* Initialize h_i_d entry of the INSN with default values.
5544 Values, that are not explicitly initialized here, hold zero. */
5545 static void
5546 init_h_i_d (rtx insn)
5547 {
5548 if (INSN_LUID (insn) > 0)
5549 {
5550 INSN_COST (insn) = -1;
5551 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
5552 INSN_TICK (insn) = INVALID_TICK;
5553 INTER_TICK (insn) = INVALID_TICK;
5554 TODO_SPEC (insn) = HARD_DEP;
5555 }
5556 }
5557
5558 /* Initialize haifa_insn_data for BBS, BB, INSNS and INSN. */
5559 void
5560 haifa_init_h_i_d (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
5561 {
5562 const struct sched_scan_info_def ssi =
5563 {
5564 NULL, /* extend_bb */
5565 NULL, /* init_bb */
5566 extend_h_i_d, /* extend_insn */
5567 init_h_i_d /* init_insn */
5568 };
5569
5570 sched_scan (&ssi, bbs, bb, insns, insn);
5571 }
5572
5573 /* Finalize haifa_insn_data. */
5574 void
5575 haifa_finish_h_i_d (void)
5576 {
5577 int i;
5578 haifa_insn_data_t data;
5579 struct reg_use_data *use, *next;
5580
5581 FOR_EACH_VEC_ELT (haifa_insn_data_def, h_i_d, i, data)
5582 {
5583 if (data->reg_pressure != NULL)
5584 free (data->reg_pressure);
5585 for (use = data->reg_use_list; use != NULL; use = next)
5586 {
5587 next = use->next_insn_use;
5588 free (use);
5589 }
5590 }
5591 VEC_free (haifa_insn_data_def, heap, h_i_d);
5592 }
5593
5594 /* Init data for the new insn INSN. */
5595 static void
5596 haifa_init_insn (rtx insn)
5597 {
5598 gcc_assert (insn != NULL);
5599
5600 sched_init_luids (NULL, NULL, NULL, insn);
5601 sched_extend_target ();
5602 sched_deps_init (false);
5603 haifa_init_h_i_d (NULL, NULL, NULL, insn);
5604
5605 if (adding_bb_to_current_region_p)
5606 {
5607 sd_init_insn (insn);
5608
5609 /* Extend dependency caches by one element. */
5610 extend_dependency_caches (1, false);
5611 }
5612 }
5613
5614 /* Init data for the new basic block BB which comes after AFTER. */
5615 static void
5616 haifa_init_only_bb (basic_block bb, basic_block after)
5617 {
5618 gcc_assert (bb != NULL);
5619
5620 sched_init_bbs ();
5621
5622 if (common_sched_info->add_block)
5623 /* This changes only data structures of the front-end. */
5624 common_sched_info->add_block (bb, after);
5625 }
5626
5627 /* A generic version of sched_split_block (). */
5628 basic_block
5629 sched_split_block_1 (basic_block first_bb, rtx after)
5630 {
5631 edge e;
5632
5633 e = split_block (first_bb, after);
5634 gcc_assert (e->src == first_bb);
5635
5636 /* sched_split_block emits note if *check == BB_END. Probably it
5637 is better to rip that note off. */
5638
5639 return e->dest;
5640 }
5641
5642 /* A generic version of sched_create_empty_bb (). */
5643 basic_block
5644 sched_create_empty_bb_1 (basic_block after)
5645 {
5646 return create_empty_bb (after);
5647 }
5648
5649 /* Insert PAT as an INSN into the schedule and update the necessary data
5650 structures to account for it. */
5651 rtx
5652 sched_emit_insn (rtx pat)
5653 {
5654 rtx insn = emit_insn_after (pat, last_scheduled_insn);
5655 last_scheduled_insn = insn;
5656 haifa_init_insn (insn);
5657 return insn;
5658 }
5659
5660 /* This function returns a candidate satisfying dispatch constraints from
5661 the ready list. */
5662
5663 static rtx
5664 ready_remove_first_dispatch (struct ready_list *ready)
5665 {
5666 int i;
5667 rtx insn = ready_element (ready, 0);
5668
5669 if (ready->n_ready == 1
5670 || INSN_CODE (insn) < 0
5671 || !INSN_P (insn)
5672 || !active_insn_p (insn)
5673 || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
5674 return ready_remove_first (ready);
5675
5676 for (i = 1; i < ready->n_ready; i++)
5677 {
5678 insn = ready_element (ready, i);
5679
5680 if (INSN_CODE (insn) < 0
5681 || !INSN_P (insn)
5682 || !active_insn_p (insn))
5683 continue;
5684
5685 if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
5686 {
5687 /* Return ith element of ready. */
5688 insn = ready_remove (ready, i);
5689 return insn;
5690 }
5691 }
5692
5693 if (targetm.sched.dispatch (NULL_RTX, DISPATCH_VIOLATION))
5694 return ready_remove_first (ready);
5695
5696 for (i = 1; i < ready->n_ready; i++)
5697 {
5698 insn = ready_element (ready, i);
5699
5700 if (INSN_CODE (insn) < 0
5701 || !INSN_P (insn)
5702 || !active_insn_p (insn))
5703 continue;
5704
5705 /* Return i-th element of ready. */
5706 if (targetm.sched.dispatch (insn, IS_CMP))
5707 return ready_remove (ready, i);
5708 }
5709
5710 return ready_remove_first (ready);
5711 }
5712
5713 /* Get number of ready insn in the ready list. */
5714
5715 int
5716 number_in_ready (void)
5717 {
5718 return ready.n_ready;
5719 }
5720
5721 /* Get number of ready's in the ready list. */
5722
5723 rtx
5724 get_ready_element (int i)
5725 {
5726 return ready_element (&ready, i);
5727 }
5728
5729 #endif /* INSN_SCHEDULING */