1 /* Instruction scheduling pass.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
6 and currently maintained by, Jim Wilson (wilson@cygnus.com)
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 /* Instruction scheduling pass. This file, along with sched-deps.c,
25 contains the generic parts. The entry point for the normal
26 instruction scheduling pass is found in sched-rgn.c.
27
28 We compute insn priorities based on data dependencies. Flow
29 analysis only creates a fraction of the data-dependencies we must
30 observe: namely, only those dependencies which the combiner can be
31 expected to use. For this pass, we must therefore create the
32 remaining dependencies we need to observe: register dependencies,
33 memory dependencies, dependencies to keep function calls in order,
34 and the dependence between a conditional branch and the setting of
35 condition codes are all dealt with here.
36
37 The scheduler first traverses the data flow graph, starting with
38 the last instruction, and proceeding to the first, assigning values
39 to insn_priority as it goes. This sorts the instructions
40 topologically by data dependence.
41
42 Once priorities have been established, we order the insns using
43 list scheduling. This works as follows: starting with a list of
44 all the ready insns, and sorted according to priority number, we
45 schedule the insn from the end of the list by placing its
46 predecessors in the list according to their priority order. We
47 consider this insn scheduled by setting the pointer to the "end" of
48 the list to point to the previous insn. When an insn has no
49 predecessors, we either queue it until sufficient time has elapsed
50 or add it to the ready list. As the instructions are scheduled or
51 when stalls are introduced, the queue advances and dumps insns into
52 the ready list. When all insns down to the lowest priority have
53 been scheduled, the critical path of the basic block has been made
54 as short as possible. The remaining insns are then scheduled in
55 remaining slots.
56
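As a rough sketch only (not the exact control flow of schedule_block;
queue handling, stalls, speculation and target hooks are omitted), the
main scheduling loop looks like:

    while (some insn remains unscheduled)
      {
        sort the ready list with rank_for_schedule;
        pick the best ready insn and emit it;
        for each forward dependence of that insn,
          if the consumer has no more unresolved back deps,
            put it on the ready list, or on the queue if its
            tick lies in the future;
        advance the cycle and move queued insns whose delay
          has expired back onto the ready list;
      }
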
57 The following list shows the order in which we want to break ties
58 among insns in the ready list:
59
60 1. choose insn with the longest path to end of bb, ties
61 broken by
62 2. choose insn with least contribution to register pressure,
63 ties broken by
64 3. prefer in-block over interblock motion, ties broken by
65 4. prefer useful over speculative motion, ties broken by
66 5. choose insn with largest control flow probability, ties
67 broken by
68 6. choose insn with the least dependences upon the previously
69 scheduled insn, ties broken by
70 7. choose the insn which has the most insns dependent on it, or finally
71 8. choose insn with lowest UID.
72
73 Memory references complicate matters. Only if we can be certain
74 that memory references are not part of the data dependency graph
75 (via true, anti, or output dependence), can we move operations past
76 memory references. To first approximation, reads can be done
77 independently, while writes introduce dependencies. Better
78 approximations will yield fewer dependencies.
79
80 Before reload, an extended analysis of interblock data dependences
81 is required for interblock scheduling. This is performed in
82 compute_block_backward_dependences ().
83
84 Dependencies set up by memory references are treated in exactly the
85 same way as other dependencies, by using insn backward dependences
86 INSN_BACK_DEPS. INSN_BACK_DEPS are translated into forward dependences
87 INSN_FORW_DEPS for the purpose of forward list scheduling.
88
89 Having optimized the critical path, we may have also unduly
90 extended the lifetimes of some registers. If an operation requires
91 that constants be loaded into registers, it is certainly desirable
92 to load those constants as early as necessary, but no earlier.
93 I.e., it will not do to load up a bunch of registers at the
94 beginning of a basic block only to use them at the end, if they
95 could be loaded later, since this may result in excessive register
96 utilization.
97
98 Note that since branches are never in basic blocks, but only end
99 basic blocks, this pass will not move branches. But that is ok,
100 since we can use GNU's delayed branch scheduling pass to take care
101 of this case.
102
103 Also note that no further optimizations based on algebraic
104 identities are performed, so this pass would be a good one to
105 perform instruction splitting, such as breaking up a multiply
106 instruction into shifts and adds where that is profitable.
107
108 Given the memory aliasing analysis that this pass should perform,
109 it should be possible to remove redundant stores to memory, and to
110 load values from registers instead of hitting memory.
111
112 Before reload, speculative insns are moved only if a 'proof' exists
113 that no exception will be caused by this, and if no live registers
114 exist that inhibit the motion (live register constraints are not
115 represented by data dependence edges).
116
117 This pass must update information that subsequent passes expect to
118 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
119 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
120
121 The information in the line number notes is carefully retained by
122 this pass. Notes that refer to the starting and ending of
123 exception regions are also carefully retained by this pass. All
124 other NOTE insns are grouped in their same relative order at the
125 beginning of basic blocks and regions that have been scheduled. */
126 \f
127 #include "config.h"
128 #include "system.h"
129 #include "coretypes.h"
130 #include "tm.h"
131 #include "diagnostic-core.h"
132 #include "rtl.h"
133 #include "tm_p.h"
134 #include "hard-reg-set.h"
135 #include "regs.h"
136 #include "function.h"
137 #include "flags.h"
138 #include "insn-config.h"
139 #include "insn-attr.h"
140 #include "except.h"
141 #include "recog.h"
142 #include "sched-int.h"
143 #include "target.h"
144 #include "output.h"
145 #include "params.h"
146 #include "vecprim.h"
147 #include "dbgcnt.h"
148 #include "cfgloop.h"
149 #include "ira.h"
150 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
151
152 #ifdef INSN_SCHEDULING
153
154 /* issue_rate is the number of insns that can be scheduled in the same
155 machine cycle. It can be defined in the config/mach/mach.h file,
156 otherwise we set it to 1. */
157
158 int issue_rate;
159
160 /* sched-verbose controls the amount of debugging output the
161 scheduler prints. It is controlled by -fsched-verbose=N:
162 N>0 and no -dSR: the output is directed to stderr.
163 N>=10 will direct the printouts to stderr (regardless of -dSR).
164 N=1: same as -dSR.
165 N=2: bb's probabilities, detailed ready list info, unit/insn info.
166 N=3: rtl at abort point, control-flow, regions info.
167 N=5: dependences info. */
168
169 int sched_verbose = 0;
170
171 /* Debugging file. All printouts are sent to dump, which is always set,
172 either to stderr, or to the dump listing file (-dSR).
173 FILE *sched_dump = 0;
174
175 /* This is a placeholder for the scheduler parameters common
176 to all schedulers. */
177 struct common_sched_info_def *common_sched_info;
178
179 #define INSN_TICK(INSN) (HID (INSN)->tick)
180 #define INTER_TICK(INSN) (HID (INSN)->inter_tick)
181
182 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
183 then it should be recalculated from scratch. */
184 #define INVALID_TICK (-(max_insn_queue_index + 1))
185 /* The minimal value of the INSN_TICK of an instruction. */
186 #define MIN_TICK (-max_insn_queue_index)
187
188 /* List of important notes we must keep around. This is a pointer to the
189 last element in the list. */
190 rtx note_list;
191
192 static struct spec_info_def spec_info_var;
193 /* Description of the speculative part of the scheduling.
194 If NULL - no speculation. */
195 spec_info_t spec_info = NULL;
196
197 /* True if a recovery block was added during scheduling of the current block.
198 Used to determine if we need to fix INSN_TICKs. */
199 static bool haifa_recovery_bb_recently_added_p;
200
201 /* True if a recovery block was added during this scheduling pass.
202 Used to determine if we should have empty memory pools of dependencies
203 after finishing the current region. */
204 bool haifa_recovery_bb_ever_added_p;
205
206 /* Counters of different types of speculative instructions. */
207 static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
208
209 /* Array used in {unlink, restore}_bb_notes. */
210 static rtx *bb_header = 0;
211
212 /* Basic block after which recovery blocks will be created. */
213 static basic_block before_recovery;
214
215 /* Basic block just before the EXIT_BLOCK and after recovery, if we have
216 created it. */
217 basic_block after_recovery;
218
219 /* FALSE if we add bb to another region, so we don't need to initialize it. */
220 bool adding_bb_to_current_region_p = true;
221
222 /* Queues, etc. */
223
224 /* An instruction is ready to be scheduled when all insns preceding it
225 have already been scheduled. It is important to ensure that all
226 insns which use its result will not be executed until its result
227 has been computed. An insn is maintained in one of four structures:
228
229 (P) the "Pending" set of insns which cannot be scheduled until
230 their dependencies have been satisfied.
231 (Q) the "Queued" set of insns that can be scheduled when sufficient
232 time has passed.
233 (R) the "Ready" list of unscheduled, uncommitted insns.
234 (S) the "Scheduled" list of insns.
235
236 Initially, all insns are either "Pending" or "Ready" depending on
237 whether their dependencies are satisfied.
238
239 Insns move from the "Ready" list to the "Scheduled" list as they
240 are committed to the schedule. As this occurs, the insns in the
241 "Pending" list have their dependencies satisfied and move to either
242 the "Ready" list or the "Queued" set depending on whether
243 sufficient time has passed to make them ready. As time passes,
244 insns move from the "Queued" set to the "Ready" list.
245
246 The "Pending" list (P) consists of the insns in the INSN_FORW_DEPS of the
247 unscheduled insns, i.e., those that are ready, queued, and pending.
248 The "Queued" set (Q) is implemented by the variable `insn_queue'.
249 The "Ready" list (R) is implemented by the variables `ready' and
250 `n_ready'.
251 The "Scheduled" list (S) is the new insn chain built by this pass.
252
253 The transition (R->S) is implemented in the scheduling loop in
254 `schedule_block' when the best insn to schedule is chosen.
255 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
256 insns move from the ready list to the scheduled list.
257 The transition (Q->R) is implemented in 'queue_to_insn' as time
258 passes or stalls are introduced. */
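/* For illustration only, a rough sketch of these transitions (the real
   code lives in schedule_insn, try_ready and queue_to_ready; speculation
   and debug insns are ignored here):

     resolving the last back dep of NEXT   (P -> R or P -> Q)
       if INSN_TICK (NEXT) <= clock_var, NEXT is added to the ready
       list; otherwise it is queued for INSN_TICK (NEXT) - clock_var
       more cycles;

     advancing the clock                   (Q -> R)
       queue_to_ready moves every queued insn whose delay has expired
       onto the ready list.  */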
259
260 /* Implement a circular buffer to delay instructions until sufficient
261 time has passed. For the new pipeline description interface,
262 MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
263 than the maximal instruction execution time computed by genattr.c,
264 based on the maximal time of functional unit reservations and of
265 getting a result. This is the longest time an insn may be queued. */
266
267 static rtx *insn_queue;
268 static int q_ptr = 0;
269 static int q_size = 0;
270 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
271 #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
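/* For example, assuming max_insn_queue_index == 63 (a power of two minus
   one) and q_ptr == 60, an insn queued for 5 cycles goes into
   insn_queue[NEXT_Q_AFTER (q_ptr, 5)] == insn_queue[(60 + 5) & 63]
   == insn_queue[1], i.e. the circular buffer simply wraps around.  */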
272
273 #define QUEUE_SCHEDULED (-3)
274 #define QUEUE_NOWHERE (-2)
275 #define QUEUE_READY (-1)
276 /* QUEUE_SCHEDULED - INSN is scheduled.
277 QUEUE_NOWHERE - INSN isn't scheduled yet and is neither in
278 queue or ready list.
279 QUEUE_READY - INSN is in ready list.
280 N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
281
282 #define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
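/* For example, an insn placed in the queue for three cycles by queue_insn
   gets QUEUE_INDEX == NEXT_Q_AFTER (q_ptr, 3); once queue_to_ready (or
   ready_add) moves it to the ready list the index becomes QUEUE_READY,
   and schedule_insn finally marks it QUEUE_SCHEDULED.  */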
283
284 /* The following variable describes all current and future
285 reservations of the processor units. */
286 state_t curr_state;
287
288 /* The following variable is the size of the memory representing all
289 current and future reservations of the processor units. */
290 size_t dfa_state_size;
291
292 /* The following array is used to find the best insn from ready when
293 the automaton pipeline interface is used. */
294 char *ready_try = NULL;
295
296 /* The ready list. */
297 struct ready_list ready = {NULL, 0, 0, 0, 0};
298
299 /* The pointer to the ready list (to be removed). */
300 static struct ready_list *readyp = &ready;
301
302 /* Scheduling clock. */
303 static int clock_var;
304
305 static int may_trap_exp (const_rtx, int);
306
307 /* Nonzero iff the address is composed of at most 1 register. */
308 #define CONST_BASED_ADDRESS_P(x) \
309 (REG_P (x) \
310 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
311 || (GET_CODE (x) == LO_SUM)) \
312 && (CONSTANT_P (XEXP (x, 0)) \
313 || CONSTANT_P (XEXP (x, 1)))))
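/* For instance, (reg 100), (plus (reg 100) (const_int 8)) and
   (lo_sum (reg 100) (symbol_ref "x")) all satisfy this predicate, while
   (plus (reg 100) (reg 101)) does not, since that address is built from
   two registers.  */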
314
315 /* Returns a class that insn with GET_DEST(insn)=x may belong to,
316 as found by analyzing insn's expression. */
317
318 \f
319 static int haifa_luid_for_non_insn (rtx x);
320
321 /* Haifa version of sched_info hooks common to all headers. */
322 const struct common_sched_info_def haifa_common_sched_info =
323 {
324 NULL, /* fix_recovery_cfg */
325 NULL, /* add_block */
326 NULL, /* estimate_number_of_insns */
327 haifa_luid_for_non_insn, /* luid_for_non_insn */
328 SCHED_PASS_UNKNOWN /* sched_pass_id */
329 };
330
331 const struct sched_scan_info_def *sched_scan_info;
332
333 /* Mapping from instruction UID to its Logical UID. */
334 VEC (int, heap) *sched_luids = NULL;
335
336 /* Next LUID to assign to an instruction. */
337 int sched_max_luid = 1;
338
339 /* Haifa Instruction Data. */
340 VEC (haifa_insn_data_def, heap) *h_i_d = NULL;
341
342 void (* sched_init_only_bb) (basic_block, basic_block);
343
344 /* Split block function. Different schedulers might use different functions
345 to keep their internal data consistent. */
346 basic_block (* sched_split_block) (basic_block, rtx);
347
348 /* Create empty basic block after the specified block. */
349 basic_block (* sched_create_empty_bb) (basic_block);
350
351 static int
352 may_trap_exp (const_rtx x, int is_store)
353 {
354 enum rtx_code code;
355
356 if (x == 0)
357 return TRAP_FREE;
358 code = GET_CODE (x);
359 if (is_store)
360 {
361 if (code == MEM && may_trap_p (x))
362 return TRAP_RISKY;
363 else
364 return TRAP_FREE;
365 }
366 if (code == MEM)
367 {
368 /* The insn uses memory: a volatile load. */
369 if (MEM_VOLATILE_P (x))
370 return IRISKY;
371 /* An exception-free load. */
372 if (!may_trap_p (x))
373 return IFREE;
374 /* A load with 1 base register, to be further checked. */
375 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
376 return PFREE_CANDIDATE;
377 /* No info on the load, to be further checked. */
378 return PRISKY_CANDIDATE;
379 }
380 else
381 {
382 const char *fmt;
383 int i, insn_class = TRAP_FREE;
384
385 /* Neither store nor load, check if it may cause a trap. */
386 if (may_trap_p (x))
387 return TRAP_RISKY;
388 /* Recursive step: walk the insn... */
389 fmt = GET_RTX_FORMAT (code);
390 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
391 {
392 if (fmt[i] == 'e')
393 {
394 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
395 insn_class = WORST_CLASS (insn_class, tmp_class);
396 }
397 else if (fmt[i] == 'E')
398 {
399 int j;
400 for (j = 0; j < XVECLEN (x, i); j++)
401 {
402 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
403 insn_class = WORST_CLASS (insn_class, tmp_class);
404 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
405 break;
406 }
407 }
408 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
409 break;
410 }
411 return insn_class;
412 }
413 }
414
415 /* Classifies rtx X of an insn for the purpose of verifying that X can be
416 executed speculatively (and consequently the insn can be moved
417 speculatively), by examining X, returning:
418 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
419 TRAP_FREE: non-load insn.
420 IFREE: load from a globally safe location.
421 IRISKY: volatile load.
422 PFREE_CANDIDATE, PRISKY_CANDIDATE: load that needs to be checked for
423 being either PFREE or PRISKY. */
424
425 static int
426 haifa_classify_rtx (const_rtx x)
427 {
428 int tmp_class = TRAP_FREE;
429 int insn_class = TRAP_FREE;
430 enum rtx_code code;
431
432 if (GET_CODE (x) == PARALLEL)
433 {
434 int i, len = XVECLEN (x, 0);
435
436 for (i = len - 1; i >= 0; i--)
437 {
438 tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
439 insn_class = WORST_CLASS (insn_class, tmp_class);
440 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
441 break;
442 }
443 }
444 else
445 {
446 code = GET_CODE (x);
447 switch (code)
448 {
449 case CLOBBER:
450 /* Test if it is a 'store'. */
451 tmp_class = may_trap_exp (XEXP (x, 0), 1);
452 break;
453 case SET:
454 /* Test if it is a store. */
455 tmp_class = may_trap_exp (SET_DEST (x), 1);
456 if (tmp_class == TRAP_RISKY)
457 break;
458 /* Test if it is a load. */
459 tmp_class =
460 WORST_CLASS (tmp_class,
461 may_trap_exp (SET_SRC (x), 0));
462 break;
463 case COND_EXEC:
464 tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
465 if (tmp_class == TRAP_RISKY)
466 break;
467 tmp_class = WORST_CLASS (tmp_class,
468 may_trap_exp (COND_EXEC_TEST (x), 0));
469 break;
470 case TRAP_IF:
471 tmp_class = TRAP_RISKY;
472 break;
473 default:;
474 }
475 insn_class = tmp_class;
476 }
477
478 return insn_class;
479 }
480
481 int
482 haifa_classify_insn (const_rtx insn)
483 {
484 return haifa_classify_rtx (PATTERN (insn));
485 }
486
487 /* Forward declarations. */
488
489 static int priority (rtx);
490 static int rank_for_schedule (const void *, const void *);
491 static void swap_sort (rtx *, int);
492 static void queue_insn (rtx, int);
493 static int schedule_insn (rtx);
494 static void adjust_priority (rtx);
495 static void advance_one_cycle (void);
496 static void extend_h_i_d (void);
497
498
499 /* Notes handling mechanism:
500 =========================
501 Generally, NOTES are saved before scheduling and restored after scheduling.
502 The scheduler distinguishes between two types of notes:
503
504 (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
505 Before scheduling a region, a pointer to the note is added to the insn
506 that follows or precedes it. (This happens as part of the data dependence
507 computation). After scheduling an insn, the pointer contained in it is
508 used for regenerating the corresponding note (in reemit_notes).
509
510 (2) All other notes (e.g. INSN_DELETED): Before scheduling a block,
511 these notes are put in a list (in rm_other_notes() and
512 unlink_other_notes ()). After scheduling the block, these notes are
513 inserted at the beginning of the block (in schedule_block()). */
514
515 static void ready_add (struct ready_list *, rtx, bool);
516 static rtx ready_remove_first (struct ready_list *);
517 static rtx ready_remove_first_dispatch (struct ready_list *ready);
518
519 static void queue_to_ready (struct ready_list *);
520 static int early_queue_to_ready (state_t, struct ready_list *);
521
522 static void debug_ready_list (struct ready_list *);
523
524 /* The following functions are used to implement multi-pass scheduling
525 on the first cycle. */
526 static rtx ready_remove (struct ready_list *, int);
527 static void ready_remove_insn (rtx);
528
529 static void fix_inter_tick (rtx, rtx);
530 static int fix_tick_ready (rtx);
531 static void change_queue_index (rtx, int);
532
533 /* The following functions are used to implement scheduling of data/control
534 speculative instructions. */
535
536 static void extend_h_i_d (void);
537 static void init_h_i_d (rtx);
538 static void generate_recovery_code (rtx);
539 static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
540 static void begin_speculative_block (rtx);
541 static void add_to_speculative_block (rtx);
542 static void init_before_recovery (basic_block *);
543 static void create_check_block_twin (rtx, bool);
544 static void fix_recovery_deps (basic_block);
545 static void haifa_change_pattern (rtx, rtx);
546 static void dump_new_block_header (int, basic_block, rtx, rtx);
547 static void restore_bb_notes (basic_block);
548 static void fix_jump_move (rtx);
549 static void move_block_after_check (rtx);
550 static void move_succs (VEC(edge,gc) **, basic_block);
551 static void sched_remove_insn (rtx);
552 static void clear_priorities (rtx, rtx_vec_t *);
553 static void calc_priorities (rtx_vec_t);
554 static void add_jump_dependencies (rtx, rtx);
555 #ifdef ENABLE_CHECKING
556 static int has_edge_p (VEC(edge,gc) *, int);
557 static void check_cfg (rtx, rtx);
558 #endif
559
560 #endif /* INSN_SCHEDULING */
561 \f
562 /* Point to state used for the current scheduling pass. */
563 struct haifa_sched_info *current_sched_info;
564 \f
565 #ifndef INSN_SCHEDULING
566 void
567 schedule_insns (void)
568 {
569 }
570 #else
571
572 /* Do register pressure sensitive insn scheduling if the flag is
573 set. */
574 bool sched_pressure_p;
575
576 /* Map regno -> its cover class. The map is defined only when
577 SCHED_PRESSURE_P is true. */
578 enum reg_class *sched_regno_cover_class;
579
580 /* The current register pressure. Only elements corresponding to cover
581 classes are defined. */
582 static int curr_reg_pressure[N_REG_CLASSES];
583
584 /* Saved value of the previous array. */
585 static int saved_reg_pressure[N_REG_CLASSES];
586
587 /* Registers living at the given scheduling point. */
588 static bitmap curr_reg_live;
589
590 /* Saved value of the previous array. */
591 static bitmap saved_reg_live;
592
593 /* Registers mentioned in the current region. */
594 static bitmap region_ref_regs;
595
596 /* Initiate register pressure related info for scheduling the current
597 region. Currently it only clears the registers mentioned in the
598 current region. */
599 void
600 sched_init_region_reg_pressure_info (void)
601 {
602 bitmap_clear (region_ref_regs);
603 }
604
605 /* Update current register pressure related info after birth (if
606 BIRTH_P) or death of register REGNO. */
607 static void
608 mark_regno_birth_or_death (int regno, bool birth_p)
609 {
610 enum reg_class cover_class;
611
612 cover_class = sched_regno_cover_class[regno];
613 if (regno >= FIRST_PSEUDO_REGISTER)
614 {
615 if (cover_class != NO_REGS)
616 {
617 if (birth_p)
618 {
619 bitmap_set_bit (curr_reg_live, regno);
620 curr_reg_pressure[cover_class]
621 += ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)];
622 }
623 else
624 {
625 bitmap_clear_bit (curr_reg_live, regno);
626 curr_reg_pressure[cover_class]
627 -= ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)];
628 }
629 }
630 }
631 else if (cover_class != NO_REGS
632 && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
633 {
634 if (birth_p)
635 {
636 bitmap_set_bit (curr_reg_live, regno);
637 curr_reg_pressure[cover_class]++;
638 }
639 else
640 {
641 bitmap_clear_bit (curr_reg_live, regno);
642 curr_reg_pressure[cover_class]--;
643 }
644 }
645 }
646
647 /* Initiate current register pressure related info from living
648 registers given by LIVE. */
649 static void
650 initiate_reg_pressure_info (bitmap live)
651 {
652 int i;
653 unsigned int j;
654 bitmap_iterator bi;
655
656 for (i = 0; i < ira_reg_class_cover_size; i++)
657 curr_reg_pressure[ira_reg_class_cover[i]] = 0;
658 bitmap_clear (curr_reg_live);
659 EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
660 if (current_nr_blocks == 1 || bitmap_bit_p (region_ref_regs, j))
661 mark_regno_birth_or_death (j, true);
662 }
663
664 /* Mark registers in X as mentioned in the current region. */
665 static void
666 setup_ref_regs (rtx x)
667 {
668 int i, j, regno;
669 const RTX_CODE code = GET_CODE (x);
670 const char *fmt;
671
672 if (REG_P (x))
673 {
674 regno = REGNO (x);
675 if (regno >= FIRST_PSEUDO_REGISTER)
676 bitmap_set_bit (region_ref_regs, REGNO (x));
677 else
678 for (i = hard_regno_nregs[regno][GET_MODE (x)] - 1; i >= 0; i--)
679 bitmap_set_bit (region_ref_regs, regno + i);
680 return;
681 }
682 fmt = GET_RTX_FORMAT (code);
683 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
684 if (fmt[i] == 'e')
685 setup_ref_regs (XEXP (x, i));
686 else if (fmt[i] == 'E')
687 {
688 for (j = 0; j < XVECLEN (x, i); j++)
689 setup_ref_regs (XVECEXP (x, i, j));
690 }
691 }
692
693 /* Initiate current register pressure related info at the start of
694 basic block BB. */
695 static void
696 initiate_bb_reg_pressure_info (basic_block bb)
697 {
698 unsigned int i ATTRIBUTE_UNUSED;
699 rtx insn;
700
701 if (current_nr_blocks > 1)
702 FOR_BB_INSNS (bb, insn)
703 if (NONDEBUG_INSN_P (insn))
704 setup_ref_regs (PATTERN (insn));
705 initiate_reg_pressure_info (df_get_live_in (bb));
706 #ifdef EH_RETURN_DATA_REGNO
707 if (bb_has_eh_pred (bb))
708 for (i = 0; ; ++i)
709 {
710 unsigned int regno = EH_RETURN_DATA_REGNO (i);
711
712 if (regno == INVALID_REGNUM)
713 break;
714 if (! bitmap_bit_p (df_get_live_in (bb), regno))
715 mark_regno_birth_or_death (regno, true);
716 }
717 #endif
718 }
719
720 /* Save current register pressure related info. */
721 static void
722 save_reg_pressure (void)
723 {
724 int i;
725
726 for (i = 0; i < ira_reg_class_cover_size; i++)
727 saved_reg_pressure[ira_reg_class_cover[i]]
728 = curr_reg_pressure[ira_reg_class_cover[i]];
729 bitmap_copy (saved_reg_live, curr_reg_live);
730 }
731
732 /* Restore saved register pressure related info. */
733 static void
734 restore_reg_pressure (void)
735 {
736 int i;
737
738 for (i = 0; i < ira_reg_class_cover_size; i++)
739 curr_reg_pressure[ira_reg_class_cover[i]]
740 = saved_reg_pressure[ira_reg_class_cover[i]];
741 bitmap_copy (curr_reg_live, saved_reg_live);
742 }
743
744 /* Return TRUE if the register is dying after its USE. */
745 static bool
746 dying_use_p (struct reg_use_data *use)
747 {
748 struct reg_use_data *next;
749
750 for (next = use->next_regno_use; next != use; next = next->next_regno_use)
751 if (NONDEBUG_INSN_P (next->insn)
752 && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
753 return false;
754 return true;
755 }
756
757 /* Print info about the current register pressure and its excess for
758 each cover class. */
759 static void
760 print_curr_reg_pressure (void)
761 {
762 int i;
763 enum reg_class cl;
764
765 fprintf (sched_dump, ";;\t");
766 for (i = 0; i < ira_reg_class_cover_size; i++)
767 {
768 cl = ira_reg_class_cover[i];
769 gcc_assert (curr_reg_pressure[cl] >= 0);
770 fprintf (sched_dump, " %s:%d(%d)", reg_class_names[cl],
771 curr_reg_pressure[cl],
772 curr_reg_pressure[cl] - ira_available_class_regs[cl]);
773 }
774 fprintf (sched_dump, "\n");
775 }
776
777 /* Pointer to the last instruction scheduled. Used by rank_for_schedule,
778 so that insns independent of the last scheduled insn will be preferred
779 over dependent instructions. */
780
781 static rtx last_scheduled_insn;
782
783 /* Cached cost of the instruction. Use the function below to get the cost
784 of the insn. -1 here means that the field is not initialized. */
785 #define INSN_COST(INSN) (HID (INSN)->cost)
786
787 /* Compute cost of executing INSN.
788 This is the number of cycles between instruction issue and
789 instruction results. */
790 int
791 insn_cost (rtx insn)
792 {
793 int cost;
794
795 if (sel_sched_p ())
796 {
797 if (recog_memoized (insn) < 0)
798 return 0;
799
800 cost = insn_default_latency (insn);
801 if (cost < 0)
802 cost = 0;
803
804 return cost;
805 }
806
807 cost = INSN_COST (insn);
808
809 if (cost < 0)
810 {
811 /* A USE insn, or something else we don't need to
812 understand. We can't pass these directly to
813 result_ready_cost or insn_default_latency because it will
814 trigger a fatal error for unrecognizable insns. */
815 if (recog_memoized (insn) < 0)
816 {
817 INSN_COST (insn) = 0;
818 return 0;
819 }
820 else
821 {
822 cost = insn_default_latency (insn);
823 if (cost < 0)
824 cost = 0;
825
826 INSN_COST (insn) = cost;
827 }
828 }
829
830 return cost;
831 }
832
833 /* Compute cost of dependence LINK.
834 This is the number of cycles between instruction issue and
835 instruction results.
836 ??? We also use this function to call recog_memoized on all insns. */
837 int
838 dep_cost_1 (dep_t link, dw_t dw)
839 {
840 rtx insn = DEP_PRO (link);
841 rtx used = DEP_CON (link);
842 int cost;
843
844 /* A USE insn should never require the value used to be computed.
845 This allows the computation of a function's result and parameter
846 values to overlap the return and call. We don't care about the
847 dependence cost when only decreasing register pressure. */
848 if (recog_memoized (used) < 0)
849 {
850 cost = 0;
851 recog_memoized (insn);
852 }
853 else
854 {
855 enum reg_note dep_type = DEP_TYPE (link);
856
857 cost = insn_cost (insn);
858
859 if (INSN_CODE (insn) >= 0)
860 {
861 if (dep_type == REG_DEP_ANTI)
862 cost = 0;
863 else if (dep_type == REG_DEP_OUTPUT)
864 {
865 cost = (insn_default_latency (insn)
866 - insn_default_latency (used));
867 if (cost <= 0)
868 cost = 1;
869 }
870 else if (bypass_p (insn))
871 cost = insn_latency (insn, used);
872 }
873
874
875 if (targetm.sched.adjust_cost_2)
876 cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
877 dw);
878 else if (targetm.sched.adjust_cost != NULL)
879 {
880 /* This variable is used for backward compatibility with the
881 targets. */
882 rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);
883
884 /* Make it self-cycled, so that if someone tries to walk over this
885 incomplete list they will be caught in an endless loop. */
886 XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;
887
888 /* Targets use only REG_NOTE_KIND of the link. */
889 PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));
890
891 cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
892 insn, cost);
893
894 free_INSN_LIST_node (dep_cost_rtx_link);
895 }
896
897 if (cost < 0)
898 cost = 0;
899 }
900
901 return cost;
902 }
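/* A small worked example of the latency logic above (illustrative numbers,
   not taken from any particular target): for a REG_DEP_OUTPUT dependence
   where the producer's default latency is 3 and the consumer's is 1, the
   cost is 3 - 1 = 2; a zero or negative difference is clamped to 1, so the
   second write always issues at least one cycle later.  REG_DEP_ANTI
   dependences cost 0, and true dependences default to the producer's full
   latency unless a bypass or a target adjust_cost hook changes the
   value.  */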
903
904 /* Compute cost of dependence LINK.
905 This is the number of cycles between instruction issue and
906 instruction results. */
907 int
908 dep_cost (dep_t link)
909 {
910 return dep_cost_1 (link, 0);
911 }
912
913 /* Use this sel-sched.c friendly function in reorder2 instead of increasing
914 INSN_PRIORITY explicitly. */
915 void
916 increase_insn_priority (rtx insn, int amount)
917 {
918 if (!sel_sched_p ())
919 {
920 /* We're dealing with haifa-sched.c INSN_PRIORITY. */
921 if (INSN_PRIORITY_KNOWN (insn))
922 INSN_PRIORITY (insn) += amount;
923 }
924 else
925 {
926 /* In sel-sched.c INSN_PRIORITY is not kept up to date.
927 Use EXPR_PRIORITY instead. */
928 sel_add_to_insn_priority (insn, amount);
929 }
930 }
931
932 /* Return 'true' if DEP should be included in priority calculations. */
933 static bool
934 contributes_to_priority_p (dep_t dep)
935 {
936 if (DEBUG_INSN_P (DEP_CON (dep))
937 || DEBUG_INSN_P (DEP_PRO (dep)))
938 return false;
939
940 /* Critical path is meaningful in block boundaries only. */
941 if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
942 DEP_PRO (dep)))
943 return false;
944
945 /* If the flag COUNT_SPEC_IN_CRITICAL_PATH is set,
946 then speculative instructions are less likely to be
947 scheduled. That is because the priority of
948 their producers will increase, and thus the
949 producers are more likely to be scheduled first,
950 thus resolving the dependence. */
951 if (sched_deps_info->generate_spec_deps
952 && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
953 && (DEP_STATUS (dep) & SPECULATIVE))
954 return false;
955
956 return true;
957 }
958
959 /* Compute the number of nondebug forward deps of an insn. */
960
961 static int
962 dep_list_size (rtx insn)
963 {
964 sd_iterator_def sd_it;
965 dep_t dep;
966 int dbgcount = 0, nodbgcount = 0;
967
968 if (!MAY_HAVE_DEBUG_INSNS)
969 return sd_lists_size (insn, SD_LIST_FORW);
970
971 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
972 {
973 if (DEBUG_INSN_P (DEP_CON (dep)))
974 dbgcount++;
975 else if (!DEBUG_INSN_P (DEP_PRO (dep)))
976 nodbgcount++;
977 }
978
979 gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, SD_LIST_FORW));
980
981 return nodbgcount;
982 }
983
984 /* Compute the priority number for INSN. */
985 static int
986 priority (rtx insn)
987 {
988 if (! INSN_P (insn))
989 return 0;
990
991 /* We should not be interested in priority of an already scheduled insn. */
992 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
993
994 if (!INSN_PRIORITY_KNOWN (insn))
995 {
996 int this_priority = -1;
997
998 if (dep_list_size (insn) == 0)
999 /* ??? We should set INSN_PRIORITY to insn_cost when an insn has
1000 some forward deps but all of them are ignored by the
1001 contributes_to_priority hook. At the moment we set the priority of
1002 such an insn to 0. */
1003 this_priority = insn_cost (insn);
1004 else
1005 {
1006 rtx prev_first, twin;
1007 basic_block rec;
1008
1009 /* For recovery check instructions we calculate priority slightly
1010 differently than for normal instructions. Instead of walking
1011 through INSN_FORW_DEPS (check) list, we walk through
1012 INSN_FORW_DEPS list of each instruction in the corresponding
1013 recovery block. */
1014
1015 /* Selective scheduling does not define RECOVERY_BLOCK macro. */
1016 rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
1017 if (!rec || rec == EXIT_BLOCK_PTR)
1018 {
1019 prev_first = PREV_INSN (insn);
1020 twin = insn;
1021 }
1022 else
1023 {
1024 prev_first = NEXT_INSN (BB_HEAD (rec));
1025 twin = PREV_INSN (BB_END (rec));
1026 }
1027
1028 do
1029 {
1030 sd_iterator_def sd_it;
1031 dep_t dep;
1032
1033 FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
1034 {
1035 rtx next;
1036 int next_priority;
1037
1038 next = DEP_CON (dep);
1039
1040 if (BLOCK_FOR_INSN (next) != rec)
1041 {
1042 int cost;
1043
1044 if (!contributes_to_priority_p (dep))
1045 continue;
1046
1047 if (twin == insn)
1048 cost = dep_cost (dep);
1049 else
1050 {
1051 struct _dep _dep1, *dep1 = &_dep1;
1052
1053 init_dep (dep1, insn, next, REG_DEP_ANTI);
1054
1055 cost = dep_cost (dep1);
1056 }
1057
1058 next_priority = cost + priority (next);
1059
1060 if (next_priority > this_priority)
1061 this_priority = next_priority;
1062 }
1063 }
1064
1065 twin = PREV_INSN (twin);
1066 }
1067 while (twin != prev_first);
1068 }
1069
1070 if (this_priority < 0)
1071 {
1072 gcc_assert (this_priority == -1);
1073
1074 this_priority = insn_cost (insn);
1075 }
1076
1077 INSN_PRIORITY (insn) = this_priority;
1078 INSN_PRIORITY_STATUS (insn) = 1;
1079 }
1080
1081 return INSN_PRIORITY (insn);
1082 }
1083 \f
1084 /* Macros and functions for keeping the priority queue sorted, and
1085 dealing with queuing and dequeuing of instructions. */
1086
1087 #define SCHED_SORT(READY, N_READY) \
1088 do { if ((N_READY) == 2) \
1089 swap_sort (READY, N_READY); \
1090 else if ((N_READY) > 2) \
1091 qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \
1092 while (0)
1093
1094 /* Setup info about the current register pressure impact of scheduling
1095 INSN at the current scheduling point. */
1096 static void
1097 setup_insn_reg_pressure_info (rtx insn)
1098 {
1099 int i, change, before, after, hard_regno;
1100 int excess_cost_change;
1101 enum machine_mode mode;
1102 enum reg_class cl;
1103 struct reg_pressure_data *pressure_info;
1104 int *max_reg_pressure;
1105 struct reg_use_data *use;
1106 static int death[N_REG_CLASSES];
1107
1108 gcc_checking_assert (!DEBUG_INSN_P (insn));
1109
1110 excess_cost_change = 0;
1111 for (i = 0; i < ira_reg_class_cover_size; i++)
1112 death[ira_reg_class_cover[i]] = 0;
1113 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1114 if (dying_use_p (use))
1115 {
1116 cl = sched_regno_cover_class[use->regno];
1117 if (use->regno < FIRST_PSEUDO_REGISTER)
1118 death[cl]++;
1119 else
1120 death[cl] += ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (use->regno)];
1121 }
1122 pressure_info = INSN_REG_PRESSURE (insn);
1123 max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
1124 gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
1125 for (i = 0; i < ira_reg_class_cover_size; i++)
1126 {
1127 cl = ira_reg_class_cover[i];
1128 gcc_assert (curr_reg_pressure[cl] >= 0);
1129 change = (int) pressure_info[i].set_increase - death[cl];
1130 before = MAX (0, max_reg_pressure[i] - ira_available_class_regs[cl]);
1131 after = MAX (0, max_reg_pressure[i] + change
1132 - ira_available_class_regs[cl]);
1133 hard_regno = ira_class_hard_regs[cl][0];
1134 gcc_assert (hard_regno >= 0);
1135 mode = reg_raw_mode[hard_regno];
1136 excess_cost_change += ((after - before)
1137 * (ira_memory_move_cost[mode][cl][0]
1138 + ira_memory_move_cost[mode][cl][1]));
1139 }
1140 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
1141 }
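/* A worked example with assumed numbers: suppose a cover class has
   ira_available_class_regs == 4 and the max pressure recorded for INSN
   is 5, so BEFORE == 1.  If INSN sets one register of the class while a
   dying use frees one, CHANGE == 0, AFTER == 1 and nothing is added;
   if instead CHANGE were +1, AFTER would become 2 and
   EXCESS_COST_CHANGE would grow by one memory load plus one store cost,
   modelling the spill that the extra pressure is likely to cause.  */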
1142
1143 /* Returns a positive value if x is preferred; returns a negative value if
1144 y is preferred. Should never return 0, since that will make the sort
1145 unstable. */
1146
1147 static int
1148 rank_for_schedule (const void *x, const void *y)
1149 {
1150 rtx tmp = *(const rtx *) y;
1151 rtx tmp2 = *(const rtx *) x;
1152 rtx last;
1153 int tmp_class, tmp2_class;
1154 int val, priority_val, info_val;
1155
1156 if (MAY_HAVE_DEBUG_INSNS)
1157 {
1158 /* Schedule debug insns as early as possible. */
1159 if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
1160 return -1;
1161 else if (DEBUG_INSN_P (tmp2))
1162 return 1;
1163 }
1164
1165 /* The insn in a schedule group should be issued first. */
1166 if (flag_sched_group_heuristic &&
1167 SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
1168 return SCHED_GROUP_P (tmp2) ? 1 : -1;
1169
1170 /* Make sure that priority of TMP and TMP2 are initialized. */
1171 gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
1172
1173 if (sched_pressure_p)
1174 {
1175 int diff;
1176
1177 /* Prefer insn whose scheduling results in the smallest register
1178 pressure excess. */
1179 if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
1180 + (INSN_TICK (tmp) > clock_var
1181 ? INSN_TICK (tmp) - clock_var : 0)
1182 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
1183 - (INSN_TICK (tmp2) > clock_var
1184 ? INSN_TICK (tmp2) - clock_var : 0))) != 0)
1185 return diff;
1186 }
1187
1188
1189 if (sched_pressure_p
1190 && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var))
1191 {
1192 if (INSN_TICK (tmp) <= clock_var)
1193 return -1;
1194 else if (INSN_TICK (tmp2) <= clock_var)
1195 return 1;
1196 else
1197 return INSN_TICK (tmp) - INSN_TICK (tmp2);
1198 }
1199 /* Prefer insn with higher priority. */
1200 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
1201
1202 if (flag_sched_critical_path_heuristic && priority_val)
1203 return priority_val;
1204
1205 /* Prefer speculative insn with greater dependencies weakness. */
1206 if (flag_sched_spec_insn_heuristic && spec_info)
1207 {
1208 ds_t ds1, ds2;
1209 dw_t dw1, dw2;
1210 int dw;
1211
1212 ds1 = TODO_SPEC (tmp) & SPECULATIVE;
1213 if (ds1)
1214 dw1 = ds_weak (ds1);
1215 else
1216 dw1 = NO_DEP_WEAK;
1217
1218 ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
1219 if (ds2)
1220 dw2 = ds_weak (ds2);
1221 else
1222 dw2 = NO_DEP_WEAK;
1223
1224 dw = dw2 - dw1;
1225 if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
1226 return dw;
1227 }
1228
1229 info_val = (*current_sched_info->rank) (tmp, tmp2);
1230 if(flag_sched_rank_heuristic && info_val)
1231 return info_val;
1232
1233 if (flag_sched_last_insn_heuristic)
1234 {
1235 last = last_scheduled_insn;
1236
1237 if (DEBUG_INSN_P (last) && last != current_sched_info->prev_head)
1238 do
1239 last = PREV_INSN (last);
1240 while (!NONDEBUG_INSN_P (last)
1241 && last != current_sched_info->prev_head);
1242 }
1243
1244 /* Compare insns based on their relation to the last scheduled
1245 non-debug insn. */
1246 if (flag_sched_last_insn_heuristic && NONDEBUG_INSN_P (last))
1247 {
1248 dep_t dep1;
1249 dep_t dep2;
1250
1251 /* Classify the instructions into three classes:
1252 1) Data dependent on last scheduled insn.
1253 2) Anti/Output dependent on last scheduled insn.
1254 3) Independent of last scheduled insn, or has latency of one.
1255 Choose the insn from the highest numbered class if different. */
1256 dep1 = sd_find_dep_between (last, tmp, true);
1257
1258 if (dep1 == NULL || dep_cost (dep1) == 1)
1259 tmp_class = 3;
1260 else if (/* Data dependence. */
1261 DEP_TYPE (dep1) == REG_DEP_TRUE)
1262 tmp_class = 1;
1263 else
1264 tmp_class = 2;
1265
1266 dep2 = sd_find_dep_between (last, tmp2, true);
1267
1268 if (dep2 == NULL || dep_cost (dep2) == 1)
1269 tmp2_class = 3;
1270 else if (/* Data dependence. */
1271 DEP_TYPE (dep2) == REG_DEP_TRUE)
1272 tmp2_class = 1;
1273 else
1274 tmp2_class = 2;
1275
1276 if ((val = tmp2_class - tmp_class))
1277 return val;
1278 }
1279
1280 /* Prefer the insn which has more later insns that depend on it.
1281 This gives the scheduler more freedom when scheduling later
1282 instructions at the expense of added register pressure. */
1283
1284 val = (dep_list_size (tmp2) - dep_list_size (tmp));
1285
1286 if (flag_sched_dep_count_heuristic && val != 0)
1287 return val;
1288
1289 /* If insns are equally good, sort by INSN_LUID (original insn order),
1290 so that we make the sort stable. This minimizes instruction movement,
1291 thus minimizing sched's effect on debugging and cross-jumping. */
1292 return INSN_LUID (tmp) - INSN_LUID (tmp2);
1293 }
1294
1295 /* Resort the array A in which only element at index N may be out of order. */
1296
1297 HAIFA_INLINE static void
1298 swap_sort (rtx *a, int n)
1299 {
1300 rtx insn = a[n - 1];
1301 int i = n - 2;
1302
1303 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
1304 {
1305 a[i + 1] = a[i];
1306 i -= 1;
1307 }
1308 a[i + 1] = insn;
1309 }
1310
1311 /* Add INSN to the insn queue so that it can be executed at least
1312 N_CYCLES after the currently executing insn. Preserve the insn
1313 chain for debugging purposes. */
1314
1315 HAIFA_INLINE static void
1316 queue_insn (rtx insn, int n_cycles)
1317 {
1318 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
1319 rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
1320
1321 gcc_assert (n_cycles <= max_insn_queue_index);
1322 gcc_assert (!DEBUG_INSN_P (insn));
1323
1324 insn_queue[next_q] = link;
1325 q_size += 1;
1326
1327 if (sched_verbose >= 2)
1328 {
1329 fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
1330 (*current_sched_info->print_insn) (insn, 0));
1331
1332 fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
1333 }
1334
1335 QUEUE_INDEX (insn) = next_q;
1336 }
1337
1338 /* Remove INSN from queue. */
1339 static void
1340 queue_remove (rtx insn)
1341 {
1342 gcc_assert (QUEUE_INDEX (insn) >= 0);
1343 remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
1344 q_size--;
1345 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
1346 }
1347
1348 /* Return a pointer to the bottom of the ready list, i.e. the insn
1349 with the lowest priority. */
1350
1351 rtx *
1352 ready_lastpos (struct ready_list *ready)
1353 {
1354 gcc_assert (ready->n_ready >= 1);
1355 return ready->vec + ready->first - ready->n_ready + 1;
1356 }
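/* To illustrate the layout (an assumed snapshot, not real data): with
   FIRST == 9 and N_READY == 3, the vector holds

       vec[9]   highest priority, ready_element (ready, 0)
       vec[8]   ready_element (ready, 1)
       vec[7]   lowest priority, the slot returned above
                (first - n_ready + 1 == 7)

   so the list grows downwards from FIRST, and ready_remove_first simply
   decrements FIRST.  */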
1357
1358 /* Add an element INSN to the ready list so that it ends up with the
1359 lowest/highest priority depending on FIRST_P. */
1360
1361 HAIFA_INLINE static void
1362 ready_add (struct ready_list *ready, rtx insn, bool first_p)
1363 {
1364 if (!first_p)
1365 {
1366 if (ready->first == ready->n_ready)
1367 {
1368 memmove (ready->vec + ready->veclen - ready->n_ready,
1369 ready_lastpos (ready),
1370 ready->n_ready * sizeof (rtx));
1371 ready->first = ready->veclen - 1;
1372 }
1373 ready->vec[ready->first - ready->n_ready] = insn;
1374 }
1375 else
1376 {
1377 if (ready->first == ready->veclen - 1)
1378 {
1379 if (ready->n_ready)
1380 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
1381 memmove (ready->vec + ready->veclen - ready->n_ready - 1,
1382 ready_lastpos (ready),
1383 ready->n_ready * sizeof (rtx));
1384 ready->first = ready->veclen - 2;
1385 }
1386 ready->vec[++(ready->first)] = insn;
1387 }
1388
1389 ready->n_ready++;
1390 if (DEBUG_INSN_P (insn))
1391 ready->n_debug++;
1392
1393 gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
1394 QUEUE_INDEX (insn) = QUEUE_READY;
1395 }
1396
1397 /* Remove the element with the highest priority from the ready list and
1398 return it. */
1399
1400 HAIFA_INLINE static rtx
1401 ready_remove_first (struct ready_list *ready)
1402 {
1403 rtx t;
1404
1405 gcc_assert (ready->n_ready);
1406 t = ready->vec[ready->first--];
1407 ready->n_ready--;
1408 if (DEBUG_INSN_P (t))
1409 ready->n_debug--;
1410 /* If the queue becomes empty, reset it. */
1411 if (ready->n_ready == 0)
1412 ready->first = ready->veclen - 1;
1413
1414 gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
1415 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1416
1417 return t;
1418 }
1419
1420 /* The following code implements multi-pass scheduling for the first
1421 cycle. In other words, we will try to choose the ready insn which
1422 permits starting the maximum number of insns on the same cycle. */
1423
1424 /* Return a pointer to the element INDEX from the ready list. The INDEX
1425 of the insn with the highest priority is 0, and the lowest priority
1426 has INDEX N_READY - 1. */
1427
1428 rtx
1429 ready_element (struct ready_list *ready, int index)
1430 {
1431 gcc_assert (ready->n_ready && index < ready->n_ready);
1432
1433 return ready->vec[ready->first - index];
1434 }
1435
1436 /* Remove the element INDEX from the ready list and return it. The INDEX
1437 of the insn with the highest priority is 0, and the lowest priority
1438 has INDEX N_READY - 1. */
1439
1440 HAIFA_INLINE static rtx
1441 ready_remove (struct ready_list *ready, int index)
1442 {
1443 rtx t;
1444 int i;
1445
1446 if (index == 0)
1447 return ready_remove_first (ready);
1448 gcc_assert (ready->n_ready && index < ready->n_ready);
1449 t = ready->vec[ready->first - index];
1450 ready->n_ready--;
1451 if (DEBUG_INSN_P (t))
1452 ready->n_debug--;
1453 for (i = index; i < ready->n_ready; i++)
1454 ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
1455 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1456 return t;
1457 }
1458
1459 /* Remove INSN from the ready list. */
1460 static void
1461 ready_remove_insn (rtx insn)
1462 {
1463 int i;
1464
1465 for (i = 0; i < readyp->n_ready; i++)
1466 if (ready_element (readyp, i) == insn)
1467 {
1468 ready_remove (readyp, i);
1469 return;
1470 }
1471 gcc_unreachable ();
1472 }
1473
1474 /* Sort the ready list READY by ascending priority, using the SCHED_SORT
1475 macro. */
1476
1477 void
1478 ready_sort (struct ready_list *ready)
1479 {
1480 int i;
1481 rtx *first = ready_lastpos (ready);
1482
1483 if (sched_pressure_p)
1484 {
1485 for (i = 0; i < ready->n_ready; i++)
1486 if (!DEBUG_INSN_P (first[i]))
1487 setup_insn_reg_pressure_info (first[i]);
1488 }
1489 SCHED_SORT (first, ready->n_ready);
1490 }
1491
1492 /* PREV is an insn that is ready to execute. Adjust its priority if that
1493 will help shorten or lengthen register lifetimes as appropriate. Also
1494 provide a hook for the target to tweak itself. */
1495
1496 HAIFA_INLINE static void
1497 adjust_priority (rtx prev)
1498 {
1499 /* ??? There used to be code here to try and estimate how an insn
1500 affected register lifetimes, but it did it by looking at REG_DEAD
1501 notes, which we removed in schedule_region. Nor did it try to
1502 take into account register pressure or anything useful like that.
1503
1504 Revisit when we have a machine model to work with and not before. */
1505
1506 if (targetm.sched.adjust_priority)
1507 INSN_PRIORITY (prev) =
1508 targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
1509 }
1510
1511 /* Advance DFA state STATE on one cycle. */
1512 void
1513 advance_state (state_t state)
1514 {
1515 if (targetm.sched.dfa_pre_advance_cycle)
1516 targetm.sched.dfa_pre_advance_cycle ();
1517
1518 if (targetm.sched.dfa_pre_cycle_insn)
1519 state_transition (state,
1520 targetm.sched.dfa_pre_cycle_insn ());
1521
1522 state_transition (state, NULL);
1523
1524 if (targetm.sched.dfa_post_cycle_insn)
1525 state_transition (state,
1526 targetm.sched.dfa_post_cycle_insn ());
1527
1528 if (targetm.sched.dfa_post_advance_cycle)
1529 targetm.sched.dfa_post_advance_cycle ();
1530 }
1531
1532 /* Advance time on one cycle. */
1533 HAIFA_INLINE static void
1534 advance_one_cycle (void)
1535 {
1536 advance_state (curr_state);
1537 if (sched_verbose >= 6)
1538 fprintf (sched_dump, ";;\tAdvanced a state.\n");
1539 }
1540
1541 /* Clock at which the previous instruction was issued. */
1542 static int last_clock_var;
1543
1544 /* Update register pressure after scheduling INSN. */
1545 static void
1546 update_register_pressure (rtx insn)
1547 {
1548 struct reg_use_data *use;
1549 struct reg_set_data *set;
1550
1551 gcc_checking_assert (!DEBUG_INSN_P (insn));
1552
1553 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1554 if (dying_use_p (use) && bitmap_bit_p (curr_reg_live, use->regno))
1555 mark_regno_birth_or_death (use->regno, false);
1556 for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
1557 mark_regno_birth_or_death (set->regno, true);
1558 }
1559
1560 /* Set up or update (if UPDATE_P) max register pressure (see its
1561 meaning in sched-int.h::_haifa_insn_data) for all current BB insns
1562 after insn AFTER. */
1563 static void
1564 setup_insn_max_reg_pressure (rtx after, bool update_p)
1565 {
1566 int i, p;
1567 bool eq_p;
1568 rtx insn;
1569 static int max_reg_pressure[N_REG_CLASSES];
1570
1571 save_reg_pressure ();
1572 for (i = 0; i < ira_reg_class_cover_size; i++)
1573 max_reg_pressure[ira_reg_class_cover[i]]
1574 = curr_reg_pressure[ira_reg_class_cover[i]];
1575 for (insn = NEXT_INSN (after);
1576 insn != NULL_RTX && ! BARRIER_P (insn)
1577 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
1578 insn = NEXT_INSN (insn))
1579 if (NONDEBUG_INSN_P (insn))
1580 {
1581 eq_p = true;
1582 for (i = 0; i < ira_reg_class_cover_size; i++)
1583 {
1584 p = max_reg_pressure[ira_reg_class_cover[i]];
1585 if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
1586 {
1587 eq_p = false;
1588 INSN_MAX_REG_PRESSURE (insn)[i]
1589 = max_reg_pressure[ira_reg_class_cover[i]];
1590 }
1591 }
1592 if (update_p && eq_p)
1593 break;
1594 update_register_pressure (insn);
1595 for (i = 0; i < ira_reg_class_cover_size; i++)
1596 if (max_reg_pressure[ira_reg_class_cover[i]]
1597 < curr_reg_pressure[ira_reg_class_cover[i]])
1598 max_reg_pressure[ira_reg_class_cover[i]]
1599 = curr_reg_pressure[ira_reg_class_cover[i]];
1600 }
1601 restore_reg_pressure ();
1602 }
1603
1604 /* Update the current register pressure after scheduling INSN. Update
1605 also max register pressure for unscheduled insns of the current
1606 BB. */
1607 static void
1608 update_reg_and_insn_max_reg_pressure (rtx insn)
1609 {
1610 int i;
1611 int before[N_REG_CLASSES];
1612
1613 for (i = 0; i < ira_reg_class_cover_size; i++)
1614 before[i] = curr_reg_pressure[ira_reg_class_cover[i]];
1615 update_register_pressure (insn);
1616 for (i = 0; i < ira_reg_class_cover_size; i++)
1617 if (curr_reg_pressure[ira_reg_class_cover[i]] != before[i])
1618 break;
1619 if (i < ira_reg_class_cover_size)
1620 setup_insn_max_reg_pressure (insn, true);
1621 }
1622
1623 /* Set up register pressure at the beginning of basic block BB whose
1624 insns start after insn AFTER. Also set up the max register pressure
1625 for all insns of the basic block. */
1626 void
1627 sched_setup_bb_reg_pressure_info (basic_block bb, rtx after)
1628 {
1629 gcc_assert (sched_pressure_p);
1630 initiate_bb_reg_pressure_info (bb);
1631 setup_insn_max_reg_pressure (after, false);
1632 }
1633
1634 /* INSN is the "currently executing insn". Launch each insn which was
1635 waiting on INSN. READY is the ready list which contains the insns
1636 that are ready to fire. CLOCK is the current cycle. The function
1637 returns necessary cycle advance after issuing the insn (it is not
1638 zero for insns in a schedule group). */
1639
1640 static int
1641 schedule_insn (rtx insn)
1642 {
1643 sd_iterator_def sd_it;
1644 dep_t dep;
1645 int i;
1646 int advance = 0;
1647
1648 if (sched_verbose >= 1)
1649 {
1650 struct reg_pressure_data *pressure_info;
1651 char buf[2048];
1652
1653 print_insn (buf, insn, 0);
1654 buf[40] = 0;
1655 fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);
1656
1657 if (recog_memoized (insn) < 0)
1658 fprintf (sched_dump, "nothing");
1659 else
1660 print_reservation (sched_dump, insn);
1661 pressure_info = INSN_REG_PRESSURE (insn);
1662 if (pressure_info != NULL)
1663 {
1664 fputc (':', sched_dump);
1665 for (i = 0; i < ira_reg_class_cover_size; i++)
1666 fprintf (sched_dump, "%s%+d(%d)",
1667 reg_class_names[ira_reg_class_cover[i]],
1668 pressure_info[i].set_increase, pressure_info[i].change);
1669 }
1670 fputc ('\n', sched_dump);
1671 }
1672
1673 if (sched_pressure_p && !DEBUG_INSN_P (insn))
1674 update_reg_and_insn_max_reg_pressure (insn);
1675
1676 /* The instruction being scheduled should have all its dependencies resolved
1677 and should have been removed from the ready list. */
1678 gcc_assert (sd_lists_empty_p (insn, SD_LIST_BACK));
1679
1680 /* Reset debug insns invalidated by moving this insn. */
1681 if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
1682 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1683 sd_iterator_cond (&sd_it, &dep);)
1684 {
1685 rtx dbg = DEP_PRO (dep);
1686 struct reg_use_data *use, *next;
1687
1688 gcc_assert (DEBUG_INSN_P (dbg));
1689
1690 if (sched_verbose >= 6)
1691 fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
1692 INSN_UID (dbg));
1693
1694 /* ??? Rather than resetting the debug insn, we might be able
1695 to emit a debug temp before the just-scheduled insn, but
1696 this would involve checking that the expression at the
1697 point of the debug insn is equivalent to the expression
1698 before the just-scheduled insn. They might not be: the
1699 expression in the debug insn may depend on other insns not
1700 yet scheduled that set MEMs, REGs or even other debug
1701 insns. It's not clear that attempting to preserve debug
1702 information in these cases is worth the effort, given how
1703 uncommon these resets are and the likelihood that the debug
1704 temps introduced won't survive the schedule change. */
1705 INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
1706 df_insn_rescan (dbg);
1707
1708 /* Unknown location doesn't use any registers. */
1709 for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
1710 {
1711 struct reg_use_data *prev = use;
1712
1713 /* Remove use from the cyclic next_regno_use chain first. */
1714 while (prev->next_regno_use != use)
1715 prev = prev->next_regno_use;
1716 prev->next_regno_use = use->next_regno_use;
1717 next = use->next_insn_use;
1718 free (use);
1719 }
1720 INSN_REG_USE_LIST (dbg) = NULL;
1721
1722 /* We delete rather than resolve these deps, otherwise we
1723 crash in sched_free_deps(), because forward deps are
1724 expected to be released before backward deps. */
1725 sd_delete_dep (sd_it);
1726 }
1727
1728 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
1729 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
1730
1731 gcc_assert (INSN_TICK (insn) >= MIN_TICK);
1732 if (INSN_TICK (insn) > clock_var)
1733 /* INSN has been prematurely moved from the queue to the ready list.
1734 This is possible only if the following flag is set. */
1735 gcc_assert (flag_sched_stalled_insns);
1736
1737 /* ??? Probably, if INSN is scheduled prematurely, we should leave
1738 INSN_TICK untouched. This is a machine-dependent issue, actually. */
1739 INSN_TICK (insn) = clock_var;
1740
1741 /* Update dependent instructions. */
1742 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
1743 sd_iterator_cond (&sd_it, &dep);)
1744 {
1745 rtx next = DEP_CON (dep);
1746
1747 /* Resolve the dependence between INSN and NEXT.
1748 sd_resolve_dep () moves current dep to another list thus
1749 advancing the iterator. */
1750 sd_resolve_dep (sd_it);
1751
1752 /* Don't bother trying to mark next as ready if insn is a debug
1753 insn. If insn is the last hard dependency, it will have
1754 already been discounted. */
1755 if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
1756 continue;
1757
1758 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
1759 {
1760 int effective_cost;
1761
1762 effective_cost = try_ready (next);
1763
1764 if (effective_cost >= 0
1765 && SCHED_GROUP_P (next)
1766 && advance < effective_cost)
1767 advance = effective_cost;
1768 }
1769 else
1770 /* A check insn always has only one forward dependence (to the first insn
1771 in the recovery block); therefore, this will be executed only once. */
1772 {
1773 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
1774 fix_recovery_deps (RECOVERY_BLOCK (insn));
1775 }
1776 }
1777
1778 /* This is the place where the scheduler basically no longer needs the
1779 backward and forward dependencies for INSN. Nevertheless, they are
1780 still used by heuristics in rank_for_schedule (), early_queue_to_ready ()
1781 and by some targets (e.g. rs6000). Thus the earliest place where we
1782 *can* remove the dependencies is after the targetm.sched.finish () call
1783 in schedule_block (). On the other hand, the safest place to remove
1784 dependencies is when we finish scheduling the entire region. As we
1785 don't generate [many] dependencies during scheduling itself, we won't
1786 need the memory until the beginning of the next region.
1787 Bottom line: dependencies are removed for all insns at the end of
1788 scheduling the region. */
1789
1790 /* Annotate the instruction with issue information -- TImode
1791 indicates that the instruction is expected not to be able
1792 to issue on the same cycle as the previous insn. A machine
1793 may use this information to decide how the instruction should
1794 be aligned. */
1795 if (issue_rate > 1
1796 && GET_CODE (PATTERN (insn)) != USE
1797 && GET_CODE (PATTERN (insn)) != CLOBBER
1798 && !DEBUG_INSN_P (insn))
1799 {
1800 if (reload_completed)
1801 PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
1802 last_clock_var = clock_var;
1803 }
1804
1805 return advance;
1806 }
1807
1808 /* Functions for handling of notes. */
1809
1810 /* Append the note list that ends at FROM_END to the list that ends at *TO_ENDP. */
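/* For example, if *TO_ENDP points to N2 at the end of the chain N1<->N2 and
   FROM_END is N4 at the end of the chain N3<->N4, the result is
   N1<->N2<->N3<->N4 with *TO_ENDP updated to N4.  (N1..N4 are just
   illustrative note insns.)  */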
1811 void
1812 concat_note_lists (rtx from_end, rtx *to_endp)
1813 {
1814 rtx from_start;
1815
1816 /* It's easy when we have nothing to concat. */
1817 if (from_end == NULL)
1818 return;
1819
1820 /* It's also easy when destination is empty. */
1821 if (*to_endp == NULL)
1822 {
1823 *to_endp = from_end;
1824 return;
1825 }
1826
1827 from_start = from_end;
1828 while (PREV_INSN (from_start) != NULL)
1829 from_start = PREV_INSN (from_start);
1830
1831 PREV_INSN (from_start) = *to_endp;
1832 NEXT_INSN (*to_endp) = from_start;
1833 *to_endp = from_end;
1834 }
1835
1836 /* Delete notes between HEAD and TAIL and put them in the chain
1837 of notes ended by NOTE_LIST. */
1838 void
1839 remove_notes (rtx head, rtx tail)
1840 {
1841 rtx next_tail, insn, next;
1842
1843 note_list = 0;
1844 if (head == tail && !INSN_P (head))
1845 return;
1846
1847 next_tail = NEXT_INSN (tail);
1848 for (insn = head; insn != next_tail; insn = next)
1849 {
1850 next = NEXT_INSN (insn);
1851 if (!NOTE_P (insn))
1852 continue;
1853
1854 switch (NOTE_KIND (insn))
1855 {
1856 case NOTE_INSN_BASIC_BLOCK:
1857 continue;
1858
1859 case NOTE_INSN_EPILOGUE_BEG:
1860 if (insn != tail)
1861 {
1862 remove_insn (insn);
1863 add_reg_note (next, REG_SAVE_NOTE,
1864 GEN_INT (NOTE_INSN_EPILOGUE_BEG));
1865 break;
1866 }
1867 /* FALLTHRU */
1868
1869 default:
1870 remove_insn (insn);
1871
1872 /* Add the note to list that ends at NOTE_LIST. */
1873 PREV_INSN (insn) = note_list;
1874 NEXT_INSN (insn) = NULL_RTX;
1875 if (note_list)
1876 NEXT_INSN (note_list) = insn;
1877 note_list = insn;
1878 break;
1879 }
1880
1881 gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
1882 }
1883 }
1884
1885
1886 /* Return the head and tail pointers of ebb starting at BEG and ending
1887 at END. */
1888 void
1889 get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
1890 {
1891 rtx beg_head = BB_HEAD (beg);
1892 rtx beg_tail = BB_END (beg);
1893 rtx end_head = BB_HEAD (end);
1894 rtx end_tail = BB_END (end);
1895
1896 /* Don't include any notes or labels at the beginning of the BEG
1897 basic block, or notes at the end of the END basic block. */
1898
1899 if (LABEL_P (beg_head))
1900 beg_head = NEXT_INSN (beg_head);
1901
1902 while (beg_head != beg_tail)
1903 if (NOTE_P (beg_head))
1904 beg_head = NEXT_INSN (beg_head);
1905 else if (DEBUG_INSN_P (beg_head))
1906 {
1907 rtx note, next;
1908
1909 for (note = NEXT_INSN (beg_head);
1910 note != beg_tail;
1911 note = next)
1912 {
1913 next = NEXT_INSN (note);
1914 if (NOTE_P (note))
1915 {
1916 if (sched_verbose >= 9)
1917 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
1918
1919 reorder_insns_nobb (note, note, PREV_INSN (beg_head));
1920
1921 if (BLOCK_FOR_INSN (note) != beg)
1922 df_insn_change_bb (note, beg);
1923 }
1924 else if (!DEBUG_INSN_P (note))
1925 break;
1926 }
1927
1928 break;
1929 }
1930 else
1931 break;
1932
1933 *headp = beg_head;
1934
1935 if (beg == end)
1936 end_head = beg_head;
1937 else if (LABEL_P (end_head))
1938 end_head = NEXT_INSN (end_head);
1939
1940 while (end_head != end_tail)
1941 if (NOTE_P (end_tail))
1942 end_tail = PREV_INSN (end_tail);
1943 else if (DEBUG_INSN_P (end_tail))
1944 {
1945 rtx note, prev;
1946
1947 for (note = PREV_INSN (end_tail);
1948 note != end_head;
1949 note = prev)
1950 {
1951 prev = PREV_INSN (note);
1952 if (NOTE_P (note))
1953 {
1954 if (sched_verbose >= 9)
1955 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
1956
1957 reorder_insns_nobb (note, note, end_tail);
1958
1959 if (end_tail == BB_END (end))
1960 BB_END (end) = note;
1961
1962 if (BLOCK_FOR_INSN (note) != end)
1963 df_insn_change_bb (note, end);
1964 }
1965 else if (!DEBUG_INSN_P (note))
1966 break;
1967 }
1968
1969 break;
1970 }
1971 else
1972 break;
1973
1974 *tailp = end_tail;
1975 }
1976
1977 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
1978
1979 int
1980 no_real_insns_p (const_rtx head, const_rtx tail)
1981 {
1982 while (head != NEXT_INSN (tail))
1983 {
1984 if (!NOTE_P (head) && !LABEL_P (head))
1985 return 0;
1986 head = NEXT_INSN (head);
1987 }
1988 return 1;
1989 }
1990
1991 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
1992 previously found among the insns. Insert them just before HEAD. */
1993 rtx
1994 restore_other_notes (rtx head, basic_block head_bb)
1995 {
1996 if (note_list != 0)
1997 {
1998 rtx note_head = note_list;
1999
2000 if (head)
2001 head_bb = BLOCK_FOR_INSN (head);
2002 else
2003 head = NEXT_INSN (bb_note (head_bb));
2004
2005 while (PREV_INSN (note_head))
2006 {
2007 set_block_for_insn (note_head, head_bb);
2008 note_head = PREV_INSN (note_head);
2009 }
2010 /* In the above loop we've missed this note. */
2011 set_block_for_insn (note_head, head_bb);
2012
2013 PREV_INSN (note_head) = PREV_INSN (head);
2014 NEXT_INSN (PREV_INSN (head)) = note_head;
2015 PREV_INSN (head) = note_list;
2016 NEXT_INSN (note_list) = head;
2017
2018 if (BLOCK_FOR_INSN (head) != head_bb)
2019 BB_END (head_bb) = note_list;
2020
2021 head = note_head;
2022 }
2023
2024 return head;
2025 }
2026
2027 /* Move insns that became ready to fire from queue to ready list. */
2028
2029 static void
2030 queue_to_ready (struct ready_list *ready)
2031 {
2032 rtx insn;
2033 rtx link;
2034 rtx skip_insn;
2035
2036 q_ptr = NEXT_Q (q_ptr);
2037
2038 if (dbg_cnt (sched_insn) == false)
2039 /* If the debug counter is activated, do not requeue the insn right after
2040 last_scheduled_insn. */
2041 skip_insn = next_nonnote_nondebug_insn (last_scheduled_insn);
2042 else
2043 skip_insn = NULL_RTX;
2044
2045 /* Add all pending insns that can be scheduled without stalls to the
2046 ready list. */
2047 for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
2048 {
2049 insn = XEXP (link, 0);
2050 q_size -= 1;
2051
2052 if (sched_verbose >= 2)
2053 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
2054 (*current_sched_info->print_insn) (insn, 0));
2055
2056 /* If the ready list is full, delay the insn for 1 cycle.
2057 See the comment in schedule_block for the rationale. */
2058 if (!reload_completed
2059 && ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
2060 && !SCHED_GROUP_P (insn)
2061 && insn != skip_insn)
2062 {
2063 if (sched_verbose >= 2)
2064 fprintf (sched_dump, "requeued because ready full\n");
2065 queue_insn (insn, 1);
2066 }
2067 else
2068 {
2069 ready_add (ready, insn, false);
2070 if (sched_verbose >= 2)
2071 fprintf (sched_dump, "moving to ready without stalls\n");
2072 }
2073 }
2074 free_INSN_LIST_list (&insn_queue[q_ptr]);
2075
2076 /* If there are no ready insns, stall until one is ready and add all
2077 of the pending insns at that point to the ready list. */
2078 if (ready->n_ready == 0)
2079 {
2080 int stalls;
2081
2082 for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
2083 {
2084 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
2085 {
2086 for (; link; link = XEXP (link, 1))
2087 {
2088 insn = XEXP (link, 0);
2089 q_size -= 1;
2090
2091 if (sched_verbose >= 2)
2092 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
2093 (*current_sched_info->print_insn) (insn, 0));
2094
2095 ready_add (ready, insn, false);
2096 if (sched_verbose >= 2)
2097 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
2098 }
2099 free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
2100
2101 advance_one_cycle ();
2102
2103 break;
2104 }
2105
2106 advance_one_cycle ();
2107 }
2108
2109 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
2110 clock_var += stalls;
2111 }
2112 }
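/* Roughly, insn_queue is a circular buffer indexed by q_ptr: queue_insn
   (INSN, N) records INSN in the slot N cycles ahead, and each call to
   queue_to_ready above advances q_ptr by one cycle and moves that slot's
   insns to the ready list (or stalls until some slot is non-empty).  */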
2113
2114 /* Used by early_queue_to_ready. Determines whether it is "ok" to
2115 prematurely move INSN from the queue to the ready list. Currently,
2116 if a target defines the hook 'is_costly_dependence', this function
2117 uses the hook to check whether there exist any dependences which are
2118 considered costly by the target, between INSN and other insns that
2119 have already been scheduled. Dependences are checked up to Y cycles
2120 back, with a default of Y=1; the flag -fsched-stalled-insns-dep=Y allows
2121 controlling this value.
2122 (Other considerations could be taken into account instead (or in
2123 addition), depending on user flags and target hooks.)  */
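/* For example, with -fsched-stalled-insns-dep=2 the loop below walks back
   over the last two dispatch groups (a group ends at an insn in TImode)
   looking for a dependence that the target considers costly.  */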
2124
2125 static bool
2126 ok_for_early_queue_removal (rtx insn)
2127 {
2128 int n_cycles;
2129 rtx prev_insn = last_scheduled_insn;
2130
2131 if (targetm.sched.is_costly_dependence)
2132 {
2133 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
2134 {
2135 for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
2136 {
2137 int cost;
2138
2139 if (prev_insn == current_sched_info->prev_head)
2140 {
2141 prev_insn = NULL;
2142 break;
2143 }
2144
2145 if (!NOTE_P (prev_insn))
2146 {
2147 dep_t dep;
2148
2149 dep = sd_find_dep_between (prev_insn, insn, true);
2150
2151 if (dep != NULL)
2152 {
2153 cost = dep_cost (dep);
2154
2155 if (targetm.sched.is_costly_dependence (dep, cost,
2156 flag_sched_stalled_insns_dep - n_cycles))
2157 return false;
2158 }
2159 }
2160
2161 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
2162 break;
2163 }
2164
2165 if (!prev_insn)
2166 break;
2167 prev_insn = PREV_INSN (prev_insn);
2168 }
2169 }
2170
2171 return true;
2172 }
2173
2174
2175 /* Remove insns from the queue, before they become "ready" with respect
2176 to FU latency considerations. */
2177
2178 static int
2179 early_queue_to_ready (state_t state, struct ready_list *ready)
2180 {
2181 rtx insn;
2182 rtx link;
2183 rtx next_link;
2184 rtx prev_link;
2185 bool move_to_ready;
2186 int cost;
2187 state_t temp_state = alloca (dfa_state_size);
2188 int stalls;
2189 int insns_removed = 0;
2190
2191 /*
2192 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
2193 function:
2194
2195 X == 0: There is no limit on how many queued insns can be removed
2196 prematurely. (flag_sched_stalled_insns = -1).
2197
2198 X >= 1: Only X queued insns can be removed prematurely in each
2199 invocation. (flag_sched_stalled_insns = X).
2200
2201 Otherwise: Early queue removal is disabled.
2202 (flag_sched_stalled_insns = 0)
2203 */
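/* E.g. with -fsched-stalled-insns=2 (flag_sched_stalled_insns == 2) at most
   two queued insns are moved to the ready list per invocation, enforced by
   the insns_removed check below.  */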
2204
2205 if (! flag_sched_stalled_insns)
2206 return 0;
2207
2208 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
2209 {
2210 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
2211 {
2212 if (sched_verbose > 6)
2213 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
2214
2215 prev_link = 0;
2216 while (link)
2217 {
2218 next_link = XEXP (link, 1);
2219 insn = XEXP (link, 0);
2220 if (insn && sched_verbose > 6)
2221 print_rtl_single (sched_dump, insn);
2222
2223 memcpy (temp_state, state, dfa_state_size);
2224 if (recog_memoized (insn) < 0)
2225 /* Use a non-negative cost so the insn is not treated as ready,
2226 avoiding an infinite Q->R->Q->R... cycle. */
2227 cost = 0;
2228 else
2229 cost = state_transition (temp_state, insn);
2230
2231 if (sched_verbose >= 6)
2232 fprintf (sched_dump, "transition cost = %d\n", cost);
2233
2234 move_to_ready = false;
2235 if (cost < 0)
2236 {
2237 move_to_ready = ok_for_early_queue_removal (insn);
2238 if (move_to_ready == true)
2239 {
2240 /* move from Q to R */
2241 q_size -= 1;
2242 ready_add (ready, insn, false);
2243
2244 if (prev_link)
2245 XEXP (prev_link, 1) = next_link;
2246 else
2247 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
2248
2249 free_INSN_LIST_node (link);
2250
2251 if (sched_verbose >= 2)
2252 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
2253 (*current_sched_info->print_insn) (insn, 0));
2254
2255 insns_removed++;
2256 if (insns_removed == flag_sched_stalled_insns)
2257 /* Remove no more than flag_sched_stalled_insns insns
2258 from Q at a time. */
2259 return insns_removed;
2260 }
2261 }
2262
2263 if (move_to_ready == false)
2264 prev_link = link;
2265
2266 link = next_link;
2267 } /* while link */
2268 } /* if link */
2269
2270 } /* for stalls.. */
2271
2272 return insns_removed;
2273 }
2274
2275
2276 /* Print the ready list for debugging purposes. Callable from debugger. */
2277
2278 static void
2279 debug_ready_list (struct ready_list *ready)
2280 {
2281 rtx *p;
2282 int i;
2283
2284 if (ready->n_ready == 0)
2285 {
2286 fprintf (sched_dump, "\n");
2287 return;
2288 }
2289
2290 p = ready_lastpos (ready);
2291 for (i = 0; i < ready->n_ready; i++)
2292 {
2293 fprintf (sched_dump, " %s:%d",
2294 (*current_sched_info->print_insn) (p[i], 0),
2295 INSN_LUID (p[i]));
2296 if (sched_pressure_p)
2297 fprintf (sched_dump, "(cost=%d",
2298 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
2299 if (INSN_TICK (p[i]) > clock_var)
2300 fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
2301 if (sched_pressure_p)
2302 fprintf (sched_dump, ")");
2303 }
2304 fprintf (sched_dump, "\n");
2305 }
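/* Each entry above is printed as "NAME:LUID", optionally followed by the
   excess register-pressure cost and by ":delay=D" when the insn's
   INSN_TICK is later than the current cycle.  */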
2306
2307 /* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
2308 NOTEs. This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
2309 replaces the epilogue note in the correct basic block. */
2310 void
2311 reemit_notes (rtx insn)
2312 {
2313 rtx note, last = insn;
2314
2315 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2316 {
2317 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
2318 {
2319 enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
2320
2321 last = emit_note_before (note_type, last);
2322 remove_note (insn, note);
2323 }
2324 }
2325 }
2326
2327 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
2328 static void
2329 move_insn (rtx insn, rtx last, rtx nt)
2330 {
2331 if (PREV_INSN (insn) != last)
2332 {
2333 basic_block bb;
2334 rtx note;
2335 int jump_p = 0;
2336
2337 bb = BLOCK_FOR_INSN (insn);
2338
2339 /* BB_HEAD is either LABEL or NOTE. */
2340 gcc_assert (BB_HEAD (bb) != insn);
2341
2342 if (BB_END (bb) == insn)
2343 /* If this is the last instruction in the BB, move the end marker one
2344 instruction up. */
2345 {
2346 /* Jumps are always placed at the end of a basic block. */
2347 jump_p = control_flow_insn_p (insn);
2348
2349 gcc_assert (!jump_p
2350 || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
2351 && IS_SPECULATION_BRANCHY_CHECK_P (insn))
2352 || (common_sched_info->sched_pass_id
2353 == SCHED_EBB_PASS));
2354
2355 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
2356
2357 BB_END (bb) = PREV_INSN (insn);
2358 }
2359
2360 gcc_assert (BB_END (bb) != last);
2361
2362 if (jump_p)
2363 /* We move the block note along with the jump. */
2364 {
2365 gcc_assert (nt);
2366
2367 note = NEXT_INSN (insn);
2368 while (NOTE_NOT_BB_P (note) && note != nt)
2369 note = NEXT_INSN (note);
2370
2371 if (note != nt
2372 && (LABEL_P (note)
2373 || BARRIER_P (note)))
2374 note = NEXT_INSN (note);
2375
2376 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
2377 }
2378 else
2379 note = insn;
2380
2381 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
2382 PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
2383
2384 NEXT_INSN (note) = NEXT_INSN (last);
2385 PREV_INSN (NEXT_INSN (last)) = note;
2386
2387 NEXT_INSN (last) = insn;
2388 PREV_INSN (insn) = last;
2389
2390 bb = BLOCK_FOR_INSN (last);
2391
2392 if (jump_p)
2393 {
2394 fix_jump_move (insn);
2395
2396 if (BLOCK_FOR_INSN (insn) != bb)
2397 move_block_after_check (insn);
2398
2399 gcc_assert (BB_END (bb) == last);
2400 }
2401
2402 df_insn_change_bb (insn, bb);
2403
2404 /* Update BB_END, if needed. */
2405 if (BB_END (bb) == last)
2406 BB_END (bb) = insn;
2407 }
2408
2409 SCHED_GROUP_P (insn) = 0;
2410 }
2411
2412 /* Return true if scheduling INSN will finish current clock cycle. */
2413 static bool
2414 insn_finishes_cycle_p (rtx insn)
2415 {
2416 if (SCHED_GROUP_P (insn))
2417 /* After issuing INSN, the rest of the sched_group will be forced to issue
2418 in order. Don't make any plans for the rest of the cycle. */
2419 return true;
2420
2421 /* Finishing the block will, apparently, finish the cycle. */
2422 if (current_sched_info->insn_finishes_block_p
2423 && current_sched_info->insn_finishes_block_p (insn))
2424 return true;
2425
2426 return false;
2427 }
2428
2429 /* Define type for target data used in multipass scheduling. */
2430 #ifndef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T
2431 # define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T int
2432 #endif
2433 typedef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T first_cycle_multipass_data_t;
2434
2435 /* The following structure describes an entry of the stack of choices. */
2436 struct choice_entry
2437 {
2438 /* Ordinal number of the issued insn in the ready queue. */
2439 int index;
2440 /* The remaining number of insn issues that we should still try. */
2441 int rest;
2442 /* The number of issued essential insns. */
2443 int n;
2444 /* State after issuing the insn. */
2445 state_t state;
2446 /* Target-specific data. */
2447 first_cycle_multipass_data_t target_data;
2448 };
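/* Informally: max_issue () below performs a depth-first search over possible
   issue orders for the current cycle.  Each choice_entry records which ready
   insn was tried at that depth and the DFA state after issuing it, so that
   backtracking can restore the state and try the next candidate.  */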
2449
2450 /* The following array is used to implement a stack of choices used in
2451 function max_issue. */
2452 static struct choice_entry *choice_stack;
2453
2454 /* The following variable value is the number of essential insns issued on
2455 the current cycle. An insn is an essential one if it changes the
2456 processor's state. */
2457 int cycle_issued_insns;
2458
2459 /* This holds the value of the target dfa_lookahead hook. */
2460 int dfa_lookahead;
2461
2462 /* The following variable value is the maximal number of tries of issuing
2463 insns for the first cycle multipass insn scheduling. We define
2464 this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE). We would not
2465 need this constraint if all real insns (with non-negative codes)
2466 had reservations, because in that case the algorithm complexity is
2467 O(DFA_LOOKAHEAD**ISSUE_RATE). Unfortunately, the DFA descriptions
2468 might be incomplete and such insns might occur. For such
2469 descriptions, the complexity of the algorithm (without the constraint)
2470 could reach DFA_LOOKAHEAD ** N, where N is the queue length. */
2471 static int max_lookahead_tries;
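/* For example, with dfa_lookahead == 4 and issue_rate == 2, max_issue ()
   below initializes max_lookahead_tries to 100 * 4 * 4 == 1600.  */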
2472
2473 /* The following value is the value of the hook
2474 `first_cycle_multipass_dfa_lookahead' at the last call of
2475 `max_issue'. */
2476 static int cached_first_cycle_multipass_dfa_lookahead = 0;
2477
2478 /* The following value is the value of `issue_rate' at the last call of
2479 `sched_init'. */
2480 static int cached_issue_rate = 0;
2481
2482 /* The following function returns the maximal (or close to maximal) number
2483 of insns which can be issued on the same cycle, one of which is the
2484 insn with the best rank (the first insn in READY). To do this, the
2485 function tries different samples of ready insns. READY is the current
2486 queue `ready'. The global array READY_TRY reflects which insns have
2487 already been issued in the current try. The function stops immediately
2488 if it finds a solution in which all instructions can be issued.
2489 INDEX will contain the index of the best insn in READY. The following
2490 function is used only for first cycle multipass scheduling.
2491
2492 PRIVILEGED_N >= 0
2493
2494 This function expects recognized insns only. All USEs,
2495 CLOBBERs, etc must be filtered elsewhere. */
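/* E.g. choose_ready () below calls max_issue () with PRIVILEGED_N == 1, so
   a candidate solution is recorded as best only if it issues the first
   (best ranked) ready insn.  */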
2496 int
2497 max_issue (struct ready_list *ready, int privileged_n, state_t state,
2498 bool first_cycle_insn_p, int *index)
2499 {
2500 int n, i, all, n_ready, best, delay, tries_num;
2501 int more_issue;
2502 struct choice_entry *top;
2503 rtx insn;
2504
2505 n_ready = ready->n_ready;
2506 gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
2507 && privileged_n <= n_ready);
2508
2509 /* Init MAX_LOOKAHEAD_TRIES. */
2510 if (cached_first_cycle_multipass_dfa_lookahead != dfa_lookahead)
2511 {
2512 cached_first_cycle_multipass_dfa_lookahead = dfa_lookahead;
2513 max_lookahead_tries = 100;
2514 for (i = 0; i < issue_rate; i++)
2515 max_lookahead_tries *= dfa_lookahead;
2516 }
2517
2518 /* Init MORE_ISSUE. */
2519 more_issue = issue_rate - cycle_issued_insns;
2520 gcc_assert (more_issue >= 0);
2521
2522 /* The number of the issued insns in the best solution. */
2523 best = 0;
2524
2525 top = choice_stack;
2526
2527 /* Set initial state of the search. */
2528 memcpy (top->state, state, dfa_state_size);
2529 top->rest = dfa_lookahead;
2530 top->n = 0;
2531 if (targetm.sched.first_cycle_multipass_begin)
2532 targetm.sched.first_cycle_multipass_begin (&top->target_data,
2533 ready_try, n_ready,
2534 first_cycle_insn_p);
2535
2536 /* Count the number of the insns to search among. */
2537 for (all = i = 0; i < n_ready; i++)
2538 if (!ready_try [i])
2539 all++;
2540
2541 /* I is the index of the insn to try next. */
2542 i = 0;
2543 tries_num = 0;
2544 for (;;)
2545 {
2546 if (/* If we've reached a dead end or searched enough of what we have
2547 been asked... */
2548 top->rest == 0
2549 /* or have nothing else to try... */
2550 || i >= n_ready
2551 /* or should not issue more. */
2552 || top->n >= more_issue)
2553 {
2554 /* ??? (... || i == n_ready). */
2555 gcc_assert (i <= n_ready);
2556
2557 /* We should not issue more than issue_rate instructions. */
2558 gcc_assert (top->n <= more_issue);
2559
2560 if (top == choice_stack)
2561 break;
2562
2563 if (best < top - choice_stack)
2564 {
2565 if (privileged_n)
2566 {
2567 n = privileged_n;
2568 /* Try to find issued privileged insn. */
2569 while (n && !ready_try[--n]);
2570 }
2571
2572 if (/* If all insns are equally good... */
2573 privileged_n == 0
2574 /* Or a privileged insn will be issued. */
2575 || ready_try[n])
2576 /* Then we have a solution. */
2577 {
2578 best = top - choice_stack;
2579 /* This is the index of the insn issued first in this
2580 solution. */
2581 *index = choice_stack [1].index;
2582 if (top->n == more_issue || best == all)
2583 break;
2584 }
2585 }
2586
2587 /* Set ready-list index to point to the last insn
2588 ('i++' below will advance it to the next insn). */
2589 i = top->index;
2590
2591 /* Backtrack. */
2592 ready_try [i] = 0;
2593
2594 if (targetm.sched.first_cycle_multipass_backtrack)
2595 targetm.sched.first_cycle_multipass_backtrack (&top->target_data,
2596 ready_try, n_ready);
2597
2598 top--;
2599 memcpy (state, top->state, dfa_state_size);
2600 }
2601 else if (!ready_try [i])
2602 {
2603 tries_num++;
2604 if (tries_num > max_lookahead_tries)
2605 break;
2606 insn = ready_element (ready, i);
2607 delay = state_transition (state, insn);
2608 if (delay < 0)
2609 {
2610 if (state_dead_lock_p (state)
2611 || insn_finishes_cycle_p (insn))
2612 /* We won't issue any more instructions in the next
2613 choice_state. */
2614 top->rest = 0;
2615 else
2616 top->rest--;
2617
2618 n = top->n;
2619 if (memcmp (top->state, state, dfa_state_size) != 0)
2620 n++;
2621
2622 /* Advance to the next choice_entry. */
2623 top++;
2624 /* Initialize it. */
2625 top->rest = dfa_lookahead;
2626 top->index = i;
2627 top->n = n;
2628 memcpy (top->state, state, dfa_state_size);
2629 ready_try [i] = 1;
2630
2631 if (targetm.sched.first_cycle_multipass_issue)
2632 targetm.sched.first_cycle_multipass_issue (&top->target_data,
2633 ready_try, n_ready,
2634 insn,
2635 &((top - 1)
2636 ->target_data));
2637
2638 i = -1;
2639 }
2640 }
2641
2642 /* Increase ready-list index. */
2643 i++;
2644 }
2645
2646 if (targetm.sched.first_cycle_multipass_end)
2647 targetm.sched.first_cycle_multipass_end (best != 0
2648 ? &choice_stack[1].target_data
2649 : NULL);
2650
2651 /* Restore the original state of the DFA. */
2652 memcpy (state, choice_stack->state, dfa_state_size);
2653
2654 return best;
2655 }
2656
2657 /* The following function chooses an insn from READY and modifies
2658 READY. It is used only for first
2659 cycle multipass scheduling.
2660 Return:
2661 -1 if cycle should be advanced,
2662 0 if INSN_PTR is set to point to the desirable insn,
2663 1 if choose_ready () should be restarted without advancing the cycle. */
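/* The caller in schedule_block () finishes the current cycle when -1 is
   returned and restarts insn selection when 1 is returned; see the handling
   of RES in the main scheduling loop below.  */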
2664 static int
2665 choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
2666 rtx *insn_ptr)
2667 {
2668 int lookahead;
2669
2670 if (dbg_cnt (sched_insn) == false)
2671 {
2672 rtx insn;
2673
2674 insn = next_nonnote_insn (last_scheduled_insn);
2675
2676 if (QUEUE_INDEX (insn) == QUEUE_READY)
2677 /* INSN is in the ready_list. */
2678 {
2679 ready_remove_insn (insn);
2680 *insn_ptr = insn;
2681 return 0;
2682 }
2683
2684 /* INSN is in the queue. Advance cycle to move it to the ready list. */
2685 return -1;
2686 }
2687
2688 lookahead = 0;
2689
2690 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
2691 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
2692 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
2693 || DEBUG_INSN_P (ready_element (ready, 0)))
2694 {
2695 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
2696 *insn_ptr = ready_remove_first_dispatch (ready);
2697 else
2698 *insn_ptr = ready_remove_first (ready);
2699
2700 return 0;
2701 }
2702 else
2703 {
2704 /* Try to choose the better insn. */
2705 int index = 0, i, n;
2706 rtx insn;
2707 int try_data = 1, try_control = 1;
2708 ds_t ts;
2709
2710 insn = ready_element (ready, 0);
2711 if (INSN_CODE (insn) < 0)
2712 {
2713 *insn_ptr = ready_remove_first (ready);
2714 return 0;
2715 }
2716
2717 if (spec_info
2718 && spec_info->flags & (PREFER_NON_DATA_SPEC
2719 | PREFER_NON_CONTROL_SPEC))
2720 {
2721 for (i = 0, n = ready->n_ready; i < n; i++)
2722 {
2723 rtx x;
2724 ds_t s;
2725
2726 x = ready_element (ready, i);
2727 s = TODO_SPEC (x);
2728
2729 if (spec_info->flags & PREFER_NON_DATA_SPEC
2730 && !(s & DATA_SPEC))
2731 {
2732 try_data = 0;
2733 if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
2734 || !try_control)
2735 break;
2736 }
2737
2738 if (spec_info->flags & PREFER_NON_CONTROL_SPEC
2739 && !(s & CONTROL_SPEC))
2740 {
2741 try_control = 0;
2742 if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
2743 break;
2744 }
2745 }
2746 }
2747
2748 ts = TODO_SPEC (insn);
2749 if ((ts & SPECULATIVE)
2750 && (((!try_data && (ts & DATA_SPEC))
2751 || (!try_control && (ts & CONTROL_SPEC)))
2752 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2753 && !targetm.sched
2754 .first_cycle_multipass_dfa_lookahead_guard_spec (insn))))
2755 /* Discard speculative instruction that stands first in the ready
2756 list. */
2757 {
2758 change_queue_index (insn, 1);
2759 return 1;
2760 }
2761
2762 ready_try[0] = 0;
2763
2764 for (i = 1; i < ready->n_ready; i++)
2765 {
2766 insn = ready_element (ready, i);
2767
2768 ready_try [i]
2769 = ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2770 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC)));
2771 }
2772
2773 /* Let the target filter the search space. */
2774 for (i = 1; i < ready->n_ready; i++)
2775 if (!ready_try[i])
2776 {
2777 insn = ready_element (ready, i);
2778
2779 /* If this insn is recognizable we should have already
2780 recognized it earlier.
2781 ??? Not very clear where this is supposed to be done.
2782 See dep_cost_1. */
2783 gcc_checking_assert (INSN_CODE (insn) >= 0
2784 || recog_memoized (insn) < 0);
2785
2786 ready_try [i]
2787 = (/* INSN_CODE check can be omitted here as it is also done later
2788 in max_issue (). */
2789 INSN_CODE (insn) < 0
2790 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2791 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2792 (insn)));
2793 }
2794
2795 if (max_issue (ready, 1, curr_state, first_cycle_insn_p, &index) == 0)
2796 {
2797 *insn_ptr = ready_remove_first (ready);
2798 if (sched_verbose >= 4)
2799 fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
2800 (*current_sched_info->print_insn) (*insn_ptr, 0));
2801 return 0;
2802 }
2803 else
2804 {
2805 if (sched_verbose >= 4)
2806 fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
2807 (*current_sched_info->print_insn)
2808 (ready_element (ready, index), 0));
2809
2810 *insn_ptr = ready_remove (ready, index);
2811 return 0;
2812 }
2813 }
2814 }
2815
2816 /* Use forward list scheduling to rearrange insns of block pointed to by
2817 TARGET_BB, possibly bringing insns from subsequent blocks in the same
2818 region. */
2819
2820 void
2821 schedule_block (basic_block *target_bb)
2822 {
2823 int i;
2824 bool first_cycle_insn_p;
2825 int can_issue_more;
2826 state_t temp_state = NULL; /* It is used for multipass scheduling. */
2827 int sort_p, advance, start_clock_var;
2828
2829 /* Head/tail info for this block. */
2830 rtx prev_head = current_sched_info->prev_head;
2831 rtx next_tail = current_sched_info->next_tail;
2832 rtx head = NEXT_INSN (prev_head);
2833 rtx tail = PREV_INSN (next_tail);
2834
2835 /* We used to have code to avoid getting parameters moved from hard
2836 argument registers into pseudos.
2837
2838 However, it was removed when it proved to be of marginal benefit
2839 and caused problems because schedule_block and compute_forward_dependences
2840 had different notions of what the "head" insn was. */
2841
2842 gcc_assert (head != tail || INSN_P (head));
2843
2844 haifa_recovery_bb_recently_added_p = false;
2845
2846 /* Debug info. */
2847 if (sched_verbose)
2848 dump_new_block_header (0, *target_bb, head, tail);
2849
2850 state_reset (curr_state);
2851
2852 /* Clear the ready list. */
2853 ready.first = ready.veclen - 1;
2854 ready.n_ready = 0;
2855 ready.n_debug = 0;
2856
2857 /* It is used for first cycle multipass scheduling. */
2858 temp_state = alloca (dfa_state_size);
2859
2860 if (targetm.sched.init)
2861 targetm.sched.init (sched_dump, sched_verbose, ready.veclen);
2862
2863 /* We start inserting insns after PREV_HEAD. */
2864 last_scheduled_insn = prev_head;
2865
2866 gcc_assert ((NOTE_P (last_scheduled_insn)
2867 || DEBUG_INSN_P (last_scheduled_insn))
2868 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
2869
2870 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
2871 queue. */
2872 q_ptr = 0;
2873 q_size = 0;
2874
2875 insn_queue = XALLOCAVEC (rtx, max_insn_queue_index + 1);
2876 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
2877
2878 /* Start just before the beginning of time. */
2879 clock_var = -1;
2880
2881 /* We need the queue and ready lists and clock_var to be initialized
2882 in try_ready () (which is called through init_ready_list ()). */
2883 (*current_sched_info->init_ready_list) ();
2884
2885 /* The algorithm is O(n^2) in the number of ready insns at any given
2886 time in the worst case. Before reload we are more likely to have
2887 big lists so truncate them to a reasonable size. */
2888 if (!reload_completed
2889 && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
2890 {
2891 ready_sort (&ready);
2892
2893 /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
2894 If there are debug insns, we know they're first. */
2895 for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
2896 if (!SCHED_GROUP_P (ready_element (&ready, i)))
2897 break;
2898
2899 if (sched_verbose >= 2)
2900 {
2901 fprintf (sched_dump,
2902 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
2903 fprintf (sched_dump,
2904 ";;\t\t before reload => truncated to %d insns\n", i);
2905 }
2906
2907 /* Delay all insns past it for 1 cycle. If the debug counter is
2908 activated, make an exception for the insn right after
2909 last_scheduled_insn. */
2910 {
2911 rtx skip_insn;
2912
2913 if (dbg_cnt (sched_insn) == false)
2914 skip_insn = next_nonnote_insn (last_scheduled_insn);
2915 else
2916 skip_insn = NULL_RTX;
2917
2918 while (i < ready.n_ready)
2919 {
2920 rtx insn;
2921
2922 insn = ready_remove (&ready, i);
2923
2924 if (insn != skip_insn)
2925 queue_insn (insn, 1);
2926 }
2927 }
2928 }
2929
2930 /* Now we can restore basic block notes and maintain precise cfg. */
2931 restore_bb_notes (*target_bb);
2932
2933 last_clock_var = -1;
2934
2935 advance = 0;
2936
2937 sort_p = TRUE;
2938 /* Loop until all the insns in BB are scheduled. */
2939 while ((*current_sched_info->schedule_more_p) ())
2940 {
2941 do
2942 {
2943 start_clock_var = clock_var;
2944
2945 clock_var++;
2946
2947 advance_one_cycle ();
2948
2949 /* Add to the ready list all pending insns that can be issued now.
2950 If there are no ready insns, increment clock until one
2951 is ready and add all pending insns at that point to the ready
2952 list. */
2953 queue_to_ready (&ready);
2954
2955 gcc_assert (ready.n_ready);
2956
2957 if (sched_verbose >= 2)
2958 {
2959 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
2960 debug_ready_list (&ready);
2961 }
2962 advance -= clock_var - start_clock_var;
2963 }
2964 while (advance > 0);
2965
2966 if (sort_p)
2967 {
2968 /* Sort the ready list based on priority. */
2969 ready_sort (&ready);
2970
2971 if (sched_verbose >= 2)
2972 {
2973 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
2974 debug_ready_list (&ready);
2975 }
2976 }
2977
2978 /* We don't want md sched reorder to even see debug insns, so put
2979 them out right away. */
2980 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
2981 {
2982 if (control_flow_insn_p (last_scheduled_insn))
2983 {
2984 *target_bb = current_sched_info->advance_target_bb
2985 (*target_bb, 0);
2986
2987 if (sched_verbose)
2988 {
2989 rtx x;
2990
2991 x = next_real_insn (last_scheduled_insn);
2992 gcc_assert (x);
2993 dump_new_block_header (1, *target_bb, x, tail);
2994 }
2995
2996 last_scheduled_insn = bb_note (*target_bb);
2997 }
2998
2999 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
3000 {
3001 rtx insn = ready_remove_first (&ready);
3002 gcc_assert (DEBUG_INSN_P (insn));
3003 (*current_sched_info->begin_schedule_ready) (insn,
3004 last_scheduled_insn);
3005 move_insn (insn, last_scheduled_insn,
3006 current_sched_info->next_tail);
3007 last_scheduled_insn = insn;
3008 advance = schedule_insn (insn);
3009 gcc_assert (advance == 0);
3010 if (ready.n_ready > 0)
3011 ready_sort (&ready);
3012 }
3013
3014 if (!ready.n_ready)
3015 continue;
3016 }
3017
3018 /* Allow the target to reorder the list, typically for
3019 better instruction bundling. */
3020 if (sort_p && targetm.sched.reorder
3021 && (ready.n_ready == 0
3022 || !SCHED_GROUP_P (ready_element (&ready, 0))))
3023 can_issue_more =
3024 targetm.sched.reorder (sched_dump, sched_verbose,
3025 ready_lastpos (&ready),
3026 &ready.n_ready, clock_var);
3027 else
3028 can_issue_more = issue_rate;
3029
3030 first_cycle_insn_p = true;
3031 cycle_issued_insns = 0;
3032 for (;;)
3033 {
3034 rtx insn;
3035 int cost;
3036 bool asm_p = false;
3037
3038 if (sched_verbose >= 2)
3039 {
3040 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
3041 clock_var);
3042 debug_ready_list (&ready);
3043 if (sched_pressure_p)
3044 print_curr_reg_pressure ();
3045 }
3046
3047 if (ready.n_ready == 0
3048 && can_issue_more
3049 && reload_completed)
3050 {
3051 /* Allow scheduling insns directly from the queue in case
3052 there's nothing better to do (ready list is empty) but
3053 there are still vacant dispatch slots in the current cycle. */
3054 if (sched_verbose >= 6)
3055 fprintf (sched_dump,";;\t\tSecond chance\n");
3056 memcpy (temp_state, curr_state, dfa_state_size);
3057 if (early_queue_to_ready (temp_state, &ready))
3058 ready_sort (&ready);
3059 }
3060
3061 if (ready.n_ready == 0
3062 || !can_issue_more
3063 || state_dead_lock_p (curr_state)
3064 || !(*current_sched_info->schedule_more_p) ())
3065 break;
3066
3067 /* Select and remove the insn from the ready list. */
3068 if (sort_p)
3069 {
3070 int res;
3071
3072 insn = NULL_RTX;
3073 res = choose_ready (&ready, first_cycle_insn_p, &insn);
3074
3075 if (res < 0)
3076 /* Finish cycle. */
3077 break;
3078 if (res > 0)
3079 /* Restart choose_ready (). */
3080 continue;
3081
3082 gcc_assert (insn != NULL_RTX);
3083 }
3084 else
3085 insn = ready_remove_first (&ready);
3086
3087 if (sched_pressure_p && INSN_TICK (insn) > clock_var)
3088 {
3089 ready_add (&ready, insn, true);
3090 advance = 1;
3091 break;
3092 }
3093
3094 if (targetm.sched.dfa_new_cycle
3095 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
3096 insn, last_clock_var,
3097 clock_var, &sort_p))
3098 /* SORT_P is used by the target to override sorting
3099 of the ready list. This is needed when the target
3100 has modified its internal structures expecting that
3101 the insn will be issued next. As we need the insn
3102 to have the highest priority (so it will be returned by
3103 the ready_remove_first call above), we invoke
3104 ready_add (&ready, insn, true).
3105 But, still, there is one issue: INSN can later be
3106 discarded by the scheduler's front end through
3107 current_sched_info->can_schedule_ready_p and hence won't
3108 be issued next. */
3109 {
3110 ready_add (&ready, insn, true);
3111 break;
3112 }
3113
3114 sort_p = TRUE;
3115 memcpy (temp_state, curr_state, dfa_state_size);
3116 if (recog_memoized (insn) < 0)
3117 {
3118 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
3119 || asm_noperands (PATTERN (insn)) >= 0);
3120 if (!first_cycle_insn_p && asm_p)
3121 /* This is an asm insn that we are trying to issue on a
3122 cycle other than the first. Issue it on the next cycle. */
3123 cost = 1;
3124 else
3125 /* A USE insn, or something else we don't need to
3126 understand. We can't pass these directly to
3127 state_transition because it will trigger a
3128 fatal error for unrecognizable insns. */
3129 cost = 0;
3130 }
3131 else if (sched_pressure_p)
3132 cost = 0;
3133 else
3134 {
3135 cost = state_transition (temp_state, insn);
3136 if (cost < 0)
3137 cost = 0;
3138 else if (cost == 0)
3139 cost = 1;
3140 }
3141
3142 if (cost >= 1)
3143 {
3144 queue_insn (insn, cost);
3145 if (SCHED_GROUP_P (insn))
3146 {
3147 advance = cost;
3148 break;
3149 }
3150
3151 continue;
3152 }
3153
3154 if (current_sched_info->can_schedule_ready_p
3155 && ! (*current_sched_info->can_schedule_ready_p) (insn))
3156 /* We normally get here only if we don't want to move
3157 the insn from the split block. */
3158 {
3159 TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
3160 continue;
3161 }
3162
3163 /* DECISION is made. */
3164
3165 if (TODO_SPEC (insn) & SPECULATIVE)
3166 generate_recovery_code (insn);
3167
3168 if (control_flow_insn_p (last_scheduled_insn)
3169 /* This is used to switch basic blocks by request
3170 from the scheduler front-end (actually, sched-ebb.c only).
3171 It is needed to process blocks with a single fallthru
3172 edge. If the succeeding block has a jump, that jump would
3173 be moved to the end of the current bb, corrupting the CFG. */
3174 || current_sched_info->advance_target_bb (*target_bb, insn))
3175 {
3176 *target_bb = current_sched_info->advance_target_bb
3177 (*target_bb, 0);
3178
3179 if (sched_verbose)
3180 {
3181 rtx x;
3182
3183 x = next_real_insn (last_scheduled_insn);
3184 gcc_assert (x);
3185 dump_new_block_header (1, *target_bb, x, tail);
3186 }
3187
3188 last_scheduled_insn = bb_note (*target_bb);
3189 }
3190
3191 /* Update counters, etc in the scheduler's front end. */
3192 (*current_sched_info->begin_schedule_ready) (insn,
3193 last_scheduled_insn);
3194
3195 move_insn (insn, last_scheduled_insn, current_sched_info->next_tail);
3196
3197 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
3198 targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);
3199
3200 reemit_notes (insn);
3201 last_scheduled_insn = insn;
3202
3203 if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
3204 {
3205 cycle_issued_insns++;
3206 memcpy (curr_state, temp_state, dfa_state_size);
3207 }
3208
3209 if (targetm.sched.variable_issue)
3210 can_issue_more =
3211 targetm.sched.variable_issue (sched_dump, sched_verbose,
3212 insn, can_issue_more);
3213 /* A naked CLOBBER or USE generates no instruction, so do
3214 not count them against the issue rate. */
3215 else if (GET_CODE (PATTERN (insn)) != USE
3216 && GET_CODE (PATTERN (insn)) != CLOBBER)
3217 can_issue_more--;
3218 advance = schedule_insn (insn);
3219
3220 /* After issuing an asm insn we should start a new cycle. */
3221 if (advance == 0 && asm_p)
3222 advance = 1;
3223 if (advance != 0)
3224 break;
3225
3226 first_cycle_insn_p = false;
3227
3228 /* Sort the ready list based on priority. This must be
3229 redone here, as schedule_insn may have readied additional
3230 insns that will not be sorted correctly. */
3231 if (ready.n_ready > 0)
3232 ready_sort (&ready);
3233
3234 /* Quickly go through debug insns such that md sched
3235 reorder2 doesn't have to deal with debug insns. */
3236 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
3237 && (*current_sched_info->schedule_more_p) ())
3238 {
3239 if (control_flow_insn_p (last_scheduled_insn))
3240 {
3241 *target_bb = current_sched_info->advance_target_bb
3242 (*target_bb, 0);
3243
3244 if (sched_verbose)
3245 {
3246 rtx x;
3247
3248 x = next_real_insn (last_scheduled_insn);
3249 gcc_assert (x);
3250 dump_new_block_header (1, *target_bb, x, tail);
3251 }
3252
3253 last_scheduled_insn = bb_note (*target_bb);
3254 }
3255
3256 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
3257 {
3258 insn = ready_remove_first (&ready);
3259 gcc_assert (DEBUG_INSN_P (insn));
3260 (*current_sched_info->begin_schedule_ready)
3261 (insn, last_scheduled_insn);
3262 move_insn (insn, last_scheduled_insn,
3263 current_sched_info->next_tail);
3264 advance = schedule_insn (insn);
3265 last_scheduled_insn = insn;
3266 gcc_assert (advance == 0);
3267 if (ready.n_ready > 0)
3268 ready_sort (&ready);
3269 }
3270 }
3271
3272 if (targetm.sched.reorder2
3273 && (ready.n_ready == 0
3274 || !SCHED_GROUP_P (ready_element (&ready, 0))))
3275 {
3276 can_issue_more =
3277 targetm.sched.reorder2 (sched_dump, sched_verbose,
3278 ready.n_ready
3279 ? ready_lastpos (&ready) : NULL,
3280 &ready.n_ready, clock_var);
3281 }
3282 }
3283 }
3284
3285 /* Debug info. */
3286 if (sched_verbose)
3287 {
3288 fprintf (sched_dump, ";;\tReady list (final): ");
3289 debug_ready_list (&ready);
3290 }
3291
3292 if (current_sched_info->queue_must_finish_empty)
3293 /* Sanity check -- the queue must be empty now. Meaningless if the region
3294 has multiple bbs. */
3295 gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
3296 else
3297 {
3298 /* We must maintain QUEUE_INDEX between blocks in region. */
3299 for (i = ready.n_ready - 1; i >= 0; i--)
3300 {
3301 rtx x;
3302
3303 x = ready_element (&ready, i);
3304 QUEUE_INDEX (x) = QUEUE_NOWHERE;
3305 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
3306 }
3307
3308 if (q_size)
3309 for (i = 0; i <= max_insn_queue_index; i++)
3310 {
3311 rtx link;
3312 for (link = insn_queue[i]; link; link = XEXP (link, 1))
3313 {
3314 rtx x;
3315
3316 x = XEXP (link, 0);
3317 QUEUE_INDEX (x) = QUEUE_NOWHERE;
3318 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
3319 }
3320 free_INSN_LIST_list (&insn_queue[i]);
3321 }
3322 }
3323
3324 if (sched_verbose)
3325 fprintf (sched_dump, ";; total time = %d\n", clock_var);
3326
3327 if (!current_sched_info->queue_must_finish_empty
3328 || haifa_recovery_bb_recently_added_p)
3329 {
3330 /* INSN_TICK (the minimum clock tick at which the insn becomes
3331 ready) may not be correct for the insns in the subsequent
3332 blocks of the region. We should either use a correct value of
3333 `clock_var' or modify INSN_TICK. It is better to keep the
3334 clock_var value equal to 0 at the start of a basic block.
3335 Therefore we modify INSN_TICK here. */
3336 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
3337 }
3338
3339 if (targetm.sched.finish)
3340 {
3341 targetm.sched.finish (sched_dump, sched_verbose);
3342 /* The target might have added some instructions to the scheduled block
3343 in its md_finish () hook. These new insns don't have any data
3344 initialized, so to identify them we extend h_i_d so that they'll
3345 get zero luids. */
3346 sched_init_luids (NULL, NULL, NULL, NULL);
3347 }
3348
3349 if (sched_verbose)
3350 fprintf (sched_dump, ";; new head = %d\n;; new tail = %d\n\n",
3351 INSN_UID (head), INSN_UID (tail));
3352
3353 /* Update head/tail boundaries. */
3354 head = NEXT_INSN (prev_head);
3355 tail = last_scheduled_insn;
3356
3357 head = restore_other_notes (head, NULL);
3358
3359 current_sched_info->head = head;
3360 current_sched_info->tail = tail;
3361 }
3362 \f
3363 /* Set_priorities: compute priority of each insn in the block. */
3364
3365 int
3366 set_priorities (rtx head, rtx tail)
3367 {
3368 rtx insn;
3369 int n_insn;
3370 int sched_max_insns_priority =
3371 current_sched_info->sched_max_insns_priority;
3372 rtx prev_head;
3373
3374 if (head == tail && ! INSN_P (head))
3375 gcc_unreachable ();
3376
3377 n_insn = 0;
3378
3379 prev_head = PREV_INSN (head);
3380 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
3381 {
3382 if (!INSN_P (insn))
3383 continue;
3384
3385 n_insn++;
3386 (void) priority (insn);
3387
3388 gcc_assert (INSN_PRIORITY_KNOWN (insn));
3389
3390 sched_max_insns_priority = MAX (sched_max_insns_priority,
3391 INSN_PRIORITY (insn));
3392 }
3393
3394 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
3395
3396 return n_insn;
3397 }
3398
3399 /* Set dump and sched_verbose for the desired debugging output. If no
3400 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
3401 For -fsched-verbose=N, N>=10, print everything to stderr. */
3402 void
3403 setup_sched_dump (void)
3404 {
3405 sched_verbose = sched_verbose_param;
3406 if (sched_verbose_param == 0 && dump_file)
3407 sched_verbose = 1;
3408 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
3409 ? stderr : dump_file);
3410 }
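/* For example, compiling with -fsched-verbose=2 and a scheduling dump
   (e.g. -fdump-rtl-sched2) sends the trace to that dump file, while
   -fsched-verbose=2 alone sends it to stderr; -fsched-verbose=10 or higher
   always prints to stderr.  */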
3411
3412 /* Initialize some global state for the scheduler. This function works
3413 with the common data shared between all the schedulers. It is called
3414 from the scheduler specific initialization routine. */
3415
3416 void
3417 sched_init (void)
3418 {
3419 /* If cc0 is defined, disable speculative loads in its presence. */
3420 #ifdef HAVE_cc0
3421 flag_schedule_speculative_load = 0;
3422 #endif
3423
3424 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
3425 targetm.sched.dispatch_do (NULL_RTX, DISPATCH_INIT);
3426
3427 sched_pressure_p = (flag_sched_pressure && ! reload_completed
3428 && common_sched_info->sched_pass_id == SCHED_RGN_PASS);
3429
3430 if (sched_pressure_p)
3431 ira_setup_eliminable_regset ();
3432
3433 /* Initialize SPEC_INFO. */
3434 if (targetm.sched.set_sched_flags)
3435 {
3436 spec_info = &spec_info_var;
3437 targetm.sched.set_sched_flags (spec_info);
3438
3439 if (spec_info->mask != 0)
3440 {
3441 spec_info->data_weakness_cutoff =
3442 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
3443 spec_info->control_weakness_cutoff =
3444 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
3445 * REG_BR_PROB_BASE) / 100;
3446 }
3447 else
3448 /* So we won't read anything accidentally. */
3449 spec_info = NULL;
3450
3451 }
3452 else
3453 /* So we won't read anything accidentally. */
3454 spec_info = 0;
3455
3456 /* Initialize issue_rate. */
3457 if (targetm.sched.issue_rate)
3458 issue_rate = targetm.sched.issue_rate ();
3459 else
3460 issue_rate = 1;
3461
3462 if (cached_issue_rate != issue_rate)
3463 {
3464 cached_issue_rate = issue_rate;
3465 /* To invalidate max_lookahead_tries: */
3466 cached_first_cycle_multipass_dfa_lookahead = 0;
3467 }
3468
3469 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
3470 dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
3471 else
3472 dfa_lookahead = 0;
3473
3474 if (targetm.sched.init_dfa_pre_cycle_insn)
3475 targetm.sched.init_dfa_pre_cycle_insn ();
3476
3477 if (targetm.sched.init_dfa_post_cycle_insn)
3478 targetm.sched.init_dfa_post_cycle_insn ();
3479
3480 dfa_start ();
3481 dfa_state_size = state_size ();
3482
3483 init_alias_analysis ();
3484
3485 df_set_flags (DF_LR_RUN_DCE);
3486 df_note_add_problem ();
3487
3488 /* More problems needed for interloop dep calculation in SMS. */
3489 if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
3490 {
3491 df_rd_add_problem ();
3492 df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
3493 }
3494
3495 df_analyze ();
3496
3497 /* Do not run DCE after reload, as this can kill nops inserted
3498 by bundling. */
3499 if (reload_completed)
3500 df_clear_flags (DF_LR_RUN_DCE);
3501
3502 regstat_compute_calls_crossed ();
3503
3504 if (targetm.sched.init_global)
3505 targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);
3506
3507 if (sched_pressure_p)
3508 {
3509 int i, max_regno = max_reg_num ();
3510
3511 ira_set_pseudo_classes (sched_verbose ? sched_dump : NULL);
3512 sched_regno_cover_class
3513 = (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
3514 for (i = 0; i < max_regno; i++)
3515 sched_regno_cover_class[i]
3516 = (i < FIRST_PSEUDO_REGISTER
3517 ? ira_class_translate[REGNO_REG_CLASS (i)]
3518 : reg_cover_class (i));
3519 curr_reg_live = BITMAP_ALLOC (NULL);
3520 saved_reg_live = BITMAP_ALLOC (NULL);
3521 region_ref_regs = BITMAP_ALLOC (NULL);
3522 }
3523
3524 curr_state = xmalloc (dfa_state_size);
3525 }
3526
3527 static void haifa_init_only_bb (basic_block, basic_block);
3528
3529 /* Initialize data structures specific to the Haifa scheduler. */
3530 void
3531 haifa_sched_init (void)
3532 {
3533 setup_sched_dump ();
3534 sched_init ();
3535
3536 if (spec_info != NULL)
3537 {
3538 sched_deps_info->use_deps_list = 1;
3539 sched_deps_info->generate_spec_deps = 1;
3540 }
3541
3542 /* Initialize luids, dependency caches, target and h_i_d for the
3543 whole function. */
3544 {
3545 bb_vec_t bbs = VEC_alloc (basic_block, heap, n_basic_blocks);
3546 basic_block bb;
3547
3548 sched_init_bbs ();
3549
3550 FOR_EACH_BB (bb)
3551 VEC_quick_push (basic_block, bbs, bb);
3552 sched_init_luids (bbs, NULL, NULL, NULL);
3553 sched_deps_init (true);
3554 sched_extend_target ();
3555 haifa_init_h_i_d (bbs, NULL, NULL, NULL);
3556
3557 VEC_free (basic_block, heap, bbs);
3558 }
3559
3560 sched_init_only_bb = haifa_init_only_bb;
3561 sched_split_block = sched_split_block_1;
3562 sched_create_empty_bb = sched_create_empty_bb_1;
3563 haifa_recovery_bb_ever_added_p = false;
3564
3565 #ifdef ENABLE_CHECKING
3566 /* This is used primarily for finding bugs in check_cfg () itself.
3567 We must call sched_bbs_init () before check_cfg () because check_cfg ()
3568 assumes that the last insn in the last bb has a non-null successor. */
3569 check_cfg (0, 0);
3570 #endif
3571
3572 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
3573 before_recovery = 0;
3574 after_recovery = 0;
3575 }
3576
3577 /* Finish work with the data specific to the Haifa scheduler. */
3578 void
3579 haifa_sched_finish (void)
3580 {
3581 sched_create_empty_bb = NULL;
3582 sched_split_block = NULL;
3583 sched_init_only_bb = NULL;
3584
3585 if (spec_info && spec_info->dump)
3586 {
3587 char c = reload_completed ? 'a' : 'b';
3588
3589 fprintf (spec_info->dump,
3590 ";; %s:\n", current_function_name ());
3591
3592 fprintf (spec_info->dump,
3593 ";; Procedure %cr-begin-data-spec motions == %d\n",
3594 c, nr_begin_data);
3595 fprintf (spec_info->dump,
3596 ";; Procedure %cr-be-in-data-spec motions == %d\n",
3597 c, nr_be_in_data);
3598 fprintf (spec_info->dump,
3599 ";; Procedure %cr-begin-control-spec motions == %d\n",
3600 c, nr_begin_control);
3601 fprintf (spec_info->dump,
3602 ";; Procedure %cr-be-in-control-spec motions == %d\n",
3603 c, nr_be_in_control);
3604 }
3605
3606 /* Finalize h_i_d, dependency caches, and luids for the whole
3607 function. Target will be finalized in md_global_finish (). */
3608 sched_deps_finish ();
3609 sched_finish_luids ();
3610 current_sched_info = NULL;
3611 sched_finish ();
3612 }
3613
3614 /* Free global data used during insn scheduling. This function works with
3615 the common data shared between the schedulers. */
3616
3617 void
3618 sched_finish (void)
3619 {
3620 haifa_finish_h_i_d ();
3621 if (sched_pressure_p)
3622 {
3623 free (sched_regno_cover_class);
3624 BITMAP_FREE (region_ref_regs);
3625 BITMAP_FREE (saved_reg_live);
3626 BITMAP_FREE (curr_reg_live);
3627 }
3628 free (curr_state);
3629
3630 if (targetm.sched.finish_global)
3631 targetm.sched.finish_global (sched_dump, sched_verbose);
3632
3633 end_alias_analysis ();
3634
3635 regstat_free_calls_crossed ();
3636
3637 dfa_finish ();
3638
3639 #ifdef ENABLE_CHECKING
3640 /* After reload the ia64 backend clobbers the CFG, so we can't check anything. */
3641 if (!reload_completed)
3642 check_cfg (0, 0);
3643 #endif
3644 }
3645
3646 /* Fix INSN_TICKs of the instructions in the current block as well as
3647 INSN_TICKs of their dependents.
3648 HEAD and TAIL are the begin and the end of the current scheduled block. */
3649 static void
3650 fix_inter_tick (rtx head, rtx tail)
3651 {
3652 /* Set of instructions with corrected INSN_TICK. */
3653 bitmap_head processed;
3654 /* ??? It is doubtful whether we should assume that a cycle advance happens
3655 on basic block boundaries. Basically, insns that are unconditionally ready
3656 at the start of the block are preferable to those which have
3657 a one cycle dependency on an insn from the previous block. */
3658 int next_clock = clock_var + 1;
3659
3660 bitmap_initialize (&processed, 0);
3661
3662 /* Iterate over the scheduled instructions and fix their INSN_TICKs and the
3663 INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
3664 across different blocks. */
3665 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
3666 {
3667 if (INSN_P (head))
3668 {
3669 int tick;
3670 sd_iterator_def sd_it;
3671 dep_t dep;
3672
3673 tick = INSN_TICK (head);
3674 gcc_assert (tick >= MIN_TICK);
3675
3676 /* Fix the INSN_TICK of an instruction from the just-scheduled block. */
3677 if (bitmap_set_bit (&processed, INSN_LUID (head)))
3678 {
3679 tick -= next_clock;
3680
3681 if (tick < MIN_TICK)
3682 tick = MIN_TICK;
3683
3684 INSN_TICK (head) = tick;
3685 }
3686
3687 FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
3688 {
3689 rtx next;
3690
3691 next = DEP_CON (dep);
3692 tick = INSN_TICK (next);
3693
3694 if (tick != INVALID_TICK
3695 /* If NEXT has its INSN_TICK calculated, fix it.
3696 If not - it will be properly calculated from
3697 scratch later in fix_tick_ready. */
3698 && bitmap_set_bit (&processed, INSN_LUID (next)))
3699 {
3700 tick -= next_clock;
3701
3702 if (tick < MIN_TICK)
3703 tick = MIN_TICK;
3704
3705 if (tick > INTER_TICK (next))
3706 INTER_TICK (next) = tick;
3707 else
3708 tick = INTER_TICK (next);
3709
3710 INSN_TICK (next) = tick;
3711 }
3712 }
3713 }
3714 }
3715 bitmap_clear (&processed);
3716 }
3717
3718 static int haifa_speculate_insn (rtx, ds_t, rtx *);
3719
3720 /* Check if NEXT is ready to be added to the ready or queue list.
3721 If "yes", add it to the proper list.
3722 Returns:
3723 -1 - is not ready yet,
3724 0 - added to the ready list,
3725 0 < N - queued for N cycles. */
3726 int
3727 try_ready (rtx next)
3728 {
3729 ds_t old_ts, *ts;
3730
3731 ts = &TODO_SPEC (next);
3732 old_ts = *ts;
3733
3734 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
3735 && ((old_ts & HARD_DEP)
3736 || (old_ts & SPECULATIVE)));
3737
3738 if (sd_lists_empty_p (next, SD_LIST_BACK))
3739 /* NEXT has all its dependencies resolved. */
3740 {
3741 /* Remove HARD_DEP bit from NEXT's status. */
3742 *ts &= ~HARD_DEP;
3743
3744 if (current_sched_info->flags & DO_SPECULATION)
3745 /* Remove all speculative bits from NEXT's status. */
3746 *ts &= ~SPECULATIVE;
3747 }
3748 else
3749 {
3750 /* One of NEXT's dependencies has been resolved.
3751 Recalculate NEXT's status. */
3752
3753 *ts &= ~SPECULATIVE & ~HARD_DEP;
3754
3755 if (sd_lists_empty_p (next, SD_LIST_HARD_BACK))
3756 /* Now we've got NEXT with speculative deps only.
3757 1. Look at the deps to see what we have to do.
3758 2. Check if we can do 'todo'. */
3759 {
3760 sd_iterator_def sd_it;
3761 dep_t dep;
3762 bool first_p = true;
3763
3764 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
3765 {
3766 ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
3767
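	      /* Dependencies whose producer is a debug insn do not constrain
	         a non-debug consumer, so don't let them affect the merged
	         speculative status.  */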
3768 if (DEBUG_INSN_P (DEP_PRO (dep))
3769 && !DEBUG_INSN_P (next))
3770 continue;
3771
3772 if (first_p)
3773 {
3774 first_p = false;
3775
3776 *ts = ds;
3777 }
3778 else
3779 *ts = ds_merge (*ts, ds);
3780 }
3781
3782 if (ds_weak (*ts) < spec_info->data_weakness_cutoff)
3783 /* The merged speculation is too weak (below the cutoff). */
3784 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3785 }
3786 else
3787 *ts |= HARD_DEP;
3788 }
3789
3790 if (*ts & HARD_DEP)
3791 gcc_assert (*ts == old_ts
3792 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
3793 else if (current_sched_info->new_ready)
3794 *ts = current_sched_info->new_ready (next, *ts);
3795
3796 /* * If !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then the insn might
3797 have either its original pattern or a changed (speculative) one. This is
3798 due to changing ebbs in region scheduling.
3799 * But if (old_ts & SPECULATIVE), then we are pretty sure that the insn
3800 has a speculative pattern.
3801
3802 We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
3803 control-speculative NEXT could have been discarded by sched-rgn.c
3804 (the same case as when discarded by can_schedule_ready_p ()). */
3805
3806 if ((*ts & SPECULATIVE)
3807 /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
3808 need to change anything. */
3809 && *ts != old_ts)
3810 {
3811 int res;
3812 rtx new_pat;
3813
3814 gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
3815
3816 res = haifa_speculate_insn (next, *ts, &new_pat);
3817
3818 switch (res)
3819 {
3820 case -1:
3821 /* It would be nice to change DEP_STATUS of all dependences,
3822 which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
3823 so we won't reanalyze anything. */
3824 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3825 break;
3826
3827 case 0:
3828 /* We follow the rule that every speculative insn
3829 has a non-null ORIG_PAT. */
3830 if (!ORIG_PAT (next))
3831 ORIG_PAT (next) = PATTERN (next);
3832 break;
3833
3834 case 1:
3835 if (!ORIG_PAT (next))
3836 /* If we are going to overwrite the original pattern of the insn,
3837 save it. */
3838 ORIG_PAT (next) = PATTERN (next);
3839
3840 haifa_change_pattern (next, new_pat);
3841 break;
3842
3843 default:
3844 gcc_unreachable ();
3845 }
3846 }
3847
3848 /* We need to restore pattern only if (*ts == 0), because otherwise it is
3849 either correct (*ts & SPECULATIVE),
3850 or we simply don't care (*ts & HARD_DEP). */
3851
3852 gcc_assert (!ORIG_PAT (next)
3853 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
3854
3855 if (*ts & HARD_DEP)
3856 {
3857 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
3858 control-speculative NEXT could have been discarded by sched-rgn.c
3859 (the same case as when discarded by can_schedule_ready_p ()). */
3860 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
3861
3862 change_queue_index (next, QUEUE_NOWHERE);
3863 return -1;
3864 }
3865 else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
3866 /* We should change the pattern of every previously speculative
3867 instruction - and we determine whether NEXT was speculative by using
3868 the ORIG_PAT field. There is one exception - speculation checks have
3869 ORIG_PAT set too, so skip them. */
3870 {
3871 haifa_change_pattern (next, ORIG_PAT (next));
3872 ORIG_PAT (next) = 0;
3873 }
3874
3875 if (sched_verbose >= 2)
3876 {
3877 int s = TODO_SPEC (next);
3878
3879 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
3880 (*current_sched_info->print_insn) (next, 0));
3881
3882 if (spec_info && spec_info->dump)
3883 {
3884 if (s & BEGIN_DATA)
3885 fprintf (spec_info->dump, "; data-spec;");
3886 if (s & BEGIN_CONTROL)
3887 fprintf (spec_info->dump, "; control-spec;");
3888 if (s & BE_IN_CONTROL)
3889 fprintf (spec_info->dump, "; in-control-spec;");
3890 }
3891
3892 fprintf (sched_dump, "\n");
3893 }
3894
3895 adjust_priority (next);
3896
3897 return fix_tick_ready (next);
3898 }
3899
3900 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
3901 static int
3902 fix_tick_ready (rtx next)
3903 {
3904 int tick, delay;
3905
3906 if (!sd_lists_empty_p (next, SD_LIST_RES_BACK))
3907 {
3908 int full_p;
3909 sd_iterator_def sd_it;
3910 dep_t dep;
3911
3912 tick = INSN_TICK (next);
3913 /* If TICK is not equal to INVALID_TICK, then update
3914 INSN_TICK of NEXT with the most recently resolved dependence
3915 cost. Otherwise, recalculate it from scratch. */
3916 full_p = (tick == INVALID_TICK);
3917
3918 FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
3919 {
3920 rtx pro = DEP_PRO (dep);
3921 int tick1;
3922
3923 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
3924
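	  /* The earliest cycle at which NEXT can issue with respect to this
	     producer is the producer's tick plus the dependence latency.  */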
3925 tick1 = INSN_TICK (pro) + dep_cost (dep);
3926 if (tick1 > tick)
3927 tick = tick1;
3928
3929 if (!full_p)
3930 break;
3931 }
3932 }
3933 else
3934 tick = -1;
3935
3936 INSN_TICK (next) = tick;
3937
3938 delay = tick - clock_var;
3939 if (delay <= 0 || sched_pressure_p)
3940 delay = QUEUE_READY;
3941
3942 change_queue_index (next, delay);
3943
3944 return delay;
3945 }
3946
3947 /* Move NEXT to the proper queue list with (DELAY >= 1),
3948 or add it to the ready list (DELAY == QUEUE_READY),
3949 or remove it from both the ready and queue lists (DELAY == QUEUE_NOWHERE). */
3950 static void
3951 change_queue_index (rtx next, int delay)
3952 {
3953 int i = QUEUE_INDEX (next);
3954
3955 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
3956 && delay != 0);
3957 gcc_assert (i != QUEUE_SCHEDULED);
3958
3959 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
3960 || (delay < 0 && delay == i))
3961 /* We have nothing to do. */
3962 return;
3963
3964 /* Remove NEXT from wherever it is now. */
3965 if (i == QUEUE_READY)
3966 ready_remove_insn (next);
3967 else if (i >= 0)
3968 queue_remove (next);
3969
3970 /* Add it to the proper place. */
3971 if (delay == QUEUE_READY)
3972 ready_add (readyp, next, false);
3973 else if (delay >= 1)
3974 queue_insn (next, delay);
3975
3976 if (sched_verbose >= 2)
3977 {
3978 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
3979 (*current_sched_info->print_insn) (next, 0));
3980
3981 if (delay == QUEUE_READY)
3982 fprintf (sched_dump, " into ready\n");
3983 else if (delay >= 1)
3984 fprintf (sched_dump, " into queue with cost=%d\n", delay);
3985 else
3986 fprintf (sched_dump, " removed from ready or queue lists\n");
3987 }
3988 }
3989
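/* The number of insns the ready list and choice stack were last sized for,
   or -1 before the first call to sched_extend_ready_list.  */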
3990 static int sched_ready_n_insns = -1;
3991
3992 /* Initialize per region data structures. */
3993 void
3994 sched_extend_ready_list (int new_sched_ready_n_insns)
3995 {
3996 int i;
3997
3998 if (sched_ready_n_insns == -1)
3999 /* At the first call we need to initialize one more choice_stack
4000 entry. */
4001 {
4002 i = 0;
4003 sched_ready_n_insns = 0;
4004 }
4005 else
4006 i = sched_ready_n_insns + 1;
4007
4008 ready.veclen = new_sched_ready_n_insns + issue_rate;
4009 ready.vec = XRESIZEVEC (rtx, ready.vec, ready.veclen);
4010
4011 gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
4012
4013 ready_try = (char *) xrecalloc (ready_try, new_sched_ready_n_insns,
4014 sched_ready_n_insns, sizeof (*ready_try));
4015
4016 /* We allocate +1 element to save initial state in the choice_stack[0]
4017 entry. */
4018 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
4019 new_sched_ready_n_insns + 1);
4020
4021 for (; i <= new_sched_ready_n_insns; i++)
4022 {
4023 choice_stack[i].state = xmalloc (dfa_state_size);
4024
4025 if (targetm.sched.first_cycle_multipass_init)
4026 targetm.sched.first_cycle_multipass_init (&(choice_stack[i]
4027 .target_data));
4028 }
4029
4030 sched_ready_n_insns = new_sched_ready_n_insns;
4031 }
4032
4033 /* Free per region data structures. */
4034 void
4035 sched_finish_ready_list (void)
4036 {
4037 int i;
4038
4039 free (ready.vec);
4040 ready.vec = NULL;
4041 ready.veclen = 0;
4042
4043 free (ready_try);
4044 ready_try = NULL;
4045
4046 for (i = 0; i <= sched_ready_n_insns; i++)
4047 {
4048 if (targetm.sched.first_cycle_multipass_fini)
4049 targetm.sched.first_cycle_multipass_fini (&(choice_stack[i]
4050 .target_data));
4051
4052 free (choice_stack [i].state);
4053 }
4054 free (choice_stack);
4055 choice_stack = NULL;
4056
4057 sched_ready_n_insns = -1;
4058 }
4059
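/* LUID hook used by sched_init_luids: in the Haifa scheduler notes and
   labels do not need LUIDs of their own, so report zero extra LUIDs for X.  */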
4060 static int
4061 haifa_luid_for_non_insn (rtx x)
4062 {
4063 gcc_assert (NOTE_P (x) || LABEL_P (x));
4064
4065 return 0;
4066 }
4067
4068 /* Generates recovery code for INSN. */
4069 static void
4070 generate_recovery_code (rtx insn)
4071 {
4072 if (TODO_SPEC (insn) & BEGIN_SPEC)
4073 begin_speculative_block (insn);
4074
4075 /* Here we have an insn with no dependencies on
4076 instructions other than CHECK_SPEC ones. */
4077
4078 if (TODO_SPEC (insn) & BE_IN_SPEC)
4079 add_to_speculative_block (insn);
4080 }
4081
4082 /* Helper function.
4083 Tries to add speculative dependencies of type FS between TWIN
4084 and each consumer of INSN's forward dependencies. */
4085 static void
4086 process_insn_forw_deps_be_in_spec (rtx insn, rtx twin, ds_t fs)
4087 {
4088 sd_iterator_def sd_it;
4089 dep_t dep;
4090
4091 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
4092 {
4093 ds_t ds;
4094 rtx consumer;
4095
4096 consumer = DEP_CON (dep);
4097
4098 ds = DEP_STATUS (dep);
4099
4100 if (/* If we want to create speculative dep. */
4101 fs
4102 /* And we can do that because this is a true dep. */
4103 && (ds & DEP_TYPES) == DEP_TRUE)
4104 {
4105 gcc_assert (!(ds & BE_IN_SPEC));
4106
4107 if (/* If this dep can be overcome with 'begin speculation'. */
4108 ds & BEGIN_SPEC)
4109 /* Then we have a choice: keep the dep 'begin speculative'
4110 or transform it into 'be in speculative'. */
4111 {
4112 if (/* In try_ready we assert that once an insn has become ready
4113 it can be removed from the ready (or queue) list only
4114 due to a backend decision. Hence we can't let the
4115 probability of the speculative dep decrease. */
4116 ds_weak (ds) <= ds_weak (fs))
4117 {
4118 ds_t new_ds;
4119
4120 new_ds = (ds & ~BEGIN_SPEC) | fs;
4121
4122 if (/* consumer can 'be in speculative'. */
4123 sched_insn_is_legitimate_for_speculation_p (consumer,
4124 new_ds))
4125 /* Transform it to be in speculative. */
4126 ds = new_ds;
4127 }
4128 }
4129 else
4130 /* Mark the dep as 'be in speculative'. */
4131 ds |= fs;
4132 }
4133
4134 {
4135 dep_def _new_dep, *new_dep = &_new_dep;
4136
4137 init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
4138 sd_add_dep (new_dep, false);
4139 }
4140 }
4141 }
4142
4143 /* Generates recovery code for BEGIN speculative INSN. */
4144 static void
4145 begin_speculative_block (rtx insn)
4146 {
4147 if (TODO_SPEC (insn) & BEGIN_DATA)
4148 nr_begin_data++;
4149 if (TODO_SPEC (insn) & BEGIN_CONTROL)
4150 nr_begin_control++;
4151
4152 create_check_block_twin (insn, false);
4153
4154 TODO_SPEC (insn) &= ~BEGIN_SPEC;
4155 }
4156
4157 static void haifa_init_insn (rtx);
4158
4159 /* Generates recovery code for BE_IN speculative INSN. */
4160 static void
4161 add_to_speculative_block (rtx insn)
4162 {
4163 ds_t ts;
4164 sd_iterator_def sd_it;
4165 dep_t dep;
4166 rtx twins = NULL;
4167 rtx_vec_t priorities_roots;
4168
4169 ts = TODO_SPEC (insn);
4170 gcc_assert (!(ts & ~BE_IN_SPEC));
4171
4172 if (ts & BE_IN_DATA)
4173 nr_be_in_data++;
4174 if (ts & BE_IN_CONTROL)
4175 nr_be_in_control++;
4176
4177 TODO_SPEC (insn) &= ~BE_IN_SPEC;
4178 gcc_assert (!TODO_SPEC (insn));
4179
4180 DONE_SPEC (insn) |= ts;
4181
4182 /* First we convert all simple checks to branchy ones. */
4183 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4184 sd_iterator_cond (&sd_it, &dep);)
4185 {
4186 rtx check = DEP_PRO (dep);
4187
4188 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
4189 {
4190 create_check_block_twin (check, true);
4191
4192 /* Restart search. */
4193 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4194 }
4195 else
4196 /* Continue search. */
4197 sd_iterator_next (&sd_it);
4198 }
4199
4200 priorities_roots = NULL;
4201 clear_priorities (insn, &priorities_roots);
4202
4203 while (1)
4204 {
4205 rtx check, twin;
4206 basic_block rec;
4207
4208 /* Get the first backward dependency of INSN. */
4209 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4210 if (!sd_iterator_cond (&sd_it, &dep))
4211 /* INSN has no backward dependencies left. */
4212 break;
4213
4214 gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
4215 && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
4216 && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
4217
4218 check = DEP_PRO (dep);
4219
4220 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
4221 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
4222
4223 rec = BLOCK_FOR_INSN (check);
4224
4225 twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
4226 haifa_init_insn (twin);
4227
4228 sd_copy_back_deps (twin, insn, true);
4229
4230 if (sched_verbose && spec_info->dump)
4231 /* INSN_BB (insn) isn't determined for twin insns yet.
4232 So we can't use current_sched_info->print_insn. */
4233 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
4234 INSN_UID (twin), rec->index);
4235
4236 twins = alloc_INSN_LIST (twin, twins);
4237
4238 /* Add dependences between TWIN and all appropriate
4239 instructions from REC. */
4240 FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
4241 {
4242 rtx pro = DEP_PRO (dep);
4243
4244 gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
4245
4246 /* INSN might have dependencies on instructions from
4247 several recovery blocks. In this iteration we process those
4248 producers that reside in REC. */
4249 if (BLOCK_FOR_INSN (pro) == rec)
4250 {
4251 dep_def _new_dep, *new_dep = &_new_dep;
4252
4253 init_dep (new_dep, pro, twin, REG_DEP_TRUE);
4254 sd_add_dep (new_dep, false);
4255 }
4256 }
4257
4258 process_insn_forw_deps_be_in_spec (insn, twin, ts);
4259
4260 /* Remove all dependencies between INSN and insns in REC. */
4261 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4262 sd_iterator_cond (&sd_it, &dep);)
4263 {
4264 rtx pro = DEP_PRO (dep);
4265
4266 if (BLOCK_FOR_INSN (pro) == rec)
4267 sd_delete_dep (sd_it);
4268 else
4269 sd_iterator_next (&sd_it);
4270 }
4271 }
4272
4273 /* We couldn't have added the dependencies between INSN and TWINS earlier
4274 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
4275 while (twins)
4276 {
4277 rtx twin;
4278
4279 twin = XEXP (twins, 0);
4280
4281 {
4282 dep_def _new_dep, *new_dep = &_new_dep;
4283
4284 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
4285 sd_add_dep (new_dep, false);
4286 }
4287
4288 twin = XEXP (twins, 1);
4289 free_INSN_LIST_node (twins);
4290 twins = twin;
4291 }
4292
4293 calc_priorities (priorities_roots);
4294 VEC_free (rtx, heap, priorities_roots);
4295 }
4296
4297 /* Extend the array pointed to by P and zero-fill only the new part. */
4298 void *
4299 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
4300 {
4301 gcc_assert (new_nmemb >= old_nmemb);
4302 p = XRESIZEVAR (void, p, new_nmemb * size);
4303 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
4304 return p;
4305 }
4306
4307 /* Helper function.
4308 Find fallthru edge from PRED. */
4309 edge
4310 find_fallthru_edge_from (basic_block pred)
4311 {
4312 edge e;
4313 basic_block succ;
4314
4315 succ = pred->next_bb;
4316 gcc_assert (succ->prev_bb == pred);
4317
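  /* Search whichever of the two edge lists is shorter.  */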
4318 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
4319 {
4320 e = find_fallthru_edge (pred->succs);
4321
4322 if (e)
4323 {
4324 gcc_assert (e->dest == succ);
4325 return e;
4326 }
4327 }
4328 else
4329 {
4330 e = find_fallthru_edge (succ->preds);
4331
4332 if (e)
4333 {
4334 gcc_assert (e->src == pred);
4335 return e;
4336 }
4337 }
4338
4339 return NULL;
4340 }
4341
4342 /* Extend per basic block data structures. */
4343 static void
4344 sched_extend_bb (void)
4345 {
4346 rtx insn;
4347
4348 /* The following is done to keep current_sched_info->next_tail non null. */
4349 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
4350 if (NEXT_INSN (insn) == 0
4351 || (!NOTE_P (insn)
4352 && !LABEL_P (insn)
4353 /* Don't emit a NOTE if it would end up before a BARRIER. */
4354 && !BARRIER_P (NEXT_INSN (insn))))
4355 {
4356 rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
4357 /* Make the note appear outside BB. */
4358 set_block_for_insn (note, NULL);
4359 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
4360 }
4361 }
4362
4363 /* Init per basic block data structures. */
4364 void
4365 sched_init_bbs (void)
4366 {
4367 sched_extend_bb ();
4368 }
4369
4370 /* Initialize BEFORE_RECOVERY variable. */
4371 static void
4372 init_before_recovery (basic_block *before_recovery_ptr)
4373 {
4374 basic_block last;
4375 edge e;
4376
4377 last = EXIT_BLOCK_PTR->prev_bb;
4378 e = find_fallthru_edge_from (last);
4379
4380 if (e)
4381 {
4382 /* We create two basic blocks:
4383 1. A single-instruction block that is inserted right after E->SRC
4384 and jumps to
4385 2. An empty block placed right before EXIT_BLOCK.
4386 Recovery blocks will be emitted between these two blocks. */
4387
4388 basic_block single, empty;
4389 rtx x, label;
4390
4391 /* If the fallthru edge to the exit block that we've found comes from the
4392 block we created before, there is nothing more to do. */
4393 if (last == after_recovery)
4394 return;
4395
4396 adding_bb_to_current_region_p = false;
4397
4398 single = sched_create_empty_bb (last);
4399 empty = sched_create_empty_bb (single);
4400
4401 /* Add new blocks to the root loop. */
4402 if (current_loops != NULL)
4403 {
4404 add_bb_to_loop (single, VEC_index (loop_p, current_loops->larray, 0));
4405 add_bb_to_loop (empty, VEC_index (loop_p, current_loops->larray, 0));
4406 }
4407
4408 single->count = last->count;
4409 empty->count = last->count;
4410 single->frequency = last->frequency;
4411 empty->frequency = last->frequency;
4412 BB_COPY_PARTITION (single, last);
4413 BB_COPY_PARTITION (empty, last);
4414
4415 redirect_edge_succ (e, single);
4416 make_single_succ_edge (single, empty, 0);
4417 make_single_succ_edge (empty, EXIT_BLOCK_PTR,
4418 EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
4419
4420 label = block_label (empty);
4421 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
4422 JUMP_LABEL (x) = label;
4423 LABEL_NUSES (label)++;
4424 haifa_init_insn (x);
4425
4426 emit_barrier_after (x);
4427
4428 sched_init_only_bb (empty, NULL);
4429 sched_init_only_bb (single, NULL);
4430 sched_extend_bb ();
4431
4432 adding_bb_to_current_region_p = true;
4433 before_recovery = single;
4434 after_recovery = empty;
4435
4436 if (before_recovery_ptr)
4437 *before_recovery_ptr = before_recovery;
4438
4439 if (sched_verbose >= 2 && spec_info->dump)
4440 fprintf (spec_info->dump,
4441 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
4442 last->index, single->index, empty->index);
4443 }
4444 else
4445 before_recovery = last;
4446 }
4447
4448 /* Return a new recovery block. */
4449 basic_block
4450 sched_create_recovery_block (basic_block *before_recovery_ptr)
4451 {
4452 rtx label;
4453 rtx barrier;
4454 basic_block rec;
4455
4456 haifa_recovery_bb_recently_added_p = true;
4457 haifa_recovery_bb_ever_added_p = true;
4458
4459 init_before_recovery (before_recovery_ptr);
4460
4461 barrier = get_last_bb_insn (before_recovery);
4462 gcc_assert (BARRIER_P (barrier));
4463
4464 label = emit_label_after (gen_label_rtx (), barrier);
4465
4466 rec = create_basic_block (label, label, before_recovery);
4467
4468 /* A recovery block always ends with an unconditional jump. */
4469 emit_barrier_after (BB_END (rec));
4470
4471 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
4472 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
4473
4474 if (sched_verbose && spec_info->dump)
4475 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
4476 rec->index);
4477
4478 return rec;
4479 }
4480
4481 /* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
4482 and emit necessary jumps. */
4483 void
4484 sched_create_recovery_edges (basic_block first_bb, basic_block rec,
4485 basic_block second_bb)
4486 {
4487 rtx label;
4488 rtx jump;
4489 int edge_flags;
4490
4491 /* This fixes the incoming edge. */
4492 /* ??? Which other flags should be specified? */
4493 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
4494 /* If both blocks are unpartitioned, their partition types compare equal. */
4495 edge_flags = EDGE_CROSSING;
4496 else
4497 edge_flags = 0;
4498
4499 make_edge (first_bb, rec, edge_flags);
4500 label = block_label (second_bb);
4501 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
4502 JUMP_LABEL (jump) = label;
4503 LABEL_NUSES (label)++;
4504
4505 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
4506 /* If both blocks are unpartitioned, their partition types compare equal. */
4507 {
4508 /* Rewritten from cfgrtl.c. */
4509 if (flag_reorder_blocks_and_partition
4510 && targetm.have_named_sections)
4511 {
4512 /* We don't need the same note for the check because
4513 any_condjump_p (check) == true. */
4514 add_reg_note (jump, REG_CROSSING_JUMP, NULL_RTX);
4515 }
4516 edge_flags = EDGE_CROSSING;
4517 }
4518 else
4519 edge_flags = 0;
4520
4521 make_single_succ_edge (rec, second_bb, edge_flags);
4522 if (dom_info_available_p (CDI_DOMINATORS))
4523 set_immediate_dominator (CDI_DOMINATORS, rec, first_bb);
4524 }
4525
4526 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
4527 INSN is a simple check that should be converted to a branchy one. */
4528 static void
4529 create_check_block_twin (rtx insn, bool mutate_p)
4530 {
4531 basic_block rec;
4532 rtx label, check, twin;
4533 ds_t fs;
4534 sd_iterator_def sd_it;
4535 dep_t dep;
4536 dep_def _new_dep, *new_dep = &_new_dep;
4537 ds_t todo_spec;
4538
4539 gcc_assert (ORIG_PAT (insn) != NULL_RTX);
4540
4541 if (!mutate_p)
4542 todo_spec = TODO_SPEC (insn);
4543 else
4544 {
4545 gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
4546 && (TODO_SPEC (insn) & SPECULATIVE) == 0);
4547
4548 todo_spec = CHECK_SPEC (insn);
4549 }
4550
4551 todo_spec &= SPECULATIVE;
4552
4553 /* Create recovery block. */
4554 if (mutate_p || targetm.sched.needs_block_p (todo_spec))
4555 {
4556 rec = sched_create_recovery_block (NULL);
4557 label = BB_HEAD (rec);
4558 }
4559 else
4560 {
4561 rec = EXIT_BLOCK_PTR;
4562 label = NULL_RTX;
4563 }
4564
4565 /* Emit CHECK. */
4566 check = targetm.sched.gen_spec_check (insn, label, todo_spec);
4567
4568 if (rec != EXIT_BLOCK_PTR)
4569 {
4570 /* To have mem_reg alive at the beginning of second_bb,
4571 we emit the check BEFORE insn, so that after splitting,
4572 insn will be at the beginning of second_bb, which will
4573 provide us with the correct life information. */
4574 check = emit_jump_insn_before (check, insn);
4575 JUMP_LABEL (check) = label;
4576 LABEL_NUSES (label)++;
4577 }
4578 else
4579 check = emit_insn_before (check, insn);
4580
4581 /* Extend data structures. */
4582 haifa_init_insn (check);
4583
4584 /* CHECK is being added to current region. Extend ready list. */
4585 gcc_assert (sched_ready_n_insns != -1);
4586 sched_extend_ready_list (sched_ready_n_insns + 1);
4587
4588 if (current_sched_info->add_remove_insn)
4589 current_sched_info->add_remove_insn (insn, 0);
4590
4591 RECOVERY_BLOCK (check) = rec;
4592
4593 if (sched_verbose && spec_info->dump)
4594 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
4595 (*current_sched_info->print_insn) (check, 0));
4596
4597 gcc_assert (ORIG_PAT (insn));
4598
4599 /* Initialize TWIN (twin is a duplicate of original instruction
4600 in the recovery block). */
4601 if (rec != EXIT_BLOCK_PTR)
4602 {
4603 sd_iterator_def sd_it;
4604 dep_t dep;
4605
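      /* Give the check a true dependence on every producer on which INSN
         had an output dependence.  */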
4606 FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
4607 if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
4608 {
4609 struct _dep _dep2, *dep2 = &_dep2;
4610
4611 init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);
4612
4613 sd_add_dep (dep2, true);
4614 }
4615
4616 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
4617 haifa_init_insn (twin);
4618
4619 if (sched_verbose && spec_info->dump)
4620 /* INSN_BB (insn) isn't determined for twin insns yet.
4621 So we can't use current_sched_info->print_insn. */
4622 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
4623 INSN_UID (twin), rec->index);
4624 }
4625 else
4626 {
4627 ORIG_PAT (check) = ORIG_PAT (insn);
4628 HAS_INTERNAL_DEP (check) = 1;
4629 twin = check;
4630 /* ??? We probably should change all OUTPUT dependencies to
4631 (TRUE | OUTPUT). */
4632 }
4633
4634 /* Copy all resolved back dependencies of INSN to TWIN. This will
4635 provide correct value for INSN_TICK (TWIN). */
4636 sd_copy_back_deps (twin, insn, true);
4637
4638 if (rec != EXIT_BLOCK_PTR)
4639 /* In case of branchy check, fix CFG. */
4640 {
4641 basic_block first_bb, second_bb;
4642 rtx jump;
4643
4644 first_bb = BLOCK_FOR_INSN (check);
4645 second_bb = sched_split_block (first_bb, check);
4646
4647 sched_create_recovery_edges (first_bb, rec, second_bb);
4648
4649 sched_init_only_bb (second_bb, first_bb);
4650 sched_init_only_bb (rec, EXIT_BLOCK_PTR);
4651
4652 jump = BB_END (rec);
4653 haifa_init_insn (jump);
4654 }
4655
4656 /* Move backward dependences from INSN to CHECK and
4657 move forward dependences from INSN to TWIN. */
4658
4659 /* First, create dependencies between INSN's producers and CHECK & TWIN. */
4660 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4661 {
4662 rtx pro = DEP_PRO (dep);
4663 ds_t ds;
4664
4665 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
4666 check --TRUE--> producer ??? or ANTI ???
4667 twin --TRUE--> producer
4668 twin --ANTI--> check
4669
4670 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
4671 check --ANTI--> producer
4672 twin --ANTI--> producer
4673 twin --ANTI--> check
4674
4675 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
4676 check ~~TRUE~~> producer
4677 twin ~~TRUE~~> producer
4678 twin --ANTI--> check */
4679
4680 ds = DEP_STATUS (dep);
4681
4682 if (ds & BEGIN_SPEC)
4683 {
4684 gcc_assert (!mutate_p);
4685 ds &= ~BEGIN_SPEC;
4686 }
4687
4688 init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
4689 sd_add_dep (new_dep, false);
4690
4691 if (rec != EXIT_BLOCK_PTR)
4692 {
4693 DEP_CON (new_dep) = twin;
4694 sd_add_dep (new_dep, false);
4695 }
4696 }
4697
4698 /* Second, remove backward dependencies of INSN. */
4699 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4700 sd_iterator_cond (&sd_it, &dep);)
4701 {
4702 if ((DEP_STATUS (dep) & BEGIN_SPEC)
4703 || mutate_p)
4704 /* We can delete this dep because we overcome it with
4705 BEGIN_SPECULATION. */
4706 sd_delete_dep (sd_it);
4707 else
4708 sd_iterator_next (&sd_it);
4709 }
4710
4711 /* Future Speculations. Determine what BE_IN speculations will be like. */
4712 fs = 0;
4713
4714 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
4715 here. */
4716
4717 gcc_assert (!DONE_SPEC (insn));
4718
4719 if (!mutate_p)
4720 {
4721 ds_t ts = TODO_SPEC (insn);
4722
4723 DONE_SPEC (insn) = ts & BEGIN_SPEC;
4724 CHECK_SPEC (check) = ts & BEGIN_SPEC;
4725
4726 /* The success of future speculations depends solely on the initial
4727 BEGIN speculation. */
4728 if (ts & BEGIN_DATA)
4729 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
4730 if (ts & BEGIN_CONTROL)
4731 fs = set_dep_weak (fs, BE_IN_CONTROL,
4732 get_dep_weak (ts, BEGIN_CONTROL));
4733 }
4734 else
4735 CHECK_SPEC (check) = CHECK_SPEC (insn);
4736
4737 /* Future speculations: call the helper. */
4738 process_insn_forw_deps_be_in_spec (insn, twin, fs);
4739
4740 if (rec != EXIT_BLOCK_PTR)
4741 {
4742 /* Which types of dependencies to use here is, in general,
4743 a machine-dependent question... But, for now,
4744 it is not. */
4745
4746 if (!mutate_p)
4747 {
4748 init_dep (new_dep, insn, check, REG_DEP_TRUE);
4749 sd_add_dep (new_dep, false);
4750
4751 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
4752 sd_add_dep (new_dep, false);
4753 }
4754 else
4755 {
4756 if (spec_info->dump)
4757 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
4758 (*current_sched_info->print_insn) (insn, 0));
4759
4760 /* Remove all dependencies of the INSN. */
4761 {
4762 sd_it = sd_iterator_start (insn, (SD_LIST_FORW
4763 | SD_LIST_BACK
4764 | SD_LIST_RES_BACK));
4765 while (sd_iterator_cond (&sd_it, &dep))
4766 sd_delete_dep (sd_it);
4767 }
4768
4769 /* If the former check (INSN) was already moved to the ready (or queue)
4770 list, add the new check (CHECK) there too. */
4771 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
4772 try_ready (check);
4773
4774 /* Remove old check from instruction stream and free its
4775 data. */
4776 sched_remove_insn (insn);
4777 }
4778
4779 init_dep (new_dep, check, twin, REG_DEP_ANTI);
4780 sd_add_dep (new_dep, false);
4781 }
4782 else
4783 {
4784 init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
4785 sd_add_dep (new_dep, false);
4786 }
4787
4788 if (!mutate_p)
4789 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
4790 because it'll be done later in add_to_speculative_block. */
4791 {
4792 rtx_vec_t priorities_roots = NULL;
4793
4794 clear_priorities (twin, &priorities_roots);
4795 calc_priorities (priorities_roots);
4796 VEC_free (rtx, heap, priorities_roots);
4797 }
4798 }
4799
4800 /* Remove dependencies between instructions in the recovery block REC
4801 and ordinary region instructions. Inner dependences are kept so that
4802 it won't be necessary to recompute them. */
4803 static void
4804 fix_recovery_deps (basic_block rec)
4805 {
4806 rtx note, insn, jump, ready_list = 0;
4807 bitmap_head in_ready;
4808 rtx link;
4809
4810 bitmap_initialize (&in_ready, 0);
4811
4812 /* NOTE - a basic block note. */
4813 note = NEXT_INSN (BB_HEAD (rec));
4814 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4815 insn = BB_END (rec);
4816 gcc_assert (JUMP_P (insn));
4817 insn = PREV_INSN (insn);
4818
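  /* Walk the recovery block backwards, from the insn just before the final
     jump down to the basic block note.  */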
4819 do
4820 {
4821 sd_iterator_def sd_it;
4822 dep_t dep;
4823
4824 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4825 sd_iterator_cond (&sd_it, &dep);)
4826 {
4827 rtx consumer = DEP_CON (dep);
4828
4829 if (BLOCK_FOR_INSN (consumer) != rec)
4830 {
4831 sd_delete_dep (sd_it);
4832
4833 if (bitmap_set_bit (&in_ready, INSN_LUID (consumer)))
4834 ready_list = alloc_INSN_LIST (consumer, ready_list);
4835 }
4836 else
4837 {
4838 gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
4839
4840 sd_iterator_next (&sd_it);
4841 }
4842 }
4843
4844 insn = PREV_INSN (insn);
4845 }
4846 while (insn != note);
4847
4848 bitmap_clear (&in_ready);
4849
4850 /* Try to add instructions to the ready or queue list. */
4851 for (link = ready_list; link; link = XEXP (link, 1))
4852 try_ready (XEXP (link, 0));
4853 free_INSN_LIST_list (&ready_list);
4854
4855 /* Fix the jump's dependences. */
4856 insn = BB_HEAD (rec);
4857 jump = BB_END (rec);
4858
4859 gcc_assert (LABEL_P (insn));
4860 insn = NEXT_INSN (insn);
4861
4862 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4863 add_jump_dependencies (insn, jump);
4864 }
4865
4866 /* Change pattern of INSN to NEW_PAT. */
4867 void
4868 sched_change_pattern (rtx insn, rtx new_pat)
4869 {
4870 int t;
4871
4872 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
4873 gcc_assert (t);
4874 dfa_clear_single_insn_cache (insn);
4875 }
4876
4877 /* Change pattern of INSN to NEW_PAT. Invalidate cached haifa
4878 instruction data. */
4879 static void
4880 haifa_change_pattern (rtx insn, rtx new_pat)
4881 {
4882 sched_change_pattern (insn, new_pat);
4883
4884 /* Invalidate INSN_COST, so it'll be recalculated. */
4885 INSN_COST (insn) = -1;
4886 /* Invalidate INSN_TICK, so it'll be recalculated. */
4887 INSN_TICK (insn) = INVALID_TICK;
4888 }
4889
4890 /* Return -1 - can't speculate,
4891 0 - for speculation with REQUEST mode it is OK to use the
4892 current instruction pattern,
4893 1 - the pattern must be changed to *NEW_PAT for the insn to be speculative. */
4894 int
4895 sched_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4896 {
4897 gcc_assert (current_sched_info->flags & DO_SPECULATION
4898 && (request & SPECULATIVE)
4899 && sched_insn_is_legitimate_for_speculation_p (insn, request));
4900
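  /* Refuse if some of the requested speculation types are not enabled.  */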
4901 if ((request & spec_info->mask) != request)
4902 return -1;
4903
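  /* A pure BE_IN request does not require changing the pattern, so the
     current pattern can be used.  */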
4904 if (request & BE_IN_SPEC
4905 && !(request & BEGIN_SPEC))
4906 return 0;
4907
4908 return targetm.sched.speculate_insn (insn, request, new_pat);
4909 }
4910
4911 static int
4912 haifa_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4913 {
4914 gcc_assert (sched_deps_info->generate_spec_deps
4915 && !IS_SPECULATION_CHECK_P (insn));
4916
4917 if (HAS_INTERNAL_DEP (insn)
4918 || SCHED_GROUP_P (insn))
4919 return -1;
4920
4921 return sched_speculate_insn (insn, request, new_pat);
4922 }
4923
4924 /* Print some information about block BB, which starts with HEAD and
4925 ends with TAIL, before scheduling it.
4926 I is zero if the scheduler is about to start with a fresh ebb. */
4927 static void
4928 dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
4929 {
4930 if (!i)
4931 fprintf (sched_dump,
4932 ";; ======================================================\n");
4933 else
4934 fprintf (sched_dump,
4935 ";; =====================ADVANCING TO=====================\n");
4936 fprintf (sched_dump,
4937 ";; -- basic block %d from %d to %d -- %s reload\n",
4938 bb->index, INSN_UID (head), INSN_UID (tail),
4939 (reload_completed ? "after" : "before"));
4940 fprintf (sched_dump,
4941 ";; ======================================================\n");
4942 fprintf (sched_dump, "\n");
4943 }
4944
4945 /* Unlink basic block notes and labels and save them, so they
4946 can easily be restored. We unlink basic block notes in the EBB to
4947 provide backward compatibility with the previous code, as target backends
4948 assume that there'll be only instructions between
4949 current_sched_info->{head and tail}. We restore these notes as soon
4950 as we can.
4951 FIRST (LAST) is the first (last) basic block in the ebb.
4952 NB: In the usual case (FIRST == LAST) nothing is really done. */
4953 void
4954 unlink_bb_notes (basic_block first, basic_block last)
4955 {
4956 /* We DON'T unlink basic block notes of the first block in the ebb. */
4957 if (first == last)
4958 return;
4959
4960 bb_header = XNEWVEC (rtx, last_basic_block);
4961
4962 /* Make a sentinel. */
4963 if (last->next_bb != EXIT_BLOCK_PTR)
4964 bb_header[last->next_bb->index] = 0;
4965
4966 first = first->next_bb;
4967 do
4968 {
4969 rtx prev, label, note, next;
4970
4971 label = BB_HEAD (last);
4972 if (LABEL_P (label))
4973 note = NEXT_INSN (label);
4974 else
4975 note = label;
4976 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4977
4978 prev = PREV_INSN (label);
4979 next = NEXT_INSN (note);
4980 gcc_assert (prev && next);
4981
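      /* Snip the label and the basic block note out of the insn chain.  */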
4982 NEXT_INSN (prev) = next;
4983 PREV_INSN (next) = prev;
4984
4985 bb_header[last->index] = label;
4986
4987 if (last == first)
4988 break;
4989
4990 last = last->prev_bb;
4991 }
4992 while (1);
4993 }
4994
4995 /* Restore basic block notes.
4996 FIRST is the first basic block in the ebb. */
4997 static void
4998 restore_bb_notes (basic_block first)
4999 {
5000 if (!bb_header)
5001 return;
5002
5003 /* We DON'T unlink basic block notes of the first block in the ebb. */
5004 first = first->next_bb;
5005 /* Remember: FIRST is actually the second basic block in the ebb. */
5006
5007 while (first != EXIT_BLOCK_PTR
5008 && bb_header[first->index])
5009 {
5010 rtx prev, label, note, next;
5011
5012 label = bb_header[first->index];
5013 prev = PREV_INSN (label);
5014 next = NEXT_INSN (prev);
5015
5016 if (LABEL_P (label))
5017 note = NEXT_INSN (label);
5018 else
5019 note = label;
5020 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5021
5022 bb_header[first->index] = 0;
5023
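      /* Splice the saved label and note back into the insn chain after PREV.  */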
5024 NEXT_INSN (prev) = label;
5025 NEXT_INSN (note) = next;
5026 PREV_INSN (next) = note;
5027
5028 first = first->next_bb;
5029 }
5030
5031 free (bb_header);
5032 bb_header = 0;
5033 }
5034
5035 /* Helper function.
5036 Fix CFG after both in- and inter-block movement of
5037 control_flow_insn_p JUMP. */
5038 static void
5039 fix_jump_move (rtx jump)
5040 {
5041 basic_block bb, jump_bb, jump_bb_next;
5042
5043 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
5044 jump_bb = BLOCK_FOR_INSN (jump);
5045 jump_bb_next = jump_bb->next_bb;
5046
5047 gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
5048 || IS_SPECULATION_BRANCHY_CHECK_P (jump));
5049
5050 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
5051 /* if jump_bb_next is not empty. */
5052 BB_END (jump_bb) = BB_END (jump_bb_next);
5053
5054 if (BB_END (bb) != PREV_INSN (jump))
5055 /* Then there are instructions after jump that should be placed
5056 in jump_bb_next. */
5057 BB_END (jump_bb_next) = BB_END (bb);
5058 else
5059 /* Otherwise jump_bb_next is empty. */
5060 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
5061
5062 /* To make assertion in move_insn happy. */
5063 BB_END (bb) = PREV_INSN (jump);
5064
5065 update_bb_for_insn (jump_bb_next);
5066 }
5067
5068 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
5069 static void
5070 move_block_after_check (rtx jump)
5071 {
5072 basic_block bb, jump_bb, jump_bb_next;
5073 VEC(edge,gc) *t;
5074
5075 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
5076 jump_bb = BLOCK_FOR_INSN (jump);
5077 jump_bb_next = jump_bb->next_bb;
5078
5079 update_bb_for_insn (jump_bb);
5080
5081 gcc_assert (IS_SPECULATION_CHECK_P (jump)
5082 || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
5083
5084 unlink_block (jump_bb_next);
5085 link_block (jump_bb_next, bb);
5086
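  /* Rotate the successor lists: BB takes JUMP_BB's successors, JUMP_BB takes
     JUMP_BB_NEXT's, and JUMP_BB_NEXT takes BB's original ones.  */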
5087 t = bb->succs;
5088 bb->succs = 0;
5089 move_succs (&(jump_bb->succs), bb);
5090 move_succs (&(jump_bb_next->succs), jump_bb);
5091 move_succs (&t, jump_bb_next);
5092
5093 df_mark_solutions_dirty ();
5094
5095 common_sched_info->fix_recovery_cfg
5096 (bb->index, jump_bb->index, jump_bb_next->index);
5097 }
5098
5099 /* Helper function for move_block_after_check.
5100 This function attaches the edge vector pointed to by SUCCSP to
5101 block TO. */
5102 static void
5103 move_succs (VEC(edge,gc) **succsp, basic_block to)
5104 {
5105 edge e;
5106 edge_iterator ei;
5107
5108 gcc_assert (to->succs == 0);
5109
5110 to->succs = *succsp;
5111
5112 FOR_EACH_EDGE (e, ei, to->succs)
5113 e->src = to;
5114
5115 *succsp = 0;
5116 }
5117
5118 /* Remove INSN from the instruction stream.
5119 INSN should not have any dependencies. */
5120 static void
5121 sched_remove_insn (rtx insn)
5122 {
5123 sd_finish_insn (insn);
5124
5125 change_queue_index (insn, QUEUE_NOWHERE);
5126 current_sched_info->add_remove_insn (insn, 1);
5127 remove_insn (insn);
5128 }
5129
5130 /* Clear the priorities of all instructions that are forward dependent on INSN.
5131 Store in the vector pointed to by ROOTS_PTR the insns on which priority ()
5132 should be invoked to initialize all cleared priorities. */
5133 static void
5134 clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
5135 {
5136 sd_iterator_def sd_it;
5137 dep_t dep;
5138 bool insn_is_root_p = true;
5139
5140 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
5141
5142 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
5143 {
5144 rtx pro = DEP_PRO (dep);
5145
5146 if (INSN_PRIORITY_STATUS (pro) >= 0
5147 && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
5148 {
5149 /* If DEP doesn't contribute to priority then INSN itself should
5150 be added to priority roots. */
5151 if (contributes_to_priority_p (dep))
5152 insn_is_root_p = false;
5153
5154 INSN_PRIORITY_STATUS (pro) = -1;
5155 clear_priorities (pro, roots_ptr);
5156 }
5157 }
5158
5159 if (insn_is_root_p)
5160 VEC_safe_push (rtx, heap, *roots_ptr, insn);
5161 }
5162
5163 /* Recompute the priorities of instructions whose priorities might have
5164 changed. ROOTS is a vector of instructions whose priority computation will
5165 trigger initialization of all cleared priorities. */
5166 static void
5167 calc_priorities (rtx_vec_t roots)
5168 {
5169 int i;
5170 rtx insn;
5171
5172 FOR_EACH_VEC_ELT (rtx, roots, i, insn)
5173 priority (insn);
5174 }
5175
5176
5177 /* Add dependences between JUMP and other instructions in the recovery
5178 block. INSN is the first insn in the recovery block. */
5179 static void
5180 add_jump_dependencies (rtx insn, rtx jump)
5181 {
5182 do
5183 {
5184 insn = NEXT_INSN (insn);
5185 if (insn == jump)
5186 break;
5187
5188 if (dep_list_size (insn) == 0)
5189 {
5190 dep_def _new_dep, *new_dep = &_new_dep;
5191
5192 init_dep (new_dep, insn, jump, REG_DEP_ANTI);
5193 sd_add_dep (new_dep, false);
5194 }
5195 }
5196 while (1);
5197
5198 gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
5199 }
5200
5201 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
5202 rtx
5203 bb_note (basic_block bb)
5204 {
5205 rtx note;
5206
5207 note = BB_HEAD (bb);
5208 if (LABEL_P (note))
5209 note = NEXT_INSN (note);
5210
5211 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5212 return note;
5213 }
5214
5215 #ifdef ENABLE_CHECKING
5216 /* Helper function for check_cfg.
5217 Return nonzero if the edge vector pointed to by EL has an edge with TYPE
5218 in its flags. */
5219 static int
5220 has_edge_p (VEC(edge,gc) *el, int type)
5221 {
5222 edge e;
5223 edge_iterator ei;
5224
5225 FOR_EACH_EDGE (e, ei, el)
5226 if (e->flags & type)
5227 return 1;
5228 return 0;
5229 }
5230
5231 /* Search back, starting at INSN, for an insn that is not a
5232 NOTE_INSN_VAR_LOCATION. Don't search beyond HEAD, and return it if
5233 no such insn can be found. */
5234 static inline rtx
5235 prev_non_location_insn (rtx insn, rtx head)
5236 {
5237 while (insn != head && NOTE_P (insn)
5238 && NOTE_KIND (insn) == NOTE_INSN_VAR_LOCATION)
5239 insn = PREV_INSN (insn);
5240
5241 return insn;
5242 }
5243
5244 /* Check a few properties of the CFG between HEAD and TAIL.
5245 If HEAD (TAIL) is NULL, check from the beginning (to the end) of the
5246 instruction stream. */
5247 static void
5248 check_cfg (rtx head, rtx tail)
5249 {
5250 rtx next_tail;
5251 basic_block bb = 0;
5252 int not_first = 0, not_last;
5253
5254 if (head == NULL)
5255 head = get_insns ();
5256 if (tail == NULL)
5257 tail = get_last_insn ();
5258 next_tail = NEXT_INSN (tail);
5259
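  /* Walk the insn stream and verify that the doubly-linked insn chain is
     consistent and that basic block boundaries agree with BLOCK_FOR_INSN.  */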
5260 do
5261 {
5262 not_last = head != tail;
5263
5264 if (not_first)
5265 gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
5266 if (not_last)
5267 gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
5268
5269 if (LABEL_P (head)
5270 || (NOTE_INSN_BASIC_BLOCK_P (head)
5271 && (!not_first
5272 || (not_first && !LABEL_P (PREV_INSN (head))))))
5273 {
5274 gcc_assert (bb == 0);
5275 bb = BLOCK_FOR_INSN (head);
5276 if (bb != 0)
5277 gcc_assert (BB_HEAD (bb) == head);
5278 else
5279 /* This is the case of jump table. See inside_basic_block_p (). */
5280 gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
5281 }
5282
5283 if (bb == 0)
5284 {
5285 gcc_assert (!inside_basic_block_p (head));
5286 head = NEXT_INSN (head);
5287 }
5288 else
5289 {
5290 gcc_assert (inside_basic_block_p (head)
5291 || NOTE_P (head));
5292 gcc_assert (BLOCK_FOR_INSN (head) == bb);
5293
5294 if (LABEL_P (head))
5295 {
5296 head = NEXT_INSN (head);
5297 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
5298 }
5299 else
5300 {
5301 if (control_flow_insn_p (head))
5302 {
5303 gcc_assert (prev_non_location_insn (BB_END (bb), head)
5304 == head);
5305
5306 if (any_uncondjump_p (head))
5307 gcc_assert (EDGE_COUNT (bb->succs) == 1
5308 && BARRIER_P (NEXT_INSN (head)));
5309 else if (any_condjump_p (head))
5310 gcc_assert (/* Usual case. */
5311 (EDGE_COUNT (bb->succs) > 1
5312 && !BARRIER_P (NEXT_INSN (head)))
5313 /* Or jump to the next instruction. */
5314 || (EDGE_COUNT (bb->succs) == 1
5315 && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
5316 == JUMP_LABEL (head))));
5317 }
5318 if (BB_END (bb) == head)
5319 {
5320 if (EDGE_COUNT (bb->succs) > 1)
5321 gcc_assert (control_flow_insn_p (prev_non_location_insn
5322 (head, BB_HEAD (bb)))
5323 || has_edge_p (bb->succs, EDGE_COMPLEX));
5324 bb = 0;
5325 }
5326
5327 head = NEXT_INSN (head);
5328 }
5329 }
5330
5331 not_first = 1;
5332 }
5333 while (head != next_tail);
5334
5335 gcc_assert (bb == 0);
5336 }
5337
5338 #endif /* ENABLE_CHECKING */
5339
5340 /* Extend per basic block data structures. */
5341 static void
5342 extend_bb (void)
5343 {
5344 if (sched_scan_info->extend_bb)
5345 sched_scan_info->extend_bb ();
5346 }
5347
5348 /* Init data for BB. */
5349 static void
5350 init_bb (basic_block bb)
5351 {
5352 if (sched_scan_info->init_bb)
5353 sched_scan_info->init_bb (bb);
5354 }
5355
5356 /* Extend per insn data structures. */
5357 static void
5358 extend_insn (void)
5359 {
5360 if (sched_scan_info->extend_insn)
5361 sched_scan_info->extend_insn ();
5362 }
5363
5364 /* Init data structures for INSN. */
5365 static void
5366 init_insn (rtx insn)
5367 {
5368 if (sched_scan_info->init_insn)
5369 sched_scan_info->init_insn (insn);
5370 }
5371
5372 /* Init all insns in BB. */
5373 static void
5374 init_insns_in_bb (basic_block bb)
5375 {
5376 rtx insn;
5377
5378 FOR_BB_INSNS (bb, insn)
5379 init_insn (insn);
5380 }
5381
5382 /* A driver function to add a set of basic blocks (BBS),
5383 a single basic block (BB), a set of insns (INSNS) or a single insn (INSN)
5384 to the scheduling region. */
5385 void
5386 sched_scan (const struct sched_scan_info_def *ssi,
5387 bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
5388 {
5389 sched_scan_info = ssi;
5390
5391 if (bbs != NULL || bb != NULL)
5392 {
5393 extend_bb ();
5394
5395 if (bbs != NULL)
5396 {
5397 unsigned i;
5398 basic_block x;
5399
5400 FOR_EACH_VEC_ELT (basic_block, bbs, i, x)
5401 init_bb (x);
5402 }
5403
5404 if (bb != NULL)
5405 init_bb (bb);
5406 }
5407
5408 extend_insn ();
5409
5410 if (bbs != NULL)
5411 {
5412 unsigned i;
5413 basic_block x;
5414
5415 FOR_EACH_VEC_ELT (basic_block, bbs, i, x)
5416 init_insns_in_bb (x);
5417 }
5418
5419 if (bb != NULL)
5420 init_insns_in_bb (bb);
5421
5422 if (insns != NULL)
5423 {
5424 unsigned i;
5425 rtx x;
5426
5427 FOR_EACH_VEC_ELT (rtx, insns, i, x)
5428 init_insn (x);
5429 }
5430
5431 if (insn != NULL)
5432 init_insn (insn);
5433 }
5434
5435
5436 /* Extend data structures for logical insn UID. */
5437 static void
5438 luids_extend_insn (void)
5439 {
5440 int new_luids_max_uid = get_max_uid () + 1;
5441
5442 VEC_safe_grow_cleared (int, heap, sched_luids, new_luids_max_uid);
5443 }
5444
5445 /* Initialize LUID for INSN. */
5446 static void
5447 luids_init_insn (rtx insn)
5448 {
5449 int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
5450 int luid;
5451
5452 if (i >= 0)
5453 {
5454 luid = sched_max_luid;
5455 sched_max_luid += i;
5456 }
5457 else
5458 luid = -1;
5459
5460 SET_INSN_LUID (insn, luid);
5461 }
5462
5463 /* Initialize luids for BBS, BB, INSNS and INSN.
5464 The hook common_sched_info->luid_for_non_insn () is used to determine
5465 if notes, labels, etc. need luids. */
5466 void
5467 sched_init_luids (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
5468 {
5469 const struct sched_scan_info_def ssi =
5470 {
5471 NULL, /* extend_bb */
5472 NULL, /* init_bb */
5473 luids_extend_insn, /* extend_insn */
5474 luids_init_insn /* init_insn */
5475 };
5476
5477 sched_scan (&ssi, bbs, bb, insns, insn);
5478 }
5479
5480 /* Free LUIDs. */
5481 void
5482 sched_finish_luids (void)
5483 {
5484 VEC_free (int, heap, sched_luids);
5485 sched_max_luid = 1;
5486 }
5487
5488 /* Return logical uid of INSN. Helpful while debugging. */
5489 int
5490 insn_luid (rtx insn)
5491 {
5492 return INSN_LUID (insn);
5493 }
5494
5495 /* Extend per insn data in the target. */
5496 void
5497 sched_extend_target (void)
5498 {
5499 if (targetm.sched.h_i_d_extended)
5500 targetm.sched.h_i_d_extended ();
5501 }
5502
5503 /* Extend global scheduler structures (those that live across calls to
5504 schedule_block) to include information about just emitted insns. */
5505 static void
5506 extend_h_i_d (void)
5507 {
5508 int reserve = (get_max_uid () + 1
5509 - VEC_length (haifa_insn_data_def, h_i_d));
5510 if (reserve > 0
5511 && ! VEC_space (haifa_insn_data_def, h_i_d, reserve))
5512 {
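      /* Grow to one and a half times the current maximal uid so that new
         insns emitted later do not force a reallocation every time.  */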
5513 VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d,
5514 3 * get_max_uid () / 2);
5515 sched_extend_target ();
5516 }
5517 }
5518
5519 /* Initialize the h_i_d entry of INSN with default values.
5520 Values that are not explicitly initialized here hold zero. */
5521 static void
5522 init_h_i_d (rtx insn)
5523 {
5524 if (INSN_LUID (insn) > 0)
5525 {
5526 INSN_COST (insn) = -1;
5527 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
5528 INSN_TICK (insn) = INVALID_TICK;
5529 INTER_TICK (insn) = INVALID_TICK;
5530 TODO_SPEC (insn) = HARD_DEP;
5531 }
5532 }
5533
5534 /* Initialize haifa_insn_data for BBS, BB, INSNS and INSN. */
5535 void
5536 haifa_init_h_i_d (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
5537 {
5538 const struct sched_scan_info_def ssi =
5539 {
5540 NULL, /* extend_bb */
5541 NULL, /* init_bb */
5542 extend_h_i_d, /* extend_insn */
5543 init_h_i_d /* init_insn */
5544 };
5545
5546 sched_scan (&ssi, bbs, bb, insns, insn);
5547 }
5548
5549 /* Finalize haifa_insn_data. */
5550 void
5551 haifa_finish_h_i_d (void)
5552 {
5553 int i;
5554 haifa_insn_data_t data;
5555 struct reg_use_data *use, *next;
5556
5557 FOR_EACH_VEC_ELT (haifa_insn_data_def, h_i_d, i, data)
5558 {
5559 if (data->reg_pressure != NULL)
5560 free (data->reg_pressure);
5561 for (use = data->reg_use_list; use != NULL; use = next)
5562 {
5563 next = use->next_insn_use;
5564 free (use);
5565 }
5566 }
5567 VEC_free (haifa_insn_data_def, heap, h_i_d);
5568 }
5569
5570 /* Init data for the new insn INSN. */
5571 static void
5572 haifa_init_insn (rtx insn)
5573 {
5574 gcc_assert (insn != NULL);
5575
5576 sched_init_luids (NULL, NULL, NULL, insn);
5577 sched_extend_target ();
5578 sched_deps_init (false);
5579 haifa_init_h_i_d (NULL, NULL, NULL, insn);
5580
5581 if (adding_bb_to_current_region_p)
5582 {
5583 sd_init_insn (insn);
5584
5585 /* Extend dependency caches by one element. */
5586 extend_dependency_caches (1, false);
5587 }
5588 }
5589
5590 /* Init data for the new basic block BB which comes after AFTER. */
5591 static void
5592 haifa_init_only_bb (basic_block bb, basic_block after)
5593 {
5594 gcc_assert (bb != NULL);
5595
5596 sched_init_bbs ();
5597
5598 if (common_sched_info->add_block)
5599 /* This changes only data structures of the front-end. */
5600 common_sched_info->add_block (bb, after);
5601 }
5602
5603 /* A generic version of sched_split_block (). */
5604 basic_block
5605 sched_split_block_1 (basic_block first_bb, rtx after)
5606 {
5607 edge e;
5608
5609 e = split_block (first_bb, after);
5610 gcc_assert (e->src == first_bb);
5611
5612 /* sched_split_block emits note if *check == BB_END. Probably it
5613 is better to rip that note off. */
5614
5615 return e->dest;
5616 }
5617
5618 /* A generic version of sched_create_empty_bb (). */
5619 basic_block
5620 sched_create_empty_bb_1 (basic_block after)
5621 {
5622 return create_empty_bb (after);
5623 }
5624
5625 /* Insert PAT as an INSN into the schedule and update the necessary data
5626 structures to account for it. */
5627 rtx
5628 sched_emit_insn (rtx pat)
5629 {
5630 rtx insn = emit_insn_after (pat, last_scheduled_insn);
5631 last_scheduled_insn = insn;
5632 haifa_init_insn (insn);
5633 return insn;
5634 }
5635
5636 /* This function returns a candidate satisfying dispatch constraints from
5637 the ready list. */
5638
5639 static rtx
5640 ready_remove_first_dispatch (struct ready_list *ready)
5641 {
5642 int i;
5643 rtx insn = ready_element (ready, 0);
5644
5645 if (ready->n_ready == 1
5646 || INSN_CODE (insn) < 0
5647 || !INSN_P (insn)
5648 || !active_insn_p (insn)
5649 || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
5650 return ready_remove_first (ready);
5651
5652 for (i = 1; i < ready->n_ready; i++)
5653 {
5654 insn = ready_element (ready, i);
5655
5656 if (INSN_CODE (insn) < 0
5657 || !INSN_P (insn)
5658 || !active_insn_p (insn))
5659 continue;
5660
5661 if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
5662 {
5663 /* Return ith element of ready. */
5664 insn = ready_remove (ready, i);
5665 return insn;
5666 }
5667 }
5668
5669 if (targetm.sched.dispatch (NULL_RTX, DISPATCH_VIOLATION))
5670 return ready_remove_first (ready);
5671
5672 for (i = 1; i < ready->n_ready; i++)
5673 {
5674 insn = ready_element (ready, i);
5675
5676 if (INSN_CODE (insn) < 0
5677 || !INSN_P (insn)
5678 || !active_insn_p (insn))
5679 continue;
5680
5681 /* Return i-th element of ready. */
5682 if (targetm.sched.dispatch (insn, IS_CMP))
5683 return ready_remove (ready, i);
5684 }
5685
5686 return ready_remove_first (ready);
5687 }
5688
5689 /* Get the number of ready insns in the ready list. */
5690
5691 int
5692 number_in_ready (void)
5693 {
5694 return ready.n_ready;
5695 }
5696
5697 /* Get the I-th insn from the ready list. */
5698
5699 rtx
5700 get_ready_element (int i)
5701 {
5702 return ready_element (&ready, i);
5703 }
5704
5705 #endif /* INSN_SCHEDULING */