[gcc.git] / gcc / haifa-sched.c
1 /* Instruction scheduling pass.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
   5     Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
6 and currently maintained by, Jim Wilson (wilson@cygnus.com)
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 /* Instruction scheduling pass. This file, along with sched-deps.c,
  25    contains the generic parts.  The actual entry point for the normal
  26    instruction scheduling pass is found in sched-rgn.c.
27
28 We compute insn priorities based on data dependencies. Flow
29 analysis only creates a fraction of the data-dependencies we must
30 observe: namely, only those dependencies which the combiner can be
  31    expected to use.  For this pass, we must therefore create the
  32    remaining dependencies we need to observe.  Register dependencies,
  33    memory dependencies, dependencies to keep function calls in order,
  34    and the dependence between a conditional branch and the setting of
  35    condition codes are all dealt with here.
36
37 The scheduler first traverses the data flow graph, starting with
38 the last instruction, and proceeding to the first, assigning values
39 to insn_priority as it goes. This sorts the instructions
40 topologically by data dependence.
41
42 Once priorities have been established, we order the insns using
43 list scheduling. This works as follows: starting with a list of
44 all the ready insns, and sorted according to priority number, we
45 schedule the insn from the end of the list by placing its
46 predecessors in the list according to their priority order. We
47 consider this insn scheduled by setting the pointer to the "end" of
48 the list to point to the previous insn. When an insn has no
49 predecessors, we either queue it until sufficient time has elapsed
50 or add it to the ready list. As the instructions are scheduled or
51 when stalls are introduced, the queue advances and dumps insns into
52 the ready list. When all insns down to the lowest priority have
53 been scheduled, the critical path of the basic block has been made
54 as short as possible. The remaining insns are then scheduled in
55 remaining slots.
56
57 The following list shows the order in which we want to break ties
58 among insns in the ready list:
59
60 1. choose insn with the longest path to end of bb, ties
61 broken by
62 2. choose insn with least contribution to register pressure,
63 ties broken by
  64      3.  prefer in-block over interblock motion, ties broken by
  65      4.  prefer useful over speculative motion, ties broken by
66 5. choose insn with largest control flow probability, ties
67 broken by
  68      6.  choose insn with the least dependences upon the previously
  69          scheduled insn, ties broken by
  70      7.  choose the insn which has the most insns dependent on it, or finally
71 8. choose insn with lowest UID.
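        (These tie-breaking heuristics are largely implemented by
        rank_for_schedule () below; note that the final tie-break there
        compares INSN_LUIDs, i.e. the original insn order.)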
72
73 Memory references complicate matters. Only if we can be certain
74 that memory references are not part of the data dependency graph
75 (via true, anti, or output dependence), can we move operations past
76 memory references. To first approximation, reads can be done
77 independently, while writes introduce dependencies. Better
78 approximations will yield fewer dependencies.
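     For example, two loads from the same location may be freely
     reordered with respect to each other, but a load may not be moved
     above a store that might write the same location (a true
     dependence), nor may a store be moved above a load or store of
     that location (an anti or output dependence).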
79
80 Before reload, an extended analysis of interblock data dependences
81 is required for interblock scheduling. This is performed in
82 compute_block_backward_dependences ().
83
84 Dependencies set up by memory references are treated in exactly the
85 same way as other dependencies, by using insn backward dependences
86 INSN_BACK_DEPS. INSN_BACK_DEPS are translated into forward dependences
  87    INSN_FORW_DEPS for the purpose of forward list scheduling.
88
89 Having optimized the critical path, we may have also unduly
90 extended the lifetimes of some registers. If an operation requires
91 that constants be loaded into registers, it is certainly desirable
92 to load those constants as early as necessary, but no earlier.
93 I.e., it will not do to load up a bunch of registers at the
94 beginning of a basic block only to use them at the end, if they
95 could be loaded later, since this may result in excessive register
96 utilization.
97
98 Note that since branches are never in basic blocks, but only end
99 basic blocks, this pass will not move branches. But that is ok,
100 since we can use GNU's delayed branch scheduling pass to take care
101 of this case.
102
103 Also note that no further optimizations based on algebraic
104 identities are performed, so this pass would be a good one to
105 perform instruction splitting, such as breaking up a multiply
106 instruction into shifts and adds where that is profitable.
107
108 Given the memory aliasing analysis that this pass should perform,
109 it should be possible to remove redundant stores to memory, and to
110 load values from registers instead of hitting memory.
111
112 Before reload, speculative insns are moved only if a 'proof' exists
113 that no exception will be caused by this, and if no live registers
114 exist that inhibit the motion (live registers constraints are not
115 represented by data dependence edges).
116
117 This pass must update information that subsequent passes expect to
118 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
119 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
120
121 The information in the line number notes is carefully retained by
122 this pass. Notes that refer to the starting and ending of
123 exception regions are also carefully retained by this pass. All
124 other NOTE insns are grouped in their same relative order at the
125 beginning of basic blocks and regions that have been scheduled. */
126 \f
127 #include "config.h"
128 #include "system.h"
129 #include "coretypes.h"
130 #include "tm.h"
131 #include "toplev.h"
132 #include "rtl.h"
133 #include "tm_p.h"
134 #include "hard-reg-set.h"
135 #include "regs.h"
136 #include "function.h"
137 #include "flags.h"
138 #include "insn-config.h"
139 #include "insn-attr.h"
140 #include "except.h"
141 #include "toplev.h"
142 #include "recog.h"
143 #include "sched-int.h"
144 #include "target.h"
145 #include "output.h"
146 #include "params.h"
147 #include "vecprim.h"
148 #include "dbgcnt.h"
149 #include "cfgloop.h"
150
151 #ifdef INSN_SCHEDULING
152
153 /* issue_rate is the number of insns that can be scheduled in the same
154 machine cycle. It can be defined in the config/mach/mach.h file,
155 otherwise we set it to 1. */
156
157 int issue_rate;
158
159 /* sched-verbose controls the amount of debugging output the
160 scheduler prints. It is controlled by -fsched-verbose=N:
 161    N>0 and no -dSR : the output is directed to stderr.
162 N>=10 will direct the printouts to stderr (regardless of -dSR).
163 N=1: same as -dSR.
164 N=2: bb's probabilities, detailed ready list info, unit/insn info.
165 N=3: rtl at abort point, control-flow, regions info.
166 N=5: dependences info. */
167
168 static int sched_verbose_param = 0;
169 int sched_verbose = 0;
170
 171 /* Debugging file.  All printouts are sent to sched_dump, which is always
 172    set, either to stderr, or to the dump listing file (-dSR).  */
173 FILE *sched_dump = 0;
174
175 /* fix_sched_param() is called from toplev.c upon detection
176 of the -fsched-verbose=N option. */
177
178 void
179 fix_sched_param (const char *param, const char *val)
180 {
181 if (!strcmp (param, "verbose"))
182 sched_verbose_param = atoi (val);
183 else
184 warning (0, "fix_sched_param: unknown param: %s", param);
185 }
186
187 /* This is a placeholder for the scheduler parameters common
188 to all schedulers. */
189 struct common_sched_info_def *common_sched_info;
190
191 #define INSN_TICK(INSN) (HID (INSN)->tick)
192 #define INTER_TICK(INSN) (HID (INSN)->inter_tick)
193
194 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
195 then it should be recalculated from scratch. */
196 #define INVALID_TICK (-(max_insn_queue_index + 1))
197 /* The minimal value of the INSN_TICK of an instruction. */
198 #define MIN_TICK (-max_insn_queue_index)
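/* For example, if max_insn_queue_index is 63, INVALID_TICK is -64 and
   MIN_TICK is -63, so a valid tick can never be mistaken for the
   'recalculate' marker.  */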
199
200 /* Issue points are used to distinguish between instructions in max_issue ().
201 For now, all instructions are equally good. */
202 #define ISSUE_POINTS(INSN) 1
203
204 /* List of important notes we must keep around. This is a pointer to the
205 last element in the list. */
206 rtx note_list;
207
208 static struct spec_info_def spec_info_var;
209 /* Description of the speculative part of the scheduling.
210 If NULL - no speculation. */
211 spec_info_t spec_info = NULL;
212
 213 /* True if a recovery block was added during scheduling of the current
 214    block.  Used to determine if we need to fix INSN_TICKs.  */
215 static bool haifa_recovery_bb_recently_added_p;
216
 217 /* True if a recovery block was added during this scheduling pass.
 218    Used to determine whether we should have empty memory pools of
 219    dependencies after finishing the current region.  */
220 bool haifa_recovery_bb_ever_added_p;
221
222 /* Counters of different types of speculative instructions. */
223 static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
224
225 /* Array used in {unlink, restore}_bb_notes. */
226 static rtx *bb_header = 0;
227
228 /* Basic block after which recovery blocks will be created. */
229 static basic_block before_recovery;
230
231 /* Basic block just before the EXIT_BLOCK and after recovery, if we have
232 created it. */
233 basic_block after_recovery;
234
235 /* FALSE if we add bb to another region, so we don't need to initialize it. */
236 bool adding_bb_to_current_region_p = true;
237
238 /* Queues, etc. */
239
240 /* An instruction is ready to be scheduled when all insns preceding it
241 have already been scheduled. It is important to ensure that all
242 insns which use its result will not be executed until its result
243 has been computed. An insn is maintained in one of four structures:
244
245 (P) the "Pending" set of insns which cannot be scheduled until
246 their dependencies have been satisfied.
247 (Q) the "Queued" set of insns that can be scheduled when sufficient
248 time has passed.
249 (R) the "Ready" list of unscheduled, uncommitted insns.
250 (S) the "Scheduled" list of insns.
251
252 Initially, all insns are either "Pending" or "Ready" depending on
253 whether their dependencies are satisfied.
254
255 Insns move from the "Ready" list to the "Scheduled" list as they
256 are committed to the schedule. As this occurs, the insns in the
257 "Pending" list have their dependencies satisfied and move to either
258 the "Ready" list or the "Queued" set depending on whether
259 sufficient time has passed to make them ready. As time passes,
260 insns move from the "Queued" set to the "Ready" list.
261
 262    The "Pending" list (P) consists of the insns in the INSN_FORW_DEPS of
 263    the unscheduled insns, i.e., those that are ready, queued, and pending.
264 The "Queued" set (Q) is implemented by the variable `insn_queue'.
265 The "Ready" list (R) is implemented by the variables `ready' and
266 `n_ready'.
267 The "Scheduled" list (S) is the new insn chain built by this pass.
268
269 The transition (R->S) is implemented in the scheduling loop in
270 `schedule_block' when the best insn to schedule is chosen.
271 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
272 insns move from the ready list to the scheduled list.
 273    The transition (Q->R) is implemented in 'queue_to_ready' as time
 274    passes or stalls are introduced.  */
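/* Schematically, an insn moves through

     Pending --(last dependence resolved)---> Queued or Ready
     Queued  --(enough cycles have passed)--> Ready
     Ready   --(chosen by schedule_block)---> Scheduled

   which roughly corresponds to the QUEUE_INDEX values QUEUE_NOWHERE,
   a nonnegative queue slot, QUEUE_READY and QUEUE_SCHEDULED defined
   below.  */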
275
276 /* Implement a circular buffer to delay instructions until sufficient
277 time has passed. For the new pipeline description interface,
 278    MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
 279    than the maximal time of instruction execution computed by genattr.c,
 280    based on the maximal time of functional unit reservations and of
 281    getting a result.  This is the longest time an insn may be queued.
282
283 static rtx *insn_queue;
284 static int q_ptr = 0;
285 static int q_size = 0;
286 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
287 #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
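/* For example, with max_insn_queue_index == 63 (a power of two minus one),
   NEXT_Q_AFTER (62, 5) == (62 + 5) & 63 == 3: the buffer simply wraps
   around.  */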
288
289 #define QUEUE_SCHEDULED (-3)
290 #define QUEUE_NOWHERE (-2)
291 #define QUEUE_READY (-1)
292 /* QUEUE_SCHEDULED - INSN is scheduled.
 293    QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in
 294                      the queue nor in the ready list.
295 QUEUE_READY - INSN is in ready list.
296 N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
297
298 #define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
299
 300 /* The following variable describes all current and future
 301    reservations of the processor units.  */
302 state_t curr_state;
303
 304 /* The following variable is the size of the memory representing all
 305    current and future reservations of the processor units.  */
306 size_t dfa_state_size;
307
308 /* The following array is used to find the best insn from ready when
309 the automaton pipeline interface is used. */
310 char *ready_try = NULL;
311
312 /* The ready list. */
313 struct ready_list ready = {NULL, 0, 0, 0};
314
315 /* The pointer to the ready list (to be removed). */
316 static struct ready_list *readyp = &ready;
317
318 /* Scheduling clock. */
319 static int clock_var;
320
321 static int may_trap_exp (const_rtx, int);
322
323 /* Nonzero iff the address is comprised from at most 1 register. */
324 #define CONST_BASED_ADDRESS_P(x) \
325 (REG_P (x) \
326 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
327 || (GET_CODE (x) == LO_SUM)) \
328 && (CONSTANT_P (XEXP (x, 0)) \
329 || CONSTANT_P (XEXP (x, 1)))))
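/* For example, (reg R) and (plus (reg R) (const_int 8)) satisfy this
   predicate, whereas (plus (reg R1) (reg R2)) does not, because neither
   operand of the PLUS is constant.  */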
330
 331 /* Returns a class that an insn with GET_DEST(insn)=x may belong to,
 332    as found by analyzing the insn's expression.  */
333
334 \f
335 static int haifa_luid_for_non_insn (rtx x);
336
337 /* Haifa version of sched_info hooks common to all headers. */
338 const struct common_sched_info_def haifa_common_sched_info =
339 {
340 NULL, /* fix_recovery_cfg */
341 NULL, /* add_block */
342 NULL, /* estimate_number_of_insns */
343 haifa_luid_for_non_insn, /* luid_for_non_insn */
344 SCHED_PASS_UNKNOWN /* sched_pass_id */
345 };
346
347 const struct sched_scan_info_def *sched_scan_info;
348
349 /* Mapping from instruction UID to its Logical UID. */
350 VEC (int, heap) *sched_luids = NULL;
351
352 /* Next LUID to assign to an instruction. */
353 int sched_max_luid = 1;
354
355 /* Haifa Instruction Data. */
356 VEC (haifa_insn_data_def, heap) *h_i_d = NULL;
357
358 void (* sched_init_only_bb) (basic_block, basic_block);
359
 360 /* Split block function.  Different schedulers might use different
 361    functions to keep their internal data consistent.  */
362 basic_block (* sched_split_block) (basic_block, rtx);
363
364 /* Create empty basic block after the specified block. */
365 basic_block (* sched_create_empty_bb) (basic_block);
366
367 static int
368 may_trap_exp (const_rtx x, int is_store)
369 {
370 enum rtx_code code;
371
372 if (x == 0)
373 return TRAP_FREE;
374 code = GET_CODE (x);
375 if (is_store)
376 {
377 if (code == MEM && may_trap_p (x))
378 return TRAP_RISKY;
379 else
380 return TRAP_FREE;
381 }
382 if (code == MEM)
383 {
384 /* The insn uses memory: a volatile load. */
385 if (MEM_VOLATILE_P (x))
386 return IRISKY;
387 /* An exception-free load. */
388 if (!may_trap_p (x))
389 return IFREE;
390 /* A load with 1 base register, to be further checked. */
391 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
392 return PFREE_CANDIDATE;
393 /* No info on the load, to be further checked. */
394 return PRISKY_CANDIDATE;
395 }
396 else
397 {
398 const char *fmt;
399 int i, insn_class = TRAP_FREE;
400
401 /* Neither store nor load, check if it may cause a trap. */
402 if (may_trap_p (x))
403 return TRAP_RISKY;
404 /* Recursive step: walk the insn... */
405 fmt = GET_RTX_FORMAT (code);
406 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
407 {
408 if (fmt[i] == 'e')
409 {
410 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
411 insn_class = WORST_CLASS (insn_class, tmp_class);
412 }
413 else if (fmt[i] == 'E')
414 {
415 int j;
416 for (j = 0; j < XVECLEN (x, i); j++)
417 {
418 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
419 insn_class = WORST_CLASS (insn_class, tmp_class);
420 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
421 break;
422 }
423 }
424 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
425 break;
426 }
427 return insn_class;
428 }
429 }
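/* Thus, for example: a volatile load yields IRISKY; a load that cannot
   trap yields IFREE; a load whose address is based on at most one
   register (see CONST_BASED_ADDRESS_P) yields PFREE_CANDIDATE; any other
   load yields PRISKY_CANDIDATE; and a store through a MEM that may trap
   yields TRAP_RISKY.  */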
430
431 /* Classifies rtx X of an insn for the purpose of verifying that X can be
432 executed speculatively (and consequently the insn can be moved
433 speculatively), by examining X, returning:
434 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
435 TRAP_FREE: non-load insn.
436 IFREE: load from a globally safe location.
437 IRISKY: volatile load.
 438    PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for
439 being either PFREE or PRISKY. */
440
441 static int
442 haifa_classify_rtx (const_rtx x)
443 {
444 int tmp_class = TRAP_FREE;
445 int insn_class = TRAP_FREE;
446 enum rtx_code code;
447
448 if (GET_CODE (x) == PARALLEL)
449 {
450 int i, len = XVECLEN (x, 0);
451
452 for (i = len - 1; i >= 0; i--)
453 {
454 tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
455 insn_class = WORST_CLASS (insn_class, tmp_class);
456 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
457 break;
458 }
459 }
460 else
461 {
462 code = GET_CODE (x);
463 switch (code)
464 {
465 case CLOBBER:
466 /* Test if it is a 'store'. */
467 tmp_class = may_trap_exp (XEXP (x, 0), 1);
468 break;
469 case SET:
470 /* Test if it is a store. */
471 tmp_class = may_trap_exp (SET_DEST (x), 1);
472 if (tmp_class == TRAP_RISKY)
473 break;
474 /* Test if it is a load. */
475 tmp_class =
476 WORST_CLASS (tmp_class,
477 may_trap_exp (SET_SRC (x), 0));
478 break;
479 case COND_EXEC:
480 tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
481 if (tmp_class == TRAP_RISKY)
482 break;
483 tmp_class = WORST_CLASS (tmp_class,
484 may_trap_exp (COND_EXEC_TEST (x), 0));
485 break;
486 case TRAP_IF:
487 tmp_class = TRAP_RISKY;
488 break;
489 default:;
490 }
491 insn_class = tmp_class;
492 }
493
494 return insn_class;
495 }
496
497 int
498 haifa_classify_insn (const_rtx insn)
499 {
500 return haifa_classify_rtx (PATTERN (insn));
501 }
502
503 /* Forward declarations. */
504
505 static int priority (rtx);
506 static int rank_for_schedule (const void *, const void *);
507 static void swap_sort (rtx *, int);
508 static void queue_insn (rtx, int);
509 static int schedule_insn (rtx);
510 static int find_set_reg_weight (const_rtx);
511 static void find_insn_reg_weight (const_rtx);
512 static void adjust_priority (rtx);
513 static void advance_one_cycle (void);
514 static void extend_h_i_d (void);
515
516
517 /* Notes handling mechanism:
518 =========================
519 Generally, NOTES are saved before scheduling and restored after scheduling.
520 The scheduler distinguishes between two types of notes:
521
522 (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
523 Before scheduling a region, a pointer to the note is added to the insn
524 that follows or precedes it. (This happens as part of the data dependence
525 computation). After scheduling an insn, the pointer contained in it is
526 used for regenerating the corresponding note (in reemit_notes).
527
528 (2) All other notes (e.g. INSN_DELETED): Before scheduling a block,
529 these notes are put in a list (in rm_other_notes() and
530 unlink_other_notes ()). After scheduling the block, these notes are
531 inserted at the beginning of the block (in schedule_block()). */
532
533 static void ready_add (struct ready_list *, rtx, bool);
534 static rtx ready_remove_first (struct ready_list *);
535
536 static void queue_to_ready (struct ready_list *);
537 static int early_queue_to_ready (state_t, struct ready_list *);
538
539 static void debug_ready_list (struct ready_list *);
540
541 /* The following functions are used to implement multi-pass scheduling
542 on the first cycle. */
543 static rtx ready_remove (struct ready_list *, int);
544 static void ready_remove_insn (rtx);
545
546 static int choose_ready (struct ready_list *, rtx *);
547
548 static void fix_inter_tick (rtx, rtx);
549 static int fix_tick_ready (rtx);
550 static void change_queue_index (rtx, int);
551
552 /* The following functions are used to implement scheduling of data/control
553 speculative instructions. */
554
555 static void extend_h_i_d (void);
556 static void init_h_i_d (rtx);
557 static void generate_recovery_code (rtx);
558 static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
559 static void begin_speculative_block (rtx);
560 static void add_to_speculative_block (rtx);
561 static void init_before_recovery (basic_block *);
562 static void create_check_block_twin (rtx, bool);
563 static void fix_recovery_deps (basic_block);
564 static void haifa_change_pattern (rtx, rtx);
565 static void dump_new_block_header (int, basic_block, rtx, rtx);
566 static void restore_bb_notes (basic_block);
567 static void fix_jump_move (rtx);
568 static void move_block_after_check (rtx);
569 static void move_succs (VEC(edge,gc) **, basic_block);
570 static void sched_remove_insn (rtx);
571 static void clear_priorities (rtx, rtx_vec_t *);
572 static void calc_priorities (rtx_vec_t);
573 static void add_jump_dependencies (rtx, rtx);
574 #ifdef ENABLE_CHECKING
575 static int has_edge_p (VEC(edge,gc) *, int);
576 static void check_cfg (rtx, rtx);
577 #endif
578
579 #endif /* INSN_SCHEDULING */
580 \f
581 /* Point to state used for the current scheduling pass. */
582 struct haifa_sched_info *current_sched_info;
583 \f
584 #ifndef INSN_SCHEDULING
585 void
586 schedule_insns (void)
587 {
588 }
589 #else
590
591 /* Pointer to the last instruction scheduled. Used by rank_for_schedule,
592 so that insns independent of the last scheduled insn will be preferred
593 over dependent instructions. */
594
595 static rtx last_scheduled_insn;
596
 597 /* Cached cost of the instruction.  Use the function below to get the cost
 598    of the insn.  -1 here means that the field is not initialized.  */
599 #define INSN_COST(INSN) (HID (INSN)->cost)
600
601 /* Compute cost of executing INSN.
602 This is the number of cycles between instruction issue and
603 instruction results. */
604 HAIFA_INLINE int
605 insn_cost (rtx insn)
606 {
607 int cost;
608
609 if (sel_sched_p ())
610 {
611 if (recog_memoized (insn) < 0)
612 return 0;
613
614 cost = insn_default_latency (insn);
615 if (cost < 0)
616 cost = 0;
617
618 return cost;
619 }
620
621 cost = INSN_COST (insn);
622
623 if (cost < 0)
624 {
625 /* A USE insn, or something else we don't need to
626 understand. We can't pass these directly to
627 result_ready_cost or insn_default_latency because it will
628 trigger a fatal error for unrecognizable insns. */
629 if (recog_memoized (insn) < 0)
630 {
631 INSN_COST (insn) = 0;
632 return 0;
633 }
634 else
635 {
636 cost = insn_default_latency (insn);
637 if (cost < 0)
638 cost = 0;
639
640 INSN_COST (insn) = cost;
641 }
642 }
643
644 return cost;
645 }
646
647 /* Compute cost of dependence LINK.
648 This is the number of cycles between instruction issue and
649 instruction results. */
650 int
651 dep_cost_1 (dep_t link, dw_t dw)
652 {
653 rtx used = DEP_CON (link);
654 int cost;
655
656 /* A USE insn should never require the value used to be computed.
657 This allows the computation of a function's result and parameter
658 values to overlap the return and call. */
659 if (recog_memoized (used) < 0)
660 cost = 0;
661 else
662 {
663 rtx insn = DEP_PRO (link);
664 enum reg_note dep_type = DEP_TYPE (link);
665
666 cost = insn_cost (insn);
667
668 if (INSN_CODE (insn) >= 0)
669 {
670 if (dep_type == REG_DEP_ANTI)
671 cost = 0;
672 else if (dep_type == REG_DEP_OUTPUT)
673 {
674 cost = (insn_default_latency (insn)
675 - insn_default_latency (used));
676 if (cost <= 0)
677 cost = 1;
678 }
679 else if (bypass_p (insn))
680 cost = insn_latency (insn, used);
681 }
682
683
684 if (targetm.sched.adjust_cost_2)
685 {
686 cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
687 dw);
688 }
689 else if (targetm.sched.adjust_cost != NULL)
690 {
691 /* This variable is used for backward compatibility with the
692 targets. */
693 rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);
694
 695       /* Make it self-cycled, so that if someone tries to walk over this
 696          incomplete list they will be caught in an endless loop.  */
697 XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;
698
699 /* Targets use only REG_NOTE_KIND of the link. */
700 PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));
701
702 cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
703 insn, cost);
704
705 free_INSN_LIST_node (dep_cost_rtx_link);
706 }
707
708 if (cost < 0)
709 cost = 0;
710 }
711
712 return cost;
713 }
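/* In other words: the cost of a true dependence is the producer's
   latency (possibly replaced by a bypass latency), an anti dependence
   costs 0, and an output dependence costs the difference between the
   default latencies of producer and consumer, but at least 1.  The value
   may then be adjusted by the adjust_cost_2 or adjust_cost target hooks
   and is never negative.  */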
714
715 /* Compute cost of dependence LINK.
716 This is the number of cycles between instruction issue and
717 instruction results. */
718 int
719 dep_cost (dep_t link)
720 {
721 return dep_cost_1 (link, 0);
722 }
723
724 /* Use this sel-sched.c friendly function in reorder2 instead of increasing
725 INSN_PRIORITY explicitly. */
726 void
727 increase_insn_priority (rtx insn, int amount)
728 {
729 if (!sel_sched_p ())
730 {
731 /* We're dealing with haifa-sched.c INSN_PRIORITY. */
732 if (INSN_PRIORITY_KNOWN (insn))
733 INSN_PRIORITY (insn) += amount;
734 }
735 else
736 {
737 /* In sel-sched.c INSN_PRIORITY is not kept up to date.
738 Use EXPR_PRIORITY instead. */
739 sel_add_to_insn_priority (insn, amount);
740 }
741 }
742
743 /* Return 'true' if DEP should be included in priority calculations. */
744 static bool
745 contributes_to_priority_p (dep_t dep)
746 {
 747   /* The critical path is meaningful within block boundaries only.  */
748 if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
749 DEP_PRO (dep)))
750 return false;
751
 752   /* If the flag COUNT_SPEC_IN_CRITICAL_PATH is set,
 753      then speculative instructions will be less likely to be
 754      scheduled.  That is because the priority of
 755      their producers will increase, and, thus, the
 756      producers will be more likely to be scheduled,
 757      resolving the dependence.  */
758 if (sched_deps_info->generate_spec_deps
759 && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
760 && (DEP_STATUS (dep) & SPECULATIVE))
761 return false;
762
763 return true;
764 }
765
766 /* Compute the priority number for INSN. */
767 static int
768 priority (rtx insn)
769 {
770 if (! INSN_P (insn))
771 return 0;
772
773 /* We should not be interested in priority of an already scheduled insn. */
774 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
775
776 if (!INSN_PRIORITY_KNOWN (insn))
777 {
778 int this_priority = -1;
779
780 if (sd_lists_empty_p (insn, SD_LIST_FORW))
 781       /* ??? We should set INSN_PRIORITY to insn_cost when an insn has
 782          some forward deps but all of them are ignored by the
 783          contributes_to_priority hook.  At the moment we set the priority
 784          of such an insn to 0.  */
785 this_priority = insn_cost (insn);
786 else
787 {
788 rtx prev_first, twin;
789 basic_block rec;
790
 791 	  /* For recovery check instructions we calculate the priority slightly
 792 	     differently than for normal instructions.  Instead of walking
 793 	     through the INSN_FORW_DEPS (check) list, we walk through the
 794 	     INSN_FORW_DEPS list of each instruction in the corresponding
795 recovery block. */
796
797 /* Selective scheduling does not define RECOVERY_BLOCK macro. */
798 rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
799 if (!rec || rec == EXIT_BLOCK_PTR)
800 {
801 prev_first = PREV_INSN (insn);
802 twin = insn;
803 }
804 else
805 {
806 prev_first = NEXT_INSN (BB_HEAD (rec));
807 twin = PREV_INSN (BB_END (rec));
808 }
809
810 do
811 {
812 sd_iterator_def sd_it;
813 dep_t dep;
814
815 FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
816 {
817 rtx next;
818 int next_priority;
819
820 next = DEP_CON (dep);
821
822 if (BLOCK_FOR_INSN (next) != rec)
823 {
824 int cost;
825
826 if (!contributes_to_priority_p (dep))
827 continue;
828
829 if (twin == insn)
830 cost = dep_cost (dep);
831 else
832 {
833 struct _dep _dep1, *dep1 = &_dep1;
834
835 init_dep (dep1, insn, next, REG_DEP_ANTI);
836
837 cost = dep_cost (dep1);
838 }
839
840 next_priority = cost + priority (next);
841
842 if (next_priority > this_priority)
843 this_priority = next_priority;
844 }
845 }
846
847 twin = PREV_INSN (twin);
848 }
849 while (twin != prev_first);
850 }
851
852 if (this_priority < 0)
853 {
854 gcc_assert (this_priority == -1);
855
856 this_priority = insn_cost (insn);
857 }
858
859 INSN_PRIORITY (insn) = this_priority;
860 INSN_PRIORITY_STATUS (insn) = 1;
861 }
862
863 return INSN_PRIORITY (insn);
864 }
865 \f
866 /* Macros and functions for keeping the priority queue sorted, and
867 dealing with queuing and dequeuing of instructions. */
868
869 #define SCHED_SORT(READY, N_READY) \
870 do { if ((N_READY) == 2) \
871 swap_sort (READY, N_READY); \
872 else if ((N_READY) > 2) \
873 qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \
874 while (0)
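/* Note that because rank_for_schedule returns a positive value when its
   first argument is preferred, sorting leaves the most preferable insn
   at the highest index of the live region, i.e. at ready->vec[ready->first],
   which is exactly the element that ready_remove_first picks.  */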
875
876 /* Returns a positive value if x is preferred; returns a negative value if
877 y is preferred. Should never return 0, since that will make the sort
878 unstable. */
879
880 static int
881 rank_for_schedule (const void *x, const void *y)
882 {
883 rtx tmp = *(const rtx *) y;
884 rtx tmp2 = *(const rtx *) x;
885 int tmp_class, tmp2_class;
886 int val, priority_val, weight_val, info_val;
887
 888   /* The insn in a schedule group should be issued first.  */
889 if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
890 return SCHED_GROUP_P (tmp2) ? 1 : -1;
891
892 /* Make sure that priority of TMP and TMP2 are initialized. */
893 gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
894
895 /* Prefer insn with higher priority. */
896 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
897
898 if (priority_val)
899 return priority_val;
900
 901   /* Prefer the speculative insn with the greater dependency weakness.  */
902 if (spec_info)
903 {
904 ds_t ds1, ds2;
905 dw_t dw1, dw2;
906 int dw;
907
908 ds1 = TODO_SPEC (tmp) & SPECULATIVE;
909 if (ds1)
910 dw1 = ds_weak (ds1);
911 else
912 dw1 = NO_DEP_WEAK;
913
914 ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
915 if (ds2)
916 dw2 = ds_weak (ds2);
917 else
918 dw2 = NO_DEP_WEAK;
919
920 dw = dw2 - dw1;
921 if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
922 return dw;
923 }
924
 925   /* Prefer an insn with a smaller contribution to register pressure.  */
926 if (!reload_completed &&
927 (weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2)))
928 return weight_val;
929
930 info_val = (*current_sched_info->rank) (tmp, tmp2);
931 if (info_val)
932 return info_val;
933
934 /* Compare insns based on their relation to the last-scheduled-insn. */
935 if (INSN_P (last_scheduled_insn))
936 {
937 dep_t dep1;
938 dep_t dep2;
939
940 /* Classify the instructions into three classes:
 941          1) Data dependent on last scheduled insn.
942 2) Anti/Output dependent on last scheduled insn.
943 3) Independent of last scheduled insn, or has latency of one.
944 Choose the insn from the highest numbered class if different. */
945 dep1 = sd_find_dep_between (last_scheduled_insn, tmp, true);
946
947 if (dep1 == NULL || dep_cost (dep1) == 1)
948 tmp_class = 3;
949 else if (/* Data dependence. */
950 DEP_TYPE (dep1) == REG_DEP_TRUE)
951 tmp_class = 1;
952 else
953 tmp_class = 2;
954
955 dep2 = sd_find_dep_between (last_scheduled_insn, tmp2, true);
956
957 if (dep2 == NULL || dep_cost (dep2) == 1)
958 tmp2_class = 3;
959 else if (/* Data dependence. */
960 DEP_TYPE (dep2) == REG_DEP_TRUE)
961 tmp2_class = 1;
962 else
963 tmp2_class = 2;
964
965 if ((val = tmp2_class - tmp_class))
966 return val;
967 }
968
969 /* Prefer the insn which has more later insns that depend on it.
970 This gives the scheduler more freedom when scheduling later
971 instructions at the expense of added register pressure. */
972
973 val = (sd_lists_size (tmp2, SD_LIST_FORW)
974 - sd_lists_size (tmp, SD_LIST_FORW));
975
976 if (val != 0)
977 return val;
978
979 /* If insns are equally good, sort by INSN_LUID (original insn order),
980 so that we make the sort stable. This minimizes instruction movement,
981 thus minimizing sched's effect on debugging and cross-jumping. */
982 return INSN_LUID (tmp) - INSN_LUID (tmp2);
983 }
984
985 /* Resort the array A in which only element at index N may be out of order. */
986
987 HAIFA_INLINE static void
988 swap_sort (rtx *a, int n)
989 {
990 rtx insn = a[n - 1];
991 int i = n - 2;
992
993 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
994 {
995 a[i + 1] = a[i];
996 i -= 1;
997 }
998 a[i + 1] = insn;
999 }
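/* In effect this is a single insertion-sort step: the new element sinks
   toward the front of the array past every element that rank_for_schedule
   prefers over it, so the array stays sorted with the most preferable
   insn last.  */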
1000
1001 /* Add INSN to the insn queue so that it can be executed at least
1002 N_CYCLES after the currently executing insn. Preserve insns
1003 chain for debugging purposes. */
1004
1005 HAIFA_INLINE static void
1006 queue_insn (rtx insn, int n_cycles)
1007 {
1008 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
1009 rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
1010
1011 gcc_assert (n_cycles <= max_insn_queue_index);
1012
1013 insn_queue[next_q] = link;
1014 q_size += 1;
1015
1016 if (sched_verbose >= 2)
1017 {
1018 fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
1019 (*current_sched_info->print_insn) (insn, 0));
1020
1021 fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
1022 }
1023
1024 QUEUE_INDEX (insn) = next_q;
1025 }
1026
1027 /* Remove INSN from queue. */
1028 static void
1029 queue_remove (rtx insn)
1030 {
1031 gcc_assert (QUEUE_INDEX (insn) >= 0);
1032 remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
1033 q_size--;
1034 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
1035 }
1036
1037 /* Return a pointer to the bottom of the ready list, i.e. the insn
1038 with the lowest priority. */
1039
1040 rtx *
1041 ready_lastpos (struct ready_list *ready)
1042 {
1043 gcc_assert (ready->n_ready >= 1);
1044 return ready->vec + ready->first - ready->n_ready + 1;
1045 }
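/* The n_ready live entries occupy ready->vec[first - n_ready + 1 .. first];
   the element at ready->vec[first] is the insn with the highest priority
   (the one ready_remove_first takes), while the pointer returned above
   addresses the lowest-priority insn.  */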
1046
1047 /* Add an element INSN to the ready list so that it ends up with the
1048 lowest/highest priority depending on FIRST_P. */
1049
1050 HAIFA_INLINE static void
1051 ready_add (struct ready_list *ready, rtx insn, bool first_p)
1052 {
1053 if (!first_p)
1054 {
1055 if (ready->first == ready->n_ready)
1056 {
1057 memmove (ready->vec + ready->veclen - ready->n_ready,
1058 ready_lastpos (ready),
1059 ready->n_ready * sizeof (rtx));
1060 ready->first = ready->veclen - 1;
1061 }
1062 ready->vec[ready->first - ready->n_ready] = insn;
1063 }
1064 else
1065 {
1066 if (ready->first == ready->veclen - 1)
1067 {
1068 if (ready->n_ready)
1069 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
1070 memmove (ready->vec + ready->veclen - ready->n_ready - 1,
1071 ready_lastpos (ready),
1072 ready->n_ready * sizeof (rtx));
1073 ready->first = ready->veclen - 2;
1074 }
1075 ready->vec[++(ready->first)] = insn;
1076 }
1077
1078 ready->n_ready++;
1079
1080 gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
1081 QUEUE_INDEX (insn) = QUEUE_READY;
1082 }
1083
1084 /* Remove the element with the highest priority from the ready list and
1085 return it. */
1086
1087 HAIFA_INLINE static rtx
1088 ready_remove_first (struct ready_list *ready)
1089 {
1090 rtx t;
1091
1092 gcc_assert (ready->n_ready);
1093 t = ready->vec[ready->first--];
1094 ready->n_ready--;
1095 /* If the queue becomes empty, reset it. */
1096 if (ready->n_ready == 0)
1097 ready->first = ready->veclen - 1;
1098
1099 gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
1100 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1101
1102 return t;
1103 }
1104
1105 /* The following code implements multi-pass scheduling for the first
1106    cycle.  In other words, we will try to choose the ready insn which
1107    permits starting the maximum number of insns on the same cycle.  */
1108
1109 /* Return a pointer to the element INDEX from the ready list.  The INDEX
1110    of the insn with the highest priority is 0, and the one with the
1111    lowest priority has index N_READY - 1.  */
1112
1113 rtx
1114 ready_element (struct ready_list *ready, int index)
1115 {
1116 gcc_assert (ready->n_ready && index < ready->n_ready);
1117
1118 return ready->vec[ready->first - index];
1119 }
1120
1121 /* Remove the element INDEX from the ready list and return it.  The INDEX
1122    of the insn with the highest priority is 0, and the one with the
1123    lowest priority has index N_READY - 1.  */
1124
1125 HAIFA_INLINE static rtx
1126 ready_remove (struct ready_list *ready, int index)
1127 {
1128 rtx t;
1129 int i;
1130
1131 if (index == 0)
1132 return ready_remove_first (ready);
1133 gcc_assert (ready->n_ready && index < ready->n_ready);
1134 t = ready->vec[ready->first - index];
1135 ready->n_ready--;
1136 for (i = index; i < ready->n_ready; i++)
1137 ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
1138 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1139 return t;
1140 }
1141
1142 /* Remove INSN from the ready list. */
1143 static void
1144 ready_remove_insn (rtx insn)
1145 {
1146 int i;
1147
1148 for (i = 0; i < readyp->n_ready; i++)
1149 if (ready_element (readyp, i) == insn)
1150 {
1151 ready_remove (readyp, i);
1152 return;
1153 }
1154 gcc_unreachable ();
1155 }
1156
1157 /* Sort the ready list READY by ascending priority, using the SCHED_SORT
1158 macro. */
1159
1160 void
1161 ready_sort (struct ready_list *ready)
1162 {
1163 rtx *first = ready_lastpos (ready);
1164 SCHED_SORT (first, ready->n_ready);
1165 }
1166
1167 /* PREV is an insn that is ready to execute. Adjust its priority if that
1168 will help shorten or lengthen register lifetimes as appropriate. Also
1169 provide a hook for the target to tweak itself. */
1170
1171 HAIFA_INLINE static void
1172 adjust_priority (rtx prev)
1173 {
1174 /* ??? There used to be code here to try and estimate how an insn
1175 affected register lifetimes, but it did it by looking at REG_DEAD
1176 notes, which we removed in schedule_region. Nor did it try to
1177 take into account register pressure or anything useful like that.
1178
1179 Revisit when we have a machine model to work with and not before. */
1180
1181 if (targetm.sched.adjust_priority)
1182 INSN_PRIORITY (prev) =
1183 targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
1184 }
1185
1186 /* Advance DFA state STATE on one cycle. */
1187 void
1188 advance_state (state_t state)
1189 {
1190 if (targetm.sched.dfa_pre_advance_cycle)
1191 targetm.sched.dfa_pre_advance_cycle ();
1192
1193 if (targetm.sched.dfa_pre_cycle_insn)
1194 state_transition (state,
1195 targetm.sched.dfa_pre_cycle_insn ());
1196
1197 state_transition (state, NULL);
1198
1199 if (targetm.sched.dfa_post_cycle_insn)
1200 state_transition (state,
1201 targetm.sched.dfa_post_cycle_insn ());
1202
1203 if (targetm.sched.dfa_post_advance_cycle)
1204 targetm.sched.dfa_post_advance_cycle ();
1205 }
1206
1207 /* Advance time on one cycle. */
1208 HAIFA_INLINE static void
1209 advance_one_cycle (void)
1210 {
1211 advance_state (curr_state);
1212 if (sched_verbose >= 6)
1213 fprintf (sched_dump, "\n;;\tAdvanced a state.\n");
1214 }
1215
1216 /* Clock at which the previous instruction was issued. */
1217 static int last_clock_var;
1218
1219 /* INSN is the "currently executing insn".  Launch each insn which was
1220    waiting on INSN.  `ready' is the ready list which contains the insns
1221    that are ready to fire.  `clock_var' is the current cycle.  The
1222    function returns the necessary cycle advance after issuing the insn
1223    (it is not zero for insns in a schedule group).  */
1224
1225 static int
1226 schedule_insn (rtx insn)
1227 {
1228 sd_iterator_def sd_it;
1229 dep_t dep;
1230 int advance = 0;
1231
1232 if (sched_verbose >= 1)
1233 {
1234 char buf[2048];
1235
1236 print_insn (buf, insn, 0);
1237 buf[40] = 0;
1238 fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);
1239
1240 if (recog_memoized (insn) < 0)
1241 fprintf (sched_dump, "nothing");
1242 else
1243 print_reservation (sched_dump, insn);
1244 fputc ('\n', sched_dump);
1245 }
1246
1247 /* Scheduling instruction should have all its dependencies resolved and
1248 should have been removed from the ready list. */
1249 gcc_assert (sd_lists_empty_p (insn, SD_LIST_BACK));
1250
1251 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
1252 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
1253
1254 gcc_assert (INSN_TICK (insn) >= MIN_TICK);
1255 if (INSN_TICK (insn) > clock_var)
1256 /* INSN has been prematurely moved from the queue to the ready list.
1257 This is possible only if following flag is set. */
1258 gcc_assert (flag_sched_stalled_insns);
1259
1260 /* ??? Probably, if INSN is scheduled prematurely, we should leave
1261 INSN_TICK untouched. This is a machine-dependent issue, actually. */
1262 INSN_TICK (insn) = clock_var;
1263
1264 /* Update dependent instructions. */
1265 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
1266 sd_iterator_cond (&sd_it, &dep);)
1267 {
1268 rtx next = DEP_CON (dep);
1269
1270 /* Resolve the dependence between INSN and NEXT.
1271 sd_resolve_dep () moves current dep to another list thus
1272 advancing the iterator. */
1273 sd_resolve_dep (sd_it);
1274
1275 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
1276 {
1277 int effective_cost;
1278
1279 effective_cost = try_ready (next);
1280
1281 if (effective_cost >= 0
1282 && SCHED_GROUP_P (next)
1283 && advance < effective_cost)
1284 advance = effective_cost;
1285 }
1286 else
1287 	/* A check insn always has only one forward dependence (to the first
1288 	   insn in the recovery block); therefore, this will be executed only once.  */
1289 {
1290 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
1291 fix_recovery_deps (RECOVERY_BLOCK (insn));
1292 }
1293 }
1294
1295   /* This is the place where the scheduler basically no longer needs the
1296      backward and forward dependencies for INSN.  Nevertheless they are
1297      still used in heuristics in rank_for_schedule (), early_queue_to_ready ()
1298      and in some targets (e.g. rs6000).  Thus the earliest place where we
1299      *can* remove dependencies is after the targetm.sched.md_finish () call
1300      in schedule_block ().  But, on the other hand, the safest place to
1301      remove dependencies is when we are finishing scheduling the entire
1302      region.  As we don't generate [many] dependencies during scheduling
1303      itself, we won't need memory until the beginning of the next region.
1304      Bottom line: dependencies are removed for all insns at the end of
1305      scheduling the region.  */
1306
1307 /* Annotate the instruction with issue information -- TImode
1308 indicates that the instruction is expected not to be able
1309 to issue on the same cycle as the previous insn. A machine
1310 may use this information to decide how the instruction should
1311 be aligned. */
1312 if (issue_rate > 1
1313 && GET_CODE (PATTERN (insn)) != USE
1314 && GET_CODE (PATTERN (insn)) != CLOBBER)
1315 {
1316 if (reload_completed)
1317 PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
1318 last_clock_var = clock_var;
1319 }
1320
1321 return advance;
1322 }
1323
1324 /* Functions for handling of notes. */
1325
1326 /* Insert the INSN note at the end of the notes list. */
1327 static void
1328 add_to_note_list (rtx insn, rtx *note_list_end_p)
1329 {
1330 PREV_INSN (insn) = *note_list_end_p;
1331 if (*note_list_end_p)
1332 NEXT_INSN (*note_list_end_p) = insn;
1333 *note_list_end_p = insn;
1334 }
1335
1336 /* Add note list that ends on FROM_END to the end of TO_ENDP. */
1337 void
1338 concat_note_lists (rtx from_end, rtx *to_endp)
1339 {
1340 rtx from_start;
1341
1342 if (from_end == NULL)
1343     /* It's easy when we have nothing to concat.  */
1344 return;
1345
1346 if (*to_endp == NULL)
1347 /* It's also easy when destination is empty. */
1348 {
1349 *to_endp = from_end;
1350 return;
1351 }
1352
1353 from_start = from_end;
1354 /* A note list should be traversed via PREV_INSN. */
1355 while (PREV_INSN (from_start) != NULL)
1356 from_start = PREV_INSN (from_start);
1357
1358 add_to_note_list (from_start, to_endp);
1359 *to_endp = from_end;
1360 }
1361
1362 /* Delete notes beginning with INSN and put them in the chain
1363 of notes ended by NOTE_LIST.
1364 Returns the insn following the notes. */
1365 static rtx
1366 unlink_other_notes (rtx insn, rtx tail)
1367 {
1368 rtx prev = PREV_INSN (insn);
1369
1370 while (insn != tail && NOTE_NOT_BB_P (insn))
1371 {
1372 rtx next = NEXT_INSN (insn);
1373 basic_block bb = BLOCK_FOR_INSN (insn);
1374
1375 /* Delete the note from its current position. */
1376 if (prev)
1377 NEXT_INSN (prev) = next;
1378 if (next)
1379 PREV_INSN (next) = prev;
1380
1381 if (bb)
1382 {
1383 /* Basic block can begin with either LABEL or
1384 NOTE_INSN_BASIC_BLOCK. */
1385 gcc_assert (BB_HEAD (bb) != insn);
1386
1387 /* Check if we are removing last insn in the BB. */
1388 if (BB_END (bb) == insn)
1389 BB_END (bb) = prev;
1390 }
1391
1392 /* See sched_analyze to see how these are handled. */
1393 if (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
1394 && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END)
1395 add_to_note_list (insn, &note_list);
1396
1397 insn = next;
1398 }
1399
1400 if (insn == tail)
1401 {
1402 gcc_assert (sel_sched_p ());
1403 return prev;
1404 }
1405
1406 return insn;
1407 }
1408
1409 /* Return the head and tail pointers of ebb starting at BEG and ending
1410 at END. */
1411 void
1412 get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
1413 {
1414 rtx beg_head = BB_HEAD (beg);
1415 rtx beg_tail = BB_END (beg);
1416 rtx end_head = BB_HEAD (end);
1417 rtx end_tail = BB_END (end);
1418
1419 /* Don't include any notes or labels at the beginning of the BEG
1420      basic block, or notes at the end of the END basic block.  */
1421
1422 if (LABEL_P (beg_head))
1423 beg_head = NEXT_INSN (beg_head);
1424
1425 while (beg_head != beg_tail)
1426 if (NOTE_P (beg_head))
1427 beg_head = NEXT_INSN (beg_head);
1428 else
1429 break;
1430
1431 *headp = beg_head;
1432
1433 if (beg == end)
1434 end_head = beg_head;
1435 else if (LABEL_P (end_head))
1436 end_head = NEXT_INSN (end_head);
1437
1438 while (end_head != end_tail)
1439 if (NOTE_P (end_tail))
1440 end_tail = PREV_INSN (end_tail);
1441 else
1442 break;
1443
1444 *tailp = end_tail;
1445 }
1446
1447 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
1448
1449 int
1450 no_real_insns_p (const_rtx head, const_rtx tail)
1451 {
1452 while (head != NEXT_INSN (tail))
1453 {
1454 if (!NOTE_P (head) && !LABEL_P (head))
1455 return 0;
1456 head = NEXT_INSN (head);
1457 }
1458 return 1;
1459 }
1460
1461 /* Delete notes between HEAD and TAIL and put them in the chain
1462 of notes ended by NOTE_LIST. */
1463 static void
1464 rm_other_notes (rtx head, rtx tail)
1465 {
1466 rtx next_tail;
1467 rtx insn;
1468
1469 note_list = 0;
1470 if (head == tail && (! INSN_P (head)))
1471 return;
1472
1473 next_tail = NEXT_INSN (tail);
1474 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1475 {
1476 rtx prev;
1477
1478 /* Farm out notes, and maybe save them in NOTE_LIST.
1479 This is needed to keep the debugger from
1480 getting completely deranged. */
1481 if (NOTE_NOT_BB_P (insn))
1482 {
1483 prev = insn;
1484 insn = unlink_other_notes (insn, next_tail);
1485
1486 gcc_assert ((sel_sched_p ()
1487 || prev != tail) && prev != head && insn != next_tail);
1488 }
1489 }
1490 }
1491
1492 /* Same as above, but also process REG_SAVE_NOTEs of HEAD. */
1493 void
1494 remove_notes (rtx head, rtx tail)
1495 {
1496 /* rm_other_notes only removes notes which are _inside_ the
1497 block---that is, it won't remove notes before the first real insn
1498 or after the last real insn of the block. So if the first insn
1499 has a REG_SAVE_NOTE which would otherwise be emitted before the
1500 insn, it is redundant with the note before the start of the
1501 block, and so we have to take it out. */
1502 if (INSN_P (head))
1503 {
1504 rtx note;
1505
1506 for (note = REG_NOTES (head); note; note = XEXP (note, 1))
1507 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
1508 remove_note (head, note);
1509 }
1510
1511 /* Remove remaining note insns from the block, save them in
1512 note_list. These notes are restored at the end of
1513 schedule_block (). */
1514 rm_other_notes (head, tail);
1515 }
1516
1517 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
1518 previously found among the insns. Insert them just before HEAD. */
1519 rtx
1520 restore_other_notes (rtx head, basic_block head_bb)
1521 {
1522 if (note_list != 0)
1523 {
1524 rtx note_head = note_list;
1525
1526 if (head)
1527 head_bb = BLOCK_FOR_INSN (head);
1528 else
1529 head = NEXT_INSN (bb_note (head_bb));
1530
1531 while (PREV_INSN (note_head))
1532 {
1533 set_block_for_insn (note_head, head_bb);
1534 note_head = PREV_INSN (note_head);
1535 }
1536       /* In the loop above we've missed this note.  */
1537 set_block_for_insn (note_head, head_bb);
1538
1539 PREV_INSN (note_head) = PREV_INSN (head);
1540 NEXT_INSN (PREV_INSN (head)) = note_head;
1541 PREV_INSN (head) = note_list;
1542 NEXT_INSN (note_list) = head;
1543
1544 if (BLOCK_FOR_INSN (head) != head_bb)
1545 BB_END (head_bb) = note_list;
1546
1547 head = note_head;
1548 }
1549
1550 return head;
1551 }
1552
1553 /* Functions for computation of registers live/usage info. */
1554
1555 /* This function looks for a new register being defined.
1556 If the destination register is already used by the source,
1557 a new register is not needed. */
1558 static int
1559 find_set_reg_weight (const_rtx x)
1560 {
1561 if (GET_CODE (x) == CLOBBER
1562 && register_operand (SET_DEST (x), VOIDmode))
1563 return 1;
1564 if (GET_CODE (x) == SET
1565 && register_operand (SET_DEST (x), VOIDmode))
1566 {
1567 if (REG_P (SET_DEST (x)))
1568 {
1569 if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
1570 return 1;
1571 else
1572 return 0;
1573 }
1574 return 1;
1575 }
1576 return 0;
1577 }
1578
1579 /* Calculate INSN_REG_WEIGHT for INSN. */
1580 static void
1581 find_insn_reg_weight (const_rtx insn)
1582 {
1583 int reg_weight = 0;
1584 rtx x;
1585
1586 /* Handle register life information. */
1587 if (! INSN_P (insn))
1588 return;
1589
1590 /* Increment weight for each register born here. */
1591 x = PATTERN (insn);
1592 reg_weight += find_set_reg_weight (x);
1593 if (GET_CODE (x) == PARALLEL)
1594 {
1595 int j;
1596 for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
1597 {
1598 x = XVECEXP (PATTERN (insn), 0, j);
1599 reg_weight += find_set_reg_weight (x);
1600 }
1601 }
1602 /* Decrement weight for each register that dies here. */
1603 for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
1604 {
1605 if (REG_NOTE_KIND (x) == REG_DEAD
1606 || REG_NOTE_KIND (x) == REG_UNUSED)
1607 reg_weight--;
1608 }
1609
1610 INSN_REG_WEIGHT (insn) = reg_weight;
1611 }
1612
1613 /* Move insns that became ready to fire from queue to ready list. */
1614
1615 static void
1616 queue_to_ready (struct ready_list *ready)
1617 {
1618 rtx insn;
1619 rtx link;
1620 rtx skip_insn;
1621
1622 q_ptr = NEXT_Q (q_ptr);
1623
1624 if (dbg_cnt (sched_insn) == false)
1625 /* If debug counter is activated do not requeue insn next after
1626 last_scheduled_insn. */
1627 skip_insn = next_nonnote_insn (last_scheduled_insn);
1628 else
1629 skip_insn = NULL_RTX;
1630
1631 /* Add all pending insns that can be scheduled without stalls to the
1632 ready list. */
1633 for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
1634 {
1635 insn = XEXP (link, 0);
1636 q_size -= 1;
1637
1638 if (sched_verbose >= 2)
1639 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
1640 (*current_sched_info->print_insn) (insn, 0));
1641
1642 /* If the ready list is full, delay the insn for 1 cycle.
1643 See the comment in schedule_block for the rationale. */
1644 if (!reload_completed
1645 && ready->n_ready > MAX_SCHED_READY_INSNS
1646 && !SCHED_GROUP_P (insn)
1647 && insn != skip_insn)
1648 {
1649 if (sched_verbose >= 2)
1650 fprintf (sched_dump, "requeued because ready full\n");
1651 queue_insn (insn, 1);
1652 }
1653 else
1654 {
1655 ready_add (ready, insn, false);
1656 if (sched_verbose >= 2)
1657 fprintf (sched_dump, "moving to ready without stalls\n");
1658 }
1659 }
1660 free_INSN_LIST_list (&insn_queue[q_ptr]);
1661
1662 /* If there are no ready insns, stall until one is ready and add all
1663 of the pending insns at that point to the ready list. */
1664 if (ready->n_ready == 0)
1665 {
1666 int stalls;
1667
1668 for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
1669 {
1670 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
1671 {
1672 for (; link; link = XEXP (link, 1))
1673 {
1674 insn = XEXP (link, 0);
1675 q_size -= 1;
1676
1677 if (sched_verbose >= 2)
1678 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
1679 (*current_sched_info->print_insn) (insn, 0));
1680
1681 ready_add (ready, insn, false);
1682 if (sched_verbose >= 2)
1683 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
1684 }
1685 free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
1686
1687 advance_one_cycle ();
1688
1689 break;
1690 }
1691
1692 advance_one_cycle ();
1693 }
1694
1695 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
1696 clock_var += stalls;
1697 }
1698 }
1699
1700 /* Used by early_queue_to_ready. Determines whether it is "ok" to
1701 prematurely move INSN from the queue to the ready list. Currently,
1702 if a target defines the hook 'is_costly_dependence', this function
1703 uses the hook to check whether there exist any dependences which are
1704 considered costly by the target, between INSN and other insns that
1705 have already been scheduled. Dependences are checked up to Y cycles
1706    back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
1707    controlling this value.
1708    (Other considerations could be taken into account instead (or in
1709    addition) depending on user flags and target hooks.)  */
1710
1711 static bool
1712 ok_for_early_queue_removal (rtx insn)
1713 {
1714 int n_cycles;
1715 rtx prev_insn = last_scheduled_insn;
1716
1717 if (targetm.sched.is_costly_dependence)
1718 {
1719 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
1720 {
1721 for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
1722 {
1723 int cost;
1724
1725 if (prev_insn == current_sched_info->prev_head)
1726 {
1727 prev_insn = NULL;
1728 break;
1729 }
1730
1731 if (!NOTE_P (prev_insn))
1732 {
1733 dep_t dep;
1734
1735 dep = sd_find_dep_between (prev_insn, insn, true);
1736
1737 if (dep != NULL)
1738 {
1739 cost = dep_cost (dep);
1740
1741 if (targetm.sched.is_costly_dependence (dep, cost,
1742 flag_sched_stalled_insns_dep - n_cycles))
1743 return false;
1744 }
1745 }
1746
1747 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
1748 break;
1749 }
1750
1751 if (!prev_insn)
1752 break;
1753 prev_insn = PREV_INSN (prev_insn);
1754 }
1755 }
1756
1757 return true;
1758 }
1759
1760
1761 /* Remove insns from the queue, before they become "ready" with respect
1762 to FU latency considerations. */
1763
1764 static int
1765 early_queue_to_ready (state_t state, struct ready_list *ready)
1766 {
1767 rtx insn;
1768 rtx link;
1769 rtx next_link;
1770 rtx prev_link;
1771 bool move_to_ready;
1772 int cost;
1773 state_t temp_state = alloca (dfa_state_size);
1774 int stalls;
1775 int insns_removed = 0;
1776
1777 /*
1778 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
1779 function:
1780
1781 X == 0: There is no limit on how many queued insns can be removed
1782 prematurely. (flag_sched_stalled_insns = -1).
1783
1784 X >= 1: Only X queued insns can be removed prematurely in each
1785 invocation. (flag_sched_stalled_insns = X).
1786
1787 Otherwise: Early queue removal is disabled.
1788 (flag_sched_stalled_insns = 0)
1789 */
1790
1791 if (! flag_sched_stalled_insns)
1792 return 0;
1793
1794 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
1795 {
1796 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
1797 {
1798 if (sched_verbose > 6)
1799 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
1800
1801 prev_link = 0;
1802 while (link)
1803 {
1804 next_link = XEXP (link, 1);
1805 insn = XEXP (link, 0);
1806 if (insn && sched_verbose > 6)
1807 print_rtl_single (sched_dump, insn);
1808
1809 memcpy (temp_state, state, dfa_state_size);
1810 if (recog_memoized (insn) < 0)
1811 	      /* Use a non-negative cost to indicate that the insn is not
1812 	         ready yet, to avoid an infinite Q->R->Q->R... cycle.  */
1813 cost = 0;
1814 else
1815 cost = state_transition (temp_state, insn);
1816
1817 if (sched_verbose >= 6)
1818 fprintf (sched_dump, "transition cost = %d\n", cost);
1819
1820 move_to_ready = false;
1821 if (cost < 0)
1822 {
1823 move_to_ready = ok_for_early_queue_removal (insn);
1824 if (move_to_ready == true)
1825 {
1826 /* move from Q to R */
1827 q_size -= 1;
1828 ready_add (ready, insn, false);
1829
1830 if (prev_link)
1831 XEXP (prev_link, 1) = next_link;
1832 else
1833 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
1834
1835 free_INSN_LIST_node (link);
1836
1837 if (sched_verbose >= 2)
1838 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
1839 (*current_sched_info->print_insn) (insn, 0));
1840
1841 insns_removed++;
1842 if (insns_removed == flag_sched_stalled_insns)
1843 /* Remove no more than flag_sched_stalled_insns insns
1844 from Q at a time. */
1845 return insns_removed;
1846 }
1847 }
1848
1849 if (move_to_ready == false)
1850 prev_link = link;
1851
1852 link = next_link;
1853 } /* while link */
1854 } /* if link */
1855
1856 } /* for stalls.. */
1857
1858 return insns_removed;
1859 }
1860
1861
1862 /* Print the ready list for debugging purposes. Callable from debugger. */
1863
1864 static void
1865 debug_ready_list (struct ready_list *ready)
1866 {
1867 rtx *p;
1868 int i;
1869
1870 if (ready->n_ready == 0)
1871 {
1872 fprintf (sched_dump, "\n");
1873 return;
1874 }
1875
1876 p = ready_lastpos (ready);
1877 for (i = 0; i < ready->n_ready; i++)
1878 fprintf (sched_dump, " %s", (*current_sched_info->print_insn) (p[i], 0));
1879 fprintf (sched_dump, "\n");
1880 }
1881
1882 /* Search INSN for REG_SAVE_NOTE note pairs for
1883    NOTE_INSN_EH_REGION_{BEG,END} and convert them back into
1884    NOTEs.  The REG_SAVE_NOTE note following the first one contains the
1885    saved value for NOTE_BLOCK_NUMBER, which is useful for
1886    NOTE_INSN_EH_REGION_{BEG,END} NOTEs.  */
1887 void
1888 reemit_notes (rtx insn)
1889 {
1890 rtx note, last = insn;
1891
1892 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
1893 {
1894 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
1895 {
1896 enum insn_note note_type = INTVAL (XEXP (note, 0));
1897
1898 last = emit_note_before (note_type, last);
1899 remove_note (insn, note);
1900 }
1901 }
1902 }
1903
1904 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
1905 static void
1906 move_insn (rtx insn, rtx last, rtx nt)
1907 {
1908 if (PREV_INSN (insn) != last)
1909 {
1910 basic_block bb;
1911 rtx note;
1912 int jump_p = 0;
1913
1914 bb = BLOCK_FOR_INSN (insn);
1915
1916 /* BB_HEAD is either LABEL or NOTE. */
1917 gcc_assert (BB_HEAD (bb) != insn);
1918
1919 if (BB_END (bb) == insn)
1920 	/* If this is the last instruction in BB, move the end marker one
1921 	   instruction up.  */
1922 	  {
1923 	    /* Jumps are always placed at the end of a basic block.  */
1924 jump_p = control_flow_insn_p (insn);
1925
1926 gcc_assert (!jump_p
1927 || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
1928 && IS_SPECULATION_BRANCHY_CHECK_P (insn))
1929 || (common_sched_info->sched_pass_id
1930 == SCHED_EBB_PASS));
1931
1932 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
1933
1934 BB_END (bb) = PREV_INSN (insn);
1935 }
1936
1937 gcc_assert (BB_END (bb) != last);
1938
1939 if (jump_p)
1940 /* We move the block note along with jump. */
1941 {
1942 gcc_assert (nt);
1943
1944 note = NEXT_INSN (insn);
1945 while (NOTE_NOT_BB_P (note) && note != nt)
1946 note = NEXT_INSN (note);
1947
1948 if (note != nt
1949 && (LABEL_P (note)
1950 || BARRIER_P (note)))
1951 note = NEXT_INSN (note);
1952
1953 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
1954 }
1955 else
1956 note = insn;
1957
1958 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
1959 PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
1960
1961 NEXT_INSN (note) = NEXT_INSN (last);
1962 PREV_INSN (NEXT_INSN (last)) = note;
1963
1964 NEXT_INSN (last) = insn;
1965 PREV_INSN (insn) = last;
1966
1967 bb = BLOCK_FOR_INSN (last);
1968
1969 if (jump_p)
1970 {
1971 fix_jump_move (insn);
1972
1973 if (BLOCK_FOR_INSN (insn) != bb)
1974 move_block_after_check (insn);
1975
1976 gcc_assert (BB_END (bb) == last);
1977 }
1978
1979 df_insn_change_bb (insn, bb);
1980
1981 /* Update BB_END, if needed. */
1982 if (BB_END (bb) == last)
1983 BB_END (bb) = insn;
1984 }
1985
1986 SCHED_GROUP_P (insn) = 0;
1987 }
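/* Note on the pointer surgery in move_insn above: INSN (together with the
   trailing notes that must travel with a jump) is unlinked from its old
   position via the NEXT_INSN/PREV_INSN chain and spliced back in right
   after LAST, with BB_END and the insn-to-block mapping updated to match.  */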
1988
1989 /* The following structure describes an entry of the stack of choices.  */
1990 struct choice_entry
1991 {
1992 /* Ordinal number of the issued insn in the ready queue. */
1993 int index;
1994   /* The number of remaining insns whose issue we should still try.  */
1995 int rest;
1996 /* The number of issued essential insns. */
1997 int n;
1998 /* State after issuing the insn. */
1999 state_t state;
2000 };
2001
2002 /* The following array is used to implement a stack of choices used in
2003 function max_issue. */
2004 static struct choice_entry *choice_stack;
2005
2006 /* The value of the following variable is the number of essential insns
2007    issued on the current cycle.  An insn is essential if it changes the
2008    processor's state.  */
2009 int cycle_issued_insns;
2010
2011 /* This holds the value of the target dfa_lookahead hook. */
2012 int dfa_lookahead;
2013
2014 /* The value of the following variable is the maximal number of tries of
2015    issuing insns for the first cycle multipass insn scheduling.  We define
2016    this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE).  We would not
2017    need this constraint if all real insns (with non-negative codes)
2018    had reservations, because in that case the algorithm complexity is
2019    O(DFA_LOOKAHEAD**ISSUE_RATE).  Unfortunately, the DFA descriptions
2020    might be incomplete and such insns might occur.  For such
2021    descriptions, the complexity of the algorithm (without the constraint)
2022    could reach DFA_LOOKAHEAD ** N, where N is the queue length.  */
2023 static int max_lookahead_tries;
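/* For example (values are illustrative only): with dfa_lookahead == 4 and
   issue_rate == 2, max_issue () below initializes max_lookahead_tries to
   100 * 4 * 4 == 1600 tries per first-cycle multipass search.  */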
2024
2025 /* The following value is the value of the hook
2026 `first_cycle_multipass_dfa_lookahead' at the last call of
2027 `max_issue'. */
2028 static int cached_first_cycle_multipass_dfa_lookahead = 0;
2029
2030 /* The following value is the value of `issue_rate' at the last call of
2031 `sched_init'. */
2032 static int cached_issue_rate = 0;
2033
2034 /* The following function returns the maximal (or close to maximal)
2035    number of insns which can be issued on the same cycle, one of which
2036    is the insn with the best rank (the first insn in READY).  To do
2037    this, the function tries different samples of ready insns.  READY
2038    is the current queue `ready'.  The global array READY_TRY reflects
2039    which insns have already been issued in this try.  MAX_POINTS is the
2040    sum of the points of all instructions in READY.  The function stops
2041    immediately if it reaches a solution in which all instructions can
2042    be issued.  INDEX will contain the index of the best insn in READY.
2043    This function is used only for first cycle multipass scheduling.
2044
2045 PRIVILEGED_N >= 0
2046
2047 This function expects recognized insns only. All USEs,
2048 CLOBBERs, etc must be filtered elsewhere. */
2049 int
2050 max_issue (struct ready_list *ready, int privileged_n, state_t state,
2051 int *index)
2052 {
2053 int n, i, all, n_ready, best, delay, tries_num, points = -1, max_points;
2054 int more_issue;
2055 struct choice_entry *top;
2056 rtx insn;
2057
2058 n_ready = ready->n_ready;
2059 gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
2060 && privileged_n <= n_ready);
2061
2062 /* Init MAX_LOOKAHEAD_TRIES. */
2063 if (cached_first_cycle_multipass_dfa_lookahead != dfa_lookahead)
2064 {
2065 cached_first_cycle_multipass_dfa_lookahead = dfa_lookahead;
2066 max_lookahead_tries = 100;
2067 for (i = 0; i < issue_rate; i++)
2068 max_lookahead_tries *= dfa_lookahead;
2069 }
2070
2071 /* Init max_points. */
2072 max_points = 0;
2073 more_issue = issue_rate - cycle_issued_insns;
2074 gcc_assert (more_issue >= 0);
2075
2076 for (i = 0; i < n_ready; i++)
2077 if (!ready_try [i])
2078 {
2079 if (more_issue-- > 0)
2080 max_points += ISSUE_POINTS (ready_element (ready, i));
2081 else
2082 break;
2083 }
2084
2085 /* The number of the issued insns in the best solution. */
2086 best = 0;
2087
2088 top = choice_stack;
2089
2090 /* Set initial state of the search. */
2091 memcpy (top->state, state, dfa_state_size);
2092 top->rest = dfa_lookahead;
2093 top->n = 0;
2094
2095 /* Count the number of the insns to search among. */
2096 for (all = i = 0; i < n_ready; i++)
2097 if (!ready_try [i])
2098 all++;
2099
2100 /* I is the index of the insn to try next. */
2101 i = 0;
2102 tries_num = 0;
2103 for (;;)
2104 {
2105 if (/* If we've reached a dead end or searched enough of what we have
2106 been asked... */
2107 top->rest == 0
2108 /* Or have nothing else to try. */
2109 || i >= n_ready)
2110 {
2111 /* ??? (... || i == n_ready). */
2112 gcc_assert (i <= n_ready);
2113
2114 if (top == choice_stack)
2115 break;
2116
2117 if (best < top - choice_stack)
2118 {
2119 if (privileged_n)
2120 {
2121 n = privileged_n;
2122 /* Try to find issued privileged insn. */
2123 while (n && !ready_try[--n]);
2124 }
2125
2126 if (/* If all insns are equally good... */
2127 privileged_n == 0
2128 /* Or a privileged insn will be issued. */
2129 || ready_try[n])
2130 /* Then we have a solution. */
2131 {
2132 best = top - choice_stack;
2133 /* This is the index of the insn issued first in this
2134 solution. */
2135 *index = choice_stack [1].index;
2136 points = top->n;
2137 if (top->n == max_points || best == all)
2138 break;
2139 }
2140 }
2141
2142 /* Set ready-list index to point to the last insn
2143 ('i++' below will advance it to the next insn). */
2144 i = top->index;
2145
2146 /* Backtrack. */
2147 ready_try [i] = 0;
2148 top--;
2149 memcpy (state, top->state, dfa_state_size);
2150 }
2151 else if (!ready_try [i])
2152 {
2153 tries_num++;
2154 if (tries_num > max_lookahead_tries)
2155 break;
2156 insn = ready_element (ready, i);
2157 delay = state_transition (state, insn);
2158 if (delay < 0)
2159 {
2160 if (state_dead_lock_p (state))
2161 top->rest = 0;
2162 else
2163 top->rest--;
2164
2165 n = top->n;
2166 if (memcmp (top->state, state, dfa_state_size) != 0)
2167 n += ISSUE_POINTS (insn);
2168
2169 /* Advance to the next choice_entry. */
2170 top++;
2171 /* Initialize it. */
2172 top->rest = dfa_lookahead;
2173 top->index = i;
2174 top->n = n;
2175 memcpy (top->state, state, dfa_state_size);
2176
2177 ready_try [i] = 1;
2178 i = -1;
2179 }
2180 }
2181
2182 /* Increase ready-list index. */
2183 i++;
2184 }
2185
2186 /* Restore the original state of the DFA. */
2187 memcpy (state, choice_stack->state, dfa_state_size);
2188
2189 return best;
2190 }
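/* A sketch of the search in max_issue above: choice_stack is used as an
   explicit depth-first search stack.  Each level tentatively issues one
   not-yet-tried ready insn (state_transition () must accept it in the
   current DFA state), accumulates its ISSUE_POINTS in ->n, and descends
   until the lookahead budget ->rest is exhausted or nothing else fits;
   backtracking restores the DFA state saved in the parent entry.  The best
   (deepest) solution found within max_lookahead_tries tries is returned,
   subject to the PRIVILEGED_N rule described above.  */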
2191
2192 /* The following function chooses an insn from READY and modifies
2193    READY.  It is used only for first cycle multipass
2194    scheduling.
2195 Return:
2196 -1 if cycle should be advanced,
2197 0 if INSN_PTR is set to point to the desirable insn,
2198 1 if choose_ready () should be restarted without advancing the cycle. */
2199 static int
2200 choose_ready (struct ready_list *ready, rtx *insn_ptr)
2201 {
2202 int lookahead;
2203
2204 if (dbg_cnt (sched_insn) == false)
2205 {
2206 rtx insn;
2207
2208 insn = next_nonnote_insn (last_scheduled_insn);
2209
2210 if (QUEUE_INDEX (insn) == QUEUE_READY)
2211 /* INSN is in the ready_list. */
2212 {
2213 ready_remove_insn (insn);
2214 *insn_ptr = insn;
2215 return 0;
2216 }
2217
2218 /* INSN is in the queue. Advance cycle to move it to the ready list. */
2219 return -1;
2220 }
2221
2222 lookahead = 0;
2223
2224 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
2225 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
2226 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0)))
2227 {
2228 *insn_ptr = ready_remove_first (ready);
2229 return 0;
2230 }
2231 else
2232 {
2233 /* Try to choose the better insn. */
2234 int index = 0, i, n;
2235 rtx insn;
2236 int try_data = 1, try_control = 1;
2237 ds_t ts;
2238
2239 insn = ready_element (ready, 0);
2240 if (INSN_CODE (insn) < 0)
2241 {
2242 *insn_ptr = ready_remove_first (ready);
2243 return 0;
2244 }
2245
2246 if (spec_info
2247 && spec_info->flags & (PREFER_NON_DATA_SPEC
2248 | PREFER_NON_CONTROL_SPEC))
2249 {
2250 for (i = 0, n = ready->n_ready; i < n; i++)
2251 {
2252 rtx x;
2253 ds_t s;
2254
2255 x = ready_element (ready, i);
2256 s = TODO_SPEC (x);
2257
2258 if (spec_info->flags & PREFER_NON_DATA_SPEC
2259 && !(s & DATA_SPEC))
2260 {
2261 try_data = 0;
2262 if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
2263 || !try_control)
2264 break;
2265 }
2266
2267 if (spec_info->flags & PREFER_NON_CONTROL_SPEC
2268 && !(s & CONTROL_SPEC))
2269 {
2270 try_control = 0;
2271 if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
2272 break;
2273 }
2274 }
2275 }
2276
2277 ts = TODO_SPEC (insn);
2278 if ((ts & SPECULATIVE)
2279 && (((!try_data && (ts & DATA_SPEC))
2280 || (!try_control && (ts & CONTROL_SPEC)))
2281 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2282 && !targetm.sched
2283 .first_cycle_multipass_dfa_lookahead_guard_spec (insn))))
2284 	/* Discard the speculative instruction that stands first in the
2285 	   ready list.  */
2286 {
2287 change_queue_index (insn, 1);
2288 return 1;
2289 }
2290
2291 ready_try[0] = 0;
2292
2293 for (i = 1; i < ready->n_ready; i++)
2294 {
2295 insn = ready_element (ready, i);
2296
2297 ready_try [i]
2298 = ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2299 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC)));
2300 }
2301
2302 /* Let the target filter the search space. */
2303 for (i = 1; i < ready->n_ready; i++)
2304 if (!ready_try[i])
2305 {
2306 insn = ready_element (ready, i);
2307
2308 gcc_assert (INSN_CODE (insn) >= 0
2309 || recog_memoized (insn) < 0);
2310
2311 ready_try [i]
2312 = (/* INSN_CODE check can be omitted here as it is also done later
2313 in max_issue (). */
2314 INSN_CODE (insn) < 0
2315 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2316 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2317 (insn)));
2318 }
2319
2320 if (max_issue (ready, 1, curr_state, &index) == 0)
2321 {
2322 if (sched_verbose >= 4)
2323 fprintf (sched_dump, ";;\t\tChosen none\n");
2324 *insn_ptr = ready_remove_first (ready);
2325 return 0;
2326 }
2327 else
2328 {
2329 if (sched_verbose >= 4)
2330 fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
2331 (*current_sched_info->print_insn)
2332 (ready_element (ready, index), 0));
2333
2334 *insn_ptr = ready_remove (ready, index);
2335 return 0;
2336 }
2337 }
2338 }
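/* Note on choose_ready above: the ready_try[] array acts as the mask
   consumed by max_issue () -- nonzero entries are excluded from the
   multipass search.  Entry 0 is always cleared so that the best-ranked
   insn remains a candidate; the other entries are set either for
   speculation reasons or because the target's lookahead guard rejected
   the insn.  */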
2339
2340 /* Use forward list scheduling to rearrange insns of block pointed to by
2341 TARGET_BB, possibly bringing insns from subsequent blocks in the same
2342 region. */
2343
2344 void
2345 schedule_block (basic_block *target_bb)
2346 {
2347 int i, first_cycle_insn_p;
2348 int can_issue_more;
2349 state_t temp_state = NULL; /* It is used for multipass scheduling. */
2350 int sort_p, advance, start_clock_var;
2351
2352 /* Head/tail info for this block. */
2353 rtx prev_head = current_sched_info->prev_head;
2354 rtx next_tail = current_sched_info->next_tail;
2355 rtx head = NEXT_INSN (prev_head);
2356 rtx tail = PREV_INSN (next_tail);
2357
2358 /* We used to have code to avoid getting parameters moved from hard
2359 argument registers into pseudos.
2360
2361 However, it was removed when it proved to be of marginal benefit
2362 and caused problems because schedule_block and compute_forward_dependences
2363 had different notions of what the "head" insn was. */
2364
2365 gcc_assert (head != tail || INSN_P (head));
2366
2367 haifa_recovery_bb_recently_added_p = false;
2368
2369 /* Debug info. */
2370 if (sched_verbose)
2371 dump_new_block_header (0, *target_bb, head, tail);
2372
2373 state_reset (curr_state);
2374
2375 /* Clear the ready list. */
2376 ready.first = ready.veclen - 1;
2377 ready.n_ready = 0;
2378
2379 /* It is used for first cycle multipass scheduling. */
2380 temp_state = alloca (dfa_state_size);
2381
2382 if (targetm.sched.md_init)
2383 targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);
2384
2385 /* We start inserting insns after PREV_HEAD. */
2386 last_scheduled_insn = prev_head;
2387
2388 gcc_assert (NOTE_P (last_scheduled_insn)
2389 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
2390
2391 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
2392 queue. */
2393 q_ptr = 0;
2394 q_size = 0;
2395
2396 insn_queue = XALLOCAVEC (rtx, max_insn_queue_index + 1);
2397 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
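  /* Roughly speaking, INSN_QUEUE is used as a circular buffer of
     INSN_LISTs: slot NEXT_Q_AFTER (q_ptr, N) holds the insns that become
     ready N cycles from now, and queue_to_ready () drains the slot that
     Q_PTR reaches as the clock advances.  */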
2398
2399 /* Start just before the beginning of time. */
2400 clock_var = -1;
2401
2402   /* We need the queue and ready lists and clock_var to be initialized
2403 in try_ready () (which is called through init_ready_list ()). */
2404 (*current_sched_info->init_ready_list) ();
2405
2406 /* The algorithm is O(n^2) in the number of ready insns at any given
2407 time in the worst case. Before reload we are more likely to have
2408 big lists so truncate them to a reasonable size. */
2409 if (!reload_completed && ready.n_ready > MAX_SCHED_READY_INSNS)
2410 {
2411 ready_sort (&ready);
2412
2413 /* Find first free-standing insn past MAX_SCHED_READY_INSNS. */
2414 for (i = MAX_SCHED_READY_INSNS; i < ready.n_ready; i++)
2415 if (!SCHED_GROUP_P (ready_element (&ready, i)))
2416 break;
2417
2418 if (sched_verbose >= 2)
2419 {
2420 fprintf (sched_dump,
2421 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
2422 fprintf (sched_dump,
2423 ";;\t\t before reload => truncated to %d insns\n", i);
2424 }
2425
2426 /* Delay all insns past it for 1 cycle. If debug counter is
2427 activated make an exception for the insn right after
2428 last_scheduled_insn. */
2429 {
2430 rtx skip_insn;
2431
2432 if (dbg_cnt (sched_insn) == false)
2433 skip_insn = next_nonnote_insn (last_scheduled_insn);
2434 else
2435 skip_insn = NULL_RTX;
2436
2437 while (i < ready.n_ready)
2438 {
2439 rtx insn;
2440
2441 insn = ready_remove (&ready, i);
2442
2443 if (insn != skip_insn)
2444 queue_insn (insn, 1);
2445 }
2446 }
2447 }
2448
2449 /* Now we can restore basic block notes and maintain precise cfg. */
2450 restore_bb_notes (*target_bb);
2451
2452 last_clock_var = -1;
2453
2454 advance = 0;
2455
2456 sort_p = TRUE;
2457 /* Loop until all the insns in BB are scheduled. */
2458 while ((*current_sched_info->schedule_more_p) ())
2459 {
2460 do
2461 {
2462 start_clock_var = clock_var;
2463
2464 clock_var++;
2465
2466 advance_one_cycle ();
2467
2468 /* Add to the ready list all pending insns that can be issued now.
2469 If there are no ready insns, increment clock until one
2470 is ready and add all pending insns at that point to the ready
2471 list. */
2472 queue_to_ready (&ready);
2473
2474 gcc_assert (ready.n_ready);
2475
2476 if (sched_verbose >= 2)
2477 {
2478 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
2479 debug_ready_list (&ready);
2480 }
2481 advance -= clock_var - start_clock_var;
2482 }
2483 while (advance > 0);
2484
2485 if (sort_p)
2486 {
2487 /* Sort the ready list based on priority. */
2488 ready_sort (&ready);
2489
2490 if (sched_verbose >= 2)
2491 {
2492 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
2493 debug_ready_list (&ready);
2494 }
2495 }
2496
2497 /* Allow the target to reorder the list, typically for
2498 better instruction bundling. */
2499 if (sort_p && targetm.sched.reorder
2500 && (ready.n_ready == 0
2501 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2502 can_issue_more =
2503 targetm.sched.reorder (sched_dump, sched_verbose,
2504 ready_lastpos (&ready),
2505 &ready.n_ready, clock_var);
2506 else
2507 can_issue_more = issue_rate;
2508
2509 first_cycle_insn_p = 1;
2510 cycle_issued_insns = 0;
2511 for (;;)
2512 {
2513 rtx insn;
2514 int cost;
2515 bool asm_p = false;
2516
2517 if (sched_verbose >= 2)
2518 {
2519 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
2520 clock_var);
2521 debug_ready_list (&ready);
2522 }
2523
2524 if (ready.n_ready == 0
2525 && can_issue_more
2526 && reload_completed)
2527 {
2528 /* Allow scheduling insns directly from the queue in case
2529 there's nothing better to do (ready list is empty) but
2530 there are still vacant dispatch slots in the current cycle. */
2531 if (sched_verbose >= 6)
2532 fprintf (sched_dump,";;\t\tSecond chance\n");
2533 memcpy (temp_state, curr_state, dfa_state_size);
2534 if (early_queue_to_ready (temp_state, &ready))
2535 ready_sort (&ready);
2536 }
2537
2538 if (ready.n_ready == 0 || !can_issue_more
2539 || state_dead_lock_p (curr_state)
2540 || !(*current_sched_info->schedule_more_p) ())
2541 break;
2542
2543 /* Select and remove the insn from the ready list. */
2544 if (sort_p)
2545 {
2546 int res;
2547
2548 insn = NULL_RTX;
2549 res = choose_ready (&ready, &insn);
2550
2551 if (res < 0)
2552 /* Finish cycle. */
2553 break;
2554 if (res > 0)
2555 /* Restart choose_ready (). */
2556 continue;
2557
2558 gcc_assert (insn != NULL_RTX);
2559 }
2560 else
2561 insn = ready_remove_first (&ready);
2562
2563 if (targetm.sched.dfa_new_cycle
2564 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
2565 insn, last_clock_var,
2566 clock_var, &sort_p))
2567 /* SORT_P is used by the target to override sorting
2568 of the ready list. This is needed when the target
2569 has modified its internal structures expecting that
2570 the insn will be issued next. As we need the insn
2571 to have the highest priority (so it will be returned by
2572 the ready_remove_first call above), we invoke
2573 ready_add (&ready, insn, true).
2574 But, still, there is one issue: INSN can be later
2575 discarded by scheduler's front end through
2576 current_sched_info->can_schedule_ready_p, hence, won't
2577 be issued next. */
2578 {
2579 ready_add (&ready, insn, true);
2580 break;
2581 }
2582
2583 sort_p = TRUE;
2584 memcpy (temp_state, curr_state, dfa_state_size);
2585 if (recog_memoized (insn) < 0)
2586 {
2587 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
2588 || asm_noperands (PATTERN (insn)) >= 0);
2589 if (!first_cycle_insn_p && asm_p)
2590 	    /* This is an asm insn that we are trying to issue on a cycle
2591 	       other than the first.  Issue it on the next cycle.  */
2592 cost = 1;
2593 else
2594 /* A USE insn, or something else we don't need to
2595 understand. We can't pass these directly to
2596 state_transition because it will trigger a
2597 fatal error for unrecognizable insns. */
2598 cost = 0;
2599 }
2600 else
2601 {
2602 cost = state_transition (temp_state, insn);
2603 if (cost < 0)
2604 cost = 0;
2605 else if (cost == 0)
2606 cost = 1;
2607 }
2608
2609 if (cost >= 1)
2610 {
2611 queue_insn (insn, cost);
2612 if (SCHED_GROUP_P (insn))
2613 {
2614 advance = cost;
2615 break;
2616 }
2617
2618 continue;
2619 }
2620
2621 if (current_sched_info->can_schedule_ready_p
2622 && ! (*current_sched_info->can_schedule_ready_p) (insn))
2623 /* We normally get here only if we don't want to move
2624 insn from the split block. */
2625 {
2626 TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
2627 continue;
2628 }
2629
2630 /* DECISION is made. */
2631
2632 if (TODO_SPEC (insn) & SPECULATIVE)
2633 generate_recovery_code (insn);
2634
2635 if (control_flow_insn_p (last_scheduled_insn)
2636 	      /* This is used to switch basic blocks by request
2637 		 from the scheduler front end (actually, sched-ebb.c only).
2638 		 It is used to process blocks with a single fallthru
2639 		 edge.  If the succeeding block has a jump, that jump will
2640 		 try to move to the end of the current bb, corrupting the CFG.  */
2641 || current_sched_info->advance_target_bb (*target_bb, insn))
2642 {
2643 *target_bb = current_sched_info->advance_target_bb
2644 (*target_bb, 0);
2645
2646 if (sched_verbose)
2647 {
2648 rtx x;
2649
2650 x = next_real_insn (last_scheduled_insn);
2651 gcc_assert (x);
2652 dump_new_block_header (1, *target_bb, x, tail);
2653 }
2654
2655 last_scheduled_insn = bb_note (*target_bb);
2656 }
2657
2658 /* Update counters, etc in the scheduler's front end. */
2659 (*current_sched_info->begin_schedule_ready) (insn,
2660 last_scheduled_insn);
2661
2662 move_insn (insn, last_scheduled_insn, current_sched_info->next_tail);
2663 reemit_notes (insn);
2664 last_scheduled_insn = insn;
2665
2666 if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
2667 {
2668 cycle_issued_insns++;
2669 memcpy (curr_state, temp_state, dfa_state_size);
2670 }
2671
2672 if (targetm.sched.variable_issue)
2673 can_issue_more =
2674 targetm.sched.variable_issue (sched_dump, sched_verbose,
2675 insn, can_issue_more);
2676 /* A naked CLOBBER or USE generates no instruction, so do
2677 not count them against the issue rate. */
2678 else if (GET_CODE (PATTERN (insn)) != USE
2679 && GET_CODE (PATTERN (insn)) != CLOBBER)
2680 can_issue_more--;
2681
2682 advance = schedule_insn (insn);
2683
2684 /* After issuing an asm insn we should start a new cycle. */
2685 if (advance == 0 && asm_p)
2686 advance = 1;
2687 if (advance != 0)
2688 break;
2689
2690 first_cycle_insn_p = 0;
2691
2692 /* Sort the ready list based on priority. This must be
2693 redone here, as schedule_insn may have readied additional
2694 insns that will not be sorted correctly. */
2695 if (ready.n_ready > 0)
2696 ready_sort (&ready);
2697
2698 if (targetm.sched.reorder2
2699 && (ready.n_ready == 0
2700 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2701 {
2702 can_issue_more =
2703 targetm.sched.reorder2 (sched_dump, sched_verbose,
2704 ready.n_ready
2705 ? ready_lastpos (&ready) : NULL,
2706 &ready.n_ready, clock_var);
2707 }
2708 }
2709 }
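  /* To summarize the main loop above, each iteration handles one clock
     cycle: advance the DFA and the queue (queue_to_ready), sort the ready
     list, let the target reorder it, and then issue insns one by one until
     the cycle is full, the ready list empties, or the DFA refuses further
     issues.  Insns that cannot be issued yet are pushed back onto the
     queue with their remaining latency.  */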
2710
2711 /* Debug info. */
2712 if (sched_verbose)
2713 {
2714 fprintf (sched_dump, ";;\tReady list (final): ");
2715 debug_ready_list (&ready);
2716 }
2717
2718 if (current_sched_info->queue_must_finish_empty)
2719 /* Sanity check -- queue must be empty now. Meaningless if region has
2720 multiple bbs. */
2721 gcc_assert (!q_size && !ready.n_ready);
2722 else
2723 {
2724 /* We must maintain QUEUE_INDEX between blocks in region. */
2725 for (i = ready.n_ready - 1; i >= 0; i--)
2726 {
2727 rtx x;
2728
2729 x = ready_element (&ready, i);
2730 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2731 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2732 }
2733
2734 if (q_size)
2735 for (i = 0; i <= max_insn_queue_index; i++)
2736 {
2737 rtx link;
2738 for (link = insn_queue[i]; link; link = XEXP (link, 1))
2739 {
2740 rtx x;
2741
2742 x = XEXP (link, 0);
2743 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2744 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2745 }
2746 free_INSN_LIST_list (&insn_queue[i]);
2747 }
2748 }
2749
2750 if (sched_verbose)
2751 fprintf (sched_dump, ";; total time = %d\n", clock_var);
2752
2753 if (!current_sched_info->queue_must_finish_empty
2754 || haifa_recovery_bb_recently_added_p)
2755 {
2756       /* INSN_TICK (minimum clock tick at which the insn becomes
2757          ready) may not be correct for insns in the subsequent
2758          blocks of the region.  We should either use a correct value of
2759          `clock_var' or modify INSN_TICK.  It is better to keep
2760          the clock_var value equal to 0 at the start of a basic block.
2761          Therefore we modify INSN_TICK here.  */
2762 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
2763 }
2764
2765 if (targetm.sched.md_finish)
2766 {
2767 targetm.sched.md_finish (sched_dump, sched_verbose);
2768 /* Target might have added some instructions to the scheduled block
2769 in its md_finish () hook. These new insns don't have any data
2770 initialized and to identify them we extend h_i_d so that they'll
2771 get zero luids. */
2772 sched_init_luids (NULL, NULL, NULL, NULL);
2773 }
2774
2775 if (sched_verbose)
2776 fprintf (sched_dump, ";; new head = %d\n;; new tail = %d\n\n",
2777 INSN_UID (head), INSN_UID (tail));
2778
2779 /* Update head/tail boundaries. */
2780 head = NEXT_INSN (prev_head);
2781 tail = last_scheduled_insn;
2782
2783 head = restore_other_notes (head, NULL);
2784
2785 current_sched_info->head = head;
2786 current_sched_info->tail = tail;
2787 }
2788 \f
2789 /* set_priorities: compute the priority of each insn in the block.  */
2790
2791 int
2792 set_priorities (rtx head, rtx tail)
2793 {
2794 rtx insn;
2795 int n_insn;
2796 int sched_max_insns_priority =
2797 current_sched_info->sched_max_insns_priority;
2798 rtx prev_head;
2799
2800 if (head == tail && (! INSN_P (head)))
2801 return 0;
2802
2803 n_insn = 0;
2804
2805 prev_head = PREV_INSN (head);
2806 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
2807 {
2808 if (!INSN_P (insn))
2809 continue;
2810
2811 n_insn++;
2812 (void) priority (insn);
2813
2814 gcc_assert (INSN_PRIORITY_KNOWN (insn));
2815
2816 sched_max_insns_priority = MAX (sched_max_insns_priority,
2817 INSN_PRIORITY (insn));
2818 }
2819
2820 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
2821
2822 return n_insn;
2823 }
2824
2825 /* Set dump and sched_verbose for the desired debugging output. If no
2826 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
2827 For -fsched-verbose=N, N>=10, print everything to stderr. */
2828 void
2829 setup_sched_dump (void)
2830 {
2831 sched_verbose = sched_verbose_param;
2832 if (sched_verbose_param == 0 && dump_file)
2833 sched_verbose = 1;
2834 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
2835 ? stderr : dump_file);
2836 }
2837
2838 /* Initialize some global state for the scheduler. This function works
2839 with the common data shared between all the schedulers. It is called
2840 from the scheduler specific initialization routine. */
2841
2842 void
2843 sched_init (void)
2844 {
2845   /* If cc0 is defined, disable speculative loads in its presence.  */
2846 #ifdef HAVE_cc0
2847 flag_schedule_speculative_load = 0;
2848 #endif
2849
2850 /* Initialize SPEC_INFO. */
2851 if (targetm.sched.set_sched_flags)
2852 {
2853 spec_info = &spec_info_var;
2854 targetm.sched.set_sched_flags (spec_info);
2855
2856 if (spec_info->mask != 0)
2857 {
2858 spec_info->data_weakness_cutoff =
2859 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
2860 spec_info->control_weakness_cutoff =
2861 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
2862 * REG_BR_PROB_BASE) / 100;
2863 }
2864 else
2865 /* So we won't read anything accidentally. */
2866 spec_info = NULL;
2867
2868 }
2869 else
2870 /* So we won't read anything accidentally. */
2871 spec_info = 0;
2872
2873 /* Initialize issue_rate. */
2874 if (targetm.sched.issue_rate)
2875 issue_rate = targetm.sched.issue_rate ();
2876 else
2877 issue_rate = 1;
2878
2879 if (cached_issue_rate != issue_rate)
2880 {
2881 cached_issue_rate = issue_rate;
2882 /* To invalidate max_lookahead_tries: */
2883 cached_first_cycle_multipass_dfa_lookahead = 0;
2884 }
2885
2886 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
2887 dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
2888 else
2889 dfa_lookahead = 0;
2890
2891 if (targetm.sched.init_dfa_pre_cycle_insn)
2892 targetm.sched.init_dfa_pre_cycle_insn ();
2893
2894 if (targetm.sched.init_dfa_post_cycle_insn)
2895 targetm.sched.init_dfa_post_cycle_insn ();
2896
2897 dfa_start ();
2898 dfa_state_size = state_size ();
2899
2900 init_alias_analysis ();
2901
2902 df_set_flags (DF_LR_RUN_DCE);
2903 df_note_add_problem ();
2904
2905 /* More problems needed for interloop dep calculation in SMS. */
2906 if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
2907 {
2908 df_rd_add_problem ();
2909 df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
2910 }
2911
2912 df_analyze ();
2913
2914 /* Do not run DCE after reload, as this can kill nops inserted
2915 by bundling. */
2916 if (reload_completed)
2917 df_clear_flags (DF_LR_RUN_DCE);
2918
2919 regstat_compute_calls_crossed ();
2920
2921 if (targetm.sched.md_init_global)
2922 targetm.sched.md_init_global (sched_dump, sched_verbose,
2923 get_max_uid () + 1);
2924
2925 curr_state = xmalloc (dfa_state_size);
2926 }
2927
2928 static void haifa_init_only_bb (basic_block, basic_block);
2929
2930 /* Initialize data structures specific to the Haifa scheduler. */
2931 void
2932 haifa_sched_init (void)
2933 {
2934 setup_sched_dump ();
2935 sched_init ();
2936
2937 if (spec_info != NULL)
2938 {
2939 sched_deps_info->use_deps_list = 1;
2940 sched_deps_info->generate_spec_deps = 1;
2941 }
2942
2943 /* Initialize luids, dependency caches, target and h_i_d for the
2944 whole function. */
2945 {
2946 bb_vec_t bbs = VEC_alloc (basic_block, heap, n_basic_blocks);
2947 basic_block bb;
2948
2949 sched_init_bbs ();
2950
2951 FOR_EACH_BB (bb)
2952 VEC_quick_push (basic_block, bbs, bb);
2953 sched_init_luids (bbs, NULL, NULL, NULL);
2954 sched_deps_init (true);
2955 sched_extend_target ();
2956 haifa_init_h_i_d (bbs, NULL, NULL, NULL);
2957
2958 VEC_free (basic_block, heap, bbs);
2959 }
2960
2961 sched_init_only_bb = haifa_init_only_bb;
2962 sched_split_block = sched_split_block_1;
2963 sched_create_empty_bb = sched_create_empty_bb_1;
2964 haifa_recovery_bb_ever_added_p = false;
2965
2966 #ifdef ENABLE_CHECKING
2967   /* This is used primarily for finding bugs in check_cfg () itself.
2968      We must call sched_init_bbs () before check_cfg () because check_cfg ()
2969      assumes that the last insn in the last bb has a non-null successor.  */
2970 check_cfg (0, 0);
2971 #endif
2972
2973 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
2974 before_recovery = 0;
2975 after_recovery = 0;
2976 }
2977
2978 /* Finish work with the data specific to the Haifa scheduler. */
2979 void
2980 haifa_sched_finish (void)
2981 {
2982 sched_create_empty_bb = NULL;
2983 sched_split_block = NULL;
2984 sched_init_only_bb = NULL;
2985
2986 if (spec_info && spec_info->dump)
2987 {
2988 char c = reload_completed ? 'a' : 'b';
2989
2990 fprintf (spec_info->dump,
2991 ";; %s:\n", current_function_name ());
2992
2993 fprintf (spec_info->dump,
2994 ";; Procedure %cr-begin-data-spec motions == %d\n",
2995 c, nr_begin_data);
2996 fprintf (spec_info->dump,
2997 ";; Procedure %cr-be-in-data-spec motions == %d\n",
2998 c, nr_be_in_data);
2999 fprintf (spec_info->dump,
3000 ";; Procedure %cr-begin-control-spec motions == %d\n",
3001 c, nr_begin_control);
3002 fprintf (spec_info->dump,
3003 ";; Procedure %cr-be-in-control-spec motions == %d\n",
3004 c, nr_be_in_control);
3005 }
3006
3007 /* Finalize h_i_d, dependency caches, and luids for the whole
3008 function. Target will be finalized in md_global_finish (). */
3009 sched_deps_finish ();
3010 sched_finish_luids ();
3011 current_sched_info = NULL;
3012 sched_finish ();
3013 }
3014
3015 /* Free global data used during insn scheduling. This function works with
3016 the common data shared between the schedulers. */
3017
3018 void
3019 sched_finish (void)
3020 {
3021 haifa_finish_h_i_d ();
3022 free (curr_state);
3023
3024 if (targetm.sched.md_finish_global)
3025 targetm.sched.md_finish_global (sched_dump, sched_verbose);
3026
3027 end_alias_analysis ();
3028
3029 regstat_free_calls_crossed ();
3030
3031 dfa_finish ();
3032
3033 #ifdef ENABLE_CHECKING
3034   /* After reload the ia64 backend clobbers the CFG, so we can't check anything.  */
3035 if (!reload_completed)
3036 check_cfg (0, 0);
3037 #endif
3038 }
3039
3040 /* Fix INSN_TICKs of the instructions in the current block as well as
3041 INSN_TICKs of their dependents.
3042    HEAD and TAIL are the beginning and the end of the current scheduled block.  */
3043 static void
3044 fix_inter_tick (rtx head, rtx tail)
3045 {
3046 /* Set of instructions with corrected INSN_TICK. */
3047 bitmap_head processed;
3048   /* ??? It is doubtful whether we should assume that cycle advance happens
3049      on basic block boundaries.  Basically, insns that are unconditionally
3050      ready at the start of the block are preferable to those which have
3051      a one cycle dependency on an insn from the previous block.  */
3052 int next_clock = clock_var + 1;
3053
3054 bitmap_initialize (&processed, 0);
3055
3056   /* Iterate over the scheduled instructions and fix their INSN_TICKs and
3057      the INSN_TICKs of dependent instructions, so that INSN_TICKs are
3058      consistent across different blocks.  */
3059 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
3060 {
3061 if (INSN_P (head))
3062 {
3063 int tick;
3064 sd_iterator_def sd_it;
3065 dep_t dep;
3066
3067 tick = INSN_TICK (head);
3068 gcc_assert (tick >= MIN_TICK);
3069
3070 /* Fix INSN_TICK of instruction from just scheduled block. */
3071 if (!bitmap_bit_p (&processed, INSN_LUID (head)))
3072 {
3073 bitmap_set_bit (&processed, INSN_LUID (head));
3074 tick -= next_clock;
3075
3076 if (tick < MIN_TICK)
3077 tick = MIN_TICK;
3078
3079 INSN_TICK (head) = tick;
3080 }
3081
3082 FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
3083 {
3084 rtx next;
3085
3086 next = DEP_CON (dep);
3087 tick = INSN_TICK (next);
3088
3089 if (tick != INVALID_TICK
3090 /* If NEXT has its INSN_TICK calculated, fix it.
3091 If not - it will be properly calculated from
3092 scratch later in fix_tick_ready. */
3093 && !bitmap_bit_p (&processed, INSN_LUID (next)))
3094 {
3095 bitmap_set_bit (&processed, INSN_LUID (next));
3096 tick -= next_clock;
3097
3098 if (tick < MIN_TICK)
3099 tick = MIN_TICK;
3100
3101 if (tick > INTER_TICK (next))
3102 INTER_TICK (next) = tick;
3103 else
3104 tick = INTER_TICK (next);
3105
3106 INSN_TICK (next) = tick;
3107 }
3108 }
3109 }
3110 }
3111 bitmap_clear (&processed);
3112 }
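/* An illustrative example of the normalization above: if the block just
   scheduled ended at clock_var == 7 (so next_clock == 8) and an insn in a
   later block had INSN_TICK == 10, its tick becomes 2, i.e. it is ready
   two cycles after the next block starts counting from zero.  */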
3113
3114 static int haifa_speculate_insn (rtx, ds_t, rtx *);
3115
3116 /* Check if NEXT is ready to be added to the ready or queue list.
3117 If "yes", add it to the proper list.
3118 Returns:
3119 -1 - is not ready yet,
3120 0 - added to the ready list,
3121 0 < N - queued for N cycles. */
3122 int
3123 try_ready (rtx next)
3124 {
3125 ds_t old_ts, *ts;
3126
3127 ts = &TODO_SPEC (next);
3128 old_ts = *ts;
3129
3130 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
3131 && ((old_ts & HARD_DEP)
3132 || (old_ts & SPECULATIVE)));
3133
3134 if (sd_lists_empty_p (next, SD_LIST_BACK))
3135 /* NEXT has all its dependencies resolved. */
3136 {
3137 /* Remove HARD_DEP bit from NEXT's status. */
3138 *ts &= ~HARD_DEP;
3139
3140 if (current_sched_info->flags & DO_SPECULATION)
3141 /* Remove all speculative bits from NEXT's status. */
3142 *ts &= ~SPECULATIVE;
3143 }
3144 else
3145 {
3146 /* One of the NEXT's dependencies has been resolved.
3147 Recalculate NEXT's status. */
3148
3149 *ts &= ~SPECULATIVE & ~HARD_DEP;
3150
3151 if (sd_lists_empty_p (next, SD_LIST_HARD_BACK))
3152 /* Now we've got NEXT with speculative deps only.
3153 1. Look at the deps to see what we have to do.
3154 2. Check if we can do 'todo'. */
3155 {
3156 sd_iterator_def sd_it;
3157 dep_t dep;
3158 bool first_p = true;
3159
3160 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
3161 {
3162 ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
3163
3164 if (first_p)
3165 {
3166 first_p = false;
3167
3168 *ts = ds;
3169 }
3170 else
3171 *ts = ds_merge (*ts, ds);
3172 }
3173
3174 if (ds_weak (*ts) < spec_info->data_weakness_cutoff)
3175 /* Too few points. */
3176 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3177 }
3178 else
3179 *ts |= HARD_DEP;
3180 }
3181
3182 if (*ts & HARD_DEP)
3183 gcc_assert (*ts == old_ts
3184 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
3185 else if (current_sched_info->new_ready)
3186 *ts = current_sched_info->new_ready (next, *ts);
3187
3188   /* * If !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then the insn might
3189      have its original pattern or a changed (speculative) one.  This is due
3190      to changing the ebb in region scheduling.
3191      * But if (old_ts & SPECULATIVE), then we are pretty sure that the insn
3192      has a speculative pattern.
3193
3194 We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
3195 control-speculative NEXT could have been discarded by sched-rgn.c
3196 (the same case as when discarded by can_schedule_ready_p ()). */
3197
3198 if ((*ts & SPECULATIVE)
3199 /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
3200 need to change anything. */
3201 && *ts != old_ts)
3202 {
3203 int res;
3204 rtx new_pat;
3205
3206 gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
3207
3208 res = haifa_speculate_insn (next, *ts, &new_pat);
3209
3210 switch (res)
3211 {
3212 case -1:
3213 /* It would be nice to change DEP_STATUS of all dependences,
3214 which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
3215 so we won't reanalyze anything. */
3216 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3217 break;
3218
3219 case 0:
3220 /* We follow the rule, that every speculative insn
3221 has non-null ORIG_PAT. */
3222 if (!ORIG_PAT (next))
3223 ORIG_PAT (next) = PATTERN (next);
3224 break;
3225
3226 case 1:
3227 if (!ORIG_PAT (next))
3228 	    /* If we are going to overwrite the original pattern of the insn,
3229 	       save it.  */
3230 ORIG_PAT (next) = PATTERN (next);
3231
3232 haifa_change_pattern (next, new_pat);
3233 break;
3234
3235 default:
3236 gcc_unreachable ();
3237 }
3238 }
3239
3240   /* We need to restore the pattern only if (*ts == 0), because otherwise it is
3241 either correct (*ts & SPECULATIVE),
3242 or we simply don't care (*ts & HARD_DEP). */
3243
3244 gcc_assert (!ORIG_PAT (next)
3245 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
3246
3247 if (*ts & HARD_DEP)
3248 {
3249 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
3250 control-speculative NEXT could have been discarded by sched-rgn.c
3251 (the same case as when discarded by can_schedule_ready_p ()). */
3252 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
3253
3254 change_queue_index (next, QUEUE_NOWHERE);
3255 return -1;
3256 }
3257 else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
3258     /* We should change the pattern of every previously speculative
3259        instruction - and we determine whether NEXT was speculative by using
3260        the ORIG_PAT field.  There is one exception: speculation checks also
3261        have ORIG_PAT set, so skip them.  */
3262 {
3263 haifa_change_pattern (next, ORIG_PAT (next));
3264 ORIG_PAT (next) = 0;
3265 }
3266
3267 if (sched_verbose >= 2)
3268 {
3269 int s = TODO_SPEC (next);
3270
3271 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
3272 (*current_sched_info->print_insn) (next, 0));
3273
3274 if (spec_info && spec_info->dump)
3275 {
3276 if (s & BEGIN_DATA)
3277 fprintf (spec_info->dump, "; data-spec;");
3278 if (s & BEGIN_CONTROL)
3279 fprintf (spec_info->dump, "; control-spec;");
3280 if (s & BE_IN_CONTROL)
3281 fprintf (spec_info->dump, "; in-control-spec;");
3282 }
3283
3284 fprintf (sched_dump, "\n");
3285 }
3286
3287 adjust_priority (next);
3288
3289 return fix_tick_ready (next);
3290 }
3291
3292 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
3293 static int
3294 fix_tick_ready (rtx next)
3295 {
3296 int tick, delay;
3297
3298 if (!sd_lists_empty_p (next, SD_LIST_RES_BACK))
3299 {
3300 int full_p;
3301 sd_iterator_def sd_it;
3302 dep_t dep;
3303
3304 tick = INSN_TICK (next);
3305       /* If tick is not equal to INVALID_TICK, then update
3306 INSN_TICK of NEXT with the most recent resolved dependence
3307 cost. Otherwise, recalculate from scratch. */
3308 full_p = (tick == INVALID_TICK);
3309
3310 FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
3311 {
3312 rtx pro = DEP_PRO (dep);
3313 int tick1;
3314
3315 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
3316
3317 tick1 = INSN_TICK (pro) + dep_cost (dep);
3318 if (tick1 > tick)
3319 tick = tick1;
3320
3321 if (!full_p)
3322 break;
3323 }
3324 }
3325 else
3326 tick = -1;
3327
3328 INSN_TICK (next) = tick;
3329
3330 delay = tick - clock_var;
3331 if (delay <= 0)
3332 delay = QUEUE_READY;
3333
3334 change_queue_index (next, delay);
3335
3336 return delay;
3337 }
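/* A small worked example for fix_tick_ready above (numbers are
   illustrative): if NEXT's only resolved producer was scheduled with
   INSN_TICK == 3 and the dependence cost is 2, then INSN_TICK (NEXT)
   becomes 5; with clock_var == 4 the delay is 1, so NEXT is queued for
   one cycle rather than added to the ready list.  */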
3338
3339 /* Move NEXT to the proper queue list with (DELAY >= 1),
3340 or add it to the ready list (DELAY == QUEUE_READY),
3341    or remove it from the ready and queue lists altogether (DELAY == QUEUE_NOWHERE).  */
3342 static void
3343 change_queue_index (rtx next, int delay)
3344 {
3345 int i = QUEUE_INDEX (next);
3346
3347 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
3348 && delay != 0);
3349 gcc_assert (i != QUEUE_SCHEDULED);
3350
3351 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
3352 || (delay < 0 && delay == i))
3353 /* We have nothing to do. */
3354 return;
3355
3356 /* Remove NEXT from wherever it is now. */
3357 if (i == QUEUE_READY)
3358 ready_remove_insn (next);
3359 else if (i >= 0)
3360 queue_remove (next);
3361
3362 /* Add it to the proper place. */
3363 if (delay == QUEUE_READY)
3364 ready_add (readyp, next, false);
3365 else if (delay >= 1)
3366 queue_insn (next, delay);
3367
3368 if (sched_verbose >= 2)
3369 {
3370 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
3371 (*current_sched_info->print_insn) (next, 0));
3372
3373 if (delay == QUEUE_READY)
3374 fprintf (sched_dump, " into ready\n");
3375 else if (delay >= 1)
3376 fprintf (sched_dump, " into queue with cost=%d\n", delay);
3377 else
3378 fprintf (sched_dump, " removed from ready or queue lists\n");
3379 }
3380 }
3381
3382 static int sched_ready_n_insns = -1;
3383
3384 /* Initialize per region data structures. */
3385 void
3386 sched_extend_ready_list (int new_sched_ready_n_insns)
3387 {
3388 int i;
3389
3390 if (sched_ready_n_insns == -1)
3391 /* At the first call we need to initialize one more choice_stack
3392 entry. */
3393 {
3394 i = 0;
3395 sched_ready_n_insns = 0;
3396 }
3397 else
3398 i = sched_ready_n_insns + 1;
3399
3400 ready.veclen = new_sched_ready_n_insns + issue_rate;
3401 ready.vec = XRESIZEVEC (rtx, ready.vec, ready.veclen);
3402
3403 gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
3404
3405 ready_try = (char *) xrecalloc (ready_try, new_sched_ready_n_insns,
3406 sched_ready_n_insns, sizeof (*ready_try));
3407
3408 /* We allocate +1 element to save initial state in the choice_stack[0]
3409 entry. */
3410 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
3411 new_sched_ready_n_insns + 1);
3412
3413 for (; i <= new_sched_ready_n_insns; i++)
3414 choice_stack[i].state = xmalloc (dfa_state_size);
3415
3416 sched_ready_n_insns = new_sched_ready_n_insns;
3417 }
3418
3419 /* Free per region data structures. */
3420 void
3421 sched_finish_ready_list (void)
3422 {
3423 int i;
3424
3425 free (ready.vec);
3426 ready.vec = NULL;
3427 ready.veclen = 0;
3428
3429 free (ready_try);
3430 ready_try = NULL;
3431
3432 for (i = 0; i <= sched_ready_n_insns; i++)
3433 free (choice_stack [i].state);
3434 free (choice_stack);
3435 choice_stack = NULL;
3436
3437 sched_ready_n_insns = -1;
3438 }
3439
3440 static int
3441 haifa_luid_for_non_insn (rtx x)
3442 {
3443 gcc_assert (NOTE_P (x) || LABEL_P (x));
3444
3445 return 0;
3446 }
3447
3448 /* Generates recovery code for INSN. */
3449 static void
3450 generate_recovery_code (rtx insn)
3451 {
3452 if (TODO_SPEC (insn) & BEGIN_SPEC)
3453 begin_speculative_block (insn);
3454
3455   /* Here we have an insn with no dependencies on
3456      instructions other than CHECK_SPEC ones.  */
3457
3458 if (TODO_SPEC (insn) & BE_IN_SPEC)
3459 add_to_speculative_block (insn);
3460 }
3461
3462 /* Helper function.
3463    Tries to add speculative dependencies of type FS between TWIN and the
3464    consumers of INSN's forward dependencies.  */
3465 static void
3466 process_insn_forw_deps_be_in_spec (rtx insn, rtx twin, ds_t fs)
3467 {
3468 sd_iterator_def sd_it;
3469 dep_t dep;
3470
3471 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
3472 {
3473 ds_t ds;
3474 rtx consumer;
3475
3476 consumer = DEP_CON (dep);
3477
3478 ds = DEP_STATUS (dep);
3479
3480 if (/* If we want to create speculative dep. */
3481 fs
3482 /* And we can do that because this is a true dep. */
3483 && (ds & DEP_TYPES) == DEP_TRUE)
3484 {
3485 gcc_assert (!(ds & BE_IN_SPEC));
3486
3487 if (/* If this dep can be overcome with 'begin speculation'. */
3488 ds & BEGIN_SPEC)
3489 /* Then we have a choice: keep the dep 'begin speculative'
3490 or transform it into 'be in speculative'. */
3491 {
3492 	      if (/* In try_ready we assert that once an insn became ready
3493 		     it can be removed from the ready (or queue) list only
3494 		     due to a backend decision.  Hence we can't let the
3495 		     probability of the speculative dep decrease.  */
3496 ds_weak (ds) <= ds_weak (fs))
3497 {
3498 ds_t new_ds;
3499
3500 new_ds = (ds & ~BEGIN_SPEC) | fs;
3501
3502 if (/* consumer can 'be in speculative'. */
3503 sched_insn_is_legitimate_for_speculation_p (consumer,
3504 new_ds))
3505 /* Transform it to be in speculative. */
3506 ds = new_ds;
3507 }
3508 }
3509 else
3510 /* Mark the dep as 'be in speculative'. */
3511 ds |= fs;
3512 }
3513
3514 {
3515 dep_def _new_dep, *new_dep = &_new_dep;
3516
3517 init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
3518 sd_add_dep (new_dep, false);
3519 }
3520 }
3521 }
3522
3523 /* Generates recovery code for BEGIN speculative INSN. */
3524 static void
3525 begin_speculative_block (rtx insn)
3526 {
3527 if (TODO_SPEC (insn) & BEGIN_DATA)
3528 nr_begin_data++;
3529 if (TODO_SPEC (insn) & BEGIN_CONTROL)
3530 nr_begin_control++;
3531
3532 create_check_block_twin (insn, false);
3533
3534 TODO_SPEC (insn) &= ~BEGIN_SPEC;
3535 }
3536
3537 static void haifa_init_insn (rtx);
3538
3539 /* Generates recovery code for BE_IN speculative INSN. */
3540 static void
3541 add_to_speculative_block (rtx insn)
3542 {
3543 ds_t ts;
3544 sd_iterator_def sd_it;
3545 dep_t dep;
3546 rtx twins = NULL;
3547 rtx_vec_t priorities_roots;
3548
3549 ts = TODO_SPEC (insn);
3550 gcc_assert (!(ts & ~BE_IN_SPEC));
3551
3552 if (ts & BE_IN_DATA)
3553 nr_be_in_data++;
3554 if (ts & BE_IN_CONTROL)
3555 nr_be_in_control++;
3556
3557 TODO_SPEC (insn) &= ~BE_IN_SPEC;
3558 gcc_assert (!TODO_SPEC (insn));
3559
3560 DONE_SPEC (insn) |= ts;
3561
3562 /* First we convert all simple checks to branchy. */
3563 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3564 sd_iterator_cond (&sd_it, &dep);)
3565 {
3566 rtx check = DEP_PRO (dep);
3567
3568 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
3569 {
3570 create_check_block_twin (check, true);
3571
3572 /* Restart search. */
3573 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3574 }
3575 else
3576 /* Continue search. */
3577 sd_iterator_next (&sd_it);
3578 }
3579
3580 priorities_roots = NULL;
3581 clear_priorities (insn, &priorities_roots);
3582
3583 while (1)
3584 {
3585 rtx check, twin;
3586 basic_block rec;
3587
3588 /* Get the first backward dependency of INSN. */
3589 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3590 if (!sd_iterator_cond (&sd_it, &dep))
3591 /* INSN has no backward dependencies left. */
3592 break;
3593
3594 gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
3595 && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
3596 && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
3597
3598 check = DEP_PRO (dep);
3599
3600 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
3601 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
3602
3603 rec = BLOCK_FOR_INSN (check);
3604
3605 twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
3606 haifa_init_insn (twin);
3607
3608 sd_copy_back_deps (twin, insn, true);
3609
3610 if (sched_verbose && spec_info->dump)
3611 /* INSN_BB (insn) isn't determined for twin insns yet.
3612 So we can't use current_sched_info->print_insn. */
3613 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3614 INSN_UID (twin), rec->index);
3615
3616 twins = alloc_INSN_LIST (twin, twins);
3617
3618 /* Add dependences between TWIN and all appropriate
3619 instructions from REC. */
3620 FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
3621 {
3622 rtx pro = DEP_PRO (dep);
3623
3624 gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
3625
3626 /* INSN might have dependencies from the instructions from
3627 several recovery blocks. At this iteration we process those
3628 producers that reside in REC. */
3629 if (BLOCK_FOR_INSN (pro) == rec)
3630 {
3631 dep_def _new_dep, *new_dep = &_new_dep;
3632
3633 init_dep (new_dep, pro, twin, REG_DEP_TRUE);
3634 sd_add_dep (new_dep, false);
3635 }
3636 }
3637
3638 process_insn_forw_deps_be_in_spec (insn, twin, ts);
3639
3640 /* Remove all dependencies between INSN and insns in REC. */
3641 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3642 sd_iterator_cond (&sd_it, &dep);)
3643 {
3644 rtx pro = DEP_PRO (dep);
3645
3646 if (BLOCK_FOR_INSN (pro) == rec)
3647 sd_delete_dep (sd_it);
3648 else
3649 sd_iterator_next (&sd_it);
3650 }
3651 }
3652
3653 /* We couldn't have added the dependencies between INSN and TWINS earlier
3654 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
3655 while (twins)
3656 {
3657 rtx twin;
3658
3659 twin = XEXP (twins, 0);
3660
3661 {
3662 dep_def _new_dep, *new_dep = &_new_dep;
3663
3664 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
3665 sd_add_dep (new_dep, false);
3666 }
3667
3668 twin = XEXP (twins, 1);
3669 free_INSN_LIST_node (twins);
3670 twins = twin;
3671 }
3672
3673 calc_priorities (priorities_roots);
3674 VEC_free (rtx, heap, priorities_roots);
3675 }
3676
3677 /* Extend the array pointed to by P and fill only the new part with zeros.  */
3678 void *
3679 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
3680 {
3681 gcc_assert (new_nmemb >= old_nmemb);
3682 p = XRESIZEVAR (void, p, new_nmemb * size);
3683 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
3684 return p;
3685 }
3686
3687 /* Helper function.
3688 Find fallthru edge from PRED. */
3689 edge
3690 find_fallthru_edge (basic_block pred)
3691 {
3692 edge e;
3693 edge_iterator ei;
3694 basic_block succ;
3695
3696 succ = pred->next_bb;
3697 gcc_assert (succ->prev_bb == pred);
3698
3699 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
3700 {
3701 FOR_EACH_EDGE (e, ei, pred->succs)
3702 if (e->flags & EDGE_FALLTHRU)
3703 {
3704 gcc_assert (e->dest == succ);
3705 return e;
3706 }
3707 }
3708 else
3709 {
3710 FOR_EACH_EDGE (e, ei, succ->preds)
3711 if (e->flags & EDGE_FALLTHRU)
3712 {
3713 gcc_assert (e->src == pred);
3714 return e;
3715 }
3716 }
3717
3718 return NULL;
3719 }
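/* Note on find_fallthru_edge above: a fallthru edge from PRED, if any,
   must lead to PRED->next_bb, so it appears both in PRED's successor list
   and in that block's predecessor list; we simply scan whichever of the
   two lists is shorter.  */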
3720
3721 /* Extend per basic block data structures. */
3722 static void
3723 sched_extend_bb (void)
3724 {
3725 rtx insn;
3726
3727   /* The following is done to keep current_sched_info->next_tail non-null.  */
3728 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
3729 if (NEXT_INSN (insn) == 0
3730 || (!NOTE_P (insn)
3731 && !LABEL_P (insn)
3732 /* Don't emit a NOTE if it would end up before a BARRIER. */
3733 && !BARRIER_P (NEXT_INSN (insn))))
3734 {
3735 rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
3736       /* Make the note appear outside BB.  */
3737 set_block_for_insn (note, NULL);
3738 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
3739 }
3740 }
3741
3742 /* Init per basic block data structures. */
3743 void
3744 sched_init_bbs (void)
3745 {
3746 sched_extend_bb ();
3747 }
3748
3749 /* Initialize BEFORE_RECOVERY variable. */
3750 static void
3751 init_before_recovery (basic_block *before_recovery_ptr)
3752 {
3753 basic_block last;
3754 edge e;
3755
3756 last = EXIT_BLOCK_PTR->prev_bb;
3757 e = find_fallthru_edge (last);
3758
3759 if (e)
3760 {
3761       /* We create two basic blocks:
3762 	 1. A single-instruction block that is inserted right after E->SRC
3763 	    and has a jump to
3764 	 2. An empty block placed right before EXIT_BLOCK.
3765 	 Recovery blocks will be emitted between these two blocks.  */
3766
3767 basic_block single, empty;
3768 rtx x, label;
3769
3770       /* If the fallthru edge to exit that we've found comes from the block
3771 	 we created before, don't do anything more.  */
3772 if (last == after_recovery)
3773 return;
3774
3775 adding_bb_to_current_region_p = false;
3776
3777 single = sched_create_empty_bb (last);
3778 empty = sched_create_empty_bb (single);
3779
3780 /* Add new blocks to the root loop. */
3781 if (current_loops != NULL)
3782 {
3783 add_bb_to_loop (single, VEC_index (loop_p, current_loops->larray, 0));
3784 add_bb_to_loop (empty, VEC_index (loop_p, current_loops->larray, 0));
3785 }
3786
3787 single->count = last->count;
3788 empty->count = last->count;
3789 single->frequency = last->frequency;
3790 empty->frequency = last->frequency;
3791 BB_COPY_PARTITION (single, last);
3792 BB_COPY_PARTITION (empty, last);
3793
3794 redirect_edge_succ (e, single);
3795 make_single_succ_edge (single, empty, 0);
3796 make_single_succ_edge (empty, EXIT_BLOCK_PTR,
3797 EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
3798
3799 label = block_label (empty);
3800 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
3801 JUMP_LABEL (x) = label;
3802 LABEL_NUSES (label)++;
3803 haifa_init_insn (x);
3804
3805 emit_barrier_after (x);
3806
3807 sched_init_only_bb (empty, NULL);
3808 sched_init_only_bb (single, NULL);
3809 sched_extend_bb ();
3810
3811 adding_bb_to_current_region_p = true;
3812 before_recovery = single;
3813 after_recovery = empty;
3814
3815 if (before_recovery_ptr)
3816 *before_recovery_ptr = before_recovery;
3817
3818 if (sched_verbose >= 2 && spec_info->dump)
3819 fprintf (spec_info->dump,
3820 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
3821 last->index, single->index, empty->index);
3822 }
3823 else
3824 before_recovery = last;
3825 }
3826
3827 /* Returns new recovery block. */
3828 basic_block
3829 sched_create_recovery_block (basic_block *before_recovery_ptr)
3830 {
3831 rtx label;
3832 rtx barrier;
3833 basic_block rec;
3834
3835 haifa_recovery_bb_recently_added_p = true;
3836 haifa_recovery_bb_ever_added_p = true;
3837
3838 init_before_recovery (before_recovery_ptr);
3839
3840 barrier = get_last_bb_insn (before_recovery);
3841 gcc_assert (BARRIER_P (barrier));
3842
3843 label = emit_label_after (gen_label_rtx (), barrier);
3844
3845 rec = create_basic_block (label, label, before_recovery);
3846
3847 /* A recovery block always ends with an unconditional jump. */
3848 emit_barrier_after (BB_END (rec));
3849
3850 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
3851 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
3852
3853 if (sched_verbose && spec_info->dump)
3854 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
3855 rec->index);
3856
3857 return rec;
3858 }
3859
3860 /* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
3861 and emit necessary jumps. */
3862 void
3863 sched_create_recovery_edges (basic_block first_bb, basic_block rec,
3864 basic_block second_bb)
3865 {
3866 rtx label;
3867 rtx jump;
3868 edge e;
3869 int edge_flags;
3870
3871 /* Fix the incoming edge. */
3872 /* ??? Which other flags should be specified? */
3873 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
3874 /* Partition type is the same, if it is "unpartitioned". */
3875 edge_flags = EDGE_CROSSING;
3876 else
3877 edge_flags = 0;
3878
3879 e = make_edge (first_bb, rec, edge_flags);
3880 label = block_label (second_bb);
3881 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
3882 JUMP_LABEL (jump) = label;
3883 LABEL_NUSES (label)++;
3884
3885 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
3886 /* Partition type is the same, if it is "unpartitioned". */
3887 {
3888 /* Rewritten from cfgrtl.c. */
3889 if (flag_reorder_blocks_and_partition
3890 && targetm.have_named_sections)
3891 /* We don't need the same note for the check because
3892 any_condjump_p (check) == true. */
3893 {
3894 REG_NOTES (jump) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP,
3895 NULL_RTX,
3896 REG_NOTES (jump));
3897 }
3898 edge_flags = EDGE_CROSSING;
3899 }
3900 else
3901 edge_flags = 0;
3902
3903 make_single_succ_edge (rec, second_bb, edge_flags);
3904 }
3905
3906 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
3907    INSN is a simple check that should be converted to a branchy one. */
3908 static void
3909 create_check_block_twin (rtx insn, bool mutate_p)
3910 {
3911 basic_block rec;
3912 rtx label, check, twin;
3913 ds_t fs;
3914 sd_iterator_def sd_it;
3915 dep_t dep;
3916 dep_def _new_dep, *new_dep = &_new_dep;
3917 ds_t todo_spec;
3918
3919 gcc_assert (ORIG_PAT (insn) != NULL_RTX);
3920
3921 if (!mutate_p)
3922 todo_spec = TODO_SPEC (insn);
3923 else
3924 {
3925 gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
3926 && (TODO_SPEC (insn) & SPECULATIVE) == 0);
3927
3928 todo_spec = CHECK_SPEC (insn);
3929 }
3930
3931 todo_spec &= SPECULATIVE;
3932
3933 /* Create recovery block. */
3934 if (mutate_p || targetm.sched.needs_block_p (insn))
3935 {
3936 rec = sched_create_recovery_block (NULL);
3937 label = BB_HEAD (rec);
3938 }
3939 else
3940 {
3941 rec = EXIT_BLOCK_PTR;
3942 label = NULL_RTX;
3943 }
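  /* At this point REC is either a freshly created recovery block (for a
     branchy check that will jump to it) or EXIT_BLOCK_PTR (for a simple
     check that needs no recovery code).  */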
3944
3945 /* Emit CHECK. */
3946 check = targetm.sched.gen_spec_check (insn, label, mutate_p);
3947
3948 if (rec != EXIT_BLOCK_PTR)
3949 {
3950       /* To have mem_reg alive at the beginning of second_bb,
3951 	 we emit the check BEFORE insn, so that after splitting,
3952 	 insn ends up at the beginning of second_bb, which gives
3953 	 us the correct life information. */
3954 check = emit_jump_insn_before (check, insn);
3955 JUMP_LABEL (check) = label;
3956 LABEL_NUSES (label)++;
3957 }
3958 else
3959 check = emit_insn_before (check, insn);
3960
3961 /* Extend data structures. */
3962 haifa_init_insn (check);
3963
3964 /* CHECK is being added to current region. Extend ready list. */
3965 gcc_assert (sched_ready_n_insns != -1);
3966 sched_extend_ready_list (sched_ready_n_insns + 1);
3967
3968 if (current_sched_info->add_remove_insn)
3969 current_sched_info->add_remove_insn (insn, 0);
3970
3971 RECOVERY_BLOCK (check) = rec;
3972
3973 if (sched_verbose && spec_info->dump)
3974 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
3975 (*current_sched_info->print_insn) (check, 0));
3976
3977 gcc_assert (ORIG_PAT (insn));
3978
3979 /* Initialize TWIN (twin is a duplicate of original instruction
3980 in the recovery block). */
3981 if (rec != EXIT_BLOCK_PTR)
3982 {
3983 sd_iterator_def sd_it;
3984 dep_t dep;
3985
3986 FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
3987 if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
3988 {
3989 struct _dep _dep2, *dep2 = &_dep2;
3990
3991 init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);
3992
3993 sd_add_dep (dep2, true);
3994 }
3995
3996 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
3997 haifa_init_insn (twin);
3998
3999 if (sched_verbose && spec_info->dump)
4000 /* INSN_BB (insn) isn't determined for twin insns yet.
4001 So we can't use current_sched_info->print_insn. */
4002 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
4003 INSN_UID (twin), rec->index);
4004 }
4005 else
4006 {
4007 ORIG_PAT (check) = ORIG_PAT (insn);
4008 HAS_INTERNAL_DEP (check) = 1;
4009 twin = check;
4010 /* ??? We probably should change all OUTPUT dependencies to
4011 (TRUE | OUTPUT). */
4012 }
4013
4014 /* Copy all resolved back dependencies of INSN to TWIN. This will
4015 provide correct value for INSN_TICK (TWIN). */
4016 sd_copy_back_deps (twin, insn, true);
4017
4018 if (rec != EXIT_BLOCK_PTR)
4019 /* In case of branchy check, fix CFG. */
4020 {
4021 basic_block first_bb, second_bb;
4022 rtx jump;
4023
4024 first_bb = BLOCK_FOR_INSN (check);
4025 second_bb = sched_split_block (first_bb, check);
4026
4027 sched_create_recovery_edges (first_bb, rec, second_bb);
4028
4029 sched_init_only_bb (second_bb, first_bb);
4030 sched_init_only_bb (rec, EXIT_BLOCK_PTR);
4031
4032 jump = BB_END (rec);
4033 haifa_init_insn (jump);
4034 }
4035
4036 /* Move backward dependences from INSN to CHECK and
4037 move forward dependences from INSN to TWIN. */
4038
4039 /* First, create dependencies between INSN's producers and CHECK & TWIN. */
4040 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4041 {
4042 rtx pro = DEP_PRO (dep);
4043 ds_t ds;
4044
4045 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
4046 check --TRUE--> producer ??? or ANTI ???
4047 twin --TRUE--> producer
4048 twin --ANTI--> check
4049
4050 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
4051 check --ANTI--> producer
4052 twin --ANTI--> producer
4053 twin --ANTI--> check
4054
4055 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
4056 check ~~TRUE~~> producer
4057 twin ~~TRUE~~> producer
4058 twin --ANTI--> check */
4059
4060 ds = DEP_STATUS (dep);
4061
4062 if (ds & BEGIN_SPEC)
4063 {
4064 gcc_assert (!mutate_p);
4065 ds &= ~BEGIN_SPEC;
4066 }
4067
4068 init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
4069 sd_add_dep (new_dep, false);
4070
4071 if (rec != EXIT_BLOCK_PTR)
4072 {
4073 DEP_CON (new_dep) = twin;
4074 sd_add_dep (new_dep, false);
4075 }
4076 }
4077
4078 /* Second, remove backward dependencies of INSN. */
4079 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4080 sd_iterator_cond (&sd_it, &dep);)
4081 {
4082 if ((DEP_STATUS (dep) & BEGIN_SPEC)
4083 || mutate_p)
4084 /* We can delete this dep because we overcome it with
4085 BEGIN_SPECULATION. */
4086 sd_delete_dep (sd_it);
4087 else
4088 sd_iterator_next (&sd_it);
4089 }
4090
4091 /* Future Speculations. Determine what BE_IN speculations will be like. */
4092 fs = 0;
4093
4094 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
4095 here. */
4096
4097 gcc_assert (!DONE_SPEC (insn));
4098
4099 if (!mutate_p)
4100 {
4101 ds_t ts = TODO_SPEC (insn);
4102
4103 DONE_SPEC (insn) = ts & BEGIN_SPEC;
4104 CHECK_SPEC (check) = ts & BEGIN_SPEC;
4105
4106       /* The weakness of the future BE_IN speculations depends solely on
4107 	 the initial BEGIN speculation. */
4108 if (ts & BEGIN_DATA)
4109 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
4110 if (ts & BEGIN_CONTROL)
4111 fs = set_dep_weak (fs, BE_IN_CONTROL,
4112 get_dep_weak (ts, BEGIN_CONTROL));
4113 }
4114 else
4115 CHECK_SPEC (check) = CHECK_SPEC (insn);
4116
4117 /* Future speculations: call the helper. */
4118 process_insn_forw_deps_be_in_spec (insn, twin, fs);
4119
4120 if (rec != EXIT_BLOCK_PTR)
4121 {
4122       /* Which types of dependencies to use here is, generally,
4123 	 a machine-dependent question... But, for now,
4124 	 it is not. */
4125
4126 if (!mutate_p)
4127 {
4128 init_dep (new_dep, insn, check, REG_DEP_TRUE);
4129 sd_add_dep (new_dep, false);
4130
4131 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
4132 sd_add_dep (new_dep, false);
4133 }
4134 else
4135 {
4136 if (spec_info->dump)
4137 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
4138 (*current_sched_info->print_insn) (insn, 0));
4139
4140 /* Remove all dependencies of the INSN. */
4141 {
4142 sd_it = sd_iterator_start (insn, (SD_LIST_FORW
4143 | SD_LIST_BACK
4144 | SD_LIST_RES_BACK));
4145 while (sd_iterator_cond (&sd_it, &dep))
4146 sd_delete_dep (sd_it);
4147 }
4148
4149 	  /* If the former check (INSN) was already moved to the ready (or
4150 	     queue) list, add the new check (CHECK) there too. */
4151 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
4152 try_ready (check);
4153
4154 /* Remove old check from instruction stream and free its
4155 data. */
4156 sched_remove_insn (insn);
4157 }
4158
4159 init_dep (new_dep, check, twin, REG_DEP_ANTI);
4160 sd_add_dep (new_dep, false);
4161 }
4162 else
4163 {
4164 init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
4165 sd_add_dep (new_dep, false);
4166 }
4167
4168 if (!mutate_p)
4169 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
4170 because it'll be done later in add_to_speculative_block. */
4171 {
4172 rtx_vec_t priorities_roots = NULL;
4173
4174 clear_priorities (twin, &priorities_roots);
4175 calc_priorities (priorities_roots);
4176 VEC_free (rtx, heap, priorities_roots);
4177 }
4178 }
4179
4180 /* Remove dependencies between instructions in the recovery block REC
4181    and ordinary region instructions. Keep the inner dependencies so
4182    they will not have to be recomputed. */
4183 static void
4184 fix_recovery_deps (basic_block rec)
4185 {
4186 rtx note, insn, jump, ready_list = 0;
4187 bitmap_head in_ready;
4188 rtx link;
4189
4190 bitmap_initialize (&in_ready, 0);
4191
4192 /* NOTE - a basic block note. */
4193 note = NEXT_INSN (BB_HEAD (rec));
4194 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4195 insn = BB_END (rec);
4196 gcc_assert (JUMP_P (insn));
4197 insn = PREV_INSN (insn);
4198
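  /* Walk the block body backwards, from the insn before the jump down to the
     basic block note, removing forward dependencies whose consumers lie
     outside REC and collecting those consumers for a later try_ready.  */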
4199 do
4200 {
4201 sd_iterator_def sd_it;
4202 dep_t dep;
4203
4204 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4205 sd_iterator_cond (&sd_it, &dep);)
4206 {
4207 rtx consumer = DEP_CON (dep);
4208
4209 if (BLOCK_FOR_INSN (consumer) != rec)
4210 {
4211 sd_delete_dep (sd_it);
4212
4213 if (!bitmap_bit_p (&in_ready, INSN_LUID (consumer)))
4214 {
4215 ready_list = alloc_INSN_LIST (consumer, ready_list);
4216 bitmap_set_bit (&in_ready, INSN_LUID (consumer));
4217 }
4218 }
4219 else
4220 {
4221 gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
4222
4223 sd_iterator_next (&sd_it);
4224 }
4225 }
4226
4227 insn = PREV_INSN (insn);
4228 }
4229 while (insn != note);
4230
4231 bitmap_clear (&in_ready);
4232
4233 /* Try to add instructions to the ready or queue list. */
4234 for (link = ready_list; link; link = XEXP (link, 1))
4235 try_ready (XEXP (link, 0));
4236 free_INSN_LIST_list (&ready_list);
4237
4238 /* Fixing jump's dependences. */
4239 insn = BB_HEAD (rec);
4240 jump = BB_END (rec);
4241
4242 gcc_assert (LABEL_P (insn));
4243 insn = NEXT_INSN (insn);
4244
4245 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4246 add_jump_dependencies (insn, jump);
4247 }
4248
4249 /* Change pattern of INSN to NEW_PAT. */
4250 void
4251 sched_change_pattern (rtx insn, rtx new_pat)
4252 {
4253 int t;
4254
4255 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
4256 gcc_assert (t);
4257 dfa_clear_single_insn_cache (insn);
4258 }
4259
4260 /* Change pattern of INSN to NEW_PAT. Invalidate cached haifa
4261 instruction data. */
4262 static void
4263 haifa_change_pattern (rtx insn, rtx new_pat)
4264 {
4265 sched_change_pattern (insn, new_pat);
4266
4267 /* Invalidate INSN_COST, so it'll be recalculated. */
4268 INSN_COST (insn) = -1;
4269 /* Invalidate INSN_TICK, so it'll be recalculated. */
4270 INSN_TICK (insn) = INVALID_TICK;
4271 }
4272
4273 /* -1 - INSN can't be speculated,
4274    0 - for speculation with REQUEST mode it is OK to use the
4275    current instruction pattern,
4276    1 - the pattern must be changed to *NEW_PAT for INSN to be speculative. */
4277 int
4278 sched_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4279 {
4280 gcc_assert (current_sched_info->flags & DO_SPECULATION
4281 && (request & SPECULATIVE)
4282 && sched_insn_is_legitimate_for_speculation_p (insn, request));
4283
4284 if ((request & spec_info->mask) != request)
4285 return -1;
4286
4287 if (request & BE_IN_SPEC
4288 && !(request & BEGIN_SPEC))
4289 return 0;
4290
4291 return targetm.sched.speculate_insn (insn, request, new_pat);
4292 }
4293
4294 static int
4295 haifa_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4296 {
4297 gcc_assert (sched_deps_info->generate_spec_deps
4298 && !IS_SPECULATION_CHECK_P (insn));
4299
4300 if (HAS_INTERNAL_DEP (insn)
4301 || SCHED_GROUP_P (insn))
4302 return -1;
4303
4304 return sched_speculate_insn (insn, request, new_pat);
4305 }
4306
4307 /* Print some information about block BB, which starts with HEAD and
4308    ends with TAIL, before scheduling it.
4309    I is zero if the scheduler is about to start with a fresh ebb. */
4310 static void
4311 dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
4312 {
4313 if (!i)
4314 fprintf (sched_dump,
4315 ";; ======================================================\n");
4316 else
4317 fprintf (sched_dump,
4318 ";; =====================ADVANCING TO=====================\n");
4319 fprintf (sched_dump,
4320 ";; -- basic block %d from %d to %d -- %s reload\n",
4321 bb->index, INSN_UID (head), INSN_UID (tail),
4322 (reload_completed ? "after" : "before"));
4323 fprintf (sched_dump,
4324 ";; ======================================================\n");
4325 fprintf (sched_dump, "\n");
4326 }
4327
4328 /* Unlink basic block notes and labels and save them, so they
4329    can be easily restored. We unlink basic block notes in the EBB to
4330    provide backward compatibility with the previous code, as target
4331    backends assume that there will be only instructions between
4332    current_sched_info->{head and tail}. We restore these notes as soon
4333    as we can.
4334    FIRST (LAST) is the first (last) basic block in the ebb.
4335    NB: In the usual case (FIRST == LAST) nothing is actually done. */
4336 void
4337 unlink_bb_notes (basic_block first, basic_block last)
4338 {
4339 /* We DON'T unlink basic block notes of the first block in the ebb. */
4340 if (first == last)
4341 return;
4342
4343 bb_header = XNEWVEC (rtx, last_basic_block);
4344
4345 /* Make a sentinel. */
4346 if (last->next_bb != EXIT_BLOCK_PTR)
4347 bb_header[last->next_bb->index] = 0;
4348
4349 first = first->next_bb;
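  /* Walk the blocks of the ebb from LAST back to the block after the original
     FIRST, unlinking each block's label (if any) and basic block note from the
     insn chain and stashing them in bb_header.  */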
4350 do
4351 {
4352 rtx prev, label, note, next;
4353
4354 label = BB_HEAD (last);
4355 if (LABEL_P (label))
4356 note = NEXT_INSN (label);
4357 else
4358 note = label;
4359 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4360
4361 prev = PREV_INSN (label);
4362 next = NEXT_INSN (note);
4363 gcc_assert (prev && next);
4364
4365 NEXT_INSN (prev) = next;
4366 PREV_INSN (next) = prev;
4367
4368 bb_header[last->index] = label;
4369
4370 if (last == first)
4371 break;
4372
4373 last = last->prev_bb;
4374 }
4375 while (1);
4376 }
4377
4378 /* Restore basic block notes.
4379 FIRST is the first basic block in the ebb. */
4380 static void
4381 restore_bb_notes (basic_block first)
4382 {
4383 if (!bb_header)
4384 return;
4385
4386 /* We DON'T unlink basic block notes of the first block in the ebb. */
4387 first = first->next_bb;
4388 /* Remember: FIRST is now actually the second basic block in the ebb. */
4389
4390 while (first != EXIT_BLOCK_PTR
4391 && bb_header[first->index])
4392 {
4393 rtx prev, label, note, next;
4394
4395 label = bb_header[first->index];
4396 prev = PREV_INSN (label);
4397 next = NEXT_INSN (prev);
4398
4399 if (LABEL_P (label))
4400 note = NEXT_INSN (label);
4401 else
4402 note = label;
4403 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4404
4405 bb_header[first->index] = 0;
4406
4407 NEXT_INSN (prev) = label;
4408 NEXT_INSN (note) = next;
4409 PREV_INSN (next) = note;
4410
4411 first = first->next_bb;
4412 }
4413
4414 free (bb_header);
4415 bb_header = 0;
4416 }
4417
4418 /* Helper function.
4419 Fix CFG after both in- and inter-block movement of
4420 control_flow_insn_p JUMP. */
4421 static void
4422 fix_jump_move (rtx jump)
4423 {
4424 basic_block bb, jump_bb, jump_bb_next;
4425
4426 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4427 jump_bb = BLOCK_FOR_INSN (jump);
4428 jump_bb_next = jump_bb->next_bb;
4429
4430 gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
4431 || IS_SPECULATION_BRANCHY_CHECK_P (jump));
4432
4433 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
4434 /* if jump_bb_next is not empty. */
4435 BB_END (jump_bb) = BB_END (jump_bb_next);
4436
4437 if (BB_END (bb) != PREV_INSN (jump))
4438     /* Then there are instructions after jump that should be moved
4439        to jump_bb_next. */
4440 BB_END (jump_bb_next) = BB_END (bb);
4441 else
4442 /* Otherwise jump_bb_next is empty. */
4443 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
4444
4445 /* To make assertion in move_insn happy. */
4446 BB_END (bb) = PREV_INSN (jump);
4447
4448 update_bb_for_insn (jump_bb_next);
4449 }
4450
4451 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
4452 static void
4453 move_block_after_check (rtx jump)
4454 {
4455 basic_block bb, jump_bb, jump_bb_next;
4456 VEC(edge,gc) *t;
4457
4458 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4459 jump_bb = BLOCK_FOR_INSN (jump);
4460 jump_bb_next = jump_bb->next_bb;
4461
4462 update_bb_for_insn (jump_bb);
4463
4464 gcc_assert (IS_SPECULATION_CHECK_P (jump)
4465 || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
4466
4467 unlink_block (jump_bb_next);
4468 link_block (jump_bb_next, bb);
4469
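  /* Rotate the successor edge vectors: BB takes JUMP_BB's successors,
     JUMP_BB takes JUMP_BB_NEXT's, and JUMP_BB_NEXT takes BB's original
     successors.  */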
4470 t = bb->succs;
4471 bb->succs = 0;
4472 move_succs (&(jump_bb->succs), bb);
4473 move_succs (&(jump_bb_next->succs), jump_bb);
4474 move_succs (&t, jump_bb_next);
4475
4476 df_mark_solutions_dirty ();
4477
4478 common_sched_info->fix_recovery_cfg
4479 (bb->index, jump_bb->index, jump_bb_next->index);
4480 }
4481
4482 /* Helper function for move_block_after_check.
4483    This function attaches the edge vector pointed to by SUCCSP to
4484    block TO. */
4485 static void
4486 move_succs (VEC(edge,gc) **succsp, basic_block to)
4487 {
4488 edge e;
4489 edge_iterator ei;
4490
4491 gcc_assert (to->succs == 0);
4492
4493 to->succs = *succsp;
4494
4495 FOR_EACH_EDGE (e, ei, to->succs)
4496 e->src = to;
4497
4498 *succsp = 0;
4499 }
4500
4501 /* Remove INSN from the instruction stream.
4502    INSN should not have any dependencies. */
4503 static void
4504 sched_remove_insn (rtx insn)
4505 {
4506 sd_finish_insn (insn);
4507
4508 change_queue_index (insn, QUEUE_NOWHERE);
4509 current_sched_info->add_remove_insn (insn, 1);
4510 remove_insn (insn);
4511 }
4512
4513 /* Clear the priorities of all instructions that are forward dependent
4514    on INSN. Store in the vector pointed to by ROOTS_PTR the insns on which
4515    priority () should be invoked to reinitialize all cleared priorities. */
4516 static void
4517 clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
4518 {
4519 sd_iterator_def sd_it;
4520 dep_t dep;
4521 bool insn_is_root_p = true;
4522
4523 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
4524
4525 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4526 {
4527 rtx pro = DEP_PRO (dep);
4528
4529 if (INSN_PRIORITY_STATUS (pro) >= 0
4530 && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
4531 {
4532 /* If DEP doesn't contribute to priority then INSN itself should
4533 be added to priority roots. */
4534 if (contributes_to_priority_p (dep))
4535 insn_is_root_p = false;
4536
4537 INSN_PRIORITY_STATUS (pro) = -1;
4538 clear_priorities (pro, roots_ptr);
4539 }
4540 }
4541
4542 if (insn_is_root_p)
4543 VEC_safe_push (rtx, heap, *roots_ptr, insn);
4544 }
4545
4546 /* Recompute the priorities of instructions whose priorities might have
4547    changed. ROOTS is a vector of instructions whose priority computation
4548    will trigger initialization of all cleared priorities. */
4549 static void
4550 calc_priorities (rtx_vec_t roots)
4551 {
4552 int i;
4553 rtx insn;
4554
4555 for (i = 0; VEC_iterate (rtx, roots, i, insn); i++)
4556 priority (insn);
4557 }
4558
4559
4560 /* Add dependencies between JUMP and the other instructions in the recovery
4561    block. INSN is the first insn in the recovery block. */
4562 static void
4563 add_jump_dependencies (rtx insn, rtx jump)
4564 {
4565 do
4566 {
4567 insn = NEXT_INSN (insn);
4568 if (insn == jump)
4569 break;
4570
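      /* Presumably only insns with no forward dependencies need an explicit
	 anti dependence on the jump; the others are kept before it
	 transitively through their consumers.  */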
4571 if (sd_lists_empty_p (insn, SD_LIST_FORW))
4572 {
4573 dep_def _new_dep, *new_dep = &_new_dep;
4574
4575 init_dep (new_dep, insn, jump, REG_DEP_ANTI);
4576 sd_add_dep (new_dep, false);
4577 }
4578 }
4579 while (1);
4580
4581 gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
4582 }
4583
4584 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
4585 rtx
4586 bb_note (basic_block bb)
4587 {
4588 rtx note;
4589
4590 note = BB_HEAD (bb);
4591 if (LABEL_P (note))
4592 note = NEXT_INSN (note);
4593
4594 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4595 return note;
4596 }
4597
4598 #ifdef ENABLE_CHECKING
4599 /* Helper function for check_cfg.
4600    Return nonzero if the edge vector pointed to by EL has an edge with
4601    TYPE in its flags. */
4602 static int
4603 has_edge_p (VEC(edge,gc) *el, int type)
4604 {
4605 edge e;
4606 edge_iterator ei;
4607
4608 FOR_EACH_EDGE (e, ei, el)
4609 if (e->flags & type)
4610 return 1;
4611 return 0;
4612 }
4613
4614 /* Check a few properties of the CFG between HEAD and TAIL.
4615    If HEAD (TAIL) is NULL, check from the beginning (to the end) of the
4616    instruction stream. */
4617 static void
4618 check_cfg (rtx head, rtx tail)
4619 {
4620 rtx next_tail;
4621 basic_block bb = 0;
4622 int not_first = 0, not_last;
4623
4624 if (head == NULL)
4625 head = get_insns ();
4626 if (tail == NULL)
4627 tail = get_last_insn ();
4628 next_tail = NEXT_INSN (tail);
4629
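  /* Walk all insns from HEAD to TAIL and verify that the insn chain links
     are consistent and that block boundaries and control flow insns agree
     with the CFG.  */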
4630 do
4631 {
4632 not_last = head != tail;
4633
4634 if (not_first)
4635 gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
4636 if (not_last)
4637 gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
4638
4639 if (LABEL_P (head)
4640 || (NOTE_INSN_BASIC_BLOCK_P (head)
4641 && (!not_first
4642 || (not_first && !LABEL_P (PREV_INSN (head))))))
4643 {
4644 gcc_assert (bb == 0);
4645 bb = BLOCK_FOR_INSN (head);
4646 if (bb != 0)
4647 gcc_assert (BB_HEAD (bb) == head);
4648 else
4649 /* This is the case of jump table. See inside_basic_block_p (). */
4650 gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
4651 }
4652
4653 if (bb == 0)
4654 {
4655 gcc_assert (!inside_basic_block_p (head));
4656 head = NEXT_INSN (head);
4657 }
4658 else
4659 {
4660 gcc_assert (inside_basic_block_p (head)
4661 || NOTE_P (head));
4662 gcc_assert (BLOCK_FOR_INSN (head) == bb);
4663
4664 if (LABEL_P (head))
4665 {
4666 head = NEXT_INSN (head);
4667 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
4668 }
4669 else
4670 {
4671 if (control_flow_insn_p (head))
4672 {
4673 gcc_assert (BB_END (bb) == head);
4674
4675 if (any_uncondjump_p (head))
4676 gcc_assert (EDGE_COUNT (bb->succs) == 1
4677 && BARRIER_P (NEXT_INSN (head)));
4678 else if (any_condjump_p (head))
4679 gcc_assert (/* Usual case. */
4680 (EDGE_COUNT (bb->succs) > 1
4681 && !BARRIER_P (NEXT_INSN (head)))
4682 /* Or jump to the next instruction. */
4683 || (EDGE_COUNT (bb->succs) == 1
4684 && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
4685 == JUMP_LABEL (head))));
4686 }
4687 if (BB_END (bb) == head)
4688 {
4689 if (EDGE_COUNT (bb->succs) > 1)
4690 gcc_assert (control_flow_insn_p (head)
4691 || has_edge_p (bb->succs, EDGE_COMPLEX));
4692 bb = 0;
4693 }
4694
4695 head = NEXT_INSN (head);
4696 }
4697 }
4698
4699 not_first = 1;
4700 }
4701 while (head != next_tail);
4702
4703 gcc_assert (bb == 0);
4704 }
4705
4706 #endif /* ENABLE_CHECKING */
4707
4708 const struct sched_scan_info_def *sched_scan_info;
4709
4710 /* Extend per basic block data structures. */
4711 static void
4712 extend_bb (void)
4713 {
4714 if (sched_scan_info->extend_bb)
4715 sched_scan_info->extend_bb ();
4716 }
4717
4718 /* Init data for BB. */
4719 static void
4720 init_bb (basic_block bb)
4721 {
4722 if (sched_scan_info->init_bb)
4723 sched_scan_info->init_bb (bb);
4724 }
4725
4726 /* Extend per insn data structures. */
4727 static void
4728 extend_insn (void)
4729 {
4730 if (sched_scan_info->extend_insn)
4731 sched_scan_info->extend_insn ();
4732 }
4733
4734 /* Init data structures for INSN. */
4735 static void
4736 init_insn (rtx insn)
4737 {
4738 if (sched_scan_info->init_insn)
4739 sched_scan_info->init_insn (insn);
4740 }
4741
4742 /* Init all insns in BB. */
4743 static void
4744 init_insns_in_bb (basic_block bb)
4745 {
4746 rtx insn;
4747
4748 FOR_BB_INSNS (bb, insn)
4749 init_insn (insn);
4750 }
4751
4752 /* A driver function to add a set of basic blocks (BBS),
4753 a single basic block (BB), a set of insns (INSNS) or a single insn (INSN)
4754 to the scheduling region. */
4755 void
4756 sched_scan (const struct sched_scan_info_def *ssi,
4757 bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
4758 {
4759 sched_scan_info = ssi;
4760
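  /* First extend and initialize the per-basic-block data, then the per-insn
     data, for whichever of BBS, BB, INSNS and INSN were supplied.  */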
4761 if (bbs != NULL || bb != NULL)
4762 {
4763 extend_bb ();
4764
4765 if (bbs != NULL)
4766 {
4767 unsigned i;
4768 basic_block x;
4769
4770 for (i = 0; VEC_iterate (basic_block, bbs, i, x); i++)
4771 init_bb (x);
4772 }
4773
4774 if (bb != NULL)
4775 init_bb (bb);
4776 }
4777
4778 extend_insn ();
4779
4780 if (bbs != NULL)
4781 {
4782 unsigned i;
4783 basic_block x;
4784
4785 for (i = 0; VEC_iterate (basic_block, bbs, i, x); i++)
4786 init_insns_in_bb (x);
4787 }
4788
4789 if (bb != NULL)
4790 init_insns_in_bb (bb);
4791
4792 if (insns != NULL)
4793 {
4794 unsigned i;
4795 rtx x;
4796
4797 for (i = 0; VEC_iterate (rtx, insns, i, x); i++)
4798 init_insn (x);
4799 }
4800
4801 if (insn != NULL)
4802 init_insn (insn);
4803 }
4804
4805
4806 /* Extend data structures for logical insn UID. */
4807 static void
4808 luids_extend_insn (void)
4809 {
4810 int new_luids_max_uid = get_max_uid () + 1;
4811
4812 VEC_safe_grow_cleared (int, heap, sched_luids, new_luids_max_uid);
4813 }
4814
4815 /* Initialize LUID for INSN. */
4816 static void
4817 luids_init_insn (rtx insn)
4818 {
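  /* I is the number of luids to reserve for INSN: real insns need one, and
     for notes, labels, etc. the luid_for_non_insn hook decides; a negative
     value means INSN gets no luid at all.  */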
4819 int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
4820 int luid;
4821
4822 if (i >= 0)
4823 {
4824 luid = sched_max_luid;
4825 sched_max_luid += i;
4826 }
4827 else
4828 luid = -1;
4829
4830 SET_INSN_LUID (insn, luid);
4831 }
4832
4833 /* Initialize luids for BBS, BB, INSNS and INSN.
4834 The hook common_sched_info->luid_for_non_insn () is used to determine
4835 if notes, labels, etc. need luids. */
4836 void
4837 sched_init_luids (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
4838 {
4839 const struct sched_scan_info_def ssi =
4840 {
4841 NULL, /* extend_bb */
4842 NULL, /* init_bb */
4843 luids_extend_insn, /* extend_insn */
4844 luids_init_insn /* init_insn */
4845 };
4846
4847 sched_scan (&ssi, bbs, bb, insns, insn);
4848 }
4849
4850 /* Free LUIDs. */
4851 void
4852 sched_finish_luids (void)
4853 {
4854 VEC_free (int, heap, sched_luids);
4855 sched_max_luid = 1;
4856 }
4857
4858 /* Return logical uid of INSN. Helpful while debugging. */
4859 int
4860 insn_luid (rtx insn)
4861 {
4862 return INSN_LUID (insn);
4863 }
4864
4865 /* Extend per insn data in the target. */
4866 void
4867 sched_extend_target (void)
4868 {
4869 if (targetm.sched.h_i_d_extended)
4870 targetm.sched.h_i_d_extended ();
4871 }
4872
4873 /* Extend the global scheduler structures (those that live across calls to
4874    schedule_block) to include information about the just-emitted INSN. */
4875 static void
4876 extend_h_i_d (void)
4877 {
4878 int reserve = (get_max_uid () + 1
4879 - VEC_length (haifa_insn_data_def, h_i_d));
4880 if (reserve > 0
4881 && ! VEC_space (haifa_insn_data_def, h_i_d, reserve))
4882 {
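      /* Grow with slack (to 3/2 of the current maximal uid) so that emitting
	 insns one at a time does not cause a reallocation for each of them.  */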
4883 VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d,
4884 3 * get_max_uid () / 2);
4885 sched_extend_target ();
4886 }
4887 }
4888
4889 /* Initialize the h_i_d entry of INSN with default values.
4890    Values that are not explicitly initialized here hold zero. */
4891 static void
4892 init_h_i_d (rtx insn)
4893 {
4894 if (INSN_LUID (insn) > 0)
4895 {
4896 INSN_COST (insn) = -1;
4897 find_insn_reg_weight (insn);
4898 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
4899 INSN_TICK (insn) = INVALID_TICK;
4900 INTER_TICK (insn) = INVALID_TICK;
4901 TODO_SPEC (insn) = HARD_DEP;
4902 }
4903 }
4904
4905 /* Initialize haifa_insn_data for BBS, BB, INSNS and INSN. */
4906 void
4907 haifa_init_h_i_d (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
4908 {
4909 const struct sched_scan_info_def ssi =
4910 {
4911 NULL, /* extend_bb */
4912 NULL, /* init_bb */
4913 extend_h_i_d, /* extend_insn */
4914 init_h_i_d /* init_insn */
4915 };
4916
4917 sched_scan (&ssi, bbs, bb, insns, insn);
4918 }
4919
4920 /* Finalize haifa_insn_data. */
4921 void
4922 haifa_finish_h_i_d (void)
4923 {
4924 VEC_free (haifa_insn_data_def, heap, h_i_d);
4925 }
4926
4927 /* Init data for the new insn INSN. */
4928 static void
4929 haifa_init_insn (rtx insn)
4930 {
4931 gcc_assert (insn != NULL);
4932
4933 sched_init_luids (NULL, NULL, NULL, insn);
4934 sched_extend_target ();
4935 sched_deps_init (false);
4936 haifa_init_h_i_d (NULL, NULL, NULL, insn);
4937
4938 if (adding_bb_to_current_region_p)
4939 {
4940 sd_init_insn (insn);
4941
4942 /* Extend dependency caches by one element. */
4943 extend_dependency_caches (1, false);
4944 }
4945 }
4946
4947 /* Init data for the new basic block BB which comes after AFTER. */
4948 static void
4949 haifa_init_only_bb (basic_block bb, basic_block after)
4950 {
4951 gcc_assert (bb != NULL);
4952
4953 sched_init_bbs ();
4954
4955 if (common_sched_info->add_block)
4956 /* This changes only data structures of the front-end. */
4957 common_sched_info->add_block (bb, after);
4958 }
4959
4960 /* A generic version of sched_split_block (). */
4961 basic_block
4962 sched_split_block_1 (basic_block first_bb, rtx after)
4963 {
4964 edge e;
4965
4966 e = split_block (first_bb, after);
4967 gcc_assert (e->src == first_bb);
4968
4969 /* sched_split_block emits note if *check == BB_END. Probably it
4970 is better to rip that note off. */
4971
4972 return e->dest;
4973 }
4974
4975 /* A generic version of sched_create_empty_bb (). */
4976 basic_block
4977 sched_create_empty_bb_1 (basic_block after)
4978 {
4979 return create_empty_bb (after);
4980 }
4981
4982 #endif /* INSN_SCHEDULING */