1 /* Instruction scheduling pass.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
6 and currently maintained by, Jim Wilson (wilson@cygnus.com)
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 /* Instruction scheduling pass. This file, along with sched-deps.c,
 25    contains the generic parts.  The actual entry point for the
 26    normal instruction scheduling pass is found in sched-rgn.c.
27
28 We compute insn priorities based on data dependencies. Flow
29 analysis only creates a fraction of the data-dependencies we must
30 observe: namely, only those dependencies which the combiner can be
31 expected to use. For this pass, we must therefore create the
 32    remaining dependencies we need to observe; register dependencies,
33 memory dependencies, dependencies to keep function calls in order,
34 and the dependence between a conditional branch and the setting of
35 condition codes are all dealt with here.
36
37 The scheduler first traverses the data flow graph, starting with
38 the last instruction, and proceeding to the first, assigning values
39 to insn_priority as it goes. This sorts the instructions
40 topologically by data dependence.
41
42 Once priorities have been established, we order the insns using
43 list scheduling. This works as follows: starting with a list of
 44    all the ready insns, sorted according to priority number, we
45 schedule the insn from the end of the list by placing its
46 predecessors in the list according to their priority order. We
47 consider this insn scheduled by setting the pointer to the "end" of
48 the list to point to the previous insn. When an insn has no
49 predecessors, we either queue it until sufficient time has elapsed
50 or add it to the ready list. As the instructions are scheduled or
51 when stalls are introduced, the queue advances and dumps insns into
52 the ready list. When all insns down to the lowest priority have
53 been scheduled, the critical path of the basic block has been made
54 as short as possible. The remaining insns are then scheduled in
55 remaining slots.
56
57 The following list shows the order in which we want to break ties
58 among insns in the ready list:
59
60 1. choose insn with the longest path to end of bb, ties
61 broken by
62 2. choose insn with least contribution to register pressure,
63 ties broken by
 64    3.  prefer in-block over interblock motion, ties broken by
 65    4.  prefer useful over speculative motion, ties broken by
66 5. choose insn with largest control flow probability, ties
67 broken by
68 6. choose insn with the least dependences upon the previously
 69    scheduled insn, ties broken by
 70    7.  choose the insn which has the most insns dependent on it, or finally
 71    8.  choose insn with lowest UID.
72
73 Memory references complicate matters. Only if we can be certain
74 that memory references are not part of the data dependency graph
75 (via true, anti, or output dependence), can we move operations past
76 memory references. To first approximation, reads can be done
77 independently, while writes introduce dependencies. Better
78 approximations will yield fewer dependencies.
79
80 Before reload, an extended analysis of interblock data dependences
81 is required for interblock scheduling. This is performed in
82 compute_block_backward_dependences ().
83
84 Dependencies set up by memory references are treated in exactly the
85 same way as other dependencies, by using insn backward dependences
86 INSN_BACK_DEPS. INSN_BACK_DEPS are translated into forward dependences
 87    INSN_FORW_DEPS for the purpose of forward list scheduling.
88
89 Having optimized the critical path, we may have also unduly
90 extended the lifetimes of some registers. If an operation requires
91 that constants be loaded into registers, it is certainly desirable
92 to load those constants as early as necessary, but no earlier.
93 I.e., it will not do to load up a bunch of registers at the
94 beginning of a basic block only to use them at the end, if they
95 could be loaded later, since this may result in excessive register
96 utilization.
97
98 Note that since branches are never in basic blocks, but only end
99 basic blocks, this pass will not move branches. But that is ok,
100 since we can use GNU's delayed branch scheduling pass to take care
101 of this case.
102
103 Also note that no further optimizations based on algebraic
104 identities are performed, so this pass would be a good one to
105 perform instruction splitting, such as breaking up a multiply
106 instruction into shifts and adds where that is profitable.
107
108 Given the memory aliasing analysis that this pass should perform,
109 it should be possible to remove redundant stores to memory, and to
110 load values from registers instead of hitting memory.
111
112 Before reload, speculative insns are moved only if a 'proof' exists
113 that no exception will be caused by this, and if no live registers
114 exist that inhibit the motion (live registers constraints are not
115 represented by data dependence edges).
116
117 This pass must update information that subsequent passes expect to
118 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
119 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
120
121 The information in the line number notes is carefully retained by
122 this pass. Notes that refer to the starting and ending of
123 exception regions are also carefully retained by this pass. All
124 other NOTE insns are grouped in their same relative order at the
125 beginning of basic blocks and regions that have been scheduled. */
126 \f
127 #include "config.h"
128 #include "system.h"
129 #include "coretypes.h"
130 #include "tm.h"
131 #include "toplev.h"
132 #include "rtl.h"
133 #include "tm_p.h"
134 #include "hard-reg-set.h"
135 #include "regs.h"
136 #include "function.h"
137 #include "flags.h"
138 #include "insn-config.h"
139 #include "insn-attr.h"
140 #include "except.h"
141 #include "toplev.h"
142 #include "recog.h"
143 #include "sched-int.h"
144 #include "target.h"
145 #include "output.h"
146 #include "params.h"
147 #include "vecprim.h"
148 #include "dbgcnt.h"
149 #include "cfgloop.h"
150
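/* The list-scheduling strategy and tie-breaking rules described in the
   header comment above can be condensed into a small standalone sketch.
   This is a toy model, not part of the pass itself: TOY_PRIO_NINSNS,
   the latency array and the consumer adjacency matrix are hypothetical
   stand-ins for the real insn chain and dependence lists (see
   priority () and schedule_block () below).  Priority is computed from
   the last insn backwards, as the longest latency path to the end of
   the block.  */

#define TOY_PRIO_NINSNS 4

static void
toy_compute_priorities (const int latency[TOY_PRIO_NINSNS],
                        const unsigned char consumer[TOY_PRIO_NINSNS][TOY_PRIO_NINSNS],
                        int prio[TOY_PRIO_NINSNS])
{
  int i, j;

  /* Walk from the last insn to the first, as the header comment says.  */
  for (i = TOY_PRIO_NINSNS - 1; i >= 0; i--)
    {
      int best = 0;

      /* Assume consumers of insn I always have indices greater than I,
         i.e. the insns are already in source (topological) order.  */
      for (j = i + 1; j < TOY_PRIO_NINSNS; j++)
        if (consumer[i][j] && prio[j] > best)
          best = prio[j];

      /* An insn with no consumers gets its own latency; otherwise its
         latency plus the longest path through any consumer.  */
      prio[i] = latency[i] + best;
    }
}

/* For example, with latency = {1, 2, 1, 1} and a single dependence
   0 -> 1, the sketch yields prio = {3, 2, 1, 1}: insn 0 heads the
   critical path and is preferred by the tie-breaking rules above.  */
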
151 #ifdef INSN_SCHEDULING
152
153 /* issue_rate is the number of insns that can be scheduled in the same
154 machine cycle. It can be defined in the config/mach/mach.h file,
155 otherwise we set it to 1. */
156
157 int issue_rate;
158
159 /* sched-verbose controls the amount of debugging output the
160 scheduler prints. It is controlled by -fsched-verbose=N:
 161    N>0 and no -dSR: the output is directed to stderr.
162 N>=10 will direct the printouts to stderr (regardless of -dSR).
163 N=1: same as -dSR.
164 N=2: bb's probabilities, detailed ready list info, unit/insn info.
165 N=3: rtl at abort point, control-flow, regions info.
166 N=5: dependences info. */
167
168 static int sched_verbose_param = 0;
169 int sched_verbose = 0;
170
171 /* Debugging file. All printouts are sent to dump, which is always set,
172 either to stderr, or to the dump listing file (-dRS). */
173 FILE *sched_dump = 0;
174
175 /* fix_sched_param() is called from toplev.c upon detection
176 of the -fsched-verbose=N option. */
177
178 void
179 fix_sched_param (const char *param, const char *val)
180 {
181 if (!strcmp (param, "verbose"))
182 sched_verbose_param = atoi (val);
183 else
184 warning (0, "fix_sched_param: unknown param: %s", param);
185 }
186
187 /* This is a placeholder for the scheduler parameters common
188 to all schedulers. */
189 struct common_sched_info_def *common_sched_info;
190
191 #define INSN_TICK(INSN) (HID (INSN)->tick)
192 #define INTER_TICK(INSN) (HID (INSN)->inter_tick)
193
194 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
195 then it should be recalculated from scratch. */
196 #define INVALID_TICK (-(max_insn_queue_index + 1))
197 /* The minimal value of the INSN_TICK of an instruction. */
198 #define MIN_TICK (-max_insn_queue_index)
199
200 /* Issue points are used to distinguish between instructions in max_issue ().
201 For now, all instructions are equally good. */
202 #define ISSUE_POINTS(INSN) 1
203
204 /* List of important notes we must keep around. This is a pointer to the
205 last element in the list. */
206 rtx note_list;
207
208 static struct spec_info_def spec_info_var;
209 /* Description of the speculative part of the scheduling.
210 If NULL - no speculation. */
211 spec_info_t spec_info = NULL;
212
 213 /* True if a recovery block was added during scheduling of the current block.
 214    Used to determine if we need to fix INSN_TICKs.  */
215 static bool haifa_recovery_bb_recently_added_p;
216
 217 /* True if a recovery block was added during this scheduling pass.
218 Used to determine if we should have empty memory pools of dependencies
219 after finishing current region. */
220 bool haifa_recovery_bb_ever_added_p;
221
222 /* Counters of different types of speculative instructions. */
223 static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
224
225 /* Array used in {unlink, restore}_bb_notes. */
226 static rtx *bb_header = 0;
227
228 /* Basic block after which recovery blocks will be created. */
229 static basic_block before_recovery;
230
231 /* Basic block just before the EXIT_BLOCK and after recovery, if we have
232 created it. */
233 basic_block after_recovery;
234
235 /* FALSE if we add bb to another region, so we don't need to initialize it. */
236 bool adding_bb_to_current_region_p = true;
237
238 /* Queues, etc. */
239
240 /* An instruction is ready to be scheduled when all insns preceding it
241 have already been scheduled. It is important to ensure that all
242 insns which use its result will not be executed until its result
243 has been computed. An insn is maintained in one of four structures:
244
245 (P) the "Pending" set of insns which cannot be scheduled until
246 their dependencies have been satisfied.
247 (Q) the "Queued" set of insns that can be scheduled when sufficient
248 time has passed.
249 (R) the "Ready" list of unscheduled, uncommitted insns.
250 (S) the "Scheduled" list of insns.
251
252 Initially, all insns are either "Pending" or "Ready" depending on
253 whether their dependencies are satisfied.
254
255 Insns move from the "Ready" list to the "Scheduled" list as they
256 are committed to the schedule. As this occurs, the insns in the
257 "Pending" list have their dependencies satisfied and move to either
258 the "Ready" list or the "Queued" set depending on whether
259 sufficient time has passed to make them ready. As time passes,
260 insns move from the "Queued" set to the "Ready" list.
261
262 The "Pending" list (P) are the insns in the INSN_FORW_DEPS of the
263 unscheduled insns, i.e., those that are ready, queued, and pending.
264 The "Queued" set (Q) is implemented by the variable `insn_queue'.
265 The "Ready" list (R) is implemented by the variables `ready' and
266 `n_ready'.
267 The "Scheduled" list (S) is the new insn chain built by this pass.
268
269 The transition (R->S) is implemented in the scheduling loop in
270 `schedule_block' when the best insn to schedule is chosen.
271 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
272 insns move from the ready list to the scheduled list.
 273    The transition (Q->R) is implemented in 'queue_to_ready' as time
274 passes or stalls are introduced. */
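
/* As an illustration of the four structures above, here is a minimal
   standalone sketch of the state transitions, using a hypothetical
   record that tracks only an unresolved-dependence count and a stall
   delay.  The real pass keeps this state in INSN_TICK, QUEUE_INDEX
   (defined below) and the dependence lists; nothing in this sketch is
   used by the scheduler.  */

enum toy_where { TOY_PENDING, TOY_QUEUED, TOY_READY, TOY_SCHEDULED };

struct toy_insn
{
  int unresolved_deps;   /* Back deps not yet resolved (P while > 0).  */
  int delay;             /* Cycles to wait once the deps are resolved.  */
  enum toy_where where;
};

/* One back dependence of T has been resolved: P -> Q or P -> R.  */
static void
toy_resolve_dep (struct toy_insn *t)
{
  if (t->where == TOY_PENDING && --t->unresolved_deps == 0)
    t->where = t->delay > 0 ? TOY_QUEUED : TOY_READY;
}

/* Called once per cycle for queued insns: Q -> R when enough time has
   passed.  */
static void
toy_advance_cycle (struct toy_insn *t)
{
  if (t->where == TOY_QUEUED && --t->delay == 0)
    t->where = TOY_READY;
}

/* The scheduler commits T to the new insn chain: R -> S.  */
static void
toy_commit_insn (struct toy_insn *t)
{
  if (t->where == TOY_READY)
    t->where = TOY_SCHEDULED;
}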
275
276 /* Implement a circular buffer to delay instructions until sufficient
 277    time has passed.  For the new pipeline description interface,
 278    MAX_INSN_QUEUE_INDEX is a power of two minus one that is not less
 279    than the maximal instruction execution time computed by genattr.c
 280    from the maximal time of functional unit reservations and of getting
 281    a result.  This is the longest time an insn may be queued.  */
282
283 static rtx *insn_queue;
284 static int q_ptr = 0;
285 static int q_size = 0;
286 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
 287 #define NEXT_Q_AFTER(X, C) (((X)+(C)) & max_insn_queue_index)
288
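/* A standalone sketch of the circular delay-buffer indexing implemented
   by NEXT_Q and NEXT_Q_AFTER above, assuming a hypothetical queue of 8
   slots, i.e. a mask of 7 (a power of two minus one).  Because the mask
   has that form, the bitwise AND performs the wrap-around modulo
   cheaply; e.g. toy_next_q_after (6, 3) == 1.  */

#define TOY_QUEUE_MASK 7

/* Slot that becomes current after one more cycle.  */
static int
toy_next_q (int q_ptr)
{
  return (q_ptr + 1) & TOY_QUEUE_MASK;
}

/* Slot in which an insn delayed by N_CYCLES from Q_PTR would be kept.  */
static int
toy_next_q_after (int q_ptr, int n_cycles)
{
  return (q_ptr + n_cycles) & TOY_QUEUE_MASK;
}
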
289 #define QUEUE_SCHEDULED (-3)
290 #define QUEUE_NOWHERE (-2)
291 #define QUEUE_READY (-1)
292 /* QUEUE_SCHEDULED - INSN is scheduled.
 293    QUEUE_NOWHERE   - INSN isn't scheduled yet and is in neither the
 294    queue nor the ready list.
295 QUEUE_READY - INSN is in ready list.
296 N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
297
298 #define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
299
 300 /* The following variable describes all current and future
 301    reservations of the processor units.  */
302 state_t curr_state;
303
 304 /* The following variable is the size of the memory representing all
 305    current and future reservations of the processor units.  */
306 size_t dfa_state_size;
307
308 /* The following array is used to find the best insn from ready when
309 the automaton pipeline interface is used. */
310 char *ready_try = NULL;
311
312 /* The ready list. */
313 struct ready_list ready = {NULL, 0, 0, 0, 0};
314
315 /* The pointer to the ready list (to be removed). */
316 static struct ready_list *readyp = &ready;
317
318 /* Scheduling clock. */
319 static int clock_var;
320
321 static int may_trap_exp (const_rtx, int);
322
 323 /* Nonzero iff the address is composed of at most one register.  */
324 #define CONST_BASED_ADDRESS_P(x) \
325 (REG_P (x) \
326 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
327 || (GET_CODE (x) == LO_SUM)) \
328 && (CONSTANT_P (XEXP (x, 0)) \
329 || CONSTANT_P (XEXP (x, 1)))))
330
331 /* Returns a class that insn with GET_DEST(insn)=x may belong to,
332 as found by analyzing insn's expression. */
333
334 \f
335 static int haifa_luid_for_non_insn (rtx x);
336
337 /* Haifa version of sched_info hooks common to all headers. */
338 const struct common_sched_info_def haifa_common_sched_info =
339 {
340 NULL, /* fix_recovery_cfg */
341 NULL, /* add_block */
342 NULL, /* estimate_number_of_insns */
343 haifa_luid_for_non_insn, /* luid_for_non_insn */
344 SCHED_PASS_UNKNOWN /* sched_pass_id */
345 };
346
347 const struct sched_scan_info_def *sched_scan_info;
348
349 /* Mapping from instruction UID to its Logical UID. */
350 VEC (int, heap) *sched_luids = NULL;
351
352 /* Next LUID to assign to an instruction. */
353 int sched_max_luid = 1;
354
355 /* Haifa Instruction Data. */
356 VEC (haifa_insn_data_def, heap) *h_i_d = NULL;
357
358 void (* sched_init_only_bb) (basic_block, basic_block);
359
360 /* Split block function. Different schedulers might use different functions
 361    to keep their internal data consistent.  */
362 basic_block (* sched_split_block) (basic_block, rtx);
363
364 /* Create empty basic block after the specified block. */
365 basic_block (* sched_create_empty_bb) (basic_block);
366
367 static int
368 may_trap_exp (const_rtx x, int is_store)
369 {
370 enum rtx_code code;
371
372 if (x == 0)
373 return TRAP_FREE;
374 code = GET_CODE (x);
375 if (is_store)
376 {
377 if (code == MEM && may_trap_p (x))
378 return TRAP_RISKY;
379 else
380 return TRAP_FREE;
381 }
382 if (code == MEM)
383 {
384 /* The insn uses memory: a volatile load. */
385 if (MEM_VOLATILE_P (x))
386 return IRISKY;
387 /* An exception-free load. */
388 if (!may_trap_p (x))
389 return IFREE;
390 /* A load with 1 base register, to be further checked. */
391 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
392 return PFREE_CANDIDATE;
393 /* No info on the load, to be further checked. */
394 return PRISKY_CANDIDATE;
395 }
396 else
397 {
398 const char *fmt;
399 int i, insn_class = TRAP_FREE;
400
401 /* Neither store nor load, check if it may cause a trap. */
402 if (may_trap_p (x))
403 return TRAP_RISKY;
404 /* Recursive step: walk the insn... */
405 fmt = GET_RTX_FORMAT (code);
406 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
407 {
408 if (fmt[i] == 'e')
409 {
410 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
411 insn_class = WORST_CLASS (insn_class, tmp_class);
412 }
413 else if (fmt[i] == 'E')
414 {
415 int j;
416 for (j = 0; j < XVECLEN (x, i); j++)
417 {
418 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
419 insn_class = WORST_CLASS (insn_class, tmp_class);
420 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
421 break;
422 }
423 }
424 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
425 break;
426 }
427 return insn_class;
428 }
429 }
430
431 /* Classifies rtx X of an insn for the purpose of verifying that X can be
432 executed speculatively (and consequently the insn can be moved
433 speculatively), by examining X, returning:
434 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
435 TRAP_FREE: non-load insn.
436 IFREE: load from a globally safe location.
437 IRISKY: volatile load.
 438    PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for
439 being either PFREE or PRISKY. */
440
441 static int
442 haifa_classify_rtx (const_rtx x)
443 {
444 int tmp_class = TRAP_FREE;
445 int insn_class = TRAP_FREE;
446 enum rtx_code code;
447
448 if (GET_CODE (x) == PARALLEL)
449 {
450 int i, len = XVECLEN (x, 0);
451
452 for (i = len - 1; i >= 0; i--)
453 {
454 tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
455 insn_class = WORST_CLASS (insn_class, tmp_class);
456 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
457 break;
458 }
459 }
460 else
461 {
462 code = GET_CODE (x);
463 switch (code)
464 {
465 case CLOBBER:
466 /* Test if it is a 'store'. */
467 tmp_class = may_trap_exp (XEXP (x, 0), 1);
468 break;
469 case SET:
470 /* Test if it is a store. */
471 tmp_class = may_trap_exp (SET_DEST (x), 1);
472 if (tmp_class == TRAP_RISKY)
473 break;
474 /* Test if it is a load. */
475 tmp_class =
476 WORST_CLASS (tmp_class,
477 may_trap_exp (SET_SRC (x), 0));
478 break;
479 case COND_EXEC:
480 tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
481 if (tmp_class == TRAP_RISKY)
482 break;
483 tmp_class = WORST_CLASS (tmp_class,
484 may_trap_exp (COND_EXEC_TEST (x), 0));
485 break;
486 case TRAP_IF:
487 tmp_class = TRAP_RISKY;
488 break;
489 default:;
490 }
491 insn_class = tmp_class;
492 }
493
494 return insn_class;
495 }
496
497 int
498 haifa_classify_insn (const_rtx insn)
499 {
500 return haifa_classify_rtx (PATTERN (insn));
501 }
502
503 /* Forward declarations. */
504
505 static int priority (rtx);
506 static int rank_for_schedule (const void *, const void *);
507 static void swap_sort (rtx *, int);
508 static void queue_insn (rtx, int);
509 static int schedule_insn (rtx);
510 static int find_set_reg_weight (const_rtx);
511 static void find_insn_reg_weight (const_rtx);
512 static void adjust_priority (rtx);
513 static void advance_one_cycle (void);
514 static void extend_h_i_d (void);
515
516
517 /* Notes handling mechanism:
518 =========================
519 Generally, NOTES are saved before scheduling and restored after scheduling.
520 The scheduler distinguishes between two types of notes:
521
522 (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
523 Before scheduling a region, a pointer to the note is added to the insn
524 that follows or precedes it. (This happens as part of the data dependence
525 computation). After scheduling an insn, the pointer contained in it is
526 used for regenerating the corresponding note (in reemit_notes).
527
528 (2) All other notes (e.g. INSN_DELETED): Before scheduling a block,
529 these notes are put in a list (in rm_other_notes() and
530 unlink_other_notes ()). After scheduling the block, these notes are
531 inserted at the beginning of the block (in schedule_block()). */
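
/* A standalone sketch of the save/restore scheme for type (2) notes,
   using a hypothetical doubly linked node in place of real insns.
   Notes are unlinked into a separate chain that, like note_list, is
   remembered by its *last* element and threaded through the prev
   pointers; later the whole chain is spliced back in front of a given
   node.  Compare add_to_note_list () and restore_other_notes () below;
   nothing in this sketch is used by the scheduler.  */

struct toy_note
{
  struct toy_note *prev, *next;
};

/* Unlink NOTE from its current chain and append it after *SAVED_END,
   which points at the last saved note (or is null for an empty list).  */
static void
toy_save_note (struct toy_note *note, struct toy_note **saved_end)
{
  if (note->prev)
    note->prev->next = note->next;
  if (note->next)
    note->next->prev = note->prev;

  note->next = 0;
  note->prev = *saved_end;
  if (*saved_end)
    (*saved_end)->next = note;
  *saved_end = note;
}

/* Splice the saved chain ending at SAVED_END back immediately before
   HEAD and return the new first node.  */
static struct toy_note *
toy_restore_notes (struct toy_note *saved_end, struct toy_note *head)
{
  struct toy_note *first = saved_end;

  if (!saved_end)
    return head;

  while (first->prev)
    first = first->prev;

  first->prev = head->prev;
  if (head->prev)
    head->prev->next = first;
  saved_end->next = head;
  head->prev = saved_end;

  return first;
}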
532
533 static void ready_add (struct ready_list *, rtx, bool);
534 static rtx ready_remove_first (struct ready_list *);
535
536 static void queue_to_ready (struct ready_list *);
537 static int early_queue_to_ready (state_t, struct ready_list *);
538
539 static void debug_ready_list (struct ready_list *);
540
541 /* The following functions are used to implement multi-pass scheduling
542 on the first cycle. */
543 static rtx ready_remove (struct ready_list *, int);
544 static void ready_remove_insn (rtx);
545
546 static int choose_ready (struct ready_list *, rtx *);
547
548 static void fix_inter_tick (rtx, rtx);
549 static int fix_tick_ready (rtx);
550 static void change_queue_index (rtx, int);
551
552 /* The following functions are used to implement scheduling of data/control
553 speculative instructions. */
554
555 static void extend_h_i_d (void);
556 static void init_h_i_d (rtx);
557 static void generate_recovery_code (rtx);
558 static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
559 static void begin_speculative_block (rtx);
560 static void add_to_speculative_block (rtx);
561 static void init_before_recovery (basic_block *);
562 static void create_check_block_twin (rtx, bool);
563 static void fix_recovery_deps (basic_block);
564 static void haifa_change_pattern (rtx, rtx);
565 static void dump_new_block_header (int, basic_block, rtx, rtx);
566 static void restore_bb_notes (basic_block);
567 static void fix_jump_move (rtx);
568 static void move_block_after_check (rtx);
569 static void move_succs (VEC(edge,gc) **, basic_block);
570 static void sched_remove_insn (rtx);
571 static void clear_priorities (rtx, rtx_vec_t *);
572 static void calc_priorities (rtx_vec_t);
573 static void add_jump_dependencies (rtx, rtx);
574 #ifdef ENABLE_CHECKING
575 static int has_edge_p (VEC(edge,gc) *, int);
576 static void check_cfg (rtx, rtx);
577 #endif
578
579 #endif /* INSN_SCHEDULING */
580 \f
581 /* Point to state used for the current scheduling pass. */
582 struct haifa_sched_info *current_sched_info;
583 \f
584 #ifndef INSN_SCHEDULING
585 void
586 schedule_insns (void)
587 {
588 }
589 #else
590
591 /* Pointer to the last instruction scheduled. Used by rank_for_schedule,
592 so that insns independent of the last scheduled insn will be preferred
593 over dependent instructions. */
594
595 static rtx last_scheduled_insn;
596
 597 /* Cached cost of the instruction.  Use insn_cost () below to get the cost
 598    of an insn.  -1 here means that the field is not initialized.  */
599 #define INSN_COST(INSN) (HID (INSN)->cost)
600
601 /* Compute cost of executing INSN.
602 This is the number of cycles between instruction issue and
603 instruction results. */
604 int
605 insn_cost (rtx insn)
606 {
607 int cost;
608
609 if (sel_sched_p ())
610 {
611 if (recog_memoized (insn) < 0)
612 return 0;
613
614 cost = insn_default_latency (insn);
615 if (cost < 0)
616 cost = 0;
617
618 return cost;
619 }
620
621 cost = INSN_COST (insn);
622
623 if (cost < 0)
624 {
625 /* A USE insn, or something else we don't need to
626 understand. We can't pass these directly to
627 result_ready_cost or insn_default_latency because it will
628 trigger a fatal error for unrecognizable insns. */
629 if (recog_memoized (insn) < 0)
630 {
631 INSN_COST (insn) = 0;
632 return 0;
633 }
634 else
635 {
636 cost = insn_default_latency (insn);
637 if (cost < 0)
638 cost = 0;
639
640 INSN_COST (insn) = cost;
641 }
642 }
643
644 return cost;
645 }
646
647 /* Compute cost of dependence LINK.
648 This is the number of cycles between instruction issue and
649 instruction results.
650 ??? We also use this function to call recog_memoized on all insns. */
651 int
652 dep_cost_1 (dep_t link, dw_t dw)
653 {
654 rtx insn = DEP_PRO (link);
655 rtx used = DEP_CON (link);
656 int cost;
657
658 /* A USE insn should never require the value used to be computed.
659 This allows the computation of a function's result and parameter
660 values to overlap the return and call. */
661 if (recog_memoized (used) < 0)
662 {
663 cost = 0;
664 recog_memoized (insn);
665 }
666 else
667 {
668 enum reg_note dep_type = DEP_TYPE (link);
669
670 cost = insn_cost (insn);
671
672 if (INSN_CODE (insn) >= 0)
673 {
674 if (dep_type == REG_DEP_ANTI)
675 cost = 0;
676 else if (dep_type == REG_DEP_OUTPUT)
677 {
678 cost = (insn_default_latency (insn)
679 - insn_default_latency (used));
680 if (cost <= 0)
681 cost = 1;
682 }
683 else if (bypass_p (insn))
684 cost = insn_latency (insn, used);
685 }
686
687
688 if (targetm.sched.adjust_cost_2)
689 {
690 cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
691 dw);
692 }
693 else if (targetm.sched.adjust_cost != NULL)
694 {
695 /* This variable is used for backward compatibility with the
696 targets. */
697 rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);
698
 699       /* Make it self-cycled, so that if someone tries to walk over this
 700          incomplete list they will be caught in an endless loop.  */
701 XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;
702
703 /* Targets use only REG_NOTE_KIND of the link. */
704 PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));
705
706 cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
707 insn, cost);
708
709 free_INSN_LIST_node (dep_cost_rtx_link);
710 }
711
712 if (cost < 0)
713 cost = 0;
714 }
715
716 return cost;
717 }
718
719 /* Compute cost of dependence LINK.
720 This is the number of cycles between instruction issue and
721 instruction results. */
722 int
723 dep_cost (dep_t link)
724 {
725 return dep_cost_1 (link, 0);
726 }
727
728 /* Use this sel-sched.c friendly function in reorder2 instead of increasing
729 INSN_PRIORITY explicitly. */
730 void
731 increase_insn_priority (rtx insn, int amount)
732 {
733 if (!sel_sched_p ())
734 {
735 /* We're dealing with haifa-sched.c INSN_PRIORITY. */
736 if (INSN_PRIORITY_KNOWN (insn))
737 INSN_PRIORITY (insn) += amount;
738 }
739 else
740 {
741 /* In sel-sched.c INSN_PRIORITY is not kept up to date.
742 Use EXPR_PRIORITY instead. */
743 sel_add_to_insn_priority (insn, amount);
744 }
745 }
746
747 /* Return 'true' if DEP should be included in priority calculations. */
748 static bool
749 contributes_to_priority_p (dep_t dep)
750 {
751 if (DEBUG_INSN_P (DEP_CON (dep))
752 || DEBUG_INSN_P (DEP_PRO (dep)))
753 return false;
754
 755   /* The critical path is meaningful within block boundaries only.  */
756 if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
757 DEP_PRO (dep)))
758 return false;
759
 760   /* If the flag COUNT_SPEC_IN_CRITICAL_PATH is set,
 761      then speculative instructions will be less likely to be
 762      scheduled.  That is because the priority of
 763      their producers will increase, and thus the
 764      producers will be more likely to be scheduled, thus
 765      resolving the dependence.  */
766 if (sched_deps_info->generate_spec_deps
767 && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
768 && (DEP_STATUS (dep) & SPECULATIVE))
769 return false;
770
771 return true;
772 }
773
774 /* Compute the number of nondebug forward deps of an insn. */
775
776 static int
777 dep_list_size (rtx insn)
778 {
779 sd_iterator_def sd_it;
780 dep_t dep;
781 int dbgcount = 0, nodbgcount = 0;
782
783 if (!MAY_HAVE_DEBUG_INSNS)
784 return sd_lists_size (insn, SD_LIST_FORW);
785
786 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
787 {
788 if (DEBUG_INSN_P (DEP_CON (dep)))
789 dbgcount++;
790 else
791 nodbgcount++;
792 }
793
794 gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, SD_LIST_FORW));
795
796 return nodbgcount;
797 }
798
799 /* Compute the priority number for INSN. */
800 static int
801 priority (rtx insn)
802 {
803 if (! INSN_P (insn))
804 return 0;
805
806 /* We should not be interested in priority of an already scheduled insn. */
807 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
808
809 if (!INSN_PRIORITY_KNOWN (insn))
810 {
811 int this_priority = -1;
812
813 if (dep_list_size (insn) == 0)
 814         /* ??? We should set INSN_PRIORITY to insn_cost when an insn has
 815            some forward deps but all of them are ignored by the
 816            contributes_to_priority hook.  At the moment we set the priority
 817            of such an insn to 0.  */
818 this_priority = insn_cost (insn);
819 else
820 {
821 rtx prev_first, twin;
822 basic_block rec;
823
 824           /* For recovery check instructions we calculate priority slightly
 825              differently than for normal instructions.  Instead of walking
826 through INSN_FORW_DEPS (check) list, we walk through
827 INSN_FORW_DEPS list of each instruction in the corresponding
828 recovery block. */
829
830 /* Selective scheduling does not define RECOVERY_BLOCK macro. */
831 rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
832 if (!rec || rec == EXIT_BLOCK_PTR)
833 {
834 prev_first = PREV_INSN (insn);
835 twin = insn;
836 }
837 else
838 {
839 prev_first = NEXT_INSN (BB_HEAD (rec));
840 twin = PREV_INSN (BB_END (rec));
841 }
842
843 do
844 {
845 sd_iterator_def sd_it;
846 dep_t dep;
847
848 FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
849 {
850 rtx next;
851 int next_priority;
852
853 next = DEP_CON (dep);
854
855 if (BLOCK_FOR_INSN (next) != rec)
856 {
857 int cost;
858
859 if (!contributes_to_priority_p (dep))
860 continue;
861
862 if (twin == insn)
863 cost = dep_cost (dep);
864 else
865 {
866 struct _dep _dep1, *dep1 = &_dep1;
867
868 init_dep (dep1, insn, next, REG_DEP_ANTI);
869
870 cost = dep_cost (dep1);
871 }
872
873 next_priority = cost + priority (next);
874
875 if (next_priority > this_priority)
876 this_priority = next_priority;
877 }
878 }
879
880 twin = PREV_INSN (twin);
881 }
882 while (twin != prev_first);
883 }
884
885 if (this_priority < 0)
886 {
887 gcc_assert (this_priority == -1);
888
889 this_priority = insn_cost (insn);
890 }
891
892 INSN_PRIORITY (insn) = this_priority;
893 INSN_PRIORITY_STATUS (insn) = 1;
894 }
895
896 return INSN_PRIORITY (insn);
897 }
898 \f
899 /* Macros and functions for keeping the priority queue sorted, and
900 dealing with queuing and dequeuing of instructions. */
901
902 #define SCHED_SORT(READY, N_READY) \
903 do { if ((N_READY) == 2) \
904 swap_sort (READY, N_READY); \
905 else if ((N_READY) > 2) \
906 qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \
907 while (0)
908
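/* To make the ordering convention concrete, here is a standalone sketch
   of a comparator in the same spirit as rank_for_schedule () below:
   prefer the higher priority, and fall back on a unique original-order
   number (the role INSN_LUID plays) so that the comparator never
   returns 0 and the sort stays stable.  The struct is hypothetical; the
   real comparator works on rtx insns, applies many more heuristics, and
   is signed so that the most preferred insn ends up at the high end of
   the ready vector, where ready_remove_first () takes it.  */

struct toy_ranked
{
  int priority;
  int luid;   /* Unique, assigned in original insn order.  */
};

static int
toy_rank (const void *xp, const void *yp)
{
  const struct toy_ranked *x = (const struct toy_ranked *) xp;
  const struct toy_ranked *y = (const struct toy_ranked *) yp;

  if (x->priority != y->priority)
    return y->priority - x->priority;   /* Higher priority sorts first.  */

  /* Never return 0: break the tie by original order, lowest LUID first.  */
  return x->luid - y->luid;
}

/* With qsort (a, n, sizeof (struct toy_ranked), toy_rank) the array is
   ordered best-first; equal priorities keep their original order.  */
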
909 /* Returns a positive value if x is preferred; returns a negative value if
910 y is preferred. Should never return 0, since that will make the sort
911 unstable. */
912
913 static int
914 rank_for_schedule (const void *x, const void *y)
915 {
916 rtx tmp = *(const rtx *) y;
917 rtx tmp2 = *(const rtx *) x;
918 rtx last;
919 int tmp_class, tmp2_class;
920 int val, priority_val, weight_val, info_val;
921
922 if (MAY_HAVE_DEBUG_INSNS)
923 {
924 /* Schedule debug insns as early as possible. */
925 if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
926 return -1;
927 else if (DEBUG_INSN_P (tmp2))
928 return 1;
929 }
930
 931   /* The insn in a schedule group should be issued first.  */
932 if (flag_sched_group_heuristic &&
933 SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
934 return SCHED_GROUP_P (tmp2) ? 1 : -1;
935
936 /* Make sure that priority of TMP and TMP2 are initialized. */
937 gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
938
939 /* Prefer insn with higher priority. */
940 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
941
942 if (flag_sched_critical_path_heuristic && priority_val)
943 return priority_val;
944
 945   /* Prefer the speculative insn with greater dependency weakness.  */
946 if (flag_sched_spec_insn_heuristic && spec_info)
947 {
948 ds_t ds1, ds2;
949 dw_t dw1, dw2;
950 int dw;
951
952 ds1 = TODO_SPEC (tmp) & SPECULATIVE;
953 if (ds1)
954 dw1 = ds_weak (ds1);
955 else
956 dw1 = NO_DEP_WEAK;
957
958 ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
959 if (ds2)
960 dw2 = ds_weak (ds2);
961 else
962 dw2 = NO_DEP_WEAK;
963
964 dw = dw2 - dw1;
965 if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
966 return dw;
967 }
968
 969   /* Prefer an insn with a smaller contribution to register pressure.  */
970 if (flag_sched_reg_pressure_heuristic && !reload_completed &&
971 (weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2)))
972 return weight_val;
973
974 info_val = (*current_sched_info->rank) (tmp, tmp2);
 975   if (flag_sched_rank_heuristic && info_val)
976 return info_val;
977
978 if (flag_sched_last_insn_heuristic)
979 {
980 last = last_scheduled_insn;
981
982 if (DEBUG_INSN_P (last) && last != current_sched_info->prev_head)
983 do
984 last = PREV_INSN (last);
985 while (!NONDEBUG_INSN_P (last)
986 && last != current_sched_info->prev_head);
987 }
988
989 /* Compare insns based on their relation to the last scheduled
990 non-debug insn. */
991 if (flag_sched_last_insn_heuristic && NONDEBUG_INSN_P (last))
992 {
993 dep_t dep1;
994 dep_t dep2;
995
996 /* Classify the instructions into three classes:
 997          1) Data dependent on last scheduled insn.
998 2) Anti/Output dependent on last scheduled insn.
999 3) Independent of last scheduled insn, or has latency of one.
1000 Choose the insn from the highest numbered class if different. */
1001 dep1 = sd_find_dep_between (last, tmp, true);
1002
1003 if (dep1 == NULL || dep_cost (dep1) == 1)
1004 tmp_class = 3;
1005 else if (/* Data dependence. */
1006 DEP_TYPE (dep1) == REG_DEP_TRUE)
1007 tmp_class = 1;
1008 else
1009 tmp_class = 2;
1010
1011 dep2 = sd_find_dep_between (last, tmp2, true);
1012
1013 if (dep2 == NULL || dep_cost (dep2) == 1)
1014 tmp2_class = 3;
1015 else if (/* Data dependence. */
1016 DEP_TYPE (dep2) == REG_DEP_TRUE)
1017 tmp2_class = 1;
1018 else
1019 tmp2_class = 2;
1020
1021 if ((val = tmp2_class - tmp_class))
1022 return val;
1023 }
1024
1025 /* Prefer the insn which has more later insns that depend on it.
1026 This gives the scheduler more freedom when scheduling later
1027 instructions at the expense of added register pressure. */
1028
1029 val = (dep_list_size (tmp2) - dep_list_size (tmp));
1030
1031 if (flag_sched_dep_count_heuristic && val != 0)
1032 return val;
1033
1034 /* If insns are equally good, sort by INSN_LUID (original insn order),
1035 so that we make the sort stable. This minimizes instruction movement,
1036 thus minimizing sched's effect on debugging and cross-jumping. */
1037 return INSN_LUID (tmp) - INSN_LUID (tmp2);
1038 }
1039
 1040 /* Resort the array A of length N in which only the last element may be out of order.  */
1041
1042 HAIFA_INLINE static void
1043 swap_sort (rtx *a, int n)
1044 {
1045 rtx insn = a[n - 1];
1046 int i = n - 2;
1047
1048 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
1049 {
1050 a[i + 1] = a[i];
1051 i -= 1;
1052 }
1053 a[i + 1] = insn;
1054 }
1055
1056 /* Add INSN to the insn queue so that it can be executed at least
1057 N_CYCLES after the currently executing insn. Preserve insns
1058 chain for debugging purposes. */
1059
1060 HAIFA_INLINE static void
1061 queue_insn (rtx insn, int n_cycles)
1062 {
1063 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
1064 rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
1065
1066 gcc_assert (n_cycles <= max_insn_queue_index);
1067 gcc_assert (!DEBUG_INSN_P (insn));
1068
1069 insn_queue[next_q] = link;
1070 q_size += 1;
1071
1072 if (sched_verbose >= 2)
1073 {
1074 fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
1075 (*current_sched_info->print_insn) (insn, 0));
1076
1077 fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
1078 }
1079
1080 QUEUE_INDEX (insn) = next_q;
1081 }
1082
1083 /* Remove INSN from queue. */
1084 static void
1085 queue_remove (rtx insn)
1086 {
1087 gcc_assert (QUEUE_INDEX (insn) >= 0);
1088 remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
1089 q_size--;
1090 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
1091 }
1092
1093 /* Return a pointer to the bottom of the ready list, i.e. the insn
1094 with the lowest priority. */
1095
1096 rtx *
1097 ready_lastpos (struct ready_list *ready)
1098 {
1099 gcc_assert (ready->n_ready >= 1);
1100 return ready->vec + ready->first - ready->n_ready + 1;
1101 }
1102
1103 /* Add an element INSN to the ready list so that it ends up with the
1104 lowest/highest priority depending on FIRST_P. */
1105
1106 HAIFA_INLINE static void
1107 ready_add (struct ready_list *ready, rtx insn, bool first_p)
1108 {
1109 if (!first_p)
1110 {
1111 if (ready->first == ready->n_ready)
1112 {
1113 memmove (ready->vec + ready->veclen - ready->n_ready,
1114 ready_lastpos (ready),
1115 ready->n_ready * sizeof (rtx));
1116 ready->first = ready->veclen - 1;
1117 }
1118 ready->vec[ready->first - ready->n_ready] = insn;
1119 }
1120 else
1121 {
1122 if (ready->first == ready->veclen - 1)
1123 {
1124 if (ready->n_ready)
1125 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
1126 memmove (ready->vec + ready->veclen - ready->n_ready - 1,
1127 ready_lastpos (ready),
1128 ready->n_ready * sizeof (rtx));
1129 ready->first = ready->veclen - 2;
1130 }
1131 ready->vec[++(ready->first)] = insn;
1132 }
1133
1134 ready->n_ready++;
1135 if (DEBUG_INSN_P (insn))
1136 ready->n_debug++;
1137
1138 gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
1139 QUEUE_INDEX (insn) = QUEUE_READY;
1140 }
1141
1142 /* Remove the element with the highest priority from the ready list and
1143 return it. */
1144
1145 HAIFA_INLINE static rtx
1146 ready_remove_first (struct ready_list *ready)
1147 {
1148 rtx t;
1149
1150 gcc_assert (ready->n_ready);
1151 t = ready->vec[ready->first--];
1152 ready->n_ready--;
1153 if (DEBUG_INSN_P (t))
1154 ready->n_debug--;
 1155   /* If the ready list becomes empty, reset it.  */
1156 if (ready->n_ready == 0)
1157 ready->first = ready->veclen - 1;
1158
1159 gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
1160 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1161
1162 return t;
1163 }
1164
1165 /* The following code implements multi-pass scheduling for the first
 1166    cycle.  In other words, we will try to choose the ready insn that
 1167    permits starting the maximum number of insns on the same cycle.  */
1168
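/* The paragraph above can be illustrated with a standalone sketch of
   the first-cycle lookahead, assuming a toy machine in which each ready
   insn simply consumes COST[i] units of a single per-cycle budget.  The
   real code (max_issue () and choose_ready ()) walks DFA states instead
   of a scalar budget; everything here, including the greedy inner walk,
   is a hypothetical simplification.  */

#define TOY_NREADY 4

/* Return the index of the ready insn which, if issued first, lets the
   largest number of the remaining ready insns issue on the same cycle,
   or -1 if nothing fits into BUDGET at all.  */
static int
toy_choose_first (const int cost[TOY_NREADY], int budget)
{
  int best_insn = -1, best_count = 0;
  int c, i;

  for (c = 0; c < TOY_NREADY; c++)
    {
      int left, count;

      if (cost[c] > budget)
        continue;              /* This candidate cannot issue at all.  */

      left = budget - cost[c];
      count = 1;               /* The candidate itself.  */

      for (i = 0; i < TOY_NREADY; i++)
        if (i != c && cost[i] <= left)
          {
            left -= cost[i];
            count++;
          }

      if (count > best_count)
        {
          best_count = count;
          best_insn = c;
        }
    }

  return best_insn;
}
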
 1169 /* Return a pointer to the element at position INDEX in the ready list.
 1170    The insn with the highest priority has INDEX 0, and the one with the
 1171    lowest priority has INDEX N_READY - 1.  */
1172
1173 rtx
1174 ready_element (struct ready_list *ready, int index)
1175 {
1176 gcc_assert (ready->n_ready && index < ready->n_ready);
1177
1178 return ready->vec[ready->first - index];
1179 }
1180
1181 /* Remove the element INDEX from the ready list and return it. INDEX
1182 for insn with the highest priority is 0, and the lowest priority
1183 has N_READY - 1. */
1184
1185 HAIFA_INLINE static rtx
1186 ready_remove (struct ready_list *ready, int index)
1187 {
1188 rtx t;
1189 int i;
1190
1191 if (index == 0)
1192 return ready_remove_first (ready);
1193 gcc_assert (ready->n_ready && index < ready->n_ready);
1194 t = ready->vec[ready->first - index];
1195 ready->n_ready--;
1196 if (DEBUG_INSN_P (t))
1197 ready->n_debug--;
1198 for (i = index; i < ready->n_ready; i++)
1199 ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
1200 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1201 return t;
1202 }
1203
1204 /* Remove INSN from the ready list. */
1205 static void
1206 ready_remove_insn (rtx insn)
1207 {
1208 int i;
1209
1210 for (i = 0; i < readyp->n_ready; i++)
1211 if (ready_element (readyp, i) == insn)
1212 {
1213 ready_remove (readyp, i);
1214 return;
1215 }
1216 gcc_unreachable ();
1217 }
1218
1219 /* Sort the ready list READY by ascending priority, using the SCHED_SORT
1220 macro. */
1221
1222 void
1223 ready_sort (struct ready_list *ready)
1224 {
1225 rtx *first = ready_lastpos (ready);
1226 SCHED_SORT (first, ready->n_ready);
1227 }
1228
1229 /* PREV is an insn that is ready to execute. Adjust its priority if that
1230 will help shorten or lengthen register lifetimes as appropriate. Also
1231 provide a hook for the target to tweak itself. */
1232
1233 HAIFA_INLINE static void
1234 adjust_priority (rtx prev)
1235 {
1236 /* ??? There used to be code here to try and estimate how an insn
1237 affected register lifetimes, but it did it by looking at REG_DEAD
1238 notes, which we removed in schedule_region. Nor did it try to
1239 take into account register pressure or anything useful like that.
1240
1241 Revisit when we have a machine model to work with and not before. */
1242
1243 if (targetm.sched.adjust_priority)
1244 INSN_PRIORITY (prev) =
1245 targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
1246 }
1247
1248 /* Advance DFA state STATE on one cycle. */
1249 void
1250 advance_state (state_t state)
1251 {
1252 if (targetm.sched.dfa_pre_advance_cycle)
1253 targetm.sched.dfa_pre_advance_cycle ();
1254
1255 if (targetm.sched.dfa_pre_cycle_insn)
1256 state_transition (state,
1257 targetm.sched.dfa_pre_cycle_insn ());
1258
1259 state_transition (state, NULL);
1260
1261 if (targetm.sched.dfa_post_cycle_insn)
1262 state_transition (state,
1263 targetm.sched.dfa_post_cycle_insn ());
1264
1265 if (targetm.sched.dfa_post_advance_cycle)
1266 targetm.sched.dfa_post_advance_cycle ();
1267 }
1268
1269 /* Advance time on one cycle. */
1270 HAIFA_INLINE static void
1271 advance_one_cycle (void)
1272 {
1273 advance_state (curr_state);
1274 if (sched_verbose >= 6)
1275 fprintf (sched_dump, ";;\tAdvanced a state.\n");
1276 }
1277
1278 /* Clock at which the previous instruction was issued. */
1279 static int last_clock_var;
1280
 1281 /* INSN is the "currently executing insn".  Launch each insn which was
 1282    waiting on INSN.  The function returns the necessary cycle advance
 1283    after issuing the insn (it is not zero for insns in a schedule
 1284    group).  */
1286
1287 static int
1288 schedule_insn (rtx insn)
1289 {
1290 sd_iterator_def sd_it;
1291 dep_t dep;
1292 int advance = 0;
1293
1294 if (sched_verbose >= 1)
1295 {
1296 char buf[2048];
1297
1298 print_insn (buf, insn, 0);
1299 buf[40] = 0;
1300 fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);
1301
1302 if (recog_memoized (insn) < 0)
1303 fprintf (sched_dump, "nothing");
1304 else
1305 print_reservation (sched_dump, insn);
1306 fputc ('\n', sched_dump);
1307 }
1308
1309 /* Scheduling instruction should have all its dependencies resolved and
1310 should have been removed from the ready list. */
1311 gcc_assert (sd_lists_empty_p (insn, SD_LIST_BACK));
1312
1313 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
1314 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
1315
1316 gcc_assert (INSN_TICK (insn) >= MIN_TICK);
1317 if (INSN_TICK (insn) > clock_var)
1318 /* INSN has been prematurely moved from the queue to the ready list.
 1319        This is possible only if the following flag is set.  */
1320 gcc_assert (flag_sched_stalled_insns);
1321
1322 /* ??? Probably, if INSN is scheduled prematurely, we should leave
1323 INSN_TICK untouched. This is a machine-dependent issue, actually. */
1324 INSN_TICK (insn) = clock_var;
1325
1326 /* Update dependent instructions. */
1327 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
1328 sd_iterator_cond (&sd_it, &dep);)
1329 {
1330 rtx next = DEP_CON (dep);
1331
1332 /* Resolve the dependence between INSN and NEXT.
1333 sd_resolve_dep () moves current dep to another list thus
1334 advancing the iterator. */
1335 sd_resolve_dep (sd_it);
1336
1337 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
1338 {
1339 int effective_cost;
1340
1341 effective_cost = try_ready (next);
1342
1343 if (effective_cost >= 0
1344 && SCHED_GROUP_P (next)
1345 && advance < effective_cost)
1346 advance = effective_cost;
1347 }
1348 else
1349 /* Check always has only one forward dependence (to the first insn in
1350 the recovery block), therefore, this will be executed only once. */
1351 {
1352 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
1353 fix_recovery_deps (RECOVERY_BLOCK (insn));
1354 }
1355 }
1356
 1357   /* This is the place where the scheduler basically doesn't need the backward
 1358      and forward dependencies for INSN anymore.  Nevertheless they are used in
 1359      heuristics in rank_for_schedule (), early_queue_to_ready () and in
 1360      some targets (e.g. rs6000).  Thus the earliest place where we *can*
 1361      remove dependencies is after the targetm.sched.md_finish () call in
 1362      schedule_block ().  But, on the other hand, the safest place to remove
 1363      dependencies is when we finish scheduling the entire region.  As we
 1364      don't generate [many] dependencies during scheduling itself, we won't
 1365      need memory until the beginning of the next region.
 1366      Bottom line: dependencies are removed for all insns at the end of
 1367      scheduling the region.  */
1368
1369 /* Annotate the instruction with issue information -- TImode
1370 indicates that the instruction is expected not to be able
1371 to issue on the same cycle as the previous insn. A machine
1372 may use this information to decide how the instruction should
1373 be aligned. */
1374 if (issue_rate > 1
1375 && GET_CODE (PATTERN (insn)) != USE
1376 && GET_CODE (PATTERN (insn)) != CLOBBER
1377 && !DEBUG_INSN_P (insn))
1378 {
1379 if (reload_completed)
1380 PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
1381 last_clock_var = clock_var;
1382 }
1383
1384 return advance;
1385 }
1386
1387 /* Functions for handling of notes. */
1388
1389 /* Insert the INSN note at the end of the notes list. */
1390 static void
1391 add_to_note_list (rtx insn, rtx *note_list_end_p)
1392 {
1393 PREV_INSN (insn) = *note_list_end_p;
1394 if (*note_list_end_p)
1395 NEXT_INSN (*note_list_end_p) = insn;
1396 *note_list_end_p = insn;
1397 }
1398
1399 /* Add note list that ends on FROM_END to the end of TO_ENDP. */
1400 void
1401 concat_note_lists (rtx from_end, rtx *to_endp)
1402 {
1403 rtx from_start;
1404
1405 if (from_end == NULL)
 1406     /* It's easy when we have nothing to concat.  */
1407 return;
1408
1409 if (*to_endp == NULL)
1410 /* It's also easy when destination is empty. */
1411 {
1412 *to_endp = from_end;
1413 return;
1414 }
1415
1416 from_start = from_end;
1417 /* A note list should be traversed via PREV_INSN. */
1418 while (PREV_INSN (from_start) != NULL)
1419 from_start = PREV_INSN (from_start);
1420
1421 add_to_note_list (from_start, to_endp);
1422 *to_endp = from_end;
1423 }
1424
1425 /* Delete notes beginning with INSN and put them in the chain
1426 of notes ended by NOTE_LIST.
1427 Returns the insn following the notes. */
1428 static rtx
1429 unlink_other_notes (rtx insn, rtx tail)
1430 {
1431 rtx prev = PREV_INSN (insn);
1432
1433 while (insn != tail && NOTE_NOT_BB_P (insn))
1434 {
1435 rtx next = NEXT_INSN (insn);
1436 basic_block bb = BLOCK_FOR_INSN (insn);
1437
1438 /* Delete the note from its current position. */
1439 if (prev)
1440 NEXT_INSN (prev) = next;
1441 if (next)
1442 PREV_INSN (next) = prev;
1443
1444 if (bb)
1445 {
1446 /* Basic block can begin with either LABEL or
1447 NOTE_INSN_BASIC_BLOCK. */
1448 gcc_assert (BB_HEAD (bb) != insn);
1449
1450 /* Check if we are removing last insn in the BB. */
1451 if (BB_END (bb) == insn)
1452 BB_END (bb) = prev;
1453 }
1454
1455 /* See sched_analyze to see how these are handled. */
1456 if (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
1457 && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END)
1458 add_to_note_list (insn, &note_list);
1459
1460 insn = next;
1461 }
1462
1463 if (insn == tail)
1464 {
1465 gcc_assert (sel_sched_p ());
1466 return prev;
1467 }
1468
1469 return insn;
1470 }
1471
1472 /* Return the head and tail pointers of ebb starting at BEG and ending
1473 at END. */
1474 void
1475 get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
1476 {
1477 rtx beg_head = BB_HEAD (beg);
1478 rtx beg_tail = BB_END (beg);
1479 rtx end_head = BB_HEAD (end);
1480 rtx end_tail = BB_END (end);
1481
1482 /* Don't include any notes or labels at the beginning of the BEG
 1483      basic block, or notes at the end of the END basic block.  */
1484
1485 if (LABEL_P (beg_head))
1486 beg_head = NEXT_INSN (beg_head);
1487
1488 while (beg_head != beg_tail)
1489 if (NOTE_P (beg_head) || BOUNDARY_DEBUG_INSN_P (beg_head))
1490 beg_head = NEXT_INSN (beg_head);
1491 else
1492 break;
1493
1494 *headp = beg_head;
1495
1496 if (beg == end)
1497 end_head = beg_head;
1498 else if (LABEL_P (end_head))
1499 end_head = NEXT_INSN (end_head);
1500
1501 while (end_head != end_tail)
1502 if (NOTE_P (end_tail) || BOUNDARY_DEBUG_INSN_P (end_tail))
1503 end_tail = PREV_INSN (end_tail);
1504 else
1505 break;
1506
1507 *tailp = end_tail;
1508 }
1509
1510 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
1511
1512 int
1513 no_real_insns_p (const_rtx head, const_rtx tail)
1514 {
1515 while (head != NEXT_INSN (tail))
1516 {
1517 if (!NOTE_P (head) && !LABEL_P (head)
1518 && !BOUNDARY_DEBUG_INSN_P (head))
1519 return 0;
1520 head = NEXT_INSN (head);
1521 }
1522 return 1;
1523 }
1524
1525 /* Delete notes between HEAD and TAIL and put them in the chain
1526 of notes ended by NOTE_LIST. */
1527 static void
1528 rm_other_notes (rtx head, rtx tail)
1529 {
1530 rtx next_tail;
1531 rtx insn;
1532
1533 note_list = 0;
1534 if (head == tail && (! INSN_P (head)))
1535 return;
1536
1537 next_tail = NEXT_INSN (tail);
1538 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1539 {
1540 rtx prev;
1541
1542 /* Farm out notes, and maybe save them in NOTE_LIST.
1543 This is needed to keep the debugger from
1544 getting completely deranged. */
1545 if (NOTE_NOT_BB_P (insn))
1546 {
1547 prev = insn;
1548 insn = unlink_other_notes (insn, next_tail);
1549
1550 gcc_assert ((sel_sched_p ()
1551 || prev != tail) && prev != head && insn != next_tail);
1552 }
1553 }
1554 }
1555
1556 /* Same as above, but also process REG_SAVE_NOTEs of HEAD. */
1557 void
1558 remove_notes (rtx head, rtx tail)
1559 {
1560 /* rm_other_notes only removes notes which are _inside_ the
1561 block---that is, it won't remove notes before the first real insn
1562 or after the last real insn of the block. So if the first insn
1563 has a REG_SAVE_NOTE which would otherwise be emitted before the
1564 insn, it is redundant with the note before the start of the
1565 block, and so we have to take it out. */
1566 if (INSN_P (head))
1567 {
1568 rtx note;
1569
1570 for (note = REG_NOTES (head); note; note = XEXP (note, 1))
1571 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
1572 remove_note (head, note);
1573 }
1574
1575 /* Remove remaining note insns from the block, save them in
1576 note_list. These notes are restored at the end of
1577 schedule_block (). */
1578 rm_other_notes (head, tail);
1579 }
1580
1581 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
1582 previously found among the insns. Insert them just before HEAD. */
1583 rtx
1584 restore_other_notes (rtx head, basic_block head_bb)
1585 {
1586 if (note_list != 0)
1587 {
1588 rtx note_head = note_list;
1589
1590 if (head)
1591 head_bb = BLOCK_FOR_INSN (head);
1592 else
1593 head = NEXT_INSN (bb_note (head_bb));
1594
1595 while (PREV_INSN (note_head))
1596 {
1597 set_block_for_insn (note_head, head_bb);
1598 note_head = PREV_INSN (note_head);
1599 }
 1600       /* The loop above misses this last note.  */
1601 set_block_for_insn (note_head, head_bb);
1602
1603 PREV_INSN (note_head) = PREV_INSN (head);
1604 NEXT_INSN (PREV_INSN (head)) = note_head;
1605 PREV_INSN (head) = note_list;
1606 NEXT_INSN (note_list) = head;
1607
1608 if (BLOCK_FOR_INSN (head) != head_bb)
1609 BB_END (head_bb) = note_list;
1610
1611 head = note_head;
1612 }
1613
1614 return head;
1615 }
1616
1617 /* Functions for computation of registers live/usage info. */
1618
1619 /* This function looks for a new register being defined.
1620 If the destination register is already used by the source,
1621 a new register is not needed. */
1622 static int
1623 find_set_reg_weight (const_rtx x)
1624 {
1625 if (GET_CODE (x) == CLOBBER
1626 && register_operand (SET_DEST (x), VOIDmode))
1627 return 1;
1628 if (GET_CODE (x) == SET
1629 && register_operand (SET_DEST (x), VOIDmode))
1630 {
1631 if (REG_P (SET_DEST (x)))
1632 {
1633 if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
1634 return 1;
1635 else
1636 return 0;
1637 }
1638 return 1;
1639 }
1640 return 0;
1641 }
1642
1643 /* Calculate INSN_REG_WEIGHT for INSN. */
1644 static void
1645 find_insn_reg_weight (const_rtx insn)
1646 {
1647 int reg_weight = 0;
1648 rtx x;
1649
1650 /* Handle register life information. */
1651 if (! INSN_P (insn))
1652 return;
1653
1654 /* Increment weight for each register born here. */
1655 x = PATTERN (insn);
1656 reg_weight += find_set_reg_weight (x);
1657 if (GET_CODE (x) == PARALLEL)
1658 {
1659 int j;
1660 for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
1661 {
1662 x = XVECEXP (PATTERN (insn), 0, j);
1663 reg_weight += find_set_reg_weight (x);
1664 }
1665 }
1666 /* Decrement weight for each register that dies here. */
1667 for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
1668 {
1669 if (REG_NOTE_KIND (x) == REG_DEAD
1670 || REG_NOTE_KIND (x) == REG_UNUSED)
1671 reg_weight--;
1672 }
1673
1674 INSN_REG_WEIGHT (insn) = reg_weight;
1675 }
1676
1677 /* Move insns that became ready to fire from queue to ready list. */
1678
1679 static void
1680 queue_to_ready (struct ready_list *ready)
1681 {
1682 rtx insn;
1683 rtx link;
1684 rtx skip_insn;
1685
1686 q_ptr = NEXT_Q (q_ptr);
1687
1688 if (dbg_cnt (sched_insn) == false)
1689 {
1690 /* If debug counter is activated do not requeue insn next after
1691 last_scheduled_insn. */
1692 skip_insn = next_nonnote_insn (last_scheduled_insn);
1693 while (skip_insn && DEBUG_INSN_P (skip_insn))
1694 skip_insn = next_nonnote_insn (skip_insn);
1695 }
1696 else
1697 skip_insn = NULL_RTX;
1698
1699 /* Add all pending insns that can be scheduled without stalls to the
1700 ready list. */
1701 for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
1702 {
1703 insn = XEXP (link, 0);
1704 q_size -= 1;
1705
1706 if (sched_verbose >= 2)
1707 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
1708 (*current_sched_info->print_insn) (insn, 0));
1709
1710 /* If the ready list is full, delay the insn for 1 cycle.
1711 See the comment in schedule_block for the rationale. */
1712 if (!reload_completed
1713 && ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
1714 && !SCHED_GROUP_P (insn)
1715 && insn != skip_insn)
1716 {
1717 if (sched_verbose >= 2)
1718 fprintf (sched_dump, "requeued because ready full\n");
1719 queue_insn (insn, 1);
1720 }
1721 else
1722 {
1723 ready_add (ready, insn, false);
1724 if (sched_verbose >= 2)
1725 fprintf (sched_dump, "moving to ready without stalls\n");
1726 }
1727 }
1728 free_INSN_LIST_list (&insn_queue[q_ptr]);
1729
1730 /* If there are no ready insns, stall until one is ready and add all
1731 of the pending insns at that point to the ready list. */
1732 if (ready->n_ready == 0)
1733 {
1734 int stalls;
1735
1736 for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
1737 {
1738 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
1739 {
1740 for (; link; link = XEXP (link, 1))
1741 {
1742 insn = XEXP (link, 0);
1743 q_size -= 1;
1744
1745 if (sched_verbose >= 2)
1746 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
1747 (*current_sched_info->print_insn) (insn, 0));
1748
1749 ready_add (ready, insn, false);
1750 if (sched_verbose >= 2)
1751 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
1752 }
1753 free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
1754
1755 advance_one_cycle ();
1756
1757 break;
1758 }
1759
1760 advance_one_cycle ();
1761 }
1762
1763 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
1764 clock_var += stalls;
1765 }
1766 }
1767
1768 /* Used by early_queue_to_ready. Determines whether it is "ok" to
1769 prematurely move INSN from the queue to the ready list. Currently,
1770 if a target defines the hook 'is_costly_dependence', this function
1771 uses the hook to check whether there exist any dependences which are
1772 considered costly by the target, between INSN and other insns that
1773 have already been scheduled. Dependences are checked up to Y cycles
 1774    back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
 1775    controlling this value.
 1776    (Other considerations could be taken into account instead (or in
 1777    addition) depending on user flags and target hooks.)  */
1778
1779 static bool
1780 ok_for_early_queue_removal (rtx insn)
1781 {
1782 int n_cycles;
1783 rtx prev_insn = last_scheduled_insn;
1784
1785 if (targetm.sched.is_costly_dependence)
1786 {
1787 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
1788 {
1789 for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
1790 {
1791 int cost;
1792
1793 if (prev_insn == current_sched_info->prev_head)
1794 {
1795 prev_insn = NULL;
1796 break;
1797 }
1798
1799 if (!NOTE_P (prev_insn))
1800 {
1801 dep_t dep;
1802
1803 dep = sd_find_dep_between (prev_insn, insn, true);
1804
1805 if (dep != NULL)
1806 {
1807 cost = dep_cost (dep);
1808
1809 if (targetm.sched.is_costly_dependence (dep, cost,
1810 flag_sched_stalled_insns_dep - n_cycles))
1811 return false;
1812 }
1813 }
1814
1815 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
1816 break;
1817 }
1818
1819 if (!prev_insn)
1820 break;
1821 prev_insn = PREV_INSN (prev_insn);
1822 }
1823 }
1824
1825 return true;
1826 }
1827
1828
1829 /* Remove insns from the queue, before they become "ready" with respect
1830 to FU latency considerations. */
1831
1832 static int
1833 early_queue_to_ready (state_t state, struct ready_list *ready)
1834 {
1835 rtx insn;
1836 rtx link;
1837 rtx next_link;
1838 rtx prev_link;
1839 bool move_to_ready;
1840 int cost;
1841 state_t temp_state = alloca (dfa_state_size);
1842 int stalls;
1843 int insns_removed = 0;
1844
1845 /*
1846 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
1847 function:
1848
1849 X == 0: There is no limit on how many queued insns can be removed
1850 prematurely. (flag_sched_stalled_insns = -1).
1851
1852 X >= 1: Only X queued insns can be removed prematurely in each
1853 invocation. (flag_sched_stalled_insns = X).
1854
1855 Otherwise: Early queue removal is disabled.
1856 (flag_sched_stalled_insns = 0)
1857 */
1858
1859 if (! flag_sched_stalled_insns)
1860 return 0;
1861
1862 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
1863 {
1864 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
1865 {
1866 if (sched_verbose > 6)
1867 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
1868
1869 prev_link = 0;
1870 while (link)
1871 {
1872 next_link = XEXP (link, 1);
1873 insn = XEXP (link, 0);
1874 if (insn && sched_verbose > 6)
1875 print_rtl_single (sched_dump, insn);
1876
1877 memcpy (temp_state, state, dfa_state_size);
1878 if (recog_memoized (insn) < 0)
1879 /* Use a non-negative cost to indicate that the insn is not
1880 ready, avoiding an infinite Q->R->Q->R... cycle. */
1881 cost = 0;
1882 else
1883 cost = state_transition (temp_state, insn);
1884
1885 if (sched_verbose >= 6)
1886 fprintf (sched_dump, "transition cost = %d\n", cost);
1887
1888 move_to_ready = false;
1889 if (cost < 0)
1890 {
1891 move_to_ready = ok_for_early_queue_removal (insn);
1892 if (move_to_ready == true)
1893 {
1894 /* move from Q to R */
1895 q_size -= 1;
1896 ready_add (ready, insn, false);
1897
1898 if (prev_link)
1899 XEXP (prev_link, 1) = next_link;
1900 else
1901 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
1902
1903 free_INSN_LIST_node (link);
1904
1905 if (sched_verbose >= 2)
1906 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
1907 (*current_sched_info->print_insn) (insn, 0));
1908
1909 insns_removed++;
1910 if (insns_removed == flag_sched_stalled_insns)
1911 /* Remove no more than flag_sched_stalled_insns insns
1912 from Q at a time. */
1913 return insns_removed;
1914 }
1915 }
1916
1917 if (move_to_ready == false)
1918 prev_link = link;
1919
1920 link = next_link;
1921 } /* while link */
1922 } /* if link */
1923
1924 } /* for stalls.. */
1925
1926 return insns_removed;
1927 }
1928
1929
1930 /* Print the ready list for debugging purposes. Callable from debugger. */
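/* For example, from a debugger one might evaluate
   "call debug_ready_list (&ready)" -- assuming the file-static `ready'
   list is the one of interest.  */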
1931
1932 static void
1933 debug_ready_list (struct ready_list *ready)
1934 {
1935 rtx *p;
1936 int i;
1937
1938 if (ready->n_ready == 0)
1939 {
1940 fprintf (sched_dump, "\n");
1941 return;
1942 }
1943
1944 p = ready_lastpos (ready);
1945 for (i = 0; i < ready->n_ready; i++)
1946 fprintf (sched_dump, " %s", (*current_sched_info->print_insn) (p[i], 0));
1947 fprintf (sched_dump, "\n");
1948 }
1949
1950 /* Search INSN for REG_SAVE_NOTE note pairs for
1951 NOTE_INSN_EH_REGION_{BEG,END}; and convert them back into
1952 NOTEs. The REG_SAVE_NOTE note following the first one contains the
1953 saved value for NOTE_BLOCK_NUMBER which is useful for
1954 NOTE_INSN_EH_REGION_{BEG,END} NOTEs. */
1955 void
1956 reemit_notes (rtx insn)
1957 {
1958 rtx note, last = insn;
1959
1960 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
1961 {
1962 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
1963 {
1964 enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
1965
1966 last = emit_note_before (note_type, last);
1967 remove_note (insn, note);
1968 }
1969 }
1970 }
1971
1972 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
1973 static void
1974 move_insn (rtx insn, rtx last, rtx nt)
1975 {
1976 if (PREV_INSN (insn) != last)
1977 {
1978 basic_block bb;
1979 rtx note;
1980 int jump_p = 0;
1981
1982 bb = BLOCK_FOR_INSN (insn);
1983
1984 /* BB_HEAD is either LABEL or NOTE. */
1985 gcc_assert (BB_HEAD (bb) != insn);
1986
1987 if (BB_END (bb) == insn)
1988 /* If this is the last instruction in BB, move the end marker one
1989 instruction up. */
1990 {
1991 /* Jumps are always placed at the end of basic block. */
1992 jump_p = control_flow_insn_p (insn);
1993
1994 gcc_assert (!jump_p
1995 || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
1996 && IS_SPECULATION_BRANCHY_CHECK_P (insn))
1997 || (common_sched_info->sched_pass_id
1998 == SCHED_EBB_PASS));
1999
2000 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
2001
2002 BB_END (bb) = PREV_INSN (insn);
2003 }
2004
2005 gcc_assert (BB_END (bb) != last);
2006
2007 if (jump_p)
2008 /* We move the block note along with jump. */
2009 {
2010 gcc_assert (nt);
2011
2012 note = NEXT_INSN (insn);
2013 while (NOTE_NOT_BB_P (note) && note != nt)
2014 note = NEXT_INSN (note);
2015
2016 if (note != nt
2017 && (LABEL_P (note)
2018 || BARRIER_P (note)))
2019 note = NEXT_INSN (note);
2020
2021 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
2022 }
2023 else
2024 note = insn;
2025
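/* Unlink the INSN..NOTE range from its old place in the insn chain and
   splice it back in immediately after LAST.  */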
2026 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
2027 PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
2028
2029 NEXT_INSN (note) = NEXT_INSN (last);
2030 PREV_INSN (NEXT_INSN (last)) = note;
2031
2032 NEXT_INSN (last) = insn;
2033 PREV_INSN (insn) = last;
2034
2035 bb = BLOCK_FOR_INSN (last);
2036
2037 if (jump_p)
2038 {
2039 fix_jump_move (insn);
2040
2041 if (BLOCK_FOR_INSN (insn) != bb)
2042 move_block_after_check (insn);
2043
2044 gcc_assert (BB_END (bb) == last);
2045 }
2046
2047 df_insn_change_bb (insn, bb);
2048
2049 /* Update BB_END, if needed. */
2050 if (BB_END (bb) == last)
2051 BB_END (bb) = insn;
2052 }
2053
2054 SCHED_GROUP_P (insn) = 0;
2055 }
2056
2057 /* Return true if scheduling INSN will finish current clock cycle. */
2058 static bool
2059 insn_finishes_cycle_p (rtx insn)
2060 {
2061 if (SCHED_GROUP_P (insn))
2062 /* After issuing INSN, the rest of the sched_group will be forced to issue
2063 in order. Don't make any plans for the rest of the cycle. */
2064 return true;
2065
2066 /* Finishing the block will, apparently, finish the cycle. */
2067 if (current_sched_info->insn_finishes_block_p
2068 && current_sched_info->insn_finishes_block_p (insn))
2069 return true;
2070
2071 return false;
2072 }
2073
2074 /* The following structure describes an entry of the stack of choices. */
2075 struct choice_entry
2076 {
2077 /* Ordinal number of the issued insn in the ready queue. */
2078 int index;
2079 /* The number of remaining insns whose issue we should still try. */
2080 int rest;
2081 /* The number of issued essential insns. */
2082 int n;
2083 /* State after issuing the insn. */
2084 state_t state;
2085 };
2086
2087 /* The following array is used to implement a stack of choices used in
2088 function max_issue. */
2089 static struct choice_entry *choice_stack;
2090
2091 /* The following variable holds the number of essential insns issued on
2092 the current cycle. An insn is essential if it changes the
2093 processor's state. */
2094 int cycle_issued_insns;
2095
2096 /* This holds the value of the target dfa_lookahead hook. */
2097 int dfa_lookahead;
2098
2099 /* The following variable is the maximal number of tries of issuing
2100 insns for the first cycle multipass insn scheduling. We define
2101 this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE). We would not
2102 need this constraint if all real insns (with non-negative codes)
2103 had reservations because in this case the algorithm complexity is
2104 O(DFA_LOOKAHEAD**ISSUE_RATE). Unfortunately, the dfa descriptions
2105 might be incomplete and such insns might occur. For such
2106 descriptions, the complexity of the algorithm (without the constraint)
2107 could reach DFA_LOOKAHEAD ** N, where N is the queue length. */
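/* Purely illustrative example: with dfa_lookahead == 4 and issue_rate == 2,
   the initialization loop in max_issue below yields
   max_lookahead_tries == 100 * 4 * 4 == 1600.  */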
2108 static int max_lookahead_tries;
2109
2110 /* The following value is the value of the hook
2111 `first_cycle_multipass_dfa_lookahead' at the last call of
2112 `max_issue'. */
2113 static int cached_first_cycle_multipass_dfa_lookahead = 0;
2114
2115 /* The following value is the value of `issue_rate' at the last call of
2116 `sched_init'. */
2117 static int cached_issue_rate = 0;
2118
2119 /* The following function returns the maximal (or close to maximal) number
2120 of insns which can be issued on the same cycle, one of which is the
2121 insn with the best rank (the first insn in READY). To do this, the
2122 function tries different samples of ready insns. READY is the current
2123 queue `ready'. The global array READY_TRY reflects which insns are
2124 already issued in this try. MAX_POINTS is the sum of the points of the
2125 ready insns that can still be issued on this cycle. The function stops
2126 immediately if it reaches a solution in which all instructions can be
2127 issued. INDEX will contain the index of the best insn in READY. This
2128 function is used only for first cycle multipass scheduling.
2129
2130 PRIVILEGED_N >= 0
2131
2132 This function expects recognized insns only. All USEs,
2133 CLOBBERs, etc must be filtered elsewhere. */
2134 int
2135 max_issue (struct ready_list *ready, int privileged_n, state_t state,
2136 int *index)
2137 {
2138 int n, i, all, n_ready, best, delay, tries_num, points = -1, max_points;
2139 int more_issue;
2140 struct choice_entry *top;
2141 rtx insn;
2142
2143 n_ready = ready->n_ready;
2144 gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
2145 && privileged_n <= n_ready);
2146
2147 /* Init MAX_LOOKAHEAD_TRIES. */
2148 if (cached_first_cycle_multipass_dfa_lookahead != dfa_lookahead)
2149 {
2150 cached_first_cycle_multipass_dfa_lookahead = dfa_lookahead;
2151 max_lookahead_tries = 100;
2152 for (i = 0; i < issue_rate; i++)
2153 max_lookahead_tries *= dfa_lookahead;
2154 }
2155
2156 /* Init max_points. */
2157 max_points = 0;
2158 more_issue = issue_rate - cycle_issued_insns;
2159
2160 /* ??? We used to assert here that we never issue more insns than issue_rate.
2161 However, some targets (e.g. MIPS/SB1) claim lower issue rate than can be
2162 achieved to get better performance. Until these targets are fixed to use
2163 scheduler hooks to manipulate insns priority instead, the assert should
2164 be disabled.
2165
2166 gcc_assert (more_issue >= 0); */
2167
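/* Only the first MORE_ISSUE not-yet-tried ready insns can still be issued
   on this cycle, so only their points contribute to MAX_POINTS.  */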
2168 for (i = 0; i < n_ready; i++)
2169 if (!ready_try [i])
2170 {
2171 if (more_issue-- > 0)
2172 max_points += ISSUE_POINTS (ready_element (ready, i));
2173 else
2174 break;
2175 }
2176
2177 /* The number of the issued insns in the best solution. */
2178 best = 0;
2179
2180 top = choice_stack;
2181
2182 /* Set initial state of the search. */
2183 memcpy (top->state, state, dfa_state_size);
2184 top->rest = dfa_lookahead;
2185 top->n = 0;
2186
2187 /* Count the number of the insns to search among. */
2188 for (all = i = 0; i < n_ready; i++)
2189 if (!ready_try [i])
2190 all++;
2191
2192 /* I is the index of the insn to try next. */
2193 i = 0;
2194 tries_num = 0;
2195 for (;;)
2196 {
2197 if (/* If we've reached a dead end or searched enough of what we have
2198 been asked... */
2199 top->rest == 0
2200 /* Or have nothing else to try. */
2201 || i >= n_ready)
2202 {
2203 /* ??? (... || i == n_ready). */
2204 gcc_assert (i <= n_ready);
2205
2206 if (top == choice_stack)
2207 break;
2208
2209 if (best < top - choice_stack)
2210 {
2211 if (privileged_n)
2212 {
2213 n = privileged_n;
2214 /* Try to find issued privileged insn. */
2215 while (n && !ready_try[--n]);
2216 }
2217
2218 if (/* If all insns are equally good... */
2219 privileged_n == 0
2220 /* Or a privileged insn will be issued. */
2221 || ready_try[n])
2222 /* Then we have a solution. */
2223 {
2224 best = top - choice_stack;
2225 /* This is the index of the insn issued first in this
2226 solution. */
2227 *index = choice_stack [1].index;
2228 points = top->n;
2229 if (top->n == max_points || best == all)
2230 break;
2231 }
2232 }
2233
2234 /* Set ready-list index to point to the last insn
2235 ('i++' below will advance it to the next insn). */
2236 i = top->index;
2237
2238 /* Backtrack. */
2239 ready_try [i] = 0;
2240 top--;
2241 memcpy (state, top->state, dfa_state_size);
2242 }
2243 else if (!ready_try [i])
2244 {
2245 tries_num++;
2246 if (tries_num > max_lookahead_tries)
2247 break;
2248 insn = ready_element (ready, i);
2249 delay = state_transition (state, insn);
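/* A negative value from state_transition means INSN can be issued in the
   current DFA state without an additional stall.  */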
2250 if (delay < 0)
2251 {
2252 if (state_dead_lock_p (state)
2253 || insn_finishes_cycle_p (insn))
2254 /* We won't issue any more instructions in the next
2255 choice_state. */
2256 top->rest = 0;
2257 else
2258 top->rest--;
2259
2260 n = top->n;
2261 if (memcmp (top->state, state, dfa_state_size) != 0)
2262 n += ISSUE_POINTS (insn);
2263
2264 /* Advance to the next choice_entry. */
2265 top++;
2266 /* Initialize it. */
2267 top->rest = dfa_lookahead;
2268 top->index = i;
2269 top->n = n;
2270 memcpy (top->state, state, dfa_state_size);
2271
2272 ready_try [i] = 1;
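/* Restart the scan of the ready list from index 0 (the `i++' at the
   bottom of the loop brings -1 up to 0).  */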
2273 i = -1;
2274 }
2275 }
2276
2277 /* Increase ready-list index. */
2278 i++;
2279 }
2280
2281 /* Restore the original state of the DFA. */
2282 memcpy (state, choice_stack->state, dfa_state_size);
2283
2284 return best;
2285 }
2286
2287 /* The following function chooses an insn from READY and modifies
2288 READY. It is used only for first
2289 cycle multipass scheduling.
2290 Return:
2291 -1 if cycle should be advanced,
2292 0 if INSN_PTR is set to point to the desirable insn,
2293 1 if choose_ready () should be restarted without advancing the cycle. */
2294 static int
2295 choose_ready (struct ready_list *ready, rtx *insn_ptr)
2296 {
2297 int lookahead;
2298
2299 if (dbg_cnt (sched_insn) == false)
2300 {
2301 rtx insn;
2302
2303 insn = next_nonnote_insn (last_scheduled_insn);
2304
2305 if (QUEUE_INDEX (insn) == QUEUE_READY)
2306 /* INSN is in the ready_list. */
2307 {
2308 ready_remove_insn (insn);
2309 *insn_ptr = insn;
2310 return 0;
2311 }
2312
2313 /* INSN is in the queue. Advance cycle to move it to the ready list. */
2314 return -1;
2315 }
2316
2317 lookahead = 0;
2318
2319 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
2320 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
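/* Without a usable lookahead, or when the first ready insn is a sched
   group member or a debug insn, skip the multipass search and simply
   take the head of the ready list.  */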
2321 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
2322 || DEBUG_INSN_P (ready_element (ready, 0)))
2323 {
2324 *insn_ptr = ready_remove_first (ready);
2325 return 0;
2326 }
2327 else
2328 {
2329 /* Try to choose the better insn. */
2330 int index = 0, i, n;
2331 rtx insn;
2332 int try_data = 1, try_control = 1;
2333 ds_t ts;
2334
2335 insn = ready_element (ready, 0);
2336 if (INSN_CODE (insn) < 0)
2337 {
2338 *insn_ptr = ready_remove_first (ready);
2339 return 0;
2340 }
2341
2342 if (spec_info
2343 && spec_info->flags & (PREFER_NON_DATA_SPEC
2344 | PREFER_NON_CONTROL_SPEC))
2345 {
2346 for (i = 0, n = ready->n_ready; i < n; i++)
2347 {
2348 rtx x;
2349 ds_t s;
2350
2351 x = ready_element (ready, i);
2352 s = TODO_SPEC (x);
2353
2354 if (spec_info->flags & PREFER_NON_DATA_SPEC
2355 && !(s & DATA_SPEC))
2356 {
2357 try_data = 0;
2358 if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
2359 || !try_control)
2360 break;
2361 }
2362
2363 if (spec_info->flags & PREFER_NON_CONTROL_SPEC
2364 && !(s & CONTROL_SPEC))
2365 {
2366 try_control = 0;
2367 if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
2368 break;
2369 }
2370 }
2371 }
2372
2373 ts = TODO_SPEC (insn);
2374 if ((ts & SPECULATIVE)
2375 && (((!try_data && (ts & DATA_SPEC))
2376 || (!try_control && (ts & CONTROL_SPEC)))
2377 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2378 && !targetm.sched
2379 .first_cycle_multipass_dfa_lookahead_guard_spec (insn))))
2380 /* Discard speculative instruction that stands first in the ready
2381 list. */
2382 {
2383 change_queue_index (insn, 1);
2384 return 1;
2385 }
2386
2387 ready_try[0] = 0;
2388
2389 for (i = 1; i < ready->n_ready; i++)
2390 {
2391 insn = ready_element (ready, i);
2392
2393 ready_try [i]
2394 = ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2395 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC)));
2396 }
2397
2398 /* Let the target filter the search space. */
2399 for (i = 1; i < ready->n_ready; i++)
2400 if (!ready_try[i])
2401 {
2402 insn = ready_element (ready, i);
2403
2404 #ifdef ENABLE_CHECKING
2405 /* If this insn is recognizable we should have already
2406 recognized it earlier.
2407 ??? Not very clear where this is supposed to be done.
2408 See dep_cost_1. */
2409 gcc_assert (INSN_CODE (insn) >= 0
2410 || recog_memoized (insn) < 0);
2411 #endif
2412
2413 ready_try [i]
2414 = (/* INSN_CODE check can be omitted here as it is also done later
2415 in max_issue (). */
2416 INSN_CODE (insn) < 0
2417 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2418 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2419 (insn)));
2420 }
2421
2422 if (max_issue (ready, 1, curr_state, &index) == 0)
2423 {
2424 *insn_ptr = ready_remove_first (ready);
2425 if (sched_verbose >= 4)
2426 fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
2427 (*current_sched_info->print_insn) (*insn_ptr, 0));
2428 return 0;
2429 }
2430 else
2431 {
2432 if (sched_verbose >= 4)
2433 fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
2434 (*current_sched_info->print_insn)
2435 (ready_element (ready, index), 0));
2436
2437 *insn_ptr = ready_remove (ready, index);
2438 return 0;
2439 }
2440 }
2441 }
2442
2443 /* Use forward list scheduling to rearrange insns of block pointed to by
2444 TARGET_BB, possibly bringing insns from subsequent blocks in the same
2445 region. */
2446
2447 void
2448 schedule_block (basic_block *target_bb)
2449 {
2450 int i, first_cycle_insn_p;
2451 int can_issue_more;
2452 state_t temp_state = NULL; /* It is used for multipass scheduling. */
2453 int sort_p, advance, start_clock_var;
2454
2455 /* Head/tail info for this block. */
2456 rtx prev_head = current_sched_info->prev_head;
2457 rtx next_tail = current_sched_info->next_tail;
2458 rtx head = NEXT_INSN (prev_head);
2459 rtx tail = PREV_INSN (next_tail);
2460
2461 /* We used to have code to avoid getting parameters moved from hard
2462 argument registers into pseudos.
2463
2464 However, it was removed when it proved to be of marginal benefit
2465 and caused problems because schedule_block and compute_forward_dependences
2466 had different notions of what the "head" insn was. */
2467
2468 gcc_assert (head != tail || INSN_P (head));
2469
2470 haifa_recovery_bb_recently_added_p = false;
2471
2472 /* Debug info. */
2473 if (sched_verbose)
2474 dump_new_block_header (0, *target_bb, head, tail);
2475
2476 state_reset (curr_state);
2477
2478 /* Clear the ready list. */
2479 ready.first = ready.veclen - 1;
2480 ready.n_ready = 0;
2481 ready.n_debug = 0;
2482
2483 /* It is used for first cycle multipass scheduling. */
2484 temp_state = alloca (dfa_state_size);
2485
2486 if (targetm.sched.md_init)
2487 targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);
2488
2489 /* We start inserting insns after PREV_HEAD. */
2490 last_scheduled_insn = prev_head;
2491
2492 gcc_assert ((NOTE_P (last_scheduled_insn)
2493 || BOUNDARY_DEBUG_INSN_P (last_scheduled_insn))
2494 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
2495
2496 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
2497 queue. */
2498 q_ptr = 0;
2499 q_size = 0;
2500
2501 insn_queue = XALLOCAVEC (rtx, max_insn_queue_index + 1);
2502 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
2503
2504 /* Start just before the beginning of time. */
2505 clock_var = -1;
2506
2507 /* We need the queue and ready lists and clock_var to be initialized
2508 in try_ready () (which is called through init_ready_list ()). */
2509 (*current_sched_info->init_ready_list) ();
2510
2511 /* The algorithm is O(n^2) in the number of ready insns at any given
2512 time in the worst case. Before reload we are more likely to have
2513 big lists so truncate them to a reasonable size. */
2514 if (!reload_completed
2515 && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
2516 {
2517 ready_sort (&ready);
2518
2519 /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
2520 If there are debug insns, we know they're first. */
2521 for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
2522 if (!SCHED_GROUP_P (ready_element (&ready, i)))
2523 break;
2524
2525 if (sched_verbose >= 2)
2526 {
2527 fprintf (sched_dump,
2528 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
2529 fprintf (sched_dump,
2530 ";;\t\t before reload => truncated to %d insns\n", i);
2531 }
2532
2533 /* Delay all insns past it for 1 cycle. If debug counter is
2534 activated make an exception for the insn right after
2535 last_scheduled_insn. */
2536 {
2537 rtx skip_insn;
2538
2539 if (dbg_cnt (sched_insn) == false)
2540 skip_insn = next_nonnote_insn (last_scheduled_insn);
2541 else
2542 skip_insn = NULL_RTX;
2543
2544 while (i < ready.n_ready)
2545 {
2546 rtx insn;
2547
2548 insn = ready_remove (&ready, i);
2549
2550 if (insn != skip_insn)
2551 queue_insn (insn, 1);
2552 }
2553 }
2554 }
2555
2556 /* Now we can restore basic block notes and maintain precise cfg. */
2557 restore_bb_notes (*target_bb);
2558
2559 last_clock_var = -1;
2560
2561 advance = 0;
2562
2563 sort_p = TRUE;
2564 /* Loop until all the insns in BB are scheduled. */
2565 while ((*current_sched_info->schedule_more_p) ())
2566 {
2567 do
2568 {
2569 start_clock_var = clock_var;
2570
2571 clock_var++;
2572
2573 advance_one_cycle ();
2574
2575 /* Add to the ready list all pending insns that can be issued now.
2576 If there are no ready insns, increment clock until one
2577 is ready and add all pending insns at that point to the ready
2578 list. */
2579 queue_to_ready (&ready);
2580
2581 gcc_assert (ready.n_ready);
2582
2583 if (sched_verbose >= 2)
2584 {
2585 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
2586 debug_ready_list (&ready);
2587 }
2588 advance -= clock_var - start_clock_var;
2589 }
2590 while (advance > 0);
2591
2592 if (sort_p)
2593 {
2594 /* Sort the ready list based on priority. */
2595 ready_sort (&ready);
2596
2597 if (sched_verbose >= 2)
2598 {
2599 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
2600 debug_ready_list (&ready);
2601 }
2602 }
2603
2604 /* We don't want md sched reorder to even see debug insns, so put
2605 them out right away. */
2606 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
2607 {
2608 if (control_flow_insn_p (last_scheduled_insn))
2609 {
2610 *target_bb = current_sched_info->advance_target_bb
2611 (*target_bb, 0);
2612
2613 if (sched_verbose)
2614 {
2615 rtx x;
2616
2617 x = next_real_insn (last_scheduled_insn);
2618 gcc_assert (x);
2619 dump_new_block_header (1, *target_bb, x, tail);
2620 }
2621
2622 last_scheduled_insn = bb_note (*target_bb);
2623 }
2624
2625 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
2626 {
2627 rtx insn = ready_remove_first (&ready);
2628 gcc_assert (DEBUG_INSN_P (insn));
2629 (*current_sched_info->begin_schedule_ready) (insn,
2630 last_scheduled_insn);
2631 move_insn (insn, last_scheduled_insn,
2632 current_sched_info->next_tail);
2633 last_scheduled_insn = insn;
2634 advance = schedule_insn (insn);
2635 gcc_assert (advance == 0);
2636 if (ready.n_ready > 0)
2637 ready_sort (&ready);
2638 }
2639
2640 if (!ready.n_ready)
2641 continue;
2642 }
2643
2644 /* Allow the target to reorder the list, typically for
2645 better instruction bundling. */
2646 if (sort_p && targetm.sched.reorder
2647 && (ready.n_ready == 0
2648 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2649 can_issue_more =
2650 targetm.sched.reorder (sched_dump, sched_verbose,
2651 ready_lastpos (&ready),
2652 &ready.n_ready, clock_var);
2653 else
2654 can_issue_more = issue_rate;
2655
2656 first_cycle_insn_p = 1;
2657 cycle_issued_insns = 0;
2658 for (;;)
2659 {
2660 rtx insn;
2661 int cost;
2662 bool asm_p = false;
2663
2664 if (sched_verbose >= 2)
2665 {
2666 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
2667 clock_var);
2668 debug_ready_list (&ready);
2669 }
2670
2671 if (ready.n_ready == 0
2672 && can_issue_more
2673 && reload_completed)
2674 {
2675 /* Allow scheduling insns directly from the queue in case
2676 there's nothing better to do (ready list is empty) but
2677 there are still vacant dispatch slots in the current cycle. */
2678 if (sched_verbose >= 6)
2679 fprintf (sched_dump,";;\t\tSecond chance\n");
2680 memcpy (temp_state, curr_state, dfa_state_size);
2681 if (early_queue_to_ready (temp_state, &ready))
2682 ready_sort (&ready);
2683 }
2684
2685 if (ready.n_ready == 0
2686 || !can_issue_more
2687 || state_dead_lock_p (curr_state)
2688 || !(*current_sched_info->schedule_more_p) ())
2689 break;
2690
2691 /* Select and remove the insn from the ready list. */
2692 if (sort_p)
2693 {
2694 int res;
2695
2696 insn = NULL_RTX;
2697 res = choose_ready (&ready, &insn);
2698
2699 if (res < 0)
2700 /* Finish cycle. */
2701 break;
2702 if (res > 0)
2703 /* Restart choose_ready (). */
2704 continue;
2705
2706 gcc_assert (insn != NULL_RTX);
2707 }
2708 else
2709 insn = ready_remove_first (&ready);
2710
2711 if (targetm.sched.dfa_new_cycle
2712 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
2713 insn, last_clock_var,
2714 clock_var, &sort_p))
2715 /* SORT_P is used by the target to override sorting
2716 of the ready list. This is needed when the target
2717 has modified its internal structures expecting that
2718 the insn will be issued next. As we need the insn
2719 to have the highest priority (so it will be returned by
2720 the ready_remove_first call above), we invoke
2721 ready_add (&ready, insn, true).
2722 But, still, there is one issue: INSN can be later
2723 discarded by scheduler's front end through
2724 current_sched_info->can_schedule_ready_p, hence, won't
2725 be issued next. */
2726 {
2727 ready_add (&ready, insn, true);
2728 break;
2729 }
2730
2731 sort_p = TRUE;
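/* Trial-issue INSN on a scratch copy of the DFA state; CURR_STATE is
   updated further below only if the transition actually changed the
   state.  */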
2732 memcpy (temp_state, curr_state, dfa_state_size);
2733 if (recog_memoized (insn) < 0)
2734 {
2735 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
2736 || asm_noperands (PATTERN (insn)) >= 0);
2737 if (!first_cycle_insn_p && asm_p)
2738 /* This is an asm insn which we tried to issue on a cycle
2739 other than the first. Issue it on the next cycle. */
2740 cost = 1;
2741 else
2742 /* A USE insn, or something else we don't need to
2743 understand. We can't pass these directly to
2744 state_transition because it will trigger a
2745 fatal error for unrecognizable insns. */
2746 cost = 0;
2747 }
2748 else
2749 {
2750 cost = state_transition (temp_state, insn);
2751 if (cost < 0)
2752 cost = 0;
2753 else if (cost == 0)
2754 cost = 1;
2755 }
2756
2757 if (cost >= 1)
2758 {
2759 queue_insn (insn, cost);
2760 if (SCHED_GROUP_P (insn))
2761 {
2762 advance = cost;
2763 break;
2764 }
2765
2766 continue;
2767 }
2768
2769 if (current_sched_info->can_schedule_ready_p
2770 && ! (*current_sched_info->can_schedule_ready_p) (insn))
2771 /* We normally get here only if we don't want to move
2772 insn from the split block. */
2773 {
2774 TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
2775 continue;
2776 }
2777
2778 /* DECISION is made. */
2779
2780 if (TODO_SPEC (insn) & SPECULATIVE)
2781 generate_recovery_code (insn);
2782
2783 if (control_flow_insn_p (last_scheduled_insn)
2784 /* This is used to switch basic blocks by request
2785 from the scheduler front-end (actually, sched-ebb.c only).
2786 This is used to process blocks with a single fallthru
2787 edge. If the succeeding block has a jump, that jump would be
2788 moved to the end of the current bb, thus corrupting the CFG. */
2789 || current_sched_info->advance_target_bb (*target_bb, insn))
2790 {
2791 *target_bb = current_sched_info->advance_target_bb
2792 (*target_bb, 0);
2793
2794 if (sched_verbose)
2795 {
2796 rtx x;
2797
2798 x = next_real_insn (last_scheduled_insn);
2799 gcc_assert (x);
2800 dump_new_block_header (1, *target_bb, x, tail);
2801 }
2802
2803 last_scheduled_insn = bb_note (*target_bb);
2804 }
2805
2806 /* Update counters, etc in the scheduler's front end. */
2807 (*current_sched_info->begin_schedule_ready) (insn,
2808 last_scheduled_insn);
2809
2810 move_insn (insn, last_scheduled_insn, current_sched_info->next_tail);
2811 reemit_notes (insn);
2812 last_scheduled_insn = insn;
2813
2814 if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
2815 {
2816 cycle_issued_insns++;
2817 memcpy (curr_state, temp_state, dfa_state_size);
2818 }
2819
2820 if (targetm.sched.variable_issue)
2821 can_issue_more =
2822 targetm.sched.variable_issue (sched_dump, sched_verbose,
2823 insn, can_issue_more);
2824 /* A naked CLOBBER or USE generates no instruction, so do
2825 not count them against the issue rate. */
2826 else if (GET_CODE (PATTERN (insn)) != USE
2827 && GET_CODE (PATTERN (insn)) != CLOBBER)
2828 can_issue_more--;
2829
2830 advance = schedule_insn (insn);
2831
2832 /* After issuing an asm insn we should start a new cycle. */
2833 if (advance == 0 && asm_p)
2834 advance = 1;
2835 if (advance != 0)
2836 break;
2837
2838 first_cycle_insn_p = 0;
2839
2840 /* Sort the ready list based on priority. This must be
2841 redone here, as schedule_insn may have readied additional
2842 insns that will not be sorted correctly. */
2843 if (ready.n_ready > 0)
2844 ready_sort (&ready);
2845
2846 /* Quickly go through debug insns such that md sched
2847 reorder2 doesn't have to deal with debug insns. */
2848 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
2849 && (*current_sched_info->schedule_more_p) ())
2850 {
2851 if (control_flow_insn_p (last_scheduled_insn))
2852 {
2853 *target_bb = current_sched_info->advance_target_bb
2854 (*target_bb, 0);
2855
2856 if (sched_verbose)
2857 {
2858 rtx x;
2859
2860 x = next_real_insn (last_scheduled_insn);
2861 gcc_assert (x);
2862 dump_new_block_header (1, *target_bb, x, tail);
2863 }
2864
2865 last_scheduled_insn = bb_note (*target_bb);
2866 }
2867
2868 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
2869 {
2870 insn = ready_remove_first (&ready);
2871 gcc_assert (DEBUG_INSN_P (insn));
2872 (*current_sched_info->begin_schedule_ready)
2873 (insn, last_scheduled_insn);
2874 move_insn (insn, last_scheduled_insn,
2875 current_sched_info->next_tail);
2876 advance = schedule_insn (insn);
2877 last_scheduled_insn = insn;
2878 gcc_assert (advance == 0);
2879 if (ready.n_ready > 0)
2880 ready_sort (&ready);
2881 }
2882 }
2883
2884 if (targetm.sched.reorder2
2885 && (ready.n_ready == 0
2886 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2887 {
2888 can_issue_more =
2889 targetm.sched.reorder2 (sched_dump, sched_verbose,
2890 ready.n_ready
2891 ? ready_lastpos (&ready) : NULL,
2892 &ready.n_ready, clock_var);
2893 }
2894 }
2895 }
2896
2897 /* Debug info. */
2898 if (sched_verbose)
2899 {
2900 fprintf (sched_dump, ";;\tReady list (final): ");
2901 debug_ready_list (&ready);
2902 }
2903
2904 if (current_sched_info->queue_must_finish_empty)
2905 /* Sanity check -- queue must be empty now. Meaningless if region has
2906 multiple bbs. */
2907 gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
2908 else
2909 {
2910 /* We must maintain QUEUE_INDEX between blocks in region. */
2911 for (i = ready.n_ready - 1; i >= 0; i--)
2912 {
2913 rtx x;
2914
2915 x = ready_element (&ready, i);
2916 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2917 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2918 }
2919
2920 if (q_size)
2921 for (i = 0; i <= max_insn_queue_index; i++)
2922 {
2923 rtx link;
2924 for (link = insn_queue[i]; link; link = XEXP (link, 1))
2925 {
2926 rtx x;
2927
2928 x = XEXP (link, 0);
2929 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2930 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2931 }
2932 free_INSN_LIST_list (&insn_queue[i]);
2933 }
2934 }
2935
2936 if (sched_verbose)
2937 fprintf (sched_dump, ";; total time = %d\n", clock_var);
2938
2939 if (!current_sched_info->queue_must_finish_empty
2940 || haifa_recovery_bb_recently_added_p)
2941 {
2942 /* INSN_TICK (minimum clock tick at which the insn becomes
2943 ready) may not be correct for the insns in the subsequent
2944 blocks of the region. We should use a correct value of
2945 `clock_var' or modify INSN_TICK. It is better to keep
2946 clock_var value equal to 0 at the start of a basic block.
2947 Therefore we modify INSN_TICK here. */
2948 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
2949 }
2950
2951 if (targetm.sched.md_finish)
2952 {
2953 targetm.sched.md_finish (sched_dump, sched_verbose);
2954 /* Target might have added some instructions to the scheduled block
2955 in its md_finish () hook. These new insns don't have any data
2956 initialized and to identify them we extend h_i_d so that they'll
2957 get zero luids. */
2958 sched_init_luids (NULL, NULL, NULL, NULL);
2959 }
2960
2961 if (sched_verbose)
2962 fprintf (sched_dump, ";; new head = %d\n;; new tail = %d\n\n",
2963 INSN_UID (head), INSN_UID (tail));
2964
2965 /* Update head/tail boundaries. */
2966 head = NEXT_INSN (prev_head);
2967 tail = last_scheduled_insn;
2968
2969 head = restore_other_notes (head, NULL);
2970
2971 current_sched_info->head = head;
2972 current_sched_info->tail = tail;
2973 }
2974 \f
2975 /* Set_priorities: compute priority of each insn in the block. */
2976
2977 int
2978 set_priorities (rtx head, rtx tail)
2979 {
2980 rtx insn;
2981 int n_insn;
2982 int sched_max_insns_priority =
2983 current_sched_info->sched_max_insns_priority;
2984 rtx prev_head;
2985
2986 if (head == tail && (! INSN_P (head) || BOUNDARY_DEBUG_INSN_P (head)))
2987 gcc_unreachable ();
2988
2989 n_insn = 0;
2990
2991 prev_head = PREV_INSN (head);
2992 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
2993 {
2994 if (!INSN_P (insn))
2995 continue;
2996
2997 n_insn++;
2998 (void) priority (insn);
2999
3000 gcc_assert (INSN_PRIORITY_KNOWN (insn));
3001
3002 sched_max_insns_priority = MAX (sched_max_insns_priority,
3003 INSN_PRIORITY (insn));
3004 }
3005
3006 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
3007
3008 return n_insn;
3009 }
3010
3011 /* Set dump and sched_verbose for the desired debugging output. If no
3012 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
3013 For -fsched-verbose=N, N>=10, print everything to stderr. */
3014 void
3015 setup_sched_dump (void)
3016 {
3017 sched_verbose = sched_verbose_param;
3018 if (sched_verbose_param == 0 && dump_file)
3019 sched_verbose = 1;
3020 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
3021 ? stderr : dump_file);
3022 }
3023
3024 /* Initialize some global state for the scheduler. This function works
3025 with the common data shared between all the schedulers. It is called
3026 from the scheduler specific initialization routine. */
3027
3028 void
3029 sched_init (void)
3030 {
3031 /* Disable speculative loads in their presence if cc0 defined. */
3032 #ifdef HAVE_cc0
3033 flag_schedule_speculative_load = 0;
3034 #endif
3035
3036 /* Initialize SPEC_INFO. */
3037 if (targetm.sched.set_sched_flags)
3038 {
3039 spec_info = &spec_info_var;
3040 targetm.sched.set_sched_flags (spec_info);
3041
3042 if (spec_info->mask != 0)
3043 {
3044 spec_info->data_weakness_cutoff =
3045 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
3046 spec_info->control_weakness_cutoff =
3047 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
3048 * REG_BR_PROB_BASE) / 100;
3049 }
3050 else
3051 /* So we won't read anything accidentally. */
3052 spec_info = NULL;
3053
3054 }
3055 else
3056 /* So we won't read anything accidentally. */
3057 spec_info = 0;
3058
3059 /* Initialize issue_rate. */
3060 if (targetm.sched.issue_rate)
3061 issue_rate = targetm.sched.issue_rate ();
3062 else
3063 issue_rate = 1;
3064
3065 if (cached_issue_rate != issue_rate)
3066 {
3067 cached_issue_rate = issue_rate;
3068 /* To invalidate max_lookahead_tries: */
3069 cached_first_cycle_multipass_dfa_lookahead = 0;
3070 }
3071
3072 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
3073 dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
3074 else
3075 dfa_lookahead = 0;
3076
3077 if (targetm.sched.init_dfa_pre_cycle_insn)
3078 targetm.sched.init_dfa_pre_cycle_insn ();
3079
3080 if (targetm.sched.init_dfa_post_cycle_insn)
3081 targetm.sched.init_dfa_post_cycle_insn ();
3082
3083 dfa_start ();
3084 dfa_state_size = state_size ();
3085
3086 init_alias_analysis ();
3087
3088 df_set_flags (DF_LR_RUN_DCE);
3089 df_note_add_problem ();
3090
3091 /* More problems needed for interloop dep calculation in SMS. */
3092 if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
3093 {
3094 df_rd_add_problem ();
3095 df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
3096 }
3097
3098 df_analyze ();
3099
3100 /* Do not run DCE after reload, as this can kill nops inserted
3101 by bundling. */
3102 if (reload_completed)
3103 df_clear_flags (DF_LR_RUN_DCE);
3104
3105 regstat_compute_calls_crossed ();
3106
3107 if (targetm.sched.md_init_global)
3108 targetm.sched.md_init_global (sched_dump, sched_verbose,
3109 get_max_uid () + 1);
3110
3111 curr_state = xmalloc (dfa_state_size);
3112 }
3113
3114 static void haifa_init_only_bb (basic_block, basic_block);
3115
3116 /* Initialize data structures specific to the Haifa scheduler. */
3117 void
3118 haifa_sched_init (void)
3119 {
3120 setup_sched_dump ();
3121 sched_init ();
3122
3123 if (spec_info != NULL)
3124 {
3125 sched_deps_info->use_deps_list = 1;
3126 sched_deps_info->generate_spec_deps = 1;
3127 }
3128
3129 /* Initialize luids, dependency caches, target and h_i_d for the
3130 whole function. */
3131 {
3132 bb_vec_t bbs = VEC_alloc (basic_block, heap, n_basic_blocks);
3133 basic_block bb;
3134
3135 sched_init_bbs ();
3136
3137 FOR_EACH_BB (bb)
3138 VEC_quick_push (basic_block, bbs, bb);
3139 sched_init_luids (bbs, NULL, NULL, NULL);
3140 sched_deps_init (true);
3141 sched_extend_target ();
3142 haifa_init_h_i_d (bbs, NULL, NULL, NULL);
3143
3144 VEC_free (basic_block, heap, bbs);
3145 }
3146
3147 sched_init_only_bb = haifa_init_only_bb;
3148 sched_split_block = sched_split_block_1;
3149 sched_create_empty_bb = sched_create_empty_bb_1;
3150 haifa_recovery_bb_ever_added_p = false;
3151
3152 #ifdef ENABLE_CHECKING
3153 /* This is useful mainly for finding bugs in check_cfg () itself.
3154 We must call sched_init_bbs () before check_cfg () because check_cfg ()
3155 assumes that the last insn in the last bb has a non-null successor. */
3156 check_cfg (0, 0);
3157 #endif
3158
3159 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
3160 before_recovery = 0;
3161 after_recovery = 0;
3162 }
3163
3164 /* Finish work with the data specific to the Haifa scheduler. */
3165 void
3166 haifa_sched_finish (void)
3167 {
3168 sched_create_empty_bb = NULL;
3169 sched_split_block = NULL;
3170 sched_init_only_bb = NULL;
3171
3172 if (spec_info && spec_info->dump)
3173 {
3174 char c = reload_completed ? 'a' : 'b';
3175
3176 fprintf (spec_info->dump,
3177 ";; %s:\n", current_function_name ());
3178
3179 fprintf (spec_info->dump,
3180 ";; Procedure %cr-begin-data-spec motions == %d\n",
3181 c, nr_begin_data);
3182 fprintf (spec_info->dump,
3183 ";; Procedure %cr-be-in-data-spec motions == %d\n",
3184 c, nr_be_in_data);
3185 fprintf (spec_info->dump,
3186 ";; Procedure %cr-begin-control-spec motions == %d\n",
3187 c, nr_begin_control);
3188 fprintf (spec_info->dump,
3189 ";; Procedure %cr-be-in-control-spec motions == %d\n",
3190 c, nr_be_in_control);
3191 }
3192
3193 /* Finalize h_i_d, dependency caches, and luids for the whole
3194 function. The target will be finalized in md_finish_global (). */
3195 sched_deps_finish ();
3196 sched_finish_luids ();
3197 current_sched_info = NULL;
3198 sched_finish ();
3199 }
3200
3201 /* Free global data used during insn scheduling. This function works with
3202 the common data shared between the schedulers. */
3203
3204 void
3205 sched_finish (void)
3206 {
3207 haifa_finish_h_i_d ();
3208 free (curr_state);
3209
3210 if (targetm.sched.md_finish_global)
3211 targetm.sched.md_finish_global (sched_dump, sched_verbose);
3212
3213 end_alias_analysis ();
3214
3215 regstat_free_calls_crossed ();
3216
3217 dfa_finish ();
3218
3219 #ifdef ENABLE_CHECKING
3220 /* After reload ia64 backend clobbers CFG, so can't check anything. */
3221 if (!reload_completed)
3222 check_cfg (0, 0);
3223 #endif
3224 }
3225
3226 /* Fix INSN_TICKs of the instructions in the current block as well as
3227 INSN_TICKs of their dependents.
3228 HEAD and TAIL are the begin and the end of the current scheduled block. */
3229 static void
3230 fix_inter_tick (rtx head, rtx tail)
3231 {
3232 /* Set of instructions with corrected INSN_TICK. */
3233 bitmap_head processed;
3234 /* ??? It is doubtful whether we should assume that cycle advance happens on
3235 basic block boundaries. Basically insns that are unconditionally ready
3236 at the start of the block are preferable to those which have
3237 a one cycle dependency on an insn from the previous block. */
3238 int next_clock = clock_var + 1;
3239
3240 bitmap_initialize (&processed, 0);
3241
3242 /* Iterate over the scheduled instructions and fix their INSN_TICKs and the
3243 INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
3244 across different blocks. */
3245 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
3246 {
3247 if (INSN_P (head))
3248 {
3249 int tick;
3250 sd_iterator_def sd_it;
3251 dep_t dep;
3252
3253 tick = INSN_TICK (head);
3254 gcc_assert (tick >= MIN_TICK);
3255
3256 /* Fix INSN_TICK of instruction from just scheduled block. */
3257 if (!bitmap_bit_p (&processed, INSN_LUID (head)))
3258 {
3259 bitmap_set_bit (&processed, INSN_LUID (head));
3260 tick -= next_clock;
3261
3262 if (tick < MIN_TICK)
3263 tick = MIN_TICK;
3264
3265 INSN_TICK (head) = tick;
3266 }
3267
3268 FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
3269 {
3270 rtx next;
3271
3272 next = DEP_CON (dep);
3273 tick = INSN_TICK (next);
3274
3275 if (tick != INVALID_TICK
3276 /* If NEXT has its INSN_TICK calculated, fix it.
3277 If not - it will be properly calculated from
3278 scratch later in fix_tick_ready. */
3279 && !bitmap_bit_p (&processed, INSN_LUID (next)))
3280 {
3281 bitmap_set_bit (&processed, INSN_LUID (next));
3282 tick -= next_clock;
3283
3284 if (tick < MIN_TICK)
3285 tick = MIN_TICK;
3286
3287 if (tick > INTER_TICK (next))
3288 INTER_TICK (next) = tick;
3289 else
3290 tick = INTER_TICK (next);
3291
3292 INSN_TICK (next) = tick;
3293 }
3294 }
3295 }
3296 }
3297 bitmap_clear (&processed);
3298 }
3299
3300 static int haifa_speculate_insn (rtx, ds_t, rtx *);
3301
3302 /* Check if NEXT is ready to be added to the ready or queue list.
3303 If "yes", add it to the proper list.
3304 Returns:
3305 -1 - is not ready yet,
3306 0 - added to the ready list,
3307 0 < N - queued for N cycles. */
3308 int
3309 try_ready (rtx next)
3310 {
3311 ds_t old_ts, *ts;
3312
3313 ts = &TODO_SPEC (next);
3314 old_ts = *ts;
3315
3316 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
3317 && ((old_ts & HARD_DEP)
3318 || (old_ts & SPECULATIVE)));
3319
3320 if (sd_lists_empty_p (next, SD_LIST_BACK))
3321 /* NEXT has all its dependencies resolved. */
3322 {
3323 /* Remove HARD_DEP bit from NEXT's status. */
3324 *ts &= ~HARD_DEP;
3325
3326 if (current_sched_info->flags & DO_SPECULATION)
3327 /* Remove all speculative bits from NEXT's status. */
3328 *ts &= ~SPECULATIVE;
3329 }
3330 else
3331 {
3332 /* One of the NEXT's dependencies has been resolved.
3333 Recalculate NEXT's status. */
3334
3335 *ts &= ~SPECULATIVE & ~HARD_DEP;
3336
3337 if (sd_lists_empty_p (next, SD_LIST_HARD_BACK))
3338 /* Now we've got NEXT with speculative deps only.
3339 1. Look at the deps to see what we have to do.
3340 2. Check if we can do 'todo'. */
3341 {
3342 sd_iterator_def sd_it;
3343 dep_t dep;
3344 bool first_p = true;
3345
3346 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
3347 {
3348 ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
3349
3350 if (first_p)
3351 {
3352 first_p = false;
3353
3354 *ts = ds;
3355 }
3356 else
3357 *ts = ds_merge (*ts, ds);
3358 }
3359
3360 if (ds_weak (*ts) < spec_info->data_weakness_cutoff)
3361 /* Too few points. */
3362 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3363 }
3364 else
3365 *ts |= HARD_DEP;
3366 }
3367
3368 if (*ts & HARD_DEP)
3369 gcc_assert (*ts == old_ts
3370 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
3371 else if (current_sched_info->new_ready)
3372 *ts = current_sched_info->new_ready (next, *ts);
3373
3374 /* * If !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then the insn might
3375 have its original pattern or a changed (speculative) one. This is due
3376 to changing ebb in region scheduling.
3377 * But if (old_ts & SPECULATIVE), then we are pretty sure that the insn
3378 has a speculative pattern.
3379
3380 We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
3381 control-speculative NEXT could have been discarded by sched-rgn.c
3382 (the same case as when discarded by can_schedule_ready_p ()). */
3383
3384 if ((*ts & SPECULATIVE)
3385 /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
3386 need to change anything. */
3387 && *ts != old_ts)
3388 {
3389 int res;
3390 rtx new_pat;
3391
3392 gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
3393
3394 res = haifa_speculate_insn (next, *ts, &new_pat);
3395
3396 switch (res)
3397 {
3398 case -1:
3399 /* It would be nice to change DEP_STATUS of all dependences,
3400 which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
3401 so we won't reanalyze anything. */
3402 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3403 break;
3404
3405 case 0:
3406 /* We follow the rule, that every speculative insn
3407 has non-null ORIG_PAT. */
3408 if (!ORIG_PAT (next))
3409 ORIG_PAT (next) = PATTERN (next);
3410 break;
3411
3412 case 1:
3413 if (!ORIG_PAT (next))
3414 /* If we are going to overwrite the original pattern of the insn,
3415 save it. */
3416 ORIG_PAT (next) = PATTERN (next);
3417
3418 haifa_change_pattern (next, new_pat);
3419 break;
3420
3421 default:
3422 gcc_unreachable ();
3423 }
3424 }
3425
3426 /* We need to restore pattern only if (*ts == 0), because otherwise it is
3427 either correct (*ts & SPECULATIVE),
3428 or we simply don't care (*ts & HARD_DEP). */
3429
3430 gcc_assert (!ORIG_PAT (next)
3431 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
3432
3433 if (*ts & HARD_DEP)
3434 {
3435 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
3436 control-speculative NEXT could have been discarded by sched-rgn.c
3437 (the same case as when discarded by can_schedule_ready_p ()). */
3438 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
3439
3440 change_queue_index (next, QUEUE_NOWHERE);
3441 return -1;
3442 }
3443 else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
3444 /* We should change the pattern of every previously speculative
3445 instruction - and we determine if NEXT was speculative by using
3446 the ORIG_PAT field. Except one case - speculation checks have ORIG_PAT
3447 set too, so skip them. */
3448 {
3449 haifa_change_pattern (next, ORIG_PAT (next));
3450 ORIG_PAT (next) = 0;
3451 }
3452
3453 if (sched_verbose >= 2)
3454 {
3455 int s = TODO_SPEC (next);
3456
3457 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
3458 (*current_sched_info->print_insn) (next, 0));
3459
3460 if (spec_info && spec_info->dump)
3461 {
3462 if (s & BEGIN_DATA)
3463 fprintf (spec_info->dump, "; data-spec;");
3464 if (s & BEGIN_CONTROL)
3465 fprintf (spec_info->dump, "; control-spec;");
3466 if (s & BE_IN_CONTROL)
3467 fprintf (spec_info->dump, "; in-control-spec;");
3468 }
3469
3470 fprintf (sched_dump, "\n");
3471 }
3472
3473 adjust_priority (next);
3474
3475 return fix_tick_ready (next);
3476 }
3477
3478 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
3479 static int
3480 fix_tick_ready (rtx next)
3481 {
3482 int tick, delay;
3483
3484 if (!sd_lists_empty_p (next, SD_LIST_RES_BACK))
3485 {
3486 int full_p;
3487 sd_iterator_def sd_it;
3488 dep_t dep;
3489
3490 tick = INSN_TICK (next);
3491 /* if tick is not equal to INVALID_TICK, then update
3492 INSN_TICK of NEXT with the most recent resolved dependence
3493 cost. Otherwise, recalculate from scratch. */
3494 full_p = (tick == INVALID_TICK);
3495
3496 FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
3497 {
3498 rtx pro = DEP_PRO (dep);
3499 int tick1;
3500
3501 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
3502
3503 tick1 = INSN_TICK (pro) + dep_cost (dep);
3504 if (tick1 > tick)
3505 tick = tick1;
3506
3507 if (!full_p)
3508 break;
3509 }
3510 }
3511 else
3512 tick = -1;
3513
3514 INSN_TICK (next) = tick;
3515
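/* A non-positive delay means NEXT's tick has already been reached, so it
   can go straight to the ready list.  */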
3516 delay = tick - clock_var;
3517 if (delay <= 0)
3518 delay = QUEUE_READY;
3519
3520 change_queue_index (next, delay);
3521
3522 return delay;
3523 }
3524
3525 /* Move NEXT to the proper queue list with (DELAY >= 1),
3526 or add it to the ready list (DELAY == QUEUE_READY),
3527 or remove it from the ready and queue lists altogether (DELAY == QUEUE_NOWHERE). */
3528 static void
3529 change_queue_index (rtx next, int delay)
3530 {
3531 int i = QUEUE_INDEX (next);
3532
3533 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
3534 && delay != 0);
3535 gcc_assert (i != QUEUE_SCHEDULED);
3536
3537 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
3538 || (delay < 0 && delay == i))
3539 /* We have nothing to do. */
3540 return;
3541
3542 /* Remove NEXT from wherever it is now. */
3543 if (i == QUEUE_READY)
3544 ready_remove_insn (next);
3545 else if (i >= 0)
3546 queue_remove (next);
3547
3548 /* Add it to the proper place. */
3549 if (delay == QUEUE_READY)
3550 ready_add (readyp, next, false);
3551 else if (delay >= 1)
3552 queue_insn (next, delay);
3553
3554 if (sched_verbose >= 2)
3555 {
3556 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
3557 (*current_sched_info->print_insn) (next, 0));
3558
3559 if (delay == QUEUE_READY)
3560 fprintf (sched_dump, " into ready\n");
3561 else if (delay >= 1)
3562 fprintf (sched_dump, " into queue with cost=%d\n", delay);
3563 else
3564 fprintf (sched_dump, " removed from ready or queue lists\n");
3565 }
3566 }
3567
3568 static int sched_ready_n_insns = -1;
3569
3570 /* Initialize per region data structures. */
3571 void
3572 sched_extend_ready_list (int new_sched_ready_n_insns)
3573 {
3574 int i;
3575
3576 if (sched_ready_n_insns == -1)
3577 /* At the first call we need to initialize one more choice_stack
3578 entry. */
3579 {
3580 i = 0;
3581 sched_ready_n_insns = 0;
3582 }
3583 else
3584 i = sched_ready_n_insns + 1;
3585
3586 ready.veclen = new_sched_ready_n_insns + issue_rate;
3587 ready.vec = XRESIZEVEC (rtx, ready.vec, ready.veclen);
3588
3589 gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
3590
3591 ready_try = (char *) xrecalloc (ready_try, new_sched_ready_n_insns,
3592 sched_ready_n_insns, sizeof (*ready_try));
3593
3594 /* We allocate +1 element to save initial state in the choice_stack[0]
3595 entry. */
3596 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
3597 new_sched_ready_n_insns + 1);
3598
3599 for (; i <= new_sched_ready_n_insns; i++)
3600 choice_stack[i].state = xmalloc (dfa_state_size);
3601
3602 sched_ready_n_insns = new_sched_ready_n_insns;
3603 }
3604
3605 /* Free per region data structures. */
3606 void
3607 sched_finish_ready_list (void)
3608 {
3609 int i;
3610
3611 free (ready.vec);
3612 ready.vec = NULL;
3613 ready.veclen = 0;
3614
3615 free (ready_try);
3616 ready_try = NULL;
3617
3618 for (i = 0; i <= sched_ready_n_insns; i++)
3619 free (choice_stack [i].state);
3620 free (choice_stack);
3621 choice_stack = NULL;
3622
3623 sched_ready_n_insns = -1;
3624 }
3625
3626 static int
3627 haifa_luid_for_non_insn (rtx x)
3628 {
3629 gcc_assert (NOTE_P (x) || LABEL_P (x));
3630
3631 return 0;
3632 }
3633
3634 /* Generates recovery code for INSN. */
3635 static void
3636 generate_recovery_code (rtx insn)
3637 {
3638 if (TODO_SPEC (insn) & BEGIN_SPEC)
3639 begin_speculative_block (insn);
3640
3641 /* Here we have an insn with no dependencies on
3642 instructions other than CHECK_SPEC ones. */
3643
3644 if (TODO_SPEC (insn) & BE_IN_SPEC)
3645 add_to_speculative_block (insn);
3646 }
3647
3648 /* Helper function.
3649 Tries to add speculative dependencies of type FS between TWIN and the
3650 consumers of INSN's forward dependencies. */
3651 static void
3652 process_insn_forw_deps_be_in_spec (rtx insn, rtx twin, ds_t fs)
3653 {
3654 sd_iterator_def sd_it;
3655 dep_t dep;
3656
3657 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
3658 {
3659 ds_t ds;
3660 rtx consumer;
3661
3662 consumer = DEP_CON (dep);
3663
3664 ds = DEP_STATUS (dep);
3665
3666 if (/* If we want to create speculative dep. */
3667 fs
3668 /* And we can do that because this is a true dep. */
3669 && (ds & DEP_TYPES) == DEP_TRUE)
3670 {
3671 gcc_assert (!(ds & BE_IN_SPEC));
3672
3673 if (/* If this dep can be overcome with 'begin speculation'. */
3674 ds & BEGIN_SPEC)
3675 /* Then we have a choice: keep the dep 'begin speculative'
3676 or transform it into 'be in speculative'. */
3677 {
3678 if (/* In try_ready we assert that once an insn becomes ready
3679 it can be removed from the ready (or queue) list only
3680 due to a backend decision. Hence we can't let the
3681 probability of the speculative dep decrease. */
3682 ds_weak (ds) <= ds_weak (fs))
3683 {
3684 ds_t new_ds;
3685
3686 new_ds = (ds & ~BEGIN_SPEC) | fs;
3687
3688 if (/* consumer can 'be in speculative'. */
3689 sched_insn_is_legitimate_for_speculation_p (consumer,
3690 new_ds))
3691 /* Transform it to be in speculative. */
3692 ds = new_ds;
3693 }
3694 }
3695 else
3696 /* Mark the dep as 'be in speculative'. */
3697 ds |= fs;
3698 }
3699
3700 {
3701 dep_def _new_dep, *new_dep = &_new_dep;
3702
3703 init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
3704 sd_add_dep (new_dep, false);
3705 }
3706 }
3707 }
3708
3709 /* Generates recovery code for BEGIN speculative INSN. */
3710 static void
3711 begin_speculative_block (rtx insn)
3712 {
3713 if (TODO_SPEC (insn) & BEGIN_DATA)
3714 nr_begin_data++;
3715 if (TODO_SPEC (insn) & BEGIN_CONTROL)
3716 nr_begin_control++;
3717
3718 create_check_block_twin (insn, false);
3719
3720 TODO_SPEC (insn) &= ~BEGIN_SPEC;
3721 }
3722
3723 static void haifa_init_insn (rtx);
3724
3725 /* Generates recovery code for BE_IN speculative INSN. */
3726 static void
3727 add_to_speculative_block (rtx insn)
3728 {
3729 ds_t ts;
3730 sd_iterator_def sd_it;
3731 dep_t dep;
3732 rtx twins = NULL;
3733 rtx_vec_t priorities_roots;
3734
3735 ts = TODO_SPEC (insn);
3736 gcc_assert (!(ts & ~BE_IN_SPEC));
3737
3738 if (ts & BE_IN_DATA)
3739 nr_be_in_data++;
3740 if (ts & BE_IN_CONTROL)
3741 nr_be_in_control++;
3742
3743 TODO_SPEC (insn) &= ~BE_IN_SPEC;
3744 gcc_assert (!TODO_SPEC (insn));
3745
3746 DONE_SPEC (insn) |= ts;
3747
3748 /* First we convert all simple checks to branchy. */
3749 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3750 sd_iterator_cond (&sd_it, &dep);)
3751 {
3752 rtx check = DEP_PRO (dep);
3753
3754 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
3755 {
3756 create_check_block_twin (check, true);
3757
3758 /* Restart search. */
3759 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3760 }
3761 else
3762 /* Continue search. */
3763 sd_iterator_next (&sd_it);
3764 }
3765
3766 priorities_roots = NULL;
3767 clear_priorities (insn, &priorities_roots);
3768
3769 while (1)
3770 {
3771 rtx check, twin;
3772 basic_block rec;
3773
3774 /* Get the first backward dependency of INSN. */
3775 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3776 if (!sd_iterator_cond (&sd_it, &dep))
3777 /* INSN has no backward dependencies left. */
3778 break;
3779
3780 gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
3781 && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
3782 && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
3783
3784 check = DEP_PRO (dep);
3785
3786 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
3787 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
3788
3789 rec = BLOCK_FOR_INSN (check);
3790
3791 twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
3792 haifa_init_insn (twin);
3793
3794 sd_copy_back_deps (twin, insn, true);
3795
3796 if (sched_verbose && spec_info->dump)
3797 /* INSN_BB (insn) isn't determined for twin insns yet.
3798 So we can't use current_sched_info->print_insn. */
3799 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3800 INSN_UID (twin), rec->index);
3801
3802 twins = alloc_INSN_LIST (twin, twins);
3803
3804 /* Add dependences between TWIN and all appropriate
3805 instructions from REC. */
3806 FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
3807 {
3808 rtx pro = DEP_PRO (dep);
3809
3810 gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
3811
3812 /* INSN might have dependencies from the instructions from
3813 several recovery blocks. At this iteration we process those
3814 producers that reside in REC. */
3815 if (BLOCK_FOR_INSN (pro) == rec)
3816 {
3817 dep_def _new_dep, *new_dep = &_new_dep;
3818
3819 init_dep (new_dep, pro, twin, REG_DEP_TRUE);
3820 sd_add_dep (new_dep, false);
3821 }
3822 }
3823
3824 process_insn_forw_deps_be_in_spec (insn, twin, ts);
3825
3826 /* Remove all dependencies between INSN and insns in REC. */
3827 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3828 sd_iterator_cond (&sd_it, &dep);)
3829 {
3830 rtx pro = DEP_PRO (dep);
3831
3832 if (BLOCK_FOR_INSN (pro) == rec)
3833 sd_delete_dep (sd_it);
3834 else
3835 sd_iterator_next (&sd_it);
3836 }
3837 }
3838
3839 /* We couldn't have added the dependencies between INSN and TWINS earlier
3840 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
3841 while (twins)
3842 {
3843 rtx twin;
3844
3845 twin = XEXP (twins, 0);
3846
3847 {
3848 dep_def _new_dep, *new_dep = &_new_dep;
3849
3850 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
3851 sd_add_dep (new_dep, false);
3852 }
3853
3854 twin = XEXP (twins, 1);
3855 free_INSN_LIST_node (twins);
3856 twins = twin;
3857 }
3858
3859 calc_priorities (priorities_roots);
3860 VEC_free (rtx, heap, priorities_roots);
3861 }
3862
3863 /* Extend the array pointed to by P and zero-fill only the newly added part. */
3864 void *
3865 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
3866 {
3867 gcc_assert (new_nmemb >= old_nmemb);
3868 p = XRESIZEVAR (void, p, new_nmemb * size);
3869 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
3870 return p;
3871 }
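/* A minimal usage sketch (illustration only, not part of the pass):
   grow a zero-initialized array from OLD_N to NEW_N elements; the
   newly added tail elements read as zero.  */
#if 0
{
  int old_n = 4, new_n = 8;
  int *tab = XCNEWVEC (int, old_n);

  tab = (int *) xrecalloc (tab, new_n, old_n, sizeof (int));
  gcc_assert (tab[new_n - 1] == 0);
  free (tab);
}
#endif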
3872
3873 /* Helper function.
3874 Find fallthru edge from PRED. */
3875 edge
3876 find_fallthru_edge (basic_block pred)
3877 {
3878 edge e;
3879 edge_iterator ei;
3880 basic_block succ;
3881
3882 succ = pred->next_bb;
3883 gcc_assert (succ->prev_bb == pred);
3884
3885 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
3886 {
3887 FOR_EACH_EDGE (e, ei, pred->succs)
3888 if (e->flags & EDGE_FALLTHRU)
3889 {
3890 gcc_assert (e->dest == succ);
3891 return e;
3892 }
3893 }
3894 else
3895 {
3896 FOR_EACH_EDGE (e, ei, succ->preds)
3897 if (e->flags & EDGE_FALLTHRU)
3898 {
3899 gcc_assert (e->src == pred);
3900 return e;
3901 }
3902 }
3903
3904 return NULL;
3905 }
3906
3907 /* Extend per basic block data structures. */
3908 static void
3909 sched_extend_bb (void)
3910 {
3911 rtx insn;
3912
3913 /* The following is done to keep current_sched_info->next_tail non-null. */
3914 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
3915 if (NEXT_INSN (insn) == 0
3916 || (!NOTE_P (insn)
3917 && !LABEL_P (insn)
3918 /* Don't emit a NOTE if it would end up before a BARRIER. */
3919 && !BARRIER_P (NEXT_INSN (insn))))
3920 {
3921 rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
3922 /* Make the note appear outside the BB. */
3923 set_block_for_insn (note, NULL);
3924 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
3925 }
3926 }
3927
3928 /* Init per basic block data structures. */
3929 void
3930 sched_init_bbs (void)
3931 {
3932 sched_extend_bb ();
3933 }
3934
3935 /* Initialize BEFORE_RECOVERY variable. */
3936 static void
3937 init_before_recovery (basic_block *before_recovery_ptr)
3938 {
3939 basic_block last;
3940 edge e;
3941
3942 last = EXIT_BLOCK_PTR->prev_bb;
3943 e = find_fallthru_edge (last);
3944
3945 if (e)
3946 {
3947 /* We create two basic blocks:
3948 1. A single-instruction block inserted right after E->SRC,
3949 which jumps to
3950 2. An empty block placed right before EXIT_BLOCK.
3951 Recovery blocks will be emitted between these two blocks. */
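/* Sketch of the resulting fallthru path to EXIT ("->>" is a fallthru
   edge, "->" the emitted jump), matching the dump message below:

     LAST ->> SINGLE -> EMPTY ->> EXIT

   Recovery blocks are later emitted between SINGLE and EMPTY.  */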
3952
3953 basic_block single, empty;
3954 rtx x, label;
3955
3956 /* If the fallthru edge to the exit block that we've found comes from
3957 the block we created earlier, there is nothing more to do. */
3958 if (last == after_recovery)
3959 return;
3960
3961 adding_bb_to_current_region_p = false;
3962
3963 single = sched_create_empty_bb (last);
3964 empty = sched_create_empty_bb (single);
3965
3966 /* Add new blocks to the root loop. */
3967 if (current_loops != NULL)
3968 {
3969 add_bb_to_loop (single, VEC_index (loop_p, current_loops->larray, 0));
3970 add_bb_to_loop (empty, VEC_index (loop_p, current_loops->larray, 0));
3971 }
3972
3973 single->count = last->count;
3974 empty->count = last->count;
3975 single->frequency = last->frequency;
3976 empty->frequency = last->frequency;
3977 BB_COPY_PARTITION (single, last);
3978 BB_COPY_PARTITION (empty, last);
3979
3980 redirect_edge_succ (e, single);
3981 make_single_succ_edge (single, empty, 0);
3982 make_single_succ_edge (empty, EXIT_BLOCK_PTR,
3983 EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
3984
3985 label = block_label (empty);
3986 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
3987 JUMP_LABEL (x) = label;
3988 LABEL_NUSES (label)++;
3989 haifa_init_insn (x);
3990
3991 emit_barrier_after (x);
3992
3993 sched_init_only_bb (empty, NULL);
3994 sched_init_only_bb (single, NULL);
3995 sched_extend_bb ();
3996
3997 adding_bb_to_current_region_p = true;
3998 before_recovery = single;
3999 after_recovery = empty;
4000
4001 if (before_recovery_ptr)
4002 *before_recovery_ptr = before_recovery;
4003
4004 if (sched_verbose >= 2 && spec_info->dump)
4005 fprintf (spec_info->dump,
4006 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
4007 last->index, single->index, empty->index);
4008 }
4009 else
4010 before_recovery = last;
4011 }
4012
4013 /* Returns new recovery block. */
4014 basic_block
4015 sched_create_recovery_block (basic_block *before_recovery_ptr)
4016 {
4017 rtx label;
4018 rtx barrier;
4019 basic_block rec;
4020
4021 haifa_recovery_bb_recently_added_p = true;
4022 haifa_recovery_bb_ever_added_p = true;
4023
4024 init_before_recovery (before_recovery_ptr);
4025
4026 barrier = get_last_bb_insn (before_recovery);
4027 gcc_assert (BARRIER_P (barrier));
4028
4029 label = emit_label_after (gen_label_rtx (), barrier);
4030
4031 rec = create_basic_block (label, label, before_recovery);
4032
4033 /* A recovery block always ends with an unconditional jump. */
4034 emit_barrier_after (BB_END (rec));
4035
4036 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
4037 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
4038
4039 if (sched_verbose && spec_info->dump)
4040 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
4041 rec->index);
4042
4043 return rec;
4044 }
4045
4046 /* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
4047 and emit necessary jumps. */
4048 void
4049 sched_create_recovery_edges (basic_block first_bb, basic_block rec,
4050 basic_block second_bb)
4051 {
4052 rtx label;
4053 rtx jump;
4054 edge e;
4055 int edge_flags;
4056
4057 /* This fixes the incoming edge. */
4058 /* ??? Which other flags should be specified? */
4059 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
4060 /* Partition type is the same, if it is "unpartitioned". */
4061 edge_flags = EDGE_CROSSING;
4062 else
4063 edge_flags = 0;
4064
4065 e = make_edge (first_bb, rec, edge_flags);
4066 label = block_label (second_bb);
4067 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
4068 JUMP_LABEL (jump) = label;
4069 LABEL_NUSES (label)++;
4070
4071 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
4072 /* Partition type is the same, if it is "unpartitioned". */
4073 {
4074 /* Rewritten from cfgrtl.c. */
4075 if (flag_reorder_blocks_and_partition
4076 && targetm.have_named_sections)
4077 {
4078 /* We don't need the same note for the check because
4079 any_condjump_p (check) == true. */
4080 add_reg_note (jump, REG_CROSSING_JUMP, NULL_RTX);
4081 }
4082 edge_flags = EDGE_CROSSING;
4083 }
4084 else
4085 edge_flags = 0;
4086
4087 make_single_succ_edge (rec, second_bb, edge_flags);
4088 }
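/* A sketch of the typical call sequence (create_check_block_twin below
   is the real user): create the recovery block, emit the check jump
   before the speculative insn, split its block after the check, and
   wire up the recovery edges:

     rec = sched_create_recovery_block (NULL);
     ...emit the check jump before INSN and the twin into REC...
     first_bb = BLOCK_FOR_INSN (check);
     second_bb = sched_split_block (first_bb, check);
     sched_create_recovery_edges (first_bb, rec, second_bb);  */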
4089
4090 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
4091 INSN is a simple check that should be converted to a branchy one. */
4092 static void
4093 create_check_block_twin (rtx insn, bool mutate_p)
4094 {
4095 basic_block rec;
4096 rtx label, check, twin;
4097 ds_t fs;
4098 sd_iterator_def sd_it;
4099 dep_t dep;
4100 dep_def _new_dep, *new_dep = &_new_dep;
4101 ds_t todo_spec;
4102
4103 gcc_assert (ORIG_PAT (insn) != NULL_RTX);
4104
4105 if (!mutate_p)
4106 todo_spec = TODO_SPEC (insn);
4107 else
4108 {
4109 gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
4110 && (TODO_SPEC (insn) & SPECULATIVE) == 0);
4111
4112 todo_spec = CHECK_SPEC (insn);
4113 }
4114
4115 todo_spec &= SPECULATIVE;
4116
4117 /* Create recovery block. */
4118 if (mutate_p || targetm.sched.needs_block_p (todo_spec))
4119 {
4120 rec = sched_create_recovery_block (NULL);
4121 label = BB_HEAD (rec);
4122 }
4123 else
4124 {
4125 rec = EXIT_BLOCK_PTR;
4126 label = NULL_RTX;
4127 }
4128
4129 /* Emit CHECK. */
4130 check = targetm.sched.gen_spec_check (insn, label, todo_spec);
4131
4132 if (rec != EXIT_BLOCK_PTR)
4133 {
4134 /* To keep mem_reg live at the beginning of second_bb,
4135 we emit the check BEFORE insn, so that after splitting,
4136 insn will be at the beginning of second_bb, which will
4137 provide us with the correct life information. */
4138 check = emit_jump_insn_before (check, insn);
4139 JUMP_LABEL (check) = label;
4140 LABEL_NUSES (label)++;
4141 }
4142 else
4143 check = emit_insn_before (check, insn);
4144
4145 /* Extend data structures. */
4146 haifa_init_insn (check);
4147
4148 /* CHECK is being added to current region. Extend ready list. */
4149 gcc_assert (sched_ready_n_insns != -1);
4150 sched_extend_ready_list (sched_ready_n_insns + 1);
4151
4152 if (current_sched_info->add_remove_insn)
4153 current_sched_info->add_remove_insn (insn, 0);
4154
4155 RECOVERY_BLOCK (check) = rec;
4156
4157 if (sched_verbose && spec_info->dump)
4158 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
4159 (*current_sched_info->print_insn) (check, 0));
4160
4161 gcc_assert (ORIG_PAT (insn));
4162
4163 /* Initialize TWIN (twin is a duplicate of original instruction
4164 in the recovery block). */
4165 if (rec != EXIT_BLOCK_PTR)
4166 {
4167 sd_iterator_def sd_it;
4168 dep_t dep;
4169
4170 FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
4171 if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
4172 {
4173 struct _dep _dep2, *dep2 = &_dep2;
4174
4175 init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);
4176
4177 sd_add_dep (dep2, true);
4178 }
4179
4180 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
4181 haifa_init_insn (twin);
4182
4183 if (sched_verbose && spec_info->dump)
4184 /* INSN_BB (insn) isn't determined for twin insns yet.
4185 So we can't use current_sched_info->print_insn. */
4186 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
4187 INSN_UID (twin), rec->index);
4188 }
4189 else
4190 {
4191 ORIG_PAT (check) = ORIG_PAT (insn);
4192 HAS_INTERNAL_DEP (check) = 1;
4193 twin = check;
4194 /* ??? We probably should change all OUTPUT dependencies to
4195 (TRUE | OUTPUT). */
4196 }
4197
4198 /* Copy all resolved back dependencies of INSN to TWIN. This will
4199 provide correct value for INSN_TICK (TWIN). */
4200 sd_copy_back_deps (twin, insn, true);
4201
4202 if (rec != EXIT_BLOCK_PTR)
4203 /* In case of branchy check, fix CFG. */
4204 {
4205 basic_block first_bb, second_bb;
4206 rtx jump;
4207
4208 first_bb = BLOCK_FOR_INSN (check);
4209 second_bb = sched_split_block (first_bb, check);
4210
4211 sched_create_recovery_edges (first_bb, rec, second_bb);
4212
4213 sched_init_only_bb (second_bb, first_bb);
4214 sched_init_only_bb (rec, EXIT_BLOCK_PTR);
4215
4216 jump = BB_END (rec);
4217 haifa_init_insn (jump);
4218 }
4219
4220 /* Move backward dependences from INSN to CHECK and
4221 move forward dependences from INSN to TWIN. */
4222
4223 /* First, create dependencies between INSN's producers and CHECK & TWIN. */
4224 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4225 {
4226 rtx pro = DEP_PRO (dep);
4227 ds_t ds;
4228
4229 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
4230 check --TRUE--> producer ??? or ANTI ???
4231 twin --TRUE--> producer
4232 twin --ANTI--> check
4233
4234 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
4235 check --ANTI--> producer
4236 twin --ANTI--> producer
4237 twin --ANTI--> check
4238
4239 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
4240 check ~~TRUE~~> producer
4241 twin ~~TRUE~~> producer
4242 twin --ANTI--> check */
4243
4244 ds = DEP_STATUS (dep);
4245
4246 if (ds & BEGIN_SPEC)
4247 {
4248 gcc_assert (!mutate_p);
4249 ds &= ~BEGIN_SPEC;
4250 }
4251
4252 init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
4253 sd_add_dep (new_dep, false);
4254
4255 if (rec != EXIT_BLOCK_PTR)
4256 {
4257 DEP_CON (new_dep) = twin;
4258 sd_add_dep (new_dep, false);
4259 }
4260 }
4261
4262 /* Second, remove backward dependencies of INSN. */
4263 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4264 sd_iterator_cond (&sd_it, &dep);)
4265 {
4266 if ((DEP_STATUS (dep) & BEGIN_SPEC)
4267 || mutate_p)
4268 /* We can delete this dep because we overcome it with
4269 BEGIN_SPECULATION. */
4270 sd_delete_dep (sd_it);
4271 else
4272 sd_iterator_next (&sd_it);
4273 }
4274
4275 /* Future Speculations. Determine what BE_IN speculations will be like. */
4276 fs = 0;
4277
4278 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
4279 here. */
4280
4281 gcc_assert (!DONE_SPEC (insn));
4282
4283 if (!mutate_p)
4284 {
4285 ds_t ts = TODO_SPEC (insn);
4286
4287 DONE_SPEC (insn) = ts & BEGIN_SPEC;
4288 CHECK_SPEC (check) = ts & BEGIN_SPEC;
4289
4290 /* The success probability of future speculations depends solely upon the
4291 initial BEGIN speculation. */
4292 if (ts & BEGIN_DATA)
4293 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
4294 if (ts & BEGIN_CONTROL)
4295 fs = set_dep_weak (fs, BE_IN_CONTROL,
4296 get_dep_weak (ts, BEGIN_CONTROL));
4297 }
4298 else
4299 CHECK_SPEC (check) = CHECK_SPEC (insn);
4300
4301 /* Future speculations: call the helper. */
4302 process_insn_forw_deps_be_in_spec (insn, twin, fs);
4303
4304 if (rec != EXIT_BLOCK_PTR)
4305 {
4306 /* Which types of dependencies to use here is, in general,
4307 a machine-dependent question... But, for now,
4308 it is not. */
4309
4310 if (!mutate_p)
4311 {
4312 init_dep (new_dep, insn, check, REG_DEP_TRUE);
4313 sd_add_dep (new_dep, false);
4314
4315 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
4316 sd_add_dep (new_dep, false);
4317 }
4318 else
4319 {
4320 if (spec_info->dump)
4321 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
4322 (*current_sched_info->print_insn) (insn, 0));
4323
4324 /* Remove all dependencies of the INSN. */
4325 {
4326 sd_it = sd_iterator_start (insn, (SD_LIST_FORW
4327 | SD_LIST_BACK
4328 | SD_LIST_RES_BACK));
4329 while (sd_iterator_cond (&sd_it, &dep))
4330 sd_delete_dep (sd_it);
4331 }
4332
4333 /* If the former check (INSN) was already moved to the ready (or queue)
4334 list, add the new check (CHECK) there too. */
4335 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
4336 try_ready (check);
4337
4338 /* Remove old check from instruction stream and free its
4339 data. */
4340 sched_remove_insn (insn);
4341 }
4342
4343 init_dep (new_dep, check, twin, REG_DEP_ANTI);
4344 sd_add_dep (new_dep, false);
4345 }
4346 else
4347 {
4348 init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
4349 sd_add_dep (new_dep, false);
4350 }
4351
4352 if (!mutate_p)
4353 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
4354 because it'll be done later in add_to_speculative_block. */
4355 {
4356 rtx_vec_t priorities_roots = NULL;
4357
4358 clear_priorities (twin, &priorities_roots);
4359 calc_priorities (priorities_roots);
4360 VEC_free (rtx, heap, priorities_roots);
4361 }
4362 }
4363
4364 /* Remove dependencies between instructions in the recovery block REC
4365 and ordinary region instructions. Inner dependences are kept so it
4366 won't be necessary to recompute them. */
4367 static void
4368 fix_recovery_deps (basic_block rec)
4369 {
4370 rtx note, insn, jump, ready_list = 0;
4371 bitmap_head in_ready;
4372 rtx link;
4373
4374 bitmap_initialize (&in_ready, 0);
4375
4376 /* NOTE - a basic block note. */
4377 note = NEXT_INSN (BB_HEAD (rec));
4378 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4379 insn = BB_END (rec);
4380 gcc_assert (JUMP_P (insn));
4381 insn = PREV_INSN (insn);
4382
4383 do
4384 {
4385 sd_iterator_def sd_it;
4386 dep_t dep;
4387
4388 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4389 sd_iterator_cond (&sd_it, &dep);)
4390 {
4391 rtx consumer = DEP_CON (dep);
4392
4393 if (BLOCK_FOR_INSN (consumer) != rec)
4394 {
4395 sd_delete_dep (sd_it);
4396
4397 if (!bitmap_bit_p (&in_ready, INSN_LUID (consumer)))
4398 {
4399 ready_list = alloc_INSN_LIST (consumer, ready_list);
4400 bitmap_set_bit (&in_ready, INSN_LUID (consumer));
4401 }
4402 }
4403 else
4404 {
4405 gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
4406
4407 sd_iterator_next (&sd_it);
4408 }
4409 }
4410
4411 insn = PREV_INSN (insn);
4412 }
4413 while (insn != note);
4414
4415 bitmap_clear (&in_ready);
4416
4417 /* Try to add instructions to the ready or queue list. */
4418 for (link = ready_list; link; link = XEXP (link, 1))
4419 try_ready (XEXP (link, 0));
4420 free_INSN_LIST_list (&ready_list);
4421
4422 /* Fix the jump's dependences. */
4423 insn = BB_HEAD (rec);
4424 jump = BB_END (rec);
4425
4426 gcc_assert (LABEL_P (insn));
4427 insn = NEXT_INSN (insn);
4428
4429 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4430 add_jump_dependencies (insn, jump);
4431 }
4432
4433 /* Change pattern of INSN to NEW_PAT. */
4434 void
4435 sched_change_pattern (rtx insn, rtx new_pat)
4436 {
4437 int t;
4438
4439 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
4440 gcc_assert (t);
4441 dfa_clear_single_insn_cache (insn);
4442 }
4443
4444 /* Change pattern of INSN to NEW_PAT. Invalidate cached haifa
4445 instruction data. */
4446 static void
4447 haifa_change_pattern (rtx insn, rtx new_pat)
4448 {
4449 sched_change_pattern (insn, new_pat);
4450
4451 /* Invalidate INSN_COST, so it'll be recalculated. */
4452 INSN_COST (insn) = -1;
4453 /* Invalidate INSN_TICK, so it'll be recalculated. */
4454 INSN_TICK (insn) = INVALID_TICK;
4455 }
4456
4457 /* Return -1 if INSN can't be speculated;
4458 0 if, for speculation with REQUEST mode, it is OK to use the
4459 current instruction pattern;
4460 1 if the pattern must be changed: *NEW_PAT holds the speculative pattern. */
4461 int
4462 sched_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4463 {
4464 gcc_assert (current_sched_info->flags & DO_SPECULATION
4465 && (request & SPECULATIVE)
4466 && sched_insn_is_legitimate_for_speculation_p (insn, request));
4467
4468 if ((request & spec_info->mask) != request)
4469 return -1;
4470
4471 if (request & BE_IN_SPEC
4472 && !(request & BEGIN_SPEC))
4473 return 0;
4474
4475 return targetm.sched.speculate_insn (insn, request, new_pat);
4476 }
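/* A minimal caller sketch (illustration only) showing how the three
   return values documented above are meant to be handled.  */
#if 0
{
  rtx new_pat;
  int res = sched_speculate_insn (insn, request, &new_pat);

  if (res < 0)
    ; /* INSN can't be speculated with REQUEST.  */
  else if (res == 0)
    ; /* The current pattern is already fine.  */
  else
    /* Install the speculative form of the pattern.  */
    haifa_change_pattern (insn, new_pat);
}
#endif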
4477
4478 static int
4479 haifa_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4480 {
4481 gcc_assert (sched_deps_info->generate_spec_deps
4482 && !IS_SPECULATION_CHECK_P (insn));
4483
4484 if (HAS_INTERNAL_DEP (insn)
4485 || SCHED_GROUP_P (insn))
4486 return -1;
4487
4488 return sched_speculate_insn (insn, request, new_pat);
4489 }
4490
4491 /* Print some information about block BB, which starts with HEAD and
4492 ends with TAIL, before scheduling it.
4493 I is zero if the scheduler is about to start with a fresh ebb. */
4494 static void
4495 dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
4496 {
4497 if (!i)
4498 fprintf (sched_dump,
4499 ";; ======================================================\n");
4500 else
4501 fprintf (sched_dump,
4502 ";; =====================ADVANCING TO=====================\n");
4503 fprintf (sched_dump,
4504 ";; -- basic block %d from %d to %d -- %s reload\n",
4505 bb->index, INSN_UID (head), INSN_UID (tail),
4506 (reload_completed ? "after" : "before"));
4507 fprintf (sched_dump,
4508 ";; ======================================================\n");
4509 fprintf (sched_dump, "\n");
4510 }
4511
4512 /* Unlink basic block notes and labels and save them, so they
4513 can be easily restored. We unlink basic block notes in the EBB to
4514 provide backward compatibility with the previous code, as target backends
4515 assume that there will be only instructions between
4516 current_sched_info->{head and tail}. We restore these notes as soon
4517 as we can.
4518 FIRST (LAST) is the first (last) basic block in the ebb.
4519 NB: In the usual case (FIRST == LAST) nothing is really done. */
4520 void
4521 unlink_bb_notes (basic_block first, basic_block last)
4522 {
4523 /* We DON'T unlink basic block notes of the first block in the ebb. */
4524 if (first == last)
4525 return;
4526
4527 bb_header = XNEWVEC (rtx, last_basic_block);
4528
4529 /* Make a sentinel. */
4530 if (last->next_bb != EXIT_BLOCK_PTR)
4531 bb_header[last->next_bb->index] = 0;
4532
4533 first = first->next_bb;
4534 do
4535 {
4536 rtx prev, label, note, next;
4537
4538 label = BB_HEAD (last);
4539 if (LABEL_P (label))
4540 note = NEXT_INSN (label);
4541 else
4542 note = label;
4543 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4544
4545 prev = PREV_INSN (label);
4546 next = NEXT_INSN (note);
4547 gcc_assert (prev && next);
4548
4549 NEXT_INSN (prev) = next;
4550 PREV_INSN (next) = prev;
4551
4552 bb_header[last->index] = label;
4553
4554 if (last == first)
4555 break;
4556
4557 last = last->prev_bb;
4558 }
4559 while (1);
4560 }
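/* A pairing sketch (illustration only; FIRST_BB and LAST_BB are just
   placeholder names): unlink the notes before scheduling the ebb and
   restore them afterwards, so backends only ever see instructions
   between current_sched_info->head and tail.  */
#if 0
{
  unlink_bb_notes (first_bb, last_bb);
  /* ... schedule the blocks of the ebb ... */
  restore_bb_notes (first_bb);
}
#endif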
4561
4562 /* Restore basic block notes.
4563 FIRST is the first basic block in the ebb. */
4564 static void
4565 restore_bb_notes (basic_block first)
4566 {
4567 if (!bb_header)
4568 return;
4569
4570 /* We DON'T unlink basic block notes of the first block in the ebb. */
4571 first = first->next_bb;
4572 /* Remember: FIRST is actually the second basic block in the ebb. */
4573
4574 while (first != EXIT_BLOCK_PTR
4575 && bb_header[first->index])
4576 {
4577 rtx prev, label, note, next;
4578
4579 label = bb_header[first->index];
4580 prev = PREV_INSN (label);
4581 next = NEXT_INSN (prev);
4582
4583 if (LABEL_P (label))
4584 note = NEXT_INSN (label);
4585 else
4586 note = label;
4587 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4588
4589 bb_header[first->index] = 0;
4590
4591 NEXT_INSN (prev) = label;
4592 NEXT_INSN (note) = next;
4593 PREV_INSN (next) = note;
4594
4595 first = first->next_bb;
4596 }
4597
4598 free (bb_header);
4599 bb_header = 0;
4600 }
4601
4602 /* Helper function.
4603 Fix CFG after both in- and inter-block movement of
4604 control_flow_insn_p JUMP. */
4605 static void
4606 fix_jump_move (rtx jump)
4607 {
4608 basic_block bb, jump_bb, jump_bb_next;
4609
4610 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4611 jump_bb = BLOCK_FOR_INSN (jump);
4612 jump_bb_next = jump_bb->next_bb;
4613
4614 gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
4615 || IS_SPECULATION_BRANCHY_CHECK_P (jump));
4616
4617 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
4618 /* if jump_bb_next is not empty. */
4619 BB_END (jump_bb) = BB_END (jump_bb_next);
4620
4621 if (BB_END (bb) != PREV_INSN (jump))
4622 /* Then there are instructions after jump that should be placed
4623 in jump_bb_next. */
4624 BB_END (jump_bb_next) = BB_END (bb);
4625 else
4626 /* Otherwise jump_bb_next is empty. */
4627 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
4628
4629 /* To make assertion in move_insn happy. */
4630 BB_END (bb) = PREV_INSN (jump);
4631
4632 update_bb_for_insn (jump_bb_next);
4633 }
4634
4635 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
4636 static void
4637 move_block_after_check (rtx jump)
4638 {
4639 basic_block bb, jump_bb, jump_bb_next;
4640 VEC(edge,gc) *t;
4641
4642 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4643 jump_bb = BLOCK_FOR_INSN (jump);
4644 jump_bb_next = jump_bb->next_bb;
4645
4646 update_bb_for_insn (jump_bb);
4647
4648 gcc_assert (IS_SPECULATION_CHECK_P (jump)
4649 || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
4650
4651 unlink_block (jump_bb_next);
4652 link_block (jump_bb_next, bb);
4653
4654 t = bb->succs;
4655 bb->succs = 0;
4656 move_succs (&(jump_bb->succs), bb);
4657 move_succs (&(jump_bb_next->succs), jump_bb);
4658 move_succs (&t, jump_bb_next);
4659
4660 df_mark_solutions_dirty ();
4661
4662 common_sched_info->fix_recovery_cfg
4663 (bb->index, jump_bb->index, jump_bb_next->index);
4664 }
4665
4666 /* Helper function for move_block_after_check.
4667 This function attaches the edge vector pointed to by SUCCSP to
4668 block TO. */
4669 static void
4670 move_succs (VEC(edge,gc) **succsp, basic_block to)
4671 {
4672 edge e;
4673 edge_iterator ei;
4674
4675 gcc_assert (to->succs == 0);
4676
4677 to->succs = *succsp;
4678
4679 FOR_EACH_EDGE (e, ei, to->succs)
4680 e->src = to;
4681
4682 *succsp = 0;
4683 }
4684
4685 /* Remove INSN from the instruction stream.
4686 INSN should not have any dependencies. */
4687 static void
4688 sched_remove_insn (rtx insn)
4689 {
4690 sd_finish_insn (insn);
4691
4692 change_queue_index (insn, QUEUE_NOWHERE);
4693 current_sched_info->add_remove_insn (insn, 1);
4694 remove_insn (insn);
4695 }
4696
4697 /* Clear the priorities of all instructions that are forward dependent on INSN.
4698 Store in the vector pointed to by ROOTS_PTR the insns on which priority ()
4699 should be invoked to initialize all cleared priorities. */
4700 static void
4701 clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
4702 {
4703 sd_iterator_def sd_it;
4704 dep_t dep;
4705 bool insn_is_root_p = true;
4706
4707 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
4708
4709 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4710 {
4711 rtx pro = DEP_PRO (dep);
4712
4713 if (INSN_PRIORITY_STATUS (pro) >= 0
4714 && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
4715 {
4716 /* If DEP doesn't contribute to priority then INSN itself should
4717 be added to priority roots. */
4718 if (contributes_to_priority_p (dep))
4719 insn_is_root_p = false;
4720
4721 INSN_PRIORITY_STATUS (pro) = -1;
4722 clear_priorities (pro, roots_ptr);
4723 }
4724 }
4725
4726 if (insn_is_root_p)
4727 VEC_safe_push (rtx, heap, *roots_ptr, insn);
4728 }
4729
4730 /* Recompute the priorities of instructions whose priorities might have
4731 changed. ROOTS is a vector of instructions whose priority computation will
4732 trigger initialization of all cleared priorities. */
4733 static void
4734 calc_priorities (rtx_vec_t roots)
4735 {
4736 int i;
4737 rtx insn;
4738
4739 for (i = 0; VEC_iterate (rtx, roots, i, insn); i++)
4740 priority (insn);
4741 }
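/* The usual idiom in this file for recomputing priorities after the
   dependence graph has changed (see add_to_speculative_block and
   create_check_block_twin above):

     rtx_vec_t roots = NULL;
     clear_priorities (insn, &roots);
     calc_priorities (roots);
     VEC_free (rtx, heap, roots);  */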
4742
4743
4744 /* Add dependences between JUMP and other instructions in the recovery
4745 block. INSN is the first insn in the recovery block. */
4746 static void
4747 add_jump_dependencies (rtx insn, rtx jump)
4748 {
4749 do
4750 {
4751 insn = NEXT_INSN (insn);
4752 if (insn == jump)
4753 break;
4754
4755 if (dep_list_size (insn) == 0)
4756 {
4757 dep_def _new_dep, *new_dep = &_new_dep;
4758
4759 init_dep (new_dep, insn, jump, REG_DEP_ANTI);
4760 sd_add_dep (new_dep, false);
4761 }
4762 }
4763 while (1);
4764
4765 gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
4766 }
4767
4768 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
4769 rtx
4770 bb_note (basic_block bb)
4771 {
4772 rtx note;
4773
4774 note = BB_HEAD (bb);
4775 if (LABEL_P (note))
4776 note = NEXT_INSN (note);
4777
4778 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4779 return note;
4780 }
4781
4782 #ifdef ENABLE_CHECKING
4783 /* Helper function for check_cfg.
4784 Return nonzero if the edge vector pointed to by EL has an edge with TYPE in
4785 its flags. */
4786 static int
4787 has_edge_p (VEC(edge,gc) *el, int type)
4788 {
4789 edge e;
4790 edge_iterator ei;
4791
4792 FOR_EACH_EDGE (e, ei, el)
4793 if (e->flags & type)
4794 return 1;
4795 return 0;
4796 }
4797
4798 /* Search back, starting at INSN, for an insn that is not a
4799 NOTE_INSN_VAR_LOCATION. Don't search beyond HEAD, and return it if
4800 no such insn can be found. */
4801 static inline rtx
4802 prev_non_location_insn (rtx insn, rtx head)
4803 {
4804 while (insn != head && NOTE_P (insn)
4805 && NOTE_KIND (insn) == NOTE_INSN_VAR_LOCATION)
4806 insn = PREV_INSN (insn);
4807
4808 return insn;
4809 }
4810
4811 /* Check a few properties of the CFG between HEAD and TAIL.
4812 If HEAD (TAIL) is NULL, check from the beginning (to the end) of the
4813 instruction stream. */
4814 static void
4815 check_cfg (rtx head, rtx tail)
4816 {
4817 rtx next_tail;
4818 basic_block bb = 0;
4819 int not_first = 0, not_last;
4820
4821 if (head == NULL)
4822 head = get_insns ();
4823 if (tail == NULL)
4824 tail = get_last_insn ();
4825 next_tail = NEXT_INSN (tail);
4826
4827 do
4828 {
4829 not_last = head != tail;
4830
4831 if (not_first)
4832 gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
4833 if (not_last)
4834 gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
4835
4836 if (LABEL_P (head)
4837 || (NOTE_INSN_BASIC_BLOCK_P (head)
4838 && (!not_first
4839 || (not_first && !LABEL_P (PREV_INSN (head))))))
4840 {
4841 gcc_assert (bb == 0);
4842 bb = BLOCK_FOR_INSN (head);
4843 if (bb != 0)
4844 gcc_assert (BB_HEAD (bb) == head);
4845 else
4846 /* This is the case of jump table. See inside_basic_block_p (). */
4847 gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
4848 }
4849
4850 if (bb == 0)
4851 {
4852 gcc_assert (!inside_basic_block_p (head));
4853 head = NEXT_INSN (head);
4854 }
4855 else
4856 {
4857 gcc_assert (inside_basic_block_p (head)
4858 || NOTE_P (head));
4859 gcc_assert (BLOCK_FOR_INSN (head) == bb);
4860
4861 if (LABEL_P (head))
4862 {
4863 head = NEXT_INSN (head);
4864 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
4865 }
4866 else
4867 {
4868 if (control_flow_insn_p (head))
4869 {
4870 gcc_assert (prev_non_location_insn (BB_END (bb), head)
4871 == head);
4872
4873 if (any_uncondjump_p (head))
4874 gcc_assert (EDGE_COUNT (bb->succs) == 1
4875 && BARRIER_P (NEXT_INSN (head)));
4876 else if (any_condjump_p (head))
4877 gcc_assert (/* Usual case. */
4878 (EDGE_COUNT (bb->succs) > 1
4879 && !BARRIER_P (NEXT_INSN (head)))
4880 /* Or jump to the next instruction. */
4881 || (EDGE_COUNT (bb->succs) == 1
4882 && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
4883 == JUMP_LABEL (head))));
4884 }
4885 if (BB_END (bb) == head)
4886 {
4887 if (EDGE_COUNT (bb->succs) > 1)
4888 gcc_assert (control_flow_insn_p (prev_non_location_insn
4889 (head, BB_HEAD (bb)))
4890 || has_edge_p (bb->succs, EDGE_COMPLEX));
4891 bb = 0;
4892 }
4893
4894 head = NEXT_INSN (head);
4895 }
4896 }
4897
4898 not_first = 1;
4899 }
4900 while (head != next_tail);
4901
4902 gcc_assert (bb == 0);
4903 }
4904
4905 #endif /* ENABLE_CHECKING */
4906
4907 /* Extend per basic block data structures. */
4908 static void
4909 extend_bb (void)
4910 {
4911 if (sched_scan_info->extend_bb)
4912 sched_scan_info->extend_bb ();
4913 }
4914
4915 /* Init data for BB. */
4916 static void
4917 init_bb (basic_block bb)
4918 {
4919 if (sched_scan_info->init_bb)
4920 sched_scan_info->init_bb (bb);
4921 }
4922
4923 /* Extend per insn data structures. */
4924 static void
4925 extend_insn (void)
4926 {
4927 if (sched_scan_info->extend_insn)
4928 sched_scan_info->extend_insn ();
4929 }
4930
4931 /* Init data structures for INSN. */
4932 static void
4933 init_insn (rtx insn)
4934 {
4935 if (sched_scan_info->init_insn)
4936 sched_scan_info->init_insn (insn);
4937 }
4938
4939 /* Init all insns in BB. */
4940 static void
4941 init_insns_in_bb (basic_block bb)
4942 {
4943 rtx insn;
4944
4945 FOR_BB_INSNS (bb, insn)
4946 init_insn (insn);
4947 }
4948
4949 /* A driver function to add a set of basic blocks (BBS),
4950 a single basic block (BB), a set of insns (INSNS) or a single insn (INSN)
4951 to the scheduling region. */
4952 void
4953 sched_scan (const struct sched_scan_info_def *ssi,
4954 bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
4955 {
4956 sched_scan_info = ssi;
4957
4958 if (bbs != NULL || bb != NULL)
4959 {
4960 extend_bb ();
4961
4962 if (bbs != NULL)
4963 {
4964 unsigned i;
4965 basic_block x;
4966
4967 for (i = 0; VEC_iterate (basic_block, bbs, i, x); i++)
4968 init_bb (x);
4969 }
4970
4971 if (bb != NULL)
4972 init_bb (bb);
4973 }
4974
4975 extend_insn ();
4976
4977 if (bbs != NULL)
4978 {
4979 unsigned i;
4980 basic_block x;
4981
4982 for (i = 0; VEC_iterate (basic_block, bbs, i, x); i++)
4983 init_insns_in_bb (x);
4984 }
4985
4986 if (bb != NULL)
4987 init_insns_in_bb (bb);
4988
4989 if (insns != NULL)
4990 {
4991 unsigned i;
4992 rtx x;
4993
4994 for (i = 0; VEC_iterate (rtx, insns, i, x); i++)
4995 init_insn (x);
4996 }
4997
4998 if (insn != NULL)
4999 init_insn (insn);
5000 }
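/* For concrete sched_scan_info_def instances that feed this driver,
   see sched_init_luids and haifa_init_h_i_d below: both set only the
   per-insn hooks and leave the per-bb hooks NULL.  */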
5001
5002
5003 /* Extend data structures for logical insn UID. */
5004 static void
5005 luids_extend_insn (void)
5006 {
5007 int new_luids_max_uid = get_max_uid () + 1;
5008
5009 VEC_safe_grow_cleared (int, heap, sched_luids, new_luids_max_uid);
5010 }
5011
5012 /* Initialize LUID for INSN. */
5013 static void
5014 luids_init_insn (rtx insn)
5015 {
5016 int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
5017 int luid;
5018
5019 if (i >= 0)
5020 {
5021 luid = sched_max_luid;
5022 sched_max_luid += i;
5023 }
5024 else
5025 luid = -1;
5026
5027 SET_INSN_LUID (insn, luid);
5028 }
5029
5030 /* Initialize luids for BBS, BB, INSNS and INSN.
5031 The hook common_sched_info->luid_for_non_insn () is used to determine
5032 if notes, labels, etc. need luids. */
5033 void
5034 sched_init_luids (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
5035 {
5036 const struct sched_scan_info_def ssi =
5037 {
5038 NULL, /* extend_bb */
5039 NULL, /* init_bb */
5040 luids_extend_insn, /* extend_insn */
5041 luids_init_insn /* init_insn */
5042 };
5043
5044 sched_scan (&ssi, bbs, bb, insns, insn);
5045 }
5046
5047 /* Free LUIDs. */
5048 void
5049 sched_finish_luids (void)
5050 {
5051 VEC_free (int, heap, sched_luids);
5052 sched_max_luid = 1;
5053 }
5054
5055 /* Return logical uid of INSN. Helpful while debugging. */
5056 int
5057 insn_luid (rtx insn)
5058 {
5059 return INSN_LUID (insn);
5060 }
5061
5062 /* Extend per insn data in the target. */
5063 void
5064 sched_extend_target (void)
5065 {
5066 if (targetm.sched.h_i_d_extended)
5067 targetm.sched.h_i_d_extended ();
5068 }
5069
5070 /* Extend global scheduler structures (those that live across calls to
5071 schedule_block) to include information about the just emitted INSN. */
5072 static void
5073 extend_h_i_d (void)
5074 {
5075 int reserve = (get_max_uid () + 1
5076 - VEC_length (haifa_insn_data_def, h_i_d));
5077 if (reserve > 0
5078 && ! VEC_space (haifa_insn_data_def, h_i_d, reserve))
5079 {
5080 VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d,
5081 3 * get_max_uid () / 2);
5082 sched_extend_target ();
5083 }
5084 }
5085
5086 /* Initialize the h_i_d entry of INSN with default values.
5087 Values that are not explicitly initialized here hold zero. */
5088 static void
5089 init_h_i_d (rtx insn)
5090 {
5091 if (INSN_LUID (insn) > 0)
5092 {
5093 INSN_COST (insn) = -1;
5094 find_insn_reg_weight (insn);
5095 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
5096 INSN_TICK (insn) = INVALID_TICK;
5097 INTER_TICK (insn) = INVALID_TICK;
5098 TODO_SPEC (insn) = HARD_DEP;
5099 }
5100 }
5101
5102 /* Initialize haifa_insn_data for BBS, BB, INSNS and INSN. */
5103 void
5104 haifa_init_h_i_d (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
5105 {
5106 const struct sched_scan_info_def ssi =
5107 {
5108 NULL, /* extend_bb */
5109 NULL, /* init_bb */
5110 extend_h_i_d, /* extend_insn */
5111 init_h_i_d /* init_insn */
5112 };
5113
5114 sched_scan (&ssi, bbs, bb, insns, insn);
5115 }
5116
5117 /* Finalize haifa_insn_data. */
5118 void
5119 haifa_finish_h_i_d (void)
5120 {
5121 VEC_free (haifa_insn_data_def, heap, h_i_d);
5122 }
5123
5124 /* Init data for the new insn INSN. */
5125 static void
5126 haifa_init_insn (rtx insn)
5127 {
5128 gcc_assert (insn != NULL);
5129
5130 sched_init_luids (NULL, NULL, NULL, insn);
5131 sched_extend_target ();
5132 sched_deps_init (false);
5133 haifa_init_h_i_d (NULL, NULL, NULL, insn);
5134
5135 if (adding_bb_to_current_region_p)
5136 {
5137 sd_init_insn (insn);
5138
5139 /* Extend dependency caches by one element. */
5140 extend_dependency_caches (1, false);
5141 }
5142 }
5143
5144 /* Init data for the new basic block BB which comes after AFTER. */
5145 static void
5146 haifa_init_only_bb (basic_block bb, basic_block after)
5147 {
5148 gcc_assert (bb != NULL);
5149
5150 sched_init_bbs ();
5151
5152 if (common_sched_info->add_block)
5153 /* This changes only data structures of the front-end. */
5154 common_sched_info->add_block (bb, after);
5155 }
5156
5157 /* A generic version of sched_split_block (). */
5158 basic_block
5159 sched_split_block_1 (basic_block first_bb, rtx after)
5160 {
5161 edge e;
5162
5163 e = split_block (first_bb, after);
5164 gcc_assert (e->src == first_bb);
5165
5166 /* sched_split_block emits note if *check == BB_END. Probably it
5167 is better to rip that note off. */
5168
5169 return e->dest;
5170 }
5171
5172 /* A generic version of sched_create_empty_bb (). */
5173 basic_block
5174 sched_create_empty_bb_1 (basic_block after)
5175 {
5176 return create_empty_bb (after);
5177 }
5178
5179 /* Insert PAT as an INSN into the schedule and update the necessary data
5180 structures to account for it. */
5181 rtx
5182 sched_emit_insn (rtx pat)
5183 {
5184 rtx insn = emit_insn_after (pat, last_scheduled_insn);
5185 last_scheduled_insn = insn;
5186 haifa_init_insn (insn);
5187 return insn;
5188 }
5189
5190 #endif /* INSN_SCHEDULING */