1 /* Instruction scheduling pass.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
6 and currently maintained by, Jim Wilson (wilson@cygnus.com)
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 /* Instruction scheduling pass. This file, along with sched-deps.c,
25    contains the generic parts.  The actual entry point for the normal
26    instruction scheduling pass is found in sched-rgn.c.
27
28 We compute insn priorities based on data dependencies. Flow
29 analysis only creates a fraction of the data-dependencies we must
30 observe: namely, only those dependencies which the combiner can be
31 expected to use. For this pass, we must therefore create the
32 remaining dependencies we need to observe: register dependencies,
33 memory dependencies, dependencies to keep function calls in order,
34 and the dependence between a conditional branch and the setting of
35 condition codes are all dealt with here.
36
37 The scheduler first traverses the data flow graph, starting with
38 the last instruction, and proceeding to the first, assigning values
39 to insn_priority as it goes. This sorts the instructions
40 topologically by data dependence.
41
42 Once priorities have been established, we order the insns using
43 list scheduling. This works as follows: starting with a list of
44 all the ready insns, and sorted according to priority number, we
45 schedule the insn from the end of the list by placing its
46 predecessors in the list according to their priority order. We
47 consider this insn scheduled by setting the pointer to the "end" of
48 the list to point to the previous insn. When an insn has no
49 predecessors, we either queue it until sufficient time has elapsed
50 or add it to the ready list. As the instructions are scheduled or
51 when stalls are introduced, the queue advances and dumps insns into
52 the ready list. When all insns down to the lowest priority have
53 been scheduled, the critical path of the basic block has been made
54 as short as possible. The remaining insns are then scheduled in
55 remaining slots.
56
57 The following list shows the order in which we want to break ties
58 among insns in the ready list:
59
60 1. choose insn with the longest path to end of bb, ties
61 broken by
62 2. choose insn with least contribution to register pressure,
63 ties broken by
64    3.  prefer in-block over interblock motion, ties broken by
65    4.  prefer useful over speculative motion, ties broken by
66 5. choose insn with largest control flow probability, ties
67 broken by
68    6.  choose insn with the least dependences upon the previously
69    scheduled insn, ties broken by
70    7.  choose the insn which has the most insns dependent on it, or finally
71    8.  choose insn with lowest UID.
72
73 Memory references complicate matters. Only if we can be certain
74 that memory references are not part of the data dependency graph
75 (via true, anti, or output dependence), can we move operations past
76 memory references. To first approximation, reads can be done
77 independently, while writes introduce dependencies. Better
78 approximations will yield fewer dependencies.
79
80 Before reload, an extended analysis of interblock data dependences
81 is required for interblock scheduling. This is performed in
82 compute_block_backward_dependences ().
83
84 Dependencies set up by memory references are treated in exactly the
85 same way as other dependencies, by using insn backward dependences
86    INSN_BACK_DEPS.  INSN_BACK_DEPS are translated into forward dependences
87    INSN_FORW_DEPS for the purpose of forward list scheduling.
88
89 Having optimized the critical path, we may have also unduly
90 extended the lifetimes of some registers. If an operation requires
91 that constants be loaded into registers, it is certainly desirable
92 to load those constants as early as necessary, but no earlier.
93 I.e., it will not do to load up a bunch of registers at the
94 beginning of a basic block only to use them at the end, if they
95 could be loaded later, since this may result in excessive register
96 utilization.
97
98 Note that since branches are never in basic blocks, but only end
99 basic blocks, this pass will not move branches. But that is ok,
100 since we can use GNU's delayed branch scheduling pass to take care
101 of this case.
102
103 Also note that no further optimizations based on algebraic
104 identities are performed, so this pass would be a good one to
105 perform instruction splitting, such as breaking up a multiply
106 instruction into shifts and adds where that is profitable.
107
108 Given the memory aliasing analysis that this pass should perform,
109 it should be possible to remove redundant stores to memory, and to
110 load values from registers instead of hitting memory.
111
112 Before reload, speculative insns are moved only if a 'proof' exists
113 that no exception will be caused by this, and if no live registers
114    exist that inhibit the motion (live register constraints are not
115 represented by data dependence edges).
116
117 This pass must update information that subsequent passes expect to
118 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
119 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
120
121 The information in the line number notes is carefully retained by
122 this pass. Notes that refer to the starting and ending of
123 exception regions are also carefully retained by this pass. All
124 other NOTE insns are grouped in their same relative order at the
125 beginning of basic blocks and regions that have been scheduled. */
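
/* A minimal sketch, in pseudocode only, of the list-scheduling loop
   described above (the real loop lives in schedule_block () later in
   this file; the names "ready", "queue" and "cycle" below refer to the
   concepts above, not to actual variables):

       compute insn priorities by a backward walk over the dependence graph;
       ready = insns whose predecessors have all been scheduled;
       cycle = 0;
       while (insns remain unscheduled)
         {
           move insns whose stall time has elapsed from the queue to ready;
           sort ready according to the tie-breaking rules listed above;
           issue up to issue_rate insns from ready; for each issued insn,
             resolve its outgoing dependences and place insns that become
             ready either on the ready list or in the queue, depending on
             the latency of the resolved dependence;
           cycle++;
         }  */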
126 \f
127 #include "config.h"
128 #include "system.h"
129 #include "coretypes.h"
130 #include "tm.h"
131 #include "toplev.h"
132 #include "rtl.h"
133 #include "tm_p.h"
134 #include "hard-reg-set.h"
135 #include "regs.h"
136 #include "function.h"
137 #include "flags.h"
138 #include "insn-config.h"
139 #include "insn-attr.h"
140 #include "except.h"
141 #include "toplev.h"
142 #include "recog.h"
143 #include "sched-int.h"
144 #include "target.h"
145 #include "output.h"
146 #include "params.h"
147 #include "vecprim.h"
148 #include "dbgcnt.h"
149 #include "cfgloop.h"
150
151 #ifdef INSN_SCHEDULING
152
153 /* issue_rate is the number of insns that can be scheduled in the same
154 machine cycle. It can be defined in the config/mach/mach.h file,
155 otherwise we set it to 1. */
156
157 int issue_rate;
158
159 /* sched-verbose controls the amount of debugging output the
160 scheduler prints. It is controlled by -fsched-verbose=N:
161    N>0 and no -dSR : the output is directed to stderr.
162    N>=10 will direct the printouts to stderr (regardless of -dSR).
163    N=1: same as -dSR.
164 N=2: bb's probabilities, detailed ready list info, unit/insn info.
165 N=3: rtl at abort point, control-flow, regions info.
166 N=5: dependences info. */
167
168 static int sched_verbose_param = 0;
169 int sched_verbose = 0;
170
171 /* Debugging file.  All printouts are sent to sched_dump, which is always
172    set, either to stderr, or to the dump listing file (-dSR).  */
173 FILE *sched_dump = 0;
174
175 /* fix_sched_param() is called from toplev.c upon detection
176 of the -fsched-verbose=N option. */
177
178 void
179 fix_sched_param (const char *param, const char *val)
180 {
181 if (!strcmp (param, "verbose"))
182 sched_verbose_param = atoi (val);
183 else
184 warning (0, "fix_sched_param: unknown param: %s", param);
185 }
186
187 /* This is a placeholder for the scheduler parameters common
188 to all schedulers. */
189 struct common_sched_info_def *common_sched_info;
190
191 #define INSN_TICK(INSN) (HID (INSN)->tick)
192 #define INTER_TICK(INSN) (HID (INSN)->inter_tick)
193
194 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
195 then it should be recalculated from scratch. */
196 #define INVALID_TICK (-(max_insn_queue_index + 1))
197 /* The minimal value of the INSN_TICK of an instruction. */
198 #define MIN_TICK (-max_insn_queue_index)
199
200 /* Issue points are used to distinguish between instructions in max_issue ().
201 For now, all instructions are equally good. */
202 #define ISSUE_POINTS(INSN) 1
203
204 /* List of important notes we must keep around. This is a pointer to the
205 last element in the list. */
206 rtx note_list;
207
208 static struct spec_info_def spec_info_var;
209 /* Description of the speculative part of the scheduling.
210 If NULL - no speculation. */
211 spec_info_t spec_info = NULL;
212
213 /* True if a recovery block was added during scheduling of the current
214    block.  Used to determine if we need to fix INSN_TICKs.  */
215 static bool haifa_recovery_bb_recently_added_p;
216
217 /* True if a recovery block was added during this scheduling pass.
218    Used to determine if we should have empty memory pools of dependencies
219    after finishing the current region.  */
220 bool haifa_recovery_bb_ever_added_p;
221
222 /* Counters of different types of speculative instructions. */
223 static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
224
225 /* Array used in {unlink, restore}_bb_notes. */
226 static rtx *bb_header = 0;
227
228 /* Basic block after which recovery blocks will be created. */
229 static basic_block before_recovery;
230
231 /* Basic block just before the EXIT_BLOCK and after recovery, if we have
232 created it. */
233 basic_block after_recovery;
234
235 /* FALSE if we add bb to another region, so we don't need to initialize it. */
236 bool adding_bb_to_current_region_p = true;
237
238 /* Queues, etc. */
239
240 /* An instruction is ready to be scheduled when all insns preceding it
241 have already been scheduled. It is important to ensure that all
242 insns which use its result will not be executed until its result
243 has been computed. An insn is maintained in one of four structures:
244
245 (P) the "Pending" set of insns which cannot be scheduled until
246 their dependencies have been satisfied.
247 (Q) the "Queued" set of insns that can be scheduled when sufficient
248 time has passed.
249 (R) the "Ready" list of unscheduled, uncommitted insns.
250 (S) the "Scheduled" list of insns.
251
252 Initially, all insns are either "Pending" or "Ready" depending on
253 whether their dependencies are satisfied.
254
255 Insns move from the "Ready" list to the "Scheduled" list as they
256 are committed to the schedule. As this occurs, the insns in the
257 "Pending" list have their dependencies satisfied and move to either
258 the "Ready" list or the "Queued" set depending on whether
259 sufficient time has passed to make them ready. As time passes,
260 insns move from the "Queued" set to the "Ready" list.
261
262 The "Pending" list (P) are the insns in the INSN_FORW_DEPS of the
263 unscheduled insns, i.e., those that are ready, queued, and pending.
264 The "Queued" set (Q) is implemented by the variable `insn_queue'.
265 The "Ready" list (R) is implemented by the variables `ready' and
266 `n_ready'.
267 The "Scheduled" list (S) is the new insn chain built by this pass.
268
269 The transition (R->S) is implemented in the scheduling loop in
270 `schedule_block' when the best insn to schedule is chosen.
271 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
272 insns move from the ready list to the scheduled list.
273 The transition (Q->R) is implemented in 'queue_to_insn' as time
274 passes or stalls are introduced. */
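
/* Schematically, using the names above (this only summarizes the
   transitions just described, it adds no behavior):

       P --(last backward dependence resolved)--> R or Q
       Q --(enough cycles have elapsed)---------> R
       R --(chosen in schedule_block)-----------> S  */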
275
276 /* Implement a circular buffer to delay instructions until sufficient
277    time has passed.  For the new pipeline description interface,
278    MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
279    than the maximal instruction execution time computed by genattr.c on
280    the basis of the maximal time of functional unit reservations and of
281    getting a result.  This is the longest time an insn may be queued.  */
282
283 static rtx *insn_queue;
284 static int q_ptr = 0;
285 static int q_size = 0;
286 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
287 #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
288
289 #define QUEUE_SCHEDULED (-3)
290 #define QUEUE_NOWHERE (-2)
291 #define QUEUE_READY (-1)
292 /* QUEUE_SCHEDULED - INSN is scheduled.
293    QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in the
294    queue nor in the ready list.
295 QUEUE_READY - INSN is in ready list.
296 N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
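
/* A minimal, self-contained sketch (not part of the scheduler) of the
   wrap-around arithmetic behind NEXT_Q and NEXT_Q_AFTER, assuming
   max_insn_queue_index is a power of two minus one as described above.
   The function and parameter names are illustrative only.  */
#if 0
static int
example_next_q_after (int q_ptr, int n_cycles, int queue_index_mask)
{
  /* E.g. with queue_index_mask == 31, q_ptr == 30 and n_cycles == 5,
     the slot is (30 + 5) & 31 == 3: the index wraps around the ring.  */
  return (q_ptr + n_cycles) & queue_index_mask;
}
#endif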
297
298 #define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
299
300 /* The following variable refers to all current and future
301    reservations of the processor units.  */
302 state_t curr_state;
303
304 /* The following variable is the size of memory representing all
305    current and future reservations of the processor units.  */
306 size_t dfa_state_size;
307
308 /* The following array is used to find the best insn from the ready list
309    when the automaton pipeline interface is used.  */
310 char *ready_try = NULL;
311
312 /* The ready list. */
313 struct ready_list ready = {NULL, 0, 0, 0};
314
315 /* The pointer to the ready list (to be removed). */
316 static struct ready_list *readyp = &ready;
317
318 /* Scheduling clock. */
319 static int clock_var;
320
321 static int may_trap_exp (const_rtx, int);
322
323 /* Nonzero iff the address is composed of at most 1 register.  */
324 #define CONST_BASED_ADDRESS_P(x) \
325 (REG_P (x) \
326 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
327 || (GET_CODE (x) == LO_SUM)) \
328 && (CONSTANT_P (XEXP (x, 0)) \
329 || CONSTANT_P (XEXP (x, 1)))))
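
/* For illustration (not exhaustive): (reg R), (plus (reg R) (const_int 4))
   and (lo_sum (reg R) (symbol_ref S)) satisfy CONST_BASED_ADDRESS_P,
   while (plus (reg R) (reg S)) does not, since neither operand of the
   PLUS is constant.  */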
330
331 /* Returns a class that insn with GET_DEST(insn)=x may belong to,
332 as found by analyzing insn's expression. */
333
334 \f
335 static int haifa_luid_for_non_insn (rtx x);
336
337 /* Haifa version of sched_info hooks common to all headers. */
338 const struct common_sched_info_def haifa_common_sched_info =
339 {
340 NULL, /* fix_recovery_cfg */
341 NULL, /* add_block */
342 NULL, /* estimate_number_of_insns */
343 haifa_luid_for_non_insn, /* luid_for_non_insn */
344 SCHED_PASS_UNKNOWN /* sched_pass_id */
345 };
346
347 const struct sched_scan_info_def *sched_scan_info;
348
349 /* Mapping from instruction UID to its Logical UID. */
350 VEC (int, heap) *sched_luids = NULL;
351
352 /* Next LUID to assign to an instruction. */
353 int sched_max_luid = 1;
354
355 /* Haifa Instruction Data. */
356 VEC (haifa_insn_data_def, heap) *h_i_d = NULL;
357
358 void (* sched_init_only_bb) (basic_block, basic_block);
359
360 /* Split block function.  Different schedulers might use different functions
361    to keep their internal data consistent.  */
362 basic_block (* sched_split_block) (basic_block, rtx);
363
364 /* Create empty basic block after the specified block. */
365 basic_block (* sched_create_empty_bb) (basic_block);
366
367 static int
368 may_trap_exp (const_rtx x, int is_store)
369 {
370 enum rtx_code code;
371
372 if (x == 0)
373 return TRAP_FREE;
374 code = GET_CODE (x);
375 if (is_store)
376 {
377 if (code == MEM && may_trap_p (x))
378 return TRAP_RISKY;
379 else
380 return TRAP_FREE;
381 }
382 if (code == MEM)
383 {
384 /* The insn uses memory: a volatile load. */
385 if (MEM_VOLATILE_P (x))
386 return IRISKY;
387 /* An exception-free load. */
388 if (!may_trap_p (x))
389 return IFREE;
390 /* A load with 1 base register, to be further checked. */
391 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
392 return PFREE_CANDIDATE;
393 /* No info on the load, to be further checked. */
394 return PRISKY_CANDIDATE;
395 }
396 else
397 {
398 const char *fmt;
399 int i, insn_class = TRAP_FREE;
400
401 /* Neither store nor load, check if it may cause a trap. */
402 if (may_trap_p (x))
403 return TRAP_RISKY;
404 /* Recursive step: walk the insn... */
405 fmt = GET_RTX_FORMAT (code);
406 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
407 {
408 if (fmt[i] == 'e')
409 {
410 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
411 insn_class = WORST_CLASS (insn_class, tmp_class);
412 }
413 else if (fmt[i] == 'E')
414 {
415 int j;
416 for (j = 0; j < XVECLEN (x, i); j++)
417 {
418 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
419 insn_class = WORST_CLASS (insn_class, tmp_class);
420 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
421 break;
422 }
423 }
424 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
425 break;
426 }
427 return insn_class;
428 }
429 }
430
431 /* Classifies rtx X of an insn for the purpose of verifying that X can be
432 executed speculatively (and consequently the insn can be moved
433 speculatively), by examining X, returning:
434 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
435 TRAP_FREE: non-load insn.
436 IFREE: load from a globally safe location.
437 IRISKY: volatile load.
438    PFREE_CANDIDATE, PRISKY_CANDIDATE: a load that needs to be checked for
439    being either PFREE or PRISKY.  */
440
441 static int
442 haifa_classify_rtx (const_rtx x)
443 {
444 int tmp_class = TRAP_FREE;
445 int insn_class = TRAP_FREE;
446 enum rtx_code code;
447
448 if (GET_CODE (x) == PARALLEL)
449 {
450 int i, len = XVECLEN (x, 0);
451
452 for (i = len - 1; i >= 0; i--)
453 {
454 tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
455 insn_class = WORST_CLASS (insn_class, tmp_class);
456 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
457 break;
458 }
459 }
460 else
461 {
462 code = GET_CODE (x);
463 switch (code)
464 {
465 case CLOBBER:
466 /* Test if it is a 'store'. */
467 tmp_class = may_trap_exp (XEXP (x, 0), 1);
468 break;
469 case SET:
470 /* Test if it is a store. */
471 tmp_class = may_trap_exp (SET_DEST (x), 1);
472 if (tmp_class == TRAP_RISKY)
473 break;
474 /* Test if it is a load. */
475 tmp_class =
476 WORST_CLASS (tmp_class,
477 may_trap_exp (SET_SRC (x), 0));
478 break;
479 case COND_EXEC:
480 tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
481 if (tmp_class == TRAP_RISKY)
482 break;
483 tmp_class = WORST_CLASS (tmp_class,
484 may_trap_exp (COND_EXEC_TEST (x), 0));
485 break;
486 case TRAP_IF:
487 tmp_class = TRAP_RISKY;
488 break;
489 default:;
490 }
491 insn_class = tmp_class;
492 }
493
494 return insn_class;
495 }
496
497 int
498 haifa_classify_insn (const_rtx insn)
499 {
500 return haifa_classify_rtx (PATTERN (insn));
501 }
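
/* Worked example (illustrative, not exhaustive): for a pattern
   (set (reg A) (mem (reg B))) the destination is trap-free and the
   source is a load from a single base register, so unless the MEM is
   known not to trap the insn classifies as PFREE_CANDIDATE; for
   (set (mem (reg B)) (reg A)) the destination is a store that may
   trap, so the insn classifies as TRAP_RISKY.  */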
502
503 /* Forward declarations. */
504
505 static int priority (rtx);
506 static int rank_for_schedule (const void *, const void *);
507 static void swap_sort (rtx *, int);
508 static void queue_insn (rtx, int);
509 static int schedule_insn (rtx);
510 static int find_set_reg_weight (const_rtx);
511 static void find_insn_reg_weight (const_rtx);
512 static void adjust_priority (rtx);
513 static void advance_one_cycle (void);
514 static void extend_h_i_d (void);
515
516
517 /* Notes handling mechanism:
518 =========================
519 Generally, NOTES are saved before scheduling and restored after scheduling.
520 The scheduler distinguishes between two types of notes:
521
522 (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
523 Before scheduling a region, a pointer to the note is added to the insn
524 that follows or precedes it. (This happens as part of the data dependence
525 computation). After scheduling an insn, the pointer contained in it is
526 used for regenerating the corresponding note (in reemit_notes).
527
528 (2) All other notes (e.g. INSN_DELETED): Before scheduling a block,
529 these notes are put in a list (in rm_other_notes() and
530 unlink_other_notes ()). After scheduling the block, these notes are
531 inserted at the beginning of the block (in schedule_block()). */
532
533 static void ready_add (struct ready_list *, rtx, bool);
534 static rtx ready_remove_first (struct ready_list *);
535
536 static void queue_to_ready (struct ready_list *);
537 static int early_queue_to_ready (state_t, struct ready_list *);
538
539 static void debug_ready_list (struct ready_list *);
540
541 /* The following functions are used to implement multi-pass scheduling
542 on the first cycle. */
543 static rtx ready_remove (struct ready_list *, int);
544 static void ready_remove_insn (rtx);
545
546 static int choose_ready (struct ready_list *, rtx *);
547
548 static void fix_inter_tick (rtx, rtx);
549 static int fix_tick_ready (rtx);
550 static void change_queue_index (rtx, int);
551
552 /* The following functions are used to implement scheduling of data/control
553 speculative instructions. */
554
555 static void extend_h_i_d (void);
556 static void init_h_i_d (rtx);
557 static void generate_recovery_code (rtx);
558 static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
559 static void begin_speculative_block (rtx);
560 static void add_to_speculative_block (rtx);
561 static void init_before_recovery (basic_block *);
562 static void create_check_block_twin (rtx, bool);
563 static void fix_recovery_deps (basic_block);
564 static void haifa_change_pattern (rtx, rtx);
565 static void dump_new_block_header (int, basic_block, rtx, rtx);
566 static void restore_bb_notes (basic_block);
567 static void fix_jump_move (rtx);
568 static void move_block_after_check (rtx);
569 static void move_succs (VEC(edge,gc) **, basic_block);
570 static void sched_remove_insn (rtx);
571 static void clear_priorities (rtx, rtx_vec_t *);
572 static void calc_priorities (rtx_vec_t);
573 static void add_jump_dependencies (rtx, rtx);
574 #ifdef ENABLE_CHECKING
575 static int has_edge_p (VEC(edge,gc) *, int);
576 static void check_cfg (rtx, rtx);
577 #endif
578
579 #endif /* INSN_SCHEDULING */
580 \f
581 /* Point to state used for the current scheduling pass. */
582 struct haifa_sched_info *current_sched_info;
583 \f
584 #ifndef INSN_SCHEDULING
585 void
586 schedule_insns (void)
587 {
588 }
589 #else
590
591 /* Pointer to the last instruction scheduled. Used by rank_for_schedule,
592 so that insns independent of the last scheduled insn will be preferred
593 over dependent instructions. */
594
595 static rtx last_scheduled_insn;
596
597 /* Cached cost of the instruction.  Use the function below to get the cost
598    of the insn.  -1 here means that the field is not initialized.  */
599 #define INSN_COST(INSN) (HID (INSN)->cost)
600
601 /* Compute cost of executing INSN.
602 This is the number of cycles between instruction issue and
603 instruction results. */
604 int
605 insn_cost (rtx insn)
606 {
607 int cost;
608
609 if (sel_sched_p ())
610 {
611 if (recog_memoized (insn) < 0)
612 return 0;
613
614 cost = insn_default_latency (insn);
615 if (cost < 0)
616 cost = 0;
617
618 return cost;
619 }
620
621 cost = INSN_COST (insn);
622
623 if (cost < 0)
624 {
625 /* A USE insn, or something else we don't need to
626 understand. We can't pass these directly to
627 result_ready_cost or insn_default_latency because it will
628 trigger a fatal error for unrecognizable insns. */
629 if (recog_memoized (insn) < 0)
630 {
631 INSN_COST (insn) = 0;
632 return 0;
633 }
634 else
635 {
636 cost = insn_default_latency (insn);
637 if (cost < 0)
638 cost = 0;
639
640 INSN_COST (insn) = cost;
641 }
642 }
643
644 return cost;
645 }
646
647 /* Compute cost of dependence LINK.
648 This is the number of cycles between instruction issue and
649 instruction results.
650 ??? We also use this function to call recog_memoized on all insns. */
651 int
652 dep_cost_1 (dep_t link, dw_t dw)
653 {
654 rtx insn = DEP_PRO (link);
655 rtx used = DEP_CON (link);
656 int cost;
657
658 /* A USE insn should never require the value used to be computed.
659 This allows the computation of a function's result and parameter
660 values to overlap the return and call. */
661 if (recog_memoized (used) < 0)
662 {
663 cost = 0;
664 recog_memoized (insn);
665 }
666 else
667 {
668 enum reg_note dep_type = DEP_TYPE (link);
669
670 cost = insn_cost (insn);
671
672 if (INSN_CODE (insn) >= 0)
673 {
674 if (dep_type == REG_DEP_ANTI)
675 cost = 0;
676 else if (dep_type == REG_DEP_OUTPUT)
677 {
678 cost = (insn_default_latency (insn)
679 - insn_default_latency (used));
680 if (cost <= 0)
681 cost = 1;
682 }
683 else if (bypass_p (insn))
684 cost = insn_latency (insn, used);
685 }
686
687
688 if (targetm.sched.adjust_cost_2)
689 {
690 cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
691 dw);
692 }
693 else if (targetm.sched.adjust_cost != NULL)
694 {
695 /* This variable is used for backward compatibility with the
696 targets. */
697 rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);
698
699	      /* Make it self-cycled, so that if someone tries to walk over this
700		 incomplete list they will be caught in an endless loop.  */
701 XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;
702
703 /* Targets use only REG_NOTE_KIND of the link. */
704 PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));
705
706 cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
707 insn, cost);
708
709 free_INSN_LIST_node (dep_cost_rtx_link);
710 }
711
712 if (cost < 0)
713 cost = 0;
714 }
715
716 return cost;
717 }
718
719 /* Compute cost of dependence LINK.
720 This is the number of cycles between instruction issue and
721 instruction results. */
722 int
723 dep_cost (dep_t link)
724 {
725 return dep_cost_1 (link, 0);
726 }
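
/* A small worked example of the cost computation above (hypothetical
   latencies, for illustration only): if INSN has a default latency of
   3 cycles and USED a latency of 1, then a true dependence costs 3, an
   anti dependence costs 0, and an output dependence costs 3 - 1 = 2
   cycles (clamped to a minimum of 1), before any bypass or
   target-specific adjust_cost hook is applied.  */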
727
728 /* Use this sel-sched.c friendly function in reorder2 instead of increasing
729 INSN_PRIORITY explicitly. */
730 void
731 increase_insn_priority (rtx insn, int amount)
732 {
733 if (!sel_sched_p ())
734 {
735 /* We're dealing with haifa-sched.c INSN_PRIORITY. */
736 if (INSN_PRIORITY_KNOWN (insn))
737 INSN_PRIORITY (insn) += amount;
738 }
739 else
740 {
741 /* In sel-sched.c INSN_PRIORITY is not kept up to date.
742 Use EXPR_PRIORITY instead. */
743 sel_add_to_insn_priority (insn, amount);
744 }
745 }
746
747 /* Return 'true' if DEP should be included in priority calculations. */
748 static bool
749 contributes_to_priority_p (dep_t dep)
750 {
751 /* Critical path is meaningful in block boundaries only. */
752 if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
753 DEP_PRO (dep)))
754 return false;
755
756   /* If the flag COUNT_SPEC_IN_CRITICAL_PATH is set,
757      then speculative instructions will be less likely to be
758      scheduled.  That is because the priority of
759      their producers will increase, and, thus, the
760      producers will be more likely to be scheduled, thus
761      resolving the dependence.  */
762 if (sched_deps_info->generate_spec_deps
763 && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
764 && (DEP_STATUS (dep) & SPECULATIVE))
765 return false;
766
767 return true;
768 }
769
770 /* Compute the priority number for INSN. */
771 static int
772 priority (rtx insn)
773 {
774 if (! INSN_P (insn))
775 return 0;
776
777 /* We should not be interested in priority of an already scheduled insn. */
778 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
779
780 if (!INSN_PRIORITY_KNOWN (insn))
781 {
782 int this_priority = -1;
783
784 if (sd_lists_empty_p (insn, SD_LIST_FORW))
785	/* ??? We should set INSN_PRIORITY to insn_cost when an insn has
786	   some forward deps but all of them are ignored by the
787	   contributes_to_priority hook.  At the moment we set the priority
788	   of such an insn to 0.  */
789 this_priority = insn_cost (insn);
790 else
791 {
792 rtx prev_first, twin;
793 basic_block rec;
794
795	  /* For recovery check instructions we calculate priority slightly
796	     differently than for normal instructions.  Instead of walking
797	     through the INSN_FORW_DEPS (check) list, we walk through the
798 INSN_FORW_DEPS list of each instruction in the corresponding
799 recovery block. */
800
801 /* Selective scheduling does not define RECOVERY_BLOCK macro. */
802 rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
803 if (!rec || rec == EXIT_BLOCK_PTR)
804 {
805 prev_first = PREV_INSN (insn);
806 twin = insn;
807 }
808 else
809 {
810 prev_first = NEXT_INSN (BB_HEAD (rec));
811 twin = PREV_INSN (BB_END (rec));
812 }
813
814 do
815 {
816 sd_iterator_def sd_it;
817 dep_t dep;
818
819 FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
820 {
821 rtx next;
822 int next_priority;
823
824 next = DEP_CON (dep);
825
826 if (BLOCK_FOR_INSN (next) != rec)
827 {
828 int cost;
829
830 if (!contributes_to_priority_p (dep))
831 continue;
832
833 if (twin == insn)
834 cost = dep_cost (dep);
835 else
836 {
837 struct _dep _dep1, *dep1 = &_dep1;
838
839 init_dep (dep1, insn, next, REG_DEP_ANTI);
840
841 cost = dep_cost (dep1);
842 }
843
844 next_priority = cost + priority (next);
845
846 if (next_priority > this_priority)
847 this_priority = next_priority;
848 }
849 }
850
851 twin = PREV_INSN (twin);
852 }
853 while (twin != prev_first);
854 }
855
856 if (this_priority < 0)
857 {
858 gcc_assert (this_priority == -1);
859
860 this_priority = insn_cost (insn);
861 }
862
863 INSN_PRIORITY (insn) = this_priority;
864 INSN_PRIORITY_STATUS (insn) = 1;
865 }
866
867 return INSN_PRIORITY (insn);
868 }
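
/* Worked example of the recursion above (hypothetical latencies): for
   a chain A -> B -> C where the dependence A->B costs 2 cycles, B->C
   costs 3, and insn_cost (C) == 1, we get priority (C) == 1,
   priority (B) == 3 + 1 == 4 and priority (A) == 2 + 4 == 6, i.e. each
   insn's priority is the length of the longest dependence path from it
   to the end of the region.  */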
869 \f
870 /* Macros and functions for keeping the priority queue sorted, and
871 dealing with queuing and dequeuing of instructions. */
872
873 #define SCHED_SORT(READY, N_READY) \
874 do { if ((N_READY) == 2) \
875 swap_sort (READY, N_READY); \
876 else if ((N_READY) > 2) \
877 qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \
878 while (0)
879
880 /* Returns a positive value if x is preferred; returns a negative value if
881 y is preferred. Should never return 0, since that will make the sort
882 unstable. */
883
884 static int
885 rank_for_schedule (const void *x, const void *y)
886 {
887 rtx tmp = *(const rtx *) y;
888 rtx tmp2 = *(const rtx *) x;
889 int tmp_class, tmp2_class;
890 int val, priority_val, weight_val, info_val;
891
892   /* The insn in a schedule group should be issued first.  */
893 if (flag_sched_group_heuristic &&
894 SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
895 return SCHED_GROUP_P (tmp2) ? 1 : -1;
896
897 /* Make sure that priority of TMP and TMP2 are initialized. */
898 gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
899
900 /* Prefer insn with higher priority. */
901 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
902
903 if (flag_sched_critical_path_heuristic && priority_val)
904 return priority_val;
905
906 /* Prefer speculative insn with greater dependencies weakness. */
907 if (flag_sched_spec_insn_heuristic && spec_info)
908 {
909 ds_t ds1, ds2;
910 dw_t dw1, dw2;
911 int dw;
912
913 ds1 = TODO_SPEC (tmp) & SPECULATIVE;
914 if (ds1)
915 dw1 = ds_weak (ds1);
916 else
917 dw1 = NO_DEP_WEAK;
918
919 ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
920 if (ds2)
921 dw2 = ds_weak (ds2);
922 else
923 dw2 = NO_DEP_WEAK;
924
925 dw = dw2 - dw1;
926 if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
927 return dw;
928 }
929
930   /* Prefer an insn with a smaller contribution to register pressure.  */
931 if (flag_sched_reg_pressure_heuristic && !reload_completed &&
932 (weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2)))
933 return weight_val;
934
935 info_val = (*current_sched_info->rank) (tmp, tmp2);
936   if (flag_sched_rank_heuristic && info_val)
937 return info_val;
938
939 /* Compare insns based on their relation to the last-scheduled-insn. */
940 if (flag_sched_last_insn_heuristic && INSN_P (last_scheduled_insn))
941 {
942 dep_t dep1;
943 dep_t dep2;
944
945 /* Classify the instructions into three classes:
946 	 1) Data dependent on last scheduled insn.
947 2) Anti/Output dependent on last scheduled insn.
948 3) Independent of last scheduled insn, or has latency of one.
949 Choose the insn from the highest numbered class if different. */
950 dep1 = sd_find_dep_between (last_scheduled_insn, tmp, true);
951
952 if (dep1 == NULL || dep_cost (dep1) == 1)
953 tmp_class = 3;
954 else if (/* Data dependence. */
955 DEP_TYPE (dep1) == REG_DEP_TRUE)
956 tmp_class = 1;
957 else
958 tmp_class = 2;
959
960 dep2 = sd_find_dep_between (last_scheduled_insn, tmp2, true);
961
962 if (dep2 == NULL || dep_cost (dep2) == 1)
963 tmp2_class = 3;
964 else if (/* Data dependence. */
965 DEP_TYPE (dep2) == REG_DEP_TRUE)
966 tmp2_class = 1;
967 else
968 tmp2_class = 2;
969
970 if ((val = tmp2_class - tmp_class))
971 return val;
972 }
973
974 /* Prefer the insn which has more later insns that depend on it.
975 This gives the scheduler more freedom when scheduling later
976 instructions at the expense of added register pressure. */
977
978 val = (sd_lists_size (tmp2, SD_LIST_FORW)
979 - sd_lists_size (tmp, SD_LIST_FORW));
980
981 if (flag_sched_dep_count_heuristic && val != 0)
982 return val;
983
984 /* If insns are equally good, sort by INSN_LUID (original insn order),
985 so that we make the sort stable. This minimizes instruction movement,
986 thus minimizing sched's effect on debugging and cross-jumping. */
987 return INSN_LUID (tmp) - INSN_LUID (tmp2);
988 }
989
990 /* Resort the array A in which only the element at index N may be out of order.  */
991
992 HAIFA_INLINE static void
993 swap_sort (rtx *a, int n)
994 {
995 rtx insn = a[n - 1];
996 int i = n - 2;
997
998 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
999 {
1000 a[i + 1] = a[i];
1001 i -= 1;
1002 }
1003 a[i + 1] = insn;
1004 }
1005
1006 /* Add INSN to the insn queue so that it can be executed at least
1007 N_CYCLES after the currently executing insn. Preserve insns
1008 chain for debugging purposes. */
1009
1010 HAIFA_INLINE static void
1011 queue_insn (rtx insn, int n_cycles)
1012 {
1013 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
1014 rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
1015
1016 gcc_assert (n_cycles <= max_insn_queue_index);
1017
1018 insn_queue[next_q] = link;
1019 q_size += 1;
1020
1021 if (sched_verbose >= 2)
1022 {
1023 fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
1024 (*current_sched_info->print_insn) (insn, 0));
1025
1026 fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
1027 }
1028
1029 QUEUE_INDEX (insn) = next_q;
1030 }
1031
1032 /* Remove INSN from queue. */
1033 static void
1034 queue_remove (rtx insn)
1035 {
1036 gcc_assert (QUEUE_INDEX (insn) >= 0);
1037 remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
1038 q_size--;
1039 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
1040 }
1041
1042 /* Return a pointer to the bottom of the ready list, i.e. the insn
1043 with the lowest priority. */
1044
1045 rtx *
1046 ready_lastpos (struct ready_list *ready)
1047 {
1048 gcc_assert (ready->n_ready >= 1);
1049 return ready->vec + ready->first - ready->n_ready + 1;
1050 }
1051
1052 /* Add an element INSN to the ready list so that it ends up with the
1053 lowest/highest priority depending on FIRST_P. */
1054
1055 HAIFA_INLINE static void
1056 ready_add (struct ready_list *ready, rtx insn, bool first_p)
1057 {
1058 if (!first_p)
1059 {
1060 if (ready->first == ready->n_ready)
1061 {
1062 memmove (ready->vec + ready->veclen - ready->n_ready,
1063 ready_lastpos (ready),
1064 ready->n_ready * sizeof (rtx));
1065 ready->first = ready->veclen - 1;
1066 }
1067 ready->vec[ready->first - ready->n_ready] = insn;
1068 }
1069 else
1070 {
1071 if (ready->first == ready->veclen - 1)
1072 {
1073 if (ready->n_ready)
1074 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
1075 memmove (ready->vec + ready->veclen - ready->n_ready - 1,
1076 ready_lastpos (ready),
1077 ready->n_ready * sizeof (rtx));
1078 ready->first = ready->veclen - 2;
1079 }
1080 ready->vec[++(ready->first)] = insn;
1081 }
1082
1083 ready->n_ready++;
1084
1085 gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
1086 QUEUE_INDEX (insn) = QUEUE_READY;
1087 }
1088
1089 /* Remove the element with the highest priority from the ready list and
1090 return it. */
1091
1092 HAIFA_INLINE static rtx
1093 ready_remove_first (struct ready_list *ready)
1094 {
1095 rtx t;
1096
1097 gcc_assert (ready->n_ready);
1098 t = ready->vec[ready->first--];
1099 ready->n_ready--;
1100   /* If the ready list becomes empty, reset it.  */
1101 if (ready->n_ready == 0)
1102 ready->first = ready->veclen - 1;
1103
1104 gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
1105 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1106
1107 return t;
1108 }
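
/* Illustration of the ready list layout used above (hypothetical
   numbers): with ready->first == 9 and ready->n_ready == 3, the insns
   occupy vec[7..9]; vec[9] holds the highest-priority insn (returned
   by ready_remove_first), vec[7] the lowest-priority one (returned by
   ready_lastpos), and ready_element (ready, i) maps to vec[9 - i].  */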
1109
1110 /* The following code implements multi-pass scheduling for the first
1111    cycle.  In other words, we will try to choose the ready insn which
1112    permits starting the maximum number of insns on the same cycle.  */
1113
1114 /* Return a pointer to the element INDEX from the ready list.  INDEX for
1115    the insn with the highest priority is 0, and the lowest priority has
1116    INDEX N_READY - 1.  */
1117
1118 rtx
1119 ready_element (struct ready_list *ready, int index)
1120 {
1121 gcc_assert (ready->n_ready && index < ready->n_ready);
1122
1123 return ready->vec[ready->first - index];
1124 }
1125
1126 /* Remove the element INDEX from the ready list and return it. INDEX
1127 for insn with the highest priority is 0, and the lowest priority
1128 has N_READY - 1. */
1129
1130 HAIFA_INLINE static rtx
1131 ready_remove (struct ready_list *ready, int index)
1132 {
1133 rtx t;
1134 int i;
1135
1136 if (index == 0)
1137 return ready_remove_first (ready);
1138 gcc_assert (ready->n_ready && index < ready->n_ready);
1139 t = ready->vec[ready->first - index];
1140 ready->n_ready--;
1141 for (i = index; i < ready->n_ready; i++)
1142 ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
1143 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1144 return t;
1145 }
1146
1147 /* Remove INSN from the ready list. */
1148 static void
1149 ready_remove_insn (rtx insn)
1150 {
1151 int i;
1152
1153 for (i = 0; i < readyp->n_ready; i++)
1154 if (ready_element (readyp, i) == insn)
1155 {
1156 ready_remove (readyp, i);
1157 return;
1158 }
1159 gcc_unreachable ();
1160 }
1161
1162 /* Sort the ready list READY by ascending priority, using the SCHED_SORT
1163 macro. */
1164
1165 void
1166 ready_sort (struct ready_list *ready)
1167 {
1168 rtx *first = ready_lastpos (ready);
1169 SCHED_SORT (first, ready->n_ready);
1170 }
1171
1172 /* PREV is an insn that is ready to execute. Adjust its priority if that
1173 will help shorten or lengthen register lifetimes as appropriate. Also
1174 provide a hook for the target to tweak itself. */
1175
1176 HAIFA_INLINE static void
1177 adjust_priority (rtx prev)
1178 {
1179 /* ??? There used to be code here to try and estimate how an insn
1180 affected register lifetimes, but it did it by looking at REG_DEAD
1181 notes, which we removed in schedule_region. Nor did it try to
1182 take into account register pressure or anything useful like that.
1183
1184 Revisit when we have a machine model to work with and not before. */
1185
1186 if (targetm.sched.adjust_priority)
1187 INSN_PRIORITY (prev) =
1188 targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
1189 }
1190
1191 /* Advance DFA state STATE on one cycle. */
1192 void
1193 advance_state (state_t state)
1194 {
1195 if (targetm.sched.dfa_pre_advance_cycle)
1196 targetm.sched.dfa_pre_advance_cycle ();
1197
1198 if (targetm.sched.dfa_pre_cycle_insn)
1199 state_transition (state,
1200 targetm.sched.dfa_pre_cycle_insn ());
1201
1202 state_transition (state, NULL);
1203
1204 if (targetm.sched.dfa_post_cycle_insn)
1205 state_transition (state,
1206 targetm.sched.dfa_post_cycle_insn ());
1207
1208 if (targetm.sched.dfa_post_advance_cycle)
1209 targetm.sched.dfa_post_advance_cycle ();
1210 }
1211
1212 /* Advance time on one cycle. */
1213 HAIFA_INLINE static void
1214 advance_one_cycle (void)
1215 {
1216 advance_state (curr_state);
1217 if (sched_verbose >= 6)
1218 fprintf (sched_dump, ";;\tAdvanced a state.\n");
1219 }
1220
1221 /* Clock at which the previous instruction was issued. */
1222 static int last_clock_var;
1223
1224 /* INSN is the "currently executing insn".  Launch each insn which was
1225    waiting on INSN.  The function returns the necessary cycle advance
1226    after issuing the insn (it is not zero for insns in a schedule
1227    group).  */
1229
1230 static int
1231 schedule_insn (rtx insn)
1232 {
1233 sd_iterator_def sd_it;
1234 dep_t dep;
1235 int advance = 0;
1236
1237 if (sched_verbose >= 1)
1238 {
1239 char buf[2048];
1240
1241 print_insn (buf, insn, 0);
1242 buf[40] = 0;
1243 fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);
1244
1245 if (recog_memoized (insn) < 0)
1246 fprintf (sched_dump, "nothing");
1247 else
1248 print_reservation (sched_dump, insn);
1249 fputc ('\n', sched_dump);
1250 }
1251
1252 /* Scheduling instruction should have all its dependencies resolved and
1253 should have been removed from the ready list. */
1254 gcc_assert (sd_lists_empty_p (insn, SD_LIST_BACK));
1255
1256 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
1257 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
1258
1259 gcc_assert (INSN_TICK (insn) >= MIN_TICK);
1260 if (INSN_TICK (insn) > clock_var)
1261 /* INSN has been prematurely moved from the queue to the ready list.
1262        This is possible only if the following flag is set.  */
1263 gcc_assert (flag_sched_stalled_insns);
1264
1265 /* ??? Probably, if INSN is scheduled prematurely, we should leave
1266 INSN_TICK untouched. This is a machine-dependent issue, actually. */
1267 INSN_TICK (insn) = clock_var;
1268
1269 /* Update dependent instructions. */
1270 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
1271 sd_iterator_cond (&sd_it, &dep);)
1272 {
1273 rtx next = DEP_CON (dep);
1274
1275 /* Resolve the dependence between INSN and NEXT.
1276 sd_resolve_dep () moves current dep to another list thus
1277 advancing the iterator. */
1278 sd_resolve_dep (sd_it);
1279
1280 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
1281 {
1282 int effective_cost;
1283
1284 effective_cost = try_ready (next);
1285
1286 if (effective_cost >= 0
1287 && SCHED_GROUP_P (next)
1288 && advance < effective_cost)
1289 advance = effective_cost;
1290 }
1291 else
1292 	/* A check always has only one forward dependence (to the first insn in
1293 	   the recovery block); therefore, this will be executed only once.  */
1294 {
1295 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
1296 fix_recovery_deps (RECOVERY_BLOCK (insn));
1297 }
1298 }
1299
1300   /* This is the place where the scheduler basically no longer needs the
1301      backward and forward dependencies for INSN.  Nevertheless they are used
1302      in heuristics in rank_for_schedule (), early_queue_to_ready () and in
1303      some targets (e.g. rs6000).  Thus the earliest place where we *can*
1304      remove dependencies is after the targetm.sched.md_finish () call in
1305      schedule_block ().  But, on the other hand, the safest place to remove
1306      dependencies is when we are finishing scheduling the entire region.  As
1307      we don't generate [many] dependencies during scheduling itself, we won't
1308      need memory until the beginning of the next region.
1309      Bottom line: Dependencies are removed for all insns at the end of
1310      scheduling the region.  */
1311
1312 /* Annotate the instruction with issue information -- TImode
1313 indicates that the instruction is expected not to be able
1314 to issue on the same cycle as the previous insn. A machine
1315 may use this information to decide how the instruction should
1316 be aligned. */
1317 if (issue_rate > 1
1318 && GET_CODE (PATTERN (insn)) != USE
1319 && GET_CODE (PATTERN (insn)) != CLOBBER)
1320 {
1321 if (reload_completed)
1322 PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
1323 last_clock_var = clock_var;
1324 }
1325
1326 return advance;
1327 }
1328
1329 /* Functions for handling of notes. */
1330
1331 /* Insert the INSN note at the end of the notes list. */
1332 static void
1333 add_to_note_list (rtx insn, rtx *note_list_end_p)
1334 {
1335 PREV_INSN (insn) = *note_list_end_p;
1336 if (*note_list_end_p)
1337 NEXT_INSN (*note_list_end_p) = insn;
1338 *note_list_end_p = insn;
1339 }
1340
1341 /* Add note list that ends on FROM_END to the end of TO_ENDP. */
1342 void
1343 concat_note_lists (rtx from_end, rtx *to_endp)
1344 {
1345 rtx from_start;
1346
1347 if (from_end == NULL)
1348     /* It's easy when we have nothing to concatenate.  */
1349 return;
1350
1351 if (*to_endp == NULL)
1352 /* It's also easy when destination is empty. */
1353 {
1354 *to_endp = from_end;
1355 return;
1356 }
1357
1358 from_start = from_end;
1359 /* A note list should be traversed via PREV_INSN. */
1360 while (PREV_INSN (from_start) != NULL)
1361 from_start = PREV_INSN (from_start);
1362
1363 add_to_note_list (from_start, to_endp);
1364 *to_endp = from_end;
1365 }
1366
1367 /* Delete notes beginning with INSN and put them in the chain
1368 of notes ended by NOTE_LIST.
1369 Returns the insn following the notes. */
1370 static rtx
1371 unlink_other_notes (rtx insn, rtx tail)
1372 {
1373 rtx prev = PREV_INSN (insn);
1374
1375 while (insn != tail && NOTE_NOT_BB_P (insn))
1376 {
1377 rtx next = NEXT_INSN (insn);
1378 basic_block bb = BLOCK_FOR_INSN (insn);
1379
1380 /* Delete the note from its current position. */
1381 if (prev)
1382 NEXT_INSN (prev) = next;
1383 if (next)
1384 PREV_INSN (next) = prev;
1385
1386 if (bb)
1387 {
1388 /* Basic block can begin with either LABEL or
1389 NOTE_INSN_BASIC_BLOCK. */
1390 gcc_assert (BB_HEAD (bb) != insn);
1391
1392 /* Check if we are removing last insn in the BB. */
1393 if (BB_END (bb) == insn)
1394 BB_END (bb) = prev;
1395 }
1396
1397 /* See sched_analyze to see how these are handled. */
1398 if (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
1399 && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END)
1400 add_to_note_list (insn, &note_list);
1401
1402 insn = next;
1403 }
1404
1405 if (insn == tail)
1406 {
1407 gcc_assert (sel_sched_p ());
1408 return prev;
1409 }
1410
1411 return insn;
1412 }
1413
1414 /* Return the head and tail pointers of ebb starting at BEG and ending
1415 at END. */
1416 void
1417 get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
1418 {
1419 rtx beg_head = BB_HEAD (beg);
1420 rtx beg_tail = BB_END (beg);
1421 rtx end_head = BB_HEAD (end);
1422 rtx end_tail = BB_END (end);
1423
1424   /* Don't include any notes or labels at the beginning of the BEG
1425      basic block, or notes at the end of the END basic block.  */
1426
1427 if (LABEL_P (beg_head))
1428 beg_head = NEXT_INSN (beg_head);
1429
1430 while (beg_head != beg_tail)
1431 if (NOTE_P (beg_head))
1432 beg_head = NEXT_INSN (beg_head);
1433 else
1434 break;
1435
1436 *headp = beg_head;
1437
1438 if (beg == end)
1439 end_head = beg_head;
1440 else if (LABEL_P (end_head))
1441 end_head = NEXT_INSN (end_head);
1442
1443 while (end_head != end_tail)
1444 if (NOTE_P (end_tail))
1445 end_tail = PREV_INSN (end_tail);
1446 else
1447 break;
1448
1449 *tailp = end_tail;
1450 }
1451
1452 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
1453
1454 int
1455 no_real_insns_p (const_rtx head, const_rtx tail)
1456 {
1457 while (head != NEXT_INSN (tail))
1458 {
1459 if (!NOTE_P (head) && !LABEL_P (head))
1460 return 0;
1461 head = NEXT_INSN (head);
1462 }
1463 return 1;
1464 }
1465
1466 /* Delete notes between HEAD and TAIL and put them in the chain
1467 of notes ended by NOTE_LIST. */
1468 static void
1469 rm_other_notes (rtx head, rtx tail)
1470 {
1471 rtx next_tail;
1472 rtx insn;
1473
1474 note_list = 0;
1475 if (head == tail && (! INSN_P (head)))
1476 return;
1477
1478 next_tail = NEXT_INSN (tail);
1479 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1480 {
1481 rtx prev;
1482
1483 /* Farm out notes, and maybe save them in NOTE_LIST.
1484 This is needed to keep the debugger from
1485 getting completely deranged. */
1486 if (NOTE_NOT_BB_P (insn))
1487 {
1488 prev = insn;
1489 insn = unlink_other_notes (insn, next_tail);
1490
1491 gcc_assert ((sel_sched_p ()
1492 || prev != tail) && prev != head && insn != next_tail);
1493 }
1494 }
1495 }
1496
1497 /* Same as above, but also process REG_SAVE_NOTEs of HEAD. */
1498 void
1499 remove_notes (rtx head, rtx tail)
1500 {
1501 /* rm_other_notes only removes notes which are _inside_ the
1502 block---that is, it won't remove notes before the first real insn
1503 or after the last real insn of the block. So if the first insn
1504 has a REG_SAVE_NOTE which would otherwise be emitted before the
1505 insn, it is redundant with the note before the start of the
1506 block, and so we have to take it out. */
1507 if (INSN_P (head))
1508 {
1509 rtx note;
1510
1511 for (note = REG_NOTES (head); note; note = XEXP (note, 1))
1512 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
1513 remove_note (head, note);
1514 }
1515
1516 /* Remove remaining note insns from the block, save them in
1517 note_list. These notes are restored at the end of
1518 schedule_block (). */
1519 rm_other_notes (head, tail);
1520 }
1521
1522 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
1523 previously found among the insns. Insert them just before HEAD. */
1524 rtx
1525 restore_other_notes (rtx head, basic_block head_bb)
1526 {
1527 if (note_list != 0)
1528 {
1529 rtx note_head = note_list;
1530
1531 if (head)
1532 head_bb = BLOCK_FOR_INSN (head);
1533 else
1534 head = NEXT_INSN (bb_note (head_bb));
1535
1536 while (PREV_INSN (note_head))
1537 {
1538 set_block_for_insn (note_head, head_bb);
1539 note_head = PREV_INSN (note_head);
1540 }
1541       /* In the above loop we've missed this note.  */
1542 set_block_for_insn (note_head, head_bb);
1543
1544 PREV_INSN (note_head) = PREV_INSN (head);
1545 NEXT_INSN (PREV_INSN (head)) = note_head;
1546 PREV_INSN (head) = note_list;
1547 NEXT_INSN (note_list) = head;
1548
1549 if (BLOCK_FOR_INSN (head) != head_bb)
1550 BB_END (head_bb) = note_list;
1551
1552 head = note_head;
1553 }
1554
1555 return head;
1556 }
1557
1558 /* Functions for computation of register live/usage info.  */
1559
1560 /* This function looks for a new register being defined.
1561 If the destination register is already used by the source,
1562 a new register is not needed. */
1563 static int
1564 find_set_reg_weight (const_rtx x)
1565 {
1566 if (GET_CODE (x) == CLOBBER
1567 && register_operand (SET_DEST (x), VOIDmode))
1568 return 1;
1569 if (GET_CODE (x) == SET
1570 && register_operand (SET_DEST (x), VOIDmode))
1571 {
1572 if (REG_P (SET_DEST (x)))
1573 {
1574 if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
1575 return 1;
1576 else
1577 return 0;
1578 }
1579 return 1;
1580 }
1581 return 0;
1582 }
1583
1584 /* Calculate INSN_REG_WEIGHT for INSN. */
1585 static void
1586 find_insn_reg_weight (const_rtx insn)
1587 {
1588 int reg_weight = 0;
1589 rtx x;
1590
1591 /* Handle register life information. */
1592 if (! INSN_P (insn))
1593 return;
1594
1595 /* Increment weight for each register born here. */
1596 x = PATTERN (insn);
1597 reg_weight += find_set_reg_weight (x);
1598 if (GET_CODE (x) == PARALLEL)
1599 {
1600 int j;
1601 for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
1602 {
1603 x = XVECEXP (PATTERN (insn), 0, j);
1604 reg_weight += find_set_reg_weight (x);
1605 }
1606 }
1607 /* Decrement weight for each register that dies here. */
1608 for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
1609 {
1610 if (REG_NOTE_KIND (x) == REG_DEAD
1611 || REG_NOTE_KIND (x) == REG_UNUSED)
1612 reg_weight--;
1613 }
1614
1615 INSN_REG_WEIGHT (insn) = reg_weight;
1616 }
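
/* Worked example (illustrative): an insn (set (reg A) (plus (reg B)
   (const_int 1))) with a single REG_DEAD note for B gets weight
   1 - 1 == 0: one register is born and one dies, so the insn does not
   change the number of live registers.  A plain (set (reg A) (reg B))
   with no REG_DEAD or REG_UNUSED notes gets weight 1.  */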
1617
1618 /* Move insns that became ready to fire from the queue to the ready list.  */
1619
1620 static void
1621 queue_to_ready (struct ready_list *ready)
1622 {
1623 rtx insn;
1624 rtx link;
1625 rtx skip_insn;
1626
1627 q_ptr = NEXT_Q (q_ptr);
1628
1629 if (dbg_cnt (sched_insn) == false)
1630 /* If debug counter is activated do not requeue insn next after
1631 last_scheduled_insn. */
1632 skip_insn = next_nonnote_insn (last_scheduled_insn);
1633 else
1634 skip_insn = NULL_RTX;
1635
1636 /* Add all pending insns that can be scheduled without stalls to the
1637 ready list. */
1638 for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
1639 {
1640 insn = XEXP (link, 0);
1641 q_size -= 1;
1642
1643 if (sched_verbose >= 2)
1644 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
1645 (*current_sched_info->print_insn) (insn, 0));
1646
1647 /* If the ready list is full, delay the insn for 1 cycle.
1648 See the comment in schedule_block for the rationale. */
1649 if (!reload_completed
1650 && ready->n_ready > MAX_SCHED_READY_INSNS
1651 && !SCHED_GROUP_P (insn)
1652 && insn != skip_insn)
1653 {
1654 if (sched_verbose >= 2)
1655 fprintf (sched_dump, "requeued because ready full\n");
1656 queue_insn (insn, 1);
1657 }
1658 else
1659 {
1660 ready_add (ready, insn, false);
1661 if (sched_verbose >= 2)
1662 fprintf (sched_dump, "moving to ready without stalls\n");
1663 }
1664 }
1665 free_INSN_LIST_list (&insn_queue[q_ptr]);
1666
1667 /* If there are no ready insns, stall until one is ready and add all
1668 of the pending insns at that point to the ready list. */
1669 if (ready->n_ready == 0)
1670 {
1671 int stalls;
1672
1673 for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
1674 {
1675 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
1676 {
1677 for (; link; link = XEXP (link, 1))
1678 {
1679 insn = XEXP (link, 0);
1680 q_size -= 1;
1681
1682 if (sched_verbose >= 2)
1683 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
1684 (*current_sched_info->print_insn) (insn, 0));
1685
1686 ready_add (ready, insn, false);
1687 if (sched_verbose >= 2)
1688 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
1689 }
1690 free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
1691
1692 advance_one_cycle ();
1693
1694 break;
1695 }
1696
1697 advance_one_cycle ();
1698 }
1699
1700 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
1701 clock_var += stalls;
1702 }
1703 }
1704
1705 /* Used by early_queue_to_ready. Determines whether it is "ok" to
1706 prematurely move INSN from the queue to the ready list. Currently,
1707 if a target defines the hook 'is_costly_dependence', this function
1708 uses the hook to check whether there exist any dependences which are
1709 considered costly by the target, between INSN and other insns that
1710 have already been scheduled. Dependences are checked up to Y cycles
1711    back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
1712    controlling this value.
1713    (Other considerations could be taken into account instead (or in
1714    addition) depending on user flags and target hooks.)  */
1715
1716 static bool
1717 ok_for_early_queue_removal (rtx insn)
1718 {
1719 int n_cycles;
1720 rtx prev_insn = last_scheduled_insn;
1721
1722 if (targetm.sched.is_costly_dependence)
1723 {
1724 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
1725 {
1726 for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
1727 {
1728 int cost;
1729
1730 if (prev_insn == current_sched_info->prev_head)
1731 {
1732 prev_insn = NULL;
1733 break;
1734 }
1735
1736 if (!NOTE_P (prev_insn))
1737 {
1738 dep_t dep;
1739
1740 dep = sd_find_dep_between (prev_insn, insn, true);
1741
1742 if (dep != NULL)
1743 {
1744 cost = dep_cost (dep);
1745
1746 if (targetm.sched.is_costly_dependence (dep, cost,
1747 flag_sched_stalled_insns_dep - n_cycles))
1748 return false;
1749 }
1750 }
1751
1752 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
1753 break;
1754 }
1755
1756 if (!prev_insn)
1757 break;
1758 prev_insn = PREV_INSN (prev_insn);
1759 }
1760 }
1761
1762 return true;
1763 }
1764
1765
1766 /* Remove insns from the queue, before they become "ready" with respect
1767 to FU latency considerations. */
1768
1769 static int
1770 early_queue_to_ready (state_t state, struct ready_list *ready)
1771 {
1772 rtx insn;
1773 rtx link;
1774 rtx next_link;
1775 rtx prev_link;
1776 bool move_to_ready;
1777 int cost;
1778 state_t temp_state = alloca (dfa_state_size);
1779 int stalls;
1780 int insns_removed = 0;
1781
1782 /*
1783 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
1784 function:
1785
1786 X == 0: There is no limit on how many queued insns can be removed
1787 prematurely. (flag_sched_stalled_insns = -1).
1788
1789 X >= 1: Only X queued insns can be removed prematurely in each
1790 invocation. (flag_sched_stalled_insns = X).
1791
1792 Otherwise: Early queue removal is disabled.
1793 (flag_sched_stalled_insns = 0)
1794 */
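
  /* For example, with -fsched-stalled-insns=2 each invocation of this
     function moves at most two queued insns to the ready list
     prematurely, with -fsched-stalled-insns=0 there is no limit, and
     when the option is off early queue removal is not performed at
     all.  */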
1795
1796 if (! flag_sched_stalled_insns)
1797 return 0;
1798
1799 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
1800 {
1801 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
1802 {
1803 if (sched_verbose > 6)
1804 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
1805
1806 prev_link = 0;
1807 while (link)
1808 {
1809 next_link = XEXP (link, 1);
1810 insn = XEXP (link, 0);
1811 if (insn && sched_verbose > 6)
1812 print_rtl_single (sched_dump, insn);
1813
1814 memcpy (temp_state, state, dfa_state_size);
1815 if (recog_memoized (insn) < 0)
1816 /* Force a non-negative cost to mark it as not ready,
1817 avoiding an infinite Q->R->Q->R... cycle. */
1818 cost = 0;
1819 else
1820 cost = state_transition (temp_state, insn);
1821
1822 if (sched_verbose >= 6)
1823 fprintf (sched_dump, "transition cost = %d\n", cost);
1824
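/* A negative transition cost means the DFA can accept INSN in the
   current cycle without a stall, so INSN is a candidate for early
   removal from the queue.  */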
1825 move_to_ready = false;
1826 if (cost < 0)
1827 {
1828 move_to_ready = ok_for_early_queue_removal (insn);
1829 if (move_to_ready == true)
1830 {
1831 /* move from Q to R */
1832 q_size -= 1;
1833 ready_add (ready, insn, false);
1834
1835 if (prev_link)
1836 XEXP (prev_link, 1) = next_link;
1837 else
1838 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
1839
1840 free_INSN_LIST_node (link);
1841
1842 if (sched_verbose >= 2)
1843 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
1844 (*current_sched_info->print_insn) (insn, 0));
1845
1846 insns_removed++;
1847 if (insns_removed == flag_sched_stalled_insns)
1848 /* Remove no more than flag_sched_stalled_insns insns
1849 from Q at a time. */
1850 return insns_removed;
1851 }
1852 }
1853
1854 if (move_to_ready == false)
1855 prev_link = link;
1856
1857 link = next_link;
1858 } /* while link */
1859 } /* if link */
1860
1861 } /* for stalls.. */
1862
1863 return insns_removed;
1864 }
1865
1866
1867 /* Print the ready list for debugging purposes. Callable from debugger. */
1868
1869 static void
1870 debug_ready_list (struct ready_list *ready)
1871 {
1872 rtx *p;
1873 int i;
1874
1875 if (ready->n_ready == 0)
1876 {
1877 fprintf (sched_dump, "\n");
1878 return;
1879 }
1880
1881 p = ready_lastpos (ready);
1882 for (i = 0; i < ready->n_ready; i++)
1883 fprintf (sched_dump, " %s", (*current_sched_info->print_insn) (p[i], 0));
1884 fprintf (sched_dump, "\n");
1885 }
1886
1887 /* Search INSN for REG_SAVE_NOTE note pairs for
1888 NOTE_INSN_EH_REGION_{BEG,END}; and convert them back into
1889 NOTEs. The REG_SAVE_NOTE note following the first one contains the
1890 saved value for NOTE_BLOCK_NUMBER which is useful for
1891 NOTE_INSN_EH_REGION_{BEG,END} NOTEs. */
1892 void
1893 reemit_notes (rtx insn)
1894 {
1895 rtx note, last = insn;
1896
1897 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
1898 {
1899 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
1900 {
1901 enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
1902
1903 last = emit_note_before (note_type, last);
1904 remove_note (insn, note);
1905 }
1906 }
1907 }
1908
1909 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
1910 static void
1911 move_insn (rtx insn, rtx last, rtx nt)
1912 {
1913 if (PREV_INSN (insn) != last)
1914 {
1915 basic_block bb;
1916 rtx note;
1917 int jump_p = 0;
1918
1919 bb = BLOCK_FOR_INSN (insn);
1920
1921 /* BB_HEAD is either LABEL or NOTE. */
1922 gcc_assert (BB_HEAD (bb) != insn);
1923
1924 if (BB_END (bb) == insn)
1925 /* If this is last instruction in BB, move end marker one
1926 instruction up. */
1927 {
1928 /* Jumps are always placed at the end of basic block. */
1929 jump_p = control_flow_insn_p (insn);
1930
1931 gcc_assert (!jump_p
1932 || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
1933 && IS_SPECULATION_BRANCHY_CHECK_P (insn))
1934 || (common_sched_info->sched_pass_id
1935 == SCHED_EBB_PASS));
1936
1937 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
1938
1939 BB_END (bb) = PREV_INSN (insn);
1940 }
1941
1942 gcc_assert (BB_END (bb) != last);
1943
1944 if (jump_p)
1945 /* We move the block note along with jump. */
1946 {
1947 gcc_assert (nt);
1948
1949 note = NEXT_INSN (insn);
1950 while (NOTE_NOT_BB_P (note) && note != nt)
1951 note = NEXT_INSN (note);
1952
1953 if (note != nt
1954 && (LABEL_P (note)
1955 || BARRIER_P (note)))
1956 note = NEXT_INSN (note);
1957
1958 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
1959 }
1960 else
1961 note = insn;
1962
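/* Splice the chain INSN..NOTE out of its old position and re-link it
   immediately after LAST.  For a jump, NOTE is the basic block note
   that travels with it; otherwise NOTE == INSN.  */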
1963 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
1964 PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
1965
1966 NEXT_INSN (note) = NEXT_INSN (last);
1967 PREV_INSN (NEXT_INSN (last)) = note;
1968
1969 NEXT_INSN (last) = insn;
1970 PREV_INSN (insn) = last;
1971
1972 bb = BLOCK_FOR_INSN (last);
1973
1974 if (jump_p)
1975 {
1976 fix_jump_move (insn);
1977
1978 if (BLOCK_FOR_INSN (insn) != bb)
1979 move_block_after_check (insn);
1980
1981 gcc_assert (BB_END (bb) == last);
1982 }
1983
1984 df_insn_change_bb (insn, bb);
1985
1986 /* Update BB_END, if needed. */
1987 if (BB_END (bb) == last)
1988 BB_END (bb) = insn;
1989 }
1990
1991 SCHED_GROUP_P (insn) = 0;
1992 }
1993
1994 /* Return true if scheduling INSN will finish current clock cycle. */
1995 static bool
1996 insn_finishes_cycle_p (rtx insn)
1997 {
1998 if (SCHED_GROUP_P (insn))
1999 /* After issuing INSN, the rest of the sched_group will be forced to issue
2000 in order. Don't make any plans for the rest of the cycle. */
2001 return true;
2002
2003 /* Finishing the block will, apparently, finish the cycle. */
2004 if (current_sched_info->insn_finishes_block_p
2005 && current_sched_info->insn_finishes_block_p (insn))
2006 return true;
2007
2008 return false;
2009 }
2010
2011 /* The following structure describes an entry of the stack of choices. */
2012 struct choice_entry
2013 {
2014 /* Ordinal number of the issued insn in the ready queue. */
2015 int index;
2016 /* The number of remaining insns whose issue we should still try. */
2017 int rest;
2018 /* The number of issued essential insns. */
2019 int n;
2020 /* State after issuing the insn. */
2021 state_t state;
2022 };
2023
2024 /* The following array is used to implement a stack of choices used in
2025 function max_issue. */
2026 static struct choice_entry *choice_stack;
2027
2028 /* The following variable value is the number of essential insns issued on
2029 the current cycle. An insn is essential if it changes the
2030 processor's state. */
2031 int cycle_issued_insns;
2032
2033 /* This holds the value of the target dfa_lookahead hook. */
2034 int dfa_lookahead;
2035
2036 /* The following variable value is the maximal number of tries of issuing
2037 insns for the first cycle multipass insn scheduling. We define
2038 this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE). We would not
2039 need this constraint if all real insns (with non-negative codes)
2040 had reservations because in this case the algorithm complexity is
2041 O(DFA_LOOKAHEAD**ISSUE_RATE). Unfortunately, the dfa descriptions
2042 might be incomplete and such insns might occur. For such
2043 descriptions, the complexity of the algorithm (without the constraint)
2044 could reach DFA_LOOKAHEAD ** N, where N is the queue length. */
2045 static int max_lookahead_tries;
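/* For instance (purely illustrative numbers): with dfa_lookahead == 4 and
   issue_rate == 2, max_issue () below initializes this to
   100 * 4 * 4 == 1600 tries.  */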
2046
2047 /* The following value is the value of the hook
2048 `first_cycle_multipass_dfa_lookahead' at the last call of
2049 `max_issue'. */
2050 static int cached_first_cycle_multipass_dfa_lookahead = 0;
2051
2052 /* The following value is the value of `issue_rate' at the last call of
2053 `sched_init'. */
2054 static int cached_issue_rate = 0;
2055
2056 /* The following function returns the maximal (or close to maximal) number
2057 of insns which can be issued on the same cycle, one of which is the
2058 insn with the best rank (the first insn in READY). To do this, the
2059 function tries different samples of ready insns. READY
2060 is the current queue `ready'. The global array READY_TRY reflects which
2061 insns have already been issued in this try. MAX_POINTS is the sum of
2062 points of all instructions in READY. The function stops immediately
2063 if it finds a solution in which all instructions can be issued.
2064 INDEX will contain the index of the best insn in READY. The
2065 function is used only for first cycle multipass scheduling.
2066
2067 PRIVILEGED_N >= 0
2068
2069 This function expects recognized insns only. All USEs,
2070 CLOBBERs, etc must be filtered elsewhere. */
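/* An editorial sketch of the search: starting from the current DFA state
   we repeatedly pick a not-yet-tried ready insn that the DFA accepts
   without a stall, push a choice_entry recording the resulting state and
   accumulated points, and descend; when nothing fits or TOP->REST drops
   to zero we pop the entry and restore the saved state.  The deepest
   stack level reached that issues a privileged insn (or any level when
   PRIVILEGED_N == 0) determines BEST and *INDEX.  */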
2071 int
2072 max_issue (struct ready_list *ready, int privileged_n, state_t state,
2073 int *index)
2074 {
2075 int n, i, all, n_ready, best, delay, tries_num, points = -1, max_points;
2076 int more_issue;
2077 struct choice_entry *top;
2078 rtx insn;
2079
2080 n_ready = ready->n_ready;
2081 gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
2082 && privileged_n <= n_ready);
2083
2084 /* Init MAX_LOOKAHEAD_TRIES. */
2085 if (cached_first_cycle_multipass_dfa_lookahead != dfa_lookahead)
2086 {
2087 cached_first_cycle_multipass_dfa_lookahead = dfa_lookahead;
2088 max_lookahead_tries = 100;
2089 for (i = 0; i < issue_rate; i++)
2090 max_lookahead_tries *= dfa_lookahead;
2091 }
2092
2093 /* Init max_points. */
2094 max_points = 0;
2095 more_issue = issue_rate - cycle_issued_insns;
2096
2097 /* ??? We used to assert here that we never issue more insns than issue_rate.
2098 However, some targets (e.g. MIPS/SB1) claim a lower issue rate than can be
2099 achieved to get better performance. Until these targets are fixed to use
2100 scheduler hooks to manipulate insn priority instead, the assert should
2101 be disabled.
2102
2103 gcc_assert (more_issue >= 0); */
2104
2105 for (i = 0; i < n_ready; i++)
2106 if (!ready_try [i])
2107 {
2108 if (more_issue-- > 0)
2109 max_points += ISSUE_POINTS (ready_element (ready, i));
2110 else
2111 break;
2112 }
2113
2114 /* The number of the issued insns in the best solution. */
2115 best = 0;
2116
2117 top = choice_stack;
2118
2119 /* Set initial state of the search. */
2120 memcpy (top->state, state, dfa_state_size);
2121 top->rest = dfa_lookahead;
2122 top->n = 0;
2123
2124 /* Count the number of the insns to search among. */
2125 for (all = i = 0; i < n_ready; i++)
2126 if (!ready_try [i])
2127 all++;
2128
2129 /* I is the index of the insn to try next. */
2130 i = 0;
2131 tries_num = 0;
2132 for (;;)
2133 {
2134 if (/* If we've reached a dead end or searched enough of what we have
2135 been asked... */
2136 top->rest == 0
2137 /* Or have nothing else to try. */
2138 || i >= n_ready)
2139 {
2140 /* ??? (... || i == n_ready). */
2141 gcc_assert (i <= n_ready);
2142
2143 if (top == choice_stack)
2144 break;
2145
2146 if (best < top - choice_stack)
2147 {
2148 if (privileged_n)
2149 {
2150 n = privileged_n;
2151 /* Try to find issued privileged insn. */
2152 while (n && !ready_try[--n]);
2153 }
2154
2155 if (/* If all insns are equally good... */
2156 privileged_n == 0
2157 /* Or a privileged insn will be issued. */
2158 || ready_try[n])
2159 /* Then we have a solution. */
2160 {
2161 best = top - choice_stack;
2162 /* This is the index of the insn issued first in this
2163 solution. */
2164 *index = choice_stack [1].index;
2165 points = top->n;
2166 if (top->n == max_points || best == all)
2167 break;
2168 }
2169 }
2170
2171 /* Set ready-list index to point to the last insn
2172 ('i++' below will advance it to the next insn). */
2173 i = top->index;
2174
2175 /* Backtrack. */
2176 ready_try [i] = 0;
2177 top--;
2178 memcpy (state, top->state, dfa_state_size);
2179 }
2180 else if (!ready_try [i])
2181 {
2182 tries_num++;
2183 if (tries_num > max_lookahead_tries)
2184 break;
2185 insn = ready_element (ready, i);
2186 delay = state_transition (state, insn);
2187 if (delay < 0)
2188 {
2189 if (state_dead_lock_p (state)
2190 || insn_finishes_cycle_p (insn))
2191 /* We won't issue any more instructions in the next
2192 choice_state. */
2193 top->rest = 0;
2194 else
2195 top->rest--;
2196
2197 n = top->n;
2198 if (memcmp (top->state, state, dfa_state_size) != 0)
2199 n += ISSUE_POINTS (insn);
2200
2201 /* Advance to the next choice_entry. */
2202 top++;
2203 /* Initialize it. */
2204 top->rest = dfa_lookahead;
2205 top->index = i;
2206 top->n = n;
2207 memcpy (top->state, state, dfa_state_size);
2208
2209 ready_try [i] = 1;
2210 i = -1;
2211 }
2212 }
2213
2214 /* Increase ready-list index. */
2215 i++;
2216 }
2217
2218 /* Restore the original state of the DFA. */
2219 memcpy (state, choice_stack->state, dfa_state_size);
2220
2221 return best;
2222 }
2223
2224 /* The following function chooses an insn from READY and modifies
2225 READY. It is used only for first
2226 cycle multipass scheduling.
2227 Return:
2228 -1 if cycle should be advanced,
2229 0 if INSN_PTR is set to point to the desirable insn,
2230 1 if choose_ready () should be restarted without advancing the cycle. */
2231 static int
2232 choose_ready (struct ready_list *ready, rtx *insn_ptr)
2233 {
2234 int lookahead;
2235
2236 if (dbg_cnt (sched_insn) == false)
2237 {
2238 rtx insn;
2239
2240 insn = next_nonnote_insn (last_scheduled_insn);
2241
2242 if (QUEUE_INDEX (insn) == QUEUE_READY)
2243 /* INSN is in the ready_list. */
2244 {
2245 ready_remove_insn (insn);
2246 *insn_ptr = insn;
2247 return 0;
2248 }
2249
2250 /* INSN is in the queue. Advance cycle to move it to the ready list. */
2251 return -1;
2252 }
2253
2254 lookahead = 0;
2255
2256 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
2257 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
2258 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0)))
2259 {
2260 *insn_ptr = ready_remove_first (ready);
2261 return 0;
2262 }
2263 else
2264 {
2265 /* Try to choose the better insn. */
2266 int index = 0, i, n;
2267 rtx insn;
2268 int try_data = 1, try_control = 1;
2269 ds_t ts;
2270
2271 insn = ready_element (ready, 0);
2272 if (INSN_CODE (insn) < 0)
2273 {
2274 *insn_ptr = ready_remove_first (ready);
2275 return 0;
2276 }
2277
2278 if (spec_info
2279 && spec_info->flags & (PREFER_NON_DATA_SPEC
2280 | PREFER_NON_CONTROL_SPEC))
2281 {
2282 for (i = 0, n = ready->n_ready; i < n; i++)
2283 {
2284 rtx x;
2285 ds_t s;
2286
2287 x = ready_element (ready, i);
2288 s = TODO_SPEC (x);
2289
2290 if (spec_info->flags & PREFER_NON_DATA_SPEC
2291 && !(s & DATA_SPEC))
2292 {
2293 try_data = 0;
2294 if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
2295 || !try_control)
2296 break;
2297 }
2298
2299 if (spec_info->flags & PREFER_NON_CONTROL_SPEC
2300 && !(s & CONTROL_SPEC))
2301 {
2302 try_control = 0;
2303 if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
2304 break;
2305 }
2306 }
2307 }
2308
2309 ts = TODO_SPEC (insn);
2310 if ((ts & SPECULATIVE)
2311 && (((!try_data && (ts & DATA_SPEC))
2312 || (!try_control && (ts & CONTROL_SPEC)))
2313 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2314 && !targetm.sched
2315 .first_cycle_multipass_dfa_lookahead_guard_spec (insn))))
2316 /* Discard speculative instruction that stands first in the ready
2317 list. */
2318 {
2319 change_queue_index (insn, 1);
2320 return 1;
2321 }
2322
2323 ready_try[0] = 0;
2324
2325 for (i = 1; i < ready->n_ready; i++)
2326 {
2327 insn = ready_element (ready, i);
2328
2329 ready_try [i]
2330 = ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2331 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC)));
2332 }
2333
2334 /* Let the target filter the search space. */
2335 for (i = 1; i < ready->n_ready; i++)
2336 if (!ready_try[i])
2337 {
2338 insn = ready_element (ready, i);
2339
2340 #ifdef ENABLE_CHECKING
2341 /* If this insn is recognizable we should have already
2342 recognized it earlier.
2343 ??? Not very clear where this is supposed to be done.
2344 See dep_cost_1. */
2345 gcc_assert (INSN_CODE (insn) >= 0
2346 || recog_memoized (insn) < 0);
2347 #endif
2348
2349 ready_try [i]
2350 = (/* INSN_CODE check can be omitted here as it is also done later
2351 in max_issue (). */
2352 INSN_CODE (insn) < 0
2353 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2354 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2355 (insn)));
2356 }
2357
2358 if (max_issue (ready, 1, curr_state, &index) == 0)
2359 {
2360 *insn_ptr = ready_remove_first (ready);
2361 if (sched_verbose >= 4)
2362 fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
2363 (*current_sched_info->print_insn) (*insn_ptr, 0));
2364 return 0;
2365 }
2366 else
2367 {
2368 if (sched_verbose >= 4)
2369 fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
2370 (*current_sched_info->print_insn)
2371 (ready_element (ready, index), 0));
2372
2373 *insn_ptr = ready_remove (ready, index);
2374 return 0;
2375 }
2376 }
2377 }
2378
2379 /* Use forward list scheduling to rearrange insns of block pointed to by
2380 TARGET_BB, possibly bringing insns from subsequent blocks in the same
2381 region. */
2382
2383 void
2384 schedule_block (basic_block *target_bb)
2385 {
2386 int i, first_cycle_insn_p;
2387 int can_issue_more;
2388 state_t temp_state = NULL; /* It is used for multipass scheduling. */
2389 int sort_p, advance, start_clock_var;
2390
2391 /* Head/tail info for this block. */
2392 rtx prev_head = current_sched_info->prev_head;
2393 rtx next_tail = current_sched_info->next_tail;
2394 rtx head = NEXT_INSN (prev_head);
2395 rtx tail = PREV_INSN (next_tail);
2396
2397 /* We used to have code to avoid getting parameters moved from hard
2398 argument registers into pseudos.
2399
2400 However, it was removed when it proved to be of marginal benefit
2401 and caused problems because schedule_block and compute_forward_dependences
2402 had different notions of what the "head" insn was. */
2403
2404 gcc_assert (head != tail || INSN_P (head));
2405
2406 haifa_recovery_bb_recently_added_p = false;
2407
2408 /* Debug info. */
2409 if (sched_verbose)
2410 dump_new_block_header (0, *target_bb, head, tail);
2411
2412 state_reset (curr_state);
2413
2414 /* Clear the ready list. */
2415 ready.first = ready.veclen - 1;
2416 ready.n_ready = 0;
2417
2418 /* It is used for first cycle multipass scheduling. */
2419 temp_state = alloca (dfa_state_size);
2420
2421 if (targetm.sched.md_init)
2422 targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);
2423
2424 /* We start inserting insns after PREV_HEAD. */
2425 last_scheduled_insn = prev_head;
2426
2427 gcc_assert (NOTE_P (last_scheduled_insn)
2428 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
2429
2430 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
2431 queue. */
2432 q_ptr = 0;
2433 q_size = 0;
2434
2435 insn_queue = XALLOCAVEC (rtx, max_insn_queue_index + 1);
2436 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
2437
2438 /* Start just before the beginning of time. */
2439 clock_var = -1;
2440
2441 /* We need the queue and ready lists and clock_var to be initialized
2442 in try_ready () (which is called through init_ready_list ()). */
2443 (*current_sched_info->init_ready_list) ();
2444
2445 /* The algorithm is O(n^2) in the number of ready insns at any given
2446 time in the worst case. Before reload we are more likely to have
2447 big lists so truncate them to a reasonable size. */
2448 if (!reload_completed && ready.n_ready > MAX_SCHED_READY_INSNS)
2449 {
2450 ready_sort (&ready);
2451
2452 /* Find first free-standing insn past MAX_SCHED_READY_INSNS. */
2453 for (i = MAX_SCHED_READY_INSNS; i < ready.n_ready; i++)
2454 if (!SCHED_GROUP_P (ready_element (&ready, i)))
2455 break;
2456
2457 if (sched_verbose >= 2)
2458 {
2459 fprintf (sched_dump,
2460 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
2461 fprintf (sched_dump,
2462 ";;\t\t before reload => truncated to %d insns\n", i);
2463 }
2464
2465 /* Delay all insns past it for 1 cycle. If the debug counter is
2466 activated, make an exception for the insn right after
2467 last_scheduled_insn. */
2468 {
2469 rtx skip_insn;
2470
2471 if (dbg_cnt (sched_insn) == false)
2472 skip_insn = next_nonnote_insn (last_scheduled_insn);
2473 else
2474 skip_insn = NULL_RTX;
2475
2476 while (i < ready.n_ready)
2477 {
2478 rtx insn;
2479
2480 insn = ready_remove (&ready, i);
2481
2482 if (insn != skip_insn)
2483 queue_insn (insn, 1);
2484 }
2485 }
2486 }
2487
2488 /* Now we can restore basic block notes and maintain precise cfg. */
2489 restore_bb_notes (*target_bb);
2490
2491 last_clock_var = -1;
2492
2493 advance = 0;
2494
2495 sort_p = TRUE;
2496 /* Loop until all the insns in BB are scheduled. */
2497 while ((*current_sched_info->schedule_more_p) ())
2498 {
2499 do
2500 {
2501 start_clock_var = clock_var;
2502
2503 clock_var++;
2504
2505 advance_one_cycle ();
2506
2507 /* Add to the ready list all pending insns that can be issued now.
2508 If there are no ready insns, increment clock until one
2509 is ready and add all pending insns at that point to the ready
2510 list. */
2511 queue_to_ready (&ready);
2512
2513 gcc_assert (ready.n_ready);
2514
2515 if (sched_verbose >= 2)
2516 {
2517 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
2518 debug_ready_list (&ready);
2519 }
2520 advance -= clock_var - start_clock_var;
2521 }
2522 while (advance > 0);
2523
2524 if (sort_p)
2525 {
2526 /* Sort the ready list based on priority. */
2527 ready_sort (&ready);
2528
2529 if (sched_verbose >= 2)
2530 {
2531 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
2532 debug_ready_list (&ready);
2533 }
2534 }
2535
2536 /* Allow the target to reorder the list, typically for
2537 better instruction bundling. */
2538 if (sort_p && targetm.sched.reorder
2539 && (ready.n_ready == 0
2540 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2541 can_issue_more =
2542 targetm.sched.reorder (sched_dump, sched_verbose,
2543 ready_lastpos (&ready),
2544 &ready.n_ready, clock_var);
2545 else
2546 can_issue_more = issue_rate;
2547
2548 first_cycle_insn_p = 1;
2549 cycle_issued_insns = 0;
2550 for (;;)
2551 {
2552 rtx insn;
2553 int cost;
2554 bool asm_p = false;
2555
2556 if (sched_verbose >= 2)
2557 {
2558 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
2559 clock_var);
2560 debug_ready_list (&ready);
2561 }
2562
2563 if (ready.n_ready == 0
2564 && can_issue_more
2565 && reload_completed)
2566 {
2567 /* Allow scheduling insns directly from the queue in case
2568 there's nothing better to do (ready list is empty) but
2569 there are still vacant dispatch slots in the current cycle. */
2570 if (sched_verbose >= 6)
2571 fprintf (sched_dump,";;\t\tSecond chance\n");
2572 memcpy (temp_state, curr_state, dfa_state_size);
2573 if (early_queue_to_ready (temp_state, &ready))
2574 ready_sort (&ready);
2575 }
2576
2577 if (ready.n_ready == 0 || !can_issue_more
2578 || state_dead_lock_p (curr_state)
2579 || !(*current_sched_info->schedule_more_p) ())
2580 break;
2581
2582 /* Select and remove the insn from the ready list. */
2583 if (sort_p)
2584 {
2585 int res;
2586
2587 insn = NULL_RTX;
2588 res = choose_ready (&ready, &insn);
2589
2590 if (res < 0)
2591 /* Finish cycle. */
2592 break;
2593 if (res > 0)
2594 /* Restart choose_ready (). */
2595 continue;
2596
2597 gcc_assert (insn != NULL_RTX);
2598 }
2599 else
2600 insn = ready_remove_first (&ready);
2601
2602 if (targetm.sched.dfa_new_cycle
2603 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
2604 insn, last_clock_var,
2605 clock_var, &sort_p))
2606 /* SORT_P is used by the target to override sorting
2607 of the ready list. This is needed when the target
2608 has modified its internal structures expecting that
2609 the insn will be issued next. As we need the insn
2610 to have the highest priority (so it will be returned by
2611 the ready_remove_first call above), we invoke
2612 ready_add (&ready, insn, true).
2613 But, still, there is one issue: INSN can later be
2614 discarded by the scheduler's front end through
2615 current_sched_info->can_schedule_ready_p, hence it won't
2616 be issued next. */
2617 {
2618 ready_add (&ready, insn, true);
2619 break;
2620 }
2621
2622 sort_p = TRUE;
2623 memcpy (temp_state, curr_state, dfa_state_size);
2624 if (recog_memoized (insn) < 0)
2625 {
2626 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
2627 || asm_noperands (PATTERN (insn)) >= 0);
2628 if (!first_cycle_insn_p && asm_p)
2629 /* This is an asm insn that we are trying to issue on a
2630 cycle other than the first. Issue it on the next cycle. */
2631 cost = 1;
2632 else
2633 /* A USE insn, or something else we don't need to
2634 understand. We can't pass these directly to
2635 state_transition because it will trigger a
2636 fatal error for unrecognizable insns. */
2637 cost = 0;
2638 }
2639 else
2640 {
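/* The mapping below: a negative state_transition () result is taken to
   mean INSN fits into the current cycle, so its cost becomes 0 (issue
   now); a zero result still means INSN cannot be issued this cycle, so
   it is treated as a one-cycle delay.  */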
2641 cost = state_transition (temp_state, insn);
2642 if (cost < 0)
2643 cost = 0;
2644 else if (cost == 0)
2645 cost = 1;
2646 }
2647
2648 if (cost >= 1)
2649 {
2650 queue_insn (insn, cost);
2651 if (SCHED_GROUP_P (insn))
2652 {
2653 advance = cost;
2654 break;
2655 }
2656
2657 continue;
2658 }
2659
2660 if (current_sched_info->can_schedule_ready_p
2661 && ! (*current_sched_info->can_schedule_ready_p) (insn))
2662 /* We normally get here only if we don't want to move
2663 the insn from the split block. */
2664 {
2665 TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
2666 continue;
2667 }
2668
2669 /* DECISION is made. */
2670
2671 if (TODO_SPEC (insn) & SPECULATIVE)
2672 generate_recovery_code (insn);
2673
2674 if (control_flow_insn_p (last_scheduled_insn)
2675 /* This is used to switch basic blocks by request
2676 from the scheduler front-end (actually, sched-ebb.c only).
2677 It is used to process blocks with a single fallthru
2678 edge. If the succeeding block has a jump, that jump will try to
2679 move to the end of the current bb, thus corrupting the CFG. */
2680 || current_sched_info->advance_target_bb (*target_bb, insn))
2681 {
2682 *target_bb = current_sched_info->advance_target_bb
2683 (*target_bb, 0);
2684
2685 if (sched_verbose)
2686 {
2687 rtx x;
2688
2689 x = next_real_insn (last_scheduled_insn);
2690 gcc_assert (x);
2691 dump_new_block_header (1, *target_bb, x, tail);
2692 }
2693
2694 last_scheduled_insn = bb_note (*target_bb);
2695 }
2696
2697 /* Update counters, etc in the scheduler's front end. */
2698 (*current_sched_info->begin_schedule_ready) (insn,
2699 last_scheduled_insn);
2700
2701 move_insn (insn, last_scheduled_insn, current_sched_info->next_tail);
2702 reemit_notes (insn);
2703 last_scheduled_insn = insn;
2704
2705 if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
2706 {
2707 cycle_issued_insns++;
2708 memcpy (curr_state, temp_state, dfa_state_size);
2709 }
2710
2711 if (targetm.sched.variable_issue)
2712 can_issue_more =
2713 targetm.sched.variable_issue (sched_dump, sched_verbose,
2714 insn, can_issue_more);
2715 /* A naked CLOBBER or USE generates no instruction, so do
2716 not count them against the issue rate. */
2717 else if (GET_CODE (PATTERN (insn)) != USE
2718 && GET_CODE (PATTERN (insn)) != CLOBBER)
2719 can_issue_more--;
2720
2721 advance = schedule_insn (insn);
2722
2723 /* After issuing an asm insn we should start a new cycle. */
2724 if (advance == 0 && asm_p)
2725 advance = 1;
2726 if (advance != 0)
2727 break;
2728
2729 first_cycle_insn_p = 0;
2730
2731 /* Sort the ready list based on priority. This must be
2732 redone here, as schedule_insn may have readied additional
2733 insns that will not be sorted correctly. */
2734 if (ready.n_ready > 0)
2735 ready_sort (&ready);
2736
2737 if (targetm.sched.reorder2
2738 && (ready.n_ready == 0
2739 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2740 {
2741 can_issue_more =
2742 targetm.sched.reorder2 (sched_dump, sched_verbose,
2743 ready.n_ready
2744 ? ready_lastpos (&ready) : NULL,
2745 &ready.n_ready, clock_var);
2746 }
2747 }
2748 }
2749
2750 /* Debug info. */
2751 if (sched_verbose)
2752 {
2753 fprintf (sched_dump, ";;\tReady list (final): ");
2754 debug_ready_list (&ready);
2755 }
2756
2757 if (current_sched_info->queue_must_finish_empty)
2758 /* Sanity check -- queue must be empty now. Meaningless if region has
2759 multiple bbs. */
2760 gcc_assert (!q_size && !ready.n_ready);
2761 else
2762 {
2763 /* We must maintain QUEUE_INDEX between blocks in region. */
2764 for (i = ready.n_ready - 1; i >= 0; i--)
2765 {
2766 rtx x;
2767
2768 x = ready_element (&ready, i);
2769 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2770 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2771 }
2772
2773 if (q_size)
2774 for (i = 0; i <= max_insn_queue_index; i++)
2775 {
2776 rtx link;
2777 for (link = insn_queue[i]; link; link = XEXP (link, 1))
2778 {
2779 rtx x;
2780
2781 x = XEXP (link, 0);
2782 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2783 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2784 }
2785 free_INSN_LIST_list (&insn_queue[i]);
2786 }
2787 }
2788
2789 if (sched_verbose)
2790 fprintf (sched_dump, ";; total time = %d\n", clock_var);
2791
2792 if (!current_sched_info->queue_must_finish_empty
2793 || haifa_recovery_bb_recently_added_p)
2794 {
2795 /* INSN_TICK (minimum clock tick at which the insn becomes
2796 ready) may not be correct for the insns in the subsequent
2797 blocks of the region. We should use a correct value of
2798 `clock_var' or modify INSN_TICK. It is better to keep
2799 clock_var value equal to 0 at the start of a basic block.
2800 Therefore we modify INSN_TICK here. */
2801 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
2802 }
2803
2804 if (targetm.sched.md_finish)
2805 {
2806 targetm.sched.md_finish (sched_dump, sched_verbose);
2807 /* The target might have added some instructions to the scheduled block
2808 in its md_finish () hook. These new insns don't have any data
2809 initialized, and to identify them we extend h_i_d so that they'll
2810 get zero luids. */
2811 sched_init_luids (NULL, NULL, NULL, NULL);
2812 }
2813
2814 if (sched_verbose)
2815 fprintf (sched_dump, ";; new head = %d\n;; new tail = %d\n\n",
2816 INSN_UID (head), INSN_UID (tail));
2817
2818 /* Update head/tail boundaries. */
2819 head = NEXT_INSN (prev_head);
2820 tail = last_scheduled_insn;
2821
2822 head = restore_other_notes (head, NULL);
2823
2824 current_sched_info->head = head;
2825 current_sched_info->tail = tail;
2826 }
2827 \f
2828 /* Set_priorities: compute priority of each insn in the block. */
2829
2830 int
2831 set_priorities (rtx head, rtx tail)
2832 {
2833 rtx insn;
2834 int n_insn;
2835 int sched_max_insns_priority =
2836 current_sched_info->sched_max_insns_priority;
2837 rtx prev_head;
2838
2839 if (head == tail && (! INSN_P (head)))
2840 return 0;
2841
2842 n_insn = 0;
2843
2844 prev_head = PREV_INSN (head);
2845 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
2846 {
2847 if (!INSN_P (insn))
2848 continue;
2849
2850 n_insn++;
2851 (void) priority (insn);
2852
2853 gcc_assert (INSN_PRIORITY_KNOWN (insn));
2854
2855 sched_max_insns_priority = MAX (sched_max_insns_priority,
2856 INSN_PRIORITY (insn));
2857 }
2858
2859 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
2860
2861 return n_insn;
2862 }
2863
2864 /* Set dump and sched_verbose for the desired debugging output. If no
2865 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
2866 For -fsched-verbose=N, N>=10, print everything to stderr. */
2867 void
2868 setup_sched_dump (void)
2869 {
2870 sched_verbose = sched_verbose_param;
2871 if (sched_verbose_param == 0 && dump_file)
2872 sched_verbose = 1;
2873 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
2874 ? stderr : dump_file);
2875 }
2876
2877 /* Initialize some global state for the scheduler. This function works
2878 with the common data shared between all the schedulers. It is called
2879 from the scheduler specific initialization routine. */
2880
2881 void
2882 sched_init (void)
2883 {
2884 /* Disable speculative loads in their presence if cc0 defined. */
2885 #ifdef HAVE_cc0
2886 flag_schedule_speculative_load = 0;
2887 #endif
2888
2889 /* Initialize SPEC_INFO. */
2890 if (targetm.sched.set_sched_flags)
2891 {
2892 spec_info = &spec_info_var;
2893 targetm.sched.set_sched_flags (spec_info);
2894
2895 if (spec_info->mask != 0)
2896 {
2897 spec_info->data_weakness_cutoff =
2898 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
2899 spec_info->control_weakness_cutoff =
2900 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
2901 * REG_BR_PROB_BASE) / 100;
2902 }
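/* For example (editorial illustration): --param sched-spec-prob-cutoff=40
   gives data_weakness_cutoff == (40 * MAX_DEP_WEAK) / 100 and
   control_weakness_cutoff == (40 * REG_BR_PROB_BASE) / 100; try_ready ()
   later refuses speculation whose weakness falls below the data cutoff.  */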
2903 else
2904 /* So we won't read anything accidentally. */
2905 spec_info = NULL;
2906
2907 }
2908 else
2909 /* So we won't read anything accidentally. */
2910 spec_info = 0;
2911
2912 /* Initialize issue_rate. */
2913 if (targetm.sched.issue_rate)
2914 issue_rate = targetm.sched.issue_rate ();
2915 else
2916 issue_rate = 1;
2917
2918 if (cached_issue_rate != issue_rate)
2919 {
2920 cached_issue_rate = issue_rate;
2921 /* To invalidate max_lookahead_tries: */
2922 cached_first_cycle_multipass_dfa_lookahead = 0;
2923 }
2924
2925 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
2926 dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
2927 else
2928 dfa_lookahead = 0;
2929
2930 if (targetm.sched.init_dfa_pre_cycle_insn)
2931 targetm.sched.init_dfa_pre_cycle_insn ();
2932
2933 if (targetm.sched.init_dfa_post_cycle_insn)
2934 targetm.sched.init_dfa_post_cycle_insn ();
2935
2936 dfa_start ();
2937 dfa_state_size = state_size ();
2938
2939 init_alias_analysis ();
2940
2941 df_set_flags (DF_LR_RUN_DCE);
2942 df_note_add_problem ();
2943
2944 /* More problems needed for interloop dep calculation in SMS. */
2945 if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
2946 {
2947 df_rd_add_problem ();
2948 df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
2949 }
2950
2951 df_analyze ();
2952
2953 /* Do not run DCE after reload, as this can kill nops inserted
2954 by bundling. */
2955 if (reload_completed)
2956 df_clear_flags (DF_LR_RUN_DCE);
2957
2958 regstat_compute_calls_crossed ();
2959
2960 if (targetm.sched.md_init_global)
2961 targetm.sched.md_init_global (sched_dump, sched_verbose,
2962 get_max_uid () + 1);
2963
2964 curr_state = xmalloc (dfa_state_size);
2965 }
2966
2967 static void haifa_init_only_bb (basic_block, basic_block);
2968
2969 /* Initialize data structures specific to the Haifa scheduler. */
2970 void
2971 haifa_sched_init (void)
2972 {
2973 setup_sched_dump ();
2974 sched_init ();
2975
2976 if (spec_info != NULL)
2977 {
2978 sched_deps_info->use_deps_list = 1;
2979 sched_deps_info->generate_spec_deps = 1;
2980 }
2981
2982 /* Initialize luids, dependency caches, target and h_i_d for the
2983 whole function. */
2984 {
2985 bb_vec_t bbs = VEC_alloc (basic_block, heap, n_basic_blocks);
2986 basic_block bb;
2987
2988 sched_init_bbs ();
2989
2990 FOR_EACH_BB (bb)
2991 VEC_quick_push (basic_block, bbs, bb);
2992 sched_init_luids (bbs, NULL, NULL, NULL);
2993 sched_deps_init (true);
2994 sched_extend_target ();
2995 haifa_init_h_i_d (bbs, NULL, NULL, NULL);
2996
2997 VEC_free (basic_block, heap, bbs);
2998 }
2999
3000 sched_init_only_bb = haifa_init_only_bb;
3001 sched_split_block = sched_split_block_1;
3002 sched_create_empty_bb = sched_create_empty_bb_1;
3003 haifa_recovery_bb_ever_added_p = false;
3004
3005 #ifdef ENABLE_CHECKING
3006 /* This is used preferably for finding bugs in check_cfg () itself.
3007 We must call sched_init_bbs () before check_cfg () because check_cfg ()
3008 assumes that the last insn in the last bb has a non-null successor. */
3009 check_cfg (0, 0);
3010 #endif
3011
3012 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
3013 before_recovery = 0;
3014 after_recovery = 0;
3015 }
3016
3017 /* Finish work with the data specific to the Haifa scheduler. */
3018 void
3019 haifa_sched_finish (void)
3020 {
3021 sched_create_empty_bb = NULL;
3022 sched_split_block = NULL;
3023 sched_init_only_bb = NULL;
3024
3025 if (spec_info && spec_info->dump)
3026 {
3027 char c = reload_completed ? 'a' : 'b';
3028
3029 fprintf (spec_info->dump,
3030 ";; %s:\n", current_function_name ());
3031
3032 fprintf (spec_info->dump,
3033 ";; Procedure %cr-begin-data-spec motions == %d\n",
3034 c, nr_begin_data);
3035 fprintf (spec_info->dump,
3036 ";; Procedure %cr-be-in-data-spec motions == %d\n",
3037 c, nr_be_in_data);
3038 fprintf (spec_info->dump,
3039 ";; Procedure %cr-begin-control-spec motions == %d\n",
3040 c, nr_begin_control);
3041 fprintf (spec_info->dump,
3042 ";; Procedure %cr-be-in-control-spec motions == %d\n",
3043 c, nr_be_in_control);
3044 }
3045
3046 /* Finalize h_i_d, dependency caches, and luids for the whole
3047 function. The target will be finalized in the md_finish_global () hook. */
3048 sched_deps_finish ();
3049 sched_finish_luids ();
3050 current_sched_info = NULL;
3051 sched_finish ();
3052 }
3053
3054 /* Free global data used during insn scheduling. This function works with
3055 the common data shared between the schedulers. */
3056
3057 void
3058 sched_finish (void)
3059 {
3060 haifa_finish_h_i_d ();
3061 free (curr_state);
3062
3063 if (targetm.sched.md_finish_global)
3064 targetm.sched.md_finish_global (sched_dump, sched_verbose);
3065
3066 end_alias_analysis ();
3067
3068 regstat_free_calls_crossed ();
3069
3070 dfa_finish ();
3071
3072 #ifdef ENABLE_CHECKING
3073 /* After reload the ia64 backend clobbers the CFG, so we can't check anything. */
3074 if (!reload_completed)
3075 check_cfg (0, 0);
3076 #endif
3077 }
3078
3079 /* Fix INSN_TICKs of the instructions in the current block as well as
3080 INSN_TICKs of their dependents.
3081 HEAD and TAIL are the begin and the end of the current scheduled block. */
3082 static void
3083 fix_inter_tick (rtx head, rtx tail)
3084 {
3085 /* Set of instructions with corrected INSN_TICK. */
3086 bitmap_head processed;
3087 /* ??? It is doubtful if we should assume that cycle advance happens on
3088 basic block boundaries. Basically insns that are unconditionally ready
3089 at the start of the block are preferable to those which have
3090 a one cycle dependency on an insn from the previous block. */
3091 int next_clock = clock_var + 1;
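  /* Illustration: if the block just scheduled ended with clock_var == 7,
     NEXT_CLOCK is 8; a dependent insn whose INSN_TICK was computed as 10
     is rebased below to 10 - 8 == 2, i.e. two cycles into the next block.  */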
3092
3093 bitmap_initialize (&processed, 0);
3094
3095 /* Iterate over the scheduled instructions and fix their INSN_TICKs and the
3096 INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
3097 across different blocks. */
3098 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
3099 {
3100 if (INSN_P (head))
3101 {
3102 int tick;
3103 sd_iterator_def sd_it;
3104 dep_t dep;
3105
3106 tick = INSN_TICK (head);
3107 gcc_assert (tick >= MIN_TICK);
3108
3109 /* Fix INSN_TICK of instruction from just scheduled block. */
3110 if (!bitmap_bit_p (&processed, INSN_LUID (head)))
3111 {
3112 bitmap_set_bit (&processed, INSN_LUID (head));
3113 tick -= next_clock;
3114
3115 if (tick < MIN_TICK)
3116 tick = MIN_TICK;
3117
3118 INSN_TICK (head) = tick;
3119 }
3120
3121 FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
3122 {
3123 rtx next;
3124
3125 next = DEP_CON (dep);
3126 tick = INSN_TICK (next);
3127
3128 if (tick != INVALID_TICK
3129 /* If NEXT has its INSN_TICK calculated, fix it.
3130 If not - it will be properly calculated from
3131 scratch later in fix_tick_ready. */
3132 && !bitmap_bit_p (&processed, INSN_LUID (next)))
3133 {
3134 bitmap_set_bit (&processed, INSN_LUID (next));
3135 tick -= next_clock;
3136
3137 if (tick < MIN_TICK)
3138 tick = MIN_TICK;
3139
3140 if (tick > INTER_TICK (next))
3141 INTER_TICK (next) = tick;
3142 else
3143 tick = INTER_TICK (next);
3144
3145 INSN_TICK (next) = tick;
3146 }
3147 }
3148 }
3149 }
3150 bitmap_clear (&processed);
3151 }
3152
3153 static int haifa_speculate_insn (rtx, ds_t, rtx *);
3154
3155 /* Check if NEXT is ready to be added to the ready or queue list.
3156 If "yes", add it to the proper list.
3157 Returns:
3158 -1 - is not ready yet,
3159 0 - added to the ready list,
3160 0 < N - queued for N cycles. */
3161 int
3162 try_ready (rtx next)
3163 {
3164 ds_t old_ts, *ts;
3165
3166 ts = &TODO_SPEC (next);
3167 old_ts = *ts;
3168
3169 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
3170 && ((old_ts & HARD_DEP)
3171 || (old_ts & SPECULATIVE)));
3172
3173 if (sd_lists_empty_p (next, SD_LIST_BACK))
3174 /* NEXT has all its dependencies resolved. */
3175 {
3176 /* Remove HARD_DEP bit from NEXT's status. */
3177 *ts &= ~HARD_DEP;
3178
3179 if (current_sched_info->flags & DO_SPECULATION)
3180 /* Remove all speculative bits from NEXT's status. */
3181 *ts &= ~SPECULATIVE;
3182 }
3183 else
3184 {
3185 /* One of NEXT's dependencies has been resolved.
3186 Recalculate NEXT's status. */
3187
3188 *ts &= ~SPECULATIVE & ~HARD_DEP;
3189
3190 if (sd_lists_empty_p (next, SD_LIST_HARD_BACK))
3191 /* Now we've got NEXT with speculative deps only.
3192 1. Look at the deps to see what we have to do.
3193 2. Check if we can do 'todo'. */
3194 {
3195 sd_iterator_def sd_it;
3196 dep_t dep;
3197 bool first_p = true;
3198
3199 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
3200 {
3201 ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
3202
3203 if (first_p)
3204 {
3205 first_p = false;
3206
3207 *ts = ds;
3208 }
3209 else
3210 *ts = ds_merge (*ts, ds);
3211 }
3212
3213 if (ds_weak (*ts) < spec_info->data_weakness_cutoff)
3214 /* Too few points. */
3215 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3216 }
3217 else
3218 *ts |= HARD_DEP;
3219 }
3220
3221 if (*ts & HARD_DEP)
3222 gcc_assert (*ts == old_ts
3223 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
3224 else if (current_sched_info->new_ready)
3225 *ts = current_sched_info->new_ready (next, *ts);
3226
3227 /* * If !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then the insn might
3228 have its original pattern or a changed (speculative) one. This is due
3229 to changing ebbs in region scheduling.
3230 * But if (old_ts & SPECULATIVE), then we are pretty sure that the insn
3231 has a speculative pattern.
3232
3233 We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
3234 control-speculative NEXT could have been discarded by sched-rgn.c
3235 (the same case as when discarded by can_schedule_ready_p ()). */
3236
3237 if ((*ts & SPECULATIVE)
3238 /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
3239 need to change anything. */
3240 && *ts != old_ts)
3241 {
3242 int res;
3243 rtx new_pat;
3244
3245 gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
3246
3247 res = haifa_speculate_insn (next, *ts, &new_pat);
3248
3249 switch (res)
3250 {
3251 case -1:
3252 /* It would be nice to change DEP_STATUS of all dependences,
3253 which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
3254 so we won't reanalyze anything. */
3255 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3256 break;
3257
3258 case 0:
3259 /* We follow the rule that every speculative insn
3260 has non-null ORIG_PAT. */
3261 if (!ORIG_PAT (next))
3262 ORIG_PAT (next) = PATTERN (next);
3263 break;
3264
3265 case 1:
3266 if (!ORIG_PAT (next))
3267 /* If we are going to overwrite the original pattern of the insn,
3268 save it. */
3269 ORIG_PAT (next) = PATTERN (next);
3270
3271 haifa_change_pattern (next, new_pat);
3272 break;
3273
3274 default:
3275 gcc_unreachable ();
3276 }
3277 }
3278
3279 /* We need to restore the pattern only if (*ts == 0), because otherwise it is
3280 either correct (*ts & SPECULATIVE),
3281 or we simply don't care (*ts & HARD_DEP). */
3282
3283 gcc_assert (!ORIG_PAT (next)
3284 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
3285
3286 if (*ts & HARD_DEP)
3287 {
3288 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
3289 control-speculative NEXT could have been discarded by sched-rgn.c
3290 (the same case as when discarded by can_schedule_ready_p ()). */
3291 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
3292
3293 change_queue_index (next, QUEUE_NOWHERE);
3294 return -1;
3295 }
3296 else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
3297 /* We should change the pattern of every previously speculative
3298 instruction - and we determine if NEXT was speculative by using
3299 the ORIG_PAT field. Except in one case - speculation checks have ORIG_PAT
3300 set too, so skip them. */
3301 {
3302 haifa_change_pattern (next, ORIG_PAT (next));
3303 ORIG_PAT (next) = 0;
3304 }
3305
3306 if (sched_verbose >= 2)
3307 {
3308 int s = TODO_SPEC (next);
3309
3310 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
3311 (*current_sched_info->print_insn) (next, 0));
3312
3313 if (spec_info && spec_info->dump)
3314 {
3315 if (s & BEGIN_DATA)
3316 fprintf (spec_info->dump, "; data-spec;");
3317 if (s & BEGIN_CONTROL)
3318 fprintf (spec_info->dump, "; control-spec;");
3319 if (s & BE_IN_CONTROL)
3320 fprintf (spec_info->dump, "; in-control-spec;");
3321 }
3322
3323 fprintf (sched_dump, "\n");
3324 }
3325
3326 adjust_priority (next);
3327
3328 return fix_tick_ready (next);
3329 }
3330
3331 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
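/* Worked example (editorial): suppose INSN_TICK (next) is INVALID_TICK and
   NEXT has resolved producers scheduled at ticks 3 and 5 with dependence
   costs 2 and 1; the recomputed tick is MAX (3 + 2, 5 + 1) == 6, and with
   clock_var == 4 the delay is 2, so NEXT is queued for two cycles.  A
   delay <= 0 would instead send it straight to the ready list.  */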
3332 static int
3333 fix_tick_ready (rtx next)
3334 {
3335 int tick, delay;
3336
3337 if (!sd_lists_empty_p (next, SD_LIST_RES_BACK))
3338 {
3339 int full_p;
3340 sd_iterator_def sd_it;
3341 dep_t dep;
3342
3343 tick = INSN_TICK (next);
3344 /* If tick is not equal to INVALID_TICK, then update
3345 INSN_TICK of NEXT with the most recent resolved dependence
3346 cost. Otherwise, recalculate from scratch. */
3347 full_p = (tick == INVALID_TICK);
3348
3349 FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
3350 {
3351 rtx pro = DEP_PRO (dep);
3352 int tick1;
3353
3354 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
3355
3356 tick1 = INSN_TICK (pro) + dep_cost (dep);
3357 if (tick1 > tick)
3358 tick = tick1;
3359
3360 if (!full_p)
3361 break;
3362 }
3363 }
3364 else
3365 tick = -1;
3366
3367 INSN_TICK (next) = tick;
3368
3369 delay = tick - clock_var;
3370 if (delay <= 0)
3371 delay = QUEUE_READY;
3372
3373 change_queue_index (next, delay);
3374
3375 return delay;
3376 }
3377
3378 /* Move NEXT to the proper queue list with (DELAY >= 1),
3379 or add it to the ready list (DELAY == QUEUE_READY),
3380 or remove it from the ready and queue lists altogether (DELAY == QUEUE_NOWHERE). */
3381 static void
3382 change_queue_index (rtx next, int delay)
3383 {
3384 int i = QUEUE_INDEX (next);
3385
3386 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
3387 && delay != 0);
3388 gcc_assert (i != QUEUE_SCHEDULED);
3389
3390 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
3391 || (delay < 0 && delay == i))
3392 /* We have nothing to do. */
3393 return;
3394
3395 /* Remove NEXT from wherever it is now. */
3396 if (i == QUEUE_READY)
3397 ready_remove_insn (next);
3398 else if (i >= 0)
3399 queue_remove (next);
3400
3401 /* Add it to the proper place. */
3402 if (delay == QUEUE_READY)
3403 ready_add (readyp, next, false);
3404 else if (delay >= 1)
3405 queue_insn (next, delay);
3406
3407 if (sched_verbose >= 2)
3408 {
3409 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
3410 (*current_sched_info->print_insn) (next, 0));
3411
3412 if (delay == QUEUE_READY)
3413 fprintf (sched_dump, " into ready\n");
3414 else if (delay >= 1)
3415 fprintf (sched_dump, " into queue with cost=%d\n", delay);
3416 else
3417 fprintf (sched_dump, " removed from ready or queue lists\n");
3418 }
3419 }
3420
3421 static int sched_ready_n_insns = -1;
3422
3423 /* Initialize per region data structures. */
3424 void
3425 sched_extend_ready_list (int new_sched_ready_n_insns)
3426 {
3427 int i;
3428
3429 if (sched_ready_n_insns == -1)
3430 /* At the first call we need to initialize one more choice_stack
3431 entry. */
3432 {
3433 i = 0;
3434 sched_ready_n_insns = 0;
3435 }
3436 else
3437 i = sched_ready_n_insns + 1;
3438
3439 ready.veclen = new_sched_ready_n_insns + issue_rate;
3440 ready.vec = XRESIZEVEC (rtx, ready.vec, ready.veclen);
3441
3442 gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
3443
3444 ready_try = (char *) xrecalloc (ready_try, new_sched_ready_n_insns,
3445 sched_ready_n_insns, sizeof (*ready_try));
3446
3447 /* We allocate +1 element to save initial state in the choice_stack[0]
3448 entry. */
3449 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
3450 new_sched_ready_n_insns + 1);
3451
3452 for (; i <= new_sched_ready_n_insns; i++)
3453 choice_stack[i].state = xmalloc (dfa_state_size);
3454
3455 sched_ready_n_insns = new_sched_ready_n_insns;
3456 }
3457
3458 /* Free per region data structures. */
3459 void
3460 sched_finish_ready_list (void)
3461 {
3462 int i;
3463
3464 free (ready.vec);
3465 ready.vec = NULL;
3466 ready.veclen = 0;
3467
3468 free (ready_try);
3469 ready_try = NULL;
3470
3471 for (i = 0; i <= sched_ready_n_insns; i++)
3472 free (choice_stack [i].state);
3473 free (choice_stack);
3474 choice_stack = NULL;
3475
3476 sched_ready_n_insns = -1;
3477 }
3478
3479 static int
3480 haifa_luid_for_non_insn (rtx x)
3481 {
3482 gcc_assert (NOTE_P (x) || LABEL_P (x));
3483
3484 return 0;
3485 }
3486
3487 /* Generates recovery code for INSN. */
3488 static void
3489 generate_recovery_code (rtx insn)
3490 {
3491 if (TODO_SPEC (insn) & BEGIN_SPEC)
3492 begin_speculative_block (insn);
3493
3494 /* Here we have an insn with no dependencies on
3495 instructions other than CHECK_SPEC ones. */
3496
3497 if (TODO_SPEC (insn) & BE_IN_SPEC)
3498 add_to_speculative_block (insn);
3499 }
3500
3501 /* Helper function.
3502 Tries to add speculative dependencies of type FS between TWIN and the
3503 consumers of INSN's forward dependencies. */
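/* In outline (editorial summary): for every forward dependence of INSN a
   matching dependence from TWIN to the same consumer is added; when FS is
   nonzero and the dependence is a true dependence, its status is either
   converted from 'begin speculative' to 'be in speculative' (when the
   weakness does not decrease and the consumer tolerates it) or simply
   marked with FS.  */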
3504 static void
3505 process_insn_forw_deps_be_in_spec (rtx insn, rtx twin, ds_t fs)
3506 {
3507 sd_iterator_def sd_it;
3508 dep_t dep;
3509
3510 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
3511 {
3512 ds_t ds;
3513 rtx consumer;
3514
3515 consumer = DEP_CON (dep);
3516
3517 ds = DEP_STATUS (dep);
3518
3519 if (/* If we want to create speculative dep. */
3520 fs
3521 /* And we can do that because this is a true dep. */
3522 && (ds & DEP_TYPES) == DEP_TRUE)
3523 {
3524 gcc_assert (!(ds & BE_IN_SPEC));
3525
3526 if (/* If this dep can be overcome with 'begin speculation'. */
3527 ds & BEGIN_SPEC)
3528 /* Then we have a choice: keep the dep 'begin speculative'
3529 or transform it into 'be in speculative'. */
3530 {
3531 if (/* In try_ready we assert that once an insn has become ready
3532 it can be removed from the ready (or queue) list only
3533 due to a backend decision. Hence we can't let the
3534 probability of the speculative dep decrease. */
3535 ds_weak (ds) <= ds_weak (fs))
3536 {
3537 ds_t new_ds;
3538
3539 new_ds = (ds & ~BEGIN_SPEC) | fs;
3540
3541 if (/* consumer can 'be in speculative'. */
3542 sched_insn_is_legitimate_for_speculation_p (consumer,
3543 new_ds))
3544 /* Transform it to be in speculative. */
3545 ds = new_ds;
3546 }
3547 }
3548 else
3549 /* Mark the dep as 'be in speculative'. */
3550 ds |= fs;
3551 }
3552
3553 {
3554 dep_def _new_dep, *new_dep = &_new_dep;
3555
3556 init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
3557 sd_add_dep (new_dep, false);
3558 }
3559 }
3560 }
3561
3562 /* Generates recovery code for BEGIN speculative INSN. */
3563 static void
3564 begin_speculative_block (rtx insn)
3565 {
3566 if (TODO_SPEC (insn) & BEGIN_DATA)
3567 nr_begin_data++;
3568 if (TODO_SPEC (insn) & BEGIN_CONTROL)
3569 nr_begin_control++;
3570
3571 create_check_block_twin (insn, false);
3572
3573 TODO_SPEC (insn) &= ~BEGIN_SPEC;
3574 }
3575
3576 static void haifa_init_insn (rtx);
3577
3578 /* Generates recovery code for BE_IN speculative INSN. */
3579 static void
3580 add_to_speculative_block (rtx insn)
3581 {
3582 ds_t ts;
3583 sd_iterator_def sd_it;
3584 dep_t dep;
3585 rtx twins = NULL;
3586 rtx_vec_t priorities_roots;
3587
3588 ts = TODO_SPEC (insn);
3589 gcc_assert (!(ts & ~BE_IN_SPEC));
3590
3591 if (ts & BE_IN_DATA)
3592 nr_be_in_data++;
3593 if (ts & BE_IN_CONTROL)
3594 nr_be_in_control++;
3595
3596 TODO_SPEC (insn) &= ~BE_IN_SPEC;
3597 gcc_assert (!TODO_SPEC (insn));
3598
3599 DONE_SPEC (insn) |= ts;
3600
3601 /* First we convert all simple checks to branchy. */
3602 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3603 sd_iterator_cond (&sd_it, &dep);)
3604 {
3605 rtx check = DEP_PRO (dep);
3606
3607 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
3608 {
3609 create_check_block_twin (check, true);
3610
3611 /* Restart search. */
3612 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3613 }
3614 else
3615 /* Continue search. */
3616 sd_iterator_next (&sd_it);
3617 }
3618
3619 priorities_roots = NULL;
3620 clear_priorities (insn, &priorities_roots);
3621
3622 while (1)
3623 {
3624 rtx check, twin;
3625 basic_block rec;
3626
3627 /* Get the first backward dependency of INSN. */
3628 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3629 if (!sd_iterator_cond (&sd_it, &dep))
3630 /* INSN has no backward dependencies left. */
3631 break;
3632
3633 gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
3634 && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
3635 && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
3636
3637 check = DEP_PRO (dep);
3638
3639 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
3640 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
3641
3642 rec = BLOCK_FOR_INSN (check);
3643
3644 twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
3645 haifa_init_insn (twin);
3646
3647 sd_copy_back_deps (twin, insn, true);
3648
3649 if (sched_verbose && spec_info->dump)
3650 /* INSN_BB (insn) isn't determined for twin insns yet.
3651 So we can't use current_sched_info->print_insn. */
3652 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3653 INSN_UID (twin), rec->index);
3654
3655 twins = alloc_INSN_LIST (twin, twins);
3656
3657 /* Add dependences between TWIN and all appropriate
3658 instructions from REC. */
3659 FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
3660 {
3661 rtx pro = DEP_PRO (dep);
3662
3663 gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
3664
3665 /* INSN might have dependencies on instructions from
3666 several recovery blocks. At this iteration we process those
3667 producers that reside in REC. */
3668 if (BLOCK_FOR_INSN (pro) == rec)
3669 {
3670 dep_def _new_dep, *new_dep = &_new_dep;
3671
3672 init_dep (new_dep, pro, twin, REG_DEP_TRUE);
3673 sd_add_dep (new_dep, false);
3674 }
3675 }
3676
3677 process_insn_forw_deps_be_in_spec (insn, twin, ts);
3678
3679 /* Remove all dependencies between INSN and insns in REC. */
3680 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3681 sd_iterator_cond (&sd_it, &dep);)
3682 {
3683 rtx pro = DEP_PRO (dep);
3684
3685 if (BLOCK_FOR_INSN (pro) == rec)
3686 sd_delete_dep (sd_it);
3687 else
3688 sd_iterator_next (&sd_it);
3689 }
3690 }
3691
3692 /* We couldn't have added the dependencies between INSN and TWINS earlier
3693 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
3694 while (twins)
3695 {
3696 rtx twin;
3697
3698 twin = XEXP (twins, 0);
3699
3700 {
3701 dep_def _new_dep, *new_dep = &_new_dep;
3702
3703 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
3704 sd_add_dep (new_dep, false);
3705 }
3706
3707 twin = XEXP (twins, 1);
3708 free_INSN_LIST_node (twins);
3709 twins = twin;
3710 }
3711
3712 calc_priorities (priorities_roots);
3713 VEC_free (rtx, heap, priorities_roots);
3714 }
3715
3716 /* Extends the array pointed to by P and zero-fills the new part. */
3717 void *
3718 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
3719 {
3720 gcc_assert (new_nmemb >= old_nmemb);
3721 p = XRESIZEVAR (void, p, new_nmemb * size);
3722 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
3723 return p;
3724 }
3725
3726 /* Helper function.
3727 Find fallthru edge from PRED. */
3728 edge
3729 find_fallthru_edge (basic_block pred)
3730 {
3731 edge e;
3732 edge_iterator ei;
3733 basic_block succ;
3734
3735 succ = pred->next_bb;
3736 gcc_assert (succ->prev_bb == pred);
3737
3738 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
3739 {
3740 FOR_EACH_EDGE (e, ei, pred->succs)
3741 if (e->flags & EDGE_FALLTHRU)
3742 {
3743 gcc_assert (e->dest == succ);
3744 return e;
3745 }
3746 }
3747 else
3748 {
3749 FOR_EACH_EDGE (e, ei, succ->preds)
3750 if (e->flags & EDGE_FALLTHRU)
3751 {
3752 gcc_assert (e->src == pred);
3753 return e;
3754 }
3755 }
3756
3757 return NULL;
3758 }
3759
3760 /* Extend per basic block data structures. */
3761 static void
3762 sched_extend_bb (void)
3763 {
3764 rtx insn;
3765
3766 /* The following is done to keep current_sched_info->next_tail non-null. */
3767 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
3768 if (NEXT_INSN (insn) == 0
3769 || (!NOTE_P (insn)
3770 && !LABEL_P (insn)
3771 /* Don't emit a NOTE if it would end up before a BARRIER. */
3772 && !BARRIER_P (NEXT_INSN (insn))))
3773 {
3774 rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
3775 /* Make insn appear outside BB. */
3776 set_block_for_insn (note, NULL);
3777 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
3778 }
3779 }
3780
3781 /* Init per basic block data structures. */
3782 void
3783 sched_init_bbs (void)
3784 {
3785 sched_extend_bb ();
3786 }
3787
3788 /* Initialize BEFORE_RECOVERY variable. */
3789 static void
3790 init_before_recovery (basic_block *before_recovery_ptr)
3791 {
3792 basic_block last;
3793 edge e;
3794
3795 last = EXIT_BLOCK_PTR->prev_bb;
3796 e = find_fallthru_edge (last);
3797
3798 if (e)
3799 {
3800 /* We create two basic blocks:
3801 1. A single-instruction block inserted right after E->SRC,
3802 which jumps to
3803 2. An empty block placed right before EXIT_BLOCK.
3804 Recovery blocks will be emitted between these two blocks. */
3805
3806 basic_block single, empty;
3807 rtx x, label;
3808
3809 /* If the fallthrough edge to exit we've found is from the block we've
3810 created before, don't do anything more. */
3811 if (last == after_recovery)
3812 return;
3813
3814 adding_bb_to_current_region_p = false;
3815
3816 single = sched_create_empty_bb (last);
3817 empty = sched_create_empty_bb (single);
3818
3819 /* Add new blocks to the root loop. */
3820 if (current_loops != NULL)
3821 {
3822 add_bb_to_loop (single, VEC_index (loop_p, current_loops->larray, 0));
3823 add_bb_to_loop (empty, VEC_index (loop_p, current_loops->larray, 0));
3824 }
3825
3826 single->count = last->count;
3827 empty->count = last->count;
3828 single->frequency = last->frequency;
3829 empty->frequency = last->frequency;
3830 BB_COPY_PARTITION (single, last);
3831 BB_COPY_PARTITION (empty, last);
3832
3833 redirect_edge_succ (e, single);
3834 make_single_succ_edge (single, empty, 0);
3835 make_single_succ_edge (empty, EXIT_BLOCK_PTR,
3836 EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
3837
3838 label = block_label (empty);
3839 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
3840 JUMP_LABEL (x) = label;
3841 LABEL_NUSES (label)++;
3842 haifa_init_insn (x);
3843
3844 emit_barrier_after (x);
3845
3846 sched_init_only_bb (empty, NULL);
3847 sched_init_only_bb (single, NULL);
3848 sched_extend_bb ();
3849
3850 adding_bb_to_current_region_p = true;
3851 before_recovery = single;
3852 after_recovery = empty;
3853
3854 if (before_recovery_ptr)
3855 *before_recovery_ptr = before_recovery;
3856
3857 if (sched_verbose >= 2 && spec_info->dump)
3858 fprintf (spec_info->dump,
3859 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
3860 last->index, single->index, empty->index);
3861 }
3862 else
3863 before_recovery = last;
3864 }
3865
3866 /* Return a new recovery block. */
3867 basic_block
3868 sched_create_recovery_block (basic_block *before_recovery_ptr)
3869 {
3870 rtx label;
3871 rtx barrier;
3872 basic_block rec;
3873
3874 haifa_recovery_bb_recently_added_p = true;
3875 haifa_recovery_bb_ever_added_p = true;
3876
3877 init_before_recovery (before_recovery_ptr);
3878
3879 barrier = get_last_bb_insn (before_recovery);
3880 gcc_assert (BARRIER_P (barrier));
3881
3882 label = emit_label_after (gen_label_rtx (), barrier);
3883
3884 rec = create_basic_block (label, label, before_recovery);
3885
3886 /* A recovery block always ends with an unconditional jump. */
3887 emit_barrier_after (BB_END (rec));
3888
3889 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
3890 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
3891
3892 if (sched_verbose && spec_info->dump)
3893 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
3894 rec->index);
3895
3896 return rec;
3897 }
3898
3899 /* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
3900 and emit necessary jumps. */
3901 void
3902 sched_create_recovery_edges (basic_block first_bb, basic_block rec,
3903 basic_block second_bb)
3904 {
3905 rtx label;
3906 rtx jump;
3907 edge e;
3908 int edge_flags;
3909
3910 /* Fix the incoming edge. */
3911 /* ??? Which other flags should be specified? */
3912 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
3913 /* The partition type is the same if it is "unpartitioned". */
3914 edge_flags = EDGE_CROSSING;
3915 else
3916 edge_flags = 0;
3917
3918 e = make_edge (first_bb, rec, edge_flags);
3919 label = block_label (second_bb);
3920 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
3921 JUMP_LABEL (jump) = label;
3922 LABEL_NUSES (label)++;
3923
3924 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
3925 /* The partition type is the same if it is "unpartitioned". */
3926 {
3927 /* Rewritten from cfgrtl.c. */
3928 if (flag_reorder_blocks_and_partition
3929 && targetm.have_named_sections)
3930 {
3931 /* We don't need the same note for the check because
3932 any_condjump_p (check) == true. */
3933 add_reg_note (jump, REG_CROSSING_JUMP, NULL_RTX);
3934 }
3935 edge_flags = EDGE_CROSSING;
3936 }
3937 else
3938 edge_flags = 0;
3939
3940 make_single_succ_edge (rec, second_bb, edge_flags);
3941 }
3942
3943 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
3944 INSN is a simple check that should be converted to a branchy one. */
3945 static void
3946 create_check_block_twin (rtx insn, bool mutate_p)
3947 {
3948 basic_block rec;
3949 rtx label, check, twin;
3950 ds_t fs;
3951 sd_iterator_def sd_it;
3952 dep_t dep;
3953 dep_def _new_dep, *new_dep = &_new_dep;
3954 ds_t todo_spec;
3955
3956 gcc_assert (ORIG_PAT (insn) != NULL_RTX);
3957
3958 if (!mutate_p)
3959 todo_spec = TODO_SPEC (insn);
3960 else
3961 {
3962 gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
3963 && (TODO_SPEC (insn) & SPECULATIVE) == 0);
3964
3965 todo_spec = CHECK_SPEC (insn);
3966 }
3967
3968 todo_spec &= SPECULATIVE;
3969
3970 /* Create recovery block. */
3971 if (mutate_p || targetm.sched.needs_block_p (todo_spec))
3972 {
3973 rec = sched_create_recovery_block (NULL);
3974 label = BB_HEAD (rec);
3975 }
3976 else
3977 {
3978 rec = EXIT_BLOCK_PTR;
3979 label = NULL_RTX;
3980 }
3981
3982 /* Emit CHECK. */
3983 check = targetm.sched.gen_spec_check (insn, label, todo_spec);
3984
3985 if (rec != EXIT_BLOCK_PTR)
3986 {
3987 /* To keep mem_reg alive at the beginning of second_bb,
3988 we emit the check BEFORE insn, so that after splitting,
3989 insn will be at the beginning of second_bb and will
3990 provide us with the correct liveness information. */
3991 check = emit_jump_insn_before (check, insn);
3992 JUMP_LABEL (check) = label;
3993 LABEL_NUSES (label)++;
3994 }
3995 else
3996 check = emit_insn_before (check, insn);
3997
3998 /* Extend data structures. */
3999 haifa_init_insn (check);
4000
4001 /* CHECK is being added to current region. Extend ready list. */
4002 gcc_assert (sched_ready_n_insns != -1);
4003 sched_extend_ready_list (sched_ready_n_insns + 1);
4004
4005 if (current_sched_info->add_remove_insn)
4006 current_sched_info->add_remove_insn (insn, 0);
4007
4008 RECOVERY_BLOCK (check) = rec;
4009
4010 if (sched_verbose && spec_info->dump)
4011 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
4012 (*current_sched_info->print_insn) (check, 0));
4013
4014 gcc_assert (ORIG_PAT (insn));
4015
4016 /* Initialize TWIN (twin is a duplicate of original instruction
4017 in the recovery block). */
4018 if (rec != EXIT_BLOCK_PTR)
4019 {
4020 sd_iterator_def sd_it;
4021 dep_t dep;
4022
4023 FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
4024 if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
4025 {
4026 struct _dep _dep2, *dep2 = &_dep2;
4027
4028 init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);
4029
4030 sd_add_dep (dep2, true);
4031 }
4032
4033 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
4034 haifa_init_insn (twin);
4035
4036 if (sched_verbose && spec_info->dump)
4037 /* INSN_BB (insn) isn't determined for twin insns yet.
4038 So we can't use current_sched_info->print_insn. */
4039 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
4040 INSN_UID (twin), rec->index);
4041 }
4042 else
4043 {
4044 ORIG_PAT (check) = ORIG_PAT (insn);
4045 HAS_INTERNAL_DEP (check) = 1;
4046 twin = check;
4047 /* ??? We probably should change all OUTPUT dependencies to
4048 (TRUE | OUTPUT). */
4049 }
4050
4051 /* Copy all resolved back dependencies of INSN to TWIN. This will
4052 provide correct value for INSN_TICK (TWIN). */
4053 sd_copy_back_deps (twin, insn, true);
4054
4055 if (rec != EXIT_BLOCK_PTR)
4056 /* In case of branchy check, fix CFG. */
4057 {
4058 basic_block first_bb, second_bb;
4059 rtx jump;
4060
4061 first_bb = BLOCK_FOR_INSN (check);
4062 second_bb = sched_split_block (first_bb, check);
4063
4064 sched_create_recovery_edges (first_bb, rec, second_bb);
4065
4066 sched_init_only_bb (second_bb, first_bb);
4067 sched_init_only_bb (rec, EXIT_BLOCK_PTR);
4068
4069 jump = BB_END (rec);
4070 haifa_init_insn (jump);
4071 }
4072
4073 /* Move backward dependences from INSN to CHECK and
4074 move forward dependences from INSN to TWIN. */
4075
4076 /* First, create dependencies between INSN's producers and CHECK & TWIN. */
4077 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4078 {
4079 rtx pro = DEP_PRO (dep);
4080 ds_t ds;
4081
4082 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
4083 check --TRUE--> producer ??? or ANTI ???
4084 twin --TRUE--> producer
4085 twin --ANTI--> check
4086
4087 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
4088 check --ANTI--> producer
4089 twin --ANTI--> producer
4090 twin --ANTI--> check
4091
4092 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
4093 check ~~TRUE~~> producer
4094 twin ~~TRUE~~> producer
4095 twin --ANTI--> check */
4096
4097 ds = DEP_STATUS (dep);
4098
4099 if (ds & BEGIN_SPEC)
4100 {
4101 gcc_assert (!mutate_p);
4102 ds &= ~BEGIN_SPEC;
4103 }
4104
4105 init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
4106 sd_add_dep (new_dep, false);
4107
4108 if (rec != EXIT_BLOCK_PTR)
4109 {
4110 DEP_CON (new_dep) = twin;
4111 sd_add_dep (new_dep, false);
4112 }
4113 }
4114
4115 /* Second, remove backward dependencies of INSN. */
4116 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4117 sd_iterator_cond (&sd_it, &dep);)
4118 {
4119 if ((DEP_STATUS (dep) & BEGIN_SPEC)
4120 || mutate_p)
4121 /* We can delete this dep because we overcome it with
4122 BEGIN_SPECULATION. */
4123 sd_delete_dep (sd_it);
4124 else
4125 sd_iterator_next (&sd_it);
4126 }
4127
4128 /* Future Speculations. Determine what BE_IN speculations will be like. */
4129 fs = 0;
4130
4131 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
4132 here. */
4133
4134 gcc_assert (!DONE_SPEC (insn));
4135
4136 if (!mutate_p)
4137 {
4138 ds_t ts = TODO_SPEC (insn);
4139
4140 DONE_SPEC (insn) = ts & BEGIN_SPEC;
4141 CHECK_SPEC (check) = ts & BEGIN_SPEC;
4142
4143 /* Luckiness of future speculations solely depends upon initial
4144 BEGIN speculation. */
4145 if (ts & BEGIN_DATA)
4146 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
4147 if (ts & BEGIN_CONTROL)
4148 fs = set_dep_weak (fs, BE_IN_CONTROL,
4149 get_dep_weak (ts, BEGIN_CONTROL));
4150 }
4151 else
4152 CHECK_SPEC (check) = CHECK_SPEC (insn);
4153
4154 /* Future speculations: call the helper. */
4155 process_insn_forw_deps_be_in_spec (insn, twin, fs);
4156
4157 if (rec != EXIT_BLOCK_PTR)
4158 {
4159 /* Which types of dependencies to use here is, in general, a
4160 machine-dependent question... But, for now,
4161 it is not. */
4162
4163 if (!mutate_p)
4164 {
4165 init_dep (new_dep, insn, check, REG_DEP_TRUE);
4166 sd_add_dep (new_dep, false);
4167
4168 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
4169 sd_add_dep (new_dep, false);
4170 }
4171 else
4172 {
4173 if (spec_info->dump)
4174 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
4175 (*current_sched_info->print_insn) (insn, 0));
4176
4177 /* Remove all dependencies of the INSN. */
4178 {
4179 sd_it = sd_iterator_start (insn, (SD_LIST_FORW
4180 | SD_LIST_BACK
4181 | SD_LIST_RES_BACK));
4182 while (sd_iterator_cond (&sd_it, &dep))
4183 sd_delete_dep (sd_it);
4184 }
4185
4186 /* If the former check (INSN) was already moved to the ready (or queue)
4187 list, add the new check (CHECK) there too. */
4188 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
4189 try_ready (check);
4190
4191 /* Remove old check from instruction stream and free its
4192 data. */
4193 sched_remove_insn (insn);
4194 }
4195
4196 init_dep (new_dep, check, twin, REG_DEP_ANTI);
4197 sd_add_dep (new_dep, false);
4198 }
4199 else
4200 {
4201 init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
4202 sd_add_dep (new_dep, false);
4203 }
4204
4205 if (!mutate_p)
4206 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
4207 because it'll be done later in add_to_speculative_block. */
4208 {
4209 rtx_vec_t priorities_roots = NULL;
4210
4211 clear_priorities (twin, &priorities_roots);
4212 calc_priorities (priorities_roots);
4213 VEC_free (rtx, heap, priorities_roots);
4214 }
4215 }
4216
4217 /* Remove dependencies between instructions in the recovery block REC
4218 and ordinary region instructions. Inner dependencies are kept so
4219 they won't need to be recomputed. */
4220 static void
4221 fix_recovery_deps (basic_block rec)
4222 {
4223 rtx note, insn, jump, ready_list = 0;
4224 bitmap_head in_ready;
4225 rtx link;
4226
4227 bitmap_initialize (&in_ready, 0);
4228
4229 /* NOTE - a basic block note. */
4230 note = NEXT_INSN (BB_HEAD (rec));
4231 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4232 insn = BB_END (rec);
4233 gcc_assert (JUMP_P (insn));
4234 insn = PREV_INSN (insn);
4235
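/* Walk the block's insns backward from just before the jump down to the
   bb note.  Forward dependencies whose consumers lie outside REC are
   deleted, and each such consumer is recorded once (via IN_READY) so it
   can be re-evaluated for readiness below.  */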
4236 do
4237 {
4238 sd_iterator_def sd_it;
4239 dep_t dep;
4240
4241 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4242 sd_iterator_cond (&sd_it, &dep);)
4243 {
4244 rtx consumer = DEP_CON (dep);
4245
4246 if (BLOCK_FOR_INSN (consumer) != rec)
4247 {
4248 sd_delete_dep (sd_it);
4249
4250 if (!bitmap_bit_p (&in_ready, INSN_LUID (consumer)))
4251 {
4252 ready_list = alloc_INSN_LIST (consumer, ready_list);
4253 bitmap_set_bit (&in_ready, INSN_LUID (consumer));
4254 }
4255 }
4256 else
4257 {
4258 gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
4259
4260 sd_iterator_next (&sd_it);
4261 }
4262 }
4263
4264 insn = PREV_INSN (insn);
4265 }
4266 while (insn != note);
4267
4268 bitmap_clear (&in_ready);
4269
4270 /* Try to add instructions to the ready or queue list. */
4271 for (link = ready_list; link; link = XEXP (link, 1))
4272 try_ready (XEXP (link, 0));
4273 free_INSN_LIST_list (&ready_list);
4274
4275 /* Fix the jump's dependencies. */
4276 insn = BB_HEAD (rec);
4277 jump = BB_END (rec);
4278
4279 gcc_assert (LABEL_P (insn));
4280 insn = NEXT_INSN (insn);
4281
4282 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4283 add_jump_dependencies (insn, jump);
4284 }
4285
4286 /* Change pattern of INSN to NEW_PAT. */
4287 void
4288 sched_change_pattern (rtx insn, rtx new_pat)
4289 {
4290 int t;
4291
4292 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
4293 gcc_assert (t);
4294 dfa_clear_single_insn_cache (insn);
4295 }
4296
4297 /* Change pattern of INSN to NEW_PAT. Invalidate cached haifa
4298 instruction data. */
4299 static void
4300 haifa_change_pattern (rtx insn, rtx new_pat)
4301 {
4302 sched_change_pattern (insn, new_pat);
4303
4304 /* Invalidate INSN_COST, so it'll be recalculated. */
4305 INSN_COST (insn) = -1;
4306 /* Invalidate INSN_TICK, so it'll be recalculated. */
4307 INSN_TICK (insn) = INVALID_TICK;
4308 }
4309
4310 /* -1 - can't speculate,
4311 0 - for speculation with REQUEST mode it is OK to use
4312 current instruction pattern,
4313 1 - need to change pattern for *NEW_PAT to be speculative. */
4314 int
4315 sched_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4316 {
4317 gcc_assert (current_sched_info->flags & DO_SPECULATION
4318 && (request & SPECULATIVE)
4319 && sched_insn_is_legitimate_for_speculation_p (insn, request));
4320
4321 if ((request & spec_info->mask) != request)
4322 return -1;
4323
4324 if (request & BE_IN_SPEC
4325 && !(request & BEGIN_SPEC))
4326 return 0;
4327
4328 return targetm.sched.speculate_insn (insn, request, new_pat);
4329 }
4330
4331 static int
4332 haifa_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4333 {
4334 gcc_assert (sched_deps_info->generate_spec_deps
4335 && !IS_SPECULATION_CHECK_P (insn));
4336
4337 if (HAS_INTERNAL_DEP (insn)
4338 || SCHED_GROUP_P (insn))
4339 return -1;
4340
4341 return sched_speculate_insn (insn, request, new_pat);
4342 }
4343
4344 /* Print some information about block BB, which starts with HEAD and
4345 ends with TAIL, before scheduling it.
4346 I is zero if the scheduler is about to start with a fresh ebb. */
4347 static void
4348 dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
4349 {
4350 if (!i)
4351 fprintf (sched_dump,
4352 ";; ======================================================\n");
4353 else
4354 fprintf (sched_dump,
4355 ";; =====================ADVANCING TO=====================\n");
4356 fprintf (sched_dump,
4357 ";; -- basic block %d from %d to %d -- %s reload\n",
4358 bb->index, INSN_UID (head), INSN_UID (tail),
4359 (reload_completed ? "after" : "before"));
4360 fprintf (sched_dump,
4361 ";; ======================================================\n");
4362 fprintf (sched_dump, "\n");
4363 }
4364
4365 /* Unlink basic block notes and labels and save them, so they
4366 can be easily restored. We unlink basic block notes in the EBB to
4367 provide backward compatibility with the previous code, as target backends
4368 assume that there'll be only instructions between
4369 current_sched_info->{head and tail}. We restore these notes as soon
4370 as we can.
4371 FIRST (LAST) is the first (last) basic block in the ebb.
4372 NB: In the usual case (FIRST == LAST) nothing is really done. */
4373 void
4374 unlink_bb_notes (basic_block first, basic_block last)
4375 {
4376 /* We DON'T unlink basic block notes of the first block in the ebb. */
4377 if (first == last)
4378 return;
4379
4380 bb_header = XNEWVEC (rtx, last_basic_block);
4381
4382 /* Make a sentinel. */
4383 if (last->next_bb != EXIT_BLOCK_PTR)
4384 bb_header[last->next_bb->index] = 0;
4385
4386 first = first->next_bb;
4387 do
4388 {
4389 rtx prev, label, note, next;
4390
4391 label = BB_HEAD (last);
4392 if (LABEL_P (label))
4393 note = NEXT_INSN (label);
4394 else
4395 note = label;
4396 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4397
4398 prev = PREV_INSN (label);
4399 next = NEXT_INSN (note);
4400 gcc_assert (prev && next);
4401
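/* Splice the label/note pair out of the insn chain; it is remembered in
   bb_header and put back by restore_bb_notes.  */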
4402 NEXT_INSN (prev) = next;
4403 PREV_INSN (next) = prev;
4404
4405 bb_header[last->index] = label;
4406
4407 if (last == first)
4408 break;
4409
4410 last = last->prev_bb;
4411 }
4412 while (1);
4413 }
4414
4415 /* Restore basic block notes.
4416 FIRST is the first basic block in the ebb. */
4417 static void
4418 restore_bb_notes (basic_block first)
4419 {
4420 if (!bb_header)
4421 return;
4422
4423 /* We DON'T unlink basic block notes of the first block in the ebb. */
4424 first = first->next_bb;
4425 /* Remember: FIRST is actually the second basic block in the ebb. */
4426
4427 while (first != EXIT_BLOCK_PTR
4428 && bb_header[first->index])
4429 {
4430 rtx prev, label, note, next;
4431
4432 label = bb_header[first->index];
4433 prev = PREV_INSN (label);
4434 next = NEXT_INSN (prev);
4435
4436 if (LABEL_P (label))
4437 note = NEXT_INSN (label);
4438 else
4439 note = label;
4440 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4441
4442 bb_header[first->index] = 0;
4443
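/* Splice the saved label/note pair back into the insn chain.  */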
4444 NEXT_INSN (prev) = label;
4445 NEXT_INSN (note) = next;
4446 PREV_INSN (next) = note;
4447
4448 first = first->next_bb;
4449 }
4450
4451 free (bb_header);
4452 bb_header = 0;
4453 }
4454
4455 /* Helper function.
4456 Fix CFG after both in- and inter-block movement of
4457 control_flow_insn_p JUMP. */
4458 static void
4459 fix_jump_move (rtx jump)
4460 {
4461 basic_block bb, jump_bb, jump_bb_next;
4462
4463 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4464 jump_bb = BLOCK_FOR_INSN (jump);
4465 jump_bb_next = jump_bb->next_bb;
4466
4467 gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
4468 || IS_SPECULATION_BRANCHY_CHECK_P (jump));
4469
4470 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
4471 /* If jump_bb_next is not empty. */
4472 BB_END (jump_bb) = BB_END (jump_bb_next);
4473
4474 if (BB_END (bb) != PREV_INSN (jump))
4475 /* Then there are instructions after jump that should be moved
4476 to jump_bb_next. */
4477 BB_END (jump_bb_next) = BB_END (bb);
4478 else
4479 /* Otherwise jump_bb_next is empty. */
4480 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
4481
4482 /* To make assertion in move_insn happy. */
4483 BB_END (bb) = PREV_INSN (jump);
4484
4485 update_bb_for_insn (jump_bb_next);
4486 }
4487
4488 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
4489 static void
4490 move_block_after_check (rtx jump)
4491 {
4492 basic_block bb, jump_bb, jump_bb_next;
4493 VEC(edge,gc) *t;
4494
4495 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4496 jump_bb = BLOCK_FOR_INSN (jump);
4497 jump_bb_next = jump_bb->next_bb;
4498
4499 update_bb_for_insn (jump_bb);
4500
4501 gcc_assert (IS_SPECULATION_CHECK_P (jump)
4502 || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
4503
4504 unlink_block (jump_bb_next);
4505 link_block (jump_bb_next, bb);
4506
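/* Rotate the successor vectors: BB takes JUMP_BB's successors, JUMP_BB
   takes JUMP_BB_NEXT's, and JUMP_BB_NEXT takes BB's original ones.  */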
4507 t = bb->succs;
4508 bb->succs = 0;
4509 move_succs (&(jump_bb->succs), bb);
4510 move_succs (&(jump_bb_next->succs), jump_bb);
4511 move_succs (&t, jump_bb_next);
4512
4513 df_mark_solutions_dirty ();
4514
4515 common_sched_info->fix_recovery_cfg
4516 (bb->index, jump_bb->index, jump_bb_next->index);
4517 }
4518
4519 /* Helper function for move_block_after_check.
4520 This function attaches the edge vector pointed to by SUCCSP to
4521 block TO. */
4522 static void
4523 move_succs (VEC(edge,gc) **succsp, basic_block to)
4524 {
4525 edge e;
4526 edge_iterator ei;
4527
4528 gcc_assert (to->succs == 0);
4529
4530 to->succs = *succsp;
4531
4532 FOR_EACH_EDGE (e, ei, to->succs)
4533 e->src = to;
4534
4535 *succsp = 0;
4536 }
4537
4538 /* Remove INSN from the instruction stream.
4539 INSN should not have any dependencies. */
4540 static void
4541 sched_remove_insn (rtx insn)
4542 {
4543 sd_finish_insn (insn);
4544
4545 change_queue_index (insn, QUEUE_NOWHERE);
4546 current_sched_info->add_remove_insn (insn, 1);
4547 remove_insn (insn);
4548 }
4549
4550 /* Clear priorities of all instructions that are forward dependent on INSN.
4551 Store in the vector pointed to by ROOTS_PTR the insns on which priority ()
4552 should be invoked to initialize all cleared priorities. */
4553 static void
4554 clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
4555 {
4556 sd_iterator_def sd_it;
4557 dep_t dep;
4558 bool insn_is_root_p = true;
4559
4560 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
4561
4562 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4563 {
4564 rtx pro = DEP_PRO (dep);
4565
4566 if (INSN_PRIORITY_STATUS (pro) >= 0
4567 && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
4568 {
4569 /* If DEP doesn't contribute to priority then INSN itself should
4570 be added to priority roots. */
4571 if (contributes_to_priority_p (dep))
4572 insn_is_root_p = false;
4573
4574 INSN_PRIORITY_STATUS (pro) = -1;
4575 clear_priorities (pro, roots_ptr);
4576 }
4577 }
4578
4579 if (insn_is_root_p)
4580 VEC_safe_push (rtx, heap, *roots_ptr, insn);
4581 }
4582
4583 /* Recompute priorities of instructions whose priorities might have been
4584 changed. ROOTS is a vector of instructions whose priority computation will
4585 trigger initialization of all cleared priorities. */
4586 static void
4587 calc_priorities (rtx_vec_t roots)
4588 {
4589 int i;
4590 rtx insn;
4591
4592 for (i = 0; VEC_iterate (rtx, roots, i, insn); i++)
4593 priority (insn);
4594 }
4595
4596
4597 /* Add dependences between JUMP and other instructions in the recovery
4598 block. INSN is the first insn in the recovery block. */
4599 static void
4600 add_jump_dependencies (rtx insn, rtx jump)
4601 {
4602 do
4603 {
4604 insn = NEXT_INSN (insn);
4605 if (insn == jump)
4606 break;
4607
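/* An insn with no forward dependencies inside REC gets an anti
   dependence on JUMP, so the jump cannot be scheduled ahead of it.  */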
4608 if (sd_lists_empty_p (insn, SD_LIST_FORW))
4609 {
4610 dep_def _new_dep, *new_dep = &_new_dep;
4611
4612 init_dep (new_dep, insn, jump, REG_DEP_ANTI);
4613 sd_add_dep (new_dep, false);
4614 }
4615 }
4616 while (1);
4617
4618 gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
4619 }
4620
4621 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
4622 rtx
4623 bb_note (basic_block bb)
4624 {
4625 rtx note;
4626
4627 note = BB_HEAD (bb);
4628 if (LABEL_P (note))
4629 note = NEXT_INSN (note);
4630
4631 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4632 return note;
4633 }
4634
4635 #ifdef ENABLE_CHECKING
4636 /* Helper function for check_cfg.
4637 Return nonzero if the edge vector pointed to by EL has an edge with TYPE in
4638 its flags. */
4639 static int
4640 has_edge_p (VEC(edge,gc) *el, int type)
4641 {
4642 edge e;
4643 edge_iterator ei;
4644
4645 FOR_EACH_EDGE (e, ei, el)
4646 if (e->flags & type)
4647 return 1;
4648 return 0;
4649 }
4650
4651 /* Check a few properties of the CFG between HEAD and TAIL.
4652 If HEAD (TAIL) is NULL, check from the beginning (to the end) of the
4653 instruction stream. */
4654 static void
4655 check_cfg (rtx head, rtx tail)
4656 {
4657 rtx next_tail;
4658 basic_block bb = 0;
4659 int not_first = 0, not_last;
4660
4661 if (head == NULL)
4662 head = get_insns ();
4663 if (tail == NULL)
4664 tail = get_last_insn ();
4665 next_tail = NEXT_INSN (tail);
4666
4667 do
4668 {
4669 not_last = head != tail;
4670
4671 if (not_first)
4672 gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
4673 if (not_last)
4674 gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
4675
4676 if (LABEL_P (head)
4677 || (NOTE_INSN_BASIC_BLOCK_P (head)
4678 && (!not_first
4679 || (not_first && !LABEL_P (PREV_INSN (head))))))
4680 {
4681 gcc_assert (bb == 0);
4682 bb = BLOCK_FOR_INSN (head);
4683 if (bb != 0)
4684 gcc_assert (BB_HEAD (bb) == head);
4685 else
4686 /* This is the case of a jump table. See inside_basic_block_p (). */
4687 gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
4688 }
4689
4690 if (bb == 0)
4691 {
4692 gcc_assert (!inside_basic_block_p (head));
4693 head = NEXT_INSN (head);
4694 }
4695 else
4696 {
4697 gcc_assert (inside_basic_block_p (head)
4698 || NOTE_P (head));
4699 gcc_assert (BLOCK_FOR_INSN (head) == bb);
4700
4701 if (LABEL_P (head))
4702 {
4703 head = NEXT_INSN (head);
4704 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
4705 }
4706 else
4707 {
4708 if (control_flow_insn_p (head))
4709 {
4710 gcc_assert (BB_END (bb) == head);
4711
4712 if (any_uncondjump_p (head))
4713 gcc_assert (EDGE_COUNT (bb->succs) == 1
4714 && BARRIER_P (NEXT_INSN (head)));
4715 else if (any_condjump_p (head))
4716 gcc_assert (/* Usual case. */
4717 (EDGE_COUNT (bb->succs) > 1
4718 && !BARRIER_P (NEXT_INSN (head)))
4719 /* Or jump to the next instruction. */
4720 || (EDGE_COUNT (bb->succs) == 1
4721 && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
4722 == JUMP_LABEL (head))));
4723 }
4724 if (BB_END (bb) == head)
4725 {
4726 if (EDGE_COUNT (bb->succs) > 1)
4727 gcc_assert (control_flow_insn_p (head)
4728 || has_edge_p (bb->succs, EDGE_COMPLEX));
4729 bb = 0;
4730 }
4731
4732 head = NEXT_INSN (head);
4733 }
4734 }
4735
4736 not_first = 1;
4737 }
4738 while (head != next_tail);
4739
4740 gcc_assert (bb == 0);
4741 }
4742
4743 #endif /* ENABLE_CHECKING */
4744
4745 /* Extend per basic block data structures. */
4746 static void
4747 extend_bb (void)
4748 {
4749 if (sched_scan_info->extend_bb)
4750 sched_scan_info->extend_bb ();
4751 }
4752
4753 /* Init data for BB. */
4754 static void
4755 init_bb (basic_block bb)
4756 {
4757 if (sched_scan_info->init_bb)
4758 sched_scan_info->init_bb (bb);
4759 }
4760
4761 /* Extend per insn data structures. */
4762 static void
4763 extend_insn (void)
4764 {
4765 if (sched_scan_info->extend_insn)
4766 sched_scan_info->extend_insn ();
4767 }
4768
4769 /* Init data structures for INSN. */
4770 static void
4771 init_insn (rtx insn)
4772 {
4773 if (sched_scan_info->init_insn)
4774 sched_scan_info->init_insn (insn);
4775 }
4776
4777 /* Init all insns in BB. */
4778 static void
4779 init_insns_in_bb (basic_block bb)
4780 {
4781 rtx insn;
4782
4783 FOR_BB_INSNS (bb, insn)
4784 init_insn (insn);
4785 }
4786
4787 /* A driver function to add a set of basic blocks (BBS),
4788 a single basic block (BB), a set of insns (INSNS) or a single insn (INSN)
4789 to the scheduling region. */
4790 void
4791 sched_scan (const struct sched_scan_info_def *ssi,
4792 bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
4793 {
4794 sched_scan_info = ssi;
4795
4796 if (bbs != NULL || bb != NULL)
4797 {
4798 extend_bb ();
4799
4800 if (bbs != NULL)
4801 {
4802 unsigned i;
4803 basic_block x;
4804
4805 for (i = 0; VEC_iterate (basic_block, bbs, i, x); i++)
4806 init_bb (x);
4807 }
4808
4809 if (bb != NULL)
4810 init_bb (bb);
4811 }
4812
4813 extend_insn ();
4814
4815 if (bbs != NULL)
4816 {
4817 unsigned i;
4818 basic_block x;
4819
4820 for (i = 0; VEC_iterate (basic_block, bbs, i, x); i++)
4821 init_insns_in_bb (x);
4822 }
4823
4824 if (bb != NULL)
4825 init_insns_in_bb (bb);
4826
4827 if (insns != NULL)
4828 {
4829 unsigned i;
4830 rtx x;
4831
4832 for (i = 0; VEC_iterate (rtx, insns, i, x); i++)
4833 init_insn (x);
4834 }
4835
4836 if (insn != NULL)
4837 init_insn (insn);
4838 }
4839
4840
4841 /* Extend data structures for logical insn UID. */
4842 static void
4843 luids_extend_insn (void)
4844 {
4845 int new_luids_max_uid = get_max_uid () + 1;
4846
4847 VEC_safe_grow_cleared (int, heap, sched_luids, new_luids_max_uid);
4848 }
4849
4850 /* Initialize LUID for INSN. */
4851 static void
4852 luids_init_insn (rtx insn)
4853 {
4854 int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
4855 int luid;
4856
4857 if (i >= 0)
4858 {
4859 luid = sched_max_luid;
4860 sched_max_luid += i;
4861 }
4862 else
4863 luid = -1;
4864
4865 SET_INSN_LUID (insn, luid);
4866 }
4867
4868 /* Initialize luids for BBS, BB, INSNS and INSN.
4869 The hook common_sched_info->luid_for_non_insn () is used to determine
4870 if notes, labels, etc. need luids. */
4871 void
4872 sched_init_luids (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
4873 {
4874 const struct sched_scan_info_def ssi =
4875 {
4876 NULL, /* extend_bb */
4877 NULL, /* init_bb */
4878 luids_extend_insn, /* extend_insn */
4879 luids_init_insn /* init_insn */
4880 };
4881
4882 sched_scan (&ssi, bbs, bb, insns, insn);
4883 }
4884
4885 /* Free LUIDs. */
4886 void
4887 sched_finish_luids (void)
4888 {
4889 VEC_free (int, heap, sched_luids);
4890 sched_max_luid = 1;
4891 }
4892
4893 /* Return logical uid of INSN. Helpful while debugging. */
4894 int
4895 insn_luid (rtx insn)
4896 {
4897 return INSN_LUID (insn);
4898 }
4899
4900 /* Extend per insn data in the target. */
4901 void
4902 sched_extend_target (void)
4903 {
4904 if (targetm.sched.h_i_d_extended)
4905 targetm.sched.h_i_d_extended ();
4906 }
4907
4908 /* Extend global scheduler structures (those that live across calls to
4909 schedule_block) to include information about the just-emitted INSN. */
4910 static void
4911 extend_h_i_d (void)
4912 {
4913 int reserve = (get_max_uid () + 1
4914 - VEC_length (haifa_insn_data_def, h_i_d));
4915 if (reserve > 0
4916 && ! VEC_space (haifa_insn_data_def, h_i_d, reserve))
4917 {
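/* Grow to 3/2 of the current maximal uid, presumably to amortize the
   cost of repeated single-insn extensions.  */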
4918 VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d,
4919 3 * get_max_uid () / 2);
4920 sched_extend_target ();
4921 }
4922 }
4923
4924 /* Initialize the h_i_d entry of INSN with default values.
4925 Values that are not explicitly initialized here hold zero. */
4926 static void
4927 init_h_i_d (rtx insn)
4928 {
4929 if (INSN_LUID (insn) > 0)
4930 {
4931 INSN_COST (insn) = -1;
4932 find_insn_reg_weight (insn);
4933 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
4934 INSN_TICK (insn) = INVALID_TICK;
4935 INTER_TICK (insn) = INVALID_TICK;
4936 TODO_SPEC (insn) = HARD_DEP;
4937 }
4938 }
4939
4940 /* Initialize haifa_insn_data for BBS, BB, INSNS and INSN. */
4941 void
4942 haifa_init_h_i_d (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
4943 {
4944 const struct sched_scan_info_def ssi =
4945 {
4946 NULL, /* extend_bb */
4947 NULL, /* init_bb */
4948 extend_h_i_d, /* extend_insn */
4949 init_h_i_d /* init_insn */
4950 };
4951
4952 sched_scan (&ssi, bbs, bb, insns, insn);
4953 }
4954
4955 /* Finalize haifa_insn_data. */
4956 void
4957 haifa_finish_h_i_d (void)
4958 {
4959 VEC_free (haifa_insn_data_def, heap, h_i_d);
4960 }
4961
4962 /* Init data for the new insn INSN. */
4963 static void
4964 haifa_init_insn (rtx insn)
4965 {
4966 gcc_assert (insn != NULL);
4967
4968 sched_init_luids (NULL, NULL, NULL, insn);
4969 sched_extend_target ();
4970 sched_deps_init (false);
4971 haifa_init_h_i_d (NULL, NULL, NULL, insn);
4972
4973 if (adding_bb_to_current_region_p)
4974 {
4975 sd_init_insn (insn);
4976
4977 /* Extend dependency caches by one element. */
4978 extend_dependency_caches (1, false);
4979 }
4980 }
4981
4982 /* Init data for the new basic block BB which comes after AFTER. */
4983 static void
4984 haifa_init_only_bb (basic_block bb, basic_block after)
4985 {
4986 gcc_assert (bb != NULL);
4987
4988 sched_init_bbs ();
4989
4990 if (common_sched_info->add_block)
4991 /* This changes only data structures of the front-end. */
4992 common_sched_info->add_block (bb, after);
4993 }
4994
4995 /* A generic version of sched_split_block (). */
4996 basic_block
4997 sched_split_block_1 (basic_block first_bb, rtx after)
4998 {
4999 edge e;
5000
5001 e = split_block (first_bb, after);
5002 gcc_assert (e->src == first_bb);
5003
5004 /* sched_split_block emits note if *check == BB_END. Probably it
5005 is better to rip that note off. */
5006
5007 return e->dest;
5008 }
5009
5010 /* A generic version of sched_create_empty_bb (). */
5011 basic_block
5012 sched_create_empty_bb_1 (basic_block after)
5013 {
5014 return create_empty_bb (after);
5015 }
5016
5017 /* Insert PAT as an INSN into the schedule and update the necessary data
5018 structures to account for it. */
5019 rtx
5020 sched_emit_insn (rtx pat)
5021 {
5022 rtx insn = emit_insn_after (pat, last_scheduled_insn);
5023 last_scheduled_insn = insn;
5024 haifa_init_insn (insn);
5025 return insn;
5026 }
5027
5028 #endif /* INSN_SCHEDULING */