/* Instruction scheduling pass.
   Copyright (C) 1992, 93-98, 1999 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* Instruction scheduling pass.

   This pass implements list scheduling within basic blocks.  It is
   run after flow analysis, but before register allocation.  The
   scheduler works as follows:

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data-dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning
   values to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.

   Function unit conflicts are resolved during reverse list scheduling
   by tracking the time when each insn is committed to the schedule
   and from that, the time the function units it uses must be free.
   As insns on the ready list are considered for scheduling, those
   that would result in a blockage of the already committed insns are
   queued until no blockage will result.  Among the remaining insns on
   the ready list to be considered, the first one with the largest
   potential for causing a subsequent blockage is chosen.

   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose insn with lowest conflict cost, ties broken by
   2.  choose insn with the longest path to end of bb, ties broken by
   3.  choose insn that kills the most registers, ties broken by
   4.  choose insn that conflicts with the most ready insns, or finally
   5.  choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using LOG_LINKS.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not do any branch scheduling.  But
   that is ok, since we can use GNU's delayed branch scheduling
   pass to take care of this case.

   Also note that no further optimizations based on algebraic identities
   are performed, so this pass would be a good one to perform instruction
   splitting, such as breaking up a multiply instruction into shifts
   and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   This pass must update information that subsequent passes expect to be
   correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BLOCK_HEAD,
   BLOCK_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks that have been scheduled.  */
\f
#include "config.h"
#include "system.h"
#include "rtl.h"
#include "basic-block.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "recog.h"

#ifndef INSN_SCHEDULING
void
schedule_insns (dump_file)
     FILE *dump_file ATTRIBUTE_UNUSED;
{
}
#else /* INSN_SCHEDULING -- rest of file */

extern char *reg_known_equiv_p;
extern rtx *reg_known_value;

/* Arrays set up by scheduling for the same respective purposes as
   similar-named arrays set up by flow analysis.  We work with these
   arrays during the scheduling pass so we can compare values against
   unscheduled code.

   Values of these arrays are copied at the end of this pass into the
   arrays set up by flow analysis.  */
static int *sched_reg_n_calls_crossed;
static int *sched_reg_live_length;

/* Element N is the next insn that sets (hard or pseudo) register
   N within the current basic block; or zero, if there is no
   such insn.  Needed for new registers which may be introduced
   by splitting insns.  */
static rtx *reg_last_uses;
static rtx *reg_last_sets;
static regset reg_pending_sets;
static int reg_pending_sets_all;

/* Vector indexed by INSN_UID giving the original ordering of the insns.  */
static int *insn_luid;
#define INSN_LUID(INSN) (insn_luid[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving each instruction a priority.  */
static int *insn_priority;
#define INSN_PRIORITY(INSN) (insn_priority[INSN_UID (INSN)])

static short *insn_costs;
#define INSN_COST(INSN) insn_costs[INSN_UID (INSN)]

/* Vector indexed by INSN_UID giving an encoding of the function units
   used.  */
static short *insn_units;
#define INSN_UNIT(INSN) insn_units[INSN_UID (INSN)]

/* Vector indexed by INSN_UID giving an encoding of the blockage range
   function.  The unit and the range are encoded.  */
static unsigned int *insn_blockage;
#define INSN_BLOCKAGE(INSN) insn_blockage[INSN_UID (INSN)]
#define UNIT_BITS 5
#define BLOCKAGE_MASK ((1 << BLOCKAGE_BITS) - 1)
#define ENCODE_BLOCKAGE(U,R) \
  ((((U) << UNIT_BITS) << BLOCKAGE_BITS \
    | MIN_BLOCKAGE_COST (R)) << BLOCKAGE_BITS \
   | MAX_BLOCKAGE_COST (R))
#define UNIT_BLOCKED(B) ((B) >> (2 * BLOCKAGE_BITS))
#define BLOCKAGE_RANGE(B) \
  (((((B) >> BLOCKAGE_BITS) & BLOCKAGE_MASK) << (HOST_BITS_PER_INT / 2)) \
   | ((B) & BLOCKAGE_MASK))

/* Encodings of the `<name>_unit_blockage_range' function.  */
#define MIN_BLOCKAGE_COST(R) ((R) >> (HOST_BITS_PER_INT / 2))
#define MAX_BLOCKAGE_COST(R) ((R) & ((1 << (HOST_BITS_PER_INT / 2)) - 1))
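
/* For illustration (example added here, not part of the original
   encoding comments): a blockage range R packs the minimum cost into
   the upper half of an int and the maximum cost into the lower half.
   Assuming HOST_BITS_PER_INT is 32, R == (3 << 16) | 8 decodes as
   MIN_BLOCKAGE_COST (R) == 3 and MAX_BLOCKAGE_COST (R) == 8.  */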

#define DONE_PRIORITY -1
#define MAX_PRIORITY 0x7fffffff
#define TAIL_PRIORITY 0x7ffffffe
#define LAUNCH_PRIORITY 0x7f000001
#define DONE_PRIORITY_P(INSN) (INSN_PRIORITY (INSN) < 0)
#define LOW_PRIORITY_P(INSN) ((INSN_PRIORITY (INSN) & 0x7f000000) == 0)

/* Vector indexed by INSN_UID giving number of insns referring to this insn.  */
static int *insn_ref_count;
#define INSN_REF_COUNT(INSN) (insn_ref_count[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving line-number note in effect for each
   insn.  For line-number notes, this indicates whether the note may be
   reused.  */
static rtx *line_note;
#define LINE_NOTE(INSN) (line_note[INSN_UID (INSN)])

/* Vector indexed by basic block number giving the starting line-number
   for each basic block.  */
static rtx *line_note_head;

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
static rtx note_list;

/* Regsets telling whether a given register is live or dead before the last
   scheduled insn.  Must scan the instructions once before scheduling to
   determine what registers are live or dead at the end of the block.  */
static regset bb_dead_regs;
static regset bb_live_regs;

/* Regset telling whether a given register is live after the insn currently
   being scheduled.  Before processing an insn, this is equal to bb_live_regs
   above.  This is used so that we can find registers that are newly born/dead
   after processing an insn.  */
static regset old_live_regs;

/* The chain of REG_DEAD notes.  REG_DEAD notes are removed from all insns
   during the initial scan and reused later.  If there are not exactly as
   many REG_DEAD notes in the post scheduled code as there were in the
   prescheduled code then we trigger an abort because this indicates a bug.  */
static rtx dead_notes;

/* Queues, etc.  */

/* An instruction is ready to be scheduled when all insns following it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.  Insns may
   move from the "Ready" list to the "Queued" set if they are blocked
   due to a function unit conflict.

   The "Pending" list (P) are the insns in the LOG_LINKS of the unscheduled
   insns, i.e., those that are ready, queued, and pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transition (R->Q) is implemented in `schedule_select' when an
   insn is found to have a function unit conflict with the already
   committed insns.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented at the top of the scheduling
   loop in `schedule_block' as time passes or stalls are introduced.  */
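
/* Summary of the transitions described above, for illustration:

     P -> R or Q   in schedule_insn, as dependencies are satisfied
     Q -> R        in schedule_block, as time passes or stalls occur
     R -> Q        in schedule_select, on a function unit conflict
     R -> S        in schedule_block, when the best insn is chosen  */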

/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  INSN_QUEUE_SIZE is a power of two larger than
   MAX_BLOCKAGE and MAX_READY_COST computed by genattr.c.  This is the
   longest time an insn may be queued.  */
static rtx insn_queue[INSN_QUEUE_SIZE];
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & (INSN_QUEUE_SIZE-1))
#define NEXT_Q_AFTER(X,C) (((X)+C) & (INSN_QUEUE_SIZE-1))
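
/* For illustration (example values assumed, not from the original
   source): because INSN_QUEUE_SIZE is a power of two, masking with
   INSN_QUEUE_SIZE - 1 makes the index wrap around.  E.g., with
   INSN_QUEUE_SIZE == 16, NEXT_Q_AFTER (14, 5) == (14 + 5) & 15 == 3.  */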

/* Vector indexed by INSN_UID giving the minimum clock tick at which
   the insn becomes ready.  This is used to note timing constraints for
   insns in the pending list.  */
static int *insn_tick;
#define INSN_TICK(INSN) (insn_tick[INSN_UID (INSN)])

/* Data structure for keeping track of register information
   during that register's life.  */

struct sometimes
{
  int regno;
  int live_length;
  int calls_crossed;
};

/* Forward declarations.  */
static void add_dependence PROTO((rtx, rtx, enum reg_note));
static void remove_dependence PROTO((rtx, rtx));
static rtx find_insn_list PROTO((rtx, rtx));
static int insn_unit PROTO((rtx));
static unsigned int blockage_range PROTO((int, rtx));
static void clear_units PROTO((void));
static void prepare_unit PROTO((int));
static int actual_hazard_this_instance PROTO((int, int, rtx, int, int));
static void schedule_unit PROTO((int, rtx, int));
static int actual_hazard PROTO((int, rtx, int, int));
static int potential_hazard PROTO((int, rtx, int));
static int insn_cost PROTO((rtx, rtx, rtx));
static int priority PROTO((rtx));
static void free_pending_lists PROTO((void));
static void add_insn_mem_dependence PROTO((rtx *, rtx *, rtx, rtx));
static void flush_pending_lists PROTO((rtx, int));
static void sched_analyze_1 PROTO((rtx, rtx));
static void sched_analyze_2 PROTO((rtx, rtx));
static void sched_analyze_insn PROTO((rtx, rtx, rtx));
static int sched_analyze PROTO((rtx, rtx));
static void sched_note_set PROTO((rtx, int));
static int rank_for_schedule PROTO((const GENERIC_PTR, const GENERIC_PTR));
static void swap_sort PROTO((rtx *, int));
static void queue_insn PROTO((rtx, int));
static int birthing_insn_p PROTO((rtx));
static void adjust_priority PROTO((rtx));
static int schedule_insn PROTO((rtx, rtx *, int, int));
static int schedule_select PROTO((rtx *, int, int, FILE *));
static void create_reg_dead_note PROTO((rtx, rtx));
static void attach_deaths PROTO((rtx, rtx, int));
static void attach_deaths_insn PROTO((rtx));
static rtx unlink_notes PROTO((rtx, rtx));
static int new_sometimes_live PROTO((struct sometimes *, int, int));
static void finish_sometimes_live PROTO((struct sometimes *, int));
static rtx reemit_notes PROTO((rtx, rtx));
static void schedule_block PROTO((int, FILE *));
static rtx regno_use_in PROTO((int, rtx));
static void split_hard_reg_notes PROTO((rtx, rtx, rtx));
static void new_insn_dead_notes PROTO((rtx, rtx, rtx, rtx));
static void update_n_sets PROTO((rtx, int));
static void update_flow_info PROTO((rtx, rtx, rtx, rtx));

/* Main entry point of this file.  */
void schedule_insns PROTO((FILE *));
\f
#define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))

/* Helper functions for instruction scheduling.  */

/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
   of dependence that this link represents.  */

static void
add_dependence (insn, elem, dep_type)
     rtx insn;
     rtx elem;
     enum reg_note dep_type;
{
  rtx link, next;

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return;

  /* If elem is part of a sequence that must be scheduled together, then
     make the dependence point to the last insn of the sequence.
     When HAVE_cc0, it is possible for NOTEs to exist between users and
     setters of the condition codes, so we must skip past notes here.
     Otherwise, NOTEs are impossible here.  */

  next = NEXT_INSN (elem);

#ifdef HAVE_cc0
  while (next && GET_CODE (next) == NOTE)
    next = NEXT_INSN (next);
#endif

  if (next && SCHED_GROUP_P (next)
      && GET_CODE (next) != CODE_LABEL)
    {
      /* Notes will never intervene here though, so don't bother checking
         for them.  */
      /* We must reject CODE_LABELs, so that we don't get confused by one
         that has LABEL_PRESERVE_P set, which is represented by the same
         bit in the rtl as SCHED_GROUP_P.  A CODE_LABEL can never be
         SCHED_GROUP_P.  */
      while (NEXT_INSN (next) && SCHED_GROUP_P (NEXT_INSN (next))
             && GET_CODE (NEXT_INSN (next)) != CODE_LABEL)
        next = NEXT_INSN (next);

      /* Again, don't depend an insn on itself.  */
      if (insn == next)
        return;

      /* Make the dependence to NEXT, the last insn of the group, instead
         of the original ELEM.  */
      elem = next;
    }

  /* Check that we don't already have this dependence.  */
  for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
    if (XEXP (link, 0) == elem)
      {
        /* If this is a more restrictive type of dependence than the existing
           one, then change the existing dependence to this type.  */
        if ((int) dep_type < (int) REG_NOTE_KIND (link))
          PUT_REG_NOTE_KIND (link, dep_type);
        return;
      }
  /* Might want to check one level of transitivity to save conses.  */

  link = rtx_alloc (INSN_LIST);
  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);
  XEXP (link, 0) = elem;
  XEXP (link, 1) = LOG_LINKS (insn);
  LOG_LINKS (insn) = link;
}

/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
   of INSN.  Abort if not found.  */

static void
remove_dependence (insn, elem)
     rtx insn;
     rtx elem;
{
  rtx prev, link;
  int found = 0;

  for (prev = 0, link = LOG_LINKS (insn); link; link = XEXP (link, 1))
    {
      if (XEXP (link, 0) == elem)
        {
          RTX_INTEGRATED_P (link) = 1;
          if (prev)
            XEXP (prev, 1) = XEXP (link, 1);
          else
            LOG_LINKS (insn) = XEXP (link, 1);
          found = 1;
        }
      else
        prev = link;
    }

  if (! found)
    abort ();
  return;
}
\f
#ifndef __GNUC__
#define __inline
#endif

/* Computation of memory dependencies.  */

/* The *_insns and *_mems are paired lists.  Each pending memory operation
   will have a pointer to the MEM rtx on one list and a pointer to the
   containing insn on the other list in the same place in the list.  */

/* We can't use add_dependence like the old code did, because a single insn
   may have multiple memory accesses, and hence needs to be on the list
   once for each memory access.  Add_dependence won't let you add an insn
   to a list more than once.  */
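
/* For illustration (hypothetical insn, not from the original source):
   an insn like "*p = *q;" appears once on the pending-read lists (for
   the MEM *q) and once on the pending-write lists (for the MEM *p),
   so the same insn can be on both lists at the same time.  */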

/* An INSN_LIST containing all insns with pending read operations.  */
static rtx pending_read_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending reads.  */
static rtx pending_read_mems;

/* An INSN_LIST containing all insns with pending write operations.  */
static rtx pending_write_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending writes.  */
static rtx pending_write_mems;

/* Indicates the combined length of the two pending lists.  We must prevent
   these lists from ever growing too large since the number of dependencies
   produced is at least O(N*N), and execution time is at least O(4*N*N), as
   a function of the length of these pending lists.  */

static int pending_lists_length;

/* An INSN_LIST containing all INSN_LISTs allocated but currently unused.  */

static rtx unused_insn_list;

/* An EXPR_LIST containing all EXPR_LISTs allocated but currently unused.  */

static rtx unused_expr_list;

/* The last insn upon which all memory references must depend.
   This is an insn which flushed the pending lists, creating a dependency
   between it and all previously pending memory references.  This creates
   a barrier (or a checkpoint) which no memory reference is allowed to cross.

   This includes all non constant CALL_INSNs.  When we do interprocedural
   alias analysis, this restriction can be relaxed.
   This may also be an INSN that writes memory if the pending lists grow
   too large.  */

static rtx last_pending_memory_flush;

/* The last function call we have seen.  All hard regs, and, of course,
   the last function call, must depend on this.  */

static rtx last_function_call;
/* The LOG_LINKS field of this is a list of insns which use a pseudo register
   that does not already cross a call.  We create dependencies between each
   of those insns and the next call insn, to ensure that they won't cross a
   call after scheduling is done.  */

static rtx sched_before_next_call;

/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions.  */

static rtx last_scheduled_insn;

/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) anti dependence: write follows read
   (3) output dependence: write follows write

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
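
/* For illustration (hypothetical insns, not from the original source):
   given "x = *p; *q = y;", the write through q must not be scheduled
   above the read through p unless p and q are known not to alias.
   That ordering constraint is the anti-dependence, case (2) above.  */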
\f
/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

__inline static rtx
find_insn_list (insn, list)
     rtx insn;
     rtx list;
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
        return list;
      list = XEXP (list, 1);
    }
  return 0;
}
/* Compute the function units used by INSN.  This caches the value
   returned by function_units_used.  A function unit is encoded as the
   unit number if the value is non-negative and the complement of a
   mask if the value is negative.  A function unit index is the
   non-negative encoding.  */

__inline static int
insn_unit (insn)
     rtx insn;
{
  register int unit = INSN_UNIT (insn);

  if (unit == 0)
    {
      recog_memoized (insn);

      /* A USE insn, or something else we don't need to understand.
         We can't pass these directly to function_units_used because it will
         trigger a fatal error for unrecognizable insns.  */
      if (INSN_CODE (insn) < 0)
        unit = -1;
      else
        {
          unit = function_units_used (insn);
          /* Increment non-negative values so we can cache zero.  */
          if (unit >= 0) unit++;
        }
      /* We only cache 16 bits of the result, so if the value is out of
         range, don't cache it.  */
      if (FUNCTION_UNITS_SIZE < HOST_BITS_PER_SHORT
          || unit >= 0
          || (~unit & ((1 << (HOST_BITS_PER_SHORT - 1)) - 1)) == 0)
        INSN_UNIT (insn) = unit;
    }
  return (unit > 0 ? unit - 1 : unit);
}
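
/* For illustration (example values assumed, not from the original
   source): if an insn uses only unit 3, function_units_used returns 3;
   it is cached as 4 so that a cached value of zero means "not yet
   computed", and decoded back to 3 on return.  If an insn uses units 0
   and 2 (mask 0x5), the encoding is ~0x5, a negative value whose
   complement callers scan bit by bit, as prepare_unit does below.  */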

/* Compute the blockage range for executing INSN on UNIT.  This caches
   the value returned by the blockage_range_function for the unit.
   These values are encoded in an int where the upper half gives the
   minimum value and the lower half gives the maximum value.  */

__inline static unsigned int
blockage_range (unit, insn)
     int unit;
     rtx insn;
{
  unsigned int blockage = INSN_BLOCKAGE (insn);
  unsigned int range;

  if ((int) UNIT_BLOCKED (blockage) != unit + 1)
    {
      range = function_units[unit].blockage_range_function (insn);
      /* We only cache the blockage range for one unit and then only if
         the values fit.  */
      if (HOST_BITS_PER_INT >= UNIT_BITS + 2 * BLOCKAGE_BITS)
        INSN_BLOCKAGE (insn) = ENCODE_BLOCKAGE (unit + 1, range);
    }
  else
    range = BLOCKAGE_RANGE (blockage);

  return range;
}

/* A vector indexed by function unit instance giving the last insn to use
   the unit.  The value of the function unit instance index for unit U
   instance I is (U + I * FUNCTION_UNITS_SIZE).  */
static rtx unit_last_insn[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];

/* A vector indexed by function unit instance giving the minimum time when
   the unit will unblock based on the maximum blockage cost.  */
static int unit_tick[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];

/* A vector indexed by function unit number giving the number of insns
   that remain to use the unit.  */
static int unit_n_insns[FUNCTION_UNITS_SIZE];

/* Reset the function unit state to the null state.  */

static void
clear_units ()
{
  bzero ((char *) unit_last_insn, sizeof (unit_last_insn));
  bzero ((char *) unit_tick, sizeof (unit_tick));
  bzero ((char *) unit_n_insns, sizeof (unit_n_insns));
}

/* Record an insn as one that will use the units encoded by UNIT.  */

__inline static void
prepare_unit (unit)
     int unit;
{
  int i;

  if (unit >= 0)
    unit_n_insns[unit]++;
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
        prepare_unit (i);
}

/* Return the actual hazard cost of executing INSN on the unit UNIT,
   instance INSTANCE at time CLOCK if the previous actual hazard cost
   was COST.  */

__inline static int
actual_hazard_this_instance (unit, instance, insn, clock, cost)
     int unit, instance, clock, cost;
     rtx insn;
{
  int tick = unit_tick[instance];

  if (tick - clock > cost)
    {
      /* The scheduler is operating in reverse, so INSN is the executing
         insn and the unit's last insn is the candidate insn.  We want a
         more exact measure of the blockage if we execute INSN at CLOCK
         given when we committed the execution of the unit's last insn.

         The blockage value is given by either the unit's max blockage
         constant, blockage range function, or blockage function.  Use
         the most exact form for the given unit.  */

      if (function_units[unit].blockage_range_function)
        {
          if (function_units[unit].blockage_function)
            tick += (function_units[unit].blockage_function
                     (insn, unit_last_insn[instance])
                     - function_units[unit].max_blockage);
          else
            tick += ((int) MAX_BLOCKAGE_COST (blockage_range (unit, insn))
                     - function_units[unit].max_blockage);
        }
      if (tick - clock > cost)
        cost = tick - clock;
    }
  return cost;
}

/* Record INSN as having begun execution on the units encoded by UNIT at
   time CLOCK.  */

__inline static void
schedule_unit (unit, insn, clock)
     int unit, clock;
     rtx insn;
{
  int i;

  if (unit >= 0)
    {
      int instance = unit;
#if MAX_MULTIPLICITY > 1
      /* Find the first free instance of the function unit and use that
         one.  We assume that one is free.  */
      for (i = function_units[unit].multiplicity - 1; i > 0; i--)
        {
          if (! actual_hazard_this_instance (unit, instance, insn, clock, 0))
            break;
          instance += FUNCTION_UNITS_SIZE;
        }
#endif
      unit_last_insn[instance] = insn;
      unit_tick[instance] = (clock + function_units[unit].max_blockage);
    }
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
        schedule_unit (i, insn, clock);
}

/* Return the actual hazard cost of executing INSN on the units encoded by
   UNIT at time CLOCK if the previous actual hazard cost was COST.  */

__inline static int
actual_hazard (unit, insn, clock, cost)
     int unit, clock, cost;
     rtx insn;
{
  int i;

  if (unit >= 0)
    {
      /* Find the instance of the function unit with the minimum hazard.  */
      int instance = unit;
      int best_cost = actual_hazard_this_instance (unit, instance, insn,
                                                   clock, cost);
#if MAX_MULTIPLICITY > 1
      int this_cost;

      if (best_cost > cost)
        {
          for (i = function_units[unit].multiplicity - 1; i > 0; i--)
            {
              instance += FUNCTION_UNITS_SIZE;
              this_cost = actual_hazard_this_instance (unit, instance, insn,
                                                       clock, cost);
              if (this_cost < best_cost)
                {
                  best_cost = this_cost;
                  if (this_cost <= cost)
                    break;
                }
            }
        }
#endif
      cost = MAX (cost, best_cost);
    }
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
        cost = actual_hazard (i, insn, clock, cost);

  return cost;
}

/* Return the potential hazard cost of executing an instruction on the
   units encoded by UNIT if the previous potential hazard cost was COST.
   An insn with a large blockage time is chosen in preference to one
   with a smaller time; an insn that uses a unit that is more likely
   to be used is chosen in preference to one with a unit that is less
   used.  We are trying to minimize a subsequent actual hazard.  */

__inline static int
potential_hazard (unit, insn, cost)
     int unit, cost;
     rtx insn;
{
  int i, ncost;
  unsigned int minb, maxb;

  if (unit >= 0)
    {
      minb = maxb = function_units[unit].max_blockage;
      if (maxb > 1)
        {
          if (function_units[unit].blockage_range_function)
            {
              maxb = minb = blockage_range (unit, insn);
              maxb = MAX_BLOCKAGE_COST (maxb);
              minb = MIN_BLOCKAGE_COST (minb);
            }

          if (maxb > 1)
            {
              /* Make the number of instructions left dominate.  Make the
                 minimum delay dominate the maximum delay.  If all these
                 are the same, use the unit number to add an arbitrary
                 ordering.  Other terms can be added.  */
              ncost = minb * 0x40 + maxb;
              ncost *= (unit_n_insns[unit] - 1) * 0x1000 + unit;
              if (ncost > cost)
                cost = ncost;
            }
        }
    }
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
        cost = potential_hazard (i, insn, cost);

  return cost;
}
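
/* Worked example of the weighting above (values assumed for
   illustration): a unit with minimum blockage 2 and maximum blockage 3
   gives ncost == 2 * 0x40 + 3 == 131; if 4 insns remain on unit number
   1, that is multiplied by 3 * 0x1000 + 1.  The 0x1000 factor is what
   makes the number of remaining insns dominate the comparison.  */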

/* Compute cost of executing INSN given the dependence LINK on the insn USED.
   This is the number of virtual cycles taken between instruction issue and
   instruction results.  */

__inline static int
insn_cost (insn, link, used)
     rtx insn, link, used;
{
  register int cost = INSN_COST (insn);

  if (cost == 0)
    {
      recog_memoized (insn);

      /* A USE insn, or something else we don't need to understand.
         We can't pass these directly to result_ready_cost because it will
         trigger a fatal error for unrecognizable insns.  */
      if (INSN_CODE (insn) < 0)
        {
          INSN_COST (insn) = 1;
          return 1;
        }
      else
        {
          cost = result_ready_cost (insn);

          if (cost < 1)
            cost = 1;

          INSN_COST (insn) = cost;
        }
    }

  /* A USE insn should never require the value used to be computed.  This
     allows the computation of a function's result and parameter values to
     overlap the return and call.  */
  recog_memoized (used);
  if (INSN_CODE (used) < 0)
    LINK_COST_FREE (link) = 1;

  /* If some dependencies vary the cost, compute the adjustment.  Most
     commonly, the adjustment is complete: either the cost is ignored
     (in the case of an output- or anti-dependence), or the cost is
     unchanged.  These values are cached in the link as LINK_COST_FREE
     and LINK_COST_ZERO.  */

  if (LINK_COST_FREE (link))
    cost = 1;
#ifdef ADJUST_COST
  else if (! LINK_COST_ZERO (link))
    {
      int ncost = cost;

      ADJUST_COST (used, link, insn, ncost);
      if (ncost <= 1)
        LINK_COST_FREE (link) = ncost = 1;
      if (cost == ncost)
        LINK_COST_ZERO (link) = 1;
      cost = ncost;
    }
#endif
  return cost;
}

/* Compute the priority number for INSN.  */

static int
priority (insn)
     rtx insn;
{
  if (insn && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
    {
      int prev_priority;
      int max_priority;
      int this_priority = INSN_PRIORITY (insn);
      rtx prev;

      if (this_priority > 0)
        return this_priority;

      max_priority = 1;

      /* Nonzero if these insns must be scheduled together.  */
      if (SCHED_GROUP_P (insn))
        {
          prev = insn;
          while (SCHED_GROUP_P (prev))
            {
              prev = PREV_INSN (prev);
              INSN_REF_COUNT (prev) += 1;
            }
        }

      for (prev = LOG_LINKS (insn); prev; prev = XEXP (prev, 1))
        {
          rtx x = XEXP (prev, 0);

          /* If this was a duplicate of a dependence we already deleted,
             ignore it.  */
          if (RTX_INTEGRATED_P (prev))
            continue;

          /* A dependence pointing to a note or deleted insn is always
             obsolete, because sched_analyze_insn will have created any
             necessary new dependences which replace it.  Notes and deleted
             insns can be created when instructions are deleted by insn
             splitting, or by register allocation.  */
          if (GET_CODE (x) == NOTE || INSN_DELETED_P (x))
            {
              remove_dependence (insn, x);
              continue;
            }

          /* Clear the link cost adjustment bits.  */
          LINK_COST_FREE (prev) = 0;
#ifdef ADJUST_COST
          LINK_COST_ZERO (prev) = 0;
#endif

          /* This priority calculation was chosen because it results in the
             least instruction movement, and does not hurt the performance
             of the resulting code compared to the old algorithm.
             This makes the sched algorithm more stable, which results
             in better code, because there is less register pressure,
             cross jumping is more likely to work, and debugging is easier.

             When all instructions have a latency of 1, there is no need to
             move any instructions.  Subtracting one here ensures that in such
             cases all instructions will end up with a priority of one, and
             hence no scheduling will be done.

             The original code did not subtract the one, and added the
             insn_cost of the current instruction to its priority (e.g.
             move the insn_cost call down to the end).  */

          prev_priority = priority (x) + insn_cost (x, prev, insn) - 1;

          if (prev_priority > max_priority)
            max_priority = prev_priority;
          INSN_REF_COUNT (x) += 1;
        }

      prepare_unit (insn_unit (insn));
      INSN_PRIORITY (insn) = max_priority;
      return INSN_PRIORITY (insn);
    }
  return 0;
}
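
/* Example of the calculation above (hypothetical chain, added for
   illustration): for a dependence chain A <- B <- C where every insn
   has cost 1, priority (A) == priority (B) == priority (C) == 1, so no
   insns are moved; if each result instead takes 2 cycles, the
   priorities become 1, 2 and 3 respectively.  */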
\f
/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
   them to the unused_*_list variables, so that they can be reused.  */

static void
free_pending_lists ()
{
  register rtx link, prev_link;

  if (pending_read_insns)
    {
      prev_link = pending_read_insns;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_insn_list;
      unused_insn_list = pending_read_insns;
      pending_read_insns = 0;
    }

  if (pending_write_insns)
    {
      prev_link = pending_write_insns;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_insn_list;
      unused_insn_list = pending_write_insns;
      pending_write_insns = 0;
    }

  if (pending_read_mems)
    {
      prev_link = pending_read_mems;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_expr_list;
      unused_expr_list = pending_read_mems;
      pending_read_mems = 0;
    }

  if (pending_write_mems)
    {
      prev_link = pending_write_mems;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_expr_list;
      unused_expr_list = pending_write_mems;
      pending_write_mems = 0;
    }
}

/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (insn_list, mem_list, insn, mem)
     rtx *insn_list, *mem_list, insn, mem;
{
  register rtx link;

  if (unused_insn_list)
    {
      link = unused_insn_list;
      unused_insn_list = XEXP (link, 1);
    }
  else
    link = rtx_alloc (INSN_LIST);
  XEXP (link, 0) = insn;
  XEXP (link, 1) = *insn_list;
  *insn_list = link;

  if (unused_expr_list)
    {
      link = unused_expr_list;
      unused_expr_list = XEXP (link, 1);
    }
  else
    link = rtx_alloc (EXPR_LIST);
  XEXP (link, 0) = mem;
  XEXP (link, 1) = *mem_list;
  *mem_list = link;

  pending_lists_length++;
}
\f
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  If ONLY_WRITE, don't flush
   the read list.  */

static void
flush_pending_lists (insn, only_write)
     rtx insn;
     int only_write;
{
  rtx link;

  while (pending_read_insns && ! only_write)
    {
      add_dependence (insn, XEXP (pending_read_insns, 0), REG_DEP_ANTI);

      link = pending_read_insns;
      pending_read_insns = XEXP (pending_read_insns, 1);
      XEXP (link, 1) = unused_insn_list;
      unused_insn_list = link;

      link = pending_read_mems;
      pending_read_mems = XEXP (pending_read_mems, 1);
      XEXP (link, 1) = unused_expr_list;
      unused_expr_list = link;
    }
  while (pending_write_insns)
    {
      add_dependence (insn, XEXP (pending_write_insns, 0), REG_DEP_ANTI);

      link = pending_write_insns;
      pending_write_insns = XEXP (pending_write_insns, 1);
      XEXP (link, 1) = unused_insn_list;
      unused_insn_list = link;

      link = pending_write_mems;
      pending_write_mems = XEXP (pending_write_mems, 1);
      XEXP (link, 1) = unused_expr_list;
      unused_expr_list = link;
    }
  pending_lists_length = 0;

  if (last_pending_memory_flush)
    add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

  last_pending_memory_flush = insn;
}

/* Analyze a single SET or CLOBBER rtx, X, creating all dependencies generated
   by the write to the destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (x, insn)
     rtx x;
     rtx insn;
{
  register int regno;
  register rtx dest = SET_DEST (x);

  if (dest == 0)
    return;

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
        {
          /* The second and third arguments are values read by this insn.  */
          sched_analyze_2 (XEXP (dest, 1), insn);
          sched_analyze_2 (XEXP (dest, 2), insn);
        }
      dest = SUBREG_REG (dest);
    }

  if (GET_CODE (dest) == REG)
    {
      register int i;

      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
          while (--i >= 0)
            {
              rtx u;

              for (u = reg_last_uses[regno+i]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
              reg_last_uses[regno + i] = 0;
              if (reg_last_sets[regno + i])
                add_dependence (insn, reg_last_sets[regno + i],
                                REG_DEP_OUTPUT);
              SET_REGNO_REG_SET (reg_pending_sets, regno + i);
              if ((call_used_regs[regno + i] || global_regs[regno + i])
                  && last_function_call)
                /* Function calls clobber all call_used regs.  */
                add_dependence (insn, last_function_call, REG_DEP_ANTI);
            }
        }
      else
        {
          rtx u;

          for (u = reg_last_uses[regno]; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
          reg_last_uses[regno] = 0;
          if (reg_last_sets[regno])
            add_dependence (insn, reg_last_sets[regno], REG_DEP_OUTPUT);
          SET_REGNO_REG_SET (reg_pending_sets, regno);

          /* Pseudos that are REG_EQUIV to something may be replaced
             by that during reloading.  We need only add dependencies for
             the address in the REG_EQUIV note.  */
          if (! reload_completed
              && reg_known_equiv_p[regno]
              && GET_CODE (reg_known_value[regno]) == MEM)
            sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);

          /* Don't let it cross a call after scheduling if it doesn't
             already cross one.  */
          if (REG_N_CALLS_CROSSED (regno) == 0 && last_function_call)
            add_dependence (insn, last_function_call, REG_DEP_ANTI);
        }
    }
  else if (GET_CODE (dest) == MEM)
    {
      /* Writing memory.  */

      if (pending_lists_length > 32)
        {
          /* Flush all pending reads and writes to prevent the pending lists
             from getting any larger.  Insn scheduling runs too slowly when
             these lists get long.  The number 32 was chosen because it
             seems like a reasonable number.  When compiling GCC with itself,
             this flush occurs 8 times for sparc, and 10 times for m88k using
             the number 32.  */
          flush_pending_lists (insn, 0);
        }
      else
        {
          rtx pending, pending_mem;

          pending = pending_read_insns;
          pending_mem = pending_read_mems;
          while (pending)
            {
              /* If a dependency already exists, don't create a new one.  */
              if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
                if (anti_dependence (XEXP (pending_mem, 0), dest))
                  add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          pending = pending_write_insns;
          pending_mem = pending_write_mems;
          while (pending)
            {
              /* If a dependency already exists, don't create a new one.  */
              if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
                if (output_dependence (XEXP (pending_mem, 0), dest))
                  add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          if (last_pending_memory_flush)
            add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

          add_insn_mem_dependence (&pending_write_insns, &pending_write_mems,
                                   insn, dest);
        }
      sched_analyze_2 (XEXP (dest, 0), insn);
    }

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    sched_analyze_2 (SET_SRC (x), insn);
}

/* Analyze the uses of memory and registers in rtx X in INSN.  */

static void
sched_analyze_2 (x, insn)
     rtx x;
     rtx insn;
{
  register int i;
  register int j;
  register enum rtx_code code;
  register char *fmt;

  if (x == 0)
    return;

  code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
         because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
         this does not mean that this insn is using cc0.  */
      return;

#ifdef HAVE_cc0
    case CC0:
      {
        rtx link, prev;

        /* User of CC0 depends on immediately preceding insn.  */
        SCHED_GROUP_P (insn) = 1;

        /* There may be a note before this insn now, but all notes will
           be removed before we actually try to schedule the insns, so
           it won't cause a problem later.  We must avoid it here though.  */
        prev = prev_nonnote_insn (insn);

        /* Make a copy of all dependencies on the immediately previous insn,
           and add to this insn.  This is so that all the dependencies will
           apply to the group.  Remove an explicit dependence on this insn
           as SCHED_GROUP_P now represents it.  */

        if (find_insn_list (prev, LOG_LINKS (insn)))
          remove_dependence (insn, prev);

        for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
          add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));

        return;
      }
#endif

    case REG:
      {
        int regno = REGNO (x);
        if (regno < FIRST_PSEUDO_REGISTER)
          {
            int i;

            i = HARD_REGNO_NREGS (regno, GET_MODE (x));
            while (--i >= 0)
              {
                reg_last_uses[regno + i]
                  = gen_rtx_INSN_LIST (VOIDmode,
                                       insn, reg_last_uses[regno + i]);
                if (reg_last_sets[regno + i])
                  add_dependence (insn, reg_last_sets[regno + i], 0);
                if ((call_used_regs[regno + i] || global_regs[regno + i])
                    && last_function_call)
                  /* Function calls clobber all call_used regs.  */
                  add_dependence (insn, last_function_call, REG_DEP_ANTI);
              }
          }
        else
          {
            reg_last_uses[regno]
              = gen_rtx_INSN_LIST (VOIDmode, insn, reg_last_uses[regno]);
            if (reg_last_sets[regno])
              add_dependence (insn, reg_last_sets[regno], 0);

            /* Pseudos that are REG_EQUIV to something may be replaced
               by that during reloading.  We need only add dependencies for
               the address in the REG_EQUIV note.  */
            if (! reload_completed
                && reg_known_equiv_p[regno]
                && GET_CODE (reg_known_value[regno]) == MEM)
              sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);

            /* If the register does not already cross any calls, then add this
               insn to the sched_before_next_call list so that it will still
               not cross calls after scheduling.  */
            if (REG_N_CALLS_CROSSED (regno) == 0)
              add_dependence (sched_before_next_call, insn, REG_DEP_ANTI);
          }
        return;
      }

    case MEM:
      {
        /* Reading memory.  */

        rtx pending, pending_mem;

        pending = pending_read_insns;
        pending_mem = pending_read_mems;
        while (pending)
          {
            /* If a dependency already exists, don't create a new one.  */
            if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
              if (read_dependence (XEXP (pending_mem, 0), x))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }

        pending = pending_write_insns;
        pending_mem = pending_write_mems;
        while (pending)
          {
            /* If a dependency already exists, don't create a new one.  */
            if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
              if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
                                   x, rtx_varies_p))
                add_dependence (insn, XEXP (pending, 0), 0);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }
        if (last_pending_memory_flush)
          add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

        /* Always add these dependencies to pending_reads, since
           this insn may be followed by a write.  */
        add_insn_mem_dependence (&pending_read_insns, &pending_read_mems,
                                 insn, x);

        /* Take advantage of tail recursion here.  */
        sched_analyze_2 (XEXP (x, 0), insn);
        return;
      }

    case ASM_OPERANDS:
    case ASM_INPUT:
    case UNSPEC_VOLATILE:
    case TRAP_IF:
      {
        rtx u;

        /* Traditional and volatile asm instructions must be considered to use
           and clobber all hard registers, all pseudo-registers and all of
           memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

           Consider for instance a volatile asm that changes the fpu rounding
           mode.  An insn should not be moved across this even if it only uses
           pseudo-regs because it might give an incorrectly rounded result.  */
        if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
          {
            int max_reg = max_reg_num ();
            for (i = 0; i < max_reg; i++)
              {
                for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
                reg_last_uses[i] = 0;
                if (reg_last_sets[i])
                  add_dependence (insn, reg_last_sets[i], 0);
              }
            reg_pending_sets_all = 1;

            flush_pending_lists (insn, 0);
          }
        /* For all ASM_OPERANDS, we must traverse the vector of input
           operands.  We cannot just fall through here since then we would
           be confused by the ASM_INPUT rtx inside ASM_OPERANDS, which does
           not indicate a traditional asm, unlike its normal usage.  */
1448
1449 if (code == ASM_OPERANDS)
1450 {
1451 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
1452 sched_analyze_2 (ASM_OPERANDS_INPUT (x, j), insn);
1453 return;
1454 }
1455 break;
1456 }
1457
1458 case PRE_DEC:
1459 case POST_DEC:
1460 case PRE_INC:
1461 case POST_INC:
1462 /* These both read and modify the result. We must handle them as writes
1463 to get proper dependencies for following instructions. We must handle
1464 them as reads to get proper dependencies from this to previous
1465 instructions. Thus we need to pass them to both sched_analyze_1
1466 and sched_analyze_2. We must call sched_analyze_2 first in order
1467 to get the proper antecedent for the read. */
1468 sched_analyze_2 (XEXP (x, 0), insn);
1469 sched_analyze_1 (x, insn);
1470 return;
1471
1472 default:
1473 break;
1474 }
1475
1476 /* Other cases: walk the insn. */
1477 fmt = GET_RTX_FORMAT (code);
1478 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1479 {
1480 if (fmt[i] == 'e')
1481 sched_analyze_2 (XEXP (x, i), insn);
1482 else if (fmt[i] == 'E')
1483 for (j = 0; j < XVECLEN (x, i); j++)
1484 sched_analyze_2 (XVECEXP (x, i, j), insn);
1485 }
1486 }
1487
1488 /* Analyze an INSN with pattern X to find all dependencies. */
1489
1490 static void
1491 sched_analyze_insn (x, insn, loop_notes)
1492 rtx x, insn;
1493 rtx loop_notes;
1494 {
1495 register RTX_CODE code = GET_CODE (x);
1496 rtx link;
1497 int maxreg = max_reg_num ();
1498 int i;
1499
1500 if (code == SET || code == CLOBBER)
1501 sched_analyze_1 (x, insn);
1502 else if (code == PARALLEL)
1503 {
1504 register int i;
1505 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1506 {
1507 code = GET_CODE (XVECEXP (x, 0, i));
1508 if (code == SET || code == CLOBBER)
1509 sched_analyze_1 (XVECEXP (x, 0, i), insn);
1510 else
1511 sched_analyze_2 (XVECEXP (x, 0, i), insn);
1512 }
1513 }
1514 else
1515 sched_analyze_2 (x, insn);
1516
1517 /* Mark registers CLOBBERED or used by called function. */
1518 if (GET_CODE (insn) == CALL_INSN)
1519 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1520 {
1521 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
1522 sched_analyze_1 (XEXP (link, 0), insn);
1523 else
1524 sched_analyze_2 (XEXP (link, 0), insn);
1525 }
1526
1527 /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic block, then
1528 we must be sure that no instructions are scheduled across it.
1529 Otherwise, the reg_n_refs info (which depends on loop_depth) would
1530 become incorrect. */
1531
1532 if (loop_notes)
1533 {
1534 int max_reg = max_reg_num ();
1535 rtx link;
1536
1537 for (i = 0; i < max_reg; i++)
1538 {
1539 rtx u;
1540 for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
1541 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1542 reg_last_uses[i] = 0;
1543 if (reg_last_sets[i])
1544 add_dependence (insn, reg_last_sets[i], 0);
1545 }
1546 reg_pending_sets_all = 1;
1547
1548 flush_pending_lists (insn, 0);
1549
1550 link = loop_notes;
1551 while (XEXP (link, 1))
1552 link = XEXP (link, 1);
1553 XEXP (link, 1) = REG_NOTES (insn);
1554 REG_NOTES (insn) = loop_notes;
1555 }
1556
1557 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
1558 {
1559 reg_last_sets[i] = insn;
1560 });
1561 CLEAR_REG_SET (reg_pending_sets);
1562
1563 if (reg_pending_sets_all)
1564 {
1565 for (i = 0; i < maxreg; i++)
1566 reg_last_sets[i] = insn;
1567 reg_pending_sets_all = 0;
1568 }
1569
1570 /* Handle function calls and function returns created by the epilogue
1571 threading code. */
1572 if (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN)
1573 {
1574 rtx dep_insn;
1575 rtx prev_dep_insn;
1576
1577 /* When scheduling instructions, we make sure calls don't lose their
1578 accompanying USE insns by depending them one on another in order.
1579
1580 Also, we must do the same thing for returns created by the epilogue
1581 threading code. Note this code works only in this special case,
1582 because other passes make no guarantee that they will never emit
1583 an instruction between a USE and a RETURN. There is such a guarantee
1584 for USE instructions immediately before a call. */
1585
1586 prev_dep_insn = insn;
1587 dep_insn = PREV_INSN (insn);
1588 while (GET_CODE (dep_insn) == INSN
1589 && GET_CODE (PATTERN (dep_insn)) == USE
1590 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == REG)
1591 {
1592 SCHED_GROUP_P (prev_dep_insn) = 1;
1593
1594 /* Make a copy of all dependencies on dep_insn, and add to insn.
1595 This is so that all of the dependencies will apply to the
1596 group. */
1597
1598 for (link = LOG_LINKS (dep_insn); link; link = XEXP (link, 1))
1599 add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
1600
1601 prev_dep_insn = dep_insn;
1602 dep_insn = PREV_INSN (dep_insn);
1603 }
1604 }
1605 }
1606
1607 /* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
1608 for every dependency. */
1609
1610 static int
1611 sched_analyze (head, tail)
1612 rtx head, tail;
1613 {
1614 register rtx insn;
1615 register int n_insns = 0;
1616 register rtx u;
1617 register int luid = 0;
1618 rtx loop_notes = 0;
1619
1620 for (insn = head; ; insn = NEXT_INSN (insn))
1621 {
1622 INSN_LUID (insn) = luid++;
1623
1624 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
1625 {
1626 sched_analyze_insn (PATTERN (insn), insn, loop_notes);
1627 loop_notes = 0;
1628 n_insns += 1;
1629 }
1630 else if (GET_CODE (insn) == CALL_INSN)
1631 {
1632 rtx x;
1633 register int i;
1634
1635 /* Any instruction using a hard register which may get clobbered
1636 by a call needs to be marked as dependent on this call.
1637 This prevents a use of a hard return reg from being moved
1638 past a void call (i.e. it does not explicitly set the hard
1639 return reg). */
1640
1641 /* If this call is followed by a NOTE_INSN_SETJMP, then assume that
1642 all registers, not just hard registers, may be clobbered by this
1643 call. */
1644
1645 /* Insn, being a CALL_INSN, magically depends on
1646 `last_function_call' already. */
1647
1648 if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == NOTE
1649 && NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP)
1650 {
1651 int max_reg = max_reg_num ();
1652 for (i = 0; i < max_reg; i++)
1653 {
1654 for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
1655 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1656 reg_last_uses[i] = 0;
1657 if (reg_last_sets[i])
1658 add_dependence (insn, reg_last_sets[i], 0);
1659 }
1660 reg_pending_sets_all = 1;
1661
1662 /* Add a pair of fake REG_NOTEs which we will later
1663 convert back into a NOTE_INSN_SETJMP note. See
1664 reemit_notes for why we use a pair of NOTEs. */
1665
1666 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD,
1667 GEN_INT (0),
1668 REG_NOTES (insn));
1669 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD,
1670 GEN_INT (NOTE_INSN_SETJMP),
1671 REG_NOTES (insn));
1672 }
1673 else
1674 {
1675 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1676 if (call_used_regs[i] || global_regs[i])
1677 {
1678 for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
1679 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1680 reg_last_uses[i] = 0;
1681 if (reg_last_sets[i])
1682 add_dependence (insn, reg_last_sets[i], REG_DEP_ANTI);
1683 SET_REGNO_REG_SET (reg_pending_sets, i);
1684 }
1685 }
1686
1687 /* For each insn which shouldn't cross a call, add a dependence
1688 between that insn and this call insn. */
1689 x = LOG_LINKS (sched_before_next_call);
1690 while (x)
1691 {
1692 add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
1693 x = XEXP (x, 1);
1694 }
1695 LOG_LINKS (sched_before_next_call) = 0;
1696
1697 sched_analyze_insn (PATTERN (insn), insn, loop_notes);
1698 loop_notes = 0;
1699
1700 /* In the absence of interprocedural alias analysis, we must flush
1701 all pending reads and writes, and start new dependencies starting
1702 from here. But only flush writes for constant calls (which may
1703 be passed a pointer to something we haven't written yet). */
1704 flush_pending_lists (insn, CONST_CALL_P (insn));
1705
1706 /* Depend this function call (actually, the user of this
1707 function call) on all hard register clobberage. */
1708 last_function_call = insn;
1709 n_insns += 1;
1710 }
1711
1712 /* See comments on reemit_notes as to why we do this. */
1713 else if (GET_CODE (insn) == NOTE
1714 && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
1715 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
1716 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
1717 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END
1718 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_START
1719 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_END
1720 || (NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP
1721 && GET_CODE (PREV_INSN (insn)) != CALL_INSN)))
1722 {
1723 loop_notes = gen_rtx_EXPR_LIST (REG_DEAD,
1724 GEN_INT (NOTE_BLOCK_NUMBER (insn)),
1725 loop_notes);
1726 loop_notes = gen_rtx_EXPR_LIST (REG_DEAD,
1727 GEN_INT (NOTE_LINE_NUMBER (insn)),
1728 loop_notes);
1729 CONST_CALL_P (loop_notes) = CONST_CALL_P (insn);
1730 }
1731
1732 if (insn == tail)
1733 return n_insns;
1734 }
1735
1736 abort ();
1737 }
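/* Editorial aside (hedged, not original code): the CALL_INSN handling
   above is deliberately conservative.  For an ordinary call only the
   call-used and global hard registers are flushed, but for

	(call_insn ...)
	(note NOTE_INSN_SETJMP)

   every register, hard or pseudo, gains an anti-dependence on the call,
   since a later longjmp could otherwise observe values that had been
   moved across the setjmp.  */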
1738 \f
1739 /* Called when we see a set of a register. If death is true, then we are
1740 scanning backwards. Mark that register as unborn. If nobody says
1741 otherwise, that is how things will remain. If death is false, then we
1742 are scanning forwards. Mark that register as being born. */
1743
1744 static void
1745 sched_note_set (x, death)
1746 rtx x;
1747 int death;
1748 {
1749 register int regno;
1750 register rtx reg = SET_DEST (x);
1751 int subreg_p = 0;
1752
1753 if (reg == 0)
1754 return;
1755
1756 while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == STRICT_LOW_PART
1757 || GET_CODE (reg) == SIGN_EXTRACT || GET_CODE (reg) == ZERO_EXTRACT)
1758 {
1759 /* Must treat modification of just one hardware register of a multi-reg
1760 value or just a byte field of a register exactly the same way that
1761 mark_set_1 in flow.c does, i.e. anything except a paradoxical subreg
1762 does not kill the entire register. */
1763 if (GET_CODE (reg) != SUBREG
1764 || REG_SIZE (SUBREG_REG (reg)) > REG_SIZE (reg))
1765 subreg_p = 1;
1766
1767 reg = SUBREG_REG (reg);
1768 }
1769
1770 if (GET_CODE (reg) != REG)
1771 return;
1772
1773 /* Global registers are always live, so the code below does not apply
1774 to them. */
1775
1776 regno = REGNO (reg);
1777 if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
1778 {
1779 if (death)
1780 {
1781 /* If we only set part of the register, then this set does not
1782 kill it. */
1783 if (subreg_p)
1784 return;
1785
1786 /* Try killing this register. */
1787 if (regno < FIRST_PSEUDO_REGISTER)
1788 {
1789 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
1790 while (--j >= 0)
1791 {
1792 CLEAR_REGNO_REG_SET (bb_live_regs, regno + j);
1793 SET_REGNO_REG_SET (bb_dead_regs, regno + j);
1794 }
1795 }
1796 else
1797 {
1798 CLEAR_REGNO_REG_SET (bb_live_regs, regno);
1799 SET_REGNO_REG_SET (bb_dead_regs, regno);
1800 }
1801 }
1802 else
1803 {
1804 /* Make the register live again. */
1805 if (regno < FIRST_PSEUDO_REGISTER)
1806 {
1807 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
1808 while (--j >= 0)
1809 {
1810 SET_REGNO_REG_SET (bb_live_regs, regno + j);
1811 CLEAR_REGNO_REG_SET (bb_dead_regs, regno + j);
1812 }
1813 }
1814 else
1815 {
1816 SET_REGNO_REG_SET (bb_live_regs, regno);
1817 CLEAR_REGNO_REG_SET (bb_dead_regs, regno);
1818 }
1819 }
1820 }
1821 }
1822 \f
1823 /* Macros and functions for keeping the priority queue sorted, and
1824 dealing with queueing and dequeueing of instructions. */
1825
1826 #define SCHED_SORT(READY, NEW_READY, OLD_READY) \
1827 do { if ((NEW_READY) - (OLD_READY) == 1) \
1828 swap_sort (READY, NEW_READY); \
1829 else if ((NEW_READY) - (OLD_READY) > 1) \
1830 qsort (READY, NEW_READY, sizeof (rtx), rank_for_schedule); } \
1831 while (0)
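/* Editorial aside -- a hedged usage sketch, not original code: SCHED_SORT
   takes the cheap path when exactly one insn was appended to an already
   sorted READY vector, and falls back to qsort otherwise.  Hypothetical
   calls:  */
#if 0
  SCHED_SORT (ready, n_ready, n_ready - 1);  /* one new element: swap_sort */
  SCHED_SORT (ready, n_ready, 0);	     /* many new elements: qsort */
#endif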
1832
1833 /* Returns a positive value if y is preferred; returns a negative value if
1834 x is preferred. Should never return 0, since that will make the sort
1835 unstable. */
1836
1837 static int
1838 rank_for_schedule (x, y)
1839 const GENERIC_PTR x;
1840 const GENERIC_PTR y;
1841 {
1842 rtx tmp = *(rtx *)y;
1843 rtx tmp2 = *(rtx *)x;
1844 rtx link;
1845 int tmp_class, tmp2_class;
1846 int value;
1847
1848 /* Choose the instruction with the highest priority, if different. */
1849 if ((value = INSN_PRIORITY (tmp) - INSN_PRIORITY (tmp2)))
1850 return value;
1851
1852 if (last_scheduled_insn)
1853 {
1854 /* Classify the instructions into three classes:
1855	 1) Data dependent on last scheduled insn.
1856 2) Anti/Output dependent on last scheduled insn.
1857 3) Independent of last scheduled insn, or has latency of one.
1858 Choose the insn from the highest numbered class if different. */
1859 link = find_insn_list (tmp, LOG_LINKS (last_scheduled_insn));
1860 if (link == 0 || insn_cost (tmp, link, last_scheduled_insn) == 1)
1861 tmp_class = 3;
1862 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
1863 tmp_class = 1;
1864 else
1865 tmp_class = 2;
1866
1867 link = find_insn_list (tmp2, LOG_LINKS (last_scheduled_insn));
1868 if (link == 0 || insn_cost (tmp2, link, last_scheduled_insn) == 1)
1869 tmp2_class = 3;
1870 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
1871 tmp2_class = 1;
1872 else
1873 tmp2_class = 2;
1874
1875 if ((value = tmp_class - tmp2_class))
1876 return value;
1877 }
1878
1879 /* If insns are equally good, sort by INSN_LUID (original insn order),
1880 so that we make the sort stable. This minimizes instruction movement,
1881 thus minimizing sched's effect on debugging and cross-jumping. */
1882 return INSN_LUID (tmp) - INSN_LUID (tmp2);
1883 }
1884
1885 /* Resort the array A, in which only the element at index N-1 may be out of order.  */
1886
1887 __inline static void
1888 swap_sort (a, n)
1889 rtx *a;
1890 int n;
1891 {
1892 rtx insn = a[n-1];
1893 int i = n-2;
1894
1895 while (i >= 0 && rank_for_schedule (a+i, &insn) >= 0)
1896 {
1897 a[i+1] = a[i];
1898 i -= 1;
1899 }
1900 a[i+1] = insn;
1901 }
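/* Editorial aside -- a hedged worked example: swap_sort is a single pass
   of insertion sort.  With a[] = {I0, I1, I2, X} and N == 4, the loop
   shifts I2, then I1, one slot rightward for as long as rank_for_schedule
   still prefers X, then drops X into the hole, restoring sorted order in
   O(N) instead of a full qsort.  */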
1902
1903 static int max_priority;
1904
1905 /* Add INSN to the insn queue so that it fires at least N_CYCLES
1906 before the currently executing insn. */
1907
1908 __inline static void
1909 queue_insn (insn, n_cycles)
1910 rtx insn;
1911 int n_cycles;
1912 {
1913 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
1914 NEXT_INSN (insn) = insn_queue[next_q];
1915 insn_queue[next_q] = insn;
1916 q_size += 1;
1917 }
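/* Editorial aside -- a hedged sketch of the queue mechanics: insn_queue is
   a circular array of buckets, and NEXT_INSN doubles as the within-bucket
   link (apparently safe, since the insn chain is rebuilt as insns are
   scheduled).  Assuming the usual masked-add definition of NEXT_Q_AFTER
   earlier in this file, a hypothetical call behaves as:  */
#if 0
  queue_insn (insn, 3);	/* chains INSN into
			   insn_queue[(q_ptr + 3) & (INSN_QUEUE_SIZE - 1)];
			   it returns to the ready list once q_ptr has
			   advanced three times.  */
#endif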
1918
1919 /* Return nonzero if PAT is the pattern of an insn which makes a
1920 register live. */
1921
1922 __inline static int
1923 birthing_insn_p (pat)
1924 rtx pat;
1925 {
1926 int j;
1927
1928 if (reload_completed == 1)
1929 return 0;
1930
1931 if (GET_CODE (pat) == SET
1932 && GET_CODE (SET_DEST (pat)) == REG)
1933 {
1934 rtx dest = SET_DEST (pat);
1935 int i = REGNO (dest);
1936
1937 /* It would be more accurate to use refers_to_regno_p or
1938 reg_mentioned_p to determine when the dest is not live before this
1939 insn. */
1940
1941 if (REGNO_REG_SET_P (bb_live_regs, i))
1942 return (REG_N_SETS (i) == 1);
1943
1944 return 0;
1945 }
1946 if (GET_CODE (pat) == PARALLEL)
1947 {
1948 for (j = 0; j < XVECLEN (pat, 0); j++)
1949 if (birthing_insn_p (XVECEXP (pat, 0, j)))
1950 return 1;
1951 }
1952 return 0;
1953 }
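/* Editorial aside -- a hedged example: a pattern (set (reg:SI 100) ...) is
   "birthing" when pseudo 100 is currently recorded live in bb_live_regs
   and REG_N_SETS reports this as its only set; a PARALLEL births if any
   member SET does.  As the comment above concedes, this is only an
   approximation.  */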
1954
1955 /* PREV is an insn that is ready to execute. Adjust its priority if that
1956 will help shorten register lifetimes. */
1957
1958 __inline static void
1959 adjust_priority (prev)
1960 rtx prev;
1961 {
1962 /* Trying to shorten register lives after reload has completed
1963 is useless and wrong. It gives inaccurate schedules. */
1964 if (reload_completed == 0)
1965 {
1966 rtx note;
1967 int n_deaths = 0;
1968
1969 /* ??? This code has no effect, because REG_DEAD notes are removed
1970 before we ever get here. */
1971 for (note = REG_NOTES (prev); note; note = XEXP (note, 1))
1972 if (REG_NOTE_KIND (note) == REG_DEAD)
1973 n_deaths += 1;
1974
1975 /* Defer scheduling insns which kill registers, since that
1976 shortens register lives. Prefer scheduling insns which
1977 make registers live for the same reason. */
1978 switch (n_deaths)
1979 {
1980 default:
1981 INSN_PRIORITY (prev) >>= 3;
1982 break;
1983 case 3:
1984 INSN_PRIORITY (prev) >>= 2;
1985 break;
1986 case 2:
1987 case 1:
1988 INSN_PRIORITY (prev) >>= 1;
1989 break;
1990 case 0:
1991 if (birthing_insn_p (PATTERN (prev)))
1992 {
1993 int max = max_priority;
1994
1995 if (max > INSN_PRIORITY (prev))
1996 INSN_PRIORITY (prev) = max;
1997 }
1998 break;
1999 }
2000 #ifdef ADJUST_PRIORITY
2001 ADJUST_PRIORITY (prev);
2002 #endif
2003 }
2004 }
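/* Editorial aside -- a hedged reading of the heuristic above: one or two
   REG_DEAD notes halve an insn's priority, three quarter it, four or more
   cut it to an eighth, while an insn that births a register and kills
   none is promoted up to the running max_priority.  The effect, as the
   comment says, is to defer register-killing insns and prefer
   register-creating ones, shortening register lifetimes.  */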
2005
2006 /* INSN is the "currently executing insn". Launch each insn which was
2007 waiting on INSN (in the backwards dataflow sense). READY is a
2008 vector of insns which are ready to fire. N_READY is the number of
2009 elements in READY. CLOCK is the current virtual cycle. */
2010
2011 static int
2012 schedule_insn (insn, ready, n_ready, clock)
2013 rtx insn;
2014 rtx *ready;
2015 int n_ready;
2016 int clock;
2017 {
2018 rtx link;
2019 int new_ready = n_ready;
2020
2021 if (MAX_BLOCKAGE > 1)
2022 schedule_unit (insn_unit (insn), insn, clock);
2023
2024 if (LOG_LINKS (insn) == 0)
2025 return n_ready;
2026
2027 /* This is used by the function adjust_priority above. */
2028 if (n_ready > 0)
2029 max_priority = MAX (INSN_PRIORITY (ready[0]), INSN_PRIORITY (insn));
2030 else
2031 max_priority = INSN_PRIORITY (insn);
2032
2033 for (link = LOG_LINKS (insn); link != 0; link = XEXP (link, 1))
2034 {
2035 rtx prev = XEXP (link, 0);
2036 int cost = insn_cost (prev, link, insn);
2037
2038 if ((INSN_REF_COUNT (prev) -= 1) != 0)
2039 {
2040 /* We satisfied one requirement to fire PREV. Record the earliest
2041 time when PREV can fire. No need to do this if the cost is 1,
2042 because PREV can fire no sooner than the next cycle. */
2043 if (cost > 1)
2044 INSN_TICK (prev) = MAX (INSN_TICK (prev), clock + cost);
2045 }
2046 else
2047 {
2048 /* We satisfied the last requirement to fire PREV. Ensure that all
2049 timing requirements are satisfied. */
2050 if (INSN_TICK (prev) - clock > cost)
2051 cost = INSN_TICK (prev) - clock;
2052
2053 /* Adjust the priority of PREV and either put it on the ready
2054 list or queue it. */
2055 adjust_priority (prev);
2056 if (cost <= 1)
2057 ready[new_ready++] = prev;
2058 else
2059 queue_insn (prev, cost);
2060 }
2061 }
2062
2063 return new_ready;
2064 }
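/* Editorial aside -- a hedged trace with invented numbers: suppose PREV
   has two unresolved successors and this link costs 3 at clock 10.  The
   first resolution leaves INSN_REF_COUNT nonzero and merely records
   INSN_TICK (PREV) = 13.  When the last resolution arrives at clock 12
   over a cost-1 link, the earlier tick still binds: the cost works out to
   13 - 12 = 1, so PREV goes straight onto the ready list; had the tick
   been 14, the cost would be 2 and PREV would be queued for two cycles.  */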
2065
2066 /* Given N_READY insns in the ready list READY at time CLOCK, queue
2067 those that are blocked due to function unit hazards and rearrange
2068 the remaining ones to minimize subsequent function unit hazards. */
2069
2070 static int
2071 schedule_select (ready, n_ready, clock, file)
2072 rtx *ready;
2073 int n_ready, clock;
2074 FILE *file;
2075 {
2076 int pri = INSN_PRIORITY (ready[0]);
2077 int i, j, k, q, cost, best_cost, best_insn = 0, new_ready = n_ready;
2078 rtx insn;
2079
2080 /* Work down the ready list in groups of instructions with the same
2081 priority value. Queue insns in the group that are blocked and
2082 select among those that remain for the one with the largest
2083 potential hazard. */
2084 for (i = 0; i < n_ready; i = j)
2085 {
2086 int opri = pri;
2087 for (j = i + 1; j < n_ready; j++)
2088 if ((pri = INSN_PRIORITY (ready[j])) != opri)
2089 break;
2090
2091 /* Queue insns in the group that are blocked. */
2092 for (k = i, q = 0; k < j; k++)
2093 {
2094 insn = ready[k];
2095 if ((cost = actual_hazard (insn_unit (insn), insn, clock, 0)) != 0)
2096 {
2097 q++;
2098 ready[k] = 0;
2099 queue_insn (insn, cost);
2100 if (file)
2101 fprintf (file, "\n;; blocking insn %d for %d cycles",
2102 INSN_UID (insn), cost);
2103 }
2104 }
2105 new_ready -= q;
2106
2107 /* Check the next group if all insns were queued. */
2108 if (j - i - q == 0)
2109 continue;
2110
2111 /* If more than one remains, select the first one with the largest
2112 potential hazard. */
2113 else if (j - i - q > 1)
2114 {
2115 best_cost = -1;
2116 for (k = i; k < j; k++)
2117 {
2118 if ((insn = ready[k]) == 0)
2119 continue;
2120 if ((cost = potential_hazard (insn_unit (insn), insn, 0))
2121 > best_cost)
2122 {
2123 best_cost = cost;
2124 best_insn = k;
2125 }
2126 }
2127 }
2128 /* We have found a suitable insn to schedule. */
2129 break;
2130 }
2131
2132 /* Move the best insn to the front of the ready list. */
2133 if (best_insn != 0)
2134 {
2135 if (file)
2136 {
2137 fprintf (file, ", now");
2138 for (i = 0; i < n_ready; i++)
2139 if (ready[i])
2140 fprintf (file, " %d", INSN_UID (ready[i]));
2141 fprintf (file, "\n;; insn %d has a greater potential hazard",
2142 INSN_UID (ready[best_insn]));
2143 }
2144 for (i = best_insn; i > 0; i--)
2145 {
2146 insn = ready[i-1];
2147 ready[i-1] = ready[i];
2148 ready[i] = insn;
2149 }
2150 }
2151
2152 /* Compact the ready list. */
2153 if (new_ready < n_ready)
2154 for (i = j = 0; i < n_ready; i++)
2155 if (ready[i])
2156 ready[j++] = ready[i];
2157
2158 return new_ready;
2159 }
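/* Editorial aside -- a hedged example of the selection above: with ready
   priorities {9, 9, 7}, only the two 9s compete this cycle.  A blocked 9
   goes back to the queue for the length of its blockage; if both are
   blocked, the 7-group gets its turn, and if two or more 9s survive, the
   one with the larger potential hazard is moved to the front of READY so
   it issues first.  */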
2160
2161 /* Add a REG_DEAD note for REG to INSN, reusing a REG_DEAD note from the
2162 dead_notes list. */
2163
2164 static void
2165 create_reg_dead_note (reg, insn)
2166 rtx reg, insn;
2167 {
2168 rtx link;
2169
2170 /* The number of registers killed after scheduling must be the same as the
2171 number of registers killed before scheduling. The number of REG_DEAD
2172 notes may not be conserved, i.e. two SImode hard register REG_DEAD notes
2173 might become one DImode hard register REG_DEAD note, but the number of
2174 registers killed will be conserved.
2175
2176 We carefully remove REG_DEAD notes from the dead_notes list, so that
2177 there will be none left at the end. If we run out early, then there
2178 is a bug somewhere in flow, combine and/or sched. */
2179
2180 if (dead_notes == 0)
2181 {
2182 #if 1
2183 abort ();
2184 #else
2185 link = rtx_alloc (EXPR_LIST);
2186 PUT_REG_NOTE_KIND (link, REG_DEAD);
2187 #endif
2188 }
2189 else
2190 {
2191 /* Number of regs killed by REG. */
2192 int regs_killed = (REGNO (reg) >= FIRST_PSEUDO_REGISTER ? 1
2193 : HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)));
2194 /* Number of regs killed by REG_DEAD notes taken off the list. */
2195 int reg_note_regs;
2196
2197 link = dead_notes;
2198 reg_note_regs = (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
2199 : HARD_REGNO_NREGS (REGNO (XEXP (link, 0)),
2200 GET_MODE (XEXP (link, 0))));
2201 while (reg_note_regs < regs_killed)
2202 {
2203 /* LINK might be zero if we killed more registers after scheduling
2204 than before, and the last hard register we kill is actually
2205 multiple hard regs. */
2206 if (link == NULL_RTX)
2207 abort ();
2208
2209 link = XEXP (link, 1);
2210 reg_note_regs += (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
2211 : HARD_REGNO_NREGS (REGNO (XEXP (link, 0)),
2212 GET_MODE (XEXP (link, 0))));
2213 }
2214 dead_notes = XEXP (link, 1);
2215
2216 /* If we took too many regs kills off, put the extra ones back. */
2217 while (reg_note_regs > regs_killed)
2218 {
2219 rtx temp_reg, temp_link;
2220
2221 temp_reg = gen_rtx_REG (word_mode, 0);
2222 temp_link = rtx_alloc (EXPR_LIST);
2223 PUT_REG_NOTE_KIND (temp_link, REG_DEAD);
2224 XEXP (temp_link, 0) = temp_reg;
2225 XEXP (temp_link, 1) = dead_notes;
2226 dead_notes = temp_link;
2227 reg_note_regs--;
2228 }
2229 }
2230
2231 XEXP (link, 0) = reg;
2232 XEXP (link, 1) = REG_NOTES (insn);
2233 REG_NOTES (insn) = link;
2234 }
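/* Editorial aside -- a hedged illustration of the accounting above,
   assuming 32-bit words: if the schedule kills (reg:DI 4) where flow had
   recorded two SImode deaths, REGS_KILLED is 2, the walk consumes two
   SImode notes from dead_notes, and the last one consumed is recycled as
   the new DImode note.  If the walk instead overshoots -- say a DImode
   note is consumed for an SImode kill -- the surplus is repaid by pushing
   dummy word_mode notes back onto dead_notes.  */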
2235
2238 /* Subroutine of attach_deaths_insn--handles the recursive search
2239    through INSN.  If SET_P is true, then X is being modified by the insn.  */
2238
2239 static void
2240 attach_deaths (x, insn, set_p)
2241 rtx x;
2242 rtx insn;
2243 int set_p;
2244 {
2245 register int i;
2246 register int j;
2247 register enum rtx_code code;
2248 register char *fmt;
2249
2250 if (x == 0)
2251 return;
2252
2253 code = GET_CODE (x);
2254
2255 switch (code)
2256 {
2257 case CONST_INT:
2258 case CONST_DOUBLE:
2259 case LABEL_REF:
2260 case SYMBOL_REF:
2261 case CONST:
2262 case CODE_LABEL:
2263 case PC:
2264 case CC0:
2265 /* Get rid of the easy cases first. */
2266 return;
2267
2268 case REG:
2269 {
2270 /* If the register dies in this insn, queue that note, and mark
2271 this register as needing to die. */
2272 /* This code is very similar to mark_used_1 (if set_p is false)
2273 and mark_set_1 (if set_p is true) in flow.c. */
2274
2275 register int regno;
2276 int some_needed;
2277 int all_needed;
2278
2279 if (set_p)
2280 return;
2281
2282 regno = REGNO (x);
2283 all_needed = some_needed = REGNO_REG_SET_P (old_live_regs, regno);
2284 if (regno < FIRST_PSEUDO_REGISTER)
2285 {
2286 int n;
2287
2288 n = HARD_REGNO_NREGS (regno, GET_MODE (x));
2289 while (--n > 0)
2290 {
2291 int needed = (REGNO_REG_SET_P (old_live_regs, regno + n));
2292 some_needed |= needed;
2293 all_needed &= needed;
2294 }
2295 }
2296
2297 /* If it wasn't live before we started, then add a REG_DEAD note.
2298 We must check the previous lifetime info not the current info,
2299 because we may have to execute this code several times, e.g.
2300 once for a clobber (which doesn't add a note) and later
2301 for a use (which does add a note).
2302
2303 Always make the register live. We must do this even if it was
2304 live before, because this may be an insn which sets and uses
2305 the same register, in which case the register has already been
2306 killed, so we must make it live again.
2307
2308 Global registers are always live, and should never have a REG_DEAD
2309 note added for them, so none of the code below applies to them. */
2310
2311 if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
2312 {
2313 /* Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2314 STACK_POINTER_REGNUM, since these are always considered to be
2315 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2316 if (regno != FRAME_POINTER_REGNUM
2317 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2318 && ! (regno == HARD_FRAME_POINTER_REGNUM)
2319 #endif
2320 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
2321 && ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
2322 #endif
2323 && regno != STACK_POINTER_REGNUM)
2324 {
2325 if (! all_needed && ! dead_or_set_p (insn, x))
2326 {
2327 /* Check for the case where the register dying partially
2328 overlaps the register set by this insn. */
2329 if (regno < FIRST_PSEUDO_REGISTER
2330 && HARD_REGNO_NREGS (regno, GET_MODE (x)) > 1)
2331 {
2332 int n = HARD_REGNO_NREGS (regno, GET_MODE (x));
2333 while (--n >= 0)
2334 some_needed |= dead_or_set_regno_p (insn, regno + n);
2335 }
2336
2337 /* If none of the words in X is needed, make a REG_DEAD
2338 note. Otherwise, we must make partial REG_DEAD
2339 notes. */
2340 if (! some_needed)
2341 create_reg_dead_note (x, insn);
2342 else
2343 {
2344 int i;
2345
2346 /* Don't make a REG_DEAD note for a part of a
2347 register that is set in the insn. */
2348 for (i = HARD_REGNO_NREGS (regno, GET_MODE (x)) - 1;
2349 i >= 0; i--)
2350 if (! REGNO_REG_SET_P (old_live_regs, regno + i)
2351 && ! dead_or_set_regno_p (insn, regno + i))
2352 create_reg_dead_note (gen_rtx_REG (reg_raw_mode[regno + i],
2353 regno + i),
2354 insn);
2355 }
2356 }
2357 }
2358
2359 if (regno < FIRST_PSEUDO_REGISTER)
2360 {
2361 int j = HARD_REGNO_NREGS (regno, GET_MODE (x));
2362 while (--j >= 0)
2363 {
2364 CLEAR_REGNO_REG_SET (bb_dead_regs, regno + j);
2365 SET_REGNO_REG_SET (bb_live_regs, regno + j);
2366 }
2367 }
2368 else
2369 {
2370 CLEAR_REGNO_REG_SET (bb_dead_regs, regno);
2371 SET_REGNO_REG_SET (bb_live_regs, regno);
2372 }
2373 }
2374 return;
2375 }
2376
2377 case MEM:
2378 /* Handle tail-recursive case. */
2379 attach_deaths (XEXP (x, 0), insn, 0);
2380 return;
2381
2382 case SUBREG:
2383 attach_deaths (SUBREG_REG (x), insn,
2384 set_p && ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))
2385 <= UNITS_PER_WORD)
2386 || (GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))
2387 == GET_MODE_SIZE (GET_MODE ((x))))));
2388 return;
2389
2390 case STRICT_LOW_PART:
2391 attach_deaths (XEXP (x, 0), insn, 0);
2392 return;
2393
2394 case ZERO_EXTRACT:
2395 case SIGN_EXTRACT:
2396 attach_deaths (XEXP (x, 0), insn, 0);
2397 attach_deaths (XEXP (x, 1), insn, 0);
2398 attach_deaths (XEXP (x, 2), insn, 0);
2399 return;
2400
2401 default:
2402 /* Other cases: walk the insn. */
2403 fmt = GET_RTX_FORMAT (code);
2404 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2405 {
2406 if (fmt[i] == 'e')
2407 attach_deaths (XEXP (x, i), insn, 0);
2408 else if (fmt[i] == 'E')
2409 for (j = 0; j < XVECLEN (x, i); j++)
2410 attach_deaths (XVECEXP (x, i, j), insn, 0);
2411 }
2412 }
2413 }
2414
2415 /* After INSN has executed, add register death notes for each register
2416 that is dead after INSN. */
2417
2418 static void
2419 attach_deaths_insn (insn)
2420 rtx insn;
2421 {
2422 rtx x = PATTERN (insn);
2423 register RTX_CODE code = GET_CODE (x);
2424 rtx link;
2425
2426 if (code == SET)
2427 {
2428 attach_deaths (SET_SRC (x), insn, 0);
2429
2430 /* A register might die here even if it is the destination, e.g.
2431 it is the target of a volatile read and is otherwise unused.
2432 Hence we must always call attach_deaths for the SET_DEST. */
2433 attach_deaths (SET_DEST (x), insn, 1);
2434 }
2435 else if (code == PARALLEL)
2436 {
2437 register int i;
2438 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
2439 {
2440 code = GET_CODE (XVECEXP (x, 0, i));
2441 if (code == SET)
2442 {
2443 attach_deaths (SET_SRC (XVECEXP (x, 0, i)), insn, 0);
2444
2445 attach_deaths (SET_DEST (XVECEXP (x, 0, i)), insn, 1);
2446 }
2447 /* Flow does not add REG_DEAD notes to registers that die in
2448 clobbers, so we can't either. */
2449 else if (code != CLOBBER)
2450 attach_deaths (XVECEXP (x, 0, i), insn, 0);
2451 }
2452 }
2453 /* If this is a CLOBBER, only add REG_DEAD notes to registers inside a
2454 MEM being clobbered, just like flow. */
2455 else if (code == CLOBBER && GET_CODE (XEXP (x, 0)) == MEM)
2456 attach_deaths (XEXP (XEXP (x, 0), 0), insn, 0);
2457 /* Otherwise don't add a death note to things being clobbered. */
2458 else if (code != CLOBBER)
2459 attach_deaths (x, insn, 0);
2460
2461 /* Make death notes for things used in the called function. */
2462 if (GET_CODE (insn) == CALL_INSN)
2463 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2464 attach_deaths (XEXP (XEXP (link, 0), 0), insn,
2465 GET_CODE (XEXP (link, 0)) == CLOBBER);
2466 }
2467
2468 /* Delete notes beginning with INSN and maybe put them in the chain
2469 of notes ended by NOTE_LIST.
2470 Returns the insn following the notes. */
2471
2472 static rtx
2473 unlink_notes (insn, tail)
2474 rtx insn, tail;
2475 {
2476 rtx prev = PREV_INSN (insn);
2477
2478 while (insn != tail && GET_CODE (insn) == NOTE)
2479 {
2480 rtx next = NEXT_INSN (insn);
2481 /* Delete the note from its current position. */
2482 if (prev)
2483 NEXT_INSN (prev) = next;
2484 if (next)
2485 PREV_INSN (next) = prev;
2486
2487 if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
2488 /* Record line-number notes so they can be reused. */
2489 LINE_NOTE (insn) = insn;
2490
2491 /* Don't save away NOTE_INSN_SETJMPs, because they must remain
2492 immediately after the call they follow. We use a fake
2493 (REG_DEAD (const_int -1)) note to remember them.
2494	 Likewise with NOTE_INSN_{LOOP,EHREGION}_{BEG,END}.  */
2495 else if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_SETJMP
2496 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG
2497 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_END
2498 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_RANGE_START
2499 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_RANGE_END
2500 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
2501 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
2502 {
2503 /* Insert the note at the end of the notes list. */
2504 PREV_INSN (insn) = note_list;
2505 if (note_list)
2506 NEXT_INSN (note_list) = insn;
2507 note_list = insn;
2508 }
2509
2510 insn = next;
2511 }
2512 return insn;
2513 }
2514
2515 /* Constructor for `sometimes' data structure. */
2516
2517 static int
2518 new_sometimes_live (regs_sometimes_live, regno, sometimes_max)
2519 struct sometimes *regs_sometimes_live;
2520 int regno;
2521 int sometimes_max;
2522 {
2523 register struct sometimes *p;
2524
2525 /* There should never be a register greater than max_regno here. If there
2526 is, it means that a define_split has created a new pseudo reg. This
2527 is not allowed, since there will not be flow info available for any
2528 new register, so catch the error here. */
2529 if (regno >= max_regno)
2530 abort ();
2531
2532 p = &regs_sometimes_live[sometimes_max];
2533 p->regno = regno;
2534 p->live_length = 0;
2535 p->calls_crossed = 0;
2536 sometimes_max++;
2537 return sometimes_max;
2538 }
2539
2540 /* Count lengths of all regs we are currently tracking,
2541 and find new registers no longer live. */
2542
2543 static void
2544 finish_sometimes_live (regs_sometimes_live, sometimes_max)
2545 struct sometimes *regs_sometimes_live;
2546 int sometimes_max;
2547 {
2548 int i;
2549
2550 for (i = 0; i < sometimes_max; i++)
2551 {
2552 register struct sometimes *p = &regs_sometimes_live[i];
2553 int regno = p->regno;
2554
2555 sched_reg_live_length[regno] += p->live_length;
2556 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
2557 }
2558 }
2559
2560 /* Search INSN for fake REG_DEAD note pairs for NOTE_INSN_SETJMP,
2561	 NOTE_INSN_{LOOP,EHREGION}_{BEG,END}, and convert them back into
2562	 NOTEs.  The REG_DEAD note following the first one contains the saved
2563	 value of NOTE_BLOCK_NUMBER, which is useful for
2564 NOTE_INSN_EH_REGION_{BEG,END} NOTEs. LAST is the last instruction
2565 output by the instruction scheduler. Return the new value of LAST. */
2566
2567 static rtx
2568 reemit_notes (insn, last)
2569 rtx insn;
2570 rtx last;
2571 {
2572 rtx note;
2573
2574 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2575 {
2576 if (REG_NOTE_KIND (note) == REG_DEAD
2577 && GET_CODE (XEXP (note, 0)) == CONST_INT)
2578 {
2579 if (INTVAL (XEXP (note, 0)) == NOTE_INSN_SETJMP)
2580 {
2581 CONST_CALL_P (emit_note_after (INTVAL (XEXP (note, 0)), insn))
2582 = CONST_CALL_P (note);
2583 remove_note (insn, note);
2584 note = XEXP (note, 1);
2585 }
2586 else
2587 {
2588 last = emit_note_before (INTVAL (XEXP (note, 0)), last);
2589 remove_note (insn, note);
2590 note = XEXP (note, 1);
2591 NOTE_BLOCK_NUMBER (last) = INTVAL (XEXP (note, 0));
2592 }
2593 remove_note (insn, note);
2594 }
2595 }
2596 return last;
2597 }
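/* Editorial aside -- a hedged round-trip example: sched_analyze encodes a
   NOTE_INSN_LOOP_BEG on a neighbouring insn as the fake pair

	(expr_list:REG_DEAD (const_int NOTE_INSN_LOOP_BEG)
	   (expr_list:REG_DEAD (const_int <saved block number>) ...))

   and the loop above undoes it: a real note is emitted before LAST (after
   INSN in the SETJMP case), NOTE_BLOCK_NUMBER is restored from the second
   constant, and both fake notes are removed from INSN.  */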
2598
2599 /* Use modified list scheduling to rearrange insns in basic block
2600 B. FILE, if nonzero, is where we dump interesting output about
2601 this pass. */
2602
2603 static void
2604 schedule_block (b, file)
2605 int b;
2606 FILE *file;
2607 {
2608 rtx insn, last;
2609 rtx *ready, link;
2610 int i, j, n_ready = 0, new_ready, n_insns;
2611 int sched_n_insns = 0;
2612 int clock;
2613 #define NEED_NOTHING 0
2614 #define NEED_HEAD 1
2615 #define NEED_TAIL 2
2616 int new_needs;
2617
2618 /* HEAD and TAIL delimit the region being scheduled. */
2619 rtx head = BLOCK_HEAD (b);
2620 rtx tail = BLOCK_END (b);
2621 /* PREV_HEAD and NEXT_TAIL are the boundaries of the insns
2622 being scheduled. When the insns have been ordered,
2623 these insns delimit where the new insns are to be
2624 spliced back into the insn chain. */
2625 rtx next_tail;
2626 rtx prev_head;
2627
2628 /* Keep life information accurate. */
2629 register struct sometimes *regs_sometimes_live;
2630 int sometimes_max;
2631
2632 if (file)
2633 fprintf (file, ";;\t -- basic block number %d from %d to %d --\n",
2634 b, INSN_UID (BLOCK_HEAD (b)), INSN_UID (BLOCK_END (b)));
2635
2636 i = max_reg_num ();
2637 reg_last_uses = (rtx *) alloca (i * sizeof (rtx));
2638 bzero ((char *) reg_last_uses, i * sizeof (rtx));
2639 reg_last_sets = (rtx *) alloca (i * sizeof (rtx));
2640 bzero ((char *) reg_last_sets, i * sizeof (rtx));
2641 reg_pending_sets = ALLOCA_REG_SET ();
2642 CLEAR_REG_SET (reg_pending_sets);
2643 reg_pending_sets_all = 0;
2644 clear_units ();
2645
2646 #if 0
2647 /* We used to have code to avoid getting parameters moved from hard
2648 argument registers into pseudos.
2649
2650 However, it was removed when it proved to be of marginal benefit and
2651 caused problems because of different notions of what the "head" insn
2652 was. */
2653
2654 /* Remove certain insns at the beginning from scheduling,
2655 by advancing HEAD. */
2656
2657 /* At the start of a function, before reload has run, don't delay getting
2658 parameters from hard registers into pseudo registers. */
2659 if (reload_completed == 0 && b == 0)
2660 {
2661 while (head != tail
2662 && GET_CODE (head) == NOTE
2663 && NOTE_LINE_NUMBER (head) != NOTE_INSN_FUNCTION_BEG)
2664 head = NEXT_INSN (head);
2665 while (head != tail
2666 && GET_CODE (head) == INSN
2667 && GET_CODE (PATTERN (head)) == SET)
2668 {
2669 rtx src = SET_SRC (PATTERN (head));
2670 while (GET_CODE (src) == SUBREG
2671 || GET_CODE (src) == SIGN_EXTEND
2672 || GET_CODE (src) == ZERO_EXTEND
2673 || GET_CODE (src) == SIGN_EXTRACT
2674 || GET_CODE (src) == ZERO_EXTRACT)
2675 src = XEXP (src, 0);
2676 if (GET_CODE (src) != REG
2677 || REGNO (src) >= FIRST_PSEUDO_REGISTER)
2678 break;
2679 /* Keep this insn from ever being scheduled. */
2680 INSN_REF_COUNT (head) = 1;
2681 head = NEXT_INSN (head);
2682 }
2683 }
2684 #endif
2685
2686 /* Don't include any notes or labels at the beginning of the
2687 basic block, or notes at the ends of basic blocks. */
2688 while (head != tail)
2689 {
2690 if (GET_CODE (head) == NOTE)
2691 head = NEXT_INSN (head);
2692 else if (GET_CODE (tail) == NOTE)
2693 tail = PREV_INSN (tail);
2694 else if (GET_CODE (head) == CODE_LABEL)
2695 head = NEXT_INSN (head);
2696 else break;
2697 }
2698 /* If the only insn left is a NOTE or a CODE_LABEL, then there is no need
2699 to schedule this block. */
2700 if (head == tail
2701 && (GET_CODE (head) == NOTE || GET_CODE (head) == CODE_LABEL))
2702 goto ret;
2703
2704 #if 0
2705 /* This short-cut doesn't work. It does not count call insns crossed by
2706 registers in reg_sometimes_live. It does not mark these registers as
2707 dead if they die in this block. It does not mark these registers live
2708 (or create new reg_sometimes_live entries if necessary) if they are born
2709 in this block.
2710
2711 The easy solution is to just always schedule a block. This block only
2712 has one insn, so this won't slow down this pass by much. */
2713
2714 if (head == tail)
2715 goto ret;
2716 #endif
2717
2718 /* Now HEAD through TAIL are the insns actually to be rearranged;
2719 Let PREV_HEAD and NEXT_TAIL enclose them. */
2720 prev_head = PREV_INSN (head);
2721 next_tail = NEXT_INSN (tail);
2722
2723 /* Initialize basic block data structures. */
2724 dead_notes = 0;
2725 pending_read_insns = 0;
2726 pending_read_mems = 0;
2727 pending_write_insns = 0;
2728 pending_write_mems = 0;
2729 pending_lists_length = 0;
2730 last_pending_memory_flush = 0;
2731 last_function_call = 0;
2732 last_scheduled_insn = 0;
2733
2734 LOG_LINKS (sched_before_next_call) = 0;
2735
2736 n_insns = sched_analyze (head, tail);
2737 if (n_insns == 0)
2738 {
2739 free_pending_lists ();
2740 goto ret;
2741 }
2742
2743 /* Allocate vector to hold insns to be rearranged (except those
2744 insns which are controlled by an insn with SCHED_GROUP_P set).
2745 All these insns are included between ORIG_HEAD and ORIG_TAIL,
2746	 as those variables are ultimately set up.  */
2747 ready = (rtx *) alloca ((n_insns+1) * sizeof (rtx));
2748
2749 /* TAIL is now the last of the insns to be rearranged.
2750 Put those insns into the READY vector. */
2751 insn = tail;
2752
2753 /* For all branches, calls, uses, and cc0 setters, force them to remain
2754 in order at the end of the block by adding dependencies and giving
2755 the last a high priority. There may be notes present, and prev_head
2756 may also be a note.
2757
2758 Branches must obviously remain at the end. Calls should remain at the
2759 end since moving them results in worse register allocation. Uses remain
2760	 at the end to ensure proper register allocation.  cc0 setters remain
2761 at the end because they can't be moved away from their cc0 user. */
2762 last = 0;
2763 while (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN
2764 || (GET_CODE (insn) == INSN
2765 && (GET_CODE (PATTERN (insn)) == USE
2766 #ifdef HAVE_cc0
2767 || sets_cc0_p (PATTERN (insn))
2768 #endif
2769 ))
2770 || GET_CODE (insn) == NOTE)
2771 {
2772 if (GET_CODE (insn) != NOTE)
2773 {
2774 priority (insn);
2775 if (last == 0)
2776 {
2777 ready[n_ready++] = insn;
2778 INSN_PRIORITY (insn) = TAIL_PRIORITY - i;
2779 INSN_REF_COUNT (insn) = 0;
2780 }
2781 else if (! find_insn_list (insn, LOG_LINKS (last)))
2782 {
2783 add_dependence (last, insn, REG_DEP_ANTI);
2784 INSN_REF_COUNT (insn)++;
2785 }
2786 last = insn;
2787
2788 /* Skip over insns that are part of a group. */
2789 while (SCHED_GROUP_P (insn))
2790 {
2791 insn = prev_nonnote_insn (insn);
2792 priority (insn);
2793 }
2794 }
2795
2796 insn = PREV_INSN (insn);
2797 /* Don't overrun the bounds of the basic block. */
2798 if (insn == prev_head)
2799 break;
2800 }
2801
2802 /* Assign priorities to instructions. Also check whether they
2803 are in priority order already. If so then I will be nonnegative.
2804 We use this shortcut only before reloading. */
2805 #if 0
2806 i = reload_completed ? DONE_PRIORITY : MAX_PRIORITY;
2807 #endif
2808
2809 for (; insn != prev_head; insn = PREV_INSN (insn))
2810 {
2811 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2812 {
2813 priority (insn);
2814 if (INSN_REF_COUNT (insn) == 0)
2815 {
2816 if (last == 0)
2817 ready[n_ready++] = insn;
2818 else
2819 {
2820 /* Make this dependent on the last of the instructions
2821 that must remain in order at the end of the block. */
2822 add_dependence (last, insn, REG_DEP_ANTI);
2823 INSN_REF_COUNT (insn) = 1;
2824 }
2825 }
2826 if (SCHED_GROUP_P (insn))
2827 {
2828 while (SCHED_GROUP_P (insn))
2829 {
2830 insn = prev_nonnote_insn (insn);
2831 priority (insn);
2832 }
2833 continue;
2834 }
2835 #if 0
2836 if (i < 0)
2837 continue;
2838 if (INSN_PRIORITY (insn) < i)
2839 i = INSN_PRIORITY (insn);
2840 else if (INSN_PRIORITY (insn) > i)
2841 i = DONE_PRIORITY;
2842 #endif
2843 }
2844 }
2845
2846 #if 0
2847 /* This short-cut doesn't work. It does not count call insns crossed by
2848 registers in reg_sometimes_live. It does not mark these registers as
2849 dead if they die in this block. It does not mark these registers live
2850 (or create new reg_sometimes_live entries if necessary) if they are born
2851 in this block.
2852
2853 The easy solution is to just always schedule a block. These blocks tend
2854 to be very short, so this doesn't slow down this pass by much. */
2855
2856 /* If existing order is good, don't bother to reorder. */
2857 if (i != DONE_PRIORITY)
2858 {
2859 if (file)
2860 fprintf (file, ";; already scheduled\n");
2861
2862 if (reload_completed == 0)
2863 {
2864 for (i = 0; i < sometimes_max; i++)
2865 regs_sometimes_live[i].live_length += n_insns;
2866
2867 finish_sometimes_live (regs_sometimes_live, sometimes_max);
2868 }
2869 free_pending_lists ();
2870 goto ret;
2871 }
2872 #endif
2873
2874 /* Scan all the insns to be scheduled, removing NOTE insns
2875 and register death notes.
2876 Line number NOTE insns end up in NOTE_LIST.
2877 Register death notes end up in DEAD_NOTES.
2878
2879 Recreate the register life information for the end of this basic
2880 block. */
2881
2882 if (reload_completed == 0)
2883 {
2884 COPY_REG_SET (bb_live_regs, basic_block_live_at_start[b]);
2885 CLEAR_REG_SET (bb_dead_regs);
2886
2887 if (b == 0)
2888 {
2889 /* This is the first block in the function. There may be insns
2890 before head that we can't schedule. We still need to examine
2891 them though for accurate register lifetime analysis. */
2892
2893 /* We don't want to remove any REG_DEAD notes as the code below
2894 does. */
2895
2896 for (insn = BLOCK_HEAD (b); insn != head;
2897 insn = NEXT_INSN (insn))
2898 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2899 {
2900 /* See if the register gets born here. */
2901 /* We must check for registers being born before we check for
2902 registers dying. It is possible for a register to be born
2903 and die in the same insn, e.g. reading from a volatile
2904 memory location into an otherwise unused register. Such
2905 a register must be marked as dead after this insn. */
2906 if (GET_CODE (PATTERN (insn)) == SET
2907 || GET_CODE (PATTERN (insn)) == CLOBBER)
2908 sched_note_set (PATTERN (insn), 0);
2909 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2910 {
2911 int j;
2912 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2913 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2914 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2915 sched_note_set (XVECEXP (PATTERN (insn), 0, j), 0);
2916
2917 /* ??? This code is obsolete and should be deleted. It
2918 is harmless though, so we will leave it in for now. */
2919 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2920 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
2921 sched_note_set (XVECEXP (PATTERN (insn), 0, j), 0);
2922 }
2923
2924 /* Each call clobbers (makes live) all call-clobbered regs
2925 that are not global or fixed. Note that the function-value
2926 reg is a call_clobbered reg. */
2927
2928 if (GET_CODE (insn) == CALL_INSN)
2929 {
2930 int j;
2931 for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
2932 if (call_used_regs[j] && ! global_regs[j]
2933 && ! fixed_regs[j])
2934 {
2935 SET_REGNO_REG_SET (bb_live_regs, j);
2936 CLEAR_REGNO_REG_SET (bb_dead_regs, j);
2937 }
2938 }
2939
2940 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2941 {
2942 if ((REG_NOTE_KIND (link) == REG_DEAD
2943 || REG_NOTE_KIND (link) == REG_UNUSED)
2944 /* Verify that the REG_NOTE has a valid value. */
2945 && GET_CODE (XEXP (link, 0)) == REG)
2946 {
2947 register int regno = REGNO (XEXP (link, 0));
2948
2949 if (regno < FIRST_PSEUDO_REGISTER)
2950 {
2951 int j = HARD_REGNO_NREGS (regno,
2952 GET_MODE (XEXP (link, 0)));
2953 while (--j >= 0)
2954 {
2955 CLEAR_REGNO_REG_SET (bb_live_regs, regno + j);
2956 SET_REGNO_REG_SET (bb_dead_regs, regno + j);
2957 }
2958 }
2959 else
2960 {
2961 CLEAR_REGNO_REG_SET (bb_live_regs, regno);
2962 SET_REGNO_REG_SET (bb_dead_regs, regno);
2963 }
2964 }
2965 }
2966 }
2967 }
2968 }
2969
2970 /* If debugging information is being produced, keep track of the line
2971 number notes for each insn. */
2972 if (write_symbols != NO_DEBUG)
2973 {
2974 /* We must use the true line number for the first insn in the block
2975 that was computed and saved at the start of this pass. We can't
2976 use the current line number, because scheduling of the previous
2977 block may have changed the current line number. */
2978 rtx line = line_note_head[b];
2979
2980 for (insn = BLOCK_HEAD (b);
2981 insn != next_tail;
2982 insn = NEXT_INSN (insn))
2983 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
2984 line = insn;
2985 else
2986 LINE_NOTE (insn) = line;
2987 }
2988
2989 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2990 {
2991 rtx prev, next, link;
2992
2993 /* Farm out notes. This is needed to keep the debugger from
2994 getting completely deranged. */
2995 if (GET_CODE (insn) == NOTE)
2996 {
2997 prev = insn;
2998 insn = unlink_notes (insn, next_tail);
2999 if (prev == tail)
3000 abort ();
3001 if (prev == head)
3002 abort ();
3003 if (insn == next_tail)
3004 abort ();
3005 }
3006
3007 if (reload_completed == 0
3008 && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3009 {
3010 /* See if the register gets born here. */
3011 /* We must check for registers being born before we check for
3012 registers dying. It is possible for a register to be born and
3013 die in the same insn, e.g. reading from a volatile memory
3014 location into an otherwise unused register. Such a register
3015 must be marked as dead after this insn. */
3016 if (GET_CODE (PATTERN (insn)) == SET
3017 || GET_CODE (PATTERN (insn)) == CLOBBER)
3018 sched_note_set (PATTERN (insn), 0);
3019 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3020 {
3021 int j;
3022 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3023 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
3024 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
3025 sched_note_set (XVECEXP (PATTERN (insn), 0, j), 0);
3026
3027 /* ??? This code is obsolete and should be deleted. It
3028 is harmless though, so we will leave it in for now. */
3029 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3030 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
3031 sched_note_set (XVECEXP (PATTERN (insn), 0, j), 0);
3032 }
3033
3034 /* Each call clobbers (makes live) all call-clobbered regs that are
3035 not global or fixed. Note that the function-value reg is a
3036 call_clobbered reg. */
3037
3038 if (GET_CODE (insn) == CALL_INSN)
3039 {
3040 int j;
3041 for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
3042 if (call_used_regs[j] && ! global_regs[j]
3043 && ! fixed_regs[j])
3044 {
3045 SET_REGNO_REG_SET (bb_live_regs, j);
3046 CLEAR_REGNO_REG_SET (bb_dead_regs, j);
3047 }
3048 }
3049
3050 /* Need to know what registers this insn kills. */
3051 for (prev = 0, link = REG_NOTES (insn); link; link = next)
3052 {
3053 next = XEXP (link, 1);
3054 if ((REG_NOTE_KIND (link) == REG_DEAD
3055 || REG_NOTE_KIND (link) == REG_UNUSED)
3056 /* Verify that the REG_NOTE has a valid value. */
3057 && GET_CODE (XEXP (link, 0)) == REG)
3058 {
3059 register int regno = REGNO (XEXP (link, 0));
3060
3061 /* Only unlink REG_DEAD notes; leave REG_UNUSED notes
3062 alone. */
3063 if (REG_NOTE_KIND (link) == REG_DEAD)
3064 {
3065 if (prev)
3066 XEXP (prev, 1) = next;
3067 else
3068 REG_NOTES (insn) = next;
3069 XEXP (link, 1) = dead_notes;
3070 dead_notes = link;
3071 }
3072 else
3073 prev = link;
3074
3075 if (regno < FIRST_PSEUDO_REGISTER)
3076 {
3077 int j = HARD_REGNO_NREGS (regno,
3078 GET_MODE (XEXP (link, 0)));
3079 while (--j >= 0)
3080 {
3081 CLEAR_REGNO_REG_SET (bb_live_regs, regno + j);
3082 SET_REGNO_REG_SET (bb_dead_regs, regno + j);
3083 }
3084 }
3085 else
3086 {
3087 CLEAR_REGNO_REG_SET (bb_live_regs, regno);
3088 SET_REGNO_REG_SET (bb_dead_regs, regno);
3089 }
3090 }
3091 else
3092 prev = link;
3093 }
3094 }
3095 }
3096
3097 if (reload_completed == 0)
3098 {
3099 /* Keep track of register lives. */
3100 old_live_regs = ALLOCA_REG_SET ();
3101 regs_sometimes_live
3102 = (struct sometimes *) alloca (max_regno * sizeof (struct sometimes));
3103 sometimes_max = 0;
3104
3105 /* Start with registers live at end. */
3106 COPY_REG_SET (old_live_regs, bb_live_regs);
3107 EXECUTE_IF_SET_IN_REG_SET (bb_live_regs, 0, j,
3108 {
3109 sometimes_max
3110 = new_sometimes_live (regs_sometimes_live,
3111 j, sometimes_max);
3112 });
3113 }
3114
3115 SCHED_SORT (ready, n_ready, 1);
3116
3117 if (file)
3118 {
3119 fprintf (file, ";; ready list initially:\n;; ");
3120 for (i = 0; i < n_ready; i++)
3121 fprintf (file, "%d ", INSN_UID (ready[i]));
3122 fprintf (file, "\n\n");
3123
3124 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3125 if (INSN_PRIORITY (insn) > 0)
3126 fprintf (file, ";; insn[%4d]: priority = %4d, ref_count = %4d\n",
3127 INSN_UID (insn), INSN_PRIORITY (insn),
3128 INSN_REF_COUNT (insn));
3129 }
3130
3131 /* Now HEAD and TAIL are going to become disconnected
3132 entirely from the insn chain. */
3133 tail = 0;
3134
3135 /* Q_SIZE will always be zero here. */
3136 q_ptr = 0; clock = 0;
3137 bzero ((char *) insn_queue, sizeof (insn_queue));
3138
3139 /* Now, perform list scheduling. */
3140
3141 /* Where we start inserting insns is after TAIL. */
3142 last = next_tail;
3143
3144 new_needs = (NEXT_INSN (prev_head) == BLOCK_HEAD (b)
3145 ? NEED_HEAD : NEED_NOTHING);
3146 if (PREV_INSN (next_tail) == BLOCK_END (b))
3147 new_needs |= NEED_TAIL;
3148
3149 new_ready = n_ready;
3150 while (sched_n_insns < n_insns)
3151 {
3152 q_ptr = NEXT_Q (q_ptr); clock++;
3153
3154 /* Add all pending insns that can be scheduled without stalls to the
3155 ready list. */
3156 for (insn = insn_queue[q_ptr]; insn; insn = NEXT_INSN (insn))
3157 {
3158 if (file)
3159 fprintf (file, ";; launching %d before %d with no stalls at T-%d\n",
3160 INSN_UID (insn), INSN_UID (last), clock);
3161 ready[new_ready++] = insn;
3162 q_size -= 1;
3163 }
3164 insn_queue[q_ptr] = 0;
3165
3166 /* If there are no ready insns, stall until one is ready and add all
3167 of the pending insns at that point to the ready list. */
3168 if (new_ready == 0)
3169 {
3170 register int stalls;
3171
3172 for (stalls = 1; stalls < INSN_QUEUE_SIZE; stalls++)
3173 if ((insn = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
3174 {
3175 for (; insn; insn = NEXT_INSN (insn))
3176 {
3177 if (file)
3178 fprintf (file, ";; launching %d before %d with %d stalls at T-%d\n",
3179 INSN_UID (insn), INSN_UID (last), stalls, clock);
3180 ready[new_ready++] = insn;
3181 q_size -= 1;
3182 }
3183 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = 0;
3184 break;
3185 }
3186
3187 q_ptr = NEXT_Q_AFTER (q_ptr, stalls); clock += stalls;
3188 }
3189
3190 /* There should be some instructions waiting to fire. */
3191 if (new_ready == 0)
3192 abort ();
3193
3194 if (file)
3195 {
3196 fprintf (file, ";; ready list at T-%d:", clock);
3197 for (i = 0; i < new_ready; i++)
3198 fprintf (file, " %d (%x)",
3199 INSN_UID (ready[i]), INSN_PRIORITY (ready[i]));
3200 }
3201
3202 /* Sort the ready list and choose the best insn to schedule. Select
3203 which insn should issue in this cycle and queue those that are
3204 blocked by function unit hazards.
3205
3206 N_READY holds the number of items that were scheduled the last time,
3207 minus the one instruction scheduled on the last loop iteration; it
3208 is not modified for any other reason in this loop. */
3209
3210 SCHED_SORT (ready, new_ready, n_ready);
3211 if (MAX_BLOCKAGE > 1)
3212 {
3213 new_ready = schedule_select (ready, new_ready, clock, file);
3214 if (new_ready == 0)
3215 {
3216 if (file)
3217 fprintf (file, "\n");
3218 /* We must set n_ready here, to ensure that sorting always
3219 occurs when we come back to the SCHED_SORT line above. */
3220 n_ready = 0;
3221 continue;
3222 }
3223 }
3224 n_ready = new_ready;
3225 last_scheduled_insn = insn = ready[0];
3226
3227 /* The first insn scheduled becomes the new tail. */
3228 if (tail == 0)
3229 tail = insn;
3230
3231 if (file)
3232 {
3233 fprintf (file, ", now");
3234 for (i = 0; i < n_ready; i++)
3235 fprintf (file, " %d", INSN_UID (ready[i]));
3236 fprintf (file, "\n");
3237 }
3238
3239 if (DONE_PRIORITY_P (insn))
3240 abort ();
3241
3242 if (reload_completed == 0)
3243 {
3244 /* Process this insn, and each insn linked to this one which must
3245 be immediately output after this insn. */
3246 do
3247 {
3248 /* First we kill registers set by this insn, and then we
3249 make registers used by this insn live. This is the opposite
3250 order used above because we are traversing the instructions
3251 backwards. */
3252
3253 /* Strictly speaking, we should scan REG_UNUSED notes and make
3254 every register mentioned there live, however, we will just
3255 kill them again immediately below, so there doesn't seem to
3256 be any reason why we bother to do this. */
3257
3258 /* See if this is the last notice we must take of a register. */
3259 if (GET_CODE (PATTERN (insn)) == SET
3260 || GET_CODE (PATTERN (insn)) == CLOBBER)
3261 sched_note_set (PATTERN (insn), 1);
3262 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3263 {
3264 int j;
3265 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3266 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
3267 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
3268 sched_note_set (XVECEXP (PATTERN (insn), 0, j), 1);
3269 }
3270
3271 /* This code keeps life analysis information up to date. */
3272 if (GET_CODE (insn) == CALL_INSN)
3273 {
3274 register struct sometimes *p;
3275
3276 /* A call kills all call used registers that are not
3277 global or fixed, except for those mentioned in the call
3278 pattern which will be made live again later. */
3279 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3280 if (call_used_regs[i] && ! global_regs[i]
3281 && ! fixed_regs[i])
3282 {
3283 CLEAR_REGNO_REG_SET (bb_live_regs, i);
3284 SET_REGNO_REG_SET (bb_dead_regs, i);
3285 }
3286
3287 /* Regs live at the time of a call instruction must not
3288 go in a register clobbered by calls. Record this for
3289 all regs now live. Note that insns which are born or
3290 die in a call do not cross a call, so this must be done
3291 after the killings (above) and before the births
3292 (below). */
3293 p = regs_sometimes_live;
3294 for (i = 0; i < sometimes_max; i++, p++)
3295 if (REGNO_REG_SET_P (bb_live_regs, p->regno))
3296 p->calls_crossed += 1;
3297 }
3298
3299 /* Make every register used live, and add REG_DEAD notes for
3300 registers which were not live before we started. */
3301 attach_deaths_insn (insn);
3302
3303 /* Find registers now made live by that instruction. */
3304 EXECUTE_IF_AND_COMPL_IN_REG_SET (bb_live_regs, old_live_regs, 0, i,
3305 {
3306 sometimes_max
3307 = new_sometimes_live (regs_sometimes_live,
3308 i, sometimes_max);
3309 });
3310 IOR_REG_SET (old_live_regs, bb_live_regs);
3311
3312 /* Count lengths of all regs we are worrying about now,
3313 and handle registers no longer live. */
3314
3315 for (i = 0; i < sometimes_max; i++)
3316 {
3317 register struct sometimes *p = &regs_sometimes_live[i];
3318 int regno = p->regno;
3319
3320 p->live_length += 1;
3321
3322 if (!REGNO_REG_SET_P (bb_live_regs, p->regno))
3323 {
3324 /* This is the end of one of this register's lifetime
3325 segments. Save the lifetime info collected so far,
3326 and clear its bit in the old_live_regs entry. */
3327 sched_reg_live_length[regno] += p->live_length;
3328 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
3329 CLEAR_REGNO_REG_SET (old_live_regs, p->regno);
3330
3331 /* Delete the reg_sometimes_live entry for this reg by
3332 copying the last entry over top of it. */
3333 *p = regs_sometimes_live[--sometimes_max];
3334 /* ...and decrement i so that this newly copied entry
3335 will be processed. */
3336 i--;
3337 }
3338 }
3339
3340 link = insn;
3341 insn = PREV_INSN (insn);
3342 }
3343 while (SCHED_GROUP_P (link));
3344
3345 /* Set INSN back to the insn we are scheduling now. */
3346 insn = ready[0];
3347 }
3348
3349 /* Schedule INSN. Remove it from the ready list. */
3350 ready += 1;
3351 n_ready -= 1;
3352
3353 sched_n_insns += 1;
3354 NEXT_INSN (insn) = last;
3355 PREV_INSN (last) = insn;
3356
3357 /* Everything that precedes INSN now either becomes "ready", if
3358 it can execute immediately before INSN, or "pending", if
3359 there must be a delay. Give INSN high enough priority that
3360 at least one (maybe more) reg-killing insns can be launched
3361 ahead of all others. Mark INSN as scheduled by changing its
3362 priority to -1. */
3363 INSN_PRIORITY (insn) = LAUNCH_PRIORITY;
3364 new_ready = schedule_insn (insn, ready, n_ready, clock);
3365 INSN_PRIORITY (insn) = DONE_PRIORITY;
3366
3367 /* Schedule all prior insns that must not be moved. */
3368 if (SCHED_GROUP_P (insn))
3369 {
3370 /* Disable these insns from being launched, in case one of the
3371 insns in the group has a dependency on an earlier one. */
3372 link = insn;
3373 while (SCHED_GROUP_P (link))
3374 {
3375 /* Disable these insns from being launched by anybody. */
3376 link = PREV_INSN (link);
3377 INSN_REF_COUNT (link) = 0;
3378 }
3379
3380 /* Now handle each group insn like the main insn was handled
3381 above. */
3382 link = insn;
3383 while (SCHED_GROUP_P (link))
3384 {
3385 link = PREV_INSN (link);
3386
3387 sched_n_insns += 1;
3388
3389 /* ??? Why don't we set LAUNCH_PRIORITY here? */
3390 new_ready = schedule_insn (link, ready, new_ready, clock);
3391 INSN_PRIORITY (link) = DONE_PRIORITY;
3392 }
3393 }
3394
3395 /* Put back NOTE_INSN_SETJMP,
3396	 NOTE_INSN_{LOOP,EHREGION}_{BEG,END} notes.  */
3397
3398	 /* To prime the loop, we need to handle INSN and all the insns in the
3399 sched group. */
3400 last = NEXT_INSN (insn);
3401 do
3402 {
3403 insn = PREV_INSN (last);
3404
3405 /* Maintain a valid chain so emit_note_before works.
3406 This is necessary because PREV_INSN (insn) isn't valid
3407 (if ! SCHED_GROUP_P) and if it points to an insn already
3408 scheduled, a circularity will result. */
3409 if (! SCHED_GROUP_P (insn))
3410 {
3411 NEXT_INSN (prev_head) = insn;
3412 PREV_INSN (insn) = prev_head;
3413 }
3414
3415 last = reemit_notes (insn, insn);
3416 }
3417 while (SCHED_GROUP_P (insn));
3418 }
3419 if (q_size != 0)
3420 abort ();
3421
3422 if (reload_completed == 0)
3423 finish_sometimes_live (regs_sometimes_live, sometimes_max);
3424
3425	 /* HEAD is now the first insn in the chain of insns that have
3426	 been scheduled by the loop above.
3427 TAIL is the last of those insns. */
3428 head = last;
3429
3430 /* NOTE_LIST is the end of a chain of notes previously found
3431 among the insns. Insert them at the beginning of the insns. */
3432 if (note_list != 0)
3433 {
3434 rtx note_head = note_list;
3435 while (PREV_INSN (note_head))
3436 note_head = PREV_INSN (note_head);
3437
3438 PREV_INSN (head) = note_list;
3439 NEXT_INSN (note_list) = head;
3440 head = note_head;
3441 }
3442
3443 /* There should be no REG_DEAD notes leftover at the end.
3444 In practice, this can occur as the result of bugs in flow, combine.c,
3445 and/or sched.c. The values of the REG_DEAD notes remaining are
3446 meaningless, because dead_notes is just used as a free list. */
3447 #if 1
3448 if (dead_notes != 0)
3449 abort ();
3450 #endif
3451
3452 if (new_needs & NEED_HEAD)
3453 BLOCK_HEAD (b) = head;
3454 PREV_INSN (head) = prev_head;
3455 NEXT_INSN (prev_head) = head;
3456
3457 if (new_needs & NEED_TAIL)
3458 BLOCK_END (b) = tail;
3459 NEXT_INSN (tail) = next_tail;
3460 PREV_INSN (next_tail) = tail;
3461
3462 /* Restore the line-number notes of each insn. */
3463 if (write_symbols != NO_DEBUG)
3464 {
3465 rtx line, note, prev, new;
3466 int notes = 0;
3467
3468 head = BLOCK_HEAD (b);
3469 next_tail = NEXT_INSN (BLOCK_END (b));
3470
3471 /* Determine the current line-number. We want to know the current
3472 line number of the first insn of the block here, in case it is
3473 different from the true line number that was saved earlier. If
3474 different, then we need a line number note before the first insn
3475 of this block. If it happens to be the same, then we don't want to
3476 emit another line number note here. */
3477 for (line = head; line; line = PREV_INSN (line))
3478 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
3479 break;
3480
3481 /* Walk the insns keeping track of the current line-number and inserting
3482 the line-number notes as needed. */
3483 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3484 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
3485 line = insn;
3486 /* This used to emit line number notes before every non-deleted note.
3487 However, this confuses a debugger, because line notes not separated
3488 by real instructions all end up at the same address. I can find no
3489 use for line number notes before other notes, so none are emitted. */
3490 else if (GET_CODE (insn) != NOTE
3491 && (note = LINE_NOTE (insn)) != 0
3492 && note != line
3493 && (line == 0
3494 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
3495 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)))
3496 {
3497 line = note;
3498 prev = PREV_INSN (insn);
3499 if (LINE_NOTE (note))
3500 {
3501 /* Re-use the original line-number note. */
3502 LINE_NOTE (note) = 0;
3503 PREV_INSN (note) = prev;
3504 NEXT_INSN (prev) = note;
3505 PREV_INSN (insn) = note;
3506 NEXT_INSN (note) = insn;
3507 }
3508 else
3509 {
3510 notes++;
3511 new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
3512 NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
3513 RTX_INTEGRATED_P (new) = RTX_INTEGRATED_P (note);
3514 }
3515 }
3516 if (file && notes)
3517 fprintf (file, ";; added %d line-number notes\n", notes);
3518 }
3519
3520 if (file)
3521 {
3522 fprintf (file, ";; total time = %d\n;; new basic block head = %d\n;; new basic block end = %d\n\n",
3523 clock, INSN_UID (BLOCK_HEAD (b)), INSN_UID (BLOCK_END (b)));
3524 }
3525
3526 /* Yow! We're done! */
3527 free_pending_lists ();
3528
3529 ret:
3530 FREE_REG_SET (reg_pending_sets);
3531 FREE_REG_SET (old_live_regs);
3532
3533 return;
3534 }
3535 \f
3536 /* Subroutine of split_hard_reg_notes. Searches X for any reference to
3537 REGNO, returning the rtx of the reference found if any. Otherwise,
3538 returns 0. */
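
/* For illustration (a hypothetical example): searching
   (plus:SI (reg:SI 65) (const_int 4)) for register number 65 would
   return the inner (reg:SI 65) expression itself.  */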
3539
3540 static rtx
3541 regno_use_in (regno, x)
3542 int regno;
3543 rtx x;
3544 {
3545 register char *fmt;
3546 int i, j;
3547 rtx tem;
3548
3549 if (GET_CODE (x) == REG && REGNO (x) == regno)
3550 return x;
3551
3552 fmt = GET_RTX_FORMAT (GET_CODE (x));
3553 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3554 {
3555 if (fmt[i] == 'e')
3556 {
3557 if ((tem = regno_use_in (regno, XEXP (x, i))))
3558 return tem;
3559 }
3560 else if (fmt[i] == 'E')
3561 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3562 if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
3563 return tem;
3564 }
3565
3566 return 0;
3567 }
3568
3569 /* Subroutine of update_flow_info. Determines whether any new REG_NOTEs are
3570 needed for the hard register mentioned in the note. This can happen
3571 if the reference to the hard register in the original insn was split into
3572 several smaller hard register references in the split insns. */
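
/* A hypothetical illustration: a REG_DEAD note for a two-word hard
   register pair (reg:DI 2) whose setter is split into one insn using
   (reg:SI 2) and another using (reg:SI 3) must itself be replaced by
   a REG_DEAD note on each insn that contains the last reference to
   the corresponding single-word register.  */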
3573
3574 static void
3575 split_hard_reg_notes (note, first, last)
3576 rtx note, first, last;
3577 {
3578 rtx reg, temp, link;
3579 int n_regs, i, new_reg;
3580 rtx insn;
3581
3582 /* Assume that this is a REG_DEAD note. */
3583 if (REG_NOTE_KIND (note) != REG_DEAD)
3584 abort ();
3585
3586 reg = XEXP (note, 0);
3587
3588 n_regs = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
3589
3590 for (i = 0; i < n_regs; i++)
3591 {
3592 new_reg = REGNO (reg) + i;
3593
3594 /* Check for references to new_reg in the split insns. */
3595 for (insn = last; ; insn = PREV_INSN (insn))
3596 {
3597 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3598 && (temp = regno_use_in (new_reg, PATTERN (insn))))
3599 {
3600 /* Create a new reg dead note here. */
3601 link = rtx_alloc (EXPR_LIST);
3602 PUT_REG_NOTE_KIND (link, REG_DEAD);
3603 XEXP (link, 0) = temp;
3604 XEXP (link, 1) = REG_NOTES (insn);
3605 REG_NOTES (insn) = link;
3606
3607 /* If multiple registers are killed here, advance I past the extra hard registers. */
3608 i += HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) - 1;
3609
3610 break;
3611 }
3612 /* It isn't mentioned anywhere, so no new reg note is needed for
3613 this register. */
3614 if (insn == first)
3615 break;
3616 }
3617 }
3618 }
3619
3620 /* Subroutine of update_flow_info. Determines whether a SET or CLOBBER in an
3621 insn created by splitting needs a REG_DEAD or REG_UNUSED note added. */
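
/* For example (hypothetical): a splitter that introduces a scratch
   (set (reg:SI 70) ...) consumed by a later split insn requires a new
   REG_DEAD note for register 70 on the last insn that uses it, while
   a CLOBBER of a register that is never used afterwards gets a
   REG_UNUSED note.  */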
3622
3623 static void
3624 new_insn_dead_notes (pat, insn, last, orig_insn)
3625 rtx pat, insn, last, orig_insn;
3626 {
3627 rtx dest, tem, set;
3628
3629 /* PAT is either a CLOBBER or a SET here. */
3630 dest = XEXP (pat, 0);
3631
3632 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
3633 || GET_CODE (dest) == STRICT_LOW_PART
3634 || GET_CODE (dest) == SIGN_EXTRACT)
3635 dest = XEXP (dest, 0);
3636
3637 if (GET_CODE (dest) == REG)
3638 {
3639 /* If the original insn already used this register, we may not add new
3640 notes for it. One example for a split that needs this test is
3641 when a multi-word memory access with register-indirect addressing
3642 is split into multiple memory accesses with auto-increment and
3643 one adjusting add instruction for the address register. */
3644 if (reg_referenced_p (dest, PATTERN (orig_insn)))
3645 return;
3646 for (tem = last; tem != insn; tem = PREV_INSN (tem))
3647 {
3648 if (GET_RTX_CLASS (GET_CODE (tem)) == 'i'
3649 && reg_overlap_mentioned_p (dest, PATTERN (tem))
3650 && (set = single_set (tem)))
3651 {
3652 rtx tem_dest = SET_DEST (set);
3653
3654 while (GET_CODE (tem_dest) == ZERO_EXTRACT
3655 || GET_CODE (tem_dest) == SUBREG
3656 || GET_CODE (tem_dest) == STRICT_LOW_PART
3657 || GET_CODE (tem_dest) == SIGN_EXTRACT)
3658 tem_dest = XEXP (tem_dest, 0);
3659
3660 if (! rtx_equal_p (tem_dest, dest))
3661 {
3662 /* Use the same scheme as combine.c: don't put both REG_DEAD
3663 and REG_UNUSED notes on the same insn. */
3664 if (! find_regno_note (tem, REG_UNUSED, REGNO (dest))
3665 && ! find_regno_note (tem, REG_DEAD, REGNO (dest)))
3666 {
3667 rtx note = rtx_alloc (EXPR_LIST);
3668 PUT_REG_NOTE_KIND (note, REG_DEAD);
3669 XEXP (note, 0) = dest;
3670 XEXP (note, 1) = REG_NOTES (tem);
3671 REG_NOTES (tem) = note;
3672 }
3673 /* The reg only dies in one insn, the last one that uses
3674 it. */
3675 break;
3676 }
3677 else if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
3678 /* We found an instruction that both uses the register,
3679 and sets it, so no new REG_NOTE is needed for this set. */
3680 break;
3681 }
3682 }
3683 /* If this is a SET, the register must die somewhere, unless it is
3684 the dest of the original insn, and hence is live after the original
3685 insn. Abort if it isn't supposed to be live after the original insn.
3686
3687 If this is a CLOBBER, just add a REG_UNUSED note. */
3688 if (tem == insn)
3689 {
3690 int live_after_orig_insn = 0;
3691 rtx pattern = PATTERN (orig_insn);
3692 int i;
3693
3694 if (GET_CODE (pat) == CLOBBER)
3695 {
3696 rtx note = rtx_alloc (EXPR_LIST);
3697 PUT_REG_NOTE_KIND (note, REG_UNUSED);
3698 XEXP (note, 0) = dest;
3699 XEXP (note, 1) = REG_NOTES (insn);
3700 REG_NOTES (insn) = note;
3701 return;
3702 }
3703
3704 /* The original insn could have multiple sets, so search the
3705 insn for all sets. */
3706 if (GET_CODE (pattern) == SET)
3707 {
3708 if (reg_overlap_mentioned_p (dest, SET_DEST (pattern)))
3709 live_after_orig_insn = 1;
3710 }
3711 else if (GET_CODE (pattern) == PARALLEL)
3712 {
3713 for (i = 0; i < XVECLEN (pattern, 0); i++)
3714 if (GET_CODE (XVECEXP (pattern, 0, i)) == SET
3715 && reg_overlap_mentioned_p (dest,
3716 SET_DEST (XVECEXP (pattern,
3717 0, i))))
3718 live_after_orig_insn = 1;
3719 }
3720
3721 if (! live_after_orig_insn)
3722 abort ();
3723 }
3724 }
3725 }
3726
3727 /* Subroutine of update_flow_info. Update the value of reg_n_sets for all
3728 registers modified by X. INC is -1 if the containing insn is being deleted,
3729 and is 1 if the containing insn is a newly generated insn. */
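
/* For instance (hypothetically), deleting a setter of the two-word
   hard register pair starting at register 2 decrements both
   REG_N_SETS (2) and REG_N_SETS (3); for a pseudo, only its own
   count is adjusted.  */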
3730
3731 static void
3732 update_n_sets (x, inc)
3733 rtx x;
3734 int inc;
3735 {
3736 rtx dest = SET_DEST (x);
3737
3738 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
3739 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
3740 dest = XEXP (dest, 0);
3741
3742 if (GET_CODE (dest) == REG)
3743 {
3744 int regno = REGNO (dest);
3745
3746 if (regno < FIRST_PSEUDO_REGISTER)
3747 {
3748 register int i;
3749 int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (dest));
3750
3751 for (i = regno; i < endregno; i++)
3752 REG_N_SETS (i) += inc;
3753 }
3754 else
3755 REG_N_SETS (regno) += inc;
3756 }
3757 }
3758
3759 /* Updates all flow-analysis related quantities (including REG_NOTES) for
3760 the insns from FIRST to LAST inclusive that were created by splitting
3761 ORIG_INSN. NOTES are the original REG_NOTES. */
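
/* E.g. (a hypothetical illustration): a REG_DEAD note from ORIG_INSN
   migrates to the last new insn that mentions its register, a
   REG_WAS_0 note to the first new insn that sets the original
   destination, and a REG_LIBCALL or REG_RETVAL note to the first or
   last new insn respectively.  */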
3762
3763 static void
3764 update_flow_info (notes, first, last, orig_insn)
3765 rtx notes;
3766 rtx first, last;
3767 rtx orig_insn;
3768 {
3769 rtx insn, note;
3770 rtx next;
3771 rtx orig_dest, temp;
3772 rtx set;
3773
3774 /* Get and save the destination set by the original insn. */
3775
3776 orig_dest = single_set (orig_insn);
3777 if (orig_dest)
3778 orig_dest = SET_DEST (orig_dest);
3779
3780 /* Move REG_NOTES from the original insn to where they now belong. */
3781
3782 for (note = notes; note; note = next)
3783 {
3784 next = XEXP (note, 1);
3785 switch (REG_NOTE_KIND (note))
3786 {
3787 case REG_DEAD:
3788 case REG_UNUSED:
3789 /* Move these notes from the original insn to the last new insn that
3790 mentions the register. */
3791
3792 for (insn = last; ; insn = PREV_INSN (insn))
3793 {
3794 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3795 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
3796 {
3797 /* If this note refers to a multiple word hard register, it
3798 may have been split into several smaller hard register
3799 references, so handle it specially. */
3800 temp = XEXP (note, 0);
3801 if (REG_NOTE_KIND (note) == REG_DEAD
3802 && GET_CODE (temp) == REG
3803 && REGNO (temp) < FIRST_PSEUDO_REGISTER
3804 && HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) > 1)
3805 split_hard_reg_notes (note, first, last);
3806 else
3807 {
3808 XEXP (note, 1) = REG_NOTES (insn);
3809 REG_NOTES (insn) = note;
3810 }
3811
3812 /* Sometimes need to convert REG_UNUSED notes to REG_DEAD
3813 notes. */
3814 /* ??? This won't handle multiple word registers correctly,
3815 but should be good enough for now. */
3816 if (REG_NOTE_KIND (note) == REG_UNUSED
3817 && GET_CODE (XEXP (note, 0)) != SCRATCH
3818 && ! dead_or_set_p (insn, XEXP (note, 0)))
3819 PUT_REG_NOTE_KIND (note, REG_DEAD);
3820
3821 /* The reg only dies in one insn, the last one that uses
3822 it. */
3823 break;
3824 }
3825 /* It must die somewhere; fail if we couldn't find where it died.
3826
3827 If this is a REG_UNUSED note, then it must be a temporary
3828 register that was not needed by this instantiation of the
3829 pattern, so we can safely ignore it. */
3830 if (insn == first)
3831 {
3832 if (REG_NOTE_KIND (note) != REG_UNUSED)
3833 abort ();
3834
3835 break;
3836 }
3837 }
3838 break;
3839
3840 case REG_WAS_0:
3841 /* If the insn that set the register to 0 was deleted, this
3842 note cannot be relied on any longer. The destination might
3843 even have been moved to memory.
3844 This was observed for SH4 when compiling execute/920501-6.c with
3845 -O2 -fomit-frame-pointer -finline-functions. */
3846 if (GET_CODE (XEXP (note, 0)) == NOTE
3847 || INSN_DELETED_P (XEXP (note, 0)))
3848 break;
3849 /* This note applies to the dest of the original insn. Find the
3850 first new insn that now has the same dest, and move the note
3851 there. */
3852
3853 if (! orig_dest)
3854 abort ();
3855
3856 for (insn = first; ; insn = NEXT_INSN (insn))
3857 {
3858 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3859 && (temp = single_set (insn))
3860 && rtx_equal_p (SET_DEST (temp), orig_dest))
3861 {
3862 XEXP (note, 1) = REG_NOTES (insn);
3863 REG_NOTES (insn) = note;
3864 /* The reg is only zero before one insn, the first new insn
3865 that sets it. */
3866 break;
3867 }
3868 /* If this note refers to a multiple word hard
3869 register, it may have been split into several smaller
3870 hard register references. We could split the notes,
3871 but simply dropping them is good enough. */
3872 if (GET_CODE (orig_dest) == REG
3873 && REGNO (orig_dest) < FIRST_PSEUDO_REGISTER
3874 && HARD_REGNO_NREGS (REGNO (orig_dest),
3875 GET_MODE (orig_dest)) > 1)
3876 break;
3877 /* It must be set somewhere, fail if we couldn't find where it
3878 was set. */
3879 if (insn == last)
3880 abort ();
3881 }
3882 break;
3883
3884 case REG_EQUAL:
3885 case REG_EQUIV:
3886 /* A REG_EQUIV or REG_EQUAL note on an insn with more than one
3887 set is meaningless. Just drop the note. */
3888 if (! orig_dest)
3889 break;
3890
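/* Otherwise fall through and place the note like a REG_NO_CONFLICT
   note, on the last new insn with the matching destination.  */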
3891 case REG_NO_CONFLICT:
3892 /* These notes apply to the dest of the original insn. Find the last
3893 new insn that now has the same dest, and move the note there. */
3894
3895 if (! orig_dest)
3896 abort ();
3897
3898 for (insn = last; ; insn = PREV_INSN (insn))
3899 {
3900 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3901 && (temp = single_set (insn))
3902 && rtx_equal_p (SET_DEST (temp), orig_dest))
3903 {
3904 XEXP (note, 1) = REG_NOTES (insn);
3905 REG_NOTES (insn) = note;
3906 /* Only put this note on one of the new insns. */
3907 break;
3908 }
3909
3910 /* The original dest must still be set someplace. Abort if we
3911 couldn't find it. */
3912 if (insn == first)
3913 {
3914 /* However, if this note refers to a multiple word hard
3915 register, it may have been split into several smaller
3916 hard register references. We could split the notes,
3917 but simply dropping them is good enough. */
3918 if (GET_CODE (orig_dest) == REG
3919 && REGNO (orig_dest) < FIRST_PSEUDO_REGISTER
3920 && HARD_REGNO_NREGS (REGNO (orig_dest),
3921 GET_MODE (orig_dest)) > 1)
3922 break;
3923 /* Likewise for multi-word memory references. */
3924 if (GET_CODE (orig_dest) == MEM
3925 && SIZE_FOR_MODE (orig_dest) > MOVE_MAX)
3926 break;
3927 abort ();
3928 }
3929 }
3930 break;
3931
3932 case REG_LIBCALL:
3933 /* Move a REG_LIBCALL note to the first insn created, and update
3934 the corresponding REG_RETVAL note. */
3935 XEXP (note, 1) = REG_NOTES (first);
3936 REG_NOTES (first) = note;
3937
3938 insn = XEXP (note, 0);
3939 note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
3940 if (note)
3941 XEXP (note, 0) = first;
3942 break;
3943
3944 case REG_EXEC_COUNT:
3945 /* Move a REG_EXEC_COUNT note to the first insn created. */
3946 XEXP (note, 1) = REG_NOTES (first);
3947 REG_NOTES (first) = note;
3948 break;
3949
3950 case REG_RETVAL:
3951 /* Move a REG_RETVAL note to the last insn created, and update
3952 the corresponding REG_LIBCALL note. */
3953 XEXP (note, 1) = REG_NOTES (last);
3954 REG_NOTES (last) = note;
3955
3956 insn = XEXP (note, 0);
3957 note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
3958 if (note)
3959 XEXP (note, 0) = last;
3960 break;
3961
3962 case REG_NONNEG:
3963 case REG_BR_PROB:
3964 /* This should be moved to whichever instruction is a JUMP_INSN. */
3965
3966 for (insn = last; ; insn = PREV_INSN (insn))
3967 {
3968 if (GET_CODE (insn) == JUMP_INSN)
3969 {
3970 XEXP (note, 1) = REG_NOTES (insn);
3971 REG_NOTES (insn) = note;
3972 /* Only put this note on one of the new insns. */
3973 break;
3974 }
3975 /* Fail if we couldn't find a JUMP_INSN. */
3976 if (insn == first)
3977 abort ();
3978 }
3979 break;
3980
3981 case REG_INC:
3982 /* reload sometimes leaves obsolete REG_INC notes around. */
3983 if (reload_completed)
3984 break;
3985 /* This should be moved to whichever instruction now has the
3986 increment operation. */
3987 abort ();
3988
3989 case REG_LABEL:
3990 /* Should be moved to the new insn(s) which use the label. */
3991 for (insn = first; insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
3992 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3993 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
3994 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL,
3995 XEXP (note, 0),
3996 REG_NOTES (insn));
3997 break;
3998
3999 case REG_CC_SETTER:
4000 case REG_CC_USER:
4001 /* These two notes will never appear until after reorg, so we don't
4002 have to handle them here. */
4003 default:
4004 abort ();
4005 }
4006 }
4007
4008 /* Each new insn created, except the last, has a new set. If the destination
4009 is a register, then this reg is now live across several insns, whereas
4010 previously the dest reg was born and died within the same insn. To
4011 reflect this, we now need a REG_DEAD note on the insn where this
4012 dest reg dies.
4013
4014 Similarly, the new insns may have clobbers that need REG_UNUSED notes. */
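
/* Hypothetical example: splitting
   (set (reg 60) (plus (mult ...) ...)) into
   (set (reg 70) (mult ...)) followed by
   (set (reg 60) (plus (reg 70) ...)) makes register 70 live across
   the pair, so the second insn needs a REG_DEAD note for it.  */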
4015
4016 for (insn = first; insn != last; insn = NEXT_INSN (insn))
4017 {
4018 rtx pat;
4019 int i;
4020
4021 pat = PATTERN (insn);
4022 if (GET_CODE (pat) == SET || GET_CODE (pat) == CLOBBER)
4023 new_insn_dead_notes (pat, insn, last, orig_insn);
4024 else if (GET_CODE (pat) == PARALLEL)
4025 {
4026 for (i = 0; i < XVECLEN (pat, 0); i++)
4027 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
4028 || GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER)
4029 new_insn_dead_notes (XVECEXP (pat, 0, i), insn, last, orig_insn);
4030 }
4031 }
4032
4033 /* If any insn, except the last, uses the register set by the last insn,
4034 then we need a new REG_DEAD note on that insn. In this case, there
4035 would not have been a REG_DEAD note for this register in the original
4036 insn because it was used and set within one insn. */
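
/* Hypothetical illustration: (set (reg 60) (plus (reg 60) (const_int 1)))
   carried no REG_DEAD note for register 60 because it both used and set
   it. Split into (set (reg 70) (plus (reg 60) (const_int 1))) followed
   by (set (reg 60) (reg 70)), the old value of register 60 now dies in
   the first insn, which needs a new REG_DEAD note.  */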
4037
4038 set = single_set (last);
4039 if (set)
4040 {
4041 rtx dest = SET_DEST (set);
4042
4043 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
4044 || GET_CODE (dest) == STRICT_LOW_PART
4045 || GET_CODE (dest) == SIGN_EXTRACT)
4046 dest = XEXP (dest, 0);
4047
4048 if (GET_CODE (dest) == REG
4049 /* Global registers are always live, so the code below does not
4050 apply to them. */
4051 && (REGNO (dest) >= FIRST_PSEUDO_REGISTER
4052 || ! global_regs[REGNO (dest)]))
4053 {
4054 rtx stop_insn = PREV_INSN (first);
4055
4056 /* If the last insn uses the register that it is setting, then
4057 we don't want to put a REG_DEAD note there. Search backwards
4058 to find the first insn that sets but does not use DEST. */
4059
4060 insn = last;
4061 if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
4062 {
4063 for (insn = PREV_INSN (insn); insn != first;
4064 insn = PREV_INSN (insn))
4065 {
4066 if ((set = single_set (insn))
4067 && reg_mentioned_p (dest, SET_DEST (set))
4068 && ! reg_overlap_mentioned_p (dest, SET_SRC (set)))
4069 break;
4070 }
4071 }
4072
4073 /* Now find the first insn that uses but does not set DEST. */
4074
4075 for (insn = PREV_INSN (insn); insn != stop_insn;
4076 insn = PREV_INSN (insn))
4077 {
4078 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4079 && reg_mentioned_p (dest, PATTERN (insn))
4080 && (set = single_set (insn)))
4081 {
4082 rtx insn_dest = SET_DEST (set);
4083
4084 while (GET_CODE (insn_dest) == ZERO_EXTRACT
4085 || GET_CODE (insn_dest) == SUBREG
4086 || GET_CODE (insn_dest) == STRICT_LOW_PART
4087 || GET_CODE (insn_dest) == SIGN_EXTRACT)
4088 insn_dest = XEXP (insn_dest, 0);
4089
4090 if (insn_dest != dest)
4091 {
4092 note = rtx_alloc (EXPR_LIST);
4093 PUT_REG_NOTE_KIND (note, REG_DEAD);
4094 XEXP (note, 0) = dest;
4095 XEXP (note, 1) = REG_NOTES (insn);
4096 REG_NOTES (insn) = note;
4097 /* The reg only dies in one insn, the last one
4098 that uses it. */
4099 break;
4100 }
4101 }
4102 }
4103 }
4104 }
4105
4106 /* If the original dest is modifying a multiple register target, and the
4107 original instruction was split such that the original dest is now set
4108 by two or more SUBREG sets, then the split insns no longer kill the
4109 destination of the original insn.
4110
4111 In this case, if there exists an instruction in the same basic block,
4112 before the split insn, which uses the original dest, and this use is
4113 killed by the original insn, then we must remove the REG_DEAD note on
4114 this insn, because it is now superfluous.
4115
4116 This does not apply when a hard register gets split, because the code
4117 knows how to handle overlapping hard registers properly. */
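
/* Hypothetical illustration: if (set (reg:DI 100) ...) is split into
   sets of (subreg:SI (reg:DI 100) 0) and (subreg:SI (reg:DI 100) 1),
   neither new insn kills the full register, so a REG_DEAD note for
   (reg:DI 100) on an earlier insn in the block is now superfluous
   and must be removed.  */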
4118 if (orig_dest && GET_CODE (orig_dest) == REG)
4119 {
4120 int found_orig_dest = 0;
4121 int found_split_dest = 0;
4122
4123 for (insn = first; ; insn = NEXT_INSN (insn))
4124 {
4125 rtx pat = PATTERN (insn);
4126 int i = GET_CODE (pat) == PARALLEL ? XVECLEN (pat, 0) : 0;
4127 set = pat;
4128 for (;;)
4129 {
4130 if (GET_CODE (set) == SET)
4131 {
4132 if (GET_CODE (SET_DEST (set)) == REG
4133 && REGNO (SET_DEST (set)) == REGNO (orig_dest))
4134 {
4135 found_orig_dest = 1;
4136 break;
4137 }
4138 else if (GET_CODE (SET_DEST (set)) == SUBREG
4139 && SUBREG_REG (SET_DEST (set)) == orig_dest)
4140 {
4141 found_split_dest = 1;
4142 break;
4143 }
4144 }
4145 if (--i < 0)
4146 break;
4147 set = XVECEXP (pat, 0, i);
4148 }
4149
4150 if (insn == last)
4151 break;
4152 }
4153
4154 if (found_split_dest)
4155 {
4156 /* Search backwards from FIRST, looking for the first insn that uses
4157 the original dest. Stop if we pass a CODE_LABEL or a JUMP_INSN.
4158 If we find an insn, and it has a REG_DEAD note, then delete the
4159 note. */
4160
4161 for (insn = first; insn; insn = PREV_INSN (insn))
4162 {
4163 if (GET_CODE (insn) == CODE_LABEL
4164 || GET_CODE (insn) == JUMP_INSN)
4165 break;
4166 else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4167 && reg_mentioned_p (orig_dest, insn))
4168 {
4169 note = find_regno_note (insn, REG_DEAD, REGNO (orig_dest));
4170 if (note)
4171 remove_note (insn, note);
4172 }
4173 }
4174 }
4175 else if (! found_orig_dest)
4176 {
4177 int i, regno;
4178
4179 /* Should never reach here for a pseudo reg. */
4180 if (REGNO (orig_dest) >= FIRST_PSEUDO_REGISTER)
4181 abort ();
4182
4183 /* This can happen for a hard register, if the splitter
4184 does not bother to emit instructions which would be no-ops.
4185 We try to verify that this is the case by checking to see if
4186 the original instruction uses all of the registers that it
4187 set. This case is OK, because deleting a no-op cannot affect
4188 REG_DEAD notes on other insns. If this is not the case, then
4189 abort. */
4190
4191 regno = REGNO (orig_dest);
4192 for (i = HARD_REGNO_NREGS (regno, GET_MODE (orig_dest)) - 1;
4193 i >= 0; i--)
4194 if (! refers_to_regno_p (regno + i, regno + i + 1, orig_insn,
4195 NULL_PTR))
4196 break;
4197 if (i >= 0)
4198 abort ();
4199 }
4200 }
4201
4202 /* Update reg_n_sets. This is necessary to prevent local alloc from
4203 converting REG_EQUAL notes to REG_EQUIV when splitting has changed
4204 a reg from being set once to being set multiple times. */
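
/* E.g. (hypothetically) a pseudo that was set exactly once, but whose
   setter was split in two, now has REG_N_SETS equal to 2, which stops
   local-alloc from promoting a REG_EQUAL note on it to REG_EQUIV.  */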
4205
4206 {
4207 rtx x = PATTERN (orig_insn);
4208 RTX_CODE code = GET_CODE (x);
4209
4210 if (code == SET || code == CLOBBER)
4211 update_n_sets (x, -1);
4212 else if (code == PARALLEL)
4213 {
4214 int i;
4215 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
4216 {
4217 code = GET_CODE (XVECEXP (x, 0, i));
4218 if (code == SET || code == CLOBBER)
4219 update_n_sets (XVECEXP (x, 0, i), -1);
4220 }
4221 }
4222
4223 for (insn = first; ; insn = NEXT_INSN (insn))
4224 {
4225 x = PATTERN (insn);
4226 code = GET_CODE (x);
4227
4228 if (code == SET || code == CLOBBER)
4229 update_n_sets (x, 1);
4230 else if (code == PARALLEL)
4231 {
4232 int i;
4233 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
4234 {
4235 code = GET_CODE (XVECEXP (x, 0, i));
4236 if (code == SET || code == CLOBBER)
4237 update_n_sets (XVECEXP (x, 0, i), 1);
4238 }
4239 }
4240
4241 if (insn == last)
4242 break;
4243 }
4244 }
4245 }
4246
4247 /* The one entry point in this file. DUMP_FILE is the dump file for
4248 this pass. */
4249
4250 void
4251 schedule_insns (dump_file)
4252 FILE *dump_file;
4253 {
4254 int max_uid = MAX_INSNS_PER_SPLIT * (get_max_uid () + 1);
4255 int b;
4256 rtx insn;
4257
4258 /* Taking care of this degenerate case makes the rest of
4259 this code simpler. */
4260 if (n_basic_blocks == 0)
4261 return;
4262
4263 /* Create an insn here so that we can hang dependencies off of it later. */
4264 sched_before_next_call
4265 = gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
4266 NULL_RTX, 0, NULL_RTX, NULL_RTX);
4267
4268 /* Initialize the unused_*_lists. We can't use the ones left over from
4269 the previous function, because gcc has freed that memory. We can use
4270 the ones left over from the first sched pass in the second pass however,
4271 so only clear them on the first sched pass. The first pass is before
4272 reload if flag_schedule_insns is set, otherwise it is afterwards. */
4273
4274 if (reload_completed == 0 || ! flag_schedule_insns)
4275 {
4276 unused_insn_list = 0;
4277 unused_expr_list = 0;
4278 }
4279
4280 /* We create no insns here, only reorder them, so we
4281 remember how far we can cut back the stack on exit. */
4282
4283 /* Allocate data for this pass. See comments, above,
4284 for what these vectors do.
4285
4286 We use xmalloc instead of alloca, because max_uid can be very large
4287 when there is a lot of function inlining. If we used alloca, we could
4288 exceed stack limits on some hosts for some inputs. */
4289 insn_luid = (int *) xmalloc (max_uid * sizeof (int));
4290 insn_priority = (int *) xmalloc (max_uid * sizeof (int));
4291 insn_tick = (int *) xmalloc (max_uid * sizeof (int));
4292 insn_costs = (short *) xmalloc (max_uid * sizeof (short));
4293 insn_units = (short *) xmalloc (max_uid * sizeof (short));
4294 insn_blockage = (unsigned int *) xmalloc (max_uid * sizeof (unsigned int));
4295 insn_ref_count = (int *) xmalloc (max_uid * sizeof (int));
4296
4297 if (reload_completed == 0)
4298 {
4299 sched_reg_n_calls_crossed = (int *) alloca (max_regno * sizeof (int));
4300 sched_reg_live_length = (int *) alloca (max_regno * sizeof (int));
4301 bb_dead_regs = ALLOCA_REG_SET ();
4302 bb_live_regs = ALLOCA_REG_SET ();
4303 bzero ((char *) sched_reg_n_calls_crossed, max_regno * sizeof (int));
4304 bzero ((char *) sched_reg_live_length, max_regno * sizeof (int));
4305 }
4306 else
4307 {
4308 sched_reg_n_calls_crossed = 0;
4309 sched_reg_live_length = 0;
4310 bb_dead_regs = 0;
4311 bb_live_regs = 0;
4312 }
4313 init_alias_analysis ();
4314
4315 if (write_symbols != NO_DEBUG)
4316 {
4317 rtx line;
4318
4319 line_note = (rtx *) xmalloc (max_uid * sizeof (rtx));
4320 bzero ((char *) line_note, max_uid * sizeof (rtx));
4321 line_note_head = (rtx *) alloca (n_basic_blocks * sizeof (rtx));
4322 bzero ((char *) line_note_head, n_basic_blocks * sizeof (rtx));
4323
4324 /* Determine the line-number at the start of each basic block.
4325 This must be computed and saved now, because after a basic block's
4326 predecessor has been scheduled, it is impossible to accurately
4327 determine the correct line number for the first insn of the block. */
4328
4329 for (b = 0; b < n_basic_blocks; b++)
4330 for (line = BLOCK_HEAD (b); line; line = PREV_INSN (line))
4331 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
4332 {
4333 line_note_head[b] = line;
4334 break;
4335 }
4336 }
4337
4338 bzero ((char *) insn_luid, max_uid * sizeof (int));
4339 bzero ((char *) insn_priority, max_uid * sizeof (int));
4340 bzero ((char *) insn_tick, max_uid * sizeof (int));
4341 bzero ((char *) insn_costs, max_uid * sizeof (short));
4342 bzero ((char *) insn_units, max_uid * sizeof (short));
4343 bzero ((char *) insn_blockage, max_uid * sizeof (unsigned int));
4344 bzero ((char *) insn_ref_count, max_uid * sizeof (int));
4345
4346 /* Schedule each basic block, block by block. */
4347
4348 /* ??? Add a NOTE after the last insn of the last basic block. It is not
4349 known why this is done. */
4350 /* ??? Perhaps it's done to ensure NEXT_TAIL in schedule_block is a
4351 valid insn. */
4352
4353 insn = BLOCK_END (n_basic_blocks - 1);
4354 if (NEXT_INSN (insn) == 0
4355 || (GET_CODE (insn) != NOTE
4356 && GET_CODE (insn) != CODE_LABEL
4357 /* Don't emit a NOTE if it would end up between an unconditional
4358 jump and a BARRIER. */
4359 && ! (GET_CODE (insn) == JUMP_INSN
4360 && GET_CODE (NEXT_INSN (insn)) == BARRIER)))
4361 emit_note_after (NOTE_INSN_DELETED, BLOCK_END (n_basic_blocks - 1));
4362
4363 for (b = 0; b < n_basic_blocks; b++)
4364 {
4365 rtx insn, next;
4366
4367 note_list = 0;
4368
4369 for (insn = BLOCK_HEAD (b); ; insn = next)
4370 {
4371 rtx prev;
4372 rtx set;
4373
4374 /* Can't use `next_real_insn' because that
4375 might go across CODE_LABELS and short-out basic blocks. */
4376 next = NEXT_INSN (insn);
4377 if (GET_CODE (insn) != INSN)
4378 {
4379 if (insn == BLOCK_END (b))
4380 break;
4381
4382 continue;
4383 }
4384
4385 /* Don't split no-op move insns. These should silently disappear
4386 later in final. Splitting such insns would break the code
4387 that handles REG_NO_CONFLICT blocks. */
4388 set = single_set (insn);
4389 if (set && rtx_equal_p (SET_SRC (set), SET_DEST (set)))
4390 {
4391 if (insn == BLOCK_END (b))
4392 break;
4393
4394 /* Nops get in the way while scheduling, so delete them now if
4395 register allocation has already been done. It is too risky
4396 to try to do this before register allocation, and there are
4397 unlikely to be very many nops then anyway. */
4398 if (reload_completed)
4399 {
4400 PUT_CODE (insn, NOTE);
4401 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
4402 NOTE_SOURCE_FILE (insn) = 0;
4403 }
4404
4405 continue;
4406 }
4407
4408 /* Split insns here to get max fine-grain parallelism. */
4409 prev = PREV_INSN (insn);
4410 /* It is probably not worthwhile to try to split again in the
4411 second pass. However, if flag_schedule_insns is not set,
4412 the first and only (if any) scheduling pass is after reload. */
4413 if (reload_completed == 0 || ! flag_schedule_insns)
4414 {
4415 rtx last, first = PREV_INSN (insn);
4416 rtx notes = REG_NOTES (insn);
4417
4418 last = try_split (PATTERN (insn), insn, 1);
4419 if (last != insn)
4420 {
4421 /* try_split returns the NOTE that INSN became. */
4422 first = NEXT_INSN (first);
4423 update_flow_info (notes, first, last, insn);
4424
4425 PUT_CODE (insn, NOTE);
4426 NOTE_SOURCE_FILE (insn) = 0;
4427 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
4428 if (insn == BLOCK_HEAD (b))
4429 BLOCK_HEAD (b) = first;
4430 if (insn == BLOCK_END (b))
4431 {
4432 BLOCK_END (b) = last;
4433 break;
4434 }
4435 }
4436 }
4437
4438 if (insn == BLOCK_END (b))
4439 break;
4440 }
4441
4442 schedule_block (b, dump_file);
4443
4444 #ifdef USE_C_ALLOCA
4445 alloca (0);
4446 #endif
4447 }
4448
4449 /* Reposition the prologue and epilogue notes in case we moved the
4450 prologue/epilogue insns. */
4451 if (reload_completed)
4452 reposition_prologue_and_epilogue_notes (get_insns ());
4453
4454 if (write_symbols != NO_DEBUG)
4455 {
4456 rtx line = 0;
4457 rtx insn = get_insns ();
4458 int active_insn = 0;
4459 int notes = 0;
4460
4461 /* Walk the insns deleting redundant line-number notes. Many of these
4462 were already present before scheduling. The remainder tend to occur
4463 at basic block boundaries. */
4464 for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
4465 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
4466 {
4467 /* If there are no active insns following, INSN is redundant. */
4468 if (active_insn == 0)
4469 {
4470 notes++;
4471 NOTE_SOURCE_FILE (insn) = 0;
4472 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
4473 }
4474 /* If the line number is unchanged, LINE is redundant. */
4475 else if (line
4476 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
4477 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn))
4478 {
4479 notes++;
4480 NOTE_SOURCE_FILE (line) = 0;
4481 NOTE_LINE_NUMBER (line) = NOTE_INSN_DELETED;
4482 line = insn;
4483 }
4484 else
4485 line = insn;
4486 active_insn = 0;
4487 }
4488 else if (! ((GET_CODE (insn) == NOTE
4489 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
4490 || (GET_CODE (insn) == INSN
4491 && (GET_CODE (PATTERN (insn)) == USE
4492 || GET_CODE (PATTERN (insn)) == CLOBBER))))
4493 active_insn++;
4494
4495 if (dump_file && notes)
4496 fprintf (dump_file, ";; deleted %d line-number notes\n", notes);
4497 }
4498
4499 if (reload_completed == 0)
4500 {
4501 int regno;
4502 for (regno = 0; regno < max_regno; regno++)
4503 if (sched_reg_live_length[regno])
4504 {
4505 if (dump_file)
4506 {
4507 if (REG_LIVE_LENGTH (regno) > sched_reg_live_length[regno])
4508 fprintf (dump_file,
4509 ";; register %d life shortened from %d to %d\n",
4510 regno, REG_LIVE_LENGTH (regno),
4511 sched_reg_live_length[regno]);
4512 /* Negative values are special; don't overwrite the current
4513 reg_live_length value if it is negative. */
4514 else if (REG_LIVE_LENGTH (regno) < sched_reg_live_length[regno]
4515 && REG_LIVE_LENGTH (regno) >= 0)
4516 fprintf (dump_file,
4517 ";; register %d life extended from %d to %d\n",
4518 regno, REG_LIVE_LENGTH (regno),
4519 sched_reg_live_length[regno]);
4520
4521 if (! REG_N_CALLS_CROSSED (regno)
4522 && sched_reg_n_calls_crossed[regno])
4523 fprintf (dump_file,
4524 ";; register %d now crosses calls\n", regno);
4525 else if (REG_N_CALLS_CROSSED (regno)
4526 && ! sched_reg_n_calls_crossed[regno]
4527 && REG_BASIC_BLOCK (regno) != REG_BLOCK_GLOBAL)
4528 fprintf (dump_file,
4529 ";; register %d no longer crosses calls\n", regno);
4530
4531 }
4532 /* Negative values are special; don't overwrite the current
4533 reg_live_length value if it is negative. */
4534 if (REG_LIVE_LENGTH (regno) >= 0)
4535 REG_LIVE_LENGTH (regno) = sched_reg_live_length[regno];
4536
4537 /* We can't change the value of reg_n_calls_crossed to zero for
4538 pseudos which are live in more than one block.
4539
4540 This is because combine might have made an optimization which
4541 invalidated basic_block_live_at_start and reg_n_calls_crossed,
4542 but it does not update them. If we update reg_n_calls_crossed
4543 here, the two variables are now inconsistent, and this might
4544 confuse the caller-save code into saving a register that doesn't
4545 need to be saved. This is only a problem when we zero calls
4546 crossed for a pseudo live in multiple basic blocks.
4547
4548 Alternatively, we could try to correctly update basic block live
4549 at start here in sched, but that seems complicated. */
4550 if (sched_reg_n_calls_crossed[regno]
4551 || REG_BASIC_BLOCK (regno) != REG_BLOCK_GLOBAL)
4552 REG_N_CALLS_CROSSED (regno) = sched_reg_n_calls_crossed[regno];
4553 }
4554 }
4555
4556 free (insn_luid);
4557 free (insn_priority);
4558 free (insn_tick);
4559 free (insn_costs);
4560 free (insn_units);
4561 free (insn_blockage);
4562 free (insn_ref_count);
4563
4564 if (write_symbols != NO_DEBUG)
4565 free (line_note);
4566
4567 if (reload_completed == 0)
4568 {
4569 FREE_REG_SET (bb_dead_regs);
4570 FREE_REG_SET (bb_live_regs);
4571 }
4572
4573 }
4574 #endif /* INSN_SCHEDULING */