gcc/sched-deps.c
1 /* Instruction scheduling pass. This file computes dependencies between
2 instructions.
3 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
6 and currently maintained by, Jim Wilson (wilson@cygnus.com).
7
8 This file is part of GNU CC.
9
10 GNU CC is free software; you can redistribute it and/or modify it
11 under the terms of the GNU General Public License as published by the
12 Free Software Foundation; either version 2, or (at your option) any
13 later version.
14
15 GNU CC is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GNU CC; see the file COPYING. If not, write to the Free
22 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
23 02111-1307, USA. */
24 \f
25 #include "config.h"
26 #include "system.h"
27 #include "toplev.h"
28 #include "rtl.h"
29 #include "tm_p.h"
30 #include "hard-reg-set.h"
31 #include "basic-block.h"
32 #include "regs.h"
33 #include "function.h"
34 #include "flags.h"
35 #include "insn-config.h"
36 #include "insn-attr.h"
37 #include "except.h"
38 #include "toplev.h"
39 #include "recog.h"
40 #include "sched-int.h"
41
42 extern char *reg_known_equiv_p;
43 extern rtx *reg_known_value;
44
45 static regset_head reg_pending_sets_head;
46 static regset_head reg_pending_clobbers_head;
47
48 static regset reg_pending_sets;
49 static regset reg_pending_clobbers;
50 static int reg_pending_sets_all;
51
52 /* To speed up the test for duplicate dependency links we keep a
53 record of dependencies created by add_dependence when the average
54 number of instructions in a basic block is very large.
55
56 Studies have shown that there are typically around 5 instructions between
57 branches in typical C code.  So we can guess that the average basic
58 block is approximately 5 instructions long; we will choose 100X the
59 average size as a very large basic block.
60
61 Each insn has associated bitmaps for its dependencies.  Each bitmap
62 has enough entries to represent a dependency on any other insn in
63 the insn chain.  If the bitmap cache for true dependencies is
64 allocated, then the other two caches are allocated as well.  */
65 static sbitmap *true_dependency_cache;
66 static sbitmap *anti_dependency_cache;
67 static sbitmap *output_dependency_cache;
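/* Each cache is a vector indexed by INSN_LUID: bit INSN_LUID (ELEM) in
   TRUE_DEPENDENCY_CACHE[INSN_LUID (INSN)] is set when INSN has a true
   dependence on ELEM, and likewise for the anti and output caches
   (see the cache updates in add_dependence below).  */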
68
69 /* To speed up checking the consistency of the forward insn
70 dependencies we build, we use the following cache.  An alternative
71 would be to skip the duplicate-insn check on forward dependencies
72 altogether.  */
73 #ifdef ENABLE_CHECKING
74 static sbitmap *forward_dependency_cache;
75 #endif
76
77 static int deps_may_trap_p PARAMS ((rtx));
78 static void remove_dependence PARAMS ((rtx, rtx));
79 static void set_sched_group_p PARAMS ((rtx));
80
81 static void flush_pending_lists PARAMS ((struct deps *, rtx, int));
82 static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
83 static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
84 static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
85 static rtx group_leader PARAMS ((rtx));
86
87 static rtx get_condition PARAMS ((rtx));
88 static int conditions_mutex_p PARAMS ((rtx, rtx));
89 \f
90 /* Return nonzero if a load of the memory reference MEM can cause a trap. */
91
92 static int
93 deps_may_trap_p (mem)
94 rtx mem;
95 {
96 rtx addr = XEXP (mem, 0);
97
98 if (REG_P (addr)
99 && REGNO (addr) >= FIRST_PSEUDO_REGISTER
100 && reg_known_value[REGNO (addr)])
101 addr = reg_known_value[REGNO (addr)];
102 return rtx_addr_can_trap_p (addr);
103 }
104 \f
105 /* Return the INSN_LIST containing INSN in LIST, or NULL
106 if LIST does not contain INSN. */
107
108 HAIFA_INLINE rtx
109 find_insn_list (insn, list)
110 rtx insn;
111 rtx list;
112 {
113 while (list)
114 {
115 if (XEXP (list, 0) == insn)
116 return list;
117 list = XEXP (list, 1);
118 }
119 return 0;
120 }
121
122 /* Return 1 if the pair (insn, x) is found in (LIST, LIST1), or 0
123 otherwise. */
124
125 HAIFA_INLINE int
126 find_insn_mem_list (insn, x, list, list1)
127 rtx insn, x;
128 rtx list, list1;
129 {
130 while (list)
131 {
132 if (XEXP (list, 0) == insn
133 && XEXP (list1, 0) == x)
134 return 1;
135 list = XEXP (list, 1);
136 list1 = XEXP (list1, 1);
137 }
138 return 0;
139 }
140 \f
141 /* Find the condition under which INSN is executed. */
142
143 static rtx
144 get_condition (insn)
145 rtx insn;
146 {
147 rtx pat = PATTERN (insn);
148 rtx cond;
149
150 if (pat == 0)
151 return 0;
152 if (GET_CODE (pat) == COND_EXEC)
153 return COND_EXEC_TEST (pat);
154 if (GET_CODE (insn) != JUMP_INSN)
155 return 0;
156 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
157 return 0;
158 if (GET_CODE (SET_SRC (pat)) != IF_THEN_ELSE)
159 return 0;
160 pat = SET_SRC (pat);
161 cond = XEXP (pat, 0);
162 if (GET_CODE (XEXP (pat, 1)) == LABEL_REF
163 && XEXP (pat, 2) == pc_rtx)
164 return cond;
165 else if (GET_CODE (XEXP (pat, 2)) == LABEL_REF
166 && XEXP (pat, 1) == pc_rtx)
167 return gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)), GET_MODE (cond),
168 XEXP (cond, 0), XEXP (cond, 1));
169 else
170 return 0;
171 }
172
173 /* Return nonzero if conditions COND1 and COND2 can never both be true.  */
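/* For example, (eq (reg 100) (const_int 0)) and (ne (reg 100) (const_int 0))
   can never both be true, so insns predicated on such a pair need no
   dependence between them (see the use in add_dependence).  */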
174
175 static int
176 conditions_mutex_p (cond1, cond2)
177 rtx cond1, cond2;
178 {
179 if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
180 && GET_RTX_CLASS (GET_CODE (cond2)) == '<'
181 && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2))
182 && XEXP (cond1, 0) == XEXP (cond2, 0)
183 && XEXP (cond1, 1) == XEXP (cond2, 1))
184 return 1;
185 return 0;
186 }
187 \f
188 /* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
189 LOG_LINKS of INSN, if not already there. DEP_TYPE indicates the type
190 of dependence that this link represents. */
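/* A DEP_TYPE of 0 denotes a true (data) dependence; REG_DEP_ANTI and
   REG_DEP_OUTPUT denote anti and output dependences respectively, as
   reflected in the cache updates below.  */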
191
192 void
193 add_dependence (insn, elem, dep_type)
194 rtx insn;
195 rtx elem;
196 enum reg_note dep_type;
197 {
198 rtx link, next;
199 int present_p;
200 rtx cond1, cond2;
201
202 /* Don't depend an insn on itself. */
203 if (insn == elem)
204 return;
205
206 /* We can get a dependency on deleted insns due to optimizations
207 during register allocation and reloading or due to splitting.  Any
208 such dependency is useless and can be ignored.  */
209 if (GET_CODE (elem) == NOTE)
210 return;
211
212 /* flow.c doesn't handle conditional lifetimes entirely correctly;
213 calls mess up the conditional lifetimes. */
214 if (GET_CODE (insn) != CALL_INSN && GET_CODE (elem) != CALL_INSN)
215 {
216 cond1 = get_condition (insn);
217 cond2 = get_condition (elem);
218 if (cond1 && cond2 && conditions_mutex_p (cond1, cond2))
219 return;
220 }
221
222 /* If elem is part of a sequence that must be scheduled together, then
223 make the dependence point to the last insn of the sequence.
224 When HAVE_cc0, it is possible for NOTEs to exist between users and
225 setters of the condition codes, so we must skip past notes here.
226 Otherwise, NOTEs are impossible here. */
227 next = next_nonnote_insn (elem);
228 if (next && SCHED_GROUP_P (next)
229 && GET_CODE (next) != CODE_LABEL)
230 {
231 /* Notes can in fact intervene here, which is why we used
232 next_nonnote_insn above and below.  We must also reject CODE_LABELs,
233 so that we don't get confused by one that has LABEL_PRESERVE_P set,
234 which is represented by the same bit in the rtl as SCHED_GROUP_P.
235 A CODE_LABEL can never be SCHED_GROUP_P.  */
238
239 rtx nnext;
240 while ((nnext = next_nonnote_insn (next)) != NULL
241 && SCHED_GROUP_P (nnext)
242 && GET_CODE (nnext) != CODE_LABEL)
243 next = nnext;
244
245 /* Again, don't depend an insn on itself. */
246 if (insn == next)
247 return;
248
249 /* Make the dependence to NEXT, the last insn of the group, instead
250 of the original ELEM. */
251 elem = next;
252 }
253
254 present_p = 1;
255 #ifdef INSN_SCHEDULING
256 /* ??? No good way to tell from here whether we're doing interblock
257 scheduling. Possibly add another callback. */
258 #if 0
259 /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
260 No need for interblock dependences with calls, since
261 calls are not moved between blocks. Note: the edge where
262 elem is a CALL is still required. */
263 if (GET_CODE (insn) == CALL_INSN
264 && (INSN_BB (elem) != INSN_BB (insn)))
265 return;
266 #endif
267
268 /* If we already have a dependency for ELEM, then we do not need to
269 do anything. Avoiding the list walk below can cut compile times
270 dramatically for some code. */
271 if (true_dependency_cache != NULL)
272 {
273 enum reg_note present_dep_type = 0;
274
275 if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
276 abort ();
277 if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
278 /* Do nothing (present_dep_type is already 0).  */
279 ;
280 else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
281 INSN_LUID (elem)))
282 present_dep_type = REG_DEP_ANTI;
283 else if (TEST_BIT (output_dependency_cache[INSN_LUID (insn)],
284 INSN_LUID (elem)))
285 present_dep_type = REG_DEP_OUTPUT;
286 else
287 present_p = 0;
288 if (present_p && (int) dep_type >= (int) present_dep_type)
289 return;
290 }
291 #endif
292
293 /* Check that we don't already have this dependence. */
294 if (present_p)
295 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
296 if (XEXP (link, 0) == elem)
297 {
298 #ifdef INSN_SCHEDULING
299 /* Clear corresponding cache entry because type of the link
300 may be changed. */
301 if (true_dependency_cache != NULL)
302 {
303 if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
304 RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
305 INSN_LUID (elem));
306 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
307 && output_dependency_cache)
308 RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
309 INSN_LUID (elem));
310 else
311 abort ();
312 }
313 #endif
314
315 /* If this is a more restrictive type of dependence than the existing
316 one, then change the existing dependence to this type. */
317 if ((int) dep_type < (int) REG_NOTE_KIND (link))
318 PUT_REG_NOTE_KIND (link, dep_type);
319
320 #ifdef INSN_SCHEDULING
321 /* Record the (possibly changed) kind of this existing dependence
322 in the bitmap caches of dependency information.  */
323 if (true_dependency_cache != NULL)
324 {
325 if ((int)REG_NOTE_KIND (link) == 0)
326 SET_BIT (true_dependency_cache[INSN_LUID (insn)],
327 INSN_LUID (elem));
328 else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
329 SET_BIT (anti_dependency_cache[INSN_LUID (insn)],
330 INSN_LUID (elem));
331 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
332 SET_BIT (output_dependency_cache[INSN_LUID (insn)],
333 INSN_LUID (elem));
334 }
335 #endif
336 return;
337 }
338 /* Might want to check one level of transitivity to save conses. */
339
340 link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
341 LOG_LINKS (insn) = link;
342
343 /* Insn dependency, not data dependency. */
344 PUT_REG_NOTE_KIND (link, dep_type);
345
346 #ifdef INSN_SCHEDULING
347 /* If we are adding a dependency to INSN's LOG_LINKs, then note that
348 in the bitmap caches of dependency information. */
349 if (true_dependency_cache != NULL)
350 {
351 if ((int)dep_type == 0)
352 SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
353 else if (dep_type == REG_DEP_ANTI)
354 SET_BIT (anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
355 else if (dep_type == REG_DEP_OUTPUT)
356 SET_BIT (output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
357 }
358 #endif
359 }
360
361 /* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
362 of INSN. Abort if not found. */
363
364 static void
365 remove_dependence (insn, elem)
366 rtx insn;
367 rtx elem;
368 {
369 rtx prev, link, next;
370 int found = 0;
371
372 for (prev = 0, link = LOG_LINKS (insn); link; link = next)
373 {
374 next = XEXP (link, 1);
375 if (XEXP (link, 0) == elem)
376 {
377 if (prev)
378 XEXP (prev, 1) = next;
379 else
380 LOG_LINKS (insn) = next;
381
382 #ifdef INSN_SCHEDULING
383 /* If we are removing a dependency from the LOG_LINKS list,
384 make sure to remove it from the cache too. */
385 if (true_dependency_cache != NULL)
386 {
387 if (REG_NOTE_KIND (link) == 0)
388 RESET_BIT (true_dependency_cache[INSN_LUID (insn)],
389 INSN_LUID (elem));
390 else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
391 RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
392 INSN_LUID (elem));
393 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
394 RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
395 INSN_LUID (elem));
396 }
397 #endif
398
399 free_INSN_LIST_node (link);
400
401 found = 1;
402 }
403 else
404 prev = link;
405 }
406
407 if (!found)
408 abort ();
409 return;
410 }
411
412 /* Return the insn which represents the SCHED_GROUP containing INSN,
413 i.e. the last insn in the group.  */
414
415 static rtx
416 group_leader (insn)
417 rtx insn;
418 {
419 rtx prev;
420
421 do
422 {
423 prev = insn;
424 insn = next_nonnote_insn (insn);
425 }
426 while (insn && SCHED_GROUP_P (insn) && (GET_CODE (insn) != CODE_LABEL));
427
428 return prev;
429 }
430
431 /* Set SCHED_GROUP_P and take care of the rest of the bookkeeping that
432 goes along with that.  */
433
434 static void
435 set_sched_group_p (insn)
436 rtx insn;
437 {
438 rtx link, prev;
439
440 SCHED_GROUP_P (insn) = 1;
441
442 /* There may be a note before this insn now, but all notes will
443 be removed before we actually try to schedule the insns, so
444 it won't cause a problem later. We must avoid it here though. */
445 prev = prev_nonnote_insn (insn);
446
447 /* Make a copy of all dependencies on the immediately previous insn,
448 and add them to this insn.  This is so that all of the dependencies
449 will apply to the group.  Remove an explicit dependence on the
450 previous insn, as SCHED_GROUP_P now represents it.  */
451
452 if (find_insn_list (prev, LOG_LINKS (insn)))
453 remove_dependence (insn, prev);
454
455 for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
456 add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
457 }
458 \f
459 /* Process an insn's memory dependencies. There are four kinds of
460 dependencies:
461
462 (0) read dependence: read follows read
463 (1) true dependence: read follows write
464 (2) anti dependence: write follows read
465 (3) output dependence: write follows write
466
467 We are careful to build only dependencies which actually exist, and
468 use transitivity to avoid building too many links. */
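/* For example, if insns A and B reference the same memory location:

     A reads,  B reads   -- (0) read dependence
     A writes, B reads   -- (1) true dependence
     A reads,  B writes  -- (2) anti dependence
     A writes, B writes  -- (3) output dependence

   In each case B (the later insn) gets a dependence on A.  */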
469
470 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
471 The MEM is a memory reference contained within INSN, which we are saving
472 so that we can do memory aliasing on it. */
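/* The two lists are kept in lock step: the I-th entry of *INSN_LIST
   pairs with the I-th entry of *MEM_LIST, which is how
   find_insn_mem_list walks them.  */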
473
474 void
475 add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
476 struct deps *deps;
477 rtx *insn_list, *mem_list, insn, mem;
478 {
479 register rtx link;
480
481 link = alloc_INSN_LIST (insn, *insn_list);
482 *insn_list = link;
483
484 link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
485 *mem_list = link;
486
487 deps->pending_lists_length++;
488 }
489
490 /* Make a dependency between every memory reference on the pending lists
491 and INSN, thus flushing the pending lists. If ONLY_WRITE, don't flush
492 the read list. */
493
494 static void
495 flush_pending_lists (deps, insn, only_write)
496 struct deps *deps;
497 rtx insn;
498 int only_write;
499 {
500 rtx u;
501 rtx link;
502
503 while (deps->pending_read_insns && ! only_write)
504 {
505 add_dependence (insn, XEXP (deps->pending_read_insns, 0),
506 REG_DEP_ANTI);
507
508 link = deps->pending_read_insns;
509 deps->pending_read_insns = XEXP (deps->pending_read_insns, 1);
510 free_INSN_LIST_node (link);
511
512 link = deps->pending_read_mems;
513 deps->pending_read_mems = XEXP (deps->pending_read_mems, 1);
514 free_EXPR_LIST_node (link);
515 }
516 while (deps->pending_write_insns)
517 {
518 add_dependence (insn, XEXP (deps->pending_write_insns, 0),
519 REG_DEP_ANTI);
520
521 link = deps->pending_write_insns;
522 deps->pending_write_insns = XEXP (deps->pending_write_insns, 1);
523 free_INSN_LIST_node (link);
524
525 link = deps->pending_write_mems;
526 deps->pending_write_mems = XEXP (deps->pending_write_mems, 1);
527 free_EXPR_LIST_node (link);
528 }
529 deps->pending_lists_length = 0;
530
531 /* last_pending_memory_flush is now a list of insns. */
532 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
533 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
534
535 free_INSN_LIST_list (&deps->last_pending_memory_flush);
536 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
537 }
538 \f
539 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
540 rtx, X, creating all dependencies generated by the write to the
541 destination of X, and reads of everything mentioned. */
542
543 static void
544 sched_analyze_1 (deps, x, insn)
545 struct deps *deps;
546 rtx x;
547 rtx insn;
548 {
549 register int regno;
550 register rtx dest = XEXP (x, 0);
551 enum rtx_code code = GET_CODE (x);
552
553 if (dest == 0)
554 return;
555
556 if (GET_CODE (dest) == PARALLEL)
557 {
558 register int i;
559
560 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
561 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
562 sched_analyze_1 (deps,
563 gen_rtx_CLOBBER (VOIDmode,
564 XEXP (XVECEXP (dest, 0, i), 0)),
565 insn);
566
567 if (GET_CODE (x) == SET)
568 sched_analyze_2 (deps, SET_SRC (x), insn);
569 return;
570 }
571
572 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
573 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
574 {
575 if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
576 {
577 /* The second and third arguments are values read by this insn. */
578 sched_analyze_2 (deps, XEXP (dest, 1), insn);
579 sched_analyze_2 (deps, XEXP (dest, 2), insn);
580 }
581 dest = XEXP (dest, 0);
582 }
583
584 if (GET_CODE (dest) == REG)
585 {
586 register int i;
587
588 regno = REGNO (dest);
589
590 /* A hard reg in a wide mode may really be multiple registers.
591 If so, mark all of them just like the first. */
592 if (regno < FIRST_PSEUDO_REGISTER)
593 {
594 i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
595 while (--i >= 0)
596 {
597 int r = regno + i;
598 rtx u;
599
600 for (u = deps->reg_last[r].uses; u; u = XEXP (u, 1))
601 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
602
603 for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
604 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
605
606 /* Clobbers need not be ordered with respect to one
607 another, but sets must be ordered with respect to a
608 pending clobber. */
609 if (code == SET)
610 {
611 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
612 free_INSN_LIST_list (&deps->reg_last[r].uses);
613 for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
614 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
615 SET_REGNO_REG_SET (reg_pending_sets, r);
616 }
617 else
618 SET_REGNO_REG_SET (reg_pending_clobbers, r);
619
620 /* Function calls clobber all call_used regs. */
621 if (global_regs[r]
622 || (code == SET
623 && TEST_HARD_REG_BIT (regs_invalidated_by_call, r)))
624 for (u = deps->last_function_call; u; u = XEXP (u, 1))
625 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
626 }
627 }
628 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
629 it does not reload. Ignore these as they have served their
630 purpose already. */
631 else if (regno >= deps->max_reg)
632 {
633 if (GET_CODE (PATTERN (insn)) != USE
634 && GET_CODE (PATTERN (insn)) != CLOBBER)
635 abort ();
636 }
637 else
638 {
639 rtx u;
640
641 for (u = deps->reg_last[regno].uses; u; u = XEXP (u, 1))
642 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
643
644 for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
645 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
646
647 if (code == SET)
648 {
649 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
650 free_INSN_LIST_list (&deps->reg_last[regno].uses);
651 for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
652 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
653 SET_REGNO_REG_SET (reg_pending_sets, regno);
654 }
655 else
656 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
657
658 /* Pseudos that are REG_EQUIV to something may be replaced
659 by that during reloading. We need only add dependencies for
660 the address in the REG_EQUIV note. */
661 if (!reload_completed
662 && reg_known_equiv_p[regno]
663 && GET_CODE (reg_known_value[regno]) == MEM)
664 sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);
665
666 /* Don't let it cross a call after scheduling if it doesn't
667 already cross one. */
668
669 if (REG_N_CALLS_CROSSED (regno) == 0)
670 for (u = deps->last_function_call; u; u = XEXP (u, 1))
671 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
672 }
673 }
674 else if (GET_CODE (dest) == MEM)
675 {
676 /* Writing memory. */
677
678 if (deps->pending_lists_length > 32)
679 {
680 /* Flush all pending reads and writes to prevent the pending lists
681 from getting any larger. Insn scheduling runs too slowly when
682 these lists get long. The number 32 was chosen because it
683 seems like a reasonable number. When compiling GCC with itself,
684 this flush occurs 8 times for sparc, and 10 times for m88k using
685 the number 32. */
686 flush_pending_lists (deps, insn, 0);
687 }
688 else
689 {
690 rtx u;
691 rtx pending, pending_mem;
692
693 pending = deps->pending_read_insns;
694 pending_mem = deps->pending_read_mems;
695 while (pending)
696 {
697 if (anti_dependence (XEXP (pending_mem, 0), dest))
698 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
699
700 pending = XEXP (pending, 1);
701 pending_mem = XEXP (pending_mem, 1);
702 }
703
704 pending = deps->pending_write_insns;
705 pending_mem = deps->pending_write_mems;
706 while (pending)
707 {
708 if (output_dependence (XEXP (pending_mem, 0), dest))
709 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
710
711 pending = XEXP (pending, 1);
712 pending_mem = XEXP (pending_mem, 1);
713 }
714
715 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
716 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
717
718 add_insn_mem_dependence (deps, &deps->pending_write_insns,
719 &deps->pending_write_mems, insn, dest);
720 }
721 sched_analyze_2 (deps, XEXP (dest, 0), insn);
722 }
723
724 /* Analyze reads. */
725 if (GET_CODE (x) == SET)
726 sched_analyze_2 (deps, SET_SRC (x), insn);
727 }
728
729 /* Analyze the uses of memory and registers in rtx X in INSN. */
730
731 static void
732 sched_analyze_2 (deps, x, insn)
733 struct deps *deps;
734 rtx x;
735 rtx insn;
736 {
737 register int i;
738 register int j;
739 register enum rtx_code code;
740 register const char *fmt;
741
742 if (x == 0)
743 return;
744
745 code = GET_CODE (x);
746
747 switch (code)
748 {
749 case CONST_INT:
750 case CONST_DOUBLE:
751 case SYMBOL_REF:
752 case CONST:
753 case LABEL_REF:
754 /* Ignore constants. Note that we must handle CONST_DOUBLE here
755 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
756 this does not mean that this insn is using cc0. */
757 return;
758
759 #ifdef HAVE_cc0
760 case CC0:
761 /* User of CC0 depends on immediately preceding insn. */
762 set_sched_group_p (insn);
763 return;
764 #endif
765
766 case REG:
767 {
768 rtx u;
769 int regno = REGNO (x);
770 if (regno < FIRST_PSEUDO_REGISTER)
771 {
772 int i;
773
774 i = HARD_REGNO_NREGS (regno, GET_MODE (x));
775 while (--i >= 0)
776 {
777 int r = regno + i;
778 deps->reg_last[r].uses
779 = alloc_INSN_LIST (insn, deps->reg_last[r].uses);
780 SET_REGNO_REG_SET (&deps->reg_last_in_use, r);
781
782 for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
783 add_dependence (insn, XEXP (u, 0), 0);
784
785 /* ??? This should never happen. */
786 for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
787 add_dependence (insn, XEXP (u, 0), 0);
788
789 if (call_used_regs[r] || global_regs[r])
790 /* Function calls clobber all call_used regs. */
791 for (u = deps->last_function_call; u; u = XEXP (u, 1))
792 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
793 }
794 }
795 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
796 it does not reload. Ignore these as they have served their
797 purpose already. */
798 else if (regno >= deps->max_reg)
799 {
800 if (GET_CODE (PATTERN (insn)) != USE
801 && GET_CODE (PATTERN (insn)) != CLOBBER)
802 abort ();
803 }
804 else
805 {
806 deps->reg_last[regno].uses
807 = alloc_INSN_LIST (insn, deps->reg_last[regno].uses);
808 SET_REGNO_REG_SET (&deps->reg_last_in_use, regno);
809
810 for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
811 add_dependence (insn, XEXP (u, 0), 0);
812
813 /* ??? This should never happen. */
814 for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
815 add_dependence (insn, XEXP (u, 0), 0);
816
817 /* Pseudos that are REG_EQUIV to something may be replaced
818 by that during reloading. We need only add dependencies for
819 the address in the REG_EQUIV note. */
820 if (!reload_completed
821 && reg_known_equiv_p[regno]
822 && GET_CODE (reg_known_value[regno]) == MEM)
823 sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);
824
825 /* If the register does not already cross any calls, then add this
826 insn to the sched_before_next_call list so that it will still
827 not cross calls after scheduling. */
828 if (REG_N_CALLS_CROSSED (regno) == 0)
829 add_dependence (deps->sched_before_next_call, insn,
830 REG_DEP_ANTI);
831 }
832 return;
833 }
834
835 case MEM:
836 {
837 /* Reading memory. */
838 rtx u;
839 rtx pending, pending_mem;
840
841 pending = deps->pending_read_insns;
842 pending_mem = deps->pending_read_mems;
843 while (pending)
844 {
845 if (read_dependence (XEXP (pending_mem, 0), x))
846 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
847
848 pending = XEXP (pending, 1);
849 pending_mem = XEXP (pending_mem, 1);
850 }
851
852 pending = deps->pending_write_insns;
853 pending_mem = deps->pending_write_mems;
854 while (pending)
855 {
856 if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
857 x, rtx_varies_p))
858 add_dependence (insn, XEXP (pending, 0), 0);
859
860 pending = XEXP (pending, 1);
861 pending_mem = XEXP (pending_mem, 1);
862 }
863
864 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
865 if (GET_CODE (XEXP (u, 0)) != JUMP_INSN
866 || deps_may_trap_p (x))
867 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
868
869 /* Always add these dependencies to pending_reads, since
870 this insn may be followed by a write. */
871 add_insn_mem_dependence (deps, &deps->pending_read_insns,
872 &deps->pending_read_mems, insn, x);
873
874 /* Take advantage of tail recursion here. */
875 sched_analyze_2 (deps, XEXP (x, 0), insn);
876 return;
877 }
878
879 /* Force pending stores to memory in case a trap handler needs them. */
880 case TRAP_IF:
881 flush_pending_lists (deps, insn, 1);
882 break;
883
884 case ASM_OPERANDS:
885 case ASM_INPUT:
886 case UNSPEC_VOLATILE:
887 {
888 rtx u;
889
890 /* Traditional and volatile asm instructions must be considered to use
891 and clobber all hard registers, all pseudo-registers and all of
892 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
893
894 Consider for instance a volatile asm that changes the fpu rounding
895 mode. An insn should not be moved across this even if it only uses
896 pseudo-regs because it might give an incorrectly rounded result. */
897 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
898 {
899 for (i = 0; i < deps->max_reg; i++)
900 {
901 struct deps_reg *reg_last = &deps->reg_last[i];
902
903 for (u = reg_last->uses; u; u = XEXP (u, 1))
904 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
905 for (u = reg_last->sets; u; u = XEXP (u, 1))
906 add_dependence (insn, XEXP (u, 0), 0);
907 for (u = reg_last->clobbers; u; u = XEXP (u, 1))
908 add_dependence (insn, XEXP (u, 0), 0);
909
910 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
911 free_INSN_LIST_list (&reg_last->uses);
912 }
913 reg_pending_sets_all = 1;
914
915 flush_pending_lists (deps, insn, 0);
916 }
917
918 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
919 We cannot just fall through here since then we would be confused
920 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
921 a traditional asm, unlike its normal usage.  */
922
923 if (code == ASM_OPERANDS)
924 {
925 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
926 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
927 return;
928 }
929 break;
930 }
931
932 case PRE_DEC:
933 case POST_DEC:
934 case PRE_INC:
935 case POST_INC:
936 /* These both read and modify the result. We must handle them as writes
937 to get proper dependencies for following instructions. We must handle
938 them as reads to get proper dependencies from this to previous
939 instructions. Thus we need to pass them to both sched_analyze_1
940 and sched_analyze_2. We must call sched_analyze_2 first in order
941 to get the proper antecedent for the read. */
942 sched_analyze_2 (deps, XEXP (x, 0), insn);
943 sched_analyze_1 (deps, x, insn);
944 return;
945
946 case POST_MODIFY:
947 case PRE_MODIFY:
948 /* op0 = op0 + op1 */
949 sched_analyze_2 (deps, XEXP (x, 0), insn);
950 sched_analyze_2 (deps, XEXP (x, 1), insn);
951 sched_analyze_1 (deps, x, insn);
952 return;
953
954 default:
955 break;
956 }
957
958 /* Other cases: walk the insn. */
959 fmt = GET_RTX_FORMAT (code);
960 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
961 {
962 if (fmt[i] == 'e')
963 sched_analyze_2 (deps, XEXP (x, i), insn);
964 else if (fmt[i] == 'E')
965 for (j = 0; j < XVECLEN (x, i); j++)
966 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
967 }
968 }
969
970 /* Analyze an INSN with pattern X to find all dependencies. */
971
972 static void
973 sched_analyze_insn (deps, x, insn, loop_notes)
974 struct deps *deps;
975 rtx x, insn;
976 rtx loop_notes;
977 {
978 register RTX_CODE code = GET_CODE (x);
979 int schedule_barrier_found = 0;
980 rtx link;
981 int i;
982
983 if (code == COND_EXEC)
984 {
985 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
986
987 /* ??? Should be recording conditions so we reduce the number of
988 false dependencies.  */
989 x = COND_EXEC_CODE (x);
990 code = GET_CODE (x);
991 }
992 if (code == SET || code == CLOBBER)
993 sched_analyze_1 (deps, x, insn);
994 else if (code == PARALLEL)
995 {
996 register int i;
997 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
998 {
999 rtx sub = XVECEXP (x, 0, i);
1000 code = GET_CODE (sub);
1001
1002 if (code == COND_EXEC)
1003 {
1004 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
1005 sub = COND_EXEC_CODE (sub);
1006 code = GET_CODE (sub);
1007 }
1008 if (code == SET || code == CLOBBER)
1009 sched_analyze_1 (deps, sub, insn);
1010 else
1011 sched_analyze_2 (deps, sub, insn);
1012 }
1013 }
1014 else
1015 sched_analyze_2 (deps, x, insn);
1016
1017 /* Mark registers CLOBBERED or used by the called function.  */
1018 if (GET_CODE (insn) == CALL_INSN)
1019 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1020 {
1021 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
1022 sched_analyze_1 (deps, XEXP (link, 0), insn);
1023 else
1024 sched_analyze_2 (deps, XEXP (link, 0), insn);
1025 }
1026
1027 if (GET_CODE (insn) == JUMP_INSN)
1028 {
1029 rtx next;
1030 next = next_nonnote_insn (insn);
1031 if (next && GET_CODE (next) == BARRIER)
1032 schedule_barrier_found = 1;
1033 else
1034 {
1035 rtx pending, pending_mem, u;
1036 regset_head tmp;
1037 INIT_REG_SET (&tmp);
1038
1039 (*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
1040 EXECUTE_IF_SET_IN_REG_SET (&tmp, 0, i,
1041 {
1042 struct deps_reg *reg_last = &deps->reg_last[i];
1043 for (u = reg_last->sets; u; u = XEXP (u, 1))
1044 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1045 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
1046 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1047 });
1048
1049 CLEAR_REG_SET (&tmp);
1050
1051 /* All memory writes and volatile reads must happen before the
1052 jump.  Non-volatile reads must happen before the jump only if
1053 their result is needed by the register uses computed above.  */
1054
1055 pending = deps->pending_write_insns;
1056 pending_mem = deps->pending_write_mems;
1057 while (pending)
1058 {
1059 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
1060 pending = XEXP (pending, 1);
1061 pending_mem = XEXP (pending_mem, 1);
1062 }
1063
1064 pending = deps->pending_read_insns;
1065 pending_mem = deps->pending_read_mems;
1066 while (pending)
1067 {
1068 if (MEM_VOLATILE_P (XEXP (pending_mem, 0)))
1069 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
1070 pending = XEXP (pending, 1);
1071 pending_mem = XEXP (pending_mem, 1);
1072 }
1073
1074 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
1075 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1076 }
1077 }
1078
1079 /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
1080 block, then we must be sure that no instructions are scheduled across it.
1081 Otherwise, the reg_n_refs info (which depends on loop_depth) would
1082 become incorrect. */
1083 if (loop_notes)
1084 {
1085 rtx link;
1086
1087 /* Update loop_notes with any notes from this insn. Also determine
1088 if any of the notes on the list correspond to instruction scheduling
1089 barriers (loop, eh & setjmp notes, but not range notes). */
1090 link = loop_notes;
1091 while (XEXP (link, 1))
1092 {
1093 if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
1094 || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
1095 || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
1096 || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END
1097 || INTVAL (XEXP (link, 0)) == NOTE_INSN_SETJMP)
1098 schedule_barrier_found = 1;
1099
1100 link = XEXP (link, 1);
1101 }
1102 XEXP (link, 1) = REG_NOTES (insn);
1103 REG_NOTES (insn) = loop_notes;
1104 }
1105
1106 /* If this instruction can throw an exception, then moving it changes
1107 where block boundaries fall. This is mighty confusing elsewhere.
1108 Therefore, prevent such an instruction from being moved. */
1109 if (flag_non_call_exceptions && can_throw_internal (insn))
1110 schedule_barrier_found = 1;
1111
1112 /* Add dependencies if a scheduling barrier was found. */
1113 if (schedule_barrier_found)
1114 {
1115 rtx u;
1116
1117 for (i = 0; i < deps->max_reg; i++)
1118 {
1119 struct deps_reg *reg_last = &deps->reg_last[i];
1120
1121 for (u = reg_last->uses; u; u = XEXP (u, 1))
1122 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1123 for (u = reg_last->sets; u; u = XEXP (u, 1))
1124 add_dependence (insn, XEXP (u, 0), 0);
1125 for (u = reg_last->clobbers; u; u = XEXP (u, 1))
1126 add_dependence (insn, XEXP (u, 0), 0);
1127
1128 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
1129 free_INSN_LIST_list (&reg_last->uses);
1130 }
1131 flush_pending_lists (deps, insn, 0);
1132
1133 reg_pending_sets_all = 1;
1134 }
1135
1136 /* Accumulate clobbers until the next set so that it will be output
1137 dependent on all of them. At the next set we can clear the clobber
1138 list, since subsequent sets will be output dependent on it. */
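/* reg_pending_sets_all is set for volatile asms, scheduling barriers
   and setjmp calls; in that case every register is treated as set by
   this insn.  */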
1139 if (reg_pending_sets_all)
1140 {
1141 reg_pending_sets_all = 0;
1142 for (i = 0; i < deps->max_reg; i++)
1143 {
1144 struct deps_reg *reg_last = &deps->reg_last[i];
1145 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
1146 {
1147 free_INSN_LIST_list (&reg_last->sets);
1148 free_INSN_LIST_list (&reg_last->clobbers);
1149 }
1150 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1151 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1152 }
1153 }
1154 else
1155 {
1156 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
1157 {
1158 struct deps_reg *reg_last = &deps->reg_last[i];
1159 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
1160 {
1161 free_INSN_LIST_list (&reg_last->sets);
1162 free_INSN_LIST_list (&reg_last->clobbers);
1163 }
1164 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1165 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1166 });
1167 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
1168 {
1169 struct deps_reg *reg_last = &deps->reg_last[i];
1170 reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
1171 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1172 });
1173 }
1174 CLEAR_REG_SET (reg_pending_sets);
1175 CLEAR_REG_SET (reg_pending_clobbers);
1176
1177 /* If a post-call group is still open, see if it should remain so.
1178 This insn must be a simple move of a hard reg to a pseudo or
1179 vice-versa.
1180
1181 We must avoid moving these insns for correctness on
1182 SMALL_REGISTER_CLASS machines, and for special registers like
1183 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
1184 hard regs for all targets. */
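/* For example, a move of a hard return-value register into a pseudo
   immediately after a call stays glued to the call by being marked
   SCHED_GROUP_P and CANT_MOVE below.  */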
1185
1186 if (deps->in_post_call_group_p)
1187 {
1188 rtx tmp, set = single_set (insn);
1189 int src_regno, dest_regno;
1190
1191 if (set == NULL)
1192 goto end_call_group;
1193
1194 tmp = SET_DEST (set);
1195 if (GET_CODE (tmp) == SUBREG)
1196 tmp = SUBREG_REG (tmp);
1197 if (GET_CODE (tmp) == REG)
1198 dest_regno = REGNO (tmp);
1199 else
1200 goto end_call_group;
1201
1202 tmp = SET_SRC (set);
1203 if (GET_CODE (tmp) == SUBREG)
1204 tmp = SUBREG_REG (tmp);
1205 if (GET_CODE (tmp) == REG)
1206 src_regno = REGNO (tmp);
1207 else
1208 goto end_call_group;
1209
1210 if (src_regno < FIRST_PSEUDO_REGISTER
1211 || dest_regno < FIRST_PSEUDO_REGISTER)
1212 {
1213 set_sched_group_p (insn);
1214 CANT_MOVE (insn) = 1;
1215 }
1216 else
1217 {
1218 end_call_group:
1219 deps->in_post_call_group_p = 0;
1220 }
1221 }
1222 }
1223
1224 /* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
1225 for every dependency. */
1226
1227 void
1228 sched_analyze (deps, head, tail)
1229 struct deps *deps;
1230 rtx head, tail;
1231 {
1232 register rtx insn;
1233 register rtx u;
1234 rtx loop_notes = 0;
1235
1236 for (insn = head;; insn = NEXT_INSN (insn))
1237 {
1238 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
1239 {
1240 /* Clear out the stale LOG_LINKS from flow. */
1241 free_INSN_LIST_list (&LOG_LINKS (insn));
1242
1243 /* Clear out stale SCHED_GROUP_P. */
1244 SCHED_GROUP_P (insn) = 0;
1245
1246 /* Make each JUMP_INSN a scheduling barrier for memory
1247 references. */
1248 if (GET_CODE (insn) == JUMP_INSN)
1249 deps->last_pending_memory_flush
1250 = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
1251 sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
1252 loop_notes = 0;
1253 }
1254 else if (GET_CODE (insn) == CALL_INSN)
1255 {
1256 rtx x;
1257 register int i;
1258
1259 /* Clear out stale SCHED_GROUP_P. */
1260 SCHED_GROUP_P (insn) = 0;
1261
1262 CANT_MOVE (insn) = 1;
1263
1264 /* Clear out the stale LOG_LINKS from flow. */
1265 free_INSN_LIST_list (&LOG_LINKS (insn));
1266
1267 /* Any instruction using a hard register which may get clobbered
1268 by a call needs to be marked as dependent on this call.
1269 This prevents a use of a hard return reg from being moved
1270 past a void call (i.e. it does not explicitly set the hard
1271 return reg). */
1272
1273 /* If this call is followed by a NOTE_INSN_SETJMP, then assume that
1274 all registers, not just hard registers, may be clobbered by this
1275 call. */
1276
1277 /* Insn, being a CALL_INSN, magically depends on
1278 `last_function_call' already. */
1279
1280 if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == NOTE
1281 && NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP)
1282 {
1283 for (i = 0; i < deps->max_reg; i++)
1284 {
1285 struct deps_reg *reg_last = &deps->reg_last[i];
1286
1287 for (u = reg_last->uses; u; u = XEXP (u, 1))
1288 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1289 for (u = reg_last->sets; u; u = XEXP (u, 1))
1290 add_dependence (insn, XEXP (u, 0), 0);
1291 for (u = reg_last->clobbers; u; u = XEXP (u, 1))
1292 add_dependence (insn, XEXP (u, 0), 0);
1293
1294 free_INSN_LIST_list (&reg_last->uses);
1295 }
1296 reg_pending_sets_all = 1;
1297
1298 /* Add a pair of REG_SAVE_NOTEs which we will later
1299 convert back into a NOTE_INSN_SETJMP note. See
1300 reemit_notes for why we use a pair of NOTEs. */
1301 REG_NOTES (insn) = alloc_EXPR_LIST (REG_SAVE_NOTE,
1302 GEN_INT (0),
1303 REG_NOTES (insn));
1304 REG_NOTES (insn) = alloc_EXPR_LIST (REG_SAVE_NOTE,
1305 GEN_INT (NOTE_INSN_SETJMP),
1306 REG_NOTES (insn));
1307 }
1308 else
1309 {
1310 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1311 if (call_used_regs[i] || global_regs[i])
1312 {
1313 for (u = deps->reg_last[i].uses; u; u = XEXP (u, 1))
1314 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1315 for (u = deps->reg_last[i].sets; u; u = XEXP (u, 1))
1316 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1317
1318 SET_REGNO_REG_SET (reg_pending_clobbers, i);
1319 }
1320 }
1321
1322 /* For each insn which shouldn't cross a call, add a dependence
1323 between that insn and this call insn. */
1324 x = LOG_LINKS (deps->sched_before_next_call);
1325 while (x)
1326 {
1327 add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
1328 x = XEXP (x, 1);
1329 }
1330 free_INSN_LIST_list (&LOG_LINKS (deps->sched_before_next_call));
1331
1332 sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
1333 loop_notes = 0;
1334
1335 /* In the absence of interprocedural alias analysis, we must flush
1336 all pending reads and writes, and start new dependencies from
1337 here.  But only flush writes for constant calls (which may be
1338 passed a pointer to something we haven't written yet).  */
1339 flush_pending_lists (deps, insn, CONST_CALL_P (insn));
1340
1341 /* Depend this function call (actually, the user of this
1342 function call) on all hard register clobberage. */
1343
1344 /* last_function_call is now a list of insns. */
1345 free_INSN_LIST_list (&deps->last_function_call);
1346 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
1347
1348 /* Before reload, begin a post-call group, so as to keep the
1349 lifetimes of hard registers correct. */
1350 if (! reload_completed)
1351 deps->in_post_call_group_p = 1;
1352 }
1353
1354 /* See comments on reemit_notes as to why we do this.
1355 ??? Actually, the reemit_notes just say what is done, not why. */
1356
1357 else if (GET_CODE (insn) == NOTE
1358 && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_BEG
1359 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_END))
1360 {
1361 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE, NOTE_RANGE_INFO (insn),
1362 loop_notes);
1363 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1364 GEN_INT (NOTE_LINE_NUMBER (insn)),
1365 loop_notes);
1366 }
1367 else if (GET_CODE (insn) == NOTE
1368 && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
1369 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
1370 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
1371 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END
1372 || (NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP
1373 && GET_CODE (PREV_INSN (insn)) != CALL_INSN)))
1374 {
1375 rtx rtx_region;
1376
1377 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
1378 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
1379 rtx_region = GEN_INT (NOTE_EH_HANDLER (insn));
1380 else
1381 rtx_region = GEN_INT (0);
1382
1383 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1384 rtx_region,
1385 loop_notes);
1386 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1387 GEN_INT (NOTE_LINE_NUMBER (insn)),
1388 loop_notes);
1389 CONST_CALL_P (loop_notes) = CONST_CALL_P (insn);
1390 }
1391
1392 if (insn == tail)
1393 return;
1394 }
1395 abort ();
1396 }
1397 \f
1398 /* Examine insns in the range [ HEAD, TAIL ] and use the backward
1399 dependences from LOG_LINKS to build forward dependences in
1400 INSN_DEPEND. */
1401
1402 void
1403 compute_forward_dependences (head, tail)
1404 rtx head, tail;
1405 {
1406 rtx insn, link;
1407 rtx next_tail;
1408 enum reg_note dep_type;
1409
1410 next_tail = NEXT_INSN (tail);
1411 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1412 {
1413 if (! INSN_P (insn))
1414 continue;
1415
1416 insn = group_leader (insn);
1417
1418 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
1419 {
1420 rtx x = group_leader (XEXP (link, 0));
1421 rtx new_link;
1422
1423 if (x != XEXP (link, 0))
1424 continue;
1425
1426 #ifdef ENABLE_CHECKING
1427 /* If add_dependence is working properly there should never
1428 be notes, deleted insns or duplicates in the backward
1429 links. Thus we need not check for them here.
1430
1431 However, if we have enabled checking we might as well go
1432 ahead and verify that add_dependence worked properly. */
1433 if (GET_CODE (x) == NOTE
1434 || INSN_DELETED_P (x)
1435 || (forward_dependency_cache != NULL
1436 && TEST_BIT (forward_dependency_cache[INSN_LUID (x)],
1437 INSN_LUID (insn)))
1438 || (forward_dependency_cache == NULL
1439 && find_insn_list (insn, INSN_DEPEND (x))))
1440 abort ();
1441 if (forward_dependency_cache != NULL)
1442 SET_BIT (forward_dependency_cache[INSN_LUID (x)],
1443 INSN_LUID (insn));
1444 #endif
1445
1446 new_link = alloc_INSN_LIST (insn, INSN_DEPEND (x));
1447
1448 dep_type = REG_NOTE_KIND (link);
1449 PUT_REG_NOTE_KIND (new_link, dep_type);
1450
1451 INSN_DEPEND (x) = new_link;
1452 INSN_DEP_COUNT (insn) += 1;
1453 }
1454 }
1455 }
1456 \f
1457 /* Initialize variables in DEPS for region data dependence analysis.  */
1459
1460 void
1461 init_deps (deps)
1462 struct deps *deps;
1463 {
1464 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
1465
1466 deps->max_reg = max_reg;
1467 deps->reg_last = (struct deps_reg *)
1468 xcalloc (max_reg, sizeof (struct deps_reg));
1469 INIT_REG_SET (&deps->reg_last_in_use);
1470
1471 deps->pending_read_insns = 0;
1472 deps->pending_read_mems = 0;
1473 deps->pending_write_insns = 0;
1474 deps->pending_write_mems = 0;
1475 deps->pending_lists_length = 0;
1476 deps->last_pending_memory_flush = 0;
1477 deps->last_function_call = 0;
1478 deps->in_post_call_group_p = 0;
1479
1480 deps->sched_before_next_call
1481 = gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
1482 NULL_RTX, 0, NULL_RTX, NULL_RTX);
1483 LOG_LINKS (deps->sched_before_next_call) = 0;
1484 }
1485
1486 /* Free insn lists found in DEPS. */
1487
1488 void
1489 free_deps (deps)
1490 struct deps *deps;
1491 {
1492 int i;
1493
1494 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
1495 times. For a test case with 42000 regs and 8000 small basic blocks,
1496 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
1497 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
1498 {
1499 struct deps_reg *reg_last = &deps->reg_last[i];
1500 free_INSN_LIST_list (&reg_last->uses);
1501 free_INSN_LIST_list (&reg_last->sets);
1502 free_INSN_LIST_list (&reg_last->clobbers);
1503 });
1504 CLEAR_REG_SET (&deps->reg_last_in_use);
1505
1506 free (deps->reg_last);
1507 deps->reg_last = NULL;
1508 }
1509
1510 /* If it is profitable to use them, initialize caches for tracking
1511 dependency information.  LUID is the number of insns to be
1512 scheduled; it is used in the estimate of profitability.  */
1513
1514 void
1515 init_dependency_caches (luid)
1516 int luid;
1517 {
1518 /* ?!? We could save some memory by computing a per-region luid mapping
1519 which could reduce both the number of vectors in the cache and the size
1520 of each vector. Instead we just avoid the cache entirely unless the
1521 average number of instructions in a basic block is very high. See
1522 the comment before the declaration of true_dependency_cache for
1523 what we consider "very high". */
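/* 100 * 5 corresponds to 100X the assumed 5-insn average block size;
   see the comment above true_dependency_cache.  */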
1524 if (luid / n_basic_blocks > 100 * 5)
1525 {
1526 true_dependency_cache = sbitmap_vector_alloc (luid, luid);
1527 sbitmap_vector_zero (true_dependency_cache, luid);
1528 anti_dependency_cache = sbitmap_vector_alloc (luid, luid);
1529 sbitmap_vector_zero (anti_dependency_cache, luid);
1530 output_dependency_cache = sbitmap_vector_alloc (luid, luid);
1531 sbitmap_vector_zero (output_dependency_cache, luid);
1532 #ifdef ENABLE_CHECKING
1533 forward_dependency_cache = sbitmap_vector_alloc (luid, luid);
1534 sbitmap_vector_zero (forward_dependency_cache, luid);
1535 #endif
1536 }
1537 }
1538
1539 /* Free the caches allocated in init_dependency_caches. */
1540
1541 void
1542 free_dependency_caches ()
1543 {
1544 if (true_dependency_cache)
1545 {
1546 sbitmap_vector_free (true_dependency_cache);
1547 true_dependency_cache = NULL;
1548 sbitmap_vector_free (anti_dependency_cache);
1549 anti_dependency_cache = NULL;
1550 sbitmap_vector_free (output_dependency_cache);
1551 output_dependency_cache = NULL;
1552 #ifdef ENABLE_CHECKING
1553 sbitmap_vector_free (forward_dependency_cache);
1554 forward_dependency_cache = NULL;
1555 #endif
1556 }
1557 }
1558
1559 /* Initialize some global variables needed by the dependency analysis
1560 code. */
1561
1562 void
1563 init_deps_global ()
1564 {
1565 reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
1566 reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
1567 reg_pending_sets_all = 0;
1568 }
1569
1570 /* Free everything used by the dependency analysis code. */
1571
1572 void
1573 finish_deps_global ()
1574 {
1575 FREE_REG_SET (reg_pending_sets);
1576 FREE_REG_SET (reg_pending_clobbers);
1577 }