1 /* Instruction scheduling pass. This file computes dependencies between
2 instructions.
3 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
6 and currently maintained by, Jim Wilson (wilson@cygnus.com).
7
8 This file is part of GNU CC.
9
10 GNU CC is free software; you can redistribute it and/or modify it
11 under the terms of the GNU General Public License as published by the
12 Free Software Foundation; either version 2, or (at your option) any
13 later version.
14
15 GNU CC is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GNU CC; see the file COPYING. If not, write to the Free
22 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
23 02111-1307, USA. */
24 \f
25 #include "config.h"
26 #include "system.h"
27 #include "toplev.h"
28 #include "rtl.h"
29 #include "tm_p.h"
30 #include "hard-reg-set.h"
31 #include "basic-block.h"
32 #include "regs.h"
33 #include "function.h"
34 #include "flags.h"
35 #include "insn-config.h"
36 #include "insn-attr.h"
37 #include "except.h"
39 #include "recog.h"
40 #include "sched-int.h"
41
42 extern char *reg_known_equiv_p;
43 extern rtx *reg_known_value;
44
45 static regset_head reg_pending_sets_head;
46 static regset_head reg_pending_clobbers_head;
47
48 static regset reg_pending_sets;
49 static regset reg_pending_clobbers;
50 static int reg_pending_sets_all;
51
52 /* To speed up the test for duplicate dependency links we keep a
53 record of dependencies created by add_dependence when the average
54 number of instructions in a basic block is very large.
55
56 Studies have shown that there are typically around 5 instructions between
57 branches in typical C code.  So we guess that the average basic block is
58 approximately 5 instructions long; we will choose 100X the average size
59 as a very large basic block.
60
61 Each insn has associated bitmaps for its dependencies. Each bitmap
62 has enough entries to represent a dependency on any other insn in
63 the insn chain.  If the cache for true dependencies is allocated,
64 then the other two caches are allocated as well.  */
65 static sbitmap *true_dependency_cache;
66 static sbitmap *anti_dependency_cache;
67 static sbitmap *output_dependency_cache;
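/* Editorial sketch (not part of the original source): a minimal, standalone
   illustration of the caching idea behind the three sbitmap vectors above.
   For N insns, one N x N bit matrix per dependence kind turns the question
   "does insn I already depend on insn J?" into a single bit lookup instead
   of a walk over LOG_LINKS.  All names below (dep_cache, dep_cache_alloc,
   etc.) are hypothetical and exist only for this illustration.  */
#if 0   /* Illustration only; never compiled as part of this file.  */
#include <stdlib.h>

struct dep_cache
{
  unsigned char *bits;   /* N * N matrix; row = depending insn.  */
  int n;                 /* Number of insns covered.  */
};

static struct dep_cache *
dep_cache_alloc (int n_insns)
{
  struct dep_cache *c = (struct dep_cache *) malloc (sizeof *c);
  c->n = n_insns;
  c->bits = (unsigned char *) calloc ((size_t) n_insns * n_insns, 1);
  return c;
}

static void
dep_cache_set (struct dep_cache *c, int insn, int elem)
{
  c->bits[(size_t) insn * c->n + elem] = 1;
}

static int
dep_cache_test (const struct dep_cache *c, int insn, int elem)
{
  return c->bits[(size_t) insn * c->n + elem];
}
#endif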
68
69 /* To speed up checking the consistency of formed forward insn
70 dependencies we use the following cache.  Another possible solution
71 would be to switch off the checking of duplicate insns in forward
72 dependencies. */
73 #ifdef ENABLE_CHECKING
74 static sbitmap *forward_dependency_cache;
75 #endif
76
77 static int deps_may_trap_p PARAMS ((rtx));
78 static void remove_dependence PARAMS ((rtx, rtx));
79 static void set_sched_group_p PARAMS ((rtx));
80
81 static void flush_pending_lists PARAMS ((struct deps *, rtx, int));
82 static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
83 static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
84 static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
85 static rtx group_leader PARAMS ((rtx));
86
87 static rtx get_condition PARAMS ((rtx));
88 static int conditions_mutex_p PARAMS ((rtx, rtx));
89 \f
90 /* Return nonzero if a load of the memory reference MEM can cause a trap. */
91
92 static int
93 deps_may_trap_p (mem)
94 rtx mem;
95 {
96 rtx addr = XEXP (mem, 0);
97
98 if (REG_P (addr)
99 && REGNO (addr) >= FIRST_PSEUDO_REGISTER
100 && reg_known_value[REGNO (addr)])
101 addr = reg_known_value[REGNO (addr)];
102 return rtx_addr_can_trap_p (addr);
103 }
104 \f
105 /* Return the INSN_LIST containing INSN in LIST, or NULL
106 if LIST does not contain INSN. */
107
108 HAIFA_INLINE rtx
109 find_insn_list (insn, list)
110 rtx insn;
111 rtx list;
112 {
113 while (list)
114 {
115 if (XEXP (list, 0) == insn)
116 return list;
117 list = XEXP (list, 1);
118 }
119 return 0;
120 }
121
122 /* Return 1 if the pair (insn, x) is found in (LIST, LIST1), or 0
123 otherwise. */
124
125 HAIFA_INLINE int
126 find_insn_mem_list (insn, x, list, list1)
127 rtx insn, x;
128 rtx list, list1;
129 {
130 while (list)
131 {
132 if (XEXP (list, 0) == insn
133 && XEXP (list1, 0) == x)
134 return 1;
135 list = XEXP (list, 1);
136 list1 = XEXP (list1, 1);
137 }
138 return 0;
139 }
140 \f
141 /* Find the condition under which INSN is executed. */
142
143 static rtx
144 get_condition (insn)
145 rtx insn;
146 {
147 rtx pat = PATTERN (insn);
148 rtx cond;
149
150 if (pat == 0)
151 return 0;
152 if (GET_CODE (pat) == COND_EXEC)
153 return COND_EXEC_TEST (pat);
154 if (GET_CODE (insn) != JUMP_INSN)
155 return 0;
156 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
157 return 0;
158 if (GET_CODE (SET_SRC (pat)) != IF_THEN_ELSE)
159 return 0;
160 pat = SET_SRC (pat);
161 cond = XEXP (pat, 0);
162 if (GET_CODE (XEXP (pat, 1)) == LABEL_REF
163 && XEXP (pat, 2) == pc_rtx)
164 return cond;
165 else if (GET_CODE (XEXP (pat, 2)) == LABEL_REF
166 && XEXP (pat, 1) == pc_rtx)
167 return gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)), GET_MODE (cond),
168 XEXP (cond, 0), XEXP (cond, 1));
169 else
170 return 0;
171 }
172
173 /* Return nonzero if conditions COND1 and COND2 can never both be true. */
174
175 static int
176 conditions_mutex_p (cond1, cond2)
177 rtx cond1, cond2;
178 {
179 if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
180 && GET_RTX_CLASS (GET_CODE (cond2)) == '<'
181 && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2))
182 && XEXP (cond1, 0) == XEXP (cond2, 0)
183 && XEXP (cond1, 1) == XEXP (cond2, 1))
184 return 1;
185 return 0;
186 }
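/* Editorial sketch (not part of the original source): the intuition behind
   conditions_mutex_p above.  Two comparisons with identical operands whose
   codes are each other's reverse (e.g. LT and GE) can never both be true,
   so insns predicated on them can never both execute and need no dependence
   between each other.  The names below (sketch_cmp, sketch_cond, ...) are
   hypothetical, standalone stand-ins for the rtx machinery.  */
#if 0   /* Illustration only; never compiled as part of this file.  */
enum sketch_cmp { SKETCH_LT, SKETCH_GE, SKETCH_EQ, SKETCH_NE };

static enum sketch_cmp
sketch_reverse (enum sketch_cmp code)
{
  switch (code)
    {
    case SKETCH_LT: return SKETCH_GE;
    case SKETCH_GE: return SKETCH_LT;
    case SKETCH_EQ: return SKETCH_NE;
    default:        return SKETCH_EQ;
    }
}

struct sketch_cond { enum sketch_cmp code; int op0, op1; };

/* Mirrors conditions_mutex_p: same operands, reversed comparison code.  */
static int
sketch_conds_mutex_p (const struct sketch_cond *a, const struct sketch_cond *b)
{
  return a->code == sketch_reverse (b->code)
         && a->op0 == b->op0
         && a->op1 == b->op1;
}
#endif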
187 \f
188 /* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
189 LOG_LINKS of INSN, if not already there. DEP_TYPE indicates the type
190 of dependence that this link represents. */
191
192 void
193 add_dependence (insn, elem, dep_type)
194 rtx insn;
195 rtx elem;
196 enum reg_note dep_type;
197 {
198 rtx link, next;
199 int present_p;
200 enum reg_note present_dep_type;
201 rtx cond1, cond2;
202
203 /* Don't depend an insn on itself. */
204 if (insn == elem)
205 return;
206
207 /* We can get a dependency on deleted insns due to optimizations in
208 the register allocation and reloading or due to splitting. Any
209 such dependency is useless and can be ignored. */
210 if (GET_CODE (elem) == NOTE)
211 return;
212
213 /* flow.c doesn't handle conditional lifetimes entirely correctly;
214 calls mess up the conditional lifetimes. */
215 if (GET_CODE (insn) != CALL_INSN && GET_CODE (elem) != CALL_INSN)
216 {
217 cond1 = get_condition (insn);
218 cond2 = get_condition (elem);
219 if (cond1 && cond2 && conditions_mutex_p (cond1, cond2))
220 return;
221 }
222
223 /* If elem is part of a sequence that must be scheduled together, then
224 make the dependence point to the last insn of the sequence.
225 When HAVE_cc0, it is possible for NOTEs to exist between users and
226 setters of the condition codes, so we must skip past notes here.
227 Otherwise, NOTEs are impossible here. */
228 next = next_nonnote_insn (elem);
229 if (next && SCHED_GROUP_P (next)
230 && GET_CODE (next) != CODE_LABEL)
231 {
232 /* Contrary to an older assumption, notes can intervene here; the
233 next_nonnote_insn calls above and below take care of skipping them. */
235 /* We must reject CODE_LABELs, so that we don't get confused by one
236 that has LABEL_PRESERVE_P set, which is represented by the same
237 bit in the rtl as SCHED_GROUP_P. A CODE_LABEL can never be
238 SCHED_GROUP_P. */
239
240 rtx nnext;
241 while ((nnext = next_nonnote_insn (next)) != NULL
242 && SCHED_GROUP_P (nnext)
243 && GET_CODE (nnext) != CODE_LABEL)
244 next = nnext;
245
246 /* Again, don't depend an insn on itself. */
247 if (insn == next)
248 return;
249
250 /* Make the dependence to NEXT, the last insn of the group, instead
251 of the original ELEM. */
252 elem = next;
253 }
254
255 present_p = 1;
256 #ifdef INSN_SCHEDULING
257 /* ??? No good way to tell from here whether we're doing interblock
258 scheduling. Possibly add another callback. */
259 #if 0
260 /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
261 No need for interblock dependences with calls, since
262 calls are not moved between blocks. Note: the edge where
263 elem is a CALL is still required. */
264 if (GET_CODE (insn) == CALL_INSN
265 && (INSN_BB (elem) != INSN_BB (insn)))
266 return;
267 #endif
268
269 /* If we already have a dependency for ELEM, then we do not need to
270 do anything. Avoiding the list walk below can cut compile times
271 dramatically for some code. */
272 if (true_dependency_cache != NULL)
273 {
274 if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
275 abort ();
276 if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
277 present_dep_type = 0;
278 else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
279 INSN_LUID (elem)))
280 present_dep_type = REG_DEP_ANTI;
281 else if (TEST_BIT (output_dependency_cache[INSN_LUID (insn)],
282 INSN_LUID (elem)))
283 present_dep_type = REG_DEP_OUTPUT;
284 else
285 present_p = 0;
286 if (present_p && (int) dep_type >= (int) present_dep_type)
287 return;
288 }
289 #endif
290
291 /* Check that we don't already have this dependence. */
292 if (present_p)
293 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
294 if (XEXP (link, 0) == elem)
295 {
296 #ifdef INSN_SCHEDULING
297 /* Clear the corresponding cache entry because the type of the link
298 may change. */
299 if (true_dependency_cache != NULL)
300 {
301 if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
302 RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
303 INSN_LUID (elem));
304 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
305 && output_dependency_cache)
306 RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
307 INSN_LUID (elem));
308 else
309 abort ();
310 }
311 #endif
312
313 /* If this is a more restrictive type of dependence than the existing
314 one, then change the existing dependence to this type. */
315 if ((int) dep_type < (int) REG_NOTE_KIND (link))
316 PUT_REG_NOTE_KIND (link, dep_type);
317
318 #ifdef INSN_SCHEDULING
319 /* If we are adding a dependency to INSN's LOG_LINKs, then
320 note that in the bitmap caches of dependency information. */
321 if (true_dependency_cache != NULL)
322 {
323 if ((int)REG_NOTE_KIND (link) == 0)
324 SET_BIT (true_dependency_cache[INSN_LUID (insn)],
325 INSN_LUID (elem));
326 else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
327 SET_BIT (anti_dependency_cache[INSN_LUID (insn)],
328 INSN_LUID (elem));
329 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
330 SET_BIT (output_dependency_cache[INSN_LUID (insn)],
331 INSN_LUID (elem));
332 }
333 #endif
334 return;
335 }
336 /* Might want to check one level of transitivity to save conses. */
337
338 link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
339 LOG_LINKS (insn) = link;
340
341 /* Insn dependency, not data dependency. */
342 PUT_REG_NOTE_KIND (link, dep_type);
343
344 #ifdef INSN_SCHEDULING
345 /* If we are adding a dependency to INSN's LOG_LINKs, then note that
346 in the bitmap caches of dependency information. */
347 if (true_dependency_cache != NULL)
348 {
349 if ((int)dep_type == 0)
350 SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
351 else if (dep_type == REG_DEP_ANTI)
352 SET_BIT (anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
353 else if (dep_type == REG_DEP_OUTPUT)
354 SET_BIT (output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
355 }
356 #endif
357 }
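/* Editorial sketch (not part of the original source): how add_dependence
   above merges kinds when a link between the same pair of insns already
   exists.  Smaller numeric values are treated as more restrictive (a true
   dependence is 0), so the test "(int) dep_type < (int) REG_NOTE_KIND (link)"
   amounts to keeping the minimum of the old and new kinds.  The name below
   is hypothetical.  */
#if 0   /* Illustration only; never compiled as part of this file.  */
static int
sketch_merge_dep_kind (int existing_kind, int incoming_kind)
{
  /* Keep whichever kind is more restrictive, i.e. numerically smaller.  */
  return incoming_kind < existing_kind ? incoming_kind : existing_kind;
}
#endif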
358
359 /* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
360 of INSN. Abort if not found. */
361
362 static void
363 remove_dependence (insn, elem)
364 rtx insn;
365 rtx elem;
366 {
367 rtx prev, link, next;
368 int found = 0;
369
370 for (prev = 0, link = LOG_LINKS (insn); link; link = next)
371 {
372 next = XEXP (link, 1);
373 if (XEXP (link, 0) == elem)
374 {
375 if (prev)
376 XEXP (prev, 1) = next;
377 else
378 LOG_LINKS (insn) = next;
379
380 #ifdef INSN_SCHEDULING
381 /* If we are removing a dependency from the LOG_LINKS list,
382 make sure to remove it from the cache too. */
383 if (true_dependency_cache != NULL)
384 {
385 if (REG_NOTE_KIND (link) == 0)
386 RESET_BIT (true_dependency_cache[INSN_LUID (insn)],
387 INSN_LUID (elem));
388 else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
389 RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
390 INSN_LUID (elem));
391 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
392 RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
393 INSN_LUID (elem));
394 }
395 #endif
396
397 free_INSN_LIST_node (link);
398
399 found = 1;
400 }
401 else
402 prev = link;
403 }
404
405 if (!found)
406 abort ();
407 return;
408 }
409
410 /* Return the insn that represents the SCHED_GROUP containing INSN,
411 which is the last insn in the group. */
412
413 static rtx
414 group_leader (insn)
415 rtx insn;
416 {
417 rtx prev;
418
419 do
420 {
421 prev = insn;
422 insn = next_nonnote_insn (insn);
423 }
424 while (insn && SCHED_GROUP_P (insn) && (GET_CODE (insn) != CODE_LABEL));
425
426 return prev;
427 }
428
429 /* Set SCHED_GROUP_P and take care of the rest of the bookkeeping that
430 goes along with that. */
431
432 static void
433 set_sched_group_p (insn)
434 rtx insn;
435 {
436 rtx link, prev;
437
438 SCHED_GROUP_P (insn) = 1;
439
440 /* There may be a note before this insn now, but all notes will
441 be removed before we actually try to schedule the insns, so
442 it won't cause a problem later. We must avoid it here though. */
443 prev = prev_nonnote_insn (insn);
444
445 /* Make a copy of all dependencies on the immediately previous insn,
446 and add them to this insn so that all the dependencies will
447 apply to the group.  Remove any explicit dependence of this insn
448 on the previous insn, as SCHED_GROUP_P now represents it. */
449
450 if (find_insn_list (prev, LOG_LINKS (insn)))
451 remove_dependence (insn, prev);
452
453 for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
454 add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
455 }
456 \f
457 /* Process an insn's memory dependencies. There are four kinds of
458 dependencies:
459
460 (0) read dependence: read follows read
461 (1) true dependence: read follows write
462 (2) anti dependence: write follows read
463 (3) output dependence: write follows write
464
465 We are careful to build only dependencies which actually exist, and
466 use transitivity to avoid building too many links. */
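/* Editorial sketch (not part of the original source): the four kinds of
   memory dependencies listed above, classified for a pair of accesses to
   the same location, where EARLIER precedes LATER in the insn stream.
   The types and names below are hypothetical, standalone stand-ins for
   the real MEM handling done by sched_analyze_1/sched_analyze_2.  */
#if 0   /* Illustration only; never compiled as part of this file.  */
enum sketch_mem_dep
{
  SKETCH_DEP_NONE,    /* Different locations: no dependence.  */
  SKETCH_DEP_READ,    /* (0) read follows read.  */
  SKETCH_DEP_TRUE,    /* (1) read follows write.  */
  SKETCH_DEP_ANTI,    /* (2) write follows read.  */
  SKETCH_DEP_OUTPUT   /* (3) write follows write.  */
};

struct sketch_access { int location; int is_write; };

static enum sketch_mem_dep
sketch_classify_mem_dep (const struct sketch_access *earlier,
                         const struct sketch_access *later)
{
  if (earlier->location != later->location)
    return SKETCH_DEP_NONE;
  if (!earlier->is_write && !later->is_write)
    return SKETCH_DEP_READ;
  if (earlier->is_write && !later->is_write)
    return SKETCH_DEP_TRUE;
  if (!earlier->is_write && later->is_write)
    return SKETCH_DEP_ANTI;
  return SKETCH_DEP_OUTPUT;
}
#endif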
467
468 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
469 The MEM is a memory reference contained within INSN, which we are saving
470 so that we can do memory aliasing on it. */
471
472 void
473 add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
474 struct deps *deps;
475 rtx *insn_list, *mem_list, insn, mem;
476 {
477 register rtx link;
478
479 link = alloc_INSN_LIST (insn, *insn_list);
480 *insn_list = link;
481
482 link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
483 *mem_list = link;
484
485 deps->pending_lists_length++;
486 }
487
488 /* Make a dependency between every memory reference on the pending lists
489 and INSN, thus flushing the pending lists. If ONLY_WRITE, don't flush
490 the read list. */
491
492 static void
493 flush_pending_lists (deps, insn, only_write)
494 struct deps *deps;
495 rtx insn;
496 int only_write;
497 {
498 rtx u;
499 rtx link;
500
501 while (deps->pending_read_insns && ! only_write)
502 {
503 add_dependence (insn, XEXP (deps->pending_read_insns, 0),
504 REG_DEP_ANTI);
505
506 link = deps->pending_read_insns;
507 deps->pending_read_insns = XEXP (deps->pending_read_insns, 1);
508 free_INSN_LIST_node (link);
509
510 link = deps->pending_read_mems;
511 deps->pending_read_mems = XEXP (deps->pending_read_mems, 1);
512 free_EXPR_LIST_node (link);
513 }
514 while (deps->pending_write_insns)
515 {
516 add_dependence (insn, XEXP (deps->pending_write_insns, 0),
517 REG_DEP_ANTI);
518
519 link = deps->pending_write_insns;
520 deps->pending_write_insns = XEXP (deps->pending_write_insns, 1);
521 free_INSN_LIST_node (link);
522
523 link = deps->pending_write_mems;
524 deps->pending_write_mems = XEXP (deps->pending_write_mems, 1);
525 free_EXPR_LIST_node (link);
526 }
527 deps->pending_lists_length = 0;
528
529 /* last_pending_memory_flush is now a list of insns. */
530 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
531 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
532
533 free_INSN_LIST_list (&deps->last_pending_memory_flush);
534 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
535 }
536 \f
537 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
538 rtx, X, creating all dependencies generated by the write to the
539 destination of X, and reads of everything mentioned. */
540
541 static void
542 sched_analyze_1 (deps, x, insn)
543 struct deps *deps;
544 rtx x;
545 rtx insn;
546 {
547 register int regno;
548 register rtx dest = XEXP (x, 0);
549 enum rtx_code code = GET_CODE (x);
550
551 if (dest == 0)
552 return;
553
554 if (GET_CODE (dest) == PARALLEL)
555 {
556 register int i;
557
558 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
559 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
560 sched_analyze_1 (deps,
561 gen_rtx_CLOBBER (VOIDmode,
562 XEXP (XVECEXP (dest, 0, i), 0)),
563 insn);
564
565 if (GET_CODE (x) == SET)
566 sched_analyze_2 (deps, SET_SRC (x), insn);
567 return;
568 }
569
570 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
571 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
572 {
573 if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
574 {
575 /* The second and third arguments are values read by this insn. */
576 sched_analyze_2 (deps, XEXP (dest, 1), insn);
577 sched_analyze_2 (deps, XEXP (dest, 2), insn);
578 }
579 dest = XEXP (dest, 0);
580 }
581
582 if (GET_CODE (dest) == REG)
583 {
584 register int i;
585
586 regno = REGNO (dest);
587
588 /* A hard reg in a wide mode may really be multiple registers.
589 If so, mark all of them just like the first. */
590 if (regno < FIRST_PSEUDO_REGISTER)
591 {
592 i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
593 while (--i >= 0)
594 {
595 int r = regno + i;
596 rtx u;
597
598 for (u = deps->reg_last[r].uses; u; u = XEXP (u, 1))
599 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
600
601 for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
602 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
603
604 /* Clobbers need not be ordered with respect to one
605 another, but sets must be ordered with respect to a
606 pending clobber. */
607 if (code == SET)
608 {
609 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
610 free_INSN_LIST_list (&deps->reg_last[r].uses);
611 for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
612 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
613 SET_REGNO_REG_SET (reg_pending_sets, r);
614 }
615 else
616 SET_REGNO_REG_SET (reg_pending_clobbers, r);
617
618 /* Function calls clobber all call_used regs. */
619 if (global_regs[r] || (code == SET && call_used_regs[r]))
620 for (u = deps->last_function_call; u; u = XEXP (u, 1))
621 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
622 }
623 }
624 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
625 it does not reload. Ignore these as they have served their
626 purpose already. */
627 else if (regno >= deps->max_reg)
628 {
629 if (GET_CODE (PATTERN (insn)) != USE
630 && GET_CODE (PATTERN (insn)) != CLOBBER)
631 abort ();
632 }
633 else
634 {
635 rtx u;
636
637 for (u = deps->reg_last[regno].uses; u; u = XEXP (u, 1))
638 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
639
640 for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
641 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
642
643 if (code == SET)
644 {
645 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
646 free_INSN_LIST_list (&deps->reg_last[regno].uses);
647 for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
648 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
649 SET_REGNO_REG_SET (reg_pending_sets, regno);
650 }
651 else
652 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
653
654 /* Pseudos that are REG_EQUIV to something may be replaced
655 by that during reloading. We need only add dependencies for
656 the address in the REG_EQUIV note. */
657 if (!reload_completed
658 && reg_known_equiv_p[regno]
659 && GET_CODE (reg_known_value[regno]) == MEM)
660 sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);
661
662 /* Don't let it cross a call after scheduling if it doesn't
663 already cross one. */
664
665 if (REG_N_CALLS_CROSSED (regno) == 0)
666 for (u = deps->last_function_call; u; u = XEXP (u, 1))
667 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
668 }
669 }
670 else if (GET_CODE (dest) == MEM)
671 {
672 /* Writing memory. */
673
674 if (deps->pending_lists_length > 32)
675 {
676 /* Flush all pending reads and writes to prevent the pending lists
677 from getting any larger. Insn scheduling runs too slowly when
678 these lists get long. The number 32 was chosen because it
679 seems like a reasonable number. When compiling GCC with itself,
680 this flush occurs 8 times for sparc, and 10 times for m88k using
681 the number 32. */
682 flush_pending_lists (deps, insn, 0);
683 }
684 else
685 {
686 rtx u;
687 rtx pending, pending_mem;
688
689 pending = deps->pending_read_insns;
690 pending_mem = deps->pending_read_mems;
691 while (pending)
692 {
693 if (anti_dependence (XEXP (pending_mem, 0), dest))
694 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
695
696 pending = XEXP (pending, 1);
697 pending_mem = XEXP (pending_mem, 1);
698 }
699
700 pending = deps->pending_write_insns;
701 pending_mem = deps->pending_write_mems;
702 while (pending)
703 {
704 if (output_dependence (XEXP (pending_mem, 0), dest))
705 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
706
707 pending = XEXP (pending, 1);
708 pending_mem = XEXP (pending_mem, 1);
709 }
710
711 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
712 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
713
714 add_insn_mem_dependence (deps, &deps->pending_write_insns,
715 &deps->pending_write_mems, insn, dest);
716 }
717 sched_analyze_2 (deps, XEXP (dest, 0), insn);
718 }
719
720 /* Analyze reads. */
721 if (GET_CODE (x) == SET)
722 sched_analyze_2 (deps, SET_SRC (x), insn);
723 }
724
725 /* Analyze the uses of memory and registers in rtx X in INSN. */
726
727 static void
728 sched_analyze_2 (deps, x, insn)
729 struct deps *deps;
730 rtx x;
731 rtx insn;
732 {
733 register int i;
734 register int j;
735 register enum rtx_code code;
736 register const char *fmt;
737
738 if (x == 0)
739 return;
740
741 code = GET_CODE (x);
742
743 switch (code)
744 {
745 case CONST_INT:
746 case CONST_DOUBLE:
747 case SYMBOL_REF:
748 case CONST:
749 case LABEL_REF:
750 /* Ignore constants. Note that we must handle CONST_DOUBLE here
751 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
752 this does not mean that this insn is using cc0. */
753 return;
754
755 #ifdef HAVE_cc0
756 case CC0:
757 /* User of CC0 depends on immediately preceding insn. */
758 set_sched_group_p (insn);
759 return;
760 #endif
761
762 case REG:
763 {
764 rtx u;
765 int regno = REGNO (x);
766 if (regno < FIRST_PSEUDO_REGISTER)
767 {
768 int i;
769
770 i = HARD_REGNO_NREGS (regno, GET_MODE (x));
771 while (--i >= 0)
772 {
773 int r = regno + i;
774 deps->reg_last[r].uses
775 = alloc_INSN_LIST (insn, deps->reg_last[r].uses);
776 SET_REGNO_REG_SET (&deps->reg_last_in_use, r);
777
778 for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
779 add_dependence (insn, XEXP (u, 0), 0);
780
781 /* ??? This should never happen. */
782 for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
783 add_dependence (insn, XEXP (u, 0), 0);
784
785 if (call_used_regs[r] || global_regs[r])
786 /* Function calls clobber all call_used regs. */
787 for (u = deps->last_function_call; u; u = XEXP (u, 1))
788 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
789 }
790 }
791 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
792 it does not reload. Ignore these as they have served their
793 purpose already. */
794 else if (regno >= deps->max_reg)
795 {
796 if (GET_CODE (PATTERN (insn)) != USE
797 && GET_CODE (PATTERN (insn)) != CLOBBER)
798 abort ();
799 }
800 else
801 {
802 deps->reg_last[regno].uses
803 = alloc_INSN_LIST (insn, deps->reg_last[regno].uses);
804 SET_REGNO_REG_SET (&deps->reg_last_in_use, regno);
805
806 for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
807 add_dependence (insn, XEXP (u, 0), 0);
808
809 /* ??? This should never happen. */
810 for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
811 add_dependence (insn, XEXP (u, 0), 0);
812
813 /* Pseudos that are REG_EQUIV to something may be replaced
814 by that during reloading. We need only add dependencies for
815 the address in the REG_EQUIV note. */
816 if (!reload_completed
817 && reg_known_equiv_p[regno]
818 && GET_CODE (reg_known_value[regno]) == MEM)
819 sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);
820
821 /* If the register does not already cross any calls, then add this
822 insn to the sched_before_next_call list so that it will still
823 not cross calls after scheduling. */
824 if (REG_N_CALLS_CROSSED (regno) == 0)
825 add_dependence (deps->sched_before_next_call, insn,
826 REG_DEP_ANTI);
827 }
828 return;
829 }
830
831 case MEM:
832 {
833 /* Reading memory. */
834 rtx u;
835 rtx pending, pending_mem;
836
837 pending = deps->pending_read_insns;
838 pending_mem = deps->pending_read_mems;
839 while (pending)
840 {
841 if (read_dependence (XEXP (pending_mem, 0), x))
842 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
843
844 pending = XEXP (pending, 1);
845 pending_mem = XEXP (pending_mem, 1);
846 }
847
848 pending = deps->pending_write_insns;
849 pending_mem = deps->pending_write_mems;
850 while (pending)
851 {
852 if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
853 x, rtx_varies_p))
854 add_dependence (insn, XEXP (pending, 0), 0);
855
856 pending = XEXP (pending, 1);
857 pending_mem = XEXP (pending_mem, 1);
858 }
859
860 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
861 if (GET_CODE (XEXP (u, 0)) != JUMP_INSN
862 || deps_may_trap_p (x))
863 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
864
865 /* Always add these dependencies to pending_reads, since
866 this insn may be followed by a write. */
867 add_insn_mem_dependence (deps, &deps->pending_read_insns,
868 &deps->pending_read_mems, insn, x);
869
870 /* Take advantage of tail recursion here. */
871 sched_analyze_2 (deps, XEXP (x, 0), insn);
872 return;
873 }
874
875 /* Force pending stores to memory in case a trap handler needs them. */
876 case TRAP_IF:
877 flush_pending_lists (deps, insn, 1);
878 break;
879
880 case ASM_OPERANDS:
881 case ASM_INPUT:
882 case UNSPEC_VOLATILE:
883 {
884 rtx u;
885
886 /* Traditional and volatile asm instructions must be considered to use
887 and clobber all hard registers, all pseudo-registers and all of
888 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
889
890 Consider for instance a volatile asm that changes the fpu rounding
891 mode. An insn should not be moved across this even if it only uses
892 pseudo-regs because it might give an incorrectly rounded result. */
893 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
894 {
895 for (i = 0; i < deps->max_reg; i++)
896 {
897 struct deps_reg *reg_last = &deps->reg_last[i];
898
899 for (u = reg_last->uses; u; u = XEXP (u, 1))
900 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
901 for (u = reg_last->sets; u; u = XEXP (u, 1))
902 add_dependence (insn, XEXP (u, 0), 0);
903 for (u = reg_last->clobbers; u; u = XEXP (u, 1))
904 add_dependence (insn, XEXP (u, 0), 0);
905
906 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
907 free_INSN_LIST_list (&reg_last->uses);
908 }
909 reg_pending_sets_all = 1;
910
911 flush_pending_lists (deps, insn, 0);
912 }
913
914 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
915 We cannot just fall through here since then we would be confused
916 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
917 a traditional asm, unlike its normal usage. */
918
919 if (code == ASM_OPERANDS)
920 {
921 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
922 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
923 return;
924 }
925 break;
926 }
927
928 case PRE_DEC:
929 case POST_DEC:
930 case PRE_INC:
931 case POST_INC:
932 /* These both read and modify the result. We must handle them as writes
933 to get proper dependencies for following instructions. We must handle
934 them as reads to get proper dependencies from this to previous
935 instructions. Thus we need to pass them to both sched_analyze_1
936 and sched_analyze_2. We must call sched_analyze_2 first in order
937 to get the proper antecedent for the read. */
938 sched_analyze_2 (deps, XEXP (x, 0), insn);
939 sched_analyze_1 (deps, x, insn);
940 return;
941
942 case POST_MODIFY:
943 case PRE_MODIFY:
944 /* op0 = op0 + op1 */
945 sched_analyze_2 (deps, XEXP (x, 0), insn);
946 sched_analyze_2 (deps, XEXP (x, 1), insn);
947 sched_analyze_1 (deps, x, insn);
948 return;
949
950 default:
951 break;
952 }
953
954 /* Other cases: walk the insn. */
955 fmt = GET_RTX_FORMAT (code);
956 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
957 {
958 if (fmt[i] == 'e')
959 sched_analyze_2 (deps, XEXP (x, i), insn);
960 else if (fmt[i] == 'E')
961 for (j = 0; j < XVECLEN (x, i); j++)
962 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
963 }
964 }
965
966 /* Analyze an INSN with pattern X to find all dependencies. */
967
968 static void
969 sched_analyze_insn (deps, x, insn, loop_notes)
970 struct deps *deps;
971 rtx x, insn;
972 rtx loop_notes;
973 {
974 register RTX_CODE code = GET_CODE (x);
975 int schedule_barrier_found = 0;
976 rtx link;
977 int i;
978
979 if (code == COND_EXEC)
980 {
981 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
982
983 /* ??? Should be recording conditions so we reduce the number of
984 false dependencies. */
985 x = COND_EXEC_CODE (x);
986 code = GET_CODE (x);
987 }
988 if (code == SET || code == CLOBBER)
989 sched_analyze_1 (deps, x, insn);
990 else if (code == PARALLEL)
991 {
992 register int i;
993 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
994 {
995 rtx sub = XVECEXP (x, 0, i);
996 code = GET_CODE (sub);
997
998 if (code == COND_EXEC)
999 {
1000 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
1001 sub = COND_EXEC_CODE (sub);
1002 code = GET_CODE (sub);
1003 }
1004 if (code == SET || code == CLOBBER)
1005 sched_analyze_1 (deps, sub, insn);
1006 else
1007 sched_analyze_2 (deps, sub, insn);
1008 }
1009 }
1010 else
1011 sched_analyze_2 (deps, x, insn);
1012
1013 /* Mark registers CLOBBERED or used by the called function. */
1014 if (GET_CODE (insn) == CALL_INSN)
1015 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1016 {
1017 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
1018 sched_analyze_1 (deps, XEXP (link, 0), insn);
1019 else
1020 sched_analyze_2 (deps, XEXP (link, 0), insn);
1021 }
1022
1023 if (GET_CODE (insn) == JUMP_INSN)
1024 {
1025 rtx next;
1026 next = next_nonnote_insn (insn);
1027 if (next && GET_CODE (next) == BARRIER)
1028 schedule_barrier_found = 1;
1029 else
1030 {
1031 rtx pending, pending_mem, u;
1032 regset_head tmp;
1033 INIT_REG_SET (&tmp);
1034
1035 (*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
1036 EXECUTE_IF_SET_IN_REG_SET (&tmp, 0, i,
1037 {
1038 struct deps_reg *reg_last = &deps->reg_last[i];
1039 for (u = reg_last->sets; u; u = XEXP (u, 1))
1040 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1041 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
1042 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1043 });
1044
1045 CLEAR_REG_SET (&tmp);
1046
1047 pending = deps->pending_write_insns;
1048 pending_mem = deps->pending_write_mems;
1049 while (pending)
1050 {
1051 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
1052
1053 pending = XEXP (pending, 1);
1054 pending_mem = XEXP (pending_mem, 1);
1055 }
1056
1057 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
1058 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1059 }
1060 }
1061
1062 /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
1063 block, then we must be sure that no instructions are scheduled across it.
1064 Otherwise, the reg_n_refs info (which depends on loop_depth) would
1065 become incorrect. */
1066 if (loop_notes)
1067 {
1068 rtx link;
1069
1070 /* Update loop_notes with any notes from this insn. Also determine
1071 if any of the notes on the list correspond to instruction scheduling
1072 barriers (loop, eh & setjmp notes, but not range notes). */
1073 link = loop_notes;
1074 while (XEXP (link, 1))
1075 {
1076 if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
1077 || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
1078 || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
1079 || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END
1080 || INTVAL (XEXP (link, 0)) == NOTE_INSN_SETJMP)
1081 schedule_barrier_found = 1;
1082
1083 link = XEXP (link, 1);
1084 }
1085 XEXP (link, 1) = REG_NOTES (insn);
1086 REG_NOTES (insn) = loop_notes;
1087 }
1088
1089 /* If this instruction can throw an exception, then moving it changes
1090 where block boundaries fall. This is mighty confusing elsewhere.
1091 Therefore, prevent such an instruction from being moved. */
1092 if (flag_non_call_exceptions && can_throw_internal (insn))
1093 schedule_barrier_found = 1;
1094
1095 /* Add dependencies if a scheduling barrier was found. */
1096 if (schedule_barrier_found)
1097 {
1098 rtx u, pending, pending_mem;
1099
1100 for (i = 0; i < deps->max_reg; i++)
1101 {
1102 struct deps_reg *reg_last = &deps->reg_last[i];
1103
1104 for (u = reg_last->uses; u; u = XEXP (u, 1))
1105 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1106 for (u = reg_last->sets; u; u = XEXP (u, 1))
1107 add_dependence (insn, XEXP (u, 0), 0);
1108 for (u = reg_last->clobbers; u; u = XEXP (u, 1))
1109 add_dependence (insn, XEXP (u, 0), 0);
1110
1111 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
1112 free_INSN_LIST_list (&reg_last->uses);
1113 }
1114 flush_pending_lists (deps, insn, 0);
1115
1116 pending = deps->pending_write_insns;
1117 pending_mem = deps->pending_write_mems;
1118 while (pending)
1119 {
1120 add_dependence (insn, XEXP (pending, 0), 0);
1121 pending = XEXP (pending, 1);
1122 pending_mem = XEXP (pending_mem, 1);
1123 }
1124
1125 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
1126 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1127
1128 reg_pending_sets_all = 1;
1129 }
1130
1131 /* Accumulate clobbers until the next set so that it will be output
1132 dependent on all of them. At the next set we can clear the clobber
1133 list, since subsequent sets will be output dependent on it. */
1134 if (reg_pending_sets_all)
1135 {
1136 reg_pending_sets_all = 0;
1137 for (i = 0; i < deps->max_reg; i++)
1138 {
1139 struct deps_reg *reg_last = &deps->reg_last[i];
1140 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
1141 {
1142 free_INSN_LIST_list (&reg_last->sets);
1143 free_INSN_LIST_list (&reg_last->clobbers);
1144 }
1145 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1146 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1147 }
1148 }
1149 else
1150 {
1151 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
1152 {
1153 struct deps_reg *reg_last = &deps->reg_last[i];
1154 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
1155 {
1156 free_INSN_LIST_list (&reg_last->sets);
1157 free_INSN_LIST_list (&reg_last->clobbers);
1158 }
1159 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1160 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1161 });
1162 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
1163 {
1164 struct deps_reg *reg_last = &deps->reg_last[i];
1165 reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
1166 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1167 });
1168 }
1169 CLEAR_REG_SET (reg_pending_sets);
1170 CLEAR_REG_SET (reg_pending_clobbers);
1171
1172 /* If a post-call group is still open, see if it should remain so.
1173 This insn must be a simple move of a hard reg to a pseudo or
1174 vice-versa.
1175
1176 We must avoid moving these insns for correctness on
1177 SMALL_REGISTER_CLASS machines, and for special registers like
1178 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
1179 hard regs for all targets. */
1180
1181 if (deps->in_post_call_group_p)
1182 {
1183 rtx tmp, set = single_set (insn);
1184 int src_regno, dest_regno;
1185
1186 if (set == NULL)
1187 goto end_call_group;
1188
1189 tmp = SET_DEST (set);
1190 if (GET_CODE (tmp) == SUBREG)
1191 tmp = SUBREG_REG (tmp);
1192 if (GET_CODE (tmp) == REG)
1193 dest_regno = REGNO (tmp);
1194 else
1195 goto end_call_group;
1196
1197 tmp = SET_SRC (set);
1198 if (GET_CODE (tmp) == SUBREG)
1199 tmp = SUBREG_REG (tmp);
1200 if (GET_CODE (tmp) == REG)
1201 src_regno = REGNO (tmp);
1202 else
1203 goto end_call_group;
1204
1205 if (src_regno < FIRST_PSEUDO_REGISTER
1206 || dest_regno < FIRST_PSEUDO_REGISTER)
1207 {
1208 set_sched_group_p (insn);
1209 CANT_MOVE (insn) = 1;
1210 }
1211 else
1212 {
1213 end_call_group:
1214 deps->in_post_call_group_p = 0;
1215 }
1216 }
1217 }
1218
1219 /* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
1220 for every dependency. */
1221
1222 void
1223 sched_analyze (deps, head, tail)
1224 struct deps *deps;
1225 rtx head, tail;
1226 {
1227 register rtx insn;
1228 register rtx u;
1229 rtx loop_notes = 0;
1230
1231 for (insn = head;; insn = NEXT_INSN (insn))
1232 {
1233 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
1234 {
1235 /* Clear out the stale LOG_LINKS from flow. */
1236 free_INSN_LIST_list (&LOG_LINKS (insn));
1237
1238 /* Clear out stale SCHED_GROUP_P. */
1239 SCHED_GROUP_P (insn) = 0;
1240
1241 /* Make each JUMP_INSN a scheduling barrier for memory
1242 references. */
1243 if (GET_CODE (insn) == JUMP_INSN)
1244 deps->last_pending_memory_flush
1245 = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
1246 sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
1247 loop_notes = 0;
1248 }
1249 else if (GET_CODE (insn) == CALL_INSN)
1250 {
1251 rtx x;
1252 register int i;
1253
1254 /* Clear out stale SCHED_GROUP_P. */
1255 SCHED_GROUP_P (insn) = 0;
1256
1257 CANT_MOVE (insn) = 1;
1258
1259 /* Clear out the stale LOG_LINKS from flow. */
1260 free_INSN_LIST_list (&LOG_LINKS (insn));
1261
1262 /* Any instruction using a hard register which may get clobbered
1263 by a call needs to be marked as dependent on this call.
1264 This prevents a use of a hard return reg from being moved
1265 past a void call (i.e. it does not explicitly set the hard
1266 return reg). */
1267
1268 /* If this call is followed by a NOTE_INSN_SETJMP, then assume that
1269 all registers, not just hard registers, may be clobbered by this
1270 call. */
1271
1272 /* Insn, being a CALL_INSN, magically depends on
1273 `last_function_call' already. */
1274
1275 if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == NOTE
1276 && NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP)
1277 {
1278 for (i = 0; i < deps->max_reg; i++)
1279 {
1280 struct deps_reg *reg_last = &deps->reg_last[i];
1281
1282 for (u = reg_last->uses; u; u = XEXP (u, 1))
1283 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1284 for (u = reg_last->sets; u; u = XEXP (u, 1))
1285 add_dependence (insn, XEXP (u, 0), 0);
1286 for (u = reg_last->clobbers; u; u = XEXP (u, 1))
1287 add_dependence (insn, XEXP (u, 0), 0);
1288
1289 free_INSN_LIST_list (&reg_last->uses);
1290 }
1291 reg_pending_sets_all = 1;
1292
1293 /* Add a pair of REG_SAVE_NOTEs which we will later
1294 convert back into a NOTE_INSN_SETJMP note. See
1295 reemit_notes for why we use a pair of NOTEs. */
1296 REG_NOTES (insn) = alloc_EXPR_LIST (REG_SAVE_NOTE,
1297 GEN_INT (0),
1298 REG_NOTES (insn));
1299 REG_NOTES (insn) = alloc_EXPR_LIST (REG_SAVE_NOTE,
1300 GEN_INT (NOTE_INSN_SETJMP),
1301 REG_NOTES (insn));
1302 }
1303 else
1304 {
1305 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1306 if (call_used_regs[i] || global_regs[i])
1307 {
1308 for (u = deps->reg_last[i].uses; u; u = XEXP (u, 1))
1309 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1310 for (u = deps->reg_last[i].sets; u; u = XEXP (u, 1))
1311 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1312
1313 SET_REGNO_REG_SET (reg_pending_clobbers, i);
1314 }
1315 }
1316
1317 /* For each insn which shouldn't cross a call, add a dependence
1318 between that insn and this call insn. */
1319 x = LOG_LINKS (deps->sched_before_next_call);
1320 while (x)
1321 {
1322 add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
1323 x = XEXP (x, 1);
1324 }
1325 free_INSN_LIST_list (&LOG_LINKS (deps->sched_before_next_call));
1326
1327 sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
1328 loop_notes = 0;
1329
1330 /* In the absence of interprocedural alias analysis, we must flush
1331 all pending reads and writes, and start new dependencies
1332 from here. But only flush writes for constant calls (which may
1333 be passed a pointer to something we haven't written yet). */
1334 flush_pending_lists (deps, insn, CONST_CALL_P (insn));
1335
1336 /* Depend this function call (actually, the user of this
1337 function call) on all hard register clobberage. */
1338
1339 /* last_function_call is now a list of insns. */
1340 free_INSN_LIST_list (&deps->last_function_call);
1341 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
1342
1343 /* Before reload, begin a post-call group, so as to keep the
1344 lifetimes of hard registers correct. */
1345 if (! reload_completed)
1346 deps->in_post_call_group_p = 1;
1347 }
1348
1349 /* See comments on reemit_notes as to why we do this.
1350 ??? Actually, the reemit_notes just say what is done, not why. */
1351
1352 else if (GET_CODE (insn) == NOTE
1353 && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_BEG
1354 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_END))
1355 {
1356 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE, NOTE_RANGE_INFO (insn),
1357 loop_notes);
1358 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1359 GEN_INT (NOTE_LINE_NUMBER (insn)),
1360 loop_notes);
1361 }
1362 else if (GET_CODE (insn) == NOTE
1363 && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
1364 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
1365 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
1366 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END
1367 || (NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP
1368 && GET_CODE (PREV_INSN (insn)) != CALL_INSN)))
1369 {
1370 rtx rtx_region;
1371
1372 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
1373 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
1374 rtx_region = GEN_INT (NOTE_EH_HANDLER (insn));
1375 else
1376 rtx_region = GEN_INT (0);
1377
1378 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1379 rtx_region,
1380 loop_notes);
1381 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1382 GEN_INT (NOTE_LINE_NUMBER (insn)),
1383 loop_notes);
1384 CONST_CALL_P (loop_notes) = CONST_CALL_P (insn);
1385 }
1386
1387 if (insn == tail)
1388 return;
1389 }
1390 abort ();
1391 }
1392 \f
1393 /* Examine insns in the range [ HEAD, TAIL ] and use the backward
1394 dependences from LOG_LINKS to build forward dependences in
1395 INSN_DEPEND. */
1396
1397 void
1398 compute_forward_dependences (head, tail)
1399 rtx head, tail;
1400 {
1401 rtx insn, link;
1402 rtx next_tail;
1403 enum reg_note dep_type;
1404
1405 next_tail = NEXT_INSN (tail);
1406 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1407 {
1408 if (! INSN_P (insn))
1409 continue;
1410
1411 insn = group_leader (insn);
1412
1413 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
1414 {
1415 rtx x = group_leader (XEXP (link, 0));
1416 rtx new_link;
1417
1418 if (x != XEXP (link, 0))
1419 continue;
1420
1421 #ifdef ENABLE_CHECKING
1422 /* If add_dependence is working properly there should never
1423 be notes, deleted insns or duplicates in the backward
1424 links. Thus we need not check for them here.
1425
1426 However, if we have enabled checking we might as well go
1427 ahead and verify that add_dependence worked properly. */
1428 if (GET_CODE (x) == NOTE
1429 || INSN_DELETED_P (x)
1430 || (forward_dependency_cache != NULL
1431 && TEST_BIT (forward_dependency_cache[INSN_LUID (x)],
1432 INSN_LUID (insn)))
1433 || (forward_dependency_cache == NULL
1434 && find_insn_list (insn, INSN_DEPEND (x))))
1435 abort ();
1436 if (forward_dependency_cache != NULL)
1437 SET_BIT (forward_dependency_cache[INSN_LUID (x)],
1438 INSN_LUID (insn));
1439 #endif
1440
1441 new_link = alloc_INSN_LIST (insn, INSN_DEPEND (x));
1442
1443 dep_type = REG_NOTE_KIND (link);
1444 PUT_REG_NOTE_KIND (new_link, dep_type);
1445
1446 INSN_DEPEND (x) = new_link;
1447 INSN_DEP_COUNT (insn) += 1;
1448 }
1449 }
1450 }
1451 \f
1452 /* Initialize variables in DEPS for region data dependence analysis. */
1454
1455 void
1456 init_deps (deps)
1457 struct deps *deps;
1458 {
1459 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
1460
1461 deps->max_reg = max_reg;
1462 deps->reg_last = (struct deps_reg *)
1463 xcalloc (max_reg, sizeof (struct deps_reg));
1464 INIT_REG_SET (&deps->reg_last_in_use);
1465
1466 deps->pending_read_insns = 0;
1467 deps->pending_read_mems = 0;
1468 deps->pending_write_insns = 0;
1469 deps->pending_write_mems = 0;
1470 deps->pending_lists_length = 0;
1471 deps->last_pending_memory_flush = 0;
1472 deps->last_function_call = 0;
1473 deps->in_post_call_group_p = 0;
1474
1475 deps->sched_before_next_call
1476 = gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
1477 NULL_RTX, 0, NULL_RTX, NULL_RTX);
1478 LOG_LINKS (deps->sched_before_next_call) = 0;
1479 }
1480
1481 /* Free insn lists found in DEPS. */
1482
1483 void
1484 free_deps (deps)
1485 struct deps *deps;
1486 {
1487 int i;
1488
1489 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
1490 times. For a test case with 42000 regs and 8000 small basic blocks,
1491 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
1492 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
1493 {
1494 struct deps_reg *reg_last = &deps->reg_last[i];
1495 free_INSN_LIST_list (&reg_last->uses);
1496 free_INSN_LIST_list (&reg_last->sets);
1497 free_INSN_LIST_list (&reg_last->clobbers);
1498 });
1499 CLEAR_REG_SET (&deps->reg_last_in_use);
1500
1501 free (deps->reg_last);
1502 deps->reg_last = NULL;
1503 }
1504
1505 /* If it is profitable to use them, initialize caches for tracking
1506 dependency information.  LUID is the number of insns to be scheduled;
1507 it is used in the estimate of profitability. */
1508
1509 void
1510 init_dependency_caches (luid)
1511 int luid;
1512 {
1513 /* ?!? We could save some memory by computing a per-region luid mapping
1514 which could reduce both the number of vectors in the cache and the size
1515 of each vector. Instead we just avoid the cache entirely unless the
1516 average number of instructions in a basic block is very high. See
1517 the comment before the declaration of true_dependency_cache for
1518 what we consider "very high". */
1519 if (luid / n_basic_blocks > 100 * 5)
1520 {
1521 true_dependency_cache = sbitmap_vector_alloc (luid, luid);
1522 sbitmap_vector_zero (true_dependency_cache, luid);
1523 anti_dependency_cache = sbitmap_vector_alloc (luid, luid);
1524 sbitmap_vector_zero (anti_dependency_cache, luid);
1525 output_dependency_cache = sbitmap_vector_alloc (luid, luid);
1526 sbitmap_vector_zero (output_dependency_cache, luid);
1527 #ifdef ENABLE_CHECKING
1528 forward_dependency_cache = sbitmap_vector_alloc (luid, luid);
1529 sbitmap_vector_zero (forward_dependency_cache, luid);
1530 #endif
1531 }
1532 }
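/* Editorial note (not part of the original source): a worked example of the
   threshold above, using the figures from the comment at the top of this
   file.  With an assumed average of 5 insns per basic block, "very large"
   means luid / n_basic_blocks > 100 * 5 == 500 insns per block on average.
   So 12000 insns over 20 blocks (average 600) allocates the caches, while
   the same 12000 insns over 100 blocks (average 120) does not.  */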
1533
1534 /* Free the caches allocated in init_dependency_caches. */
1535
1536 void
1537 free_dependency_caches ()
1538 {
1539 if (true_dependency_cache)
1540 {
1541 free (true_dependency_cache);
1542 true_dependency_cache = NULL;
1543 free (anti_dependency_cache);
1544 anti_dependency_cache = NULL;
1545 free (output_dependency_cache);
1546 output_dependency_cache = NULL;
1547 #ifdef ENABLE_CHECKING
1548 free (forward_dependency_cache);
1549 forward_dependency_cache = NULL;
1550 #endif
1551 }
1552 }
1553
1554 /* Initialize some global variables needed by the dependency analysis
1555 code. */
1556
1557 void
1558 init_deps_global ()
1559 {
1560 reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
1561 reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
1562 reg_pending_sets_all = 0;
1563 }
1564
1565 /* Free everything used by the dependency analysis code. */
1566
1567 void
1568 finish_deps_global ()
1569 {
1570 FREE_REG_SET (reg_pending_sets);
1571 FREE_REG_SET (reg_pending_clobbers);
1572 }