gcc/sched-deps.c
1 /* Instruction scheduling pass. This file computes dependencies between
2 instructions.
3 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
5 Free Software Foundation, Inc.
6 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
7 and currently maintained by, Jim Wilson (wilson@cygnus.com)
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 2, or (at your option) any later
14 version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
24 02110-1301, USA. */
25 \f
26 #include "config.h"
27 #include "system.h"
28 #include "coretypes.h"
29 #include "tm.h"
30 #include "toplev.h"
31 #include "rtl.h"
32 #include "tm_p.h"
33 #include "hard-reg-set.h"
34 #include "regs.h"
35 #include "function.h"
36 #include "flags.h"
37 #include "insn-config.h"
38 #include "insn-attr.h"
39 #include "except.h"
40 #include "toplev.h"
41 #include "recog.h"
42 #include "sched-int.h"
43 #include "params.h"
44 #include "cselib.h"
45 #include "df.h"
46
47 #ifdef ENABLE_CHECKING
48 #define CHECK (true)
49 #else
50 #define CHECK (false)
51 #endif
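/* CHECK is a compile-time boolean mirroring ENABLE_CHECKING.  Presumably it
   is meant for guards of the form "if (CHECK) ...", so that the more
   expensive consistency checks compile away in release builds.  */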
52
53 /* Return the major type present in the DS. */
54 enum reg_note
55 ds_to_dk (ds_t ds)
56 {
57 if (ds & DEP_TRUE)
58 return REG_DEP_TRUE;
59
60 if (ds & DEP_OUTPUT)
61 return REG_DEP_OUTPUT;
62
63 gcc_assert (ds & DEP_ANTI);
64
65 return REG_DEP_ANTI;
66 }
67
68 /* Return equivalent dep_status. */
69 ds_t
70 dk_to_ds (enum reg_note dk)
71 {
72 switch (dk)
73 {
74 case REG_DEP_TRUE:
75 return DEP_TRUE;
76
77 case REG_DEP_OUTPUT:
78 return DEP_OUTPUT;
79
80 default:
81 gcc_assert (dk == REG_DEP_ANTI);
82 return DEP_ANTI;
83 }
84 }
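/* For example, dk_to_ds (REG_DEP_OUTPUT) yields DEP_OUTPUT, while
   ds_to_dk (DEP_OUTPUT | DEP_ANTI) yields REG_DEP_OUTPUT: when several
   dependence bits are set, the major type is picked in the order
   TRUE > OUTPUT > ANTI.  */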
85
86 /* Functions to operate with dependence information container - dep_t. */
87
88 /* Init DEP with the arguments. */
89 static void
90 init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note kind, ds_t ds)
91 {
92 DEP_PRO (dep) = pro;
93 DEP_CON (dep) = con;
94 DEP_KIND (dep) = kind;
95 DEP_STATUS (dep) = ds;
96 }
97
98 /* Init DEP with the arguments.
99 While most of the scheduler (including targets) only needs the major type
100 of the dependency, it is convenient to hide full dep_status from them. */
101 void
102 init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
103 {
104 ds_t ds;
105
106 if ((current_sched_info->flags & USE_DEPS_LIST) != 0)
107 ds = dk_to_ds (kind);
108 else
109 ds = -1;
110
111 init_dep_1 (dep, pro, con, kind, ds);
112 }
113
114 /* Make a copy of FROM in TO. */
115 static void
116 copy_dep (dep_t to, dep_t from)
117 {
118 memcpy (to, from, sizeof (*to));
119 }
120
121 /* Functions to operate with a single link from the dependencies lists -
122 dep_link_t. */
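/* A deps_list is a singly linked chain of dep_links threaded through
   DEP_LINK_NEXT.  To allow O(1) detachment, every link also records in
   DEP_LINK_PREV_NEXTP the address of the pointer that references it:
   &DEPS_LIST_FIRST (list) for the first link, &DEP_LINK_NEXT (prev) for
   any other link.  */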
123
124 /* Return true if dep_link L is consistent. */
125 static bool
126 dep_link_consistent_p (dep_link_t l)
127 {
128 dep_link_t next = DEP_LINK_NEXT (l);
129
130 return (next == NULL
131 || &DEP_LINK_NEXT (l) == DEP_LINK_PREV_NEXTP (next));
132 }
133
134 /* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
135 PREV_NEXTP. */
136 static void
137 attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
138 {
139 dep_link_t next = *prev_nextp;
140
141 gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
142 && DEP_LINK_NEXT (l) == NULL);
143
144 /* Init node being inserted. */
145 DEP_LINK_PREV_NEXTP (l) = prev_nextp;
146 DEP_LINK_NEXT (l) = next;
147
148 /* Fix next node. */
149 if (next != NULL)
150 {
151 gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);
152
153 DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
154 }
155
156 /* Fix prev node. */
157 *prev_nextp = l;
158 }
159
160 /* Add dep_link LINK to deps_list L. */
161 static void
162 add_to_deps_list (dep_link_t link, deps_list_t l)
163 {
164 attach_dep_link (link, &DEPS_LIST_FIRST (l));
165 }
166
167 /* Detach dep_link L from the list. */
168 static void
169 detach_dep_link (dep_link_t l)
170 {
171 dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
172 dep_link_t next = DEP_LINK_NEXT (l);
173
174 *prev_nextp = next;
175
176 if (next != NULL)
177 DEP_LINK_PREV_NEXTP (next) = prev_nextp;
178
179 /* Though this property is not used anywhere except in the assert in
180 attach_dep_link (), clearing these fields can prevent latent errors. */
181 DEP_LINK_PREV_NEXTP (l) = NULL;
182 DEP_LINK_NEXT (l) = NULL;
183 }
184
185 /* Move LINK from whatever list it is now to L. */
186 void
187 move_dep_link (dep_link_t link, deps_list_t l)
188 {
189 detach_dep_link (link);
190 add_to_deps_list (link, l);
191 }
192
193 /* Check L's and its successors' consistency.
194 This is, potentially, an expensive check, hence it should be guarded by
195 ENABLE_CHECKING at all times. */
196 static bool
197 dep_links_consistent_p (dep_link_t l)
198 {
199 while (l != NULL)
200 {
201 if (dep_link_consistent_p (l))
202 l = DEP_LINK_NEXT (l);
203 else
204 return false;
205 }
206
207 return true;
208 }
209
210 /* Dump dep_nodes starting from L. */
211 static void
212 dump_dep_links (FILE *dump, dep_link_t l)
213 {
214 while (l != NULL)
215 {
216 dep_t d = DEP_LINK_DEP (l);
217
218 fprintf (dump, "%d%c>%d ", INSN_UID (DEP_PRO (d)),
219 dep_link_consistent_p (l) ? '-' : '!', INSN_UID (DEP_CON (d)));
220
221 l = DEP_LINK_NEXT (l);
222 }
223
224 fprintf (dump, "\n");
225 }
226
227 /* Dump dep_nodes starting from L to stderr. */
228 void
229 debug_dep_links (dep_link_t l)
230 {
231 dump_dep_links (stderr, l);
232 }
233
234 /* Obstack to allocate dep_nodes and deps_lists on. */
235 static struct obstack deps_obstack;
236
237 /* Obstack to hold forward dependencies lists (deps_list_t). */
238 static struct obstack *dl_obstack = &deps_obstack;
239
240 /* Obstack to hold all dependency nodes (dep_node_t). */
241 static struct obstack *dn_obstack = &deps_obstack;
242
243 /* Functions to operate with dependences lists - deps_list_t. */
244
245 /* Allocate deps_list.
246
247 If ON_OBSTACK_P is true, allocate the list on the obstack. This is done for
248 INSN_FORW_DEPS lists because they should live till the end of scheduling.
249
250 INSN_BACK_DEPS and INSN_RESOLVED_BACK_DEPS lists are allocated on the free
251 store and are freed in haifa-sched.c: schedule_insn (). */
252 static deps_list_t
253 alloc_deps_list (bool on_obstack_p)
254 {
255 if (on_obstack_p)
256 return obstack_alloc (dl_obstack, sizeof (struct _deps_list));
257 else
258 return xmalloc (sizeof (struct _deps_list));
259 }
260
261 /* Initialize deps_list L. */
262 static void
263 init_deps_list (deps_list_t l)
264 {
265 DEPS_LIST_FIRST (l) = NULL;
266 }
267
268 /* Create (allocate and init) deps_list.
269 The meaning of ON_OBSTACK_P is the same as in alloc_deps_list (). */
270 deps_list_t
271 create_deps_list (bool on_obstack_p)
272 {
273 deps_list_t l = alloc_deps_list (on_obstack_p);
274
275 init_deps_list (l);
276 return l;
277 }
278
279 /* Remove all dep_nodes present in L. */
280 static void
281 clear_deps_list (deps_list_t l)
282 {
283 /* All dep_nodes are allocated on the dn_obstack. They'll be freed with
284 the obstack. */
285
286 DEPS_LIST_FIRST (l) = NULL;
287 }
288
289 /* Free deps_list L. */
290 void
291 free_deps_list (deps_list_t l)
292 {
293 gcc_assert (deps_list_empty_p (l));
294 free (l);
295 }
296
297 /* Delete (clear and free) deps_list L. */
298 void
299 delete_deps_list (deps_list_t l)
300 {
301 clear_deps_list (l);
302 free_deps_list (l);
303 }
304
305 /* Return true if L is empty. */
306 bool
307 deps_list_empty_p (deps_list_t l)
308 {
309 return DEPS_LIST_FIRST (l) == NULL;
310 }
311
312 /* Check L's consistency.
313 This is, potentially, an expensive check, hence it should be guarded by
314 ENABLE_CHECKING at all times. */
315 static bool
316 deps_list_consistent_p (deps_list_t l)
317 {
318 dep_link_t first = DEPS_LIST_FIRST (l);
319
320 return (first == NULL
321 || (&DEPS_LIST_FIRST (l) == DEP_LINK_PREV_NEXTP (first)
322 && dep_links_consistent_p (first)));
323 }
324
325 /* Dump L to F. */
326 static void
327 dump_deps_list (FILE *f, deps_list_t l)
328 {
329 dump_dep_links (f, DEPS_LIST_FIRST (l));
330 }
331
332 /* Dump L to STDERR. */
333 void
334 debug_deps_list (deps_list_t l)
335 {
336 dump_deps_list (stderr, l);
337 }
338
339 /* Add a dependency described by DEP to the list L.
340 L should be either INSN_BACK_DEPS or INSN_RESOLVED_BACK_DEPS. */
341 void
342 add_back_dep_to_deps_list (deps_list_t l, dep_t dep_from)
343 {
344 dep_node_t n = (dep_node_t) obstack_alloc (dn_obstack,
345 sizeof (*n));
346 dep_t dep_to = DEP_NODE_DEP (n);
347 dep_link_t back = DEP_NODE_BACK (n);
348 dep_link_t forw = DEP_NODE_FORW (n);
349
350 copy_dep (dep_to, dep_from);
351
352 DEP_LINK_NODE (back) = n;
353 DEP_LINK_NODE (forw) = n;
354
355 /* There is no particular need to initialize these four fields except to make
356 the assert in attach_dep_link () happy. */
357 DEP_LINK_NEXT (back) = NULL;
358 DEP_LINK_PREV_NEXTP (back) = NULL;
359 DEP_LINK_NEXT (forw) = NULL;
360 DEP_LINK_PREV_NEXTP (forw) = NULL;
361
362 add_to_deps_list (back, l);
363 }
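/* Each dep_node allocated above carries the dep itself plus two dep_links
   (DEP_NODE_BACK and DEP_NODE_FORW).  Only the backward link is attached
   here; the forward link is presumably attached later, when the forward
   dependency lists (INSN_FORW_DEPS) are built, without a new allocation.  */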
364
365 /* Find the dep_link with producer PRO in deps_list L. */
366 dep_link_t
367 find_link_by_pro_in_deps_list (deps_list_t l, rtx pro)
368 {
369 dep_link_t link;
370
371 FOR_EACH_DEP_LINK (link, l)
372 if (DEP_LINK_PRO (link) == pro)
373 return link;
374
375 return NULL;
376 }
377
378 /* Find the dep_link with consumer CON in deps_list L. */
379 dep_link_t
380 find_link_by_con_in_deps_list (deps_list_t l, rtx con)
381 {
382 dep_link_t link;
383
384 FOR_EACH_DEP_LINK (link, l)
385 if (DEP_LINK_CON (link) == con)
386 return link;
387
388 return NULL;
389 }
390
391 /* Make a copy of FROM in TO with substituting consumer with CON.
392 TO and FROM should be RESOLVED_BACK_DEPS lists. */
393 void
394 copy_deps_list_change_con (deps_list_t to, deps_list_t from, rtx con)
395 {
396 dep_link_t l;
397
398 gcc_assert (deps_list_empty_p (to));
399
400 FOR_EACH_DEP_LINK (l, from)
401 {
402 add_back_dep_to_deps_list (to, DEP_LINK_DEP (l));
403 DEP_LINK_CON (DEPS_LIST_FIRST (to)) = con;
404 }
405 }
406
407 static regset reg_pending_sets;
408 static regset reg_pending_clobbers;
409 static regset reg_pending_uses;
410
411 /* The following enumeration values tell us what dependencies we
412 should use to implement the barrier. We use true-dependencies for
413 TRUE_BARRIER and anti-dependencies for MOVE_BARRIER. */
414 enum reg_pending_barrier_mode
415 {
416 NOT_A_BARRIER = 0,
417 MOVE_BARRIER,
418 TRUE_BARRIER
419 };
420
421 static enum reg_pending_barrier_mode reg_pending_barrier;
422
423 /* To speed up the test for duplicate dependency links we keep a
424 record of dependencies created by add_dependence when the average
425 number of instructions in a basic block is very large.
426
427 Studies have shown that there are typically around 5 instructions between
428 branches for typical C code. So we can make a guess that the average
429 basic block is approximately 5 instructions long; we will choose 100X
430 the average size as a very large basic block.
431
432 Each insn has associated bitmaps for its dependencies. Each bitmap
433 has enough entries to represent a dependency on any other insn in
434 the insn chain. If the bitmap cache for true dependencies is
435 allocated, then the remaining caches are allocated as well. */
436 static bitmap_head *true_dependency_cache;
437 static bitmap_head *output_dependency_cache;
438 static bitmap_head *anti_dependency_cache;
439 static bitmap_head *spec_dependency_cache;
440 static int cache_size;
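/* With these caches the question "does INSN already depend on ELEM, and
   how?" reduces to bitmap tests such as

     bitmap_bit_p (&true_dependency_cache[INSN_LUID (insn)],
                   INSN_LUID (elem))

   which lets add_or_update_back_dep_1 below avoid walking INSN_BACK_DEPS
   in the common case.  */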
441
442 /* To speed up checking the consistency of the formed forward insn
443 dependencies we use the following cache. Another possible solution
444 would be to switch off the check for duplicate insns in forward
445 dependencies. */
446 #ifdef ENABLE_CHECKING
447 static bitmap_head *forward_dependency_cache;
448 #endif
449
450 static int deps_may_trap_p (rtx);
451 static void add_dependence_list (rtx, rtx, int, enum reg_note);
452 static void add_dependence_list_and_free (rtx, rtx *, int, enum reg_note);
453 static void delete_all_dependences (rtx);
454 static void fixup_sched_groups (rtx);
455
456 static void flush_pending_lists (struct deps *, rtx, int, int);
457 static void sched_analyze_1 (struct deps *, rtx, rtx);
458 static void sched_analyze_2 (struct deps *, rtx, rtx);
459 static void sched_analyze_insn (struct deps *, rtx, rtx);
460
461 static rtx sched_get_condition (rtx);
462 static int conditions_mutex_p (rtx, rtx);
463
464 static enum DEPS_ADJUST_RESULT maybe_add_or_update_back_dep_1 (rtx, rtx,
465 enum reg_note, ds_t, rtx, rtx, dep_link_t **);
466 static enum DEPS_ADJUST_RESULT add_or_update_back_dep_1 (rtx, rtx,
467 enum reg_note, ds_t, rtx, rtx, dep_link_t **);
468 static void add_back_dep (rtx, rtx, enum reg_note, ds_t);
469
470 static void adjust_add_sorted_back_dep (rtx, dep_link_t, dep_link_t *);
471 static void adjust_back_add_forw_dep (rtx, dep_link_t *);
472 static void delete_forw_dep (dep_link_t);
473 static dw_t estimate_dep_weak (rtx, rtx);
474 #ifdef INSN_SCHEDULING
475 #ifdef ENABLE_CHECKING
476 static void check_dep_status (enum reg_note, ds_t, bool);
477 #endif
478 #endif
479 \f
480 /* Return nonzero if a load of the memory reference MEM can cause a trap. */
481
482 static int
483 deps_may_trap_p (rtx mem)
484 {
485 rtx addr = XEXP (mem, 0);
486
487 if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
488 {
489 rtx t = get_reg_known_value (REGNO (addr));
490 if (t)
491 addr = t;
492 }
493 return rtx_addr_can_trap_p (addr);
494 }
495 \f
496 /* Return the INSN_LIST containing INSN in LIST, or NULL
497 if LIST does not contain INSN. */
498
499 rtx
500 find_insn_list (rtx insn, rtx list)
501 {
502 while (list)
503 {
504 if (XEXP (list, 0) == insn)
505 return list;
506 list = XEXP (list, 1);
507 }
508 return 0;
509 }
510 \f
511 /* Find the condition under which INSN is executed. */
512
513 static rtx
514 sched_get_condition (rtx insn)
515 {
516 rtx pat = PATTERN (insn);
517 rtx src;
518
519 if (pat == 0)
520 return 0;
521
522 if (GET_CODE (pat) == COND_EXEC)
523 return COND_EXEC_TEST (pat);
524
525 if (!any_condjump_p (insn) || !onlyjump_p (insn))
526 return 0;
527
528 src = SET_SRC (pc_set (insn));
529
530 if (XEXP (src, 2) == pc_rtx)
531 return XEXP (src, 0);
532 else if (XEXP (src, 1) == pc_rtx)
533 {
534 rtx cond = XEXP (src, 0);
535 enum rtx_code revcode = reversed_comparison_code (cond, insn);
536
537 if (revcode == UNKNOWN)
538 return 0;
539 return gen_rtx_fmt_ee (revcode, GET_MODE (cond), XEXP (cond, 0),
540 XEXP (cond, 1));
541 }
542
543 return 0;
544 }
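/* For example, for (set (pc) (if_then_else (eq (reg) (const_int 0))
   (label_ref L) (pc))) the EQ condition is returned as-is; if the arms
   are swapped so that the taken branch is the fall-through, the reversed
   comparison (NE here) is built and returned instead.  */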
545
546 \f
547 /* Return nonzero if conditions COND1 and COND2 can never both be true. */
548
549 static int
550 conditions_mutex_p (rtx cond1, rtx cond2)
551 {
552 if (COMPARISON_P (cond1)
553 && COMPARISON_P (cond2)
554 && GET_CODE (cond1) == reversed_comparison_code (cond2, NULL)
555 && XEXP (cond1, 0) == XEXP (cond2, 0)
556 && XEXP (cond1, 1) == XEXP (cond2, 1))
557 return 1;
558 return 0;
559 }
560
561 /* Return true if insn1 and insn2 can never depend on one another because
562 the conditions under which they are executed are mutually exclusive. */
563 bool
564 sched_insns_conditions_mutex_p (rtx insn1, rtx insn2)
565 {
566 rtx cond1, cond2;
567
568 /* flow.c doesn't handle conditional lifetimes entirely correctly;
569 calls mess up the conditional lifetimes. */
570 if (!CALL_P (insn1) && !CALL_P (insn2))
571 {
572 cond1 = sched_get_condition (insn1);
573 cond2 = sched_get_condition (insn2);
574 if (cond1 && cond2
575 && conditions_mutex_p (cond1, cond2)
576 /* Make sure first instruction doesn't affect condition of second
577 instruction if switched. */
578 && !modified_in_p (cond1, insn2)
579 /* Make sure second instruction doesn't affect condition of first
580 instruction if switched. */
581 && !modified_in_p (cond2, insn1))
582 return true;
583 }
584 return false;
585 }
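/* For instance, (eq (reg R) (const_int 0)) and (ne (reg R) (const_int 0))
   are mutually exclusive, so two COND_EXEC insns predicated on them need
   no dependence on each other, provided neither insn modifies R.  */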
586 \f
587 /* Add ELEM wrapped in a dep_link with reg note kind DEP_TYPE to the
588 INSN_BACK_DEPS (INSN), if it is not already there. DEP_TYPE indicates the
589 type of dependence that this link represents. DS, if nonzero,
590 indicates speculations through which this dependence can be overcome.
591 MEM1 and MEM2, if non-null, correspond to memory locations in case of
592 data speculation. The function returns a value indicating if an old entry
593 has been changed or a new entry has been added to insn's backward deps.
594 If an entry has been changed, CHANGED_LINKPP is set to its address.
595 See also the definition of enum DEPS_ADJUST_RESULT in sched-int.h.
596 Actual manipulation of dependence data structures is performed in
597 add_or_update_back_dep_1. */
598
599 static enum DEPS_ADJUST_RESULT
600 maybe_add_or_update_back_dep_1 (rtx insn, rtx elem, enum reg_note dep_type,
601 ds_t ds, rtx mem1, rtx mem2,
602 dep_link_t **changed_linkpp)
603 {
604 gcc_assert (INSN_P (insn) && INSN_P (elem));
605
606 /* Don't depend an insn on itself. */
607 if (insn == elem)
608 {
609 #ifdef INSN_SCHEDULING
610 if (current_sched_info->flags & DO_SPECULATION)
611 /* INSN has an internal dependence, which we can't overcome. */
612 HAS_INTERNAL_DEP (insn) = 1;
613 #endif
614 return 0;
615 }
616
617 return add_or_update_back_dep_1 (insn, elem, dep_type,
618 ds, mem1, mem2, changed_linkpp);
619 }
620
621 /* This function has the same parameters and return values
622 as maybe_add_or_update_back_dep_1. The only difference between these
623 two functions is that INSN and ELEM are guaranteed not to be the same
624 in this one. */
625 static enum DEPS_ADJUST_RESULT
626 add_or_update_back_dep_1 (rtx insn, rtx elem, enum reg_note dep_type,
627 ds_t ds ATTRIBUTE_UNUSED,
628 rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED,
629 dep_link_t **changed_linkpp ATTRIBUTE_UNUSED)
630 {
631 bool maybe_present_p = true, present_p = false;
632
633 gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
634
635 #ifdef INSN_SCHEDULING
636
637 #ifdef ENABLE_CHECKING
638 check_dep_status (dep_type, ds, mem1 != NULL);
639 #endif
640
641 /* If we already have a dependency for ELEM, then we do not need to
642 do anything. Avoiding the list walk below can cut compile times
643 dramatically for some code. */
644 if (true_dependency_cache != NULL)
645 {
646 enum reg_note present_dep_type;
647
648 gcc_assert (output_dependency_cache);
649 gcc_assert (anti_dependency_cache);
650 if (!(current_sched_info->flags & USE_DEPS_LIST))
651 {
652 if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (insn)],
653 INSN_LUID (elem)))
654 present_dep_type = REG_DEP_TRUE;
655 else if (bitmap_bit_p (&output_dependency_cache[INSN_LUID (insn)],
656 INSN_LUID (elem)))
657 present_dep_type = REG_DEP_OUTPUT;
658 else if (bitmap_bit_p (&anti_dependency_cache[INSN_LUID (insn)],
659 INSN_LUID (elem)))
660 present_dep_type = REG_DEP_ANTI;
661 else
662 maybe_present_p = false;
663
664 if (maybe_present_p)
665 {
666 if ((int) dep_type >= (int) present_dep_type)
667 return DEP_PRESENT;
668
669 present_p = true;
670 }
671 }
672 else
673 {
674 ds_t present_dep_types = 0;
675
676 if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (insn)],
677 INSN_LUID (elem)))
678 present_dep_types |= DEP_TRUE;
679 if (bitmap_bit_p (&output_dependency_cache[INSN_LUID (insn)],
680 INSN_LUID (elem)))
681 present_dep_types |= DEP_OUTPUT;
682 if (bitmap_bit_p (&anti_dependency_cache[INSN_LUID (insn)],
683 INSN_LUID (elem)))
684 present_dep_types |= DEP_ANTI;
685
686 if (present_dep_types)
687 {
688 if (!(current_sched_info->flags & DO_SPECULATION)
689 || !bitmap_bit_p (&spec_dependency_cache[INSN_LUID (insn)],
690 INSN_LUID (elem)))
691 {
692 if ((present_dep_types | (ds & DEP_TYPES))
693 == present_dep_types)
694 /* We already have all these bits. */
695 return DEP_PRESENT;
696 }
697 else
698 {
699 /* Only true dependencies can be data speculative and
700 only anti dependencies can be control speculative. */
701 gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
702 == present_dep_types);
703
704 /* if (additional dep is SPECULATIVE) then
705 we should update DEP_STATUS
706 else
707 we should reset existing dep to non-speculative. */
708 }
709
710 present_p = true;
711 }
712 else
713 maybe_present_p = false;
714 }
715 }
716 #endif
717
718 /* Check that we don't already have this dependence. */
719 if (maybe_present_p)
720 {
721 dep_link_t *linkp;
722
723 for (linkp = &DEPS_LIST_FIRST (INSN_BACK_DEPS (insn));
724 *linkp != NULL;
725 linkp = &DEP_LINK_NEXT (*linkp))
726 {
727 dep_t link = DEP_LINK_DEP (*linkp);
728
729 gcc_assert (true_dependency_cache == 0 || present_p);
730
731 if (DEP_PRO (link) == elem)
732 {
733 enum DEPS_ADJUST_RESULT changed_p = DEP_PRESENT;
734
735 #ifdef INSN_SCHEDULING
736 if (current_sched_info->flags & USE_DEPS_LIST)
737 {
738 ds_t new_status = ds | DEP_STATUS (link);
739
740 if (new_status & SPECULATIVE)
741 {
742 if (!(ds & SPECULATIVE)
743 || !(DEP_STATUS (link) & SPECULATIVE))
744 /* Then this dep can't be speculative. */
745 {
746 new_status &= ~SPECULATIVE;
747 if (true_dependency_cache
748 && (DEP_STATUS (link) & SPECULATIVE))
749 bitmap_clear_bit (&spec_dependency_cache
750 [INSN_LUID (insn)],
751 INSN_LUID (elem));
752 }
753 else
754 {
755 /* Both are speculative. Merge the probabilities. */
756 if (mem1)
757 {
758 dw_t dw;
759
760 dw = estimate_dep_weak (mem1, mem2);
761 ds = set_dep_weak (ds, BEGIN_DATA, dw);
762 }
763
764 new_status = ds_merge (DEP_STATUS (link), ds);
765 }
766 }
767
768 ds = new_status;
769 }
770
771 /* Clear the corresponding cache entry because the type of the link
772 may have changed. Keep the entries if USE_DEPS_LIST is set. */
773 if (true_dependency_cache != NULL
774 && !(current_sched_info->flags & USE_DEPS_LIST))
775 {
776 enum reg_note kind = DEP_KIND (link);
777
778 switch (kind)
779 {
780 case REG_DEP_OUTPUT:
781 bitmap_clear_bit (&output_dependency_cache
782 [INSN_LUID (insn)], INSN_LUID (elem));
783 break;
784 case REG_DEP_ANTI:
785 bitmap_clear_bit (&anti_dependency_cache
786 [INSN_LUID (insn)], INSN_LUID (elem));
787 break;
788 default:
789 gcc_unreachable ();
790 }
791 }
792
793 if ((current_sched_info->flags & USE_DEPS_LIST)
794 && DEP_STATUS (link) != ds)
795 {
796 DEP_STATUS (link) = ds;
797 changed_p = DEP_CHANGED;
798 }
799 #endif
800
801 /* If this is a more restrictive type of dependence than the
802 existing one, then change the existing dependence to this
803 type. */
804 if ((int) dep_type < (int) DEP_KIND (link))
805 {
806 DEP_KIND (link) = dep_type;
807 changed_p = DEP_CHANGED;
808 }
809
810 #ifdef INSN_SCHEDULING
811 /* If we are adding a dependency to INSN's LOG_LINKs, then
812 note that in the bitmap caches of dependency information. */
813 if (true_dependency_cache != NULL)
814 {
815 if (!(current_sched_info->flags & USE_DEPS_LIST))
816 {
817 if (DEP_KIND (link) == REG_DEP_TRUE)
818 bitmap_set_bit (&true_dependency_cache
819 [INSN_LUID (insn)], INSN_LUID (elem));
820 else if (DEP_KIND (link) == REG_DEP_OUTPUT)
821 bitmap_set_bit (&output_dependency_cache
822 [INSN_LUID (insn)], INSN_LUID (elem));
823 else if (DEP_KIND (link) == REG_DEP_ANTI)
824 bitmap_set_bit (&anti_dependency_cache
825 [INSN_LUID (insn)], INSN_LUID (elem));
826 }
827 else
828 {
829 if (ds & DEP_TRUE)
830 bitmap_set_bit (&true_dependency_cache
831 [INSN_LUID (insn)], INSN_LUID (elem));
832 if (ds & DEP_OUTPUT)
833 bitmap_set_bit (&output_dependency_cache
834 [INSN_LUID (insn)], INSN_LUID (elem));
835 if (ds & DEP_ANTI)
836 bitmap_set_bit (&anti_dependency_cache
837 [INSN_LUID (insn)], INSN_LUID (elem));
838 /* Note that a dep can become speculative only
839 at the moment of creation. Thus, we don't need to
840 check for that here. */
841 }
842 }
843
844 if (changed_linkpp && changed_p == DEP_CHANGED)
845 *changed_linkpp = linkp;
846 #endif
847 return changed_p;
848 }
849 }
850 /* We didn't find a dep. It shouldn't be present in the cache. */
851 gcc_assert (!present_p);
852 }
853
854 /* Might want to check one level of transitivity to save conses.
855 This check should be done in maybe_add_or_update_back_dep_1.
856 Since we made it to add_or_update_back_dep_1, we must create
857 (or update) a link. */
858
859 if (mem1)
860 {
861 gcc_assert (current_sched_info->flags & DO_SPECULATION);
862 ds = set_dep_weak (ds, BEGIN_DATA, estimate_dep_weak (mem1, mem2));
863 }
864
865 add_back_dep (insn, elem, dep_type, ds);
866
867 return DEP_CREATED;
868 }
869
870 /* This function creates a link between INSN and ELEM under any
871 conditions. DS describes speculative status of the link. */
872 static void
873 add_back_dep (rtx insn, rtx elem, enum reg_note dep_type, ds_t ds)
874 {
875 struct _dep _dep, *dep = &_dep;
876
877 gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
878
879 if (current_sched_info->flags & USE_DEPS_LIST)
880 init_dep_1 (dep, elem, insn, dep_type, ds);
881 else
882 init_dep_1 (dep, elem, insn, dep_type, -1);
883
884 add_back_dep_to_deps_list (INSN_BACK_DEPS (insn), dep);
885
886 #ifdef INSN_SCHEDULING
887 #ifdef ENABLE_CHECKING
888 check_dep_status (dep_type, ds, false);
889 #endif
890
891 /* If we are adding a dependency to INSN's LOG_LINKs, then note that
892 in the bitmap caches of dependency information. */
893 if (true_dependency_cache != NULL)
894 {
895 if (!(current_sched_info->flags & USE_DEPS_LIST))
896 {
897 if (dep_type == REG_DEP_TRUE)
898 bitmap_set_bit (&true_dependency_cache[INSN_LUID (insn)],
899 INSN_LUID (elem));
900 else if (dep_type == REG_DEP_OUTPUT)
901 bitmap_set_bit (&output_dependency_cache[INSN_LUID (insn)],
902 INSN_LUID (elem));
903 else if (dep_type == REG_DEP_ANTI)
904 bitmap_set_bit (&anti_dependency_cache[INSN_LUID (insn)],
905 INSN_LUID (elem));
906 }
907 else
908 {
909 if (ds & DEP_TRUE)
910 bitmap_set_bit (&true_dependency_cache[INSN_LUID (insn)],
911 INSN_LUID (elem));
912 if (ds & DEP_OUTPUT)
913 bitmap_set_bit (&output_dependency_cache[INSN_LUID (insn)],
914 INSN_LUID (elem));
915 if (ds & DEP_ANTI)
916 bitmap_set_bit (&anti_dependency_cache[INSN_LUID (insn)],
917 INSN_LUID (elem));
918 if (ds & SPECULATIVE)
919 {
920 gcc_assert (current_sched_info->flags & DO_SPECULATION);
921 bitmap_set_bit (&spec_dependency_cache[INSN_LUID (insn)],
922 INSN_LUID (elem));
923 }
924 }
925 }
926 #endif
927 }
928
929 /* A convenience wrapper to operate on an entire list. */
930
931 static void
932 add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
933 {
934 for (; list; list = XEXP (list, 1))
935 {
936 if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
937 add_dependence (insn, XEXP (list, 0), dep_type);
938 }
939 }
940
941 /* Similar, but free *LISTP at the same time. */
942
943 static void
944 add_dependence_list_and_free (rtx insn, rtx *listp, int uncond,
945 enum reg_note dep_type)
946 {
947 rtx list, next;
948 for (list = *listp, *listp = NULL; list ; list = next)
949 {
950 next = XEXP (list, 1);
951 if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
952 add_dependence (insn, XEXP (list, 0), dep_type);
953 free_INSN_LIST_node (list);
954 }
955 }
956
957 /* Clear all dependencies for an insn. */
958
959 static void
960 delete_all_dependences (rtx insn)
961 {
962 /* Clear caches, if they exist, as well as free the dependence. */
963
964 #ifdef INSN_SCHEDULING
965 if (true_dependency_cache != NULL)
966 {
967 bitmap_clear (&true_dependency_cache[INSN_LUID (insn)]);
968 bitmap_clear (&output_dependency_cache[INSN_LUID (insn)]);
969 bitmap_clear (&anti_dependency_cache[INSN_LUID (insn)]);
970 /* We don't have to clear forward_dependency_cache here,
971 because it is formed later. */
972 if (current_sched_info->flags & DO_SPECULATION)
973 bitmap_clear (&spec_dependency_cache[INSN_LUID (insn)]);
974 }
975 #endif
976
977 clear_deps_list (INSN_BACK_DEPS (insn));
978 }
979
980 /* All insns in a scheduling group except the first should only have
981 dependencies on the previous insn in the group. So we find the
982 first instruction in the scheduling group by walking the dependence
983 chains backwards. Then we add the dependencies for the group to
984 the previous nonnote insn. */
985
986 static void
987 fixup_sched_groups (rtx insn)
988 {
989 dep_link_t link;
990 rtx prev_nonnote;
991
992 FOR_EACH_DEP_LINK (link, INSN_BACK_DEPS (insn))
993 {
994 rtx i = insn;
995 dep_t dep = DEP_LINK_DEP (link);
996 rtx pro = DEP_PRO (dep);
997
998 do
999 {
1000 i = prev_nonnote_insn (i);
1001
1002 if (pro == i)
1003 goto next_link;
1004 } while (SCHED_GROUP_P (i));
1005
1006 if (! sched_insns_conditions_mutex_p (i, pro))
1007 add_dependence (i, pro, DEP_KIND (dep));
1008 next_link:;
1009 }
1010
1011 delete_all_dependences (insn);
1012
1013 prev_nonnote = prev_nonnote_insn (insn);
1014 if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
1015 && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
1016 add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
1017 }
1018 \f
1019 /* Process an insn's memory dependencies. There are four kinds of
1020 dependencies:
1021
1022 (0) read dependence: read follows read
1023 (1) true dependence: read follows write
1024 (2) output dependence: write follows write
1025 (3) anti dependence: write follows read
1026
1027 We are careful to build only dependencies which actually exist, and
1028 use transitivity to avoid building too many links. */
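/* For example, with

     insn 1:  mem[a] = r1	(write)
     insn 2:  r2 = mem[a]	(read)
     insn 3:  mem[a] = r3	(write)

   insn 2 has a true dependence on insn 1, while insn 3 has an output
   dependence on insn 1 and an anti dependence on insn 2.  Two ordinary
   (non-volatile) reads of mem[a] impose no ordering on each other.  */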
1029
1030 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1031 The MEM is a memory reference contained within INSN, which we are saving
1032 so that we can do memory aliasing on it. */
1033
1034 static void
1035 add_insn_mem_dependence (struct deps *deps, rtx *insn_list, rtx *mem_list,
1036 rtx insn, rtx mem)
1037 {
1038 rtx link;
1039
1040 link = alloc_INSN_LIST (insn, *insn_list);
1041 *insn_list = link;
1042
1043 if (current_sched_info->use_cselib)
1044 {
1045 mem = shallow_copy_rtx (mem);
1046 XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
1047 }
1048 link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
1049 *mem_list = link;
1050
1051 deps->pending_lists_length++;
1052 }
1053
1054 /* Make a dependency between every memory reference on the pending lists
1055 and INSN, thus flushing the pending lists. FOR_READ is true if emitting
1056 dependencies for a read operation, similarly with FOR_WRITE. */
1057
1058 static void
1059 flush_pending_lists (struct deps *deps, rtx insn, int for_read,
1060 int for_write)
1061 {
1062 if (for_write)
1063 {
1064 add_dependence_list_and_free (insn, &deps->pending_read_insns, 1,
1065 REG_DEP_ANTI);
1066 free_EXPR_LIST_list (&deps->pending_read_mems);
1067 }
1068
1069 add_dependence_list_and_free (insn, &deps->pending_write_insns, 1,
1070 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
1071 free_EXPR_LIST_list (&deps->pending_write_mems);
1072 deps->pending_lists_length = 0;
1073
1074 add_dependence_list_and_free (insn, &deps->last_pending_memory_flush, 1,
1075 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
1076 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1077 deps->pending_flush_length = 1;
1078 }
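/* After a flush every previously pending memory access has become a
   predecessor of INSN, and INSN itself is recorded in
   last_pending_memory_flush.  Subsequent memory references therefore only
   need a dependence on INSN rather than on each flushed access, which is
   what keeps the pending lists short.  */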
1079 \f
1080 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
1081 The type of the reference is specified by REF and can be SET,
1082 CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
1083
1084 static void
1085 sched_analyze_reg (struct deps *deps, int regno, enum machine_mode mode,
1086 enum rtx_code ref, rtx insn)
1087 {
1088 /* A hard reg in a wide mode may really be multiple registers.
1089 If so, mark all of them just like the first. */
1090 if (regno < FIRST_PSEUDO_REGISTER)
1091 {
1092 int i = hard_regno_nregs[regno][mode];
1093 if (ref == SET)
1094 {
1095 while (--i >= 0)
1096 SET_REGNO_REG_SET (reg_pending_sets, regno + i);
1097 }
1098 else if (ref == USE)
1099 {
1100 while (--i >= 0)
1101 SET_REGNO_REG_SET (reg_pending_uses, regno + i);
1102 }
1103 else
1104 {
1105 while (--i >= 0)
1106 SET_REGNO_REG_SET (reg_pending_clobbers, regno + i);
1107 }
1108 }
1109
1110 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
1111 it does not reload. Ignore these as they have served their
1112 purpose already. */
1113 else if (regno >= deps->max_reg)
1114 {
1115 enum rtx_code code = GET_CODE (PATTERN (insn));
1116 gcc_assert (code == USE || code == CLOBBER);
1117 }
1118
1119 else
1120 {
1121 if (ref == SET)
1122 SET_REGNO_REG_SET (reg_pending_sets, regno);
1123 else if (ref == USE)
1124 SET_REGNO_REG_SET (reg_pending_uses, regno);
1125 else
1126 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1127
1128 /* Pseudos that are REG_EQUIV to something may be replaced
1129 by that during reloading. We need only add dependencies for
1130 the address in the REG_EQUIV note. */
1131 if (!reload_completed && get_reg_known_equiv_p (regno))
1132 {
1133 rtx t = get_reg_known_value (regno);
1134 if (MEM_P (t))
1135 sched_analyze_2 (deps, XEXP (t, 0), insn);
1136 }
1137
1138 /* Don't let it cross a call after scheduling if it doesn't
1139 already cross one. */
1140 if (REG_N_CALLS_CROSSED (regno) == 0)
1141 {
1142 if (ref == USE)
1143 deps->sched_before_next_call
1144 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
1145 else
1146 add_dependence_list (insn, deps->last_function_call, 1,
1147 REG_DEP_ANTI);
1148 }
1149 }
1150 }
1151
1152 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
1153 rtx, X, creating all dependencies generated by the write to the
1154 destination of X, and reads of everything mentioned. */
1155
1156 static void
1157 sched_analyze_1 (struct deps *deps, rtx x, rtx insn)
1158 {
1159 rtx dest = XEXP (x, 0);
1160 enum rtx_code code = GET_CODE (x);
1161
1162 if (dest == 0)
1163 return;
1164
1165 if (GET_CODE (dest) == PARALLEL)
1166 {
1167 int i;
1168
1169 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1170 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1171 sched_analyze_1 (deps,
1172 gen_rtx_CLOBBER (VOIDmode,
1173 XEXP (XVECEXP (dest, 0, i), 0)),
1174 insn);
1175
1176 if (GET_CODE (x) == SET)
1177 sched_analyze_2 (deps, SET_SRC (x), insn);
1178 return;
1179 }
1180
1181 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
1182 || GET_CODE (dest) == ZERO_EXTRACT)
1183 {
1184 if (GET_CODE (dest) == STRICT_LOW_PART
1185 || GET_CODE (dest) == ZERO_EXTRACT
1186 || df_read_modify_subreg_p (dest))
1187 {
1188 /* These both read and modify the result. We must handle
1189 them as writes to get proper dependencies for following
1190 instructions. We must handle them as reads to get proper
1191 dependencies from this to previous instructions.
1192 Thus we need to call sched_analyze_2. */
1193
1194 sched_analyze_2 (deps, XEXP (dest, 0), insn);
1195 }
1196 if (GET_CODE (dest) == ZERO_EXTRACT)
1197 {
1198 /* The second and third arguments are values read by this insn. */
1199 sched_analyze_2 (deps, XEXP (dest, 1), insn);
1200 sched_analyze_2 (deps, XEXP (dest, 2), insn);
1201 }
1202 dest = XEXP (dest, 0);
1203 }
1204
1205 if (REG_P (dest))
1206 {
1207 int regno = REGNO (dest);
1208 enum machine_mode mode = GET_MODE (dest);
1209
1210 sched_analyze_reg (deps, regno, mode, code, insn);
1211
1212 #ifdef STACK_REGS
1213 /* Treat all writes to a stack register as modifying the TOS. */
1214 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
1215 {
1216 /* Avoid analyzing the same register twice. */
1217 if (regno != FIRST_STACK_REG)
1218 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
1219 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
1220 }
1221 #endif
1222 }
1223 else if (MEM_P (dest))
1224 {
1225 /* Writing memory. */
1226 rtx t = dest;
1227
1228 if (current_sched_info->use_cselib)
1229 {
1230 t = shallow_copy_rtx (dest);
1231 cselib_lookup (XEXP (t, 0), Pmode, 1);
1232 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
1233 }
1234 t = canon_rtx (t);
1235
1236 if (deps->pending_lists_length > MAX_PENDING_LIST_LENGTH)
1237 {
1238 /* Flush all pending reads and writes to prevent the pending lists
1239 from getting any larger. Insn scheduling runs too slowly when
1240 these lists get long. When compiling GCC with itself,
1241 this flush occurs 8 times for sparc, and 10 times for m88k using
1242 the default value of 32. */
1243 flush_pending_lists (deps, insn, false, true);
1244 }
1245 else
1246 {
1247 rtx pending, pending_mem;
1248
1249 pending = deps->pending_read_insns;
1250 pending_mem = deps->pending_read_mems;
1251 while (pending)
1252 {
1253 if (anti_dependence (XEXP (pending_mem, 0), t)
1254 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
1255 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
1256
1257 pending = XEXP (pending, 1);
1258 pending_mem = XEXP (pending_mem, 1);
1259 }
1260
1261 pending = deps->pending_write_insns;
1262 pending_mem = deps->pending_write_mems;
1263 while (pending)
1264 {
1265 if (output_dependence (XEXP (pending_mem, 0), t)
1266 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
1267 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
1268
1269 pending = XEXP (pending, 1);
1270 pending_mem = XEXP (pending_mem, 1);
1271 }
1272
1273 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
1274 REG_DEP_ANTI);
1275
1276 add_insn_mem_dependence (deps, &deps->pending_write_insns,
1277 &deps->pending_write_mems, insn, dest);
1278 }
1279 sched_analyze_2 (deps, XEXP (dest, 0), insn);
1280 }
1281
1282 /* Analyze reads. */
1283 if (GET_CODE (x) == SET)
1284 sched_analyze_2 (deps, SET_SRC (x), insn);
1285 }
1286
1287 /* Analyze the uses of memory and registers in rtx X in INSN. */
1288
1289 static void
1290 sched_analyze_2 (struct deps *deps, rtx x, rtx insn)
1291 {
1292 int i;
1293 int j;
1294 enum rtx_code code;
1295 const char *fmt;
1296
1297 if (x == 0)
1298 return;
1299
1300 code = GET_CODE (x);
1301
1302 switch (code)
1303 {
1304 case CONST_INT:
1305 case CONST_DOUBLE:
1306 case CONST_VECTOR:
1307 case SYMBOL_REF:
1308 case CONST:
1309 case LABEL_REF:
1310 /* Ignore constants. Note that we must handle CONST_DOUBLE here
1311 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
1312 this does not mean that this insn is using cc0. */
1313 return;
1314
1315 #ifdef HAVE_cc0
1316 case CC0:
1317 /* User of CC0 depends on immediately preceding insn. */
1318 SCHED_GROUP_P (insn) = 1;
1319 /* Don't move CC0 setter to another block (it can set up the
1320 same flag for previous CC0 users which is safe). */
1321 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
1322 return;
1323 #endif
1324
1325 case REG:
1326 {
1327 int regno = REGNO (x);
1328 enum machine_mode mode = GET_MODE (x);
1329
1330 sched_analyze_reg (deps, regno, mode, USE, insn);
1331
1332 #ifdef STACK_REGS
1333 /* Treat all reads of a stack register as modifying the TOS. */
1334 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
1335 {
1336 /* Avoid analyzing the same register twice. */
1337 if (regno != FIRST_STACK_REG)
1338 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
1339 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
1340 }
1341 #endif
1342 return;
1343 }
1344
1345 case MEM:
1346 {
1347 /* Reading memory. */
1348 rtx u;
1349 rtx pending, pending_mem;
1350 rtx t = x;
1351
1352 if (current_sched_info->use_cselib)
1353 {
1354 t = shallow_copy_rtx (t);
1355 cselib_lookup (XEXP (t, 0), Pmode, 1);
1356 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
1357 }
1358 t = canon_rtx (t);
1359 pending = deps->pending_read_insns;
1360 pending_mem = deps->pending_read_mems;
1361 while (pending)
1362 {
1363 if (read_dependence (XEXP (pending_mem, 0), t)
1364 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
1365 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
1366
1367 pending = XEXP (pending, 1);
1368 pending_mem = XEXP (pending_mem, 1);
1369 }
1370
1371 pending = deps->pending_write_insns;
1372 pending_mem = deps->pending_write_mems;
1373 while (pending)
1374 {
1375 if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
1376 t, rtx_varies_p)
1377 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
1378 {
1379 if (current_sched_info->flags & DO_SPECULATION)
1380 maybe_add_or_update_back_dep_1 (insn, XEXP (pending, 0),
1381 REG_DEP_TRUE,
1382 BEGIN_DATA | DEP_TRUE,
1383 XEXP (pending_mem, 0), t, 0);
1384 else
1385 add_dependence (insn, XEXP (pending, 0), REG_DEP_TRUE);
1386 }
1387
1388 pending = XEXP (pending, 1);
1389 pending_mem = XEXP (pending_mem, 1);
1390 }
1391
1392 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
1393 if (! JUMP_P (XEXP (u, 0)) || deps_may_trap_p (x))
1394 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1395
1396 /* Always add these dependencies to pending_reads, since
1397 this insn may be followed by a write. */
1398 add_insn_mem_dependence (deps, &deps->pending_read_insns,
1399 &deps->pending_read_mems, insn, x);
1400
1401 /* Take advantage of tail recursion here. */
1402 sched_analyze_2 (deps, XEXP (x, 0), insn);
1403 return;
1404 }
1405
1406 /* Force pending stores to memory in case a trap handler needs them. */
1407 case TRAP_IF:
1408 flush_pending_lists (deps, insn, true, false);
1409 break;
1410
1411 case ASM_OPERANDS:
1412 case ASM_INPUT:
1413 case UNSPEC_VOLATILE:
1414 {
1415 /* Traditional and volatile asm instructions must be considered to use
1416 and clobber all hard registers, all pseudo-registers and all of
1417 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
1418
1419 Consider for instance a volatile asm that changes the fpu rounding
1420 mode. An insn should not be moved across this even if it only uses
1421 pseudo-regs because it might give an incorrectly rounded result. */
1422 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
1423 reg_pending_barrier = TRUE_BARRIER;
1424
1425 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
1426 We cannot just fall through here since then we would be confused
1427 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate a
1428 traditional asm unlike its normal usage. */
1429
1430 if (code == ASM_OPERANDS)
1431 {
1432 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
1433 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
1434 return;
1435 }
1436 break;
1437 }
1438
1439 case PRE_DEC:
1440 case POST_DEC:
1441 case PRE_INC:
1442 case POST_INC:
1443 /* These both read and modify the result. We must handle them as writes
1444 to get proper dependencies for following instructions. We must handle
1445 them as reads to get proper dependencies from this to previous
1446 instructions. Thus we need to pass them to both sched_analyze_1
1447 and sched_analyze_2. We must call sched_analyze_2 first in order
1448 to get the proper antecedent for the read. */
1449 sched_analyze_2 (deps, XEXP (x, 0), insn);
1450 sched_analyze_1 (deps, x, insn);
1451 return;
1452
1453 case POST_MODIFY:
1454 case PRE_MODIFY:
1455 /* op0 = op0 + op1 */
1456 sched_analyze_2 (deps, XEXP (x, 0), insn);
1457 sched_analyze_2 (deps, XEXP (x, 1), insn);
1458 sched_analyze_1 (deps, x, insn);
1459 return;
1460
1461 default:
1462 break;
1463 }
1464
1465 /* Other cases: walk the insn. */
1466 fmt = GET_RTX_FORMAT (code);
1467 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1468 {
1469 if (fmt[i] == 'e')
1470 sched_analyze_2 (deps, XEXP (x, i), insn);
1471 else if (fmt[i] == 'E')
1472 for (j = 0; j < XVECLEN (x, i); j++)
1473 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
1474 }
1475 }
1476
1477 /* Analyze an INSN with pattern X to find all dependencies. */
1478
1479 static void
1480 sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
1481 {
1482 RTX_CODE code = GET_CODE (x);
1483 rtx link;
1484 unsigned i;
1485 reg_set_iterator rsi;
1486
1487 if (code == COND_EXEC)
1488 {
1489 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
1490
1491 /* ??? Should be recording conditions so we reduce the number of
1492 false dependencies. */
1493 x = COND_EXEC_CODE (x);
1494 code = GET_CODE (x);
1495 }
1496 if (code == SET || code == CLOBBER)
1497 {
1498 sched_analyze_1 (deps, x, insn);
1499
1500 /* Bare clobber insns are used for letting life analysis, reg-stack
1501 and others know that a value is dead. Depend on the last call
1502 instruction so that reg-stack won't get confused. */
1503 if (code == CLOBBER)
1504 add_dependence_list (insn, deps->last_function_call, 1, REG_DEP_OUTPUT);
1505 }
1506 else if (code == PARALLEL)
1507 {
1508 for (i = XVECLEN (x, 0); i--;)
1509 {
1510 rtx sub = XVECEXP (x, 0, i);
1511 code = GET_CODE (sub);
1512
1513 if (code == COND_EXEC)
1514 {
1515 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
1516 sub = COND_EXEC_CODE (sub);
1517 code = GET_CODE (sub);
1518 }
1519 if (code == SET || code == CLOBBER)
1520 sched_analyze_1 (deps, sub, insn);
1521 else
1522 sched_analyze_2 (deps, sub, insn);
1523 }
1524 }
1525 else
1526 sched_analyze_2 (deps, x, insn);
1527
1528 /* Mark registers CLOBBERED or used by called function. */
1529 if (CALL_P (insn))
1530 {
1531 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1532 {
1533 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
1534 sched_analyze_1 (deps, XEXP (link, 0), insn);
1535 else
1536 sched_analyze_2 (deps, XEXP (link, 0), insn);
1537 }
1538 if (find_reg_note (insn, REG_SETJMP, NULL))
1539 reg_pending_barrier = MOVE_BARRIER;
1540 }
1541
1542 if (JUMP_P (insn))
1543 {
1544 rtx next;
1545 next = next_nonnote_insn (insn);
1546 if (next && BARRIER_P (next))
1547 reg_pending_barrier = TRUE_BARRIER;
1548 else
1549 {
1550 rtx pending, pending_mem;
1551 regset_head tmp_uses, tmp_sets;
1552 INIT_REG_SET (&tmp_uses);
1553 INIT_REG_SET (&tmp_sets);
1554
1555 (*current_sched_info->compute_jump_reg_dependencies)
1556 (insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
1557 /* Make latency of jump equal to 0 by using anti-dependence. */
1558 EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi)
1559 {
1560 struct deps_reg *reg_last = &deps->reg_last[i];
1561 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
1562 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
1563 reg_last->uses_length++;
1564 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
1565 }
1566 IOR_REG_SET (reg_pending_sets, &tmp_sets);
1567
1568 CLEAR_REG_SET (&tmp_uses);
1569 CLEAR_REG_SET (&tmp_sets);
1570
1571 /* All memory writes and volatile reads must happen before the
1572 jump. Non-volatile reads must happen before the jump iff
1573 the result is needed by the register use mask computed above. */
1574
1575 pending = deps->pending_write_insns;
1576 pending_mem = deps->pending_write_mems;
1577 while (pending)
1578 {
1579 if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
1580 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
1581 pending = XEXP (pending, 1);
1582 pending_mem = XEXP (pending_mem, 1);
1583 }
1584
1585 pending = deps->pending_read_insns;
1586 pending_mem = deps->pending_read_mems;
1587 while (pending)
1588 {
1589 if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
1590 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
1591 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
1592 pending = XEXP (pending, 1);
1593 pending_mem = XEXP (pending_mem, 1);
1594 }
1595
1596 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
1597 REG_DEP_ANTI);
1598 }
1599 }
1600
1601 /* If this instruction can throw an exception, then moving it changes
1602 where block boundaries fall. This is mighty confusing elsewhere.
1603 Therefore, prevent such an instruction from being moved. */
1604 if (can_throw_internal (insn))
1605 reg_pending_barrier = MOVE_BARRIER;
1606
1607 /* Add dependencies if a scheduling barrier was found. */
1608 if (reg_pending_barrier)
1609 {
1610 /* In the case of a barrier, most of the added dependencies are not
1611 real, so we use anti-dependences here. */
1612 if (sched_get_condition (insn))
1613 {
1614 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
1615 {
1616 struct deps_reg *reg_last = &deps->reg_last[i];
1617 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
1618 add_dependence_list
1619 (insn, reg_last->sets, 0,
1620 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
1621 add_dependence_list
1622 (insn, reg_last->clobbers, 0,
1623 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
1624 }
1625 }
1626 else
1627 {
1628 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
1629 {
1630 struct deps_reg *reg_last = &deps->reg_last[i];
1631 add_dependence_list_and_free (insn, &reg_last->uses, 0,
1632 REG_DEP_ANTI);
1633 add_dependence_list_and_free
1634 (insn, &reg_last->sets, 0,
1635 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
1636 add_dependence_list_and_free
1637 (insn, &reg_last->clobbers, 0,
1638 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
1639 reg_last->uses_length = 0;
1640 reg_last->clobbers_length = 0;
1641 }
1642 }
1643
1644 for (i = 0; i < (unsigned)deps->max_reg; i++)
1645 {
1646 struct deps_reg *reg_last = &deps->reg_last[i];
1647 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1648 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1649 }
1650
1651 flush_pending_lists (deps, insn, true, true);
1652 CLEAR_REG_SET (&deps->reg_conditional_sets);
1653 reg_pending_barrier = NOT_A_BARRIER;
1654 }
1655 else
1656 {
1657 /* If the current insn is conditional, we can't free any
1658 of the lists. */
1659 if (sched_get_condition (insn))
1660 {
1661 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1662 {
1663 struct deps_reg *reg_last = &deps->reg_last[i];
1664 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
1665 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
1666 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
1667 reg_last->uses_length++;
1668 }
1669 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
1670 {
1671 struct deps_reg *reg_last = &deps->reg_last[i];
1672 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
1673 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
1674 reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
1675 reg_last->clobbers_length++;
1676 }
1677 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
1678 {
1679 struct deps_reg *reg_last = &deps->reg_last[i];
1680 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
1681 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
1682 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
1683 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1684 SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
1685 }
1686 }
1687 else
1688 {
1689 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1690 {
1691 struct deps_reg *reg_last = &deps->reg_last[i];
1692 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
1693 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
1694 reg_last->uses_length++;
1695 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
1696 }
1697 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
1698 {
1699 struct deps_reg *reg_last = &deps->reg_last[i];
1700 if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
1701 || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
1702 {
1703 add_dependence_list_and_free (insn, &reg_last->sets, 0,
1704 REG_DEP_OUTPUT);
1705 add_dependence_list_and_free (insn, &reg_last->uses, 0,
1706 REG_DEP_ANTI);
1707 add_dependence_list_and_free (insn, &reg_last->clobbers, 0,
1708 REG_DEP_OUTPUT);
1709 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1710 reg_last->clobbers_length = 0;
1711 reg_last->uses_length = 0;
1712 }
1713 else
1714 {
1715 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
1716 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
1717 }
1718 reg_last->clobbers_length++;
1719 reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
1720 }
1721 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
1722 {
1723 struct deps_reg *reg_last = &deps->reg_last[i];
1724 add_dependence_list_and_free (insn, &reg_last->sets, 0,
1725 REG_DEP_OUTPUT);
1726 add_dependence_list_and_free (insn, &reg_last->clobbers, 0,
1727 REG_DEP_OUTPUT);
1728 add_dependence_list_and_free (insn, &reg_last->uses, 0,
1729 REG_DEP_ANTI);
1730 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1731 reg_last->uses_length = 0;
1732 reg_last->clobbers_length = 0;
1733 CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);
1734 }
1735 }
1736
1737 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
1738 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
1739 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
1740 }
1741 CLEAR_REG_SET (reg_pending_uses);
1742 CLEAR_REG_SET (reg_pending_clobbers);
1743 CLEAR_REG_SET (reg_pending_sets);
1744
1745 /* If we are currently in a libcall scheduling group, then mark the
1746 current insn as being in a scheduling group and note that it cannot
1747 be moved into a different basic block. */
1748
1749 if (deps->libcall_block_tail_insn)
1750 {
1751 SCHED_GROUP_P (insn) = 1;
1752 CANT_MOVE (insn) = 1;
1753 }
1754
1755 /* If a post-call group is still open, see if it should remain so.
1756 This insn must be a simple move of a hard reg to a pseudo or
1757 vice-versa.
1758
1759 We must avoid moving these insns for correctness on
1760 SMALL_REGISTER_CLASS machines, and for special registers like
1761 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
1762 hard regs for all targets. */
1763
1764 if (deps->in_post_call_group_p)
1765 {
1766 rtx tmp, set = single_set (insn);
1767 int src_regno, dest_regno;
1768
1769 if (set == NULL)
1770 goto end_call_group;
1771
1772 tmp = SET_DEST (set);
1773 if (GET_CODE (tmp) == SUBREG)
1774 tmp = SUBREG_REG (tmp);
1775 if (REG_P (tmp))
1776 dest_regno = REGNO (tmp);
1777 else
1778 goto end_call_group;
1779
1780 tmp = SET_SRC (set);
1781 if (GET_CODE (tmp) == SUBREG)
1782 tmp = SUBREG_REG (tmp);
1783 if ((GET_CODE (tmp) == PLUS
1784 || GET_CODE (tmp) == MINUS)
1785 && REG_P (XEXP (tmp, 0))
1786 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
1787 && dest_regno == STACK_POINTER_REGNUM)
1788 src_regno = STACK_POINTER_REGNUM;
1789 else if (REG_P (tmp))
1790 src_regno = REGNO (tmp);
1791 else
1792 goto end_call_group;
1793
1794 if (src_regno < FIRST_PSEUDO_REGISTER
1795 || dest_regno < FIRST_PSEUDO_REGISTER)
1796 {
1797 if (deps->in_post_call_group_p == post_call_initial)
1798 deps->in_post_call_group_p = post_call;
1799
1800 SCHED_GROUP_P (insn) = 1;
1801 CANT_MOVE (insn) = 1;
1802 }
1803 else
1804 {
1805 end_call_group:
1806 deps->in_post_call_group_p = not_post_call;
1807 }
1808 }
1809
1810 /* Fixup the dependencies in the sched group. */
1811 if (SCHED_GROUP_P (insn))
1812 fixup_sched_groups (insn);
1813 }
1814
1815 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
1816 dependencies for each insn. */
1817
1818 void
1819 sched_analyze (struct deps *deps, rtx head, rtx tail)
1820 {
1821 rtx insn;
1822
1823 if (current_sched_info->use_cselib)
1824 cselib_init (true);
1825
1826 /* Before reload, if the previous block ended in a call, show that
1827 we are inside a post-call group, so as to keep the lifetimes of
1828 hard registers correct. */
1829 if (! reload_completed && !LABEL_P (head))
1830 {
1831 insn = prev_nonnote_insn (head);
1832 if (insn && CALL_P (insn))
1833 deps->in_post_call_group_p = post_call_initial;
1834 }
1835 for (insn = head;; insn = NEXT_INSN (insn))
1836 {
1837 rtx link, end_seq, r0, set;
1838
1839 if (INSN_P (insn))
1840 {
1841 /* Clear out the stale LOG_LINKS from flow. */
1842 free_INSN_LIST_list (&LOG_LINKS (insn));
1843
1844 /* These two lists will be freed in schedule_insn (). */
1845 INSN_BACK_DEPS (insn) = create_deps_list (false);
1846 INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list (false);
1847
1848 /* This one should be allocated on the obstack because it should live
1849 until scheduling ends. */
1850 INSN_FORW_DEPS (insn) = create_deps_list (true);
1851 }
1852
1853 if (NONJUMP_INSN_P (insn) || JUMP_P (insn))
1854 {
1855 /* Make each JUMP_INSN a scheduling barrier for memory
1856 references. */
1857 if (JUMP_P (insn))
1858 {
1859 /* Keep the list a reasonable size. */
1860 if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
1861 flush_pending_lists (deps, insn, true, true);
1862 else
1863 deps->last_pending_memory_flush
1864 = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
1865 }
1866 sched_analyze_insn (deps, PATTERN (insn), insn);
1867 }
1868 else if (CALL_P (insn))
1869 {
1870 int i;
1871
1872 CANT_MOVE (insn) = 1;
1873
1874 if (find_reg_note (insn, REG_SETJMP, NULL))
1875 {
1876 /* This is setjmp. Assume that all registers, not just
1877 hard registers, may be clobbered by this call. */
1878 reg_pending_barrier = MOVE_BARRIER;
1879 }
1880 else
1881 {
1882 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1883 /* A call may read and modify global register variables. */
1884 if (global_regs[i])
1885 {
1886 SET_REGNO_REG_SET (reg_pending_sets, i);
1887 SET_REGNO_REG_SET (reg_pending_uses, i);
1888 }
1889 /* Other call-clobbered hard regs may be clobbered.
1890 Since we only have a choice between 'might be clobbered'
1891 and 'definitely not clobbered', we must include all
1892 partly call-clobbered registers here. */
1893 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
1894 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
1895 SET_REGNO_REG_SET (reg_pending_clobbers, i);
1896 /* We don't know what set of fixed registers might be used
1897 by the function, but it is certain that the stack pointer
1898 is among them, so be conservative. */
1899 else if (fixed_regs[i])
1900 SET_REGNO_REG_SET (reg_pending_uses, i);
1901 /* The frame pointer is normally not used by the function
1902 itself, but by the debugger. */
1903 /* ??? MIPS o32 is an exception. It uses the frame pointer
1904 in the macro expansion of jal but does not represent this
1905 fact in the call_insn rtl. */
1906 else if (i == FRAME_POINTER_REGNUM
1907 || (i == HARD_FRAME_POINTER_REGNUM
1908 && (! reload_completed || frame_pointer_needed)))
1909 SET_REGNO_REG_SET (reg_pending_uses, i);
1910 }
1911
1912 /* For each insn which shouldn't cross a call, add a dependence
1913 between that insn and this call insn. */
1914 add_dependence_list_and_free (insn, &deps->sched_before_next_call, 1,
1915 REG_DEP_ANTI);
1916
1917 sched_analyze_insn (deps, PATTERN (insn), insn);
1918
1919 /* In the absence of interprocedural alias analysis, we must flush
1920 all pending reads and writes, and start new dependencies from
1921 here. But only flush writes for constant calls (which may
1922 be passed a pointer to something we haven't written yet). */
1923 flush_pending_lists (deps, insn, true, !CONST_OR_PURE_CALL_P (insn));
1924
1925 /* Remember the last function call for limiting lifetimes. */
1926 free_INSN_LIST_list (&deps->last_function_call);
1927 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
1928
1929 /* Before reload, begin a post-call group, so as to keep the
1930 lifetimes of hard registers correct. */
1931 if (! reload_completed)
1932 deps->in_post_call_group_p = post_call;
1933 }
1934
1935 /* EH_REGION insn notes cannot appear until well after we complete
1936 scheduling. */
1937 if (NOTE_P (insn))
1938 gcc_assert (NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
1939 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END);
1940
1941 if (current_sched_info->use_cselib)
1942 cselib_process_insn (insn);
1943
1944 /* Now that we have completed handling INSN, check and see if it is
1945 a CLOBBER beginning a libcall block. If it is, record the
1946 end of the libcall sequence.
1947
1948 We want to schedule libcall blocks as a unit before reload. While
1949 this restricts scheduling, it preserves the meaning of a libcall
1950 block.
1951
1952 As a side effect, we may get better code due to decreased register
1953 pressure as well as less chance of a foreign insn appearing in
1954 a libcall block. */
1955 if (!reload_completed
1956 /* Note we may have nested libcall sequences. We only care about
1957 the outermost libcall sequence. */
1958 && deps->libcall_block_tail_insn == 0
1959 /* The sequence must start with a clobber of a register. */
1960 && NONJUMP_INSN_P (insn)
1961 && GET_CODE (PATTERN (insn)) == CLOBBER
1962 && (r0 = XEXP (PATTERN (insn), 0), REG_P (r0))
1963 && REG_P (XEXP (PATTERN (insn), 0))
1964 /* The CLOBBER must also have a REG_LIBCALL note attached. */
1965 && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
1966 && (end_seq = XEXP (link, 0)) != 0
1967 /* The insn referenced by the REG_LIBCALL note must be a
1968 simple nop copy with the same destination as the register
1969 mentioned in the clobber. */
1970 && (set = single_set (end_seq)) != 0
1971 && SET_DEST (set) == r0 && SET_SRC (set) == r0
1972 /* And finally the insn referenced by the REG_LIBCALL must
1973 also contain a REG_EQUAL note and a REG_RETVAL note. */
1974 && find_reg_note (end_seq, REG_EQUAL, NULL_RTX) != 0
1975 && find_reg_note (end_seq, REG_RETVAL, NULL_RTX) != 0)
1976 deps->libcall_block_tail_insn = XEXP (link, 0);
1977
1978 /* If we have reached the end of a libcall block, then close the
1979 block. */
1980 if (deps->libcall_block_tail_insn == insn)
1981 deps->libcall_block_tail_insn = 0;
1982
1983 if (insn == tail)
1984 {
1985 if (current_sched_info->use_cselib)
1986 cselib_finish ();
1987 return;
1988 }
1989 }
1990 gcc_unreachable ();
1991 }
1992 \f
1993
1994 /* The following function adds a forward dependence (FROM, TO) with the
1995 given DEP_TYPE. The forward dependence must not already exist. */
1996
1997 void
1998 add_forw_dep (dep_link_t link)
1999 {
2000 dep_t dep = DEP_LINK_DEP (link);
2001 rtx to = DEP_CON (dep);
2002 rtx from = DEP_PRO (dep);
2003
2004 #ifdef ENABLE_CHECKING
2005 /* If add_dependence is working properly there should never
2006 be notes, deleted insns or duplicates in the backward
2007 links. Thus we need not check for them here.
2008
2009 However, if we have enabled checking we might as well go
2010 ahead and verify that add_dependence worked properly. */
2011 gcc_assert (INSN_P (from));
2012 gcc_assert (!INSN_DELETED_P (from));
2013 if (true_dependency_cache)
2014 {
2015 gcc_assert (!bitmap_bit_p (&forward_dependency_cache[INSN_LUID (from)],
2016 INSN_LUID (to)));
2017 bitmap_set_bit (&forward_dependency_cache[INSN_LUID (from)],
2018 INSN_LUID (to));
2019 }
2020
2021 gcc_assert (find_link_by_con_in_deps_list (INSN_FORW_DEPS (from), to)
2022 == NULL);
2023 #endif
2024
2025 add_to_deps_list (DEP_NODE_FORW (DEP_LINK_NODE (link)),
2026 INSN_FORW_DEPS (from));
2027
2028 INSN_DEP_COUNT (to) += 1;
2029 }
2030
2031 /* Examine insns in the range [ HEAD, TAIL ] and use the backward
2032 dependences from the INSN_BACK_DEPS list to build forward dependences in
2033 INSN_FORW_DEPS. */
2034
2035 void
2036 compute_forward_dependences (rtx head, rtx tail)
2037 {
2038 rtx insn;
2039 rtx next_tail;
2040
2041 next_tail = NEXT_INSN (tail);
2042 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2043 {
2044 dep_link_t link;
2045
2046 if (! INSN_P (insn))
2047 continue;
2048
2049 if (current_sched_info->flags & DO_SPECULATION)
2050 {
2051 /* We will add links, preserving order, from INSN_BACK_DEPS to
2052 NEW. */
2053 dep_link_t new = NULL;
2054
2055 link = DEPS_LIST_FIRST (INSN_BACK_DEPS (insn));
2056
2057 while (link != NULL)
2058 {
2059 dep_link_t next = DEP_LINK_NEXT (link);
2060
2061 detach_dep_link (link);
2062 adjust_add_sorted_back_dep (insn, link, &new);
2063
2064 link = next;
2065 }
2066
2067 /* Attach NEW as the list of backward dependencies. */
2068 if (new != NULL)
2069 {
2070 DEP_LINK_PREV_NEXTP (new)
2071 = &DEPS_LIST_FIRST (INSN_BACK_DEPS (insn));
2072
2073 DEPS_LIST_FIRST (INSN_BACK_DEPS (insn)) = new;
2074 }
2075 }
2076
2077 FOR_EACH_DEP_LINK (link, INSN_BACK_DEPS (insn))
2078 add_forw_dep (link);
2079 }
2080 }
2081 \f
2082 /* Initialize DEPS, the variables used for region data dependence
2083 analysis. */
2084
2085 void
2086 init_deps (struct deps *deps)
2087 {
2088 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
2089
2090 deps->max_reg = max_reg;
2091 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
2092 INIT_REG_SET (&deps->reg_last_in_use);
2093 INIT_REG_SET (&deps->reg_conditional_sets);
2094
2095 deps->pending_read_insns = 0;
2096 deps->pending_read_mems = 0;
2097 deps->pending_write_insns = 0;
2098 deps->pending_write_mems = 0;
2099 deps->pending_lists_length = 0;
2100 deps->pending_flush_length = 0;
2101 deps->last_pending_memory_flush = 0;
2102 deps->last_function_call = 0;
2103 deps->sched_before_next_call = 0;
2104 deps->in_post_call_group_p = not_post_call;
2105 deps->libcall_block_tail_insn = 0;
2106 }
2107
2108 /* Free insn lists found in DEPS. */
2109
2110 void
2111 free_deps (struct deps *deps)
2112 {
2113 unsigned i;
2114 reg_set_iterator rsi;
2115
2116 free_INSN_LIST_list (&deps->pending_read_insns);
2117 free_EXPR_LIST_list (&deps->pending_read_mems);
2118 free_INSN_LIST_list (&deps->pending_write_insns);
2119 free_EXPR_LIST_list (&deps->pending_write_mems);
2120 free_INSN_LIST_list (&deps->last_pending_memory_flush);
2121
2122 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
2123 times. For a testcase with 42000 regs and 8000 small basic blocks,
2124 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
2125 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
2126 {
2127 struct deps_reg *reg_last = &deps->reg_last[i];
2128 if (reg_last->uses)
2129 free_INSN_LIST_list (&reg_last->uses);
2130 if (reg_last->sets)
2131 free_INSN_LIST_list (&reg_last->sets);
2132 if (reg_last->clobbers)
2133 free_INSN_LIST_list (&reg_last->clobbers);
2134 }
2135 CLEAR_REG_SET (&deps->reg_last_in_use);
2136 CLEAR_REG_SET (&deps->reg_conditional_sets);
2137
2138 free (deps->reg_last);
2139 }
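/* Illustrative sketch (never compiled): the typical order in which a
   region or ebb scheduler is expected to use the routines above --
   analyze backward dependencies for a block, turn them into forward
   dependencies, and release the per-block state.  How HEAD and TAIL are
   obtained is an assumption of this sketch, not something defined in
   this file.  */
#if 0
static void
analyze_block_deps_sketch (rtx head, rtx tail)
{
  struct deps deps;

  /* Per-block dependency state.  */
  init_deps (&deps);

  /* Build INSN_BACK_DEPS for every insn in [HEAD, TAIL].  */
  sched_analyze (&deps, head, tail);

  /* Mirror the backward dependencies into INSN_FORW_DEPS.  */
  compute_forward_dependences (head, tail);

  /* Release the per-block lists.  */
  free_deps (&deps);
}
#endif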
2140
2141 /* If it is profitable to use them, initialize caches for tracking
2142 dependency information. LUID is the number of insns to be scheduled;
2143 it is used in the profitability estimate. */
2144
2145 void
2146 init_dependency_caches (int luid)
2147 {
2148 /* ?!? We could save some memory by computing a per-region luid mapping
2149 which could reduce both the number of vectors in the cache and the size
2150 of each vector. Instead we just avoid the cache entirely unless the
2151 average number of instructions in a basic block is very high. See
2152 the comment before the declaration of true_dependency_cache for
2153 what we consider "very high". */
2154 if (luid / n_basic_blocks > 100 * 5)
2155 {
2156 cache_size = 0;
2157 extend_dependency_caches (luid, true);
2158 }
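  /* Worked example of the heuristic above (numbers made up for
     illustration): 8000 insns spread over 100 basic blocks average 80
     insns per block, so luid / n_basic_blocks is 80 and the caches are
     skipped; the same 8000 insns in only 10 blocks average 800 > 500,
     so the caches are allocated.  */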
2159
2160 /* This obstack lives for the whole of function scheduling (not just a
2161 single region's scheduling) because some dependencies can be manually
2162 generated for outside regions. See dont_calc_deps in sched-{rgn, ebb}.c.
2163
2164 Possible solution would be to have two obstacks:
2165 * the big one for regular dependencies with region scheduling lifetime,
2166 * and the small one for manually generated dependencies with function
2167 scheduling lifetime. */
2168 gcc_obstack_init (&deps_obstack);
2169 }
2170
2171 /* Create or extend (depending on CREATE_P) dependency caches to
2172 size N. */
2173 void
2174 extend_dependency_caches (int n, bool create_p)
2175 {
2176 if (create_p || true_dependency_cache)
2177 {
2178 int i, luid = cache_size + n;
2179
2180 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
2181 luid);
2182 output_dependency_cache = XRESIZEVEC (bitmap_head,
2183 output_dependency_cache, luid);
2184 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
2185 luid);
2186 #ifdef ENABLE_CHECKING
2187 forward_dependency_cache = XRESIZEVEC (bitmap_head,
2188 forward_dependency_cache, luid);
2189 #endif
2190 if (current_sched_info->flags & DO_SPECULATION)
2191 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
2192 luid);
2193
2194 for (i = cache_size; i < luid; i++)
2195 {
2196 bitmap_initialize (&true_dependency_cache[i], 0);
2197 bitmap_initialize (&output_dependency_cache[i], 0);
2198 bitmap_initialize (&anti_dependency_cache[i], 0);
2199 #ifdef ENABLE_CHECKING
2200 bitmap_initialize (&forward_dependency_cache[i], 0);
2201 #endif
2202 if (current_sched_info->flags & DO_SPECULATION)
2203 bitmap_initialize (&spec_dependency_cache[i], 0);
2204 }
2205 cache_size = luid;
2206 }
2207 }
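/* A note on how these caches are consulted elsewhere in this file (a
   summary of existing behaviour, not a change): true_dependency_cache,
   output_dependency_cache, anti_dependency_cache and
   spec_dependency_cache are arrays of bitmaps indexed by the consumer's
   LUID, with the producer's LUID as the bit; forward_dependency_cache
   is indexed the other way around (see add_forw_dep above).  A test
   such as

     bitmap_bit_p (&true_dependency_cache[INSN_LUID (insn)],
                   INSN_LUID (elem))

   therefore answers "does INSN already have a true dependence on ELEM"
   without walking INSN's dependency lists.  */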
2208
2209 /* Free the caches allocated in init_dependency_caches. */
2210
2211 void
2212 free_dependency_caches (void)
2213 {
2214 obstack_free (&deps_obstack, NULL);
2215
2216 if (true_dependency_cache)
2217 {
2218 int i;
2219
2220 for (i = 0; i < cache_size; i++)
2221 {
2222 bitmap_clear (&true_dependency_cache[i]);
2223 bitmap_clear (&output_dependency_cache[i]);
2224 bitmap_clear (&anti_dependency_cache[i]);
2225 #ifdef ENABLE_CHECKING
2226 bitmap_clear (&forward_dependency_cache[i]);
2227 #endif
2228 if (current_sched_info->flags & DO_SPECULATION)
2229 bitmap_clear (&spec_dependency_cache[i]);
2230 }
2231 free (true_dependency_cache);
2232 true_dependency_cache = NULL;
2233 free (output_dependency_cache);
2234 output_dependency_cache = NULL;
2235 free (anti_dependency_cache);
2236 anti_dependency_cache = NULL;
2237 #ifdef ENABLE_CHECKING
2238 free (forward_dependency_cache);
2239 forward_dependency_cache = NULL;
2240 #endif
2241 if (current_sched_info->flags & DO_SPECULATION)
2242 {
2243 free (spec_dependency_cache);
2244 spec_dependency_cache = NULL;
2245 }
2246 }
2247 }
2248
2249 /* Initialize some global variables needed by the dependency analysis
2250 code. */
2251
2252 void
2253 init_deps_global (void)
2254 {
2255 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
2256 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
2257 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
2258 reg_pending_barrier = NOT_A_BARRIER;
2259 }
2260
2261 /* Free everything used by the dependency analysis code. */
2262
2263 void
2264 finish_deps_global (void)
2265 {
2266 FREE_REG_SET (reg_pending_sets);
2267 FREE_REG_SET (reg_pending_clobbers);
2268 FREE_REG_SET (reg_pending_uses);
2269 }
2270
2271 /* Insert LINK into the dependence chain pointed to by LINKP and
2272 maintain the sort order. */
2273 static void
2274 adjust_add_sorted_back_dep (rtx insn, dep_link_t link, dep_link_t *linkp)
2275 {
2276 gcc_assert (current_sched_info->flags & DO_SPECULATION);
2277
2278 /* If the insn cannot move speculatively, but the link is speculative,
2279 make it a hard dependence. */
2280 if (HAS_INTERNAL_DEP (insn)
2281 && (DEP_LINK_STATUS (link) & SPECULATIVE))
2282 {
2283 DEP_LINK_STATUS (link) &= ~SPECULATIVE;
2284
2285 if (true_dependency_cache)
2286 bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
2287 INSN_LUID (DEP_LINK_PRO (link)));
2288 }
2289
2290 /* Non-speculative links go at the head of deps_list, followed by
2291 speculative links. */
2292 if (DEP_LINK_STATUS (link) & SPECULATIVE)
2293 while (*linkp && !(DEP_LINK_STATUS (*linkp) & SPECULATIVE))
2294 linkp = &DEP_LINK_NEXT (*linkp);
2295
2296 attach_dep_link (link, linkp);
2297
2298 if (CHECK)
2299 gcc_assert (deps_list_consistent_p (INSN_BACK_DEPS (insn)));
2300 }
2301
2302 /* Move the dependence pointed to by LINKP to the back dependencies
2303 of INSN, and also add this dependence to the forward ones. All dep_links,
2304 except the one pointed to by LINKP, must be sorted. */
2305 static void
2306 adjust_back_add_forw_dep (rtx insn, dep_link_t *linkp)
2307 {
2308 dep_link_t link;
2309
2310 gcc_assert (current_sched_info->flags & DO_SPECULATION);
2311
2312 link = *linkp;
2313 detach_dep_link (link);
2314
2315 adjust_add_sorted_back_dep (insn, link,
2316 &DEPS_LIST_FIRST (INSN_BACK_DEPS (insn)));
2317 add_forw_dep (link);
2318 }
2319
2320 /* Remove forward dependence described by L. */
2321 static void
2322 delete_forw_dep (dep_link_t l)
2323 {
2324 gcc_assert (current_sched_info->flags & DO_SPECULATION);
2325
2326 #ifdef ENABLE_CHECKING
2327 if (true_dependency_cache)
2328 bitmap_clear_bit (&forward_dependency_cache[INSN_LUID (DEP_LINK_PRO (l))],
2329 INSN_LUID (DEP_LINK_CON (l)));
2330 #endif
2331
2332 detach_dep_link (l);
2333
2334 INSN_DEP_COUNT (DEP_LINK_CON (l))--;
2335 }
2336
2337 /* Estimate the weakness of dependence between MEM1 and MEM2. */
2338 static dw_t
2339 estimate_dep_weak (rtx mem1, rtx mem2)
2340 {
2341 rtx r1, r2;
2342
2343 if (mem1 == mem2)
2344 /* MEMs are the same - don't speculate. */
2345 return MIN_DEP_WEAK;
2346
2347 r1 = XEXP (mem1, 0);
2348 r2 = XEXP (mem2, 0);
2349
2350 if (r1 == r2
2351 || (REG_P (r1) && REG_P (r2)
2352 && REGNO (r1) == REGNO (r2)))
2353 /* Again, MEMs are the same. */
2354 return MIN_DEP_WEAK;
2355 else if ((REG_P (r1) && !REG_P (r2))
2356 || (!REG_P (r1) && REG_P (r2)))
2357 /* Different addressing modes - a reason to be more speculative
2358 than usual. */
2359 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
2360 else
2361 /* We can't say anything about the dependence. */
2362 return UNCERTAIN_DEP_WEAK;
2363 }
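/* For instance (illustrative only), comparing (mem (reg A)) against
   (mem (plus (reg B) (const_int 4))) takes the "different addressing
   modes" branch above and yields the midpoint between
   UNCERTAIN_DEP_WEAK and NO_DEP_WEAK: such a pair is judged less likely
   to conflict than one we know nothing about, but not as safe as no
   dependence at all.  */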
2364
2365 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
2366 This function can handle the case where INSN and ELEM are the same (INSN == ELEM).
2367 It is a convenience wrapper. */
2368 void
2369 add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
2370 {
2371 ds_t ds;
2372
2373 if (dep_type == REG_DEP_TRUE)
2374 ds = DEP_TRUE;
2375 else if (dep_type == REG_DEP_OUTPUT)
2376 ds = DEP_OUTPUT;
2377 else if (dep_type == REG_DEP_ANTI)
2378 ds = DEP_ANTI;
2379 else
2380 gcc_unreachable ();
2381
2382 maybe_add_or_update_back_dep_1 (insn, elem, dep_type, ds, 0, 0, 0);
2383 }
2384
2385 /* Add or update backward dependence between INSN and ELEM
2386 with given type DEP_TYPE and dep_status DS.
2387 This function is a convenience wrapper. */
2388 enum DEPS_ADJUST_RESULT
2389 add_or_update_back_dep (rtx insn, rtx elem, enum reg_note dep_type, ds_t ds)
2390 {
2391 return add_or_update_back_dep_1 (insn, elem, dep_type, ds, 0, 0, 0);
2392 }
2393
2394 /* Add or update both backward and forward dependencies between INSN and ELEM
2395 with given type DEP_TYPE and dep_status DS. */
2396 void
2397 add_or_update_back_forw_dep (rtx insn, rtx elem, enum reg_note dep_type,
2398 ds_t ds)
2399 {
2400 enum DEPS_ADJUST_RESULT res;
2401 dep_link_t *linkp;
2402
2403 res = add_or_update_back_dep_1 (insn, elem, dep_type, ds, 0, 0, &linkp);
2404
2405 if (res == DEP_CHANGED || res == DEP_CREATED)
2406 {
2407 if (res == DEP_CHANGED)
2408 delete_forw_dep (DEP_NODE_FORW (DEP_LINK_NODE (*linkp)));
2409 else if (res == DEP_CREATED)
2410 linkp = &DEPS_LIST_FIRST (INSN_BACK_DEPS (insn));
2411
2412 adjust_back_add_forw_dep (insn, linkp);
2413 }
2414 }
2415
2416 /* Add both backward and forward dependencies between INSN and ELEM
2417 with given type DEP_TYPE and dep_status DS. */
2418 void
2419 add_back_forw_dep (rtx insn, rtx elem, enum reg_note dep_type, ds_t ds)
2420 {
2421 add_back_dep (insn, elem, dep_type, ds);
2422 adjust_back_add_forw_dep (insn, &DEPS_LIST_FIRST (INSN_BACK_DEPS (insn)));
2423
2424 if (CHECK)
2425 gcc_assert (deps_list_consistent_p (INSN_BACK_DEPS (insn)));
2426 }
2427
2428 /* Remove a dependency referred to by L. */
2429 void
2430 delete_back_forw_dep (dep_link_t l)
2431 {
2432 dep_node_t n = DEP_LINK_NODE (l);
2433
2434 gcc_assert (current_sched_info->flags & DO_SPECULATION);
2435
2436 if (true_dependency_cache != NULL)
2437 {
2438 dep_t dep = DEP_NODE_DEP (n);
2439 int elem_luid = INSN_LUID (DEP_PRO (dep));
2440 int insn_luid = INSN_LUID (DEP_CON (dep));
2441
2442 bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
2443 bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
2444 bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
2445 bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
2446 }
2447
2448 delete_forw_dep (DEP_NODE_FORW (n));
2449 detach_dep_link (DEP_NODE_BACK (n));
2450 }
2451
2452 /* Return weakness of speculative type TYPE in the dep_status DS. */
2453 dw_t
2454 get_dep_weak (ds_t ds, ds_t type)
2455 {
2456 ds = ds & type;
2457 switch (type)
2458 {
2459 case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
2460 case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
2461 case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
2462 case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
2463 default: gcc_unreachable ();
2464 }
2465
2466 gcc_assert (MIN_DEP_WEAK <= ds && ds <= MAX_DEP_WEAK);
2467 return (dw_t) ds;
2468 }
2469
2470 /* Return a dep_status that is the same as DS, except that the
2471 speculative type TYPE has its weakness set to DW. */
2472 ds_t
2473 set_dep_weak (ds_t ds, ds_t type, dw_t dw)
2474 {
2475 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
2476
2477 ds &= ~type;
2478 switch (type)
2479 {
2480 case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
2481 case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
2482 case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
2483 case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
2484 default: gcc_unreachable ();
2485 }
2486 return ds;
2487 }
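/* Illustrative sketch (never compiled): a weakness value round-trips
   through set_dep_weak / get_dep_weak.  The particular status and
   speculative type chosen here are arbitrary examples.  */
#if 0
static void
dep_weak_roundtrip_sketch (void)
{
  ds_t ds = DEP_TRUE;

  /* Record a data-speculative weakness in the BEGIN_DATA field.  */
  ds = set_dep_weak (ds, BEGIN_DATA, UNCERTAIN_DEP_WEAK);

  /* Reading the field back yields the value just stored; the DEP_TRUE
     bit is untouched.  */
  gcc_assert (get_dep_weak (ds, BEGIN_DATA) == UNCERTAIN_DEP_WEAK);
  gcc_assert (ds & DEP_TRUE);
}
#endif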
2488
2489 /* Return the join of two dep_statuses DS1 and DS2. */
2490 ds_t
2491 ds_merge (ds_t ds1, ds_t ds2)
2492 {
2493 ds_t ds, t;
2494
2495 gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
2496
2497 ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
2498
2499 t = FIRST_SPEC_TYPE;
2500 do
2501 {
2502 if ((ds1 & t) && !(ds2 & t))
2503 ds |= ds1 & t;
2504 else if (!(ds1 & t) && (ds2 & t))
2505 ds |= ds2 & t;
2506 else if ((ds1 & t) && (ds2 & t))
2507 {
2508 ds_t dw;
2509
2510 dw = ((ds_t) get_dep_weak (ds1, t)) * ((ds_t) get_dep_weak (ds2, t));
2511 dw /= MAX_DEP_WEAK;
2512 if (dw < MIN_DEP_WEAK)
2513 dw = MIN_DEP_WEAK;
2514
2515 ds = set_dep_weak (ds, t, (dw_t) dw);
2516 }
2517
2518 if (t == LAST_SPEC_TYPE)
2519 break;
2520 t <<= SPEC_TYPE_SHIFT;
2521 }
2522 while (1);
2523
2524 return ds;
2525 }
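/* Worked example of the merge rule above (illustrative numbers): if DS1
   and DS2 both carry BEGIN_DATA with weakness MAX_DEP_WEAK / 2, the
   merged weakness is (MAX_DEP_WEAK / 2) * (MAX_DEP_WEAK / 2)
   / MAX_DEP_WEAK, i.e. roughly MAX_DEP_WEAK / 4, clamped below by
   MIN_DEP_WEAK -- combining two uncertain speculations produces an even
   weaker one.  */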
2526
2527 #ifdef INSN_SCHEDULING
2528 #ifdef ENABLE_CHECKING
2529 /* Verify that dependence type and status are consistent.
2530 If RELAXED_P is true, then skip dep_weakness checks. */
2531 static void
2532 check_dep_status (enum reg_note dt, ds_t ds, bool relaxed_p)
2533 {
2534 /* Check that dependence type contains the same bits as the status. */
2535 if (dt == REG_DEP_TRUE)
2536 gcc_assert (ds & DEP_TRUE);
2537 else if (dt == REG_DEP_OUTPUT)
2538 gcc_assert ((ds & DEP_OUTPUT)
2539 && !(ds & DEP_TRUE));
2540 else
2541 gcc_assert ((dt == REG_DEP_ANTI)
2542 && (ds & DEP_ANTI)
2543 && !(ds & (DEP_OUTPUT | DEP_TRUE)));
2544
2545 /* HARD_DEP cannot appear in the dep_status of a link. */
2546 gcc_assert (!(ds & HARD_DEP));
2547
2548 /* Check that dependence status is set correctly when speculation is not
2549 supported. */
2550 if (!(current_sched_info->flags & DO_SPECULATION))
2551 gcc_assert (!(ds & SPECULATIVE));
2552 else if (ds & SPECULATIVE)
2553 {
2554 if (!relaxed_p)
2555 {
2556 ds_t type = FIRST_SPEC_TYPE;
2557
2558 /* Check that dependence weakness is in proper range. */
2559 do
2560 {
2561 if (ds & type)
2562 get_dep_weak (ds, type);
2563
2564 if (type == LAST_SPEC_TYPE)
2565 break;
2566 type <<= SPEC_TYPE_SHIFT;
2567 }
2568 while (1);
2569 }
2570
2571 if (ds & BEGIN_SPEC)
2572 {
2573 /* Only true dependence can be data speculative. */
2574 if (ds & BEGIN_DATA)
2575 gcc_assert (ds & DEP_TRUE);
2576
2577 /* Control dependencies in the insn scheduler are represented by
2578 anti-dependencies, therefore only an anti-dependence can be
2579 control speculative. */
2580 if (ds & BEGIN_CONTROL)
2581 gcc_assert (ds & DEP_ANTI);
2582 }
2583 else
2584 {
2585 /* Subsequent speculations should resolve true dependencies. */
2586 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
2587 }
2588
2589 /* Check that true and anti dependencies can't have other speculative
2590 statuses. */
2591 if (ds & DEP_TRUE)
2592 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
2593 /* An output dependence can't be speculative at all. */
2594 gcc_assert (!(ds & DEP_OUTPUT));
2595 if (ds & DEP_ANTI)
2596 gcc_assert (ds & BEGIN_CONTROL);
2597 }
2598 }
2599 #endif
2600 #endif