gcc/sched-deps.c
1 /* Instruction scheduling pass. This file computes dependencies between
2 instructions.
3 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 Free Software Foundation, Inc.
6 Contributed by Michael Tiemann (tiemann@cygnus.com). Enhanced by,
7 and currently maintained by, Jim Wilson (wilson@cygnus.com).
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
24 \f
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "toplev.h"
30 #include "rtl.h"
31 #include "tm_p.h"
32 #include "hard-reg-set.h"
33 #include "regs.h"
34 #include "function.h"
35 #include "flags.h"
36 #include "insn-config.h"
37 #include "insn-attr.h"
38 #include "except.h"
40 #include "recog.h"
41 #include "sched-int.h"
42 #include "params.h"
43 #include "cselib.h"
44
45 #ifdef INSN_SCHEDULING
46
47 #ifdef ENABLE_CHECKING
48 #define CHECK (true)
49 #else
50 #define CHECK (false)
51 #endif
52
53 /* Holds current parameters for the dependency analyzer. */
54 struct sched_deps_info_def *sched_deps_info;
55
56 /* The data is specific to the Haifa scheduler. */
57 VEC(haifa_deps_insn_data_def, heap) *h_d_i_d = NULL;
58
59 /* Return the major type present in the DS. */
60 enum reg_note
61 ds_to_dk (ds_t ds)
62 {
63 if (ds & DEP_TRUE)
64 return REG_DEP_TRUE;
65
66 if (ds & DEP_OUTPUT)
67 return REG_DEP_OUTPUT;
68
69 gcc_assert (ds & DEP_ANTI);
70
71 return REG_DEP_ANTI;
72 }
73
74 /* Return equivalent dep_status. */
75 ds_t
76 dk_to_ds (enum reg_note dk)
77 {
78 switch (dk)
79 {
80 case REG_DEP_TRUE:
81 return DEP_TRUE;
82
83 case REG_DEP_OUTPUT:
84 return DEP_OUTPUT;
85
86 default:
87 gcc_assert (dk == REG_DEP_ANTI);
88 return DEP_ANTI;
89 }
90 }
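
/* Illustrative sketch (not part of the original file): ds_to_dk () and
   dk_to_ds () are inverses on the major dependence types, so a round
   trip preserves the kind:

     enum reg_note kind = REG_DEP_OUTPUT;
     gcc_assert (ds_to_dk (dk_to_ds (kind)) == kind);  */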
91
92 /* Functions to operate on the dependence information container, dep_t. */
93
94 /* Init DEP with the arguments. */
95 void
96 init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note type, ds_t ds)
97 {
98 DEP_PRO (dep) = pro;
99 DEP_CON (dep) = con;
100 DEP_TYPE (dep) = type;
101 DEP_STATUS (dep) = ds;
102 }
103
104 /* Init DEP with the arguments.
105 While most of the scheduler (including targets) only needs the major type
106 of the dependency, it is convenient to hide the full dep_status from it. */
107 void
108 init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
109 {
110 ds_t ds;
111
112 if ((current_sched_info->flags & USE_DEPS_LIST))
113 ds = dk_to_ds (kind);
114 else
115 ds = -1;
116
117 init_dep_1 (dep, pro, con, kind, ds);
118 }
119
120 /* Make a copy of FROM in TO. */
121 static void
122 copy_dep (dep_t to, dep_t from)
123 {
124 memcpy (to, from, sizeof (*to));
125 }
126
127 static void dump_ds (FILE *, ds_t);
128
129 /* Define flags for dump_dep (). */
130
131 /* Dump producer of the dependence. */
132 #define DUMP_DEP_PRO (2)
133
134 /* Dump consumer of the dependence. */
135 #define DUMP_DEP_CON (4)
136
137 /* Dump type of the dependence. */
138 #define DUMP_DEP_TYPE (8)
139
140 /* Dump status of the dependence. */
141 #define DUMP_DEP_STATUS (16)
142
143 /* Dump all information about the dependence. */
144 #define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
145 |DUMP_DEP_STATUS)
146
147 /* Dump DEP to DUMP.
148 FLAGS is a bit mask specifying what information about DEP needs
149 to be printed.
150 If FLAGS has the very first bit set, then dump all information about DEP
151 and propagate this bit into the callee dump functions. */
152 static void
153 dump_dep (FILE *dump, dep_t dep, int flags)
154 {
155 if (flags & 1)
156 flags |= DUMP_DEP_ALL;
157
158 fprintf (dump, "<");
159
160 if (flags & DUMP_DEP_PRO)
161 fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));
162
163 if (flags & DUMP_DEP_CON)
164 fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));
165
166 if (flags & DUMP_DEP_TYPE)
167 {
168 char t;
169 enum reg_note type = DEP_TYPE (dep);
170
171 switch (type)
172 {
173 case REG_DEP_TRUE:
174 t = 't';
175 break;
176
177 case REG_DEP_OUTPUT:
178 t = 'o';
179 break;
180
181 case REG_DEP_ANTI:
182 t = 'a';
183 break;
184
185 default:
186 gcc_unreachable ();
187 break;
188 }
189
190 fprintf (dump, "%c; ", t);
191 }
192
193 if (flags & DUMP_DEP_STATUS)
194 {
195 if (current_sched_info->flags & USE_DEPS_LIST)
196 dump_ds (dump, DEP_STATUS (dep));
197 }
198
199 fprintf (dump, ">");
200 }
201
202 /* Default flags for dump_dep (). */
203 static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);
204
205 /* Dump all fields of DEP to STDERR. */
206 void
207 sd_debug_dep (dep_t dep)
208 {
209 dump_dep (stderr, dep, 1);
210 fprintf (stderr, "\n");
211 }
212
213 /* Functions to operate on a single link from the dependency lists -
214 dep_link_t. */
215
216 /* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
217 PREV_NEXTP. */
218 static void
219 attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
220 {
221 dep_link_t next = *prev_nextp;
222
223 gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
224 && DEP_LINK_NEXT (l) == NULL);
225
226 /* Init node being inserted. */
227 DEP_LINK_PREV_NEXTP (l) = prev_nextp;
228 DEP_LINK_NEXT (l) = next;
229
230 /* Fix next node. */
231 if (next != NULL)
232 {
233 gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);
234
235 DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
236 }
237
238 /* Fix prev node. */
239 *prev_nextp = l;
240 }
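
/* Illustrative sketch (not part of the original file): these lists use
   the "pointer to the previous next field" idiom, so the list head and
   interior links are handled uniformly.  Prepending link L to list LST
   is simply:

     attach_dep_link (l, &DEPS_LIST_FIRST (lst));

   and no empty-list special case is needed, because *PREV_NEXTP is
   then the head pointer itself.  */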
241
242 /* Add dep_link LINK to deps_list L. */
243 static void
244 add_to_deps_list (dep_link_t link, deps_list_t l)
245 {
246 attach_dep_link (link, &DEPS_LIST_FIRST (l));
247
248 ++DEPS_LIST_N_LINKS (l);
249 }
250
251 /* Detach dep_link L from the list. */
252 static void
253 detach_dep_link (dep_link_t l)
254 {
255 dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
256 dep_link_t next = DEP_LINK_NEXT (l);
257
258 *prev_nextp = next;
259
260 if (next != NULL)
261 DEP_LINK_PREV_NEXTP (next) = prev_nextp;
262
263 DEP_LINK_PREV_NEXTP (l) = NULL;
264 DEP_LINK_NEXT (l) = NULL;
265 }
266
267 /* Remove link LINK from list LIST. */
268 static void
269 remove_from_deps_list (dep_link_t link, deps_list_t list)
270 {
271 detach_dep_link (link);
272
273 --DEPS_LIST_N_LINKS (list);
274 }
275
276 /* Move link LINK from list FROM to list TO. */
277 static void
278 move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
279 {
280 remove_from_deps_list (link, from);
281 add_to_deps_list (link, to);
282 }
283
284 /* Return true if LINK is not attached to any list. */
285 static bool
286 dep_link_is_detached_p (dep_link_t link)
287 {
288 return DEP_LINK_PREV_NEXTP (link) == NULL;
289 }
290
291 /* Pool to hold all dependency nodes (dep_node_t). */
292 static alloc_pool dn_pool;
293
294 /* Number of dep_nodes out there. */
295 static int dn_pool_diff = 0;
296
297 /* Create a dep_node. */
298 static dep_node_t
299 create_dep_node (void)
300 {
301 dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
302 dep_link_t back = DEP_NODE_BACK (n);
303 dep_link_t forw = DEP_NODE_FORW (n);
304
305 DEP_LINK_NODE (back) = n;
306 DEP_LINK_NEXT (back) = NULL;
307 DEP_LINK_PREV_NEXTP (back) = NULL;
308
309 DEP_LINK_NODE (forw) = n;
310 DEP_LINK_NEXT (forw) = NULL;
311 DEP_LINK_PREV_NEXTP (forw) = NULL;
312
313 ++dn_pool_diff;
314
315 return n;
316 }
317
318 /* Delete dep_node N. N must not be connected to any deps_list. */
319 static void
320 delete_dep_node (dep_node_t n)
321 {
322 gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
323 && dep_link_is_detached_p (DEP_NODE_FORW (n)));
324
325 --dn_pool_diff;
326
327 pool_free (dn_pool, n);
328 }
329
330 /* Pool to hold dependencies lists (deps_list_t). */
331 static alloc_pool dl_pool;
332
333 /* Number of deps_lists out there. */
334 static int dl_pool_diff = 0;
335
336 /* Functions to operate on dependence lists - deps_list_t. */
337
338 /* Return true if list L is empty. */
339 static bool
340 deps_list_empty_p (deps_list_t l)
341 {
342 return DEPS_LIST_N_LINKS (l) == 0;
343 }
344
345 /* Create a new deps_list. */
346 static deps_list_t
347 create_deps_list (void)
348 {
349 deps_list_t l = (deps_list_t) pool_alloc (dl_pool);
350
351 DEPS_LIST_FIRST (l) = NULL;
352 DEPS_LIST_N_LINKS (l) = 0;
353
354 ++dl_pool_diff;
355 return l;
356 }
357
358 /* Free deps_list L. */
359 static void
360 free_deps_list (deps_list_t l)
361 {
362 gcc_assert (deps_list_empty_p (l));
363
364 --dl_pool_diff;
365
366 pool_free (dl_pool, l);
367 }
368
369 /* Return true if there are no dep_nodes or deps_lists out there.
370 After the region is scheduled, all the dependency nodes and lists
371 should [generally] have been returned to the pool. */
372 bool
373 deps_pools_are_empty_p (void)
374 {
375 return dn_pool_diff == 0 && dl_pool_diff == 0;
376 }
377
378 /* Remove all elements from L. */
379 static void
380 clear_deps_list (deps_list_t l)
381 {
382 do
383 {
384 dep_link_t link = DEPS_LIST_FIRST (l);
385
386 if (link == NULL)
387 break;
388
389 remove_from_deps_list (link, l);
390 }
391 while (1);
392 }
393
394 static regset reg_pending_sets;
395 static regset reg_pending_clobbers;
396 static regset reg_pending_uses;
397 static enum reg_pending_barrier_mode reg_pending_barrier;
398
399 /* To speed up the test for duplicate dependency links we keep a
400 record of dependencies created by add_dependence when the average
401 number of instructions in a basic block is very large.
402
403 Studies have shown that there are typically around 5 instructions between
404 branches in typical C code. So we can guess that the average
405 basic block is approximately 5 instructions long; we will choose 100X
406 the average size as a very large basic block.
407
408 Each insn has associated bitmaps for its dependencies. Each bitmap
409 has enough entries to represent a dependency on any other insn in
410 the insn chain. If the cache bitmaps for true dependencies are
411 allocated, then the output and anti dependency bitmaps are too. */
412 static bitmap_head *true_dependency_cache = NULL;
413 static bitmap_head *output_dependency_cache = NULL;
414 static bitmap_head *anti_dependency_cache = NULL;
415 static bitmap_head *spec_dependency_cache = NULL;
416 static int cache_size;
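
/* Illustrative sketch (not part of the original file): with the caches
   above, testing whether a dependence PRO -> CON has been recorded is
   just a few bitmap lookups keyed by luids:

     int e = INSN_LUID (pro), i = INSN_LUID (con);
     bool any_p = (bitmap_bit_p (&true_dependency_cache[i], e)
                   || bitmap_bit_p (&output_dependency_cache[i], e)
                   || bitmap_bit_p (&anti_dependency_cache[i], e));

   sd_find_dep_between () below uses exactly this test to skip the
   list walk when possible.  */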
417
418 static int deps_may_trap_p (const_rtx);
419 static void add_dependence_list (rtx, rtx, int, enum reg_note);
420 static void add_dependence_list_and_free (struct deps *, rtx,
421 rtx *, int, enum reg_note);
422 static void delete_all_dependences (rtx);
423 static void fixup_sched_groups (rtx);
424
425 static void flush_pending_lists (struct deps *, rtx, int, int);
426 static void sched_analyze_1 (struct deps *, rtx, rtx);
427 static void sched_analyze_2 (struct deps *, rtx, rtx);
428 static void sched_analyze_insn (struct deps *, rtx, rtx);
429
430 static bool sched_has_condition_p (const_rtx);
431 static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
432
433 static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
434 rtx, rtx);
435 static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);
436
437 #ifdef ENABLE_CHECKING
438 static void check_dep (dep_t, bool);
439 #endif
440 \f
441 /* Return nonzero if a load of the memory reference MEM can cause a trap. */
442
443 static int
444 deps_may_trap_p (const_rtx mem)
445 {
446 const_rtx addr = XEXP (mem, 0);
447
448 if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
449 {
450 const_rtx t = get_reg_known_value (REGNO (addr));
451 if (t)
452 addr = t;
453 }
454 return rtx_addr_can_trap_p (addr);
455 }
456 \f
457
458 /* Find the condition under which INSN is executed. If REV is not NULL,
459 it is set to TRUE when the returned comparison should be reversed
460 to get the actual condition. */
461 static rtx
462 sched_get_condition_with_rev (const_rtx insn, bool *rev)
463 {
464 rtx pat = PATTERN (insn);
465 rtx src;
466
467 if (pat == 0)
468 return 0;
469
470 if (rev)
471 *rev = false;
472
473 if (GET_CODE (pat) == COND_EXEC)
474 return COND_EXEC_TEST (pat);
475
476 if (!any_condjump_p (insn) || !onlyjump_p (insn))
477 return 0;
478
479 src = SET_SRC (pc_set (insn));
480
481 if (XEXP (src, 2) == pc_rtx)
482 return XEXP (src, 0);
483 else if (XEXP (src, 1) == pc_rtx)
484 {
485 rtx cond = XEXP (src, 0);
486 enum rtx_code revcode = reversed_comparison_code (cond, insn);
487
488 if (revcode == UNKNOWN)
489 return 0;
490
491 if (rev)
492 *rev = true;
493 return cond;
494 }
495
496 return 0;
497 }
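
/* Illustrative sketch (not part of the original file): the two RTL
   shapes recognized above are, schematically,

     (cond_exec (ne (reg cc) (const_int 0)) (set ...))

   for predicated insns, and for conditional jumps

     (set (pc) (if_then_else (eq (reg cc) (const_int 0))
                             (label_ref L) (pc)))

   where the condition is returned unchanged when the branch target is
   the first arm and *REV is set when the target is the second arm.  */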
498
499 /* True when we can find a condition under which INSN is executed. */
500 static bool
501 sched_has_condition_p (const_rtx insn)
502 {
503 return !! sched_get_condition_with_rev (insn, NULL);
504 }
505
506 \f
507
508 /* Return nonzero if conditions COND1 and COND2 can never be both true. */
509 static int
510 conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
511 {
512 if (COMPARISON_P (cond1)
513 && COMPARISON_P (cond2)
514 && GET_CODE (cond1) ==
515 (rev1 == rev2
516 ? reversed_comparison_code (cond2, NULL)
517 : GET_CODE (cond2))
518 && XEXP (cond1, 0) == XEXP (cond2, 0)
519 && XEXP (cond1, 1) == XEXP (cond2, 1))
520 return 1;
521 return 0;
522 }
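
/* Illustrative sketch (not part of the original file): two conditions
   on the same operands with reversed codes are mutually exclusive.
   With REV1 == REV2, for

     cond1 = (eq (reg 100) (const_int 0))
     cond2 = (ne (reg 100) (const_int 0))

   GET_CODE (cond1) == reversed_comparison_code (cond2, NULL) holds, so
   insns predicated on them can never both execute.  */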
523
524 /* Return true if insn1 and insn2 can never depend on one another because
525 the conditions under which they are executed are mutually exclusive. */
526 bool
527 sched_insns_conditions_mutex_p (const_rtx insn1, const_rtx insn2)
528 {
529 rtx cond1, cond2;
530 bool rev1 = false, rev2 = false;
531
532 /* df doesn't handle conditional lifetimes entirely correctly;
533 calls mess up the conditional lifetimes. */
534 if (!CALL_P (insn1) && !CALL_P (insn2))
535 {
536 cond1 = sched_get_condition_with_rev (insn1, &rev1);
537 cond2 = sched_get_condition_with_rev (insn2, &rev2);
538 if (cond1 && cond2
539 && conditions_mutex_p (cond1, cond2, rev1, rev2)
540 /* Make sure first instruction doesn't affect condition of second
541 instruction if switched. */
542 && !modified_in_p (cond1, insn2)
543 /* Make sure second instruction doesn't affect condition of first
544 instruction if switched. */
545 && !modified_in_p (cond2, insn1))
546 return true;
547 }
548 return false;
549 }
550 \f
551
552 /* Return true if INSN can potentially be speculated with type DS. */
553 bool
554 sched_insn_is_legitimate_for_speculation_p (const_rtx insn, ds_t ds)
555 {
556 if (HAS_INTERNAL_DEP (insn))
557 return false;
558
559 if (!NONJUMP_INSN_P (insn))
560 return false;
561
562 if (SCHED_GROUP_P (insn))
563 return false;
564
565 if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX (insn)))
566 return false;
567
568 if (side_effects_p (PATTERN (insn)))
569 return false;
570
571 if (ds & BE_IN_SPEC)
572 /* Instructions that depend on a speculatively scheduled
573 instruction cannot themselves be speculatively scheduled. */
574 {
575 if (may_trap_p (PATTERN (insn)))
576 /* If instruction might trap, it cannot be speculatively scheduled.
577 For control speculation it's obvious why and for data speculation
578 it's because the insn might get wrong input if speculation
579 wasn't successful. */
580 return false;
581
582 if ((ds & BE_IN_DATA)
583 && sched_has_condition_p (insn))
584 /* If this is a predicated instruction, then it cannot be
585 speculatively scheduled. See PR35659. */
586 return false;
587 }
588
589 return true;
590 }
591
592 /* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
593 initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
594 and remove the type of the list returned [through LIST_PTR] from TYPES_PTR.
595 This function is used to switch sd_iterator to the next list.
596 !!! For internal use only. Might consider moving it to sched-int.h. */
597 void
598 sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
599 deps_list_t *list_ptr, bool *resolved_p_ptr)
600 {
601 sd_list_types_def types = *types_ptr;
602
603 if (types & SD_LIST_HARD_BACK)
604 {
605 *list_ptr = INSN_HARD_BACK_DEPS (insn);
606 *resolved_p_ptr = false;
607 *types_ptr = types & ~SD_LIST_HARD_BACK;
608 }
609 else if (types & SD_LIST_SPEC_BACK)
610 {
611 *list_ptr = INSN_SPEC_BACK_DEPS (insn);
612 *resolved_p_ptr = false;
613 *types_ptr = types & ~SD_LIST_SPEC_BACK;
614 }
615 else if (types & SD_LIST_FORW)
616 {
617 *list_ptr = INSN_FORW_DEPS (insn);
618 *resolved_p_ptr = false;
619 *types_ptr = types & ~SD_LIST_FORW;
620 }
621 else if (types & SD_LIST_RES_BACK)
622 {
623 *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
624 *resolved_p_ptr = true;
625 *types_ptr = types & ~SD_LIST_RES_BACK;
626 }
627 else if (types & SD_LIST_RES_FORW)
628 {
629 *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
630 *resolved_p_ptr = true;
631 *types_ptr = types & ~SD_LIST_RES_FORW;
632 }
633 else
634 {
635 *list_ptr = NULL;
636 *resolved_p_ptr = false;
637 *types_ptr = SD_LIST_NONE;
638 }
639 }
640
641 /* Return the total size of INSN's lists defined by LIST_TYPES. */
642 int
643 sd_lists_size (const_rtx insn, sd_list_types_def list_types)
644 {
645 int size = 0;
646
647 while (list_types != SD_LIST_NONE)
648 {
649 deps_list_t list;
650 bool resolved_p;
651
652 sd_next_list (insn, &list_types, &list, &resolved_p);
653 size += DEPS_LIST_N_LINKS (list);
654 }
655
656 return size;
657 }
658
659 /* Return true if INSN's lists defined by LIST_TYPES are all empty. */
660 bool
661 sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
662 {
663 return sd_lists_size (insn, list_types) == 0;
664 }
665
666 /* Initialize data for INSN. */
667 void
668 sd_init_insn (rtx insn)
669 {
670 INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
671 INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
672 INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
673 INSN_FORW_DEPS (insn) = create_deps_list ();
674 INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();
675
676 /* ??? It would be nice to allocate dependency caches here. */
677 }
678
679 /* Free data for INSN. */
680 void
681 sd_finish_insn (rtx insn)
682 {
683 /* ??? It would be nice to deallocate dependency caches here. */
684
685 free_deps_list (INSN_HARD_BACK_DEPS (insn));
686 INSN_HARD_BACK_DEPS (insn) = NULL;
687
688 free_deps_list (INSN_SPEC_BACK_DEPS (insn));
689 INSN_SPEC_BACK_DEPS (insn) = NULL;
690
691 free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
692 INSN_RESOLVED_BACK_DEPS (insn) = NULL;
693
694 free_deps_list (INSN_FORW_DEPS (insn));
695 INSN_FORW_DEPS (insn) = NULL;
696
697 free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
698 INSN_RESOLVED_FORW_DEPS (insn) = NULL;
699 }
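
/* Illustrative sketch (not part of the original file): a scheduler
   front end brackets per-insn dependency analysis with the pair above:

     sd_init_insn (insn);     -- allocates the five deps lists
       ... analyze the insn and record its dependencies ...
     sd_finish_insn (insn);   -- the lists must be empty again by now

   free_deps_list () asserts deps_list_empty_p (), so every dependence
   must have been deleted before sd_finish_insn ().  */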
700
701 /* Find a dependency between producer PRO and consumer CON.
702 Search through resolved dependency lists if RESOLVED_P is true.
703 If no such dependency is found return NULL,
704 otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
705 with an iterator pointing to it. */
706 static dep_t
707 sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
708 sd_iterator_def *sd_it_ptr)
709 {
710 sd_list_types_def pro_list_type;
711 sd_list_types_def con_list_type;
712 sd_iterator_def sd_it;
713 dep_t dep;
714 bool found_p = false;
715
716 if (resolved_p)
717 {
718 pro_list_type = SD_LIST_RES_FORW;
719 con_list_type = SD_LIST_RES_BACK;
720 }
721 else
722 {
723 pro_list_type = SD_LIST_FORW;
724 con_list_type = SD_LIST_BACK;
725 }
726
727 /* Walk through either the back list of CON or the forw list of PRO,
728 depending on which one is shorter. */
729 if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
730 {
731 /* Find the dep_link with producer PRO in consumer's back_deps. */
732 FOR_EACH_DEP (con, con_list_type, sd_it, dep)
733 if (DEP_PRO (dep) == pro)
734 {
735 found_p = true;
736 break;
737 }
738 }
739 else
740 {
741 /* Find the dep_link with consumer CON in producer's forw_deps. */
742 FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
743 if (DEP_CON (dep) == con)
744 {
745 found_p = true;
746 break;
747 }
748 }
749
750 if (found_p)
751 {
752 if (sd_it_ptr != NULL)
753 *sd_it_ptr = sd_it;
754
755 return dep;
756 }
757
758 return NULL;
759 }
760
761 /* Find a dependency between producer PRO and consumer CON.
762 Use the dependency cache [if available] to check if it is present at all.
763 Search through resolved dependency lists if RESOLVED_P is true.
764 Return the dependency, or NULL if none is found. */
765 dep_t
766 sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
767 {
768 if (true_dependency_cache != NULL)
769 /* Avoiding the list walk below can cut compile times dramatically
770 for some code. */
771 {
772 int elem_luid = INSN_LUID (pro);
773 int insn_luid = INSN_LUID (con);
774
775 gcc_assert (output_dependency_cache != NULL
776 && anti_dependency_cache != NULL);
777
778 if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
779 && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
780 && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
781 return NULL;
782 }
783
784 return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
785 }
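
/* Illustrative sketch (not part of the original file): querying for an
   existing dependence between two insns, assuming valid PRO and CON:

     dep_t dep = sd_find_dep_between (pro, con, false);

     if (dep != NULL && DEP_TYPE (dep) == REG_DEP_TRUE)
       ... CON reads a value that PRO produces ...

   Thanks to the cache check this is cheap even in very large blocks.  */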
786
787 /* Add or update a dependence described by DEP.
788 MEM1 and MEM2, if non-null, correspond to memory locations in case of
789 data speculation.
790
791 The function returns a value indicating if an old entry has been changed
792 or a new entry has been added to insn's backward deps.
793
794 This function merely checks if producer and consumer are the same insn
795 and doesn't create a dep in this case. Actual manipulation of
796 dependence data structures is performed in add_or_update_dep_1. */
797 static enum DEPS_ADJUST_RESULT
798 maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
799 {
800 rtx elem = DEP_PRO (dep);
801 rtx insn = DEP_CON (dep);
802
803 gcc_assert (INSN_P (insn) && INSN_P (elem));
804
805 /* Don't depend an insn on itself. */
806 if (insn == elem)
807 {
808 if (sched_deps_info->generate_spec_deps)
809 /* INSN has an internal dependence, which we can't overcome. */
810 HAS_INTERNAL_DEP (insn) = 1;
811
812 return DEP_NODEP;
813 }
814
815 return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
816 }
817
818 /* Ask dependency caches what needs to be done for dependence DEP.
819 Return DEP_CREATED if new dependence should be created and there is no
820 need to try to find one searching the dependencies lists.
821 Return DEP_PRESENT if there already is a dependence described by DEP and
822 hence nothing is to be done.
823 Return DEP_CHANGED if there already is a dependence, but it should be
824 updated to incorporate additional information from DEP. */
825 static enum DEPS_ADJUST_RESULT
826 ask_dependency_caches (dep_t dep)
827 {
828 int elem_luid = INSN_LUID (DEP_PRO (dep));
829 int insn_luid = INSN_LUID (DEP_CON (dep));
830
831 gcc_assert (true_dependency_cache != NULL
832 && output_dependency_cache != NULL
833 && anti_dependency_cache != NULL);
834
835 if (!(current_sched_info->flags & USE_DEPS_LIST))
836 {
837 enum reg_note present_dep_type;
838
839 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
840 present_dep_type = REG_DEP_TRUE;
841 else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
842 present_dep_type = REG_DEP_OUTPUT;
843 else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
844 present_dep_type = REG_DEP_ANTI;
845 else
846 /* There is no existing dep so it should be created. */
847 return DEP_CREATED;
848
849 if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
850 /* DEP does not add anything to the existing dependence. */
851 return DEP_PRESENT;
852 }
853 else
854 {
855 ds_t present_dep_types = 0;
856
857 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
858 present_dep_types |= DEP_TRUE;
859 if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
860 present_dep_types |= DEP_OUTPUT;
861 if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
862 present_dep_types |= DEP_ANTI;
863
864 if (present_dep_types == 0)
865 /* There is no existing dep so it should be created. */
866 return DEP_CREATED;
867
868 if (!(current_sched_info->flags & DO_SPECULATION)
869 || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
870 {
871 if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
872 == present_dep_types)
873 /* DEP does not add anything to the existing dependence. */
874 return DEP_PRESENT;
875 }
876 else
877 {
878 /* Only true dependencies can be data speculative and
879 only anti dependencies can be control speculative. */
880 gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
881 == present_dep_types);
882
883 /* if (DEP is SPECULATIVE) then
884 ..we should update DEP_STATUS
885 else
886 ..we should reset existing dep to non-speculative. */
887 }
888 }
889
890 return DEP_CHANGED;
891 }
892
893 /* Set dependency caches according to DEP. */
894 static void
895 set_dependency_caches (dep_t dep)
896 {
897 int elem_luid = INSN_LUID (DEP_PRO (dep));
898 int insn_luid = INSN_LUID (DEP_CON (dep));
899
900 if (!(current_sched_info->flags & USE_DEPS_LIST))
901 {
902 switch (DEP_TYPE (dep))
903 {
904 case REG_DEP_TRUE:
905 bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
906 break;
907
908 case REG_DEP_OUTPUT:
909 bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
910 break;
911
912 case REG_DEP_ANTI:
913 bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
914 break;
915
916 default:
917 gcc_unreachable ();
918 }
919 }
920 else
921 {
922 ds_t ds = DEP_STATUS (dep);
923
924 if (ds & DEP_TRUE)
925 bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
926 if (ds & DEP_OUTPUT)
927 bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
928 if (ds & DEP_ANTI)
929 bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
930
931 if (ds & SPECULATIVE)
932 {
933 gcc_assert (current_sched_info->flags & DO_SPECULATION);
934 bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
935 }
936 }
937 }
938
939 /* The type of dependence DEP has changed from OLD_TYPE. Update the
940 dependency caches accordingly. */
941 static void
942 update_dependency_caches (dep_t dep, enum reg_note old_type)
943 {
944 int elem_luid = INSN_LUID (DEP_PRO (dep));
945 int insn_luid = INSN_LUID (DEP_CON (dep));
946
947 /* Clear the corresponding cache entry because the type of the link
948 may have changed. Keep the entries if we use dep lists (USE_DEPS_LIST). */
949 if (!(current_sched_info->flags & USE_DEPS_LIST))
950 {
951 switch (old_type)
952 {
953 case REG_DEP_OUTPUT:
954 bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
955 break;
956
957 case REG_DEP_ANTI:
958 bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
959 break;
960
961 default:
962 gcc_unreachable ();
963 }
964 }
965
966 set_dependency_caches (dep);
967 }
968
969 /* Convert a dependence pointed to by SD_IT to be non-speculative. */
970 static void
971 change_spec_dep_to_hard (sd_iterator_def sd_it)
972 {
973 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
974 dep_link_t link = DEP_NODE_BACK (node);
975 dep_t dep = DEP_NODE_DEP (node);
976 rtx elem = DEP_PRO (dep);
977 rtx insn = DEP_CON (dep);
978
979 move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));
980
981 DEP_STATUS (dep) &= ~SPECULATIVE;
982
983 if (true_dependency_cache != NULL)
984 /* Clear the cache entry. */
985 bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
986 INSN_LUID (elem));
987 }
988
989 /* Update DEP to incorporate information from NEW_DEP.
990 SD_IT points to DEP in case it should be moved to another list.
991 MEM1 and MEM2, if nonnull, correspond to memory locations in case a
992 data-speculative dependence should be updated. */
993 static enum DEPS_ADJUST_RESULT
994 update_dep (dep_t dep, dep_t new_dep,
995 sd_iterator_def sd_it ATTRIBUTE_UNUSED,
996 rtx mem1 ATTRIBUTE_UNUSED,
997 rtx mem2 ATTRIBUTE_UNUSED)
998 {
999 enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
1000 enum reg_note old_type = DEP_TYPE (dep);
1001
1002 /* If this is a more restrictive type of dependence than the
1003 existing one, then change the existing dependence to this
1004 type. */
1005 if ((int) DEP_TYPE (new_dep) < (int) old_type)
1006 {
1007 DEP_TYPE (dep) = DEP_TYPE (new_dep);
1008 res = DEP_CHANGED;
1009 }
1010
1011 if (current_sched_info->flags & USE_DEPS_LIST)
1012 /* Update DEP_STATUS. */
1013 {
1014 ds_t dep_status = DEP_STATUS (dep);
1015 ds_t ds = DEP_STATUS (new_dep);
1016 ds_t new_status = ds | dep_status;
1017
1018 if (new_status & SPECULATIVE)
1019 /* Either existing dep or a dep we're adding or both are
1020 speculative. */
1021 {
1022 if (!(ds & SPECULATIVE)
1023 || !(dep_status & SPECULATIVE))
1024 /* The new dep can't be speculative. */
1025 {
1026 new_status &= ~SPECULATIVE;
1027
1028 if (dep_status & SPECULATIVE)
1029 /* The old dep was speculative, but now it
1030 isn't. */
1031 change_spec_dep_to_hard (sd_it);
1032 }
1033 else
1034 {
1035 /* Both are speculative. Merge probabilities. */
1036 if (mem1 != NULL)
1037 {
1038 dw_t dw;
1039
1040 dw = estimate_dep_weak (mem1, mem2);
1041 ds = set_dep_weak (ds, BEGIN_DATA, dw);
1042 }
1043
1044 new_status = ds_merge (dep_status, ds);
1045 }
1046 }
1047
1048 ds = new_status;
1049
1050 if (dep_status != ds)
1051 {
1052 DEP_STATUS (dep) = ds;
1053 res = DEP_CHANGED;
1054 }
1055 }
1056
1057 if (true_dependency_cache != NULL
1058 && res == DEP_CHANGED)
1059 update_dependency_caches (dep, old_type);
1060
1061 return res;
1062 }
1063
1064 /* Add or update a dependence described by DEP.
1065 MEM1 and MEM2, if non-null, correspond to memory locations in case of
1066 data speculation.
1067
1068 The function returns a value indicating if an old entry has been changed
1069 or a new entry has been added to insn's backward deps or nothing has
1070 been updated at all. */
1071 static enum DEPS_ADJUST_RESULT
1072 add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
1073 rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
1074 {
1075 bool maybe_present_p = true;
1076 bool present_p = false;
1077
1078 gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
1079 && DEP_PRO (new_dep) != DEP_CON (new_dep));
1080
1081 #ifdef ENABLE_CHECKING
1082 check_dep (new_dep, mem1 != NULL);
1083 #endif
1084
1085 if (true_dependency_cache != NULL)
1086 {
1087 switch (ask_dependency_caches (new_dep))
1088 {
1089 case DEP_PRESENT:
1090 return DEP_PRESENT;
1091
1092 case DEP_CHANGED:
1093 maybe_present_p = true;
1094 present_p = true;
1095 break;
1096
1097 case DEP_CREATED:
1098 maybe_present_p = false;
1099 present_p = false;
1100 break;
1101
1102 default:
1103 gcc_unreachable ();
1104 break;
1105 }
1106 }
1107
1108 /* Check that we don't already have this dependence. */
1109 if (maybe_present_p)
1110 {
1111 dep_t present_dep;
1112 sd_iterator_def sd_it;
1113
1114 gcc_assert (true_dependency_cache == NULL || present_p);
1115
1116 present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
1117 DEP_CON (new_dep),
1118 resolved_p, &sd_it);
1119
1120 if (present_dep != NULL)
1121 /* We found an existing dependency between ELEM and INSN. */
1122 return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
1123 else
1124 /* We didn't find a dep, so it shouldn't be present in the cache. */
1125 gcc_assert (!present_p);
1126 }
1127
1128 /* Might want to check one level of transitivity to save conses.
1129 This check should be done in maybe_add_or_update_dep_1.
1130 Since we made it to add_or_update_dep_1, we must create
1131 (or update) a link. */
1132
1133 if (mem1 != NULL_RTX)
1134 {
1135 gcc_assert (sched_deps_info->generate_spec_deps);
1136 DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
1137 estimate_dep_weak (mem1, mem2));
1138 }
1139
1140 sd_add_dep (new_dep, resolved_p);
1141
1142 return DEP_CREATED;
1143 }
1144
1145 /* Initialize BACK_LIST_PTR with consumer's backward list and
1146 FORW_LIST_PTR with producer's forward list. If RESOLVED_P is true
1147 initialize with lists that hold resolved deps. */
1148 static void
1149 get_back_and_forw_lists (dep_t dep, bool resolved_p,
1150 deps_list_t *back_list_ptr,
1151 deps_list_t *forw_list_ptr)
1152 {
1153 rtx con = DEP_CON (dep);
1154
1155 if (!resolved_p)
1156 {
1157 if ((current_sched_info->flags & DO_SPECULATION)
1158 && (DEP_STATUS (dep) & SPECULATIVE))
1159 *back_list_ptr = INSN_SPEC_BACK_DEPS (con);
1160 else
1161 *back_list_ptr = INSN_HARD_BACK_DEPS (con);
1162
1163 *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
1164 }
1165 else
1166 {
1167 *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
1168 *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
1169 }
1170 }
1171
1172 /* Add dependence described by DEP.
1173 If RESOLVED_P is true treat the dependence as a resolved one. */
1174 void
1175 sd_add_dep (dep_t dep, bool resolved_p)
1176 {
1177 dep_node_t n = create_dep_node ();
1178 deps_list_t con_back_deps;
1179 deps_list_t pro_forw_deps;
1180 rtx elem = DEP_PRO (dep);
1181 rtx insn = DEP_CON (dep);
1182
1183 gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
1184
1185 if ((current_sched_info->flags & DO_SPECULATION)
1186 && !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
1187 DEP_STATUS (dep) &= ~SPECULATIVE;
1188
1189 copy_dep (DEP_NODE_DEP (n), dep);
1190
1191 get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);
1192
1193 add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
1194
1195 #ifdef ENABLE_CHECKING
1196 check_dep (dep, false);
1197 #endif
1198
1199 add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1200
1201 /* If we are adding a dependency to INSN's LOG_LINKs, then note that
1202 in the bitmap caches of dependency information. */
1203 if (true_dependency_cache != NULL)
1204 set_dependency_caches (dep);
1205 }
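
/* Illustrative sketch (not part of the original file): the usual way to
   create a dependence is to fill a stack-allocated dep_def and hand it
   to sd_add_dep () or sd_add_or_update_dep (), much as haifa_note_dep ()
   below does:

     dep_def _dep, *dep = &_dep;

     init_dep (dep, elem, insn, REG_DEP_TRUE);
     sd_add_dep (dep, false);

   sd_add_dep () copies the descriptor into a pooled dep_node, so the
   stack copy may safely go out of scope afterwards.  */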
1206
1207 /* Add or update a dependence described by DEP; search the resolved
1208 lists if RESOLVED_P is true.
1209 This function is a convenience wrapper around add_or_update_dep_1. */
1210 enum DEPS_ADJUST_RESULT
1211 sd_add_or_update_dep (dep_t dep, bool resolved_p)
1212 {
1213 return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
1214 }
1215
1216 /* Resolve the dependence pointed to by SD_IT.
1217 SD_IT will advance to the next element. */
1218 void
1219 sd_resolve_dep (sd_iterator_def sd_it)
1220 {
1221 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1222 dep_t dep = DEP_NODE_DEP (node);
1223 rtx pro = DEP_PRO (dep);
1224 rtx con = DEP_CON (dep);
1225
1226 if ((current_sched_info->flags & DO_SPECULATION)
1227 && (DEP_STATUS (dep) & SPECULATIVE))
1228 move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
1229 INSN_RESOLVED_BACK_DEPS (con));
1230 else
1231 move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
1232 INSN_RESOLVED_BACK_DEPS (con));
1233
1234 move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
1235 INSN_RESOLVED_FORW_DEPS (pro));
1236 }
1237
1238 /* Make TO depend on all of FROM's producers.
1239 If RESOLVED_P is true add dependencies to the resolved lists. */
1240 void
1241 sd_copy_back_deps (rtx to, rtx from, bool resolved_p)
1242 {
1243 sd_list_types_def list_type;
1244 sd_iterator_def sd_it;
1245 dep_t dep;
1246
1247 list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;
1248
1249 FOR_EACH_DEP (from, list_type, sd_it, dep)
1250 {
1251 dep_def _new_dep, *new_dep = &_new_dep;
1252
1253 copy_dep (new_dep, dep);
1254 DEP_CON (new_dep) = to;
1255 sd_add_dep (new_dep, resolved_p);
1256 }
1257 }
1258
1259 /* Remove a dependency referred to by SD_IT.
1260 SD_IT will point to the next dependence after removal. */
1261 void
1262 sd_delete_dep (sd_iterator_def sd_it)
1263 {
1264 dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
1265 dep_t dep = DEP_NODE_DEP (n);
1266 rtx pro = DEP_PRO (dep);
1267 rtx con = DEP_CON (dep);
1268 deps_list_t con_back_deps;
1269 deps_list_t pro_forw_deps;
1270
1271 if (true_dependency_cache != NULL)
1272 {
1273 int elem_luid = INSN_LUID (pro);
1274 int insn_luid = INSN_LUID (con);
1275
1276 bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
1277 bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1278 bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1279
1280 if (current_sched_info->flags & DO_SPECULATION)
1281 bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
1282 }
1283
1284 get_back_and_forw_lists (dep, sd_it.resolved_p,
1285 &con_back_deps, &pro_forw_deps);
1286
1287 remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
1288 remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1289
1290 delete_dep_node (n);
1291 }
1292
1293 /* Dump size of the lists. */
1294 #define DUMP_LISTS_SIZE (2)
1295
1296 /* Dump dependencies of the lists. */
1297 #define DUMP_LISTS_DEPS (4)
1298
1299 /* Dump all information about the lists. */
1300 #define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)
1301
1302 /* Dump deps_lists of INSN specified by TYPES to DUMP.
1303 FLAGS is a bit mask specifying what information about the lists needs
1304 to be printed.
1305 If FLAGS has the very first bit set, then dump all information about
1306 the lists and propagate this bit into the callee dump functions. */
1307 static void
1308 dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
1309 {
1310 sd_iterator_def sd_it;
1311 dep_t dep;
1312 int all;
1313
1314 all = (flags & 1);
1315
1316 if (all)
1317 flags |= DUMP_LISTS_ALL;
1318
1319 fprintf (dump, "[");
1320
1321 if (flags & DUMP_LISTS_SIZE)
1322 fprintf (dump, "%d; ", sd_lists_size (insn, types));
1323
1324 if (flags & DUMP_LISTS_DEPS)
1325 {
1326 FOR_EACH_DEP (insn, types, sd_it, dep)
1327 {
1328 dump_dep (dump, dep, dump_dep_flags | all);
1329 fprintf (dump, " ");
1330 }
1331 }
1332 }
1333
1334 /* Dump all information about deps_lists of INSN specified by TYPES
1335 to STDERR. */
1336 void
1337 sd_debug_lists (rtx insn, sd_list_types_def types)
1338 {
1339 dump_lists (stderr, insn, types, 1);
1340 fprintf (stderr, "\n");
1341 }
1342
1343 /* A convenience wrapper to operate on an entire list. */
1344
1345 static void
1346 add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
1347 {
1348 for (; list; list = XEXP (list, 1))
1349 {
1350 if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
1351 add_dependence (insn, XEXP (list, 0), dep_type);
1352 }
1353 }
1354
1355 /* Similar, but free *LISTP at the same time, when the context
1356 is not readonly. */
1357
1358 static void
1359 add_dependence_list_and_free (struct deps *deps, rtx insn, rtx *listp,
1360 int uncond, enum reg_note dep_type)
1361 {
1362 rtx list, next;
1363
1364 if (deps->readonly)
1365 {
1366 add_dependence_list (insn, *listp, uncond, dep_type);
1367 return;
1368 }
1369
1370 for (list = *listp, *listp = NULL; list ; list = next)
1371 {
1372 next = XEXP (list, 1);
1373 if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
1374 add_dependence (insn, XEXP (list, 0), dep_type);
1375 free_INSN_LIST_node (list);
1376 }
1377 }
1378
1379 /* Remove all occurrences of INSN from LIST. Return the number of
1380 occurrences removed. */
1381
1382 static int
1383 remove_from_dependence_list (rtx insn, rtx* listp)
1384 {
1385 int removed = 0;
1386
1387 while (*listp)
1388 {
1389 if (XEXP (*listp, 0) == insn)
1390 {
1391 remove_free_INSN_LIST_node (listp);
1392 removed++;
1393 continue;
1394 }
1395
1396 listp = &XEXP (*listp, 1);
1397 }
1398
1399 return removed;
1400 }
1401
1402 /* Same as above, but process two lists at once. */
1403 static int
1404 remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
1405 {
1406 int removed = 0;
1407
1408 while (*listp)
1409 {
1410 if (XEXP (*listp, 0) == insn)
1411 {
1412 remove_free_INSN_LIST_node (listp);
1413 remove_free_EXPR_LIST_node (exprp);
1414 removed++;
1415 continue;
1416 }
1417
1418 listp = &XEXP (*listp, 1);
1419 exprp = &XEXP (*exprp, 1);
1420 }
1421
1422 return removed;
1423 }
1424
1425 /* Clear all dependencies for an insn. */
1426 static void
1427 delete_all_dependences (rtx insn)
1428 {
1429 sd_iterator_def sd_it;
1430 dep_t dep;
1431
1432 /* The loop below could be optimized to clear the caches and back_deps
1433 in one call, but that would duplicate code from
1434 delete_dep (). */
1435
1436 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1437 sd_iterator_cond (&sd_it, &dep);)
1438 sd_delete_dep (sd_it);
1439 }
1440
1441 /* All insns in a scheduling group except the first should only have
1442 dependencies on the previous insn in the group. So we find the
1443 first instruction in the scheduling group by walking the dependence
1444 chains backwards. Then we add the dependencies for the group to
1445 the previous nonnote insn. */
1446
1447 static void
1448 fixup_sched_groups (rtx insn)
1449 {
1450 sd_iterator_def sd_it;
1451 dep_t dep;
1452 rtx prev_nonnote;
1453
1454 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1455 {
1456 rtx i = insn;
1457 rtx pro = DEP_PRO (dep);
1458
1459 do
1460 {
1461 i = prev_nonnote_insn (i);
1462
1463 if (pro == i)
1464 goto next_link;
1465 } while (SCHED_GROUP_P (i));
1466
1467 if (! sched_insns_conditions_mutex_p (i, pro))
1468 add_dependence (i, pro, DEP_TYPE (dep));
1469 next_link:;
1470 }
1471
1472 delete_all_dependences (insn);
1473
1474 prev_nonnote = prev_nonnote_insn (insn);
1475 if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
1476 && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
1477 add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
1478 }
1479 \f
1480 /* Process an insn's memory dependencies. There are four kinds of
1481 dependencies:
1482
1483 (0) read dependence: read follows read
1484 (1) true dependence: read follows write
1485 (2) output dependence: write follows write
1486 (3) anti dependence: write follows read
1487
1488 We are careful to build only dependencies which actually exist, and
1489 use transitivity to avoid building too many links. */
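
/* Illustrative sketch (not part of the original file): for the pair

     insn1: (set (mem:SI (reg A)) (reg X))   -- store
     insn2: (set (reg Y) (mem:SI (reg A)))   -- load

   insn2 has a true dependence on insn1 (read follows write).  With the
   insns swapped the store would have an anti dependence on the load,
   and two stores to the same address give an output dependence.  Reads
   of the same location normally need no ordering; read_dependence ()
   reports a conflict only in special cases such as volatile accesses.  */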
1490
1491 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1492 The MEM is a memory reference contained within INSN, which we are saving
1493 so that we can do memory aliasing on it. */
1494
1495 static void
1496 add_insn_mem_dependence (struct deps *deps, bool read_p,
1497 rtx insn, rtx mem)
1498 {
1499 rtx *insn_list;
1500 rtx *mem_list;
1501 rtx link;
1502
1503 gcc_assert (!deps->readonly);
1504 if (read_p)
1505 {
1506 insn_list = &deps->pending_read_insns;
1507 mem_list = &deps->pending_read_mems;
1508 deps->pending_read_list_length++;
1509 }
1510 else
1511 {
1512 insn_list = &deps->pending_write_insns;
1513 mem_list = &deps->pending_write_mems;
1514 deps->pending_write_list_length++;
1515 }
1516
1517 link = alloc_INSN_LIST (insn, *insn_list);
1518 *insn_list = link;
1519
1520 if (sched_deps_info->use_cselib)
1521 {
1522 mem = shallow_copy_rtx (mem);
1523 XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
1524 }
1525 link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
1526 *mem_list = link;
1527 }
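
/* Illustrative sketch (not part of the original file): the pending
   lists come in parallel pairs -- an INSN_LIST of insns and an
   EXPR_LIST of the memory references they touch -- so clients walk
   them in lockstep:

     rtx pending = deps->pending_read_insns;
     rtx pending_mem = deps->pending_read_mems;

     while (pending)
       {
         ... XEXP (pending, 0) is the insn,
             XEXP (pending_mem, 0) its canonicalized MEM ...
         pending = XEXP (pending, 1);
         pending_mem = XEXP (pending_mem, 1);
       }

   sched_analyze_1 () and sched_analyze_2 () below use this walk.  */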
1528
1529 /* Make a dependency between every memory reference on the pending lists
1530 and INSN, thus flushing the pending lists. FOR_READ is true if emitting
1531 dependencies for a read operation, similarly with FOR_WRITE. */
1532
1533 static void
1534 flush_pending_lists (struct deps *deps, rtx insn, int for_read,
1535 int for_write)
1536 {
1537 if (for_write)
1538 {
1539 add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1540 1, REG_DEP_ANTI);
1541 if (!deps->readonly)
1542 {
1543 free_EXPR_LIST_list (&deps->pending_read_mems);
1544 deps->pending_read_list_length = 0;
1545 }
1546 }
1547
1548 add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
1549 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
1550
1551 add_dependence_list_and_free (deps, insn,
1552 &deps->last_pending_memory_flush, 1,
1553 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
1554 if (!deps->readonly)
1555 {
1556 free_EXPR_LIST_list (&deps->pending_write_mems);
1557 deps->pending_write_list_length = 0;
1558
1559 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1560 deps->pending_flush_length = 1;
1561 }
1562 }
1563 \f
1564 /* Instruction whose dependencies we are analyzing. */
1565 static rtx cur_insn = NULL_RTX;
1566
1567 /* Implement hooks for haifa scheduler. */
1568
1569 static void
1570 haifa_start_insn (rtx insn)
1571 {
1572 gcc_assert (insn && !cur_insn);
1573
1574 cur_insn = insn;
1575 }
1576
1577 static void
1578 haifa_finish_insn (void)
1579 {
1580 cur_insn = NULL;
1581 }
1582
1583 void
1584 haifa_note_reg_set (int regno)
1585 {
1586 SET_REGNO_REG_SET (reg_pending_sets, regno);
1587 }
1588
1589 void
1590 haifa_note_reg_clobber (int regno)
1591 {
1592 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1593 }
1594
1595 void
1596 haifa_note_reg_use (int regno)
1597 {
1598 SET_REGNO_REG_SET (reg_pending_uses, regno);
1599 }
1600
1601 static void
1602 haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds)
1603 {
1604 if (!(ds & SPECULATIVE))
1605 {
1606 mem = NULL_RTX;
1607 pending_mem = NULL_RTX;
1608 }
1609 else
1610 gcc_assert (ds & BEGIN_DATA);
1611
1612 {
1613 dep_def _dep, *dep = &_dep;
1614
1615 init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
1616 current_sched_info->flags & USE_DEPS_LIST ? ds : -1);
1617 maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1618 }
1619
1620 }
1621
1622 static void
1623 haifa_note_dep (rtx elem, ds_t ds)
1624 {
1625 dep_def _dep;
1626 dep_t dep = &_dep;
1627
1628 init_dep (dep, elem, cur_insn, ds_to_dt (ds));
1629 maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1630 }
1631
1632 static void
1633 note_reg_use (int r)
1634 {
1635 if (sched_deps_info->note_reg_use)
1636 sched_deps_info->note_reg_use (r);
1637 }
1638
1639 static void
1640 note_reg_set (int r)
1641 {
1642 if (sched_deps_info->note_reg_set)
1643 sched_deps_info->note_reg_set (r);
1644 }
1645
1646 static void
1647 note_reg_clobber (int r)
1648 {
1649 if (sched_deps_info->note_reg_clobber)
1650 sched_deps_info->note_reg_clobber (r);
1651 }
1652
1653 static void
1654 note_mem_dep (rtx m1, rtx m2, rtx e, ds_t ds)
1655 {
1656 if (sched_deps_info->note_mem_dep)
1657 sched_deps_info->note_mem_dep (m1, m2, e, ds);
1658 }
1659
1660 static void
1661 note_dep (rtx e, ds_t ds)
1662 {
1663 if (sched_deps_info->note_dep)
1664 sched_deps_info->note_dep (e, ds);
1665 }
1666
1667 /* Return the reg_note corresponding to DS. */
1668 enum reg_note
1669 ds_to_dt (ds_t ds)
1670 {
1671 if (ds & DEP_TRUE)
1672 return REG_DEP_TRUE;
1673 else if (ds & DEP_OUTPUT)
1674 return REG_DEP_OUTPUT;
1675 else
1676 {
1677 gcc_assert (ds & DEP_ANTI);
1678 return REG_DEP_ANTI;
1679 }
1680 }
1681 \f
1682
1683 /* Internal variable for sched_analyze_[12] () functions.
1684 If it is nonzero, this means that sched_analyze_[12] looks
1685 at the outermost SET. */
1686 static bool can_start_lhs_rhs_p;
1687
1688 /* Extend reg info for the deps context DEPS given that
1689 we have just generated a register numbered REGNO. */
1690 static void
1691 extend_deps_reg_info (struct deps *deps, int regno)
1692 {
1693 int max_regno = regno + 1;
1694
1695 gcc_assert (!reload_completed);
1696
1697 /* In a readonly context, it would not hurt to extend info,
1698 but it should not be needed. */
1699 if (reload_completed && deps->readonly)
1700 {
1701 deps->max_reg = max_regno;
1702 return;
1703 }
1704
1705 if (max_regno > deps->max_reg)
1706 {
1707 deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
1708 max_regno);
1709 memset (&deps->reg_last[deps->max_reg],
1710 0, (max_regno - deps->max_reg)
1711 * sizeof (struct deps_reg));
1712 deps->max_reg = max_regno;
1713 }
1714 }
1715
1716 /* Extends REG_INFO_P if needed. */
1717 void
1718 maybe_extend_reg_info_p (void)
1719 {
1720 /* Extend REG_INFO_P, if needed. */
1721 if ((unsigned int)max_regno - 1 >= reg_info_p_size)
1722 {
1723 size_t new_reg_info_p_size = max_regno + 128;
1724
1725 gcc_assert (!reload_completed && sel_sched_p ());
1726
1727 reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
1728 new_reg_info_p_size,
1729 reg_info_p_size,
1730 sizeof (*reg_info_p));
1731 reg_info_p_size = new_reg_info_p_size;
1732 }
1733 }
1734
1735 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
1736 The type of the reference is specified by REF and can be SET,
1737 CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
1738
1739 static void
1740 sched_analyze_reg (struct deps *deps, int regno, enum machine_mode mode,
1741 enum rtx_code ref, rtx insn)
1742 {
1743 /* We could emit new pseudos in renaming. Extend the reg structures. */
1744 if (!reload_completed && sel_sched_p ()
1745 && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
1746 extend_deps_reg_info (deps, regno);
1747
1748 maybe_extend_reg_info_p ();
1749
1750 /* A hard reg in a wide mode may really be multiple registers.
1751 If so, mark all of them just like the first. */
1752 if (regno < FIRST_PSEUDO_REGISTER)
1753 {
1754 int i = hard_regno_nregs[regno][mode];
1755 if (ref == SET)
1756 {
1757 while (--i >= 0)
1758 note_reg_set (regno + i);
1759 }
1760 else if (ref == USE)
1761 {
1762 while (--i >= 0)
1763 note_reg_use (regno + i);
1764 }
1765 else
1766 {
1767 while (--i >= 0)
1768 note_reg_clobber (regno + i);
1769 }
1770 }
1771
1772 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
1773 it does not reload. Ignore these as they have served their
1774 purpose already. */
1775 else if (regno >= deps->max_reg)
1776 {
1777 enum rtx_code code = GET_CODE (PATTERN (insn));
1778 gcc_assert (code == USE || code == CLOBBER);
1779 }
1780
1781 else
1782 {
1783 if (ref == SET)
1784 note_reg_set (regno);
1785 else if (ref == USE)
1786 note_reg_use (regno);
1787 else
1788 note_reg_clobber (regno);
1789
1790 /* Pseudos that are REG_EQUIV to something may be replaced
1791 by that during reloading. We need only add dependencies for
1792 the address in the REG_EQUIV note. */
1793 if (!reload_completed && get_reg_known_equiv_p (regno))
1794 {
1795 rtx t = get_reg_known_value (regno);
1796 if (MEM_P (t))
1797 sched_analyze_2 (deps, XEXP (t, 0), insn);
1798 }
1799
1800 /* Don't let it cross a call after scheduling if it doesn't
1801 already cross one. */
1802 if (REG_N_CALLS_CROSSED (regno) == 0)
1803 {
1804 if (!deps->readonly
1805 && ref == USE)
1806 deps->sched_before_next_call
1807 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
1808 else
1809 add_dependence_list (insn, deps->last_function_call, 1,
1810 REG_DEP_ANTI);
1811 }
1812 }
1813 }
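
/* Illustrative sketch (not part of the original file): for a hard
   register in a wide mode, every covered register is recorded.  On a
   32-bit target where hard_regno_nregs[0][DImode] == 2, a SET of
   (reg:DI 0) is noted as setting both hard registers 0 and 1, so later
   references to either word are ordered correctly.  */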
1814
1815 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
1816 rtx, X, creating all dependencies generated by the write to the
1817 destination of X, and reads of everything mentioned. */
1818
1819 static void
1820 sched_analyze_1 (struct deps *deps, rtx x, rtx insn)
1821 {
1822 rtx dest = XEXP (x, 0);
1823 enum rtx_code code = GET_CODE (x);
1824 bool cslr_p = can_start_lhs_rhs_p;
1825
1826 can_start_lhs_rhs_p = false;
1827
1828 gcc_assert (dest);
1829 if (dest == 0)
1830 return;
1831
1832 if (cslr_p && sched_deps_info->start_lhs)
1833 sched_deps_info->start_lhs (dest);
1834
1835 if (GET_CODE (dest) == PARALLEL)
1836 {
1837 int i;
1838
1839 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1840 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1841 sched_analyze_1 (deps,
1842 gen_rtx_CLOBBER (VOIDmode,
1843 XEXP (XVECEXP (dest, 0, i), 0)),
1844 insn);
1845
1846 if (cslr_p && sched_deps_info->finish_lhs)
1847 sched_deps_info->finish_lhs ();
1848
1849 if (code == SET)
1850 {
1851 can_start_lhs_rhs_p = cslr_p;
1852
1853 sched_analyze_2 (deps, SET_SRC (x), insn);
1854
1855 can_start_lhs_rhs_p = false;
1856 }
1857
1858 return;
1859 }
1860
1861 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
1862 || GET_CODE (dest) == ZERO_EXTRACT)
1863 {
1864 if (GET_CODE (dest) == STRICT_LOW_PART
1865 || GET_CODE (dest) == ZERO_EXTRACT
1866 || df_read_modify_subreg_p (dest))
1867 {
1868 /* These both read and modify the result. We must handle
1869 them as writes to get proper dependencies for following
1870 instructions. We must handle them as reads to get proper
1871 dependencies from this to previous instructions.
1872 Thus we need to call sched_analyze_2. */
1873
1874 sched_analyze_2 (deps, XEXP (dest, 0), insn);
1875 }
1876 if (GET_CODE (dest) == ZERO_EXTRACT)
1877 {
1878 /* The second and third arguments are values read by this insn. */
1879 sched_analyze_2 (deps, XEXP (dest, 1), insn);
1880 sched_analyze_2 (deps, XEXP (dest, 2), insn);
1881 }
1882 dest = XEXP (dest, 0);
1883 }
1884
1885 if (REG_P (dest))
1886 {
1887 int regno = REGNO (dest);
1888 enum machine_mode mode = GET_MODE (dest);
1889
1890 sched_analyze_reg (deps, regno, mode, code, insn);
1891
1892 #ifdef STACK_REGS
1893 /* Treat all writes to a stack register as modifying the TOS. */
1894 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
1895 {
1896 /* Avoid analyzing the same register twice. */
1897 if (regno != FIRST_STACK_REG)
1898 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
1899 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
1900 }
1901 #endif
1902 }
1903 else if (MEM_P (dest))
1904 {
1905 /* Writing memory. */
1906 rtx t = dest;
1907
1908 if (sched_deps_info->use_cselib)
1909 {
1910 t = shallow_copy_rtx (dest);
1911 cselib_lookup (XEXP (t, 0), Pmode, 1);
1912 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
1913 }
1914 t = canon_rtx (t);
1915
1916 /* Pending lists can't get larger in a readonly context. */
1917 if (!deps->readonly
1918 && ((deps->pending_read_list_length + deps->pending_write_list_length)
1919 > MAX_PENDING_LIST_LENGTH))
1920 {
1921 /* Flush all pending reads and writes to prevent the pending lists
1922 from getting any larger. Insn scheduling runs too slowly when
1923 these lists get long. When compiling GCC with itself,
1924 this flush occurs 8 times for sparc, and 10 times for m88k using
1925 the default value of 32. */
1926 flush_pending_lists (deps, insn, false, true);
1927 }
1928 else
1929 {
1930 rtx pending, pending_mem;
1931
1932 pending = deps->pending_read_insns;
1933 pending_mem = deps->pending_read_mems;
1934 while (pending)
1935 {
1936 if (anti_dependence (XEXP (pending_mem, 0), t)
1937 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
1938 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
1939 DEP_ANTI);
1940
1941 pending = XEXP (pending, 1);
1942 pending_mem = XEXP (pending_mem, 1);
1943 }
1944
1945 pending = deps->pending_write_insns;
1946 pending_mem = deps->pending_write_mems;
1947 while (pending)
1948 {
1949 if (output_dependence (XEXP (pending_mem, 0), t)
1950 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
1951 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
1952 DEP_OUTPUT);
1953
1954 pending = XEXP (pending, 1);
1955 pending_mem = XEXP (pending_mem, 1);
1956 }
1957
1958 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
1959 REG_DEP_ANTI);
1960
1961 if (!deps->readonly)
1962 add_insn_mem_dependence (deps, false, insn, dest);
1963 }
1964 sched_analyze_2 (deps, XEXP (dest, 0), insn);
1965 }
1966
1967 if (cslr_p && sched_deps_info->finish_lhs)
1968 sched_deps_info->finish_lhs ();
1969
1970 /* Analyze reads. */
1971 if (GET_CODE (x) == SET)
1972 {
1973 can_start_lhs_rhs_p = cslr_p;
1974
1975 sched_analyze_2 (deps, SET_SRC (x), insn);
1976
1977 can_start_lhs_rhs_p = false;
1978 }
1979 }
1980
1981 /* Analyze the uses of memory and registers in rtx X in INSN. */
1982 static void
1983 sched_analyze_2 (struct deps *deps, rtx x, rtx insn)
1984 {
1985 int i;
1986 int j;
1987 enum rtx_code code;
1988 const char *fmt;
1989 bool cslr_p = can_start_lhs_rhs_p;
1990
1991 can_start_lhs_rhs_p = false;
1992
1993 gcc_assert (x);
1994 if (x == 0)
1995 return;
1996
1997 if (cslr_p && sched_deps_info->start_rhs)
1998 sched_deps_info->start_rhs (x);
1999
2000 code = GET_CODE (x);
2001
2002 switch (code)
2003 {
2004 case CONST_INT:
2005 case CONST_DOUBLE:
2006 case CONST_FIXED:
2007 case CONST_VECTOR:
2008 case SYMBOL_REF:
2009 case CONST:
2010 case LABEL_REF:
2011 /* Ignore constants. */
2012 if (cslr_p && sched_deps_info->finish_rhs)
2013 sched_deps_info->finish_rhs ();
2014
2015 return;
2016
2017 #ifdef HAVE_cc0
2018 case CC0:
2019 /* User of CC0 depends on immediately preceding insn. */
2020 SCHED_GROUP_P (insn) = 1;
2021 /* Don't move CC0 setter to another block (it can set up the
2022 same flag for previous CC0 users which is safe). */
2023 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2024
2025 if (cslr_p && sched_deps_info->finish_rhs)
2026 sched_deps_info->finish_rhs ();
2027
2028 return;
2029 #endif
2030
2031 case REG:
2032 {
2033 int regno = REGNO (x);
2034 enum machine_mode mode = GET_MODE (x);
2035
2036 sched_analyze_reg (deps, regno, mode, USE, insn);
2037
2038 #ifdef STACK_REGS
2039 /* Treat all reads of a stack register as modifying the TOS. */
2040 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2041 {
2042 /* Avoid analyzing the same register twice. */
2043 if (regno != FIRST_STACK_REG)
2044 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2045 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2046 }
2047 #endif
2048
2049 if (cslr_p && sched_deps_info->finish_rhs)
2050 sched_deps_info->finish_rhs ();
2051
2052 return;
2053 }
2054
2055 case MEM:
2056 {
2057 /* Reading memory. */
2058 rtx u;
2059 rtx pending, pending_mem;
2060 rtx t = x;
2061
2062 if (sched_deps_info->use_cselib)
2063 {
2064 t = shallow_copy_rtx (t);
2065 cselib_lookup (XEXP (t, 0), Pmode, 1);
2066 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
2067 }
2068 t = canon_rtx (t);
2069 pending = deps->pending_read_insns;
2070 pending_mem = deps->pending_read_mems;
2071 while (pending)
2072 {
2073 if (read_dependence (XEXP (pending_mem, 0), t)
2074 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2075 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2076 DEP_ANTI);
2077
2078 pending = XEXP (pending, 1);
2079 pending_mem = XEXP (pending_mem, 1);
2080 }
2081
2082 pending = deps->pending_write_insns;
2083 pending_mem = deps->pending_write_mems;
2084 while (pending)
2085 {
2086 if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
2087 t, rtx_varies_p)
2088 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2089 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2090 sched_deps_info->generate_spec_deps
2091 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2092
2093 pending = XEXP (pending, 1);
2094 pending_mem = XEXP (pending_mem, 1);
2095 }
2096
2097 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2098 {
2099 if (! JUMP_P (XEXP (u, 0)))
2100 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2101 else if (deps_may_trap_p (x))
2102 {
2103 if ((sched_deps_info->generate_spec_deps)
2104 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2105 {
2106 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2107 MAX_DEP_WEAK);
2108
2109 note_dep (XEXP (u, 0), ds);
2110 }
2111 else
2112 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2113 }
2114 }
2115
2116 /* Always add these dependencies to pending_reads, since
2117 this insn may be followed by a write. */
2118 if (!deps->readonly)
2119 add_insn_mem_dependence (deps, true, insn, x);
2120
2121 /* Recurse on the address computation. */
2122 sched_analyze_2 (deps, XEXP (x, 0), insn);
2123
2124 if (cslr_p && sched_deps_info->finish_rhs)
2125 sched_deps_info->finish_rhs ();
2126
2127 return;
2128 }
2129
2130 /* Force pending stores to memory in case a trap handler needs them. */
2131 case TRAP_IF:
2132 flush_pending_lists (deps, insn, true, false);
2133 break;
2134
2135 case UNSPEC_VOLATILE:
2136 flush_pending_lists (deps, insn, true, true);
2137 /* FALLTHRU */
2138
2139 case ASM_OPERANDS:
2140 case ASM_INPUT:
2141 {
2142 /* Traditional and volatile asm instructions must be considered to use
2143 and clobber all hard registers, all pseudo-registers and all of
2144 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2145
2146 Consider for instance a volatile asm that changes the fpu rounding
2147 mode. An insn should not be moved across this even if it only uses
2148 pseudo-regs because it might give an incorrectly rounded result. */
2149 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2150 reg_pending_barrier = TRUE_BARRIER;
2151
2152 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2153 We cannot simply fall through here, since we would then be confused
2154 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate a
2155 traditional asm the way a top-level ASM_INPUT does. */
2156
2157 if (code == ASM_OPERANDS)
2158 {
2159 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2160 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2161
2162 if (cslr_p && sched_deps_info->finish_rhs)
2163 sched_deps_info->finish_rhs ();
2164
2165 return;
2166 }
2167 break;
2168 }
2169
2170 case PRE_DEC:
2171 case POST_DEC:
2172 case PRE_INC:
2173 case POST_INC:
2174 /* These both read and modify the result. We must handle them as writes
2175 to get proper dependencies for following instructions. We must handle
2176 them as reads to get proper dependencies from this to previous
2177 instructions. Thus we need to pass them to both sched_analyze_1
2178 and sched_analyze_2. We must call sched_analyze_2 first in order
2179 to get the proper antecedent for the read. */
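/* For instance, in (mem:SI (post_inc:SI (reg:SI 1))) the address
register is read to form the address and also written with the
incremented value, so it needs both a USE and a SET record. */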
2180 sched_analyze_2 (deps, XEXP (x, 0), insn);
2181 sched_analyze_1 (deps, x, insn);
2182
2183 if (cslr_p && sched_deps_info->finish_rhs)
2184 sched_deps_info->finish_rhs ();
2185
2186 return;
2187
2188 case POST_MODIFY:
2189 case PRE_MODIFY:
2190 /* op0 = op0 op op1: both operands are read, and op0 is also written. */
2191 sched_analyze_2 (deps, XEXP (x, 0), insn);
2192 sched_analyze_2 (deps, XEXP (x, 1), insn);
2193 sched_analyze_1 (deps, x, insn);
2194
2195 if (cslr_p && sched_deps_info->finish_rhs)
2196 sched_deps_info->finish_rhs ();
2197
2198 return;
2199
2200 default:
2201 break;
2202 }
2203
2204 /* Other cases: walk the insn. */
2205 fmt = GET_RTX_FORMAT (code);
2206 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2207 {
2208 if (fmt[i] == 'e')
2209 sched_analyze_2 (deps, XEXP (x, i), insn);
2210 else if (fmt[i] == 'E')
2211 for (j = 0; j < XVECLEN (x, i); j++)
2212 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2213 }
2214
2215 if (cslr_p && sched_deps_info->finish_rhs)
2216 sched_deps_info->finish_rhs ();
2217 }
2218
2219 /* Analyze an INSN with pattern X to find all dependencies. */
2220 static void
2221 sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
2222 {
2223 RTX_CODE code = GET_CODE (x);
2224 rtx link;
2225 unsigned i;
2226 reg_set_iterator rsi;
2227
2228 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2229 && code == SET);
2230
2231 if (code == COND_EXEC)
2232 {
2233 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2234
2235 /* ??? Should be recording conditions so we reduce the number of
2236 false dependencies. */
2237 x = COND_EXEC_CODE (x);
2238 code = GET_CODE (x);
2239 }
2240 if (code == SET || code == CLOBBER)
2241 {
2242 sched_analyze_1 (deps, x, insn);
2243
2244 /* Bare clobber insns are used for letting life analysis, reg-stack
2245 and others know that a value is dead. Depend on the last call
2246 instruction so that reg-stack won't get confused. */
2247 if (code == CLOBBER)
2248 add_dependence_list (insn, deps->last_function_call, 1, REG_DEP_OUTPUT);
2249 }
2250 else if (code == PARALLEL)
2251 {
2252 for (i = XVECLEN (x, 0); i--;)
2253 {
2254 rtx sub = XVECEXP (x, 0, i);
2255 code = GET_CODE (sub);
2256
2257 if (code == COND_EXEC)
2258 {
2259 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2260 sub = COND_EXEC_CODE (sub);
2261 code = GET_CODE (sub);
2262 }
2263 if (code == SET || code == CLOBBER)
2264 sched_analyze_1 (deps, sub, insn);
2265 else
2266 sched_analyze_2 (deps, sub, insn);
2267 }
2268 }
2269 else
2270 sched_analyze_2 (deps, x, insn);
2271
2272 /* Mark registers CLOBBERED or used by called function. */
2273 if (CALL_P (insn))
2274 {
2275 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2276 {
2277 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2278 sched_analyze_1 (deps, XEXP (link, 0), insn);
2279 else
2280 sched_analyze_2 (deps, XEXP (link, 0), insn);
2281 }
2282 if (find_reg_note (insn, REG_SETJMP, NULL))
2283 reg_pending_barrier = MOVE_BARRIER;
2284 }
2285
2286 if (JUMP_P (insn))
2287 {
2288 rtx next;
2289 next = next_nonnote_insn (insn);
2290 if (next && BARRIER_P (next))
2291 reg_pending_barrier = MOVE_BARRIER;
2292 else
2293 {
2294 rtx pending, pending_mem;
2295
2296 if (sched_deps_info->compute_jump_reg_dependencies)
2297 {
2298 regset_head tmp_uses, tmp_sets;
2299 INIT_REG_SET (&tmp_uses);
2300 INIT_REG_SET (&tmp_sets);
2301
2302 (*sched_deps_info->compute_jump_reg_dependencies)
2303 (insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
2304 /* Make latency of jump equal to 0 by using anti-dependence. */
2305 EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi)
2306 {
2307 struct deps_reg *reg_last = &deps->reg_last[i];
2308 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
2309 add_dependence_list (insn, reg_last->clobbers, 0,
2310 REG_DEP_ANTI);
2311
2312 if (!deps->readonly)
2313 {
2314 reg_last->uses_length++;
2315 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2316 }
2317 }
2318 IOR_REG_SET (reg_pending_sets, &tmp_sets);
2319
2320 CLEAR_REG_SET (&tmp_uses);
2321 CLEAR_REG_SET (&tmp_sets);
2322 }
2323
2324 /* All memory writes and volatile reads must happen before the
2325 jump. Non-volatile reads need to happen before the jump only if
2326 their result is live according to the register-use mask above. */
2327
2328 pending = deps->pending_write_insns;
2329 pending_mem = deps->pending_write_mems;
2330 while (pending)
2331 {
2332 if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2333 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2334 pending = XEXP (pending, 1);
2335 pending_mem = XEXP (pending_mem, 1);
2336 }
2337
2338 pending = deps->pending_read_insns;
2339 pending_mem = deps->pending_read_mems;
2340 while (pending)
2341 {
2342 if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
2343 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2344 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2345 pending = XEXP (pending, 1);
2346 pending_mem = XEXP (pending_mem, 1);
2347 }
2348
2349 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2350 REG_DEP_ANTI);
2351 }
2352 }
2353
2354 /* If this instruction can throw an exception, then moving it changes
2355 where block boundaries fall. This is mighty confusing elsewhere.
2356 Therefore, prevent such an instruction from being moved. Same for
2357 non-jump instructions that define block boundaries.
2358 ??? Unclear whether this is still necessary in EBB mode. If not,
2359 add_branch_dependences should be adjusted for RGN mode instead. */
2360 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
2361 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
2362 reg_pending_barrier = MOVE_BARRIER;
2363
2364 /* If the current insn is conditional, we can't free any
2365 of the lists. */
2366 if (sched_has_condition_p (insn))
2367 {
2368 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2369 {
2370 struct deps_reg *reg_last = &deps->reg_last[i];
2371 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2372 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2373
2374 if (!deps->readonly)
2375 {
2376 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2377 reg_last->uses_length++;
2378 }
2379 }
2380 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2381 {
2382 struct deps_reg *reg_last = &deps->reg_last[i];
2383 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2384 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2385
2386 if (!deps->readonly)
2387 {
2388 reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
2389 reg_last->clobbers_length++;
2390 }
2391 }
2392 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2393 {
2394 struct deps_reg *reg_last = &deps->reg_last[i];
2395 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2396 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
2397 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2398
2399 if (!deps->readonly)
2400 {
2401 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2402 SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
2403 }
2404 }
2405 }
2406 else
2407 {
2408 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2409 {
2410 struct deps_reg *reg_last = &deps->reg_last[i];
2411 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2412 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2413
2414 if (!deps->readonly)
2415 {
2416 reg_last->uses_length++;
2417 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2418 }
2419 }
2420 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2421 {
2422 struct deps_reg *reg_last = &deps->reg_last[i];
2423 if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
2424 || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
2425 {
2426 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
2427 REG_DEP_OUTPUT);
2428 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
2429 REG_DEP_ANTI);
2430 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
2431 REG_DEP_OUTPUT);
2432
2433 if (!deps->readonly)
2434 {
2435 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2436 reg_last->clobbers_length = 0;
2437 reg_last->uses_length = 0;
2438 }
2439 }
2440 else
2441 {
2442 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2443 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2444 }
2445
2446 if (!deps->readonly)
2447 {
2448 reg_last->clobbers_length++;
2449 reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
2450 }
2451 }
2452 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2453 {
2454 struct deps_reg *reg_last = &deps->reg_last[i];
2455 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
2456 REG_DEP_OUTPUT);
2457 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
2458 REG_DEP_OUTPUT);
2459 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
2460 REG_DEP_ANTI);
2461
2462 if (!deps->readonly)
2463 {
2464 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2465 reg_last->uses_length = 0;
2466 reg_last->clobbers_length = 0;
2467 CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);
2468 }
2469 }
2470 }
2471
2472 if (!deps->readonly)
2473 {
2474 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
2475 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
2476 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
2477
2478 /* Record any pending barrier we found. */
2479 deps->last_reg_pending_barrier = reg_pending_barrier;
2480 }
2481
2482 CLEAR_REG_SET (reg_pending_uses);
2483 CLEAR_REG_SET (reg_pending_clobbers);
2484 CLEAR_REG_SET (reg_pending_sets);
2485
2486 /* Add dependencies if a scheduling barrier was found. */
2487 if (reg_pending_barrier)
2488 {
2489 /* In the case of a barrier, most of the dependencies added are not
2490 real, so we use anti-dependences here. */
2491 if (sched_has_condition_p (insn))
2492 {
2493 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
2494 {
2495 struct deps_reg *reg_last = &deps->reg_last[i];
2496 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2497 add_dependence_list
2498 (insn, reg_last->sets, 0,
2499 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
2500 add_dependence_list
2501 (insn, reg_last->clobbers, 0,
2502 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
2503 }
2504 }
2505 else
2506 {
2507 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
2508 {
2509 struct deps_reg *reg_last = &deps->reg_last[i];
2510 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
2511 REG_DEP_ANTI);
2512 add_dependence_list_and_free
2513 (deps, insn, &reg_last->sets, 0,
2514 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
2515 add_dependence_list_and_free
2516 (deps, insn, &reg_last->clobbers, 0,
2517 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
2518
2519 if (!deps->readonly)
2520 {
2521 reg_last->uses_length = 0;
2522 reg_last->clobbers_length = 0;
2523 }
2524 }
2525 }
2526
2527 if (!deps->readonly)
2528 for (i = 0; i < (unsigned)deps->max_reg; i++)
2529 {
2530 struct deps_reg *reg_last = &deps->reg_last[i];
2531 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2532 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
2533 }
2534
2535 /* Flush pending lists on jumps, but not on speculative checks. */
2536 if (JUMP_P (insn) && !(sel_sched_p ()
2537 && sel_insn_is_speculation_check (insn)))
2538 flush_pending_lists (deps, insn, true, true);
2539
2540 if (!deps->readonly)
2541 CLEAR_REG_SET (&deps->reg_conditional_sets);
2542 reg_pending_barrier = NOT_A_BARRIER;
2543 }
2544
2545 /* If a post-call group is still open, see if it should remain so.
2546 This insn must be a simple move of a hard reg to a pseudo or
2547 vice-versa.
2548
2549 We must avoid moving these insns for correctness on
2550 SMALL_REGISTER_CLASS machines, and for special registers like
2551 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
2552 hard regs for all targets. */
2553
2554 if (deps->in_post_call_group_p)
2555 {
2556 rtx tmp, set = single_set (insn);
2557 int src_regno, dest_regno;
2558
2559 if (set == NULL)
2560 goto end_call_group;
2561
2562 tmp = SET_DEST (set);
2563 if (GET_CODE (tmp) == SUBREG)
2564 tmp = SUBREG_REG (tmp);
2565 if (REG_P (tmp))
2566 dest_regno = REGNO (tmp);
2567 else
2568 goto end_call_group;
2569
2570 tmp = SET_SRC (set);
2571 if (GET_CODE (tmp) == SUBREG)
2572 tmp = SUBREG_REG (tmp);
2573 if ((GET_CODE (tmp) == PLUS
2574 || GET_CODE (tmp) == MINUS)
2575 && REG_P (XEXP (tmp, 0))
2576 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
2577 && dest_regno == STACK_POINTER_REGNUM)
2578 src_regno = STACK_POINTER_REGNUM;
2579 else if (REG_P (tmp))
2580 src_regno = REGNO (tmp);
2581 else
2582 goto end_call_group;
2583
2584 if (src_regno < FIRST_PSEUDO_REGISTER
2585 || dest_regno < FIRST_PSEUDO_REGISTER)
2586 {
2587 if (!deps->readonly
2588 && deps->in_post_call_group_p == post_call_initial)
2589 deps->in_post_call_group_p = post_call;
2590
2591 if (!sel_sched_p () || sched_emulate_haifa_p)
2592 {
2593 SCHED_GROUP_P (insn) = 1;
2594 CANT_MOVE (insn) = 1;
2595 }
2596 }
2597 else
2598 {
2599 end_call_group:
2600 if (!deps->readonly)
2601 deps->in_post_call_group_p = not_post_call;
2602 }
2603 }
2604
2605 if ((current_sched_info->flags & DO_SPECULATION)
2606 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
2607 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
2608 be speculated. */
2609 {
2610 if (sel_sched_p ())
2611 sel_mark_hard_insn (insn);
2612 else
2613 {
2614 sd_iterator_def sd_it;
2615 dep_t dep;
2616
2617 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
2618 sd_iterator_cond (&sd_it, &dep);)
2619 change_spec_dep_to_hard (sd_it);
2620 }
2621 }
2622 }
2623
2624 /* Analyze INSN with DEPS as a context. */
2625 void
2626 deps_analyze_insn (struct deps *deps, rtx insn)
2627 {
2628 if (sched_deps_info->start_insn)
2629 sched_deps_info->start_insn (insn);
2630
2631 if (NONJUMP_INSN_P (insn) || JUMP_P (insn))
2632 {
2633 /* Make each JUMP_INSN (but not a speculative check)
2634 a scheduling barrier for memory references. */
2635 if (!deps->readonly
2636 && JUMP_P (insn)
2637 && !(sel_sched_p ()
2638 && sel_insn_is_speculation_check (insn)))
2639 {
2640 /* Keep the list a reasonable size. */
2641 if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
2642 flush_pending_lists (deps, insn, true, true);
2643 else
2644 deps->last_pending_memory_flush
2645 = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
2646 }
2647
2648 sched_analyze_insn (deps, PATTERN (insn), insn);
2649 }
2650 else if (CALL_P (insn))
2651 {
2652 int i;
2653
2654 CANT_MOVE (insn) = 1;
2655
2656 if (find_reg_note (insn, REG_SETJMP, NULL))
2657 {
2658 /* This is setjmp. Assume that all registers, not just
2659 hard registers, may be clobbered by this call. */
2660 reg_pending_barrier = MOVE_BARRIER;
2661 }
2662 else
2663 {
2664 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2665 /* A call may read and modify global register variables. */
2666 if (global_regs[i])
2667 {
2668 SET_REGNO_REG_SET (reg_pending_sets, i);
2669 SET_REGNO_REG_SET (reg_pending_uses, i);
2670 }
2671 /* Other call-clobbered hard regs may be clobbered.
2672 Since we only have a choice between 'might be clobbered'
2673 and 'definitely not clobbered', we must include all
2674 partly call-clobbered registers here. */
2675 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
2676 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
2677 SET_REGNO_REG_SET (reg_pending_clobbers, i);
2678 /* We don't know what set of fixed registers might be used
2679 by the function, but it is certain that the stack pointer
2680 is among them, but be conservative. */
2681 else if (fixed_regs[i])
2682 SET_REGNO_REG_SET (reg_pending_uses, i);
2683 /* The frame pointer is normally not used by the function
2684 itself, but by the debugger. */
2685 /* ??? MIPS o32 is an exception. It uses the frame pointer
2686 in the macro expansion of jal but does not represent this
2687 fact in the call_insn rtl. */
2688 else if (i == FRAME_POINTER_REGNUM
2689 || (i == HARD_FRAME_POINTER_REGNUM
2690 && (! reload_completed || frame_pointer_needed)))
2691 SET_REGNO_REG_SET (reg_pending_uses, i);
2692 }
2693
2694 /* For each insn which shouldn't cross a call, add a dependence
2695 between that insn and this call insn. */
2696 add_dependence_list_and_free (deps, insn,
2697 &deps->sched_before_next_call, 1,
2698 REG_DEP_ANTI);
2699
2700 sched_analyze_insn (deps, PATTERN (insn), insn);
2701
2702 /* If CALL were part of a sched group, it would violate the
2703 convention that sched group insns have dependencies only on the
2704 previous instruction; the head of a sched group is no exception,
2705 since the one-dependency-per-insn principle applies to it as well. */
2709 gcc_assert (!SCHED_GROUP_P (insn));
2710
2711 /* In the absence of interprocedural alias analysis, we must flush
2712 all pending reads and writes, and start new dependencies from
2713 here. But only flush writes for constant calls (which may
2714 be passed a pointer to something we haven't written yet). */
2715 flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
2716
2717 if (!deps->readonly)
2718 {
2719 /* Remember the last function call for limiting lifetimes. */
2720 free_INSN_LIST_list (&deps->last_function_call);
2721 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
2722
2723 /* Before reload, begin a post-call group, so as to keep the
2724 lifetimes of hard registers correct. */
2725 if (! reload_completed)
2726 deps->in_post_call_group_p = post_call;
2727 }
2728 }
2729
2730 if (sched_deps_info->use_cselib)
2731 cselib_process_insn (insn);
2732
2733 /* EH_REGION insn notes can not appear until well after we complete
2734 scheduling. */
2735 if (NOTE_P (insn))
2736 gcc_assert (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
2737 && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END);
2738
2739 if (sched_deps_info->finish_insn)
2740 sched_deps_info->finish_insn ();
2741
2742 /* Fixup the dependencies in the sched group. */
2743 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
2744 && SCHED_GROUP_P (insn) && !sel_sched_p ())
2745 fixup_sched_groups (insn);
2746 }
2747
2748 /* Initialize DEPS for the new block beginning with HEAD. */
2749 void
2750 deps_start_bb (struct deps *deps, rtx head)
2751 {
2752 gcc_assert (!deps->readonly);
2753
2754 /* Before reload, if the previous block ended in a call, show that
2755 we are inside a post-call group, so as to keep the lifetimes of
2756 hard registers correct. */
2757 if (! reload_completed && !LABEL_P (head))
2758 {
2759 rtx insn = prev_nonnote_insn (head);
2760
2761 if (insn && CALL_P (insn))
2762 deps->in_post_call_group_p = post_call_initial;
2763 }
2764 }
2765
2766 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
2767 dependencies for each insn. */
2768 void
2769 sched_analyze (struct deps *deps, rtx head, rtx tail)
2770 {
2771 rtx insn;
2772
2773 if (sched_deps_info->use_cselib)
2774 cselib_init (true);
2775
2776 deps_start_bb (deps, head);
2777
2778 for (insn = head;; insn = NEXT_INSN (insn))
2779 {
2781 if (INSN_P (insn))
2782 {
2783 /* Initialize the insn's deps_lists. */
2784 sd_init_insn (insn);
2785 }
2786
2787 deps_analyze_insn (deps, insn);
2788
2789 if (insn == tail)
2790 {
2791 if (sched_deps_info->use_cselib)
2792 cselib_finish ();
2793 return;
2794 }
2795 }
2796 gcc_unreachable ();
2797 }
2798
2799 /* Helper for sched_free_deps ().
2800 Delete INSN's (RESOLVED_P) backward dependencies. */
2801 static void
2802 delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
2803 {
2804 sd_iterator_def sd_it;
2805 dep_t dep;
2806 sd_list_types_def types;
2807
2808 if (resolved_p)
2809 types = SD_LIST_RES_BACK;
2810 else
2811 types = SD_LIST_BACK;
2812
2813 for (sd_it = sd_iterator_start (insn, types);
2814 sd_iterator_cond (&sd_it, &dep);)
2815 {
2816 dep_link_t link = *sd_it.linkp;
2817 dep_node_t node = DEP_LINK_NODE (link);
2818 deps_list_t back_list;
2819 deps_list_t forw_list;
2820
2821 get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
2822 remove_from_deps_list (link, back_list);
2823 delete_dep_node (node);
2824 }
2825 }
2826
2827 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
2828 deps_lists. */
2829 void
2830 sched_free_deps (rtx head, rtx tail, bool resolved_p)
2831 {
2832 rtx insn;
2833 rtx next_tail = NEXT_INSN (tail);
2834
2835 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2836 if (INSN_P (insn) && INSN_LUID (insn) > 0)
2837 {
2838 /* Clear resolved back deps together with their dep_nodes. */
2839 delete_dep_nodes_in_back_deps (insn, resolved_p);
2840
2841 /* Clear forward deps and leave the dep_nodes to the
2842 corresponding back_deps list. */
2843 if (resolved_p)
2844 clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
2845 else
2846 clear_deps_list (INSN_FORW_DEPS (insn));
2847
2848 sd_finish_insn (insn);
2849 }
2850 }
2851 \f
2852 /* Initialize the data dependence analysis context DEPS. */
2854
2855 void
2856 init_deps (struct deps *deps)
2857 {
2858 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
2859
2860 deps->max_reg = max_reg;
2861 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
2862 INIT_REG_SET (&deps->reg_last_in_use);
2863 INIT_REG_SET (&deps->reg_conditional_sets);
2864
2865 deps->pending_read_insns = 0;
2866 deps->pending_read_mems = 0;
2867 deps->pending_write_insns = 0;
2868 deps->pending_write_mems = 0;
2869 deps->pending_read_list_length = 0;
2870 deps->pending_write_list_length = 0;
2871 deps->pending_flush_length = 0;
2872 deps->last_pending_memory_flush = 0;
2873 deps->last_function_call = 0;
2874 deps->sched_before_next_call = 0;
2875 deps->in_post_call_group_p = not_post_call;
2876 deps->last_reg_pending_barrier = NOT_A_BARRIER;
2877 deps->readonly = 0;
2878 }
2879
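/* A minimal usage sketch of the context above (illustrative only: the
real drivers, e.g. the region scheduler, set up sched_deps_info and
luids before getting here). HEAD and TAIL are assumed to delimit one
basic block. */
#if 0
static void
analyze_one_block_example (rtx head, rtx tail)
{
struct deps deps;

init_deps (&deps);
/* Build backward dependencies for every insn in [HEAD, TAIL]. */
sched_analyze (&deps, head, tail);
/* Release the insn lists accumulated in the context. */
free_deps (&deps);
}
#endif
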
2880 /* Free insn lists found in DEPS. */
2881
2882 void
2883 free_deps (struct deps *deps)
2884 {
2885 unsigned i;
2886 reg_set_iterator rsi;
2887
2888 free_INSN_LIST_list (&deps->pending_read_insns);
2889 free_EXPR_LIST_list (&deps->pending_read_mems);
2890 free_INSN_LIST_list (&deps->pending_write_insns);
2891 free_EXPR_LIST_list (&deps->pending_write_mems);
2892 free_INSN_LIST_list (&deps->last_pending_memory_flush);
2893
2894 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
2895 times. For a testcase with 42000 regs and 8000 small basic blocks,
2896 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
2897 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
2898 {
2899 struct deps_reg *reg_last = &deps->reg_last[i];
2900 if (reg_last->uses)
2901 free_INSN_LIST_list (&reg_last->uses);
2902 if (reg_last->sets)
2903 free_INSN_LIST_list (&reg_last->sets);
2904 if (reg_last->clobbers)
2905 free_INSN_LIST_list (&reg_last->clobbers);
2906 }
2907 CLEAR_REG_SET (&deps->reg_last_in_use);
2908 CLEAR_REG_SET (&deps->reg_conditional_sets);
2909
2910 free (deps->reg_last);
2911 deps->reg_last = NULL;
2914 }
2915
2916 /* Remove INSN from the dependence context DEPS. Caution: reg_conditional_sets
2917 is not handled. */
2918 void
2919 remove_from_deps (struct deps *deps, rtx insn)
2920 {
2921 int removed;
2922 unsigned i;
2923 reg_set_iterator rsi;
2924
2925 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
2926 &deps->pending_read_mems);
2927 deps->pending_read_list_length -= removed;
2928 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
2929 &deps->pending_write_mems);
2930 deps->pending_write_list_length -= removed;
2931 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
2932 deps->pending_flush_length -= removed;
2933
2934 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
2935 {
2936 struct deps_reg *reg_last = &deps->reg_last[i];
2937 if (reg_last->uses)
2938 remove_from_dependence_list (insn, &reg_last->uses);
2939 if (reg_last->sets)
2940 remove_from_dependence_list (insn, &reg_last->sets);
2941 if (reg_last->clobbers)
2942 remove_from_dependence_list (insn, &reg_last->clobbers);
2943 if (!reg_last->uses && !reg_last->sets && !reg_last->clobbers)
2944 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
2945 }
2946
2947 if (CALL_P (insn))
2948 remove_from_dependence_list (insn, &deps->last_function_call);
2949 remove_from_dependence_list (insn, &deps->sched_before_next_call);
2950 }
2951
2952 /* Init deps data vector. */
2953 static void
2954 init_deps_data_vector (void)
2955 {
2956 int reserve = (sched_max_luid + 1
2957 - VEC_length (haifa_deps_insn_data_def, h_d_i_d));
2958 if (reserve > 0
2959 && ! VEC_space (haifa_deps_insn_data_def, h_d_i_d, reserve))
2960 VEC_safe_grow_cleared (haifa_deps_insn_data_def, heap, h_d_i_d,
2961 3 * sched_max_luid / 2);
2962 }
2963
2964 /* Initialize or extend (depending on GLOBAL_P) dependency data, using
2965 the dependency caches only when it is profitable to do so. */
2966 void
2967 sched_deps_init (bool global_p)
2968 {
2969 /* Average number of insns in the basic block.
2970 '+ 1' is used to make it nonzero. */
2971 int insns_in_block = sched_max_luid / n_basic_blocks + 1;
2972
2973 init_deps_data_vector ();
2974
2975 /* We use another caching mechanism for selective scheduling, so
2976 we don't use this one. */
2977 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
2978 {
2979 /* ?!? We could save some memory by computing a per-region luid mapping
2980 which could reduce both the number of vectors in the cache and the
2981 size of each vector. Instead we just avoid the cache entirely unless
2982 the average number of instructions in a basic block is very high. See
2983 the comment before the declaration of true_dependency_cache for
2984 what we consider "very high". */
2985 cache_size = 0;
2986 extend_dependency_caches (sched_max_luid, true);
2987 }
2988
2989 if (global_p)
2990 {
2991 dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
2992 /* Allocate lists for one block at a time. */
2993 insns_in_block);
2994 dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
2995 /* Allocate nodes for one block at a time.
2996 We assume that average insn has
2997 5 producers. */
2998 5 * insns_in_block);
2999 }
3000 }
3001
3002
3003 /* Create or extend (depending on CREATE_P) dependency caches to
3004 size N. */
3005 void
3006 extend_dependency_caches (int n, bool create_p)
3007 {
3008 if (create_p || true_dependency_cache)
3009 {
3010 int i, luid = cache_size + n;
3011
3012 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
3013 luid);
3014 output_dependency_cache = XRESIZEVEC (bitmap_head,
3015 output_dependency_cache, luid);
3016 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
3017 luid);
3018
3019 if (current_sched_info->flags & DO_SPECULATION)
3020 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
3021 luid);
3022
3023 for (i = cache_size; i < luid; i++)
3024 {
3025 bitmap_initialize (&true_dependency_cache[i], 0);
3026 bitmap_initialize (&output_dependency_cache[i], 0);
3027 bitmap_initialize (&anti_dependency_cache[i], 0);
3028
3029 if (current_sched_info->flags & DO_SPECULATION)
3030 bitmap_initialize (&spec_dependency_cache[i], 0);
3031 }
3032 cache_size = luid;
3033 }
3034 }
3035
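/* A sketch of how the caches above are consulted (simplified from
ask_dependency_caches): a dependence of consumer CON on producer PRO is
remembered as bit INSN_LUID (PRO) in the bitmap indexed by
INSN_LUID (CON). */
#if 0
static bool
cached_true_dep_p_example (rtx pro, rtx con)
{
return bitmap_bit_p (&true_dependency_cache[INSN_LUID (con)],
INSN_LUID (pro));
}
#endif
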
3036 /* Finalize dependency information for the whole function. */
3037 void
3038 sched_deps_finish (void)
3039 {
3040 gcc_assert (deps_pools_are_empty_p ());
3041 free_alloc_pool_if_empty (&dn_pool);
3042 free_alloc_pool_if_empty (&dl_pool);
3043 gcc_assert (dn_pool == NULL && dl_pool == NULL);
3044
3045 VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d);
3047
3048 if (true_dependency_cache)
3049 {
3050 int i;
3051
3052 for (i = 0; i < cache_size; i++)
3053 {
3054 bitmap_clear (&true_dependency_cache[i]);
3055 bitmap_clear (&output_dependency_cache[i]);
3056 bitmap_clear (&anti_dependency_cache[i]);
3057
3058 if (sched_deps_info->generate_spec_deps)
3059 bitmap_clear (&spec_dependency_cache[i]);
3060 }
3061 free (true_dependency_cache);
3062 true_dependency_cache = NULL;
3063 free (output_dependency_cache);
3064 output_dependency_cache = NULL;
3065 free (anti_dependency_cache);
3066 anti_dependency_cache = NULL;
3067
3068 if (sched_deps_info->generate_spec_deps)
3069 {
3070 free (spec_dependency_cache);
3071 spec_dependency_cache = NULL;
3072 }
3073 cache_size = 0;
3074 }
3075 }
3076
3077 /* Initialize some global variables needed by the dependency analysis
3078 code. */
3079
3080 void
3081 init_deps_global (void)
3082 {
3083 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
3084 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
3085 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
3086 reg_pending_barrier = NOT_A_BARRIER;
3087
3088 if (!sel_sched_p () || sched_emulate_haifa_p)
3089 {
3090 sched_deps_info->start_insn = haifa_start_insn;
3091 sched_deps_info->finish_insn = haifa_finish_insn;
3092
3093 sched_deps_info->note_reg_set = haifa_note_reg_set;
3094 sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
3095 sched_deps_info->note_reg_use = haifa_note_reg_use;
3096
3097 sched_deps_info->note_mem_dep = haifa_note_mem_dep;
3098 sched_deps_info->note_dep = haifa_note_dep;
3099 }
3100 }
3101
3102 /* Free everything used by the dependency analysis code. */
3103
3104 void
3105 finish_deps_global (void)
3106 {
3107 FREE_REG_SET (reg_pending_sets);
3108 FREE_REG_SET (reg_pending_clobbers);
3109 FREE_REG_SET (reg_pending_uses);
3110 }
3111
3112 /* Estimate the weakness of dependence between MEM1 and MEM2. */
3113 dw_t
3114 estimate_dep_weak (rtx mem1, rtx mem2)
3115 {
3116 rtx r1, r2;
3117
3118 if (mem1 == mem2)
3119 /* MEMs are the same - don't speculate. */
3120 return MIN_DEP_WEAK;
3121
3122 r1 = XEXP (mem1, 0);
3123 r2 = XEXP (mem2, 0);
3124
3125 if (r1 == r2
3126 || (REG_P (r1) && REG_P (r2)
3127 && REGNO (r1) == REGNO (r2)))
3128 /* Again, MEMs are the same. */
3129 return MIN_DEP_WEAK;
3130 else if ((REG_P (r1) && !REG_P (r2))
3131 || (!REG_P (r1) && REG_P (r2)))
3132 /* Different addressing modes - a reason to be more speculative
3133 than usual. */
3134 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
3135 else
3136 /* We can't say anything about the dependence. */
3137 return UNCERTAIN_DEP_WEAK;
3138 }
3139
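/* A worked example of the heuristic above (illustrative only; register
number 1 is an arbitrary choice): */
#if 0
static void
estimate_dep_weak_example (void)
{
rtx base = gen_rtx_REG (Pmode, 1);
rtx m1 = gen_rtx_MEM (SImode, base);
rtx m2 = gen_rtx_MEM (SImode, base);

/* Both MEMs use the same address register, so the dependence is
treated as certain and speculation is refused. */
gcc_assert (estimate_dep_weak (m1, m2) == MIN_DEP_WEAK);
}
#endif
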
3140 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
3141 This function can handle INSN and ELEM being the same (INSN == ELEM).
3142 It is a convenience wrapper. */
3143 void
3144 add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
3145 {
3146 ds_t ds;
3147 bool internal;
3148
3149 if (dep_type == REG_DEP_TRUE)
3150 ds = DEP_TRUE;
3151 else if (dep_type == REG_DEP_OUTPUT)
3152 ds = DEP_OUTPUT;
3153 else
3154 {
3155 gcc_assert (dep_type == REG_DEP_ANTI);
3156 ds = DEP_ANTI;
3157 }
3158
3159 /* When add_dependence is called from inside sched-deps.c, we expect
3160 cur_insn to be non-null. */
3161 internal = cur_insn != NULL;
3162 if (internal)
3163 gcc_assert (insn == cur_insn);
3164 else
3165 cur_insn = insn;
3166
3167 note_dep (elem, ds);
3168 if (!internal)
3169 cur_insn = NULL;
3170 }
3171
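/* Illustrative fragment only (INSN and ELEM stand for any two insns in
the current region): external callers, for which cur_insn is NULL, can
record an extra dependence directly through the wrapper above, e.g. to
keep INSN from being scheduled before ELEM. */
#if 0
add_dependence (insn, elem, REG_DEP_ANTI);
#endif
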
3172 /* Return weakness of speculative type TYPE in the dep_status DS. */
3173 dw_t
3174 get_dep_weak_1 (ds_t ds, ds_t type)
3175 {
3176 ds = ds & type;
3177
3178 switch (type)
3179 {
3180 case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
3181 case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
3182 case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
3183 case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
3184 default: gcc_unreachable ();
3185 }
3186
3187 return (dw_t) ds;
3188 }
3189
3190 dw_t
3191 get_dep_weak (ds_t ds, ds_t type)
3192 {
3193 dw_t dw = get_dep_weak_1 (ds, type);
3194
3195 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
3196 return dw;
3197 }
3198
3199 /* Return the dep_status, which has the same parameters as DS, except for
3200 speculative type TYPE, which will have weakness DW. */
3201 ds_t
3202 set_dep_weak (ds_t ds, ds_t type, dw_t dw)
3203 {
3204 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
3205
3206 ds &= ~type;
3207 switch (type)
3208 {
3209 case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
3210 case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
3211 case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
3212 case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
3213 default: gcc_unreachable ();
3214 }
3215 return ds;
3216 }
3217
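/* A minimal round-trip sketch for the accessors above: each speculation
type keeps its weakness in a private bit-field of the status, so setting
one field disturbs neither the other fields nor the plain dependence
bits. */
#if 0
static void
dep_weak_round_trip_example (void)
{
ds_t ds = set_dep_weak (DEP_TRUE, BEGIN_DATA, MAX_DEP_WEAK / 2);

gcc_assert (get_dep_weak (ds, BEGIN_DATA) == MAX_DEP_WEAK / 2);
gcc_assert (ds & DEP_TRUE);
}
#endif
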
3218 /* Return the join of two dep_statuses DS1 and DS2.
3219 If MAX_P is true then choose the greater probability,
3220 otherwise multiply probabilities.
3221 This function assumes that both DS1 and DS2 contain speculative bits. */
3222 static ds_t
3223 ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
3224 {
3225 ds_t ds, t;
3226
3227 gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
3228
3229 ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
3230
3231 t = FIRST_SPEC_TYPE;
3232 do
3233 {
3234 if ((ds1 & t) && !(ds2 & t))
3235 ds |= ds1 & t;
3236 else if (!(ds1 & t) && (ds2 & t))
3237 ds |= ds2 & t;
3238 else if ((ds1 & t) && (ds2 & t))
3239 {
3240 dw_t dw1 = get_dep_weak (ds1, t);
3241 dw_t dw2 = get_dep_weak (ds2, t);
3242 ds_t dw;
3243
3244 if (!max_p)
3245 {
3246 dw = ((ds_t) dw1) * ((ds_t) dw2);
3247 dw /= MAX_DEP_WEAK;
3248 if (dw < MIN_DEP_WEAK)
3249 dw = MIN_DEP_WEAK;
3250 }
3251 else
3252 {
3253 if (dw1 >= dw2)
3254 dw = dw1;
3255 else
3256 dw = dw2;
3257 }
3258
3259 ds = set_dep_weak (ds, t, (dw_t) dw);
3260 }
3261
3262 if (t == LAST_SPEC_TYPE)
3263 break;
3264 t <<= SPEC_TYPE_SHIFT;
3265 }
3266 while (1);
3267
3268 return ds;
3269 }
3270
3271 /* Return the join of two dep_statuses DS1 and DS2.
3272 This function assumes that both DS1 and DS2 contain speculative bits. */
3273 ds_t
3274 ds_merge (ds_t ds1, ds_t ds2)
3275 {
3276 return ds_merge_1 (ds1, ds2, false);
3277 }
3278
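/* A worked example of the merge rule above (sketch only): weaknesses
behave as probabilities scaled by MAX_DEP_WEAK, so merging two statuses
whose BEGIN_DATA weakness is MAX_DEP_WEAK / 2 multiplies the
probabilities and yields roughly MAX_DEP_WEAK / 4. */
#if 0
static void
ds_merge_example (void)
{
ds_t a = set_dep_weak (DEP_TRUE, BEGIN_DATA, MAX_DEP_WEAK / 2);
ds_t b = set_dep_weak (DEP_TRUE, BEGIN_DATA, MAX_DEP_WEAK / 2);

gcc_assert (get_dep_weak (ds_merge (a, b), BEGIN_DATA)
== (ds_t) (MAX_DEP_WEAK / 2) * (MAX_DEP_WEAK / 2) / MAX_DEP_WEAK);
}
#endif
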
3279 /* Return the join of dep_statuses DS and DS2; MEM1 and MEM2, if given, are used to re-estimate the weakness of data speculation. */
3280 ds_t
3281 ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
3282 {
3283 ds_t new_status = ds | ds2;
3284
3285 if (new_status & SPECULATIVE)
3286 {
3287 if ((ds && !(ds & SPECULATIVE))
3288 || (ds2 && !(ds2 & SPECULATIVE)))
3289 /* Then this dep can't be speculative. */
3290 new_status &= ~SPECULATIVE;
3291 else
3292 {
3293 /* Both are speculative. Merge their probabilities. */
3294 if (mem1)
3295 {
3296 dw_t dw;
3297
3298 dw = estimate_dep_weak (mem1, mem2);
3299 ds = set_dep_weak (ds, BEGIN_DATA, dw);
3300 }
3301
3302 if (!ds)
3303 new_status = ds2;
3304 else if (!ds2)
3305 new_status = ds;
3306 else
3307 new_status = ds_merge (ds2, ds);
3308 }
3309 }
3310
3311 return new_status;
3312 }
3313
3314 /* Return the join of DS1 and DS2. Use maximum instead of multiplying
3315 probabilities. */
3316 ds_t
3317 ds_max_merge (ds_t ds1, ds_t ds2)
3318 {
3319 if (ds1 == 0 && ds2 == 0)
3320 return 0;
3321
3322 if (ds1 == 0 && ds2 != 0)
3323 return ds2;
3324
3325 if (ds1 != 0 && ds2 == 0)
3326 return ds1;
3327
3328 return ds_merge_1 (ds1, ds2, true);
3329 }
3330
3331 /* Return the probability of speculation success for the speculation
3332 status DS. */
3333 dw_t
3334 ds_weak (ds_t ds)
3335 {
3336 ds_t res = 1, dt;
3337 int n = 0;
3338
3339 dt = FIRST_SPEC_TYPE;
3340 do
3341 {
3342 if (ds & dt)
3343 {
3344 res *= (ds_t) get_dep_weak (ds, dt);
3345 n++;
3346 }
3347
3348 if (dt == LAST_SPEC_TYPE)
3349 break;
3350 dt <<= SPEC_TYPE_SHIFT;
3351 }
3352 while (1);
3353
3354 gcc_assert (n);
3355 while (--n)
3356 res /= MAX_DEP_WEAK;
3357
3358 if (res < MIN_DEP_WEAK)
3359 res = MIN_DEP_WEAK;
3360
3361 gcc_assert (res <= MAX_DEP_WEAK);
3362
3363 return (dw_t) res;
3364 }
3365
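/* A sketch of the computation above: with two speculation types, each of
weakness MAX_DEP_WEAK / 2, the overall success probability is the product
of the two halves, i.e. roughly MAX_DEP_WEAK / 4 on the same scale. */
#if 0
static void
ds_weak_example (void)
{
ds_t ds = set_dep_weak (0, BEGIN_DATA, MAX_DEP_WEAK / 2);

ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK / 2);
gcc_assert (ds_weak (ds)
== (dw_t) ((ds_t) (MAX_DEP_WEAK / 2) * (MAX_DEP_WEAK / 2)
/ MAX_DEP_WEAK));
}
#endif
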
3366 /* Return a dep status that contains all speculation types of DS. */
3367 ds_t
3368 ds_get_speculation_types (ds_t ds)
3369 {
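/* Each of BEGIN_DATA, BE_IN_DATA, BEGIN_CONTROL and BE_IN_CONTROL is a
multi-bit mask covering a weakness field, so the |= below is not a
no-op: it saturates the whole field once any of its bits is set,
recording only the presence of the type. */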
3370 if (ds & BEGIN_DATA)
3371 ds |= BEGIN_DATA;
3372 if (ds & BE_IN_DATA)
3373 ds |= BE_IN_DATA;
3374 if (ds & BEGIN_CONTROL)
3375 ds |= BEGIN_CONTROL;
3376 if (ds & BE_IN_CONTROL)
3377 ds |= BE_IN_CONTROL;
3378
3379 return ds & SPECULATIVE;
3380 }
3381
3382 /* Return a dep status that contains maximal weakness for each speculation
3383 type present in DS. */
3384 ds_t
3385 ds_get_max_dep_weak (ds_t ds)
3386 {
3387 if (ds & BEGIN_DATA)
3388 ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
3389 if (ds & BE_IN_DATA)
3390 ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
3391 if (ds & BEGIN_CONTROL)
3392 ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
3393 if (ds & BE_IN_CONTROL)
3394 ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
3395
3396 return ds;
3397 }
3398
3399 /* Dump information about the dependence status S. */
3400 static void
3401 dump_ds (FILE *f, ds_t s)
3402 {
3403 fprintf (f, "{");
3404
3405 if (s & BEGIN_DATA)
3406 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
3407 if (s & BE_IN_DATA)
3408 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
3409 if (s & BEGIN_CONTROL)
3410 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
3411 if (s & BE_IN_CONTROL)
3412 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
3413
3414 if (s & HARD_DEP)
3415 fprintf (f, "HARD_DEP; ");
3416
3417 if (s & DEP_TRUE)
3418 fprintf (f, "DEP_TRUE; ");
3419 if (s & DEP_ANTI)
3420 fprintf (f, "DEP_ANTI; ");
3421 if (s & DEP_OUTPUT)
3422 fprintf (f, "DEP_OUTPUT; ");
3423
3424 fprintf (f, "}");
3425 }
3426
3427 void
3428 debug_ds (ds_t s)
3429 {
3430 dump_ds (stderr, s);
3431 fprintf (stderr, "\n");
3432 }
3433
3434 #ifdef ENABLE_CHECKING
3435 /* Verify that dependence type and status are consistent.
3436 If RELAXED_P is true, then skip dep_weakness checks. */
3437 static void
3438 check_dep (dep_t dep, bool relaxed_p)
3439 {
3440 enum reg_note dt = DEP_TYPE (dep);
3441 ds_t ds = DEP_STATUS (dep);
3442
3443 gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
3444
3445 if (!(current_sched_info->flags & USE_DEPS_LIST))
3446 {
3447 gcc_assert (ds == -1);
3448 return;
3449 }
3450
3451 /* Check that dependence type contains the same bits as the status. */
3452 if (dt == REG_DEP_TRUE)
3453 gcc_assert (ds & DEP_TRUE);
3454 else if (dt == REG_DEP_OUTPUT)
3455 gcc_assert ((ds & DEP_OUTPUT)
3456 && !(ds & DEP_TRUE));
3457 else
3458 gcc_assert ((dt == REG_DEP_ANTI)
3459 && (ds & DEP_ANTI)
3460 && !(ds & (DEP_OUTPUT | DEP_TRUE)));
3461
3462 /* HARD_DEP can not appear in dep_status of a link. */
3463 gcc_assert (!(ds & HARD_DEP));
3464
3465 /* Check that dependence status is set correctly when speculation is not
3466 supported. */
3467 if (!sched_deps_info->generate_spec_deps)
3468 gcc_assert (!(ds & SPECULATIVE));
3469 else if (ds & SPECULATIVE)
3470 {
3471 if (!relaxed_p)
3472 {
3473 ds_t type = FIRST_SPEC_TYPE;
3474
3475 /* Check that dependence weakness is in proper range. */
3476 do
3477 {
3478 if (ds & type)
3479 get_dep_weak (ds, type);
3480
3481 if (type == LAST_SPEC_TYPE)
3482 break;
3483 type <<= SPEC_TYPE_SHIFT;
3484 }
3485 while (1);
3486 }
3487
3488 if (ds & BEGIN_SPEC)
3489 {
3490 /* Only true dependence can be data speculative. */
3491 if (ds & BEGIN_DATA)
3492 gcc_assert (ds & DEP_TRUE);
3493
3494 /* Control dependencies in the insn scheduler are represented by
3495 anti-dependencies, therefore only anti dependence can be
3496 control speculative. */
3497 if (ds & BEGIN_CONTROL)
3498 gcc_assert (ds & DEP_ANTI);
3499 }
3500 else
3501 {
3502 /* Subsequent speculations should resolve true dependencies. */
3503 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
3504 }
3505
3506 /* Check that true and anti dependencies can't have other speculative
3507 statuses. */
3508 if (ds & DEP_TRUE)
3509 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
3510 /* An output dependence can't be speculative at all. */
3511 gcc_assert (!(ds & DEP_OUTPUT));
3512 if (ds & DEP_ANTI)
3513 gcc_assert (ds & BEGIN_CONTROL);
3514 }
3515 }
3516 #endif /* ENABLE_CHECKING */
3517
3518 #endif /* INSN_SCHEDULING */