gcc/sched-deps.c
1 /* Instruction scheduling pass. This file computes dependencies between
2 instructions.
3 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
5 Free Software Foundation, Inc.
6 Contributed by Michael Tiemann (tiemann@cygnus.com). Enhanced by,
7 and currently maintained by, Jim Wilson (wilson@cygnus.com)
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
24 \f
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "diagnostic-core.h"
30 #include "rtl.h"
31 #include "tm_p.h"
32 #include "hard-reg-set.h"
33 #include "regs.h"
34 #include "function.h"
35 #include "flags.h"
36 #include "insn-config.h"
37 #include "insn-attr.h"
38 #include "except.h"
39 #include "recog.h"
40 #include "sched-int.h"
41 #include "params.h"
42 #include "cselib.h"
43 #include "ira.h"
44 #include "target.h"
45
46 #ifdef INSN_SCHEDULING
47
48 #ifdef ENABLE_CHECKING
49 #define CHECK (true)
50 #else
51 #define CHECK (false)
52 #endif
53
54 /* In deps->last_pending_memory_flush, this note kind marks a JUMP_INSN
55 that was not added to the list by flush_pending_lists and that therefore
56 stands just for itself, not for any other pending memory reads/writes. */
57 #define NON_FLUSH_JUMP_KIND REG_DEP_ANTI
58 #define NON_FLUSH_JUMP_P(x) (REG_NOTE_KIND (x) == NON_FLUSH_JUMP_KIND)
59
60 /* Holds current parameters for the dependency analyzer. */
61 struct sched_deps_info_def *sched_deps_info;
62
63 /* The data is specific to the Haifa scheduler. */
64 VEC(haifa_deps_insn_data_def, heap) *h_d_i_d = NULL;
65
66 /* Return the major type present in the DS. */
67 enum reg_note
68 ds_to_dk (ds_t ds)
69 {
70 if (ds & DEP_TRUE)
71 return REG_DEP_TRUE;
72
73 if (ds & DEP_OUTPUT)
74 return REG_DEP_OUTPUT;
75
76 gcc_assert (ds & DEP_ANTI);
77
78 return REG_DEP_ANTI;
79 }
80
81 /* Return equivalent dep_status. */
82 ds_t
83 dk_to_ds (enum reg_note dk)
84 {
85 switch (dk)
86 {
87 case REG_DEP_TRUE:
88 return DEP_TRUE;
89
90 case REG_DEP_OUTPUT:
91 return DEP_OUTPUT;
92
93 default:
94 gcc_assert (dk == REG_DEP_ANTI);
95 return DEP_ANTI;
96 }
97 }
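
/* For illustration only (not part of the pass): ds_to_dk and dk_to_ds
   are inverses on the three major dependence kinds.  The helper below
   is hypothetical and kept out of the build.  */
#if 0
static void
dummy_check_dep_kind_round_trip (void)
{
  gcc_assert (ds_to_dk (dk_to_ds (REG_DEP_TRUE)) == REG_DEP_TRUE);
  gcc_assert (ds_to_dk (dk_to_ds (REG_DEP_OUTPUT)) == REG_DEP_OUTPUT);
  gcc_assert (ds_to_dk (dk_to_ds (REG_DEP_ANTI)) == REG_DEP_ANTI);
}
#endif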
98
99 /* Functions to operate with dependence information container - dep_t. */
100
101 /* Init DEP with the arguments. */
102 void
103 init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note type, ds_t ds)
104 {
105 DEP_PRO (dep) = pro;
106 DEP_CON (dep) = con;
107 DEP_TYPE (dep) = type;
108 DEP_STATUS (dep) = ds;
109 }
110
111 /* Init DEP with the arguments.
112 While most of the scheduler (including targets) only needs the major type
113 of the dependency, it is convenient to hide the full dep_status from them. */
114 void
115 init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
116 {
117 ds_t ds;
118
119 if ((current_sched_info->flags & USE_DEPS_LIST))
120 ds = dk_to_ds (kind);
121 else
122 ds = -1;
123
124 init_dep_1 (dep, pro, con, kind, ds);
125 }
126
127 /* Make a copy of FROM in TO. */
128 static void
129 copy_dep (dep_t to, dep_t from)
130 {
131 memcpy (to, from, sizeof (*to));
132 }
133
134 static void dump_ds (FILE *, ds_t);
135
136 /* Define flags for dump_dep (). */
137
138 /* Dump producer of the dependence. */
139 #define DUMP_DEP_PRO (2)
140
141 /* Dump consumer of the dependence. */
142 #define DUMP_DEP_CON (4)
143
144 /* Dump type of the dependence. */
145 #define DUMP_DEP_TYPE (8)
146
147 /* Dump status of the dependence. */
148 #define DUMP_DEP_STATUS (16)
149
150 /* Dump all information about the dependence. */
151 #define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
152 |DUMP_DEP_STATUS)
153
154 /* Dump DEP to DUMP.
155 FLAGS is a bit mask specifying what information about DEP needs
156 to be printed.
157 If FLAGS has the very first bit set, then dump all information about DEP
158 and propagate this bit into the callee dump functions. */
159 static void
160 dump_dep (FILE *dump, dep_t dep, int flags)
161 {
162 if (flags & 1)
163 flags |= DUMP_DEP_ALL;
164
165 fprintf (dump, "<");
166
167 if (flags & DUMP_DEP_PRO)
168 fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));
169
170 if (flags & DUMP_DEP_CON)
171 fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));
172
173 if (flags & DUMP_DEP_TYPE)
174 {
175 char t;
176 enum reg_note type = DEP_TYPE (dep);
177
178 switch (type)
179 {
180 case REG_DEP_TRUE:
181 t = 't';
182 break;
183
184 case REG_DEP_OUTPUT:
185 t = 'o';
186 break;
187
188 case REG_DEP_ANTI:
189 t = 'a';
190 break;
191
192 default:
193 gcc_unreachable ();
194 break;
195 }
196
197 fprintf (dump, "%c; ", t);
198 }
199
200 if (flags & DUMP_DEP_STATUS)
201 {
202 if (current_sched_info->flags & USE_DEPS_LIST)
203 dump_ds (dump, DEP_STATUS (dep));
204 }
205
206 fprintf (dump, ">");
207 }
208
209 /* Default flags for dump_dep (). */
210 static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);
211
212 /* Dump all fields of DEP to STDERR. */
213 void
214 sd_debug_dep (dep_t dep)
215 {
216 dump_dep (stderr, dep, 1);
217 fprintf (stderr, "\n");
218 }
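
/* Usage note (illustration only): sd_debug_dep is meant to be called
   from a debugger, e.g. "call sd_debug_dep (dep)" under gdb, and prints
   something of the shape "<10; 14; t; >" -- producer UID, consumer UID,
   dependence type and, when USE_DEPS_LIST is set, the status bits.  */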
219
220 /* Determine whether DEP is a dependency link of a non-debug insn on a
221 debug insn. */
222
223 static inline bool
224 depl_on_debug_p (dep_link_t dep)
225 {
226 return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
227 && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
228 }
229
230 /* Functions to operate with a single link from the dependencies lists -
231 dep_link_t. */
232
233 /* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
234 PREV_NEXTP. */
235 static void
236 attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
237 {
238 dep_link_t next = *prev_nextp;
239
240 gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
241 && DEP_LINK_NEXT (l) == NULL);
242
243 /* Init node being inserted. */
244 DEP_LINK_PREV_NEXTP (l) = prev_nextp;
245 DEP_LINK_NEXT (l) = next;
246
247 /* Fix next node. */
248 if (next != NULL)
249 {
250 gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);
251
252 DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
253 }
254
255 /* Fix prev node. */
256 *prev_nextp = l;
257 }
258
259 /* Add dep_link LINK to deps_list L. */
260 static void
261 add_to_deps_list (dep_link_t link, deps_list_t l)
262 {
263 attach_dep_link (link, &DEPS_LIST_FIRST (l));
264
265 /* Don't count debug deps. */
266 if (!depl_on_debug_p (link))
267 ++DEPS_LIST_N_LINKS (l);
268 }
269
270 /* Detach dep_link L from the list. */
271 static void
272 detach_dep_link (dep_link_t l)
273 {
274 dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
275 dep_link_t next = DEP_LINK_NEXT (l);
276
277 *prev_nextp = next;
278
279 if (next != NULL)
280 DEP_LINK_PREV_NEXTP (next) = prev_nextp;
281
282 DEP_LINK_PREV_NEXTP (l) = NULL;
283 DEP_LINK_NEXT (l) = NULL;
284 }
285
286 /* Remove link LINK from list LIST. */
287 static void
288 remove_from_deps_list (dep_link_t link, deps_list_t list)
289 {
290 detach_dep_link (link);
291
292 /* Don't count debug deps. */
293 if (!depl_on_debug_p (link))
294 --DEPS_LIST_N_LINKS (list);
295 }
296
297 /* Move link LINK from list FROM to list TO. */
298 static void
299 move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
300 {
301 remove_from_deps_list (link, from);
302 add_to_deps_list (link, to);
303 }
304
305 /* Return true if LINK is not attached to any list. */
306 static bool
307 dep_link_is_detached_p (dep_link_t link)
308 {
309 return DEP_LINK_PREV_NEXTP (link) == NULL;
310 }
311
312 /* Pool to hold all dependency nodes (dep_node_t). */
313 static alloc_pool dn_pool;
314
315 /* Number of dep_nodes out there. */
316 static int dn_pool_diff = 0;
317
318 /* Create a dep_node. */
319 static dep_node_t
320 create_dep_node (void)
321 {
322 dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
323 dep_link_t back = DEP_NODE_BACK (n);
324 dep_link_t forw = DEP_NODE_FORW (n);
325
326 DEP_LINK_NODE (back) = n;
327 DEP_LINK_NEXT (back) = NULL;
328 DEP_LINK_PREV_NEXTP (back) = NULL;
329
330 DEP_LINK_NODE (forw) = n;
331 DEP_LINK_NEXT (forw) = NULL;
332 DEP_LINK_PREV_NEXTP (forw) = NULL;
333
334 ++dn_pool_diff;
335
336 return n;
337 }
338
339 /* Delete dep_node N. N must not be connected to any deps_list. */
340 static void
341 delete_dep_node (dep_node_t n)
342 {
343 gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
344 && dep_link_is_detached_p (DEP_NODE_FORW (n)));
345
346 --dn_pool_diff;
347
348 pool_free (dn_pool, n);
349 }
350
351 /* Pool to hold dependencies lists (deps_list_t). */
352 static alloc_pool dl_pool;
353
354 /* Number of deps_lists out there. */
355 static int dl_pool_diff = 0;
356
357 /* Functions to operate with dependences lists - deps_list_t. */
358
359 /* Return true if list L is empty. */
360 static bool
361 deps_list_empty_p (deps_list_t l)
362 {
363 return DEPS_LIST_N_LINKS (l) == 0;
364 }
365
366 /* Create a new deps_list. */
367 static deps_list_t
368 create_deps_list (void)
369 {
370 deps_list_t l = (deps_list_t) pool_alloc (dl_pool);
371
372 DEPS_LIST_FIRST (l) = NULL;
373 DEPS_LIST_N_LINKS (l) = 0;
374
375 ++dl_pool_diff;
376 return l;
377 }
378
379 /* Free deps_list L. */
380 static void
381 free_deps_list (deps_list_t l)
382 {
383 gcc_assert (deps_list_empty_p (l));
384
385 --dl_pool_diff;
386
387 pool_free (dl_pool, l);
388 }
389
390 /* Return true if there are no dep_nodes or deps_lists out there.
391 After the region is scheduled all the dependency nodes and lists
392 should [generally] be returned to the pool. */
393 bool
394 deps_pools_are_empty_p (void)
395 {
396 return dn_pool_diff == 0 && dl_pool_diff == 0;
397 }
398
399 /* Remove all elements from L. */
400 static void
401 clear_deps_list (deps_list_t l)
402 {
403 do
404 {
405 dep_link_t link = DEPS_LIST_FIRST (l);
406
407 if (link == NULL)
408 break;
409
410 remove_from_deps_list (link, l);
411 }
412 while (1);
413 }
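
/* Lifecycle sketch, for illustration only; dummy_deps_list_demo is a
   hypothetical helper that is not compiled.  A deps_list must be empty
   before it is handed back to free_deps_list, and a dep_node must have
   both of its links detached before delete_dep_node.  */
#if 0
static void
dummy_deps_list_demo (void)
{
  deps_list_t l = create_deps_list ();
  dep_node_t n = create_dep_node ();

  /* Attach the node's backward link to the list ...  */
  add_to_deps_list (DEP_NODE_BACK (n), l);

  /* ... then detach everything again before freeing.  */
  clear_deps_list (l);
  free_deps_list (l);
  delete_dep_node (n);
}
#endif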
414
415 static regset reg_pending_sets;
416 static regset reg_pending_clobbers;
417 static regset reg_pending_uses;
418 static enum reg_pending_barrier_mode reg_pending_barrier;
419
420 /* Hard registers implicitly clobbered or used (or that may be implicitly
421 clobbered or used) by the currently analyzed insn. For example, an
422 insn may have a constraint that accepts only one register class. Even
423 if there is currently no hard register in the insn, that particular
424 hard register will appear in the insn after the reload pass because
425 the constraint requires it. */
426 static HARD_REG_SET implicit_reg_pending_clobbers;
427 static HARD_REG_SET implicit_reg_pending_uses;
428
429 /* To speed up the test for duplicate dependency links we keep a
430 record of dependencies created by add_dependence when the average
431 number of instructions in a basic block is very large.
432
433 Studies have shown that there are typically around 5 instructions between
434 branches for typical C code. So we can make a guess that the average
435 basic block is approximately 5 instructions long; we will choose 100X
436 the average size as a very large basic block.
437
438 Each insn has associated bitmaps for its dependencies. Each bitmap
439 has enough entries to represent a dependency on any other insn in
440 the insn chain. When the bitmap cache for true dependencies is
441 allocated, the other dependency caches are allocated as well. */
442 static bitmap_head *true_dependency_cache = NULL;
443 static bitmap_head *output_dependency_cache = NULL;
444 static bitmap_head *anti_dependency_cache = NULL;
445 static bitmap_head *spec_dependency_cache = NULL;
446 static int cache_size;
447
448 static int deps_may_trap_p (const_rtx);
449 static void add_dependence_list (rtx, rtx, int, enum reg_note);
450 static void add_dependence_list_and_free (struct deps_desc *, rtx,
451 rtx *, int, enum reg_note);
452 static void delete_all_dependences (rtx);
453 static void fixup_sched_groups (rtx);
454
455 static void flush_pending_lists (struct deps_desc *, rtx, int, int);
456 static void sched_analyze_1 (struct deps_desc *, rtx, rtx);
457 static void sched_analyze_2 (struct deps_desc *, rtx, rtx);
458 static void sched_analyze_insn (struct deps_desc *, rtx, rtx);
459
460 static bool sched_has_condition_p (const_rtx);
461 static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
462
463 static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
464 rtx, rtx);
465 static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);
466
467 #ifdef ENABLE_CHECKING
468 static void check_dep (dep_t, bool);
469 #endif
470 \f
471 /* Return nonzero if a load of the memory reference MEM can cause a trap. */
472
473 static int
474 deps_may_trap_p (const_rtx mem)
475 {
476 const_rtx addr = XEXP (mem, 0);
477
478 if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
479 {
480 const_rtx t = get_reg_known_value (REGNO (addr));
481 if (t)
482 addr = t;
483 }
484 return rtx_addr_can_trap_p (addr);
485 }
486 \f
487
488 /* Find the condition under which INSN is executed. If REV is not NULL,
489 it is set to TRUE when the returned comparison should be reversed
490 to get the actual condition. */
491 static rtx
492 sched_get_condition_with_rev (const_rtx insn, bool *rev)
493 {
494 rtx pat = PATTERN (insn);
495 rtx src;
496
497 if (pat == 0)
498 return 0;
499
500 if (rev)
501 *rev = false;
502
503 if (GET_CODE (pat) == COND_EXEC)
504 return COND_EXEC_TEST (pat);
505
506 if (!any_condjump_p (insn) || !onlyjump_p (insn))
507 return 0;
508
509 src = SET_SRC (pc_set (insn));
510
511 if (XEXP (src, 2) == pc_rtx)
512 return XEXP (src, 0);
513 else if (XEXP (src, 1) == pc_rtx)
514 {
515 rtx cond = XEXP (src, 0);
516 enum rtx_code revcode = reversed_comparison_code (cond, insn);
517
518 if (revcode == UNKNOWN)
519 return 0;
520
521 if (rev)
522 *rev = true;
523 return cond;
524 }
525
526 return 0;
527 }
528
529 /* True when we can find a condition under which INSN is executed. */
530 static bool
531 sched_has_condition_p (const_rtx insn)
532 {
533 return !! sched_get_condition_with_rev (insn, NULL);
534 }
535
536 \f
537
538 /* Return nonzero if conditions COND1 and COND2 can never be both true. */
539 static int
540 conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
541 {
542 if (COMPARISON_P (cond1)
543 && COMPARISON_P (cond2)
544 && GET_CODE (cond1) ==
545 (rev1==rev2
546 ? reversed_comparison_code (cond2, NULL)
547 : GET_CODE (cond2))
548 && XEXP (cond1, 0) == XEXP (cond2, 0)
549 && XEXP (cond1, 1) == XEXP (cond2, 1))
550 return 1;
551 return 0;
552 }
553
554 /* Return true if insn1 and insn2 can never depend on one another because
555 the conditions under which they are executed are mutually exclusive. */
556 bool
557 sched_insns_conditions_mutex_p (const_rtx insn1, const_rtx insn2)
558 {
559 rtx cond1, cond2;
560 bool rev1 = false, rev2 = false;
561
562 /* df doesn't handle conditional lifetimes entirely correctly;
563 calls mess up the conditional lifetimes. */
564 if (!CALL_P (insn1) && !CALL_P (insn2))
565 {
566 cond1 = sched_get_condition_with_rev (insn1, &rev1);
567 cond2 = sched_get_condition_with_rev (insn2, &rev2);
568 if (cond1 && cond2
569 && conditions_mutex_p (cond1, cond2, rev1, rev2)
570 /* Make sure first instruction doesn't affect condition of second
571 instruction if switched. */
572 && !modified_in_p (cond1, insn2)
573 /* Make sure second instruction doesn't affect condition of first
574 instruction if switched. */
575 && !modified_in_p (cond2, insn1))
576 return true;
577 }
578 return false;
579 }
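
/* For illustration: with conditional execution, the two insns

     (cond_exec (eq (reg 100) (const_int 0)) (set ...))
     (cond_exec (ne (reg 100) (const_int 0)) (set ...))

   have mutually exclusive guards, so, provided reg 100 is not modified
   by either insn, sched_insns_conditions_mutex_p returns true and no
   dependence needs to be recorded between them.  */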
580 \f
581
582 /* Return true if INSN can potentially be speculated with type DS. */
583 bool
584 sched_insn_is_legitimate_for_speculation_p (const_rtx insn, ds_t ds)
585 {
586 if (HAS_INTERNAL_DEP (insn))
587 return false;
588
589 if (!NONJUMP_INSN_P (insn))
590 return false;
591
592 if (SCHED_GROUP_P (insn))
593 return false;
594
595 if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX (insn)))
596 return false;
597
598 if (side_effects_p (PATTERN (insn)))
599 return false;
600
601 if (ds & BE_IN_SPEC)
602 /* The following instructions, which depend on a speculatively scheduled
603 instruction, cannot be speculatively scheduled along with it. */
604 {
605 if (may_trap_or_fault_p (PATTERN (insn)))
606 /* If instruction might fault, it cannot be speculatively scheduled.
607 For control speculation it's obvious why and for data speculation
608 it's because the insn might get wrong input if speculation
609 wasn't successful. */
610 return false;
611
612 if ((ds & BE_IN_DATA)
613 && sched_has_condition_p (insn))
614 /* If this is a predicated instruction, then it cannot be
615 speculatively scheduled. See PR35659. */
616 return false;
617 }
618
619 return true;
620 }
621
622 /* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
623 initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
624 and remove the type of the returned [through LIST_PTR] list from TYPES_PTR.
625 This function is used to switch sd_iterator to the next list.
626 !!! For internal use only. Might consider moving it to sched-int.h. */
627 void
628 sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
629 deps_list_t *list_ptr, bool *resolved_p_ptr)
630 {
631 sd_list_types_def types = *types_ptr;
632
633 if (types & SD_LIST_HARD_BACK)
634 {
635 *list_ptr = INSN_HARD_BACK_DEPS (insn);
636 *resolved_p_ptr = false;
637 *types_ptr = types & ~SD_LIST_HARD_BACK;
638 }
639 else if (types & SD_LIST_SPEC_BACK)
640 {
641 *list_ptr = INSN_SPEC_BACK_DEPS (insn);
642 *resolved_p_ptr = false;
643 *types_ptr = types & ~SD_LIST_SPEC_BACK;
644 }
645 else if (types & SD_LIST_FORW)
646 {
647 *list_ptr = INSN_FORW_DEPS (insn);
648 *resolved_p_ptr = false;
649 *types_ptr = types & ~SD_LIST_FORW;
650 }
651 else if (types & SD_LIST_RES_BACK)
652 {
653 *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
654 *resolved_p_ptr = true;
655 *types_ptr = types & ~SD_LIST_RES_BACK;
656 }
657 else if (types & SD_LIST_RES_FORW)
658 {
659 *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
660 *resolved_p_ptr = true;
661 *types_ptr = types & ~SD_LIST_RES_FORW;
662 }
663 else
664 {
665 *list_ptr = NULL;
666 *resolved_p_ptr = false;
667 *types_ptr = SD_LIST_NONE;
668 }
669 }
670
671 /* Return the total size of INSN's lists defined by LIST_TYPES. */
672 int
673 sd_lists_size (const_rtx insn, sd_list_types_def list_types)
674 {
675 int size = 0;
676
677 while (list_types != SD_LIST_NONE)
678 {
679 deps_list_t list;
680 bool resolved_p;
681
682 sd_next_list (insn, &list_types, &list, &resolved_p);
683 if (list)
684 size += DEPS_LIST_N_LINKS (list);
685 }
686
687 return size;
688 }
689
690 /* Return true if INSN's lists defined by LIST_TYPES are all empty. */
691
692 bool
693 sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
694 {
695 while (list_types != SD_LIST_NONE)
696 {
697 deps_list_t list;
698 bool resolved_p;
699
700 sd_next_list (insn, &list_types, &list, &resolved_p);
701 if (!deps_list_empty_p (list))
702 return false;
703 }
704
705 return true;
706 }
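
/* Illustration only: the usual way to walk these lists is the iterator
   interface declared in sched-int.h.  dummy_count_true_back_deps is a
   hypothetical helper, kept out of the build.  */
#if 0
static int
dummy_count_true_back_deps (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int n = 0;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    if (DEP_TYPE (dep) == REG_DEP_TRUE)
      n++;

  return n;
}
#endif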
707
708 /* Initialize data for INSN. */
709 void
710 sd_init_insn (rtx insn)
711 {
712 INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
713 INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
714 INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
715 INSN_FORW_DEPS (insn) = create_deps_list ();
716 INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();
717
718 /* ??? It would be nice to allocate dependency caches here. */
719 }
720
721 /* Free data for INSN. */
722 void
723 sd_finish_insn (rtx insn)
724 {
725 /* ??? It would be nice to deallocate dependency caches here. */
726
727 free_deps_list (INSN_HARD_BACK_DEPS (insn));
728 INSN_HARD_BACK_DEPS (insn) = NULL;
729
730 free_deps_list (INSN_SPEC_BACK_DEPS (insn));
731 INSN_SPEC_BACK_DEPS (insn) = NULL;
732
733 free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
734 INSN_RESOLVED_BACK_DEPS (insn) = NULL;
735
736 free_deps_list (INSN_FORW_DEPS (insn));
737 INSN_FORW_DEPS (insn) = NULL;
738
739 free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
740 INSN_RESOLVED_FORW_DEPS (insn) = NULL;
741 }
742
743 /* Find a dependency between producer PRO and consumer CON.
744 Search through resolved dependency lists if RESOLVED_P is true.
745 If no such dependency is found return NULL,
746 otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
747 with an iterator pointing to it. */
748 static dep_t
749 sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
750 sd_iterator_def *sd_it_ptr)
751 {
752 sd_list_types_def pro_list_type;
753 sd_list_types_def con_list_type;
754 sd_iterator_def sd_it;
755 dep_t dep;
756 bool found_p = false;
757
758 if (resolved_p)
759 {
760 pro_list_type = SD_LIST_RES_FORW;
761 con_list_type = SD_LIST_RES_BACK;
762 }
763 else
764 {
765 pro_list_type = SD_LIST_FORW;
766 con_list_type = SD_LIST_BACK;
767 }
768
769 /* Walk through either the back list of CON or the forw list of PRO,
770 depending on which one is shorter. */
771 if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
772 {
773 /* Find the dep_link with producer PRO in consumer's back_deps. */
774 FOR_EACH_DEP (con, con_list_type, sd_it, dep)
775 if (DEP_PRO (dep) == pro)
776 {
777 found_p = true;
778 break;
779 }
780 }
781 else
782 {
783 /* Find the dep_link with consumer CON in producer's forw_deps. */
784 FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
785 if (DEP_CON (dep) == con)
786 {
787 found_p = true;
788 break;
789 }
790 }
791
792 if (found_p)
793 {
794 if (sd_it_ptr != NULL)
795 *sd_it_ptr = sd_it;
796
797 return dep;
798 }
799
800 return NULL;
801 }
802
803 /* Find a dependency between producer PRO and consumer CON.
804 Use the dependency cache [if available] to check whether a dependency is present at all.
805 Search through resolved dependency lists if RESOLVED_P is true.
806 Return the dependency, or NULL if none was found. */
807 dep_t
808 sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
809 {
810 if (true_dependency_cache != NULL)
811 /* Avoiding the list walk below can cut compile times dramatically
812 for some code. */
813 {
814 int elem_luid = INSN_LUID (pro);
815 int insn_luid = INSN_LUID (con);
816
817 gcc_assert (output_dependency_cache != NULL
818 && anti_dependency_cache != NULL);
819
820 if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
821 && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
822 && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
823 return NULL;
824 }
825
826 return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
827 }
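
/* Illustration only (hypothetical helper, not compiled): a typical
   query is simply whether any dependence of CON on PRO has already
   been recorded.  */
#if 0
static bool
dummy_insns_linked_p (rtx pro, rtx con)
{
  /* Pass true instead of false to search the resolved lists.  */
  return sd_find_dep_between (pro, con, false) != NULL;
}
#endif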
828
829 /* Add or update a dependence described by DEP.
830 MEM1 and MEM2, if non-null, correspond to memory locations in case of
831 data speculation.
832
833 The function returns a value indicating if an old entry has been changed
834 or a new entry has been added to insn's backward deps.
835
836 This function merely checks whether the producer and the consumer are the same insn
837 and doesn't create a dep in this case. Actual manipulation of
838 dependence data structures is performed in add_or_update_dep_1. */
839 static enum DEPS_ADJUST_RESULT
840 maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
841 {
842 rtx elem = DEP_PRO (dep);
843 rtx insn = DEP_CON (dep);
844
845 gcc_assert (INSN_P (insn) && INSN_P (elem));
846
847 /* Don't depend an insn on itself. */
848 if (insn == elem)
849 {
850 if (sched_deps_info->generate_spec_deps)
851 /* INSN has an internal dependence, which we can't overcome. */
852 HAS_INTERNAL_DEP (insn) = 1;
853
854 return DEP_NODEP;
855 }
856
857 return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
858 }
859
860 /* Ask dependency caches what needs to be done for dependence DEP.
861 Return DEP_CREATED if new dependence should be created and there is no
862 need to try to find one searching the dependencies lists.
863 Return DEP_PRESENT if there already is a dependence described by DEP and
864 hence nothing is to be done.
865 Return DEP_CHANGED if there already is a dependence, but it should be
866 updated to incorporate additional information from DEP. */
867 static enum DEPS_ADJUST_RESULT
868 ask_dependency_caches (dep_t dep)
869 {
870 int elem_luid = INSN_LUID (DEP_PRO (dep));
871 int insn_luid = INSN_LUID (DEP_CON (dep));
872
873 gcc_assert (true_dependency_cache != NULL
874 && output_dependency_cache != NULL
875 && anti_dependency_cache != NULL);
876
877 if (!(current_sched_info->flags & USE_DEPS_LIST))
878 {
879 enum reg_note present_dep_type;
880
881 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
882 present_dep_type = REG_DEP_TRUE;
883 else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
884 present_dep_type = REG_DEP_OUTPUT;
885 else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
886 present_dep_type = REG_DEP_ANTI;
887 else
888 /* There is no existing dep so it should be created. */
889 return DEP_CREATED;
890
891 if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
892 /* DEP does not add anything to the existing dependence. */
893 return DEP_PRESENT;
894 }
895 else
896 {
897 ds_t present_dep_types = 0;
898
899 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
900 present_dep_types |= DEP_TRUE;
901 if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
902 present_dep_types |= DEP_OUTPUT;
903 if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
904 present_dep_types |= DEP_ANTI;
905
906 if (present_dep_types == 0)
907 /* There is no existing dep so it should be created. */
908 return DEP_CREATED;
909
910 if (!(current_sched_info->flags & DO_SPECULATION)
911 || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
912 {
913 if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
914 == present_dep_types)
915 /* DEP does not add anything to the existing dependence. */
916 return DEP_PRESENT;
917 }
918 else
919 {
920 /* Only true dependencies can be data speculative and
921 only anti dependencies can be control speculative. */
922 gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
923 == present_dep_types);
924
925 /* if (DEP is SPECULATIVE) then
926 ..we should update DEP_STATUS
927 else
928 ..we should reset existing dep to non-speculative. */
929 }
930 }
931
932 return DEP_CHANGED;
933 }
934
935 /* Set dependency caches according to DEP. */
936 static void
937 set_dependency_caches (dep_t dep)
938 {
939 int elem_luid = INSN_LUID (DEP_PRO (dep));
940 int insn_luid = INSN_LUID (DEP_CON (dep));
941
942 if (!(current_sched_info->flags & USE_DEPS_LIST))
943 {
944 switch (DEP_TYPE (dep))
945 {
946 case REG_DEP_TRUE:
947 bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
948 break;
949
950 case REG_DEP_OUTPUT:
951 bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
952 break;
953
954 case REG_DEP_ANTI:
955 bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
956 break;
957
958 default:
959 gcc_unreachable ();
960 }
961 }
962 else
963 {
964 ds_t ds = DEP_STATUS (dep);
965
966 if (ds & DEP_TRUE)
967 bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
968 if (ds & DEP_OUTPUT)
969 bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
970 if (ds & DEP_ANTI)
971 bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
972
973 if (ds & SPECULATIVE)
974 {
975 gcc_assert (current_sched_info->flags & DO_SPECULATION);
976 bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
977 }
978 }
979 }
980
981 /* The type of dependence DEP has changed from OLD_TYPE. Update dependency
982 caches accordingly. */
983 static void
984 update_dependency_caches (dep_t dep, enum reg_note old_type)
985 {
986 int elem_luid = INSN_LUID (DEP_PRO (dep));
987 int insn_luid = INSN_LUID (DEP_CON (dep));
988
989 /* Clear corresponding cache entry because type of the link
990 may have changed. Keep them if we use_deps_list. */
991 if (!(current_sched_info->flags & USE_DEPS_LIST))
992 {
993 switch (old_type)
994 {
995 case REG_DEP_OUTPUT:
996 bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
997 break;
998
999 case REG_DEP_ANTI:
1000 bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1001 break;
1002
1003 default:
1004 gcc_unreachable ();
1005 }
1006 }
1007
1008 set_dependency_caches (dep);
1009 }
1010
1011 /* Convert a dependence pointed to by SD_IT to be non-speculative. */
1012 static void
1013 change_spec_dep_to_hard (sd_iterator_def sd_it)
1014 {
1015 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1016 dep_link_t link = DEP_NODE_BACK (node);
1017 dep_t dep = DEP_NODE_DEP (node);
1018 rtx elem = DEP_PRO (dep);
1019 rtx insn = DEP_CON (dep);
1020
1021 move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));
1022
1023 DEP_STATUS (dep) &= ~SPECULATIVE;
1024
1025 if (true_dependency_cache != NULL)
1026 /* Clear the cache entry. */
1027 bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
1028 INSN_LUID (elem));
1029 }
1030
1031 /* Update DEP to incorporate information from NEW_DEP.
1032 SD_IT points to DEP in case it should be moved to another list.
1033 MEM1 and MEM2, if nonnull, correspond to memory locations in case a
1034 data-speculative dependence should be updated. */
1035 static enum DEPS_ADJUST_RESULT
1036 update_dep (dep_t dep, dep_t new_dep,
1037 sd_iterator_def sd_it ATTRIBUTE_UNUSED,
1038 rtx mem1 ATTRIBUTE_UNUSED,
1039 rtx mem2 ATTRIBUTE_UNUSED)
1040 {
1041 enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
1042 enum reg_note old_type = DEP_TYPE (dep);
1043
1044 /* If this is a more restrictive type of dependence than the
1045 existing one, then change the existing dependence to this
1046 type. */
1047 if ((int) DEP_TYPE (new_dep) < (int) old_type)
1048 {
1049 DEP_TYPE (dep) = DEP_TYPE (new_dep);
1050 res = DEP_CHANGED;
1051 }
1052
1053 if (current_sched_info->flags & USE_DEPS_LIST)
1054 /* Update DEP_STATUS. */
1055 {
1056 ds_t dep_status = DEP_STATUS (dep);
1057 ds_t ds = DEP_STATUS (new_dep);
1058 ds_t new_status = ds | dep_status;
1059
1060 if (new_status & SPECULATIVE)
1061 /* Either existing dep or a dep we're adding or both are
1062 speculative. */
1063 {
1064 if (!(ds & SPECULATIVE)
1065 || !(dep_status & SPECULATIVE))
1066 /* The new dep can't be speculative. */
1067 {
1068 new_status &= ~SPECULATIVE;
1069
1070 if (dep_status & SPECULATIVE)
1071 /* The old dep was speculative, but now it
1072 isn't. */
1073 change_spec_dep_to_hard (sd_it);
1074 }
1075 else
1076 {
1077 /* Both are speculative. Merge probabilities. */
1078 if (mem1 != NULL)
1079 {
1080 dw_t dw;
1081
1082 dw = estimate_dep_weak (mem1, mem2);
1083 ds = set_dep_weak (ds, BEGIN_DATA, dw);
1084 }
1085
1086 new_status = ds_merge (dep_status, ds);
1087 }
1088 }
1089
1090 ds = new_status;
1091
1092 if (dep_status != ds)
1093 {
1094 DEP_STATUS (dep) = ds;
1095 res = DEP_CHANGED;
1096 }
1097 }
1098
1099 if (true_dependency_cache != NULL
1100 && res == DEP_CHANGED)
1101 update_dependency_caches (dep, old_type);
1102
1103 return res;
1104 }
1105
1106 /* Add or update a dependence described by DEP.
1107 MEM1 and MEM2, if non-null, correspond to memory locations in case of
1108 data speculation.
1109
1110 The function returns a value indicating if an old entry has been changed
1111 or a new entry has been added to insn's backward deps or nothing has
1112 been updated at all. */
1113 static enum DEPS_ADJUST_RESULT
1114 add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
1115 rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
1116 {
1117 bool maybe_present_p = true;
1118 bool present_p = false;
1119
1120 gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
1121 && DEP_PRO (new_dep) != DEP_CON (new_dep));
1122
1123 #ifdef ENABLE_CHECKING
1124 check_dep (new_dep, mem1 != NULL);
1125 #endif
1126
1127 if (true_dependency_cache != NULL)
1128 {
1129 switch (ask_dependency_caches (new_dep))
1130 {
1131 case DEP_PRESENT:
1132 return DEP_PRESENT;
1133
1134 case DEP_CHANGED:
1135 maybe_present_p = true;
1136 present_p = true;
1137 break;
1138
1139 case DEP_CREATED:
1140 maybe_present_p = false;
1141 present_p = false;
1142 break;
1143
1144 default:
1145 gcc_unreachable ();
1146 break;
1147 }
1148 }
1149
1150 /* Check that we don't already have this dependence. */
1151 if (maybe_present_p)
1152 {
1153 dep_t present_dep;
1154 sd_iterator_def sd_it;
1155
1156 gcc_assert (true_dependency_cache == NULL || present_p);
1157
1158 present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
1159 DEP_CON (new_dep),
1160 resolved_p, &sd_it);
1161
1162 if (present_dep != NULL)
1163 /* We found an existing dependency between ELEM and INSN. */
1164 return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
1165 else
1166 /* We didn't find a dep, so it shouldn't be present in the cache. */
1167 gcc_assert (!present_p);
1168 }
1169
1170 /* Might want to check one level of transitivity to save conses.
1171 This check should be done in maybe_add_or_update_dep_1.
1172 Since we made it to add_or_update_dep_1, we must create
1173 (or update) a link. */
1174
1175 if (mem1 != NULL_RTX)
1176 {
1177 gcc_assert (sched_deps_info->generate_spec_deps);
1178 DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
1179 estimate_dep_weak (mem1, mem2));
1180 }
1181
1182 sd_add_dep (new_dep, resolved_p);
1183
1184 return DEP_CREATED;
1185 }
1186
1187 /* Initialize BACK_LIST_PTR with consumer's backward list and
1188 FORW_LIST_PTR with producer's forward list. If RESOLVED_P is true
1189 initialize with lists that hold resolved deps. */
1190 static void
1191 get_back_and_forw_lists (dep_t dep, bool resolved_p,
1192 deps_list_t *back_list_ptr,
1193 deps_list_t *forw_list_ptr)
1194 {
1195 rtx con = DEP_CON (dep);
1196
1197 if (!resolved_p)
1198 {
1199 if ((current_sched_info->flags & DO_SPECULATION)
1200 && (DEP_STATUS (dep) & SPECULATIVE))
1201 *back_list_ptr = INSN_SPEC_BACK_DEPS (con);
1202 else
1203 *back_list_ptr = INSN_HARD_BACK_DEPS (con);
1204
1205 *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
1206 }
1207 else
1208 {
1209 *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
1210 *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
1211 }
1212 }
1213
1214 /* Add dependence described by DEP.
1215 If RESOLVED_P is true treat the dependence as a resolved one. */
1216 void
1217 sd_add_dep (dep_t dep, bool resolved_p)
1218 {
1219 dep_node_t n = create_dep_node ();
1220 deps_list_t con_back_deps;
1221 deps_list_t pro_forw_deps;
1222 rtx elem = DEP_PRO (dep);
1223 rtx insn = DEP_CON (dep);
1224
1225 gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
1226
1227 if ((current_sched_info->flags & DO_SPECULATION)
1228 && !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
1229 DEP_STATUS (dep) &= ~SPECULATIVE;
1230
1231 copy_dep (DEP_NODE_DEP (n), dep);
1232
1233 get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);
1234
1235 add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
1236
1237 #ifdef ENABLE_CHECKING
1238 check_dep (dep, false);
1239 #endif
1240
1241 add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1242
1243 /* If we are adding a dependency to INSN's LOG_LINKs, then note that
1244 in the bitmap caches of dependency information. */
1245 if (true_dependency_cache != NULL)
1246 set_dependency_caches (dep);
1247 }
1248
1249 /* Add or update a dependence described by DEP. If RESOLVED_P is true,
1250 treat the dependence as a resolved one.
1251 This function is a convenience wrapper around add_or_update_dep_1. */
1252 enum DEPS_ADJUST_RESULT
1253 sd_add_or_update_dep (dep_t dep, bool resolved_p)
1254 {
1255 return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
1256 }
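
/* Illustration only: a caller typically builds a dep on the stack,
   initializes it and lets sd_add_or_update_dep decide whether a new
   entry is needed.  dummy_add_true_dep is hypothetical and assumes
   PRO and CON are distinct insns.  */
#if 0
static void
dummy_add_true_dep (rtx pro, rtx con)
{
  dep_def _dep, *dep = &_dep;

  init_dep (dep, pro, con, REG_DEP_TRUE);

  switch (sd_add_or_update_dep (dep, false))
    {
    case DEP_CREATED:
      /* A new backward dependence of CON on PRO was recorded.  */
      break;

    case DEP_CHANGED:
    case DEP_PRESENT:
      /* An existing dependence was strengthened or already covered
	 this one.  */
      break;

    default:
      gcc_unreachable ();
    }
}
#endif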
1257
1258 /* Resolve the dependence pointed to by SD_IT.
1259 SD_IT will advance to the next element. */
1260 void
1261 sd_resolve_dep (sd_iterator_def sd_it)
1262 {
1263 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1264 dep_t dep = DEP_NODE_DEP (node);
1265 rtx pro = DEP_PRO (dep);
1266 rtx con = DEP_CON (dep);
1267
1268 if ((current_sched_info->flags & DO_SPECULATION)
1269 && (DEP_STATUS (dep) & SPECULATIVE))
1270 move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
1271 INSN_RESOLVED_BACK_DEPS (con));
1272 else
1273 move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
1274 INSN_RESOLVED_BACK_DEPS (con));
1275
1276 move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
1277 INSN_RESOLVED_FORW_DEPS (pro));
1278 }
1279
1280 /* Make TO depend on all of FROM's producers.
1281 If RESOLVED_P is true add dependencies to the resolved lists. */
1282 void
1283 sd_copy_back_deps (rtx to, rtx from, bool resolved_p)
1284 {
1285 sd_list_types_def list_type;
1286 sd_iterator_def sd_it;
1287 dep_t dep;
1288
1289 list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;
1290
1291 FOR_EACH_DEP (from, list_type, sd_it, dep)
1292 {
1293 dep_def _new_dep, *new_dep = &_new_dep;
1294
1295 copy_dep (new_dep, dep);
1296 DEP_CON (new_dep) = to;
1297 sd_add_dep (new_dep, resolved_p);
1298 }
1299 }
1300
1301 /* Remove a dependency referred to by SD_IT.
1302 SD_IT will point to the next dependence after removal. */
1303 void
1304 sd_delete_dep (sd_iterator_def sd_it)
1305 {
1306 dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
1307 dep_t dep = DEP_NODE_DEP (n);
1308 rtx pro = DEP_PRO (dep);
1309 rtx con = DEP_CON (dep);
1310 deps_list_t con_back_deps;
1311 deps_list_t pro_forw_deps;
1312
1313 if (true_dependency_cache != NULL)
1314 {
1315 int elem_luid = INSN_LUID (pro);
1316 int insn_luid = INSN_LUID (con);
1317
1318 bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
1319 bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1320 bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1321
1322 if (current_sched_info->flags & DO_SPECULATION)
1323 bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
1324 }
1325
1326 get_back_and_forw_lists (dep, sd_it.resolved_p,
1327 &con_back_deps, &pro_forw_deps);
1328
1329 remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
1330 remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1331
1332 delete_dep_node (n);
1333 }
1334
1335 /* Dump size of the lists. */
1336 #define DUMP_LISTS_SIZE (2)
1337
1338 /* Dump dependencies of the lists. */
1339 #define DUMP_LISTS_DEPS (4)
1340
1341 /* Dump all information about the lists. */
1342 #define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)
1343
1344 /* Dump deps_lists of INSN specified by TYPES to DUMP.
1345 FLAGS is a bit mask specifying what information about the lists needs
1346 to be printed.
1347 If FLAGS has the very first bit set, then dump all information about
1348 the lists and propagate this bit into the callee dump functions. */
1349 static void
1350 dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
1351 {
1352 sd_iterator_def sd_it;
1353 dep_t dep;
1354 int all;
1355
1356 all = (flags & 1);
1357
1358 if (all)
1359 flags |= DUMP_LISTS_ALL;
1360
1361 fprintf (dump, "[");
1362
1363 if (flags & DUMP_LISTS_SIZE)
1364 fprintf (dump, "%d; ", sd_lists_size (insn, types));
1365
1366 if (flags & DUMP_LISTS_DEPS)
1367 {
1368 FOR_EACH_DEP (insn, types, sd_it, dep)
1369 {
1370 dump_dep (dump, dep, dump_dep_flags | all);
1371 fprintf (dump, " ");
1372 }
1373 }
1374 }
1375
1376 /* Dump all information about deps_lists of INSN specified by TYPES
1377 to STDERR. */
1378 void
1379 sd_debug_lists (rtx insn, sd_list_types_def types)
1380 {
1381 dump_lists (stderr, insn, types, 1);
1382 fprintf (stderr, "\n");
1383 }
1384
1385 /* A convenience wrapper to operate on an entire list. */
1386
1387 static void
1388 add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
1389 {
1390 for (; list; list = XEXP (list, 1))
1391 {
1392 if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
1393 add_dependence (insn, XEXP (list, 0), dep_type);
1394 }
1395 }
1396
1397 /* Similar, but free *LISTP at the same time, when the context
1398 is not readonly. */
1399
1400 static void
1401 add_dependence_list_and_free (struct deps_desc *deps, rtx insn, rtx *listp,
1402 int uncond, enum reg_note dep_type)
1403 {
1404 rtx list, next;
1405
1406 /* We don't want to short-circuit dependencies involving debug
1407 insns, because they may cause actual dependencies to be
1408 disregarded. */
1409 if (deps->readonly || DEBUG_INSN_P (insn))
1410 {
1411 add_dependence_list (insn, *listp, uncond, dep_type);
1412 return;
1413 }
1414
1415 for (list = *listp, *listp = NULL; list ; list = next)
1416 {
1417 next = XEXP (list, 1);
1418 if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
1419 add_dependence (insn, XEXP (list, 0), dep_type);
1420 free_INSN_LIST_node (list);
1421 }
1422 }
1423
1424 /* Remove all occurrences of INSN from LIST. Return the number of
1425 occurrences removed. */
1426
1427 static int
1428 remove_from_dependence_list (rtx insn, rtx* listp)
1429 {
1430 int removed = 0;
1431
1432 while (*listp)
1433 {
1434 if (XEXP (*listp, 0) == insn)
1435 {
1436 remove_free_INSN_LIST_node (listp);
1437 removed++;
1438 continue;
1439 }
1440
1441 listp = &XEXP (*listp, 1);
1442 }
1443
1444 return removed;
1445 }
1446
1447 /* Same as above, but process two lists at once. */
1448 static int
1449 remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
1450 {
1451 int removed = 0;
1452
1453 while (*listp)
1454 {
1455 if (XEXP (*listp, 0) == insn)
1456 {
1457 remove_free_INSN_LIST_node (listp);
1458 remove_free_EXPR_LIST_node (exprp);
1459 removed++;
1460 continue;
1461 }
1462
1463 listp = &XEXP (*listp, 1);
1464 exprp = &XEXP (*exprp, 1);
1465 }
1466
1467 return removed;
1468 }
1469
1470 /* Clear all dependencies for an insn. */
1471 static void
1472 delete_all_dependences (rtx insn)
1473 {
1474 sd_iterator_def sd_it;
1475 dep_t dep;
1476
1477 /* The loop below could be optimized to clear the caches and back_deps
1478 in one call, but that would duplicate code from
1479 sd_delete_dep (). */
1480
1481 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1482 sd_iterator_cond (&sd_it, &dep);)
1483 sd_delete_dep (sd_it);
1484 }
1485
1486 /* All insns in a scheduling group except the first should only have
1487 dependencies on the previous insn in the group. So we find the
1488 first instruction in the scheduling group by walking the dependence
1489 chains backwards. Then we add the dependencies for the group to
1490 the previous nonnote insn. */
1491
1492 static void
1493 fixup_sched_groups (rtx insn)
1494 {
1495 sd_iterator_def sd_it;
1496 dep_t dep;
1497 rtx prev_nonnote;
1498
1499 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1500 {
1501 rtx i = insn;
1502 rtx pro = DEP_PRO (dep);
1503
1504 do
1505 {
1506 i = prev_nonnote_insn (i);
1507
1508 if (pro == i)
1509 goto next_link;
1510 } while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));
1511
1512 if (! sched_insns_conditions_mutex_p (i, pro))
1513 add_dependence (i, pro, DEP_TYPE (dep));
1514 next_link:;
1515 }
1516
1517 delete_all_dependences (insn);
1518
1519 prev_nonnote = prev_nonnote_nondebug_insn (insn);
1520 if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
1521 && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
1522 add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
1523 }
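
/* For illustration: in a group I1, I2, I3 (SCHED_GROUP_P set on I2 and
   I3), a dependence of I3 on some insn X outside the group is
   re-attached by fixup_sched_groups to I1, and I3 itself ends up
   depending only on its predecessor I2, so the group moves as a unit.  */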
1524 \f
1525 /* Process an insn's memory dependencies. There are four kinds of
1526 dependencies:
1527
1528 (0) read dependence: read follows read
1529 (1) true dependence: read follows write
1530 (2) output dependence: write follows write
1531 (3) anti dependence: write follows read
1532
1533 We are careful to build only dependencies which actually exist, and
1534 use transitivity to avoid building too many links. */
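
/* For illustration (pseudo RTL, target-independent):

     (1) true dependence:    store [a] ; load  [a]   -- read follows write
     (2) output dependence:  store [a] ; store [a]   -- write follows write
     (3) anti dependence:    load  [a] ; store [a]   -- write follows read

   while a read following a read (0) needs no ordering edge by itself.  */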
1535
1536 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1537 The MEM is a memory reference contained within INSN, which we are saving
1538 so that we can do memory aliasing on it. */
1539
1540 static void
1541 add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
1542 rtx insn, rtx mem)
1543 {
1544 rtx *insn_list;
1545 rtx *mem_list;
1546 rtx link;
1547
1548 gcc_assert (!deps->readonly);
1549 if (read_p)
1550 {
1551 insn_list = &deps->pending_read_insns;
1552 mem_list = &deps->pending_read_mems;
1553 if (!DEBUG_INSN_P (insn))
1554 deps->pending_read_list_length++;
1555 }
1556 else
1557 {
1558 insn_list = &deps->pending_write_insns;
1559 mem_list = &deps->pending_write_mems;
1560 deps->pending_write_list_length++;
1561 }
1562
1563 link = alloc_INSN_LIST (insn, *insn_list);
1564 *insn_list = link;
1565
1566 if (sched_deps_info->use_cselib)
1567 {
1568 mem = shallow_copy_rtx (mem);
1569 XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0), GET_MODE (mem));
1570 }
1571 link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
1572 *mem_list = link;
1573 }
1574
1575 /* Make a dependency between every memory reference on the pending lists
1576 and INSN, thus flushing the pending lists. FOR_READ is true if emitting
1577 dependencies for a read operation, similarly with FOR_WRITE. */
1578
1579 static void
1580 flush_pending_lists (struct deps_desc *deps, rtx insn, int for_read,
1581 int for_write)
1582 {
1583 if (for_write)
1584 {
1585 add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1586 1, REG_DEP_ANTI);
1587 if (!deps->readonly)
1588 {
1589 free_EXPR_LIST_list (&deps->pending_read_mems);
1590 deps->pending_read_list_length = 0;
1591 }
1592 }
1593
1594 add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
1595 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
1596
1597 add_dependence_list_and_free (deps, insn,
1598 &deps->last_pending_memory_flush, 1,
1599 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
1600 if (!deps->readonly)
1601 {
1602 free_EXPR_LIST_list (&deps->pending_write_mems);
1603 deps->pending_write_list_length = 0;
1604
1605 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1606 deps->pending_flush_length = 1;
1607 }
1608 }
1609 \f
1610 /* The instruction whose dependencies we are currently analyzing. */
1611 static rtx cur_insn = NULL_RTX;
1612
1613 /* Implement hooks for haifa scheduler. */
1614
1615 static void
1616 haifa_start_insn (rtx insn)
1617 {
1618 gcc_assert (insn && !cur_insn);
1619
1620 cur_insn = insn;
1621 }
1622
1623 static void
1624 haifa_finish_insn (void)
1625 {
1626 cur_insn = NULL;
1627 }
1628
1629 void
1630 haifa_note_reg_set (int regno)
1631 {
1632 SET_REGNO_REG_SET (reg_pending_sets, regno);
1633 }
1634
1635 void
1636 haifa_note_reg_clobber (int regno)
1637 {
1638 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1639 }
1640
1641 void
1642 haifa_note_reg_use (int regno)
1643 {
1644 SET_REGNO_REG_SET (reg_pending_uses, regno);
1645 }
1646
1647 static void
1648 haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds)
1649 {
1650 if (!(ds & SPECULATIVE))
1651 {
1652 mem = NULL_RTX;
1653 pending_mem = NULL_RTX;
1654 }
1655 else
1656 gcc_assert (ds & BEGIN_DATA);
1657
1658 {
1659 dep_def _dep, *dep = &_dep;
1660
1661 init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
1662 current_sched_info->flags & USE_DEPS_LIST ? ds : -1);
1663 maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1664 }
1665
1666 }
1667
1668 static void
1669 haifa_note_dep (rtx elem, ds_t ds)
1670 {
1671 dep_def _dep;
1672 dep_t dep = &_dep;
1673
1674 init_dep (dep, elem, cur_insn, ds_to_dt (ds));
1675 maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1676 }
1677
1678 static void
1679 note_reg_use (int r)
1680 {
1681 if (sched_deps_info->note_reg_use)
1682 sched_deps_info->note_reg_use (r);
1683 }
1684
1685 static void
1686 note_reg_set (int r)
1687 {
1688 if (sched_deps_info->note_reg_set)
1689 sched_deps_info->note_reg_set (r);
1690 }
1691
1692 static void
1693 note_reg_clobber (int r)
1694 {
1695 if (sched_deps_info->note_reg_clobber)
1696 sched_deps_info->note_reg_clobber (r);
1697 }
1698
1699 static void
1700 note_mem_dep (rtx m1, rtx m2, rtx e, ds_t ds)
1701 {
1702 if (sched_deps_info->note_mem_dep)
1703 sched_deps_info->note_mem_dep (m1, m2, e, ds);
1704 }
1705
1706 static void
1707 note_dep (rtx e, ds_t ds)
1708 {
1709 if (sched_deps_info->note_dep)
1710 sched_deps_info->note_dep (e, ds);
1711 }
1712
1713 /* Return the reg_note kind corresponding to DS. */
1714 enum reg_note
1715 ds_to_dt (ds_t ds)
1716 {
1717 if (ds & DEP_TRUE)
1718 return REG_DEP_TRUE;
1719 else if (ds & DEP_OUTPUT)
1720 return REG_DEP_OUTPUT;
1721 else
1722 {
1723 gcc_assert (ds & DEP_ANTI);
1724 return REG_DEP_ANTI;
1725 }
1726 }
1727
1728 \f
1729
1730 /* Functions for computation of info needed for register pressure
1731 sensitive insn scheduling. */
1732
1733
1734 /* Allocate and return reg_use_data structure for REGNO and INSN. */
1735 static struct reg_use_data *
1736 create_insn_reg_use (int regno, rtx insn)
1737 {
1738 struct reg_use_data *use;
1739
1740 use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1741 use->regno = regno;
1742 use->insn = insn;
1743 use->next_insn_use = INSN_REG_USE_LIST (insn);
1744 INSN_REG_USE_LIST (insn) = use;
1745 return use;
1746 }
1747
1748 /* Allocate and return reg_set_data structure for REGNO and INSN. */
1749 static struct reg_set_data *
1750 create_insn_reg_set (int regno, rtx insn)
1751 {
1752 struct reg_set_data *set;
1753
1754 set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1755 set->regno = regno;
1756 set->insn = insn;
1757 set->next_insn_set = INSN_REG_SET_LIST (insn);
1758 INSN_REG_SET_LIST (insn) = set;
1759 return set;
1760 }
1761
1762 /* Set up insn register uses for INSN and dependency context DEPS. */
1763 static void
1764 setup_insn_reg_uses (struct deps_desc *deps, rtx insn)
1765 {
1766 unsigned i;
1767 reg_set_iterator rsi;
1768 rtx list;
1769 struct reg_use_data *use, *use2, *next;
1770 struct deps_reg *reg_last;
1771
1772 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1773 {
1774 if (i < FIRST_PSEUDO_REGISTER
1775 && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1776 continue;
1777
1778 if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1779 && ! REGNO_REG_SET_P (reg_pending_sets, i)
1780 && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
1781 /* Ignore use which is not dying. */
1782 continue;
1783
1784 use = create_insn_reg_use (i, insn);
1785 use->next_regno_use = use;
1786 reg_last = &deps->reg_last[i];
1787
1788 /* Create the circular list of uses. */
1789 for (list = reg_last->uses; list; list = XEXP (list, 1))
1790 {
1791 use2 = create_insn_reg_use (i, XEXP (list, 0));
1792 next = use->next_regno_use;
1793 use->next_regno_use = use2;
1794 use2->next_regno_use = next;
1795 }
1796 }
1797 }
1798
1799 /* Register pressure info for the currently processed insn. */
1800 static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
1801
1802 /* Return TRUE if INSN has the use structure for REGNO. */
1803 static bool
1804 insn_use_p (rtx insn, int regno)
1805 {
1806 struct reg_use_data *use;
1807
1808 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1809 if (use->regno == regno)
1810 return true;
1811 return false;
1812 }
1813
1814 /* Update the register pressure info after birth of pseudo register REGNO
1815 in INSN. Arguments CLOBBER_P and UNUSED_P say, respectively, that
1816 the register is set by a clobber or that it is unused after the insn. */
1817 static void
1818 mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
1819 {
1820 int incr, new_incr;
1821 enum reg_class cl;
1822
1823 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
1824 cl = sched_regno_cover_class[regno];
1825 if (cl != NO_REGS)
1826 {
1827 incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
1828 if (clobber_p)
1829 {
1830 new_incr = reg_pressure_info[cl].clobber_increase + incr;
1831 reg_pressure_info[cl].clobber_increase = new_incr;
1832 }
1833 else if (unused_p)
1834 {
1835 new_incr = reg_pressure_info[cl].unused_set_increase + incr;
1836 reg_pressure_info[cl].unused_set_increase = new_incr;
1837 }
1838 else
1839 {
1840 new_incr = reg_pressure_info[cl].set_increase + incr;
1841 reg_pressure_info[cl].set_increase = new_incr;
1842 if (! insn_use_p (insn, regno))
1843 reg_pressure_info[cl].change += incr;
1844 create_insn_reg_set (regno, insn);
1845 }
1846 gcc_assert (new_incr < (1 << INCREASE_BITS));
1847 }
1848 }
1849
1850 /* Like mark_insn_pseudo_birth except that NREGS says how many hard
1851 registers are involved in the birth. */
1852 static void
1853 mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
1854 bool clobber_p, bool unused_p)
1855 {
1856 enum reg_class cl;
1857 int new_incr, last = regno + nregs;
1858
1859 while (regno < last)
1860 {
1861 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
1862 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
1863 {
1864 cl = sched_regno_cover_class[regno];
1865 if (cl != NO_REGS)
1866 {
1867 if (clobber_p)
1868 {
1869 new_incr = reg_pressure_info[cl].clobber_increase + 1;
1870 reg_pressure_info[cl].clobber_increase = new_incr;
1871 }
1872 else if (unused_p)
1873 {
1874 new_incr = reg_pressure_info[cl].unused_set_increase + 1;
1875 reg_pressure_info[cl].unused_set_increase = new_incr;
1876 }
1877 else
1878 {
1879 new_incr = reg_pressure_info[cl].set_increase + 1;
1880 reg_pressure_info[cl].set_increase = new_incr;
1881 if (! insn_use_p (insn, regno))
1882 reg_pressure_info[cl].change += 1;
1883 create_insn_reg_set (regno, insn);
1884 }
1885 gcc_assert (new_incr < (1 << INCREASE_BITS));
1886 }
1887 }
1888 regno++;
1889 }
1890 }
1891
1892 /* Update the register pressure info after birth of pseudo or hard
1893 register REG in INSN. Arguments CLOBBER_P and UNUSED_P say,
1894 respectively, that the register is set by a clobber or that it is
1895 unused after the insn. */
1896 static void
1897 mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
1898 {
1899 int regno;
1900
1901 if (GET_CODE (reg) == SUBREG)
1902 reg = SUBREG_REG (reg);
1903
1904 if (! REG_P (reg))
1905 return;
1906
1907 regno = REGNO (reg);
1908 if (regno < FIRST_PSEUDO_REGISTER)
1909 mark_insn_hard_regno_birth (insn, regno,
1910 hard_regno_nregs[regno][GET_MODE (reg)],
1911 clobber_p, unused_p);
1912 else
1913 mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
1914 }
1915
1916 /* Update the register pressure info after death of pseudo register
1917 REGNO. */
1918 static void
1919 mark_pseudo_death (int regno)
1920 {
1921 int incr;
1922 enum reg_class cl;
1923
1924 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
1925 cl = sched_regno_cover_class[regno];
1926 if (cl != NO_REGS)
1927 {
1928 incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
1929 reg_pressure_info[cl].change -= incr;
1930 }
1931 }
1932
1933 /* Like mark_pseudo_death except that NREGS says how many hard
1934 registers are involved in the death. */
1935 static void
1936 mark_hard_regno_death (int regno, int nregs)
1937 {
1938 enum reg_class cl;
1939 int last = regno + nregs;
1940
1941 while (regno < last)
1942 {
1943 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
1944 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
1945 {
1946 cl = sched_regno_cover_class[regno];
1947 if (cl != NO_REGS)
1948 reg_pressure_info[cl].change -= 1;
1949 }
1950 regno++;
1951 }
1952 }
1953
1954 /* Update the register pressure info after death of pseudo or hard
1955 register REG. */
1956 static void
1957 mark_reg_death (rtx reg)
1958 {
1959 int regno;
1960
1961 if (GET_CODE (reg) == SUBREG)
1962 reg = SUBREG_REG (reg);
1963
1964 if (! REG_P (reg))
1965 return;
1966
1967 regno = REGNO (reg);
1968 if (regno < FIRST_PSEUDO_REGISTER)
1969 mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]);
1970 else
1971 mark_pseudo_death (regno);
1972 }
1973
1974 /* Process SETTER of REG. DATA is an insn containing the setter. */
1975 static void
1976 mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
1977 {
1978 if (setter != NULL_RTX && GET_CODE (setter) != SET)
1979 return;
1980 mark_insn_reg_birth
1981 ((rtx) data, reg, false,
1982 find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
1983 }
1984
1985 /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs. */
1986 static void
1987 mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
1988 {
1989 if (GET_CODE (setter) == CLOBBER)
1990 mark_insn_reg_birth ((rtx) data, reg, true, false);
1991 }
1992
1993 /* Set up reg pressure info related to INSN. */
1994 static void
1995 setup_insn_reg_pressure_info (rtx insn)
1996 {
1997 int i, len;
1998 enum reg_class cl;
1999 static struct reg_pressure_data *pressure_info;
2000 rtx link;
2001
2002 gcc_assert (sched_pressure_p);
2003
2004 if (! INSN_P (insn))
2005 return;
2006
2007 for (i = 0; i < ira_reg_class_cover_size; i++)
2008 {
2009 cl = ira_reg_class_cover[i];
2010 reg_pressure_info[cl].clobber_increase = 0;
2011 reg_pressure_info[cl].set_increase = 0;
2012 reg_pressure_info[cl].unused_set_increase = 0;
2013 reg_pressure_info[cl].change = 0;
2014 }
2015
2016 note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
2017
2018 note_stores (PATTERN (insn), mark_insn_reg_store, insn);
2019
2020 #ifdef AUTO_INC_DEC
2021 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2022 if (REG_NOTE_KIND (link) == REG_INC)
2023 mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2024 #endif
2025
2026 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2027 if (REG_NOTE_KIND (link) == REG_DEAD)
2028 mark_reg_death (XEXP (link, 0));
2029
2030 len = sizeof (struct reg_pressure_data) * ira_reg_class_cover_size;
2031 pressure_info
2032 = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2033 INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_reg_class_cover_size
2034 * sizeof (int), 1);
2035 for (i = 0; i < ira_reg_class_cover_size; i++)
2036 {
2037 cl = ira_reg_class_cover[i];
2038 pressure_info[i].clobber_increase
2039 = reg_pressure_info[cl].clobber_increase;
2040 pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2041 pressure_info[i].unused_set_increase
2042 = reg_pressure_info[cl].unused_set_increase;
2043 pressure_info[i].change = reg_pressure_info[cl].change;
2044 }
2045 }
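
/* Illustrative sketch only: after setup_insn_reg_pressure_info, the
   per-class data can be read back using the same cover class index as in
   the loop above, e.g.

     struct reg_pressure_data *pd = INSN_REG_PRESSURE (insn);
     int change_for_class_i = pd[i].change;

   where i ranges over 0 .. ira_reg_class_cover_size - 1.  */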
2046
2047
2048 \f
2049
2050 /* Internal variable for the sched_analyze_[12] () functions.
2051 If it is true, sched_analyze_[12] is looking at the outermost
2052 (toplevel) SET.  */
2053 static bool can_start_lhs_rhs_p;
2054
2055 /* Extend reg info for the deps context DEPS given that
2056 we have just generated a register numbered REGNO. */
2057 static void
2058 extend_deps_reg_info (struct deps_desc *deps, int regno)
2059 {
2060 int max_regno = regno + 1;
2061
2062 gcc_assert (!reload_completed);
2063
2064 /* In a readonly context, it would not hurt to extend info,
2065 but it should not be needed. */
2066 if (reload_completed && deps->readonly)
2067 {
2068 deps->max_reg = max_regno;
2069 return;
2070 }
2071
2072 if (max_regno > deps->max_reg)
2073 {
2074 deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2075 max_regno);
2076 memset (&deps->reg_last[deps->max_reg],
2077 0, (max_regno - deps->max_reg)
2078 * sizeof (struct deps_reg));
2079 deps->max_reg = max_regno;
2080 }
2081 }
2082
2083 /* Extends REG_INFO_P if needed. */
2084 void
2085 maybe_extend_reg_info_p (void)
2086 {
2087 /* Extend REG_INFO_P, if needed. */
2088 if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2089 {
2090 size_t new_reg_info_p_size = max_regno + 128;
2091
2092 gcc_assert (!reload_completed && sel_sched_p ());
2093
2094 reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2095 new_reg_info_p_size,
2096 reg_info_p_size,
2097 sizeof (*reg_info_p));
2098 reg_info_p_size = new_reg_info_p_size;
2099 }
2100 }
2101
2102 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2103 The type of the reference is specified by REF and can be SET,
2104 CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
2105
2106 static void
2107 sched_analyze_reg (struct deps_desc *deps, int regno, enum machine_mode mode,
2108 enum rtx_code ref, rtx insn)
2109 {
2110 /* We could emit new pseudos in renaming. Extend the reg structures. */
2111 if (!reload_completed && sel_sched_p ()
2112 && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2113 extend_deps_reg_info (deps, regno);
2114
2115 maybe_extend_reg_info_p ();
2116
2117 /* A hard reg in a wide mode may really be multiple registers.
2118 If so, mark all of them just like the first. */
2119 if (regno < FIRST_PSEUDO_REGISTER)
2120 {
2121 int i = hard_regno_nregs[regno][mode];
2122 if (ref == SET)
2123 {
2124 while (--i >= 0)
2125 note_reg_set (regno + i);
2126 }
2127 else if (ref == USE)
2128 {
2129 while (--i >= 0)
2130 note_reg_use (regno + i);
2131 }
2132 else
2133 {
2134 while (--i >= 0)
2135 note_reg_clobber (regno + i);
2136 }
2137 }
2138
2139 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2140 it does not reload. Ignore these as they have served their
2141 purpose already. */
2142 else if (regno >= deps->max_reg)
2143 {
2144 enum rtx_code code = GET_CODE (PATTERN (insn));
2145 gcc_assert (code == USE || code == CLOBBER);
2146 }
2147
2148 else
2149 {
2150 if (ref == SET)
2151 note_reg_set (regno);
2152 else if (ref == USE)
2153 note_reg_use (regno);
2154 else
2155 note_reg_clobber (regno);
2156
2157 /* Pseudos that are REG_EQUIV to something may be replaced
2158 by that during reloading. We need only add dependencies for
2159 the address in the REG_EQUIV note. */
2160 if (!reload_completed && get_reg_known_equiv_p (regno))
2161 {
2162 rtx t = get_reg_known_value (regno);
2163 if (MEM_P (t))
2164 sched_analyze_2 (deps, XEXP (t, 0), insn);
2165 }
2166
2167 /* Don't let it cross a call after scheduling if it doesn't
2168 already cross one. */
2169 if (REG_N_CALLS_CROSSED (regno) == 0)
2170 {
2171 if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2172 deps->sched_before_next_call
2173 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2174 else
2175 add_dependence_list (insn, deps->last_function_call, 1,
2176 REG_DEP_ANTI);
2177 }
2178 }
2179 }
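
/* For illustration: if pseudo 200 were known to be REG_EQUIV to
   (mem:SI (plus:SI (reg:SI sp) (const_int 16))), then before reload a
   reference to it would, besides the usual note_reg_* handling, only add
   dependencies for the address inside the note (here the stack pointer
   use), as done by the get_reg_known_equiv_p block above.  The pseudo
   number and offset are hypothetical.  */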
2180
2181 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2182 rtx, X, creating all dependencies generated by the write to the
2183 destination of X, and reads of everything mentioned. */
2184
2185 static void
2186 sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn)
2187 {
2188 rtx dest = XEXP (x, 0);
2189 enum rtx_code code = GET_CODE (x);
2190 bool cslr_p = can_start_lhs_rhs_p;
2191
2192 can_start_lhs_rhs_p = false;
2193
2194 gcc_assert (dest);
2195 if (dest == 0)
2196 return;
2197
2198 if (cslr_p && sched_deps_info->start_lhs)
2199 sched_deps_info->start_lhs (dest);
2200
2201 if (GET_CODE (dest) == PARALLEL)
2202 {
2203 int i;
2204
2205 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2206 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2207 sched_analyze_1 (deps,
2208 gen_rtx_CLOBBER (VOIDmode,
2209 XEXP (XVECEXP (dest, 0, i), 0)),
2210 insn);
2211
2212 if (cslr_p && sched_deps_info->finish_lhs)
2213 sched_deps_info->finish_lhs ();
2214
2215 if (code == SET)
2216 {
2217 can_start_lhs_rhs_p = cslr_p;
2218
2219 sched_analyze_2 (deps, SET_SRC (x), insn);
2220
2221 can_start_lhs_rhs_p = false;
2222 }
2223
2224 return;
2225 }
2226
2227 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2228 || GET_CODE (dest) == ZERO_EXTRACT)
2229 {
2230 if (GET_CODE (dest) == STRICT_LOW_PART
2231 || GET_CODE (dest) == ZERO_EXTRACT
2232 || df_read_modify_subreg_p (dest))
2233 {
2234 /* These both read and modify the result. We must handle
2235 them as writes to get proper dependencies for following
2236 instructions. We must handle them as reads to get proper
2237 dependencies from this to previous instructions.
2238 Thus we need to call sched_analyze_2. */
2239
2240 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2241 }
2242 if (GET_CODE (dest) == ZERO_EXTRACT)
2243 {
2244 /* The second and third arguments are values read by this insn. */
2245 sched_analyze_2 (deps, XEXP (dest, 1), insn);
2246 sched_analyze_2 (deps, XEXP (dest, 2), insn);
2247 }
2248 dest = XEXP (dest, 0);
2249 }
2250
2251 if (REG_P (dest))
2252 {
2253 int regno = REGNO (dest);
2254 enum machine_mode mode = GET_MODE (dest);
2255
2256 sched_analyze_reg (deps, regno, mode, code, insn);
2257
2258 #ifdef STACK_REGS
2259 /* Treat all writes to a stack register as modifying the TOS. */
2260 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2261 {
2262 int nregs;
2263
2264 /* Avoid analyzing the same register twice. */
2265 if (regno != FIRST_STACK_REG)
2266 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2267
2268 nregs = hard_regno_nregs[FIRST_STACK_REG][mode];
2269 while (--nregs >= 0)
2270 SET_HARD_REG_BIT (implicit_reg_pending_uses,
2271 FIRST_STACK_REG + nregs);
2272 }
2273 #endif
2274 }
2275 else if (MEM_P (dest))
2276 {
2277 /* Writing memory. */
2278 rtx t = dest;
2279
2280 if (sched_deps_info->use_cselib)
2281 {
2282 enum machine_mode address_mode
2283 = targetm.addr_space.address_mode (MEM_ADDR_SPACE (dest));
2284
2285 t = shallow_copy_rtx (dest);
2286 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2287 GET_MODE (t), insn);
2288 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0), GET_MODE (t));
2289 }
2290 t = canon_rtx (t);
2291
2292 /* Pending lists can't get larger with a readonly context. */
2293 if (!deps->readonly
2294 && ((deps->pending_read_list_length + deps->pending_write_list_length)
2295 > MAX_PENDING_LIST_LENGTH))
2296 {
2297 /* Flush all pending reads and writes to prevent the pending lists
2298 from getting any larger. Insn scheduling runs too slowly when
2299 these lists get long. When compiling GCC with itself,
2300 this flush occurs 8 times for sparc, and 10 times for m88k using
2301 the default value of 32. */
2302 flush_pending_lists (deps, insn, false, true);
2303 }
2304 else
2305 {
2306 rtx pending, pending_mem;
2307
2308 pending = deps->pending_read_insns;
2309 pending_mem = deps->pending_read_mems;
2310 while (pending)
2311 {
2312 if (anti_dependence (XEXP (pending_mem, 0), t)
2313 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2314 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2315 DEP_ANTI);
2316
2317 pending = XEXP (pending, 1);
2318 pending_mem = XEXP (pending_mem, 1);
2319 }
2320
2321 pending = deps->pending_write_insns;
2322 pending_mem = deps->pending_write_mems;
2323 while (pending)
2324 {
2325 if (output_dependence (XEXP (pending_mem, 0), t)
2326 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2327 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2328 DEP_OUTPUT);
2329
2330 pending = XEXP (pending, 1);
2331 pending_mem = XEXP (pending_mem, 1);
2332 }
2333
2334 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2335 REG_DEP_ANTI);
2336
2337 if (!deps->readonly)
2338 add_insn_mem_dependence (deps, false, insn, dest);
2339 }
2340 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2341 }
2342
2343 if (cslr_p && sched_deps_info->finish_lhs)
2344 sched_deps_info->finish_lhs ();
2345
2346 /* Analyze reads. */
2347 if (GET_CODE (x) == SET)
2348 {
2349 can_start_lhs_rhs_p = cslr_p;
2350
2351 sched_analyze_2 (deps, SET_SRC (x), insn);
2352
2353 can_start_lhs_rhs_p = false;
2354 }
2355 }
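
/* For illustration: a destination such as

     (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 0))

   both reads and writes register 100, so the loop above analyzes the
   inner register as a read via sched_analyze_2 before it is recorded as
   a write, and the width and position operands are analyzed as ordinary
   reads.  The numbers are made up.  */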
2356
2357 /* Analyze the uses of memory and registers in rtx X in INSN. */
2358 static void
2359 sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn)
2360 {
2361 int i;
2362 int j;
2363 enum rtx_code code;
2364 const char *fmt;
2365 bool cslr_p = can_start_lhs_rhs_p;
2366
2367 can_start_lhs_rhs_p = false;
2368
2369 gcc_assert (x);
2370 if (x == 0)
2371 return;
2372
2373 if (cslr_p && sched_deps_info->start_rhs)
2374 sched_deps_info->start_rhs (x);
2375
2376 code = GET_CODE (x);
2377
2378 switch (code)
2379 {
2380 case CONST_INT:
2381 case CONST_DOUBLE:
2382 case CONST_FIXED:
2383 case CONST_VECTOR:
2384 case SYMBOL_REF:
2385 case CONST:
2386 case LABEL_REF:
2387 /* Ignore constants. */
2388 if (cslr_p && sched_deps_info->finish_rhs)
2389 sched_deps_info->finish_rhs ();
2390
2391 return;
2392
2393 #ifdef HAVE_cc0
2394 case CC0:
2395 /* User of CC0 depends on immediately preceding insn. */
2396 SCHED_GROUP_P (insn) = 1;
2397 /* Don't move the CC0 setter to another block (it can set up the
2398 same flag for previous CC0 users, which is safe).  */
2399 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2400
2401 if (cslr_p && sched_deps_info->finish_rhs)
2402 sched_deps_info->finish_rhs ();
2403
2404 return;
2405 #endif
2406
2407 case REG:
2408 {
2409 int regno = REGNO (x);
2410 enum machine_mode mode = GET_MODE (x);
2411
2412 sched_analyze_reg (deps, regno, mode, USE, insn);
2413
2414 #ifdef STACK_REGS
2415 /* Treat all reads of a stack register as modifying the TOS. */
2416 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2417 {
2418 /* Avoid analyzing the same register twice. */
2419 if (regno != FIRST_STACK_REG)
2420 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2421 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2422 }
2423 #endif
2424
2425 if (cslr_p && sched_deps_info->finish_rhs)
2426 sched_deps_info->finish_rhs ();
2427
2428 return;
2429 }
2430
2431 case MEM:
2432 {
2433 /* Reading memory. */
2434 rtx u;
2435 rtx pending, pending_mem;
2436 rtx t = x;
2437
2438 if (sched_deps_info->use_cselib)
2439 {
2440 enum machine_mode address_mode
2441 = targetm.addr_space.address_mode (MEM_ADDR_SPACE (t));
2442
2443 t = shallow_copy_rtx (t);
2444 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2445 GET_MODE (t), insn);
2446 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0), GET_MODE (t));
2447 }
2448
2449 if (!DEBUG_INSN_P (insn))
2450 {
2451 t = canon_rtx (t);
2452 pending = deps->pending_read_insns;
2453 pending_mem = deps->pending_read_mems;
2454 while (pending)
2455 {
2456 if (read_dependence (XEXP (pending_mem, 0), t)
2457 && ! sched_insns_conditions_mutex_p (insn,
2458 XEXP (pending, 0)))
2459 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2460 DEP_ANTI);
2461
2462 pending = XEXP (pending, 1);
2463 pending_mem = XEXP (pending_mem, 1);
2464 }
2465
2466 pending = deps->pending_write_insns;
2467 pending_mem = deps->pending_write_mems;
2468 while (pending)
2469 {
2470 if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
2471 t, rtx_varies_p)
2472 && ! sched_insns_conditions_mutex_p (insn,
2473 XEXP (pending, 0)))
2474 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2475 sched_deps_info->generate_spec_deps
2476 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2477
2478 pending = XEXP (pending, 1);
2479 pending_mem = XEXP (pending_mem, 1);
2480 }
2481
2482 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2483 {
2484 if (! NON_FLUSH_JUMP_P (u))
2485 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2486 else if (deps_may_trap_p (x))
2487 {
2488 if ((sched_deps_info->generate_spec_deps)
2489 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2490 {
2491 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2492 MAX_DEP_WEAK);
2493
2494 note_dep (XEXP (u, 0), ds);
2495 }
2496 else
2497 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2498 }
2499 }
2500 }
2501
2502 /* Always add these dependencies to pending_reads, since
2503 this insn may be followed by a write. */
2504 if (!deps->readonly)
2505 add_insn_mem_dependence (deps, true, insn, x);
2506
2507 sched_analyze_2 (deps, XEXP (x, 0), insn);
2508
2509 if (cslr_p && sched_deps_info->finish_rhs)
2510 sched_deps_info->finish_rhs ();
2511
2512 return;
2513 }
2514
2515 /* Force pending stores to memory in case a trap handler needs them. */
2516 case TRAP_IF:
2517 flush_pending_lists (deps, insn, true, false);
2518 break;
2519
2520 case PREFETCH:
2521 if (PREFETCH_SCHEDULE_BARRIER_P (x))
2522 reg_pending_barrier = TRUE_BARRIER;
2523 break;
2524
2525 case UNSPEC_VOLATILE:
2526 flush_pending_lists (deps, insn, true, true);
2527 /* FALLTHRU */
2528
2529 case ASM_OPERANDS:
2530 case ASM_INPUT:
2531 {
2532 /* Traditional and volatile asm instructions must be considered to use
2533 and clobber all hard registers, all pseudo-registers and all of
2534 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2535
2536 Consider for instance a volatile asm that changes the fpu rounding
2537 mode. An insn should not be moved across this even if it only uses
2538 pseudo-regs because it might give an incorrectly rounded result. */
2539 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2540 reg_pending_barrier = TRUE_BARRIER;
2541
2542 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2543 We cannot just fall through here since then we would be confused
2544 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2545 a traditional asm unlike its normal usage.  */
2546
2547 if (code == ASM_OPERANDS)
2548 {
2549 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2550 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2551
2552 if (cslr_p && sched_deps_info->finish_rhs)
2553 sched_deps_info->finish_rhs ();
2554
2555 return;
2556 }
2557 break;
2558 }
2559
2560 case PRE_DEC:
2561 case POST_DEC:
2562 case PRE_INC:
2563 case POST_INC:
2564 /* These both read and modify the result. We must handle them as writes
2565 to get proper dependencies for following instructions. We must handle
2566 them as reads to get proper dependencies from this to previous
2567 instructions. Thus we need to pass them to both sched_analyze_1
2568 and sched_analyze_2. We must call sched_analyze_2 first in order
2569 to get the proper antecedent for the read. */
2570 sched_analyze_2 (deps, XEXP (x, 0), insn);
2571 sched_analyze_1 (deps, x, insn);
2572
2573 if (cslr_p && sched_deps_info->finish_rhs)
2574 sched_deps_info->finish_rhs ();
2575
2576 return;
2577
2578 case POST_MODIFY:
2579 case PRE_MODIFY:
2580 /* op0 = op0 + op1 */
2581 sched_analyze_2 (deps, XEXP (x, 0), insn);
2582 sched_analyze_2 (deps, XEXP (x, 1), insn);
2583 sched_analyze_1 (deps, x, insn);
2584
2585 if (cslr_p && sched_deps_info->finish_rhs)
2586 sched_deps_info->finish_rhs ();
2587
2588 return;
2589
2590 default:
2591 break;
2592 }
2593
2594 /* Other cases: walk the insn. */
2595 fmt = GET_RTX_FORMAT (code);
2596 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2597 {
2598 if (fmt[i] == 'e')
2599 sched_analyze_2 (deps, XEXP (x, i), insn);
2600 else if (fmt[i] == 'E')
2601 for (j = 0; j < XVECLEN (x, i); j++)
2602 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2603 }
2604
2605 if (cslr_p && sched_deps_info->finish_rhs)
2606 sched_deps_info->finish_rhs ();
2607 }
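
/* For illustration: an address such as (post_inc:SI (reg:SI 4)) found
   inside a MEM is handled by the PRE_/POST_ cases above, which first call
   sched_analyze_2 on the register (the read of the old value) and then
   sched_analyze_1 on the whole rtx (the write of the updated value).
   The register number is made up.  */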
2608
2609 /* Analyze an INSN with pattern X to find all dependencies. */
2610 static void
2611 sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
2612 {
2613 RTX_CODE code = GET_CODE (x);
2614 rtx link;
2615 unsigned i;
2616 reg_set_iterator rsi;
2617
2618 if (! reload_completed)
2619 {
2620 HARD_REG_SET temp;
2621
2622 extract_insn (insn);
2623 preprocess_constraints ();
2624 ira_implicitly_set_insn_hard_regs (&temp);
2625 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
2626 IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2627 }
2628
2629 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2630 && code == SET);
2631
2632 if (may_trap_p (x))
2633 /* Avoid moving trapping instructions across function calls that might
2634 not always return. */
2635 add_dependence_list (insn, deps->last_function_call_may_noreturn,
2636 1, REG_DEP_ANTI);
2637
2638 if (code == COND_EXEC)
2639 {
2640 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2641
2642 /* ??? Should be recording conditions so we reduce the number of
2643 false dependencies. */
2644 x = COND_EXEC_CODE (x);
2645 code = GET_CODE (x);
2646 }
2647 if (code == SET || code == CLOBBER)
2648 {
2649 sched_analyze_1 (deps, x, insn);
2650
2651 /* Bare clobber insns are used for letting life analysis, reg-stack
2652 and others know that a value is dead. Depend on the last call
2653 instruction so that reg-stack won't get confused. */
2654 if (code == CLOBBER)
2655 add_dependence_list (insn, deps->last_function_call, 1,
2656 REG_DEP_OUTPUT);
2657 }
2658 else if (code == PARALLEL)
2659 {
2660 for (i = XVECLEN (x, 0); i--;)
2661 {
2662 rtx sub = XVECEXP (x, 0, i);
2663 code = GET_CODE (sub);
2664
2665 if (code == COND_EXEC)
2666 {
2667 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2668 sub = COND_EXEC_CODE (sub);
2669 code = GET_CODE (sub);
2670 }
2671 if (code == SET || code == CLOBBER)
2672 sched_analyze_1 (deps, sub, insn);
2673 else
2674 sched_analyze_2 (deps, sub, insn);
2675 }
2676 }
2677 else
2678 sched_analyze_2 (deps, x, insn);
2679
2680 /* Mark registers CLOBBERED or used by called function. */
2681 if (CALL_P (insn))
2682 {
2683 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2684 {
2685 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2686 sched_analyze_1 (deps, XEXP (link, 0), insn);
2687 else
2688 sched_analyze_2 (deps, XEXP (link, 0), insn);
2689 }
2690 if (find_reg_note (insn, REG_SETJMP, NULL))
2691 reg_pending_barrier = MOVE_BARRIER;
2692 }
2693
2694 if (JUMP_P (insn))
2695 {
2696 rtx next;
2697 next = next_nonnote_nondebug_insn (insn);
2698 if (next && BARRIER_P (next))
2699 reg_pending_barrier = MOVE_BARRIER;
2700 else
2701 {
2702 rtx pending, pending_mem;
2703
2704 if (sched_deps_info->compute_jump_reg_dependencies)
2705 {
2706 regset_head tmp_uses, tmp_sets;
2707 INIT_REG_SET (&tmp_uses);
2708 INIT_REG_SET (&tmp_sets);
2709
2710 (*sched_deps_info->compute_jump_reg_dependencies)
2711 (insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
2712 /* Make latency of jump equal to 0 by using anti-dependence. */
2713 EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi)
2714 {
2715 struct deps_reg *reg_last = &deps->reg_last[i];
2716 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
2717 add_dependence_list (insn, reg_last->implicit_sets,
2718 0, REG_DEP_ANTI);
2719 add_dependence_list (insn, reg_last->clobbers, 0,
2720 REG_DEP_ANTI);
2721
2722 if (!deps->readonly)
2723 {
2724 reg_last->uses_length++;
2725 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2726 }
2727 }
2728 IOR_REG_SET (reg_pending_sets, &tmp_sets);
2729
2730 CLEAR_REG_SET (&tmp_uses);
2731 CLEAR_REG_SET (&tmp_sets);
2732 }
2733
2734 /* All memory writes and volatile reads must happen before the
2735 jump.  Non-volatile reads must happen before the jump iff
2736 the result is needed by the register use mask computed above.  */
2737
2738 pending = deps->pending_write_insns;
2739 pending_mem = deps->pending_write_mems;
2740 while (pending)
2741 {
2742 if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2743 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2744 pending = XEXP (pending, 1);
2745 pending_mem = XEXP (pending_mem, 1);
2746 }
2747
2748 pending = deps->pending_read_insns;
2749 pending_mem = deps->pending_read_mems;
2750 while (pending)
2751 {
2752 if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
2753 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2754 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2755 pending = XEXP (pending, 1);
2756 pending_mem = XEXP (pending_mem, 1);
2757 }
2758
2759 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2760 REG_DEP_ANTI);
2761 }
2762 }
2763
2764 /* If this instruction can throw an exception, then moving it changes
2765 where block boundaries fall. This is mighty confusing elsewhere.
2766 Therefore, prevent such an instruction from being moved. Same for
2767 non-jump instructions that define block boundaries.
2768 ??? Unclear whether this is still necessary in EBB mode. If not,
2769 add_branch_dependences should be adjusted for RGN mode instead. */
2770 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
2771 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
2772 reg_pending_barrier = MOVE_BARRIER;
2773
2774 if (sched_pressure_p)
2775 {
2776 setup_insn_reg_uses (deps, insn);
2777 setup_insn_reg_pressure_info (insn);
2778 }
2779
2780 /* Add register dependencies for insn. */
2781 if (DEBUG_INSN_P (insn))
2782 {
2783 rtx prev = deps->last_debug_insn;
2784 rtx u;
2785
2786 if (!deps->readonly)
2787 deps->last_debug_insn = insn;
2788
2789 if (prev)
2790 add_dependence (insn, prev, REG_DEP_ANTI);
2791
2792 add_dependence_list (insn, deps->last_function_call, 1,
2793 REG_DEP_ANTI);
2794
2795 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2796 if (! NON_FLUSH_JUMP_P (u) || !sel_sched_p ())
2797 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2798
2799 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2800 {
2801 struct deps_reg *reg_last = &deps->reg_last[i];
2802 add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI);
2803 add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI);
2804
2805 if (!deps->readonly)
2806 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2807 }
2808 CLEAR_REG_SET (reg_pending_uses);
2809
2810 /* Quite often, a debug insn will refer to stuff in the
2811 previous instruction, but the reason we want this
2812 dependency here is to make sure the scheduler doesn't
2813 gratuitously move a debug insn ahead. This could dirty
2814 DF flags and cause additional analysis that wouldn't have
2815 occurred in compilation without debug insns, and such
2816 additional analysis can modify the generated code. */
2817 prev = PREV_INSN (insn);
2818
2819 if (prev && NONDEBUG_INSN_P (prev))
2820 add_dependence (insn, prev, REG_DEP_ANTI);
2821 }
2822 else
2823 {
2824 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2825 {
2826 struct deps_reg *reg_last = &deps->reg_last[i];
2827 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2828 add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI);
2829 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2830
2831 if (!deps->readonly)
2832 {
2833 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2834 reg_last->uses_length++;
2835 }
2836 }
2837
2838 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2839 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
2840 {
2841 struct deps_reg *reg_last = &deps->reg_last[i];
2842 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2843 add_dependence_list (insn, reg_last->implicit_sets, 0,
2844 REG_DEP_ANTI);
2845 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2846
2847 if (!deps->readonly)
2848 {
2849 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2850 reg_last->uses_length++;
2851 }
2852 }
2853
2854 /* If the current insn is conditional, we can't free any
2855 of the lists. */
2856 if (sched_has_condition_p (insn))
2857 {
2858 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2859 {
2860 struct deps_reg *reg_last = &deps->reg_last[i];
2861 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2862 add_dependence_list (insn, reg_last->implicit_sets, 0,
2863 REG_DEP_ANTI);
2864 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2865
2866 if (!deps->readonly)
2867 {
2868 reg_last->clobbers
2869 = alloc_INSN_LIST (insn, reg_last->clobbers);
2870 reg_last->clobbers_length++;
2871 }
2872 }
2873 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2874 {
2875 struct deps_reg *reg_last = &deps->reg_last[i];
2876 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2877 add_dependence_list (insn, reg_last->implicit_sets, 0,
2878 REG_DEP_ANTI);
2879 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
2880 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2881
2882 if (!deps->readonly)
2883 {
2884 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2885 SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
2886 }
2887 }
2888 }
2889 else
2890 {
2891 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2892 {
2893 struct deps_reg *reg_last = &deps->reg_last[i];
2894 if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
2895 || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
2896 {
2897 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
2898 REG_DEP_OUTPUT);
2899 add_dependence_list_and_free (deps, insn,
2900 &reg_last->implicit_sets, 0,
2901 REG_DEP_ANTI);
2902 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
2903 REG_DEP_ANTI);
2904 add_dependence_list_and_free
2905 (deps, insn, &reg_last->clobbers, 0, REG_DEP_OUTPUT);
2906
2907 if (!deps->readonly)
2908 {
2909 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2910 reg_last->clobbers_length = 0;
2911 reg_last->uses_length = 0;
2912 }
2913 }
2914 else
2915 {
2916 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2917 add_dependence_list (insn, reg_last->implicit_sets, 0,
2918 REG_DEP_ANTI);
2919 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2920 }
2921
2922 if (!deps->readonly)
2923 {
2924 reg_last->clobbers_length++;
2925 reg_last->clobbers
2926 = alloc_INSN_LIST (insn, reg_last->clobbers);
2927 }
2928 }
2929 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2930 {
2931 struct deps_reg *reg_last = &deps->reg_last[i];
2932
2933 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
2934 REG_DEP_OUTPUT);
2935 add_dependence_list_and_free (deps, insn,
2936 &reg_last->implicit_sets,
2937 0, REG_DEP_ANTI);
2938 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
2939 REG_DEP_OUTPUT);
2940 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
2941 REG_DEP_ANTI);
2942
2943 if (!deps->readonly)
2944 {
2945 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2946 reg_last->uses_length = 0;
2947 reg_last->clobbers_length = 0;
2948 CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);
2949 }
2950 }
2951 }
2952 }
2953
2954 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2955 if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
2956 {
2957 struct deps_reg *reg_last = &deps->reg_last[i];
2958 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
2959 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
2960 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2961
2962 if (!deps->readonly)
2963 reg_last->implicit_sets
2964 = alloc_INSN_LIST (insn, reg_last->implicit_sets);
2965 }
2966
2967 if (!deps->readonly)
2968 {
2969 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
2970 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
2971 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
2972 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2973 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
2974 || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
2975 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
2976
2977 /* Set up the pending barrier found. */
2978 deps->last_reg_pending_barrier = reg_pending_barrier;
2979 }
2980
2981 CLEAR_REG_SET (reg_pending_uses);
2982 CLEAR_REG_SET (reg_pending_clobbers);
2983 CLEAR_REG_SET (reg_pending_sets);
2984 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
2985 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
2986
2987 /* Add dependencies if a scheduling barrier was found. */
2988 if (reg_pending_barrier)
2989 {
2990 /* In the case of a barrier, most of the added dependencies are not
2991 real, so we use anti-dependences here.  */
2992 if (sched_has_condition_p (insn))
2993 {
2994 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
2995 {
2996 struct deps_reg *reg_last = &deps->reg_last[i];
2997 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2998 add_dependence_list (insn, reg_last->sets, 0,
2999 reg_pending_barrier == TRUE_BARRIER
3000 ? REG_DEP_TRUE : REG_DEP_ANTI);
3001 add_dependence_list (insn, reg_last->implicit_sets, 0,
3002 REG_DEP_ANTI);
3003 add_dependence_list (insn, reg_last->clobbers, 0,
3004 reg_pending_barrier == TRUE_BARRIER
3005 ? REG_DEP_TRUE : REG_DEP_ANTI);
3006 }
3007 }
3008 else
3009 {
3010 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3011 {
3012 struct deps_reg *reg_last = &deps->reg_last[i];
3013 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3014 REG_DEP_ANTI);
3015 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3016 reg_pending_barrier == TRUE_BARRIER
3017 ? REG_DEP_TRUE : REG_DEP_ANTI);
3018 add_dependence_list_and_free (deps, insn,
3019 &reg_last->implicit_sets, 0,
3020 REG_DEP_ANTI);
3021 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3022 reg_pending_barrier == TRUE_BARRIER
3023 ? REG_DEP_TRUE : REG_DEP_ANTI);
3024
3025 if (!deps->readonly)
3026 {
3027 reg_last->uses_length = 0;
3028 reg_last->clobbers_length = 0;
3029 }
3030 }
3031 }
3032
3033 if (!deps->readonly)
3034 for (i = 0; i < (unsigned)deps->max_reg; i++)
3035 {
3036 struct deps_reg *reg_last = &deps->reg_last[i];
3037 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3038 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3039 }
3040
3041 /* Flush pending lists on jumps, but not on speculative checks. */
3042 if (JUMP_P (insn) && !(sel_sched_p ()
3043 && sel_insn_is_speculation_check (insn)))
3044 flush_pending_lists (deps, insn, true, true);
3045
3046 if (!deps->readonly)
3047 CLEAR_REG_SET (&deps->reg_conditional_sets);
3048 reg_pending_barrier = NOT_A_BARRIER;
3049 }
3050
3051 /* If a post-call group is still open, see if it should remain so.
3052 This insn must be a simple move of a hard reg to a pseudo or
3053 vice-versa.
3054
3055 We must avoid moving these insns for correctness on targets
3056 with small register classes, and for special registers like
3057 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
3058 hard regs for all targets. */
3059
3060 if (deps->in_post_call_group_p)
3061 {
3062 rtx tmp, set = single_set (insn);
3063 int src_regno, dest_regno;
3064
3065 if (set == NULL)
3066 {
3067 if (DEBUG_INSN_P (insn))
3068 /* We don't want to mark debug insns as part of the same
3069 sched group. We know they really aren't, but if we use
3070 debug insns to tell that a call group is over, we'll
3071 get different code if debug insns are not there and
3072 instructions that follow seem like they should be part
3073 of the call group.
3074
3075 Also, if we did, fixup_sched_groups() would move the
3076 deps of the debug insn to the call insn, modifying
3077 non-debug post-dependency counts of the debug insn
3078 dependencies and otherwise messing with the scheduling
3079 order.
3080
3081 Instead, let such debug insns be scheduled freely, but
3082 keep the call group open in case there are insns that
3083 should be part of it afterwards. Since we grant debug
3084 insns higher priority than even sched group insns, it
3085 will all turn out all right. */
3086 goto debug_dont_end_call_group;
3087 else
3088 goto end_call_group;
3089 }
3090
3091 tmp = SET_DEST (set);
3092 if (GET_CODE (tmp) == SUBREG)
3093 tmp = SUBREG_REG (tmp);
3094 if (REG_P (tmp))
3095 dest_regno = REGNO (tmp);
3096 else
3097 goto end_call_group;
3098
3099 tmp = SET_SRC (set);
3100 if (GET_CODE (tmp) == SUBREG)
3101 tmp = SUBREG_REG (tmp);
3102 if ((GET_CODE (tmp) == PLUS
3103 || GET_CODE (tmp) == MINUS)
3104 && REG_P (XEXP (tmp, 0))
3105 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3106 && dest_regno == STACK_POINTER_REGNUM)
3107 src_regno = STACK_POINTER_REGNUM;
3108 else if (REG_P (tmp))
3109 src_regno = REGNO (tmp);
3110 else
3111 goto end_call_group;
3112
3113 if (src_regno < FIRST_PSEUDO_REGISTER
3114 || dest_regno < FIRST_PSEUDO_REGISTER)
3115 {
3116 if (!deps->readonly
3117 && deps->in_post_call_group_p == post_call_initial)
3118 deps->in_post_call_group_p = post_call;
3119
3120 if (!sel_sched_p () || sched_emulate_haifa_p)
3121 {
3122 SCHED_GROUP_P (insn) = 1;
3123 CANT_MOVE (insn) = 1;
3124 }
3125 }
3126 else
3127 {
3128 end_call_group:
3129 if (!deps->readonly)
3130 deps->in_post_call_group_p = not_post_call;
3131 }
3132 }
3133
3134 debug_dont_end_call_group:
3135 if ((current_sched_info->flags & DO_SPECULATION)
3136 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3137 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3138 be speculated. */
3139 {
3140 if (sel_sched_p ())
3141 sel_mark_hard_insn (insn);
3142 else
3143 {
3144 sd_iterator_def sd_it;
3145 dep_t dep;
3146
3147 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3148 sd_iterator_cond (&sd_it, &dep);)
3149 change_spec_dep_to_hard (sd_it);
3150 }
3151 }
3152 }
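
/* For illustration: right after a call, a copy of a hard return value
   register into a pseudo, e.g.

     (set (reg:SI 100) (reg:SI 0))

   keeps the post-call group open, and (outside of pure selective
   scheduling) SCHED_GROUP_P and CANT_MOVE are set for it so that it stays
   glued to the call.  The register numbers are hypothetical.  */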
3153
3154 /* Return TRUE if INSN might not always return normally (e.g. call exit,
3155 longjmp, loop forever, ...). */
3156 static bool
3157 call_may_noreturn_p (rtx insn)
3158 {
3159 rtx call;
3160
3161 /* const or pure calls that aren't looping will always return. */
3162 if (RTL_CONST_OR_PURE_CALL_P (insn)
3163 && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3164 return false;
3165
3166 call = PATTERN (insn);
3167 if (GET_CODE (call) == PARALLEL)
3168 call = XVECEXP (call, 0, 0);
3169 if (GET_CODE (call) == SET)
3170 call = SET_SRC (call);
3171 if (GET_CODE (call) == CALL
3172 && MEM_P (XEXP (call, 0))
3173 && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3174 {
3175 rtx symbol = XEXP (XEXP (call, 0), 0);
3176 if (SYMBOL_REF_DECL (symbol)
3177 && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3178 {
3179 if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3180 == BUILT_IN_NORMAL)
3181 switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3182 {
3183 case BUILT_IN_BCMP:
3184 case BUILT_IN_BCOPY:
3185 case BUILT_IN_BZERO:
3186 case BUILT_IN_INDEX:
3187 case BUILT_IN_MEMCHR:
3188 case BUILT_IN_MEMCMP:
3189 case BUILT_IN_MEMCPY:
3190 case BUILT_IN_MEMMOVE:
3191 case BUILT_IN_MEMPCPY:
3192 case BUILT_IN_MEMSET:
3193 case BUILT_IN_RINDEX:
3194 case BUILT_IN_STPCPY:
3195 case BUILT_IN_STPNCPY:
3196 case BUILT_IN_STRCAT:
3197 case BUILT_IN_STRCHR:
3198 case BUILT_IN_STRCMP:
3199 case BUILT_IN_STRCPY:
3200 case BUILT_IN_STRCSPN:
3201 case BUILT_IN_STRLEN:
3202 case BUILT_IN_STRNCAT:
3203 case BUILT_IN_STRNCMP:
3204 case BUILT_IN_STRNCPY:
3205 case BUILT_IN_STRPBRK:
3206 case BUILT_IN_STRRCHR:
3207 case BUILT_IN_STRSPN:
3208 case BUILT_IN_STRSTR:
3209 /* Assume certain string/memory builtins always return. */
3210 return false;
3211 default:
3212 break;
3213 }
3214 }
3215 }
3216
3217 /* For all other calls assume that they might not always return. */
3218 return true;
3219 }
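
/* For illustration: a direct call to one of the string/memory builtins
   listed above, e.g. memcpy, makes call_may_noreturn_p return false,
   while an indirect call or a call to an unknown function returns true
   (unless it is a non-looping const or pure call).  Such calls are
   recorded in last_function_call_may_noreturn, and later trapping insns
   gain an anti-dependence on them (see sched_analyze_insn).  */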
3220
3221 /* Analyze INSN with DEPS as a context. */
3222 void
3223 deps_analyze_insn (struct deps_desc *deps, rtx insn)
3224 {
3225 if (sched_deps_info->start_insn)
3226 sched_deps_info->start_insn (insn);
3227
3228 if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn) || JUMP_P (insn))
3229 {
3230 /* Make each JUMP_INSN (but not a speculative check)
3231 a scheduling barrier for memory references. */
3232 if (!deps->readonly
3233 && JUMP_P (insn)
3234 && !(sel_sched_p ()
3235 && sel_insn_is_speculation_check (insn)))
3236 {
3237 /* Keep the list a reasonable size. */
3238 if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
3239 flush_pending_lists (deps, insn, true, true);
3240 else
3241 {
3242 deps->last_pending_memory_flush
3243 = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
3244 /* Signal to sched_analyze_insn that this jump stands
3245 just for itself, not for any other pending memory
3246 reads/writes that flush_pending_lists had to flush.  */
3247 PUT_REG_NOTE_KIND (deps->last_pending_memory_flush,
3248 NON_FLUSH_JUMP_KIND);
3249 }
3250 }
3251
3252 sched_analyze_insn (deps, PATTERN (insn), insn);
3253 }
3254 else if (CALL_P (insn))
3255 {
3256 int i;
3257
3258 CANT_MOVE (insn) = 1;
3259
3260 if (find_reg_note (insn, REG_SETJMP, NULL))
3261 {
3262 /* This is setjmp. Assume that all registers, not just
3263 hard registers, may be clobbered by this call. */
3264 reg_pending_barrier = MOVE_BARRIER;
3265 }
3266 else
3267 {
3268 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3269 /* A call may read and modify global register variables. */
3270 if (global_regs[i])
3271 {
3272 SET_REGNO_REG_SET (reg_pending_sets, i);
3273 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3274 }
3275 /* Other call-clobbered hard regs may be clobbered.
3276 Since we only have a choice between 'might be clobbered'
3277 and 'definitely not clobbered', we must include all
3278 partly call-clobbered registers here. */
3279 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3280 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3281 SET_REGNO_REG_SET (reg_pending_clobbers, i);
3282 /* We don't know what set of fixed registers might be used
3283 by the function, but it is certain that the stack pointer
3284 is among them, so be conservative.  */
3285 else if (fixed_regs[i])
3286 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3287 /* The frame pointer is normally not used by the function
3288 itself, but by the debugger. */
3289 /* ??? MIPS o32 is an exception. It uses the frame pointer
3290 in the macro expansion of jal but does not represent this
3291 fact in the call_insn rtl. */
3292 else if (i == FRAME_POINTER_REGNUM
3293 || (i == HARD_FRAME_POINTER_REGNUM
3294 && (! reload_completed || frame_pointer_needed)))
3295 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3296 }
3297
3298 /* For each insn which shouldn't cross a call, add a dependence
3299 between that insn and this call insn. */
3300 add_dependence_list_and_free (deps, insn,
3301 &deps->sched_before_next_call, 1,
3302 REG_DEP_ANTI);
3303
3304 sched_analyze_insn (deps, PATTERN (insn), insn);
3305
3306 /* If CALL would be in a sched group, then this will violate
3307 the convention that sched group insns have dependencies only on the
3308 previous instruction.
3309
3310 Of course one can say: "Hey! What about head of the sched group?"
3311 And I will answer: "Basic principles (one dep per insn) are always
3312 the same." */
3313 gcc_assert (!SCHED_GROUP_P (insn));
3314
3315 /* In the absence of interprocedural alias analysis, we must flush
3316 all pending reads and writes, and start new dependencies
3317 from here. But only flush writes for constant calls (which may
3318 be passed a pointer to something we haven't written yet). */
3319 flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3320
3321 if (!deps->readonly)
3322 {
3323 /* Remember the last function call for limiting lifetimes. */
3324 free_INSN_LIST_list (&deps->last_function_call);
3325 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3326
3327 if (call_may_noreturn_p (insn))
3328 {
3329 /* Remember the last function call that might not always return
3330 normally for limiting moves of trapping insns. */
3331 free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3332 deps->last_function_call_may_noreturn
3333 = alloc_INSN_LIST (insn, NULL_RTX);
3334 }
3335
3336 /* Before reload, begin a post-call group, so as to keep the
3337 lifetimes of hard registers correct. */
3338 if (! reload_completed)
3339 deps->in_post_call_group_p = post_call;
3340 }
3341 }
3342
3343 if (sched_deps_info->use_cselib)
3344 cselib_process_insn (insn);
3345
3346 /* EH_REGION insn notes can not appear until well after we complete
3347 scheduling. */
3348 if (NOTE_P (insn))
3349 gcc_assert (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
3350 && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END);
3351
3352 if (sched_deps_info->finish_insn)
3353 sched_deps_info->finish_insn ();
3354
3355 /* Fixup the dependencies in the sched group. */
3356 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3357 && SCHED_GROUP_P (insn) && !sel_sched_p ())
3358 fixup_sched_groups (insn);
3359 }
3360
3361 /* Initialize DEPS for the new block beginning with HEAD. */
3362 void
3363 deps_start_bb (struct deps_desc *deps, rtx head)
3364 {
3365 gcc_assert (!deps->readonly);
3366
3367 /* Before reload, if the previous block ended in a call, show that
3368 we are inside a post-call group, so as to keep the lifetimes of
3369 hard registers correct. */
3370 if (! reload_completed && !LABEL_P (head))
3371 {
3372 rtx insn = prev_nonnote_nondebug_insn (head);
3373
3374 if (insn && CALL_P (insn))
3375 deps->in_post_call_group_p = post_call_initial;
3376 }
3377 }
3378
3379 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3380 dependencies for each insn. */
3381 void
3382 sched_analyze (struct deps_desc *deps, rtx head, rtx tail)
3383 {
3384 rtx insn;
3385
3386 if (sched_deps_info->use_cselib)
3387 cselib_init (CSELIB_RECORD_MEMORY);
3388
3389 deps_start_bb (deps, head);
3390
3391 for (insn = head;; insn = NEXT_INSN (insn))
3392 {
3393
3394 if (INSN_P (insn))
3395 {
3396 /* And initialize deps_lists. */
3397 sd_init_insn (insn);
3398 }
3399
3400 deps_analyze_insn (deps, insn);
3401
3402 if (insn == tail)
3403 {
3404 if (sched_deps_info->use_cselib)
3405 cselib_finish ();
3406 return;
3407 }
3408 }
3409 gcc_unreachable ();
3410 }
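
/* A rough usage sketch (simplified; see e.g. sched-rgn.c for the real
   call sequence): a scheduler analyzes one region roughly as

     struct deps_desc deps;

     init_deps (&deps, false);
     sched_analyze (&deps, head, tail);
     free_deps (&deps);

   with sched_deps_init () / sched_deps_finish () bracketing the whole
   pass.  */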
3411
3412 /* Helper for sched_free_deps ().
3413 Delete INSN's (RESOLVED_P) backward dependencies. */
3414 static void
3415 delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
3416 {
3417 sd_iterator_def sd_it;
3418 dep_t dep;
3419 sd_list_types_def types;
3420
3421 if (resolved_p)
3422 types = SD_LIST_RES_BACK;
3423 else
3424 types = SD_LIST_BACK;
3425
3426 for (sd_it = sd_iterator_start (insn, types);
3427 sd_iterator_cond (&sd_it, &dep);)
3428 {
3429 dep_link_t link = *sd_it.linkp;
3430 dep_node_t node = DEP_LINK_NODE (link);
3431 deps_list_t back_list;
3432 deps_list_t forw_list;
3433
3434 get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3435 remove_from_deps_list (link, back_list);
3436 delete_dep_node (node);
3437 }
3438 }
3439
3440 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3441 deps_lists. */
3442 void
3443 sched_free_deps (rtx head, rtx tail, bool resolved_p)
3444 {
3445 rtx insn;
3446 rtx next_tail = NEXT_INSN (tail);
3447
3448 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3449 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3450 {
3451 /* Clear resolved back deps together with their dep_nodes.  */
3452 delete_dep_nodes_in_back_deps (insn, resolved_p);
3453
3454 /* Clear forward deps and leave the dep_nodes to the
3455 corresponding back_deps list. */
3456 if (resolved_p)
3457 clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3458 else
3459 clear_deps_list (INSN_FORW_DEPS (insn));
3460
3461 sd_finish_insn (insn);
3462 }
3463 }
3464 \f
3465 /* Initialize variables for region data dependence analysis.
3466 When LAZY_REG_LAST is true, do not allocate reg_last array
3467 of struct deps_desc immediately. */
3468
3469 void
3470 init_deps (struct deps_desc *deps, bool lazy_reg_last)
3471 {
3472 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3473
3474 deps->max_reg = max_reg;
3475 if (lazy_reg_last)
3476 deps->reg_last = NULL;
3477 else
3478 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3479 INIT_REG_SET (&deps->reg_last_in_use);
3480 INIT_REG_SET (&deps->reg_conditional_sets);
3481
3482 deps->pending_read_insns = 0;
3483 deps->pending_read_mems = 0;
3484 deps->pending_write_insns = 0;
3485 deps->pending_write_mems = 0;
3486 deps->pending_read_list_length = 0;
3487 deps->pending_write_list_length = 0;
3488 deps->pending_flush_length = 0;
3489 deps->last_pending_memory_flush = 0;
3490 deps->last_function_call = 0;
3491 deps->last_function_call_may_noreturn = 0;
3492 deps->sched_before_next_call = 0;
3493 deps->in_post_call_group_p = not_post_call;
3494 deps->last_debug_insn = 0;
3495 deps->last_reg_pending_barrier = NOT_A_BARRIER;
3496 deps->readonly = 0;
3497 }
3498
3499 /* Init only the reg_last field of DEPS, which was not allocated before
3500 because we initialized DEPS lazily.  */
3501 void
3502 init_deps_reg_last (struct deps_desc *deps)
3503 {
3504 gcc_assert (deps && deps->max_reg > 0);
3505 gcc_assert (deps->reg_last == NULL);
3506
3507 deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3508 }
3509
3510
3511 /* Free insn lists found in DEPS. */
3512
3513 void
3514 free_deps (struct deps_desc *deps)
3515 {
3516 unsigned i;
3517 reg_set_iterator rsi;
3518
3519 /* We set max_reg to 0 when this context was already freed. */
3520 if (deps->max_reg == 0)
3521 {
3522 gcc_assert (deps->reg_last == NULL);
3523 return;
3524 }
3525 deps->max_reg = 0;
3526
3527 free_INSN_LIST_list (&deps->pending_read_insns);
3528 free_EXPR_LIST_list (&deps->pending_read_mems);
3529 free_INSN_LIST_list (&deps->pending_write_insns);
3530 free_EXPR_LIST_list (&deps->pending_write_mems);
3531 free_INSN_LIST_list (&deps->last_pending_memory_flush);
3532
3533 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3534 times. For a testcase with 42000 regs and 8000 small basic blocks,
3535 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
3536 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3537 {
3538 struct deps_reg *reg_last = &deps->reg_last[i];
3539 if (reg_last->uses)
3540 free_INSN_LIST_list (&reg_last->uses);
3541 if (reg_last->sets)
3542 free_INSN_LIST_list (&reg_last->sets);
3543 if (reg_last->implicit_sets)
3544 free_INSN_LIST_list (&reg_last->implicit_sets);
3545 if (reg_last->clobbers)
3546 free_INSN_LIST_list (&reg_last->clobbers);
3547 }
3548 CLEAR_REG_SET (&deps->reg_last_in_use);
3549 CLEAR_REG_SET (&deps->reg_conditional_sets);
3550
3551 /* As we initialize reg_last lazily, it is possible that we didn't allocate
3552 it at all. */
3553 if (deps->reg_last)
3554 free (deps->reg_last);
3555 deps->reg_last = NULL;
3556
3557 deps = NULL;
3558 }
3559
3560 /* Remove INSN from the dependence context DEPS.  Caution: reg_conditional_sets
3561 is not handled. */
3562 void
3563 remove_from_deps (struct deps_desc *deps, rtx insn)
3564 {
3565 int removed;
3566 unsigned i;
3567 reg_set_iterator rsi;
3568
3569 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
3570 &deps->pending_read_mems);
3571 if (!DEBUG_INSN_P (insn))
3572 deps->pending_read_list_length -= removed;
3573 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
3574 &deps->pending_write_mems);
3575 deps->pending_write_list_length -= removed;
3576 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
3577 deps->pending_flush_length -= removed;
3578
3579 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3580 {
3581 struct deps_reg *reg_last = &deps->reg_last[i];
3582 if (reg_last->uses)
3583 remove_from_dependence_list (insn, &reg_last->uses);
3584 if (reg_last->sets)
3585 remove_from_dependence_list (insn, &reg_last->sets);
3586 if (reg_last->implicit_sets)
3587 remove_from_dependence_list (insn, &reg_last->implicit_sets);
3588 if (reg_last->clobbers)
3589 remove_from_dependence_list (insn, &reg_last->clobbers);
3590 if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
3591 && !reg_last->clobbers)
3592 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
3593 }
3594
3595 if (CALL_P (insn))
3596 {
3597 remove_from_dependence_list (insn, &deps->last_function_call);
3598 remove_from_dependence_list (insn,
3599 &deps->last_function_call_may_noreturn);
3600 }
3601 remove_from_dependence_list (insn, &deps->sched_before_next_call);
3602 }
3603
3604 /* Init deps data vector. */
3605 static void
3606 init_deps_data_vector (void)
3607 {
3608 int reserve = (sched_max_luid + 1
3609 - VEC_length (haifa_deps_insn_data_def, h_d_i_d));
3610 if (reserve > 0
3611 && ! VEC_space (haifa_deps_insn_data_def, h_d_i_d, reserve))
3612 VEC_safe_grow_cleared (haifa_deps_insn_data_def, heap, h_d_i_d,
3613 3 * sched_max_luid / 2);
3614 }
3615
3616 /* If it is profitable to use them, initialize or extend (depending on
3617 GLOBAL_P) dependency data. */
3618 void
3619 sched_deps_init (bool global_p)
3620 {
3621 /* Average number of insns in the basic block.
3622 '+ 1' is used to make it nonzero. */
3623 int insns_in_block = sched_max_luid / n_basic_blocks + 1;
3624
3625 init_deps_data_vector ();
3626
3627 /* We use another caching mechanism for selective scheduling, so
3628 we don't use this one. */
3629 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
3630 {
3631 /* ?!? We could save some memory by computing a per-region luid mapping
3632 which could reduce both the number of vectors in the cache and the
3633 size of each vector. Instead we just avoid the cache entirely unless
3634 the average number of instructions in a basic block is very high. See
3635 the comment before the declaration of true_dependency_cache for
3636 what we consider "very high". */
3637 cache_size = 0;
3638 extend_dependency_caches (sched_max_luid, true);
3639 }
3640
3641 if (global_p)
3642 {
3643 dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
3644 /* Allocate lists for one block at a time. */
3645 insns_in_block);
3646 dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
3647 /* Allocate nodes for one block at a time.
3648 We assume that average insn has
3649 5 producers. */
3650 5 * insns_in_block);
3651 }
3652 }
3653
3654
3655 /* Create or extend (depending on CREATE_P) dependency caches to
3656 size N. */
3657 void
3658 extend_dependency_caches (int n, bool create_p)
3659 {
3660 if (create_p || true_dependency_cache)
3661 {
3662 int i, luid = cache_size + n;
3663
3664 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
3665 luid);
3666 output_dependency_cache = XRESIZEVEC (bitmap_head,
3667 output_dependency_cache, luid);
3668 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
3669 luid);
3670
3671 if (current_sched_info->flags & DO_SPECULATION)
3672 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
3673 luid);
3674
3675 for (i = cache_size; i < luid; i++)
3676 {
3677 bitmap_initialize (&true_dependency_cache[i], 0);
3678 bitmap_initialize (&output_dependency_cache[i], 0);
3679 bitmap_initialize (&anti_dependency_cache[i], 0);
3680
3681 if (current_sched_info->flags & DO_SPECULATION)
3682 bitmap_initialize (&spec_dependency_cache[i], 0);
3683 }
3684 cache_size = luid;
3685 }
3686 }
3687
3688 /* Finalize dependency information for the whole function. */
3689 void
3690 sched_deps_finish (void)
3691 {
3692 gcc_assert (deps_pools_are_empty_p ());
3693 free_alloc_pool_if_empty (&dn_pool);
3694 free_alloc_pool_if_empty (&dl_pool);
3695 gcc_assert (dn_pool == NULL && dl_pool == NULL);
3696
3697 VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d);
3698
3699
3700 if (true_dependency_cache)
3701 {
3702 int i;
3703
3704 for (i = 0; i < cache_size; i++)
3705 {
3706 bitmap_clear (&true_dependency_cache[i]);
3707 bitmap_clear (&output_dependency_cache[i]);
3708 bitmap_clear (&anti_dependency_cache[i]);
3709
3710 if (sched_deps_info->generate_spec_deps)
3711 bitmap_clear (&spec_dependency_cache[i]);
3712 }
3713 free (true_dependency_cache);
3714 true_dependency_cache = NULL;
3715 free (output_dependency_cache);
3716 output_dependency_cache = NULL;
3717 free (anti_dependency_cache);
3718 anti_dependency_cache = NULL;
3719
3720 if (sched_deps_info->generate_spec_deps)
3721 {
3722 free (spec_dependency_cache);
3723 spec_dependency_cache = NULL;
3724 }
3725
3726 }
cache_size = 0;
3727 }
3728
3729 /* Initialize some global variables needed by the dependency analysis
3730 code. */
3731
3732 void
3733 init_deps_global (void)
3734 {
3735 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3736 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3737 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
3738 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
3739 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
3740 reg_pending_barrier = NOT_A_BARRIER;
3741
3742 if (!sel_sched_p () || sched_emulate_haifa_p)
3743 {
3744 sched_deps_info->start_insn = haifa_start_insn;
3745 sched_deps_info->finish_insn = haifa_finish_insn;
3746
3747 sched_deps_info->note_reg_set = haifa_note_reg_set;
3748 sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
3749 sched_deps_info->note_reg_use = haifa_note_reg_use;
3750
3751 sched_deps_info->note_mem_dep = haifa_note_mem_dep;
3752 sched_deps_info->note_dep = haifa_note_dep;
3753 }
3754 }
3755
3756 /* Free everything used by the dependency analysis code. */
3757
3758 void
3759 finish_deps_global (void)
3760 {
3761 FREE_REG_SET (reg_pending_sets);
3762 FREE_REG_SET (reg_pending_clobbers);
3763 FREE_REG_SET (reg_pending_uses);
3764 }
3765
3766 /* Estimate the weakness of dependence between MEM1 and MEM2. */
3767 dw_t
3768 estimate_dep_weak (rtx mem1, rtx mem2)
3769 {
3770 rtx r1, r2;
3771
3772 if (mem1 == mem2)
3773 /* MEMs are the same - don't speculate. */
3774 return MIN_DEP_WEAK;
3775
3776 r1 = XEXP (mem1, 0);
3777 r2 = XEXP (mem2, 0);
3778
3779 if (r1 == r2
3780 || (REG_P (r1) && REG_P (r2)
3781 && REGNO (r1) == REGNO (r2)))
3782 /* Again, MEMs are the same. */
3783 return MIN_DEP_WEAK;
3784 else if ((REG_P (r1) && !REG_P (r2))
3785 || (!REG_P (r1) && REG_P (r2)))
3786 /* Different addressing modes - a reason to be more speculative
3787 than usual.  */
3788 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
3789 else
3790 /* We can't say anything about the dependence. */
3791 return UNCERTAIN_DEP_WEAK;
3792 }
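
/* Illustrative use (mirroring what ds_full_merge does below): a caller
   that wants to turn the estimate into a data-speculative status would do
   roughly

     dw_t dw = estimate_dep_weak (mem1, mem2);
     ds = set_dep_weak (ds, BEGIN_DATA, dw);

   so a pair of MEMs with different addressing modes ends up with a weaker
   (easier to speculate) dependence than the UNCERTAIN_DEP_WEAK default.  */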
3793
3794 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
3795 This function can handle same INSN and ELEM (INSN == ELEM).
3796 It is a convenience wrapper. */
3797 void
3798 add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
3799 {
3800 ds_t ds;
3801 bool internal;
3802
3803 if (dep_type == REG_DEP_TRUE)
3804 ds = DEP_TRUE;
3805 else if (dep_type == REG_DEP_OUTPUT)
3806 ds = DEP_OUTPUT;
3807 else
3808 {
3809 gcc_assert (dep_type == REG_DEP_ANTI);
3810 ds = DEP_ANTI;
3811 }
3812
3813 /* When add_dependence is called from inside sched-deps.c, we expect
3814 cur_insn to be non-null. */
3815 internal = cur_insn != NULL;
3816 if (internal)
3817 gcc_assert (insn == cur_insn);
3818 else
3819 cur_insn = insn;
3820
3821 note_dep (elem, ds);
3822 if (!internal)
3823 cur_insn = NULL;
3824 }
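
/* Usage note: a pass or target hook that simply needs to order INSN after
   some earlier instruction PREV (both hypothetical variables here), without
   dealing with dep_status bits directly, can call

     add_dependence (insn, prev, REG_DEP_ANTI);

   which funnels through note_dep and therefore respects the current
   sched_deps_info callbacks and the dependency caches.  */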
3825
3826 /* Return the weakness of speculative type TYPE in the dep_status DS.  */
3827 dw_t
3828 get_dep_weak_1 (ds_t ds, ds_t type)
3829 {
3830 ds = ds & type;
3831
3832 switch (type)
3833 {
3834 case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
3835 case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
3836 case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
3837 case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
3838 default: gcc_unreachable ();
3839 }
3840
3841 return (dw_t) ds;
3842 }
3843
3844 dw_t
3845 get_dep_weak (ds_t ds, ds_t type)
3846 {
3847 dw_t dw = get_dep_weak_1 (ds, type);
3848
3849 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
3850 return dw;
3851 }
3852
3853 /* Return a dep_status that is identical to DS except that the weakness
3854    of speculative type TYPE is set to DW.  */
3855 ds_t
3856 set_dep_weak (ds_t ds, ds_t type, dw_t dw)
3857 {
3858 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
3859
3860 ds &= ~type;
3861 switch (type)
3862 {
3863 case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
3864 case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
3865 case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
3866 case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
3867 default: gcc_unreachable ();
3868 }
3869 return ds;
3870 }
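
/* Sketch of the bit-field layout the two helpers above manipulate: each
   speculation type owns a BITS_PER_DEP_WEAK wide field in ds_t starting at
   its *_BITS_OFFSET, so storing a weakness and reading it back is a plain
   insert/extract round trip.  The weakness value 3 is an arbitrary example
   (anything in [MIN_DEP_WEAK, MAX_DEP_WEAK] would do).  */
#if 0
static void
example_dep_weak_roundtrip (void)
{
  ds_t ds = DEP_TRUE | BEGIN_DATA;

  ds = set_dep_weak (ds, BEGIN_DATA, (dw_t) 3);
  gcc_assert (get_dep_weak (ds, BEGIN_DATA) == 3);
  gcc_assert (get_dep_weak_1 (ds, BEGIN_DATA)
              == ((ds & BEGIN_DATA) >> BEGIN_DATA_BITS_OFFSET));
}
#endif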
3871
3872 /* Return the join of two dep_statuses DS1 and DS2.
3873    If MAX_P is true, choose the greater probability;
3874    otherwise, multiply the probabilities.
3875 This function assumes that both DS1 and DS2 contain speculative bits. */
3876 static ds_t
3877 ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
3878 {
3879 ds_t ds, t;
3880
3881 gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
3882
3883 ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
3884
3885 t = FIRST_SPEC_TYPE;
3886 do
3887 {
3888 if ((ds1 & t) && !(ds2 & t))
3889 ds |= ds1 & t;
3890 else if (!(ds1 & t) && (ds2 & t))
3891 ds |= ds2 & t;
3892 else if ((ds1 & t) && (ds2 & t))
3893 {
3894 dw_t dw1 = get_dep_weak (ds1, t);
3895 dw_t dw2 = get_dep_weak (ds2, t);
3896 ds_t dw;
3897
3898 if (!max_p)
3899 {
3900 dw = ((ds_t) dw1) * ((ds_t) dw2);
3901 dw /= MAX_DEP_WEAK;
3902 if (dw < MIN_DEP_WEAK)
3903 dw = MIN_DEP_WEAK;
3904 }
3905 else
3906 {
3907 if (dw1 >= dw2)
3908 dw = dw1;
3909 else
3910 dw = dw2;
3911 }
3912
3913 ds = set_dep_weak (ds, t, (dw_t) dw);
3914 }
3915
3916 if (t == LAST_SPEC_TYPE)
3917 break;
3918 t <<= SPEC_TYPE_SHIFT;
3919 }
3920 while (1);
3921
3922 return ds;
3923 }
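
/* Worked example for the merge rule above, reading a weakness W as the
   success probability W / MAX_DEP_WEAK: if DS1 and DS2 both carry
   BEGIN_DATA speculation with weaknesses W1 and W2, the merged weakness is
   W1 * W2 / MAX_DEP_WEAK when !MAX_P (both speculations have to succeed,
   so the probabilities multiply, floored at MIN_DEP_WEAK) and
   MAX (W1, W2) when MAX_P.  A type present in only one of the statuses is
   copied over with its weakness unchanged.  */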
3924
3925 /* Return the join of two dep_statuses DS1 and DS2.
3926 This function assumes that both DS1 and DS2 contain speculative bits. */
3927 ds_t
3928 ds_merge (ds_t ds1, ds_t ds2)
3929 {
3930 return ds_merge_1 (ds1, ds2, false);
3931 }
3932
3933 /* Return the join of the dep_statuses DS and DS2.  If MEM1 is nonnull,
        MEM1 and MEM2 are used to re-estimate the BEGIN_DATA weakness of DS
        before speculative statuses are merged.  */
3934 ds_t
3935 ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
3936 {
3937 ds_t new_status = ds | ds2;
3938
3939 if (new_status & SPECULATIVE)
3940 {
3941 if ((ds && !(ds & SPECULATIVE))
3942 || (ds2 && !(ds2 & SPECULATIVE)))
3943 /* Then this dep can't be speculative. */
3944 new_status &= ~SPECULATIVE;
3945 else
3946 {
3947 	  /* Both are speculative.  Merge the probabilities.  */
3948 if (mem1)
3949 {
3950 dw_t dw;
3951
3952 dw = estimate_dep_weak (mem1, mem2);
3953 ds = set_dep_weak (ds, BEGIN_DATA, dw);
3954 }
3955
3956 if (!ds)
3957 new_status = ds2;
3958 else if (!ds2)
3959 new_status = ds;
3960 else
3961 new_status = ds_merge (ds2, ds);
3962 }
3963 }
3964
3965 return new_status;
3966 }
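
/* For instance, joining a plain (non-speculative) DEP_TRUE status with a
   speculative one yields a non-speculative result: the speculative bits
   are stripped from the union.  Only when speculation survives are the
   weaknesses combined via ds_merge, after the optional re-estimation of
   DS's BEGIN_DATA weakness from MEM1/MEM2.  */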
3967
3968 /* Return the join of DS1 and DS2. Use maximum instead of multiplying
3969 probabilities. */
3970 ds_t
3971 ds_max_merge (ds_t ds1, ds_t ds2)
3972 {
3973 if (ds1 == 0 && ds2 == 0)
3974 return 0;
3975
3976 if (ds1 == 0 && ds2 != 0)
3977 return ds2;
3978
3979 if (ds1 != 0 && ds2 == 0)
3980 return ds1;
3981
3982 return ds_merge_1 (ds1, ds2, true);
3983 }
3984
3985 /* Return the probability of speculation success for the speculation
3986 status DS. */
3987 dw_t
3988 ds_weak (ds_t ds)
3989 {
3990 ds_t res = 1, dt;
3991 int n = 0;
3992
3993 dt = FIRST_SPEC_TYPE;
3994 do
3995 {
3996 if (ds & dt)
3997 {
3998 res *= (ds_t) get_dep_weak (ds, dt);
3999 n++;
4000 }
4001
4002 if (dt == LAST_SPEC_TYPE)
4003 break;
4004 dt <<= SPEC_TYPE_SHIFT;
4005 }
4006 while (1);
4007
4008 gcc_assert (n);
4009 while (--n)
4010 res /= MAX_DEP_WEAK;
4011
4012 if (res < MIN_DEP_WEAK)
4013 res = MIN_DEP_WEAK;
4014
4015 gcc_assert (res <= MAX_DEP_WEAK);
4016
4017 return (dw_t) res;
4018 }
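
/* Worked example: if DS carries BEGIN_DATA speculation with weakness W1
   and BEGIN_CONTROL speculation with weakness W2, the loop accumulates
   W1 * W2 and the single trailing division rescales it by MAX_DEP_WEAK,
   giving W1 * W2 / MAX_DEP_WEAK, i.e. the product of the two success
   probabilities expressed as a weakness again, floored at MIN_DEP_WEAK.
   With exactly one speculation type present the weakness is returned
   unchanged.  */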
4019
4020 /* Return a dep status that contains all speculation types of DS.
        Each *_DATA/*_CONTROL constant is a mask covering that type's whole
        weakness field, so OR-ing it in saturates the field and the final
        mask with SPECULATIVE keeps only the types that were present,
        discarding their weaknesses.  */
4021 ds_t
4022 ds_get_speculation_types (ds_t ds)
4023 {
4024 if (ds & BEGIN_DATA)
4025 ds |= BEGIN_DATA;
4026 if (ds & BE_IN_DATA)
4027 ds |= BE_IN_DATA;
4028 if (ds & BEGIN_CONTROL)
4029 ds |= BEGIN_CONTROL;
4030 if (ds & BE_IN_CONTROL)
4031 ds |= BE_IN_CONTROL;
4032
4033 return ds & SPECULATIVE;
4034 }
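
/* For example, a status whose only speculative field is BEGIN_DATA comes
   back as exactly the BEGIN_DATA mask, whatever weakness the field held;
   the weakness itself is deliberately discarded.  */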
4035
4036 /* Return a dep status that contains maximal weakness for each speculation
4037 type present in DS. */
4038 ds_t
4039 ds_get_max_dep_weak (ds_t ds)
4040 {
4041 if (ds & BEGIN_DATA)
4042 ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4043 if (ds & BE_IN_DATA)
4044 ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4045 if (ds & BEGIN_CONTROL)
4046 ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4047 if (ds & BE_IN_CONTROL)
4048 ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4049
4050 return ds;
4051 }
4052
4053 /* Dump information about the dependence status S. */
4054 static void
4055 dump_ds (FILE *f, ds_t s)
4056 {
4057 fprintf (f, "{");
4058
4059 if (s & BEGIN_DATA)
4060 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4061 if (s & BE_IN_DATA)
4062 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4063 if (s & BEGIN_CONTROL)
4064 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4065 if (s & BE_IN_CONTROL)
4066 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4067
4068 if (s & HARD_DEP)
4069 fprintf (f, "HARD_DEP; ");
4070
4071 if (s & DEP_TRUE)
4072 fprintf (f, "DEP_TRUE; ");
4073 if (s & DEP_ANTI)
4074 fprintf (f, "DEP_ANTI; ");
4075 if (s & DEP_OUTPUT)
4076 fprintf (f, "DEP_OUTPUT; ");
4077
4078 fprintf (f, "}");
4079 }
4080
4081 DEBUG_FUNCTION void
4082 debug_ds (ds_t s)
4083 {
4084 dump_ds (stderr, s);
4085 fprintf (stderr, "\n");
4086 }
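
/* debug_ds is meant to be called from the debugger rather than from the
   compiler itself, e.g. "call debug_ds (ds)" from gdb on a ds_t value in
   scope; it prints the decoded status followed by a newline.  (The gdb
   command line is only an illustration.)  */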
4087
4088 #ifdef ENABLE_CHECKING
4089 /* Verify that dependence type and status are consistent.
4090 If RELAXED_P is true, then skip dep_weakness checks. */
4091 static void
4092 check_dep (dep_t dep, bool relaxed_p)
4093 {
4094 enum reg_note dt = DEP_TYPE (dep);
4095 ds_t ds = DEP_STATUS (dep);
4096
4097 gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4098
4099 if (!(current_sched_info->flags & USE_DEPS_LIST))
4100 {
4101 gcc_assert (ds == -1);
4102 return;
4103 }
4104
4105 /* Check that dependence type contains the same bits as the status. */
4106 if (dt == REG_DEP_TRUE)
4107 gcc_assert (ds & DEP_TRUE);
4108 else if (dt == REG_DEP_OUTPUT)
4109 gcc_assert ((ds & DEP_OUTPUT)
4110 && !(ds & DEP_TRUE));
4111 else
4112 gcc_assert ((dt == REG_DEP_ANTI)
4113 && (ds & DEP_ANTI)
4114 && !(ds & (DEP_OUTPUT | DEP_TRUE)));
4115
4116   /* HARD_DEP cannot appear in the dep_status of a link.  */
4117 gcc_assert (!(ds & HARD_DEP));
4118
4119 /* Check that dependence status is set correctly when speculation is not
4120 supported. */
4121 if (!sched_deps_info->generate_spec_deps)
4122 gcc_assert (!(ds & SPECULATIVE));
4123 else if (ds & SPECULATIVE)
4124 {
4125 if (!relaxed_p)
4126 {
4127 ds_t type = FIRST_SPEC_TYPE;
4128
4129 /* Check that dependence weakness is in proper range. */
4130 do
4131 {
4132 if (ds & type)
4133 get_dep_weak (ds, type);
4134
4135 if (type == LAST_SPEC_TYPE)
4136 break;
4137 type <<= SPEC_TYPE_SHIFT;
4138 }
4139 while (1);
4140 }
4141
4142 if (ds & BEGIN_SPEC)
4143 {
4144 /* Only true dependence can be data speculative. */
4145 if (ds & BEGIN_DATA)
4146 gcc_assert (ds & DEP_TRUE);
4147
4148 	  /* Control dependencies in the insn scheduler are represented by
4149 	     anti-dependencies; therefore only an anti dependence can be
4150 	     control speculative.  */
4151 if (ds & BEGIN_CONTROL)
4152 gcc_assert (ds & DEP_ANTI);
4153 }
4154 else
4155 {
4156 /* Subsequent speculations should resolve true dependencies. */
4157 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4158 }
4159
4160 /* Check that true and anti dependencies can't have other speculative
4161 statuses. */
4162 if (ds & DEP_TRUE)
4163 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4164 /* An output dependence can't be speculative at all. */
4165 gcc_assert (!(ds & DEP_OUTPUT));
4166 if (ds & DEP_ANTI)
4167 gcc_assert (ds & BEGIN_CONTROL);
4168 }
4169 }
4170 #endif /* ENABLE_CHECKING */
4171
4172 #endif /* INSN_SCHEDULING */