1 /* Instruction scheduling pass. This file computes dependencies between
2 instructions.
3 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
5 Free Software Foundation, Inc.
6 Contributed by Michael Tiemann (tiemann@cygnus.com). Enhanced by,
7 and currently maintained by, Jim Wilson (wilson@cygnus.com)
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
24 \f
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "diagnostic-core.h"
30 #include "rtl.h"
31 #include "tm_p.h"
32 #include "hard-reg-set.h"
33 #include "regs.h"
34 #include "function.h"
35 #include "flags.h"
36 #include "insn-config.h"
37 #include "insn-attr.h"
38 #include "except.h"
39 #include "recog.h"
40 #include "sched-int.h"
41 #include "params.h"
42 #include "cselib.h"
43 #include "ira.h"
44 #include "target.h"
45
46 #ifdef INSN_SCHEDULING
47
48 #ifdef ENABLE_CHECKING
49 #define CHECK (true)
50 #else
51 #define CHECK (false)
52 #endif
53
54 /* In deps->last_pending_memory_flush, marks JUMP_INSNs that weren't
55 added to the list because of flush_pending_lists; such a jump stands
56 just for itself and not for any other pending memory reads/writes. */
57 #define NON_FLUSH_JUMP_KIND REG_DEP_ANTI
58 #define NON_FLUSH_JUMP_P(x) (REG_NOTE_KIND (x) == NON_FLUSH_JUMP_KIND)
59
60 /* Holds current parameters for the dependency analyzer. */
61 struct sched_deps_info_def *sched_deps_info;
62
63 /* The data is specific to the Haifa scheduler. */
64 VEC(haifa_deps_insn_data_def, heap) *h_d_i_d = NULL;
65
66 /* Return the major dependence type present in DS. */
67 enum reg_note
68 ds_to_dk (ds_t ds)
69 {
70 if (ds & DEP_TRUE)
71 return REG_DEP_TRUE;
72
73 if (ds & DEP_OUTPUT)
74 return REG_DEP_OUTPUT;
75
76 gcc_assert (ds & DEP_ANTI);
77
78 return REG_DEP_ANTI;
79 }
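
/* Note that when several dependence bits are set in DS, ds_to_dk
   reports the strongest one: DEP_TRUE in preference to DEP_OUTPUT,
   and DEP_OUTPUT in preference to DEP_ANTI.  */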
80
81 /* Return the dep_status equivalent of reg_note kind DK. */
82 ds_t
83 dk_to_ds (enum reg_note dk)
84 {
85 switch (dk)
86 {
87 case REG_DEP_TRUE:
88 return DEP_TRUE;
89
90 case REG_DEP_OUTPUT:
91 return DEP_OUTPUT;
92
93 default:
94 gcc_assert (dk == REG_DEP_ANTI);
95 return DEP_ANTI;
96 }
97 }
98
99 /* Functions to operate with dependence information container - dep_t. */
100
101 /* Init DEP with the arguments. */
102 void
103 init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note type, ds_t ds)
104 {
105 DEP_PRO (dep) = pro;
106 DEP_CON (dep) = con;
107 DEP_TYPE (dep) = type;
108 DEP_STATUS (dep) = ds;
109 }
110
111 /* Init DEP with the arguments.
112 While most of the scheduler (including targets) only needs the major
113 type of the dependency, it is convenient to hide the full dep_status. */
114 void
115 init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
116 {
117 ds_t ds;
118
119 if ((current_sched_info->flags & USE_DEPS_LIST))
120 ds = dk_to_ds (kind);
121 else
122 ds = -1;
123
124 init_dep_1 (dep, pro, con, kind, ds);
125 }
126
127 /* Make a copy of FROM in TO. */
128 static void
129 copy_dep (dep_t to, dep_t from)
130 {
131 memcpy (to, from, sizeof (*to));
132 }
133
134 static void dump_ds (FILE *, ds_t);
135
136 /* Define flags for dump_dep (). */
137
138 /* Dump producer of the dependence. */
139 #define DUMP_DEP_PRO (2)
140
141 /* Dump consumer of the dependence. */
142 #define DUMP_DEP_CON (4)
143
144 /* Dump type of the dependence. */
145 #define DUMP_DEP_TYPE (8)
146
147 /* Dump status of the dependence. */
148 #define DUMP_DEP_STATUS (16)
149
150 /* Dump all information about the dependence. */
151 #define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
152 |DUMP_DEP_STATUS)
153
154 /* Dump DEP to DUMP.
155 FLAGS is a bit mask specifying what information about DEP needs
156 to be printed.
157 If FLAGS has the very first bit set, then dump all information about DEP
158 and propagate this bit into the callee dump functions. */
159 static void
160 dump_dep (FILE *dump, dep_t dep, int flags)
161 {
162 if (flags & 1)
163 flags |= DUMP_DEP_ALL;
164
165 fprintf (dump, "<");
166
167 if (flags & DUMP_DEP_PRO)
168 fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));
169
170 if (flags & DUMP_DEP_CON)
171 fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));
172
173 if (flags & DUMP_DEP_TYPE)
174 {
175 char t;
176 enum reg_note type = DEP_TYPE (dep);
177
178 switch (type)
179 {
180 case REG_DEP_TRUE:
181 t = 't';
182 break;
183
184 case REG_DEP_OUTPUT:
185 t = 'o';
186 break;
187
188 case REG_DEP_ANTI:
189 t = 'a';
190 break;
191
192 default:
193 gcc_unreachable ();
194 break;
195 }
196
197 fprintf (dump, "%c; ", t);
198 }
199
200 if (flags & DUMP_DEP_STATUS)
201 {
202 if (current_sched_info->flags & USE_DEPS_LIST)
203 dump_ds (dump, DEP_STATUS (dep));
204 }
205
206 fprintf (dump, ">");
207 }
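
/* For example, with FLAGS == 1 a true dependence between insns with
   UIDs 10 and 14 is dumped as "<10; 14; t; >"; the status field is
   only printed when USE_DEPS_LIST is set.  */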
208
209 /* Default flags for dump_dep (). */
210 static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);
211
212 /* Dump all fields of DEP to STDERR. */
213 void
214 sd_debug_dep (dep_t dep)
215 {
216 dump_dep (stderr, dep, 1);
217 fprintf (stderr, "\n");
218 }
219
220 /* Determine whether DEP is a dependency link of a non-debug insn on a
221 debug insn. */
222
223 static inline bool
224 depl_on_debug_p (dep_link_t dep)
225 {
226 return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
227 && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
228 }
229
230 /* Functions to operate with a single link from the dependencies lists -
231 dep_link_t. */
232
233 /* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
234 PREV_NEXTP. */
235 static void
236 attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
237 {
238 dep_link_t next = *prev_nextp;
239
240 gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
241 && DEP_LINK_NEXT (l) == NULL);
242
243 /* Init node being inserted. */
244 DEP_LINK_PREV_NEXTP (l) = prev_nextp;
245 DEP_LINK_NEXT (l) = next;
246
247 /* Fix next node. */
248 if (next != NULL)
249 {
250 gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);
251
252 DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
253 }
254
255 /* Fix prev node. */
256 *prev_nextp = l;
257 }
258
259 /* Add dep_link LINK to deps_list L. */
260 static void
261 add_to_deps_list (dep_link_t link, deps_list_t l)
262 {
263 attach_dep_link (link, &DEPS_LIST_FIRST (l));
264
265 /* Don't count debug deps. */
266 if (!depl_on_debug_p (link))
267 ++DEPS_LIST_N_LINKS (l);
268 }
269
270 /* Detach dep_link L from the list. */
271 static void
272 detach_dep_link (dep_link_t l)
273 {
274 dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
275 dep_link_t next = DEP_LINK_NEXT (l);
276
277 *prev_nextp = next;
278
279 if (next != NULL)
280 DEP_LINK_PREV_NEXTP (next) = prev_nextp;
281
282 DEP_LINK_PREV_NEXTP (l) = NULL;
283 DEP_LINK_NEXT (l) = NULL;
284 }
285
286 /* Remove link LINK from list LIST. */
287 static void
288 remove_from_deps_list (dep_link_t link, deps_list_t list)
289 {
290 detach_dep_link (link);
291
292 /* Don't count debug deps. */
293 if (!depl_on_debug_p (link))
294 --DEPS_LIST_N_LINKS (list);
295 }
296
297 /* Move link LINK from list FROM to list TO. */
298 static void
299 move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
300 {
301 remove_from_deps_list (link, from);
302 add_to_deps_list (link, to);
303 }
304
305 /* Return true if LINK is not attached to any list. */
306 static bool
307 dep_link_is_detached_p (dep_link_t link)
308 {
309 return DEP_LINK_PREV_NEXTP (link) == NULL;
310 }
311
312 /* Pool to hold all dependency nodes (dep_node_t). */
313 static alloc_pool dn_pool;
314
315 /* Number of dep_nodes out there. */
316 static int dn_pool_diff = 0;
317
318 /* Create a dep_node. */
319 static dep_node_t
320 create_dep_node (void)
321 {
322 dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
323 dep_link_t back = DEP_NODE_BACK (n);
324 dep_link_t forw = DEP_NODE_FORW (n);
325
326 DEP_LINK_NODE (back) = n;
327 DEP_LINK_NEXT (back) = NULL;
328 DEP_LINK_PREV_NEXTP (back) = NULL;
329
330 DEP_LINK_NODE (forw) = n;
331 DEP_LINK_NEXT (forw) = NULL;
332 DEP_LINK_PREV_NEXTP (forw) = NULL;
333
334 ++dn_pool_diff;
335
336 return n;
337 }
338
339 /* Delete dep_node N. N must not be connected to any deps_list. */
340 static void
341 delete_dep_node (dep_node_t n)
342 {
343 gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
344 && dep_link_is_detached_p (DEP_NODE_FORW (n)));
345
346 --dn_pool_diff;
347
348 pool_free (dn_pool, n);
349 }
350
351 /* Pool to hold dependencies lists (deps_list_t). */
352 static alloc_pool dl_pool;
353
354 /* Number of deps_lists out there. */
355 static int dl_pool_diff = 0;
356
357 /* Functions to operate with dependences lists - deps_list_t. */
358
359 /* Return true if list L is empty. */
360 static bool
361 deps_list_empty_p (deps_list_t l)
362 {
363 return DEPS_LIST_N_LINKS (l) == 0;
364 }
365
366 /* Create a new deps_list. */
367 static deps_list_t
368 create_deps_list (void)
369 {
370 deps_list_t l = (deps_list_t) pool_alloc (dl_pool);
371
372 DEPS_LIST_FIRST (l) = NULL;
373 DEPS_LIST_N_LINKS (l) = 0;
374
375 ++dl_pool_diff;
376 return l;
377 }
378
379 /* Free deps_list L. */
380 static void
381 free_deps_list (deps_list_t l)
382 {
383 gcc_assert (deps_list_empty_p (l));
384
385 --dl_pool_diff;
386
387 pool_free (dl_pool, l);
388 }
389
390 /* Return true if there are no dep_nodes and deps_lists out there.
391 After the region is scheduled all the dependency nodes and lists
392 should [generally] be returned to the pool. */
393 bool
394 deps_pools_are_empty_p (void)
395 {
396 return dn_pool_diff == 0 && dl_pool_diff == 0;
397 }
398
399 /* Remove all elements from L. */
400 static void
401 clear_deps_list (deps_list_t l)
402 {
403 dep_link_t link;
404
405 while ((link = DEPS_LIST_FIRST (l)) != NULL)
406 remove_from_deps_list (link, l);
413 }
414
415 static regset reg_pending_sets;
416 static regset reg_pending_clobbers;
417 static regset reg_pending_uses;
418 static enum reg_pending_barrier_mode reg_pending_barrier;
419
420 /* Hard registers implicitly clobbered or used (or that may be
421 implicitly clobbered or used) by the currently analyzed insn. For
422 example, an insn whose constraint allows only one register class.
423 Even if there is currently no hard register in the insn, the
424 particular hard register will be in the insn after the reload pass
425 because the constraint requires it. */
426 static HARD_REG_SET implicit_reg_pending_clobbers;
427 static HARD_REG_SET implicit_reg_pending_uses;
428
429 /* To speed up the test for duplicate dependency links we keep a
430 record of dependencies created by add_dependence when the average
431 number of instructions in a basic block is very large.
432
433 Studies have shown that there are typically around 5 instructions
434 between branches in typical C code. So we can guess that the average
435 basic block is approximately 5 instructions long; we will choose 100X
436 the average size as a very large basic block.
437
438 Each insn has associated bitmaps for its dependencies. Each bitmap
439 has enough entries to represent a dependency on any other insn in
440 the insn chain. When the bitmap cache for true dependencies is
441 allocated, the remaining two caches are allocated as well. */
442 static bitmap_head *true_dependency_cache = NULL;
443 static bitmap_head *output_dependency_cache = NULL;
444 static bitmap_head *anti_dependency_cache = NULL;
445 static bitmap_head *spec_dependency_cache = NULL;
446 static int cache_size;
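
/* For instance, the presence of a true dependence of consumer CON on
   producer PRO is tested (as in sd_find_dep_between below) with:

     bitmap_bit_p (&true_dependency_cache[INSN_LUID (con)],
                   INSN_LUID (pro));  */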
447
448 static int deps_may_trap_p (const_rtx);
449 static void add_dependence_list (rtx, rtx, int, enum reg_note);
450 static void add_dependence_list_and_free (struct deps_desc *, rtx,
451 rtx *, int, enum reg_note);
452 static void delete_all_dependences (rtx);
453 static void fixup_sched_groups (rtx);
454
455 static void flush_pending_lists (struct deps_desc *, rtx, int, int);
456 static void sched_analyze_1 (struct deps_desc *, rtx, rtx);
457 static void sched_analyze_2 (struct deps_desc *, rtx, rtx);
458 static void sched_analyze_insn (struct deps_desc *, rtx, rtx);
459
460 static bool sched_has_condition_p (const_rtx);
461 static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
462
463 static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
464 rtx, rtx);
465 static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);
466
467 #ifdef ENABLE_CHECKING
468 static void check_dep (dep_t, bool);
469 #endif
470 \f
471 /* Return nonzero if a load of the memory reference MEM can cause a trap. */
472
473 static int
474 deps_may_trap_p (const_rtx mem)
475 {
476 const_rtx addr = XEXP (mem, 0);
477
478 if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
479 {
480 const_rtx t = get_reg_known_value (REGNO (addr));
481 if (t)
482 addr = t;
483 }
484 return rtx_addr_can_trap_p (addr);
485 }
486 \f
487
488 /* Find the condition under which INSN is executed. If REV is not NULL,
489 it is set to TRUE when the returned comparison should be reversed
490 to get the actual condition. */
491 static rtx
492 sched_get_condition_with_rev (const_rtx insn, bool *rev)
493 {
494 rtx pat = PATTERN (insn);
495 rtx src;
496
497 if (pat == 0)
498 return 0;
499
500 if (rev)
501 *rev = false;
502
503 if (GET_CODE (pat) == COND_EXEC)
504 return COND_EXEC_TEST (pat);
505
506 if (!any_condjump_p (insn) || !onlyjump_p (insn))
507 return 0;
508
509 src = SET_SRC (pc_set (insn));
510
511 if (XEXP (src, 2) == pc_rtx)
512 return XEXP (src, 0);
513 else if (XEXP (src, 1) == pc_rtx)
514 {
515 rtx cond = XEXP (src, 0);
516 enum rtx_code revcode = reversed_comparison_code (cond, insn);
517
518 if (revcode == UNKNOWN)
519 return 0;
520
521 if (rev)
522 *rev = true;
523 return cond;
524 }
525
526 return 0;
527 }
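
/* For instance, for a conditional jump
     (set (pc) (if_then_else (ne (reg 100) (const_int 0))
                             (label_ref 23) (pc)))
   the NE comparison is returned as-is; with the arms swapped, the
   same comparison is returned with *REV set to true, provided the
   comparison code is reversible.  */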
528
529 /* True when we can find a condition under which INSN is executed. */
530 static bool
531 sched_has_condition_p (const_rtx insn)
532 {
533 return !! sched_get_condition_with_rev (insn, NULL);
534 }
535
536 \f
537
538 /* Return nonzero if conditions COND1 and COND2 can never both be true. */
539 static int
540 conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
541 {
542 if (COMPARISON_P (cond1)
543 && COMPARISON_P (cond2)
544 && GET_CODE (cond1) ==
545 (rev1 == rev2
546 ? reversed_comparison_code (cond2, NULL)
547 : GET_CODE (cond2))
548 && XEXP (cond1, 0) == XEXP (cond2, 0)
549 && XEXP (cond1, 1) == XEXP (cond2, 1))
550 return 1;
551 return 0;
552 }
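
/* For example, (eq (reg 100) (const_int 0)) and
   (ne (reg 100) (const_int 0)) with REV1 == REV2 are mutually
   exclusive, as are two identical comparisons of which exactly one
   is reversed.  */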
553
554 /* Return true if INSN1 and INSN2 can never depend on one another because
555 the conditions under which they are executed are mutually exclusive. */
556 bool
557 sched_insns_conditions_mutex_p (const_rtx insn1, const_rtx insn2)
558 {
559 rtx cond1, cond2;
560 bool rev1 = false, rev2 = false;
561
562 /* df doesn't handle conditional lifetimes entirely correctly;
563 calls mess up the conditional lifetimes. */
564 if (!CALL_P (insn1) && !CALL_P (insn2))
565 {
566 cond1 = sched_get_condition_with_rev (insn1, &rev1);
567 cond2 = sched_get_condition_with_rev (insn2, &rev2);
568 if (cond1 && cond2
569 && conditions_mutex_p (cond1, cond2, rev1, rev2)
570 /* Make sure first instruction doesn't affect condition of second
571 instruction if switched. */
572 && !modified_in_p (cond1, insn2)
573 /* Make sure second instruction doesn't affect condition of first
574 instruction if switched. */
575 && !modified_in_p (cond2, insn1))
576 return true;
577 }
578 return false;
579 }
580 \f
581
582 /* Return true if INSN can potentially be speculated with type DS. */
583 bool
584 sched_insn_is_legitimate_for_speculation_p (const_rtx insn, ds_t ds)
585 {
586 if (HAS_INTERNAL_DEP (insn))
587 return false;
588
589 if (!NONJUMP_INSN_P (insn))
590 return false;
591
592 if (SCHED_GROUP_P (insn))
593 return false;
594
595 if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX (insn)))
596 return false;
597
598 if (side_effects_p (PATTERN (insn)))
599 return false;
600
601 if (ds & BE_IN_SPEC)
602 /* The following instructions, which depend on a speculatively scheduled
603 instruction, cannot be speculatively scheduled along with it. */
604 {
605 if (may_trap_or_fault_p (PATTERN (insn)))
606 /* If instruction might fault, it cannot be speculatively scheduled.
607 For control speculation it's obvious why and for data speculation
608 it's because the insn might get wrong input if speculation
609 wasn't successful. */
610 return false;
611
612 if ((ds & BE_IN_DATA)
613 && sched_has_condition_p (insn))
614 /* If this is a predicated instruction, then it cannot be
615 speculatively scheduled. See PR35659. */
616 return false;
617 }
618
619 return true;
620 }
621
622 /* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
623 initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
624 and remove the type of the list returned [through LIST_PTR] from TYPES_PTR.
625 This function is used to switch sd_iterator to the next list.
626 !!! For internal use only. Might consider moving it to sched-int.h. */
627 void
628 sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
629 deps_list_t *list_ptr, bool *resolved_p_ptr)
630 {
631 sd_list_types_def types = *types_ptr;
632
633 if (types & SD_LIST_HARD_BACK)
634 {
635 *list_ptr = INSN_HARD_BACK_DEPS (insn);
636 *resolved_p_ptr = false;
637 *types_ptr = types & ~SD_LIST_HARD_BACK;
638 }
639 else if (types & SD_LIST_SPEC_BACK)
640 {
641 *list_ptr = INSN_SPEC_BACK_DEPS (insn);
642 *resolved_p_ptr = false;
643 *types_ptr = types & ~SD_LIST_SPEC_BACK;
644 }
645 else if (types & SD_LIST_FORW)
646 {
647 *list_ptr = INSN_FORW_DEPS (insn);
648 *resolved_p_ptr = false;
649 *types_ptr = types & ~SD_LIST_FORW;
650 }
651 else if (types & SD_LIST_RES_BACK)
652 {
653 *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
654 *resolved_p_ptr = true;
655 *types_ptr = types & ~SD_LIST_RES_BACK;
656 }
657 else if (types & SD_LIST_RES_FORW)
658 {
659 *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
660 *resolved_p_ptr = true;
661 *types_ptr = types & ~SD_LIST_RES_FORW;
662 }
663 else
664 {
665 *list_ptr = NULL;
666 *resolved_p_ptr = false;
667 *types_ptr = SD_LIST_NONE;
668 }
669 }
670
671 /* Return the summary size of INSN's lists defined by LIST_TYPES. */
672 int
673 sd_lists_size (const_rtx insn, sd_list_types_def list_types)
674 {
675 int size = 0;
676
677 while (list_types != SD_LIST_NONE)
678 {
679 deps_list_t list;
680 bool resolved_p;
681
682 sd_next_list (insn, &list_types, &list, &resolved_p);
683 if (list)
684 size += DEPS_LIST_N_LINKS (list);
685 }
686
687 return size;
688 }
689
690 /* Return true if INSN's lists defined by LIST_TYPES are all empty. */
691
692 bool
693 sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
694 {
695 while (list_types != SD_LIST_NONE)
696 {
697 deps_list_t list;
698 bool resolved_p;
699
700 sd_next_list (insn, &list_types, &list, &resolved_p);
701 if (!deps_list_empty_p (list))
702 return false;
703 }
704
705 return true;
706 }
707
708 /* Initialize data for INSN. */
709 void
710 sd_init_insn (rtx insn)
711 {
712 INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
713 INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
714 INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
715 INSN_FORW_DEPS (insn) = create_deps_list ();
716 INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();
717
718 if (DEBUG_INSN_P (insn))
719 DEBUG_INSN_SCHED_P (insn) = TRUE;
720
721 /* ??? It would be nice to allocate dependency caches here. */
722 }
723
724 /* Free data for INSN. */
725 void
726 sd_finish_insn (rtx insn)
727 {
728 /* ??? It would be nice to deallocate dependency caches here. */
729
730 if (DEBUG_INSN_P (insn))
731 {
732 gcc_assert (DEBUG_INSN_SCHED_P (insn));
733 DEBUG_INSN_SCHED_P (insn) = FALSE;
734 }
735
736 free_deps_list (INSN_HARD_BACK_DEPS (insn));
737 INSN_HARD_BACK_DEPS (insn) = NULL;
738
739 free_deps_list (INSN_SPEC_BACK_DEPS (insn));
740 INSN_SPEC_BACK_DEPS (insn) = NULL;
741
742 free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
743 INSN_RESOLVED_BACK_DEPS (insn) = NULL;
744
745 free_deps_list (INSN_FORW_DEPS (insn));
746 INSN_FORW_DEPS (insn) = NULL;
747
748 free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
749 INSN_RESOLVED_FORW_DEPS (insn) = NULL;
750 }
751
752 /* Find a dependency between producer PRO and consumer CON.
753 Search through resolved dependency lists if RESOLVED_P is true.
754 If no such dependency is found return NULL,
755 otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
756 with an iterator pointing to it. */
757 static dep_t
758 sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
759 sd_iterator_def *sd_it_ptr)
760 {
761 sd_list_types_def pro_list_type;
762 sd_list_types_def con_list_type;
763 sd_iterator_def sd_it;
764 dep_t dep;
765 bool found_p = false;
766
767 if (resolved_p)
768 {
769 pro_list_type = SD_LIST_RES_FORW;
770 con_list_type = SD_LIST_RES_BACK;
771 }
772 else
773 {
774 pro_list_type = SD_LIST_FORW;
775 con_list_type = SD_LIST_BACK;
776 }
777
778 /* Walk through either the back list of CON or the forw list of PRO,
779 depending on which one is shorter. */
780 if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
781 {
782 /* Find the dep_link with producer PRO in consumer's back_deps. */
783 FOR_EACH_DEP (con, con_list_type, sd_it, dep)
784 if (DEP_PRO (dep) == pro)
785 {
786 found_p = true;
787 break;
788 }
789 }
790 else
791 {
792 /* Find the dep_link with consumer CON in producer's forw_deps. */
793 FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
794 if (DEP_CON (dep) == con)
795 {
796 found_p = true;
797 break;
798 }
799 }
800
801 if (found_p)
802 {
803 if (sd_it_ptr != NULL)
804 *sd_it_ptr = sd_it;
805
806 return dep;
807 }
808
809 return NULL;
810 }
811
812 /* Find a dependency between producer PRO and consumer CON.
813 Use the dependency caches [if available] to check whether a dependency
814 is present at all. Search through resolved dependency lists if
815 RESOLVED_P is true. Return the dependency, or NULL if none is found. */
816 dep_t
817 sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
818 {
819 if (true_dependency_cache != NULL)
820 /* Avoiding the list walk below can cut compile times dramatically
821 for some code. */
822 {
823 int elem_luid = INSN_LUID (pro);
824 int insn_luid = INSN_LUID (con);
825
826 gcc_assert (output_dependency_cache != NULL
827 && anti_dependency_cache != NULL);
828
829 if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
830 && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
831 && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
832 return NULL;
833 }
834
835 return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
836 }
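
/* A typical use is a quick existence check, e.g.

     if (sd_find_dep_between (pro, con, false) != NULL)
       ... the dependence already exists ...  */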
837
838 /* Add or update a dependence described by DEP.
839 MEM1 and MEM2, if non-null, correspond to memory locations in case of
840 data speculation.
841
842 The function returns a value indicating if an old entry has been changed
843 or a new entry has been added to insn's backward deps.
844
845 This function merely checks whether producer and consumer are the same insn
846 and doesn't create a dep in this case. Actual manipulation of
847 dependence data structures is performed in add_or_update_dep_1. */
848 static enum DEPS_ADJUST_RESULT
849 maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
850 {
851 rtx elem = DEP_PRO (dep);
852 rtx insn = DEP_CON (dep);
853
854 gcc_assert (INSN_P (insn) && INSN_P (elem));
855
856 /* Don't depend an insn on itself. */
857 if (insn == elem)
858 {
859 if (sched_deps_info->generate_spec_deps)
860 /* INSN has an internal dependence, which we can't overcome. */
861 HAS_INTERNAL_DEP (insn) = 1;
862
863 return DEP_NODEP;
864 }
865
866 return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
867 }
868
869 /* Ask dependency caches what needs to be done for dependence DEP.
870 Return DEP_CREATED if new dependence should be created and there is no
871 need to try to find one searching the dependencies lists.
872 Return DEP_PRESENT if there already is a dependence described by DEP and
873 hence nothing is to be done.
874 Return DEP_CHANGED if there already is a dependence, but it should be
875 updated to incorporate additional information from DEP. */
876 static enum DEPS_ADJUST_RESULT
877 ask_dependency_caches (dep_t dep)
878 {
879 int elem_luid = INSN_LUID (DEP_PRO (dep));
880 int insn_luid = INSN_LUID (DEP_CON (dep));
881
882 gcc_assert (true_dependency_cache != NULL
883 && output_dependency_cache != NULL
884 && anti_dependency_cache != NULL);
885
886 if (!(current_sched_info->flags & USE_DEPS_LIST))
887 {
888 enum reg_note present_dep_type;
889
890 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
891 present_dep_type = REG_DEP_TRUE;
892 else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
893 present_dep_type = REG_DEP_OUTPUT;
894 else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
895 present_dep_type = REG_DEP_ANTI;
896 else
897 /* There is no existing dep so it should be created. */
898 return DEP_CREATED;
899
900 if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
901 /* DEP does not add anything to the existing dependence. */
902 return DEP_PRESENT;
903 }
904 else
905 {
906 ds_t present_dep_types = 0;
907
908 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
909 present_dep_types |= DEP_TRUE;
910 if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
911 present_dep_types |= DEP_OUTPUT;
912 if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
913 present_dep_types |= DEP_ANTI;
914
915 if (present_dep_types == 0)
916 /* There is no existing dep so it should be created. */
917 return DEP_CREATED;
918
919 if (!(current_sched_info->flags & DO_SPECULATION)
920 || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
921 {
922 if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
923 == present_dep_types)
924 /* DEP does not add anything to the existing dependence. */
925 return DEP_PRESENT;
926 }
927 else
928 {
929 /* Only true dependencies can be data speculative and
930 only anti dependencies can be control speculative. */
931 gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
932 == present_dep_types);
933
934 /* if (DEP is SPECULATIVE) then
935 ..we should update DEP_STATUS
936 else
937 ..we should reset existing dep to non-speculative. */
938 }
939 }
940
941 return DEP_CHANGED;
942 }
943
944 /* Set dependency caches according to DEP. */
945 static void
946 set_dependency_caches (dep_t dep)
947 {
948 int elem_luid = INSN_LUID (DEP_PRO (dep));
949 int insn_luid = INSN_LUID (DEP_CON (dep));
950
951 if (!(current_sched_info->flags & USE_DEPS_LIST))
952 {
953 switch (DEP_TYPE (dep))
954 {
955 case REG_DEP_TRUE:
956 bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
957 break;
958
959 case REG_DEP_OUTPUT:
960 bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
961 break;
962
963 case REG_DEP_ANTI:
964 bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
965 break;
966
967 default:
968 gcc_unreachable ();
969 }
970 }
971 else
972 {
973 ds_t ds = DEP_STATUS (dep);
974
975 if (ds & DEP_TRUE)
976 bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
977 if (ds & DEP_OUTPUT)
978 bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
979 if (ds & DEP_ANTI)
980 bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
981
982 if (ds & SPECULATIVE)
983 {
984 gcc_assert (current_sched_info->flags & DO_SPECULATION);
985 bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
986 }
987 }
988 }
989
990 /* The type of dependence DEP has changed from OLD_TYPE. Update the
991 dependency caches accordingly. */
992 static void
993 update_dependency_caches (dep_t dep, enum reg_note old_type)
994 {
995 int elem_luid = INSN_LUID (DEP_PRO (dep));
996 int insn_luid = INSN_LUID (DEP_CON (dep));
997
998 /* Clear the corresponding cache entry because the type of the link
999 may have changed. Keep the entries if we use deps lists. */
1000 if (!(current_sched_info->flags & USE_DEPS_LIST))
1001 {
1002 switch (old_type)
1003 {
1004 case REG_DEP_OUTPUT:
1005 bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1006 break;
1007
1008 case REG_DEP_ANTI:
1009 bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1010 break;
1011
1012 default:
1013 gcc_unreachable ();
1014 }
1015 }
1016
1017 set_dependency_caches (dep);
1018 }
1019
1020 /* Convert a dependence pointed to by SD_IT to be non-speculative. */
1021 static void
1022 change_spec_dep_to_hard (sd_iterator_def sd_it)
1023 {
1024 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1025 dep_link_t link = DEP_NODE_BACK (node);
1026 dep_t dep = DEP_NODE_DEP (node);
1027 rtx elem = DEP_PRO (dep);
1028 rtx insn = DEP_CON (dep);
1029
1030 move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));
1031
1032 DEP_STATUS (dep) &= ~SPECULATIVE;
1033
1034 if (true_dependency_cache != NULL)
1035 /* Clear the cache entry. */
1036 bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
1037 INSN_LUID (elem));
1038 }
1039
1040 /* Update DEP to incorporate information from NEW_DEP.
1041 SD_IT points to DEP in case it should be moved to another list.
1042 MEM1 and MEM2, if nonnull, correspond to memory locations in case a
1043 data-speculative dependence should be updated. */
1044 static enum DEPS_ADJUST_RESULT
1045 update_dep (dep_t dep, dep_t new_dep,
1046 sd_iterator_def sd_it ATTRIBUTE_UNUSED,
1047 rtx mem1 ATTRIBUTE_UNUSED,
1048 rtx mem2 ATTRIBUTE_UNUSED)
1049 {
1050 enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
1051 enum reg_note old_type = DEP_TYPE (dep);
1052
1053 /* If this is a more restrictive type of dependence than the
1054 existing one, then change the existing dependence to this
1055 type. */
1056 if ((int) DEP_TYPE (new_dep) < (int) old_type)
1057 {
1058 DEP_TYPE (dep) = DEP_TYPE (new_dep);
1059 res = DEP_CHANGED;
1060 }
1061
1062 if (current_sched_info->flags & USE_DEPS_LIST)
1063 /* Update DEP_STATUS. */
1064 {
1065 ds_t dep_status = DEP_STATUS (dep);
1066 ds_t ds = DEP_STATUS (new_dep);
1067 ds_t new_status = ds | dep_status;
1068
1069 if (new_status & SPECULATIVE)
1070 /* Either existing dep or a dep we're adding or both are
1071 speculative. */
1072 {
1073 if (!(ds & SPECULATIVE)
1074 || !(dep_status & SPECULATIVE))
1075 /* The new dep can't be speculative. */
1076 {
1077 new_status &= ~SPECULATIVE;
1078
1079 if (dep_status & SPECULATIVE)
1080 /* The old dep was speculative, but now it
1081 isn't. */
1082 change_spec_dep_to_hard (sd_it);
1083 }
1084 else
1085 {
1086 /* Both are speculative. Merge probabilities. */
1087 if (mem1 != NULL)
1088 {
1089 dw_t dw;
1090
1091 dw = estimate_dep_weak (mem1, mem2);
1092 ds = set_dep_weak (ds, BEGIN_DATA, dw);
1093 }
1094
1095 new_status = ds_merge (dep_status, ds);
1096 }
1097 }
1098
1099 ds = new_status;
1100
1101 if (dep_status != ds)
1102 {
1103 DEP_STATUS (dep) = ds;
1104 res = DEP_CHANGED;
1105 }
1106 }
1107
1108 if (true_dependency_cache != NULL
1109 && res == DEP_CHANGED)
1110 update_dependency_caches (dep, old_type);
1111
1112 return res;
1113 }
1114
1115 /* Add or update a dependence described by DEP.
1116 MEM1 and MEM2, if non-null, correspond to memory locations in case of
1117 data speculation.
1118
1119 The function returns a value indicating if an old entry has been changed
1120 or a new entry has been added to insn's backward deps, or nothing
1121 has been updated at all. */
1122 static enum DEPS_ADJUST_RESULT
1123 add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
1124 rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
1125 {
1126 bool maybe_present_p = true;
1127 bool present_p = false;
1128
1129 gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
1130 && DEP_PRO (new_dep) != DEP_CON (new_dep));
1131
1132 #ifdef ENABLE_CHECKING
1133 check_dep (new_dep, mem1 != NULL);
1134 #endif
1135
1136 if (true_dependency_cache != NULL)
1137 {
1138 switch (ask_dependency_caches (new_dep))
1139 {
1140 case DEP_PRESENT:
1141 return DEP_PRESENT;
1142
1143 case DEP_CHANGED:
1144 maybe_present_p = true;
1145 present_p = true;
1146 break;
1147
1148 case DEP_CREATED:
1149 maybe_present_p = false;
1150 present_p = false;
1151 break;
1152
1153 default:
1154 gcc_unreachable ();
1155 break;
1156 }
1157 }
1158
1159 /* Check that we don't already have this dependence. */
1160 if (maybe_present_p)
1161 {
1162 dep_t present_dep;
1163 sd_iterator_def sd_it;
1164
1165 gcc_assert (true_dependency_cache == NULL || present_p);
1166
1167 present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
1168 DEP_CON (new_dep),
1169 resolved_p, &sd_it);
1170
1171 if (present_dep != NULL)
1172 /* We found an existing dependency between ELEM and INSN. */
1173 return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
1174 else
1175 /* We didn't find a dep, so it shouldn't be present in the cache. */
1176 gcc_assert (!present_p);
1177 }
1178
1179 /* Might want to check one level of transitivity to save conses.
1180 This check should be done in maybe_add_or_update_dep_1.
1181 Since we made it to add_or_update_dep_1, we must create
1182 (or update) a link. */
1183
1184 if (mem1 != NULL_RTX)
1185 {
1186 gcc_assert (sched_deps_info->generate_spec_deps);
1187 DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
1188 estimate_dep_weak (mem1, mem2));
1189 }
1190
1191 sd_add_dep (new_dep, resolved_p);
1192
1193 return DEP_CREATED;
1194 }
1195
1196 /* Initialize BACK_LIST_PTR with consumer's backward list and
1197 FORW_LIST_PTR with producer's forward list. If RESOLVED_P is true
1198 initialize with lists that hold resolved deps. */
1199 static void
1200 get_back_and_forw_lists (dep_t dep, bool resolved_p,
1201 deps_list_t *back_list_ptr,
1202 deps_list_t *forw_list_ptr)
1203 {
1204 rtx con = DEP_CON (dep);
1205
1206 if (!resolved_p)
1207 {
1208 if ((current_sched_info->flags & DO_SPECULATION)
1209 && (DEP_STATUS (dep) & SPECULATIVE))
1210 *back_list_ptr = INSN_SPEC_BACK_DEPS (con);
1211 else
1212 *back_list_ptr = INSN_HARD_BACK_DEPS (con);
1213
1214 *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
1215 }
1216 else
1217 {
1218 *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
1219 *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
1220 }
1221 }
1222
1223 /* Add dependence described by DEP.
1224 If RESOLVED_P is true treat the dependence as a resolved one. */
1225 void
1226 sd_add_dep (dep_t dep, bool resolved_p)
1227 {
1228 dep_node_t n = create_dep_node ();
1229 deps_list_t con_back_deps;
1230 deps_list_t pro_forw_deps;
1231 rtx elem = DEP_PRO (dep);
1232 rtx insn = DEP_CON (dep);
1233
1234 gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
1235
1236 if ((current_sched_info->flags & DO_SPECULATION)
1237 && !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
1238 DEP_STATUS (dep) &= ~SPECULATIVE;
1239
1240 copy_dep (DEP_NODE_DEP (n), dep);
1241
1242 get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);
1243
1244 add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
1245
1246 #ifdef ENABLE_CHECKING
1247 check_dep (dep, false);
1248 #endif
1249
1250 add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1251
1252 /* If we are adding a dependency to INSN's LOG_LINKs, then note that
1253 in the bitmap caches of dependency information. */
1254 if (true_dependency_cache != NULL)
1255 set_dependency_caches (dep);
1256 }
1257
1258 /* Add or update a backward dependence described by DEP; treat it as
1259 resolved if RESOLVED_P is true.
1260 This function is a convenience wrapper around add_or_update_dep_1. */
1261 enum DEPS_ADJUST_RESULT
1262 sd_add_or_update_dep (dep_t dep, bool resolved_p)
1263 {
1264 return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
1265 }
1266
1267 /* Resolve the dependence pointed to by SD_IT.
1268 SD_IT will advance to the next element. */
1269 void
1270 sd_resolve_dep (sd_iterator_def sd_it)
1271 {
1272 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1273 dep_t dep = DEP_NODE_DEP (node);
1274 rtx pro = DEP_PRO (dep);
1275 rtx con = DEP_CON (dep);
1276
1277 if ((current_sched_info->flags & DO_SPECULATION)
1278 && (DEP_STATUS (dep) & SPECULATIVE))
1279 move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
1280 INSN_RESOLVED_BACK_DEPS (con));
1281 else
1282 move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
1283 INSN_RESOLVED_BACK_DEPS (con));
1284
1285 move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
1286 INSN_RESOLVED_FORW_DEPS (pro));
1287 }
1288
1289 /* Make TO depend on all of FROM's producers.
1290 If RESOLVED_P is true add dependencies to the resolved lists. */
1291 void
1292 sd_copy_back_deps (rtx to, rtx from, bool resolved_p)
1293 {
1294 sd_list_types_def list_type;
1295 sd_iterator_def sd_it;
1296 dep_t dep;
1297
1298 list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;
1299
1300 FOR_EACH_DEP (from, list_type, sd_it, dep)
1301 {
1302 dep_def _new_dep, *new_dep = &_new_dep;
1303
1304 copy_dep (new_dep, dep);
1305 DEP_CON (new_dep) = to;
1306 sd_add_dep (new_dep, resolved_p);
1307 }
1308 }
1309
1310 /* Remove a dependency referred to by SD_IT.
1311 SD_IT will point to the next dependence after removal. */
1312 void
1313 sd_delete_dep (sd_iterator_def sd_it)
1314 {
1315 dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
1316 dep_t dep = DEP_NODE_DEP (n);
1317 rtx pro = DEP_PRO (dep);
1318 rtx con = DEP_CON (dep);
1319 deps_list_t con_back_deps;
1320 deps_list_t pro_forw_deps;
1321
1322 if (true_dependency_cache != NULL)
1323 {
1324 int elem_luid = INSN_LUID (pro);
1325 int insn_luid = INSN_LUID (con);
1326
1327 bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
1328 bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1329 bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1330
1331 if (current_sched_info->flags & DO_SPECULATION)
1332 bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
1333 }
1334
1335 get_back_and_forw_lists (dep, sd_it.resolved_p,
1336 &con_back_deps, &pro_forw_deps);
1337
1338 remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
1339 remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1340
1341 delete_dep_node (n);
1342 }
1343
1344 /* Dump size of the lists. */
1345 #define DUMP_LISTS_SIZE (2)
1346
1347 /* Dump dependencies of the lists. */
1348 #define DUMP_LISTS_DEPS (4)
1349
1350 /* Dump all information about the lists. */
1351 #define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)
1352
1353 /* Dump deps_lists of INSN specified by TYPES to DUMP.
1354 FLAGS is a bit mask specifying what information about the lists needs
1355 to be printed.
1356 If FLAGS has the very first bit set, then dump all information about
1357 the lists and propagate this bit into the callee dump functions. */
1358 static void
1359 dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
1360 {
1361 sd_iterator_def sd_it;
1362 dep_t dep;
1363 int all;
1364
1365 all = (flags & 1);
1366
1367 if (all)
1368 flags |= DUMP_LISTS_ALL;
1369
1370 fprintf (dump, "[");
1371
1372 if (flags & DUMP_LISTS_SIZE)
1373 fprintf (dump, "%d; ", sd_lists_size (insn, types));
1374
1375 if (flags & DUMP_LISTS_DEPS)
1376 {
1377 FOR_EACH_DEP (insn, types, sd_it, dep)
1378 {
1379 dump_dep (dump, dep, dump_dep_flags | all);
1380 fprintf (dump, " ");
1381 }
1382 }
1383 }
1384
1385 /* Dump all information about deps_lists of INSN specified by TYPES
1386 to STDERR. */
1387 void
1388 sd_debug_lists (rtx insn, sd_list_types_def types)
1389 {
1390 dump_lists (stderr, insn, types, 1);
1391 fprintf (stderr, "\n");
1392 }
1393
1394 /* A convenience wrapper to operate on an entire list. */
1395
1396 static void
1397 add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
1398 {
1399 for (; list; list = XEXP (list, 1))
1400 {
1401 if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
1402 add_dependence (insn, XEXP (list, 0), dep_type);
1403 }
1404 }
1405
1406 /* Similar, but free *LISTP at the same time, when the context
1407 is not readonly. */
1408
1409 static void
1410 add_dependence_list_and_free (struct deps_desc *deps, rtx insn, rtx *listp,
1411 int uncond, enum reg_note dep_type)
1412 {
1413 rtx list, next;
1414
1415 /* We don't want to short-circuit dependencies involving debug
1416 insns, because they may cause actual dependencies to be
1417 disregarded. */
1418 if (deps->readonly || DEBUG_INSN_P (insn))
1419 {
1420 add_dependence_list (insn, *listp, uncond, dep_type);
1421 return;
1422 }
1423
1424 for (list = *listp, *listp = NULL; list ; list = next)
1425 {
1426 next = XEXP (list, 1);
1427 if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
1428 add_dependence (insn, XEXP (list, 0), dep_type);
1429 free_INSN_LIST_node (list);
1430 }
1431 }
1432
1433 /* Remove all occurrences of INSN from LIST. Return the number of
1434 occurrences removed. */
1435
1436 static int
1437 remove_from_dependence_list (rtx insn, rtx* listp)
1438 {
1439 int removed = 0;
1440
1441 while (*listp)
1442 {
1443 if (XEXP (*listp, 0) == insn)
1444 {
1445 remove_free_INSN_LIST_node (listp);
1446 removed++;
1447 continue;
1448 }
1449
1450 listp = &XEXP (*listp, 1);
1451 }
1452
1453 return removed;
1454 }
1455
1456 /* Same as above, but process two lists at once. */
1457 static int
1458 remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
1459 {
1460 int removed = 0;
1461
1462 while (*listp)
1463 {
1464 if (XEXP (*listp, 0) == insn)
1465 {
1466 remove_free_INSN_LIST_node (listp);
1467 remove_free_EXPR_LIST_node (exprp);
1468 removed++;
1469 continue;
1470 }
1471
1472 listp = &XEXP (*listp, 1);
1473 exprp = &XEXP (*exprp, 1);
1474 }
1475
1476 return removed;
1477 }
1478
1479 /* Clear all dependencies for an insn. */
1480 static void
1481 delete_all_dependences (rtx insn)
1482 {
1483 sd_iterator_def sd_it;
1484 dep_t dep;
1485
1486 /* The loop below could be optimized to clear the caches and back_deps
1487 in one call, but that would duplicate code from
1488 sd_delete_dep (). */
1489
1490 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1491 sd_iterator_cond (&sd_it, &dep);)
1492 sd_delete_dep (sd_it);
1493 }
1494
1495 /* All insns in a scheduling group except the first should only have
1496 dependencies on the previous insn in the group. So we find the
1497 first instruction in the scheduling group by walking the dependence
1498 chains backwards. Then we add the dependencies for the group to
1499 the previous nonnote insn. */
1500
1501 static void
1502 fixup_sched_groups (rtx insn)
1503 {
1504 sd_iterator_def sd_it;
1505 dep_t dep;
1506 rtx prev_nonnote;
1507
1508 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1509 {
1510 rtx i = insn;
1511 rtx pro = DEP_PRO (dep);
1512
1513 do
1514 {
1515 i = prev_nonnote_insn (i);
1516
1517 if (pro == i)
1518 goto next_link;
1519 } while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));
1520
1521 if (! sched_insns_conditions_mutex_p (i, pro))
1522 add_dependence (i, pro, DEP_TYPE (dep));
1523 next_link:;
1524 }
1525
1526 delete_all_dependences (insn);
1527
1528 prev_nonnote = prev_nonnote_nondebug_insn (insn);
1529 if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
1530 && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
1531 add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
1532 }
1533 \f
1534 /* Process an insn's memory dependencies. There are four kinds of
1535 dependencies:
1536
1537 (0) read dependence: read follows read
1538 (1) true dependence: read follows write
1539 (2) output dependence: write follows write
1540 (3) anti dependence: write follows read
1541
1542 We are careful to build only dependencies which actually exist, and
1543 use transitivity to avoid building too many links. */
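
/* An illustration of the four kinds for a memory location [m]:

     r1 = [m]; r2 = [m]; -- (0) read dependence
     [m] = r1; r2 = [m]; -- (1) true dependence
     [m] = r1; [m] = r2; -- (2) output dependence
     r1 = [m]; [m] = r2; -- (3) anti dependence  */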
1544
1545 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1546 The MEM is a memory reference contained within INSN, which we are saving
1547 so that we can do memory aliasing on it. */
1548
1549 static void
1550 add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
1551 rtx insn, rtx mem)
1552 {
1553 rtx *insn_list;
1554 rtx *mem_list;
1555 rtx link;
1556
1557 gcc_assert (!deps->readonly);
1558 if (read_p)
1559 {
1560 insn_list = &deps->pending_read_insns;
1561 mem_list = &deps->pending_read_mems;
1562 if (!DEBUG_INSN_P (insn))
1563 deps->pending_read_list_length++;
1564 }
1565 else
1566 {
1567 insn_list = &deps->pending_write_insns;
1568 mem_list = &deps->pending_write_mems;
1569 deps->pending_write_list_length++;
1570 }
1571
1572 link = alloc_INSN_LIST (insn, *insn_list);
1573 *insn_list = link;
1574
1575 if (sched_deps_info->use_cselib)
1576 {
1577 mem = shallow_copy_rtx (mem);
1578 XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
1579 }
1580 link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
1581 *mem_list = link;
1582 }
1583
1584 /* Make a dependency between every memory reference on the pending lists
1585 and INSN, thus flushing the pending lists. FOR_READ is true if emitting
1586 dependencies for a read operation, similarly with FOR_WRITE. */
1587
1588 static void
1589 flush_pending_lists (struct deps_desc *deps, rtx insn, int for_read,
1590 int for_write)
1591 {
1592 if (for_write)
1593 {
1594 add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1595 1, REG_DEP_ANTI);
1596 if (!deps->readonly)
1597 {
1598 free_EXPR_LIST_list (&deps->pending_read_mems);
1599 deps->pending_read_list_length = 0;
1600 }
1601 }
1602
1603 add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
1604 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
1605
1606 add_dependence_list_and_free (deps, insn,
1607 &deps->last_pending_memory_flush, 1,
1608 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
1609 if (!deps->readonly)
1610 {
1611 free_EXPR_LIST_list (&deps->pending_write_mems);
1612 deps->pending_write_list_length = 0;
1613
1614 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1615 deps->pending_flush_length = 1;
1616 }
1617 }
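
/* After a flush, the pending lists are empty and (unless the context
   is readonly) last_pending_memory_flush holds just INSN, so later
   memory references need only depend on INSN instead of on the whole
   flushed set.  */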
1618 \f
1619 /* Instruction whose dependencies we are analyzing. */
1620 static rtx cur_insn = NULL_RTX;
1621
1622 /* Implement hooks for haifa scheduler. */
1623
1624 static void
1625 haifa_start_insn (rtx insn)
1626 {
1627 gcc_assert (insn && !cur_insn);
1628
1629 cur_insn = insn;
1630 }
1631
1632 static void
1633 haifa_finish_insn (void)
1634 {
1635 cur_insn = NULL;
1636 }
1637
1638 void
1639 haifa_note_reg_set (int regno)
1640 {
1641 SET_REGNO_REG_SET (reg_pending_sets, regno);
1642 }
1643
1644 void
1645 haifa_note_reg_clobber (int regno)
1646 {
1647 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1648 }
1649
1650 void
1651 haifa_note_reg_use (int regno)
1652 {
1653 SET_REGNO_REG_SET (reg_pending_uses, regno);
1654 }
1655
1656 static void
1657 haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds)
1658 {
1659 if (!(ds & SPECULATIVE))
1660 {
1661 mem = NULL_RTX;
1662 pending_mem = NULL_RTX;
1663 }
1664 else
1665 gcc_assert (ds & BEGIN_DATA);
1666
1667 {
1668 dep_def _dep, *dep = &_dep;
1669
1670 init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
1671 current_sched_info->flags & USE_DEPS_LIST ? ds : -1);
1672 maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1673 }
1674
1675 }
1676
1677 static void
1678 haifa_note_dep (rtx elem, ds_t ds)
1679 {
1680 dep_def _dep;
1681 dep_t dep = &_dep;
1682
1683 init_dep (dep, elem, cur_insn, ds_to_dt (ds));
1684 maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1685 }
1686
1687 static void
1688 note_reg_use (int r)
1689 {
1690 if (sched_deps_info->note_reg_use)
1691 sched_deps_info->note_reg_use (r);
1692 }
1693
1694 static void
1695 note_reg_set (int r)
1696 {
1697 if (sched_deps_info->note_reg_set)
1698 sched_deps_info->note_reg_set (r);
1699 }
1700
1701 static void
1702 note_reg_clobber (int r)
1703 {
1704 if (sched_deps_info->note_reg_clobber)
1705 sched_deps_info->note_reg_clobber (r);
1706 }
1707
1708 static void
1709 note_mem_dep (rtx m1, rtx m2, rtx e, ds_t ds)
1710 {
1711 if (sched_deps_info->note_mem_dep)
1712 sched_deps_info->note_mem_dep (m1, m2, e, ds);
1713 }
1714
1715 static void
1716 note_dep (rtx e, ds_t ds)
1717 {
1718 if (sched_deps_info->note_dep)
1719 sched_deps_info->note_dep (e, ds);
1720 }
1721
1722 /* Return the reg_note corresponding to DS. */
1723 enum reg_note
1724 ds_to_dt (ds_t ds)
1725 {
1726 if (ds & DEP_TRUE)
1727 return REG_DEP_TRUE;
1728 else if (ds & DEP_OUTPUT)
1729 return REG_DEP_OUTPUT;
1730 else
1731 {
1732 gcc_assert (ds & DEP_ANTI);
1733 return REG_DEP_ANTI;
1734 }
1735 }
1736
1737 \f
1738
1739 /* Functions for computation of info needed for register pressure
1740 sensitive insn scheduling. */
1741
1742
1743 /* Allocate and return reg_use_data structure for REGNO and INSN. */
1744 static struct reg_use_data *
1745 create_insn_reg_use (int regno, rtx insn)
1746 {
1747 struct reg_use_data *use;
1748
1749 use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1750 use->regno = regno;
1751 use->insn = insn;
1752 use->next_insn_use = INSN_REG_USE_LIST (insn);
1753 INSN_REG_USE_LIST (insn) = use;
1754 return use;
1755 }
1756
1757 /* Allocate and return reg_set_data structure for REGNO and INSN. */
1758 static struct reg_set_data *
1759 create_insn_reg_set (int regno, rtx insn)
1760 {
1761 struct reg_set_data *set;
1762
1763 set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1764 set->regno = regno;
1765 set->insn = insn;
1766 set->next_insn_set = INSN_REG_SET_LIST (insn);
1767 INSN_REG_SET_LIST (insn) = set;
1768 return set;
1769 }
1770
1771 /* Set up insn register uses for INSN and dependency context DEPS. */
1772 static void
1773 setup_insn_reg_uses (struct deps_desc *deps, rtx insn)
1774 {
1775 unsigned i;
1776 reg_set_iterator rsi;
1777 rtx list;
1778 struct reg_use_data *use, *use2, *next;
1779 struct deps_reg *reg_last;
1780
1781 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1782 {
1783 if (i < FIRST_PSEUDO_REGISTER
1784 && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1785 continue;
1786
1787 if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1788 && ! REGNO_REG_SET_P (reg_pending_sets, i)
1789 && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
1790 /* Ignore a use that is not dying. */
1791 continue;
1792
1793 use = create_insn_reg_use (i, insn);
1794 use->next_regno_use = use;
1795 reg_last = &deps->reg_last[i];
1796
1797 /* Create the circular list of uses. */
1798 for (list = reg_last->uses; list; list = XEXP (list, 1))
1799 {
1800 use2 = create_insn_reg_use (i, XEXP (list, 0));
1801 next = use->next_regno_use;
1802 use->next_regno_use = use2;
1803 use2->next_regno_use = next;
1804 }
1805 }
1806 }
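
/* The next_regno_use links built above are circular: starting from
   any reg_use_data for register I one can visit every recorded use
   of I and return to the starting point.  */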
1807
1808 /* Register pressure info for the currently processed insn. */
1809 static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
1810
1811 /* Return TRUE if INSN has the use structure for REGNO. */
1812 static bool
1813 insn_use_p (rtx insn, int regno)
1814 {
1815 struct reg_use_data *use;
1816
1817 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1818 if (use->regno == regno)
1819 return true;
1820 return false;
1821 }
1822
1823 /* Update the register pressure info after birth of pseudo register REGNO
1824 in INSN. Arguments CLOBBER_P and UNUSED_P indicate, respectively,
1825 that the register is clobbered or unused after the insn. */
1826 static void
1827 mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
1828 {
1829 int incr, new_incr;
1830 enum reg_class cl;
1831
1832 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
1833 cl = sched_regno_cover_class[regno];
1834 if (cl != NO_REGS)
1835 {
1836 incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
1837 if (clobber_p)
1838 {
1839 new_incr = reg_pressure_info[cl].clobber_increase + incr;
1840 reg_pressure_info[cl].clobber_increase = new_incr;
1841 }
1842 else if (unused_p)
1843 {
1844 new_incr = reg_pressure_info[cl].unused_set_increase + incr;
1845 reg_pressure_info[cl].unused_set_increase = new_incr;
1846 }
1847 else
1848 {
1849 new_incr = reg_pressure_info[cl].set_increase + incr;
1850 reg_pressure_info[cl].set_increase = new_incr;
1851 if (! insn_use_p (insn, regno))
1852 reg_pressure_info[cl].change += incr;
1853 create_insn_reg_set (regno, insn);
1854 }
1855 gcc_assert (new_incr < (1 << INCREASE_BITS));
1856 }
1857 }
1858
1859 /* Like mark_insn_pseudo_birth except that NREGS says how many
1860 hard registers are involved in the birth. */
1861 static void
1862 mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
1863 bool clobber_p, bool unused_p)
1864 {
1865 enum reg_class cl;
1866 int new_incr, last = regno + nregs;
1867
1868 while (regno < last)
1869 {
1870 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
1871 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
1872 {
1873 cl = sched_regno_cover_class[regno];
1874 if (cl != NO_REGS)
1875 {
1876 if (clobber_p)
1877 {
1878 new_incr = reg_pressure_info[cl].clobber_increase + 1;
1879 reg_pressure_info[cl].clobber_increase = new_incr;
1880 }
1881 else if (unused_p)
1882 {
1883 new_incr = reg_pressure_info[cl].unused_set_increase + 1;
1884 reg_pressure_info[cl].unused_set_increase = new_incr;
1885 }
1886 else
1887 {
1888 new_incr = reg_pressure_info[cl].set_increase + 1;
1889 reg_pressure_info[cl].set_increase = new_incr;
1890 if (! insn_use_p (insn, regno))
1891 reg_pressure_info[cl].change += 1;
1892 create_insn_reg_set (regno, insn);
1893 }
1894 gcc_assert (new_incr < (1 << INCREASE_BITS));
1895 }
1896 }
1897 regno++;
1898 }
1899 }
1900
1901 /* Update the register pressure info after birth of pseudo or hard
1902 register REG in INSN. Arguments CLOBBER_P and UNUSED_P indicate,
1903 respectively, that the register is clobbered or unused after the
1904 insn. */
1905 static void
1906 mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
1907 {
1908 int regno;
1909
1910 if (GET_CODE (reg) == SUBREG)
1911 reg = SUBREG_REG (reg);
1912
1913 if (! REG_P (reg))
1914 return;
1915
1916 regno = REGNO (reg);
1917 if (regno < FIRST_PSEUDO_REGISTER)
1918 mark_insn_hard_regno_birth (insn, regno,
1919 hard_regno_nregs[regno][GET_MODE (reg)],
1920 clobber_p, unused_p);
1921 else
1922 mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
1923 }
1924
1925 /* Update the register pressure info after death of pseudo register
1926 REGNO. */
1927 static void
1928 mark_pseudo_death (int regno)
1929 {
1930 int incr;
1931 enum reg_class cl;
1932
1933 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
1934 cl = sched_regno_cover_class[regno];
1935 if (cl != NO_REGS)
1936 {
1937 incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
1938 reg_pressure_info[cl].change -= incr;
1939 }
1940 }
1941
1942 /* Like mark_pseudo_death except that NREGS says how many hard
1943 registers are involved in the death. */
1944 static void
1945 mark_hard_regno_death (int regno, int nregs)
1946 {
1947 enum reg_class cl;
1948 int last = regno + nregs;
1949
1950 while (regno < last)
1951 {
1952 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
1953 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
1954 {
1955 cl = sched_regno_cover_class[regno];
1956 if (cl != NO_REGS)
1957 reg_pressure_info[cl].change -= 1;
1958 }
1959 regno++;
1960 }
1961 }
1962
1963 /* Update the register pressure info after death of pseudo or hard
1964 register REG. */
1965 static void
1966 mark_reg_death (rtx reg)
1967 {
1968 int regno;
1969
1970 if (GET_CODE (reg) == SUBREG)
1971 reg = SUBREG_REG (reg);
1972
1973 if (! REG_P (reg))
1974 return;
1975
1976 regno = REGNO (reg);
1977 if (regno < FIRST_PSEUDO_REGISTER)
1978 mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]);
1979 else
1980 mark_pseudo_death (regno);
1981 }
1982
1983 /* Process SETTER of REG. DATA is an insn containing the setter. */
1984 static void
1985 mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
1986 {
1987 if (setter != NULL_RTX && GET_CODE (setter) != SET)
1988 return;
1989 mark_insn_reg_birth
1990 ((rtx) data, reg, false,
1991 find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
1992 }
1993
1994 /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs. */
1995 static void
1996 mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
1997 {
1998 if (GET_CODE (setter) == CLOBBER)
1999 mark_insn_reg_birth ((rtx) data, reg, true, false);
2000 }
2001
2002 /* Set up reg pressure info related to INSN. */
2003 static void
2004 setup_insn_reg_pressure_info (rtx insn)
2005 {
2006 int i, len;
2007 enum reg_class cl;
  struct reg_pressure_data *pressure_info;
2009 rtx link;
2010
2011 gcc_assert (sched_pressure_p);
2012
2013 if (! INSN_P (insn))
2014 return;
2015
2016 for (i = 0; i < ira_reg_class_cover_size; i++)
2017 {
2018 cl = ira_reg_class_cover[i];
2019 reg_pressure_info[cl].clobber_increase = 0;
2020 reg_pressure_info[cl].set_increase = 0;
2021 reg_pressure_info[cl].unused_set_increase = 0;
2022 reg_pressure_info[cl].change = 0;
2023 }
2024
2025 note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
2026
2027 note_stores (PATTERN (insn), mark_insn_reg_store, insn);
2028
2029 #ifdef AUTO_INC_DEC
2030 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2031 if (REG_NOTE_KIND (link) == REG_INC)
2032 mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2033 #endif
2034
2035 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2036 if (REG_NOTE_KIND (link) == REG_DEAD)
2037 mark_reg_death (XEXP (link, 0));
2038
2039 len = sizeof (struct reg_pressure_data) * ira_reg_class_cover_size;
2040 pressure_info
2041 = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
  INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_reg_class_cover_size,
						  sizeof (int));
2044 for (i = 0; i < ira_reg_class_cover_size; i++)
2045 {
2046 cl = ira_reg_class_cover[i];
2047 pressure_info[i].clobber_increase
2048 = reg_pressure_info[cl].clobber_increase;
2049 pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2050 pressure_info[i].unused_set_increase
2051 = reg_pressure_info[cl].unused_set_increase;
2052 pressure_info[i].change = reg_pressure_info[cl].change;
2053 }
2054 }
2055
2056
2057 \f
2058
/* Internal variable for sched_analyze_[12] () functions.
   If it is nonzero, this means that sched_analyze_[12] is looking at
   the outermost SET of the insn being analyzed.  */
2062 static bool can_start_lhs_rhs_p;
2063
2064 /* Extend reg info for the deps context DEPS given that
2065 we have just generated a register numbered REGNO. */
2066 static void
2067 extend_deps_reg_info (struct deps_desc *deps, int regno)
2068 {
2069 int max_regno = regno + 1;
2070
2071 gcc_assert (!reload_completed);
2072
2073 /* In a readonly context, it would not hurt to extend info,
2074 but it should not be needed. */
2075 if (reload_completed && deps->readonly)
2076 {
2077 deps->max_reg = max_regno;
2078 return;
2079 }
2080
2081 if (max_regno > deps->max_reg)
2082 {
2083 deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2084 max_regno);
2085 memset (&deps->reg_last[deps->max_reg],
2086 0, (max_regno - deps->max_reg)
2087 * sizeof (struct deps_reg));
2088 deps->max_reg = max_regno;
2089 }
2090 }
2091
2092 /* Extends REG_INFO_P if needed. */
2093 void
2094 maybe_extend_reg_info_p (void)
2095 {
2096 /* Extend REG_INFO_P, if needed. */
2097 if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2098 {
2099 size_t new_reg_info_p_size = max_regno + 128;
2100
2101 gcc_assert (!reload_completed && sel_sched_p ());
2102
2103 reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2104 new_reg_info_p_size,
2105 reg_info_p_size,
2106 sizeof (*reg_info_p));
2107 reg_info_p_size = new_reg_info_p_size;
2108 }
2109 }
2110
2111 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2112 The type of the reference is specified by REF and can be SET,
2113 CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
2114
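/* For instance, a USE of a pseudo records the use so that
   sched_analyze_insn later adds a true dependence on the pseudo's last
   setter; and if the pseudo is known REG_EQUIV to a MEM, the address
   of that MEM is analyzed as well, since reload may substitute the
   memory for the register.  */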
2115 static void
2116 sched_analyze_reg (struct deps_desc *deps, int regno, enum machine_mode mode,
2117 enum rtx_code ref, rtx insn)
2118 {
2119 /* We could emit new pseudos in renaming. Extend the reg structures. */
2120 if (!reload_completed && sel_sched_p ()
2121 && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2122 extend_deps_reg_info (deps, regno);
2123
2124 maybe_extend_reg_info_p ();
2125
2126 /* A hard reg in a wide mode may really be multiple registers.
2127 If so, mark all of them just like the first. */
2128 if (regno < FIRST_PSEUDO_REGISTER)
2129 {
2130 int i = hard_regno_nregs[regno][mode];
2131 if (ref == SET)
2132 {
2133 while (--i >= 0)
2134 note_reg_set (regno + i);
2135 }
2136 else if (ref == USE)
2137 {
2138 while (--i >= 0)
2139 note_reg_use (regno + i);
2140 }
2141 else
2142 {
2143 while (--i >= 0)
2144 note_reg_clobber (regno + i);
2145 }
2146 }
2147
2148 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2149 it does not reload. Ignore these as they have served their
2150 purpose already. */
2151 else if (regno >= deps->max_reg)
2152 {
2153 enum rtx_code code = GET_CODE (PATTERN (insn));
2154 gcc_assert (code == USE || code == CLOBBER);
2155 }
2156
2157 else
2158 {
2159 if (ref == SET)
2160 note_reg_set (regno);
2161 else if (ref == USE)
2162 note_reg_use (regno);
2163 else
2164 note_reg_clobber (regno);
2165
2166 /* Pseudos that are REG_EQUIV to something may be replaced
2167 by that during reloading. We need only add dependencies for
2168 the address in the REG_EQUIV note. */
2169 if (!reload_completed && get_reg_known_equiv_p (regno))
2170 {
2171 rtx t = get_reg_known_value (regno);
2172 if (MEM_P (t))
2173 sched_analyze_2 (deps, XEXP (t, 0), insn);
2174 }
2175
2176 /* Don't let it cross a call after scheduling if it doesn't
2177 already cross one. */
2178 if (REG_N_CALLS_CROSSED (regno) == 0)
2179 {
2180 if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2181 deps->sched_before_next_call
2182 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2183 else
2184 add_dependence_list (insn, deps->last_function_call, 1,
2185 REG_DEP_ANTI);
2186 }
2187 }
2188 }
2189
2190 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2191 rtx, X, creating all dependencies generated by the write to the
2192 destination of X, and reads of everything mentioned. */
2193
2194 static void
2195 sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn)
2196 {
2197 rtx dest = XEXP (x, 0);
2198 enum rtx_code code = GET_CODE (x);
2199 bool cslr_p = can_start_lhs_rhs_p;
2200
2201 can_start_lhs_rhs_p = false;
2202
2203 gcc_assert (dest);
2204 if (dest == 0)
2205 return;
2206
2207 if (cslr_p && sched_deps_info->start_lhs)
2208 sched_deps_info->start_lhs (dest);
2209
2210 if (GET_CODE (dest) == PARALLEL)
2211 {
2212 int i;
2213
2214 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2215 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2216 sched_analyze_1 (deps,
2217 gen_rtx_CLOBBER (VOIDmode,
2218 XEXP (XVECEXP (dest, 0, i), 0)),
2219 insn);
2220
2221 if (cslr_p && sched_deps_info->finish_lhs)
2222 sched_deps_info->finish_lhs ();
2223
2224 if (code == SET)
2225 {
2226 can_start_lhs_rhs_p = cslr_p;
2227
2228 sched_analyze_2 (deps, SET_SRC (x), insn);
2229
2230 can_start_lhs_rhs_p = false;
2231 }
2232
2233 return;
2234 }
2235
2236 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2237 || GET_CODE (dest) == ZERO_EXTRACT)
2238 {
2239 if (GET_CODE (dest) == STRICT_LOW_PART
2240 || GET_CODE (dest) == ZERO_EXTRACT
2241 || df_read_modify_subreg_p (dest))
2242 {
2243 /* These both read and modify the result. We must handle
2244 them as writes to get proper dependencies for following
2245 instructions. We must handle them as reads to get proper
2246 dependencies from this to previous instructions.
2247 Thus we need to call sched_analyze_2. */
2248
2249 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2250 }
2251 if (GET_CODE (dest) == ZERO_EXTRACT)
2252 {
2253 /* The second and third arguments are values read by this insn. */
2254 sched_analyze_2 (deps, XEXP (dest, 1), insn);
2255 sched_analyze_2 (deps, XEXP (dest, 2), insn);
2256 }
2257 dest = XEXP (dest, 0);
2258 }
2259
2260 if (REG_P (dest))
2261 {
2262 int regno = REGNO (dest);
2263 enum machine_mode mode = GET_MODE (dest);
2264
2265 sched_analyze_reg (deps, regno, mode, code, insn);
2266
2267 #ifdef STACK_REGS
2268 /* Treat all writes to a stack register as modifying the TOS. */
2269 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2270 {
2271 int nregs;
2272
2273 /* Avoid analyzing the same register twice. */
2274 if (regno != FIRST_STACK_REG)
2275 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2276
2277 nregs = hard_regno_nregs[FIRST_STACK_REG][mode];
2278 while (--nregs >= 0)
2279 SET_HARD_REG_BIT (implicit_reg_pending_uses,
2280 FIRST_STACK_REG + nregs);
2281 }
2282 #endif
2283 }
2284 else if (MEM_P (dest))
2285 {
2286 /* Writing memory. */
2287 rtx t = dest;
2288
2289 if (sched_deps_info->use_cselib)
2290 {
2291 enum machine_mode address_mode
2292 = targetm.addr_space.address_mode (MEM_ADDR_SPACE (dest));
2293
2294 t = shallow_copy_rtx (dest);
2295 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1, insn);
2296 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
2297 }
2298 t = canon_rtx (t);
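      /* Sketch of what follows: a store gets an anti dependence on each
	 pending read that may overlap the same location and an output
	 dependence on each pending write that may, unless alias
	 analysis can prove the accesses independent.  */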
2299
2300 /* Pending lists can't get larger with a readonly context. */
2301 if (!deps->readonly
2302 && ((deps->pending_read_list_length + deps->pending_write_list_length)
2303 > MAX_PENDING_LIST_LENGTH))
2304 {
2305 /* Flush all pending reads and writes to prevent the pending lists
2306 from getting any larger. Insn scheduling runs too slowly when
2307 these lists get long. When compiling GCC with itself,
2308 this flush occurs 8 times for sparc, and 10 times for m88k using
2309 the default value of 32. */
2310 flush_pending_lists (deps, insn, false, true);
2311 }
2312 else
2313 {
2314 rtx pending, pending_mem;
2315
2316 pending = deps->pending_read_insns;
2317 pending_mem = deps->pending_read_mems;
2318 while (pending)
2319 {
2320 if (anti_dependence (XEXP (pending_mem, 0), t)
2321 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2322 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2323 DEP_ANTI);
2324
2325 pending = XEXP (pending, 1);
2326 pending_mem = XEXP (pending_mem, 1);
2327 }
2328
2329 pending = deps->pending_write_insns;
2330 pending_mem = deps->pending_write_mems;
2331 while (pending)
2332 {
2333 if (output_dependence (XEXP (pending_mem, 0), t)
2334 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2335 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2336 DEP_OUTPUT);
2337
2338 pending = XEXP (pending, 1);
2339 pending_mem = XEXP (pending_mem, 1);
2340 }
2341
2342 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2343 REG_DEP_ANTI);
2344
2345 if (!deps->readonly)
2346 add_insn_mem_dependence (deps, false, insn, dest);
2347 }
2348 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2349 }
2350
2351 if (cslr_p && sched_deps_info->finish_lhs)
2352 sched_deps_info->finish_lhs ();
2353
2354 /* Analyze reads. */
2355 if (GET_CODE (x) == SET)
2356 {
2357 can_start_lhs_rhs_p = cslr_p;
2358
2359 sched_analyze_2 (deps, SET_SRC (x), insn);
2360
2361 can_start_lhs_rhs_p = false;
2362 }
2363 }
2364
2365 /* Analyze the uses of memory and registers in rtx X in INSN. */
2366 static void
2367 sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn)
2368 {
2369 int i;
2370 int j;
2371 enum rtx_code code;
2372 const char *fmt;
2373 bool cslr_p = can_start_lhs_rhs_p;
2374
2375 can_start_lhs_rhs_p = false;
2376
2377 gcc_assert (x);
2378 if (x == 0)
2379 return;
2380
2381 if (cslr_p && sched_deps_info->start_rhs)
2382 sched_deps_info->start_rhs (x);
2383
2384 code = GET_CODE (x);
2385
2386 switch (code)
2387 {
2388 case CONST_INT:
2389 case CONST_DOUBLE:
2390 case CONST_FIXED:
2391 case CONST_VECTOR:
2392 case SYMBOL_REF:
2393 case CONST:
2394 case LABEL_REF:
2395 /* Ignore constants. */
2396 if (cslr_p && sched_deps_info->finish_rhs)
2397 sched_deps_info->finish_rhs ();
2398
2399 return;
2400
2401 #ifdef HAVE_cc0
2402 case CC0:
2403 /* User of CC0 depends on immediately preceding insn. */
2404 SCHED_GROUP_P (insn) = 1;
2405 /* Don't move CC0 setter to another block (it can set up the
2406 same flag for previous CC0 users which is safe). */
2407 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2408
2409 if (cslr_p && sched_deps_info->finish_rhs)
2410 sched_deps_info->finish_rhs ();
2411
2412 return;
2413 #endif
2414
2415 case REG:
2416 {
2417 int regno = REGNO (x);
2418 enum machine_mode mode = GET_MODE (x);
2419
2420 sched_analyze_reg (deps, regno, mode, USE, insn);
2421
2422 #ifdef STACK_REGS
2423 /* Treat all reads of a stack register as modifying the TOS. */
2424 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2425 {
2426 /* Avoid analyzing the same register twice. */
2427 if (regno != FIRST_STACK_REG)
2428 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2429 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2430 }
2431 #endif
2432
2433 if (cslr_p && sched_deps_info->finish_rhs)
2434 sched_deps_info->finish_rhs ();
2435
2436 return;
2437 }
2438
2439 case MEM:
2440 {
2441 /* Reading memory. */
2442 rtx u;
2443 rtx pending, pending_mem;
2444 rtx t = x;
2445
2446 if (sched_deps_info->use_cselib)
2447 {
2448 enum machine_mode address_mode
2449 = targetm.addr_space.address_mode (MEM_ADDR_SPACE (t));
2450
2451 t = shallow_copy_rtx (t);
2452 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1, insn);
2453 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
2454 }
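	/* Sketch of what follows: a load gets an anti dependence on a
	   pending read only when both reads are volatile, and a true
	   dependence on each pending write that may reach the same
	   location; with speculative scheduling enabled that true
	   dependence is additionally marked BEGIN_DATA, so the load may
	   be hoisted and verified by a later speculation check.  */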
2455
2456 if (!DEBUG_INSN_P (insn))
2457 {
2458 t = canon_rtx (t);
2459 pending = deps->pending_read_insns;
2460 pending_mem = deps->pending_read_mems;
2461 while (pending)
2462 {
2463 if (read_dependence (XEXP (pending_mem, 0), t)
2464 && ! sched_insns_conditions_mutex_p (insn,
2465 XEXP (pending, 0)))
2466 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2467 DEP_ANTI);
2468
2469 pending = XEXP (pending, 1);
2470 pending_mem = XEXP (pending_mem, 1);
2471 }
2472
2473 pending = deps->pending_write_insns;
2474 pending_mem = deps->pending_write_mems;
2475 while (pending)
2476 {
2477 if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
2478 t, rtx_varies_p)
2479 && ! sched_insns_conditions_mutex_p (insn,
2480 XEXP (pending, 0)))
2481 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2482 sched_deps_info->generate_spec_deps
2483 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2484
2485 pending = XEXP (pending, 1);
2486 pending_mem = XEXP (pending_mem, 1);
2487 }
2488
2489 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2490 {
2491 if (! NON_FLUSH_JUMP_P (u))
2492 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2493 else if (deps_may_trap_p (x))
2494 {
2495 if ((sched_deps_info->generate_spec_deps)
2496 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2497 {
2498 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2499 MAX_DEP_WEAK);
2500
2501 note_dep (XEXP (u, 0), ds);
2502 }
2503 else
2504 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2505 }
2506 }
2507 }
2508
2509 /* Always add these dependencies to pending_reads, since
2510 this insn may be followed by a write. */
2511 if (!deps->readonly)
2512 add_insn_mem_dependence (deps, true, insn, x);
2513
2514 sched_analyze_2 (deps, XEXP (x, 0), insn);
2515
2516 if (cslr_p && sched_deps_info->finish_rhs)
2517 sched_deps_info->finish_rhs ();
2518
2519 return;
2520 }
2521
2522 /* Force pending stores to memory in case a trap handler needs them. */
2523 case TRAP_IF:
2524 flush_pending_lists (deps, insn, true, false);
2525 break;
2526
2527 case PREFETCH:
2528 if (PREFETCH_SCHEDULE_BARRIER_P (x))
2529 reg_pending_barrier = TRUE_BARRIER;
2530 break;
2531
2532 case UNSPEC_VOLATILE:
2533 flush_pending_lists (deps, insn, true, true);
2534 /* FALLTHRU */
2535
2536 case ASM_OPERANDS:
2537 case ASM_INPUT:
2538 {
2539 /* Traditional and volatile asm instructions must be considered to use
2540 and clobber all hard registers, all pseudo-registers and all of
2541 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2542
2543 Consider for instance a volatile asm that changes the fpu rounding
2544 mode. An insn should not be moved across this even if it only uses
2545 pseudo-regs because it might give an incorrectly rounded result. */
2546 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2547 reg_pending_barrier = TRUE_BARRIER;
2548
	/* For all ASM_OPERANDS, we must traverse the vector of input
	   operands.  We cannot just fall through here since then we
	   would be confused by the ASM_INPUT rtx inside ASM_OPERANDS,
	   which does not indicate a traditional asm unlike its normal
	   usage.  */
2553
2554 if (code == ASM_OPERANDS)
2555 {
2556 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2557 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2558
2559 if (cslr_p && sched_deps_info->finish_rhs)
2560 sched_deps_info->finish_rhs ();
2561
2562 return;
2563 }
2564 break;
2565 }
2566
2567 case PRE_DEC:
2568 case POST_DEC:
2569 case PRE_INC:
2570 case POST_INC:
2571 /* These both read and modify the result. We must handle them as writes
2572 to get proper dependencies for following instructions. We must handle
2573 them as reads to get proper dependencies from this to previous
2574 instructions. Thus we need to pass them to both sched_analyze_1
2575 and sched_analyze_2. We must call sched_analyze_2 first in order
2576 to get the proper antecedent for the read. */
2577 sched_analyze_2 (deps, XEXP (x, 0), insn);
2578 sched_analyze_1 (deps, x, insn);
2579
2580 if (cslr_p && sched_deps_info->finish_rhs)
2581 sched_deps_info->finish_rhs ();
2582
2583 return;
2584
2585 case POST_MODIFY:
2586 case PRE_MODIFY:
      /* The address register is both read and written, and the second
	 operand supplies the new value, as in
	 (pre_modify (reg A) (plus (reg A) (reg B))); so analyze both
	 operands as reads and the whole rtx as a write.  */
2588 sched_analyze_2 (deps, XEXP (x, 0), insn);
2589 sched_analyze_2 (deps, XEXP (x, 1), insn);
2590 sched_analyze_1 (deps, x, insn);
2591
2592 if (cslr_p && sched_deps_info->finish_rhs)
2593 sched_deps_info->finish_rhs ();
2594
2595 return;
2596
2597 default:
2598 break;
2599 }
2600
2601 /* Other cases: walk the insn. */
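  /* GET_RTX_FORMAT yields one letter per operand: 'e' marks an rtx
     operand and 'E' a vector of rtxes, each analyzed recursively.  */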
2602 fmt = GET_RTX_FORMAT (code);
2603 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2604 {
2605 if (fmt[i] == 'e')
2606 sched_analyze_2 (deps, XEXP (x, i), insn);
2607 else if (fmt[i] == 'E')
2608 for (j = 0; j < XVECLEN (x, i); j++)
2609 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2610 }
2611
2612 if (cslr_p && sched_deps_info->finish_rhs)
2613 sched_deps_info->finish_rhs ();
2614 }
2615
2616 /* Analyze an INSN with pattern X to find all dependencies. */
2617 static void
2618 sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
2619 {
2620 RTX_CODE code = GET_CODE (x);
2621 rtx link;
2622 unsigned i;
2623 reg_set_iterator rsi;
2624
2625 if (! reload_completed)
2626 {
2627 HARD_REG_SET temp;
2628
2629 extract_insn (insn);
2630 preprocess_constraints ();
2631 ira_implicitly_set_insn_hard_regs (&temp);
2632 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
2633 IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2634 }
2635
2636 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2637 && code == SET);
2638
2639 if (may_trap_p (x))
    /* Avoid moving trapping instructions across function calls that might
       not always return.  */
2642 add_dependence_list (insn, deps->last_function_call_may_noreturn,
2643 1, REG_DEP_ANTI);
2644
2645 if (code == COND_EXEC)
2646 {
2647 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2648
2649 /* ??? Should be recording conditions so we reduce the number of
2650 false dependencies. */
2651 x = COND_EXEC_CODE (x);
2652 code = GET_CODE (x);
2653 }
2654 if (code == SET || code == CLOBBER)
2655 {
2656 sched_analyze_1 (deps, x, insn);
2657
2658 /* Bare clobber insns are used for letting life analysis, reg-stack
2659 and others know that a value is dead. Depend on the last call
2660 instruction so that reg-stack won't get confused. */
2661 if (code == CLOBBER)
2662 add_dependence_list (insn, deps->last_function_call, 1,
2663 REG_DEP_OUTPUT);
2664 }
2665 else if (code == PARALLEL)
2666 {
2667 for (i = XVECLEN (x, 0); i--;)
2668 {
2669 rtx sub = XVECEXP (x, 0, i);
2670 code = GET_CODE (sub);
2671
2672 if (code == COND_EXEC)
2673 {
2674 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2675 sub = COND_EXEC_CODE (sub);
2676 code = GET_CODE (sub);
2677 }
2678 if (code == SET || code == CLOBBER)
2679 sched_analyze_1 (deps, sub, insn);
2680 else
2681 sched_analyze_2 (deps, sub, insn);
2682 }
2683 }
2684 else
2685 sched_analyze_2 (deps, x, insn);
2686
  /* Mark registers CLOBBERED or used by the called function.  */
2688 if (CALL_P (insn))
2689 {
2690 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2691 {
2692 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2693 sched_analyze_1 (deps, XEXP (link, 0), insn);
2694 else
2695 sched_analyze_2 (deps, XEXP (link, 0), insn);
2696 }
2697 if (find_reg_note (insn, REG_SETJMP, NULL))
2698 reg_pending_barrier = MOVE_BARRIER;
2699 }
2700
2701 if (JUMP_P (insn))
2702 {
2703 rtx next;
2704 next = next_nonnote_nondebug_insn (insn);
2705 if (next && BARRIER_P (next))
2706 reg_pending_barrier = MOVE_BARRIER;
2707 else
2708 {
2709 rtx pending, pending_mem;
2710
2711 if (sched_deps_info->compute_jump_reg_dependencies)
2712 {
2713 regset_head tmp_uses, tmp_sets;
2714 INIT_REG_SET (&tmp_uses);
2715 INIT_REG_SET (&tmp_sets);
2716
2717 (*sched_deps_info->compute_jump_reg_dependencies)
2718 (insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
2719 /* Make latency of jump equal to 0 by using anti-dependence. */
2720 EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi)
2721 {
2722 struct deps_reg *reg_last = &deps->reg_last[i];
2723 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
2724 add_dependence_list (insn, reg_last->implicit_sets,
2725 0, REG_DEP_ANTI);
2726 add_dependence_list (insn, reg_last->clobbers, 0,
2727 REG_DEP_ANTI);
2728
2729 if (!deps->readonly)
2730 {
2731 reg_last->uses_length++;
2732 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2733 }
2734 }
2735 IOR_REG_SET (reg_pending_sets, &tmp_sets);
2736
2737 CLEAR_REG_SET (&tmp_uses);
2738 CLEAR_REG_SET (&tmp_sets);
2739 }
2740
	  /* All memory writes and volatile reads must happen before the
	     jump.  Non-volatile reads must happen before the jump iff
	     the result is needed in the register-use mask computed
	     above.  */
2744
2745 pending = deps->pending_write_insns;
2746 pending_mem = deps->pending_write_mems;
2747 while (pending)
2748 {
2749 if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2750 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2751 pending = XEXP (pending, 1);
2752 pending_mem = XEXP (pending_mem, 1);
2753 }
2754
2755 pending = deps->pending_read_insns;
2756 pending_mem = deps->pending_read_mems;
2757 while (pending)
2758 {
2759 if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
2760 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2761 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2762 pending = XEXP (pending, 1);
2763 pending_mem = XEXP (pending_mem, 1);
2764 }
2765
2766 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2767 REG_DEP_ANTI);
2768 }
2769 }
2770
2771 /* If this instruction can throw an exception, then moving it changes
2772 where block boundaries fall. This is mighty confusing elsewhere.
2773 Therefore, prevent such an instruction from being moved. Same for
2774 non-jump instructions that define block boundaries.
2775 ??? Unclear whether this is still necessary in EBB mode. If not,
2776 add_branch_dependences should be adjusted for RGN mode instead. */
2777 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
2778 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
2779 reg_pending_barrier = MOVE_BARRIER;
2780
2781 if (sched_pressure_p)
2782 {
2783 setup_insn_reg_uses (deps, insn);
2784 setup_insn_reg_pressure_info (insn);
2785 }
2786
2787 /* Add register dependencies for insn. */
2788 if (DEBUG_INSN_P (insn))
2789 {
2790 rtx prev = deps->last_debug_insn;
2791 rtx u;
2792
2793 if (!deps->readonly)
2794 deps->last_debug_insn = insn;
2795
2796 if (prev)
2797 add_dependence (insn, prev, REG_DEP_ANTI);
2798
2799 add_dependence_list (insn, deps->last_function_call, 1,
2800 REG_DEP_ANTI);
2801
2802 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2803 if (! NON_FLUSH_JUMP_P (u) || !sel_sched_p ())
2804 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2805
2806 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2807 {
2808 struct deps_reg *reg_last = &deps->reg_last[i];
2809 add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI);
2810 add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI);
2811
2812 if (!deps->readonly)
2813 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2814 }
2815 CLEAR_REG_SET (reg_pending_uses);
2816
2817 /* Quite often, a debug insn will refer to stuff in the
2818 previous instruction, but the reason we want this
2819 dependency here is to make sure the scheduler doesn't
2820 gratuitously move a debug insn ahead. This could dirty
2821 DF flags and cause additional analysis that wouldn't have
2822 occurred in compilation without debug insns, and such
2823 additional analysis can modify the generated code. */
2824 prev = PREV_INSN (insn);
2825
2826 if (prev && NONDEBUG_INSN_P (prev))
2827 add_dependence (insn, prev, REG_DEP_ANTI);
2828 }
2829 else
2830 {
2831 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2832 {
2833 struct deps_reg *reg_last = &deps->reg_last[i];
2834 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2835 add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI);
2836 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2837
2838 if (!deps->readonly)
2839 {
2840 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2841 reg_last->uses_length++;
2842 }
2843 }
2844
2845 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2846 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
2847 {
2848 struct deps_reg *reg_last = &deps->reg_last[i];
2849 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2850 add_dependence_list (insn, reg_last->implicit_sets, 0,
2851 REG_DEP_ANTI);
2852 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2853
2854 if (!deps->readonly)
2855 {
2856 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2857 reg_last->uses_length++;
2858 }
2859 }
2860
2861 /* If the current insn is conditional, we can't free any
2862 of the lists. */
2863 if (sched_has_condition_p (insn))
2864 {
2865 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2866 {
2867 struct deps_reg *reg_last = &deps->reg_last[i];
2868 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2869 add_dependence_list (insn, reg_last->implicit_sets, 0,
2870 REG_DEP_ANTI);
2871 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2872
2873 if (!deps->readonly)
2874 {
2875 reg_last->clobbers
2876 = alloc_INSN_LIST (insn, reg_last->clobbers);
2877 reg_last->clobbers_length++;
2878 }
2879 }
2880 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2881 {
2882 struct deps_reg *reg_last = &deps->reg_last[i];
2883 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2884 add_dependence_list (insn, reg_last->implicit_sets, 0,
2885 REG_DEP_ANTI);
2886 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
2887 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2888
2889 if (!deps->readonly)
2890 {
2891 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2892 SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
2893 }
2894 }
2895 }
2896 else
2897 {
2898 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2899 {
2900 struct deps_reg *reg_last = &deps->reg_last[i];
2901 if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
2902 || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
2903 {
2904 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
2905 REG_DEP_OUTPUT);
2906 add_dependence_list_and_free (deps, insn,
2907 &reg_last->implicit_sets, 0,
2908 REG_DEP_ANTI);
2909 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
2910 REG_DEP_ANTI);
2911 add_dependence_list_and_free
2912 (deps, insn, &reg_last->clobbers, 0, REG_DEP_OUTPUT);
2913
2914 if (!deps->readonly)
2915 {
2916 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2917 reg_last->clobbers_length = 0;
2918 reg_last->uses_length = 0;
2919 }
2920 }
2921 else
2922 {
2923 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2924 add_dependence_list (insn, reg_last->implicit_sets, 0,
2925 REG_DEP_ANTI);
2926 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2927 }
2928
2929 if (!deps->readonly)
2930 {
2931 reg_last->clobbers_length++;
2932 reg_last->clobbers
2933 = alloc_INSN_LIST (insn, reg_last->clobbers);
2934 }
2935 }
2936 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2937 {
2938 struct deps_reg *reg_last = &deps->reg_last[i];
2939
2940 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
2941 REG_DEP_OUTPUT);
2942 add_dependence_list_and_free (deps, insn,
2943 &reg_last->implicit_sets,
2944 0, REG_DEP_ANTI);
2945 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
2946 REG_DEP_OUTPUT);
2947 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
2948 REG_DEP_ANTI);
2949
2950 if (!deps->readonly)
2951 {
2952 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2953 reg_last->uses_length = 0;
2954 reg_last->clobbers_length = 0;
2955 CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);
2956 }
2957 }
2958 }
2959 }
2960
2961 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2962 if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
2963 {
2964 struct deps_reg *reg_last = &deps->reg_last[i];
2965 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
2966 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
2967 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2968
2969 if (!deps->readonly)
2970 reg_last->implicit_sets
2971 = alloc_INSN_LIST (insn, reg_last->implicit_sets);
2972 }
2973
2974 if (!deps->readonly)
2975 {
2976 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
2977 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
2978 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
2979 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2980 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
2981 || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
2982 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
2983
2984 /* Set up the pending barrier found. */
2985 deps->last_reg_pending_barrier = reg_pending_barrier;
2986 }
2987
2988 CLEAR_REG_SET (reg_pending_uses);
2989 CLEAR_REG_SET (reg_pending_clobbers);
2990 CLEAR_REG_SET (reg_pending_sets);
2991 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
2992 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
2993
2994 /* Add dependencies if a scheduling barrier was found. */
2995 if (reg_pending_barrier)
2996 {
      /* In the case of a barrier, most of the dependencies added here
	 are not real, so we use anti-dependence.  */
2999 if (sched_has_condition_p (insn))
3000 {
3001 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3002 {
3003 struct deps_reg *reg_last = &deps->reg_last[i];
3004 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
3005 add_dependence_list (insn, reg_last->sets, 0,
3006 reg_pending_barrier == TRUE_BARRIER
3007 ? REG_DEP_TRUE : REG_DEP_ANTI);
3008 add_dependence_list (insn, reg_last->implicit_sets, 0,
3009 REG_DEP_ANTI);
3010 add_dependence_list (insn, reg_last->clobbers, 0,
3011 reg_pending_barrier == TRUE_BARRIER
3012 ? REG_DEP_TRUE : REG_DEP_ANTI);
3013 }
3014 }
3015 else
3016 {
3017 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3018 {
3019 struct deps_reg *reg_last = &deps->reg_last[i];
3020 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3021 REG_DEP_ANTI);
3022 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3023 reg_pending_barrier == TRUE_BARRIER
3024 ? REG_DEP_TRUE : REG_DEP_ANTI);
3025 add_dependence_list_and_free (deps, insn,
3026 &reg_last->implicit_sets, 0,
3027 REG_DEP_ANTI);
3028 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3029 reg_pending_barrier == TRUE_BARRIER
3030 ? REG_DEP_TRUE : REG_DEP_ANTI);
3031
3032 if (!deps->readonly)
3033 {
3034 reg_last->uses_length = 0;
3035 reg_last->clobbers_length = 0;
3036 }
3037 }
3038 }
3039
3040 if (!deps->readonly)
3041 for (i = 0; i < (unsigned)deps->max_reg; i++)
3042 {
3043 struct deps_reg *reg_last = &deps->reg_last[i];
3044 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3045 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3046 }
3047
3048 /* Flush pending lists on jumps, but not on speculative checks. */
3049 if (JUMP_P (insn) && !(sel_sched_p ()
3050 && sel_insn_is_speculation_check (insn)))
3051 flush_pending_lists (deps, insn, true, true);
3052
3053 if (!deps->readonly)
3054 CLEAR_REG_SET (&deps->reg_conditional_sets);
3055 reg_pending_barrier = NOT_A_BARRIER;
3056 }
3057
3058 /* If a post-call group is still open, see if it should remain so.
3059 This insn must be a simple move of a hard reg to a pseudo or
3060 vice-versa.
3061
3062 We must avoid moving these insns for correctness on targets
3063 with small register classes, and for special registers like
3064 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
3065 hard regs for all targets. */
3066
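  /* E.g. a copy of a hard return-value register into a pseudo right
     after a call, (set (reg:SI 100) (reg:SI 0)), must stay glued to
     the call insn (register numbers here are illustrative).  */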
3067 if (deps->in_post_call_group_p)
3068 {
3069 rtx tmp, set = single_set (insn);
3070 int src_regno, dest_regno;
3071
3072 if (set == NULL)
3073 {
3074 if (DEBUG_INSN_P (insn))
3075 /* We don't want to mark debug insns as part of the same
3076 sched group. We know they really aren't, but if we use
3077 debug insns to tell that a call group is over, we'll
3078 get different code if debug insns are not there and
3079 instructions that follow seem like they should be part
3080 of the call group.
3081
3082 Also, if we did, fixup_sched_groups() would move the
3083 deps of the debug insn to the call insn, modifying
3084 non-debug post-dependency counts of the debug insn
3085 dependencies and otherwise messing with the scheduling
3086 order.
3087
3088 Instead, let such debug insns be scheduled freely, but
3089 keep the call group open in case there are insns that
3090 should be part of it afterwards. Since we grant debug
3091 insns higher priority than even sched group insns, it
3092 will all turn out all right. */
3093 goto debug_dont_end_call_group;
3094 else
3095 goto end_call_group;
3096 }
3097
3098 tmp = SET_DEST (set);
3099 if (GET_CODE (tmp) == SUBREG)
3100 tmp = SUBREG_REG (tmp);
3101 if (REG_P (tmp))
3102 dest_regno = REGNO (tmp);
3103 else
3104 goto end_call_group;
3105
3106 tmp = SET_SRC (set);
3107 if (GET_CODE (tmp) == SUBREG)
3108 tmp = SUBREG_REG (tmp);
3109 if ((GET_CODE (tmp) == PLUS
3110 || GET_CODE (tmp) == MINUS)
3111 && REG_P (XEXP (tmp, 0))
3112 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3113 && dest_regno == STACK_POINTER_REGNUM)
3114 src_regno = STACK_POINTER_REGNUM;
3115 else if (REG_P (tmp))
3116 src_regno = REGNO (tmp);
3117 else
3118 goto end_call_group;
3119
3120 if (src_regno < FIRST_PSEUDO_REGISTER
3121 || dest_regno < FIRST_PSEUDO_REGISTER)
3122 {
3123 if (!deps->readonly
3124 && deps->in_post_call_group_p == post_call_initial)
3125 deps->in_post_call_group_p = post_call;
3126
3127 if (!sel_sched_p () || sched_emulate_haifa_p)
3128 {
3129 SCHED_GROUP_P (insn) = 1;
3130 CANT_MOVE (insn) = 1;
3131 }
3132 }
3133 else
3134 {
3135 end_call_group:
3136 if (!deps->readonly)
3137 deps->in_post_call_group_p = not_post_call;
3138 }
3139 }
3140
3141 debug_dont_end_call_group:
3142 if ((current_sched_info->flags & DO_SPECULATION)
3143 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3144 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3145 be speculated. */
3146 {
3147 if (sel_sched_p ())
3148 sel_mark_hard_insn (insn);
3149 else
3150 {
3151 sd_iterator_def sd_it;
3152 dep_t dep;
3153
3154 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3155 sd_iterator_cond (&sd_it, &dep);)
3156 change_spec_dep_to_hard (sd_it);
3157 }
3158 }
3159 }
3160
3161 /* Return TRUE if INSN might not always return normally (e.g. call exit,
3162 longjmp, loop forever, ...). */
3163 static bool
3164 call_may_noreturn_p (rtx insn)
3165 {
3166 rtx call;
3167
3168 /* const or pure calls that aren't looping will always return. */
3169 if (RTL_CONST_OR_PURE_CALL_P (insn)
3170 && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3171 return false;
3172
3173 call = PATTERN (insn);
3174 if (GET_CODE (call) == PARALLEL)
3175 call = XVECEXP (call, 0, 0);
3176 if (GET_CODE (call) == SET)
3177 call = SET_SRC (call);
3178 if (GET_CODE (call) == CALL
3179 && MEM_P (XEXP (call, 0))
3180 && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3181 {
3182 rtx symbol = XEXP (XEXP (call, 0), 0);
3183 if (SYMBOL_REF_DECL (symbol)
3184 && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3185 {
3186 if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3187 == BUILT_IN_NORMAL)
3188 switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3189 {
3190 case BUILT_IN_BCMP:
3191 case BUILT_IN_BCOPY:
3192 case BUILT_IN_BZERO:
3193 case BUILT_IN_INDEX:
3194 case BUILT_IN_MEMCHR:
3195 case BUILT_IN_MEMCMP:
3196 case BUILT_IN_MEMCPY:
3197 case BUILT_IN_MEMMOVE:
3198 case BUILT_IN_MEMPCPY:
3199 case BUILT_IN_MEMSET:
3200 case BUILT_IN_RINDEX:
3201 case BUILT_IN_STPCPY:
3202 case BUILT_IN_STPNCPY:
3203 case BUILT_IN_STRCAT:
3204 case BUILT_IN_STRCHR:
3205 case BUILT_IN_STRCMP:
3206 case BUILT_IN_STRCPY:
3207 case BUILT_IN_STRCSPN:
3208 case BUILT_IN_STRLEN:
3209 case BUILT_IN_STRNCAT:
3210 case BUILT_IN_STRNCMP:
3211 case BUILT_IN_STRNCPY:
3212 case BUILT_IN_STRPBRK:
3213 case BUILT_IN_STRRCHR:
3214 case BUILT_IN_STRSPN:
3215 case BUILT_IN_STRSTR:
3216 /* Assume certain string/memory builtins always return. */
3217 return false;
3218 default:
3219 break;
3220 }
3221 }
3222 }
3223
3224 /* For all other calls assume that they might not always return. */
3225 return true;
3226 }
3227
3228 /* Analyze INSN with DEPS as a context. */
3229 void
3230 deps_analyze_insn (struct deps_desc *deps, rtx insn)
3231 {
3232 if (sched_deps_info->start_insn)
3233 sched_deps_info->start_insn (insn);
3234
3235 if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn) || JUMP_P (insn))
3236 {
3237 /* Make each JUMP_INSN (but not a speculative check)
3238 a scheduling barrier for memory references. */
3239 if (!deps->readonly
3240 && JUMP_P (insn)
3241 && !(sel_sched_p ()
3242 && sel_insn_is_speculation_check (insn)))
3243 {
3244 /* Keep the list a reasonable size. */
3245 if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
3246 flush_pending_lists (deps, insn, true, true);
3247 else
3248 {
3249 deps->last_pending_memory_flush
3250 = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
	      /* Signal to sched_analyze_insn that this jump stands
		 just for itself, not for any other pending memory
		 reads/writes that flush_pending_lists had to flush.  */
3254 PUT_REG_NOTE_KIND (deps->last_pending_memory_flush,
3255 NON_FLUSH_JUMP_KIND);
3256 }
3257 }
3258
3259 sched_analyze_insn (deps, PATTERN (insn), insn);
3260 }
3261 else if (CALL_P (insn))
3262 {
3263 int i;
3264
3265 CANT_MOVE (insn) = 1;
3266
3267 if (find_reg_note (insn, REG_SETJMP, NULL))
3268 {
3269 /* This is setjmp. Assume that all registers, not just
3270 hard registers, may be clobbered by this call. */
3271 reg_pending_barrier = MOVE_BARRIER;
3272 }
3273 else
3274 {
3275 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3276 /* A call may read and modify global register variables. */
3277 if (global_regs[i])
3278 {
3279 SET_REGNO_REG_SET (reg_pending_sets, i);
3280 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3281 }
3282 /* Other call-clobbered hard regs may be clobbered.
3283 Since we only have a choice between 'might be clobbered'
3284 and 'definitely not clobbered', we must include all
3285 partly call-clobbered registers here. */
3286 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3287 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3288 SET_REGNO_REG_SET (reg_pending_clobbers, i);
	    /* We don't know what set of fixed registers might be used
	       by the function, but it is certain that the stack pointer
	       is among them; be conservative and mark them all as
	       implicitly used.  */
3292 else if (fixed_regs[i])
3293 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3294 /* The frame pointer is normally not used by the function
3295 itself, but by the debugger. */
3296 /* ??? MIPS o32 is an exception. It uses the frame pointer
3297 in the macro expansion of jal but does not represent this
3298 fact in the call_insn rtl. */
3299 else if (i == FRAME_POINTER_REGNUM
3300 || (i == HARD_FRAME_POINTER_REGNUM
3301 && (! reload_completed || frame_pointer_needed)))
3302 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3303 }
3304
3305 /* For each insn which shouldn't cross a call, add a dependence
3306 between that insn and this call insn. */
3307 add_dependence_list_and_free (deps, insn,
3308 &deps->sched_before_next_call, 1,
3309 REG_DEP_ANTI);
3310
3311 sched_analyze_insn (deps, PATTERN (insn), insn);
3312
      /* If CALL were in a sched group, this would violate the
	 convention that sched group insns have dependencies only on the
	 previous instruction.

	 Of course one can say: "Hey!  What about the head of the sched
	 group?"  And I will answer: "Basic principles (one dep per insn)
	 are always the same."  */
3320 gcc_assert (!SCHED_GROUP_P (insn));
3321
3322 /* In the absence of interprocedural alias analysis, we must flush
3323 all pending reads and writes, and start new dependencies starting
3324 from here. But only flush writes for constant calls (which may
3325 be passed a pointer to something we haven't written yet). */
3326 flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3327
3328 if (!deps->readonly)
3329 {
3330 /* Remember the last function call for limiting lifetimes. */
3331 free_INSN_LIST_list (&deps->last_function_call);
3332 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3333
3334 if (call_may_noreturn_p (insn))
3335 {
3336 /* Remember the last function call that might not always return
3337 normally for limiting moves of trapping insns. */
3338 free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3339 deps->last_function_call_may_noreturn
3340 = alloc_INSN_LIST (insn, NULL_RTX);
3341 }
3342
3343 /* Before reload, begin a post-call group, so as to keep the
3344 lifetimes of hard registers correct. */
3345 if (! reload_completed)
3346 deps->in_post_call_group_p = post_call;
3347 }
3348 }
3349
3350 if (sched_deps_info->use_cselib)
3351 cselib_process_insn (insn);
3352
  /* EH_REGION insn notes cannot appear until well after we complete
     scheduling.  */
3355 if (NOTE_P (insn))
3356 gcc_assert (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
3357 && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END);
3358
3359 if (sched_deps_info->finish_insn)
3360 sched_deps_info->finish_insn ();
3361
3362 /* Fixup the dependencies in the sched group. */
3363 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3364 && SCHED_GROUP_P (insn) && !sel_sched_p ())
3365 fixup_sched_groups (insn);
3366 }
3367
3368 /* Initialize DEPS for the new block beginning with HEAD. */
3369 void
3370 deps_start_bb (struct deps_desc *deps, rtx head)
3371 {
3372 gcc_assert (!deps->readonly);
3373
3374 /* Before reload, if the previous block ended in a call, show that
3375 we are inside a post-call group, so as to keep the lifetimes of
3376 hard registers correct. */
3377 if (! reload_completed && !LABEL_P (head))
3378 {
3379 rtx insn = prev_nonnote_nondebug_insn (head);
3380
3381 if (insn && CALL_P (insn))
3382 deps->in_post_call_group_p = post_call_initial;
3383 }
3384 }
3385
3386 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3387 dependencies for each insn. */
3388 void
3389 sched_analyze (struct deps_desc *deps, rtx head, rtx tail)
3390 {
3391 rtx insn;
3392
3393 if (sched_deps_info->use_cselib)
3394 cselib_init (CSELIB_RECORD_MEMORY);
3395
3396 deps_start_bb (deps, head);
3397
3398 for (insn = head;; insn = NEXT_INSN (insn))
3399 {
3400
3401 if (INSN_P (insn))
3402 {
3403 /* And initialize deps_lists. */
3404 sd_init_insn (insn);
3405 }
3406
3407 deps_analyze_insn (deps, insn);
3408
3409 if (insn == tail)
3410 {
3411 if (sched_deps_info->use_cselib)
3412 cselib_finish ();
3413 return;
3414 }
3415 }
3416 gcc_unreachable ();
3417 }
3418
3419 /* Helper for sched_free_deps ().
3420 Delete INSN's (RESOLVED_P) backward dependencies. */
3421 static void
3422 delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
3423 {
3424 sd_iterator_def sd_it;
3425 dep_t dep;
3426 sd_list_types_def types;
3427
3428 if (resolved_p)
3429 types = SD_LIST_RES_BACK;
3430 else
3431 types = SD_LIST_BACK;
3432
3433 for (sd_it = sd_iterator_start (insn, types);
3434 sd_iterator_cond (&sd_it, &dep);)
3435 {
3436 dep_link_t link = *sd_it.linkp;
3437 dep_node_t node = DEP_LINK_NODE (link);
3438 deps_list_t back_list;
3439 deps_list_t forw_list;
3440
3441 get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3442 remove_from_deps_list (link, back_list);
3443 delete_dep_node (node);
3444 }
3445 }
3446
3447 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3448 deps_lists. */
3449 void
3450 sched_free_deps (rtx head, rtx tail, bool resolved_p)
3451 {
3452 rtx insn;
3453 rtx next_tail = NEXT_INSN (tail);
3454
3455 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3456 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3457 {
	/* Clear resolved back deps together with their dep_nodes.  */
3459 delete_dep_nodes_in_back_deps (insn, resolved_p);
3460
3461 /* Clear forward deps and leave the dep_nodes to the
3462 corresponding back_deps list. */
3463 if (resolved_p)
3464 clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3465 else
3466 clear_deps_list (INSN_FORW_DEPS (insn));
3467
3468 sd_finish_insn (insn);
3469 }
3470 }
3471 \f
3472 /* Initialize variables for region data dependence analysis.
3473 When LAZY_REG_LAST is true, do not allocate reg_last array
3474 of struct deps_desc immediately. */
3475
3476 void
3477 init_deps (struct deps_desc *deps, bool lazy_reg_last)
3478 {
3479 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3480
3481 deps->max_reg = max_reg;
3482 if (lazy_reg_last)
3483 deps->reg_last = NULL;
3484 else
3485 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3486 INIT_REG_SET (&deps->reg_last_in_use);
3487 INIT_REG_SET (&deps->reg_conditional_sets);
3488
3489 deps->pending_read_insns = 0;
3490 deps->pending_read_mems = 0;
3491 deps->pending_write_insns = 0;
3492 deps->pending_write_mems = 0;
3493 deps->pending_read_list_length = 0;
3494 deps->pending_write_list_length = 0;
3495 deps->pending_flush_length = 0;
3496 deps->last_pending_memory_flush = 0;
3497 deps->last_function_call = 0;
3498 deps->last_function_call_may_noreturn = 0;
3499 deps->sched_before_next_call = 0;
3500 deps->in_post_call_group_p = not_post_call;
3501 deps->last_debug_insn = 0;
3502 deps->last_reg_pending_barrier = NOT_A_BARRIER;
3503 deps->readonly = 0;
3504 }
3505
/* Initialize only the reg_last field of DEPS, which was not allocated
   before because DEPS was initialized lazily.  */
3508 void
3509 init_deps_reg_last (struct deps_desc *deps)
3510 {
3511 gcc_assert (deps && deps->max_reg > 0);
3512 gcc_assert (deps->reg_last == NULL);
3513
3514 deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3515 }
3516
3517
3518 /* Free insn lists found in DEPS. */
3519
3520 void
3521 free_deps (struct deps_desc *deps)
3522 {
3523 unsigned i;
3524 reg_set_iterator rsi;
3525
3526 /* We set max_reg to 0 when this context was already freed. */
3527 if (deps->max_reg == 0)
3528 {
3529 gcc_assert (deps->reg_last == NULL);
3530 return;
3531 }
3532 deps->max_reg = 0;
3533
3534 free_INSN_LIST_list (&deps->pending_read_insns);
3535 free_EXPR_LIST_list (&deps->pending_read_mems);
3536 free_INSN_LIST_list (&deps->pending_write_insns);
3537 free_EXPR_LIST_list (&deps->pending_write_mems);
3538 free_INSN_LIST_list (&deps->last_pending_memory_flush);
3539
3540 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3541 times. For a testcase with 42000 regs and 8000 small basic blocks,
3542 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
3543 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3544 {
3545 struct deps_reg *reg_last = &deps->reg_last[i];
3546 if (reg_last->uses)
3547 free_INSN_LIST_list (&reg_last->uses);
3548 if (reg_last->sets)
3549 free_INSN_LIST_list (&reg_last->sets);
3550 if (reg_last->implicit_sets)
3551 free_INSN_LIST_list (&reg_last->implicit_sets);
3552 if (reg_last->clobbers)
3553 free_INSN_LIST_list (&reg_last->clobbers);
3554 }
3555 CLEAR_REG_SET (&deps->reg_last_in_use);
3556 CLEAR_REG_SET (&deps->reg_conditional_sets);
3557
3558 /* As we initialize reg_last lazily, it is possible that we didn't allocate
3559 it at all. */
  free (deps->reg_last);
  deps->reg_last = NULL;
3565 }
3566
/* Remove INSN from the dependence context DEPS.  Caution:
   reg_conditional_sets is not handled.  */
3569 void
3570 remove_from_deps (struct deps_desc *deps, rtx insn)
3571 {
3572 int removed;
3573 unsigned i;
3574 reg_set_iterator rsi;
3575
3576 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
3577 &deps->pending_read_mems);
3578 if (!DEBUG_INSN_P (insn))
3579 deps->pending_read_list_length -= removed;
3580 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
3581 &deps->pending_write_mems);
3582 deps->pending_write_list_length -= removed;
3583 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
3584 deps->pending_flush_length -= removed;
3585
3586 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3587 {
3588 struct deps_reg *reg_last = &deps->reg_last[i];
3589 if (reg_last->uses)
3590 remove_from_dependence_list (insn, &reg_last->uses);
3591 if (reg_last->sets)
3592 remove_from_dependence_list (insn, &reg_last->sets);
3593 if (reg_last->implicit_sets)
3594 remove_from_dependence_list (insn, &reg_last->implicit_sets);
3595 if (reg_last->clobbers)
3596 remove_from_dependence_list (insn, &reg_last->clobbers);
3597 if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
3598 && !reg_last->clobbers)
3599 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
3600 }
3601
3602 if (CALL_P (insn))
3603 {
3604 remove_from_dependence_list (insn, &deps->last_function_call);
3605 remove_from_dependence_list (insn,
3606 &deps->last_function_call_may_noreturn);
3607 }
3608 remove_from_dependence_list (insn, &deps->sched_before_next_call);
3609 }
3610
3611 /* Init deps data vector. */
3612 static void
3613 init_deps_data_vector (void)
3614 {
3615 int reserve = (sched_max_luid + 1
3616 - VEC_length (haifa_deps_insn_data_def, h_d_i_d));
3617 if (reserve > 0
3618 && ! VEC_space (haifa_deps_insn_data_def, h_d_i_d, reserve))
3619 VEC_safe_grow_cleared (haifa_deps_insn_data_def, heap, h_d_i_d,
3620 3 * sched_max_luid / 2);
3621 }
3622
3623 /* If it is profitable to use them, initialize or extend (depending on
3624 GLOBAL_P) dependency data. */
3625 void
3626 sched_deps_init (bool global_p)
3627 {
  /* Average number of insns per basic block.
     '+ 1' is used to make it nonzero.  */
3630 int insns_in_block = sched_max_luid / n_basic_blocks + 1;
3631
3632 init_deps_data_vector ();
3633
3634 /* We use another caching mechanism for selective scheduling, so
3635 we don't use this one. */
3636 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
3637 {
3638 /* ?!? We could save some memory by computing a per-region luid mapping
3639 which could reduce both the number of vectors in the cache and the
3640 size of each vector. Instead we just avoid the cache entirely unless
3641 the average number of instructions in a basic block is very high. See
3642 the comment before the declaration of true_dependency_cache for
3643 what we consider "very high". */
3644 cache_size = 0;
3645 extend_dependency_caches (sched_max_luid, true);
3646 }
3647
3648 if (global_p)
3649 {
3650 dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
3651 /* Allocate lists for one block at a time. */
3652 insns_in_block);
3653 dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
3654 /* Allocate nodes for one block at a time.
3655 We assume that average insn has
3656 5 producers. */
3657 5 * insns_in_block);
3658 }
3659 }
3660
3661
3662 /* Create or extend (depending on CREATE_P) dependency caches to
3663 size N. */
3664 void
3665 extend_dependency_caches (int n, bool create_p)
3666 {
3667 if (create_p || true_dependency_cache)
3668 {
3669 int i, luid = cache_size + n;
3670
3671 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
3672 luid);
3673 output_dependency_cache = XRESIZEVEC (bitmap_head,
3674 output_dependency_cache, luid);
3675 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
3676 luid);
3677
3678 if (current_sched_info->flags & DO_SPECULATION)
3679 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
3680 luid);
3681
3682 for (i = cache_size; i < luid; i++)
3683 {
3684 bitmap_initialize (&true_dependency_cache[i], 0);
3685 bitmap_initialize (&output_dependency_cache[i], 0);
3686 bitmap_initialize (&anti_dependency_cache[i], 0);
3687
3688 if (current_sched_info->flags & DO_SPECULATION)
3689 bitmap_initialize (&spec_dependency_cache[i], 0);
3690 }
3691 cache_size = luid;
3692 }
3693 }
3694
3695 /* Finalize dependency information for the whole function. */
3696 void
3697 sched_deps_finish (void)
3698 {
3699 gcc_assert (deps_pools_are_empty_p ());
3700 free_alloc_pool_if_empty (&dn_pool);
3701 free_alloc_pool_if_empty (&dl_pool);
3702 gcc_assert (dn_pool == NULL && dl_pool == NULL);
3703
3704 VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d);
3706
3707 if (true_dependency_cache)
3708 {
3709 int i;
3710
3711 for (i = 0; i < cache_size; i++)
3712 {
3713 bitmap_clear (&true_dependency_cache[i]);
3714 bitmap_clear (&output_dependency_cache[i]);
3715 bitmap_clear (&anti_dependency_cache[i]);
3716
3717 if (sched_deps_info->generate_spec_deps)
3718 bitmap_clear (&spec_dependency_cache[i]);
3719 }
3720 free (true_dependency_cache);
3721 true_dependency_cache = NULL;
3722 free (output_dependency_cache);
3723 output_dependency_cache = NULL;
3724 free (anti_dependency_cache);
3725 anti_dependency_cache = NULL;
3726
3727 if (sched_deps_info->generate_spec_deps)
3728 {
3729 free (spec_dependency_cache);
3730 spec_dependency_cache = NULL;
3731 }
3732
    }

  cache_size = 0;
}
3735
3736 /* Initialize some global variables needed by the dependency analysis
3737 code. */
3738
3739 void
3740 init_deps_global (void)
3741 {
3742 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3743 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3744 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
3745 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
3746 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
3747 reg_pending_barrier = NOT_A_BARRIER;
3748
3749 if (!sel_sched_p () || sched_emulate_haifa_p)
3750 {
3751 sched_deps_info->start_insn = haifa_start_insn;
3752 sched_deps_info->finish_insn = haifa_finish_insn;
3753
3754 sched_deps_info->note_reg_set = haifa_note_reg_set;
3755 sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
3756 sched_deps_info->note_reg_use = haifa_note_reg_use;
3757
3758 sched_deps_info->note_mem_dep = haifa_note_mem_dep;
3759 sched_deps_info->note_dep = haifa_note_dep;
3760 }
3761 }
3762
3763 /* Free everything used by the dependency analysis code. */
3764
3765 void
3766 finish_deps_global (void)
3767 {
3768 FREE_REG_SET (reg_pending_sets);
3769 FREE_REG_SET (reg_pending_clobbers);
3770 FREE_REG_SET (reg_pending_uses);
3771 }
3772
3773 /* Estimate the weakness of dependence between MEM1 and MEM2. */
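/* The result is on the MIN_DEP_WEAK .. MAX_DEP_WEAK scale:
   MIN_DEP_WEAK means the dependence is considered certain, so do not
   speculate, while larger values mean the accesses are less likely to
   conflict and speculation is more attractive.  */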
3774 dw_t
3775 estimate_dep_weak (rtx mem1, rtx mem2)
3776 {
3777 rtx r1, r2;
3778
3779 if (mem1 == mem2)
3780 /* MEMs are the same - don't speculate. */
3781 return MIN_DEP_WEAK;
3782
3783 r1 = XEXP (mem1, 0);
3784 r2 = XEXP (mem2, 0);
3785
3786 if (r1 == r2
3787 || (REG_P (r1) && REG_P (r2)
3788 && REGNO (r1) == REGNO (r2)))
3789 /* Again, MEMs are the same. */
3790 return MIN_DEP_WEAK;
3791 else if ((REG_P (r1) && !REG_P (r2))
3792 || (!REG_P (r1) && REG_P (r2)))
    /* Different addressing modes - a reason to be more speculative
       than usual.  */
3795 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
3796 else
3797 /* We can't say anything about the dependence. */
3798 return UNCERTAIN_DEP_WEAK;
3799 }

/* Add or update a backward dependence between INSN and ELEM with type
   DEP_TYPE.  This function can handle the case where INSN and ELEM
   are the same (INSN == ELEM).  It is a convenience wrapper around
   the note_dep hook.  */
void
add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
{
  ds_t ds;
  bool internal;

  if (dep_type == REG_DEP_TRUE)
    ds = DEP_TRUE;
  else if (dep_type == REG_DEP_OUTPUT)
    ds = DEP_OUTPUT;
  else
    {
      gcc_assert (dep_type == REG_DEP_ANTI);
      ds = DEP_ANTI;
    }

  /* When add_dependence is called from inside sched-deps.c, we expect
     cur_insn to be non-null.  */
  internal = cur_insn != NULL;
  if (internal)
    gcc_assert (insn == cur_insn);
  else
    cur_insn = insn;

  note_dep (elem, ds);
  if (!internal)
    cur_insn = NULL;
}
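
/* Usage sketch (editor's illustration; INSN1/INSN2 are hypothetical):
   a caller that must keep INSN2 from moving above INSN1 would write

     add_dependence (insn2, insn1, REG_DEP_ANTI);

   making INSN2 the consumer and INSN1 the producer; the REG_DEP_*
   kind is translated to a DEP_* status and recorded via note_dep.  */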

/* Return the weakness of speculative type TYPE in the dep_status DS.  */
dw_t
get_dep_weak_1 (ds_t ds, ds_t type)
{
  ds = ds & type;

  switch (type)
    {
    case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }

  return (dw_t) ds;
}

/* Like get_dep_weak_1, but also verify that the weakness is within
   the proper range.  */
dw_t
get_dep_weak (ds_t ds, ds_t type)
{
  dw_t dw = get_dep_weak_1 (ds, type);

  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
  return dw;
}

/* Return a dep_status with the same parameters as DS, except that the
   weakness of speculative type TYPE is set to DW.  */
ds_t
set_dep_weak (ds_t ds, ds_t type, dw_t dw)
{
  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);

  ds &= ~type;
  switch (type)
    {
    case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }
  return ds;
}
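
/* Round-trip sketch (editor's illustration): each speculative type
   occupies its own bit-field within a dep_status, so writing and
   reading a weakness is a masked shift in each direction:

     ds_t ds = set_dep_weak (DEP_TRUE, BEGIN_DATA, dw);
     gcc_assert (get_dep_weak (ds, BEGIN_DATA) == dw);

   holds for any DW with MIN_DEP_WEAK <= DW <= MAX_DEP_WEAK.  */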

/* Return the join of two dep_statuses DS1 and DS2.
   If MAX_P is true, choose the greater probability;
   otherwise, multiply the probabilities.
   This function assumes that both DS1 and DS2 contain speculative bits.  */
static ds_t
ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
{
  ds_t ds, t;

  gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));

  ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);

  t = FIRST_SPEC_TYPE;
  do
    {
      if ((ds1 & t) && !(ds2 & t))
        ds |= ds1 & t;
      else if (!(ds1 & t) && (ds2 & t))
        ds |= ds2 & t;
      else if ((ds1 & t) && (ds2 & t))
        {
          dw_t dw1 = get_dep_weak (ds1, t);
          dw_t dw2 = get_dep_weak (ds2, t);
          ds_t dw;

          if (!max_p)
            {
              dw = ((ds_t) dw1) * ((ds_t) dw2);
              dw /= MAX_DEP_WEAK;
              if (dw < MIN_DEP_WEAK)
                dw = MIN_DEP_WEAK;
            }
          else
            {
              if (dw1 >= dw2)
                dw = dw1;
              else
                dw = dw2;
            }

          ds = set_dep_weak (ds, t, (dw_t) dw);
        }

      if (t == LAST_SPEC_TYPE)
        break;
      t <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  return ds;
}

/* Return the join of two dep_statuses DS1 and DS2.
   This function assumes that both DS1 and DS2 contain speculative bits.  */
ds_t
ds_merge (ds_t ds1, ds_t ds2)
{
  return ds_merge_1 (ds1, ds2, false);
}
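
/* Worked example (editor's illustration): weaknesses act as fixed-point
   probabilities scaled by MAX_DEP_WEAK.  If DS1 and DS2 both carry
   BEGIN_DATA with weakness MAX_DEP_WEAK / 2 (roughly a 50% chance of
   successful speculation), the multiplicative merge gives

     dw = (MAX_DEP_WEAK / 2) * (MAX_DEP_WEAK / 2) / MAX_DEP_WEAK
        = MAX_DEP_WEAK / 4

   i.e. about a 25% chance, clamped below at MIN_DEP_WEAK.  */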

/* Return the join of two dep_statuses DS and DS2.  When both inputs
   allow speculation and MEM1 is non-null, the data-speculative
   weakness of DS is first re-estimated from MEM1 and MEM2.  */
ds_t
ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
{
  ds_t new_status = ds | ds2;

  if (new_status & SPECULATIVE)
    {
      if ((ds && !(ds & SPECULATIVE))
          || (ds2 && !(ds2 & SPECULATIVE)))
        /* Then this dep can't be speculative.  */
        new_status &= ~SPECULATIVE;
      else
        {
          /* Both are speculative.  Merge the probabilities.  */
          if (mem1)
            {
              dw_t dw;

              dw = estimate_dep_weak (mem1, mem2);
              ds = set_dep_weak (ds, BEGIN_DATA, dw);
            }

          if (!ds)
            new_status = ds2;
          else if (!ds2)
            new_status = ds;
          else
            new_status = ds_merge (ds2, ds);
        }
    }

  return new_status;
}

/* Return the join of DS1 and DS2.  Use maximum instead of multiplying
   probabilities.  */
ds_t
ds_max_merge (ds_t ds1, ds_t ds2)
{
  if (ds1 == 0)
    return ds2;

  if (ds2 == 0)
    return ds1;

  return ds_merge_1 (ds1, ds2, true);
}
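
/* Contrast with ds_merge (editor's note): for the example above, where
   both inputs carry BEGIN_DATA at weakness MAX_DEP_WEAK / 2,
   ds_max_merge keeps MAX_DEP_WEAK / 2 (the greater weakness), while
   ds_merge would drop to roughly MAX_DEP_WEAK / 4.  */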

/* Return the probability of speculation success for the speculation
   status DS.  */
dw_t
ds_weak (ds_t ds)
{
  ds_t res = 1, dt;
  int n = 0;

  dt = FIRST_SPEC_TYPE;
  do
    {
      if (ds & dt)
        {
          res *= (ds_t) get_dep_weak (ds, dt);
          n++;
        }

      if (dt == LAST_SPEC_TYPE)
        break;
      dt <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  gcc_assert (n);
  while (--n)
    res /= MAX_DEP_WEAK;

  if (res < MIN_DEP_WEAK)
    res = MIN_DEP_WEAK;

  gcc_assert (res <= MAX_DEP_WEAK);

  return (dw_t) res;
}
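
/* Worked example (editor's illustration): for a status carrying
   BEGIN_DATA with weakness W1 and BEGIN_CONTROL with weakness W2,
   the loop above accumulates W1 * W2 and then divides once
   (n - 1 = 1 division), giving

     ds_weak (ds) = W1 * W2 / MAX_DEP_WEAK

   that is, the fixed-point product of the individual success
   probabilities, clamped below at MIN_DEP_WEAK.  */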

/* Return a dep status that contains all speculation types of DS.
   Note that BEGIN_DATA and friends are multi-bit field masks, so
   "ds |= BEGIN_DATA" widens a partially set weakness field to the
   full mask rather than being a no-op.  */
ds_t
ds_get_speculation_types (ds_t ds)
{
  if (ds & BEGIN_DATA)
    ds |= BEGIN_DATA;
  if (ds & BE_IN_DATA)
    ds |= BE_IN_DATA;
  if (ds & BEGIN_CONTROL)
    ds |= BEGIN_CONTROL;
  if (ds & BE_IN_CONTROL)
    ds |= BE_IN_CONTROL;

  return ds & SPECULATIVE;
}
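
/* Sketch (editor's illustration): if DS carries BEGIN_DATA at some
   nonzero weakness and no other speculative bits, then

     ds_get_speculation_types (ds) == BEGIN_DATA

   i.e. the full BEGIN_DATA mask, with the original weakness erased.  */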

/* Return a dep status that contains maximal weakness for each speculation
   type present in DS.  */
ds_t
ds_get_max_dep_weak (ds_t ds)
{
  if (ds & BEGIN_DATA)
    ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
  if (ds & BE_IN_DATA)
    ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
  if (ds & BEGIN_CONTROL)
    ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
  if (ds & BE_IN_CONTROL)
    ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);

  return ds;
}

/* Dump information about the dependence status S.  */
static void
dump_ds (FILE *f, ds_t s)
{
  fprintf (f, "{");

  if (s & BEGIN_DATA)
    fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
  if (s & BE_IN_DATA)
    fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
  if (s & BEGIN_CONTROL)
    fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
  if (s & BE_IN_CONTROL)
    fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));

  if (s & HARD_DEP)
    fprintf (f, "HARD_DEP; ");

  if (s & DEP_TRUE)
    fprintf (f, "DEP_TRUE; ");
  if (s & DEP_ANTI)
    fprintf (f, "DEP_ANTI; ");
  if (s & DEP_OUTPUT)
    fprintf (f, "DEP_OUTPUT; ");

  fprintf (f, "}");
}

DEBUG_FUNCTION void
debug_ds (ds_t s)
{
  dump_ds (stderr, s);
  fprintf (stderr, "\n");
}
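
/* Example output (editor's illustration, derived from the format
   strings in dump_ds): a data-speculative true dependence with
   BEGIN_DATA weakness 3 would be printed as

     {BEGIN_DATA: 3; DEP_TRUE; }

   on stderr, followed by a newline.  */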

#ifdef ENABLE_CHECKING
/* Verify that dependence type and status are consistent.
   If RELAXED_P is true, then skip dep_weakness checks.  */
static void
check_dep (dep_t dep, bool relaxed_p)
{
  enum reg_note dt = DEP_TYPE (dep);
  ds_t ds = DEP_STATUS (dep);

  gcc_assert (DEP_PRO (dep) != DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      gcc_assert (ds == -1);
      return;
    }

  /* Check that dependence type contains the same bits as the status.  */
  if (dt == REG_DEP_TRUE)
    gcc_assert (ds & DEP_TRUE);
  else if (dt == REG_DEP_OUTPUT)
    gcc_assert ((ds & DEP_OUTPUT)
                && !(ds & DEP_TRUE));
  else
    gcc_assert ((dt == REG_DEP_ANTI)
                && (ds & DEP_ANTI)
                && !(ds & (DEP_OUTPUT | DEP_TRUE)));

  /* HARD_DEP cannot appear in the dep_status of a link.  */
  gcc_assert (!(ds & HARD_DEP));

  /* Check that dependence status is set correctly when speculation is not
     supported.  */
  if (!sched_deps_info->generate_spec_deps)
    gcc_assert (!(ds & SPECULATIVE));
  else if (ds & SPECULATIVE)
    {
      if (!relaxed_p)
        {
          ds_t type = FIRST_SPEC_TYPE;

          /* Check that dependence weakness is in the proper range.  */
          do
            {
              if (ds & type)
                get_dep_weak (ds, type);

              if (type == LAST_SPEC_TYPE)
                break;
              type <<= SPEC_TYPE_SHIFT;
            }
          while (1);
        }

      if (ds & BEGIN_SPEC)
        {
          /* Only a true dependence can be data speculative.  */
          if (ds & BEGIN_DATA)
            gcc_assert (ds & DEP_TRUE);

          /* Control dependencies in the insn scheduler are represented by
             anti-dependencies, therefore only an anti dependence can be
             control speculative.  */
          if (ds & BEGIN_CONTROL)
            gcc_assert (ds & DEP_ANTI);
        }
      else
        {
          /* Subsequent speculations should resolve true dependencies.  */
          gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
        }

      /* Check that true and anti dependencies can't carry other
         speculative statuses.  */
      if (ds & DEP_TRUE)
        gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
      /* An output dependence can't be speculative at all.  */
      gcc_assert (!(ds & DEP_OUTPUT));
      if (ds & DEP_ANTI)
        gcc_assert (ds & BEGIN_CONTROL);
    }
}
#endif /* ENABLE_CHECKING */

#endif /* INSN_SCHEDULING */