/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
\f
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"
#include "ira.h"
#include "target.h"

#ifdef INSN_SCHEDULING

#ifdef ENABLE_CHECKING
#define CHECK (true)
#else
#define CHECK (false)
#endif

/* Holds current parameters for the dependency analyzer.  */
struct sched_deps_info_def *sched_deps_info;

/* The data is specific to the Haifa scheduler.  */
VEC(haifa_deps_insn_data_def, heap) *h_d_i_d = NULL;

/* Return the major type present in the DS.  */
enum reg_note
ds_to_dk (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;

  if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;

  gcc_assert (ds & DEP_ANTI);

  return REG_DEP_ANTI;
}

/* Return equivalent dep_status.  */
ds_t
dk_to_ds (enum reg_note dk)
{
  switch (dk)
    {
    case REG_DEP_TRUE:
      return DEP_TRUE;

    case REG_DEP_OUTPUT:
      return DEP_OUTPUT;

    default:
      gcc_assert (dk == REG_DEP_ANTI);
      return DEP_ANTI;
    }
}

/* Functions to operate with dependence information container - dep_t.  */

/* Init DEP with the arguments.  */
void
init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note type, ds_t ds)
{
  DEP_PRO (dep) = pro;
  DEP_CON (dep) = con;
  DEP_TYPE (dep) = type;
  DEP_STATUS (dep) = ds;
}

/* Init DEP with the arguments.
   While most of the scheduler (including targets) only needs the major type
   of the dependency, it is convenient to hide the full dep_status from it.  */
void
init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
{
  ds_t ds;

  if ((current_sched_info->flags & USE_DEPS_LIST))
    ds = dk_to_ds (kind);
  else
    ds = -1;

  init_dep_1 (dep, pro, con, kind, ds);
}

/* Make a copy of FROM in TO.  */
static void
copy_dep (dep_t to, dep_t from)
{
  memcpy (to, from, sizeof (*to));
}

static void dump_ds (FILE *, ds_t);

/* Define flags for dump_dep ().  */

/* Dump producer of the dependence.  */
#define DUMP_DEP_PRO (2)

/* Dump consumer of the dependence.  */
#define DUMP_DEP_CON (4)

/* Dump type of the dependence.  */
#define DUMP_DEP_TYPE (8)

/* Dump status of the dependence.  */
#define DUMP_DEP_STATUS (16)

/* Dump all information about the dependence.  */
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
                      | DUMP_DEP_STATUS)

/* Dump DEP to DUMP.
   FLAGS is a bit mask specifying what information about DEP needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about DEP
   and propagate this bit into the callee dump functions.  */
static void
dump_dep (FILE *dump, dep_t dep, int flags)
{
  if (flags & 1)
    flags |= DUMP_DEP_ALL;

  fprintf (dump, "<");

  if (flags & DUMP_DEP_PRO)
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));

  if (flags & DUMP_DEP_CON)
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));

  if (flags & DUMP_DEP_TYPE)
    {
      char t;
      enum reg_note type = DEP_TYPE (dep);

      switch (type)
        {
        case REG_DEP_TRUE:
          t = 't';
          break;

        case REG_DEP_OUTPUT:
          t = 'o';
          break;

        case REG_DEP_ANTI:
          t = 'a';
          break;

        default:
          gcc_unreachable ();
          break;
        }

      fprintf (dump, "%c; ", t);
    }

  if (flags & DUMP_DEP_STATUS)
    {
      if (current_sched_info->flags & USE_DEPS_LIST)
        dump_ds (dump, DEP_STATUS (dep));
    }

  fprintf (dump, ">");
}

/* Default flags for dump_dep ().  */
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);

/* Dump all fields of DEP to STDERR.  */
void
sd_debug_dep (dep_t dep)
{
  dump_dep (stderr, dep, 1);
  fprintf (stderr, "\n");
}

/* Determine whether DEP is a dependency link of a non-debug insn on a
   debug insn.  */

static inline bool
depl_on_debug_p (dep_link_t dep)
{
  return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
          && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
}

/* Functions to operate with a single link from the dependencies lists -
   dep_link_t.  */

/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXT_P.  */
static void
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
{
  dep_link_t next = *prev_nextp;

  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
              && DEP_LINK_NEXT (l) == NULL);

  /* Init node being inserted.  */
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
  DEP_LINK_NEXT (l) = next;

  /* Fix next node.  */
  if (next != NULL)
    {
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);

      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
    }

  /* Fix prev node.  */
  *prev_nextp = l;
}

/* Add dep_link LINK to deps_list L.  */
static void
add_to_deps_list (dep_link_t link, deps_list_t l)
{
  attach_dep_link (link, &DEPS_LIST_FIRST (l));

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    ++DEPS_LIST_N_LINKS (l);
}

/* Detach dep_link L from the list.  */
static void
detach_dep_link (dep_link_t l)
{
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
  dep_link_t next = DEP_LINK_NEXT (l);

  *prev_nextp = next;

  if (next != NULL)
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;

  DEP_LINK_PREV_NEXTP (l) = NULL;
  DEP_LINK_NEXT (l) = NULL;
}

/* Remove link LINK from list LIST.  */
static void
remove_from_deps_list (dep_link_t link, deps_list_t list)
{
  detach_dep_link (link);

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    --DEPS_LIST_N_LINKS (list);
}

/* Move link LINK from list FROM to list TO.  */
static void
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
{
  remove_from_deps_list (link, from);
  add_to_deps_list (link, to);
}

/* Return true if LINK is not attached to any list.  */
static bool
dep_link_is_detached_p (dep_link_t link)
{
  return DEP_LINK_PREV_NEXTP (link) == NULL;
}

/* Pool to hold all dependency nodes (dep_node_t).  */
static alloc_pool dn_pool;

/* Number of dep_nodes out there.  */
static int dn_pool_diff = 0;

/* Create a dep_node.  */
static dep_node_t
create_dep_node (void)
{
  dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
  dep_link_t back = DEP_NODE_BACK (n);
  dep_link_t forw = DEP_NODE_FORW (n);

  DEP_LINK_NODE (back) = n;
  DEP_LINK_NEXT (back) = NULL;
  DEP_LINK_PREV_NEXTP (back) = NULL;

  DEP_LINK_NODE (forw) = n;
  DEP_LINK_NEXT (forw) = NULL;
  DEP_LINK_PREV_NEXTP (forw) = NULL;

  ++dn_pool_diff;

  return n;
}

/* Delete dep_node N.  N must not be connected to any deps_list.  */
static void
delete_dep_node (dep_node_t n)
{
  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
              && dep_link_is_detached_p (DEP_NODE_FORW (n)));

  --dn_pool_diff;

  pool_free (dn_pool, n);
}

/* Pool to hold dependencies lists (deps_list_t).  */
static alloc_pool dl_pool;

/* Number of deps_lists out there.  */
static int dl_pool_diff = 0;

/* Functions to operate with dependences lists - deps_list_t.  */

/* Return true if list L is empty.  */
static bool
deps_list_empty_p (deps_list_t l)
{
  return DEPS_LIST_N_LINKS (l) == 0;
}

/* Create a new deps_list.  */
static deps_list_t
create_deps_list (void)
{
  deps_list_t l = (deps_list_t) pool_alloc (dl_pool);

  DEPS_LIST_FIRST (l) = NULL;
  DEPS_LIST_N_LINKS (l) = 0;

  ++dl_pool_diff;
  return l;
}

/* Free deps_list L.  */
static void
free_deps_list (deps_list_t l)
{
  gcc_assert (deps_list_empty_p (l));

  --dl_pool_diff;

  pool_free (dl_pool, l);
}

/* Return true if there are no dep_nodes and deps_lists out there.
   After the region is scheduled all the dependency nodes and lists
   should [generally] be returned to the pool.  */
bool
deps_pools_are_empty_p (void)
{
  return dn_pool_diff == 0 && dl_pool_diff == 0;
}

/* Remove all elements from L.  */
static void
clear_deps_list (deps_list_t l)
{
  do
    {
      dep_link_t link = DEPS_LIST_FIRST (l);

      if (link == NULL)
        break;

      remove_from_deps_list (link, l);
    }
  while (1);
}

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static enum reg_pending_barrier_mode reg_pending_barrier;

/* Hard registers implicitly clobbered or used (or that may be
   implicitly clobbered or used) by the insn currently being analyzed.
   For example, an insn may have a constraint that admits only a single
   register class.  Even if no hard register currently appears in the
   insn, a hard register of that class will appear in it after the
   reload pass, because the constraint requires one.  */
static HARD_REG_SET implicit_reg_pending_clobbers;
static HARD_REG_SET implicit_reg_pending_uses;

/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has an associated bitmap for each kind of dependency.  Each
   bitmap has enough entries to represent a dependency on any other insn
   in the insn chain.  If the bitmaps for the true-dependency cache are
   allocated, then the bitmaps for the remaining caches are allocated
   as well.  */
static bitmap_head *true_dependency_cache = NULL;
static bitmap_head *output_dependency_cache = NULL;
static bitmap_head *anti_dependency_cache = NULL;
static bitmap_head *spec_dependency_cache = NULL;
static int cache_size;

static int deps_may_trap_p (const_rtx);
static void add_dependence_list (rtx, rtx, int, enum reg_note);
static void add_dependence_list_and_free (struct deps_desc *, rtx,
                                          rtx *, int, enum reg_note);
static void delete_all_dependences (rtx);
static void fixup_sched_groups (rtx);

static void flush_pending_lists (struct deps_desc *, rtx, int, int);
static void sched_analyze_1 (struct deps_desc *, rtx, rtx);
static void sched_analyze_2 (struct deps_desc *, rtx, rtx);
static void sched_analyze_insn (struct deps_desc *, rtx, rtx);

static bool sched_has_condition_p (const_rtx);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
                                                          rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);

#ifdef ENABLE_CHECKING
static void check_dep (dep_t, bool);
#endif
\f
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (const_rtx mem)
{
  const_rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      const_rtx t = get_reg_known_value (REGNO (addr));
      if (t)
        addr = t;
    }
  return rtx_addr_can_trap_p (addr);
}
\f

/* Find the condition under which INSN is executed.  If REV is not NULL,
   it is set to TRUE when the returned comparison should be reversed
   to get the actual condition.  */
static rtx
sched_get_condition_with_rev (const_rtx insn, bool *rev)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (pat == 0)
    return 0;

  if (rev)
    *rev = false;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
        return 0;

      if (rev)
        *rev = true;
      return cond;
    }

  return 0;
}

/* True when we can find a condition under which INSN is executed.  */
static bool
sched_has_condition_p (const_rtx insn)
{
  return !! sched_get_condition_with_rev (insn, NULL);
}

\f

/* Return nonzero if conditions COND1 and COND2 can never be both true.  */
static int
conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
{
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) ==
         (rev1 == rev2
          ? reversed_comparison_code (cond2, NULL)
          : GET_CODE (cond2))
      && XEXP (cond1, 0) == XEXP (cond2, 0)
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}

/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
bool
sched_insns_conditions_mutex_p (const_rtx insn1, const_rtx insn2)
{
  rtx cond1, cond2;
  bool rev1 = false, rev2 = false;

  /* df doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
      if (cond1 && cond2
          && conditions_mutex_p (cond1, cond2, rev1, rev2)
          /* Make sure first instruction doesn't affect condition of second
             instruction if switched.  */
          && !modified_in_p (cond1, insn2)
          /* Make sure second instruction doesn't affect condition of first
             instruction if switched.  */
          && !modified_in_p (cond2, insn1))
        return true;
    }
  return false;
}
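
/* For example, on a target with conditional execution the two insns

     (cond_exec (eq (reg p1) (const_int 0)) (set (reg r1) ...))
     (cond_exec (ne (reg p1) (const_int 0)) (set (reg r1) ...))

   can never both execute, so no output dependence between them is
   needed, provided neither insn modifies p1.  (Illustrative RTL sketch;
   the exact predicate representation is target-specific.)  */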
\f

/* Return true if INSN can potentially be speculated with type DS.  */
bool
sched_insn_is_legitimate_for_speculation_p (const_rtx insn, ds_t ds)
{
  if (HAS_INTERNAL_DEP (insn))
    return false;

  if (!NONJUMP_INSN_P (insn))
    return false;

  if (SCHED_GROUP_P (insn))
    return false;

  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX (insn)))
    return false;

  if (side_effects_p (PATTERN (insn)))
    return false;

  if (ds & BE_IN_SPEC)
    /* Instructions that depend on a speculatively scheduled instruction
       cannot themselves be speculatively scheduled.  */
    {
      if (may_trap_p (PATTERN (insn)))
        /* If instruction might trap, it cannot be speculatively scheduled.
           For control speculation it's obvious why and for data speculation
           it's because the insn might get wrong input if speculation
           wasn't successful.  */
        return false;

      if ((ds & BE_IN_DATA)
          && sched_has_condition_p (insn))
        /* If this is a predicated instruction, then it cannot be
           speculatively scheduled.  See PR35659.  */
        return false;
    }

  return true;
}

/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
void
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
              deps_list_t *list_ptr, bool *resolved_p_ptr)
{
  sd_list_types_def types = *types_ptr;

  if (types & SD_LIST_HARD_BACK)
    {
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_HARD_BACK;
    }
  else if (types & SD_LIST_SPEC_BACK)
    {
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
    }
  else if (types & SD_LIST_FORW)
    {
      *list_ptr = INSN_FORW_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_FORW;
    }
  else if (types & SD_LIST_RES_BACK)
    {
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_BACK;
    }
  else if (types & SD_LIST_RES_FORW)
    {
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_FORW;
    }
  else
    {
      *list_ptr = NULL;
      *resolved_p_ptr = false;
      *types_ptr = SD_LIST_NONE;
    }
}

/* Return the summary size of INSN's lists defined by LIST_TYPES.  */
int
sd_lists_size (const_rtx insn, sd_list_types_def list_types)
{
  int size = 0;

  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (list)
        size += DEPS_LIST_N_LINKS (list);
    }

  return size;
}

/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */

bool
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
{
  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (!deps_list_empty_p (list))
        return false;
    }

  return true;
}

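/* An illustrative sketch (not used by the scheduler itself) of how the
   iterator interface above is meant to be used: walk all backward
   dependencies of INSN with FOR_EACH_DEP and count those whose producer
   is a jump.  */
static int ATTRIBUTE_UNUSED
sd_count_jump_producers (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int n = 0;

  /* SD_LIST_BACK covers both the hard and the speculative back deps.  */
  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    if (JUMP_P (DEP_PRO (dep)))
      n++;

  return n;
}
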
/* Initialize data for INSN.  */
void
sd_init_insn (rtx insn)
{
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
  INSN_FORW_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();

  if (DEBUG_INSN_P (insn))
    DEBUG_INSN_SCHED_P (insn) = TRUE;

  /* ??? It would be nice to allocate dependency caches here.  */
}

/* Free data for INSN.  */
void
sd_finish_insn (rtx insn)
{
  /* ??? It would be nice to deallocate dependency caches here.  */

  if (DEBUG_INSN_P (insn))
    {
      gcc_assert (DEBUG_INSN_SCHED_P (insn));
      DEBUG_INSN_SCHED_P (insn) = FALSE;
    }

  free_deps_list (INSN_HARD_BACK_DEPS (insn));
  INSN_HARD_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
  INSN_SPEC_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_FORW_DEPS (insn));
  INSN_FORW_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
}

/* Find a dependency between producer PRO and consumer CON.
   Search through resolved dependency lists if RESOLVED_P is true.
   If no such dependency is found return NULL,
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
   with an iterator pointing to it.  */
static dep_t
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
                              sd_iterator_def *sd_it_ptr)
{
  sd_list_types_def pro_list_type;
  sd_list_types_def con_list_type;
  sd_iterator_def sd_it;
  dep_t dep;
  bool found_p = false;

  if (resolved_p)
    {
      pro_list_type = SD_LIST_RES_FORW;
      con_list_type = SD_LIST_RES_BACK;
    }
  else
    {
      pro_list_type = SD_LIST_FORW;
      con_list_type = SD_LIST_BACK;
    }

  /* Walk through either back list of INSN or forw list of ELEM
     depending on which one is shorter.  */
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
    {
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
        if (DEP_PRO (dep) == pro)
          {
            found_p = true;
            break;
          }
    }
  else
    {
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
        if (DEP_CON (dep) == con)
          {
            found_p = true;
            break;
          }
    }

  if (found_p)
    {
      if (sd_it_ptr != NULL)
        *sd_it_ptr = sd_it;

      return dep;
    }

  return NULL;
}

/* Find a dependency between producer PRO and consumer CON.
   Use the dependency caches [if available] to check whether a dependency
   is present at all.
   Search through resolved dependency lists if RESOLVED_P is true.
   Return the dependency, or NULL if none was found.  */
dep_t
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
{
  if (true_dependency_cache != NULL)
    /* Avoiding the list walk below can cut compile times dramatically
       for some code.  */
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      gcc_assert (output_dependency_cache != NULL
                  && anti_dependency_cache != NULL);

      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
          && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
          && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
        return NULL;
    }

  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
}

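/* An illustrative sketch of using the lookup above from a debugging
   session: query whether consumer CON depends on producer PRO among the
   unresolved lists and, if so, dump the dependence to stderr.  Because
   sd_find_dep_between consults the bitmap caches first, the query is
   cheap even in large regions.  */
static void ATTRIBUTE_UNUSED
sd_debug_dep_between (rtx pro, rtx con)
{
  dep_t dep = sd_find_dep_between (pro, con, false);

  if (dep != NULL)
    sd_debug_dep (dep);
}
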
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating whether an old entry has been
   changed or a new entry has been added to the insn's backward deps.

   This function merely checks whether producer and consumer are the same
   insn and doesn't create a dep in that case.  Actual manipulation of
   dependence data structures is performed in add_or_update_dep_1.  */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
{
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
      if (sched_deps_info->generate_spec_deps)
        /* INSN has an internal dependence, which we can't overcome.  */
        HAS_INTERNAL_DEP (insn) = 1;

      return DEP_NODEP;
    }

  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
}

/* Ask dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  */
static enum DEPS_ADJUST_RESULT
ask_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  gcc_assert (true_dependency_cache != NULL
              && output_dependency_cache != NULL
              && anti_dependency_cache != NULL);

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      enum reg_note present_dep_type;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
        present_dep_type = REG_DEP_TRUE;
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
        present_dep_type = REG_DEP_OUTPUT;
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
        present_dep_type = REG_DEP_ANTI;
      else
        /* There is no existing dep so it should be created.  */
        return DEP_CREATED;

      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
        /* DEP does not add anything to the existing dependence.  */
        return DEP_PRESENT;
    }
  else
    {
      ds_t present_dep_types = 0;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
        present_dep_types |= DEP_TRUE;
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
        present_dep_types |= DEP_OUTPUT;
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
        present_dep_types |= DEP_ANTI;

      if (present_dep_types == 0)
        /* There is no existing dep so it should be created.  */
        return DEP_CREATED;

      if (!(current_sched_info->flags & DO_SPECULATION)
          || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
        {
          if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
              == present_dep_types)
            /* DEP does not add anything to the existing dependence.  */
            return DEP_PRESENT;
        }
      else
        {
          /* Only true dependencies can be data speculative and
             only anti dependencies can be control speculative.  */
          gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
                      == present_dep_types);

          /* if (DEP is SPECULATIVE) then
               ..we should update DEP_STATUS
             else
               ..we should reset existing dep to non-speculative.  */
        }
    }

  return DEP_CHANGED;
}

/* Set dependency caches according to DEP.  */
static void
set_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (DEP_TYPE (dep))
        {
        case REG_DEP_TRUE:
          bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
          break;

        case REG_DEP_OUTPUT:
          bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
          break;

        case REG_DEP_ANTI:
          bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
          break;

        default:
          gcc_unreachable ();
        }
    }
  else
    {
      ds_t ds = DEP_STATUS (dep);

      if (ds & DEP_TRUE)
        bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_OUTPUT)
        bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_ANTI)
        bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);

      if (ds & SPECULATIVE)
        {
          gcc_assert (current_sched_info->flags & DO_SPECULATION);
          bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
        }
    }
}

/* The type of dependence DEP has changed from OLD_TYPE.  Update the
   dependency caches accordingly.  */
static void
update_dependency_caches (dep_t dep, enum reg_note old_type)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  /* Clear corresponding cache entry because type of the link
     may have changed.  Keep them if we use_deps_list.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (old_type)
        {
        case REG_DEP_OUTPUT:
          bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
          break;

        case REG_DEP_ANTI:
          bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
          break;

        default:
          gcc_unreachable ();
        }
    }

  set_dependency_caches (dep);
}

/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
static void
change_spec_dep_to_hard (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_link_t link = DEP_NODE_BACK (node);
  dep_t dep = DEP_NODE_DEP (node);
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));

  DEP_STATUS (dep) &= ~SPECULATIVE;

  if (true_dependency_cache != NULL)
    /* Clear the cache entry.  */
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
                      INSN_LUID (elem));
}

/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case a
   data-speculative dependence should be updated.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
            sd_iterator_def sd_it ATTRIBUTE_UNUSED,
            rtx mem1 ATTRIBUTE_UNUSED,
            rtx mem2 ATTRIBUTE_UNUSED)
{
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
    {
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
      res = DEP_CHANGED;
    }

  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
    {
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
        /* Either existing dep or a dep we're adding or both are
           speculative.  */
        {
          if (!(ds & SPECULATIVE)
              || !(dep_status & SPECULATIVE))
            /* The new dep can't be speculative.  */
            {
              new_status &= ~SPECULATIVE;

              if (dep_status & SPECULATIVE)
                /* The old dep was speculative, but now it
                   isn't.  */
                change_spec_dep_to_hard (sd_it);
            }
          else
            {
              /* Both are speculative.  Merge probabilities.  */
              if (mem1 != NULL)
                {
                  dw_t dw;

                  dw = estimate_dep_weak (mem1, mem2);
                  ds = set_dep_weak (ds, BEGIN_DATA, dw);
                }

              new_status = ds_merge (dep_status, ds);
            }
        }

      ds = new_status;

      if (dep_status != ds)
        {
          DEP_STATUS (dep) = ds;
          res = DEP_CHANGED;
        }
    }

  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);

  return res;
}

/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps or nothing has
   been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
                     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true;
  bool present_p = false;

  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
              && DEP_PRO (new_dep) != DEP_CON (new_dep));

#ifdef ENABLE_CHECKING
  check_dep (new_dep, mem1 != NULL);
#endif

  if (true_dependency_cache != NULL)
    {
      switch (ask_dependency_caches (new_dep))
        {
        case DEP_PRESENT:
          return DEP_PRESENT;

        case DEP_CHANGED:
          maybe_present_p = true;
          present_p = true;
          break;

        case DEP_CREATED:
          maybe_present_p = false;
          present_p = false;
          break;

        default:
          gcc_unreachable ();
          break;
        }
    }

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_t present_dep;
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
                                                  DEP_CON (new_dep),
                                                  resolved_p, &sd_it);

      if (present_dep != NULL)
        /* We found an existing dependency between ELEM and INSN.  */
        return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
      else
        /* We didn't find a dep, so it should not be present in the cache.  */
        gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  if (mem1 != NULL_RTX)
    {
      gcc_assert (sched_deps_info->generate_spec_deps);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
                                           estimate_dep_weak (mem1, mem2));
    }

  sd_add_dep (new_dep, resolved_p);

  return DEP_CREATED;
}

/* Initialize BACK_LIST_PTR with consumer's backward list and
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
   initialize with lists that hold resolved deps.  */
static void
get_back_and_forw_lists (dep_t dep, bool resolved_p,
                         deps_list_t *back_list_ptr,
                         deps_list_t *forw_list_ptr)
{
  rtx con = DEP_CON (dep);

  if (!resolved_p)
    {
      if ((current_sched_info->flags & DO_SPECULATION)
          && (DEP_STATUS (dep) & SPECULATIVE))
        *back_list_ptr = INSN_SPEC_BACK_DEPS (con);
      else
        *back_list_ptr = INSN_HARD_BACK_DEPS (con);

      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
    }
  else
    {
      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
    }
}

/* Add dependence described by DEP.
   If RESOLVED_P is true treat the dependence as a resolved one.  */
void
sd_add_dep (dep_t dep, bool resolved_p)
{
  dep_node_t n = create_dep_node ();
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

  if ((current_sched_info->flags & DO_SPECULATION)
      && !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
    DEP_STATUS (dep) &= ~SPECULATIVE;

  copy_dep (DEP_NODE_DEP (n), dep);

  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);

  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);

#ifdef ENABLE_CHECKING
  check_dep (dep, false);
#endif

  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    set_dependency_caches (dep);
}

/* Add or update a backward dependence described by DEP.
   If RESOLVED_P is true, treat the dependence as a resolved one.
   This function is a convenience wrapper around add_or_update_dep_1.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)
{
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
}

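/* An illustrative sketch of the usual calling pattern for the wrapper
   above (the same pattern haifa_note_dep below uses): build a temporary
   dependence descriptor on the stack with init_dep and hand it to
   sd_add_or_update_dep.  No allocation is needed because dep_def is the
   object type behind dep_t.  */
static enum DEPS_ADJUST_RESULT ATTRIBUTE_UNUSED
sd_add_anti_dep (rtx pro, rtx con)
{
  dep_def _dep, *dep = &_dep;

  init_dep (dep, pro, con, REG_DEP_ANTI);
  return sd_add_or_update_dep (dep, false);
}
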
/* Resolve the dependence pointed to by SD_IT.
   SD_IT will advance to the next element.  */
void
sd_resolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx pro = DEP_PRO (dep);
  rtx con = DEP_CON (dep);

  if ((current_sched_info->flags & DO_SPECULATION)
      && (DEP_STATUS (dep) & SPECULATIVE))
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
                   INSN_RESOLVED_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
                   INSN_RESOLVED_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
                 INSN_RESOLVED_FORW_DEPS (pro));
}

/* Make TO depend on all the FROM's producers.
   If RESOLVED_P is true add dependencies to the resolved lists.  */
void
sd_copy_back_deps (rtx to, rtx from, bool resolved_p)
{
  sd_list_types_def list_type;
  sd_iterator_def sd_it;
  dep_t dep;

  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;

  FOR_EACH_DEP (from, list_type, sd_it, dep)
    {
      dep_def _new_dep, *new_dep = &_new_dep;

      copy_dep (new_dep, dep);
      DEP_CON (new_dep) = to;
      sd_add_dep (new_dep, resolved_p);
    }
}

/* Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  */
void
sd_delete_dep (sd_iterator_def sd_it)
{
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx pro = DEP_PRO (dep);
  rtx con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  if (true_dependency_cache != NULL)
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      if (current_sched_info->flags & DO_SPECULATION)
        bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  get_back_and_forw_lists (dep, sd_it.resolved_p,
                           &con_back_deps, &pro_forw_deps);

  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
}

/* Dump size of the lists.  */
#define DUMP_LISTS_SIZE (2)

/* Dump dependencies of the lists.  */
#define DUMP_LISTS_DEPS (4)

/* Dump all information about the lists.  */
#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)

/* Dump deps_lists of INSN specified by TYPES to DUMP.
   FLAGS is a bit mask specifying what information about the lists needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about
   the lists and propagate this bit into the callee dump functions.  */
static void
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int all;

  all = (flags & 1);

  if (all)
    flags |= DUMP_LISTS_ALL;

  fprintf (dump, "[");

  if (flags & DUMP_LISTS_SIZE)
    fprintf (dump, "%d; ", sd_lists_size (insn, types));

  if (flags & DUMP_LISTS_DEPS)
    {
      FOR_EACH_DEP (insn, types, sd_it, dep)
        {
          dump_dep (dump, dep, dump_dep_flags | all);
          fprintf (dump, " ");
        }
    }
}

/* Dump all information about deps_lists of INSN specified by TYPES
   to STDERR.  */
void
sd_debug_lists (rtx insn, sd_list_types_def types)
{
  dump_lists (stderr, insn, types, 1);
  fprintf (stderr, "\n");
}

/* A convenience wrapper to operate on an entire list.  */

static void
add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
{
  for (; list; list = XEXP (list, 1))
    {
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
        add_dependence (insn, XEXP (list, 0), dep_type);
    }
}

/* Similar, but free *LISTP at the same time, when the context
   is not readonly.  */

static void
add_dependence_list_and_free (struct deps_desc *deps, rtx insn, rtx *listp,
                              int uncond, enum reg_note dep_type)
{
  rtx list, next;

  /* We don't want to short-circuit dependencies involving debug
     insns, because they may cause actual dependencies to be
     disregarded.  */
  if (deps->readonly || DEBUG_INSN_P (insn))
    {
      add_dependence_list (insn, *listp, uncond, dep_type);
      return;
    }

  for (list = *listp, *listp = NULL; list ; list = next)
    {
      next = XEXP (list, 1);
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
        add_dependence (insn, XEXP (list, 0), dep_type);
      free_INSN_LIST_node (list);
    }
}

/* Remove all occurrences of INSN from LIST.  Return the number of
   occurrences removed.  */

static int
remove_from_dependence_list (rtx insn, rtx* listp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
        {
          remove_free_INSN_LIST_node (listp);
          removed++;
          continue;
        }

      listp = &XEXP (*listp, 1);
    }

  return removed;
}

/* Same as above, but process two lists at once.  */
static int
remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
        {
          remove_free_INSN_LIST_node (listp);
          remove_free_EXPR_LIST_node (exprp);
          removed++;
          continue;
        }

      listp = &XEXP (*listp, 1);
      exprp = &XEXP (*exprp, 1);
    }

  return removed;
}

/* Clear all dependencies for an insn.  */
static void
delete_all_dependences (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* The loop below could be optimized to clear the caches and back_deps
     in a single call, but that would duplicate code from
     sd_delete_dep ().  */

  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    sd_delete_dep (sd_it);
}

/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */

static void
fixup_sched_groups (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  rtx prev_nonnote;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx i = insn;
      rtx pro = DEP_PRO (dep);

      do
        {
          i = prev_nonnote_insn (i);

          if (pro == i)
            goto next_link;
        } while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));

      if (! sched_insns_conditions_mutex_p (i, pro))
        add_dependence (i, pro, DEP_TYPE (dep));
    next_link:;
    }

  delete_all_dependences (insn);

  prev_nonnote = prev_nonnote_nondebug_insn (insn);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}
\f
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */

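/* For example, in the sequence

     (1) r1 = MEM[a]   ;; read
     (2) MEM[a] = r2   ;; write: anti dependence on (1)
     (3) MEM[a] = r3   ;; write: output dependence on (2)
     (4) r4 = MEM[a]   ;; read: true dependence on (3)

   two reads of the same location, taken by themselves, impose no
   ordering requirement on each other.  (Illustrative pseudo-RTL, not
   actual scheduler output.)  */
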
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
                         rtx insn, rtx mem)
{
  rtx *insn_list;
  rtx *mem_list;
  rtx link;

  gcc_assert (!deps->readonly);
  if (read_p)
    {
      insn_list = &deps->pending_read_insns;
      mem_list = &deps->pending_read_mems;
      if (!DEBUG_INSN_P (insn))
        deps->pending_read_list_length++;
    }
  else
    {
      insn_list = &deps->pending_write_insns;
      mem_list = &deps->pending_write_mems;
      deps->pending_write_list_length++;
    }

  link = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = link;

  if (sched_deps_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
    }
  link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
  *mem_list = link;
}

/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */

static void
flush_pending_lists (struct deps_desc *deps, rtx insn, int for_read,
                     int for_write)
{
  if (for_write)
    {
      add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
                                    1, REG_DEP_ANTI);
      if (!deps->readonly)
        {
          free_EXPR_LIST_list (&deps->pending_read_mems);
          deps->pending_read_list_length = 0;
        }
    }

  add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);

  add_dependence_list_and_free (deps, insn,
                                &deps->last_pending_memory_flush, 1,
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  if (!deps->readonly)
    {
      free_EXPR_LIST_list (&deps->pending_write_mems);
      deps->pending_write_list_length = 0;

      deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
      deps->pending_flush_length = 1;
    }
}
\f
/* The instruction whose dependencies we are currently analyzing.  */
static rtx cur_insn = NULL_RTX;

/* Implement hooks for haifa scheduler.  */

static void
haifa_start_insn (rtx insn)
{
  gcc_assert (insn && !cur_insn);

  cur_insn = insn;
}

static void
haifa_finish_insn (void)
{
  cur_insn = NULL;
}

void
haifa_note_reg_set (int regno)
{
  SET_REGNO_REG_SET (reg_pending_sets, regno);
}

void
haifa_note_reg_clobber (int regno)
{
  SET_REGNO_REG_SET (reg_pending_clobbers, regno);
}

void
haifa_note_reg_use (int regno)
{
  SET_REGNO_REG_SET (reg_pending_uses, regno);
}

static void
haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds)
{
  if (!(ds & SPECULATIVE))
    {
      mem = NULL_RTX;
      pending_mem = NULL_RTX;
    }
  else
    gcc_assert (ds & BEGIN_DATA);

  {
    dep_def _dep, *dep = &_dep;

    init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
                current_sched_info->flags & USE_DEPS_LIST ? ds : -1);
    maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
  }
}

static void
haifa_note_dep (rtx elem, ds_t ds)
{
  dep_def _dep;
  dep_t dep = &_dep;

  init_dep (dep, elem, cur_insn, ds_to_dt (ds));
  maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
}

static void
note_reg_use (int r)
{
  if (sched_deps_info->note_reg_use)
    sched_deps_info->note_reg_use (r);
}

static void
note_reg_set (int r)
{
  if (sched_deps_info->note_reg_set)
    sched_deps_info->note_reg_set (r);
}

static void
note_reg_clobber (int r)
{
  if (sched_deps_info->note_reg_clobber)
    sched_deps_info->note_reg_clobber (r);
}

static void
note_mem_dep (rtx m1, rtx m2, rtx e, ds_t ds)
{
  if (sched_deps_info->note_mem_dep)
    sched_deps_info->note_mem_dep (m1, m2, e, ds);
}

static void
note_dep (rtx e, ds_t ds)
{
  if (sched_deps_info->note_dep)
    sched_deps_info->note_dep (e, ds);
}

/* Return the reg_note corresponding to DS.  */
enum reg_note
ds_to_dt (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;
  else if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;
  else
    {
      gcc_assert (ds & DEP_ANTI);
      return REG_DEP_ANTI;
    }
}

\f

/* Functions for computation of info needed for register pressure
   sensitive insn scheduling.  */


/* Allocate and return reg_use_data structure for REGNO and INSN.  */
static struct reg_use_data *
create_insn_reg_use (int regno, rtx insn)
{
  struct reg_use_data *use;

  use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
  use->regno = regno;
  use->insn = insn;
  use->next_insn_use = INSN_REG_USE_LIST (insn);
  INSN_REG_USE_LIST (insn) = use;
  return use;
}

/* Allocate and return reg_set_data structure for REGNO and INSN.  */
static struct reg_set_data *
create_insn_reg_set (int regno, rtx insn)
{
  struct reg_set_data *set;

  set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
  set->regno = regno;
  set->insn = insn;
  set->next_insn_set = INSN_REG_SET_LIST (insn);
  INSN_REG_SET_LIST (insn) = set;
  return set;
}

/* Set up insn register uses for INSN and dependency context DEPS.  */
static void
setup_insn_reg_uses (struct deps_desc *deps, rtx insn)
{
  unsigned i;
  reg_set_iterator rsi;
  rtx list;
  struct reg_use_data *use, *use2, *next;
  struct deps_reg *reg_last;

  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
    {
      if (i < FIRST_PSEUDO_REGISTER
          && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
        continue;

      if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
          && ! REGNO_REG_SET_P (reg_pending_sets, i)
          && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
        /* Ignore a use that is not dying.  */
        continue;

      use = create_insn_reg_use (i, insn);
      use->next_regno_use = use;
      reg_last = &deps->reg_last[i];

      /* Create the cycle list of uses.  */
      for (list = reg_last->uses; list; list = XEXP (list, 1))
        {
          use2 = create_insn_reg_use (i, XEXP (list, 0));
          next = use->next_regno_use;
          use->next_regno_use = use2;
          use2->next_regno_use = next;
        }
    }
}

/* Register pressure info for the currently processed insn.  */
static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];

/* Return TRUE if INSN has the use structure for REGNO.  */
static bool
insn_use_p (rtx insn, int regno)
{
  struct reg_use_data *use;

  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (use->regno == regno)
      return true;
  return false;
}

/* Update the register pressure info after the birth of pseudo register
   REGNO in INSN.  Arguments CLOBBER_P and UNUSED_P say, respectively,
   whether the register is merely clobbered, or set but unused, by the
   insn.  */
static void
mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
{
  int incr, new_incr;
  enum reg_class cl;

  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_cover_class[regno];
  if (cl != NO_REGS)
    {
      incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      if (clobber_p)
        {
          new_incr = reg_pressure_info[cl].clobber_increase + incr;
          reg_pressure_info[cl].clobber_increase = new_incr;
        }
      else if (unused_p)
        {
          new_incr = reg_pressure_info[cl].unused_set_increase + incr;
          reg_pressure_info[cl].unused_set_increase = new_incr;
        }
      else
        {
          new_incr = reg_pressure_info[cl].set_increase + incr;
          reg_pressure_info[cl].set_increase = new_incr;
          if (! insn_use_p (insn, regno))
            reg_pressure_info[cl].change += incr;
          create_insn_reg_set (regno, insn);
        }
      gcc_assert (new_incr < (1 << INCREASE_BITS));
    }
}

/* Like mark_insn_pseudo_birth except that NREGS says how many hard
   registers are involved in the birth.  */
static void
mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
                            bool clobber_p, bool unused_p)
{
  enum reg_class cl;
  int new_incr, last = regno + nregs;

  while (regno < last)
    {
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
        {
          cl = sched_regno_cover_class[regno];
          if (cl != NO_REGS)
            {
              if (clobber_p)
                {
                  new_incr = reg_pressure_info[cl].clobber_increase + 1;
                  reg_pressure_info[cl].clobber_increase = new_incr;
                }
              else if (unused_p)
                {
                  new_incr = reg_pressure_info[cl].unused_set_increase + 1;
                  reg_pressure_info[cl].unused_set_increase = new_incr;
                }
              else
                {
                  new_incr = reg_pressure_info[cl].set_increase + 1;
                  reg_pressure_info[cl].set_increase = new_incr;
                  if (! insn_use_p (insn, regno))
                    reg_pressure_info[cl].change += 1;
                  create_insn_reg_set (regno, insn);
                }
              gcc_assert (new_incr < (1 << INCREASE_BITS));
            }
        }
      regno++;
    }
}

/* Update the register pressure info after the birth of pseudo or hard
   register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say,
   respectively, whether the register is merely clobbered, or set but
   unused, by the insn.  */
static void
mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (! REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    mark_insn_hard_regno_birth (insn, regno,
                                hard_regno_nregs[regno][GET_MODE (reg)],
                                clobber_p, unused_p);
  else
    mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
}

/* Update the register pressure info after death of pseudo register
   REGNO.  */
static void
mark_pseudo_death (int regno)
{
  int incr;
  enum reg_class cl;

  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_cover_class[regno];
  if (cl != NO_REGS)
    {
      incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      reg_pressure_info[cl].change -= incr;
    }
}

/* Like mark_pseudo_death except that NREGS says how many hard
   registers are involved in the death.  */
1940 static void
1941 mark_hard_regno_death (int regno, int nregs)
1942 {
1943 enum reg_class cl;
1944 int last = regno + nregs;
1945
1946 while (regno < last)
1947 {
1948 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
1949 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
1950 {
1951 cl = sched_regno_cover_class[regno];
1952 if (cl != NO_REGS)
1953 reg_pressure_info[cl].change -= 1;
1954 }
1955 regno++;
1956 }
1957 }
1958
1959 /* Update the register pressure info after death of pseudo or hard
1960 register REG. */
1961 static void
1962 mark_reg_death (rtx reg)
1963 {
1964 int regno;
1965
1966 if (GET_CODE (reg) == SUBREG)
1967 reg = SUBREG_REG (reg);
1968
1969 if (! REG_P (reg))
1970 return;
1971
1972 regno = REGNO (reg);
1973 if (regno < FIRST_PSEUDO_REGISTER)
1974 mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]);
1975 else
1976 mark_pseudo_death (regno);
1977 }
1978
1979 /* Process SETTER of REG. DATA is an insn containing the setter. */
1980 static void
1981 mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
1982 {
1983 if (setter != NULL_RTX && GET_CODE (setter) != SET)
1984 return;
1985 mark_insn_reg_birth
1986 ((rtx) data, reg, false,
1987 find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
1988 }
1989
1990 /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs. */
1991 static void
1992 mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
1993 {
1994 if (GET_CODE (setter) == CLOBBER)
1995 mark_insn_reg_birth ((rtx) data, reg, true, false);
1996 }
1997
1998 /* Set up reg pressure info related to INSN. */
1999 static void
2000 setup_insn_reg_pressure_info (rtx insn)
2001 {
2002 int i, len;
2003 enum reg_class cl;
2004 static struct reg_pressure_data *pressure_info;
2005 rtx link;
2006
2007 gcc_assert (sched_pressure_p);
2008
2009 if (! INSN_P (insn))
2010 return;
2011
2012 for (i = 0; i < ira_reg_class_cover_size; i++)
2013 {
2014 cl = ira_reg_class_cover[i];
2015 reg_pressure_info[cl].clobber_increase = 0;
2016 reg_pressure_info[cl].set_increase = 0;
2017 reg_pressure_info[cl].unused_set_increase = 0;
2018 reg_pressure_info[cl].change = 0;
2019 }
2020
2021 note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
2022
2023 note_stores (PATTERN (insn), mark_insn_reg_store, insn);
2024
2025 #ifdef AUTO_INC_DEC
2026 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2027 if (REG_NOTE_KIND (link) == REG_INC)
2028 mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2029 #endif
2030
2031 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2032 if (REG_NOTE_KIND (link) == REG_DEAD)
2033 mark_reg_death (XEXP (link, 0));
2034
2035 len = sizeof (struct reg_pressure_data) * ira_reg_class_cover_size;
2036 pressure_info
2037 = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2038 INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_reg_class_cover_size,
2039 sizeof (int));
2040 for (i = 0; i < ira_reg_class_cover_size; i++)
2041 {
2042 cl = ira_reg_class_cover[i];
2043 pressure_info[i].clobber_increase
2044 = reg_pressure_info[cl].clobber_increase;
2045 pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2046 pressure_info[i].unused_set_increase
2047 = reg_pressure_info[cl].unused_set_increase;
2048 pressure_info[i].change = reg_pressure_info[cl].change;
2049 }
2050 }
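
/* A rough worked example of what the code above records (hypothetical
   insn, hypothetical numbers): if INSN clobbers one GENERAL_REGS
   register, sets another one that is not among its own uses, and has a
   REG_DEAD note for a third, then the GENERAL_REGS entry ends up with
   clobber_increase = 1, set_increase = 1, unused_set_increase = 0 and
   change = 1 + 1 - 1 = 1, i.e. the net register-pressure change caused
   by issuing INSN.  */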
2051
2052
2053 \f
2054
2055 /* Internal variable for the sched_analyze_[12] () functions.
2056 If it is nonzero, this means that sched_analyze_[12] is looking
2057 at the outermost (toplevel) SET of the insn.  */
2058 static bool can_start_lhs_rhs_p;
2059
2060 /* Extend reg info for the deps context DEPS given that
2061 we have just generated a register numbered REGNO. */
2062 static void
2063 extend_deps_reg_info (struct deps_desc *deps, int regno)
2064 {
2065 int max_regno = regno + 1;
2066
2067 gcc_assert (!reload_completed);
2068
2069 /* In a readonly context, it would not hurt to extend info,
2070 but it should not be needed. */
2071 if (reload_completed && deps->readonly)
2072 {
2073 deps->max_reg = max_regno;
2074 return;
2075 }
2076
2077 if (max_regno > deps->max_reg)
2078 {
2079 deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2080 max_regno);
2081 memset (&deps->reg_last[deps->max_reg],
2082 0, (max_regno - deps->max_reg)
2083 * sizeof (struct deps_reg));
2084 deps->max_reg = max_regno;
2085 }
2086 }
2087
2088 /* Extends REG_INFO_P if needed. */
2089 void
2090 maybe_extend_reg_info_p (void)
2091 {
2092 /* Extend REG_INFO_P, if needed. */
2093 if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2094 {
2095 size_t new_reg_info_p_size = max_regno + 128;
2096
2097 gcc_assert (!reload_completed && sel_sched_p ());
2098
2099 reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2100 new_reg_info_p_size,
2101 reg_info_p_size,
2102 sizeof (*reg_info_p));
2103 reg_info_p_size = new_reg_info_p_size;
2104 }
2105 }
2106
2107 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2108 The type of the reference is specified by REF and can be SET,
2109 CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
2110
2111 static void
2112 sched_analyze_reg (struct deps_desc *deps, int regno, enum machine_mode mode,
2113 enum rtx_code ref, rtx insn)
2114 {
2115 /* We could emit new pseudos in renaming. Extend the reg structures. */
2116 if (!reload_completed && sel_sched_p ()
2117 && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2118 extend_deps_reg_info (deps, regno);
2119
2120 maybe_extend_reg_info_p ();
2121
2122 /* A hard reg in a wide mode may really be multiple registers.
2123 If so, mark all of them just like the first. */
2124 if (regno < FIRST_PSEUDO_REGISTER)
2125 {
2126 int i = hard_regno_nregs[regno][mode];
2127 if (ref == SET)
2128 {
2129 while (--i >= 0)
2130 note_reg_set (regno + i);
2131 }
2132 else if (ref == USE)
2133 {
2134 while (--i >= 0)
2135 note_reg_use (regno + i);
2136 }
2137 else
2138 {
2139 while (--i >= 0)
2140 note_reg_clobber (regno + i);
2141 }
2142 }
2143
2144 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2145 it does not reload. Ignore these as they have served their
2146 purpose already. */
2147 else if (regno >= deps->max_reg)
2148 {
2149 enum rtx_code code = GET_CODE (PATTERN (insn));
2150 gcc_assert (code == USE || code == CLOBBER);
2151 }
2152
2153 else
2154 {
2155 if (ref == SET)
2156 note_reg_set (regno);
2157 else if (ref == USE)
2158 note_reg_use (regno);
2159 else
2160 note_reg_clobber (regno);
2161
2162 /* Pseudos that are REG_EQUIV to something may be replaced
2163 by that during reloading. We need only add dependencies for
2164 the address in the REG_EQUIV note. */
2165 if (!reload_completed && get_reg_known_equiv_p (regno))
2166 {
2167 rtx t = get_reg_known_value (regno);
2168 if (MEM_P (t))
2169 sched_analyze_2 (deps, XEXP (t, 0), insn);
2170 }
2171
2172 /* Don't let it cross a call after scheduling if it doesn't
2173 already cross one. */
2174 if (REG_N_CALLS_CROSSED (regno) == 0)
2175 {
2176 if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2177 deps->sched_before_next_call
2178 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2179 else
2180 add_dependence_list (insn, deps->last_function_call, 1,
2181 REG_DEP_ANTI);
2182 }
2183 }
2184 }
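
/* Example of the call-crossing logic above, for a hypothetical pseudo
   100 whose value has never crossed a call: a USE of (reg 100) is
   queued on deps->sched_before_next_call, so the next call insn will
   gain a dependence on it, while a SET or CLOBBER instead gets an anti
   dependence on deps->last_function_call directly.  Either way the
   scheduler cannot make pseudo 100 live across a call that it did not
   already cross.  */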
2185
2186 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2187 rtx, X, creating all dependencies generated by the write to the
2188 destination of X, and reads of everything mentioned. */
2189
2190 static void
2191 sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn)
2192 {
2193 rtx dest = XEXP (x, 0);
2194 enum rtx_code code = GET_CODE (x);
2195 bool cslr_p = can_start_lhs_rhs_p;
2196
2197 can_start_lhs_rhs_p = false;
2198
2199 gcc_assert (dest);
2200 if (dest == 0)
2201 return;
2202
2203 if (cslr_p && sched_deps_info->start_lhs)
2204 sched_deps_info->start_lhs (dest);
2205
2206 if (GET_CODE (dest) == PARALLEL)
2207 {
2208 int i;
2209
2210 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2211 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2212 sched_analyze_1 (deps,
2213 gen_rtx_CLOBBER (VOIDmode,
2214 XEXP (XVECEXP (dest, 0, i), 0)),
2215 insn);
2216
2217 if (cslr_p && sched_deps_info->finish_lhs)
2218 sched_deps_info->finish_lhs ();
2219
2220 if (code == SET)
2221 {
2222 can_start_lhs_rhs_p = cslr_p;
2223
2224 sched_analyze_2 (deps, SET_SRC (x), insn);
2225
2226 can_start_lhs_rhs_p = false;
2227 }
2228
2229 return;
2230 }
2231
2232 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2233 || GET_CODE (dest) == ZERO_EXTRACT)
2234 {
2235 if (GET_CODE (dest) == STRICT_LOW_PART
2236 || GET_CODE (dest) == ZERO_EXTRACT
2237 || df_read_modify_subreg_p (dest))
2238 {
2239 /* These both read and modify the result. We must handle
2240 them as writes to get proper dependencies for following
2241 instructions. We must handle them as reads to get proper
2242 dependencies from this to previous instructions.
2243 Thus we need to call sched_analyze_2. */
2244
2245 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2246 }
2247 if (GET_CODE (dest) == ZERO_EXTRACT)
2248 {
2249 /* The second and third arguments are values read by this insn. */
2250 sched_analyze_2 (deps, XEXP (dest, 1), insn);
2251 sched_analyze_2 (deps, XEXP (dest, 2), insn);
2252 }
2253 dest = XEXP (dest, 0);
2254 }
2255
2256 if (REG_P (dest))
2257 {
2258 int regno = REGNO (dest);
2259 enum machine_mode mode = GET_MODE (dest);
2260
2261 sched_analyze_reg (deps, regno, mode, code, insn);
2262
2263 #ifdef STACK_REGS
2264 /* Treat all writes to a stack register as modifying the TOS. */
2265 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2266 {
2267 int nregs;
2268
2269 /* Avoid analyzing the same register twice. */
2270 if (regno != FIRST_STACK_REG)
2271 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2272
2273 nregs = hard_regno_nregs[FIRST_STACK_REG][mode];
2274 while (--nregs >= 0)
2275 SET_HARD_REG_BIT (implicit_reg_pending_uses,
2276 FIRST_STACK_REG + nregs);
2277 }
2278 #endif
2279 }
2280 else if (MEM_P (dest))
2281 {
2282 /* Writing memory. */
2283 rtx t = dest;
2284
2285 if (sched_deps_info->use_cselib)
2286 {
2287 enum machine_mode address_mode
2288 = targetm.addr_space.address_mode (MEM_ADDR_SPACE (dest));
2289
2290 t = shallow_copy_rtx (dest);
2291 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1, insn);
2292 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
2293 }
2294 t = canon_rtx (t);
2295
2296 /* Pending lists can't get larger with a readonly context. */
2297 if (!deps->readonly
2298 && ((deps->pending_read_list_length + deps->pending_write_list_length)
2299 > MAX_PENDING_LIST_LENGTH))
2300 {
2301 /* Flush all pending reads and writes to prevent the pending lists
2302 from getting any larger. Insn scheduling runs too slowly when
2303 these lists get long. When compiling GCC with itself,
2304 this flush occurs 8 times for sparc, and 10 times for m88k using
2305 the default value of 32. */
2306 flush_pending_lists (deps, insn, false, true);
2307 }
2308 else
2309 {
2310 rtx pending, pending_mem;
2311
2312 pending = deps->pending_read_insns;
2313 pending_mem = deps->pending_read_mems;
2314 while (pending)
2315 {
2316 if (anti_dependence (XEXP (pending_mem, 0), t)
2317 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2318 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2319 DEP_ANTI);
2320
2321 pending = XEXP (pending, 1);
2322 pending_mem = XEXP (pending_mem, 1);
2323 }
2324
2325 pending = deps->pending_write_insns;
2326 pending_mem = deps->pending_write_mems;
2327 while (pending)
2328 {
2329 if (output_dependence (XEXP (pending_mem, 0), t)
2330 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2331 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2332 DEP_OUTPUT);
2333
2334 pending = XEXP (pending, 1);
2335 pending_mem = XEXP (pending_mem, 1);
2336 }
2337
2338 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2339 REG_DEP_ANTI);
2340
2341 if (!deps->readonly)
2342 add_insn_mem_dependence (deps, false, insn, dest);
2343 }
2344 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2345 }
2346
2347 if (cslr_p && sched_deps_info->finish_lhs)
2348 sched_deps_info->finish_lhs ();
2349
2350 /* Analyze reads. */
2351 if (GET_CODE (x) == SET)
2352 {
2353 can_start_lhs_rhs_p = cslr_p;
2354
2355 sched_analyze_2 (deps, SET_SRC (x), insn);
2356
2357 can_start_lhs_rhs_p = false;
2358 }
2359 }
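
/* A minimal sketch of the memory classification above (hypothetical
   insns; ADDR stands for any address):

       insn 1:  (set (reg:SI 100) (mem:SI ADDR))   ;; pending read
       insn 2:  (set (mem:SI ADDR) (reg:SI 101))   ;; write analyzed here
       insn 3:  (set (mem:SI ADDR) (reg:SI 102))   ;; later write

   Insn 2 gets an anti dependence (DEP_ANTI) on insn 1 because the
   write may not move above the read, and insn 3, when analyzed, gets
   an output dependence (DEP_OUTPUT) on insn 2.  The address ADDR
   itself is analyzed as a use by the final sched_analyze_2 call.  */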
2360
2361 /* Analyze the uses of memory and registers in rtx X in INSN. */
2362 static void
2363 sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn)
2364 {
2365 int i;
2366 int j;
2367 enum rtx_code code;
2368 const char *fmt;
2369 bool cslr_p = can_start_lhs_rhs_p;
2370
2371 can_start_lhs_rhs_p = false;
2372
2373 gcc_assert (x);
2374 if (x == 0)
2375 return;
2376
2377 if (cslr_p && sched_deps_info->start_rhs)
2378 sched_deps_info->start_rhs (x);
2379
2380 code = GET_CODE (x);
2381
2382 switch (code)
2383 {
2384 case CONST_INT:
2385 case CONST_DOUBLE:
2386 case CONST_FIXED:
2387 case CONST_VECTOR:
2388 case SYMBOL_REF:
2389 case CONST:
2390 case LABEL_REF:
2391 /* Ignore constants. */
2392 if (cslr_p && sched_deps_info->finish_rhs)
2393 sched_deps_info->finish_rhs ();
2394
2395 return;
2396
2397 #ifdef HAVE_cc0
2398 case CC0:
2399 /* User of CC0 depends on immediately preceding insn. */
2400 SCHED_GROUP_P (insn) = 1;
2401 /* Don't move CC0 setter to another block (it can set up the
2402 same flag for previous CC0 users which is safe). */
2403 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2404
2405 if (cslr_p && sched_deps_info->finish_rhs)
2406 sched_deps_info->finish_rhs ();
2407
2408 return;
2409 #endif
2410
2411 case REG:
2412 {
2413 int regno = REGNO (x);
2414 enum machine_mode mode = GET_MODE (x);
2415
2416 sched_analyze_reg (deps, regno, mode, USE, insn);
2417
2418 #ifdef STACK_REGS
2419 /* Treat all reads of a stack register as modifying the TOS. */
2420 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2421 {
2422 /* Avoid analyzing the same register twice. */
2423 if (regno != FIRST_STACK_REG)
2424 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2425 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2426 }
2427 #endif
2428
2429 if (cslr_p && sched_deps_info->finish_rhs)
2430 sched_deps_info->finish_rhs ();
2431
2432 return;
2433 }
2434
2435 case MEM:
2436 {
2437 /* Reading memory. */
2438 rtx u;
2439 rtx pending, pending_mem;
2440 rtx t = x;
2441
2442 if (sched_deps_info->use_cselib)
2443 {
2444 enum machine_mode address_mode
2445 = targetm.addr_space.address_mode (MEM_ADDR_SPACE (t));
2446
2447 t = shallow_copy_rtx (t);
2448 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1, insn);
2449 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
2450 }
2451
2452 if (!DEBUG_INSN_P (insn))
2453 {
2454 t = canon_rtx (t);
2455 pending = deps->pending_read_insns;
2456 pending_mem = deps->pending_read_mems;
2457 while (pending)
2458 {
2459 if (read_dependence (XEXP (pending_mem, 0), t)
2460 && ! sched_insns_conditions_mutex_p (insn,
2461 XEXP (pending, 0)))
2462 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2463 DEP_ANTI);
2464
2465 pending = XEXP (pending, 1);
2466 pending_mem = XEXP (pending_mem, 1);
2467 }
2468
2469 pending = deps->pending_write_insns;
2470 pending_mem = deps->pending_write_mems;
2471 while (pending)
2472 {
2473 if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
2474 t, rtx_varies_p)
2475 && ! sched_insns_conditions_mutex_p (insn,
2476 XEXP (pending, 0)))
2477 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2478 sched_deps_info->generate_spec_deps
2479 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2480
2481 pending = XEXP (pending, 1);
2482 pending_mem = XEXP (pending_mem, 1);
2483 }
2484
2485 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2486 {
2487 if (! JUMP_P (XEXP (u, 0)))
2488 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2489 else if (deps_may_trap_p (x))
2490 {
2491 if ((sched_deps_info->generate_spec_deps)
2492 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2493 {
2494 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2495 MAX_DEP_WEAK);
2496
2497 note_dep (XEXP (u, 0), ds);
2498 }
2499 else
2500 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2501 }
2502 }
2503 }
2504
2505 /* Always add these dependencies to pending_reads, since
2506 this insn may be followed by a write. */
2507 if (!deps->readonly)
2508 add_insn_mem_dependence (deps, true, insn, x);
2509
2510 sched_analyze_2 (deps, XEXP (x, 0), insn);
2511
2512 if (cslr_p && sched_deps_info->finish_rhs)
2513 sched_deps_info->finish_rhs ();
2514
2515 return;
2516 }
2517
2518 /* Force pending stores to memory in case a trap handler needs them. */
2519 case TRAP_IF:
2520 flush_pending_lists (deps, insn, true, false);
2521 break;
2522
2523 case PREFETCH:
2524 if (PREFETCH_SCHEDULE_BARRIER_P (x))
2525 reg_pending_barrier = TRUE_BARRIER;
2526 break;
2527
2528 case UNSPEC_VOLATILE:
2529 flush_pending_lists (deps, insn, true, true);
2530 /* FALLTHRU */
2531
2532 case ASM_OPERANDS:
2533 case ASM_INPUT:
2534 {
2535 /* Traditional and volatile asm instructions must be considered to use
2536 and clobber all hard registers, all pseudo-registers and all of
2537 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2538
2539 Consider for instance a volatile asm that changes the fpu rounding
2540 mode. An insn should not be moved across this even if it only uses
2541 pseudo-regs because it might give an incorrectly rounded result. */
2542 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2543 reg_pending_barrier = TRUE_BARRIER;
2544
2545 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2546 We cannot just fall through here since then we would be confused
2547 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2548 a traditional asm unlike its normal usage.  */
2549
2550 if (code == ASM_OPERANDS)
2551 {
2552 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2553 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2554
2555 if (cslr_p && sched_deps_info->finish_rhs)
2556 sched_deps_info->finish_rhs ();
2557
2558 return;
2559 }
2560 break;
2561 }
2562
2563 case PRE_DEC:
2564 case POST_DEC:
2565 case PRE_INC:
2566 case POST_INC:
2567 /* These both read and modify the result. We must handle them as writes
2568 to get proper dependencies for following instructions. We must handle
2569 them as reads to get proper dependencies from this to previous
2570 instructions. Thus we need to pass them to both sched_analyze_1
2571 and sched_analyze_2. We must call sched_analyze_2 first in order
2572 to get the proper antecedent for the read. */
2573 sched_analyze_2 (deps, XEXP (x, 0), insn);
2574 sched_analyze_1 (deps, x, insn);
2575
2576 if (cslr_p && sched_deps_info->finish_rhs)
2577 sched_deps_info->finish_rhs ();
2578
2579 return;
2580
2581 case POST_MODIFY:
2582 case PRE_MODIFY:
2583 /* op0 = op0 + op1 */
2584 sched_analyze_2 (deps, XEXP (x, 0), insn);
2585 sched_analyze_2 (deps, XEXP (x, 1), insn);
2586 sched_analyze_1 (deps, x, insn);
2587
2588 if (cslr_p && sched_deps_info->finish_rhs)
2589 sched_deps_info->finish_rhs ();
2590
2591 return;
2592
2593 default:
2594 break;
2595 }
2596
2597 /* Other cases: walk the insn. */
2598 fmt = GET_RTX_FORMAT (code);
2599 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2600 {
2601 if (fmt[i] == 'e')
2602 sched_analyze_2 (deps, XEXP (x, i), insn);
2603 else if (fmt[i] == 'E')
2604 for (j = 0; j < XVECLEN (x, i); j++)
2605 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2606 }
2607
2608 if (cslr_p && sched_deps_info->finish_rhs)
2609 sched_deps_info->finish_rhs ();
2610 }
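
/* The mirror-image sketch for a memory read (hypothetical insns):

       insn 1:  (set (mem:SI ADDR) (reg:SI 100))   ;; pending write
       insn 2:  (set (reg:SI 101) (mem:SI ADDR))   ;; read analyzed here

   Insn 2 gets a true dependence on insn 1; when the pass generates
   speculative dependencies it is marked BEGIN_DATA | DEP_TRUE instead,
   so the read may be hoisted speculatively and verified later.
   Earlier pending reads conflict only in the narrow cases recognized
   by read_dependence.  */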
2611
2612 /* Analyze an INSN with pattern X to find all dependencies. */
2613 static void
2614 sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
2615 {
2616 RTX_CODE code = GET_CODE (x);
2617 rtx link;
2618 unsigned i;
2619 reg_set_iterator rsi;
2620
2621 if (! reload_completed)
2622 {
2623 HARD_REG_SET temp;
2624
2625 extract_insn (insn);
2626 preprocess_constraints ();
2627 ira_implicitly_set_insn_hard_regs (&temp);
2628 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
2629 IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2630 }
2631
2632 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2633 && code == SET);
2634
2635 if (may_trap_p (x))
2636 /* Avoid moving trapping instructions across function calls that might
2637    not always return.  */
2638 add_dependence_list (insn, deps->last_function_call_may_noreturn,
2639 1, REG_DEP_ANTI);
2640
2641 if (code == COND_EXEC)
2642 {
2643 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2644
2645 /* ??? Should be recording conditions so we reduce the number of
2646 false dependencies. */
2647 x = COND_EXEC_CODE (x);
2648 code = GET_CODE (x);
2649 }
2650 if (code == SET || code == CLOBBER)
2651 {
2652 sched_analyze_1 (deps, x, insn);
2653
2654 /* Bare clobber insns are used for letting life analysis, reg-stack
2655 and others know that a value is dead. Depend on the last call
2656 instruction so that reg-stack won't get confused. */
2657 if (code == CLOBBER)
2658 add_dependence_list (insn, deps->last_function_call, 1,
2659 REG_DEP_OUTPUT);
2660 }
2661 else if (code == PARALLEL)
2662 {
2663 for (i = XVECLEN (x, 0); i--;)
2664 {
2665 rtx sub = XVECEXP (x, 0, i);
2666 code = GET_CODE (sub);
2667
2668 if (code == COND_EXEC)
2669 {
2670 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2671 sub = COND_EXEC_CODE (sub);
2672 code = GET_CODE (sub);
2673 }
2674 if (code == SET || code == CLOBBER)
2675 sched_analyze_1 (deps, sub, insn);
2676 else
2677 sched_analyze_2 (deps, sub, insn);
2678 }
2679 }
2680 else
2681 sched_analyze_2 (deps, x, insn);
2682
2683 /* Mark registers CLOBBERED or used by the called function.  */
2684 if (CALL_P (insn))
2685 {
2686 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2687 {
2688 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2689 sched_analyze_1 (deps, XEXP (link, 0), insn);
2690 else
2691 sched_analyze_2 (deps, XEXP (link, 0), insn);
2692 }
2693 if (find_reg_note (insn, REG_SETJMP, NULL))
2694 reg_pending_barrier = MOVE_BARRIER;
2695 }
2696
2697 if (JUMP_P (insn))
2698 {
2699 rtx next;
2700 next = next_nonnote_nondebug_insn (insn);
2701 if (next && BARRIER_P (next))
2702 reg_pending_barrier = MOVE_BARRIER;
2703 else
2704 {
2705 rtx pending, pending_mem;
2706
2707 if (sched_deps_info->compute_jump_reg_dependencies)
2708 {
2709 regset_head tmp_uses, tmp_sets;
2710 INIT_REG_SET (&tmp_uses);
2711 INIT_REG_SET (&tmp_sets);
2712
2713 (*sched_deps_info->compute_jump_reg_dependencies)
2714 (insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
2715 /* Make latency of jump equal to 0 by using anti-dependence. */
2716 EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi)
2717 {
2718 struct deps_reg *reg_last = &deps->reg_last[i];
2719 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
2720 add_dependence_list (insn, reg_last->implicit_sets,
2721 0, REG_DEP_ANTI);
2722 add_dependence_list (insn, reg_last->clobbers, 0,
2723 REG_DEP_ANTI);
2724
2725 if (!deps->readonly)
2726 {
2727 reg_last->uses_length++;
2728 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2729 }
2730 }
2731 IOR_REG_SET (reg_pending_sets, &tmp_sets);
2732
2733 CLEAR_REG_SET (&tmp_uses);
2734 CLEAR_REG_SET (&tmp_sets);
2735 }
2736
2737 /* All memory writes and volatile reads must happen before the
2738 jump.  Non-volatile reads must happen before the jump only if
2739 their result is needed by the register use mask computed above.  */
2740
2741 pending = deps->pending_write_insns;
2742 pending_mem = deps->pending_write_mems;
2743 while (pending)
2744 {
2745 if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2746 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2747 pending = XEXP (pending, 1);
2748 pending_mem = XEXP (pending_mem, 1);
2749 }
2750
2751 pending = deps->pending_read_insns;
2752 pending_mem = deps->pending_read_mems;
2753 while (pending)
2754 {
2755 if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
2756 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2757 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2758 pending = XEXP (pending, 1);
2759 pending_mem = XEXP (pending_mem, 1);
2760 }
2761
2762 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2763 REG_DEP_ANTI);
2764 }
2765 }
2766
2767 /* If this instruction can throw an exception, then moving it changes
2768 where block boundaries fall. This is mighty confusing elsewhere.
2769 Therefore, prevent such an instruction from being moved. Same for
2770 non-jump instructions that define block boundaries.
2771 ??? Unclear whether this is still necessary in EBB mode. If not,
2772 add_branch_dependences should be adjusted for RGN mode instead. */
2773 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
2774 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
2775 reg_pending_barrier = MOVE_BARRIER;
2776
2777 if (sched_pressure_p)
2778 {
2779 setup_insn_reg_uses (deps, insn);
2780 setup_insn_reg_pressure_info (insn);
2781 }
2782
2783 /* Add register dependencies for insn. */
2784 if (DEBUG_INSN_P (insn))
2785 {
2786 rtx prev = deps->last_debug_insn;
2787 rtx u;
2788
2789 if (!deps->readonly)
2790 deps->last_debug_insn = insn;
2791
2792 if (prev)
2793 add_dependence (insn, prev, REG_DEP_ANTI);
2794
2795 add_dependence_list (insn, deps->last_function_call, 1,
2796 REG_DEP_ANTI);
2797
2798 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2799 if (! JUMP_P (XEXP (u, 0))
2800 || !sel_sched_p ())
2801 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2802
2803 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2804 {
2805 struct deps_reg *reg_last = &deps->reg_last[i];
2806 add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI);
2807 add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI);
2808
2809 if (!deps->readonly)
2810 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2811 }
2812 CLEAR_REG_SET (reg_pending_uses);
2813
2814 /* Quite often, a debug insn will refer to stuff in the
2815 previous instruction, but the reason we want this
2816 dependency here is to make sure the scheduler doesn't
2817 gratuitously move a debug insn ahead. This could dirty
2818 DF flags and cause additional analysis that wouldn't have
2819 occurred in compilation without debug insns, and such
2820 additional analysis can modify the generated code. */
2821 prev = PREV_INSN (insn);
2822
2823 if (prev && NONDEBUG_INSN_P (prev))
2824 add_dependence (insn, prev, REG_DEP_ANTI);
2825 }
2826 else
2827 {
2828 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2829 {
2830 struct deps_reg *reg_last = &deps->reg_last[i];
2831 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2832 add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI);
2833 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2834
2835 if (!deps->readonly)
2836 {
2837 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2838 reg_last->uses_length++;
2839 }
2840 }
2841
2842 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2843 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
2844 {
2845 struct deps_reg *reg_last = &deps->reg_last[i];
2846 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2847 add_dependence_list (insn, reg_last->implicit_sets, 0,
2848 REG_DEP_ANTI);
2849 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2850
2851 if (!deps->readonly)
2852 {
2853 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2854 reg_last->uses_length++;
2855 }
2856 }
2857
2858 /* If the current insn is conditional, we can't free any
2859 of the lists. */
2860 if (sched_has_condition_p (insn))
2861 {
2862 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2863 {
2864 struct deps_reg *reg_last = &deps->reg_last[i];
2865 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2866 add_dependence_list (insn, reg_last->implicit_sets, 0,
2867 REG_DEP_ANTI);
2868 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2869
2870 if (!deps->readonly)
2871 {
2872 reg_last->clobbers
2873 = alloc_INSN_LIST (insn, reg_last->clobbers);
2874 reg_last->clobbers_length++;
2875 }
2876 }
2877 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2878 {
2879 struct deps_reg *reg_last = &deps->reg_last[i];
2880 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2881 add_dependence_list (insn, reg_last->implicit_sets, 0,
2882 REG_DEP_ANTI);
2883 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
2884 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2885
2886 if (!deps->readonly)
2887 {
2888 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2889 SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
2890 }
2891 }
2892 }
2893 else
2894 {
2895 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2896 {
2897 struct deps_reg *reg_last = &deps->reg_last[i];
2898 if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
2899 || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
2900 {
2901 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
2902 REG_DEP_OUTPUT);
2903 add_dependence_list_and_free (deps, insn,
2904 &reg_last->implicit_sets, 0,
2905 REG_DEP_ANTI);
2906 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
2907 REG_DEP_ANTI);
2908 add_dependence_list_and_free
2909 (deps, insn, &reg_last->clobbers, 0, REG_DEP_OUTPUT);
2910
2911 if (!deps->readonly)
2912 {
2913 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2914 reg_last->clobbers_length = 0;
2915 reg_last->uses_length = 0;
2916 }
2917 }
2918 else
2919 {
2920 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2921 add_dependence_list (insn, reg_last->implicit_sets, 0,
2922 REG_DEP_ANTI);
2923 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2924 }
2925
2926 if (!deps->readonly)
2927 {
2928 reg_last->clobbers_length++;
2929 reg_last->clobbers
2930 = alloc_INSN_LIST (insn, reg_last->clobbers);
2931 }
2932 }
2933 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2934 {
2935 struct deps_reg *reg_last = &deps->reg_last[i];
2936
2937 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
2938 REG_DEP_OUTPUT);
2939 add_dependence_list_and_free (deps, insn,
2940 &reg_last->implicit_sets,
2941 0, REG_DEP_ANTI);
2942 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
2943 REG_DEP_OUTPUT);
2944 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
2945 REG_DEP_ANTI);
2946
2947 if (!deps->readonly)
2948 {
2949 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2950 reg_last->uses_length = 0;
2951 reg_last->clobbers_length = 0;
2952 CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);
2953 }
2954 }
2955 }
2956 }
2957
2958 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2959 if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
2960 {
2961 struct deps_reg *reg_last = &deps->reg_last[i];
2962 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
2963 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
2964 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2965
2966 if (!deps->readonly)
2967 reg_last->implicit_sets
2968 = alloc_INSN_LIST (insn, reg_last->implicit_sets);
2969 }
2970
2971 if (!deps->readonly)
2972 {
2973 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
2974 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
2975 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
2976 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2977 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
2978 || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
2979 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
2980
2981 /* Set up the pending barrier found. */
2982 deps->last_reg_pending_barrier = reg_pending_barrier;
2983 }
2984
2985 CLEAR_REG_SET (reg_pending_uses);
2986 CLEAR_REG_SET (reg_pending_clobbers);
2987 CLEAR_REG_SET (reg_pending_sets);
2988 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
2989 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
2990
2991 /* Add dependencies if a scheduling barrier was found. */
2992 if (reg_pending_barrier)
2993 {
2994 /* In the case of a barrier, most of the added dependencies are not
2995    real, so we use anti-dependences here.  */
2996 if (sched_has_condition_p (insn))
2997 {
2998 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
2999 {
3000 struct deps_reg *reg_last = &deps->reg_last[i];
3001 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
3002 add_dependence_list (insn, reg_last->sets, 0,
3003 reg_pending_barrier == TRUE_BARRIER
3004 ? REG_DEP_TRUE : REG_DEP_ANTI);
3005 add_dependence_list (insn, reg_last->implicit_sets, 0,
3006 REG_DEP_ANTI);
3007 add_dependence_list (insn, reg_last->clobbers, 0,
3008 reg_pending_barrier == TRUE_BARRIER
3009 ? REG_DEP_TRUE : REG_DEP_ANTI);
3010 }
3011 }
3012 else
3013 {
3014 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3015 {
3016 struct deps_reg *reg_last = &deps->reg_last[i];
3017 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3018 REG_DEP_ANTI);
3019 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3020 reg_pending_barrier == TRUE_BARRIER
3021 ? REG_DEP_TRUE : REG_DEP_ANTI);
3022 add_dependence_list_and_free (deps, insn,
3023 &reg_last->implicit_sets, 0,
3024 REG_DEP_ANTI);
3025 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3026 reg_pending_barrier == TRUE_BARRIER
3027 ? REG_DEP_TRUE : REG_DEP_ANTI);
3028
3029 if (!deps->readonly)
3030 {
3031 reg_last->uses_length = 0;
3032 reg_last->clobbers_length = 0;
3033 }
3034 }
3035 }
3036
3037 if (!deps->readonly)
3038 for (i = 0; i < (unsigned)deps->max_reg; i++)
3039 {
3040 struct deps_reg *reg_last = &deps->reg_last[i];
3041 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3042 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3043 }
3044
3045 /* Flush pending lists on jumps, but not on speculative checks. */
3046 if (JUMP_P (insn) && !(sel_sched_p ()
3047 && sel_insn_is_speculation_check (insn)))
3048 flush_pending_lists (deps, insn, true, true);
3049
3050 if (!deps->readonly)
3051 CLEAR_REG_SET (&deps->reg_conditional_sets);
3052 reg_pending_barrier = NOT_A_BARRIER;
3053 }
3054
3055 /* If a post-call group is still open, see if it should remain so.
3056 This insn must be a simple move of a hard reg to a pseudo or
3057 vice-versa.
3058
3059 We must avoid moving these insns for correctness on targets
3060 with small register classes, and for special registers like
3061 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
3062 hard regs for all targets. */
3063
3064 if (deps->in_post_call_group_p)
3065 {
3066 rtx tmp, set = single_set (insn);
3067 int src_regno, dest_regno;
3068
3069 if (set == NULL)
3070 {
3071 if (DEBUG_INSN_P (insn))
3072 /* We don't want to mark debug insns as part of the same
3073 sched group. We know they really aren't, but if we use
3074 debug insns to tell that a call group is over, we'll
3075 get different code if debug insns are not there and
3076 instructions that follow seem like they should be part
3077 of the call group.
3078
3079 Also, if we did, fixup_sched_groups() would move the
3080 deps of the debug insn to the call insn, modifying
3081 non-debug post-dependency counts of the debug insn
3082 dependencies and otherwise messing with the scheduling
3083 order.
3084
3085 Instead, let such debug insns be scheduled freely, but
3086 keep the call group open in case there are insns that
3087 should be part of it afterwards. Since we grant debug
3088 insns higher priority than even sched group insns, it
3089 will all turn out all right. */
3090 goto debug_dont_end_call_group;
3091 else
3092 goto end_call_group;
3093 }
3094
3095 tmp = SET_DEST (set);
3096 if (GET_CODE (tmp) == SUBREG)
3097 tmp = SUBREG_REG (tmp);
3098 if (REG_P (tmp))
3099 dest_regno = REGNO (tmp);
3100 else
3101 goto end_call_group;
3102
3103 tmp = SET_SRC (set);
3104 if (GET_CODE (tmp) == SUBREG)
3105 tmp = SUBREG_REG (tmp);
3106 if ((GET_CODE (tmp) == PLUS
3107 || GET_CODE (tmp) == MINUS)
3108 && REG_P (XEXP (tmp, 0))
3109 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3110 && dest_regno == STACK_POINTER_REGNUM)
3111 src_regno = STACK_POINTER_REGNUM;
3112 else if (REG_P (tmp))
3113 src_regno = REGNO (tmp);
3114 else
3115 goto end_call_group;
3116
3117 if (src_regno < FIRST_PSEUDO_REGISTER
3118 || dest_regno < FIRST_PSEUDO_REGISTER)
3119 {
3120 if (!deps->readonly
3121 && deps->in_post_call_group_p == post_call_initial)
3122 deps->in_post_call_group_p = post_call;
3123
3124 if (!sel_sched_p () || sched_emulate_haifa_p)
3125 {
3126 SCHED_GROUP_P (insn) = 1;
3127 CANT_MOVE (insn) = 1;
3128 }
3129 }
3130 else
3131 {
3132 end_call_group:
3133 if (!deps->readonly)
3134 deps->in_post_call_group_p = not_post_call;
3135 }
3136 }
3137
3138 debug_dont_end_call_group:
3139 if ((current_sched_info->flags & DO_SPECULATION)
3140 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3141 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3142 be speculated. */
3143 {
3144 if (sel_sched_p ())
3145 sel_mark_hard_insn (insn);
3146 else
3147 {
3148 sd_iterator_def sd_it;
3149 dep_t dep;
3150
3151 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3152 sd_iterator_cond (&sd_it, &dep);)
3153 change_spec_dep_to_hard (sd_it);
3154 }
3155 }
3156 }
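
/* The register dependencies created above, in a nutshell, for a
   hypothetical pseudo 100:

       insn A:  (set (reg 100) ...)
       insn B:  (use (reg 100))     ;; true dep of B on A (REG_DEP_TRUE)
       insn C:  (set (reg 100) ...) ;; output dep of C on A
                                    ;; (REG_DEP_OUTPUT) and anti dep of
                                    ;; C on B (REG_DEP_ANTI)

   Implicit sets are recorded with anti dependencies, since those edges
   only constrain ordering and carry no data.  */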
3157
3158 /* Return TRUE if INSN might not always return normally (e.g. it might
3159    call exit or longjmp, or loop forever).  */
3160 static bool
3161 call_may_noreturn_p (rtx insn)
3162 {
3163 rtx call;
3164
3165 /* const or pure calls that aren't looping will always return. */
3166 if (RTL_CONST_OR_PURE_CALL_P (insn)
3167 && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3168 return false;
3169
3170 call = PATTERN (insn);
3171 if (GET_CODE (call) == PARALLEL)
3172 call = XVECEXP (call, 0, 0);
3173 if (GET_CODE (call) == SET)
3174 call = SET_SRC (call);
3175 if (GET_CODE (call) == CALL
3176 && MEM_P (XEXP (call, 0))
3177 && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3178 {
3179 rtx symbol = XEXP (XEXP (call, 0), 0);
3180 if (SYMBOL_REF_DECL (symbol)
3181 && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3182 {
3183 if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3184 == BUILT_IN_NORMAL)
3185 switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3186 {
3187 case BUILT_IN_BCMP:
3188 case BUILT_IN_BCOPY:
3189 case BUILT_IN_BZERO:
3190 case BUILT_IN_INDEX:
3191 case BUILT_IN_MEMCHR:
3192 case BUILT_IN_MEMCMP:
3193 case BUILT_IN_MEMCPY:
3194 case BUILT_IN_MEMMOVE:
3195 case BUILT_IN_MEMPCPY:
3196 case BUILT_IN_MEMSET:
3197 case BUILT_IN_RINDEX:
3198 case BUILT_IN_STPCPY:
3199 case BUILT_IN_STPNCPY:
3200 case BUILT_IN_STRCAT:
3201 case BUILT_IN_STRCHR:
3202 case BUILT_IN_STRCMP:
3203 case BUILT_IN_STRCPY:
3204 case BUILT_IN_STRCSPN:
3205 case BUILT_IN_STRLEN:
3206 case BUILT_IN_STRNCAT:
3207 case BUILT_IN_STRNCMP:
3208 case BUILT_IN_STRNCPY:
3209 case BUILT_IN_STRPBRK:
3210 case BUILT_IN_STRRCHR:
3211 case BUILT_IN_STRSPN:
3212 case BUILT_IN_STRSTR:
3213 /* Assume certain string/memory builtins always return. */
3214 return false;
3215 default:
3216 break;
3217 }
3218 }
3219 }
3220
3221 /* For all other calls assume that they might not always return. */
3222 return true;
3223 }
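
/* For example, a call to memcpy or strlen is assumed to always return
   and yields false here, while a call through a function pointer, or
   to any function not on the list above, conservatively yields
   true.  */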
3224
3225 /* Analyze INSN with DEPS as a context. */
3226 void
3227 deps_analyze_insn (struct deps_desc *deps, rtx insn)
3228 {
3229 if (sched_deps_info->start_insn)
3230 sched_deps_info->start_insn (insn);
3231
3232 if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn) || JUMP_P (insn))
3233 {
3234 /* Make each JUMP_INSN (but not a speculative check)
3235 a scheduling barrier for memory references. */
3236 if (!deps->readonly
3237 && JUMP_P (insn)
3238 && !(sel_sched_p ()
3239 && sel_insn_is_speculation_check (insn)))
3240 {
3241 /* Keep the list a reasonable size. */
3242 if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
3243 flush_pending_lists (deps, insn, true, true);
3244 else
3245 deps->last_pending_memory_flush
3246 = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
3247 }
3248
3249 sched_analyze_insn (deps, PATTERN (insn), insn);
3250 }
3251 else if (CALL_P (insn))
3252 {
3253 int i;
3254
3255 CANT_MOVE (insn) = 1;
3256
3257 if (find_reg_note (insn, REG_SETJMP, NULL))
3258 {
3259 /* This is setjmp. Assume that all registers, not just
3260 hard registers, may be clobbered by this call. */
3261 reg_pending_barrier = MOVE_BARRIER;
3262 }
3263 else
3264 {
3265 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3266 /* A call may read and modify global register variables. */
3267 if (global_regs[i])
3268 {
3269 SET_REGNO_REG_SET (reg_pending_sets, i);
3270 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3271 }
3272 /* Other call-clobbered hard regs may be clobbered.
3273 Since we only have a choice between 'might be clobbered'
3274 and 'definitely not clobbered', we must include all
3275 partly call-clobbered registers here. */
3276 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3277 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3278 SET_REGNO_REG_SET (reg_pending_clobbers, i);
3279 /* We don't know what set of fixed registers might be used
3280    by the function, but it is certain that the stack pointer
3281    is among them; be conservative and assume all of them are.  */
3282 else if (fixed_regs[i])
3283 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3284 /* The frame pointer is normally not used by the function
3285 itself, but by the debugger. */
3286 /* ??? MIPS o32 is an exception. It uses the frame pointer
3287 in the macro expansion of jal but does not represent this
3288 fact in the call_insn rtl. */
3289 else if (i == FRAME_POINTER_REGNUM
3290 || (i == HARD_FRAME_POINTER_REGNUM
3291 && (! reload_completed || frame_pointer_needed)))
3292 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3293 }
3294
3295 /* For each insn which shouldn't cross a call, add a dependence
3296 between that insn and this call insn. */
3297 add_dependence_list_and_free (deps, insn,
3298 &deps->sched_before_next_call, 1,
3299 REG_DEP_ANTI);
3300
3301 sched_analyze_insn (deps, PATTERN (insn), insn);
3302
3303 /* If CALL were in a sched group, it would violate the convention
3304    that sched group insns have dependencies only on the previous
3305    instruction.
3306 
3307    Of course one can say: "Hey!  What about the head of the sched
3308    group?"  And I will answer: "Basic principles (one dep per insn)
3309    are always the same."  */
3310 gcc_assert (!SCHED_GROUP_P (insn));
3311
3312 /* In the absence of interprocedural alias analysis, we must flush
3313 all pending reads and writes, and start new dependencies starting
3314 from here. But only flush writes for constant calls (which may
3315 be passed a pointer to something we haven't written yet). */
3316 flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3317
3318 if (!deps->readonly)
3319 {
3320 /* Remember the last function call for limiting lifetimes. */
3321 free_INSN_LIST_list (&deps->last_function_call);
3322 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3323
3324 if (call_may_noreturn_p (insn))
3325 {
3326 /* Remember the last function call that might not always return
3327 normally for limiting moves of trapping insns. */
3328 free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3329 deps->last_function_call_may_noreturn
3330 = alloc_INSN_LIST (insn, NULL_RTX);
3331 }
3332
3333 /* Before reload, begin a post-call group, so as to keep the
3334 lifetimes of hard registers correct. */
3335 if (! reload_completed)
3336 deps->in_post_call_group_p = post_call;
3337 }
3338 }
3339
3340 if (sched_deps_info->use_cselib)
3341 cselib_process_insn (insn);
3342
3343 /* EH_REGION insn notes cannot appear until well after we complete
3344    scheduling.  */
3345 if (NOTE_P (insn))
3346 gcc_assert (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
3347 && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END);
3348
3349 if (sched_deps_info->finish_insn)
3350 sched_deps_info->finish_insn ();
3351
3352 /* Fixup the dependencies in the sched group. */
3353 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3354 && SCHED_GROUP_P (insn) && !sel_sched_p ())
3355 fixup_sched_groups (insn);
3356 }
3357
3358 /* Initialize DEPS for the new block beginning with HEAD. */
3359 void
3360 deps_start_bb (struct deps_desc *deps, rtx head)
3361 {
3362 gcc_assert (!deps->readonly);
3363
3364 /* Before reload, if the previous block ended in a call, show that
3365 we are inside a post-call group, so as to keep the lifetimes of
3366 hard registers correct. */
3367 if (! reload_completed && !LABEL_P (head))
3368 {
3369 rtx insn = prev_nonnote_nondebug_insn (head);
3370
3371 if (insn && CALL_P (insn))
3372 deps->in_post_call_group_p = post_call_initial;
3373 }
3374 }
3375
3376 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3377 dependencies for each insn. */
3378 void
3379 sched_analyze (struct deps_desc *deps, rtx head, rtx tail)
3380 {
3381 rtx insn;
3382
3383 if (sched_deps_info->use_cselib)
3384 cselib_init (CSELIB_RECORD_MEMORY);
3385
3386 deps_start_bb (deps, head);
3387
3388 for (insn = head;; insn = NEXT_INSN (insn))
3389 {
3391 if (INSN_P (insn))
3392 {
3393 /* Initialize the insn's deps_lists.  */
3394 sd_init_insn (insn);
3395 }
3396
3397 deps_analyze_insn (deps, insn);
3398
3399 if (insn == tail)
3400 {
3401 if (sched_deps_info->use_cselib)
3402 cselib_finish ();
3403 return;
3404 }
3405 }
3406 gcc_unreachable ();
3407 }
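
/* A minimal usage sketch (illustrative; see sched-rgn.c for the real
   driver): analyzing the insns of one block with a fresh context looks
   roughly like this, with HEAD and TAIL delimiting the block:

       struct deps_desc tmp_deps;

       init_deps (&tmp_deps, false);
       sched_analyze (&tmp_deps, head, tail);
       free_deps (&tmp_deps);  */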
3408
3409 /* Helper for sched_free_deps ().
3410 Delete INSN's (RESOLVED_P) backward dependencies. */
3411 static void
3412 delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
3413 {
3414 sd_iterator_def sd_it;
3415 dep_t dep;
3416 sd_list_types_def types;
3417
3418 if (resolved_p)
3419 types = SD_LIST_RES_BACK;
3420 else
3421 types = SD_LIST_BACK;
3422
3423 for (sd_it = sd_iterator_start (insn, types);
3424 sd_iterator_cond (&sd_it, &dep);)
3425 {
3426 dep_link_t link = *sd_it.linkp;
3427 dep_node_t node = DEP_LINK_NODE (link);
3428 deps_list_t back_list;
3429 deps_list_t forw_list;
3430
3431 get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3432 remove_from_deps_list (link, back_list);
3433 delete_dep_node (node);
3434 }
3435 }
3436
3437 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3438 deps_lists. */
3439 void
3440 sched_free_deps (rtx head, rtx tail, bool resolved_p)
3441 {
3442 rtx insn;
3443 rtx next_tail = NEXT_INSN (tail);
3444
3445 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3446 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3447 {
3448 /* Clear resolved back deps together with their dep_nodes.  */
3449 delete_dep_nodes_in_back_deps (insn, resolved_p);
3450
3451 /* Clear forward deps and leave the dep_nodes on the
3452    corresponding back_deps list.  */
3453 if (resolved_p)
3454 clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3455 else
3456 clear_deps_list (INSN_FORW_DEPS (insn));
3457
3458 sd_finish_insn (insn);
3459 }
3460 }
3461 \f
3462 /* Initialize variables for region data dependence analysis.
3463 When LAZY_REG_LAST is true, do not allocate reg_last array
3464 of struct deps_desc immediately. */
3465
3466 void
3467 init_deps (struct deps_desc *deps, bool lazy_reg_last)
3468 {
3469 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3470
3471 deps->max_reg = max_reg;
3472 if (lazy_reg_last)
3473 deps->reg_last = NULL;
3474 else
3475 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3476 INIT_REG_SET (&deps->reg_last_in_use);
3477 INIT_REG_SET (&deps->reg_conditional_sets);
3478
3479 deps->pending_read_insns = 0;
3480 deps->pending_read_mems = 0;
3481 deps->pending_write_insns = 0;
3482 deps->pending_write_mems = 0;
3483 deps->pending_read_list_length = 0;
3484 deps->pending_write_list_length = 0;
3485 deps->pending_flush_length = 0;
3486 deps->last_pending_memory_flush = 0;
3487 deps->last_function_call = 0;
3488 deps->last_function_call_may_noreturn = 0;
3489 deps->sched_before_next_call = 0;
3490 deps->in_post_call_group_p = not_post_call;
3491 deps->last_debug_insn = 0;
3492 deps->last_reg_pending_barrier = NOT_A_BARRIER;
3493 deps->readonly = 0;
3494 }
3495
3496 /* Initialize only the reg_last field of DEPS, which was not allocated
3497    before because DEPS was initialized lazily.  */
3498 void
3499 init_deps_reg_last (struct deps_desc *deps)
3500 {
3501 gcc_assert (deps && deps->max_reg > 0);
3502 gcc_assert (deps->reg_last == NULL);
3503
3504 deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3505 }
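
/* Sketch of the lazy protocol (illustrative): a caller that might not
   need per-register lists at all can create the context with

       init_deps (deps, true);

   and later, before the first access to deps->reg_last, allocate it:

       if (deps->reg_last == NULL)
         init_deps_reg_last (deps);

   free_deps copes with both states.  */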
3506
3507
3508 /* Free insn lists found in DEPS. */
3509
3510 void
3511 free_deps (struct deps_desc *deps)
3512 {
3513 unsigned i;
3514 reg_set_iterator rsi;
3515
3516 /* We set max_reg to 0 when this context was already freed. */
3517 if (deps->max_reg == 0)
3518 {
3519 gcc_assert (deps->reg_last == NULL);
3520 return;
3521 }
3522 deps->max_reg = 0;
3523
3524 free_INSN_LIST_list (&deps->pending_read_insns);
3525 free_EXPR_LIST_list (&deps->pending_read_mems);
3526 free_INSN_LIST_list (&deps->pending_write_insns);
3527 free_EXPR_LIST_list (&deps->pending_write_mems);
3528 free_INSN_LIST_list (&deps->last_pending_memory_flush);
3529
3530 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3531 times. For a testcase with 42000 regs and 8000 small basic blocks,
3532 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
3533 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3534 {
3535 struct deps_reg *reg_last = &deps->reg_last[i];
3536 if (reg_last->uses)
3537 free_INSN_LIST_list (&reg_last->uses);
3538 if (reg_last->sets)
3539 free_INSN_LIST_list (&reg_last->sets);
3540 if (reg_last->implicit_sets)
3541 free_INSN_LIST_list (&reg_last->implicit_sets);
3542 if (reg_last->clobbers)
3543 free_INSN_LIST_list (&reg_last->clobbers);
3544 }
3545 CLEAR_REG_SET (&deps->reg_last_in_use);
3546 CLEAR_REG_SET (&deps->reg_conditional_sets);
3547
3548 /* As we initialize reg_last lazily, it is possible that we didn't allocate
3549 it at all. */
3550 if (deps->reg_last)
3551 free (deps->reg_last);
3552 deps->reg_last = NULL;
3553
3554 deps = NULL;
3555 }
3556
3557 /* Remove INSN from the dependence context DEPS.  Caution:
3558    reg_conditional_sets is not handled.  */
3559 void
3560 remove_from_deps (struct deps_desc *deps, rtx insn)
3561 {
3562 int removed;
3563 unsigned i;
3564 reg_set_iterator rsi;
3565
3566 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
3567 &deps->pending_read_mems);
3568 if (!DEBUG_INSN_P (insn))
3569 deps->pending_read_list_length -= removed;
3570 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
3571 &deps->pending_write_mems);
3572 deps->pending_write_list_length -= removed;
3573 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
3574 deps->pending_flush_length -= removed;
3575
3576 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3577 {
3578 struct deps_reg *reg_last = &deps->reg_last[i];
3579 if (reg_last->uses)
3580 remove_from_dependence_list (insn, &reg_last->uses);
3581 if (reg_last->sets)
3582 remove_from_dependence_list (insn, &reg_last->sets);
3583 if (reg_last->implicit_sets)
3584 remove_from_dependence_list (insn, &reg_last->implicit_sets);
3585 if (reg_last->clobbers)
3586 remove_from_dependence_list (insn, &reg_last->clobbers);
3587 if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
3588 && !reg_last->clobbers)
3589 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
3590 }
3591
3592 if (CALL_P (insn))
3593 {
3594 remove_from_dependence_list (insn, &deps->last_function_call);
3595 remove_from_dependence_list (insn,
3596 &deps->last_function_call_may_noreturn);
3597 }
3598 remove_from_dependence_list (insn, &deps->sched_before_next_call);
3599 }
3600
3601 /* Init deps data vector. */
3602 static void
3603 init_deps_data_vector (void)
3604 {
3605 int reserve = (sched_max_luid + 1
3606 - VEC_length (haifa_deps_insn_data_def, h_d_i_d));
3607 if (reserve > 0
3608 && ! VEC_space (haifa_deps_insn_data_def, h_d_i_d, reserve))
3609 VEC_safe_grow_cleared (haifa_deps_insn_data_def, heap, h_d_i_d,
3610 3 * sched_max_luid / 2);
3611 }
3612
3613 /* If it is profitable to use them, initialize or extend (depending on
3614 GLOBAL_P) dependency data. */
3615 void
3616 sched_deps_init (bool global_p)
3617 {
3618 /* Average number of insns per basic block.
3619    '+ 1' is used to make it nonzero.  */
3620 int insns_in_block = sched_max_luid / n_basic_blocks + 1;
3621
3622 init_deps_data_vector ();
3623
3624 /* We use another caching mechanism for selective scheduling, so
3625 we don't use this one. */
3626 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
3627 {
3628 /* ?!? We could save some memory by computing a per-region luid mapping
3629 which could reduce both the number of vectors in the cache and the
3630 size of each vector. Instead we just avoid the cache entirely unless
3631 the average number of instructions in a basic block is very high. See
3632 the comment before the declaration of true_dependency_cache for
3633 what we consider "very high". */
3634 cache_size = 0;
3635 extend_dependency_caches (sched_max_luid, true);
3636 }
3637
3638 if (global_p)
3639 {
3640 dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
3641 /* Allocate lists for one block at a time. */
3642 insns_in_block);
3643 dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
3644 /* Allocate nodes for one block at a time.
3645 We assume that average insn has
3646 5 producers. */
3647 5 * insns_in_block);
3648 }
3649 }
3650
3651
3652 /* Create or extend (depending on CREATE_P) dependency caches to
3653 size N. */
3654 void
3655 extend_dependency_caches (int n, bool create_p)
3656 {
3657 if (create_p || true_dependency_cache)
3658 {
3659 int i, luid = cache_size + n;
3660
3661 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
3662 luid);
3663 output_dependency_cache = XRESIZEVEC (bitmap_head,
3664 output_dependency_cache, luid);
3665 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
3666 luid);
3667
3668 if (current_sched_info->flags & DO_SPECULATION)
3669 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
3670 luid);
3671
3672 for (i = cache_size; i < luid; i++)
3673 {
3674 bitmap_initialize (&true_dependency_cache[i], 0);
3675 bitmap_initialize (&output_dependency_cache[i], 0);
3676 bitmap_initialize (&anti_dependency_cache[i], 0);
3677
3678 if (current_sched_info->flags & DO_SPECULATION)
3679 bitmap_initialize (&spec_dependency_cache[i], 0);
3680 }
3681 cache_size = luid;
3682 }
3683 }
3684
3685 /* Finalize dependency information for the whole function. */
3686 void
3687 sched_deps_finish (void)
3688 {
3689 gcc_assert (deps_pools_are_empty_p ());
3690 free_alloc_pool_if_empty (&dn_pool);
3691 free_alloc_pool_if_empty (&dl_pool);
3692 gcc_assert (dn_pool == NULL && dl_pool == NULL);
3693
3694 VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d);
3696
3697 if (true_dependency_cache)
3698 {
3699 int i;
3700
3701 for (i = 0; i < cache_size; i++)
3702 {
3703 bitmap_clear (&true_dependency_cache[i]);
3704 bitmap_clear (&output_dependency_cache[i]);
3705 bitmap_clear (&anti_dependency_cache[i]);
3706
3707 if (sched_deps_info->generate_spec_deps)
3708 bitmap_clear (&spec_dependency_cache[i]);
3709 }
3710 free (true_dependency_cache);
3711 true_dependency_cache = NULL;
3712 free (output_dependency_cache);
3713 output_dependency_cache = NULL;
3714 free (anti_dependency_cache);
3715 anti_dependency_cache = NULL;
3716
3717 if (sched_deps_info->generate_spec_deps)
3718 {
3719 free (spec_dependency_cache);
3720 spec_dependency_cache = NULL;
3721 }
3722 cache_size = 0;
3723 }
3724 }
3725
3726 /* Initialize some global variables needed by the dependency analysis
3727 code. */
3728
3729 void
3730 init_deps_global (void)
3731 {
3732 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3733 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3734 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
3735 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
3736 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
3737 reg_pending_barrier = NOT_A_BARRIER;
3738
3739 if (!sel_sched_p () || sched_emulate_haifa_p)
3740 {
3741 sched_deps_info->start_insn = haifa_start_insn;
3742 sched_deps_info->finish_insn = haifa_finish_insn;
3743
3744 sched_deps_info->note_reg_set = haifa_note_reg_set;
3745 sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
3746 sched_deps_info->note_reg_use = haifa_note_reg_use;
3747
3748 sched_deps_info->note_mem_dep = haifa_note_mem_dep;
3749 sched_deps_info->note_dep = haifa_note_dep;
3750 }
3751 }
3752
3753 /* Free everything used by the dependency analysis code. */
3754
3755 void
3756 finish_deps_global (void)
3757 {
3758 FREE_REG_SET (reg_pending_sets);
3759 FREE_REG_SET (reg_pending_clobbers);
3760 FREE_REG_SET (reg_pending_uses);
3761 }
3762
3763 /* Estimate the weakness of dependence between MEM1 and MEM2. */
3764 dw_t
3765 estimate_dep_weak (rtx mem1, rtx mem2)
3766 {
3767 rtx r1, r2;
3768
3769 if (mem1 == mem2)
3770 /* MEMs are the same - don't speculate. */
3771 return MIN_DEP_WEAK;
3772
3773 r1 = XEXP (mem1, 0);
3774 r2 = XEXP (mem2, 0);
3775
3776 if (r1 == r2
3777 || (REG_P (r1) && REG_P (r2)
3778 && REGNO (r1) == REGNO (r2)))
3779 /* Again, MEMs are the same. */
3780 return MIN_DEP_WEAK;
3781 else if ((REG_P (r1) && !REG_P (r2))
3782 || (!REG_P (r1) && REG_P (r2)))
3783 /* Different addressing modes - a reason to be more speculative
3784    than usual.  */
3785 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
3786 else
3787 /* We can't say anything about the dependence. */
3788 return UNCERTAIN_DEP_WEAK;
3789 }
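
/* Interpretation note: a higher weakness means the dependence is less
   likely to really exist, so speculating on it is more attractive.
   Identical MEMs yield MIN_DEP_WEAK (never speculate), the general
   case yields UNCERTAIN_DEP_WEAK, and mismatched addressing modes
   yield the midpoint between UNCERTAIN_DEP_WEAK and NO_DEP_WEAK.  */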
3790
3791 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
3792 This function can handle same INSN and ELEM (INSN == ELEM).
3793 It is a convenience wrapper. */
3794 void
3795 add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
3796 {
3797 ds_t ds;
3798 bool internal;
3799
3800 if (dep_type == REG_DEP_TRUE)
3801 ds = DEP_TRUE;
3802 else if (dep_type == REG_DEP_OUTPUT)
3803 ds = DEP_OUTPUT;
3804 else
3805 {
3806 gcc_assert (dep_type == REG_DEP_ANTI);
3807 ds = DEP_ANTI;
3808 }
3809
3810 /* When add_dependence is called from inside sched-deps.c, we expect
3811 cur_insn to be non-null. */
3812 internal = cur_insn != NULL;
3813 if (internal)
3814 gcc_assert (insn == cur_insn);
3815 else
3816 cur_insn = insn;
3817
3818 note_dep (elem, ds);
3819 if (!internal)
3820 cur_insn = NULL;
3821 }

/* Return the weakness of speculative type TYPE in the dep_status DS.  */
dw_t
get_dep_weak_1 (ds_t ds, ds_t type)
{
  ds = ds & type;

  switch (type)
    {
    case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }

  return (dw_t) ds;
}

/* Likewise, but check that the weakness lies in the valid
   [MIN_DEP_WEAK, MAX_DEP_WEAK] range.  */
dw_t
get_dep_weak (ds_t ds, ds_t type)
{
  dw_t dw = get_dep_weak_1 (ds, type);

  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
  return dw;
}

/* Return a dep_status that is identical to DS, except that the
   speculative type TYPE has weakness DW.  */
ds_t
set_dep_weak (ds_t ds, ds_t type, dw_t dw)
{
  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);

  ds &= ~type;
  switch (type)
    {
    case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }
  return ds;
}
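
/* Illustrative sketch only, kept out of the build: each speculative
   type occupies a dedicated bit-field inside ds_t, so set_dep_weak and
   get_dep_weak round-trip.  The function is hypothetical.  */
#if 0
static void
example_weakness_round_trip (void)
{
  ds_t ds = set_dep_weak (DEP_TRUE, BEGIN_DATA, UNCERTAIN_DEP_WEAK);

  gcc_assert (get_dep_weak (ds, BEGIN_DATA) == UNCERTAIN_DEP_WEAK);
}
#endif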

/* Return the join of two dep_statuses DS1 and DS2.
   If MAX_P is true then choose the greater probability,
   otherwise multiply probabilities.
   This function assumes that both DS1 and DS2 contain speculative bits.  */
static ds_t
ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
{
  ds_t ds, t;

  gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));

  ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);

  t = FIRST_SPEC_TYPE;
  do
    {
      if ((ds1 & t) && !(ds2 & t))
        ds |= ds1 & t;
      else if (!(ds1 & t) && (ds2 & t))
        ds |= ds2 & t;
      else if ((ds1 & t) && (ds2 & t))
        {
          dw_t dw1 = get_dep_weak (ds1, t);
          dw_t dw2 = get_dep_weak (ds2, t);
          ds_t dw;

          if (!max_p)
            {
              /* Weaknesses are fixed-point probabilities scaled by
                 MAX_DEP_WEAK, so their product must be scaled back.  */
              dw = ((ds_t) dw1) * ((ds_t) dw2);
              dw /= MAX_DEP_WEAK;
              if (dw < MIN_DEP_WEAK)
                dw = MIN_DEP_WEAK;
            }
          else
            {
              if (dw1 >= dw2)
                dw = dw1;
              else
                dw = dw2;
            }

          ds = set_dep_weak (ds, t, (dw_t) dw);
        }

      if (t == LAST_SPEC_TYPE)
        break;
      t <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  return ds;
}

/* Return the join of two dep_statuses DS1 and DS2.
   This function assumes that both DS1 and DS2 contain speculative bits.  */
ds_t
ds_merge (ds_t ds1, ds_t ds2)
{
  return ds_merge_1 (ds1, ds2, false);
}
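
/* Illustrative sketch only, kept out of the build: merging two
   data-speculative statuses multiplies their success probabilities.
   The function is hypothetical; the bound below follows from
   (w1 * w2) / MAX_DEP_WEAK <= min (w1, w2).  */
#if 0
static void
example_ds_merge (void)
{
  ds_t a = set_dep_weak (DEP_TRUE, BEGIN_DATA, MAX_DEP_WEAK / 2);
  ds_t b = set_dep_weak (DEP_TRUE, BEGIN_DATA, MAX_DEP_WEAK / 2);
  dw_t dw = get_dep_weak (ds_merge (a, b), BEGIN_DATA);

  /* Roughly MAX_DEP_WEAK / 4, and never above either input.  */
  gcc_assert (dw >= MIN_DEP_WEAK && dw <= MAX_DEP_WEAK / 2);
}
#endif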

/* Return the join of DS and DS2.  If MEM1 is nonnull, use MEM1 and MEM2
   to re-estimate the weakness of DS's data speculation before the
   merge.  */
ds_t
ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
{
  ds_t new_status = ds | ds2;

  if (new_status & SPECULATIVE)
    {
      if ((ds && !(ds & SPECULATIVE))
          || (ds2 && !(ds2 & SPECULATIVE)))
        /* Then this dep can't be speculative.  */
        new_status &= ~SPECULATIVE;
      else
        {
          /* Both are speculative.  Merge the probabilities.  */
          if (mem1)
            {
              dw_t dw;

              dw = estimate_dep_weak (mem1, mem2);
              ds = set_dep_weak (ds, BEGIN_DATA, dw);
            }

          if (!ds)
            new_status = ds2;
          else if (!ds2)
            new_status = ds;
          else
            new_status = ds_merge (ds2, ds);
        }
    }

  return new_status;
}
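
/* Illustrative sketch only, kept out of the build: joining a
   speculative status with a nonempty non-speculative one drops the
   speculative bits entirely.  The function is hypothetical.  */
#if 0
static void
example_ds_full_merge (void)
{
  ds_t spec = set_dep_weak (DEP_TRUE, BEGIN_DATA, MAX_DEP_WEAK / 2);

  gcc_assert (!(ds_full_merge (spec, DEP_OUTPUT, NULL_RTX, NULL_RTX)
                & SPECULATIVE));
}
#endif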

/* Return the join of DS1 and DS2.  Use maximum instead of multiplying
   probabilities.  */
ds_t
ds_max_merge (ds_t ds1, ds_t ds2)
{
  /* An empty status joins to the other one unchanged.  */
  if (ds1 == 0)
    return ds2;
  if (ds2 == 0)
    return ds1;

  return ds_merge_1 (ds1, ds2, true);
}
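
/* Illustrative sketch only, kept out of the build: unlike ds_merge,
   ds_max_merge keeps the more optimistic weakness and tolerates an
   empty status on either side.  The function is hypothetical.  */
#if 0
static void
example_ds_max_merge (void)
{
  ds_t a = set_dep_weak (DEP_TRUE, BEGIN_DATA, MIN_DEP_WEAK);
  ds_t b = set_dep_weak (DEP_TRUE, BEGIN_DATA, MAX_DEP_WEAK);

  gcc_assert (ds_max_merge (0, a) == a);
  gcc_assert (get_dep_weak (ds_max_merge (a, b), BEGIN_DATA)
              == MAX_DEP_WEAK);
}
#endif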

/* Return the probability of speculation success for the speculation
   status DS.  */
dw_t
ds_weak (ds_t ds)
{
  ds_t res = 1, dt;
  int n = 0;

  dt = FIRST_SPEC_TYPE;
  do
    {
      if (ds & dt)
        {
          res *= (ds_t) get_dep_weak (ds, dt);
          n++;
        }

      if (dt == LAST_SPEC_TYPE)
        break;
      dt <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  gcc_assert (n);

  /* The product of N weaknesses carries N factors of the MAX_DEP_WEAK
     scale; divide N - 1 times to bring it back into range.  */
  while (--n)
    res /= MAX_DEP_WEAK;

  if (res < MIN_DEP_WEAK)
    res = MIN_DEP_WEAK;

  gcc_assert (res <= MAX_DEP_WEAK);

  return (dw_t) res;
}
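
/* Illustrative sketch only, kept out of the build: with two
   speculative types present, the two weaknesses are multiplied and
   scaled down once.  The function is hypothetical; the statuses here
   are bare speculation bits, not full dependence statuses.  */
#if 0
static void
example_ds_weak (void)
{
  ds_t ds = set_dep_weak (0, BEGIN_DATA, MAX_DEP_WEAK);
  ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK / 2);

  /* MAX_DEP_WEAK * (MAX_DEP_WEAK / 2) / MAX_DEP_WEAK
     == MAX_DEP_WEAK / 2.  */
  gcc_assert (ds_weak (ds) == MAX_DEP_WEAK / 2);
}
#endif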

/* Return a dep status that contains all speculation types of DS.  */
ds_t
ds_get_speculation_types (ds_t ds)
{
  /* Each speculation type is a multi-bit weakness field; if any bit of
     a field is set, saturate the whole field so that only the presence
     of the type is retained.  */
  if (ds & BEGIN_DATA)
    ds |= BEGIN_DATA;
  if (ds & BE_IN_DATA)
    ds |= BE_IN_DATA;
  if (ds & BEGIN_CONTROL)
    ds |= BEGIN_CONTROL;
  if (ds & BE_IN_CONTROL)
    ds |= BE_IN_CONTROL;

  return ds & SPECULATIVE;
}
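
/* Illustrative sketch only, kept out of the build: the dependence-type
   bits are stripped and the surviving speculation field is saturated.
   The function is hypothetical.  */
#if 0
static void
example_ds_get_speculation_types (void)
{
  ds_t ds = set_dep_weak (DEP_TRUE, BEGIN_DATA, MIN_DEP_WEAK);

  gcc_assert (ds_get_speculation_types (ds) == BEGIN_DATA);
}
#endif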

/* Return a dep status that contains maximal weakness for each speculation
   type present in DS.  */
ds_t
ds_get_max_dep_weak (ds_t ds)
{
  if (ds & BEGIN_DATA)
    ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
  if (ds & BE_IN_DATA)
    ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
  if (ds & BEGIN_CONTROL)
    ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
  if (ds & BE_IN_CONTROL)
    ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);

  return ds;
}

/* Dump information about the dependence status S.  */
static void
dump_ds (FILE *f, ds_t s)
{
  fprintf (f, "{");

  if (s & BEGIN_DATA)
    fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
  if (s & BE_IN_DATA)
    fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
  if (s & BEGIN_CONTROL)
    fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
  if (s & BE_IN_CONTROL)
    fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));

  if (s & HARD_DEP)
    fprintf (f, "HARD_DEP; ");

  if (s & DEP_TRUE)
    fprintf (f, "DEP_TRUE; ");
  if (s & DEP_ANTI)
    fprintf (f, "DEP_ANTI; ");
  if (s & DEP_OUTPUT)
    fprintf (f, "DEP_OUTPUT; ");

  fprintf (f, "}");
}

DEBUG_FUNCTION void
debug_ds (ds_t s)
{
  dump_ds (stderr, s);
  fprintf (stderr, "\n");
}
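
/* For example, a true dependence carrying data speculation of weakness 3
   would be printed by debug_ds roughly as

     {BEGIN_DATA: 3; DEP_TRUE; }

   (illustrative; the number depends on the weakness encoding).  */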

#ifdef ENABLE_CHECKING
/* Verify that dependence type and status are consistent.
   If RELAXED_P is true, then skip dep_weakness checks.  */
static void
check_dep (dep_t dep, bool relaxed_p)
{
  enum reg_note dt = DEP_TYPE (dep);
  ds_t ds = DEP_STATUS (dep);

  gcc_assert (DEP_PRO (dep) != DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      gcc_assert (ds == -1);
      return;
    }

  /* Check that the dependence type contains the same bits as the
     status.  */
  if (dt == REG_DEP_TRUE)
    gcc_assert (ds & DEP_TRUE);
  else if (dt == REG_DEP_OUTPUT)
    gcc_assert ((ds & DEP_OUTPUT)
                && !(ds & DEP_TRUE));
  else
    gcc_assert ((dt == REG_DEP_ANTI)
                && (ds & DEP_ANTI)
                && !(ds & (DEP_OUTPUT | DEP_TRUE)));

  /* HARD_DEP cannot appear in the dep_status of a link.  */
  gcc_assert (!(ds & HARD_DEP));

  /* Check that the dependence status is set correctly when speculation
     is not supported.  */
  if (!sched_deps_info->generate_spec_deps)
    gcc_assert (!(ds & SPECULATIVE));
  else if (ds & SPECULATIVE)
    {
      if (!relaxed_p)
        {
          ds_t type = FIRST_SPEC_TYPE;

          /* Check that the dependence weakness is in the proper range.  */
          do
            {
              if (ds & type)
                get_dep_weak (ds, type);

              if (type == LAST_SPEC_TYPE)
                break;
              type <<= SPEC_TYPE_SHIFT;
            }
          while (1);
        }

      if (ds & BEGIN_SPEC)
        {
          /* Only a true dependence can be data speculative.  */
          if (ds & BEGIN_DATA)
            gcc_assert (ds & DEP_TRUE);

          /* Control dependencies in the insn scheduler are represented
             by anti-dependencies, therefore only an anti dependence can
             be control speculative.  */
          if (ds & BEGIN_CONTROL)
            gcc_assert (ds & DEP_ANTI);
        }
      else
        {
          /* Subsequent speculations should resolve true dependencies.  */
          gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
        }

      /* Check that true and anti dependencies can't have other
         speculative statuses.  */
      if (ds & DEP_TRUE)
        gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
      /* An output dependence can't be speculative at all.  */
      gcc_assert (!(ds & DEP_OUTPUT));
      if (ds & DEP_ANTI)
        gcc_assert (ds & BEGIN_CONTROL);
    }
}
#endif /* ENABLE_CHECKING */

#endif /* INSN_SCHEDULING */