/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992-2015 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "tree.h"		/* FIXME: Used by call_may_noreturn_p.  */
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "emit-rtl.h"
#include "dominance.h"
#include "cfg.h"
#include "cfgbuild.h"
#include "basic-block.h"
#include "sched-int.h"
#include "params.h"
#include "alloc-pool.h"
#include "cselib.h"
#include "ira.h"
#include "target.h"

#ifdef INSN_SCHEDULING

#ifdef ENABLE_CHECKING
#define CHECK (true)
#else
#define CHECK (false)
#endif

/* Holds current parameters for the dependency analyzer.  */
struct sched_deps_info_def *sched_deps_info;

/* The data is specific to the Haifa scheduler.  */
vec<haifa_deps_insn_data_def> h_d_i_d = vNULL;

/* Return the major type present in the DS.  */
enum reg_note
ds_to_dk (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;

  if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;

  if (ds & DEP_CONTROL)
    return REG_DEP_CONTROL;

  gcc_assert (ds & DEP_ANTI);

  return REG_DEP_ANTI;
}

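/* For example, a status carrying both DEP_TRUE and DEP_OUTPUT bits maps
   to REG_DEP_TRUE here: the checks above are ordered so that the
   strongest kind present in DS wins.  */
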
/* Return equivalent dep_status.  */
ds_t
dk_to_ds (enum reg_note dk)
{
  switch (dk)
    {
    case REG_DEP_TRUE:
      return DEP_TRUE;

    case REG_DEP_OUTPUT:
      return DEP_OUTPUT;

    case REG_DEP_CONTROL:
      return DEP_CONTROL;

    default:
      gcc_assert (dk == REG_DEP_ANTI);
      return DEP_ANTI;
    }
}

/* Functions to operate with dependence information container - dep_t.  */

/* Init DEP with the arguments.  */
void
init_dep_1 (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note type, ds_t ds)
{
  DEP_PRO (dep) = pro;
  DEP_CON (dep) = con;
  DEP_TYPE (dep) = type;
  DEP_STATUS (dep) = ds;
  DEP_COST (dep) = UNKNOWN_DEP_COST;
  DEP_NONREG (dep) = 0;
  DEP_MULTIPLE (dep) = 0;
  DEP_REPLACE (dep) = NULL;
}

/* Init DEP with the arguments.
   While most of the scheduler (including targets) only need the major type
   of the dependency, it is convenient to hide full dep_status from them.  */
void
init_dep (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note kind)
{
  ds_t ds;

  if ((current_sched_info->flags & USE_DEPS_LIST))
    ds = dk_to_ds (kind);
  else
    ds = 0;

  init_dep_1 (dep, pro, con, kind, ds);
}

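/* A typical caller builds the dep on the stack, e.g.:

     dep_def _dep, *dep = &_dep;

     init_dep (dep, producer, consumer, REG_DEP_TRUE);
     maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);

   (PRODUCER and CONSUMER are placeholder insns here; see haifa_note_dep
   below for a real instance of this pattern.)  */
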
/* Make a copy of FROM in TO.  */
static void
copy_dep (dep_t to, dep_t from)
{
  memcpy (to, from, sizeof (*to));
}

static void dump_ds (FILE *, ds_t);

/* Define flags for dump_dep ().  */

/* Dump producer of the dependence.  */
#define DUMP_DEP_PRO (2)

/* Dump consumer of the dependence.  */
#define DUMP_DEP_CON (4)

/* Dump type of the dependence.  */
#define DUMP_DEP_TYPE (8)

/* Dump status of the dependence.  */
#define DUMP_DEP_STATUS (16)

/* Dump all information about the dependence.  */
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE	\
		      | DUMP_DEP_STATUS)

/* Dump DEP to DUMP.
   FLAGS is a bit mask specifying what information about DEP needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about DEP
   and propagate this bit into the callee dump functions.  */

static void
dump_dep (FILE *dump, dep_t dep, int flags)
{
  if (flags & 1)
    flags |= DUMP_DEP_ALL;

  fprintf (dump, "<");

  if (flags & DUMP_DEP_PRO)
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));

  if (flags & DUMP_DEP_CON)
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));

  if (flags & DUMP_DEP_TYPE)
    {
      char t;
      enum reg_note type = DEP_TYPE (dep);

      switch (type)
	{
	case REG_DEP_TRUE:
	  t = 't';
	  break;

	case REG_DEP_OUTPUT:
	  t = 'o';
	  break;

	case REG_DEP_CONTROL:
	  t = 'c';
	  break;

	case REG_DEP_ANTI:
	  t = 'a';
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}

      fprintf (dump, "%c; ", t);
    }

  if (flags & DUMP_DEP_STATUS)
    {
      if (current_sched_info->flags & USE_DEPS_LIST)
	dump_ds (dump, DEP_STATUS (dep));
    }

  fprintf (dump, ">");
}

/* Default flags for dump_dep ().  */
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);

/* Dump all fields of DEP to STDERR.  */
void
sd_debug_dep (dep_t dep)
{
  dump_dep (stderr, dep, 1);
  fprintf (stderr, "\n");
}

/* Determine whether DEP is a dependency link of a non-debug insn on a
   debug insn.  */

static inline bool
depl_on_debug_p (dep_link_t dep)
{
  return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
	  && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
}

/* Functions to operate with a single link from the dependencies lists -
   dep_link_t.  */

/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXT_P.  */
static void
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
{
  dep_link_t next = *prev_nextp;

  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
	      && DEP_LINK_NEXT (l) == NULL);

  /* Init node being inserted.  */
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
  DEP_LINK_NEXT (l) = next;

  /* Fix next node.  */
  if (next != NULL)
    {
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);

      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
    }

  /* Fix prev node.  */
  *prev_nextp = l;
}

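/* Storing the address of the previous node's NEXT field (rather than a
   pointer to the node itself) in DEP_LINK_PREV_NEXTP lets the head of a
   deps_list be spliced by exactly the same code as an interior node:
   for the first link, PREV_NEXTP simply points at DEPS_LIST_FIRST.  */
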
/* Add dep_link LINK to deps_list L.  */
static void
add_to_deps_list (dep_link_t link, deps_list_t l)
{
  attach_dep_link (link, &DEPS_LIST_FIRST (l));

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    ++DEPS_LIST_N_LINKS (l);
}

/* Detach dep_link L from the list.  */
static void
detach_dep_link (dep_link_t l)
{
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
  dep_link_t next = DEP_LINK_NEXT (l);

  *prev_nextp = next;

  if (next != NULL)
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;

  DEP_LINK_PREV_NEXTP (l) = NULL;
  DEP_LINK_NEXT (l) = NULL;
}

/* Remove link LINK from list LIST.  */
static void
remove_from_deps_list (dep_link_t link, deps_list_t list)
{
  detach_dep_link (link);

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    --DEPS_LIST_N_LINKS (list);
}

/* Move link LINK from list FROM to list TO.  */
static void
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
{
  remove_from_deps_list (link, from);
  add_to_deps_list (link, to);
}

/* Return true if LINK is not attached to any list.  */
static bool
dep_link_is_detached_p (dep_link_t link)
{
  return DEP_LINK_PREV_NEXTP (link) == NULL;
}

/* Pool to hold all dependency nodes (dep_node_t).  */
static pool_allocator<_dep_node> *dn_pool;

/* Number of dep_nodes out there.  */
static int dn_pool_diff = 0;

/* Create a dep_node.  */
static dep_node_t
create_dep_node (void)
{
  dep_node_t n = dn_pool->allocate ();
  dep_link_t back = DEP_NODE_BACK (n);
  dep_link_t forw = DEP_NODE_FORW (n);

  DEP_LINK_NODE (back) = n;
  DEP_LINK_NEXT (back) = NULL;
  DEP_LINK_PREV_NEXTP (back) = NULL;

  DEP_LINK_NODE (forw) = n;
  DEP_LINK_NEXT (forw) = NULL;
  DEP_LINK_PREV_NEXTP (forw) = NULL;

  ++dn_pool_diff;

  return n;
}

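/* A single dep_node thus represents one arc of the dependency graph:
   the embedded BACK link is threaded onto the consumer's backward list
   and the embedded FORW link onto the producer's forward list, so one
   allocation serves both directions.  */
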
/* Delete dep_node N.  N must not be connected to any deps_list.  */
static void
delete_dep_node (dep_node_t n)
{
  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
	      && dep_link_is_detached_p (DEP_NODE_FORW (n)));

  XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));

  --dn_pool_diff;

  dn_pool->remove (n);
}

/* Pool to hold dependencies lists (deps_list_t).  */
static pool_allocator<_deps_list> *dl_pool;

/* Number of deps_lists out there.  */
static int dl_pool_diff = 0;

/* Functions to operate with dependences lists - deps_list_t.  */

/* Return true if list L is empty.  */
static bool
deps_list_empty_p (deps_list_t l)
{
  return DEPS_LIST_N_LINKS (l) == 0;
}

/* Create a new deps_list.  */
static deps_list_t
create_deps_list (void)
{
  deps_list_t l = dl_pool->allocate ();

  DEPS_LIST_FIRST (l) = NULL;
  DEPS_LIST_N_LINKS (l) = 0;

  ++dl_pool_diff;
  return l;
}

/* Free deps_list L.  */
static void
free_deps_list (deps_list_t l)
{
  gcc_assert (deps_list_empty_p (l));

  --dl_pool_diff;

  dl_pool->remove (l);
}

/* Return true if there are no dep_nodes and deps_lists out there.
   After the region is scheduled all the dependency nodes and lists
   should [generally] be returned to pool.  */
bool
deps_pools_are_empty_p (void)
{
  return dn_pool_diff == 0 && dl_pool_diff == 0;
}

/* Remove all elements from L.  */
static void
clear_deps_list (deps_list_t l)
{
  do
    {
      dep_link_t link = DEPS_LIST_FIRST (l);

      if (link == NULL)
	break;

      remove_from_deps_list (link, l);
    }
  while (1);
}

/* Decide whether a dependency should be treated as a hard or a speculative
   dependency.  */
static bool
dep_spec_p (dep_t dep)
{
  if (current_sched_info->flags & DO_SPECULATION)
    {
      if (DEP_STATUS (dep) & SPECULATIVE)
	return true;
    }
  if (current_sched_info->flags & DO_PREDICATION)
    {
      if (DEP_TYPE (dep) == REG_DEP_CONTROL)
	return true;
    }
  if (DEP_REPLACE (dep) != NULL)
    return true;
  return false;
}

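/* Note that "speculative" here is broader than the SPECULATIVE status
   bits: REG_DEP_CONTROL deps under DO_PREDICATION and deps carrying a
   DEP_REPLACE record also qualify, since the scheduler can break each
   of these and compensate for it afterwards.  */
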
static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static regset reg_pending_control_uses;
static enum reg_pending_barrier_mode reg_pending_barrier;

/* Hard registers implicitly clobbered or used (or may be implicitly
   clobbered or used) by the currently analyzed insn.  For example, an
   insn may have a constraint that allows only one register class: even
   if no hard register currently appears in the insn, the particular
   hard register will appear after the reload pass because the
   constraint requires it.  */
static HARD_REG_SET implicit_reg_pending_clobbers;
static HARD_REG_SET implicit_reg_pending_uses;

/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  If the bitmap for the true-dependency cache is
   allocated, then the remaining caches are allocated as well.  */
static bitmap_head *true_dependency_cache = NULL;
static bitmap_head *output_dependency_cache = NULL;
static bitmap_head *anti_dependency_cache = NULL;
static bitmap_head *control_dependency_cache = NULL;
static bitmap_head *spec_dependency_cache = NULL;
static int cache_size;

/* True if we should mark added dependencies as non-register deps.  */
static bool mark_as_hard;

static int deps_may_trap_p (const_rtx);
static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
				 enum reg_note, bool);
static void add_dependence_list_and_free (struct deps_desc *, rtx_insn *,
					  rtx_insn_list **, int, enum reg_note,
					  bool);
static void delete_all_dependences (rtx_insn *);
static void chain_to_prev_insn (rtx_insn *);

static void flush_pending_lists (struct deps_desc *, rtx_insn *, int, int);
static void sched_analyze_1 (struct deps_desc *, rtx, rtx_insn *);
static void sched_analyze_2 (struct deps_desc *, rtx, rtx_insn *);
static void sched_analyze_insn (struct deps_desc *, rtx, rtx_insn *);

static bool sched_has_condition_p (const rtx_insn *);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
							   rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);

#ifdef ENABLE_CHECKING
static void check_dep (dep_t, bool);
#endif

/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (const_rtx mem)
{
  const_rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      const_rtx t = get_reg_known_value (REGNO (addr));
      if (t)
	addr = t;
    }
  return rtx_addr_can_trap_p (addr);
}

/* Find the condition under which INSN is executed.  If REV is not NULL,
   it is set to TRUE when the returned comparison should be reversed
   to get the actual condition.  */
static rtx
sched_get_condition_with_rev_uncached (const rtx_insn *insn, bool *rev)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (rev)
    *rev = false;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
	return 0;

      if (rev)
	*rev = true;
      return cond;
    }

  return 0;
}

/* Return the condition under which INSN does not execute (i.e. the
   not-taken condition for a conditional branch), or NULL if we cannot
   find such a condition.  The caller should make a copy of the condition
   before using it.  */
rtx
sched_get_reverse_condition_uncached (const rtx_insn *insn)
{
  bool rev;
  rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
  if (cond == NULL_RTX)
    return cond;
  if (!rev)
    {
      enum rtx_code revcode = reversed_comparison_code (cond, insn);
      cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
			     XEXP (cond, 0),
			     XEXP (cond, 1));
    }
  return cond;
}

/* Caching variant of sched_get_condition_with_rev_uncached.
   We only do actual work the first time we come here for an insn; the
   results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.  */
static rtx
sched_get_condition_with_rev (const rtx_insn *insn, bool *rev)
{
  bool tmp;

  if (INSN_LUID (insn) == 0)
    return sched_get_condition_with_rev_uncached (insn, rev);

  if (INSN_CACHED_COND (insn) == const_true_rtx)
    return NULL_RTX;

  if (INSN_CACHED_COND (insn) != NULL_RTX)
    {
      if (rev)
	*rev = INSN_REVERSE_COND (insn);
      return INSN_CACHED_COND (insn);
    }

  INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
  INSN_REVERSE_COND (insn) = tmp;

  if (INSN_CACHED_COND (insn) == NULL_RTX)
    {
      INSN_CACHED_COND (insn) = const_true_rtx;
      return NULL_RTX;
    }

  if (rev)
    *rev = INSN_REVERSE_COND (insn);
  return INSN_CACHED_COND (insn);
}

/* True when we can find a condition under which INSN is executed.  */
static bool
sched_has_condition_p (const rtx_insn *insn)
{
  return !! sched_get_condition_with_rev (insn, NULL);
}

/* Return nonzero if conditions COND1 and COND2 can never be both true.  */
static int
conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
{
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) ==
	 (rev1 == rev2
	  ? reversed_comparison_code (cond2, NULL)
	  : GET_CODE (cond2))
      && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}

/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
bool
sched_insns_conditions_mutex_p (const rtx_insn *insn1, const rtx_insn *insn2)
{
  rtx cond1, cond2;
  bool rev1 = false, rev2 = false;

  /* df doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
      if (cond1 && cond2
	  && conditions_mutex_p (cond1, cond2, rev1, rev2)
	  /* Make sure first instruction doesn't affect condition of second
	     instruction if switched.  */
	  && !modified_in_p (cond1, insn2)
	  /* Make sure second instruction doesn't affect condition of first
	     instruction if switched.  */
	  && !modified_in_p (cond2, insn1))
	return true;
    }
  return false;
}

/* Return true if INSN can potentially be speculated with type DS.  */
bool
sched_insn_is_legitimate_for_speculation_p (const rtx_insn *insn, ds_t ds)
{
  if (HAS_INTERNAL_DEP (insn))
    return false;

  if (!NONJUMP_INSN_P (insn))
    return false;

  if (SCHED_GROUP_P (insn))
    return false;

  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX_INSN (insn)))
    return false;

  if (side_effects_p (PATTERN (insn)))
    return false;

  if (ds & BE_IN_SPEC)
    /* The following instructions, which depend on a speculatively scheduled
       instruction, cannot be speculatively scheduled along with it.  */
    {
      if (may_trap_or_fault_p (PATTERN (insn)))
	/* If instruction might fault, it cannot be speculatively scheduled.
	   For control speculation it's obvious why and for data speculation
	   it's because the insn might get wrong input if speculation
	   wasn't successful.  */
	return false;

      if ((ds & BE_IN_DATA)
	  && sched_has_condition_p (insn))
	/* If this is a predicated instruction, then it cannot be
	   speculatively scheduled.  See PR35659.  */
	return false;
    }

  return true;
}

/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
void
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
	      deps_list_t *list_ptr, bool *resolved_p_ptr)
{
  sd_list_types_def types = *types_ptr;

  if (types & SD_LIST_HARD_BACK)
    {
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_HARD_BACK;
    }
  else if (types & SD_LIST_SPEC_BACK)
    {
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
    }
  else if (types & SD_LIST_FORW)
    {
      *list_ptr = INSN_FORW_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_FORW;
    }
  else if (types & SD_LIST_RES_BACK)
    {
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_BACK;
    }
  else if (types & SD_LIST_RES_FORW)
    {
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_FORW;
    }
  else
    {
      *list_ptr = NULL;
      *resolved_p_ptr = false;
      *types_ptr = SD_LIST_NONE;
    }
}

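/* Client code normally does not call sd_next_list directly; it iterates
   with an sd_iterator, e.g. (PROCESS is a placeholder):

     sd_iterator_def sd_it;
     dep_t dep;

     FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
       process (dep);

   sd_next_list is what advances the iterator from one list to the next
   when several SD_LIST_* bits are requested at once.  */
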
/* Return the summary size of INSN's lists defined by LIST_TYPES.  */
int
sd_lists_size (const_rtx insn, sd_list_types_def list_types)
{
  int size = 0;

  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (list)
	size += DEPS_LIST_N_LINKS (list);
    }

  return size;
}

/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */
bool
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
{
  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (!deps_list_empty_p (list))
	return false;
    }

  return true;
}

/* Initialize data for INSN.  */
void
sd_init_insn (rtx_insn *insn)
{
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
  INSN_FORW_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();

  /* ??? It would be nice to allocate dependency caches here.  */
}

/* Free data for INSN.  */
void
sd_finish_insn (rtx_insn *insn)
{
  /* ??? It would be nice to deallocate dependency caches here.  */

  free_deps_list (INSN_HARD_BACK_DEPS (insn));
  INSN_HARD_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
  INSN_SPEC_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_FORW_DEPS (insn));
  INSN_FORW_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
}

/* Find a dependency between producer PRO and consumer CON.
   Search through resolved dependency lists if RESOLVED_P is true.
   If no such dependency is found return NULL,
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
   with an iterator pointing to it.  */
static dep_t
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
			      sd_iterator_def *sd_it_ptr)
{
  sd_list_types_def pro_list_type;
  sd_list_types_def con_list_type;
  sd_iterator_def sd_it;
  dep_t dep;
  bool found_p = false;

  if (resolved_p)
    {
      pro_list_type = SD_LIST_RES_FORW;
      con_list_type = SD_LIST_RES_BACK;
    }
  else
    {
      pro_list_type = SD_LIST_FORW;
      con_list_type = SD_LIST_BACK;
    }

  /* Walk through either back list of INSN or forw list of ELEM
     depending on which one is shorter.  */
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
    {
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
	if (DEP_PRO (dep) == pro)
	  {
	    found_p = true;
	    break;
	  }
    }
  else
    {
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
	if (DEP_CON (dep) == con)
	  {
	    found_p = true;
	    break;
	  }
    }

  if (found_p)
    {
      if (sd_it_ptr != NULL)
	*sd_it_ptr = sd_it;

      return dep;
    }

  return NULL;
}

/* Find a dependency between producer PRO and consumer CON.
   Use dependency caches, if available, to check if the dependency is
   present at all.
   Search through resolved dependency lists if RESOLVED_P is true.
   Return the dependency or NULL if none is found.  */
dep_t
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
{
  if (true_dependency_cache != NULL)
    /* Avoiding the list walk below can cut compile times dramatically
       for some code.  */
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	return NULL;
    }

  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
}

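/* The caches are conservative here: clear bits prove the absence of a
   dependence, so the list walk can be skipped entirely, while set bits
   only suggest presence and the walk in sd_find_dep_between_no_cache is
   still needed to fetch the dep itself.  */
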
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps.

   This function merely checks if producer and consumer are the same insn
   and doesn't create a dep in this case.  Actual manipulation of
   dependence data structures is performed in add_or_update_dep_1.  */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
{
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
      if (sched_deps_info->generate_spec_deps)
        /* INSN has an internal dependence, which we can't overcome.  */
        HAS_INTERNAL_DEP (insn) = 1;

      return DEP_NODEP;
    }

  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
}

/* Ask dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  */
static enum DEPS_ADJUST_RESULT
ask_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  gcc_assert (true_dependency_cache != NULL
	      && output_dependency_cache != NULL
	      && anti_dependency_cache != NULL
	      && control_dependency_cache != NULL);

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      enum reg_note present_dep_type;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_TRUE;
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_OUTPUT;
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_ANTI;
      else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_CONTROL;
      else
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
	/* DEP does not add anything to the existing dependence.  */
	return DEP_PRESENT;
    }
  else
    {
      ds_t present_dep_types = 0;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_TRUE;
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_OUTPUT;
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_ANTI;
      if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_CONTROL;

      if (present_dep_types == 0)
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if (!(current_sched_info->flags & DO_SPECULATION)
	  || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
	{
	  if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
	      == present_dep_types)
	    /* DEP does not add anything to the existing dependence.  */
	    return DEP_PRESENT;
	}
      else
	{
	  /* Only true dependencies can be data speculative and
	     only anti dependencies can be control speculative.  */
	  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
		      == present_dep_types);

	  /* if (DEP is SPECULATIVE) then
	     ..we should update DEP_STATUS
	     else
	     ..we should reset existing dep to non-speculative.  */
	}
    }

  return DEP_CHANGED;
}

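/* So the caches answer in one of three ways: DEP_CREATED (no arc exists
   yet, create one without searching the lists), DEP_PRESENT (an existing
   arc already subsumes DEP, nothing to do), or DEP_CHANGED (an arc
   exists but must be located on the lists and updated).  */
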
/* Set dependency caches according to DEP.  */
static void
set_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (DEP_TYPE (dep))
	{
	case REG_DEP_TRUE:
	  bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_OUTPUT:
	  bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      ds_t ds = DEP_STATUS (dep);

      if (ds & DEP_TRUE)
	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_OUTPUT)
	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_ANTI)
	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_CONTROL)
	bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);

      if (ds & SPECULATIVE)
	{
	  gcc_assert (current_sched_info->flags & DO_SPECULATION);
	  bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
	}
    }
}

/* Type of dependence DEP has changed from OLD_TYPE.  Update dependency
   caches accordingly.  */
static void
update_dependency_caches (dep_t dep, enum reg_note old_type)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  /* Clear corresponding cache entry because type of the link
     may have changed.  Keep them if we use_deps_list.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (old_type)
	{
	case REG_DEP_OUTPUT:
	  bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  set_dependency_caches (dep);
}

/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
static void
change_spec_dep_to_hard (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_link_t link = DEP_NODE_BACK (node);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));

  DEP_STATUS (dep) &= ~SPECULATIVE;

  if (true_dependency_cache != NULL)
    /* Clear the cache entry.  */
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
		      INSN_LUID (elem));
}

/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case if
   data-speculative dependence should be updated.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
	    sd_iterator_def sd_it ATTRIBUTE_UNUSED,
	    rtx mem1 ATTRIBUTE_UNUSED,
	    rtx mem2 ATTRIBUTE_UNUSED)
{
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);
  bool was_spec = dep_spec_p (dep);

  DEP_NONREG (dep) |= DEP_NONREG (new_dep);
  DEP_MULTIPLE (dep) = 1;

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
    {
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
      res = DEP_CHANGED;
    }

  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
    {
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
	{
	  /* Either existing dep or a dep we're adding or both are
	     speculative.  */
	  if (!(ds & SPECULATIVE)
	      || !(dep_status & SPECULATIVE))
	    /* The new dep can't be speculative.  */
	    new_status &= ~SPECULATIVE;
	  else
	    {
	      /* Both are speculative.  Merge probabilities.  */
	      if (mem1 != NULL)
		{
		  dw_t dw;

		  dw = estimate_dep_weak (mem1, mem2);
		  ds = set_dep_weak (ds, BEGIN_DATA, dw);
		}

	      new_status = ds_merge (dep_status, ds);
	    }
	}

      ds = new_status;

      if (dep_status != ds)
	{
	  DEP_STATUS (dep) = ds;
	  res = DEP_CHANGED;
	}
    }

  if (was_spec && !dep_spec_p (dep))
    /* The old dep was speculative, but now it isn't.  */
    change_spec_dep_to_hard (sd_it);

  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);

  return res;
}

/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps or nothing has
   been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
		     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true;
  bool present_p = false;

  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
	      && DEP_PRO (new_dep) != DEP_CON (new_dep));

#ifdef ENABLE_CHECKING
  check_dep (new_dep, mem1 != NULL);
#endif

  if (true_dependency_cache != NULL)
    {
      switch (ask_dependency_caches (new_dep))
	{
	case DEP_PRESENT:
	  dep_t present_dep;
	  sd_iterator_def sd_it;

	  present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						      DEP_CON (new_dep),
						      resolved_p, &sd_it);
	  DEP_MULTIPLE (present_dep) = 1;
	  return DEP_PRESENT;

	case DEP_CHANGED:
	  maybe_present_p = true;
	  present_p = true;
	  break;

	case DEP_CREATED:
	  maybe_present_p = false;
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}
    }

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_t present_dep;
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						  DEP_CON (new_dep),
						  resolved_p, &sd_it);

      if (present_dep != NULL)
	/* We found an existing dependency between ELEM and INSN.  */
	return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
      else
	/* We didn't find a dep, so it shouldn't be present in the cache.  */
	gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  if (mem1 != NULL_RTX)
    {
      gcc_assert (sched_deps_info->generate_spec_deps);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
					   estimate_dep_weak (mem1, mem2));
    }

  sd_add_dep (new_dep, resolved_p);

  return DEP_CREATED;
}

/* Initialize BACK_LIST_PTR with consumer's backward list and
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
   initialize with lists that hold resolved deps.  */
static void
get_back_and_forw_lists (dep_t dep, bool resolved_p,
			 deps_list_t *back_list_ptr,
			 deps_list_t *forw_list_ptr)
{
  rtx_insn *con = DEP_CON (dep);

  if (!resolved_p)
    {
      if (dep_spec_p (dep))
	*back_list_ptr = INSN_SPEC_BACK_DEPS (con);
      else
	*back_list_ptr = INSN_HARD_BACK_DEPS (con);

      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
    }
  else
    {
      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
    }
}

/* Add dependence described by DEP.
   If RESOLVED_P is true treat the dependence as a resolved one.  */
void
sd_add_dep (dep_t dep, bool resolved_p)
{
  dep_node_t n = create_dep_node ();
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

  if ((current_sched_info->flags & DO_SPECULATION) == 0
      || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
    DEP_STATUS (dep) &= ~SPECULATIVE;

  copy_dep (DEP_NODE_DEP (n), dep);

  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);

  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);

#ifdef ENABLE_CHECKING
  check_dep (dep, false);
#endif

  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    set_dependency_caches (dep);
}

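/* sd_add_dep is the single point at which a dependence becomes visible
   to both of its endpoints: the freshly created node is hooked into the
   consumer's backward list and the producer's forward list, and mirrored
   into the bitmap caches when those are active.  */
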
/* Add or update backward dependence between INSN and ELEM
   with given type DEP_TYPE and dep_status DS.
   This function is a convenience wrapper.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)
{
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
}

/* Resolve the dependence pointed to by SD_IT.
   SD_IT will advance to the next element.  */
void
sd_resolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
		 INSN_RESOLVED_FORW_DEPS (pro));
}

/* Perform the inverse operation of sd_resolve_dep.  Restore the dependence
   pointed to by SD_IT to unresolved state.  */
void
sd_unresolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_SPEC_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_HARD_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
		 INSN_FORW_DEPS (pro));
}

/* Make TO depend on all the FROM's producers.
   If RESOLVED_P is true add dependencies to the resolved lists.  */
void
sd_copy_back_deps (rtx_insn *to, rtx_insn *from, bool resolved_p)
{
  sd_list_types_def list_type;
  sd_iterator_def sd_it;
  dep_t dep;

  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;

  FOR_EACH_DEP (from, list_type, sd_it, dep)
    {
      dep_def _new_dep, *new_dep = &_new_dep;

      copy_dep (new_dep, dep);
      DEP_CON (new_dep) = to;
      sd_add_dep (new_dep, resolved_p);
    }
}

/* Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  */
void
sd_delete_dep (sd_iterator_def sd_it)
{
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  if (true_dependency_cache != NULL)
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      if (current_sched_info->flags & DO_SPECULATION)
	bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  get_back_and_forw_lists (dep, sd_it.resolved_p,
			   &con_back_deps, &pro_forw_deps);

  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
}

/* Dump size of the lists.  */
#define DUMP_LISTS_SIZE (2)

/* Dump dependencies of the lists.  */
#define DUMP_LISTS_DEPS (4)

/* Dump all information about the lists.  */
#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)

/* Dump deps_lists of INSN specified by TYPES to DUMP.
   FLAGS is a bit mask specifying what information about the lists needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about
   the lists and propagate this bit into the callee dump functions.  */

static void
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int all;

  all = (flags & 1);

  if (all)
    flags |= DUMP_LISTS_ALL;

  fprintf (dump, "[");

  if (flags & DUMP_LISTS_SIZE)
    fprintf (dump, "%d; ", sd_lists_size (insn, types));

  if (flags & DUMP_LISTS_DEPS)
    {
      FOR_EACH_DEP (insn, types, sd_it, dep)
	{
	  dump_dep (dump, dep, dump_dep_flags | all);
	  fprintf (dump, " ");
	}
    }
}

/* Dump all information about deps_lists of INSN specified by TYPES
   to STDERR.  */
void
sd_debug_lists (rtx insn, sd_list_types_def types)
{
  dump_lists (stderr, insn, types, 1);
  fprintf (stderr, "\n");
}

/* A wrapper around add_dependence_1, to add a dependence of CON on
   PRO, with type DEP_TYPE.  This function implements special handling
   for REG_DEP_CONTROL dependencies.  For these, we optionally promote
   the type to REG_DEP_ANTI if we can determine that predication is
   impossible; otherwise we add additional true dependencies on the
   INSN_COND_DEPS list of the jump (which PRO must be).  */
void
add_dependence (rtx_insn *con, rtx_insn *pro, enum reg_note dep_type)
{
  if (dep_type == REG_DEP_CONTROL
      && !(current_sched_info->flags & DO_PREDICATION))
    dep_type = REG_DEP_ANTI;

  /* A REG_DEP_CONTROL dependence may be eliminated through predication,
     so we must also make the insn dependent on the setter of the
     condition.  */
  if (dep_type == REG_DEP_CONTROL)
    {
      rtx_insn *real_pro = pro;
      rtx_insn *other = real_insn_for_shadow (real_pro);
      rtx cond;

      if (other != NULL_RTX)
	real_pro = other;
      cond = sched_get_reverse_condition_uncached (real_pro);
      /* Verify that the insn does not use a different value in
	 the condition register than the one that was present at
	 the jump.  */
      if (cond == NULL_RTX)
	dep_type = REG_DEP_ANTI;
      else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
	{
	  HARD_REG_SET uses;
	  CLEAR_HARD_REG_SET (uses);
	  note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
	  if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
	    dep_type = REG_DEP_ANTI;
	}
      if (dep_type == REG_DEP_CONTROL)
	{
	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "making DEP_CONTROL for %d\n",
		     INSN_UID (real_pro));
	  add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
			       REG_DEP_TRUE, false);
	}
    }

  add_dependence_1 (con, pro, dep_type);
}

/* A convenience wrapper to operate on an entire list.  HARD should be
   true if DEP_NONREG should be set on newly created dependencies.  */

static void
add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond,
		     enum reg_note dep_type, bool hard)
{
  mark_as_hard = hard;
  for (; list; list = list->next ())
    {
      if (uncond || ! sched_insns_conditions_mutex_p (insn, list->insn ()))
	add_dependence (insn, list->insn (), dep_type);
    }
  mark_as_hard = false;
}

/* Similar, but free *LISTP at the same time, when the context
   is not readonly.  HARD should be true if DEP_NONREG should be set on
   newly created dependencies.  */

static void
add_dependence_list_and_free (struct deps_desc *deps, rtx_insn *insn,
			      rtx_insn_list **listp,
			      int uncond, enum reg_note dep_type, bool hard)
{
  add_dependence_list (insn, *listp, uncond, dep_type, hard);

  /* We don't want to short-circuit dependencies involving debug
     insns, because they may cause actual dependencies to be
     disregarded.  */
  if (deps->readonly || DEBUG_INSN_P (insn))
    return;

  free_INSN_LIST_list (listp);
}

/* Remove all occurrences of INSN from LIST.  Return the number of
   occurrences removed.  */

static int
remove_from_dependence_list (rtx_insn *insn, rtx_insn_list **listp)
{
  int removed = 0;

  while (*listp)
    {
      if ((*listp)->insn () == insn)
        {
          remove_free_INSN_LIST_node (listp);
          removed++;
          continue;
        }

      listp = (rtx_insn_list **)&XEXP (*listp, 1);
    }

  return removed;
}

/* Same as above, but process two lists at once.  */
static int
remove_from_both_dependence_lists (rtx_insn *insn,
				   rtx_insn_list **listp,
				   rtx_expr_list **exprp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
        {
          remove_free_INSN_LIST_node (listp);
          remove_free_EXPR_LIST_node (exprp);
          removed++;
          continue;
        }

      listp = (rtx_insn_list **)&XEXP (*listp, 1);
      exprp = (rtx_expr_list **)&XEXP (*exprp, 1);
    }

  return removed;
}

/* Clear all dependencies for an insn.  */
static void
delete_all_dependences (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* The below cycle can be optimized to clear the caches and back_deps
     in one call but that would provoke duplication of code from
     delete_dep ().  */

  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    sd_delete_dep (sd_it);
}

/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */

static void
chain_to_prev_insn (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  rtx_insn *prev_nonnote;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *i = insn;
      rtx_insn *pro = DEP_PRO (dep);

      do
	{
	  i = prev_nonnote_insn (i);

	  if (pro == i)
	    goto next_link;
	} while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));

      if (! sched_insns_conditions_mutex_p (i, pro))
	add_dependence (i, pro, DEP_TYPE (dep));
    next_link:;
    }

  delete_all_dependences (insn);

  prev_nonnote = prev_nonnote_nondebug_insn (insn);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}

/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */

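/* For instance, in the sequence

     [1] mem[a] = x
     [2] y = mem[a]
     [3] mem[a] = z

   insn [2] has a true dependence on [1], and insn [3] has an output
   dependence on [1] and an anti dependence on [2].  Read-after-read
   needs no link and is not recorded.  */
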
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
			 rtx_insn *insn, rtx mem)
{
  rtx_insn_list **insn_list;
  rtx_insn_list *insn_node;
  rtx_expr_list **mem_list;
  rtx_expr_list *mem_node;

  gcc_assert (!deps->readonly);
  if (read_p)
    {
      insn_list = &deps->pending_read_insns;
      mem_list = &deps->pending_read_mems;
      if (!DEBUG_INSN_P (insn))
	deps->pending_read_list_length++;
    }
  else
    {
      insn_list = &deps->pending_write_insns;
      mem_list = &deps->pending_write_mems;
      deps->pending_write_list_length++;
    }

  insn_node = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = insn_node;

  if (sched_deps_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
							GET_MODE (mem), insn);
    }
  mem_node = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
  *mem_list = mem_node;
}

/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */

static void
flush_pending_lists (struct deps_desc *deps, rtx_insn *insn, int for_read,
		     int for_write)
{
  if (for_write)
    {
      add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
				    1, REG_DEP_ANTI, true);
      if (!deps->readonly)
	{
	  free_EXPR_LIST_list (&deps->pending_read_mems);
	  deps->pending_read_list_length = 0;
	}
    }

  add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
				true);

  add_dependence_list_and_free (deps, insn,
				&deps->last_pending_memory_flush, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
				true);

  add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
				REG_DEP_ANTI, true);

  if (DEBUG_INSN_P (insn))
    {
      if (for_write)
	free_INSN_LIST_list (&deps->pending_read_insns);
      free_INSN_LIST_list (&deps->pending_write_insns);
      free_INSN_LIST_list (&deps->last_pending_memory_flush);
      free_INSN_LIST_list (&deps->pending_jump_insns);
    }

  if (!deps->readonly)
    {
      free_EXPR_LIST_list (&deps->pending_write_mems);
      deps->pending_write_list_length = 0;

      deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
      deps->pending_flush_length = 1;
    }
  mark_as_hard = false;
}

/* The instruction whose dependencies we are currently analyzing.  */
static rtx_insn *cur_insn = NULL;

/* Implement hooks for haifa scheduler.  */

static void
haifa_start_insn (rtx_insn *insn)
{
  gcc_assert (insn && !cur_insn);

  cur_insn = insn;
}

static void
haifa_finish_insn (void)
{
  cur_insn = NULL;
}

void
haifa_note_reg_set (int regno)
{
  SET_REGNO_REG_SET (reg_pending_sets, regno);
}

void
haifa_note_reg_clobber (int regno)
{
  SET_REGNO_REG_SET (reg_pending_clobbers, regno);
}

void
haifa_note_reg_use (int regno)
{
  SET_REGNO_REG_SET (reg_pending_uses, regno);
}

static void
haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx_insn *pending_insn, ds_t ds)
{
  if (!(ds & SPECULATIVE))
    {
      mem = NULL_RTX;
      pending_mem = NULL_RTX;
    }
  else
    gcc_assert (ds & BEGIN_DATA);

  {
    dep_def _dep, *dep = &_dep;

    init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
		current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
    DEP_NONREG (dep) = 1;
    maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
  }
}

static void
haifa_note_dep (rtx_insn *elem, ds_t ds)
{
  dep_def _dep;
  dep_t dep = &_dep;

  init_dep (dep, elem, cur_insn, ds_to_dt (ds));
  if (mark_as_hard)
    DEP_NONREG (dep) = 1;
  maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
}

static void
note_reg_use (int r)
{
  if (sched_deps_info->note_reg_use)
    sched_deps_info->note_reg_use (r);
}

static void
note_reg_set (int r)
{
  if (sched_deps_info->note_reg_set)
    sched_deps_info->note_reg_set (r);
}

static void
note_reg_clobber (int r)
{
  if (sched_deps_info->note_reg_clobber)
    sched_deps_info->note_reg_clobber (r);
}

static void
note_mem_dep (rtx m1, rtx m2, rtx_insn *e, ds_t ds)
{
  if (sched_deps_info->note_mem_dep)
    sched_deps_info->note_mem_dep (m1, m2, e, ds);
}

static void
note_dep (rtx_insn *e, ds_t ds)
{
  if (sched_deps_info->note_dep)
    sched_deps_info->note_dep (e, ds);
}

/* Return the reg_note corresponding to DS.  */
enum reg_note
ds_to_dt (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;
  else if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;
  else if (ds & DEP_ANTI)
    return REG_DEP_ANTI;
  else
    {
      gcc_assert (ds & DEP_CONTROL);
      return REG_DEP_CONTROL;
    }
}

/* Functions for computation of info needed for register pressure
   sensitive insn scheduling.  */


/* Allocate and return reg_use_data structure for REGNO and INSN.  */
static struct reg_use_data *
create_insn_reg_use (int regno, rtx_insn *insn)
{
  struct reg_use_data *use;

  use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
  use->regno = regno;
  use->insn = insn;
  use->next_insn_use = INSN_REG_USE_LIST (insn);
  INSN_REG_USE_LIST (insn) = use;
  return use;
}

/* Allocate reg_set_data structure for REGNO and INSN.  */
static void
create_insn_reg_set (int regno, rtx insn)
{
  struct reg_set_data *set;

  set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
  set->regno = regno;
  set->insn = insn;
  set->next_insn_set = INSN_REG_SET_LIST (insn);
  INSN_REG_SET_LIST (insn) = set;
}

/* Set up insn register uses for INSN and dependency context DEPS.  */
static void
setup_insn_reg_uses (struct deps_desc *deps, rtx_insn *insn)
{
  unsigned i;
  reg_set_iterator rsi;
  struct reg_use_data *use, *use2, *next;
  struct deps_reg *reg_last;

  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
    {
      if (i < FIRST_PSEUDO_REGISTER
	  && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
	continue;

      if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
	  && ! REGNO_REG_SET_P (reg_pending_sets, i)
	  && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
	/* Ignore use which is not dying.  */
	continue;

      use = create_insn_reg_use (i, insn);
      use->next_regno_use = use;
      reg_last = &deps->reg_last[i];

      /* Create the cycle list of uses.  */
      for (rtx_insn_list *list = reg_last->uses; list; list = list->next ())
	{
	  use2 = create_insn_reg_use (i, list->insn ());
	  next = use->next_regno_use;
	  use->next_regno_use = use2;
	  use2->next_regno_use = next;
	}
    }
}

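/* The next_regno_use pointers form a circular list per register, so from
   any one reg_use_data every other insn that uses the same register can
   be reached -- useful when deciding where the register actually dies.  */
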
/* Register pressure info for the currently processed insn.  */
static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];

/* Return TRUE if INSN has the use structure for REGNO.  */
static bool
insn_use_p (rtx insn, int regno)
{
  struct reg_use_data *use;

  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (use->regno == regno)
      return true;
  return false;
}

/* Update the register pressure info after birth of pseudo register REGNO
   in INSN.  Arguments CLOBBER_P and UNUSED_P say correspondingly that
   the register is in clobber or unused after the insn.  */
static void
mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
{
  int incr, new_incr;
  enum reg_class cl;

  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_pressure_class[regno];
  if (cl != NO_REGS)
    {
      incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      if (clobber_p)
	{
	  new_incr = reg_pressure_info[cl].clobber_increase + incr;
	  reg_pressure_info[cl].clobber_increase = new_incr;
	}
      else if (unused_p)
	{
	  new_incr = reg_pressure_info[cl].unused_set_increase + incr;
	  reg_pressure_info[cl].unused_set_increase = new_incr;
	}
      else
	{
	  new_incr = reg_pressure_info[cl].set_increase + incr;
	  reg_pressure_info[cl].set_increase = new_incr;
	  if (! insn_use_p (insn, regno))
	    reg_pressure_info[cl].change += incr;
	  create_insn_reg_set (regno, insn);
	}
      gcc_assert (new_incr < (1 << INCREASE_BITS));
    }
}

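/* E.g. a pseudo whose pressure class needs two hard registers in its mode
   contributes incr == 2 to set_increase; it also counts toward .change
   unless the insn itself has a (dying) use of the register, in which case
   the death recorded for that use offsets the birth.  */
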
/* Like mark_insn_pseudo_regno_birth except that NREGS says how many
   hard registers are involved in the birth.  */
static void
mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
			    bool clobber_p, bool unused_p)
{
  enum reg_class cl;
  int new_incr, last = regno + nregs;

  while (regno < last)
    {
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
	{
	  cl = sched_regno_pressure_class[regno];
	  if (cl != NO_REGS)
	    {
	      if (clobber_p)
		{
		  new_incr = reg_pressure_info[cl].clobber_increase + 1;
		  reg_pressure_info[cl].clobber_increase = new_incr;
		}
	      else if (unused_p)
		{
		  new_incr = reg_pressure_info[cl].unused_set_increase + 1;
		  reg_pressure_info[cl].unused_set_increase = new_incr;
		}
	      else
		{
		  new_incr = reg_pressure_info[cl].set_increase + 1;
		  reg_pressure_info[cl].set_increase = new_incr;
		  if (! insn_use_p (insn, regno))
		    reg_pressure_info[cl].change += 1;
		  create_insn_reg_set (regno, insn);
		}
	      gcc_assert (new_incr < (1 << INCREASE_BITS));
	    }
	}
      regno++;
    }
}

/* Update the register pressure info after birth of pseudo or hard
   register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say
   correspondingly that the register is in clobber or unused after the
   insn.  */
static void
mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (! REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    mark_insn_hard_regno_birth (insn, regno, REG_NREGS (reg),
				clobber_p, unused_p);
  else
    mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
}

/* Update the register pressure info after death of pseudo register
   REGNO.  */
static void
mark_pseudo_death (int regno)
{
  int incr;
  enum reg_class cl;

  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_pressure_class[regno];
  if (cl != NO_REGS)
    {
      incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      reg_pressure_info[cl].change -= incr;
    }
}

/* Like mark_pseudo_death except that NREGS says how many hard
   registers are involved in the death.  */
static void
mark_hard_regno_death (int regno, int nregs)
{
  enum reg_class cl;
  int last = regno + nregs;

  while (regno < last)
    {
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
	{
	  cl = sched_regno_pressure_class[regno];
	  if (cl != NO_REGS)
	    reg_pressure_info[cl].change -= 1;
	}
      regno++;
    }
}

/* Update the register pressure info after death of pseudo or hard
   register REG.  */
static void
mark_reg_death (rtx reg)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (! REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    mark_hard_regno_death (regno, REG_NREGS (reg));
  else
    mark_pseudo_death (regno);
}

/* Process SETTER of REG.  DATA is an insn containing the setter.  */
static void
mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
{
  if (setter != NULL_RTX && GET_CODE (setter) != SET)
    return;
  mark_insn_reg_birth
    ((rtx) data, reg, false,
     find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
}

/* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs.  */
static void
mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
{
  if (GET_CODE (setter) == CLOBBER)
    mark_insn_reg_birth ((rtx) data, reg, true, false);
}

/* Set up reg pressure info related to INSN.  */
static void
init_insn_reg_pressure_info (rtx_insn *insn)
{
  int i, len;
  enum reg_class cl;
  static struct reg_pressure_data *pressure_info;
  rtx link;

  gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);

  if (! INSN_P (insn))
    return;

  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      reg_pressure_info[cl].clobber_increase = 0;
      reg_pressure_info[cl].set_increase = 0;
      reg_pressure_info[cl].unused_set_increase = 0;
      reg_pressure_info[cl].change = 0;
    }

  note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);

  note_stores (PATTERN (insn), mark_insn_reg_store, insn);

#ifdef AUTO_INC_DEC
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
#endif

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_DEAD)
      mark_reg_death (XEXP (link, 0));

  len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
  pressure_info
    = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
  if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
    INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
						    * sizeof (int), 1);
  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      pressure_info[i].clobber_increase
	= reg_pressure_info[cl].clobber_increase;
      pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
      pressure_info[i].unused_set_increase
	= reg_pressure_info[cl].unused_set_increase;
      pressure_info[i].change = reg_pressure_info[cl].change;
    }
}

/* Internal variable for sched_analyze_[12] () functions.
   If it is nonzero, this means that sched_analyze_[12] looks
   at the most toplevel SET.  */
static bool can_start_lhs_rhs_p;

/* Extend reg info for the deps context DEPS given that
   we have just generated a register numbered REGNO.  */
static void
extend_deps_reg_info (struct deps_desc *deps, int regno)
{
  int max_regno = regno + 1;

  gcc_assert (!reload_completed);

  /* In a readonly context, it would not hurt to extend info,
     but it should not be needed.  */
  if (reload_completed && deps->readonly)
    {
      deps->max_reg = max_regno;
      return;
    }

  if (max_regno > deps->max_reg)
    {
      deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
				   max_regno);
      memset (&deps->reg_last[deps->max_reg],
	      0, (max_regno - deps->max_reg)
	      * sizeof (struct deps_reg));
      deps->max_reg = max_regno;
    }
}

/* Extends REG_INFO_P if needed.  */
void
maybe_extend_reg_info_p (void)
{
  /* Extend REG_INFO_P, if needed.  */
  if ((unsigned int)max_regno - 1 >= reg_info_p_size)
    {
      size_t new_reg_info_p_size = max_regno + 128;

      gcc_assert (!reload_completed && sel_sched_p ());

      reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
						    new_reg_info_p_size,
						    reg_info_p_size,
						    sizeof (*reg_info_p));
      reg_info_p_size = new_reg_info_p_size;
    }
}

/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
   The type of the reference is specified by REF and can be SET,
   CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */

static void
sched_analyze_reg (struct deps_desc *deps, int regno, machine_mode mode,
                   enum rtx_code ref, rtx_insn *insn)
{
  /* We could emit new pseudos in renaming.  Extend the reg structures.  */
  if (!reload_completed && sel_sched_p ()
      && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
    extend_deps_reg_info (deps, regno);

  maybe_extend_reg_info_p ();

  /* A hard reg in a wide mode may really be multiple registers.
     If so, mark all of them just like the first.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      int i = hard_regno_nregs[regno][mode];
      if (ref == SET)
        {
          while (--i >= 0)
            note_reg_set (regno + i);
        }
      else if (ref == USE)
        {
          while (--i >= 0)
            note_reg_use (regno + i);
        }
      else
        {
          while (--i >= 0)
            note_reg_clobber (regno + i);
        }
    }

  /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
     it does not reload.  Ignore these as they have served their
     purpose already.  */
  else if (regno >= deps->max_reg)
    {
      enum rtx_code code = GET_CODE (PATTERN (insn));
      gcc_assert (code == USE || code == CLOBBER);
    }

  else
    {
      if (ref == SET)
        note_reg_set (regno);
      else if (ref == USE)
        note_reg_use (regno);
      else
        note_reg_clobber (regno);

      /* Pseudos that are REG_EQUIV to something may be replaced
         by that during reloading.  We need only add dependencies for
         the address in the REG_EQUIV note.  */
      if (!reload_completed && get_reg_known_equiv_p (regno))
        {
          rtx t = get_reg_known_value (regno);
          if (MEM_P (t))
            sched_analyze_2 (deps, XEXP (t, 0), insn);
        }

      /* Don't let it cross a call after scheduling if it doesn't
         already cross one.  */
      if (REG_N_CALLS_CROSSED (regno) == 0)
        {
          if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
            deps->sched_before_next_call
              = alloc_INSN_LIST (insn, deps->sched_before_next_call);
          else
            add_dependence_list (insn, deps->last_function_call, 1,
                                 REG_DEP_ANTI, false);
        }
    }
}
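/* Worked example (illustrative, target-dependent): on a 32-bit target where
   hard_regno_nregs[0][DImode] is 2, a SET of (reg:DI 0) reaches the
   hard-register branch above and calls note_reg_set for both regno 0 and
   regno 1, so a later reader of either half sees the correct producer.
   The register number and mode here are hypothetical.  */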
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (struct deps_desc *deps, rtx x, rtx_insn *insn)
{
  rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);
  bool cslr_p = can_start_lhs_rhs_p;

  can_start_lhs_rhs_p = false;

  gcc_assert (dest);
  if (dest == 0)
    return;

  if (cslr_p && sched_deps_info->start_lhs)
    sched_deps_info->start_lhs (dest);

  if (GET_CODE (dest) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
          sched_analyze_1 (deps,
                           gen_rtx_CLOBBER (VOIDmode,
                                            XEXP (XVECEXP (dest, 0, i), 0)),
                           insn);

      if (cslr_p && sched_deps_info->finish_lhs)
        sched_deps_info->finish_lhs ();

      if (code == SET)
        {
          can_start_lhs_rhs_p = cslr_p;

          sched_analyze_2 (deps, SET_SRC (x), insn);

          can_start_lhs_rhs_p = false;
        }

      return;
    }

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT)
    {
      if (GET_CODE (dest) == STRICT_LOW_PART
          || GET_CODE (dest) == ZERO_EXTRACT
          || df_read_modify_subreg_p (dest))
        {
          /* These both read and modify the result.  We must handle
             them as writes to get proper dependencies for following
             instructions.  We must handle them as reads to get proper
             dependencies from this to previous instructions.
             Thus we need to call sched_analyze_2.  */

          sched_analyze_2 (deps, XEXP (dest, 0), insn);
        }
      if (GET_CODE (dest) == ZERO_EXTRACT)
        {
          /* The second and third arguments are values read by this insn.  */
          sched_analyze_2 (deps, XEXP (dest, 1), insn);
          sched_analyze_2 (deps, XEXP (dest, 2), insn);
        }
      dest = XEXP (dest, 0);
    }

  if (REG_P (dest))
    {
      int regno = REGNO (dest);
      machine_mode mode = GET_MODE (dest);

      sched_analyze_reg (deps, regno, mode, code, insn);

#ifdef STACK_REGS
      /* Treat all writes to a stack register as modifying the TOS.  */
      if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
        {
          /* Avoid analyzing the same register twice.  */
          if (regno != FIRST_STACK_REG)
            sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);

          add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
                               FIRST_STACK_REG);
        }
#endif
    }
  else if (MEM_P (dest))
    {
      /* Writing memory.  */
      rtx t = dest;

      if (sched_deps_info->use_cselib)
        {
          machine_mode address_mode = get_address_mode (dest);

          t = shallow_copy_rtx (dest);
          cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
                                   GET_MODE (t), insn);
          XEXP (t, 0)
            = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
                                                insn);
        }
      t = canon_rtx (t);

      /* Pending lists can't get larger with a readonly context.  */
      if (!deps->readonly
          && ((deps->pending_read_list_length + deps->pending_write_list_length)
              >= MAX_PENDING_LIST_LENGTH))
        {
          /* Flush all pending reads and writes to prevent the pending lists
             from getting any larger.  Insn scheduling runs too slowly when
             these lists get long.  When compiling GCC with itself,
             this flush occurs 8 times for sparc, and 10 times for m88k using
             the default value of 32.  */
          flush_pending_lists (deps, insn, false, true);
        }
      else
        {
          rtx_insn_list *pending;
          rtx_expr_list *pending_mem;

          pending = deps->pending_read_insns;
          pending_mem = deps->pending_read_mems;
          while (pending)
            {
              if (anti_dependence (pending_mem->element (), t)
                  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
                note_mem_dep (t, pending_mem->element (), pending->insn (),
                              DEP_ANTI);

              pending = pending->next ();
              pending_mem = pending_mem->next ();
            }

          pending = deps->pending_write_insns;
          pending_mem = deps->pending_write_mems;
          while (pending)
            {
              if (output_dependence (pending_mem->element (), t)
                  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
                note_mem_dep (t, pending_mem->element (),
                              pending->insn (),
                              DEP_OUTPUT);

              pending = pending->next ();
              pending_mem = pending_mem->next ();
            }

          add_dependence_list (insn, deps->last_pending_memory_flush, 1,
                               REG_DEP_ANTI, true);
          add_dependence_list (insn, deps->pending_jump_insns, 1,
                               REG_DEP_CONTROL, true);

          if (!deps->readonly)
            add_insn_mem_dependence (deps, false, insn, dest);
        }
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
    }

  if (cslr_p && sched_deps_info->finish_lhs)
    sched_deps_info->finish_lhs ();

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    {
      can_start_lhs_rhs_p = cslr_p;

      sched_analyze_2 (deps, SET_SRC (x), insn);

      can_start_lhs_rhs_p = false;
    }
}
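/* Illustrative note (conceptual restatement, not additional code in the
   pass): the list-length guard above implements a simple budget.  For each
   new memory write the decision is

     if (pending reads + pending writes >= MAX_PENDING_LIST_LENGTH)
       flush everything (the flush insn then stands in for every
       flushed access);
     else
       record the write on the pending lists as usual.

   After a flush, later insns depend only on the single flush point, trading
   dependence precision for linear-time analysis.  */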
/* Analyze the uses of memory and registers in rtx X in INSN.  */
static void
sched_analyze_2 (struct deps_desc *deps, rtx x, rtx_insn *insn)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;
  bool cslr_p = can_start_lhs_rhs_p;

  can_start_lhs_rhs_p = false;

  gcc_assert (x);
  if (x == 0)
    return;

  if (cslr_p && sched_deps_info->start_rhs)
    sched_deps_info->start_rhs (x);

  code = GET_CODE (x);

  switch (code)
    {
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  */
      if (cslr_p && sched_deps_info->finish_rhs)
        sched_deps_info->finish_rhs ();

      return;

#ifdef HAVE_cc0
    case CC0:
      /* User of CC0 depends on immediately preceding insn.  */
      SCHED_GROUP_P (insn) = 1;
      /* Don't move CC0 setter to another block (it can set up the
         same flag for previous CC0 users which is safe).  */
      CANT_MOVE (prev_nonnote_insn (insn)) = 1;

      if (cslr_p && sched_deps_info->finish_rhs)
        sched_deps_info->finish_rhs ();

      return;
#endif

    case REG:
      {
        int regno = REGNO (x);
        machine_mode mode = GET_MODE (x);

        sched_analyze_reg (deps, regno, mode, USE, insn);

#ifdef STACK_REGS
        /* Treat all reads of a stack register as modifying the TOS.  */
        if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
          {
            /* Avoid analyzing the same register twice.  */
            if (regno != FIRST_STACK_REG)
              sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
            sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
          }
#endif

        if (cslr_p && sched_deps_info->finish_rhs)
          sched_deps_info->finish_rhs ();

        return;
      }

    case MEM:
      {
        /* Reading memory.  */
        rtx_insn_list *u;
        rtx_insn_list *pending;
        rtx_expr_list *pending_mem;
        rtx t = x;

        if (sched_deps_info->use_cselib)
          {
            machine_mode address_mode = get_address_mode (t);

            t = shallow_copy_rtx (t);
            cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
                                     GET_MODE (t), insn);
            XEXP (t, 0)
              = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
                                                  insn);
          }

        if (!DEBUG_INSN_P (insn))
          {
            t = canon_rtx (t);
            pending = deps->pending_read_insns;
            pending_mem = deps->pending_read_mems;
            while (pending)
              {
                if (read_dependence (pending_mem->element (), t)
                    && ! sched_insns_conditions_mutex_p (insn,
                                                         pending->insn ()))
                  note_mem_dep (t, pending_mem->element (),
                                pending->insn (),
                                DEP_ANTI);

                pending = pending->next ();
                pending_mem = pending_mem->next ();
              }

            pending = deps->pending_write_insns;
            pending_mem = deps->pending_write_mems;
            while (pending)
              {
                if (true_dependence (pending_mem->element (), VOIDmode, t)
                    && ! sched_insns_conditions_mutex_p (insn,
                                                         pending->insn ()))
                  note_mem_dep (t, pending_mem->element (),
                                pending->insn (),
                                sched_deps_info->generate_spec_deps
                                ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);

                pending = pending->next ();
                pending_mem = pending_mem->next ();
              }

            for (u = deps->last_pending_memory_flush; u; u = u->next ())
              add_dependence (insn, u->insn (), REG_DEP_ANTI);

            for (u = deps->pending_jump_insns; u; u = u->next ())
              if (deps_may_trap_p (x))
                {
                  if ((sched_deps_info->generate_spec_deps)
                      && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
                    {
                      ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
                                              MAX_DEP_WEAK);

                      note_dep (u->insn (), ds);
                    }
                  else
                    add_dependence (insn, u->insn (), REG_DEP_CONTROL);
                }
          }

        /* Always add these dependencies to pending_reads, since
           this insn may be followed by a write.  */
        if (!deps->readonly)
          {
            if ((deps->pending_read_list_length
                 + deps->pending_write_list_length)
                >= MAX_PENDING_LIST_LENGTH
                && !DEBUG_INSN_P (insn))
              flush_pending_lists (deps, insn, true, true);
            add_insn_mem_dependence (deps, true, insn, x);
          }

        sched_analyze_2 (deps, XEXP (x, 0), insn);

        if (cslr_p && sched_deps_info->finish_rhs)
          sched_deps_info->finish_rhs ();

        return;
      }

    /* Force pending stores to memory in case a trap handler needs them.  */
    case TRAP_IF:
      flush_pending_lists (deps, insn, true, false);
      break;

    case PREFETCH:
      if (PREFETCH_SCHEDULE_BARRIER_P (x))
        reg_pending_barrier = TRUE_BARRIER;
      /* Prefetch insn contains addresses only.  So if the prefetch
         address has no registers, there will be no dependencies on
         the prefetch insn.  This is wrong from the correctness point
         of view as such a prefetch can be moved below a jump insn
         which usually generates MOVE_BARRIER preventing insns
         containing registers or memories from moving through the
         barrier.  It is also wrong from the performance point of
         view as a prefetch without dependencies will have a tendency
         to be issued later instead of earlier.  It is hard to
         generate accurate dependencies for prefetch insns as
         prefetch has only the start address, but it is better to have
         something than nothing.  */
      if (!deps->readonly)
        {
          rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
          if (sched_deps_info->use_cselib)
            cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
          add_insn_mem_dependence (deps, true, insn, x);
        }
      break;

    case UNSPEC_VOLATILE:
      flush_pending_lists (deps, insn, true, true);
      /* FALLTHRU */

    case ASM_OPERANDS:
    case ASM_INPUT:
      {
        /* Traditional and volatile asm instructions must be considered to use
           and clobber all hard registers, all pseudo-registers and all of
           memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

           Consider for instance a volatile asm that changes the fpu rounding
           mode.  An insn should not be moved across this even if it only uses
           pseudo-regs because it might give an incorrectly rounded result.  */
        if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
            && !DEBUG_INSN_P (insn))
          reg_pending_barrier = TRUE_BARRIER;

        /* For all ASM_OPERANDS, we must traverse the vector of input operands.
           We cannot just fall through here since then we would be confused
           by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
           traditional asms unlike their normal usage.  */

        if (code == ASM_OPERANDS)
          {
            for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
              sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);

            if (cslr_p && sched_deps_info->finish_rhs)
              sched_deps_info->finish_rhs ();

            return;
          }
        break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      /* These both read and modify the result.  We must handle them as writes
         to get proper dependencies for following instructions.  We must handle
         them as reads to get proper dependencies from this to previous
         instructions.  Thus we need to pass them to both sched_analyze_1
         and sched_analyze_2.  We must call sched_analyze_2 first in order
         to get the proper antecedent for the read.  */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_1 (deps, x, insn);

      if (cslr_p && sched_deps_info->finish_rhs)
        sched_deps_info->finish_rhs ();

      return;

    case POST_MODIFY:
    case PRE_MODIFY:
      /* op0 = op0 + op1 */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_2 (deps, XEXP (x, 1), insn);
      sched_analyze_1 (deps, x, insn);

      if (cslr_p && sched_deps_info->finish_rhs)
        sched_deps_info->finish_rhs ();

      return;

    default:
      break;
    }

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        sched_analyze_2 (deps, XEXP (x, i), insn);
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
    }

  if (cslr_p && sched_deps_info->finish_rhs)
    sched_deps_info->finish_rhs ();
}
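/* Illustrative sketch only (not part of the pass): the default arm above is
   the standard GET_RTX_FORMAT walk.  The same shape can be reused for any
   read-only rtx traversal; the function name is hypothetical.  */
#if 0
static void
walk_rtx_operands (rtx x)
{
  const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
  int i, j;

  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      walk_rtx_operands (XEXP (x, i));          /* single subexpression */
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)      /* vector of subexpressions */
        walk_rtx_operands (XVECEXP (x, i, j));
}
#endif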
/* Try to group two fusible insns together to prevent scheduler
   from scheduling them apart.  */

static void
sched_macro_fuse_insns (rtx_insn *insn)
{
  rtx_insn *prev;

  if (any_condjump_p (insn))
    {
      unsigned int condreg1, condreg2;
      rtx cc_reg_1;

      targetm.fixed_condition_code_regs (&condreg1, &condreg2);
      cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
      prev = prev_nonnote_nondebug_insn (insn);
      if (!reg_referenced_p (cc_reg_1, PATTERN (insn))
          || !prev
          || !modified_in_p (cc_reg_1, prev))
        return;
    }
  else
    {
      rtx insn_set = single_set (insn);

      prev = prev_nonnote_nondebug_insn (insn);
      if (!prev
          || !insn_set
          || !single_set (prev))
        return;
    }

  if (targetm.sched.macro_fusion_pair_p (prev, insn))
    SCHED_GROUP_P (insn) = 1;
}
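/* Worked example (illustrative): on a target whose
   targetm.sched.macro_fusion_pair_p accepts compare-and-branch pairs, a
   sequence such as

       (set (reg:CC flags) (compare:CC (reg:SI a) (const_int 0)))
       (set (pc) (if_then_else (ne (reg:CC flags) (const_int 0)) ...))

   gets SCHED_GROUP_P set on the jump, so the scheduler keeps the two insns
   adjacent and the hardware can fuse them.  The RTL above is schematic, not
   taken from any real target description.  */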
/* Analyze an INSN with pattern X to find all dependencies.  */
static void
sched_analyze_insn (struct deps_desc *deps, rtx x, rtx_insn *insn)
{
  RTX_CODE code = GET_CODE (x);
  rtx link;
  unsigned i;
  reg_set_iterator rsi;

  if (! reload_completed)
    {
      HARD_REG_SET temp;

      extract_insn (insn);
      preprocess_constraints (insn);
      ira_implicitly_set_insn_hard_regs (&temp);
      AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
      IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
    }

  can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
                         && code == SET);

  /* Group compare and branch insns for macro-fusion.  */
  if (targetm.sched.macro_fusion_p
      && targetm.sched.macro_fusion_p ())
    sched_macro_fuse_insns (insn);

  if (may_trap_p (x))
    /* Avoid moving trapping instructions across function calls that might
       not always return.  */
    add_dependence_list (insn, deps->last_function_call_may_noreturn,
                         1, REG_DEP_ANTI, true);

  /* We must avoid creating a situation in which two successors of the
     current block have different unwind info after scheduling.  If at any
     point the two paths re-join this leads to incorrect unwind info.  */
  /* ??? There are certain situations involving a forced frame pointer in
     which, with extra effort, we could fix up the unwind info at a later
     CFG join.  However, it seems better to notice these cases earlier
     during prologue generation and avoid marking the frame pointer setup
     as frame-related at all.  */
  if (RTX_FRAME_RELATED_P (insn))
    {
      /* Make sure prologue insn is scheduled before next jump.  */
      deps->sched_before_next_jump
        = alloc_INSN_LIST (insn, deps->sched_before_next_jump);

      /* Make sure epilogue insn is scheduled after preceding jumps.  */
      add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
                           true);
    }
  if (code == COND_EXEC)
    {
      sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);

      /* ??? Should be recording conditions so we reduce the number of
         false dependencies.  */
      x = COND_EXEC_CODE (x);
      code = GET_CODE (x);
    }
  if (code == SET || code == CLOBBER)
    {
      sched_analyze_1 (deps, x, insn);

      /* Bare clobber insns are used for letting life analysis, reg-stack
         and others know that a value is dead.  Depend on the last call
         instruction so that reg-stack won't get confused.  */
      if (code == CLOBBER)
        add_dependence_list (insn, deps->last_function_call, 1,
                             REG_DEP_OUTPUT, true);
    }
  else if (code == PARALLEL)
    {
      for (i = XVECLEN (x, 0); i--;)
        {
          rtx sub = XVECEXP (x, 0, i);
          code = GET_CODE (sub);

          if (code == COND_EXEC)
            {
              sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
              sub = COND_EXEC_CODE (sub);
              code = GET_CODE (sub);
            }
          if (code == SET || code == CLOBBER)
            sched_analyze_1 (deps, sub, insn);
          else
            sched_analyze_2 (deps, sub, insn);
        }
    }
  else
    sched_analyze_2 (deps, x, insn);
  /* Mark registers CLOBBERED or used by called function.  */
  if (CALL_P (insn))
    {
      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        {
          if (GET_CODE (XEXP (link, 0)) == CLOBBER)
            sched_analyze_1 (deps, XEXP (link, 0), insn);
          else if (GET_CODE (XEXP (link, 0)) != SET)
            sched_analyze_2 (deps, XEXP (link, 0), insn);
        }
      /* Don't schedule anything after a tail call, tail call needs
         to use at least all call-saved registers.  */
      if (SIBLING_CALL_P (insn))
        reg_pending_barrier = TRUE_BARRIER;
      else if (find_reg_note (insn, REG_SETJMP, NULL))
        reg_pending_barrier = MOVE_BARRIER;
    }
  if (JUMP_P (insn))
    {
      rtx_insn *next;
      next = next_nonnote_nondebug_insn (insn);
      if (next && BARRIER_P (next))
        reg_pending_barrier = MOVE_BARRIER;
      else
        {
          rtx_insn_list *pending;
          rtx_expr_list *pending_mem;

          if (sched_deps_info->compute_jump_reg_dependencies)
            {
              (*sched_deps_info->compute_jump_reg_dependencies)
                (insn, reg_pending_control_uses);

              /* Make latency of jump equal to 0 by using anti-dependence.  */
              EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
                {
                  struct deps_reg *reg_last = &deps->reg_last[i];
                  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
                                       false);
                  add_dependence_list (insn, reg_last->implicit_sets,
                                       0, REG_DEP_ANTI, false);
                  add_dependence_list (insn, reg_last->clobbers, 0,
                                       REG_DEP_ANTI, false);
                }
            }

          /* All memory writes and volatile reads must happen before the
             jump.  Non-volatile reads must happen before the jump iff
             the result is needed by the above register used mask.  */

          pending = deps->pending_write_insns;
          pending_mem = deps->pending_write_mems;
          while (pending)
            {
              if (! sched_insns_conditions_mutex_p (insn, pending->insn ()))
                add_dependence (insn, pending->insn (),
                                REG_DEP_OUTPUT);
              pending = pending->next ();
              pending_mem = pending_mem->next ();
            }

          pending = deps->pending_read_insns;
          pending_mem = deps->pending_read_mems;
          while (pending)
            {
              if (MEM_VOLATILE_P (pending_mem->element ())
                  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
                add_dependence (insn, pending->insn (),
                                REG_DEP_OUTPUT);
              pending = pending->next ();
              pending_mem = pending_mem->next ();
            }

          add_dependence_list (insn, deps->last_pending_memory_flush, 1,
                               REG_DEP_ANTI, true);
          add_dependence_list (insn, deps->pending_jump_insns, 1,
                               REG_DEP_ANTI, true);
        }
    }
  /* If this instruction can throw an exception, then moving it changes
     where block boundaries fall.  This is mighty confusing elsewhere.
     Therefore, prevent such an instruction from being moved.  Same for
     non-jump instructions that define block boundaries.
     ??? Unclear whether this is still necessary in EBB mode.  If not,
     add_branch_dependences should be adjusted for RGN mode instead.  */
  if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
      || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
    reg_pending_barrier = MOVE_BARRIER;

  if (sched_pressure != SCHED_PRESSURE_NONE)
    {
      setup_insn_reg_uses (deps, insn);
      init_insn_reg_pressure_info (insn);
    }
  /* Add register dependencies for insn.  */
  if (DEBUG_INSN_P (insn))
    {
      rtx_insn *prev = deps->last_debug_insn;
      rtx_insn_list *u;

      if (!deps->readonly)
        deps->last_debug_insn = insn;

      if (prev)
        add_dependence (insn, prev, REG_DEP_ANTI);

      add_dependence_list (insn, deps->last_function_call, 1,
                           REG_DEP_ANTI, false);

      if (!sel_sched_p ())
        for (u = deps->last_pending_memory_flush; u; u = u->next ())
          add_dependence (insn, u->insn (), REG_DEP_ANTI);

      EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
        {
          struct deps_reg *reg_last = &deps->reg_last[i];
          add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
          /* There's no point in making REG_DEP_CONTROL dependencies for
             debug insns.  */
          add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
                               false);

          if (!deps->readonly)
            reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
        }
      CLEAR_REG_SET (reg_pending_uses);

      /* Quite often, a debug insn will refer to stuff in the
         previous instruction, but the reason we want this
         dependency here is to make sure the scheduler doesn't
         gratuitously move a debug insn ahead.  This could dirty
         DF flags and cause additional analysis that wouldn't have
         occurred in compilation without debug insns, and such
         additional analysis can modify the generated code.  */
      prev = PREV_INSN (insn);

      if (prev && NONDEBUG_INSN_P (prev))
        add_dependence (insn, prev, REG_DEP_ANTI);
    }
  else
    {
      regset_head set_or_clobbered;

      EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
        {
          struct deps_reg *reg_last = &deps->reg_last[i];
          add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
          add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
                               false);
          add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
                               false);

          if (!deps->readonly)
            {
              reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
              reg_last->uses_length++;
            }
        }

      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
        if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
          {
            struct deps_reg *reg_last = &deps->reg_last[i];
            add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
            add_dependence_list (insn, reg_last->implicit_sets, 0,
                                 REG_DEP_ANTI, false);
            add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
                                 false);

            if (!deps->readonly)
              {
                reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
                reg_last->uses_length++;
              }
          }

      if (targetm.sched.exposed_pipeline)
        {
          INIT_REG_SET (&set_or_clobbered);
          bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
                      reg_pending_sets);
          EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              rtx list;
              for (list = reg_last->uses; list; list = XEXP (list, 1))
                {
                  rtx other = XEXP (list, 0);
                  if (INSN_CACHED_COND (other) != const_true_rtx
                      && refers_to_regno_p (i, INSN_CACHED_COND (other)))
                    INSN_CACHED_COND (other) = const_true_rtx;
                }
            }
        }

      /* If the current insn is conditional, we can't free any
         of the lists.  */
      if (sched_has_condition_p (insn))
        {
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
                                   false);
              add_dependence_list (insn, reg_last->implicit_sets, 0,
                                   REG_DEP_ANTI, false);
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
                                   false);
              add_dependence_list (insn, reg_last->control_uses, 0,
                                   REG_DEP_CONTROL, false);

              if (!deps->readonly)
                {
                  reg_last->clobbers
                    = alloc_INSN_LIST (insn, reg_last->clobbers);
                  reg_last->clobbers_length++;
                }
            }
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
                                   false);
              add_dependence_list (insn, reg_last->implicit_sets, 0,
                                   REG_DEP_ANTI, false);
              add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
                                   false);
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
                                   false);
              add_dependence_list (insn, reg_last->control_uses, 0,
                                   REG_DEP_CONTROL, false);

              if (!deps->readonly)
                reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
            }
        }
      else
        {
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              if (reg_last->uses_length >= MAX_PENDING_LIST_LENGTH
                  || reg_last->clobbers_length >= MAX_PENDING_LIST_LENGTH)
                {
                  add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
                                                REG_DEP_OUTPUT, false);
                  add_dependence_list_and_free (deps, insn,
                                                &reg_last->implicit_sets, 0,
                                                REG_DEP_ANTI, false);
                  add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
                                                REG_DEP_ANTI, false);
                  add_dependence_list_and_free (deps, insn,
                                                &reg_last->control_uses, 0,
                                                REG_DEP_ANTI, false);
                  add_dependence_list_and_free (deps, insn,
                                                &reg_last->clobbers, 0,
                                                REG_DEP_OUTPUT, false);

                  if (!deps->readonly)
                    {
                      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
                      reg_last->clobbers_length = 0;
                      reg_last->uses_length = 0;
                    }
                }
              else
                {
                  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
                                       false);
                  add_dependence_list (insn, reg_last->implicit_sets, 0,
                                       REG_DEP_ANTI, false);
                  add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
                                       false);
                  add_dependence_list (insn, reg_last->control_uses, 0,
                                       REG_DEP_CONTROL, false);
                }

              if (!deps->readonly)
                {
                  reg_last->clobbers_length++;
                  reg_last->clobbers
                    = alloc_INSN_LIST (insn, reg_last->clobbers);
                }
            }
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];

              add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
                                            REG_DEP_OUTPUT, false);
              add_dependence_list_and_free (deps, insn,
                                            &reg_last->implicit_sets,
                                            0, REG_DEP_ANTI, false);
              add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
                                            REG_DEP_OUTPUT, false);
              add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
                                            REG_DEP_ANTI, false);
              add_dependence_list (insn, reg_last->control_uses, 0,
                                   REG_DEP_CONTROL, false);

              if (!deps->readonly)
                {
                  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
                  reg_last->uses_length = 0;
                  reg_last->clobbers_length = 0;
                }
            }
        }
      if (!deps->readonly)
        {
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              reg_last->control_uses
                = alloc_INSN_LIST (insn, reg_last->control_uses);
            }
        }
    }
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
      {
        struct deps_reg *reg_last = &deps->reg_last[i];
        add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
        add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
        add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
        add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
                             false);

        if (!deps->readonly)
          reg_last->implicit_sets
            = alloc_INSN_LIST (insn, reg_last->implicit_sets);
      }

  if (!deps->readonly)
    {
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
        if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
            || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
          SET_REGNO_REG_SET (&deps->reg_last_in_use, i);

      /* Set up the pending barrier found.  */
      deps->last_reg_pending_barrier = reg_pending_barrier;
    }

  CLEAR_REG_SET (reg_pending_uses);
  CLEAR_REG_SET (reg_pending_clobbers);
  CLEAR_REG_SET (reg_pending_sets);
  CLEAR_REG_SET (reg_pending_control_uses);
  CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
  CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
  /* Add dependencies if a scheduling barrier was found.  */
  if (reg_pending_barrier)
    {
      /* In the case of barrier the most added dependencies are not
         real, so we use anti-dependence here.  */
      if (sched_has_condition_p (insn))
        {
          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
                                   true);
              add_dependence_list (insn, reg_last->sets, 0,
                                   reg_pending_barrier == TRUE_BARRIER
                                   ? REG_DEP_TRUE : REG_DEP_ANTI, true);
              add_dependence_list (insn, reg_last->implicit_sets, 0,
                                   REG_DEP_ANTI, true);
              add_dependence_list (insn, reg_last->clobbers, 0,
                                   reg_pending_barrier == TRUE_BARRIER
                                   ? REG_DEP_TRUE : REG_DEP_ANTI, true);
            }
        }
      else
        {
          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
                                            REG_DEP_ANTI, true);
              add_dependence_list_and_free (deps, insn,
                                            &reg_last->control_uses, 0,
                                            REG_DEP_CONTROL, true);
              add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
                                            reg_pending_barrier == TRUE_BARRIER
                                            ? REG_DEP_TRUE : REG_DEP_ANTI,
                                            true);
              add_dependence_list_and_free (deps, insn,
                                            &reg_last->implicit_sets, 0,
                                            REG_DEP_ANTI, true);
              add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
                                            reg_pending_barrier == TRUE_BARRIER
                                            ? REG_DEP_TRUE : REG_DEP_ANTI,
                                            true);

              if (!deps->readonly)
                {
                  reg_last->uses_length = 0;
                  reg_last->clobbers_length = 0;
                }
            }
        }

      if (!deps->readonly)
        for (i = 0; i < (unsigned) deps->max_reg; i++)
          {
            struct deps_reg *reg_last = &deps->reg_last[i];
            reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
            SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
          }

      /* Don't flush pending lists on speculative checks for
         selective scheduling.  */
      if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
        flush_pending_lists (deps, insn, true, true);

      reg_pending_barrier = NOT_A_BARRIER;
    }
  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice versa.

     We must avoid moving these insns for correctness on targets
     with small register classes, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */

  if (deps->in_post_call_group_p)
    {
      rtx tmp, set = single_set (insn);
      int src_regno, dest_regno;

      if (set == NULL)
        {
          if (DEBUG_INSN_P (insn))
            /* We don't want to mark debug insns as part of the same
               sched group.  We know they really aren't, but if we use
               debug insns to tell that a call group is over, we'll
               get different code if debug insns are not there and
               instructions that follow seem like they should be part
               of the call group.

               Also, if we did, chain_to_prev_insn would move the
               deps of the debug insn to the call insn, modifying
               non-debug post-dependency counts of the debug insn
               dependencies and otherwise messing with the scheduling
               order.

               Instead, let such debug insns be scheduled freely, but
               keep the call group open in case there are insns that
               should be part of it afterwards.  Since we grant debug
               insns higher priority than even sched group insns, it
               will all turn out all right.  */
            goto debug_dont_end_call_group;
          else
            goto end_call_group;
        }

      tmp = SET_DEST (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (REG_P (tmp))
        dest_regno = REGNO (tmp);
      else
        goto end_call_group;

      tmp = SET_SRC (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if ((GET_CODE (tmp) == PLUS
           || GET_CODE (tmp) == MINUS)
          && REG_P (XEXP (tmp, 0))
          && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
          && dest_regno == STACK_POINTER_REGNUM)
        src_regno = STACK_POINTER_REGNUM;
      else if (REG_P (tmp))
        src_regno = REGNO (tmp);
      else
        goto end_call_group;

      if (src_regno < FIRST_PSEUDO_REGISTER
          || dest_regno < FIRST_PSEUDO_REGISTER)
        {
          if (!deps->readonly
              && deps->in_post_call_group_p == post_call_initial)
            deps->in_post_call_group_p = post_call;

          if (!sel_sched_p () || sched_emulate_haifa_p)
            {
              SCHED_GROUP_P (insn) = 1;
              CANT_MOVE (insn) = 1;
            }
        }
      else
        {
        end_call_group:
          if (!deps->readonly)
            deps->in_post_call_group_p = not_post_call;
        }
    }

 debug_dont_end_call_group:
  if ((current_sched_info->flags & DO_SPECULATION)
      && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
    /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
       be speculated.  */
    {
      if (sel_sched_p ())
        sel_mark_hard_insn (insn);
      else
        {
          sd_iterator_def sd_it;
          dep_t dep;

          for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
               sd_iterator_cond (&sd_it, &dep);)
            change_spec_dep_to_hard (sd_it);
        }
    }

  /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
     honor their original ordering.  */
  if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
    {
      if (deps->last_args_size)
        add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
      deps->last_args_size = insn;
    }
}
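/* Illustrative note (conceptual restatement): reg_pending_barrier
   distinguishes two strengths.  A TRUE_BARRIER (e.g. a sibling call) turns
   the register lists into REG_DEP_TRUE dependencies, while a MOVE_BARRIER
   (e.g. setjmp) only needs REG_DEP_ANTI to pin ordering.  The barrier branch
   above reduces to

     kind = (reg_pending_barrier == TRUE_BARRIER
             ? REG_DEP_TRUE : REG_DEP_ANTI);

   which is exactly the ternary used for the sets and clobbers lists.  */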
/* Return TRUE if INSN might not always return normally (e.g. call exit,
   longjmp, loop forever, ...).  */
/* FIXME: Why can't this function just use flags_from_decl_or_type and
   test for ECF_NORETURN?  */
static bool
call_may_noreturn_p (rtx_insn *insn)
{
  rtx call;

  /* const or pure calls that aren't looping will always return.  */
  if (RTL_CONST_OR_PURE_CALL_P (insn)
      && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
    return false;

  call = get_call_rtx_from (insn);
  if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
    {
      rtx symbol = XEXP (XEXP (call, 0), 0);
      if (SYMBOL_REF_DECL (symbol)
          && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
        {
          if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
              == BUILT_IN_NORMAL)
            switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
              {
              case BUILT_IN_BCMP:
              case BUILT_IN_BCOPY:
              case BUILT_IN_BZERO:
              case BUILT_IN_INDEX:
              case BUILT_IN_MEMCHR:
              case BUILT_IN_MEMCMP:
              case BUILT_IN_MEMCPY:
              case BUILT_IN_MEMMOVE:
              case BUILT_IN_MEMPCPY:
              case BUILT_IN_MEMSET:
              case BUILT_IN_RINDEX:
              case BUILT_IN_STPCPY:
              case BUILT_IN_STPNCPY:
              case BUILT_IN_STRCAT:
              case BUILT_IN_STRCHR:
              case BUILT_IN_STRCMP:
              case BUILT_IN_STRCPY:
              case BUILT_IN_STRCSPN:
              case BUILT_IN_STRLEN:
              case BUILT_IN_STRNCAT:
              case BUILT_IN_STRNCMP:
              case BUILT_IN_STRNCPY:
              case BUILT_IN_STRPBRK:
              case BUILT_IN_STRRCHR:
              case BUILT_IN_STRSPN:
              case BUILT_IN_STRSTR:
                /* Assume certain string/memory builtins always return.  */
                return false;
              default:
                break;
              }
        }
    }

  /* For all other calls assume that they might not always return.  */
  return true;
}
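/* Worked examples (illustrative): a call to memcpy hits the builtin switch
   above and yields false (assumed to return); a direct call to exit is not
   in the list and yields true; an indirect call through a function pointer
   never reaches the SYMBOL_REF test and also yields true.  */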
/* Return true if INSN should be made dependent on the previous instruction
   group, and if all INSN's dependencies should be moved to the first
   instruction of that group.  */

static bool
chain_to_prev_insn_p (rtx_insn *insn)
{
  rtx prev, x;

  /* INSN forms a group with the previous instruction.  */
  if (SCHED_GROUP_P (insn))
    return true;

  /* If the previous instruction clobbers a register R and this one sets
     part of R, the clobber was added specifically to help us track the
     liveness of R.  There's no point scheduling the clobber and leaving
     INSN behind, especially if we move the clobber to another block.  */
  prev = prev_nonnote_nondebug_insn (insn);
  if (prev
      && INSN_P (prev)
      && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
      && GET_CODE (PATTERN (prev)) == CLOBBER)
    {
      x = XEXP (PATTERN (prev), 0);
      if (set_of (x, insn))
        return true;
    }

  return false;
}
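/* Worked example (illustrative): given the adjacent pair

       (clobber (reg:DI 0))
       (set (subreg:SI (reg:DI 0) 0) ...)

   the SET writes part of register 0, so set_of succeeds above and the two
   insns are chained; scheduling the clobber away from the set would defeat
   the liveness tracking it exists for.  The RTL is schematic.  */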
/* Analyze INSN with DEPS as a context.  */
void
deps_analyze_insn (struct deps_desc *deps, rtx_insn *insn)
{
  if (sched_deps_info->start_insn)
    sched_deps_info->start_insn (insn);

  /* Record the condition for this insn.  */
  if (NONDEBUG_INSN_P (insn))
    {
      rtx t;

      sched_get_condition_with_rev (insn, NULL);
      t = INSN_CACHED_COND (insn);
      INSN_COND_DEPS (insn) = NULL;
      if (reload_completed
          && (current_sched_info->flags & DO_PREDICATION)
          && COMPARISON_P (t)
          && REG_P (XEXP (t, 0))
          && CONSTANT_P (XEXP (t, 1)))
        {
          unsigned int regno;
          int nregs;
          rtx_insn_list *cond_deps = NULL;

          t = XEXP (t, 0);
          regno = REGNO (t);
          nregs = REG_NREGS (t);
          while (nregs-- > 0)
            {
              struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
              cond_deps = concat_INSN_LIST (reg_last->sets, cond_deps);
              cond_deps = concat_INSN_LIST (reg_last->clobbers, cond_deps);
              cond_deps = concat_INSN_LIST (reg_last->implicit_sets, cond_deps);
            }
          INSN_COND_DEPS (insn) = cond_deps;
        }
    }

  if (JUMP_P (insn))
    {
      /* Make each JUMP_INSN (but not a speculative check)
         a scheduling barrier for memory references.  */
      if (!deps->readonly
          && !(sel_sched_p ()
               && sel_insn_is_speculation_check (insn)))
        {
          /* Keep the list a reasonable size.  */
          if (deps->pending_flush_length++ >= MAX_PENDING_LIST_LENGTH)
            flush_pending_lists (deps, insn, true, true);
          else
            deps->pending_jump_insns
              = alloc_INSN_LIST (insn, deps->pending_jump_insns);
        }

      /* For each insn which shouldn't cross a jump, add a dependence.  */
      add_dependence_list_and_free (deps, insn,
                                    &deps->sched_before_next_jump, 1,
                                    REG_DEP_ANTI, true);

      sched_analyze_insn (deps, PATTERN (insn), insn);
    }
  else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
    {
      sched_analyze_insn (deps, PATTERN (insn), insn);
    }
  else if (CALL_P (insn))
    {
      int i;

      CANT_MOVE (insn) = 1;

      if (find_reg_note (insn, REG_SETJMP, NULL))
        {
          /* This is setjmp.  Assume that all registers, not just
             hard registers, may be clobbered by this call.  */
          reg_pending_barrier = MOVE_BARRIER;
        }
      else
        {
          for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
            /* A call may read and modify global register variables.  */
            if (global_regs[i])
              {
                SET_REGNO_REG_SET (reg_pending_sets, i);
                SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
              }
            /* Other call-clobbered hard regs may be clobbered.
               Since we only have a choice between 'might be clobbered'
               and 'definitely not clobbered', we must include all
               partly call-clobbered registers here.  */
            else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
                     || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
              SET_REGNO_REG_SET (reg_pending_clobbers, i);
            /* We don't know what set of fixed registers might be used
               by the function, but it is certain that the stack pointer
               is among them, but be conservative.  */
            else if (fixed_regs[i])
              SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
            /* The frame pointer is normally not used by the function
               itself, but by the debugger.  */
            /* ??? MIPS o32 is an exception.  It uses the frame pointer
               in the macro expansion of jal but does not represent this
               fact in the call_insn rtl.  */
            else if (i == FRAME_POINTER_REGNUM
                     || (i == HARD_FRAME_POINTER_REGNUM
                         && (! reload_completed || frame_pointer_needed)))
              SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
        }

      /* For each insn which shouldn't cross a call, add a dependence
         between that insn and this call insn.  */
      add_dependence_list_and_free (deps, insn,
                                    &deps->sched_before_next_call, 1,
                                    REG_DEP_ANTI, true);

      sched_analyze_insn (deps, PATTERN (insn), insn);

      /* If CALL would be in a sched group, then this will violate
         convention that sched group insns have dependencies only on the
         previous instruction.

         Of course one can say: "Hey!  What about head of the sched group?"
         And I will answer: "Basic principles (one dep per insn) are always
         the same."  */
      gcc_assert (!SCHED_GROUP_P (insn));

      /* In the absence of interprocedural alias analysis, we must flush
         all pending reads and writes, and start new dependencies starting
         from here.  But only flush writes for constant calls (which may
         be passed a pointer to something we haven't written yet).  */
      flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));

      if (!deps->readonly)
        {
          /* Remember the last function call for limiting lifetimes.  */
          free_INSN_LIST_list (&deps->last_function_call);
          deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

          if (call_may_noreturn_p (insn))
            {
              /* Remember the last function call that might not always return
                 normally for limiting moves of trapping insns.  */
              free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
              deps->last_function_call_may_noreturn
                = alloc_INSN_LIST (insn, NULL_RTX);
            }

          /* Before reload, begin a post-call group, so as to keep the
             lifetimes of hard registers correct.  */
          if (! reload_completed)
            deps->in_post_call_group_p = post_call;
        }
    }

  if (sched_deps_info->use_cselib)
    cselib_process_insn (insn);

  if (sched_deps_info->finish_insn)
    sched_deps_info->finish_insn ();

  /* Fixup the dependencies in the sched group.  */
  if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
      && chain_to_prev_insn_p (insn)
      && !sel_sched_p ())
    chain_to_prev_insn (insn);
}
/* Initialize DEPS for the new block beginning with HEAD.  */
void
deps_start_bb (struct deps_desc *deps, rtx_insn *head)
{
  gcc_assert (!deps->readonly);

  /* Before reload, if the previous block ended in a call, show that
     we are inside a post-call group, so as to keep the lifetimes of
     hard registers correct.  */
  if (! reload_completed && !LABEL_P (head))
    {
      rtx_insn *insn = prev_nonnote_nondebug_insn (head);

      if (insn && CALL_P (insn))
        deps->in_post_call_group_p = post_call_initial;
    }
}
/* Analyze every insn between HEAD and TAIL inclusive, creating backward
   dependencies for each insn.  */
void
sched_analyze (struct deps_desc *deps, rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn;

  if (sched_deps_info->use_cselib)
    cselib_init (CSELIB_RECORD_MEMORY);

  deps_start_bb (deps, head);

  for (insn = head;; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        {
          /* And initialize deps_lists.  */
          sd_init_insn (insn);
          /* Clean up SCHED_GROUP_P which may be set by last
             scheduler pass.  */
          if (SCHED_GROUP_P (insn))
            SCHED_GROUP_P (insn) = 0;
        }

      deps_analyze_insn (deps, insn);

      if (insn == tail)
        {
          if (sched_deps_info->use_cselib)
            cselib_finish ();
          return;
        }
    }
  gcc_unreachable ();
}
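/* Illustrative sketch only (not part of the pass): the minimal lifecycle a
   client of this module follows for one region.  The function name is
   hypothetical; HEAD and TAIL are assumed to delimit the insns to analyze.  */
#if 0
static void
analyze_one_region (rtx_insn *head, rtx_insn *tail)
{
  struct deps_desc deps;

  init_deps (&deps, false);             /* allocate reg_last eagerly */
  sched_analyze (&deps, head, tail);    /* build backward dependencies */
  free_deps (&deps);                    /* release all insn lists */
}
#endif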
/* Helper for sched_free_deps ().
   Delete INSN's (RESOLVED_P) backward dependencies.  */
static void
delete_dep_nodes_in_back_deps (rtx_insn *insn, bool resolved_p)
{
  sd_iterator_def sd_it;
  dep_t dep;
  sd_list_types_def types;

  if (resolved_p)
    types = SD_LIST_RES_BACK;
  else
    types = SD_LIST_BACK;

  for (sd_it = sd_iterator_start (insn, types);
       sd_iterator_cond (&sd_it, &dep);)
    {
      dep_link_t link = *sd_it.linkp;
      dep_node_t node = DEP_LINK_NODE (link);
      deps_list_t back_list;
      deps_list_t forw_list;

      get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
      remove_from_deps_list (link, back_list);
      delete_dep_node (node);
    }
}
/* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
   deps_lists.  */
void
sched_free_deps (rtx_insn *head, rtx_insn *tail, bool resolved_p)
{
  rtx_insn *insn;
  rtx_insn *next_tail = NEXT_INSN (tail);

  /* We make two passes since some insns may be scheduled before their
     dependencies are resolved.  */
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && INSN_LUID (insn) > 0)
      {
        /* Clear forward deps and leave the dep_nodes to the
           corresponding back_deps list.  */
        if (resolved_p)
          clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
        else
          clear_deps_list (INSN_FORW_DEPS (insn));
      }

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && INSN_LUID (insn) > 0)
      {
        /* Clear resolved back deps together with its dep_nodes.  */
        delete_dep_nodes_in_back_deps (insn, resolved_p);

        sd_finish_insn (insn);
      }
}
/* Initialize variables for region data dependence analysis.
   When LAZY_REG_LAST is true, do not allocate reg_last array
   of struct deps_desc immediately.  */

void
init_deps (struct deps_desc *deps, bool lazy_reg_last)
{
  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER
                 : max_reg_num ());

  deps->max_reg = max_reg;
  if (lazy_reg_last)
    deps->reg_last = NULL;
  else
    deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
  INIT_REG_SET (&deps->reg_last_in_use);

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_jump_insns = 0;
  deps->pending_read_list_length = 0;
  deps->pending_write_list_length = 0;
  deps->pending_flush_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->last_function_call_may_noreturn = 0;
  deps->sched_before_next_call = 0;
  deps->sched_before_next_jump = 0;
  deps->in_post_call_group_p = not_post_call;
  deps->last_debug_insn = 0;
  deps->last_args_size = 0;
  deps->last_reg_pending_barrier = NOT_A_BARRIER;
  deps->readonly = 0;
}
/* Init only reg_last field of DEPS, which was not allocated before as
   we inited DEPS lazily.  */
void
init_deps_reg_last (struct deps_desc *deps)
{
  gcc_assert (deps && deps->max_reg > 0);
  gcc_assert (deps->reg_last == NULL);

  deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
}
/* Free insn lists found in DEPS.  */
void
free_deps (struct deps_desc *deps)
{
  unsigned i;
  reg_set_iterator rsi;

  /* We set max_reg to 0 when this context was already freed.  */
  if (deps->max_reg == 0)
    {
      gcc_assert (deps->reg_last == NULL);
      return;
    }
  deps->max_reg = 0;

  free_INSN_LIST_list (&deps->pending_read_insns);
  free_EXPR_LIST_list (&deps->pending_read_mems);
  free_INSN_LIST_list (&deps->pending_write_insns);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  free_INSN_LIST_list (&deps->last_pending_memory_flush);

  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a testcase with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
        free_INSN_LIST_list (&reg_last->uses);
      if (reg_last->sets)
        free_INSN_LIST_list (&reg_last->sets);
      if (reg_last->implicit_sets)
        free_INSN_LIST_list (&reg_last->implicit_sets);
      if (reg_last->control_uses)
        free_INSN_LIST_list (&reg_last->control_uses);
      if (reg_last->clobbers)
        free_INSN_LIST_list (&reg_last->clobbers);
    }
  CLEAR_REG_SET (&deps->reg_last_in_use);

  /* As we initialize reg_last lazily, it is possible that we didn't allocate
     it at all.  */
  free (deps->reg_last);
  deps->reg_last = NULL;

  deps = NULL;
}
/* Remove INSN from dependence contexts DEPS.  */
void
remove_from_deps (struct deps_desc *deps, rtx_insn *insn)
{
  int removed;
  unsigned i;
  reg_set_iterator rsi;

  removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
                                               &deps->pending_read_mems);
  if (!DEBUG_INSN_P (insn))
    deps->pending_read_list_length -= removed;
  removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
                                               &deps->pending_write_mems);
  deps->pending_write_list_length -= removed;

  removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
  deps->pending_flush_length -= removed;
  removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
  deps->pending_flush_length -= removed;

  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      struct deps_reg *reg_last = &deps->reg_last[i];

      if (reg_last->uses)
        remove_from_dependence_list (insn, &reg_last->uses);
      if (reg_last->sets)
        remove_from_dependence_list (insn, &reg_last->sets);
      if (reg_last->implicit_sets)
        remove_from_dependence_list (insn, &reg_last->implicit_sets);
      if (reg_last->clobbers)
        remove_from_dependence_list (insn, &reg_last->clobbers);
      if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
          && !reg_last->clobbers)
        CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
    }

  if (CALL_P (insn))
    {
      remove_from_dependence_list (insn, &deps->last_function_call);
      remove_from_dependence_list (insn,
                                   &deps->last_function_call_may_noreturn);
    }
  remove_from_dependence_list (insn, &deps->sched_before_next_call);
}
/* Init deps data vector.  */
static void
init_deps_data_vector (void)
{
  int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
  if (reserve > 0 && ! h_d_i_d.space (reserve))
    h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2);
}
/* If it is profitable to use them, initialize or extend (depending on
   GLOBAL_P) dependency data.  */
void
sched_deps_init (bool global_p)
{
  /* Average number of insns in the basic block.
     '+ 1' is used to make it nonzero.  */
  int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;

  init_deps_data_vector ();

  /* We use another caching mechanism for selective scheduling, so
     we don't use this one.  */
  if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
    {
      /* ?!? We could save some memory by computing a per-region luid mapping
         which could reduce both the number of vectors in the cache and the
         size of each vector.  Instead we just avoid the cache entirely unless
         the average number of instructions in a basic block is very high.  See
         the comment before the declaration of true_dependency_cache for
         what we consider "very high".  */
      cache_size = 0;

      extend_dependency_caches (sched_max_luid, true);
    }

  if (global_p)
    {
      dl_pool = new pool_allocator<_deps_list> ("deps_list",
                                                /* Allocate lists for one
                                                   block at a time.  */
                                                insns_in_block);
      dn_pool = new pool_allocator<_dep_node> ("dep_node",
                                               /* Allocate nodes for one
                                                  block at a time.  We assume
                                                  that average insn has
                                                  5 deps.  */
                                               5 * insns_in_block);
    }
}
/* Create or extend (depending on CREATE_P) dependency caches to
   size N.  */
void
extend_dependency_caches (int n, bool create_p)
{
  if (create_p || true_dependency_cache)
    {
      int i, luid = cache_size + n;

      true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
                                          luid);
      output_dependency_cache = XRESIZEVEC (bitmap_head,
                                            output_dependency_cache, luid);
      anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
                                          luid);
      control_dependency_cache = XRESIZEVEC (bitmap_head,
                                             control_dependency_cache, luid);

      if (current_sched_info->flags & DO_SPECULATION)
        spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
                                            luid);

      for (i = cache_size; i < luid; i++)
        {
          bitmap_initialize (&true_dependency_cache[i], 0);
          bitmap_initialize (&output_dependency_cache[i], 0);
          bitmap_initialize (&anti_dependency_cache[i], 0);
          bitmap_initialize (&control_dependency_cache[i], 0);

          if (current_sched_info->flags & DO_SPECULATION)
            bitmap_initialize (&spec_dependency_cache[i], 0);
        }
      cache_size = luid;
    }
}
/* Finalize dependency information for the whole function.  */
void
sched_deps_finish (void)
{
  gcc_assert (deps_pools_are_empty_p ());
  dn_pool->release_if_empty ();
  dn_pool = NULL;
  dl_pool->release_if_empty ();
  dl_pool = NULL;

  h_d_i_d.release ();
  cache_size = 0;

  if (true_dependency_cache)
    {
      int i;

      for (i = 0; i < cache_size; i++)
        {
          bitmap_clear (&true_dependency_cache[i]);
          bitmap_clear (&output_dependency_cache[i]);
          bitmap_clear (&anti_dependency_cache[i]);
          bitmap_clear (&control_dependency_cache[i]);

          if (sched_deps_info->generate_spec_deps)
            bitmap_clear (&spec_dependency_cache[i]);
        }
      free (true_dependency_cache);
      true_dependency_cache = NULL;
      free (output_dependency_cache);
      output_dependency_cache = NULL;
      free (anti_dependency_cache);
      anti_dependency_cache = NULL;
      free (control_dependency_cache);
      control_dependency_cache = NULL;

      if (sched_deps_info->generate_spec_deps)
        {
          free (spec_dependency_cache);
          spec_dependency_cache = NULL;
        }
    }
}
/* Initialize some global variables needed by the dependency analysis
   code.  */

void
init_deps_global (void)
{
  CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
  CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
  reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
  reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
  reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
  reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
  reg_pending_barrier = NOT_A_BARRIER;

  if (!sel_sched_p () || sched_emulate_haifa_p)
    {
      sched_deps_info->start_insn = haifa_start_insn;
      sched_deps_info->finish_insn = haifa_finish_insn;

      sched_deps_info->note_reg_set = haifa_note_reg_set;
      sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
      sched_deps_info->note_reg_use = haifa_note_reg_use;

      sched_deps_info->note_mem_dep = haifa_note_mem_dep;
      sched_deps_info->note_dep = haifa_note_dep;
    }
}
/* Free everything used by the dependency analysis code.  */

void
finish_deps_global (void)
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
  FREE_REG_SET (reg_pending_uses);
  FREE_REG_SET (reg_pending_control_uses);
}
/* Estimate the weakness of dependence between MEM1 and MEM2.  */
dw_t
estimate_dep_weak (rtx mem1, rtx mem2)
{
  rtx r1, r2;

  if (mem1 == mem2)
    /* MEMs are the same - don't speculate.  */
    return MIN_DEP_WEAK;

  r1 = XEXP (mem1, 0);
  r2 = XEXP (mem2, 0);

  if (r1 == r2
      || (REG_P (r1) && REG_P (r2)
          && REGNO (r1) == REGNO (r2)))
    /* Again, MEMs are the same.  */
    return MIN_DEP_WEAK;
  else if ((REG_P (r1) && !REG_P (r2))
           || (!REG_P (r1) && REG_P (r2)))
    /* Different addressing modes - reason to be more speculative,
       than usual.  */
    return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
  else
    /* We can't say anything about the dependence.  */
    return UNCERTAIN_DEP_WEAK;
}
/* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
   This function can handle same INSN and ELEM (INSN == ELEM).
   It is a convenience wrapper.  */
static void
add_dependence_1 (rtx_insn *insn, rtx_insn *elem, enum reg_note dep_type)
{
  ds_t ds;
  bool internal;

  if (dep_type == REG_DEP_TRUE)
    ds = DEP_TRUE;
  else if (dep_type == REG_DEP_OUTPUT)
    ds = DEP_OUTPUT;
  else if (dep_type == REG_DEP_CONTROL)
    ds = DEP_CONTROL;
  else
    {
      gcc_assert (dep_type == REG_DEP_ANTI);
      ds = DEP_ANTI;
    }

  /* When add_dependence is called from inside sched-deps.c, we expect
     cur_insn to be non-null.  */
  internal = cur_insn != NULL;
  if (internal)
    gcc_assert (insn == cur_insn);
  else
    cur_insn = insn;

  note_dep (elem, ds);
  if (!internal)
    cur_insn = NULL;
}
/* Return weakness of speculative type TYPE in the dep_status DS,
   without checking to prevent ICEs on malformed input.  */
static dw_t
get_dep_weak_1 (ds_t ds, ds_t type)
{
  ds = ds & type;

  switch (type)
    {
    case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }

  return (dw_t) ds;
}
/* Return weakness of speculative type TYPE in the dep_status DS.  */
dw_t
get_dep_weak (ds_t ds, ds_t type)
{
  dw_t dw = get_dep_weak_1 (ds, type);

  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
  return dw;
}
/* Return the dep_status, which has the same parameters as DS, except for
   speculative type TYPE, that will have weakness DW.  */
ds_t
set_dep_weak (ds_t ds, ds_t type, dw_t dw)
{
  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);

  ds &= ~type;
  switch (type)
    {
    case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }

  return ds;
}
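/* Worked example (illustrative): each speculative type owns a bit-field
   inside ds_t at its *_BITS_OFFSET, wide enough for MIN..MAX_DEP_WEAK.
   For any weakness W in that range,

     ds = set_dep_weak (0, BEGIN_DATA, W);
     gcc_assert (get_dep_weak (ds, BEGIN_DATA) == W);

   round-trips, because set_dep_weak shifts W up by BEGIN_DATA_BITS_OFFSET
   and get_dep_weak_1 shifts it back down.  */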
/* Return the join of two dep_statuses DS1 and DS2.
   If MAX_P is true then choose the greater probability,
   otherwise multiply probabilities.
   This function assumes that both DS1 and DS2 contain speculative bits.  */
static ds_t
ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
{
  ds_t ds, t;

  gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));

  ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);

  t = FIRST_SPEC_TYPE;
  do
    {
      if ((ds1 & t) && !(ds2 & t))
        ds |= ds1 & t;
      else if (!(ds1 & t) && (ds2 & t))
        ds |= ds2 & t;
      else if ((ds1 & t) && (ds2 & t))
        {
          dw_t dw1 = get_dep_weak (ds1, t);
          dw_t dw2 = get_dep_weak (ds2, t);
          ds_t dw;

          if (!max_p)
            {
              dw = ((ds_t) dw1) * ((ds_t) dw2);
              dw /= MAX_DEP_WEAK;
              if (dw < MIN_DEP_WEAK)
                dw = MIN_DEP_WEAK;
            }
          else
            {
              if (dw1 >= dw2)
                dw = dw1;
              else
                dw = dw2;
            }

          ds = set_dep_weak (ds, t, (dw_t) dw);
        }

      if (t == LAST_SPEC_TYPE)
        break;
      t <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  return ds;
}
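
/* Worked example (illustrative, treating MAX_DEP_WEAK as the fixed-point
   "probability 1.0"): when both statuses carry the same speculative type
   with weaknesses dw1 and dw2, the merged weakness is

     max_p == false:  dw = dw1 * dw2 / MAX_DEP_WEAK   (both must succeed)
     max_p == true:   dw = MAX (dw1, dw2)

   so e.g. dw1 == dw2 == MAX_DEP_WEAK / 2 yields roughly MAX_DEP_WEAK / 4
   under multiplication, clamped below at MIN_DEP_WEAK.  */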
/* Return the join of two dep_statuses DS1 and DS2.
   This function assumes that both DS1 and DS2 contain speculative bits.  */
ds_t
ds_merge (ds_t ds1, ds_t ds2)
{
  return ds_merge_1 (ds1, ds2, false);
}
/* Return the join of two dep_statuses DS1 and DS2.  */
ds_t
ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
{
  ds_t new_status = ds | ds2;

  if (new_status & SPECULATIVE)
    {
      if ((ds && !(ds & SPECULATIVE))
          || (ds2 && !(ds2 & SPECULATIVE)))
        /* Then this dep can't be speculative.  */
        new_status &= ~SPECULATIVE;
      else
        {
          /* Both are speculative.  Merging probabilities.  */
          if (mem1)
            {
              dw_t dw;

              dw = estimate_dep_weak (mem1, mem2);
              ds = set_dep_weak (ds, BEGIN_DATA, dw);
            }

          if (!ds)
            new_status = ds2;
          else if (!ds2)
            new_status = ds;
          else
            new_status = ds_merge (ds2, ds);
        }
    }

  return new_status;
}
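
/* Summary of the three merge flavors (illustrative cross-reference):
   ds_merge multiplies the weaknesses of speculative bits present in both
   statuses; ds_full_merge additionally copes with non-speculative inputs,
   dropping SPECULATIVE when one side is a hard dependence, and can refine
   the BEGIN_DATA weakness from the MEMs via estimate_dep_weak;
   ds_max_merge (below) also accepts empty statuses and takes the maximum
   weakness instead of the product.  */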
/* Return the join of DS1 and DS2.  Use maximum instead of multiplying
   probabilities.  */
ds_t
ds_max_merge (ds_t ds1, ds_t ds2)
{
  if (ds1 == 0 && ds2 == 0)
    return 0;

  if (ds1 == 0 && ds2 != 0)
    return ds2;

  if (ds1 != 0 && ds2 == 0)
    return ds1;

  return ds_merge_1 (ds1, ds2, true);
}
/* Return the probability of speculation success for the speculation
   status DS.  */
dw_t
ds_weak (ds_t ds)
{
  ds_t res = 1, dt;
  int n = 0;

  dt = FIRST_SPEC_TYPE;
  do
    {
      if (ds & dt)
        {
          res *= (ds_t) get_dep_weak (ds, dt);
          n++;
        }

      if (dt == LAST_SPEC_TYPE)
        break;
      dt <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  gcc_assert (n);
  while (--n)
    res /= MAX_DEP_WEAK;

  if (res < MIN_DEP_WEAK)
    res = MIN_DEP_WEAK;

  gcc_assert (res <= MAX_DEP_WEAK);

  return (dw_t) res;
}
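
/* Worked example (illustrative): for a status carrying two speculative
   types at weaknesses w1 and w2, the loop above computes

     res = w1 * w2 / MAX_DEP_WEAK

   with one division per additional type, i.e. the fixed-point product of
   the individual success probabilities, clamped to the range
   [MIN_DEP_WEAK, MAX_DEP_WEAK].  */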
/* Return a dep status that contains all speculation types of DS.  */
ds_t
ds_get_speculation_types (ds_t ds)
{
  if (ds & BEGIN_DATA)
    ds |= BEGIN_DATA;
  if (ds & BE_IN_DATA)
    ds |= BE_IN_DATA;
  if (ds & BEGIN_CONTROL)
    ds |= BEGIN_CONTROL;
  if (ds & BE_IN_CONTROL)
    ds |= BE_IN_CONTROL;

  return ds & SPECULATIVE;
}
/* Return a dep status that contains maximal weakness for each speculation
   type present in DS.  */
ds_t
ds_get_max_dep_weak (ds_t ds)
{
  if (ds & BEGIN_DATA)
    ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
  if (ds & BE_IN_DATA)
    ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
  if (ds & BEGIN_CONTROL)
    ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
  if (ds & BE_IN_CONTROL)
    ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);

  return ds;
}
/* Dump information about the dependence status S.  */
static void
dump_ds (FILE *f, ds_t s)
{
  fprintf (f, "{");

  if (s & BEGIN_DATA)
    fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
  if (s & BE_IN_DATA)
    fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
  if (s & BEGIN_CONTROL)
    fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
  if (s & BE_IN_CONTROL)
    fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));

  if (s & HARD_DEP)
    fprintf (f, "HARD_DEP; ");

  if (s & DEP_TRUE)
    fprintf (f, "DEP_TRUE; ");
  if (s & DEP_OUTPUT)
    fprintf (f, "DEP_OUTPUT; ");
  if (s & DEP_ANTI)
    fprintf (f, "DEP_ANTI; ");
  if (s & DEP_CONTROL)
    fprintf (f, "DEP_CONTROL; ");

  fprintf (f, "}");
}

DEBUG_FUNCTION void
debug_ds (ds_t s)
{
  dump_ds (stderr, s);
  fprintf (stderr, "\n");
}
#ifdef ENABLE_CHECKING
/* Verify that dependence type and status are consistent.
   If RELAXED_P is true, then skip dep_weakness checks.  */
static void
check_dep (dep_t dep, bool relaxed_p)
{
  enum reg_note dt = DEP_TYPE (dep);
  ds_t ds = DEP_STATUS (dep);

  gcc_assert (DEP_PRO (dep) != DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      gcc_assert (ds == 0);
      return;
    }

  /* Check that dependence type contains the same bits as the status.  */
  if (dt == REG_DEP_TRUE)
    gcc_assert (ds & DEP_TRUE);
  else if (dt == REG_DEP_OUTPUT)
    gcc_assert ((ds & DEP_OUTPUT)
                && !(ds & DEP_TRUE));
  else if (dt == REG_DEP_ANTI)
    gcc_assert ((ds & DEP_ANTI)
                && !(ds & (DEP_OUTPUT | DEP_TRUE)));
  else
    gcc_assert (dt == REG_DEP_CONTROL
                && (ds & DEP_CONTROL)
                && !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));

  /* HARD_DEP can not appear in dep_status of a link.  */
  gcc_assert (!(ds & HARD_DEP));

  /* Check that dependence status is set correctly when speculation is not
     supported.  */
  if (!sched_deps_info->generate_spec_deps)
    gcc_assert (!(ds & SPECULATIVE));
  else if (ds & SPECULATIVE)
    {
      if (!relaxed_p)
        {
          ds_t type = FIRST_SPEC_TYPE;

          /* Check that dependence weakness is in proper range.  */
          do
            {
              if (ds & type)
                get_dep_weak (ds, type);

              if (type == LAST_SPEC_TYPE)
                break;
              type <<= SPEC_TYPE_SHIFT;
            }
          while (1);
        }

      if (ds & BEGIN_SPEC)
        {
          /* Only true dependence can be data speculative.  */
          if (ds & BEGIN_DATA)
            gcc_assert (ds & DEP_TRUE);

          /* Control dependencies in the insn scheduler are represented by
             anti-dependencies, therefore only anti dependence can be
             control speculative.  */
          if (ds & BEGIN_CONTROL)
            gcc_assert (ds & DEP_ANTI);
        }
      else
        /* Subsequent speculations should resolve true dependencies.  */
        gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);

      /* Check that true and anti dependencies can't have other speculative
         statuses.  */
      if (ds & DEP_TRUE)
        gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
      /* An output dependence can't be speculative at all.  */
      gcc_assert (!(ds & DEP_OUTPUT));
      if (ds & DEP_ANTI)
        gcc_assert (ds & BEGIN_CONTROL);
    }
}
#endif /* ENABLE_CHECKING */
/* The following code discovers opportunities to switch a memory reference
   and an increment by modifying the address.  We ensure that this is done
   only for dependencies that are only used to show a single register
   dependence (using DEP_NONREG and DEP_MULTIPLE), and so that every memory
   instruction involved is subject to only one dep that can cause a pattern
   change.

   When we discover a suitable dependency, we fill in the dep_replacement
   structure to show how to modify the memory reference.  */

/* Holds information about a pair of memory reference and register increment
   insns which depend on each other, but could possibly be interchanged.  */
struct mem_inc_info
{
  rtx_insn *inc_insn;
  rtx_insn *mem_insn;

  rtx *mem_loc;
  /* A register occurring in the memory address for which we wish to break
     the dependence.  This must be identical to the destination register of
     the increment.  */
  rtx mem_reg0;
  /* Any kind of index that is added to that register.  */
  rtx mem_index;
  /* The constant offset used in the memory address.  */
  HOST_WIDE_INT mem_constant;
  /* The constant added in the increment insn.  Negated if the increment is
     after the memory address.  */
  HOST_WIDE_INT inc_constant;
  /* The source register used in the increment.  May be different from
     mem_reg0 if the increment occurs before the memory address.  */
  rtx inc_input;
};
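
/* Illustrative example (a sketch, not taken from the sources): for the pair

     (set (reg:SI 100) (plus:SI (reg:SI 100) (const_int 4)))   <- inc insn
     (set (reg:SI 101) (mem:SI (reg:SI 100)))                  <- mem insn

   with the increment scheduled before the memory reference, mem_reg0 and
   inc_input are both (reg:SI 100), mem_constant is 0 and inc_constant is 4.
   Recording a replacement that rewrites the load as
   (mem:SI (plus:SI (reg:SI 100) (const_int 4))) makes it correct when
   moved above the increment, so the two insns no longer have to stay in
   their original order.  */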
/* Verify that the memory location described in MII can be replaced with
   one using NEW_ADDR.  Return the new memory reference or NULL_RTX.  The
   insn remains unchanged by this function.  */
static rtx
attempt_change (struct mem_inc_info *mii, rtx new_addr)
{
  rtx mem = *mii->mem_loc;

  /* Jump through a lot of hoops to keep the attributes up to date.  We
     do not want to call one of the change address variants that take
     an offset even though we know the offset in many cases.  These
     assume you are changing where the address is pointing by the
     offset.  */
  rtx new_mem = replace_equiv_address_nv (mem, new_addr);
  if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
    {
      if (sched_verbose >= 5)
        fprintf (sched_dump, "validation failure\n");
      return NULL_RTX;
    }

  /* Put back the old one.  */
  validate_change (mii->mem_insn, mii->mem_loc, mem, 0);

  return new_mem;
}
/* Return true if INSN is of a form "a = b op c" where a and b are
   regs.  op is + if c is a reg and +|- if c is a const.  Fill in
   information in MII about what is found.
   BEFORE_MEM indicates whether the increment is found before or after
   a corresponding memory reference.  */
static bool
parse_add_or_inc (struct mem_inc_info *mii, rtx_insn *insn, bool before_mem)
{
  rtx pat = single_set (insn);
  rtx src, cst;
  bool regs_equal;

  if (RTX_FRAME_RELATED_P (insn) || !pat)
    return false;

  /* Result must be single reg.  */
  if (!REG_P (SET_DEST (pat)))
    return false;

  if (GET_CODE (SET_SRC (pat)) != PLUS)
    return false;

  mii->inc_insn = insn;
  src = SET_SRC (pat);
  mii->inc_input = XEXP (src, 0);

  if (!REG_P (XEXP (src, 0)))
    return false;

  if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
    return false;

  cst = XEXP (src, 1);
  if (!CONST_INT_P (cst))
    return false;
  mii->inc_constant = INTVAL (cst);

  regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);

  if (!before_mem)
    {
      mii->inc_constant = -mii->inc_constant;
      if (!regs_equal)
        return false;
    }

  if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
    {
      /* Note that the sign has already been reversed for !before_mem.  */
      if (STACK_GROWS_DOWNWARD)
        return mii->inc_constant > 0;
      else
        return mii->inc_constant < 0;
    }

  return true;
}
/* Once a suitable mem reference has been found and the corresponding data
   in MII has been filled in, this function is called to find a suitable
   add or inc insn involving the register we found in the memory
   reference.  */
static bool
find_inc (struct mem_inc_info *mii, bool backwards)
{
  sd_iterator_def sd_it;
  dep_t dep;

  sd_it = sd_iterator_start (mii->mem_insn,
                             backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
  while (sd_iterator_cond (&sd_it, &dep))
    {
      dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
      rtx_insn *pro = DEP_PRO (dep);
      rtx_insn *con = DEP_CON (dep);
      rtx_insn *inc_cand = backwards ? pro : con;
      if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
        goto next;
      if (parse_add_or_inc (mii, inc_cand, backwards))
        {
          struct dep_replacement *desc;
          df_ref def;
          rtx newaddr, newmem;

          if (sched_verbose >= 5)
            fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
                     INSN_UID (mii->mem_insn), INSN_UID (inc_cand));

          /* Need to assure that none of the operands of the inc
             instruction are assigned to by the mem insn.  */
          FOR_EACH_INSN_DEF (def, mii->mem_insn)
            if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
                || reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
              {
                if (sched_verbose >= 5)
                  fprintf (sched_dump,
                           "inc conflicts with store failure.\n");
                goto next;
              }

          newaddr = mii->inc_input;
          if (mii->mem_index != NULL_RTX)
            newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
                                    mii->mem_index);
          newaddr = plus_constant (GET_MODE (newaddr), newaddr,
                                   mii->mem_constant + mii->inc_constant);
          newmem = attempt_change (mii, newaddr);
          if (newmem == NULL_RTX)
            goto next;
          if (sched_verbose >= 5)
            fprintf (sched_dump, "successful address replacement\n");
          desc = XCNEW (struct dep_replacement);
          DEP_REPLACE (dep) = desc;
          desc->loc = mii->mem_loc;
          desc->newval = newmem;
          desc->orig = *desc->loc;
          desc->insn = mii->mem_insn;
          move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
                         INSN_SPEC_BACK_DEPS (con));
          if (backwards)
            {
              FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
                add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
                                  REG_DEP_TRUE);
            }
          else
            {
              FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
                add_dependence_1 (DEP_CON (dep), mii->mem_insn,
                                  REG_DEP_ANTI);
            }
          return true;
        }
    next:
      sd_iterator_next (&sd_it);
    }
  return false;
}
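
/* Rationale sketch (an illustrative reading of the dependence copying
   above): once the replacement is recorded, the rewritten mem insn reads
   INC_INPUT directly.  In the backwards case it must therefore inherit
   the increment's own backward dependencies as true dependencies, since
   its address now relies on the increment's inputs; in the forwards case,
   insns that depend on the increment must stay ordered after the mem
   insn, which now reads the register they may modify, hence the anti
   dependencies.  */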
/* A recursive function that walks ADDRESS_OF_X to find memory references
   which could be modified during scheduling.  We call find_inc for each
   one we find that has a recognizable form.  MII holds information about
   the pair of memory/increment instructions.
   We ensure that every instruction with a memory reference (which will be
   the location of the replacement) is assigned at most one breakable
   dependency.  */
static bool
find_mem (struct mem_inc_info *mii, rtx *address_of_x)
{
  rtx x = *address_of_x;
  enum rtx_code code = GET_CODE (x);
  const char *const fmt = GET_RTX_FORMAT (code);
  int i;

  if (code == MEM)
    {
      rtx reg0 = XEXP (x, 0);

      mii->mem_loc = address_of_x;
      mii->mem_index = NULL_RTX;
      mii->mem_constant = 0;
      if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
        {
          mii->mem_constant = INTVAL (XEXP (reg0, 1));
          reg0 = XEXP (reg0, 0);
        }
      if (GET_CODE (reg0) == PLUS)
        {
          mii->mem_index = XEXP (reg0, 1);
          reg0 = XEXP (reg0, 0);
        }
      if (REG_P (reg0))
        {
          df_ref use;
          int occurrences = 0;

          /* Make sure this reg appears only once in this insn.  Can't use
             count_occurrences since that only works for pseudos.  */
          FOR_EACH_INSN_USE (use, mii->mem_insn)
            if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
              if (++occurrences > 1)
                {
                  if (sched_verbose >= 5)
                    fprintf (sched_dump, "mem count failure\n");
                  return false;
                }

          mii->mem_reg0 = reg0;
          return find_inc (mii, true) || find_inc (mii, false);
        }
      return false;
    }

  if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
    {
      /* If REG occurs inside a MEM used in a bit-field reference,
         that is unacceptable.  */
      return false;
    }

  /* Time for some deep diving.  */
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (find_mem (mii, &XEXP (x, i)))
            return true;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (find_mem (mii, &XVECEXP (x, i, j)))
              return true;
        }
    }

  return false;
}
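
/* Decomposition example (illustrative): for an address such as

     (mem:SI (plus:SI (plus:SI (reg:SI 100) (reg:SI 101))
                      (const_int 8)))

   the code above records mem_constant == 8, mem_index == (reg:SI 101) and
   mem_reg0 == (reg:SI 100), then tries find_inc in both directions.  */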
/* Examine the instructions between HEAD and TAIL and try to find
   dependencies that can be broken by modifying one of the patterns.  */
void
find_modifiable_mems (rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn, *next_tail = NEXT_INSN (tail);
  int success_in_block = 0;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      struct mem_inc_info mii;

      if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
        continue;

      mii.mem_insn = insn;
      if (find_mem (&mii, &PATTERN (insn)))
        success_in_block++;
    }
  if (success_in_block && sched_verbose >= 5)
    fprintf (sched_dump, "%d candidates for address modification found.\n",
             success_in_block);
}

#endif /* INSN_SCHEDULING */