1 /* Instruction scheduling pass. This file computes dependencies between
2 instructions.
3 Copyright (C) 1992-2013 Free Software Foundation, Inc.
4 Contributed by Michael Tiemann (tiemann@cygnus.com). Enhanced by,
5 and currently maintained by, Jim Wilson (wilson@cygnus.com).
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not, see
21 <http://www.gnu.org/licenses/>. */
22 \f
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "diagnostic-core.h"
28 #include "rtl.h"
29 #include "tree.h" /* FIXME: Used by call_may_noreturn_p. */
30 #include "tm_p.h"
31 #include "hard-reg-set.h"
32 #include "regs.h"
33 #include "function.h"
34 #include "flags.h"
35 #include "insn-config.h"
36 #include "insn-attr.h"
37 #include "except.h"
38 #include "recog.h"
39 #include "emit-rtl.h"
40 #include "sched-int.h"
41 #include "params.h"
42 #include "cselib.h"
43 #include "ira.h"
44 #include "target.h"
45
46 #ifdef INSN_SCHEDULING
47
48 #ifdef ENABLE_CHECKING
49 #define CHECK (true)
50 #else
51 #define CHECK (false)
52 #endif
53
54 /* Holds current parameters for the dependency analyzer. */
55 struct sched_deps_info_def *sched_deps_info;
56
57 /* The data is specific to the Haifa scheduler. */
58 vec<haifa_deps_insn_data_def>
59 h_d_i_d = vNULL;
60
61 /* Return the major type present in DS. */
62 enum reg_note
63 ds_to_dk (ds_t ds)
64 {
65 if (ds & DEP_TRUE)
66 return REG_DEP_TRUE;
67
68 if (ds & DEP_OUTPUT)
69 return REG_DEP_OUTPUT;
70
71 if (ds & DEP_CONTROL)
72 return REG_DEP_CONTROL;
73
74 gcc_assert (ds & DEP_ANTI);
75
76 return REG_DEP_ANTI;
77 }
78
79 /* Return equivalent dep_status. */
80 ds_t
81 dk_to_ds (enum reg_note dk)
82 {
83 switch (dk)
84 {
85 case REG_DEP_TRUE:
86 return DEP_TRUE;
87
88 case REG_DEP_OUTPUT:
89 return DEP_OUTPUT;
90
91 case REG_DEP_CONTROL:
92 return DEP_CONTROL;
93
94 default:
95 gcc_assert (dk == REG_DEP_ANTI);
96 return DEP_ANTI;
97 }
98 }
99
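/* Illustrative only: dk_to_ds and ds_to_dk round-trip for every plain
   dependence kind, since dk_to_ds sets exactly the DEP_* bit that
   ds_to_dk tests for.  E.g.:

     gcc_checking_assert (ds_to_dk (dk_to_ds (REG_DEP_OUTPUT))
                          == REG_DEP_OUTPUT);
*/
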
100 /* Functions to operate on the dependence information container, dep_t. */
101
102 /* Init DEP with the arguments. */
103 void
104 init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note type, ds_t ds)
105 {
106 DEP_PRO (dep) = pro;
107 DEP_CON (dep) = con;
108 DEP_TYPE (dep) = type;
109 DEP_STATUS (dep) = ds;
110 DEP_COST (dep) = UNKNOWN_DEP_COST;
111 DEP_NONREG (dep) = 0;
112 DEP_MULTIPLE (dep) = 0;
113 DEP_REPLACE (dep) = NULL;
114 }
115
116 /* Init DEP with the arguments.
117 While most of the scheduler (including targets) only needs the major type
118 of the dependency, it is convenient to hide the full dep_status from them. */
119 void
120 init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
121 {
122 ds_t ds;
123
124 if ((current_sched_info->flags & USE_DEPS_LIST))
125 ds = dk_to_ds (kind);
126 else
127 ds = 0;
128
129 init_dep_1 (dep, pro, con, kind, ds);
130 }
131
132 /* Make a copy of FROM in TO. */
133 static void
134 copy_dep (dep_t to, dep_t from)
135 {
136 memcpy (to, from, sizeof (*to));
137 }
138
139 static void dump_ds (FILE *, ds_t);
140
141 /* Define flags for dump_dep (). */
142
143 /* Dump producer of the dependence. */
144 #define DUMP_DEP_PRO (2)
145
146 /* Dump consumer of the dependence. */
147 #define DUMP_DEP_CON (4)
148
149 /* Dump type of the dependence. */
150 #define DUMP_DEP_TYPE (8)
151
152 /* Dump status of the dependence. */
153 #define DUMP_DEP_STATUS (16)
154
155 /* Dump all information about the dependence. */
156 #define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
157 |DUMP_DEP_STATUS)
158
159 /* Dump DEP to DUMP.
160 FLAGS is a bit mask specifying what information about DEP needs
161 to be printed.
162 If FLAGS has the very first bit set, then dump all information about DEP
163 and propagate this bit into the callee dump functions. */
164 static void
165 dump_dep (FILE *dump, dep_t dep, int flags)
166 {
167 if (flags & 1)
168 flags |= DUMP_DEP_ALL;
169
170 fprintf (dump, "<");
171
172 if (flags & DUMP_DEP_PRO)
173 fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));
174
175 if (flags & DUMP_DEP_CON)
176 fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));
177
178 if (flags & DUMP_DEP_TYPE)
179 {
180 char t;
181 enum reg_note type = DEP_TYPE (dep);
182
183 switch (type)
184 {
185 case REG_DEP_TRUE:
186 t = 't';
187 break;
188
189 case REG_DEP_OUTPUT:
190 t = 'o';
191 break;
192
193 case REG_DEP_CONTROL:
194 t = 'c';
195 break;
196
197 case REG_DEP_ANTI:
198 t = 'a';
199 break;
200
201 default:
202 gcc_unreachable ();
203 break;
204 }
205
206 fprintf (dump, "%c; ", t);
207 }
208
209 if (flags & DUMP_DEP_STATUS)
210 {
211 if (current_sched_info->flags & USE_DEPS_LIST)
212 dump_ds (dump, DEP_STATUS (dep));
213 }
214
215 fprintf (dump, ">");
216 }
217
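/* For example, dumping a true dependence of insn 30 on insn 29 with all
   flags set prints "<29; 30; t; ...>": producer UID, consumer UID, a
   one-letter type, and (only when USE_DEPS_LIST is set) the status.
   The UIDs here are made up for illustration.  */
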
218 /* Default flags for dump_dep (). */
219 static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);
220
221 /* Dump all fields of DEP to STDERR. */
222 void
223 sd_debug_dep (dep_t dep)
224 {
225 dump_dep (stderr, dep, 1);
226 fprintf (stderr, "\n");
227 }
228
229 /* Determine whether DEP is a dependency link of a non-debug insn on a
230 debug insn. */
231
232 static inline bool
233 depl_on_debug_p (dep_link_t dep)
234 {
235 return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
236 && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
237 }
238
239 /* Functions to operate on a single link from the dependency lists -
240 dep_link_t. */
241
242 /* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
243 PREV_NEXTP. */
244 static void
245 attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
246 {
247 dep_link_t next = *prev_nextp;
248
249 gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
250 && DEP_LINK_NEXT (l) == NULL);
251
252 /* Init node being inserted. */
253 DEP_LINK_PREV_NEXTP (l) = prev_nextp;
254 DEP_LINK_NEXT (l) = next;
255
256 /* Fix next node. */
257 if (next != NULL)
258 {
259 gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);
260
261 DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
262 }
263
264 /* Fix prev node. */
265 *prev_nextp = l;
266 }
267
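/* A sketch of the resulting structure: each list is singly linked forward
   through DEP_LINK_NEXT, while DEP_LINK_PREV_NEXTP points back at the
   previous node's NEXT field (or at the list head for the first node):

     first ---> [A] --next--> [B] --next--> NULL
       ^          ^
       |          +-- B's prev_nextp == &DEP_LINK_NEXT (A)
       +------------- A's prev_nextp == &first

   This is what lets detach_dep_link unlink a node without knowing which
   list it is on.  */
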
268 /* Add dep_link LINK to deps_list L. */
269 static void
270 add_to_deps_list (dep_link_t link, deps_list_t l)
271 {
272 attach_dep_link (link, &DEPS_LIST_FIRST (l));
273
274 /* Don't count debug deps. */
275 if (!depl_on_debug_p (link))
276 ++DEPS_LIST_N_LINKS (l);
277 }
278
279 /* Detach dep_link L from the list. */
280 static void
281 detach_dep_link (dep_link_t l)
282 {
283 dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
284 dep_link_t next = DEP_LINK_NEXT (l);
285
286 *prev_nextp = next;
287
288 if (next != NULL)
289 DEP_LINK_PREV_NEXTP (next) = prev_nextp;
290
291 DEP_LINK_PREV_NEXTP (l) = NULL;
292 DEP_LINK_NEXT (l) = NULL;
293 }
294
295 /* Remove link LINK from list LIST. */
296 static void
297 remove_from_deps_list (dep_link_t link, deps_list_t list)
298 {
299 detach_dep_link (link);
300
301 /* Don't count debug deps. */
302 if (!depl_on_debug_p (link))
303 --DEPS_LIST_N_LINKS (list);
304 }
305
306 /* Move link LINK from list FROM to list TO. */
307 static void
308 move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
309 {
310 remove_from_deps_list (link, from);
311 add_to_deps_list (link, to);
312 }
313
314 /* Return true if LINK is not attached to any list. */
315 static bool
316 dep_link_is_detached_p (dep_link_t link)
317 {
318 return DEP_LINK_PREV_NEXTP (link) == NULL;
319 }
320
321 /* Pool to hold all dependency nodes (dep_node_t). */
322 static alloc_pool dn_pool;
323
324 /* Number of dep_nodes out there. */
325 static int dn_pool_diff = 0;
326
327 /* Create a dep_node. */
328 static dep_node_t
329 create_dep_node (void)
330 {
331 dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
332 dep_link_t back = DEP_NODE_BACK (n);
333 dep_link_t forw = DEP_NODE_FORW (n);
334
335 DEP_LINK_NODE (back) = n;
336 DEP_LINK_NEXT (back) = NULL;
337 DEP_LINK_PREV_NEXTP (back) = NULL;
338
339 DEP_LINK_NODE (forw) = n;
340 DEP_LINK_NEXT (forw) = NULL;
341 DEP_LINK_PREV_NEXTP (forw) = NULL;
342
343 ++dn_pool_diff;
344
345 return n;
346 }
347
348 /* Delete dep_node N. N must not be connected to any deps_list. */
349 static void
350 delete_dep_node (dep_node_t n)
351 {
352 gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
353 && dep_link_is_detached_p (DEP_NODE_FORW (n)));
354
355 XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));
356
357 --dn_pool_diff;
358
359 pool_free (dn_pool, n);
360 }
361
362 /* Pool to hold dependencies lists (deps_list_t). */
363 static alloc_pool dl_pool;
364
365 /* Number of deps_lists out there. */
366 static int dl_pool_diff = 0;
367
368 /* Functions to operate on dependence lists - deps_list_t. */
369
370 /* Return true if list L is empty. */
371 static bool
372 deps_list_empty_p (deps_list_t l)
373 {
374 return DEPS_LIST_N_LINKS (l) == 0;
375 }
376
377 /* Create a new deps_list. */
378 static deps_list_t
379 create_deps_list (void)
380 {
381 deps_list_t l = (deps_list_t) pool_alloc (dl_pool);
382
383 DEPS_LIST_FIRST (l) = NULL;
384 DEPS_LIST_N_LINKS (l) = 0;
385
386 ++dl_pool_diff;
387 return l;
388 }
389
390 /* Free deps_list L. */
391 static void
392 free_deps_list (deps_list_t l)
393 {
394 gcc_assert (deps_list_empty_p (l));
395
396 --dl_pool_diff;
397
398 pool_free (dl_pool, l);
399 }
400
401 /* Return true if there are no dep_nodes or deps_lists out there.
402 After the region is scheduled all the dependency nodes and lists
403 should [generally] be returned to the pool. */
404 bool
405 deps_pools_are_empty_p (void)
406 {
407 return dn_pool_diff == 0 && dl_pool_diff == 0;
408 }
409
410 /* Remove all elements from L. */
411 static void
412 clear_deps_list (deps_list_t l)
413 {
414 do
415 {
416 dep_link_t link = DEPS_LIST_FIRST (l);
417
418 if (link == NULL)
419 break;
420
421 remove_from_deps_list (link, l);
422 }
423 while (1);
424 }
425
426 /* Decide whether a dependency should be treated as a hard or a speculative
427 dependency. */
428 static bool
429 dep_spec_p (dep_t dep)
430 {
431 if (current_sched_info->flags & DO_SPECULATION)
432 {
433 if (DEP_STATUS (dep) & SPECULATIVE)
434 return true;
435 }
436 if (current_sched_info->flags & DO_PREDICATION)
437 {
438 if (DEP_TYPE (dep) == REG_DEP_CONTROL)
439 return true;
440 }
441 if (DEP_REPLACE (dep) != NULL)
442 return true;
443 return false;
444 }
445
446 static regset reg_pending_sets;
447 static regset reg_pending_clobbers;
448 static regset reg_pending_uses;
449 static regset reg_pending_control_uses;
450 static enum reg_pending_barrier_mode reg_pending_barrier;
451
452 /* Hard registers implicitly clobbered or used (or that may be implicitly
453 clobbered or used) by the currently analyzed insn. For example, an
454 insn may have a constraint that allows only one register class. Even
455 if there is currently no hard register in the insn, that particular
456 hard register will appear in the insn after the reload pass because
457 the constraint requires it. */
458 static HARD_REG_SET implicit_reg_pending_clobbers;
459 static HARD_REG_SET implicit_reg_pending_uses;
460
461 /* To speed up the test for duplicate dependency links we keep a
462 record of dependencies created by add_dependence when the average
463 number of instructions in a basic block is very large.
464 
465 Studies have shown that there are typically around 5 instructions between
466 branches for typical C code. So we can make a guess that the average
467 basic block is approximately 5 instructions long; we will choose 100X
468 the average size as a very large basic block.
469 
470 Each insn has associated bitmaps for its dependencies. Each bitmap
471 has enough entries to represent a dependency on any other insn in
472 the insn chain. If the bitmap for the true dependency cache is
473 allocated, then the remaining ones are allocated as well. */
474 static bitmap_head *true_dependency_cache = NULL;
475 static bitmap_head *output_dependency_cache = NULL;
476 static bitmap_head *anti_dependency_cache = NULL;
477 static bitmap_head *control_dependency_cache = NULL;
478 static bitmap_head *spec_dependency_cache = NULL;
479 static int cache_size;
480
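/* Concretely, a true dependence of the insn with luid J on the insn with
   luid I is cached as bit I of true_dependency_cache[J]; the other caches
   are indexed the same way (see ask_dependency_caches and
   set_dependency_caches below).  */
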
481 /* True if we should mark added dependencies as non-register deps. */
482 static bool mark_as_hard;
483
484 static int deps_may_trap_p (const_rtx);
485 static void add_dependence_1 (rtx, rtx, enum reg_note);
486 static void add_dependence_list (rtx, rtx, int, enum reg_note, bool);
487 static void add_dependence_list_and_free (struct deps_desc *, rtx,
488 rtx *, int, enum reg_note, bool);
489 static void delete_all_dependences (rtx);
490 static void chain_to_prev_insn (rtx);
491
492 static void flush_pending_lists (struct deps_desc *, rtx, int, int);
493 static void sched_analyze_1 (struct deps_desc *, rtx, rtx);
494 static void sched_analyze_2 (struct deps_desc *, rtx, rtx);
495 static void sched_analyze_insn (struct deps_desc *, rtx, rtx);
496
497 static bool sched_has_condition_p (const_rtx);
498 static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
499
500 static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
501 rtx, rtx);
502 static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);
503
504 #ifdef ENABLE_CHECKING
505 static void check_dep (dep_t, bool);
506 #endif
507 \f
508 /* Return nonzero if a load of the memory reference MEM can cause a trap. */
509
510 static int
511 deps_may_trap_p (const_rtx mem)
512 {
513 const_rtx addr = XEXP (mem, 0);
514
515 if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
516 {
517 const_rtx t = get_reg_known_value (REGNO (addr));
518 if (t)
519 addr = t;
520 }
521 return rtx_addr_can_trap_p (addr);
522 }
523 \f
524
525 /* Find the condition under which INSN is executed. If REV is not NULL,
526 it is set to TRUE when the returned comparison should be reversed
527 to get the actual condition. */
528 static rtx
529 sched_get_condition_with_rev_uncached (const_rtx insn, bool *rev)
530 {
531 rtx pat = PATTERN (insn);
532 rtx src;
533
534 if (rev)
535 *rev = false;
536
537 if (GET_CODE (pat) == COND_EXEC)
538 return COND_EXEC_TEST (pat);
539
540 if (!any_condjump_p (insn) || !onlyjump_p (insn))
541 return 0;
542
543 src = SET_SRC (pc_set (insn));
544
545 if (XEXP (src, 2) == pc_rtx)
546 return XEXP (src, 0);
547 else if (XEXP (src, 1) == pc_rtx)
548 {
549 rtx cond = XEXP (src, 0);
550 enum rtx_code revcode = reversed_comparison_code (cond, insn);
551
552 if (revcode == UNKNOWN)
553 return 0;
554
555 if (rev)
556 *rev = true;
557 return cond;
558 }
559
560 return 0;
561 }
562
563 /* Return the condition under which INSN does not execute (i.e. the
564 not-taken condition for a conditional branch), or NULL if we cannot
565 find such a condition. The caller should make a copy of the condition
566 before using it. */
567 rtx
568 sched_get_reverse_condition_uncached (const_rtx insn)
569 {
570 bool rev;
571 rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
572 if (cond == NULL_RTX)
573 return cond;
574 if (!rev)
575 {
576 enum rtx_code revcode = reversed_comparison_code (cond, insn);
577 cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
578 XEXP (cond, 0),
579 XEXP (cond, 1));
580 }
581 return cond;
582 }
583
584 /* Caching variant of sched_get_condition_with_rev_uncached.
585 We only do actual work the first time we come here for an insn; the
586 results are cached in INSN_CACHED_COND and INSN_REVERSE_COND. */
587 static rtx
588 sched_get_condition_with_rev (const_rtx insn, bool *rev)
589 {
590 bool tmp;
591
592 if (INSN_LUID (insn) == 0)
593 return sched_get_condition_with_rev_uncached (insn, rev);
594
595 if (INSN_CACHED_COND (insn) == const_true_rtx)
596 return NULL_RTX;
597
598 if (INSN_CACHED_COND (insn) != NULL_RTX)
599 {
600 if (rev)
601 *rev = INSN_REVERSE_COND (insn);
602 return INSN_CACHED_COND (insn);
603 }
604
605 INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
606 INSN_REVERSE_COND (insn) = tmp;
607
608 if (INSN_CACHED_COND (insn) == NULL_RTX)
609 {
610 INSN_CACHED_COND (insn) = const_true_rtx;
611 return NULL_RTX;
612 }
613
614 if (rev)
615 *rev = INSN_REVERSE_COND (insn);
616 return INSN_CACHED_COND (insn);
617 }
618
619 /* True when we can find a condition under which INSN is executed. */
620 static bool
621 sched_has_condition_p (const_rtx insn)
622 {
623 return !! sched_get_condition_with_rev (insn, NULL);
624 }
625
626 \f
627
628 /* Return nonzero if conditions COND1 and COND2 can never be both true. */
629 static int
630 conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
631 {
632 if (COMPARISON_P (cond1)
633 && COMPARISON_P (cond2)
634 && GET_CODE (cond1) ==
635 (rev1==rev2
636 ? reversed_comparison_code (cond2, NULL)
637 : GET_CODE (cond2))
638 && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
639 && XEXP (cond1, 1) == XEXP (cond2, 1))
640 return 1;
641 return 0;
642 }
643
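/* For instance, (eq (reg:CC 17) (const_int 0)) and
   (ne (reg:CC 17) (const_int 0)) can never both be true, so insns
   predicated on them are mutually exclusive.  The register number is
   only illustrative.  */
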
644 /* Return true if INSN1 and INSN2 can never depend on one another because
645 the conditions under which they are executed are mutually exclusive. */
646 bool
647 sched_insns_conditions_mutex_p (const_rtx insn1, const_rtx insn2)
648 {
649 rtx cond1, cond2;
650 bool rev1 = false, rev2 = false;
651
652 /* df doesn't handle conditional lifetimes entirely correctly;
653 calls mess up the conditional lifetimes. */
654 if (!CALL_P (insn1) && !CALL_P (insn2))
655 {
656 cond1 = sched_get_condition_with_rev (insn1, &rev1);
657 cond2 = sched_get_condition_with_rev (insn2, &rev2);
658 if (cond1 && cond2
659 && conditions_mutex_p (cond1, cond2, rev1, rev2)
660 /* Make sure first instruction doesn't affect condition of second
661 instruction if switched. */
662 && !modified_in_p (cond1, insn2)
663 /* Make sure second instruction doesn't affect condition of first
664 instruction if switched. */
665 && !modified_in_p (cond2, insn1))
666 return true;
667 }
668 return false;
669 }
670 \f
671
672 /* Return true if INSN can potentially be speculated with type DS. */
673 bool
674 sched_insn_is_legitimate_for_speculation_p (const_rtx insn, ds_t ds)
675 {
676 if (HAS_INTERNAL_DEP (insn))
677 return false;
678
679 if (!NONJUMP_INSN_P (insn))
680 return false;
681
682 if (SCHED_GROUP_P (insn))
683 return false;
684
685 if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX (insn)))
686 return false;
687
688 if (side_effects_p (PATTERN (insn)))
689 return false;
690
691 if (ds & BE_IN_SPEC)
692 /* The following instructions, which depend on a speculatively scheduled
693 instruction, cannot be speculatively scheduled along with it. */
694 {
695 if (may_trap_or_fault_p (PATTERN (insn)))
696 /* If instruction might fault, it cannot be speculatively scheduled.
697 For control speculation it's obvious why and for data speculation
698 it's because the insn might get wrong input if speculation
699 wasn't successful. */
700 return false;
701
702 if ((ds & BE_IN_DATA)
703 && sched_has_condition_p (insn))
704 /* If this is a predicated instruction, then it cannot be
705 speculatively scheduled. See PR35659. */
706 return false;
707 }
708
709 return true;
710 }
711
712 /* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
713 initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
714 and remove the type of the list returned [through LIST_PTR] from TYPES_PTR.
715 This function is used to switch sd_iterator to the next list.
716 !!! For internal use only. Might consider moving it to sched-int.h. */
717 void
718 sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
719 deps_list_t *list_ptr, bool *resolved_p_ptr)
720 {
721 sd_list_types_def types = *types_ptr;
722
723 if (types & SD_LIST_HARD_BACK)
724 {
725 *list_ptr = INSN_HARD_BACK_DEPS (insn);
726 *resolved_p_ptr = false;
727 *types_ptr = types & ~SD_LIST_HARD_BACK;
728 }
729 else if (types & SD_LIST_SPEC_BACK)
730 {
731 *list_ptr = INSN_SPEC_BACK_DEPS (insn);
732 *resolved_p_ptr = false;
733 *types_ptr = types & ~SD_LIST_SPEC_BACK;
734 }
735 else if (types & SD_LIST_FORW)
736 {
737 *list_ptr = INSN_FORW_DEPS (insn);
738 *resolved_p_ptr = false;
739 *types_ptr = types & ~SD_LIST_FORW;
740 }
741 else if (types & SD_LIST_RES_BACK)
742 {
743 *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
744 *resolved_p_ptr = true;
745 *types_ptr = types & ~SD_LIST_RES_BACK;
746 }
747 else if (types & SD_LIST_RES_FORW)
748 {
749 *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
750 *resolved_p_ptr = true;
751 *types_ptr = types & ~SD_LIST_RES_FORW;
752 }
753 else
754 {
755 *list_ptr = NULL;
756 *resolved_p_ptr = false;
757 *types_ptr = SD_LIST_NONE;
758 }
759 }
760
761 /* Return the total size of INSN's lists defined by LIST_TYPES. */
762 int
763 sd_lists_size (const_rtx insn, sd_list_types_def list_types)
764 {
765 int size = 0;
766
767 while (list_types != SD_LIST_NONE)
768 {
769 deps_list_t list;
770 bool resolved_p;
771
772 sd_next_list (insn, &list_types, &list, &resolved_p);
773 if (list)
774 size += DEPS_LIST_N_LINKS (list);
775 }
776
777 return size;
778 }
779
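/* E.g. sd_lists_size (insn, SD_LIST_BACK) yields the number of backward
   dependencies of INSN, since SD_LIST_BACK combines SD_LIST_HARD_BACK
   and SD_LIST_SPEC_BACK (see sched-int.h).  */
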
780 /* Return true if INSN's lists defined by LIST_TYPES are all empty. */
781
782 bool
783 sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
784 {
785 while (list_types != SD_LIST_NONE)
786 {
787 deps_list_t list;
788 bool resolved_p;
789
790 sd_next_list (insn, &list_types, &list, &resolved_p);
791 if (!deps_list_empty_p (list))
792 return false;
793 }
794
795 return true;
796 }
797
798 /* Initialize data for INSN. */
799 void
800 sd_init_insn (rtx insn)
801 {
802 INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
803 INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
804 INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
805 INSN_FORW_DEPS (insn) = create_deps_list ();
806 INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();
807
808 /* ??? It would be nice to allocate dependency caches here. */
809 }
810
811 /* Free data for INSN. */
812 void
813 sd_finish_insn (rtx insn)
814 {
815 /* ??? It would be nice to deallocate dependency caches here. */
816
817 free_deps_list (INSN_HARD_BACK_DEPS (insn));
818 INSN_HARD_BACK_DEPS (insn) = NULL;
819
820 free_deps_list (INSN_SPEC_BACK_DEPS (insn));
821 INSN_SPEC_BACK_DEPS (insn) = NULL;
822
823 free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
824 INSN_RESOLVED_BACK_DEPS (insn) = NULL;
825
826 free_deps_list (INSN_FORW_DEPS (insn));
827 INSN_FORW_DEPS (insn) = NULL;
828
829 free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
830 INSN_RESOLVED_FORW_DEPS (insn) = NULL;
831 }
832
833 /* Find a dependency between producer PRO and consumer CON.
834 Search through resolved dependency lists if RESOLVED_P is true.
835 If no such dependency is found return NULL,
836 otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
837 with an iterator pointing to it. */
838 static dep_t
839 sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
840 sd_iterator_def *sd_it_ptr)
841 {
842 sd_list_types_def pro_list_type;
843 sd_list_types_def con_list_type;
844 sd_iterator_def sd_it;
845 dep_t dep;
846 bool found_p = false;
847
848 if (resolved_p)
849 {
850 pro_list_type = SD_LIST_RES_FORW;
851 con_list_type = SD_LIST_RES_BACK;
852 }
853 else
854 {
855 pro_list_type = SD_LIST_FORW;
856 con_list_type = SD_LIST_BACK;
857 }
858
859 /* Walk through either back list of INSN or forw list of ELEM
860 depending on which one is shorter. */
861 if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
862 {
863 /* Find the dep_link with producer PRO in consumer's back_deps. */
864 FOR_EACH_DEP (con, con_list_type, sd_it, dep)
865 if (DEP_PRO (dep) == pro)
866 {
867 found_p = true;
868 break;
869 }
870 }
871 else
872 {
873 /* Find the dep_link with consumer CON in producer's forw_deps. */
874 FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
875 if (DEP_CON (dep) == con)
876 {
877 found_p = true;
878 break;
879 }
880 }
881
882 if (found_p)
883 {
884 if (sd_it_ptr != NULL)
885 *sd_it_ptr = sd_it;
886
887 return dep;
888 }
889
890 return NULL;
891 }
892
893 /* Find a dependency between producer PRO and consumer CON.
894 Use the dependency cache [if available] to check if the dependency
895 is present at all. Search through resolved dependency lists if
896 RESOLVED_P is true. Return the dependency, or NULL if none found. */
897 dep_t
898 sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
899 {
900 if (true_dependency_cache != NULL)
901 /* Avoiding the list walk below can cut compile times dramatically
902 for some code. */
903 {
904 int elem_luid = INSN_LUID (pro);
905 int insn_luid = INSN_LUID (con);
906
907 if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
908 && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
909 && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
910 && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
911 return NULL;
912 }
913
914 return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
915 }
916
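/* A typical use, sketching what add_or_update_dep_1 effectively does:
   look the dependence up before deciding whether to create a new one.

     dep_t dep = sd_find_dep_between (pro, con, false);
     if (dep == NULL)
       ... create a fresh dependence ...
     else
       ... merge the new information into DEP ...
*/
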
917 /* Add or update a dependence described by DEP.
918 MEM1 and MEM2, if non-null, correspond to memory locations in case of
919 data speculation.
920
921 The function returns a value indicating if an old entry has been changed
922 or a new entry has been added to insn's backward deps.
923
924 This function merely checks if producer and consumer are the same insn
925 and doesn't create a dep in this case. Actual manipulation of
926 dependence data structures is performed in add_or_update_dep_1. */
927 static enum DEPS_ADJUST_RESULT
928 maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
929 {
930 rtx elem = DEP_PRO (dep);
931 rtx insn = DEP_CON (dep);
932
933 gcc_assert (INSN_P (insn) && INSN_P (elem));
934
935 /* Don't depend an insn on itself. */
936 if (insn == elem)
937 {
938 if (sched_deps_info->generate_spec_deps)
939 /* INSN has an internal dependence, which we can't overcome. */
940 HAS_INTERNAL_DEP (insn) = 1;
941
942 return DEP_NODEP;
943 }
944
945 return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
946 }
947
948 /* Ask dependency caches what needs to be done for dependence DEP.
949 Return DEP_CREATED if new dependence should be created and there is no
950 need to try to find one searching the dependencies lists.
951 Return DEP_PRESENT if there already is a dependence described by DEP and
952 hence nothing is to be done.
953 Return DEP_CHANGED if there already is a dependence, but it should be
954 updated to incorporate additional information from DEP. */
955 static enum DEPS_ADJUST_RESULT
956 ask_dependency_caches (dep_t dep)
957 {
958 int elem_luid = INSN_LUID (DEP_PRO (dep));
959 int insn_luid = INSN_LUID (DEP_CON (dep));
960
961 gcc_assert (true_dependency_cache != NULL
962 && output_dependency_cache != NULL
963 && anti_dependency_cache != NULL
964 && control_dependency_cache != NULL);
965
966 if (!(current_sched_info->flags & USE_DEPS_LIST))
967 {
968 enum reg_note present_dep_type;
969
970 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
971 present_dep_type = REG_DEP_TRUE;
972 else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
973 present_dep_type = REG_DEP_OUTPUT;
974 else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
975 present_dep_type = REG_DEP_ANTI;
976 else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
977 present_dep_type = REG_DEP_CONTROL;
978 else
979 /* There is no existing dep so it should be created. */
980 return DEP_CREATED;
981
982 if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
983 /* DEP does not add anything to the existing dependence. */
984 return DEP_PRESENT;
985 }
986 else
987 {
988 ds_t present_dep_types = 0;
989
990 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
991 present_dep_types |= DEP_TRUE;
992 if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
993 present_dep_types |= DEP_OUTPUT;
994 if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
995 present_dep_types |= DEP_ANTI;
996 if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
997 present_dep_types |= DEP_CONTROL;
998
999 if (present_dep_types == 0)
1000 /* There is no existing dep so it should be created. */
1001 return DEP_CREATED;
1002
1003 if (!(current_sched_info->flags & DO_SPECULATION)
1004 || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
1005 {
1006 if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
1007 == present_dep_types)
1008 /* DEP does not add anything to the existing dependence. */
1009 return DEP_PRESENT;
1010 }
1011 else
1012 {
1013 /* Only true dependencies can be data speculative and
1014 only anti dependencies can be control speculative. */
1015 gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
1016 == present_dep_types);
1017
1018 /* if (DEP is SPECULATIVE) then
1019 ..we should update DEP_STATUS
1020 else
1021 ..we should reset existing dep to non-speculative. */
1022 }
1023 }
1024
1025 return DEP_CHANGED;
1026 }
1027
1028 /* Set dependency caches according to DEP. */
1029 static void
1030 set_dependency_caches (dep_t dep)
1031 {
1032 int elem_luid = INSN_LUID (DEP_PRO (dep));
1033 int insn_luid = INSN_LUID (DEP_CON (dep));
1034
1035 if (!(current_sched_info->flags & USE_DEPS_LIST))
1036 {
1037 switch (DEP_TYPE (dep))
1038 {
1039 case REG_DEP_TRUE:
1040 bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
1041 break;
1042
1043 case REG_DEP_OUTPUT:
1044 bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
1045 break;
1046
1047 case REG_DEP_ANTI:
1048 bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
1049 break;
1050
1051 case REG_DEP_CONTROL:
1052 bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
1053 break;
1054
1055 default:
1056 gcc_unreachable ();
1057 }
1058 }
1059 else
1060 {
1061 ds_t ds = DEP_STATUS (dep);
1062
1063 if (ds & DEP_TRUE)
1064 bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
1065 if (ds & DEP_OUTPUT)
1066 bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
1067 if (ds & DEP_ANTI)
1068 bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
1069 if (ds & DEP_CONTROL)
1070 bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
1071
1072 if (ds & SPECULATIVE)
1073 {
1074 gcc_assert (current_sched_info->flags & DO_SPECULATION);
1075 bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
1076 }
1077 }
1078 }
1079
1080 /* The type of dependence DEP has changed from OLD_TYPE. Update the
1081 dependency caches accordingly. */
1082 static void
1083 update_dependency_caches (dep_t dep, enum reg_note old_type)
1084 {
1085 int elem_luid = INSN_LUID (DEP_PRO (dep));
1086 int insn_luid = INSN_LUID (DEP_CON (dep));
1087
1088 /* Clear corresponding cache entry because type of the link
1089 may have changed. Keep them if we use_deps_list. */
1090 if (!(current_sched_info->flags & USE_DEPS_LIST))
1091 {
1092 switch (old_type)
1093 {
1094 case REG_DEP_OUTPUT:
1095 bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1096 break;
1097
1098 case REG_DEP_ANTI:
1099 bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1100 break;
1101
1102 case REG_DEP_CONTROL:
1103 bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
1104 break;
1105
1106 default:
1107 gcc_unreachable ();
1108 }
1109 }
1110
1111 set_dependency_caches (dep);
1112 }
1113
1114 /* Convert a dependence pointed to by SD_IT to be non-speculative. */
1115 static void
1116 change_spec_dep_to_hard (sd_iterator_def sd_it)
1117 {
1118 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1119 dep_link_t link = DEP_NODE_BACK (node);
1120 dep_t dep = DEP_NODE_DEP (node);
1121 rtx elem = DEP_PRO (dep);
1122 rtx insn = DEP_CON (dep);
1123
1124 move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));
1125
1126 DEP_STATUS (dep) &= ~SPECULATIVE;
1127
1128 if (true_dependency_cache != NULL)
1129 /* Clear the cache entry. */
1130 bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
1131 INSN_LUID (elem));
1132 }
1133
1134 /* Update DEP to incorporate information from NEW_DEP.
1135 SD_IT points to DEP in case it should be moved to another list.
1136 MEM1 and MEM2, if nonnull, correspond to memory locations in case a
1137 data-speculative dependence should be updated. */
1138 static enum DEPS_ADJUST_RESULT
1139 update_dep (dep_t dep, dep_t new_dep,
1140 sd_iterator_def sd_it ATTRIBUTE_UNUSED,
1141 rtx mem1 ATTRIBUTE_UNUSED,
1142 rtx mem2 ATTRIBUTE_UNUSED)
1143 {
1144 enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
1145 enum reg_note old_type = DEP_TYPE (dep);
1146 bool was_spec = dep_spec_p (dep);
1147
1148 DEP_NONREG (dep) |= DEP_NONREG (new_dep);
1149 DEP_MULTIPLE (dep) = 1;
1150
1151 /* If this is a more restrictive type of dependence than the
1152 existing one, then change the existing dependence to this
1153 type. */
1154 if ((int) DEP_TYPE (new_dep) < (int) old_type)
1155 {
1156 DEP_TYPE (dep) = DEP_TYPE (new_dep);
1157 res = DEP_CHANGED;
1158 }
1159
1160 if (current_sched_info->flags & USE_DEPS_LIST)
1161 /* Update DEP_STATUS. */
1162 {
1163 ds_t dep_status = DEP_STATUS (dep);
1164 ds_t ds = DEP_STATUS (new_dep);
1165 ds_t new_status = ds | dep_status;
1166
1167 if (new_status & SPECULATIVE)
1168 {
1169 /* Either existing dep or a dep we're adding or both are
1170 speculative. */
1171 if (!(ds & SPECULATIVE)
1172 || !(dep_status & SPECULATIVE))
1173 /* The new dep can't be speculative. */
1174 new_status &= ~SPECULATIVE;
1175 else
1176 {
1177 /* Both are speculative. Merge probabilities. */
1178 if (mem1 != NULL)
1179 {
1180 dw_t dw;
1181
1182 dw = estimate_dep_weak (mem1, mem2);
1183 ds = set_dep_weak (ds, BEGIN_DATA, dw);
1184 }
1185
1186 new_status = ds_merge (dep_status, ds);
1187 }
1188 }
1189
1190 ds = new_status;
1191
1192 if (dep_status != ds)
1193 {
1194 DEP_STATUS (dep) = ds;
1195 res = DEP_CHANGED;
1196 }
1197 }
1198
1199 if (was_spec && !dep_spec_p (dep))
1200 /* The old dep was speculative, but now it isn't. */
1201 change_spec_dep_to_hard (sd_it);
1202
1203 if (true_dependency_cache != NULL
1204 && res == DEP_CHANGED)
1205 update_dependency_caches (dep, old_type);
1206
1207 return res;
1208 }
1209
1210 /* Add or update a dependence described by DEP.
1211 MEM1 and MEM2, if non-null, correspond to memory locations in case of
1212 data speculation.
1213
1214 The function returns a value indicating if an old entry has been changed
1215 or a new entry has been added to insn's backward deps or nothing has
1216 been updated at all. */
1217 static enum DEPS_ADJUST_RESULT
1218 add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
1219 rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
1220 {
1221 bool maybe_present_p = true;
1222 bool present_p = false;
1223
1224 gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
1225 && DEP_PRO (new_dep) != DEP_CON (new_dep));
1226
1227 #ifdef ENABLE_CHECKING
1228 check_dep (new_dep, mem1 != NULL);
1229 #endif
1230
1231 if (true_dependency_cache != NULL)
1232 {
1233 switch (ask_dependency_caches (new_dep))
1234 {
1235 case DEP_PRESENT:
1236 return DEP_PRESENT;
1237
1238 case DEP_CHANGED:
1239 maybe_present_p = true;
1240 present_p = true;
1241 break;
1242
1243 case DEP_CREATED:
1244 maybe_present_p = false;
1245 present_p = false;
1246 break;
1247
1248 default:
1249 gcc_unreachable ();
1250 break;
1251 }
1252 }
1253
1254 /* Check that we don't already have this dependence. */
1255 if (maybe_present_p)
1256 {
1257 dep_t present_dep;
1258 sd_iterator_def sd_it;
1259
1260 gcc_assert (true_dependency_cache == NULL || present_p);
1261
1262 present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
1263 DEP_CON (new_dep),
1264 resolved_p, &sd_it);
1265
1266 if (present_dep != NULL)
1267 /* We found an existing dependency between ELEM and INSN. */
1268 return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
1269 else
1270 /* We didn't find a dep, so it shouldn't be present in the cache. */
1271 gcc_assert (!present_p);
1272 }
1273
1274 /* Might want to check one level of transitivity to save conses.
1275 This check should be done in maybe_add_or_update_dep_1.
1276 Since we made it to add_or_update_dep_1, we must create
1277 (or update) a link. */
1278
1279 if (mem1 != NULL_RTX)
1280 {
1281 gcc_assert (sched_deps_info->generate_spec_deps);
1282 DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
1283 estimate_dep_weak (mem1, mem2));
1284 }
1285
1286 sd_add_dep (new_dep, resolved_p);
1287
1288 return DEP_CREATED;
1289 }
1290
1291 /* Initialize BACK_LIST_PTR with consumer's backward list and
1292 FORW_LIST_PTR with producer's forward list. If RESOLVED_P is true
1293 initialize with lists that hold resolved deps. */
1294 static void
1295 get_back_and_forw_lists (dep_t dep, bool resolved_p,
1296 deps_list_t *back_list_ptr,
1297 deps_list_t *forw_list_ptr)
1298 {
1299 rtx con = DEP_CON (dep);
1300
1301 if (!resolved_p)
1302 {
1303 if (dep_spec_p (dep))
1304 *back_list_ptr = INSN_SPEC_BACK_DEPS (con);
1305 else
1306 *back_list_ptr = INSN_HARD_BACK_DEPS (con);
1307
1308 *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
1309 }
1310 else
1311 {
1312 *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
1313 *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
1314 }
1315 }
1316
1317 /* Add dependence described by DEP.
1318 If RESOLVED_P is true treat the dependence as a resolved one. */
1319 void
1320 sd_add_dep (dep_t dep, bool resolved_p)
1321 {
1322 dep_node_t n = create_dep_node ();
1323 deps_list_t con_back_deps;
1324 deps_list_t pro_forw_deps;
1325 rtx elem = DEP_PRO (dep);
1326 rtx insn = DEP_CON (dep);
1327
1328 gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
1329
1330 if ((current_sched_info->flags & DO_SPECULATION) == 0
1331 || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
1332 DEP_STATUS (dep) &= ~SPECULATIVE;
1333
1334 copy_dep (DEP_NODE_DEP (n), dep);
1335
1336 get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);
1337
1338 add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
1339
1340 #ifdef ENABLE_CHECKING
1341 check_dep (dep, false);
1342 #endif
1343
1344 add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1345
1346 /* If we are adding a dependency to INSN's LOG_LINKs, then note that
1347 in the bitmap caches of dependency information. */
1348 if (true_dependency_cache != NULL)
1349 set_dependency_caches (dep);
1350 }
1351
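/* A minimal usage sketch (mirroring sd_copy_back_deps below): build the
   dep on the stack and let sd_add_dep copy it into a freshly allocated
   dep_node.  PRODUCER and CONSUMER stand for any two distinct insns
   known to the scheduler.

     dep_def _d, *d = &_d;
     init_dep (d, PRODUCER, CONSUMER, REG_DEP_TRUE);
     sd_add_dep (d, false);
*/
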
1352 /* Add or update a backward dependence described by DEP; treat it as
1353 resolved if RESOLVED_P is true.
1354 This function is a convenience wrapper around add_or_update_dep_1. */
1355 enum DEPS_ADJUST_RESULT
1356 sd_add_or_update_dep (dep_t dep, bool resolved_p)
1357 {
1358 return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
1359 }
1360
1361 /* Resolve the dependence pointed to by SD_IT.
1362 SD_IT will advance to the next element. */
1363 void
1364 sd_resolve_dep (sd_iterator_def sd_it)
1365 {
1366 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1367 dep_t dep = DEP_NODE_DEP (node);
1368 rtx pro = DEP_PRO (dep);
1369 rtx con = DEP_CON (dep);
1370
1371 if (dep_spec_p (dep))
1372 move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
1373 INSN_RESOLVED_BACK_DEPS (con));
1374 else
1375 move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
1376 INSN_RESOLVED_BACK_DEPS (con));
1377
1378 move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
1379 INSN_RESOLVED_FORW_DEPS (pro));
1380 }
1381
1382 /* Perform the inverse operation of sd_resolve_dep. Restore the dependence
1383 pointed to by SD_IT to unresolved state. */
1384 void
1385 sd_unresolve_dep (sd_iterator_def sd_it)
1386 {
1387 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1388 dep_t dep = DEP_NODE_DEP (node);
1389 rtx pro = DEP_PRO (dep);
1390 rtx con = DEP_CON (dep);
1391
1392 if (dep_spec_p (dep))
1393 move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
1394 INSN_SPEC_BACK_DEPS (con));
1395 else
1396 move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
1397 INSN_HARD_BACK_DEPS (con));
1398
1399 move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
1400 INSN_FORW_DEPS (pro));
1401 }
1402
1403 /* Make TO depend on all the FROM's producers.
1404 If RESOLVED_P is true add dependencies to the resolved lists. */
1405 void
1406 sd_copy_back_deps (rtx to, rtx from, bool resolved_p)
1407 {
1408 sd_list_types_def list_type;
1409 sd_iterator_def sd_it;
1410 dep_t dep;
1411
1412 list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;
1413
1414 FOR_EACH_DEP (from, list_type, sd_it, dep)
1415 {
1416 dep_def _new_dep, *new_dep = &_new_dep;
1417
1418 copy_dep (new_dep, dep);
1419 DEP_CON (new_dep) = to;
1420 sd_add_dep (new_dep, resolved_p);
1421 }
1422 }
1423
1424 /* Remove a dependency referred to by SD_IT.
1425 SD_IT will point to the next dependence after removal. */
1426 void
1427 sd_delete_dep (sd_iterator_def sd_it)
1428 {
1429 dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
1430 dep_t dep = DEP_NODE_DEP (n);
1431 rtx pro = DEP_PRO (dep);
1432 rtx con = DEP_CON (dep);
1433 deps_list_t con_back_deps;
1434 deps_list_t pro_forw_deps;
1435
1436 if (true_dependency_cache != NULL)
1437 {
1438 int elem_luid = INSN_LUID (pro);
1439 int insn_luid = INSN_LUID (con);
1440
1441 bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
1442 bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1443 bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
1444 bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1445
1446 if (current_sched_info->flags & DO_SPECULATION)
1447 bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
1448 }
1449
1450 get_back_and_forw_lists (dep, sd_it.resolved_p,
1451 &con_back_deps, &pro_forw_deps);
1452
1453 remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
1454 remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1455
1456 delete_dep_node (n);
1457 }
1458
1459 /* Dump size of the lists. */
1460 #define DUMP_LISTS_SIZE (2)
1461
1462 /* Dump dependencies of the lists. */
1463 #define DUMP_LISTS_DEPS (4)
1464
1465 /* Dump all information about the lists. */
1466 #define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)
1467
1468 /* Dump deps_lists of INSN specified by TYPES to DUMP.
1469 FLAGS is a bit mask specifying what information about the lists needs
1470 to be printed.
1471 If FLAGS has the very first bit set, then dump all information about
1472 the lists and propagate this bit into the callee dump functions. */
1473 static void
1474 dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
1475 {
1476 sd_iterator_def sd_it;
1477 dep_t dep;
1478 int all;
1479
1480 all = (flags & 1);
1481
1482 if (all)
1483 flags |= DUMP_LISTS_ALL;
1484
1485 fprintf (dump, "[");
1486
1487 if (flags & DUMP_LISTS_SIZE)
1488 fprintf (dump, "%d; ", sd_lists_size (insn, types));
1489
1490 if (flags & DUMP_LISTS_DEPS)
1491 {
1492 FOR_EACH_DEP (insn, types, sd_it, dep)
1493 {
1494 dump_dep (dump, dep, dump_dep_flags | all);
1495 fprintf (dump, " ");
1496 }
1497 }
1498 }
1499
1500 /* Dump all information about deps_lists of INSN specified by TYPES
1501 to STDERR. */
1502 void
1503 sd_debug_lists (rtx insn, sd_list_types_def types)
1504 {
1505 dump_lists (stderr, insn, types, 1);
1506 fprintf (stderr, "\n");
1507 }
1508
1509 /* A wrapper around add_dependence_1, to add a dependence of CON on
1510 PRO, with type DEP_TYPE. This function implements special handling
1511 for REG_DEP_CONTROL dependencies. For these, we optionally promote
1512 the type to REG_DEP_ANTI if we can determine that predication is
1513 impossible; otherwise we add additional true dependencies on the
1514 INSN_COND_DEPS list of the jump (which PRO must be). */
1515 void
1516 add_dependence (rtx con, rtx pro, enum reg_note dep_type)
1517 {
1518 if (dep_type == REG_DEP_CONTROL
1519 && !(current_sched_info->flags & DO_PREDICATION))
1520 dep_type = REG_DEP_ANTI;
1521
1522 /* A REG_DEP_CONTROL dependence may be eliminated through predication,
1523 so we must also make the insn dependent on the setter of the
1524 condition. */
1525 if (dep_type == REG_DEP_CONTROL)
1526 {
1527 rtx real_pro = pro;
1528 rtx other = real_insn_for_shadow (real_pro);
1529 rtx cond;
1530
1531 if (other != NULL_RTX)
1532 real_pro = other;
1533 cond = sched_get_reverse_condition_uncached (real_pro);
1534 /* Verify that the insn does not use a different value in
1535 the condition register than the one that was present at
1536 the jump. */
1537 if (cond == NULL_RTX)
1538 dep_type = REG_DEP_ANTI;
1539 else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
1540 {
1541 HARD_REG_SET uses;
1542 CLEAR_HARD_REG_SET (uses);
1543 note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
1544 if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
1545 dep_type = REG_DEP_ANTI;
1546 }
1547 if (dep_type == REG_DEP_CONTROL)
1548 {
1549 if (sched_verbose >= 5)
1550 fprintf (sched_dump, "making DEP_CONTROL for %d\n",
1551 INSN_UID (real_pro));
1552 add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
1553 REG_DEP_TRUE, false);
1554 }
1555 }
1556
1557 add_dependence_1 (con, pro, dep_type);
1558 }
1559
1560 /* A convenience wrapper to operate on an entire list. HARD should be
1561 true if DEP_NONREG should be set on newly created dependencies. */
1562
1563 static void
1564 add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type,
1565 bool hard)
1566 {
1567 mark_as_hard = hard;
1568 for (; list; list = XEXP (list, 1))
1569 {
1570 if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
1571 add_dependence (insn, XEXP (list, 0), dep_type);
1572 }
1573 mark_as_hard = false;
1574 }
1575
1576 /* Similar, but free *LISTP at the same time, when the context
1577 is not readonly. HARD should be true if DEP_NONREG should be set on
1578 newly created dependencies. */
1579
1580 static void
1581 add_dependence_list_and_free (struct deps_desc *deps, rtx insn, rtx *listp,
1582 int uncond, enum reg_note dep_type, bool hard)
1583 {
1584 add_dependence_list (insn, *listp, uncond, dep_type, hard);
1585
1586 /* We don't want to short-circuit dependencies involving debug
1587 insns, because they may cause actual dependencies to be
1588 disregarded. */
1589 if (deps->readonly || DEBUG_INSN_P (insn))
1590 return;
1591
1592 free_INSN_LIST_list (listp);
1593 }
1594
1595 /* Remove all occurrences of INSN from LIST. Return the number of
1596 occurrences removed. */
1597
1598 static int
1599 remove_from_dependence_list (rtx insn, rtx* listp)
1600 {
1601 int removed = 0;
1602
1603 while (*listp)
1604 {
1605 if (XEXP (*listp, 0) == insn)
1606 {
1607 remove_free_INSN_LIST_node (listp);
1608 removed++;
1609 continue;
1610 }
1611
1612 listp = &XEXP (*listp, 1);
1613 }
1614
1615 return removed;
1616 }
1617
1618 /* Same as above, but process two lists at once. */
1619 static int
1620 remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
1621 {
1622 int removed = 0;
1623
1624 while (*listp)
1625 {
1626 if (XEXP (*listp, 0) == insn)
1627 {
1628 remove_free_INSN_LIST_node (listp);
1629 remove_free_EXPR_LIST_node (exprp);
1630 removed++;
1631 continue;
1632 }
1633
1634 listp = &XEXP (*listp, 1);
1635 exprp = &XEXP (*exprp, 1);
1636 }
1637
1638 return removed;
1639 }
1640
1641 /* Clear all dependencies for an insn. */
1642 static void
1643 delete_all_dependences (rtx insn)
1644 {
1645 sd_iterator_def sd_it;
1646 dep_t dep;
1647
1648 /* The loop below could be optimized to clear the caches and back_deps
1649 in one call, but that would require duplicating code from
1650 sd_delete_dep (). */
1651
1652 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1653 sd_iterator_cond (&sd_it, &dep);)
1654 sd_delete_dep (sd_it);
1655 }
1656
1657 /* All insns in a scheduling group except the first should only have
1658 dependencies on the previous insn in the group. So we find the
1659 first instruction in the scheduling group by walking the dependence
1660 chains backwards. Then we add the dependencies for the group to
1661 the previous nonnote insn. */
1662
1663 static void
1664 chain_to_prev_insn (rtx insn)
1665 {
1666 sd_iterator_def sd_it;
1667 dep_t dep;
1668 rtx prev_nonnote;
1669
1670 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1671 {
1672 rtx i = insn;
1673 rtx pro = DEP_PRO (dep);
1674
1675 do
1676 {
1677 i = prev_nonnote_insn (i);
1678
1679 if (pro == i)
1680 goto next_link;
1681 } while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));
1682
1683 if (! sched_insns_conditions_mutex_p (i, pro))
1684 add_dependence (i, pro, DEP_TYPE (dep));
1685 next_link:;
1686 }
1687
1688 delete_all_dependences (insn);
1689
1690 prev_nonnote = prev_nonnote_nondebug_insn (insn);
1691 if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
1692 && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
1693 add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
1694 }
1695 \f
1696 /* Process an insn's memory dependencies. There are four kinds of
1697 dependencies:
1698
1699 (0) read dependence: read follows read
1700 (1) true dependence: read follows write
1701 (2) output dependence: write follows write
1702 (3) anti dependence: write follows read
1703
1704 We are careful to build only dependencies which actually exist, and
1705 use transitivity to avoid building too many links. */
1706
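/* For example, in the sequence

     (set (mem:SI (reg A)) (reg B))   ;; write
     (set (reg C) (mem:SI (reg A)))   ;; read: true dep on the write
     (set (mem:SI (reg A)) (reg D))   ;; write: output dep on the first
                                      ;; write, anti dep on the read

   no link is needed between two reads of the same location.  */
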
1707 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1708 The MEM is a memory reference contained within INSN, which we are saving
1709 so that we can do memory aliasing on it. */
1710
1711 static void
1712 add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
1713 rtx insn, rtx mem)
1714 {
1715 rtx *insn_list;
1716 rtx *mem_list;
1717 rtx link;
1718
1719 gcc_assert (!deps->readonly);
1720 if (read_p)
1721 {
1722 insn_list = &deps->pending_read_insns;
1723 mem_list = &deps->pending_read_mems;
1724 if (!DEBUG_INSN_P (insn))
1725 deps->pending_read_list_length++;
1726 }
1727 else
1728 {
1729 insn_list = &deps->pending_write_insns;
1730 mem_list = &deps->pending_write_mems;
1731 deps->pending_write_list_length++;
1732 }
1733
1734 link = alloc_INSN_LIST (insn, *insn_list);
1735 *insn_list = link;
1736
1737 if (sched_deps_info->use_cselib)
1738 {
1739 mem = shallow_copy_rtx (mem);
1740 XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
1741 GET_MODE (mem), insn);
1742 }
1743 link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
1744 *mem_list = link;
1745 }
1746
1747 /* Make a dependency between every memory reference on the pending lists
1748 and INSN, thus flushing the pending lists. FOR_READ is true if emitting
1749 dependencies for a read operation, similarly with FOR_WRITE. */
1750
1751 static void
1752 flush_pending_lists (struct deps_desc *deps, rtx insn, int for_read,
1753 int for_write)
1754 {
1755 if (for_write)
1756 {
1757 add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1758 1, REG_DEP_ANTI, true);
1759 if (!deps->readonly)
1760 {
1761 free_EXPR_LIST_list (&deps->pending_read_mems);
1762 deps->pending_read_list_length = 0;
1763 }
1764 }
1765
1766 add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
1767 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1768 true);
1769
1770 add_dependence_list_and_free (deps, insn,
1771 &deps->last_pending_memory_flush, 1,
1772 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1773 true);
1774
1775 add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
1776 REG_DEP_ANTI, true);
1777
1778 if (DEBUG_INSN_P (insn))
1779 {
1780 if (for_write)
1781 free_INSN_LIST_list (&deps->pending_read_insns);
1782 free_INSN_LIST_list (&deps->pending_write_insns);
1783 free_INSN_LIST_list (&deps->last_pending_memory_flush);
1784 free_INSN_LIST_list (&deps->pending_jump_insns);
1785 }
1786
1787 if (!deps->readonly)
1788 {
1789 free_EXPR_LIST_list (&deps->pending_write_mems);
1790 deps->pending_write_list_length = 0;
1791
1792 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1793 deps->pending_flush_length = 1;
1794 }
1795 mark_as_hard = false;
1796 }
1797 \f
1798 /* The instruction whose dependencies we are analyzing. */
1799 static rtx cur_insn = NULL_RTX;
1800
1801 /* Implement hooks for haifa scheduler. */
1802
1803 static void
1804 haifa_start_insn (rtx insn)
1805 {
1806 gcc_assert (insn && !cur_insn);
1807
1808 cur_insn = insn;
1809 }
1810
1811 static void
1812 haifa_finish_insn (void)
1813 {
1814 cur_insn = NULL;
1815 }
1816
1817 void
1818 haifa_note_reg_set (int regno)
1819 {
1820 SET_REGNO_REG_SET (reg_pending_sets, regno);
1821 }
1822
1823 void
1824 haifa_note_reg_clobber (int regno)
1825 {
1826 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1827 }
1828
1829 void
1830 haifa_note_reg_use (int regno)
1831 {
1832 SET_REGNO_REG_SET (reg_pending_uses, regno);
1833 }
1834
1835 static void
1836 haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds)
1837 {
1838 if (!(ds & SPECULATIVE))
1839 {
1840 mem = NULL_RTX;
1841 pending_mem = NULL_RTX;
1842 }
1843 else
1844 gcc_assert (ds & BEGIN_DATA);
1845
1846 {
1847 dep_def _dep, *dep = &_dep;
1848
1849 init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
1850 current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
1851 DEP_NONREG (dep) = 1;
1852 maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1853 }
1854
1855 }
1856
1857 static void
1858 haifa_note_dep (rtx elem, ds_t ds)
1859 {
1860 dep_def _dep;
1861 dep_t dep = &_dep;
1862
1863 init_dep (dep, elem, cur_insn, ds_to_dt (ds));
1864 if (mark_as_hard)
1865 DEP_NONREG (dep) = 1;
1866 maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1867 }
1868
1869 static void
1870 note_reg_use (int r)
1871 {
1872 if (sched_deps_info->note_reg_use)
1873 sched_deps_info->note_reg_use (r);
1874 }
1875
1876 static void
1877 note_reg_set (int r)
1878 {
1879 if (sched_deps_info->note_reg_set)
1880 sched_deps_info->note_reg_set (r);
1881 }
1882
1883 static void
1884 note_reg_clobber (int r)
1885 {
1886 if (sched_deps_info->note_reg_clobber)
1887 sched_deps_info->note_reg_clobber (r);
1888 }
1889
1890 static void
1891 note_mem_dep (rtx m1, rtx m2, rtx e, ds_t ds)
1892 {
1893 if (sched_deps_info->note_mem_dep)
1894 sched_deps_info->note_mem_dep (m1, m2, e, ds);
1895 }
1896
1897 static void
1898 note_dep (rtx e, ds_t ds)
1899 {
1900 if (sched_deps_info->note_dep)
1901 sched_deps_info->note_dep (e, ds);
1902 }
1903
1904 /* Return the reg_note corresponding to DS. */
1905 enum reg_note
1906 ds_to_dt (ds_t ds)
1907 {
1908 if (ds & DEP_TRUE)
1909 return REG_DEP_TRUE;
1910 else if (ds & DEP_OUTPUT)
1911 return REG_DEP_OUTPUT;
1912 else if (ds & DEP_ANTI)
1913 return REG_DEP_ANTI;
1914 else
1915 {
1916 gcc_assert (ds & DEP_CONTROL);
1917 return REG_DEP_CONTROL;
1918 }
1919 }
1920
1921 \f
1922
1923 /* Functions for computation of info needed for register pressure
1924 sensitive insn scheduling. */
1925
1926
1927 /* Allocate and return reg_use_data structure for REGNO and INSN. */
1928 static struct reg_use_data *
1929 create_insn_reg_use (int regno, rtx insn)
1930 {
1931 struct reg_use_data *use;
1932
1933 use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1934 use->regno = regno;
1935 use->insn = insn;
1936 use->next_insn_use = INSN_REG_USE_LIST (insn);
1937 INSN_REG_USE_LIST (insn) = use;
1938 return use;
1939 }
1940
1941 /* Allocate and return reg_set_data structure for REGNO and INSN. */
1942 static struct reg_set_data *
1943 create_insn_reg_set (int regno, rtx insn)
1944 {
1945 struct reg_set_data *set;
1946
1947 set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1948 set->regno = regno;
1949 set->insn = insn;
1950 set->next_insn_set = INSN_REG_SET_LIST (insn);
1951 INSN_REG_SET_LIST (insn) = set;
1952 return set;
1953 }
1954
1955 /* Set up insn register uses for INSN and dependency context DEPS. */
1956 static void
1957 setup_insn_reg_uses (struct deps_desc *deps, rtx insn)
1958 {
1959 unsigned i;
1960 reg_set_iterator rsi;
1961 rtx list;
1962 struct reg_use_data *use, *use2, *next;
1963 struct deps_reg *reg_last;
1964
1965 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1966 {
1967 if (i < FIRST_PSEUDO_REGISTER
1968 && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1969 continue;
1970
1971 if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1972 && ! REGNO_REG_SET_P (reg_pending_sets, i)
1973 && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
1974 /* Ignore a use that is not dying. */
1975 continue;
1976
1977 use = create_insn_reg_use (i, insn);
1978 use->next_regno_use = use;
1979 reg_last = &deps->reg_last[i];
1980
1981 /* Create the cyclic list of uses. */
1982 for (list = reg_last->uses; list; list = XEXP (list, 1))
1983 {
1984 use2 = create_insn_reg_use (i, XEXP (list, 0));
1985 next = use->next_regno_use;
1986 use->next_regno_use = use2;
1987 use2->next_regno_use = next;
1988 }
1989 }
1990 }
1991
1992 /* Register pressure info for the currently processed insn. */
1993 static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
1994
1995 /* Return TRUE if INSN has the use structure for REGNO. */
1996 static bool
1997 insn_use_p (rtx insn, int regno)
1998 {
1999 struct reg_use_data *use;
2000
2001 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2002 if (use->regno == regno)
2003 return true;
2004 return false;
2005 }
2006
2007 /* Update the register pressure info after the birth of pseudo register
2008 REGNO in INSN. CLOBBER_P and UNUSED_P say, respectively, whether
2009 the register is being clobbered, or is unused after the insn. */
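/* (For instance, with a pressure class whose hard registers are
word-sized, the birth of a double-word pseudo adds 2, the value of
ira_reg_class_max_nregs, to the relevant increase counter.) */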
2010 static void
2011 mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
2012 {
2013 int incr, new_incr;
2014 enum reg_class cl;
2015
2016 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2017 cl = sched_regno_pressure_class[regno];
2018 if (cl != NO_REGS)
2019 {
2020 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2021 if (clobber_p)
2022 {
2023 new_incr = reg_pressure_info[cl].clobber_increase + incr;
2024 reg_pressure_info[cl].clobber_increase = new_incr;
2025 }
2026 else if (unused_p)
2027 {
2028 new_incr = reg_pressure_info[cl].unused_set_increase + incr;
2029 reg_pressure_info[cl].unused_set_increase = new_incr;
2030 }
2031 else
2032 {
2033 new_incr = reg_pressure_info[cl].set_increase + incr;
2034 reg_pressure_info[cl].set_increase = new_incr;
2035 if (! insn_use_p (insn, regno))
2036 reg_pressure_info[cl].change += incr;
2037 create_insn_reg_set (regno, insn);
2038 }
2039 gcc_assert (new_incr < (1 << INCREASE_BITS));
2040 }
2041 }
2042
2043 /* Like mark_insn_pseudo_birth except that NREGS says how many hard
2044 registers are involved in the birth. */
2045 static void
2046 mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
2047 bool clobber_p, bool unused_p)
2048 {
2049 enum reg_class cl;
2050 int new_incr, last = regno + nregs;
2051
2052 while (regno < last)
2053 {
2054 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2055 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2056 {
2057 cl = sched_regno_pressure_class[regno];
2058 if (cl != NO_REGS)
2059 {
2060 if (clobber_p)
2061 {
2062 new_incr = reg_pressure_info[cl].clobber_increase + 1;
2063 reg_pressure_info[cl].clobber_increase = new_incr;
2064 }
2065 else if (unused_p)
2066 {
2067 new_incr = reg_pressure_info[cl].unused_set_increase + 1;
2068 reg_pressure_info[cl].unused_set_increase = new_incr;
2069 }
2070 else
2071 {
2072 new_incr = reg_pressure_info[cl].set_increase + 1;
2073 reg_pressure_info[cl].set_increase = new_incr;
2074 if (! insn_use_p (insn, regno))
2075 reg_pressure_info[cl].change += 1;
2076 create_insn_reg_set (regno, insn);
2077 }
2078 gcc_assert (new_incr < (1 << INCREASE_BITS));
2079 }
2080 }
2081 regno++;
2082 }
2083 }
2084
2085 /* Update the register pressure info after the birth of a pseudo or
2086 hard register REG in INSN. CLOBBER_P and UNUSED_P say,
2087 respectively, whether the register is being clobbered, or is unused
2088 after the insn. */
2089 static void
2090 mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
2091 {
2092 int regno;
2093
2094 if (GET_CODE (reg) == SUBREG)
2095 reg = SUBREG_REG (reg);
2096
2097 if (! REG_P (reg))
2098 return;
2099
2100 regno = REGNO (reg);
2101 if (regno < FIRST_PSEUDO_REGISTER)
2102 mark_insn_hard_regno_birth (insn, regno,
2103 hard_regno_nregs[regno][GET_MODE (reg)],
2104 clobber_p, unused_p);
2105 else
2106 mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
2107 }
2108
2109 /* Update the register pressure info after death of pseudo register
2110 REGNO. */
2111 static void
2112 mark_pseudo_death (int regno)
2113 {
2114 int incr;
2115 enum reg_class cl;
2116
2117 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2118 cl = sched_regno_pressure_class[regno];
2119 if (cl != NO_REGS)
2120 {
2121 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2122 reg_pressure_info[cl].change -= incr;
2123 }
2124 }
2125
2126 /* Like mark_pseudo_death except that NREGS says how many hard
2127 registers are involved in the death. */
2128 static void
2129 mark_hard_regno_death (int regno, int nregs)
2130 {
2131 enum reg_class cl;
2132 int last = regno + nregs;
2133
2134 while (regno < last)
2135 {
2136 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2137 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2138 {
2139 cl = sched_regno_pressure_class[regno];
2140 if (cl != NO_REGS)
2141 reg_pressure_info[cl].change -= 1;
2142 }
2143 regno++;
2144 }
2145 }
2146
2147 /* Update the register pressure info after death of pseudo or hard
2148 register REG. */
2149 static void
2150 mark_reg_death (rtx reg)
2151 {
2152 int regno;
2153
2154 if (GET_CODE (reg) == SUBREG)
2155 reg = SUBREG_REG (reg);
2156
2157 if (! REG_P (reg))
2158 return;
2159
2160 regno = REGNO (reg);
2161 if (regno < FIRST_PSEUDO_REGISTER)
2162 mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]);
2163 else
2164 mark_pseudo_death (regno);
2165 }
2166
2167 /* Process SETTER of REG. DATA is an insn containing the setter. */
2168 static void
2169 mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
2170 {
2171 if (setter != NULL_RTX && GET_CODE (setter) != SET)
2172 return;
2173 mark_insn_reg_birth
2174 ((rtx) data, reg, false,
2175 find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
2176 }
2177
2178 /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs. */
2179 static void
2180 mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
2181 {
2182 if (GET_CODE (setter) == CLOBBER)
2183 mark_insn_reg_birth ((rtx) data, reg, true, false);
2184 }
2185
2186 /* Set up reg pressure info related to INSN. */
2187 void
2188 init_insn_reg_pressure_info (rtx insn)
2189 {
2190 int i, len;
2191 enum reg_class cl;
2192 static struct reg_pressure_data *pressure_info;
2193 rtx link;
2194
2195 gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);
2196
2197 if (! INSN_P (insn))
2198 return;
2199
2200 for (i = 0; i < ira_pressure_classes_num; i++)
2201 {
2202 cl = ira_pressure_classes[i];
2203 reg_pressure_info[cl].clobber_increase = 0;
2204 reg_pressure_info[cl].set_increase = 0;
2205 reg_pressure_info[cl].unused_set_increase = 0;
2206 reg_pressure_info[cl].change = 0;
2207 }
2208
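/* Scan the pattern twice: the first pass records only CLOBBERs, the
second only SETs, so that clobber_increase and set_increase are
accumulated separately. */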
2209 note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
2210
2211 note_stores (PATTERN (insn), mark_insn_reg_store, insn);
2212
2213 #ifdef AUTO_INC_DEC
2214 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2215 if (REG_NOTE_KIND (link) == REG_INC)
2216 mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2217 #endif
2218
2219 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2220 if (REG_NOTE_KIND (link) == REG_DEAD)
2221 mark_reg_death (XEXP (link, 0));
2222
2223 len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
2224 pressure_info
2225 = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2226 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2227 INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
2228 * sizeof (int), 1);
2229 for (i = 0; i < ira_pressure_classes_num; i++)
2230 {
2231 cl = ira_pressure_classes[i];
2232 pressure_info[i].clobber_increase
2233 = reg_pressure_info[cl].clobber_increase;
2234 pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2235 pressure_info[i].unused_set_increase
2236 = reg_pressure_info[cl].unused_set_increase;
2237 pressure_info[i].change = reg_pressure_info[cl].change;
2238 }
2239 }
2240
2241
2242 \f
2243
2244 /* Internal variable for the sched_analyze_[12] () functions.
2245 If it is true, sched_analyze_[12] is looking at the
2246 outermost SET. */
2247 static bool can_start_lhs_rhs_p;
2248
2249 /* Extend reg info for the deps context DEPS given that
2250 we have just generated a register numbered REGNO. */
2251 static void
2252 extend_deps_reg_info (struct deps_desc *deps, int regno)
2253 {
2254 int max_regno = regno + 1;
2255
2256 gcc_assert (!reload_completed);
2257
2258 /* In a readonly context, it would not hurt to extend info,
2259 but it should not be needed. */
2260 if (reload_completed && deps->readonly)
2261 {
2262 deps->max_reg = max_regno;
2263 return;
2264 }
2265
2266 if (max_regno > deps->max_reg)
2267 {
2268 deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2269 max_regno);
2270 memset (&deps->reg_last[deps->max_reg],
2271 0, (max_regno - deps->max_reg)
2272 * sizeof (struct deps_reg));
2273 deps->max_reg = max_regno;
2274 }
2275 }
2276
2277 /* Extends REG_INFO_P if needed. */
2278 void
2279 maybe_extend_reg_info_p (void)
2280 {
2281 /* Extend REG_INFO_P, if needed. */
2282 if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2283 {
2284 size_t new_reg_info_p_size = max_regno + 128;
2285
2286 gcc_assert (!reload_completed && sel_sched_p ());
2287
2288 reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2289 new_reg_info_p_size,
2290 reg_info_p_size,
2291 sizeof (*reg_info_p));
2292 reg_info_p_size = new_reg_info_p_size;
2293 }
2294 }
2295
2296 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2297 The type of the reference is specified by REF and can be SET,
2298 CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
2299
2300 static void
2301 sched_analyze_reg (struct deps_desc *deps, int regno, enum machine_mode mode,
2302 enum rtx_code ref, rtx insn)
2303 {
2304 /* We could emit new pseudos in renaming. Extend the reg structures. */
2305 if (!reload_completed && sel_sched_p ()
2306 && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2307 extend_deps_reg_info (deps, regno);
2308
2309 maybe_extend_reg_info_p ();
2310
2311 /* A hard reg in a wide mode may really be multiple registers.
2312 If so, mark all of them just like the first. */
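/* (E.g. a DImode value held in 32-bit hard registers occupies REGNO
and REGNO + 1; each of them is noted below.) */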
2313 if (regno < FIRST_PSEUDO_REGISTER)
2314 {
2315 int i = hard_regno_nregs[regno][mode];
2316 if (ref == SET)
2317 {
2318 while (--i >= 0)
2319 note_reg_set (regno + i);
2320 }
2321 else if (ref == USE)
2322 {
2323 while (--i >= 0)
2324 note_reg_use (regno + i);
2325 }
2326 else
2327 {
2328 while (--i >= 0)
2329 note_reg_clobber (regno + i);
2330 }
2331 }
2332
2333 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2334 it does not reload. Ignore these as they have served their
2335 purpose already. */
2336 else if (regno >= deps->max_reg)
2337 {
2338 enum rtx_code code = GET_CODE (PATTERN (insn));
2339 gcc_assert (code == USE || code == CLOBBER);
2340 }
2341
2342 else
2343 {
2344 if (ref == SET)
2345 note_reg_set (regno);
2346 else if (ref == USE)
2347 note_reg_use (regno);
2348 else
2349 note_reg_clobber (regno);
2350
2351 /* Pseudos that are REG_EQUIV to something may be replaced
2352 by that during reloading. We need only add dependencies for
2353 the address in the REG_EQUIV note. */
2354 if (!reload_completed && get_reg_known_equiv_p (regno))
2355 {
2356 rtx t = get_reg_known_value (regno);
2357 if (MEM_P (t))
2358 sched_analyze_2 (deps, XEXP (t, 0), insn);
2359 }
2360
2361 /* Don't let it cross a call after scheduling if it doesn't
2362 already cross one. */
2363 if (REG_N_CALLS_CROSSED (regno) == 0)
2364 {
2365 if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2366 deps->sched_before_next_call
2367 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2368 else
2369 add_dependence_list (insn, deps->last_function_call, 1,
2370 REG_DEP_ANTI, false);
2371 }
2372 }
2373 }
2374
2375 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2376 rtx, X, creating all dependencies generated by the write to the
2377 destination of X, and reads of everything mentioned. */
2378
2379 static void
2380 sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn)
2381 {
2382 rtx dest = XEXP (x, 0);
2383 enum rtx_code code = GET_CODE (x);
2384 bool cslr_p = can_start_lhs_rhs_p;
2385
2386 can_start_lhs_rhs_p = false;
2387
2388 gcc_assert (dest);
2389 if (dest == 0)
2390 return;
2391
2392 if (cslr_p && sched_deps_info->start_lhs)
2393 sched_deps_info->start_lhs (dest);
2394
2395 if (GET_CODE (dest) == PARALLEL)
2396 {
2397 int i;
2398
2399 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2400 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2401 sched_analyze_1 (deps,
2402 gen_rtx_CLOBBER (VOIDmode,
2403 XEXP (XVECEXP (dest, 0, i), 0)),
2404 insn);
2405
2406 if (cslr_p && sched_deps_info->finish_lhs)
2407 sched_deps_info->finish_lhs ();
2408
2409 if (code == SET)
2410 {
2411 can_start_lhs_rhs_p = cslr_p;
2412
2413 sched_analyze_2 (deps, SET_SRC (x), insn);
2414
2415 can_start_lhs_rhs_p = false;
2416 }
2417
2418 return;
2419 }
2420
2421 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2422 || GET_CODE (dest) == ZERO_EXTRACT)
2423 {
2424 if (GET_CODE (dest) == STRICT_LOW_PART
2425 || GET_CODE (dest) == ZERO_EXTRACT
2426 || df_read_modify_subreg_p (dest))
2427 {
2428 /* These both read and modify the result. We must handle
2429 them as writes to get proper dependencies for following
2430 instructions. We must handle them as reads to get proper
2431 dependencies from this to previous instructions.
2432 Thus we need to call sched_analyze_2. */
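/* (E.g. (set (strict_low_part (reg X)) ...) both reads and writes X,
so X is analyzed as a use here and as a set when the store itself
is processed.) */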
2433
2434 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2435 }
2436 if (GET_CODE (dest) == ZERO_EXTRACT)
2437 {
2438 /* The second and third arguments are values read by this insn. */
2439 sched_analyze_2 (deps, XEXP (dest, 1), insn);
2440 sched_analyze_2 (deps, XEXP (dest, 2), insn);
2441 }
2442 dest = XEXP (dest, 0);
2443 }
2444
2445 if (REG_P (dest))
2446 {
2447 int regno = REGNO (dest);
2448 enum machine_mode mode = GET_MODE (dest);
2449
2450 sched_analyze_reg (deps, regno, mode, code, insn);
2451
2452 #ifdef STACK_REGS
2453 /* Treat all writes to a stack register as modifying the TOS. */
2454 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2455 {
2456 /* Avoid analyzing the same register twice. */
2457 if (regno != FIRST_STACK_REG)
2458 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2459
2460 add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
2461 FIRST_STACK_REG);
2462 }
2463 #endif
2464 }
2465 else if (MEM_P (dest))
2466 {
2467 /* Writing memory. */
2468 rtx t = dest;
2469
2470 if (sched_deps_info->use_cselib)
2471 {
2472 enum machine_mode address_mode = get_address_mode (dest);
2473
2474 t = shallow_copy_rtx (dest);
2475 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2476 GET_MODE (t), insn);
2477 XEXP (t, 0)
2478 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2479 insn);
2480 }
2481 t = canon_rtx (t);
2482
2483 /* Pending lists can't get larger in a readonly context. */
2484 if (!deps->readonly
2485 && ((deps->pending_read_list_length + deps->pending_write_list_length)
2486 > MAX_PENDING_LIST_LENGTH))
2487 {
2488 /* Flush all pending reads and writes to prevent the pending lists
2489 from getting any larger. Insn scheduling runs too slowly when
2490 these lists get long. When compiling GCC with itself,
2491 this flush occurs 8 times for sparc, and 10 times for m88k using
2492 the default value of 32. */
2493 flush_pending_lists (deps, insn, false, true);
2494 }
2495 else
2496 {
2497 rtx pending, pending_mem;
2498
2499 pending = deps->pending_read_insns;
2500 pending_mem = deps->pending_read_mems;
2501 while (pending)
2502 {
2503 if (anti_dependence (XEXP (pending_mem, 0), t)
2504 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2505 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2506 DEP_ANTI);
2507
2508 pending = XEXP (pending, 1);
2509 pending_mem = XEXP (pending_mem, 1);
2510 }
2511
2512 pending = deps->pending_write_insns;
2513 pending_mem = deps->pending_write_mems;
2514 while (pending)
2515 {
2516 if (output_dependence (XEXP (pending_mem, 0), t)
2517 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2518 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2519 DEP_OUTPUT);
2520
2521 pending = XEXP (pending, 1);
2522 pending_mem = XEXP (pending_mem, 1);
2523 }
2524
2525 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2526 REG_DEP_ANTI, true);
2527 add_dependence_list (insn, deps->pending_jump_insns, 1,
2528 REG_DEP_CONTROL, true);
2529
2530 if (!deps->readonly)
2531 add_insn_mem_dependence (deps, false, insn, dest);
2532 }
2533 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2534 }
2535
2536 if (cslr_p && sched_deps_info->finish_lhs)
2537 sched_deps_info->finish_lhs ();
2538
2539 /* Analyze reads. */
2540 if (GET_CODE (x) == SET)
2541 {
2542 can_start_lhs_rhs_p = cslr_p;
2543
2544 sched_analyze_2 (deps, SET_SRC (x), insn);
2545
2546 can_start_lhs_rhs_p = false;
2547 }
2548 }
2549
2550 /* Analyze the uses of memory and registers in rtx X in INSN. */
2551 static void
2552 sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn)
2553 {
2554 int i;
2555 int j;
2556 enum rtx_code code;
2557 const char *fmt;
2558 bool cslr_p = can_start_lhs_rhs_p;
2559
2560 can_start_lhs_rhs_p = false;
2561
2562 gcc_assert (x);
2563 if (x == 0)
2564 return;
2565
2566 if (cslr_p && sched_deps_info->start_rhs)
2567 sched_deps_info->start_rhs (x);
2568
2569 code = GET_CODE (x);
2570
2571 switch (code)
2572 {
2573 CASE_CONST_ANY:
2574 case SYMBOL_REF:
2575 case CONST:
2576 case LABEL_REF:
2577 /* Ignore constants. */
2578 if (cslr_p && sched_deps_info->finish_rhs)
2579 sched_deps_info->finish_rhs ();
2580
2581 return;
2582
2583 #ifdef HAVE_cc0
2584 case CC0:
2585 /* The user of CC0 depends on the immediately preceding insn. */
2586 SCHED_GROUP_P (insn) = 1;
2587 /* Don't move the CC0 setter to another block (it can set up the
2588 same flag for previous CC0 users, which is safe). */
2589 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2590
2591 if (cslr_p && sched_deps_info->finish_rhs)
2592 sched_deps_info->finish_rhs ();
2593
2594 return;
2595 #endif
2596
2597 case REG:
2598 {
2599 int regno = REGNO (x);
2600 enum machine_mode mode = GET_MODE (x);
2601
2602 sched_analyze_reg (deps, regno, mode, USE, insn);
2603
2604 #ifdef STACK_REGS
2605 /* Treat all reads of a stack register as modifying the TOS. */
2606 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2607 {
2608 /* Avoid analyzing the same register twice. */
2609 if (regno != FIRST_STACK_REG)
2610 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2611 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2612 }
2613 #endif
2614
2615 if (cslr_p && sched_deps_info->finish_rhs)
2616 sched_deps_info->finish_rhs ();
2617
2618 return;
2619 }
2620
2621 case MEM:
2622 {
2623 /* Reading memory. */
2624 rtx u;
2625 rtx pending, pending_mem;
2626 rtx t = x;
2627
2628 if (sched_deps_info->use_cselib)
2629 {
2630 enum machine_mode address_mode = get_address_mode (t);
2631
2632 t = shallow_copy_rtx (t);
2633 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2634 GET_MODE (t), insn);
2635 XEXP (t, 0)
2636 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2637 insn);
2638 }
2639
2640 if (!DEBUG_INSN_P (insn))
2641 {
2642 t = canon_rtx (t);
2643 pending = deps->pending_read_insns;
2644 pending_mem = deps->pending_read_mems;
2645 while (pending)
2646 {
2647 if (read_dependence (XEXP (pending_mem, 0), t)
2648 && ! sched_insns_conditions_mutex_p (insn,
2649 XEXP (pending, 0)))
2650 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2651 DEP_ANTI);
2652
2653 pending = XEXP (pending, 1);
2654 pending_mem = XEXP (pending_mem, 1);
2655 }
2656
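/* A read after a still-pending write gets a true dependence; when
speculative deps are generated it is additionally marked BEGIN_DATA,
so the scheduler may hoist the load and verify it later. */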
2657 pending = deps->pending_write_insns;
2658 pending_mem = deps->pending_write_mems;
2659 while (pending)
2660 {
2661 if (true_dependence (XEXP (pending_mem, 0), VOIDmode, t)
2662 && ! sched_insns_conditions_mutex_p (insn,
2663 XEXP (pending, 0)))
2664 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2665 sched_deps_info->generate_spec_deps
2666 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2667
2668 pending = XEXP (pending, 1);
2669 pending_mem = XEXP (pending_mem, 1);
2670 }
2671
2672 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2673 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2674
2675 for (u = deps->pending_jump_insns; u; u = XEXP (u, 1))
2676 if (deps_may_trap_p (x))
2677 {
2678 if ((sched_deps_info->generate_spec_deps)
2679 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2680 {
2681 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2682 MAX_DEP_WEAK);
2683
2684 note_dep (XEXP (u, 0), ds);
2685 }
2686 else
2687 add_dependence (insn, XEXP (u, 0), REG_DEP_CONTROL);
2688 }
2689 }
2690
2691 /* Always add these dependencies to pending_reads, since
2692 this insn may be followed by a write. */
2693 if (!deps->readonly)
2694 {
2695 if ((deps->pending_read_list_length
2696 + deps->pending_write_list_length)
2697 > MAX_PENDING_LIST_LENGTH
2698 && !DEBUG_INSN_P (insn))
2699 flush_pending_lists (deps, insn, true, true);
2700 add_insn_mem_dependence (deps, true, insn, x);
2701 }
2702
2703 sched_analyze_2 (deps, XEXP (x, 0), insn);
2704
2705 if (cslr_p && sched_deps_info->finish_rhs)
2706 sched_deps_info->finish_rhs ();
2707
2708 return;
2709 }
2710
2711 /* Force pending stores to memory in case a trap handler needs them. */
2712 case TRAP_IF:
2713 flush_pending_lists (deps, insn, true, false);
2714 break;
2715
2716 case PREFETCH:
2717 if (PREFETCH_SCHEDULE_BARRIER_P (x))
2718 reg_pending_barrier = TRUE_BARRIER;
2719 /* A prefetch insn contains addresses only, so if the prefetch
2720 address has no registers, there will be no dependencies on
2721 the prefetch insn. This is wrong from a correctness point
2722 of view, as such a prefetch can be moved below a jump insn,
2723 which usually generates a MOVE_BARRIER that keeps insns
2724 containing registers or memories from moving through the
2725 barrier. It is also wrong from a performance point of
2726 view, as a prefetch without dependencies will tend to be
2727 issued later instead of earlier. It is hard to generate
2728 accurate dependencies for prefetch insns, as a prefetch has
2729 only the start address, but it is better to have something
2730 than nothing. */
2731 if (!deps->readonly)
2732 {
2733 rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
2734 if (sched_deps_info->use_cselib)
2735 cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
2736 add_insn_mem_dependence (deps, true, insn, x);
2737 }
2738 break;
2739
2740 case UNSPEC_VOLATILE:
2741 flush_pending_lists (deps, insn, true, true);
2742 /* FALLTHRU */
2743
2744 case ASM_OPERANDS:
2745 case ASM_INPUT:
2746 {
2747 /* Traditional and volatile asm instructions must be considered to use
2748 and clobber all hard registers, all pseudo-registers and all of
2749 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2750
2751 Consider for instance a volatile asm that changes the fpu rounding
2752 mode. An insn should not be moved across this even if it only uses
2753 pseudo-regs because it might give an incorrectly rounded result. */
2754 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2755 reg_pending_barrier = TRUE_BARRIER;
2756
2757 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2758 We cannot just fall through here since then we would be confused
2759 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2760 a traditional asm, unlike its normal usage. */
2761
2762 if (code == ASM_OPERANDS)
2763 {
2764 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2765 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2766
2767 if (cslr_p && sched_deps_info->finish_rhs)
2768 sched_deps_info->finish_rhs ();
2769
2770 return;
2771 }
2772 break;
2773 }
2774
2775 case PRE_DEC:
2776 case POST_DEC:
2777 case PRE_INC:
2778 case POST_INC:
2779 /* These both read and modify the result. We must handle them as writes
2780 to get proper dependencies for following instructions. We must handle
2781 them as reads to get proper dependencies from this to previous
2782 instructions. Thus we need to pass them to both sched_analyze_1
2783 and sched_analyze_2. We must call sched_analyze_2 first in order
2784 to get the proper antecedent for the read. */
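/* (E.g. for (post_inc (reg R)), R is first analyzed as a use by
sched_analyze_2 and then as a set by sched_analyze_1.) */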
2785 sched_analyze_2 (deps, XEXP (x, 0), insn);
2786 sched_analyze_1 (deps, x, insn);
2787
2788 if (cslr_p && sched_deps_info->finish_rhs)
2789 sched_deps_info->finish_rhs ();
2790
2791 return;
2792
2793 case POST_MODIFY:
2794 case PRE_MODIFY:
2795 /* op0 = op0 + op1 */
2796 sched_analyze_2 (deps, XEXP (x, 0), insn);
2797 sched_analyze_2 (deps, XEXP (x, 1), insn);
2798 sched_analyze_1 (deps, x, insn);
2799
2800 if (cslr_p && sched_deps_info->finish_rhs)
2801 sched_deps_info->finish_rhs ();
2802
2803 return;
2804
2805 default:
2806 break;
2807 }
2808
2809 /* Other cases: walk the insn. */
2810 fmt = GET_RTX_FORMAT (code);
2811 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2812 {
2813 if (fmt[i] == 'e')
2814 sched_analyze_2 (deps, XEXP (x, i), insn);
2815 else if (fmt[i] == 'E')
2816 for (j = 0; j < XVECLEN (x, i); j++)
2817 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2818 }
2819
2820 if (cslr_p && sched_deps_info->finish_rhs)
2821 sched_deps_info->finish_rhs ();
2822 }
2823
2824 /* Analyze an INSN with pattern X to find all dependencies. */
2825 static void
2826 sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
2827 {
2828 RTX_CODE code = GET_CODE (x);
2829 rtx link;
2830 unsigned i;
2831 reg_set_iterator rsi;
2832
2833 if (! reload_completed)
2834 {
2835 HARD_REG_SET temp;
2836
2837 extract_insn (insn);
2838 preprocess_constraints ();
2839 ira_implicitly_set_insn_hard_regs (&temp);
2840 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
2841 IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2842 }
2843
2844 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2845 && code == SET);
2846
2847 if (may_trap_p (x))
2848 /* Avoid moving trapping instructions across function calls that might
2849 not always return. */
2850 add_dependence_list (insn, deps->last_function_call_may_noreturn,
2851 1, REG_DEP_ANTI, true);
2852
2853 /* We must avoid creating a situation in which two successors of the
2854 current block have different unwind info after scheduling. If at any
2855 point the two paths re-join this leads to incorrect unwind info. */
2856 /* ??? There are certain situations involving a forced frame pointer in
2857 which, with extra effort, we could fix up the unwind info at a later
2858 CFG join. However, it seems better to notice these cases earlier
2859 during prologue generation and avoid marking the frame pointer setup
2860 as frame-related at all. */
2861 if (RTX_FRAME_RELATED_P (insn))
2862 {
2863 /* Make sure prologue insn is scheduled before next jump. */
2864 deps->sched_before_next_jump
2865 = alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2866
2867 /* Make sure epilogue insn is scheduled after preceding jumps. */
2868 add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
2869 true);
2870 }
2871
2872 if (code == COND_EXEC)
2873 {
2874 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2875
2876 /* ??? Should be recording conditions so we reduce the number of
2877 false dependencies. */
2878 x = COND_EXEC_CODE (x);
2879 code = GET_CODE (x);
2880 }
2881 if (code == SET || code == CLOBBER)
2882 {
2883 sched_analyze_1 (deps, x, insn);
2884
2885 /* Bare clobber insns are used for letting life analysis, reg-stack
2886 and others know that a value is dead. Depend on the last call
2887 instruction so that reg-stack won't get confused. */
2888 if (code == CLOBBER)
2889 add_dependence_list (insn, deps->last_function_call, 1,
2890 REG_DEP_OUTPUT, true);
2891 }
2892 else if (code == PARALLEL)
2893 {
2894 for (i = XVECLEN (x, 0); i--;)
2895 {
2896 rtx sub = XVECEXP (x, 0, i);
2897 code = GET_CODE (sub);
2898
2899 if (code == COND_EXEC)
2900 {
2901 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2902 sub = COND_EXEC_CODE (sub);
2903 code = GET_CODE (sub);
2904 }
2905 if (code == SET || code == CLOBBER)
2906 sched_analyze_1 (deps, sub, insn);
2907 else
2908 sched_analyze_2 (deps, sub, insn);
2909 }
2910 }
2911 else
2912 sched_analyze_2 (deps, x, insn);
2913
2914 /* Mark registers CLOBBERED or used by the called function. */
2915 if (CALL_P (insn))
2916 {
2917 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2918 {
2919 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2920 sched_analyze_1 (deps, XEXP (link, 0), insn);
2921 else if (GET_CODE (XEXP (link, 0)) != SET)
2922 sched_analyze_2 (deps, XEXP (link, 0), insn);
2923 }
2924 /* Don't schedule anything after a tail call; a tail call needs
2925 to use at least all call-saved registers. */
2926 if (SIBLING_CALL_P (insn))
2927 reg_pending_barrier = TRUE_BARRIER;
2928 else if (find_reg_note (insn, REG_SETJMP, NULL))
2929 reg_pending_barrier = MOVE_BARRIER;
2930 }
2931
2932 if (JUMP_P (insn))
2933 {
2934 rtx next;
2935 next = next_nonnote_nondebug_insn (insn);
2936 if (next && BARRIER_P (next))
2937 reg_pending_barrier = MOVE_BARRIER;
2938 else
2939 {
2940 rtx pending, pending_mem;
2941
2942 if (sched_deps_info->compute_jump_reg_dependencies)
2943 {
2944 (*sched_deps_info->compute_jump_reg_dependencies)
2945 (insn, reg_pending_control_uses);
2946
2947 /* Make latency of jump equal to 0 by using anti-dependence. */
2948 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
2949 {
2950 struct deps_reg *reg_last = &deps->reg_last[i];
2951 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
2952 false);
2953 add_dependence_list (insn, reg_last->implicit_sets,
2954 0, REG_DEP_ANTI, false);
2955 add_dependence_list (insn, reg_last->clobbers, 0,
2956 REG_DEP_ANTI, false);
2957 }
2958 }
2959
2960 /* All memory writes and volatile reads must happen before the
2961 jump. Non-volatile reads must happen before the jump iff
2962 their result is needed by the register use mask above. */
2963
2964 pending = deps->pending_write_insns;
2965 pending_mem = deps->pending_write_mems;
2966 while (pending)
2967 {
2968 if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2969 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2970 pending = XEXP (pending, 1);
2971 pending_mem = XEXP (pending_mem, 1);
2972 }
2973
2974 pending = deps->pending_read_insns;
2975 pending_mem = deps->pending_read_mems;
2976 while (pending)
2977 {
2978 if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
2979 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2980 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2981 pending = XEXP (pending, 1);
2982 pending_mem = XEXP (pending_mem, 1);
2983 }
2984
2985 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2986 REG_DEP_ANTI, true);
2987 add_dependence_list (insn, deps->pending_jump_insns, 1,
2988 REG_DEP_ANTI, true);
2989 }
2990 }
2991
2992 /* If this instruction can throw an exception, then moving it changes
2993 where block boundaries fall. This is mighty confusing elsewhere.
2994 Therefore, prevent such an instruction from being moved. Same for
2995 non-jump instructions that define block boundaries.
2996 ??? Unclear whether this is still necessary in EBB mode. If not,
2997 add_branch_dependences should be adjusted for RGN mode instead. */
2998 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
2999 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
3000 reg_pending_barrier = MOVE_BARRIER;
3001
3002 if (sched_pressure != SCHED_PRESSURE_NONE)
3003 {
3004 setup_insn_reg_uses (deps, insn);
3005 init_insn_reg_pressure_info (insn);
3006 }
3007
3008 /* Add register dependencies for insn. */
3009 if (DEBUG_INSN_P (insn))
3010 {
3011 rtx prev = deps->last_debug_insn;
3012 rtx u;
3013
3014 if (!deps->readonly)
3015 deps->last_debug_insn = insn;
3016
3017 if (prev)
3018 add_dependence (insn, prev, REG_DEP_ANTI);
3019
3020 add_dependence_list (insn, deps->last_function_call, 1,
3021 REG_DEP_ANTI, false);
3022
3023 if (!sel_sched_p ())
3024 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
3025 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3026
3027 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3028 {
3029 struct deps_reg *reg_last = &deps->reg_last[i];
3030 add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
3031 /* There's no point in making REG_DEP_CONTROL dependencies for
3032 debug insns. */
3033 add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
3034 false);
3035
3036 if (!deps->readonly)
3037 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3038 }
3039 CLEAR_REG_SET (reg_pending_uses);
3040
3041 /* Quite often, a debug insn will refer to stuff in the
3042 previous instruction, but the reason we want this
3043 dependency here is to make sure the scheduler doesn't
3044 gratuitously move a debug insn ahead. This could dirty
3045 DF flags and cause additional analysis that wouldn't have
3046 occurred in compilation without debug insns, and such
3047 additional analysis can modify the generated code. */
3048 prev = PREV_INSN (insn);
3049
3050 if (prev && NONDEBUG_INSN_P (prev))
3051 add_dependence (insn, prev, REG_DEP_ANTI);
3052 }
3053 else
3054 {
3055 regset_head set_or_clobbered;
3056
3057 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3058 {
3059 struct deps_reg *reg_last = &deps->reg_last[i];
3060 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3061 add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
3062 false);
3063 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3064 false);
3065
3066 if (!deps->readonly)
3067 {
3068 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3069 reg_last->uses_length++;
3070 }
3071 }
3072
3073 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3074 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3075 {
3076 struct deps_reg *reg_last = &deps->reg_last[i];
3077 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3078 add_dependence_list (insn, reg_last->implicit_sets, 0,
3079 REG_DEP_ANTI, false);
3080 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3081 false);
3082
3083 if (!deps->readonly)
3084 {
3085 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3086 reg_last->uses_length++;
3087 }
3088 }
3089
3090 if (targetm.sched.exposed_pipeline)
3091 {
3092 INIT_REG_SET (&set_or_clobbered);
3093 bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3094 reg_pending_sets);
3095 EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3096 {
3097 struct deps_reg *reg_last = &deps->reg_last[i];
3098 rtx list;
3099 for (list = reg_last->uses; list; list = XEXP (list, 1))
3100 {
3101 rtx other = XEXP (list, 0);
3102 if (INSN_CACHED_COND (other) != const_true_rtx
3103 && refers_to_regno_p (i, i + 1, INSN_CACHED_COND (other), NULL))
3104 INSN_CACHED_COND (other) = const_true_rtx;
3105 }
3106 }
3107 }
3108
3109 /* If the current insn is conditional, we can't free any
3110 of the lists. */
3111 if (sched_has_condition_p (insn))
3112 {
3113 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3114 {
3115 struct deps_reg *reg_last = &deps->reg_last[i];
3116 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3117 false);
3118 add_dependence_list (insn, reg_last->implicit_sets, 0,
3119 REG_DEP_ANTI, false);
3120 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3121 false);
3122 add_dependence_list (insn, reg_last->control_uses, 0,
3123 REG_DEP_CONTROL, false);
3124
3125 if (!deps->readonly)
3126 {
3127 reg_last->clobbers
3128 = alloc_INSN_LIST (insn, reg_last->clobbers);
3129 reg_last->clobbers_length++;
3130 }
3131 }
3132 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3133 {
3134 struct deps_reg *reg_last = &deps->reg_last[i];
3135 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3136 false);
3137 add_dependence_list (insn, reg_last->implicit_sets, 0,
3138 REG_DEP_ANTI, false);
3139 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
3140 false);
3141 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3142 false);
3143 add_dependence_list (insn, reg_last->control_uses, 0,
3144 REG_DEP_CONTROL, false);
3145
3146 if (!deps->readonly)
3147 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3148 }
3149 }
3150 else
3151 {
3152 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3153 {
3154 struct deps_reg *reg_last = &deps->reg_last[i];
3155 if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
3156 || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
3157 {
3158 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3159 REG_DEP_OUTPUT, false);
3160 add_dependence_list_and_free (deps, insn,
3161 &reg_last->implicit_sets, 0,
3162 REG_DEP_ANTI, false);
3163 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3164 REG_DEP_ANTI, false);
3165 add_dependence_list_and_free (deps, insn,
3166 &reg_last->control_uses, 0,
3167 REG_DEP_ANTI, false);
3168 add_dependence_list_and_free (deps, insn,
3169 &reg_last->clobbers, 0,
3170 REG_DEP_OUTPUT, false);
3171
3172 if (!deps->readonly)
3173 {
3174 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3175 reg_last->clobbers_length = 0;
3176 reg_last->uses_length = 0;
3177 }
3178 }
3179 else
3180 {
3181 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3182 false);
3183 add_dependence_list (insn, reg_last->implicit_sets, 0,
3184 REG_DEP_ANTI, false);
3185 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3186 false);
3187 add_dependence_list (insn, reg_last->control_uses, 0,
3188 REG_DEP_CONTROL, false);
3189 }
3190
3191 if (!deps->readonly)
3192 {
3193 reg_last->clobbers_length++;
3194 reg_last->clobbers
3195 = alloc_INSN_LIST (insn, reg_last->clobbers);
3196 }
3197 }
3198 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3199 {
3200 struct deps_reg *reg_last = &deps->reg_last[i];
3201
3202 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3203 REG_DEP_OUTPUT, false);
3204 add_dependence_list_and_free (deps, insn,
3205 &reg_last->implicit_sets,
3206 0, REG_DEP_ANTI, false);
3207 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3208 REG_DEP_OUTPUT, false);
3209 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3210 REG_DEP_ANTI, false);
3211 add_dependence_list (insn, reg_last->control_uses, 0,
3212 REG_DEP_CONTROL, false);
3213
3214 if (!deps->readonly)
3215 {
3216 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3217 reg_last->uses_length = 0;
3218 reg_last->clobbers_length = 0;
3219 }
3220 }
3221 }
3222 if (!deps->readonly)
3223 {
3224 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3225 {
3226 struct deps_reg *reg_last = &deps->reg_last[i];
3227 reg_last->control_uses
3228 = alloc_INSN_LIST (insn, reg_last->control_uses);
3229 }
3230 }
3231 }
3232
3233 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3234 if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3235 {
3236 struct deps_reg *reg_last = &deps->reg_last[i];
3237 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
3238 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
3239 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
3240 add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
3241 false);
3242
3243 if (!deps->readonly)
3244 reg_last->implicit_sets
3245 = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3246 }
3247
3248 if (!deps->readonly)
3249 {
3250 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3251 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3252 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
3253 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3254 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
3255 || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3256 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3257
3258 /* Set up the pending barrier found. */
3259 deps->last_reg_pending_barrier = reg_pending_barrier;
3260 }
3261
3262 CLEAR_REG_SET (reg_pending_uses);
3263 CLEAR_REG_SET (reg_pending_clobbers);
3264 CLEAR_REG_SET (reg_pending_sets);
3265 CLEAR_REG_SET (reg_pending_control_uses);
3266 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3267 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3268
3269 /* Add dependencies if a scheduling barrier was found. */
3270 if (reg_pending_barrier)
3271 {
3272 /* In the case of a barrier, most of the added dependencies are
3273 not real, so we use anti-dependence here. */
3274 if (sched_has_condition_p (insn))
3275 {
3276 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3277 {
3278 struct deps_reg *reg_last = &deps->reg_last[i];
3279 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3280 true);
3281 add_dependence_list (insn, reg_last->sets, 0,
3282 reg_pending_barrier == TRUE_BARRIER
3283 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3284 add_dependence_list (insn, reg_last->implicit_sets, 0,
3285 REG_DEP_ANTI, true);
3286 add_dependence_list (insn, reg_last->clobbers, 0,
3287 reg_pending_barrier == TRUE_BARRIER
3288 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3289 }
3290 }
3291 else
3292 {
3293 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3294 {
3295 struct deps_reg *reg_last = &deps->reg_last[i];
3296 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3297 REG_DEP_ANTI, true);
3298 add_dependence_list_and_free (deps, insn,
3299 &reg_last->control_uses, 0,
3300 REG_DEP_CONTROL, true);
3301 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3302 reg_pending_barrier == TRUE_BARRIER
3303 ? REG_DEP_TRUE : REG_DEP_ANTI,
3304 true);
3305 add_dependence_list_and_free (deps, insn,
3306 &reg_last->implicit_sets, 0,
3307 REG_DEP_ANTI, true);
3308 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3309 reg_pending_barrier == TRUE_BARRIER
3310 ? REG_DEP_TRUE : REG_DEP_ANTI,
3311 true);
3312
3313 if (!deps->readonly)
3314 {
3315 reg_last->uses_length = 0;
3316 reg_last->clobbers_length = 0;
3317 }
3318 }
3319 }
3320
3321 if (!deps->readonly)
3322 for (i = 0; i < (unsigned)deps->max_reg; i++)
3323 {
3324 struct deps_reg *reg_last = &deps->reg_last[i];
3325 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3326 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3327 }
3328
3329 /* Don't flush pending lists on speculative checks for
3330 selective scheduling. */
3331 if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
3332 flush_pending_lists (deps, insn, true, true);
3333
3334 reg_pending_barrier = NOT_A_BARRIER;
3335 }
3336
3337 /* If a post-call group is still open, see if it should remain so.
3338 This insn must be a simple move of a hard reg to a pseudo or
3339 vice-versa.
3340
3341 We must avoid moving these insns for correctness on targets
3342 with small register classes, and for special registers like
3343 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
3344 hard regs for all targets. */
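/* (A sketch: a copy such as (set (reg pseudo) (reg hard-retval))
right after a call stays glued to the call, keeping the hard
register's lifetime short.) */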
3345
3346 if (deps->in_post_call_group_p)
3347 {
3348 rtx tmp, set = single_set (insn);
3349 int src_regno, dest_regno;
3350
3351 if (set == NULL)
3352 {
3353 if (DEBUG_INSN_P (insn))
3354 /* We don't want to mark debug insns as part of the same
3355 sched group. We know they really aren't, but if we use
3356 debug insns to tell that a call group is over, we'll
3357 get different code if debug insns are not there and
3358 instructions that follow seem like they should be part
3359 of the call group.
3360
3361 Also, if we did, chain_to_prev_insn would move the
3362 deps of the debug insn to the call insn, modifying
3363 non-debug post-dependency counts of the debug insn
3364 dependencies and otherwise messing with the scheduling
3365 order.
3366
3367 Instead, let such debug insns be scheduled freely, but
3368 keep the call group open in case there are insns that
3369 should be part of it afterwards. Since we grant debug
3370 insns higher priority than even sched group insns, it
3371 will all turn out all right. */
3372 goto debug_dont_end_call_group;
3373 else
3374 goto end_call_group;
3375 }
3376
3377 tmp = SET_DEST (set);
3378 if (GET_CODE (tmp) == SUBREG)
3379 tmp = SUBREG_REG (tmp);
3380 if (REG_P (tmp))
3381 dest_regno = REGNO (tmp);
3382 else
3383 goto end_call_group;
3384
3385 tmp = SET_SRC (set);
3386 if (GET_CODE (tmp) == SUBREG)
3387 tmp = SUBREG_REG (tmp);
3388 if ((GET_CODE (tmp) == PLUS
3389 || GET_CODE (tmp) == MINUS)
3390 && REG_P (XEXP (tmp, 0))
3391 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3392 && dest_regno == STACK_POINTER_REGNUM)
3393 src_regno = STACK_POINTER_REGNUM;
3394 else if (REG_P (tmp))
3395 src_regno = REGNO (tmp);
3396 else
3397 goto end_call_group;
3398
3399 if (src_regno < FIRST_PSEUDO_REGISTER
3400 || dest_regno < FIRST_PSEUDO_REGISTER)
3401 {
3402 if (!deps->readonly
3403 && deps->in_post_call_group_p == post_call_initial)
3404 deps->in_post_call_group_p = post_call;
3405
3406 if (!sel_sched_p () || sched_emulate_haifa_p)
3407 {
3408 SCHED_GROUP_P (insn) = 1;
3409 CANT_MOVE (insn) = 1;
3410 }
3411 }
3412 else
3413 {
3414 end_call_group:
3415 if (!deps->readonly)
3416 deps->in_post_call_group_p = not_post_call;
3417 }
3418 }
3419
3420 debug_dont_end_call_group:
3421 if ((current_sched_info->flags & DO_SPECULATION)
3422 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3423 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3424 be speculated. */
3425 {
3426 if (sel_sched_p ())
3427 sel_mark_hard_insn (insn);
3428 else
3429 {
3430 sd_iterator_def sd_it;
3431 dep_t dep;
3432
3433 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3434 sd_iterator_cond (&sd_it, &dep);)
3435 change_spec_dep_to_hard (sd_it);
3436 }
3437 }
3438 }
3439
3440 /* Return TRUE if INSN might not always return normally (e.g. call exit,
3441 longjmp, loop forever, ...). */
3442 /* FIXME: Why can't this function just use flags_from_decl_or_type and
3443 test for ECF_NORETURN? */
3444 static bool
3445 call_may_noreturn_p (rtx insn)
3446 {
3447 rtx call;
3448
3449 /* const or pure calls that aren't looping will always return. */
3450 if (RTL_CONST_OR_PURE_CALL_P (insn)
3451 && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3452 return false;
3453
3454 call = get_call_rtx_from (insn);
3455 if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3456 {
3457 rtx symbol = XEXP (XEXP (call, 0), 0);
3458 if (SYMBOL_REF_DECL (symbol)
3459 && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3460 {
3461 if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3462 == BUILT_IN_NORMAL)
3463 switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3464 {
3465 case BUILT_IN_BCMP:
3466 case BUILT_IN_BCOPY:
3467 case BUILT_IN_BZERO:
3468 case BUILT_IN_INDEX:
3469 case BUILT_IN_MEMCHR:
3470 case BUILT_IN_MEMCMP:
3471 case BUILT_IN_MEMCPY:
3472 case BUILT_IN_MEMMOVE:
3473 case BUILT_IN_MEMPCPY:
3474 case BUILT_IN_MEMSET:
3475 case BUILT_IN_RINDEX:
3476 case BUILT_IN_STPCPY:
3477 case BUILT_IN_STPNCPY:
3478 case BUILT_IN_STRCAT:
3479 case BUILT_IN_STRCHR:
3480 case BUILT_IN_STRCMP:
3481 case BUILT_IN_STRCPY:
3482 case BUILT_IN_STRCSPN:
3483 case BUILT_IN_STRLEN:
3484 case BUILT_IN_STRNCAT:
3485 case BUILT_IN_STRNCMP:
3486 case BUILT_IN_STRNCPY:
3487 case BUILT_IN_STRPBRK:
3488 case BUILT_IN_STRRCHR:
3489 case BUILT_IN_STRSPN:
3490 case BUILT_IN_STRSTR:
3491 /* Assume certain string/memory builtins always return. */
3492 return false;
3493 default:
3494 break;
3495 }
3496 }
3497 }
3498
3499 /* For all other calls assume that they might not always return. */
3500 return true;
3501 }
3502
3503 /* Return true if INSN should be made dependent on the previous instruction
3504 group, and if all INSN's dependencies should be moved to the first
3505 instruction of that group. */
3506
3507 static bool
3508 chain_to_prev_insn_p (rtx insn)
3509 {
3510 rtx prev, x;
3511
3512 /* INSN forms a group with the previous instruction. */
3513 if (SCHED_GROUP_P (insn))
3514 return true;
3515
3516 /* If the previous instruction clobbers a register R and this one sets
3517 part of R, the clobber was added specifically to help us track the
3518 liveness of R. There's no point scheduling the clobber and leaving
3519 INSN behind, especially if we move the clobber to another block. */
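/* (E.g. a (clobber (reg:DI R)) followed by an insn setting
(subreg:SI (reg:DI R) 0) exists only to make the liveness of R
explicit; the pair should stay together.) */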
3520 prev = prev_nonnote_nondebug_insn (insn);
3521 if (prev
3522 && INSN_P (prev)
3523 && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
3524 && GET_CODE (PATTERN (prev)) == CLOBBER)
3525 {
3526 x = XEXP (PATTERN (prev), 0);
3527 if (set_of (x, insn))
3528 return true;
3529 }
3530
3531 return false;
3532 }
3533
3534 /* Analyze INSN with DEPS as a context. */
3535 void
3536 deps_analyze_insn (struct deps_desc *deps, rtx insn)
3537 {
3538 if (sched_deps_info->start_insn)
3539 sched_deps_info->start_insn (insn);
3540
3541 /* Record the condition for this insn. */
3542 if (NONDEBUG_INSN_P (insn))
3543 {
3544 rtx t;
3545 sched_get_condition_with_rev (insn, NULL);
3546 t = INSN_CACHED_COND (insn);
3547 INSN_COND_DEPS (insn) = NULL_RTX;
3548 if (reload_completed
3549 && (current_sched_info->flags & DO_PREDICATION)
3550 && COMPARISON_P (t)
3551 && REG_P (XEXP (t, 0))
3552 && CONSTANT_P (XEXP (t, 1)))
3553 {
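/* The condition has the shape (COMPARISON (reg) (const)). Collect
every live set, clobber and implicit set of the compared register,
presumably so that later predication checks can find the producers
of the condition. */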
3554 unsigned int regno;
3555 int nregs;
3556 t = XEXP (t, 0);
3557 regno = REGNO (t);
3558 nregs = hard_regno_nregs[regno][GET_MODE (t)];
3559 t = NULL_RTX;
3560 while (nregs-- > 0)
3561 {
3562 struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
3563 t = concat_INSN_LIST (reg_last->sets, t);
3564 t = concat_INSN_LIST (reg_last->clobbers, t);
3565 t = concat_INSN_LIST (reg_last->implicit_sets, t);
3566 }
3567 INSN_COND_DEPS (insn) = t;
3568 }
3569 }
3570
3571 if (JUMP_P (insn))
3572 {
3573 /* Make each JUMP_INSN (but not a speculative check)
3574 a scheduling barrier for memory references. */
3575 if (!deps->readonly
3576 && !(sel_sched_p ()
3577 && sel_insn_is_speculation_check (insn)))
3578 {
3579 /* Keep the list a reasonable size. */
3580 if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
3581 flush_pending_lists (deps, insn, true, true);
3582 else
3583 deps->pending_jump_insns
3584 = alloc_INSN_LIST (insn, deps->pending_jump_insns);
3585 }
3586
3587 /* For each insn which shouldn't cross a jump, add a dependence. */
3588 add_dependence_list_and_free (deps, insn,
3589 &deps->sched_before_next_jump, 1,
3590 REG_DEP_ANTI, true);
3591
3592 sched_analyze_insn (deps, PATTERN (insn), insn);
3593 }
3594 else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
3595 {
3596 sched_analyze_insn (deps, PATTERN (insn), insn);
3597 }
3598 else if (CALL_P (insn))
3599 {
3600 int i;
3601
3602 CANT_MOVE (insn) = 1;
3603
3604 if (find_reg_note (insn, REG_SETJMP, NULL))
3605 {
3606 /* This is setjmp. Assume that all registers, not just
3607 hard registers, may be clobbered by this call. */
3608 reg_pending_barrier = MOVE_BARRIER;
3609 }
3610 else
3611 {
3612 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3613 /* A call may read and modify global register variables. */
3614 if (global_regs[i])
3615 {
3616 SET_REGNO_REG_SET (reg_pending_sets, i);
3617 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3618 }
3619 /* Other call-clobbered hard regs may be clobbered.
3620 Since we only have a choice between 'might be clobbered'
3621 and 'definitely not clobbered', we must include all
3622 partly call-clobbered registers here. */
3623 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3624 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3625 SET_REGNO_REG_SET (reg_pending_clobbers, i);
3626 /* We don't know what set of fixed registers might be used
3627 by the function, but the stack pointer is certainly among
3628 them; in any case, be conservative. */
3629 else if (fixed_regs[i])
3630 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3631 /* The frame pointer is normally not used by the function
3632 itself, but by the debugger. */
3633 /* ??? MIPS o32 is an exception. It uses the frame pointer
3634 in the macro expansion of jal but does not represent this
3635 fact in the call_insn rtl. */
3636 else if (i == FRAME_POINTER_REGNUM
3637 || (i == HARD_FRAME_POINTER_REGNUM
3638 && (! reload_completed || frame_pointer_needed)))
3639 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3640 }
3641
3642 /* For each insn which shouldn't cross a call, add a dependence
3643 between that insn and this call insn. */
3644 add_dependence_list_and_free (deps, insn,
3645 &deps->sched_before_next_call, 1,
3646 REG_DEP_ANTI, true);
3647
3648 sched_analyze_insn (deps, PATTERN (insn), insn);
3649
3650 /* If CALL would be in a sched group, then this would violate the
3651 convention that sched group insns have dependencies only on the
3652 previous instruction.
3653
3654 Of course one can say: "Hey! What about the head of the sched group?"
3655 And I will answer: "Basic principles (one dep per insn) are always
3656 the same." */
3657 gcc_assert (!SCHED_GROUP_P (insn));
3658
3659 /* In the absence of interprocedural alias analysis, we must flush
3660 all pending reads and writes, and start new dependencies starting
3661 from here. But only flush writes for constant calls (which may
3662 be passed a pointer to something we haven't written yet). */
3663 flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3664
3665 if (!deps->readonly)
3666 {
3667 /* Remember the last function call for limiting lifetimes. */
3668 free_INSN_LIST_list (&deps->last_function_call);
3669 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3670
3671 if (call_may_noreturn_p (insn))
3672 {
3673 /* Remember the last function call that might not always return
3674 normally for limiting moves of trapping insns. */
3675 free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3676 deps->last_function_call_may_noreturn
3677 = alloc_INSN_LIST (insn, NULL_RTX);
3678 }
3679
3680 /* Before reload, begin a post-call group, so as to keep the
3681 lifetimes of hard registers correct. */
3682 if (! reload_completed)
3683 deps->in_post_call_group_p = post_call;
3684 }
3685 }
3686
3687 if (sched_deps_info->use_cselib)
3688 cselib_process_insn (insn);
3689
3690 if (sched_deps_info->finish_insn)
3691 sched_deps_info->finish_insn ();
3692
3693 /* Fixup the dependencies in the sched group. */
3694 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3695 && chain_to_prev_insn_p (insn)
3696 && !sel_sched_p ())
3697 chain_to_prev_insn (insn);
3698 }
3699
3700 /* Initialize DEPS for the new block beginning with HEAD. */
3701 void
3702 deps_start_bb (struct deps_desc *deps, rtx head)
3703 {
3704 gcc_assert (!deps->readonly);
3705
3706 /* Before reload, if the previous block ended in a call, show that
3707 we are inside a post-call group, so as to keep the lifetimes of
3708 hard registers correct. */
3709 if (! reload_completed && !LABEL_P (head))
3710 {
3711 rtx insn = prev_nonnote_nondebug_insn (head);
3712
3713 if (insn && CALL_P (insn))
3714 deps->in_post_call_group_p = post_call_initial;
3715 }
3716 }
3717
3718 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3719 dependencies for each insn. */
3720 void
3721 sched_analyze (struct deps_desc *deps, rtx head, rtx tail)
3722 {
3723 rtx insn;
3724
3725 if (sched_deps_info->use_cselib)
3726 cselib_init (CSELIB_RECORD_MEMORY);
3727
3728 deps_start_bb (deps, head);
3729
3730 for (insn = head;; insn = NEXT_INSN (insn))
3731 {
3732
3733 if (INSN_P (insn))
3734 {
3735 /* And initialize deps_lists. */
3736 sd_init_insn (insn);
3737 }
3738
3739 deps_analyze_insn (deps, insn);
3740
3741 if (insn == tail)
3742 {
3743 if (sched_deps_info->use_cselib)
3744 cselib_finish ();
3745 return;
3746 }
3747 }
3748 gcc_unreachable ();
3749 }
3750
3751 /* Helper for sched_free_deps ().
3752 Delete INSN's (RESOLVED_P) backward dependencies. */
3753 static void
3754 delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
3755 {
3756 sd_iterator_def sd_it;
3757 dep_t dep;
3758 sd_list_types_def types;
3759
3760 if (resolved_p)
3761 types = SD_LIST_RES_BACK;
3762 else
3763 types = SD_LIST_BACK;
3764
3765 for (sd_it = sd_iterator_start (insn, types);
3766 sd_iterator_cond (&sd_it, &dep);)
3767 {
3768 dep_link_t link = *sd_it.linkp;
3769 dep_node_t node = DEP_LINK_NODE (link);
3770 deps_list_t back_list;
3771 deps_list_t forw_list;
3772
3773 get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3774 remove_from_deps_list (link, back_list);
3775 delete_dep_node (node);
3776 }
3777 }
3778
3779 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3780 deps_lists. */
3781 void
3782 sched_free_deps (rtx head, rtx tail, bool resolved_p)
3783 {
3784 rtx insn;
3785 rtx next_tail = NEXT_INSN (tail);
3786
3787 /* We make two passes since some insns may be scheduled before their
3788 dependencies are resolved. */
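/* Pass 1 clears the forward lists only; pass 2 then deletes the
dep_nodes through the backward lists, presumably so that no node is
freed while another insn's forward list might still reference it. */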
3789 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3790 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3791 {
3792 /* Clear forward deps and leave the dep_nodes on the
3793 corresponding back_deps lists. */
3794 if (resolved_p)
3795 clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3796 else
3797 clear_deps_list (INSN_FORW_DEPS (insn));
3798 }
3799 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3800 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3801 {
3802 /* Clear resolved back deps together with their dep_nodes. */
3803 delete_dep_nodes_in_back_deps (insn, resolved_p);
3804
3805 sd_finish_insn (insn);
3806 }
3807 }
3808 \f
3809 /* Initialize variables for region data dependence analysis.
3810 When LAZY_REG_LAST is true, do not allocate reg_last array
3811 of struct deps_desc immediately. */
3812
3813 void
3814 init_deps (struct deps_desc *deps, bool lazy_reg_last)
3815 {
3816 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3817
3818 deps->max_reg = max_reg;
3819 if (lazy_reg_last)
3820 deps->reg_last = NULL;
3821 else
3822 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3823 INIT_REG_SET (&deps->reg_last_in_use);
3824
3825 deps->pending_read_insns = 0;
3826 deps->pending_read_mems = 0;
3827 deps->pending_write_insns = 0;
3828 deps->pending_write_mems = 0;
3829 deps->pending_jump_insns = 0;
3830 deps->pending_read_list_length = 0;
3831 deps->pending_write_list_length = 0;
3832 deps->pending_flush_length = 0;
3833 deps->last_pending_memory_flush = 0;
3834 deps->last_function_call = 0;
3835 deps->last_function_call_may_noreturn = 0;
3836 deps->sched_before_next_call = 0;
3837 deps->sched_before_next_jump = 0;
3838 deps->in_post_call_group_p = not_post_call;
3839 deps->last_debug_insn = 0;
3840 deps->last_reg_pending_barrier = NOT_A_BARRIER;
3841 deps->readonly = 0;
3842 }
3843
3844 /* Init only the reg_last field of DEPS, which was not allocated before
3845 because we initialized DEPS lazily. */
3846 void
3847 init_deps_reg_last (struct deps_desc *deps)
3848 {
3849 gcc_assert (deps && deps->max_reg > 0);
3850 gcc_assert (deps->reg_last == NULL);
3851
3852 deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3853 }
3854
3855
3856 /* Free insn lists found in DEPS. */
3857
3858 void
3859 free_deps (struct deps_desc *deps)
3860 {
3861 unsigned i;
3862 reg_set_iterator rsi;
3863
3864 /* We set max_reg to 0 when this context was already freed. */
3865 if (deps->max_reg == 0)
3866 {
3867 gcc_assert (deps->reg_last == NULL);
3868 return;
3869 }
3870 deps->max_reg = 0;
3871
3872 free_INSN_LIST_list (&deps->pending_read_insns);
3873 free_EXPR_LIST_list (&deps->pending_read_mems);
3874 free_INSN_LIST_list (&deps->pending_write_insns);
3875 free_EXPR_LIST_list (&deps->pending_write_mems);
3876 free_INSN_LIST_list (&deps->last_pending_memory_flush);
3877
3878 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3879 times. For a testcase with 42000 regs and 8000 small basic blocks,
3880 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
3881 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3882 {
3883 struct deps_reg *reg_last = &deps->reg_last[i];
3884 if (reg_last->uses)
3885 free_INSN_LIST_list (&reg_last->uses);
3886 if (reg_last->sets)
3887 free_INSN_LIST_list (&reg_last->sets);
3888 if (reg_last->implicit_sets)
3889 free_INSN_LIST_list (&reg_last->implicit_sets);
3890 if (reg_last->control_uses)
3891 free_INSN_LIST_list (&reg_last->control_uses);
3892 if (reg_last->clobbers)
3893 free_INSN_LIST_list (&reg_last->clobbers);
3894 }
3895 CLEAR_REG_SET (&deps->reg_last_in_use);
3896
3897 /* As we initialize reg_last lazily, it is possible that we didn't allocate
3898 it at all. */
3899 free (deps->reg_last);
3900 deps->reg_last = NULL;
3903 }
3904
3905 /* Remove INSN from the dependence context DEPS. */
3906 void
3907 remove_from_deps (struct deps_desc *deps, rtx insn)
3908 {
3909 int removed;
3910 unsigned i;
3911 reg_set_iterator rsi;
3912
3913 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
3914 &deps->pending_read_mems);
3915 if (!DEBUG_INSN_P (insn))
3916 deps->pending_read_list_length -= removed;
3917 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
3918 &deps->pending_write_mems);
3919 deps->pending_write_list_length -= removed;
3920
3921 removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
3922 deps->pending_flush_length -= removed;
3923 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
3924 deps->pending_flush_length -= removed;
3925
3926 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3927 {
3928 struct deps_reg *reg_last = &deps->reg_last[i];
3929 if (reg_last->uses)
3930 remove_from_dependence_list (insn, &reg_last->uses);
3931 if (reg_last->sets)
3932 remove_from_dependence_list (insn, &reg_last->sets);
3933 if (reg_last->implicit_sets)
3934 remove_from_dependence_list (insn, &reg_last->implicit_sets);
3935 if (reg_last->clobbers)
3936 remove_from_dependence_list (insn, &reg_last->clobbers);
3937 if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
3938 && !reg_last->clobbers)
3939 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
3940 }
3941
3942 if (CALL_P (insn))
3943 {
3944 remove_from_dependence_list (insn, &deps->last_function_call);
3945 remove_from_dependence_list (insn,
3946 &deps->last_function_call_may_noreturn);
3947 }
3948 remove_from_dependence_list (insn, &deps->sched_before_next_call);
3949 }
3950
3951 /* Init deps data vector. */
3952 static void
3953 init_deps_data_vector (void)
3954 {
3955 int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
3956 if (reserve > 0 && ! h_d_i_d.space (reserve))
3957 h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2);
3958 }
3959
3960 /* If it is profitable to use them, initialize or extend (depending on
3961 GLOBAL_P) dependency data. */
3962 void
3963 sched_deps_init (bool global_p)
3964 {
3965 /* Average number of insns per basic block.
3966 '+ 1' is used to make it nonzero. */
3967 int insns_in_block = sched_max_luid / n_basic_blocks + 1;
3968
3969 init_deps_data_vector ();
3970
3971 /* We use another caching mechanism for selective scheduling, so
3972 we don't use this one. */
3973 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
3974 {
3975 /* ?!? We could save some memory by computing a per-region luid mapping
3976 which could reduce both the number of vectors in the cache and the
3977 size of each vector. Instead we just avoid the cache entirely unless
3978 the average number of instructions in a basic block is very high. See
3979 the comment before the declaration of true_dependency_cache for
3980 what we consider "very high". */
3981 cache_size = 0;
3982 extend_dependency_caches (sched_max_luid, true);
3983 }
3984
3985 if (global_p)
3986 {
3987 dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
3988 /* Allocate lists for one block at a time. */
3989 insns_in_block);
3990 dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
3991 /* Allocate nodes for one block at a time.
3992 We assume that average insn has
3993 5 producers. */
3994 5 * insns_in_block);
3995 }
3996 }
3997
3998
3999 /* Create or extend (depending on CREATE_P) dependency caches to
4000 size N. */
4001 void
4002 extend_dependency_caches (int n, bool create_p)
4003 {
4004 if (create_p || true_dependency_cache)
4005 {
4006 int i, luid = cache_size + n;
4007
4008 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
4009 luid);
4010 output_dependency_cache = XRESIZEVEC (bitmap_head,
4011 output_dependency_cache, luid);
4012 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
4013 luid);
4014 control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
4015 luid);
4016
4017 if (current_sched_info->flags & DO_SPECULATION)
4018 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
4019 luid);
4020
4021 for (i = cache_size; i < luid; i++)
4022 {
4023 bitmap_initialize (&true_dependency_cache[i], 0);
4024 bitmap_initialize (&output_dependency_cache[i], 0);
4025 bitmap_initialize (&anti_dependency_cache[i], 0);
4026 bitmap_initialize (&control_dependency_cache[i], 0);
4027
4028 if (current_sched_info->flags & DO_SPECULATION)
4029 bitmap_initialize (&spec_dependency_cache[i], 0);
4030 }
4031 cache_size = luid;
4032 }
4033 }
4034
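/* For reference, the caches grown above are arrays of bitmaps indexed
by the luid of the consumer insn: bit P set in true_dependency_cache[C]
records that a true dependence from the insn with luid P to the insn
with luid C has already been noted.  The output, anti, control and spec
caches follow the same scheme for their respective dependence types.  */
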
4035 /* Finalize dependency information for the whole function. */
4036 void
4037 sched_deps_finish (void)
4038 {
4039 gcc_assert (deps_pools_are_empty_p ());
4040 free_alloc_pool_if_empty (&dn_pool);
4041 free_alloc_pool_if_empty (&dl_pool);
4042 gcc_assert (dn_pool == NULL && dl_pool == NULL);
4043
4044 h_d_i_d.release ();
4046
4047 if (true_dependency_cache)
4048 {
4049 int i;
4050
4051 for (i = 0; i < cache_size; i++)
4052 {
4053 bitmap_clear (&true_dependency_cache[i]);
4054 bitmap_clear (&output_dependency_cache[i]);
4055 bitmap_clear (&anti_dependency_cache[i]);
4056 bitmap_clear (&control_dependency_cache[i]);
4057
4058 if (sched_deps_info->generate_spec_deps)
4059 bitmap_clear (&spec_dependency_cache[i]);
4060 }
4061 free (true_dependency_cache);
4062 true_dependency_cache = NULL;
4063 free (output_dependency_cache);
4064 output_dependency_cache = NULL;
4065 free (anti_dependency_cache);
4066 anti_dependency_cache = NULL;
4067 free (control_dependency_cache);
4068 control_dependency_cache = NULL;
4069
4070 if (sched_deps_info->generate_spec_deps)
4071 {
4072 free (spec_dependency_cache);
4073 spec_dependency_cache = NULL;
4074 }
4075 cache_size = 0;
4076 }
4077 }
4078
4079 /* Initialize some global variables needed by the dependency analysis
4080 code. */
4081
4082 void
4083 init_deps_global (void)
4084 {
4085 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
4086 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
4087 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
4088 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
4089 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
4090 reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
4091 reg_pending_barrier = NOT_A_BARRIER;
4092
4093 if (!sel_sched_p () || sched_emulate_haifa_p)
4094 {
4095 sched_deps_info->start_insn = haifa_start_insn;
4096 sched_deps_info->finish_insn = haifa_finish_insn;
4097
4098 sched_deps_info->note_reg_set = haifa_note_reg_set;
4099 sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
4100 sched_deps_info->note_reg_use = haifa_note_reg_use;
4101
4102 sched_deps_info->note_mem_dep = haifa_note_mem_dep;
4103 sched_deps_info->note_dep = haifa_note_dep;
4104 }
4105 }
4106
4107 /* Free everything used by the dependency analysis code. */
4108
4109 void
4110 finish_deps_global (void)
4111 {
4112 FREE_REG_SET (reg_pending_sets);
4113 FREE_REG_SET (reg_pending_clobbers);
4114 FREE_REG_SET (reg_pending_uses);
4115 FREE_REG_SET (reg_pending_control_uses);
4116 }
4117
4118 /* Estimate the weakness of dependence between MEM1 and MEM2. */
4119 dw_t
4120 estimate_dep_weak (rtx mem1, rtx mem2)
4121 {
4122 rtx r1, r2;
4123
4124 if (mem1 == mem2)
4125 /* MEMs are the same - don't speculate. */
4126 return MIN_DEP_WEAK;
4127
4128 r1 = XEXP (mem1, 0);
4129 r2 = XEXP (mem2, 0);
4130
4131 if (r1 == r2
4132 || (REG_P (r1) && REG_P (r2)
4133 && REGNO (r1) == REGNO (r2)))
4134 /* Again, MEMs are the same. */
4135 return MIN_DEP_WEAK;
4136 else if ((REG_P (r1) && !REG_P (r2))
4137 || (!REG_P (r1) && REG_P (r2)))
4138 /* Different addressing modes - reason to be more speculative
4139 than usual. */
4140 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
4141 else
4142 /* We can't say anything about the dependence. */
4143 return UNCERTAIN_DEP_WEAK;
4144 }
4145
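/* A worked example of the heuristic above: for
mem1 == (mem (reg 1)) and mem2 == (mem (plus (reg 1) (const_int 8))),
R1 is a REG while R2 is a PLUS, so the addresses use different
addressing modes and the result is
NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2, i.e. halfway
between "surely no dependence" and "uncertain".  */
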
4146 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
4147 This function can handle same INSN and ELEM (INSN == ELEM).
4148 It is a convenience wrapper. */
4149 static void
4150 add_dependence_1 (rtx insn, rtx elem, enum reg_note dep_type)
4151 {
4152 ds_t ds;
4153 bool internal;
4154
4155 if (dep_type == REG_DEP_TRUE)
4156 ds = DEP_TRUE;
4157 else if (dep_type == REG_DEP_OUTPUT)
4158 ds = DEP_OUTPUT;
4159 else if (dep_type == REG_DEP_CONTROL)
4160 ds = DEP_CONTROL;
4161 else
4162 {
4163 gcc_assert (dep_type == REG_DEP_ANTI);
4164 ds = DEP_ANTI;
4165 }
4166
4167 /* When add_dependence is called from inside sched-deps.c, we expect
4168 cur_insn to be non-null. */
4169 internal = cur_insn != NULL;
4170 if (internal)
4171 gcc_assert (insn == cur_insn);
4172 else
4173 cur_insn = insn;
4174
4175 note_dep (elem, ds);
4176 if (!internal)
4177 cur_insn = NULL;
4178 }
4179
4180 /* Return weakness of speculative type TYPE in the dep_status DS. This
4181 variant skips the range check, so malformed input does not cause an ICE. */
4182 static dw_t
4183 get_dep_weak_1 (ds_t ds, ds_t type)
4184 {
4185 ds = ds & type;
4186
4187 switch (type)
4188 {
4189 case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
4190 case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
4191 case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
4192 case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
4193 default: gcc_unreachable ();
4194 }
4195
4196 return (dw_t) ds;
4197 }
4198
4199 /* Return weakness of speculative type TYPE in the dep_status DS. */
4200 dw_t
4201 get_dep_weak (ds_t ds, ds_t type)
4202 {
4203 dw_t dw = get_dep_weak_1 (ds, type);
4204
4205 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4206 return dw;
4207 }
4208
4209 /* Return a dep_status that has the same parameters as DS, except that
4210 speculative type TYPE will have weakness DW. */
4211 ds_t
4212 set_dep_weak (ds_t ds, ds_t type, dw_t dw)
4213 {
4214 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4215
4216 ds &= ~type;
4217 switch (type)
4218 {
4219 case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
4220 case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
4221 case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
4222 case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
4223 default: gcc_unreachable ();
4224 }
4225 return ds;
4226 }
4227
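/* Example use of the two accessors above: a dependence can be marked
data-speculative with the maximal probability of success by

ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);

after which get_dep_weak (ds, BEGIN_DATA) returns MAX_DEP_WEAK, and the
BEGIN_DATA bits of DS are nonzero, which is what marks the type as
present.  The weakness occupies the bits starting at
BEGIN_DATA_BITS_OFFSET, as the shifts above show.  */
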
4228 /* Return the join of two dep_statuses DS1 and DS2.
4229 If MAX_P is true then choose the greater probability,
4230 otherwise multiply probabilities.
4231 This function assumes that both DS1 and DS2 contain speculative bits. */
4232 static ds_t
4233 ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
4234 {
4235 ds_t ds, t;
4236
4237 gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
4238
4239 ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
4240
4241 t = FIRST_SPEC_TYPE;
4242 do
4243 {
4244 if ((ds1 & t) && !(ds2 & t))
4245 ds |= ds1 & t;
4246 else if (!(ds1 & t) && (ds2 & t))
4247 ds |= ds2 & t;
4248 else if ((ds1 & t) && (ds2 & t))
4249 {
4250 dw_t dw1 = get_dep_weak (ds1, t);
4251 dw_t dw2 = get_dep_weak (ds2, t);
4252 ds_t dw;
4253
4254 if (!max_p)
4255 {
4256 dw = ((ds_t) dw1) * ((ds_t) dw2);
4257 dw /= MAX_DEP_WEAK;
4258 if (dw < MIN_DEP_WEAK)
4259 dw = MIN_DEP_WEAK;
4260 }
4261 else
4262 {
4263 if (dw1 >= dw2)
4264 dw = dw1;
4265 else
4266 dw = dw2;
4267 }
4268
4269 ds = set_dep_weak (ds, t, (dw_t) dw);
4270 }
4271
4272 if (t == LAST_SPEC_TYPE)
4273 break;
4274 t <<= SPEC_TYPE_SHIFT;
4275 }
4276 while (1);
4277
4278 return ds;
4279 }
4280
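/* A worked example for ds_merge_1: if DS1 and DS2 both carry BEGIN_DATA
weaknesses DW1 and DW2, the multiplicative join (!MAX_P) computes
DW1 * DW2 / MAX_DEP_WEAK, so two weaknesses of MAX_DEP_WEAK / 2 merge
to roughly MAX_DEP_WEAK / 4, clamped from below at MIN_DEP_WEAK; with
MAX_P the larger of DW1 and DW2 is kept instead.  */
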
4281 /* Return the join of two dep_statuses DS1 and DS2.
4282 This function assumes that both DS1 and DS2 contain speculative bits. */
4283 ds_t
4284 ds_merge (ds_t ds1, ds_t ds2)
4285 {
4286 return ds_merge_1 (ds1, ds2, false);
4287 }
4288
4289 /* Return the join of dep_statuses DS and DS2, re-estimating the data-speculative weakness from MEM1 and MEM2 if MEM1 is nonnull. */
4290 ds_t
4291 ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
4292 {
4293 ds_t new_status = ds | ds2;
4294
4295 if (new_status & SPECULATIVE)
4296 {
4297 if ((ds && !(ds & SPECULATIVE))
4298 || (ds2 && !(ds2 & SPECULATIVE)))
4299 /* Then this dep can't be speculative. */
4300 new_status &= ~SPECULATIVE;
4301 else
4302 {
4303 /* Both are speculative. Merge the probabilities. */
4304 if (mem1)
4305 {
4306 dw_t dw;
4307
4308 dw = estimate_dep_weak (mem1, mem2);
4309 ds = set_dep_weak (ds, BEGIN_DATA, dw);
4310 }
4311
4312 if (!ds)
4313 new_status = ds2;
4314 else if (!ds2)
4315 new_status = ds;
4316 else
4317 new_status = ds_merge (ds2, ds);
4318 }
4319 }
4320
4321 return new_status;
4322 }
4323
4324 /* Return the join of DS1 and DS2. Use maximum instead of multiplying
4325 probabilities. */
4326 ds_t
4327 ds_max_merge (ds_t ds1, ds_t ds2)
4328 {
4329 if (ds1 == 0 && ds2 == 0)
4330 return 0;
4331
4332 if (ds1 == 0 && ds2 != 0)
4333 return ds2;
4334
4335 if (ds1 != 0 && ds2 == 0)
4336 return ds1;
4337
4338 return ds_merge_1 (ds1, ds2, true);
4339 }
4340
4341 /* Return the probability of speculation success for the speculation
4342 status DS. */
4343 dw_t
4344 ds_weak (ds_t ds)
4345 {
4346 ds_t res = 1, dt;
4347 int n = 0;
4348
4349 dt = FIRST_SPEC_TYPE;
4350 do
4351 {
4352 if (ds & dt)
4353 {
4354 res *= (ds_t) get_dep_weak (ds, dt);
4355 n++;
4356 }
4357
4358 if (dt == LAST_SPEC_TYPE)
4359 break;
4360 dt <<= SPEC_TYPE_SHIFT;
4361 }
4362 while (1);
4363
4364 gcc_assert (n);
4365 while (--n)
4366 res /= MAX_DEP_WEAK;
4367
4368 if (res < MIN_DEP_WEAK)
4369 res = MIN_DEP_WEAK;
4370
4371 gcc_assert (res <= MAX_DEP_WEAK);
4372
4373 return (dw_t) res;
4374 }
4375
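/* For instance, a status that is both data and control speculative,
with BEGIN_DATA weakness W1 and BEGIN_CONTROL weakness W2, yields
W1 * W2 / MAX_DEP_WEAK: the success probabilities of the individual
speculation types multiply, and the result stays on the
MIN_DEP_WEAK..MAX_DEP_WEAK scale.  */
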
4376 /* Return a dep status that contains all speculation types of DS. */
4377 ds_t
4378 ds_get_speculation_types (ds_t ds)
4379 {
4380 if (ds & BEGIN_DATA)
4381 ds |= BEGIN_DATA;
4382 if (ds & BE_IN_DATA)
4383 ds |= BE_IN_DATA;
4384 if (ds & BEGIN_CONTROL)
4385 ds |= BEGIN_CONTROL;
4386 if (ds & BE_IN_CONTROL)
4387 ds |= BE_IN_CONTROL;
4388
4389 return ds & SPECULATIVE;
4390 }
4391
4392 /* Return a dep status that contains maximal weakness for each speculation
4393 type present in DS. */
4394 ds_t
4395 ds_get_max_dep_weak (ds_t ds)
4396 {
4397 if (ds & BEGIN_DATA)
4398 ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4399 if (ds & BE_IN_DATA)
4400 ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4401 if (ds & BEGIN_CONTROL)
4402 ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4403 if (ds & BE_IN_CONTROL)
4404 ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4405
4406 return ds;
4407 }
4408
4409 /* Dump information about the dependence status S. */
4410 static void
4411 dump_ds (FILE *f, ds_t s)
4412 {
4413 fprintf (f, "{");
4414
4415 if (s & BEGIN_DATA)
4416 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4417 if (s & BE_IN_DATA)
4418 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4419 if (s & BEGIN_CONTROL)
4420 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4421 if (s & BE_IN_CONTROL)
4422 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4423
4424 if (s & HARD_DEP)
4425 fprintf (f, "HARD_DEP; ");
4426
4427 if (s & DEP_TRUE)
4428 fprintf (f, "DEP_TRUE; ");
4429 if (s & DEP_OUTPUT)
4430 fprintf (f, "DEP_OUTPUT; ");
4431 if (s & DEP_ANTI)
4432 fprintf (f, "DEP_ANTI; ");
4433 if (s & DEP_CONTROL)
4434 fprintf (f, "DEP_CONTROL; ");
4435
4436 fprintf (f, "}");
4437 }
4438
4439 DEBUG_FUNCTION void
4440 debug_ds (ds_t s)
4441 {
4442 dump_ds (stderr, s);
4443 fprintf (stderr, "\n");
4444 }
4445
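/* Given the format strings above, a true dependence that is also data
speculative with weakness 2 dumps as "{BEGIN_DATA: 2; DEP_TRUE; }".  */
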
4446 #ifdef ENABLE_CHECKING
4447 /* Verify that dependence type and status are consistent.
4448 If RELAXED_P is true, then skip dep_weakness checks. */
4449 static void
4450 check_dep (dep_t dep, bool relaxed_p)
4451 {
4452 enum reg_note dt = DEP_TYPE (dep);
4453 ds_t ds = DEP_STATUS (dep);
4454
4455 gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4456
4457 if (!(current_sched_info->flags & USE_DEPS_LIST))
4458 {
4459 gcc_assert (ds == 0);
4460 return;
4461 }
4462
4463 /* Check that dependence type contains the same bits as the status. */
4464 if (dt == REG_DEP_TRUE)
4465 gcc_assert (ds & DEP_TRUE);
4466 else if (dt == REG_DEP_OUTPUT)
4467 gcc_assert ((ds & DEP_OUTPUT)
4468 && !(ds & DEP_TRUE));
4469 else if (dt == REG_DEP_ANTI)
4470 gcc_assert ((ds & DEP_ANTI)
4471 && !(ds & (DEP_OUTPUT | DEP_TRUE)));
4472 else
4473 gcc_assert (dt == REG_DEP_CONTROL
4474 && (ds & DEP_CONTROL)
4475 && !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));
4476
4477 /* HARD_DEP cannot appear in the dep_status of a link. */
4478 gcc_assert (!(ds & HARD_DEP));
4479
4480 /* Check that dependence status is set correctly when speculation is not
4481 supported. */
4482 if (!sched_deps_info->generate_spec_deps)
4483 gcc_assert (!(ds & SPECULATIVE));
4484 else if (ds & SPECULATIVE)
4485 {
4486 if (!relaxed_p)
4487 {
4488 ds_t type = FIRST_SPEC_TYPE;
4489
4490 /* Check that dependence weakness is in proper range. */
4491 do
4492 {
4493 if (ds & type)
4494 get_dep_weak (ds, type);
4495
4496 if (type == LAST_SPEC_TYPE)
4497 break;
4498 type <<= SPEC_TYPE_SHIFT;
4499 }
4500 while (1);
4501 }
4502
4503 if (ds & BEGIN_SPEC)
4504 {
4505 /* Only true dependence can be data speculative. */
4506 if (ds & BEGIN_DATA)
4507 gcc_assert (ds & DEP_TRUE);
4508
4509 /* Control dependencies in the insn scheduler are represented by
4510 anti-dependencies, therefore only anti dependence can be
4511 control speculative. */
4512 if (ds & BEGIN_CONTROL)
4513 gcc_assert (ds & DEP_ANTI);
4514 }
4515 else
4516 {
4517 /* Subsequent speculations should resolve true dependencies. */
4518 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4519 }
4520
4521 /* Check that true and anti dependencies can't have other speculative
4522 statuses. */
4523 if (ds & DEP_TRUE)
4524 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4525 /* An output dependence can't be speculative at all. */
4526 gcc_assert (!(ds & DEP_OUTPUT));
4527 if (ds & DEP_ANTI)
4528 gcc_assert (ds & BEGIN_CONTROL);
4529 }
4530 }
4531 #endif /* ENABLE_CHECKING */
4532
4533 /* The following code discovers opportunities to switch a memory reference
4534 and an increment by modifying the address. We ensure that this is done
4535 only for dependencies that are only used to show a single register
4536 dependence (using DEP_NONREG and DEP_MULTIPLE), and so that every memory
4537 instruction involved is subject to only one dep that can cause a pattern
4538 change.
4539
4540 When we discover a suitable dependency, we fill in the dep_replacement
4541 structure to show how to modify the memory reference. */
4542
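/* An illustrative example (register numbers are arbitrary): given

insn 1: (set (reg 1) (plus (reg 2) (const_int 8)))
insn 2: (set (reg 3) (mem (reg 1)))

the true dependence 1 -> 2 exists only because of the address register,
so insn 2 could equally use (mem (plus (reg 2) (const_int 8))), after
which the two insns may be interchanged.  The code below records such
an opportunity in a dep_replacement without committing to it.  */
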
4543 /* Holds information about a pair of memory reference and register increment
4544 insns which depend on each other, but could possibly be interchanged. */
4545 struct mem_inc_info
4546 {
4547 rtx inc_insn;
4548 rtx mem_insn;
4549
4550 rtx *mem_loc;
4551 /* A register occurring in the memory address for which we wish to break
4552 the dependence. This must be identical to the destination register of
4553 the increment. */
4554 rtx mem_reg0;
4555 /* Any kind of index that is added to that register. */
4556 rtx mem_index;
4557 /* The constant offset used in the memory address. */
4558 HOST_WIDE_INT mem_constant;
4559 /* The constant added in the increment insn. Negated if the increment is
4560 after the memory address. */
4561 HOST_WIDE_INT inc_constant;
4562 /* The source register used in the increment. May be different from mem_reg0
4563 if the increment occurs before the memory address. */
4564 rtx inc_input;
4565 };
4566
4567 /* Verify that the memory location described in MII can be replaced with
4568 one using NEW_ADDR. Return the new memory reference or NULL_RTX. The
4569 insn remains unchanged by this function. */
4570
4571 static rtx
4572 attempt_change (struct mem_inc_info *mii, rtx new_addr)
4573 {
4574 rtx mem = *mii->mem_loc;
4575 rtx new_mem;
4576
4577 /* Jump through a lot of hoops to keep the attributes up to date. We
4578 do not want to call one of the change address variants that take
4579 an offset even though we know the offset in many cases. These
4580 assume you are changing where the address is pointing by the
4581 offset. */
4582 new_mem = replace_equiv_address_nv (mem, new_addr);
4583 if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
4584 {
4585 if (sched_verbose >= 5)
4586 fprintf (sched_dump, "validation failure\n");
4587 return NULL_RTX;
4588 }
4589
4590 /* Put back the old one. */
4591 validate_change (mii->mem_insn, mii->mem_loc, mem, 0);
4592
4593 return new_mem;
4594 }
4595
4596 /* Return true if INSN has the form "a = b + c" where a and b are
4597 regs and c is a const_int (an RTL minus of a constant appears here
4598 as a plus of the negated constant). Fill in information in MII
4599 about what is found. BEFORE_MEM indicates whether the increment is
4600 found before or after a corresponding memory reference. */
4601
4602 static bool
4603 parse_add_or_inc (struct mem_inc_info *mii, rtx insn, bool before_mem)
4604 {
4605 rtx pat = single_set (insn);
4606 rtx src, cst;
4607 bool regs_equal;
4608
4609 if (RTX_FRAME_RELATED_P (insn) || !pat)
4610 return false;
4611
4612 /* Result must be single reg. */
4613 if (!REG_P (SET_DEST (pat)))
4614 return false;
4615
4616 if (GET_CODE (SET_SRC (pat)) != PLUS)
4617 return false;
4618
4619 mii->inc_insn = insn;
4620 src = SET_SRC (pat);
4621 mii->inc_input = XEXP (src, 0);
4622
4623 if (!REG_P (XEXP (src, 0)))
4624 return false;
4625
4626 if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
4627 return false;
4628
4629 cst = XEXP (src, 1);
4630 if (!CONST_INT_P (cst))
4631 return false;
4632 mii->inc_constant = INTVAL (cst);
4633
4634 regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);
4635
4636 if (!before_mem)
4637 {
4638 mii->inc_constant = -mii->inc_constant;
4639 if (!regs_equal)
4640 return false;
4641 }
4642
4643 if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
4644 {
4645 /* Note that the sign has already been reversed for !before_mem. */
4646 #ifdef STACK_GROWS_DOWNWARD
4647 return mii->inc_constant > 0;
4648 #else
4649 return mii->inc_constant < 0;
4650 #endif
4651 }
4652 return true;
4653 }
4654
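/* Examples of what the parser above accepts, assuming mii->mem_reg0 is
r1 (the insns are hypothetical):

r1 = r1 + 8    ok before or after the memory reference
r1 = r1 + -8   ok; a decrement arrives as a plus of a negative constant
r1 = r2 + 8    ok only before the memory reference, since REGS_EQUAL
               is false and the !BEFORE_MEM case rejects that
r1 = r1 + r2   rejected; the addend must be a CONST_INT  */
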
4655 /* Once a suitable mem reference has been found and the corresponding data
4656 in MII has been filled in, this function is called to find a suitable
4657 add or inc insn involving the register we found in the memory
4658 reference. */
4659
4660 static bool
4661 find_inc (struct mem_inc_info *mii, bool backwards)
4662 {
4663 sd_iterator_def sd_it;
4664 dep_t dep;
4665
4666 sd_it = sd_iterator_start (mii->mem_insn,
4667 backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
4668 while (sd_iterator_cond (&sd_it, &dep))
4669 {
4670 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
4671 rtx pro = DEP_PRO (dep);
4672 rtx con = DEP_CON (dep);
4673 rtx inc_cand = backwards ? pro : con;
4674 if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
4675 goto next;
4676 if (parse_add_or_inc (mii, inc_cand, backwards))
4677 {
4678 struct dep_replacement *desc;
4679 df_ref *def_rec;
4680 rtx newaddr, newmem;
4681
4682 if (sched_verbose >= 5)
4683 fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
4684 INSN_UID (mii->mem_insn), INSN_UID (inc_cand));
4685
4686 /* Need to ensure that none of the operands of the inc
4687 instruction are assigned to by the mem insn. */
4688 for (def_rec = DF_INSN_DEFS (mii->mem_insn); *def_rec; def_rec++)
4689 {
4690 df_ref def = *def_rec;
4691 if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
4692 || reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
4693 {
4694 if (sched_verbose >= 5)
4695 fprintf (sched_dump,
4696 "inc conflicts with store failure.\n");
4697 goto next;
4698 }
4699 }
4700 newaddr = mii->inc_input;
4701 if (mii->mem_index != NULL_RTX)
4702 newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
4703 mii->mem_index);
4704 newaddr = plus_constant (GET_MODE (newaddr), newaddr,
4705 mii->mem_constant + mii->inc_constant);
4706 newmem = attempt_change (mii, newaddr);
4707 if (newmem == NULL_RTX)
4708 goto next;
4709 if (sched_verbose >= 5)
4710 fprintf (sched_dump, "successful address replacement\n");
4711 desc = XCNEW (struct dep_replacement);
4712 DEP_REPLACE (dep) = desc;
4713 desc->loc = mii->mem_loc;
4714 desc->newval = newmem;
4715 desc->orig = *desc->loc;
4716 desc->insn = mii->mem_insn;
4717 move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
4718 INSN_SPEC_BACK_DEPS (con));
4719 if (backwards)
4720 {
4721 FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
4722 add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
4723 REG_DEP_TRUE);
4724 }
4725 else
4726 {
4727 FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
4728 add_dependence_1 (DEP_CON (dep), mii->mem_insn,
4729 REG_DEP_ANTI);
4730 }
4731 return true;
4732 }
4733 next:
4734 sd_iterator_next (&sd_it);
4735 }
4736 return false;
4737 }
4738
4739 /* A recursive function that walks ADDRESS_OF_X to find memory references
4740 which could be modified during scheduling. We call find_inc for each
4741 one we find that has a recognizable form. MII holds information about
4742 the pair of memory/increment instructions.
4743 We ensure that every instruction with a memory reference (which will be
4744 the location of the replacement) is assigned at most one breakable
4745 dependency. */
4746
4747 static bool
4748 find_mem (struct mem_inc_info *mii, rtx *address_of_x)
4749 {
4750 rtx x = *address_of_x;
4751 enum rtx_code code = GET_CODE (x);
4752 const char *const fmt = GET_RTX_FORMAT (code);
4753 int i;
4754
4755 if (code == MEM)
4756 {
4757 rtx reg0 = XEXP (x, 0);
4758
4759 mii->mem_loc = address_of_x;
4760 mii->mem_index = NULL_RTX;
4761 mii->mem_constant = 0;
4762 if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
4763 {
4764 mii->mem_constant = INTVAL (XEXP (reg0, 1));
4765 reg0 = XEXP (reg0, 0);
4766 }
4767 if (GET_CODE (reg0) == PLUS)
4768 {
4769 mii->mem_index = XEXP (reg0, 1);
4770 reg0 = XEXP (reg0, 0);
4771 }
4772 if (REG_P (reg0))
4773 {
4774 df_ref *def_rec;
4775 int occurrences = 0;
4776
4777 /* Make sure this reg appears only once in this insn. Can't use
4778 count_occurrences since that only works for pseudos. */
4779 for (def_rec = DF_INSN_USES (mii->mem_insn); *def_rec; def_rec++)
4780 {
4781 df_ref def = *def_rec;
4782 if (reg_overlap_mentioned_p (reg0, DF_REF_REG (def)))
4783 if (++occurrences > 1)
4784 {
4785 if (sched_verbose >= 5)
4786 fprintf (sched_dump, "mem count failure\n");
4787 return false;
4788 }
4789 }
4790
4791 mii->mem_reg0 = reg0;
4792 return find_inc (mii, true) || find_inc (mii, false);
4793 }
4794 return false;
4795 }
4796
4797 if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
4798 {
4799 /* If REG occurs inside a MEM used in a bit-field reference,
4800 that is unacceptable. */
4801 return false;
4802 }
4803
4804 /* Time for some deep diving. */
4805 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4806 {
4807 if (fmt[i] == 'e')
4808 {
4809 if (find_mem (mii, &XEXP (x, i)))
4810 return true;
4811 }
4812 else if (fmt[i] == 'E')
4813 {
4814 int j;
4815 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4816 if (find_mem (mii, &XVECEXP (x, i, j)))
4817 return true;
4818 }
4819 }
4820 return false;
4821 }
4822
4823
4824 /* Examine the instructions between HEAD and TAIL and try to find
4825 dependencies that can be broken by modifying one of the patterns. */
4826
4827 void
4828 find_modifiable_mems (rtx head, rtx tail)
4829 {
4830 rtx insn, next_tail = NEXT_INSN (tail);
4831 int success_in_block = 0;
4832
4833 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
4834 {
4835 struct mem_inc_info mii;
4836
4837 if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
4838 continue;
4839
4840 mii.mem_insn = insn;
4841 if (find_mem (&mii, &PATTERN (insn)))
4842 success_in_block++;
4843 }
4844 if (success_in_block && sched_verbose >= 5)
4845 fprintf (sched_dump, "%d candidates for address modification found.\n",
4846 success_in_block);
4847 }
4848
4849 #endif /* INSN_SCHEDULING */