re PR rtl-optimization/61801 (sched2 miscompiles syscall sequence with -g)
1 /* Instruction scheduling pass. This file computes dependencies between
2 instructions.
3 Copyright (C) 1992-2014 Free Software Foundation, Inc.
 4 Contributed by Michael Tiemann (tiemann@cygnus.com). Enhanced by,
5 and currently maintained by, Jim Wilson (wilson@cygnus.com)
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22 \f
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "diagnostic-core.h"
28 #include "rtl.h"
29 #include "tree.h" /* FIXME: Used by call_may_noreturn_p. */
30 #include "tm_p.h"
31 #include "hard-reg-set.h"
32 #include "regs.h"
33 #include "function.h"
34 #include "flags.h"
35 #include "insn-config.h"
36 #include "insn-attr.h"
37 #include "except.h"
38 #include "recog.h"
39 #include "emit-rtl.h"
40 #include "sched-int.h"
41 #include "params.h"
42 #include "cselib.h"
43 #include "ira.h"
44 #include "target.h"
45
46 #ifdef INSN_SCHEDULING
47
48 #ifdef ENABLE_CHECKING
49 #define CHECK (true)
50 #else
51 #define CHECK (false)
52 #endif
53
54 /* Holds current parameters for the dependency analyzer. */
55 struct sched_deps_info_def *sched_deps_info;
56
57 /* The data is specific to the Haifa scheduler. */
58 vec<haifa_deps_insn_data_def>
59 h_d_i_d = vNULL;
60
61 /* Return the major type present in the DS. */
62 enum reg_note
63 ds_to_dk (ds_t ds)
64 {
65 if (ds & DEP_TRUE)
66 return REG_DEP_TRUE;
67
68 if (ds & DEP_OUTPUT)
69 return REG_DEP_OUTPUT;
70
71 if (ds & DEP_CONTROL)
72 return REG_DEP_CONTROL;
73
74 gcc_assert (ds & DEP_ANTI);
75
76 return REG_DEP_ANTI;
77 }
78
79 /* Return equivalent dep_status. */
80 ds_t
81 dk_to_ds (enum reg_note dk)
82 {
83 switch (dk)
84 {
85 case REG_DEP_TRUE:
86 return DEP_TRUE;
87
88 case REG_DEP_OUTPUT:
89 return DEP_OUTPUT;
90
91 case REG_DEP_CONTROL:
92 return DEP_CONTROL;
93
94 default:
95 gcc_assert (dk == REG_DEP_ANTI);
96 return DEP_ANTI;
97 }
98 }
99
100 /* Functions to operate with dependence information container - dep_t. */
101
102 /* Init DEP with the arguments. */
103 void
104 init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note type, ds_t ds)
105 {
106 DEP_PRO (dep) = pro;
107 DEP_CON (dep) = con;
108 DEP_TYPE (dep) = type;
109 DEP_STATUS (dep) = ds;
110 DEP_COST (dep) = UNKNOWN_DEP_COST;
111 DEP_NONREG (dep) = 0;
112 DEP_MULTIPLE (dep) = 0;
113 DEP_REPLACE (dep) = NULL;
114 }
115
 116 /* Init DEP with the arguments.
 117 While most of the scheduler (including targets) only needs the major
 118 type of the dependency, it is convenient to hide the full dep_status from it. */
119 void
120 init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
121 {
122 ds_t ds;
123
124 if ((current_sched_info->flags & USE_DEPS_LIST))
125 ds = dk_to_ds (kind);
126 else
127 ds = 0;
128
129 init_dep_1 (dep, pro, con, kind, ds);
130 }
131
132 /* Make a copy of FROM in TO. */
133 static void
134 copy_dep (dep_t to, dep_t from)
135 {
136 memcpy (to, from, sizeof (*to));
137 }
138
139 static void dump_ds (FILE *, ds_t);
140
141 /* Define flags for dump_dep (). */
142
143 /* Dump producer of the dependence. */
144 #define DUMP_DEP_PRO (2)
145
146 /* Dump consumer of the dependence. */
147 #define DUMP_DEP_CON (4)
148
149 /* Dump type of the dependence. */
150 #define DUMP_DEP_TYPE (8)
151
152 /* Dump status of the dependence. */
153 #define DUMP_DEP_STATUS (16)
154
155 /* Dump all information about the dependence. */
156 #define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
157 |DUMP_DEP_STATUS)
158
159 /* Dump DEP to DUMP.
160 FLAGS is a bit mask specifying what information about DEP needs
161 to be printed.
162 If FLAGS has the very first bit set, then dump all information about DEP
163 and propagate this bit into the callee dump functions. */
164 static void
165 dump_dep (FILE *dump, dep_t dep, int flags)
166 {
167 if (flags & 1)
168 flags |= DUMP_DEP_ALL;
169
170 fprintf (dump, "<");
171
172 if (flags & DUMP_DEP_PRO)
173 fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));
174
175 if (flags & DUMP_DEP_CON)
176 fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));
177
178 if (flags & DUMP_DEP_TYPE)
179 {
180 char t;
181 enum reg_note type = DEP_TYPE (dep);
182
183 switch (type)
184 {
185 case REG_DEP_TRUE:
186 t = 't';
187 break;
188
189 case REG_DEP_OUTPUT:
190 t = 'o';
191 break;
192
193 case REG_DEP_CONTROL:
194 t = 'c';
195 break;
196
197 case REG_DEP_ANTI:
198 t = 'a';
199 break;
200
201 default:
202 gcc_unreachable ();
203 break;
204 }
205
206 fprintf (dump, "%c; ", t);
207 }
208
209 if (flags & DUMP_DEP_STATUS)
210 {
211 if (current_sched_info->flags & USE_DEPS_LIST)
212 dump_ds (dump, DEP_STATUS (dep));
213 }
214
215 fprintf (dump, ">");
216 }
217
218 /* Default flags for dump_dep (). */
219 static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);
220
221 /* Dump all fields of DEP to STDERR. */
222 void
223 sd_debug_dep (dep_t dep)
224 {
225 dump_dep (stderr, dep, 1);
226 fprintf (stderr, "\n");
227 }
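
/* sd_debug_dep is intended to be called from a debugger; an
   illustrative gdb session, with DEP whatever dep_t is at hand:

     (gdb) call sd_debug_dep (dep)

   prints all fields of DEP to stderr.  */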
228
229 /* Determine whether DEP is a dependency link of a non-debug insn on a
230 debug insn. */
231
232 static inline bool
233 depl_on_debug_p (dep_link_t dep)
234 {
235 return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
236 && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
237 }
238
239 /* Functions to operate with a single link from the dependencies lists -
240 dep_link_t. */
241
 242 /* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
 243 PREV_NEXTP. */
244 static void
245 attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
246 {
247 dep_link_t next = *prev_nextp;
248
249 gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
250 && DEP_LINK_NEXT (l) == NULL);
251
252 /* Init node being inserted. */
253 DEP_LINK_PREV_NEXTP (l) = prev_nextp;
254 DEP_LINK_NEXT (l) = next;
255
256 /* Fix next node. */
257 if (next != NULL)
258 {
259 gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);
260
261 DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
262 }
263
264 /* Fix prev node. */
265 *prev_nextp = l;
266 }
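
/* An illustrative sketch of the "pointer to the previous next-field"
   idiom used by attach_dep_link above, in a self-contained generic
   form (the names NODE, ATTACH and PREV_NEXTP are hypothetical):

     struct node { struct node *next; struct node **prev_nextp; };

     static void
     attach (struct node *n, struct node **prev_nextp)
     {
       n->prev_nextp = prev_nextp;        // cell that will point at us
       n->next = *prev_nextp;             // old successor, possibly NULL
       if (n->next)
         n->next->prev_nextp = &n->next;
       *prev_nextp = n;                   // the cell now points at us
     }

   Storing the address of the predecessor's next field rather than the
   predecessor itself makes insertion and deletion uniform: the head of
   a list needs no special casing, as detach_dep_link below shows.  */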
267
268 /* Add dep_link LINK to deps_list L. */
269 static void
270 add_to_deps_list (dep_link_t link, deps_list_t l)
271 {
272 attach_dep_link (link, &DEPS_LIST_FIRST (l));
273
274 /* Don't count debug deps. */
275 if (!depl_on_debug_p (link))
276 ++DEPS_LIST_N_LINKS (l);
277 }
278
279 /* Detach dep_link L from the list. */
280 static void
281 detach_dep_link (dep_link_t l)
282 {
283 dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
284 dep_link_t next = DEP_LINK_NEXT (l);
285
286 *prev_nextp = next;
287
288 if (next != NULL)
289 DEP_LINK_PREV_NEXTP (next) = prev_nextp;
290
291 DEP_LINK_PREV_NEXTP (l) = NULL;
292 DEP_LINK_NEXT (l) = NULL;
293 }
294
295 /* Remove link LINK from list LIST. */
296 static void
297 remove_from_deps_list (dep_link_t link, deps_list_t list)
298 {
299 detach_dep_link (link);
300
301 /* Don't count debug deps. */
302 if (!depl_on_debug_p (link))
303 --DEPS_LIST_N_LINKS (list);
304 }
305
306 /* Move link LINK from list FROM to list TO. */
307 static void
308 move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
309 {
310 remove_from_deps_list (link, from);
311 add_to_deps_list (link, to);
312 }
313
 314 /* Return true if LINK is not attached to any list. */
315 static bool
316 dep_link_is_detached_p (dep_link_t link)
317 {
318 return DEP_LINK_PREV_NEXTP (link) == NULL;
319 }
320
321 /* Pool to hold all dependency nodes (dep_node_t). */
322 static alloc_pool dn_pool;
323
324 /* Number of dep_nodes out there. */
325 static int dn_pool_diff = 0;
326
327 /* Create a dep_node. */
328 static dep_node_t
329 create_dep_node (void)
330 {
331 dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
332 dep_link_t back = DEP_NODE_BACK (n);
333 dep_link_t forw = DEP_NODE_FORW (n);
334
335 DEP_LINK_NODE (back) = n;
336 DEP_LINK_NEXT (back) = NULL;
337 DEP_LINK_PREV_NEXTP (back) = NULL;
338
339 DEP_LINK_NODE (forw) = n;
340 DEP_LINK_NEXT (forw) = NULL;
341 DEP_LINK_PREV_NEXTP (forw) = NULL;
342
343 ++dn_pool_diff;
344
345 return n;
346 }
347
348 /* Delete dep_node N. N must not be connected to any deps_list. */
349 static void
350 delete_dep_node (dep_node_t n)
351 {
352 gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
353 && dep_link_is_detached_p (DEP_NODE_FORW (n)));
354
355 XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));
356
357 --dn_pool_diff;
358
359 pool_free (dn_pool, n);
360 }
361
362 /* Pool to hold dependencies lists (deps_list_t). */
363 static alloc_pool dl_pool;
364
365 /* Number of deps_lists out there. */
366 static int dl_pool_diff = 0;
367
368 /* Functions to operate with dependences lists - deps_list_t. */
369
370 /* Return true if list L is empty. */
371 static bool
372 deps_list_empty_p (deps_list_t l)
373 {
374 return DEPS_LIST_N_LINKS (l) == 0;
375 }
376
377 /* Create a new deps_list. */
378 static deps_list_t
379 create_deps_list (void)
380 {
381 deps_list_t l = (deps_list_t) pool_alloc (dl_pool);
382
383 DEPS_LIST_FIRST (l) = NULL;
384 DEPS_LIST_N_LINKS (l) = 0;
385
386 ++dl_pool_diff;
387 return l;
388 }
389
390 /* Free deps_list L. */
391 static void
392 free_deps_list (deps_list_t l)
393 {
394 gcc_assert (deps_list_empty_p (l));
395
396 --dl_pool_diff;
397
398 pool_free (dl_pool, l);
399 }
400
 401 /* Return true if there are no dep_nodes and deps_lists out there.
 402 After the region is scheduled all the dependency nodes and lists
 403 should [generally] be returned to the pool. */
404 bool
405 deps_pools_are_empty_p (void)
406 {
407 return dn_pool_diff == 0 && dl_pool_diff == 0;
408 }
409
410 /* Remove all elements from L. */
411 static void
412 clear_deps_list (deps_list_t l)
413 {
414 do
415 {
416 dep_link_t link = DEPS_LIST_FIRST (l);
417
418 if (link == NULL)
419 break;
420
421 remove_from_deps_list (link, l);
422 }
423 while (1);
424 }
425
426 /* Decide whether a dependency should be treated as a hard or a speculative
427 dependency. */
428 static bool
429 dep_spec_p (dep_t dep)
430 {
431 if (current_sched_info->flags & DO_SPECULATION)
432 {
433 if (DEP_STATUS (dep) & SPECULATIVE)
434 return true;
435 }
436 if (current_sched_info->flags & DO_PREDICATION)
437 {
438 if (DEP_TYPE (dep) == REG_DEP_CONTROL)
439 return true;
440 }
441 if (DEP_REPLACE (dep) != NULL)
442 return true;
443 return false;
444 }
445
446 static regset reg_pending_sets;
447 static regset reg_pending_clobbers;
448 static regset reg_pending_uses;
449 static regset reg_pending_control_uses;
450 static enum reg_pending_barrier_mode reg_pending_barrier;
451
 452 /* Hard registers implicitly clobbered or used (or that may be
 453 implicitly clobbered or used) by the currently analyzed insn. For
 454 example, an insn may have a constraint permitting only one register
 455 class; even if no hard register currently appears in the insn, the
 456 particular hard register will appear in it after the reload pass
 457 because the constraint requires it. */
458 static HARD_REG_SET implicit_reg_pending_clobbers;
459 static HARD_REG_SET implicit_reg_pending_uses;
460
 461 /* To speed up the test for duplicate dependency links we keep a
 462 record of dependencies created by add_dependence when the average
 463 number of instructions in a basic block is very large.
 464
 465 Studies have shown that there are typically around 5 instructions between
 466 branches for typical C code. So we can make a guess that the average
 467 basic block is approximately 5 instructions long; we will choose 100X
 468 the average size as a very large basic block.
 469
 470 Each insn has associated bitmaps for its dependencies. Each bitmap
 471 has enough entries to represent a dependency on any other insn in
 472 the insn chain. When the bitmap for the true-dependency cache is
 473 allocated, the remaining caches are allocated as well. */
474 static bitmap_head *true_dependency_cache = NULL;
475 static bitmap_head *output_dependency_cache = NULL;
476 static bitmap_head *anti_dependency_cache = NULL;
477 static bitmap_head *control_dependency_cache = NULL;
478 static bitmap_head *spec_dependency_cache = NULL;
479 static int cache_size;
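
/* An illustrative sketch of how the caches above are consulted: each
   array is indexed by the luid of the consumer, and each bit within a
   bitmap corresponds to the luid of a producer.  A cached true
   dependence of a hypothetical insn CON on insn PRO is thus tested as

     bitmap_bit_p (&true_dependency_cache[INSN_LUID (con)],
                   INSN_LUID (pro))

   which is the pattern used by sd_find_dep_between and
   ask_dependency_caches below.  */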
480
 481 /* True if we should mark added dependencies as non-register deps. */
482 static bool mark_as_hard;
483
484 static int deps_may_trap_p (const_rtx);
485 static void add_dependence_1 (rtx, rtx, enum reg_note);
486 static void add_dependence_list (rtx, rtx, int, enum reg_note, bool);
487 static void add_dependence_list_and_free (struct deps_desc *, rtx,
488 rtx *, int, enum reg_note, bool);
489 static void delete_all_dependences (rtx);
490 static void chain_to_prev_insn (rtx);
491
492 static void flush_pending_lists (struct deps_desc *, rtx, int, int);
493 static void sched_analyze_1 (struct deps_desc *, rtx, rtx);
494 static void sched_analyze_2 (struct deps_desc *, rtx, rtx);
495 static void sched_analyze_insn (struct deps_desc *, rtx, rtx);
496
497 static bool sched_has_condition_p (const_rtx);
498 static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
499
500 static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
501 rtx, rtx);
502 static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);
503
504 #ifdef ENABLE_CHECKING
505 static void check_dep (dep_t, bool);
506 #endif
507 \f
508 /* Return nonzero if a load of the memory reference MEM can cause a trap. */
509
510 static int
511 deps_may_trap_p (const_rtx mem)
512 {
513 const_rtx addr = XEXP (mem, 0);
514
515 if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
516 {
517 const_rtx t = get_reg_known_value (REGNO (addr));
518 if (t)
519 addr = t;
520 }
521 return rtx_addr_can_trap_p (addr);
522 }
523 \f
524
525 /* Find the condition under which INSN is executed. If REV is not NULL,
526 it is set to TRUE when the returned comparison should be reversed
527 to get the actual condition. */
528 static rtx
529 sched_get_condition_with_rev_uncached (const_rtx insn, bool *rev)
530 {
531 rtx pat = PATTERN (insn);
532 rtx src;
533
534 if (rev)
535 *rev = false;
536
537 if (GET_CODE (pat) == COND_EXEC)
538 return COND_EXEC_TEST (pat);
539
540 if (!any_condjump_p (insn) || !onlyjump_p (insn))
541 return 0;
542
543 src = SET_SRC (pc_set (insn));
544
545 if (XEXP (src, 2) == pc_rtx)
546 return XEXP (src, 0);
547 else if (XEXP (src, 1) == pc_rtx)
548 {
549 rtx cond = XEXP (src, 0);
550 enum rtx_code revcode = reversed_comparison_code (cond, insn);
551
552 if (revcode == UNKNOWN)
553 return 0;
554
555 if (rev)
556 *rev = true;
557 return cond;
558 }
559
560 return 0;
561 }
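
/* For reference, the conditional jump handled above has the RTL shape

     (set (pc) (if_then_else COND (label_ref L) (pc)))

   when the branch is taken if COND holds, or with the (label_ref ...)
   and (pc) arms swapped when it is taken if COND fails, which is why
   XEXP (src, 1) and XEXP (src, 2) are compared against pc_rtx.  */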
562
563 /* Return the condition under which INSN does not execute (i.e. the
564 not-taken condition for a conditional branch), or NULL if we cannot
565 find such a condition. The caller should make a copy of the condition
566 before using it. */
567 rtx
568 sched_get_reverse_condition_uncached (const_rtx insn)
569 {
570 bool rev;
571 rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
572 if (cond == NULL_RTX)
573 return cond;
574 if (!rev)
575 {
576 enum rtx_code revcode = reversed_comparison_code (cond, insn);
577 cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
578 XEXP (cond, 0),
579 XEXP (cond, 1));
580 }
581 return cond;
582 }
583
584 /* Caching variant of sched_get_condition_with_rev_uncached.
585 We only do actual work the first time we come here for an insn; the
586 results are cached in INSN_CACHED_COND and INSN_REVERSE_COND. */
587 static rtx
588 sched_get_condition_with_rev (const_rtx insn, bool *rev)
589 {
590 bool tmp;
591
592 if (INSN_LUID (insn) == 0)
593 return sched_get_condition_with_rev_uncached (insn, rev);
594
595 if (INSN_CACHED_COND (insn) == const_true_rtx)
596 return NULL_RTX;
597
598 if (INSN_CACHED_COND (insn) != NULL_RTX)
599 {
600 if (rev)
601 *rev = INSN_REVERSE_COND (insn);
602 return INSN_CACHED_COND (insn);
603 }
604
605 INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
606 INSN_REVERSE_COND (insn) = tmp;
607
608 if (INSN_CACHED_COND (insn) == NULL_RTX)
609 {
610 INSN_CACHED_COND (insn) = const_true_rtx;
611 return NULL_RTX;
612 }
613
614 if (rev)
615 *rev = INSN_REVERSE_COND (insn);
616 return INSN_CACHED_COND (insn);
617 }
618
619 /* True when we can find a condition under which INSN is executed. */
620 static bool
621 sched_has_condition_p (const_rtx insn)
622 {
623 return !! sched_get_condition_with_rev (insn, NULL);
624 }
625
626 \f
627
 628 /* Return nonzero if conditions COND1 and COND2 can never both be true. */
629 static int
630 conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
631 {
632 if (COMPARISON_P (cond1)
633 && COMPARISON_P (cond2)
634 && GET_CODE (cond1) ==
635 (rev1==rev2
636 ? reversed_comparison_code (cond2, NULL)
637 : GET_CODE (cond2))
638 && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
639 && XEXP (cond1, 1) == XEXP (cond2, 1))
640 return 1;
641 return 0;
642 }
643
644 /* Return true if insn1 and insn2 can never depend on one another because
645 the conditions under which they are executed are mutually exclusive. */
646 bool
647 sched_insns_conditions_mutex_p (const_rtx insn1, const_rtx insn2)
648 {
649 rtx cond1, cond2;
650 bool rev1 = false, rev2 = false;
651
652 /* df doesn't handle conditional lifetimes entirely correctly;
653 calls mess up the conditional lifetimes. */
654 if (!CALL_P (insn1) && !CALL_P (insn2))
655 {
656 cond1 = sched_get_condition_with_rev (insn1, &rev1);
657 cond2 = sched_get_condition_with_rev (insn2, &rev2);
658 if (cond1 && cond2
659 && conditions_mutex_p (cond1, cond2, rev1, rev2)
660 /* Make sure first instruction doesn't affect condition of second
661 instruction if switched. */
662 && !modified_in_p (cond1, insn2)
663 /* Make sure second instruction doesn't affect condition of first
664 instruction if switched. */
665 && !modified_in_p (cond2, insn1))
666 return true;
667 }
668 return false;
669 }
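
/* For example (an illustrative sketch; R is a hypothetical register):

     (cond_exec (eq (reg:CC R) (const_int 0)) (set ...))
     (cond_exec (ne (reg:CC R) (const_int 0)) (set ...))

   The EQ and NE tests of the same operands can never both be true, so
   as long as neither insn modifies the other's condition the two insns
   are mutually exclusive and need no dependence between them.  */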
670 \f
671
672 /* Return true if INSN can potentially be speculated with type DS. */
673 bool
674 sched_insn_is_legitimate_for_speculation_p (const_rtx insn, ds_t ds)
675 {
676 if (HAS_INTERNAL_DEP (insn))
677 return false;
678
679 if (!NONJUMP_INSN_P (insn))
680 return false;
681
682 if (SCHED_GROUP_P (insn))
683 return false;
684
685 if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX (insn)))
686 return false;
687
688 if (side_effects_p (PATTERN (insn)))
689 return false;
690
691 if (ds & BE_IN_SPEC)
 692 /* An instruction that depends on a speculatively scheduled
 693 instruction cannot always be speculatively scheduled along with it. */
694 {
695 if (may_trap_or_fault_p (PATTERN (insn)))
696 /* If instruction might fault, it cannot be speculatively scheduled.
697 For control speculation it's obvious why and for data speculation
698 it's because the insn might get wrong input if speculation
699 wasn't successful. */
700 return false;
701
702 if ((ds & BE_IN_DATA)
703 && sched_has_condition_p (insn))
704 /* If this is a predicated instruction, then it cannot be
705 speculatively scheduled. See PR35659. */
706 return false;
707 }
708
709 return true;
710 }
711
 712 /* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
 713 initialize RESOLVED_P_PTR to true if that list consists of resolved deps,
 714 and remove the type of the returned [through LIST_PTR] list from TYPES_PTR.
 715 This function is used to switch sd_iterator to the next list.
 716 !!! For internal use only. Might consider moving it to sched-int.h. */
717 void
718 sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
719 deps_list_t *list_ptr, bool *resolved_p_ptr)
720 {
721 sd_list_types_def types = *types_ptr;
722
723 if (types & SD_LIST_HARD_BACK)
724 {
725 *list_ptr = INSN_HARD_BACK_DEPS (insn);
726 *resolved_p_ptr = false;
727 *types_ptr = types & ~SD_LIST_HARD_BACK;
728 }
729 else if (types & SD_LIST_SPEC_BACK)
730 {
731 *list_ptr = INSN_SPEC_BACK_DEPS (insn);
732 *resolved_p_ptr = false;
733 *types_ptr = types & ~SD_LIST_SPEC_BACK;
734 }
735 else if (types & SD_LIST_FORW)
736 {
737 *list_ptr = INSN_FORW_DEPS (insn);
738 *resolved_p_ptr = false;
739 *types_ptr = types & ~SD_LIST_FORW;
740 }
741 else if (types & SD_LIST_RES_BACK)
742 {
743 *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
744 *resolved_p_ptr = true;
745 *types_ptr = types & ~SD_LIST_RES_BACK;
746 }
747 else if (types & SD_LIST_RES_FORW)
748 {
749 *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
750 *resolved_p_ptr = true;
751 *types_ptr = types & ~SD_LIST_RES_FORW;
752 }
753 else
754 {
755 *list_ptr = NULL;
756 *resolved_p_ptr = false;
757 *types_ptr = SD_LIST_NONE;
758 }
759 }
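
/* An illustrative sketch: sd_next_list is what allows one sd_iterator
   to walk several dependence lists in sequence, e.g. both
   SD_LIST_HARD_BACK and SD_LIST_SPEC_BACK when SD_LIST_BACK is
   requested.  Typical use goes through the iteration macro (INSN being
   a hypothetical insn):

     sd_iterator_def sd_it;
     dep_t dep;

     FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
       if (DEP_TYPE (dep) == REG_DEP_TRUE)
         ...;

   as done, for instance, in chain_to_prev_insn further below.  */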
760
 761 /* Return the total size of INSN's lists defined by LIST_TYPES. */
762 int
763 sd_lists_size (const_rtx insn, sd_list_types_def list_types)
764 {
765 int size = 0;
766
767 while (list_types != SD_LIST_NONE)
768 {
769 deps_list_t list;
770 bool resolved_p;
771
772 sd_next_list (insn, &list_types, &list, &resolved_p);
773 if (list)
774 size += DEPS_LIST_N_LINKS (list);
775 }
776
777 return size;
778 }
779
780 /* Return true if INSN's lists defined by LIST_TYPES are all empty. */
781
782 bool
783 sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
784 {
785 while (list_types != SD_LIST_NONE)
786 {
787 deps_list_t list;
788 bool resolved_p;
789
790 sd_next_list (insn, &list_types, &list, &resolved_p);
791 if (!deps_list_empty_p (list))
792 return false;
793 }
794
795 return true;
796 }
797
798 /* Initialize data for INSN. */
799 void
800 sd_init_insn (rtx insn)
801 {
802 INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
803 INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
804 INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
805 INSN_FORW_DEPS (insn) = create_deps_list ();
806 INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();
807
808 /* ??? It would be nice to allocate dependency caches here. */
809 }
810
811 /* Free data for INSN. */
812 void
813 sd_finish_insn (rtx insn)
814 {
815 /* ??? It would be nice to deallocate dependency caches here. */
816
817 free_deps_list (INSN_HARD_BACK_DEPS (insn));
818 INSN_HARD_BACK_DEPS (insn) = NULL;
819
820 free_deps_list (INSN_SPEC_BACK_DEPS (insn));
821 INSN_SPEC_BACK_DEPS (insn) = NULL;
822
823 free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
824 INSN_RESOLVED_BACK_DEPS (insn) = NULL;
825
826 free_deps_list (INSN_FORW_DEPS (insn));
827 INSN_FORW_DEPS (insn) = NULL;
828
829 free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
830 INSN_RESOLVED_FORW_DEPS (insn) = NULL;
831 }
832
833 /* Find a dependency between producer PRO and consumer CON.
834 Search through resolved dependency lists if RESOLVED_P is true.
835 If no such dependency is found return NULL,
836 otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
837 with an iterator pointing to it. */
838 static dep_t
839 sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
840 sd_iterator_def *sd_it_ptr)
841 {
842 sd_list_types_def pro_list_type;
843 sd_list_types_def con_list_type;
844 sd_iterator_def sd_it;
845 dep_t dep;
846 bool found_p = false;
847
848 if (resolved_p)
849 {
850 pro_list_type = SD_LIST_RES_FORW;
851 con_list_type = SD_LIST_RES_BACK;
852 }
853 else
854 {
855 pro_list_type = SD_LIST_FORW;
856 con_list_type = SD_LIST_BACK;
857 }
858
859 /* Walk through either back list of INSN or forw list of ELEM
860 depending on which one is shorter. */
861 if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
862 {
863 /* Find the dep_link with producer PRO in consumer's back_deps. */
864 FOR_EACH_DEP (con, con_list_type, sd_it, dep)
865 if (DEP_PRO (dep) == pro)
866 {
867 found_p = true;
868 break;
869 }
870 }
871 else
872 {
873 /* Find the dep_link with consumer CON in producer's forw_deps. */
874 FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
875 if (DEP_CON (dep) == con)
876 {
877 found_p = true;
878 break;
879 }
880 }
881
882 if (found_p)
883 {
884 if (sd_it_ptr != NULL)
885 *sd_it_ptr = sd_it;
886
887 return dep;
888 }
889
890 return NULL;
891 }
892
 893 /* Find a dependency between producer PRO and consumer CON.
 894 Use the dependency caches [if available] to check whether the
 895 dependency is present at all, and search through resolved dependency
 896 lists if RESOLVED_P is true. Return the dependency, or NULL if none is found. */
897 dep_t
898 sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
899 {
900 if (true_dependency_cache != NULL)
901 /* Avoiding the list walk below can cut compile times dramatically
902 for some code. */
903 {
904 int elem_luid = INSN_LUID (pro);
905 int insn_luid = INSN_LUID (con);
906
907 if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
908 && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
909 && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
910 && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
911 return NULL;
912 }
913
914 return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
915 }
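
/* An illustrative sketch of querying for a dependence, with I1 and I2
   hypothetical insns and I1 the producer:

     dep_t dep = sd_find_dep_between (i1, i2, false);

     if (dep != NULL)
       ...  // I2 already depends on I1; inspect DEP_TYPE / DEP_STATUS.

   Passing true for RESOLVED_P searches the resolved lists instead.  */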
916
917 /* Add or update a dependence described by DEP.
918 MEM1 and MEM2, if non-null, correspond to memory locations in case of
919 data speculation.
920
921 The function returns a value indicating if an old entry has been changed
922 or a new entry has been added to insn's backward deps.
923
 924 This function merely checks whether producer and consumer are the
 925 same insn and doesn't create a dep in this case. Actual manipulation of
 926 dependence data structures is performed in add_or_update_dep_1. */
927 static enum DEPS_ADJUST_RESULT
928 maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
929 {
930 rtx elem = DEP_PRO (dep);
931 rtx insn = DEP_CON (dep);
932
933 gcc_assert (INSN_P (insn) && INSN_P (elem));
934
935 /* Don't depend an insn on itself. */
936 if (insn == elem)
937 {
938 if (sched_deps_info->generate_spec_deps)
939 /* INSN has an internal dependence, which we can't overcome. */
940 HAS_INTERNAL_DEP (insn) = 1;
941
942 return DEP_NODEP;
943 }
944
945 return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
946 }
947
948 /* Ask dependency caches what needs to be done for dependence DEP.
949 Return DEP_CREATED if new dependence should be created and there is no
950 need to try to find one searching the dependencies lists.
951 Return DEP_PRESENT if there already is a dependence described by DEP and
952 hence nothing is to be done.
953 Return DEP_CHANGED if there already is a dependence, but it should be
954 updated to incorporate additional information from DEP. */
955 static enum DEPS_ADJUST_RESULT
956 ask_dependency_caches (dep_t dep)
957 {
958 int elem_luid = INSN_LUID (DEP_PRO (dep));
959 int insn_luid = INSN_LUID (DEP_CON (dep));
960
961 gcc_assert (true_dependency_cache != NULL
962 && output_dependency_cache != NULL
963 && anti_dependency_cache != NULL
964 && control_dependency_cache != NULL);
965
966 if (!(current_sched_info->flags & USE_DEPS_LIST))
967 {
968 enum reg_note present_dep_type;
969
970 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
971 present_dep_type = REG_DEP_TRUE;
972 else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
973 present_dep_type = REG_DEP_OUTPUT;
974 else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
975 present_dep_type = REG_DEP_ANTI;
976 else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
977 present_dep_type = REG_DEP_CONTROL;
978 else
979 /* There is no existing dep so it should be created. */
980 return DEP_CREATED;
981
982 if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
983 /* DEP does not add anything to the existing dependence. */
984 return DEP_PRESENT;
985 }
986 else
987 {
988 ds_t present_dep_types = 0;
989
990 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
991 present_dep_types |= DEP_TRUE;
992 if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
993 present_dep_types |= DEP_OUTPUT;
994 if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
995 present_dep_types |= DEP_ANTI;
996 if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
997 present_dep_types |= DEP_CONTROL;
998
999 if (present_dep_types == 0)
1000 /* There is no existing dep so it should be created. */
1001 return DEP_CREATED;
1002
1003 if (!(current_sched_info->flags & DO_SPECULATION)
1004 || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
1005 {
1006 if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
1007 == present_dep_types)
1008 /* DEP does not add anything to the existing dependence. */
1009 return DEP_PRESENT;
1010 }
1011 else
1012 {
1013 /* Only true dependencies can be data speculative and
1014 only anti dependencies can be control speculative. */
1015 gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
1016 == present_dep_types);
1017
1018 /* if (DEP is SPECULATIVE) then
1019 ..we should update DEP_STATUS
1020 else
1021 ..we should reset existing dep to non-speculative. */
1022 }
1023 }
1024
1025 return DEP_CHANGED;
1026 }
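
/* Note: the ">=" test above relies on the numerical ordering of the
   reg_note enumeration, in which a smaller value denotes a more
   restrictive dependence type, REG_DEP_TRUE being the most
   restrictive.  update_dep below uses the same convention when
   deciding whether the new type should replace the old one.  */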
1027
1028 /* Set dependency caches according to DEP. */
1029 static void
1030 set_dependency_caches (dep_t dep)
1031 {
1032 int elem_luid = INSN_LUID (DEP_PRO (dep));
1033 int insn_luid = INSN_LUID (DEP_CON (dep));
1034
1035 if (!(current_sched_info->flags & USE_DEPS_LIST))
1036 {
1037 switch (DEP_TYPE (dep))
1038 {
1039 case REG_DEP_TRUE:
1040 bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
1041 break;
1042
1043 case REG_DEP_OUTPUT:
1044 bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
1045 break;
1046
1047 case REG_DEP_ANTI:
1048 bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
1049 break;
1050
1051 case REG_DEP_CONTROL:
1052 bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
1053 break;
1054
1055 default:
1056 gcc_unreachable ();
1057 }
1058 }
1059 else
1060 {
1061 ds_t ds = DEP_STATUS (dep);
1062
1063 if (ds & DEP_TRUE)
1064 bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
1065 if (ds & DEP_OUTPUT)
1066 bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
1067 if (ds & DEP_ANTI)
1068 bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
1069 if (ds & DEP_CONTROL)
1070 bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
1071
1072 if (ds & SPECULATIVE)
1073 {
1074 gcc_assert (current_sched_info->flags & DO_SPECULATION);
1075 bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
1076 }
1077 }
1078 }
1079
 1080 /* The type of dependence DEP has changed from OLD_TYPE. Update the
 1081 dependency caches accordingly. */
1082 static void
1083 update_dependency_caches (dep_t dep, enum reg_note old_type)
1084 {
1085 int elem_luid = INSN_LUID (DEP_PRO (dep));
1086 int insn_luid = INSN_LUID (DEP_CON (dep));
1087
 1088 /* Clear the corresponding cache entry because the type of the link
 1089 may have changed. Keep the entries if USE_DEPS_LIST is set. */
1090 if (!(current_sched_info->flags & USE_DEPS_LIST))
1091 {
1092 switch (old_type)
1093 {
1094 case REG_DEP_OUTPUT:
1095 bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1096 break;
1097
1098 case REG_DEP_ANTI:
1099 bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1100 break;
1101
1102 case REG_DEP_CONTROL:
1103 bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
1104 break;
1105
1106 default:
1107 gcc_unreachable ();
1108 }
1109 }
1110
1111 set_dependency_caches (dep);
1112 }
1113
1114 /* Convert a dependence pointed to by SD_IT to be non-speculative. */
1115 static void
1116 change_spec_dep_to_hard (sd_iterator_def sd_it)
1117 {
1118 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1119 dep_link_t link = DEP_NODE_BACK (node);
1120 dep_t dep = DEP_NODE_DEP (node);
1121 rtx elem = DEP_PRO (dep);
1122 rtx insn = DEP_CON (dep);
1123
1124 move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));
1125
1126 DEP_STATUS (dep) &= ~SPECULATIVE;
1127
1128 if (true_dependency_cache != NULL)
1129 /* Clear the cache entry. */
1130 bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
1131 INSN_LUID (elem));
1132 }
1133
1134 /* Update DEP to incorporate information from NEW_DEP.
1135 SD_IT points to DEP in case it should be moved to another list.
 1136 MEM1 and MEM2, if nonnull, correspond to memory locations in case a
 1137 data-speculative dependence should be updated. */
1138 static enum DEPS_ADJUST_RESULT
1139 update_dep (dep_t dep, dep_t new_dep,
1140 sd_iterator_def sd_it ATTRIBUTE_UNUSED,
1141 rtx mem1 ATTRIBUTE_UNUSED,
1142 rtx mem2 ATTRIBUTE_UNUSED)
1143 {
1144 enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
1145 enum reg_note old_type = DEP_TYPE (dep);
1146 bool was_spec = dep_spec_p (dep);
1147
1148 DEP_NONREG (dep) |= DEP_NONREG (new_dep);
1149 DEP_MULTIPLE (dep) = 1;
1150
1151 /* If this is a more restrictive type of dependence than the
1152 existing one, then change the existing dependence to this
1153 type. */
1154 if ((int) DEP_TYPE (new_dep) < (int) old_type)
1155 {
1156 DEP_TYPE (dep) = DEP_TYPE (new_dep);
1157 res = DEP_CHANGED;
1158 }
1159
1160 if (current_sched_info->flags & USE_DEPS_LIST)
1161 /* Update DEP_STATUS. */
1162 {
1163 ds_t dep_status = DEP_STATUS (dep);
1164 ds_t ds = DEP_STATUS (new_dep);
1165 ds_t new_status = ds | dep_status;
1166
1167 if (new_status & SPECULATIVE)
1168 {
1169 /* Either existing dep or a dep we're adding or both are
1170 speculative. */
1171 if (!(ds & SPECULATIVE)
1172 || !(dep_status & SPECULATIVE))
1173 /* The new dep can't be speculative. */
1174 new_status &= ~SPECULATIVE;
1175 else
1176 {
1177 /* Both are speculative. Merge probabilities. */
1178 if (mem1 != NULL)
1179 {
1180 dw_t dw;
1181
1182 dw = estimate_dep_weak (mem1, mem2);
1183 ds = set_dep_weak (ds, BEGIN_DATA, dw);
1184 }
1185
1186 new_status = ds_merge (dep_status, ds);
1187 }
1188 }
1189
1190 ds = new_status;
1191
1192 if (dep_status != ds)
1193 {
1194 DEP_STATUS (dep) = ds;
1195 res = DEP_CHANGED;
1196 }
1197 }
1198
1199 if (was_spec && !dep_spec_p (dep))
1200 /* The old dep was speculative, but now it isn't. */
1201 change_spec_dep_to_hard (sd_it);
1202
1203 if (true_dependency_cache != NULL
1204 && res == DEP_CHANGED)
1205 update_dependency_caches (dep, old_type);
1206
1207 return res;
1208 }
1209
1210 /* Add or update a dependence described by DEP.
1211 MEM1 and MEM2, if non-null, correspond to memory locations in case of
1212 data speculation.
1213
 1214 The function returns a value indicating whether an old entry has been
 1215 changed, a new entry has been added to insn's backward deps, or nothing
 1216 has been updated at all. */
1217 static enum DEPS_ADJUST_RESULT
1218 add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
1219 rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
1220 {
1221 bool maybe_present_p = true;
1222 bool present_p = false;
1223
1224 gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
1225 && DEP_PRO (new_dep) != DEP_CON (new_dep));
1226
1227 #ifdef ENABLE_CHECKING
1228 check_dep (new_dep, mem1 != NULL);
1229 #endif
1230
1231 if (true_dependency_cache != NULL)
1232 {
1233 switch (ask_dependency_caches (new_dep))
1234 {
1235 case DEP_PRESENT:
1236 return DEP_PRESENT;
1237
1238 case DEP_CHANGED:
1239 maybe_present_p = true;
1240 present_p = true;
1241 break;
1242
1243 case DEP_CREATED:
1244 maybe_present_p = false;
1245 present_p = false;
1246 break;
1247
1248 default:
1249 gcc_unreachable ();
1250 break;
1251 }
1252 }
1253
1254 /* Check that we don't already have this dependence. */
1255 if (maybe_present_p)
1256 {
1257 dep_t present_dep;
1258 sd_iterator_def sd_it;
1259
1260 gcc_assert (true_dependency_cache == NULL || present_p);
1261
1262 present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
1263 DEP_CON (new_dep),
1264 resolved_p, &sd_it);
1265
1266 if (present_dep != NULL)
1267 /* We found an existing dependency between ELEM and INSN. */
1268 return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
1269 else
 1270 /* We didn't find a dep, so it shouldn't be present in the cache. */
1271 gcc_assert (!present_p);
1272 }
1273
1274 /* Might want to check one level of transitivity to save conses.
1275 This check should be done in maybe_add_or_update_dep_1.
1276 Since we made it to add_or_update_dep_1, we must create
1277 (or update) a link. */
1278
1279 if (mem1 != NULL_RTX)
1280 {
1281 gcc_assert (sched_deps_info->generate_spec_deps);
1282 DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
1283 estimate_dep_weak (mem1, mem2));
1284 }
1285
1286 sd_add_dep (new_dep, resolved_p);
1287
1288 return DEP_CREATED;
1289 }
1290
1291 /* Initialize BACK_LIST_PTR with consumer's backward list and
1292 FORW_LIST_PTR with producer's forward list. If RESOLVED_P is true
1293 initialize with lists that hold resolved deps. */
1294 static void
1295 get_back_and_forw_lists (dep_t dep, bool resolved_p,
1296 deps_list_t *back_list_ptr,
1297 deps_list_t *forw_list_ptr)
1298 {
1299 rtx con = DEP_CON (dep);
1300
1301 if (!resolved_p)
1302 {
1303 if (dep_spec_p (dep))
1304 *back_list_ptr = INSN_SPEC_BACK_DEPS (con);
1305 else
1306 *back_list_ptr = INSN_HARD_BACK_DEPS (con);
1307
1308 *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
1309 }
1310 else
1311 {
1312 *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
1313 *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
1314 }
1315 }
1316
1317 /* Add dependence described by DEP.
1318 If RESOLVED_P is true treat the dependence as a resolved one. */
1319 void
1320 sd_add_dep (dep_t dep, bool resolved_p)
1321 {
1322 dep_node_t n = create_dep_node ();
1323 deps_list_t con_back_deps;
1324 deps_list_t pro_forw_deps;
1325 rtx elem = DEP_PRO (dep);
1326 rtx insn = DEP_CON (dep);
1327
1328 gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
1329
1330 if ((current_sched_info->flags & DO_SPECULATION) == 0
1331 || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
1332 DEP_STATUS (dep) &= ~SPECULATIVE;
1333
1334 copy_dep (DEP_NODE_DEP (n), dep);
1335
1336 get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);
1337
1338 add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
1339
1340 #ifdef ENABLE_CHECKING
1341 check_dep (dep, false);
1342 #endif
1343
1344 add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1345
1346 /* If we are adding a dependency to INSN's LOG_LINKs, then note that
1347 in the bitmap caches of dependency information. */
1348 if (true_dependency_cache != NULL)
1349 set_dependency_caches (dep);
1350 }
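
/* An illustrative sketch of creating a dependence from scratch, with
   PRO and CON hypothetical insns:

     dep_def _dep, *dep = &_dep;

     init_dep (dep, pro, con, REG_DEP_TRUE);
     sd_add_dep (dep, false);

   sd_add_dep copies *DEP into a freshly allocated dep_node, so the
   stack-allocated dep_def can be discarded afterwards; compare
   haifa_note_dep further below, which instead goes through
   maybe_add_or_update_dep_1 so that duplicates are merged.  */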
1351
 1352 /* Add or update a backward dependence described by DEP.
 1353 If RESOLVED_P is true, treat the dependence as a resolved one.
 1354 This function is a convenience wrapper around add_or_update_dep_1. */
1355 enum DEPS_ADJUST_RESULT
1356 sd_add_or_update_dep (dep_t dep, bool resolved_p)
1357 {
1358 return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
1359 }
1360
 1361 /* Resolve the dependence pointed to by SD_IT.
 1362 SD_IT will advance to the next element. */
1363 void
1364 sd_resolve_dep (sd_iterator_def sd_it)
1365 {
1366 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1367 dep_t dep = DEP_NODE_DEP (node);
1368 rtx pro = DEP_PRO (dep);
1369 rtx con = DEP_CON (dep);
1370
1371 if (dep_spec_p (dep))
1372 move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
1373 INSN_RESOLVED_BACK_DEPS (con));
1374 else
1375 move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
1376 INSN_RESOLVED_BACK_DEPS (con));
1377
1378 move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
1379 INSN_RESOLVED_FORW_DEPS (pro));
1380 }
1381
1382 /* Perform the inverse operation of sd_resolve_dep. Restore the dependence
1383 pointed to by SD_IT to unresolved state. */
1384 void
1385 sd_unresolve_dep (sd_iterator_def sd_it)
1386 {
1387 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1388 dep_t dep = DEP_NODE_DEP (node);
1389 rtx pro = DEP_PRO (dep);
1390 rtx con = DEP_CON (dep);
1391
1392 if (dep_spec_p (dep))
1393 move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
1394 INSN_SPEC_BACK_DEPS (con));
1395 else
1396 move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
1397 INSN_HARD_BACK_DEPS (con));
1398
1399 move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
1400 INSN_FORW_DEPS (pro));
1401 }
1402
1403 /* Make TO depend on all the FROM's producers.
1404 If RESOLVED_P is true add dependencies to the resolved lists. */
1405 void
1406 sd_copy_back_deps (rtx to, rtx from, bool resolved_p)
1407 {
1408 sd_list_types_def list_type;
1409 sd_iterator_def sd_it;
1410 dep_t dep;
1411
1412 list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;
1413
1414 FOR_EACH_DEP (from, list_type, sd_it, dep)
1415 {
1416 dep_def _new_dep, *new_dep = &_new_dep;
1417
1418 copy_dep (new_dep, dep);
1419 DEP_CON (new_dep) = to;
1420 sd_add_dep (new_dep, resolved_p);
1421 }
1422 }
1423
1424 /* Remove a dependency referred to by SD_IT.
1425 SD_IT will point to the next dependence after removal. */
1426 void
1427 sd_delete_dep (sd_iterator_def sd_it)
1428 {
1429 dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
1430 dep_t dep = DEP_NODE_DEP (n);
1431 rtx pro = DEP_PRO (dep);
1432 rtx con = DEP_CON (dep);
1433 deps_list_t con_back_deps;
1434 deps_list_t pro_forw_deps;
1435
1436 if (true_dependency_cache != NULL)
1437 {
1438 int elem_luid = INSN_LUID (pro);
1439 int insn_luid = INSN_LUID (con);
1440
1441 bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
1442 bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1443 bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
1444 bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1445
1446 if (current_sched_info->flags & DO_SPECULATION)
1447 bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
1448 }
1449
1450 get_back_and_forw_lists (dep, sd_it.resolved_p,
1451 &con_back_deps, &pro_forw_deps);
1452
1453 remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
1454 remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1455
1456 delete_dep_node (n);
1457 }
1458
1459 /* Dump size of the lists. */
1460 #define DUMP_LISTS_SIZE (2)
1461
1462 /* Dump dependencies of the lists. */
1463 #define DUMP_LISTS_DEPS (4)
1464
1465 /* Dump all information about the lists. */
1466 #define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)
1467
1468 /* Dump deps_lists of INSN specified by TYPES to DUMP.
1469 FLAGS is a bit mask specifying what information about the lists needs
1470 to be printed.
1471 If FLAGS has the very first bit set, then dump all information about
1472 the lists and propagate this bit into the callee dump functions. */
1473 static void
1474 dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
1475 {
1476 sd_iterator_def sd_it;
1477 dep_t dep;
1478 int all;
1479
1480 all = (flags & 1);
1481
1482 if (all)
1483 flags |= DUMP_LISTS_ALL;
1484
1485 fprintf (dump, "[");
1486
1487 if (flags & DUMP_LISTS_SIZE)
1488 fprintf (dump, "%d; ", sd_lists_size (insn, types));
1489
1490 if (flags & DUMP_LISTS_DEPS)
1491 {
1492 FOR_EACH_DEP (insn, types, sd_it, dep)
1493 {
1494 dump_dep (dump, dep, dump_dep_flags | all);
1495 fprintf (dump, " ");
1496 }
1497 }
1498 }
1499
1500 /* Dump all information about deps_lists of INSN specified by TYPES
1501 to STDERR. */
1502 void
1503 sd_debug_lists (rtx insn, sd_list_types_def types)
1504 {
1505 dump_lists (stderr, insn, types, 1);
1506 fprintf (stderr, "\n");
1507 }
1508
1509 /* A wrapper around add_dependence_1, to add a dependence of CON on
1510 PRO, with type DEP_TYPE. This function implements special handling
1511 for REG_DEP_CONTROL dependencies. For these, we optionally promote
1512 the type to REG_DEP_ANTI if we can determine that predication is
1513 impossible; otherwise we add additional true dependencies on the
1514 INSN_COND_DEPS list of the jump (which PRO must be). */
1515 void
1516 add_dependence (rtx con, rtx pro, enum reg_note dep_type)
1517 {
1518 if (dep_type == REG_DEP_CONTROL
1519 && !(current_sched_info->flags & DO_PREDICATION))
1520 dep_type = REG_DEP_ANTI;
1521
1522 /* A REG_DEP_CONTROL dependence may be eliminated through predication,
1523 so we must also make the insn dependent on the setter of the
1524 condition. */
1525 if (dep_type == REG_DEP_CONTROL)
1526 {
1527 rtx real_pro = pro;
1528 rtx other = real_insn_for_shadow (real_pro);
1529 rtx cond;
1530
1531 if (other != NULL_RTX)
1532 real_pro = other;
1533 cond = sched_get_reverse_condition_uncached (real_pro);
1534 /* Verify that the insn does not use a different value in
1535 the condition register than the one that was present at
1536 the jump. */
1537 if (cond == NULL_RTX)
1538 dep_type = REG_DEP_ANTI;
1539 else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
1540 {
1541 HARD_REG_SET uses;
1542 CLEAR_HARD_REG_SET (uses);
1543 note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
1544 if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
1545 dep_type = REG_DEP_ANTI;
1546 }
1547 if (dep_type == REG_DEP_CONTROL)
1548 {
1549 if (sched_verbose >= 5)
1550 fprintf (sched_dump, "making DEP_CONTROL for %d\n",
1551 INSN_UID (real_pro));
1552 add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
1553 REG_DEP_TRUE, false);
1554 }
1555 }
1556
1557 add_dependence_1 (con, pro, dep_type);
1558 }
1559
1560 /* A convenience wrapper to operate on an entire list. HARD should be
1561 true if DEP_NONREG should be set on newly created dependencies. */
1562
1563 static void
1564 add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type,
1565 bool hard)
1566 {
1567 mark_as_hard = hard;
1568 for (; list; list = XEXP (list, 1))
1569 {
1570 if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
1571 add_dependence (insn, XEXP (list, 0), dep_type);
1572 }
1573 mark_as_hard = false;
1574 }
1575
1576 /* Similar, but free *LISTP at the same time, when the context
1577 is not readonly. HARD should be true if DEP_NONREG should be set on
1578 newly created dependencies. */
1579
1580 static void
1581 add_dependence_list_and_free (struct deps_desc *deps, rtx insn, rtx *listp,
1582 int uncond, enum reg_note dep_type, bool hard)
1583 {
1584 add_dependence_list (insn, *listp, uncond, dep_type, hard);
1585
1586 /* We don't want to short-circuit dependencies involving debug
1587 insns, because they may cause actual dependencies to be
1588 disregarded. */
1589 if (deps->readonly || DEBUG_INSN_P (insn))
1590 return;
1591
1592 free_INSN_LIST_list (listp);
1593 }
1594
1595 /* Remove all occurrences of INSN from LIST. Return the number of
1596 occurrences removed. */
1597
1598 static int
1599 remove_from_dependence_list (rtx insn, rtx* listp)
1600 {
1601 int removed = 0;
1602
1603 while (*listp)
1604 {
1605 if (XEXP (*listp, 0) == insn)
1606 {
1607 remove_free_INSN_LIST_node (listp);
1608 removed++;
1609 continue;
1610 }
1611
1612 listp = &XEXP (*listp, 1);
1613 }
1614
1615 return removed;
1616 }
1617
1618 /* Same as above, but process two lists at once. */
1619 static int
1620 remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
1621 {
1622 int removed = 0;
1623
1624 while (*listp)
1625 {
1626 if (XEXP (*listp, 0) == insn)
1627 {
1628 remove_free_INSN_LIST_node (listp);
1629 remove_free_EXPR_LIST_node (exprp);
1630 removed++;
1631 continue;
1632 }
1633
1634 listp = &XEXP (*listp, 1);
1635 exprp = &XEXP (*exprp, 1);
1636 }
1637
1638 return removed;
1639 }
1640
1641 /* Clear all dependencies for an insn. */
1642 static void
1643 delete_all_dependences (rtx insn)
1644 {
1645 sd_iterator_def sd_it;
1646 dep_t dep;
1647
 1648 /* The loop below could be optimized to clear the caches and back_deps
 1649 in one call, but that would provoke duplication of code from
 1650 sd_delete_dep (). */
1651
1652 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1653 sd_iterator_cond (&sd_it, &dep);)
1654 sd_delete_dep (sd_it);
1655 }
1656
1657 /* All insns in a scheduling group except the first should only have
1658 dependencies on the previous insn in the group. So we find the
1659 first instruction in the scheduling group by walking the dependence
1660 chains backwards. Then we add the dependencies for the group to
1661 the previous nonnote insn. */
1662
1663 static void
1664 chain_to_prev_insn (rtx insn)
1665 {
1666 sd_iterator_def sd_it;
1667 dep_t dep;
1668 rtx prev_nonnote;
1669
1670 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1671 {
1672 rtx i = insn;
1673 rtx pro = DEP_PRO (dep);
1674
1675 do
1676 {
1677 i = prev_nonnote_insn (i);
1678
1679 if (pro == i)
1680 goto next_link;
1681 } while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));
1682
1683 if (! sched_insns_conditions_mutex_p (i, pro))
1684 add_dependence (i, pro, DEP_TYPE (dep));
1685 next_link:;
1686 }
1687
1688 delete_all_dependences (insn);
1689
1690 prev_nonnote = prev_nonnote_nondebug_insn (insn);
1691 if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
1692 && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
1693 add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
1694 }
1695 \f
1696 /* Process an insn's memory dependencies. There are four kinds of
1697 dependencies:
1698
1699 (0) read dependence: read follows read
1700 (1) true dependence: read follows write
1701 (2) output dependence: write follows write
1702 (3) anti dependence: write follows read
1703
1704 We are careful to build only dependencies which actually exist, and
1705 use transitivity to avoid building too many links. */
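
/* For example (an illustrative sketch, P being a hypothetical pointer):

     a = *p;   b = *p;     (0) read after read: no link is needed
     *p = a;   b = *p;     (1) true: the read must follow the write
     *p = a;   *p = b;     (2) output: the two writes must stay ordered
     a = *p;   *p = b;     (3) anti: the write must follow the read  */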
1706
1707 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1708 The MEM is a memory reference contained within INSN, which we are saving
1709 so that we can do memory aliasing on it. */
1710
1711 static void
1712 add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
1713 rtx insn, rtx mem)
1714 {
1715 rtx *insn_list;
1716 rtx *mem_list;
1717 rtx link;
1718
1719 gcc_assert (!deps->readonly);
1720 if (read_p)
1721 {
1722 insn_list = &deps->pending_read_insns;
1723 mem_list = &deps->pending_read_mems;
1724 if (!DEBUG_INSN_P (insn))
1725 deps->pending_read_list_length++;
1726 }
1727 else
1728 {
1729 insn_list = &deps->pending_write_insns;
1730 mem_list = &deps->pending_write_mems;
1731 deps->pending_write_list_length++;
1732 }
1733
1734 link = alloc_INSN_LIST (insn, *insn_list);
1735 *insn_list = link;
1736
1737 if (sched_deps_info->use_cselib)
1738 {
1739 mem = shallow_copy_rtx (mem);
1740 XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
1741 GET_MODE (mem), insn);
1742 }
1743 link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
1744 *mem_list = link;
1745 }
1746
1747 /* Make a dependency between every memory reference on the pending lists
1748 and INSN, thus flushing the pending lists. FOR_READ is true if emitting
1749 dependencies for a read operation, similarly with FOR_WRITE. */
1750
1751 static void
1752 flush_pending_lists (struct deps_desc *deps, rtx insn, int for_read,
1753 int for_write)
1754 {
1755 if (for_write)
1756 {
1757 add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1758 1, REG_DEP_ANTI, true);
1759 if (!deps->readonly)
1760 {
1761 free_EXPR_LIST_list (&deps->pending_read_mems);
1762 deps->pending_read_list_length = 0;
1763 }
1764 }
1765
1766 add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
1767 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1768 true);
1769
1770 add_dependence_list_and_free (deps, insn,
1771 &deps->last_pending_memory_flush, 1,
1772 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1773 true);
1774
1775 add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
1776 REG_DEP_ANTI, true);
1777
1778 if (DEBUG_INSN_P (insn))
1779 {
1780 if (for_write)
1781 free_INSN_LIST_list (&deps->pending_read_insns);
1782 free_INSN_LIST_list (&deps->pending_write_insns);
1783 free_INSN_LIST_list (&deps->last_pending_memory_flush);
1784 free_INSN_LIST_list (&deps->pending_jump_insns);
1785 }
1786
1787 if (!deps->readonly)
1788 {
1789 free_EXPR_LIST_list (&deps->pending_write_mems);
1790 deps->pending_write_list_length = 0;
1791
1792 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1793 deps->pending_flush_length = 1;
1794 }
1795 mark_as_hard = false;
1796 }
1797 \f
 1798 /* The instruction whose dependencies we are analyzing. */
1799 static rtx cur_insn = NULL_RTX;
1800
 1801 /* Implement hooks for the Haifa scheduler. */
1802
1803 static void
1804 haifa_start_insn (rtx insn)
1805 {
1806 gcc_assert (insn && !cur_insn);
1807
1808 cur_insn = insn;
1809 }
1810
1811 static void
1812 haifa_finish_insn (void)
1813 {
1814 cur_insn = NULL;
1815 }
1816
1817 void
1818 haifa_note_reg_set (int regno)
1819 {
1820 SET_REGNO_REG_SET (reg_pending_sets, regno);
1821 }
1822
1823 void
1824 haifa_note_reg_clobber (int regno)
1825 {
1826 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1827 }
1828
1829 void
1830 haifa_note_reg_use (int regno)
1831 {
1832 SET_REGNO_REG_SET (reg_pending_uses, regno);
1833 }
1834
1835 static void
1836 haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds)
1837 {
1838 if (!(ds & SPECULATIVE))
1839 {
1840 mem = NULL_RTX;
1841 pending_mem = NULL_RTX;
1842 }
1843 else
1844 gcc_assert (ds & BEGIN_DATA);
1845
1846 {
1847 dep_def _dep, *dep = &_dep;
1848
1849 init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
1850 current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
1851 DEP_NONREG (dep) = 1;
1852 maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1853 }
1854
1855 }
1856
1857 static void
1858 haifa_note_dep (rtx elem, ds_t ds)
1859 {
1860 dep_def _dep;
1861 dep_t dep = &_dep;
1862
1863 init_dep (dep, elem, cur_insn, ds_to_dt (ds));
1864 if (mark_as_hard)
1865 DEP_NONREG (dep) = 1;
1866 maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1867 }
1868
1869 static void
1870 note_reg_use (int r)
1871 {
1872 if (sched_deps_info->note_reg_use)
1873 sched_deps_info->note_reg_use (r);
1874 }
1875
1876 static void
1877 note_reg_set (int r)
1878 {
1879 if (sched_deps_info->note_reg_set)
1880 sched_deps_info->note_reg_set (r);
1881 }
1882
1883 static void
1884 note_reg_clobber (int r)
1885 {
1886 if (sched_deps_info->note_reg_clobber)
1887 sched_deps_info->note_reg_clobber (r);
1888 }
1889
1890 static void
1891 note_mem_dep (rtx m1, rtx m2, rtx e, ds_t ds)
1892 {
1893 if (sched_deps_info->note_mem_dep)
1894 sched_deps_info->note_mem_dep (m1, m2, e, ds);
1895 }
1896
1897 static void
1898 note_dep (rtx e, ds_t ds)
1899 {
1900 if (sched_deps_info->note_dep)
1901 sched_deps_info->note_dep (e, ds);
1902 }
1903
 1904 /* Return the reg_note corresponding to DS. */
1905 enum reg_note
1906 ds_to_dt (ds_t ds)
1907 {
1908 if (ds & DEP_TRUE)
1909 return REG_DEP_TRUE;
1910 else if (ds & DEP_OUTPUT)
1911 return REG_DEP_OUTPUT;
1912 else if (ds & DEP_ANTI)
1913 return REG_DEP_ANTI;
1914 else
1915 {
1916 gcc_assert (ds & DEP_CONTROL);
1917 return REG_DEP_CONTROL;
1918 }
1919 }
1920
1921 \f
1922
1923 /* Functions for computation of info needed for register pressure
1924 sensitive insn scheduling. */
1925
1926
 1927 /* Allocate and return a reg_use_data structure for REGNO and INSN. */
1928 static struct reg_use_data *
1929 create_insn_reg_use (int regno, rtx insn)
1930 {
1931 struct reg_use_data *use;
1932
1933 use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1934 use->regno = regno;
1935 use->insn = insn;
1936 use->next_insn_use = INSN_REG_USE_LIST (insn);
1937 INSN_REG_USE_LIST (insn) = use;
1938 return use;
1939 }
1940
 1941 /* Allocate a reg_set_data structure for REGNO and INSN. */
1942 static void
1943 create_insn_reg_set (int regno, rtx insn)
1944 {
1945 struct reg_set_data *set;
1946
1947 set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1948 set->regno = regno;
1949 set->insn = insn;
1950 set->next_insn_set = INSN_REG_SET_LIST (insn);
1951 INSN_REG_SET_LIST (insn) = set;
1952 }
1953
1954 /* Set up insn register uses for INSN and dependency context DEPS. */
1955 static void
1956 setup_insn_reg_uses (struct deps_desc *deps, rtx insn)
1957 {
1958 unsigned i;
1959 reg_set_iterator rsi;
1960 rtx list;
1961 struct reg_use_data *use, *use2, *next;
1962 struct deps_reg *reg_last;
1963
1964 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1965 {
1966 if (i < FIRST_PSEUDO_REGISTER
1967 && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1968 continue;
1969
1970 if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1971 && ! REGNO_REG_SET_P (reg_pending_sets, i)
1972 && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
1973 /* Ignore use which is not dying. */
1974 continue;
1975
1976 use = create_insn_reg_use (i, insn);
1977 use->next_regno_use = use;
1978 reg_last = &deps->reg_last[i];
1979
1980 /* Create the cycle list of uses. */
1981 for (list = reg_last->uses; list; list = XEXP (list, 1))
1982 {
1983 use2 = create_insn_reg_use (i, XEXP (list, 0));
1984 next = use->next_regno_use;
1985 use->next_regno_use = use2;
1986 use2->next_regno_use = next;
1987 }
1988 }
1989 }
1990
1991 /* Register pressure info for the currently processed insn. */
1992 static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
1993
1994 /* Return TRUE if INSN has the use structure for REGNO. */
1995 static bool
1996 insn_use_p (rtx insn, int regno)
1997 {
1998 struct reg_use_data *use;
1999
2000 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2001 if (use->regno == regno)
2002 return true;
2003 return false;
2004 }
2005
2006 /* Update the register pressure info after the birth of pseudo register
2007 REGNO in INSN. Arguments CLOBBER_P and UNUSED_P say, respectively, that
2008 the register is clobbered by or unused after the insn. */
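/* For instance, (set (reg:SI 70) ...) carrying a REG_UNUSED note for the
   register arrives here with UNUSED_P set, while (clobber (reg:SI 70))
   arrives with CLOBBER_P set -- an illustrative sketch of the callers
   below, mark_insn_reg_store and mark_insn_reg_clobber.  */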
2009 static void
2010 mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
2011 {
2012 int incr, new_incr;
2013 enum reg_class cl;
2014
2015 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2016 cl = sched_regno_pressure_class[regno];
2017 if (cl != NO_REGS)
2018 {
2019 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2020 if (clobber_p)
2021 {
2022 new_incr = reg_pressure_info[cl].clobber_increase + incr;
2023 reg_pressure_info[cl].clobber_increase = new_incr;
2024 }
2025 else if (unused_p)
2026 {
2027 new_incr = reg_pressure_info[cl].unused_set_increase + incr;
2028 reg_pressure_info[cl].unused_set_increase = new_incr;
2029 }
2030 else
2031 {
2032 new_incr = reg_pressure_info[cl].set_increase + incr;
2033 reg_pressure_info[cl].set_increase = new_incr;
2034 if (! insn_use_p (insn, regno))
2035 reg_pressure_info[cl].change += incr;
2036 create_insn_reg_set (regno, insn);
2037 }
2038 gcc_assert (new_incr < (1 << INCREASE_BITS));
2039 }
2040 }
2041
2042 /* Like mark_insn_pseudo_birth except that NREGS says how many
2043 hard registers are involved in the birth. */
2044 static void
2045 mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
2046 bool clobber_p, bool unused_p)
2047 {
2048 enum reg_class cl;
2049 int new_incr, last = regno + nregs;
2050
2051 while (regno < last)
2052 {
2053 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2054 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2055 {
2056 cl = sched_regno_pressure_class[regno];
2057 if (cl != NO_REGS)
2058 {
2059 if (clobber_p)
2060 {
2061 new_incr = reg_pressure_info[cl].clobber_increase + 1;
2062 reg_pressure_info[cl].clobber_increase = new_incr;
2063 }
2064 else if (unused_p)
2065 {
2066 new_incr = reg_pressure_info[cl].unused_set_increase + 1;
2067 reg_pressure_info[cl].unused_set_increase = new_incr;
2068 }
2069 else
2070 {
2071 new_incr = reg_pressure_info[cl].set_increase + 1;
2072 reg_pressure_info[cl].set_increase = new_incr;
2073 if (! insn_use_p (insn, regno))
2074 reg_pressure_info[cl].change += 1;
2075 create_insn_reg_set (regno, insn);
2076 }
2077 gcc_assert (new_incr < (1 << INCREASE_BITS));
2078 }
2079 }
2080 regno++;
2081 }
2082 }
2083
2084 /* Update the register pressure info after the birth of pseudo or hard
2085 register REG in INSN. Arguments CLOBBER_P and UNUSED_P say,
2086 respectively, that the register is clobbered by or unused after the
2087 insn. */
2088 static void
2089 mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
2090 {
2091 int regno;
2092
2093 if (GET_CODE (reg) == SUBREG)
2094 reg = SUBREG_REG (reg);
2095
2096 if (! REG_P (reg))
2097 return;
2098
2099 regno = REGNO (reg);
2100 if (regno < FIRST_PSEUDO_REGISTER)
2101 mark_insn_hard_regno_birth (insn, regno,
2102 hard_regno_nregs[regno][GET_MODE (reg)],
2103 clobber_p, unused_p);
2104 else
2105 mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
2106 }
2107
2108 /* Update the register pressure info after death of pseudo register
2109 REGNO. */
2110 static void
2111 mark_pseudo_death (int regno)
2112 {
2113 int incr;
2114 enum reg_class cl;
2115
2116 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2117 cl = sched_regno_pressure_class[regno];
2118 if (cl != NO_REGS)
2119 {
2120 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2121 reg_pressure_info[cl].change -= incr;
2122 }
2123 }
2124
2125 /* Like mark_pseudo_death except that NREGS says how many hard
2126 registers are involved in the death. */
2127 static void
2128 mark_hard_regno_death (int regno, int nregs)
2129 {
2130 enum reg_class cl;
2131 int last = regno + nregs;
2132
2133 while (regno < last)
2134 {
2135 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2136 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2137 {
2138 cl = sched_regno_pressure_class[regno];
2139 if (cl != NO_REGS)
2140 reg_pressure_info[cl].change -= 1;
2141 }
2142 regno++;
2143 }
2144 }
2145
2146 /* Update the register pressure info after death of pseudo or hard
2147 register REG. */
2148 static void
2149 mark_reg_death (rtx reg)
2150 {
2151 int regno;
2152
2153 if (GET_CODE (reg) == SUBREG)
2154 reg = SUBREG_REG (reg);
2155
2156 if (! REG_P (reg))
2157 return;
2158
2159 regno = REGNO (reg);
2160 if (regno < FIRST_PSEUDO_REGISTER)
2161 mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]);
2162 else
2163 mark_pseudo_death (regno);
2164 }
2165
2166 /* Process SETTER of REG. DATA is an insn containing the setter. */
2167 static void
2168 mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
2169 {
2170 if (setter != NULL_RTX && GET_CODE (setter) != SET)
2171 return;
2172 mark_insn_reg_birth
2173 ((rtx) data, reg, false,
2174 find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
2175 }
2176
2177 /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs. */
2178 static void
2179 mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
2180 {
2181 if (GET_CODE (setter) == CLOBBER)
2182 mark_insn_reg_birth ((rtx) data, reg, true, false);
2183 }
2184
2185 /* Set up reg pressure info related to INSN. */
2186 void
2187 init_insn_reg_pressure_info (rtx insn)
2188 {
2189 int i, len;
2190 enum reg_class cl;
2191 static struct reg_pressure_data *pressure_info;
2192 rtx link;
2193
2194 gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);
2195
2196 if (! INSN_P (insn))
2197 return;
2198
2199 for (i = 0; i < ira_pressure_classes_num; i++)
2200 {
2201 cl = ira_pressure_classes[i];
2202 reg_pressure_info[cl].clobber_increase = 0;
2203 reg_pressure_info[cl].set_increase = 0;
2204 reg_pressure_info[cl].unused_set_increase = 0;
2205 reg_pressure_info[cl].change = 0;
2206 }
2207
2208 note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
2209
2210 note_stores (PATTERN (insn), mark_insn_reg_store, insn);
2211
2212 #ifdef AUTO_INC_DEC
2213 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2214 if (REG_NOTE_KIND (link) == REG_INC)
2215 mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2216 #endif
2217
2218 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2219 if (REG_NOTE_KIND (link) == REG_DEAD)
2220 mark_reg_death (XEXP (link, 0));
2221
2222 len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
2223 pressure_info
2224 = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2225 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2226 INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
2227 * sizeof (int), 1);
2228 for (i = 0; i < ira_pressure_classes_num; i++)
2229 {
2230 cl = ira_pressure_classes[i];
2231 pressure_info[i].clobber_increase
2232 = reg_pressure_info[cl].clobber_increase;
2233 pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2234 pressure_info[i].unused_set_increase
2235 = reg_pressure_info[cl].unused_set_increase;
2236 pressure_info[i].change = reg_pressure_info[cl].change;
2237 }
2238 }
2239
2240
2241 \f
2242
2243 /* Internal variable for the sched_analyze_[12] () functions.
2244 If it is nonzero, sched_analyze_[12] is looking at the
2245 outermost SET of the insn. */
2246 static bool can_start_lhs_rhs_p;
2247
2248 /* Extend reg info for the deps context DEPS given that
2249 we have just generated a register numbered REGNO. */
2250 static void
2251 extend_deps_reg_info (struct deps_desc *deps, int regno)
2252 {
2253 int max_regno = regno + 1;
2254
2255 gcc_assert (!reload_completed);
2256
2257 /* In a readonly context, it would not hurt to extend info,
2258 but it should not be needed. */
2259 if (reload_completed && deps->readonly)
2260 {
2261 deps->max_reg = max_regno;
2262 return;
2263 }
2264
2265 if (max_regno > deps->max_reg)
2266 {
2267 deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2268 max_regno);
2269 memset (&deps->reg_last[deps->max_reg],
2270 0, (max_regno - deps->max_reg)
2271 * sizeof (struct deps_reg));
2272 deps->max_reg = max_regno;
2273 }
2274 }
2275
2276 /* Extends REG_INFO_P if needed. */
2277 void
2278 maybe_extend_reg_info_p (void)
2279 {
2280 /* Extend REG_INFO_P, if needed. */
2281 if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2282 {
2283 size_t new_reg_info_p_size = max_regno + 128;
2284
2285 gcc_assert (!reload_completed && sel_sched_p ());
2286
2287 reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2288 new_reg_info_p_size,
2289 reg_info_p_size,
2290 sizeof (*reg_info_p));
2291 reg_info_p_size = new_reg_info_p_size;
2292 }
2293 }
2294
2295 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2296 The type of the reference is specified by REF and can be SET,
2297 CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
2298
2299 static void
2300 sched_analyze_reg (struct deps_desc *deps, int regno, enum machine_mode mode,
2301 enum rtx_code ref, rtx insn)
2302 {
2303 /* We could emit new pseudos in renaming. Extend the reg structures. */
2304 if (!reload_completed && sel_sched_p ()
2305 && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2306 extend_deps_reg_info (deps, regno);
2307
2308 maybe_extend_reg_info_p ();
2309
2310 /* A hard reg in a wide mode may really be multiple registers.
2311 If so, mark all of them just like the first. */
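/* E.g. on a typical 32-bit target (reg:DI 2) occupies hard registers
   2 and 3 (hard_regno_nregs decides the exact count), so a SET of it
   is noted as setting both.  */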
2312 if (regno < FIRST_PSEUDO_REGISTER)
2313 {
2314 int i = hard_regno_nregs[regno][mode];
2315 if (ref == SET)
2316 {
2317 while (--i >= 0)
2318 note_reg_set (regno + i);
2319 }
2320 else if (ref == USE)
2321 {
2322 while (--i >= 0)
2323 note_reg_use (regno + i);
2324 }
2325 else
2326 {
2327 while (--i >= 0)
2328 note_reg_clobber (regno + i);
2329 }
2330 }
2331
2332 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2333 it does not reload. Ignore these as they have served their
2334 purpose already. */
2335 else if (regno >= deps->max_reg)
2336 {
2337 enum rtx_code code = GET_CODE (PATTERN (insn));
2338 gcc_assert (code == USE || code == CLOBBER);
2339 }
2340
2341 else
2342 {
2343 if (ref == SET)
2344 note_reg_set (regno);
2345 else if (ref == USE)
2346 note_reg_use (regno);
2347 else
2348 note_reg_clobber (regno);
2349
2350 /* Pseudos that are REG_EQUIV to something may be replaced
2351 by that during reloading. We need only add dependencies for
2352 the address in the REG_EQUIV note. */
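/* E.g. a pseudo whose REG_EQUIV note is a stack slot such as
   (mem:SI (plus (reg sp) (const_int 16))) may be replaced by that
   memory reference during reload, so its address is analyzed here.  */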
2353 if (!reload_completed && get_reg_known_equiv_p (regno))
2354 {
2355 rtx t = get_reg_known_value (regno);
2356 if (MEM_P (t))
2357 sched_analyze_2 (deps, XEXP (t, 0), insn);
2358 }
2359
2360 /* Don't let it cross a call after scheduling if it doesn't
2361 already cross one. */
2362 if (REG_N_CALLS_CROSSED (regno) == 0)
2363 {
2364 if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2365 deps->sched_before_next_call
2366 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2367 else
2368 add_dependence_list (insn, deps->last_function_call, 1,
2369 REG_DEP_ANTI, false);
2370 }
2371 }
2372 }
2373
2374 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2375 rtx, X, creating all dependencies generated by the write to the
2376 destination of X, and reads of everything mentioned. */
2377
2378 static void
2379 sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn)
2380 {
2381 rtx dest = XEXP (x, 0);
2382 enum rtx_code code = GET_CODE (x);
2383 bool cslr_p = can_start_lhs_rhs_p;
2384
2385 can_start_lhs_rhs_p = false;
2386
2387 gcc_assert (dest);
2388 if (dest == 0)
2389 return;
2390
2391 if (cslr_p && sched_deps_info->start_lhs)
2392 sched_deps_info->start_lhs (dest);
2393
2394 if (GET_CODE (dest) == PARALLEL)
2395 {
2396 int i;
2397
2398 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2399 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2400 sched_analyze_1 (deps,
2401 gen_rtx_CLOBBER (VOIDmode,
2402 XEXP (XVECEXP (dest, 0, i), 0)),
2403 insn);
2404
2405 if (cslr_p && sched_deps_info->finish_lhs)
2406 sched_deps_info->finish_lhs ();
2407
2408 if (code == SET)
2409 {
2410 can_start_lhs_rhs_p = cslr_p;
2411
2412 sched_analyze_2 (deps, SET_SRC (x), insn);
2413
2414 can_start_lhs_rhs_p = false;
2415 }
2416
2417 return;
2418 }
2419
2420 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2421 || GET_CODE (dest) == ZERO_EXTRACT)
2422 {
2423 if (GET_CODE (dest) == STRICT_LOW_PART
2424 || GET_CODE (dest) == ZERO_EXTRACT
2425 || df_read_modify_subreg_p (dest))
2426 {
2427 /* These both read and modify the result. We must handle
2428 them as writes to get proper dependencies for following
2429 instructions. We must handle them as reads to get proper
2430 dependencies from this to previous instructions.
2431 Thus we need to call sched_analyze_2. */
2432
2433 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2434 }
2435 if (GET_CODE (dest) == ZERO_EXTRACT)
2436 {
2437 /* The second and third arguments are values read by this insn. */
2438 sched_analyze_2 (deps, XEXP (dest, 1), insn);
2439 sched_analyze_2 (deps, XEXP (dest, 2), insn);
2440 }
2441 dest = XEXP (dest, 0);
2442 }
2443
2444 if (REG_P (dest))
2445 {
2446 int regno = REGNO (dest);
2447 enum machine_mode mode = GET_MODE (dest);
2448
2449 sched_analyze_reg (deps, regno, mode, code, insn);
2450
2451 #ifdef STACK_REGS
2452 /* Treat all writes to a stack register as modifying the TOS. */
2453 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2454 {
2455 /* Avoid analyzing the same register twice. */
2456 if (regno != FIRST_STACK_REG)
2457 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2458
2459 add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
2460 FIRST_STACK_REG);
2461 }
2462 #endif
2463 }
2464 else if (MEM_P (dest))
2465 {
2466 /* Writing memory. */
2467 rtx t = dest;
2468
2469 if (sched_deps_info->use_cselib)
2470 {
2471 enum machine_mode address_mode = get_address_mode (dest);
2472
2473 t = shallow_copy_rtx (dest);
2474 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2475 GET_MODE (t), insn);
2476 XEXP (t, 0)
2477 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2478 insn);
2479 }
2480 t = canon_rtx (t);
2481
2482 /* Pending lists can't get larger with a readonly context. */
2483 if (!deps->readonly
2484 && ((deps->pending_read_list_length + deps->pending_write_list_length)
2485 > MAX_PENDING_LIST_LENGTH))
2486 {
2487 /* Flush all pending reads and writes to prevent the pending lists
2488 from getting any larger. Insn scheduling runs too slowly when
2489 these lists get long. When compiling GCC with itself,
2490 this flush occurs 8 times for sparc, and 10 times for m88k using
2491 the default value of 32. */
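/* MAX_PENDING_LIST_LENGTH is controlled by the
   max-pending-list-length --param.  */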
2492 flush_pending_lists (deps, insn, false, true);
2493 }
2494 else
2495 {
2496 rtx pending, pending_mem;
2497
2498 pending = deps->pending_read_insns;
2499 pending_mem = deps->pending_read_mems;
2500 while (pending)
2501 {
2502 if (anti_dependence (XEXP (pending_mem, 0), t)
2503 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2504 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2505 DEP_ANTI);
2506
2507 pending = XEXP (pending, 1);
2508 pending_mem = XEXP (pending_mem, 1);
2509 }
2510
2511 pending = deps->pending_write_insns;
2512 pending_mem = deps->pending_write_mems;
2513 while (pending)
2514 {
2515 if (output_dependence (XEXP (pending_mem, 0), t)
2516 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2517 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2518 DEP_OUTPUT);
2519
2520 pending = XEXP (pending, 1);
2521 pending_mem = XEXP (pending_mem, 1);
2522 }
2523
2524 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2525 REG_DEP_ANTI, true);
2526 add_dependence_list (insn, deps->pending_jump_insns, 1,
2527 REG_DEP_CONTROL, true);
2528
2529 if (!deps->readonly)
2530 add_insn_mem_dependence (deps, false, insn, dest);
2531 }
2532 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2533 }
2534
2535 if (cslr_p && sched_deps_info->finish_lhs)
2536 sched_deps_info->finish_lhs ();
2537
2538 /* Analyze reads. */
2539 if (GET_CODE (x) == SET)
2540 {
2541 can_start_lhs_rhs_p = cslr_p;
2542
2543 sched_analyze_2 (deps, SET_SRC (x), insn);
2544
2545 can_start_lhs_rhs_p = false;
2546 }
2547 }
2548
2549 /* Analyze the uses of memory and registers in rtx X in INSN. */
2550 static void
2551 sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn)
2552 {
2553 int i;
2554 int j;
2555 enum rtx_code code;
2556 const char *fmt;
2557 bool cslr_p = can_start_lhs_rhs_p;
2558
2559 can_start_lhs_rhs_p = false;
2560
2561 gcc_assert (x);
2562 if (x == 0)
2563 return;
2564
2565 if (cslr_p && sched_deps_info->start_rhs)
2566 sched_deps_info->start_rhs (x);
2567
2568 code = GET_CODE (x);
2569
2570 switch (code)
2571 {
2572 CASE_CONST_ANY:
2573 case SYMBOL_REF:
2574 case CONST:
2575 case LABEL_REF:
2576 /* Ignore constants. */
2577 if (cslr_p && sched_deps_info->finish_rhs)
2578 sched_deps_info->finish_rhs ();
2579
2580 return;
2581
2582 #ifdef HAVE_cc0
2583 case CC0:
2584 /* User of CC0 depends on immediately preceding insn. */
2585 SCHED_GROUP_P (insn) = 1;
2586 /* Don't move the CC0 setter to another block (it can set up the
2587 same flag for previous CC0 users, which is safe). */
2588 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2589
2590 if (cslr_p && sched_deps_info->finish_rhs)
2591 sched_deps_info->finish_rhs ();
2592
2593 return;
2594 #endif
2595
2596 case REG:
2597 {
2598 int regno = REGNO (x);
2599 enum machine_mode mode = GET_MODE (x);
2600
2601 sched_analyze_reg (deps, regno, mode, USE, insn);
2602
2603 #ifdef STACK_REGS
2604 /* Treat all reads of a stack register as modifying the TOS. */
2605 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2606 {
2607 /* Avoid analyzing the same register twice. */
2608 if (regno != FIRST_STACK_REG)
2609 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2610 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2611 }
2612 #endif
2613
2614 if (cslr_p && sched_deps_info->finish_rhs)
2615 sched_deps_info->finish_rhs ();
2616
2617 return;
2618 }
2619
2620 case MEM:
2621 {
2622 /* Reading memory. */
2623 rtx u;
2624 rtx pending, pending_mem;
2625 rtx t = x;
2626
2627 if (sched_deps_info->use_cselib)
2628 {
2629 enum machine_mode address_mode = get_address_mode (t);
2630
2631 t = shallow_copy_rtx (t);
2632 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2633 GET_MODE (t), insn);
2634 XEXP (t, 0)
2635 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2636 insn);
2637 }
2638
2639 if (!DEBUG_INSN_P (insn))
2640 {
2641 t = canon_rtx (t);
2642 pending = deps->pending_read_insns;
2643 pending_mem = deps->pending_read_mems;
2644 while (pending)
2645 {
2646 if (read_dependence (XEXP (pending_mem, 0), t)
2647 && ! sched_insns_conditions_mutex_p (insn,
2648 XEXP (pending, 0)))
2649 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2650 DEP_ANTI);
2651
2652 pending = XEXP (pending, 1);
2653 pending_mem = XEXP (pending_mem, 1);
2654 }
2655
2656 pending = deps->pending_write_insns;
2657 pending_mem = deps->pending_write_mems;
2658 while (pending)
2659 {
2660 if (true_dependence (XEXP (pending_mem, 0), VOIDmode, t)
2661 && ! sched_insns_conditions_mutex_p (insn,
2662 XEXP (pending, 0)))
2663 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2664 sched_deps_info->generate_spec_deps
2665 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2666
2667 pending = XEXP (pending, 1);
2668 pending_mem = XEXP (pending_mem, 1);
2669 }
2670
2671 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2672 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2673
2674 for (u = deps->pending_jump_insns; u; u = XEXP (u, 1))
2675 if (deps_may_trap_p (x))
2676 {
2677 if ((sched_deps_info->generate_spec_deps)
2678 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2679 {
2680 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2681 MAX_DEP_WEAK);
2682
2683 note_dep (XEXP (u, 0), ds);
2684 }
2685 else
2686 add_dependence (insn, XEXP (u, 0), REG_DEP_CONTROL);
2687 }
2688 }
2689
2690 /* Always add these dependencies to pending_reads, since
2691 this insn may be followed by a write. */
2692 if (!deps->readonly)
2693 {
2694 if ((deps->pending_read_list_length
2695 + deps->pending_write_list_length)
2696 > MAX_PENDING_LIST_LENGTH
2697 && !DEBUG_INSN_P (insn))
2698 flush_pending_lists (deps, insn, true, true);
2699 add_insn_mem_dependence (deps, true, insn, x);
2700 }
2701
2702 sched_analyze_2 (deps, XEXP (x, 0), insn);
2703
2704 if (cslr_p && sched_deps_info->finish_rhs)
2705 sched_deps_info->finish_rhs ();
2706
2707 return;
2708 }
2709
2710 /* Force pending stores to memory in case a trap handler needs them. */
2711 case TRAP_IF:
2712 flush_pending_lists (deps, insn, true, false);
2713 break;
2714
2715 case PREFETCH:
2716 if (PREFETCH_SCHEDULE_BARRIER_P (x))
2717 reg_pending_barrier = TRUE_BARRIER;
2718 /* A prefetch insn contains addresses only, so if the prefetch
2719 address has no registers, there will be no dependencies on
2720 the prefetch insn. This is wrong from a correctness point of
2721 view, since such a prefetch can be moved below a jump insn,
2722 which usually generates a MOVE_BARRIER that prevents insns
2723 containing registers or memory references from moving through
2724 the barrier. It is also wrong from a performance point of
2725 view, since a prefetch without dependencies will tend to be
2726 issued later instead of earlier. It is hard to generate
2727 accurate dependencies for prefetch insns, as a prefetch has
2728 only the start address, but it is better to have something
2729 than nothing. */
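/* The prefetch address is wrapped in a dummy MEM below so that the
   ordinary memory-dependence machinery can track it.  */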
2730 if (!deps->readonly)
2731 {
2732 rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
2733 if (sched_deps_info->use_cselib)
2734 cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
2735 add_insn_mem_dependence (deps, true, insn, x);
2736 }
2737 break;
2738
2739 case UNSPEC_VOLATILE:
2740 flush_pending_lists (deps, insn, true, true);
2741 /* FALLTHRU */
2742
2743 case ASM_OPERANDS:
2744 case ASM_INPUT:
2745 {
2746 /* Traditional and volatile asm instructions must be considered to use
2747 and clobber all hard registers, all pseudo-registers and all of
2748 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2749
2750 Consider for instance a volatile asm that changes the fpu rounding
2751 mode. An insn should not be moved across this even if it only uses
2752 pseudo-regs because it might give an incorrectly rounded result. */
2753 if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2754 && !DEBUG_INSN_P (insn))
2755 reg_pending_barrier = TRUE_BARRIER;
2756
2757 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2758 We cannot just fall through here, since then we would be confused
2759 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2760 a traditional asm, unlike its normal usage. */
2761
2762 if (code == ASM_OPERANDS)
2763 {
2764 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2765 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2766
2767 if (cslr_p && sched_deps_info->finish_rhs)
2768 sched_deps_info->finish_rhs ();
2769
2770 return;
2771 }
2772 break;
2773 }
2774
2775 case PRE_DEC:
2776 case POST_DEC:
2777 case PRE_INC:
2778 case POST_INC:
2779 /* These both read and modify the result. We must handle them as writes
2780 to get proper dependencies for following instructions. We must handle
2781 them as reads to get proper dependencies from this to previous
2782 instructions. Thus we need to pass them to both sched_analyze_1
2783 and sched_analyze_2. We must call sched_analyze_2 first in order
2784 to get the proper antecedent for the read. */
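/* E.g. (post_inc:SI (reg:SI 4)) is recorded first as a read of
   register 4 (by sched_analyze_2) and then as a write of it
   (by sched_analyze_1).  */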
2785 sched_analyze_2 (deps, XEXP (x, 0), insn);
2786 sched_analyze_1 (deps, x, insn);
2787
2788 if (cslr_p && sched_deps_info->finish_rhs)
2789 sched_deps_info->finish_rhs ();
2790
2791 return;
2792
2793 case POST_MODIFY:
2794 case PRE_MODIFY:
2795 /* op0 = op0 + op1 */
2796 sched_analyze_2 (deps, XEXP (x, 0), insn);
2797 sched_analyze_2 (deps, XEXP (x, 1), insn);
2798 sched_analyze_1 (deps, x, insn);
2799
2800 if (cslr_p && sched_deps_info->finish_rhs)
2801 sched_deps_info->finish_rhs ();
2802
2803 return;
2804
2805 default:
2806 break;
2807 }
2808
2809 /* Other cases: walk the insn. */
2810 fmt = GET_RTX_FORMAT (code);
2811 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2812 {
2813 if (fmt[i] == 'e')
2814 sched_analyze_2 (deps, XEXP (x, i), insn);
2815 else if (fmt[i] == 'E')
2816 for (j = 0; j < XVECLEN (x, i); j++)
2817 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2818 }
2819
2820 if (cslr_p && sched_deps_info->finish_rhs)
2821 sched_deps_info->finish_rhs ();
2822 }
2823
2824 /* Try to group a comparison and the following conditional jump INSN if
2825 they're already adjacent. This is to prevent the scheduler from
2826 scheduling them apart. */
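/* On some microarchitectures (recent x86 chips, for instance) a compare
   that is immediately followed by a conditional jump can be macro-fused
   into a single operation, which is only possible if the scheduler keeps
   the pair adjacent.  */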
2827
2828 static void
2829 try_group_insn (rtx insn)
2830 {
2831 unsigned int condreg1, condreg2;
2832 rtx cc_reg_1;
2833 rtx prev;
2834
2835 if (!any_condjump_p (insn))
2836 return;
2837
2838 targetm.fixed_condition_code_regs (&condreg1, &condreg2);
2839 cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
2840 prev = prev_nonnote_nondebug_insn (insn);
2841 if (!reg_referenced_p (cc_reg_1, PATTERN (insn))
2842 || !prev
2843 || !modified_in_p (cc_reg_1, prev))
2844 return;
2845
2846 /* Different microarchitectures support macro fusions for different
2847 combinations of insn pairs. */
2848 if (!targetm.sched.macro_fusion_pair_p
2849 || !targetm.sched.macro_fusion_pair_p (prev, insn))
2850 return;
2851
2852 SCHED_GROUP_P (insn) = 1;
2853 }
2854
2855 /* Analyze an INSN with pattern X to find all dependencies. */
2856 static void
2857 sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
2858 {
2859 RTX_CODE code = GET_CODE (x);
2860 rtx link;
2861 unsigned i;
2862 reg_set_iterator rsi;
2863
2864 if (! reload_completed)
2865 {
2866 HARD_REG_SET temp;
2867
2868 extract_insn (insn);
2869 preprocess_constraints (insn);
2870 ira_implicitly_set_insn_hard_regs (&temp);
2871 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
2872 IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2873 }
2874
2875 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2876 && code == SET);
2877
2878 /* Group compare and branch insns for macro-fusion. */
2879 if (targetm.sched.macro_fusion_p
2880 && targetm.sched.macro_fusion_p ())
2881 try_group_insn (insn);
2882
2883 if (may_trap_p (x))
2884 /* Avoid moving trapping instructions across function calls that might
2885 not always return. */
2886 add_dependence_list (insn, deps->last_function_call_may_noreturn,
2887 1, REG_DEP_ANTI, true);
2888
2889 /* We must avoid creating a situation in which two successors of the
2890 current block have different unwind info after scheduling. If at any
2891 point the two paths re-join, this leads to incorrect unwind info. */
2892 /* ??? There are certain situations involving a forced frame pointer in
2893 which, with extra effort, we could fix up the unwind info at a later
2894 CFG join. However, it seems better to notice these cases earlier
2895 during prologue generation and avoid marking the frame pointer setup
2896 as frame-related at all. */
2897 if (RTX_FRAME_RELATED_P (insn))
2898 {
2899 /* Make sure prologue insn is scheduled before next jump. */
2900 deps->sched_before_next_jump
2901 = alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2902
2903 /* Make sure epilogue insn is scheduled after preceding jumps. */
2904 add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
2905 true);
2906 }
2907
2908 if (code == COND_EXEC)
2909 {
2910 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2911
2912 /* ??? Should be recording conditions so we reduce the number of
2913 false dependencies. */
2914 x = COND_EXEC_CODE (x);
2915 code = GET_CODE (x);
2916 }
2917 if (code == SET || code == CLOBBER)
2918 {
2919 sched_analyze_1 (deps, x, insn);
2920
2921 /* Bare clobber insns are used for letting life analysis, reg-stack
2922 and others know that a value is dead. Depend on the last call
2923 instruction so that reg-stack won't get confused. */
2924 if (code == CLOBBER)
2925 add_dependence_list (insn, deps->last_function_call, 1,
2926 REG_DEP_OUTPUT, true);
2927 }
2928 else if (code == PARALLEL)
2929 {
2930 for (i = XVECLEN (x, 0); i--;)
2931 {
2932 rtx sub = XVECEXP (x, 0, i);
2933 code = GET_CODE (sub);
2934
2935 if (code == COND_EXEC)
2936 {
2937 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2938 sub = COND_EXEC_CODE (sub);
2939 code = GET_CODE (sub);
2940 }
2941 if (code == SET || code == CLOBBER)
2942 sched_analyze_1 (deps, sub, insn);
2943 else
2944 sched_analyze_2 (deps, sub, insn);
2945 }
2946 }
2947 else
2948 sched_analyze_2 (deps, x, insn);
2949
2950 /* Mark registers CLOBBERED or used by the called function. */
2951 if (CALL_P (insn))
2952 {
2953 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2954 {
2955 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2956 sched_analyze_1 (deps, XEXP (link, 0), insn);
2957 else if (GET_CODE (XEXP (link, 0)) != SET)
2958 sched_analyze_2 (deps, XEXP (link, 0), insn);
2959 }
2960 /* Don't schedule anything after a tail call; a tail call needs
2961 to use at least all call-saved registers. */
2962 if (SIBLING_CALL_P (insn))
2963 reg_pending_barrier = TRUE_BARRIER;
2964 else if (find_reg_note (insn, REG_SETJMP, NULL))
2965 reg_pending_barrier = MOVE_BARRIER;
2966 }
2967
2968 if (JUMP_P (insn))
2969 {
2970 rtx next;
2971 next = next_nonnote_nondebug_insn (insn);
2972 if (next && BARRIER_P (next))
2973 reg_pending_barrier = MOVE_BARRIER;
2974 else
2975 {
2976 rtx pending, pending_mem;
2977
2978 if (sched_deps_info->compute_jump_reg_dependencies)
2979 {
2980 (*sched_deps_info->compute_jump_reg_dependencies)
2981 (insn, reg_pending_control_uses);
2982
2983 /* Make latency of jump equal to 0 by using anti-dependence. */
2984 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
2985 {
2986 struct deps_reg *reg_last = &deps->reg_last[i];
2987 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
2988 false);
2989 add_dependence_list (insn, reg_last->implicit_sets,
2990 0, REG_DEP_ANTI, false);
2991 add_dependence_list (insn, reg_last->clobbers, 0,
2992 REG_DEP_ANTI, false);
2993 }
2994 }
2995
2996 /* All memory writes and volatile reads must happen before the
2997 jump. Non-volatile reads must happen before the jump iff
2998 the result is needed by the register-use mask computed above. */
2999
3000 pending = deps->pending_write_insns;
3001 pending_mem = deps->pending_write_mems;
3002 while (pending)
3003 {
3004 if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
3005 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
3006 pending = XEXP (pending, 1);
3007 pending_mem = XEXP (pending_mem, 1);
3008 }
3009
3010 pending = deps->pending_read_insns;
3011 pending_mem = deps->pending_read_mems;
3012 while (pending)
3013 {
3014 if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
3015 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
3016 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
3017 pending = XEXP (pending, 1);
3018 pending_mem = XEXP (pending_mem, 1);
3019 }
3020
3021 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
3022 REG_DEP_ANTI, true);
3023 add_dependence_list (insn, deps->pending_jump_insns, 1,
3024 REG_DEP_ANTI, true);
3025 }
3026 }
3027
3028 /* If this instruction can throw an exception, then moving it changes
3029 where block boundaries fall. This is mighty confusing elsewhere.
3030 Therefore, prevent such an instruction from being moved. Same for
3031 non-jump instructions that define block boundaries.
3032 ??? Unclear whether this is still necessary in EBB mode. If not,
3033 add_branch_dependences should be adjusted for RGN mode instead. */
3034 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
3035 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
3036 reg_pending_barrier = MOVE_BARRIER;
3037
3038 if (sched_pressure != SCHED_PRESSURE_NONE)
3039 {
3040 setup_insn_reg_uses (deps, insn);
3041 init_insn_reg_pressure_info (insn);
3042 }
3043
3044 /* Add register dependencies for insn. */
3045 if (DEBUG_INSN_P (insn))
3046 {
3047 rtx prev = deps->last_debug_insn;
3048 rtx u;
3049
3050 if (!deps->readonly)
3051 deps->last_debug_insn = insn;
3052
3053 if (prev)
3054 add_dependence (insn, prev, REG_DEP_ANTI);
3055
3056 add_dependence_list (insn, deps->last_function_call, 1,
3057 REG_DEP_ANTI, false);
3058
3059 if (!sel_sched_p ())
3060 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
3061 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3062
3063 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3064 {
3065 struct deps_reg *reg_last = &deps->reg_last[i];
3066 add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
3067 /* There's no point in making REG_DEP_CONTROL dependencies for
3068 debug insns. */
3069 add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
3070 false);
3071
3072 if (!deps->readonly)
3073 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3074 }
3075 CLEAR_REG_SET (reg_pending_uses);
3076
3077 /* Quite often, a debug insn will refer to stuff in the
3078 previous instruction, but the reason we want this
3079 dependency here is to make sure the scheduler doesn't
3080 gratuitously move a debug insn ahead. This could dirty
3081 DF flags and cause additional analysis that wouldn't have
3082 occurred in compilation without debug insns, and such
3083 additional analysis can modify the generated code. */
3084 prev = PREV_INSN (insn);
3085
3086 if (prev && NONDEBUG_INSN_P (prev))
3087 add_dependence (insn, prev, REG_DEP_ANTI);
3088 }
3089 else
3090 {
3091 regset_head set_or_clobbered;
3092
3093 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3094 {
3095 struct deps_reg *reg_last = &deps->reg_last[i];
3096 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3097 add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
3098 false);
3099 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3100 false);
3101
3102 if (!deps->readonly)
3103 {
3104 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3105 reg_last->uses_length++;
3106 }
3107 }
3108
3109 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3110 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3111 {
3112 struct deps_reg *reg_last = &deps->reg_last[i];
3113 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3114 add_dependence_list (insn, reg_last->implicit_sets, 0,
3115 REG_DEP_ANTI, false);
3116 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3117 false);
3118
3119 if (!deps->readonly)
3120 {
3121 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3122 reg_last->uses_length++;
3123 }
3124 }
3125
3126 if (targetm.sched.exposed_pipeline)
3127 {
3128 INIT_REG_SET (&set_or_clobbered);
3129 bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3130 reg_pending_sets);
3131 EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3132 {
3133 struct deps_reg *reg_last = &deps->reg_last[i];
3134 rtx list;
3135 for (list = reg_last->uses; list; list = XEXP (list, 1))
3136 {
3137 rtx other = XEXP (list, 0);
3138 if (INSN_CACHED_COND (other) != const_true_rtx
3139 && refers_to_regno_p (i, i + 1, INSN_CACHED_COND (other), NULL))
3140 INSN_CACHED_COND (other) = const_true_rtx;
3141 }
3142 }
3143 }
3144
3145 /* If the current insn is conditional, we can't free any
3146 of the lists. */
3147 if (sched_has_condition_p (insn))
3148 {
3149 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3150 {
3151 struct deps_reg *reg_last = &deps->reg_last[i];
3152 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3153 false);
3154 add_dependence_list (insn, reg_last->implicit_sets, 0,
3155 REG_DEP_ANTI, false);
3156 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3157 false);
3158 add_dependence_list (insn, reg_last->control_uses, 0,
3159 REG_DEP_CONTROL, false);
3160
3161 if (!deps->readonly)
3162 {
3163 reg_last->clobbers
3164 = alloc_INSN_LIST (insn, reg_last->clobbers);
3165 reg_last->clobbers_length++;
3166 }
3167 }
3168 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3169 {
3170 struct deps_reg *reg_last = &deps->reg_last[i];
3171 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3172 false);
3173 add_dependence_list (insn, reg_last->implicit_sets, 0,
3174 REG_DEP_ANTI, false);
3175 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
3176 false);
3177 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3178 false);
3179 add_dependence_list (insn, reg_last->control_uses, 0,
3180 REG_DEP_CONTROL, false);
3181
3182 if (!deps->readonly)
3183 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3184 }
3185 }
3186 else
3187 {
3188 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3189 {
3190 struct deps_reg *reg_last = &deps->reg_last[i];
3191 if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
3192 || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
3193 {
3194 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3195 REG_DEP_OUTPUT, false);
3196 add_dependence_list_and_free (deps, insn,
3197 &reg_last->implicit_sets, 0,
3198 REG_DEP_ANTI, false);
3199 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3200 REG_DEP_ANTI, false);
3201 add_dependence_list_and_free (deps, insn,
3202 &reg_last->control_uses, 0,
3203 REG_DEP_ANTI, false);
3204 add_dependence_list_and_free (deps, insn,
3205 &reg_last->clobbers, 0,
3206 REG_DEP_OUTPUT, false);
3207
3208 if (!deps->readonly)
3209 {
3210 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3211 reg_last->clobbers_length = 0;
3212 reg_last->uses_length = 0;
3213 }
3214 }
3215 else
3216 {
3217 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3218 false);
3219 add_dependence_list (insn, reg_last->implicit_sets, 0,
3220 REG_DEP_ANTI, false);
3221 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3222 false);
3223 add_dependence_list (insn, reg_last->control_uses, 0,
3224 REG_DEP_CONTROL, false);
3225 }
3226
3227 if (!deps->readonly)
3228 {
3229 reg_last->clobbers_length++;
3230 reg_last->clobbers
3231 = alloc_INSN_LIST (insn, reg_last->clobbers);
3232 }
3233 }
3234 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3235 {
3236 struct deps_reg *reg_last = &deps->reg_last[i];
3237
3238 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3239 REG_DEP_OUTPUT, false);
3240 add_dependence_list_and_free (deps, insn,
3241 &reg_last->implicit_sets,
3242 0, REG_DEP_ANTI, false);
3243 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3244 REG_DEP_OUTPUT, false);
3245 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3246 REG_DEP_ANTI, false);
3247 add_dependence_list (insn, reg_last->control_uses, 0,
3248 REG_DEP_CONTROL, false);
3249
3250 if (!deps->readonly)
3251 {
3252 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3253 reg_last->uses_length = 0;
3254 reg_last->clobbers_length = 0;
3255 }
3256 }
3257 }
3258 if (!deps->readonly)
3259 {
3260 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3261 {
3262 struct deps_reg *reg_last = &deps->reg_last[i];
3263 reg_last->control_uses
3264 = alloc_INSN_LIST (insn, reg_last->control_uses);
3265 }
3266 }
3267 }
3268
3269 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3270 if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3271 {
3272 struct deps_reg *reg_last = &deps->reg_last[i];
3273 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
3274 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
3275 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
3276 add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
3277 false);
3278
3279 if (!deps->readonly)
3280 reg_last->implicit_sets
3281 = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3282 }
3283
3284 if (!deps->readonly)
3285 {
3286 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3287 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3288 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
3289 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3290 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
3291 || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3292 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3293
3294 /* Set up the pending barrier found. */
3295 deps->last_reg_pending_barrier = reg_pending_barrier;
3296 }
3297
3298 CLEAR_REG_SET (reg_pending_uses);
3299 CLEAR_REG_SET (reg_pending_clobbers);
3300 CLEAR_REG_SET (reg_pending_sets);
3301 CLEAR_REG_SET (reg_pending_control_uses);
3302 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3303 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3304
3305 /* Add dependencies if a scheduling barrier was found. */
3306 if (reg_pending_barrier)
3307 {
3308 /* In the case of a barrier, most of the added dependencies are
3309 not real, so we use anti-dependences here. */
3310 if (sched_has_condition_p (insn))
3311 {
3312 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3313 {
3314 struct deps_reg *reg_last = &deps->reg_last[i];
3315 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3316 true);
3317 add_dependence_list (insn, reg_last->sets, 0,
3318 reg_pending_barrier == TRUE_BARRIER
3319 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3320 add_dependence_list (insn, reg_last->implicit_sets, 0,
3321 REG_DEP_ANTI, true);
3322 add_dependence_list (insn, reg_last->clobbers, 0,
3323 reg_pending_barrier == TRUE_BARRIER
3324 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3325 }
3326 }
3327 else
3328 {
3329 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3330 {
3331 struct deps_reg *reg_last = &deps->reg_last[i];
3332 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3333 REG_DEP_ANTI, true);
3334 add_dependence_list_and_free (deps, insn,
3335 &reg_last->control_uses, 0,
3336 REG_DEP_CONTROL, true);
3337 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3338 reg_pending_barrier == TRUE_BARRIER
3339 ? REG_DEP_TRUE : REG_DEP_ANTI,
3340 true);
3341 add_dependence_list_and_free (deps, insn,
3342 &reg_last->implicit_sets, 0,
3343 REG_DEP_ANTI, true);
3344 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3345 reg_pending_barrier == TRUE_BARRIER
3346 ? REG_DEP_TRUE : REG_DEP_ANTI,
3347 true);
3348
3349 if (!deps->readonly)
3350 {
3351 reg_last->uses_length = 0;
3352 reg_last->clobbers_length = 0;
3353 }
3354 }
3355 }
3356
3357 if (!deps->readonly)
3358 for (i = 0; i < (unsigned)deps->max_reg; i++)
3359 {
3360 struct deps_reg *reg_last = &deps->reg_last[i];
3361 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3362 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3363 }
3364
3365 /* Don't flush pending lists on speculative checks for
3366 selective scheduling. */
3367 if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
3368 flush_pending_lists (deps, insn, true, true);
3369
3370 reg_pending_barrier = NOT_A_BARRIER;
3371 }
3372
3373 /* If a post-call group is still open, see if it should remain so.
3374 This insn must be a simple move of a hard reg to a pseudo or
3375 vice-versa.
3376
3377 We must avoid moving these insns for correctness on targets
3378 with small register classes, and for special registers like
3379 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
3380 hard regs for all targets. */
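/* E.g. (set (reg:SI 70) (reg:SI 0)) right after a call -- copying a
   hard return-value register into a pseudo -- is kept glued to the
   call (register numbers here are purely illustrative).  */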
3381
3382 if (deps->in_post_call_group_p)
3383 {
3384 rtx tmp, set = single_set (insn);
3385 int src_regno, dest_regno;
3386
3387 if (set == NULL)
3388 {
3389 if (DEBUG_INSN_P (insn))
3390 /* We don't want to mark debug insns as part of the same
3391 sched group. We know they really aren't, but if we use
3392 debug insns to tell that a call group is over, we'll
3393 get different code if debug insns are not there and
3394 instructions that follow seem like they should be part
3395 of the call group.
3396
3397 Also, if we did, chain_to_prev_insn would move the
3398 deps of the debug insn to the call insn, modifying
3399 non-debug post-dependency counts of the debug insn
3400 dependencies and otherwise messing with the scheduling
3401 order.
3402
3403 Instead, let such debug insns be scheduled freely, but
3404 keep the call group open in case there are insns that
3405 should be part of it afterwards. Since we grant debug
3406 insns higher priority than even sched group insns, it
3407 will all turn out all right. */
3408 goto debug_dont_end_call_group;
3409 else
3410 goto end_call_group;
3411 }
3412
3413 tmp = SET_DEST (set);
3414 if (GET_CODE (tmp) == SUBREG)
3415 tmp = SUBREG_REG (tmp);
3416 if (REG_P (tmp))
3417 dest_regno = REGNO (tmp);
3418 else
3419 goto end_call_group;
3420
3421 tmp = SET_SRC (set);
3422 if (GET_CODE (tmp) == SUBREG)
3423 tmp = SUBREG_REG (tmp);
3424 if ((GET_CODE (tmp) == PLUS
3425 || GET_CODE (tmp) == MINUS)
3426 && REG_P (XEXP (tmp, 0))
3427 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3428 && dest_regno == STACK_POINTER_REGNUM)
3429 src_regno = STACK_POINTER_REGNUM;
3430 else if (REG_P (tmp))
3431 src_regno = REGNO (tmp);
3432 else
3433 goto end_call_group;
3434
3435 if (src_regno < FIRST_PSEUDO_REGISTER
3436 || dest_regno < FIRST_PSEUDO_REGISTER)
3437 {
3438 if (!deps->readonly
3439 && deps->in_post_call_group_p == post_call_initial)
3440 deps->in_post_call_group_p = post_call;
3441
3442 if (!sel_sched_p () || sched_emulate_haifa_p)
3443 {
3444 SCHED_GROUP_P (insn) = 1;
3445 CANT_MOVE (insn) = 1;
3446 }
3447 }
3448 else
3449 {
3450 end_call_group:
3451 if (!deps->readonly)
3452 deps->in_post_call_group_p = not_post_call;
3453 }
3454 }
3455
3456 debug_dont_end_call_group:
3457 if ((current_sched_info->flags & DO_SPECULATION)
3458 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3459 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3460 be speculated. */
3461 {
3462 if (sel_sched_p ())
3463 sel_mark_hard_insn (insn);
3464 else
3465 {
3466 sd_iterator_def sd_it;
3467 dep_t dep;
3468
3469 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3470 sd_iterator_cond (&sd_it, &dep);)
3471 change_spec_dep_to_hard (sd_it);
3472 }
3473 }
3474
3475 /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
3476 honor their original ordering. */
3477 if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
3478 {
3479 if (deps->last_args_size)
3480 add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
3481 deps->last_args_size = insn;
3482 }
3483 }
3484
3485 /* Return TRUE if INSN might not always return normally (e.g. call exit,
3486 longjmp, loop forever, ...). */
3487 /* FIXME: Why can't this function just use flags_from_decl_or_type and
3488 test for ECF_NORETURN? */
3489 static bool
3490 call_may_noreturn_p (rtx insn)
3491 {
3492 rtx call;
3493
3494 /* const or pure calls that aren't looping will always return. */
3495 if (RTL_CONST_OR_PURE_CALL_P (insn)
3496 && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3497 return false;
3498
3499 call = get_call_rtx_from (insn);
3500 if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3501 {
3502 rtx symbol = XEXP (XEXP (call, 0), 0);
3503 if (SYMBOL_REF_DECL (symbol)
3504 && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3505 {
3506 if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3507 == BUILT_IN_NORMAL)
3508 switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3509 {
3510 case BUILT_IN_BCMP:
3511 case BUILT_IN_BCOPY:
3512 case BUILT_IN_BZERO:
3513 case BUILT_IN_INDEX:
3514 case BUILT_IN_MEMCHR:
3515 case BUILT_IN_MEMCMP:
3516 case BUILT_IN_MEMCPY:
3517 case BUILT_IN_MEMMOVE:
3518 case BUILT_IN_MEMPCPY:
3519 case BUILT_IN_MEMSET:
3520 case BUILT_IN_RINDEX:
3521 case BUILT_IN_STPCPY:
3522 case BUILT_IN_STPNCPY:
3523 case BUILT_IN_STRCAT:
3524 case BUILT_IN_STRCHR:
3525 case BUILT_IN_STRCMP:
3526 case BUILT_IN_STRCPY:
3527 case BUILT_IN_STRCSPN:
3528 case BUILT_IN_STRLEN:
3529 case BUILT_IN_STRNCAT:
3530 case BUILT_IN_STRNCMP:
3531 case BUILT_IN_STRNCPY:
3532 case BUILT_IN_STRPBRK:
3533 case BUILT_IN_STRRCHR:
3534 case BUILT_IN_STRSPN:
3535 case BUILT_IN_STRSTR:
3536 /* Assume certain string/memory builtins always return. */
3537 return false;
3538 default:
3539 break;
3540 }
3541 }
3542 }
3543
3544 /* For all other calls assume that they might not always return. */
3545 return true;
3546 }
3547
3548 /* Return true if INSN should be made dependent on the previous instruction
3549 group, and if all INSN's dependencies should be moved to the first
3550 instruction of that group. */
3551
3552 static bool
3553 chain_to_prev_insn_p (rtx insn)
3554 {
3555 rtx prev, x;
3556
3557 /* INSN forms a group with the previous instruction. */
3558 if (SCHED_GROUP_P (insn))
3559 return true;
3560
3561 /* If the previous instruction clobbers a register R and this one sets
3562 part of R, the clobber was added specifically to help us track the
3563 liveness of R. There's no point scheduling the clobber and leaving
3564 INSN behind, especially if we move the clobber to another block. */
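/* For example, a (clobber (reg:DI 0)) may have been emitted to mark the
   start of a new value for the whole register before a following
   (set (subreg:SI (reg:DI 0) 0) ...) writes it piecewise (register
   numbers illustrative).  */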
3565 prev = prev_nonnote_nondebug_insn (insn);
3566 if (prev
3567 && INSN_P (prev)
3568 && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
3569 && GET_CODE (PATTERN (prev)) == CLOBBER)
3570 {
3571 x = XEXP (PATTERN (prev), 0);
3572 if (set_of (x, insn))
3573 return true;
3574 }
3575
3576 return false;
3577 }
3578
3579 /* Analyze INSN with DEPS as a context. */
3580 void
3581 deps_analyze_insn (struct deps_desc *deps, rtx insn)
3582 {
3583 if (sched_deps_info->start_insn)
3584 sched_deps_info->start_insn (insn);
3585
3586 /* Record the condition for this insn. */
3587 if (NONDEBUG_INSN_P (insn))
3588 {
3589 rtx t;
3590 sched_get_condition_with_rev (insn, NULL);
3591 t = INSN_CACHED_COND (insn);
3592 INSN_COND_DEPS (insn) = NULL_RTX;
3593 if (reload_completed
3594 && (current_sched_info->flags & DO_PREDICATION)
3595 && COMPARISON_P (t)
3596 && REG_P (XEXP (t, 0))
3597 && CONSTANT_P (XEXP (t, 1)))
3598 {
3599 unsigned int regno;
3600 int nregs;
3601 t = XEXP (t, 0);
3602 regno = REGNO (t);
3603 nregs = hard_regno_nregs[regno][GET_MODE (t)];
3604 t = NULL_RTX;
3605 while (nregs-- > 0)
3606 {
3607 struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
3608 t = concat_INSN_LIST (reg_last->sets, t);
3609 t = concat_INSN_LIST (reg_last->clobbers, t);
3610 t = concat_INSN_LIST (reg_last->implicit_sets, t);
3611 }
3612 INSN_COND_DEPS (insn) = t;
3613 }
3614 }
3615
3616 if (JUMP_P (insn))
3617 {
3618 /* Make each JUMP_INSN (but not a speculative check)
3619 a scheduling barrier for memory references. */
3620 if (!deps->readonly
3621 && !(sel_sched_p ()
3622 && sel_insn_is_speculation_check (insn)))
3623 {
3624 /* Keep the list a reasonable size. */
3625 if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
3626 flush_pending_lists (deps, insn, true, true);
3627 else
3628 deps->pending_jump_insns
3629 = alloc_INSN_LIST (insn, deps->pending_jump_insns);
3630 }
3631
3632 /* For each insn which shouldn't cross a jump, add a dependence. */
3633 add_dependence_list_and_free (deps, insn,
3634 &deps->sched_before_next_jump, 1,
3635 REG_DEP_ANTI, true);
3636
3637 sched_analyze_insn (deps, PATTERN (insn), insn);
3638 }
3639 else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
3640 {
3641 sched_analyze_insn (deps, PATTERN (insn), insn);
3642 }
3643 else if (CALL_P (insn))
3644 {
3645 int i;
3646
3647 CANT_MOVE (insn) = 1;
3648
3649 if (find_reg_note (insn, REG_SETJMP, NULL))
3650 {
3651 /* This is setjmp. Assume that all registers, not just
3652 hard registers, may be clobbered by this call. */
3653 reg_pending_barrier = MOVE_BARRIER;
3654 }
3655 else
3656 {
3657 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3658 /* A call may read and modify global register variables. */
3659 if (global_regs[i])
3660 {
3661 SET_REGNO_REG_SET (reg_pending_sets, i);
3662 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3663 }
3664 /* Other call-clobbered hard regs may be clobbered.
3665 Since we only have a choice between 'might be clobbered'
3666 and 'definitely not clobbered', we must include all
3667 partly call-clobbered registers here. */
3668 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3669 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3670 SET_REGNO_REG_SET (reg_pending_clobbers, i);
3671 /* We don't know what set of fixed registers might be used
3672 by the function; the stack pointer is certainly among
3673 them, so be conservative. */
3674 else if (fixed_regs[i])
3675 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3676 /* The frame pointer is normally not used by the function
3677 itself, but by the debugger. */
3678 /* ??? MIPS o32 is an exception. It uses the frame pointer
3679 in the macro expansion of jal but does not represent this
3680 fact in the call_insn rtl. */
3681 else if (i == FRAME_POINTER_REGNUM
3682 || (i == HARD_FRAME_POINTER_REGNUM
3683 && (! reload_completed || frame_pointer_needed)))
3684 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3685 }
3686
3687 /* For each insn which shouldn't cross a call, add a dependence
3688 between that insn and this call insn. */
3689 add_dependence_list_and_free (deps, insn,
3690 &deps->sched_before_next_call, 1,
3691 REG_DEP_ANTI, true);
3692
3693 sched_analyze_insn (deps, PATTERN (insn), insn);
3694
3695 /* If CALL would be in a sched group, then this will violate the
3696 convention that sched group insns have dependencies only on the
3697 previous instruction.
3698
3699 Of course one can say: "Hey! What about head of the sched group?"
3700 And I will answer: "Basic principles (one dep per insn) are always
3701 the same." */
3702 gcc_assert (!SCHED_GROUP_P (insn));
3703
3704 /* In the absence of interprocedural alias analysis, we must flush
3705 all pending reads and writes, and start new dependencies starting
3706 from here. But only flush writes for constant calls (which may
3707 be passed a pointer to something we haven't written yet). */
3708 flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3709
3710 if (!deps->readonly)
3711 {
3712 /* Remember the last function call for limiting lifetimes. */
3713 free_INSN_LIST_list (&deps->last_function_call);
3714 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3715
3716 if (call_may_noreturn_p (insn))
3717 {
3718 /* Remember the last function call that might not always return
3719 normally for limiting moves of trapping insns. */
3720 free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3721 deps->last_function_call_may_noreturn
3722 = alloc_INSN_LIST (insn, NULL_RTX);
3723 }
3724
3725 /* Before reload, begin a post-call group, so as to keep the
3726 lifetimes of hard registers correct. */
3727 if (! reload_completed)
3728 deps->in_post_call_group_p = post_call;
3729 }
3730 }
3731
3732 if (sched_deps_info->use_cselib)
3733 cselib_process_insn (insn);
3734
3735 if (sched_deps_info->finish_insn)
3736 sched_deps_info->finish_insn ();
3737
3738 /* Fixup the dependencies in the sched group. */
3739 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3740 && chain_to_prev_insn_p (insn)
3741 && !sel_sched_p ())
3742 chain_to_prev_insn (insn);
3743 }
3744
3745 /* Initialize DEPS for the new block beginning with HEAD. */
3746 void
3747 deps_start_bb (struct deps_desc *deps, rtx head)
3748 {
3749 gcc_assert (!deps->readonly);
3750
3751 /* Before reload, if the previous block ended in a call, show that
3752 we are inside a post-call group, so as to keep the lifetimes of
3753 hard registers correct. */
3754 if (! reload_completed && !LABEL_P (head))
3755 {
3756 rtx insn = prev_nonnote_nondebug_insn (head);
3757
3758 if (insn && CALL_P (insn))
3759 deps->in_post_call_group_p = post_call_initial;
3760 }
3761 }
3762
3763 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3764 dependencies for each insn. */
3765 void
3766 sched_analyze (struct deps_desc *deps, rtx head, rtx tail)
3767 {
3768 rtx insn;
3769
3770 if (sched_deps_info->use_cselib)
3771 cselib_init (CSELIB_RECORD_MEMORY);
3772
3773 deps_start_bb (deps, head);
3774
3775 for (insn = head;; insn = NEXT_INSN (insn))
3776 {
3777
3778 if (INSN_P (insn))
3779 {
3780 /* And initialize deps_lists. */
3781 sd_init_insn (insn);
3782 /* Clean up SCHED_GROUP_P which may be set by last
3783 scheduler pass. */
3784 if (SCHED_GROUP_P (insn))
3785 SCHED_GROUP_P (insn) = 0;
3786 }
3787
3788 deps_analyze_insn (deps, insn);
3789
3790 if (insn == tail)
3791 {
3792 if (sched_deps_info->use_cselib)
3793 cselib_finish ();
3794 return;
3795 }
3796 }
3797 gcc_unreachable ();
3798 }
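/* A minimal sketch of the expected calling protocol, loosely modeled
   on the region scheduler; the exact sequence is a simplification:

     struct deps_desc tmp_deps;

     init_deps (&tmp_deps, false);
     sched_analyze (&tmp_deps, head, tail);
     free_deps (&tmp_deps);
     (schedule the insns between HEAD and TAIL)
     sched_free_deps (head, tail, true);

   where HEAD and TAIL delimit the insns of one basic block or
   region.  */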
3799
3800 /* Helper for sched_free_deps ().
3801 Delete INSN's (RESOLVED_P) backward dependencies. */
3802 static void
3803 delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
3804 {
3805 sd_iterator_def sd_it;
3806 dep_t dep;
3807 sd_list_types_def types;
3808
3809 if (resolved_p)
3810 types = SD_LIST_RES_BACK;
3811 else
3812 types = SD_LIST_BACK;
3813
3814 for (sd_it = sd_iterator_start (insn, types);
3815 sd_iterator_cond (&sd_it, &dep);)
3816 {
3817 dep_link_t link = *sd_it.linkp;
3818 dep_node_t node = DEP_LINK_NODE (link);
3819 deps_list_t back_list;
3820 deps_list_t forw_list;
3821
3822 get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3823 remove_from_deps_list (link, back_list);
3824 delete_dep_node (node);
3825 }
3826 }
3827
3828 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3829 deps_lists. */
3830 void
3831 sched_free_deps (rtx head, rtx tail, bool resolved_p)
3832 {
3833 rtx insn;
3834 rtx next_tail = NEXT_INSN (tail);
3835
3836 /* We make two passes since some insns may be scheduled before their
3837 dependencies are resolved. */
3838 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3839 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3840 {
3841 /* Clear forward deps, leaving the dep_nodes on the
3842 corresponding back_deps lists. */
3843 if (resolved_p)
3844 clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3845 else
3846 clear_deps_list (INSN_FORW_DEPS (insn));
3847 }
3848 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3849 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3850 {
3851 /* Clear resolved back deps together with their dep_nodes. */
3852 delete_dep_nodes_in_back_deps (insn, resolved_p);
3853
3854 sd_finish_insn (insn);
3855 }
3856 }
3857 \f
3858 /* Initialize variables for region data dependence analysis.
3859 When LAZY_REG_LAST is true, do not allocate reg_last array
3860 of struct deps_desc immediately. */
3861
3862 void
3863 init_deps (struct deps_desc *deps, bool lazy_reg_last)
3864 {
3865 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3866
3867 deps->max_reg = max_reg;
3868 if (lazy_reg_last)
3869 deps->reg_last = NULL;
3870 else
3871 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3872 INIT_REG_SET (&deps->reg_last_in_use);
3873
3874 deps->pending_read_insns = 0;
3875 deps->pending_read_mems = 0;
3876 deps->pending_write_insns = 0;
3877 deps->pending_write_mems = 0;
3878 deps->pending_jump_insns = 0;
3879 deps->pending_read_list_length = 0;
3880 deps->pending_write_list_length = 0;
3881 deps->pending_flush_length = 0;
3882 deps->last_pending_memory_flush = 0;
3883 deps->last_function_call = 0;
3884 deps->last_function_call_may_noreturn = 0;
3885 deps->sched_before_next_call = 0;
3886 deps->sched_before_next_jump = 0;
3887 deps->in_post_call_group_p = not_post_call;
3888 deps->last_debug_insn = 0;
3889 deps->last_args_size = 0;
3890 deps->last_reg_pending_barrier = NOT_A_BARRIER;
3891 deps->readonly = 0;
3892 }
3893
3894 /* Initialize the reg_last field of DEPS, which was not allocated
3895 earlier because DEPS was initialized lazily. */
3896 void
3897 init_deps_reg_last (struct deps_desc *deps)
3898 {
3899 gcc_assert (deps && deps->max_reg > 0);
3900 gcc_assert (deps->reg_last == NULL);
3901
3902 deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3903 }
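/* A sketch of the lazy protocol (illustrative only):

     init_deps (&deps, true);         (leaves deps.reg_last == NULL)
     ...
     if (deps.reg_last == NULL)
       init_deps_reg_last (&deps);    (allocate before first use)

   free_deps copes with either state, since it frees reg_last
   unconditionally and freeing NULL is harmless.  */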
3904
3905
3906 /* Free insn lists found in DEPS. */
3907
3908 void
3909 free_deps (struct deps_desc *deps)
3910 {
3911 unsigned i;
3912 reg_set_iterator rsi;
3913
3914 /* max_reg is set to 0 when this context has already been freed. */
3915 if (deps->max_reg == 0)
3916 {
3917 gcc_assert (deps->reg_last == NULL);
3918 return;
3919 }
3920 deps->max_reg = 0;
3921
3922 free_INSN_LIST_list (&deps->pending_read_insns);
3923 free_EXPR_LIST_list (&deps->pending_read_mems);
3924 free_INSN_LIST_list (&deps->pending_write_insns);
3925 free_EXPR_LIST_list (&deps->pending_write_mems);
3926 free_INSN_LIST_list (&deps->last_pending_memory_flush);
3927
3928 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3929 times. For a testcase with 42000 regs and 8000 small basic blocks,
3930 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
3931 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3932 {
3933 struct deps_reg *reg_last = &deps->reg_last[i];
3934 if (reg_last->uses)
3935 free_INSN_LIST_list (&reg_last->uses);
3936 if (reg_last->sets)
3937 free_INSN_LIST_list (&reg_last->sets);
3938 if (reg_last->implicit_sets)
3939 free_INSN_LIST_list (&reg_last->implicit_sets);
3940 if (reg_last->control_uses)
3941 free_INSN_LIST_list (&reg_last->control_uses);
3942 if (reg_last->clobbers)
3943 free_INSN_LIST_list (&reg_last->clobbers);
3944 }
3945 CLEAR_REG_SET (&deps->reg_last_in_use);
3946
3947 /* As we initialize reg_last lazily, it is possible that we didn't allocate
3948 it at all. */
3949 free (deps->reg_last);
3950 deps->reg_last = NULL;
3951
3952 deps = NULL;
3953 }
3954
3955 /* Remove INSN from the dependence context DEPS. */
3956 void
3957 remove_from_deps (struct deps_desc *deps, rtx insn)
3958 {
3959 int removed;
3960 unsigned i;
3961 reg_set_iterator rsi;
3962
3963 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
3964 &deps->pending_read_mems);
3965 if (!DEBUG_INSN_P (insn))
3966 deps->pending_read_list_length -= removed;
3967 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
3968 &deps->pending_write_mems);
3969 deps->pending_write_list_length -= removed;
3970
3971 removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
3972 deps->pending_flush_length -= removed;
3973 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
3974 deps->pending_flush_length -= removed;
3975
3976 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3977 {
3978 struct deps_reg *reg_last = &deps->reg_last[i];
3979 if (reg_last->uses)
3980 remove_from_dependence_list (insn, &reg_last->uses);
3981 if (reg_last->sets)
3982 remove_from_dependence_list (insn, &reg_last->sets);
3983 if (reg_last->implicit_sets)
3984 remove_from_dependence_list (insn, &reg_last->implicit_sets);
3985 if (reg_last->clobbers)
3986 remove_from_dependence_list (insn, &reg_last->clobbers);
3987 if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
3988 && !reg_last->clobbers)
3989 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
3990 }
3991
3992 if (CALL_P (insn))
3993 {
3994 remove_from_dependence_list (insn, &deps->last_function_call);
3995 remove_from_dependence_list (insn,
3996 &deps->last_function_call_may_noreturn);
3997 }
3998 remove_from_dependence_list (insn, &deps->sched_before_next_call);
3999 }
4000
4001 /* Init deps data vector. */
4002 static void
4003 init_deps_data_vector (void)
4004 {
4005 int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
4006 if (reserve > 0 && ! h_d_i_d.space (reserve))
4007 h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2);
4008 }
4009
4010 /* If it is profitable to use them, initialize or extend (depending on
4011 GLOBAL_P) dependency data. */
4012 void
4013 sched_deps_init (bool global_p)
4014 {
4015 /* Average number of insns in the basic block.
4016 '+ 1' is used to make it nonzero. */
4017 int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;
4018
4019 init_deps_data_vector ();
4020
4021 /* We use another caching mechanism for selective scheduling, so
4022 we don't use this one. */
4023 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
4024 {
4025 /* ?!? We could save some memory by computing a per-region luid mapping
4026 which could reduce both the number of vectors in the cache and the
4027 size of each vector. Instead we just avoid the cache entirely unless
4028 the average number of instructions in a basic block is very high. See
4029 the comment before the declaration of true_dependency_cache for
4030 what we consider "very high". */
4031 cache_size = 0;
4032 extend_dependency_caches (sched_max_luid, true);
4033 }
4034
4035 if (global_p)
4036 {
4037 dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
4038 /* Allocate lists for one block at a time. */
4039 insns_in_block);
4040 dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
4041 /* Allocate nodes for one block at a time.
4042 We assume that the average insn has
4043 5 producers. */
4044 5 * insns_in_block);
4045 }
4046 }
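/* To illustrate the 100 * 5 threshold above with made-up numbers: the
   dependency caches are used only when an average block holds more
   than 500 insns.  sched_max_luid == 60000 over 100 blocks gives
   insns_in_block == 601, so the caches are enabled; the same luids
   over 1000 blocks give 61, so they are not.  */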
4047
4048
4049 /* Create or extend (depending on CREATE_P) the dependency caches
4050 by N entries. */
4051 void
4052 extend_dependency_caches (int n, bool create_p)
4053 {
4054 if (create_p || true_dependency_cache)
4055 {
4056 int i, luid = cache_size + n;
4057
4058 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
4059 luid);
4060 output_dependency_cache = XRESIZEVEC (bitmap_head,
4061 output_dependency_cache, luid);
4062 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
4063 luid);
4064 control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
4065 luid);
4066
4067 if (current_sched_info->flags & DO_SPECULATION)
4068 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
4069 luid);
4070
4071 for (i = cache_size; i < luid; i++)
4072 {
4073 bitmap_initialize (&true_dependency_cache[i], 0);
4074 bitmap_initialize (&output_dependency_cache[i], 0);
4075 bitmap_initialize (&anti_dependency_cache[i], 0);
4076 bitmap_initialize (&control_dependency_cache[i], 0);
4077
4078 if (current_sched_info->flags & DO_SPECULATION)
4079 bitmap_initialize (&spec_dependency_cache[i], 0);
4080 }
4081 cache_size = luid;
4082 }
4083 }
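/* As an explanatory gloss (see the cache maintenance routines earlier
   in this file for the authoritative layout): each cache is an array
   of bitmaps indexed by an insn's luid, with the bit for the other
   insn of a dependence set when that kind of dependence is known;
   hence the arrays are grown to LUID == CACHE_SIZE + N entries.  */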
4084
4085 /* Finalize dependency information for the whole function. */
4086 void
4087 sched_deps_finish (void)
4088 {
4089 gcc_assert (deps_pools_are_empty_p ());
4090 free_alloc_pool_if_empty (&dn_pool);
4091 free_alloc_pool_if_empty (&dl_pool);
4092 gcc_assert (dn_pool == NULL && dl_pool == NULL);
4093
4094 h_d_i_d.release ();
4095 
4096
4097 if (true_dependency_cache)
4098 {
4099 int i;
4100
4101 for (i = 0; i < cache_size; i++)
4102 {
4103 bitmap_clear (&true_dependency_cache[i]);
4104 bitmap_clear (&output_dependency_cache[i]);
4105 bitmap_clear (&anti_dependency_cache[i]);
4106 bitmap_clear (&control_dependency_cache[i]);
4107
4108 if (sched_deps_info->generate_spec_deps)
4109 bitmap_clear (&spec_dependency_cache[i]);
4110 }
4111 free (true_dependency_cache);
4112 true_dependency_cache = NULL;
4113 free (output_dependency_cache);
4114 output_dependency_cache = NULL;
4115 free (anti_dependency_cache);
4116 anti_dependency_cache = NULL;
4117 free (control_dependency_cache);
4118 control_dependency_cache = NULL;
4119
4120 if (sched_deps_info->generate_spec_deps)
4121 {
4122 free (spec_dependency_cache);
4123 spec_dependency_cache = NULL;
4124 }
4125 }
4126 cache_size = 0; /* Reset only after the caches above are cleared. */
4127 }
4128
4129 /* Initialize some global variables needed by the dependency analysis
4130 code. */
4131
4132 void
4133 init_deps_global (void)
4134 {
4135 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
4136 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
4137 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
4138 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
4139 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
4140 reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
4141 reg_pending_barrier = NOT_A_BARRIER;
4142
4143 if (!sel_sched_p () || sched_emulate_haifa_p)
4144 {
4145 sched_deps_info->start_insn = haifa_start_insn;
4146 sched_deps_info->finish_insn = haifa_finish_insn;
4147
4148 sched_deps_info->note_reg_set = haifa_note_reg_set;
4149 sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
4150 sched_deps_info->note_reg_use = haifa_note_reg_use;
4151
4152 sched_deps_info->note_mem_dep = haifa_note_mem_dep;
4153 sched_deps_info->note_dep = haifa_note_dep;
4154 }
4155 }
4156
4157 /* Free everything used by the dependency analysis code. */
4158
4159 void
4160 finish_deps_global (void)
4161 {
4162 FREE_REG_SET (reg_pending_sets);
4163 FREE_REG_SET (reg_pending_clobbers);
4164 FREE_REG_SET (reg_pending_uses);
4165 FREE_REG_SET (reg_pending_control_uses);
4166 }
4167
4168 /* Estimate the weakness of dependence between MEM1 and MEM2. */
4169 dw_t
4170 estimate_dep_weak (rtx mem1, rtx mem2)
4171 {
4172 rtx r1, r2;
4173
4174 if (mem1 == mem2)
4175 /* MEMs are the same - don't speculate. */
4176 return MIN_DEP_WEAK;
4177
4178 r1 = XEXP (mem1, 0);
4179 r2 = XEXP (mem2, 0);
4180
4181 if (r1 == r2
4182 || (REG_P (r1) && REG_P (r2)
4183 && REGNO (r1) == REGNO (r2)))
4184 /* Again, MEMs are the same. */
4185 return MIN_DEP_WEAK;
4186 else if ((REG_P (r1) && !REG_P (r2))
4187 || (!REG_P (r1) && REG_P (r2)))
4188 /* Different addressing modes - a reason to be more speculative
4189 than usual. */
4190 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
4191 else
4192 /* We can't say anything about the dependence. */
4193 return UNCERTAIN_DEP_WEAK;
4194 }
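/* The mixed-address case above returns the midpoint of
   [UNCERTAIN_DEP_WEAK, NO_DEP_WEAK].  With hypothetical values
   UNCERTAIN_DEP_WEAK == 60 and NO_DEP_WEAK == 100, it would return
   100 - (100 - 60) / 2 == 80.  */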
4195
4196 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
4197 This function can handle same INSN and ELEM (INSN == ELEM).
4198 It is a convenience wrapper. */
4199 static void
4200 add_dependence_1 (rtx insn, rtx elem, enum reg_note dep_type)
4201 {
4202 ds_t ds;
4203 bool internal;
4204
4205 if (dep_type == REG_DEP_TRUE)
4206 ds = DEP_TRUE;
4207 else if (dep_type == REG_DEP_OUTPUT)
4208 ds = DEP_OUTPUT;
4209 else if (dep_type == REG_DEP_CONTROL)
4210 ds = DEP_CONTROL;
4211 else
4212 {
4213 gcc_assert (dep_type == REG_DEP_ANTI);
4214 ds = DEP_ANTI;
4215 }
4216
4217 /* When add_dependence is called from inside sched-deps.c, we expect
4218 cur_insn to be non-null. */
4219 internal = cur_insn != NULL;
4220 if (internal)
4221 gcc_assert (insn == cur_insn);
4222 else
4223 cur_insn = insn;
4224
4225 note_dep (elem, ds);
4226 if (!internal)
4227 cur_insn = NULL;
4228 }
4229
4230 /* Return weakness of speculative type TYPE in the dep_status DS.
4231 This variant omits the range check, so malformed input won't ICE. */
4232 static dw_t
4233 get_dep_weak_1 (ds_t ds, ds_t type)
4234 {
4235 ds = ds & type;
4236
4237 switch (type)
4238 {
4239 case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
4240 case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
4241 case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
4242 case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
4243 default: gcc_unreachable ();
4244 }
4245
4246 return (dw_t) ds;
4247 }
4248
4249 /* Return weakness of speculative type TYPE in the dep_status DS. */
4250 dw_t
4251 get_dep_weak (ds_t ds, ds_t type)
4252 {
4253 dw_t dw = get_dep_weak_1 (ds, type);
4254
4255 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4256 return dw;
4257 }
4258
4259 /* Return a dep_status that has the same parameters as DS, except
4260 that speculative type TYPE has weakness DW. */
4261 ds_t
4262 set_dep_weak (ds_t ds, ds_t type, dw_t dw)
4263 {
4264 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4265
4266 ds &= ~type;
4267 switch (type)
4268 {
4269 case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
4270 case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
4271 case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
4272 case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
4273 default: gcc_unreachable ();
4274 }
4275 return ds;
4276 }
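/* A sketch of the intended round trip: for any W with
   MIN_DEP_WEAK <= W <= MAX_DEP_WEAK,

     gcc_checking_assert
       (get_dep_weak (set_dep_weak (ds, BEGIN_DATA, w), BEGIN_DATA) == w);

   holds, since set_dep_weak clears the BEGIN_DATA field and stores W
   shifted by BEGIN_DATA_BITS_OFFSET, which get_dep_weak_1 shifts back
   out.  The same applies to the other speculative types.  */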
4277
4278 /* Return the join of two dep_statuses DS1 and DS2.
4279 If MAX_P is true then choose the greater probability,
4280 otherwise multiply probabilities.
4281 This function assumes that both DS1 and DS2 contain speculative bits. */
4282 static ds_t
4283 ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
4284 {
4285 ds_t ds, t;
4286
4287 gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
4288
4289 ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
4290
4291 t = FIRST_SPEC_TYPE;
4292 do
4293 {
4294 if ((ds1 & t) && !(ds2 & t))
4295 ds |= ds1 & t;
4296 else if (!(ds1 & t) && (ds2 & t))
4297 ds |= ds2 & t;
4298 else if ((ds1 & t) && (ds2 & t))
4299 {
4300 dw_t dw1 = get_dep_weak (ds1, t);
4301 dw_t dw2 = get_dep_weak (ds2, t);
4302 ds_t dw;
4303
4304 if (!max_p)
4305 {
4306 dw = ((ds_t) dw1) * ((ds_t) dw2);
4307 dw /= MAX_DEP_WEAK;
4308 if (dw < MIN_DEP_WEAK)
4309 dw = MIN_DEP_WEAK;
4310 }
4311 else
4312 {
4313 if (dw1 >= dw2)
4314 dw = dw1;
4315 else
4316 dw = dw2;
4317 }
4318
4319 ds = set_dep_weak (ds, t, (dw_t) dw);
4320 }
4321
4322 if (t == LAST_SPEC_TYPE)
4323 break;
4324 t <<= SPEC_TYPE_SHIFT;
4325 }
4326 while (1);
4327
4328 return ds;
4329 }
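/* A note on the !MAX_P arithmetic above: weaknesses act as fixed-point
   probabilities with MAX_DEP_WEAK standing in for 1.0, so joining two
   independent estimates multiplies them:

     dw = dw1 * dw2 / MAX_DEP_WEAK;

   For example, two weaknesses of MAX_DEP_WEAK / 2 merge to roughly
   MAX_DEP_WEAK / 4, clamped below at MIN_DEP_WEAK.  */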
4330
4331 /* Return the join of two dep_statuses DS1 and DS2.
4332 This function assumes that both DS1 and DS2 contain speculative bits. */
4333 ds_t
4334 ds_merge (ds_t ds1, ds_t ds2)
4335 {
4336 return ds_merge_1 (ds1, ds2, false);
4337 }
4338
4339 /* Return the join of DS and DS2; MEM1/MEM2, if given, refine the data-speculative weakness. */
4340 ds_t
4341 ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
4342 {
4343 ds_t new_status = ds | ds2;
4344
4345 if (new_status & SPECULATIVE)
4346 {
4347 if ((ds && !(ds & SPECULATIVE))
4348 || (ds2 && !(ds2 & SPECULATIVE)))
4349 /* Then this dep can't be speculative. */
4350 new_status &= ~SPECULATIVE;
4351 else
4352 {
4353 /* Both are speculative. Merging probabilities. */
4354 if (mem1)
4355 {
4356 dw_t dw;
4357
4358 dw = estimate_dep_weak (mem1, mem2);
4359 ds = set_dep_weak (ds, BEGIN_DATA, dw);
4360 }
4361
4362 if (!ds)
4363 new_status = ds2;
4364 else if (!ds2)
4365 new_status = ds;
4366 else
4367 new_status = ds_merge (ds2, ds);
4368 }
4369 }
4370
4371 return new_status;
4372 }
4373
4374 /* Return the join of DS1 and DS2. Use maximum instead of multiplying
4375 probabilities. */
4376 ds_t
4377 ds_max_merge (ds_t ds1, ds_t ds2)
4378 {
4379 if (ds1 == 0 && ds2 == 0)
4380 return 0;
4381
4382 if (ds1 == 0 && ds2 != 0)
4383 return ds2;
4384
4385 if (ds1 != 0 && ds2 == 0)
4386 return ds1;
4387
4388 return ds_merge_1 (ds1, ds2, true);
4389 }
4390
4391 /* Return the probability of speculation success for the speculation
4392 status DS. */
4393 dw_t
4394 ds_weak (ds_t ds)
4395 {
4396 ds_t res = 1, dt;
4397 int n = 0;
4398
4399 dt = FIRST_SPEC_TYPE;
4400 do
4401 {
4402 if (ds & dt)
4403 {
4404 res *= (ds_t) get_dep_weak (ds, dt);
4405 n++;
4406 }
4407
4408 if (dt == LAST_SPEC_TYPE)
4409 break;
4410 dt <<= SPEC_TYPE_SHIFT;
4411 }
4412 while (1);
4413
4414 gcc_assert (n);
4415 while (--n)
4416 res /= MAX_DEP_WEAK;
4417
4418 if (res < MIN_DEP_WEAK)
4419 res = MIN_DEP_WEAK;
4420
4421 gcc_assert (res <= MAX_DEP_WEAK);
4422
4423 return (dw_t) res;
4424 }
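/* ds_weak thus computes the product of the weaknesses of all
   speculation types present in DS under the same fixed-point model as
   ds_merge_1: N factors are multiplied, the result is divided by
   MAX_DEP_WEAK N-1 times, and the quotient is clamped to
   [MIN_DEP_WEAK, MAX_DEP_WEAK].  */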
4425
4426 /* Return a dep status that contains all speculation types of DS. */
4427 ds_t
4428 ds_get_speculation_types (ds_t ds)
4429 {
4430 if (ds & BEGIN_DATA)
4431 ds |= BEGIN_DATA;
4432 if (ds & BE_IN_DATA)
4433 ds |= BE_IN_DATA;
4434 if (ds & BEGIN_CONTROL)
4435 ds |= BEGIN_CONTROL;
4436 if (ds & BE_IN_CONTROL)
4437 ds |= BE_IN_CONTROL;
4438
4439 return ds & SPECULATIVE;
4440 }
4441
4442 /* Return a dep status that contains maximal weakness for each speculation
4443 type present in DS. */
4444 ds_t
4445 ds_get_max_dep_weak (ds_t ds)
4446 {
4447 if (ds & BEGIN_DATA)
4448 ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4449 if (ds & BE_IN_DATA)
4450 ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4451 if (ds & BEGIN_CONTROL)
4452 ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4453 if (ds & BE_IN_CONTROL)
4454 ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4455
4456 return ds;
4457 }
4458
4459 /* Dump information about the dependence status S. */
4460 static void
4461 dump_ds (FILE *f, ds_t s)
4462 {
4463 fprintf (f, "{");
4464
4465 if (s & BEGIN_DATA)
4466 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4467 if (s & BE_IN_DATA)
4468 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4469 if (s & BEGIN_CONTROL)
4470 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4471 if (s & BE_IN_CONTROL)
4472 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4473
4474 if (s & HARD_DEP)
4475 fprintf (f, "HARD_DEP; ");
4476
4477 if (s & DEP_TRUE)
4478 fprintf (f, "DEP_TRUE; ");
4479 if (s & DEP_OUTPUT)
4480 fprintf (f, "DEP_OUTPUT; ");
4481 if (s & DEP_ANTI)
4482 fprintf (f, "DEP_ANTI; ");
4483 if (s & DEP_CONTROL)
4484 fprintf (f, "DEP_CONTROL; ");
4485
4486 fprintf (f, "}");
4487 }
4488
4489 DEBUG_FUNCTION void
4490 debug_ds (ds_t s)
4491 {
4492 dump_ds (stderr, s);
4493 fprintf (stderr, "\n");
4494 }
4495
4496 #ifdef ENABLE_CHECKING
4497 /* Verify that dependence type and status are consistent.
4498 If RELAXED_P is true, then skip dep_weakness checks. */
4499 static void
4500 check_dep (dep_t dep, bool relaxed_p)
4501 {
4502 enum reg_note dt = DEP_TYPE (dep);
4503 ds_t ds = DEP_STATUS (dep);
4504
4505 gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4506
4507 if (!(current_sched_info->flags & USE_DEPS_LIST))
4508 {
4509 gcc_assert (ds == 0);
4510 return;
4511 }
4512
4513 /* Check that dependence type contains the same bits as the status. */
4514 if (dt == REG_DEP_TRUE)
4515 gcc_assert (ds & DEP_TRUE);
4516 else if (dt == REG_DEP_OUTPUT)
4517 gcc_assert ((ds & DEP_OUTPUT)
4518 && !(ds & DEP_TRUE));
4519 else if (dt == REG_DEP_ANTI)
4520 gcc_assert ((ds & DEP_ANTI)
4521 && !(ds & (DEP_OUTPUT | DEP_TRUE)));
4522 else
4523 gcc_assert (dt == REG_DEP_CONTROL
4524 && (ds & DEP_CONTROL)
4525 && !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));
4526
4527 /* HARD_DEP cannot appear in the dep_status of a link. */
4528 gcc_assert (!(ds & HARD_DEP));
4529
4530 /* Check that dependence status is set correctly when speculation is not
4531 supported. */
4532 if (!sched_deps_info->generate_spec_deps)
4533 gcc_assert (!(ds & SPECULATIVE));
4534 else if (ds & SPECULATIVE)
4535 {
4536 if (!relaxed_p)
4537 {
4538 ds_t type = FIRST_SPEC_TYPE;
4539
4540 /* Check that dependence weakness is in proper range. */
4541 do
4542 {
4543 if (ds & type)
4544 get_dep_weak (ds, type);
4545
4546 if (type == LAST_SPEC_TYPE)
4547 break;
4548 type <<= SPEC_TYPE_SHIFT;
4549 }
4550 while (1);
4551 }
4552
4553 if (ds & BEGIN_SPEC)
4554 {
4555 /* Only true dependence can be data speculative. */
4556 if (ds & BEGIN_DATA)
4557 gcc_assert (ds & DEP_TRUE);
4558
4559 /* Control dependencies in the insn scheduler are represented by
4560 anti-dependencies, therefore only anti dependence can be
4561 control speculative. */
4562 if (ds & BEGIN_CONTROL)
4563 gcc_assert (ds & DEP_ANTI);
4564 }
4565 else
4566 {
4567 /* Subsequent speculations should resolve true dependencies. */
4568 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4569 }
4570
4571 /* Check that true and anti dependencies can't have other speculative
4572 statuses. */
4573 if (ds & DEP_TRUE)
4574 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4575 /* An output dependence can't be speculative at all. */
4576 gcc_assert (!(ds & DEP_OUTPUT));
4577 if (ds & DEP_ANTI)
4578 gcc_assert (ds & BEGIN_CONTROL);
4579 }
4580 }
4581 #endif /* ENABLE_CHECKING */
4582
4583 /* The following code discovers opportunities to switch a memory reference
4584 and an increment by modifying the address. We ensure that this is done
4585 only for dependencies that represent a single register dependence
4586 (tracked via DEP_NONREG and DEP_MULTIPLE), and that every memory
4587 instruction involved is subject to only one dep that can cause a
4588 pattern change.
4589
4590 When we discover a suitable dependency, we fill in the dep_replacement
4591 structure to show how to modify the memory reference. */
4592
4593 /* Holds information about a pair of memory reference and register increment
4594 insns which depend on each other, but could possibly be interchanged. */
4595 struct mem_inc_info
4596 {
4597 rtx inc_insn;
4598 rtx mem_insn;
4599
4600 rtx *mem_loc;
4601 /* A register occurring in the memory address for which we wish to break
4602 the dependence. This must be identical to the destination register of
4603 the increment. */
4604 rtx mem_reg0;
4605 /* Any kind of index that is added to that register. */
4606 rtx mem_index;
4607 /* The constant offset used in the memory address. */
4608 HOST_WIDE_INT mem_constant;
4609 /* The constant added in the increment insn. Negated if the increment is
4610 after the memory address. */
4611 HOST_WIDE_INT inc_constant;
4612 /* The source register used in the increment. May be different from mem_reg0
4613 if the increment occurs before the memory address. */
4614 rtx inc_input;
4615 };
4616
4617 /* Verify that the memory location described in MII can be replaced with
4618 one using NEW_ADDR. Return the new memory reference or NULL_RTX. The
4619 insn remains unchanged by this function. */
4620
4621 static rtx
4622 attempt_change (struct mem_inc_info *mii, rtx new_addr)
4623 {
4624 rtx mem = *mii->mem_loc;
4625 rtx new_mem;
4626
4627 /* Jump through a lot of hoops to keep the attributes up to date. We
4628 do not want to call one of the change address variants that take
4629 an offset even though we know the offset in many cases. These
4630 assume you are changing where the address is pointing by the
4631 offset. */
4632 new_mem = replace_equiv_address_nv (mem, new_addr);
4633 if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
4634 {
4635 if (sched_verbose >= 5)
4636 fprintf (sched_dump, "validation failure\n");
4637 return NULL_RTX;
4638 }
4639
4640 /* Put back the old one. */
4641 validate_change (mii->mem_insn, mii->mem_loc, mem, 0);
4642
4643 return new_mem;
4644 }
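/* Note that attempt_change is a dry run: the first validate_change
   call vets the new MEM against the insn's constraints, and the
   second backs it out again, so the caller receives a validated
   replacement rtx while the insn stream stays untouched until the
   scheduler decides to apply the dep_replacement.  */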
4645
4646 /* Return true if INSN has the form "a = b + c", where a and b are
4647 regs, c is a const_int, and a is the register recorded in
4648 MII->mem_reg0. Fill in information in MII about what is found.
4649 BEFORE_MEM indicates whether the increment is found before or after
4650 a corresponding memory reference. */
4651
4652 static bool
4653 parse_add_or_inc (struct mem_inc_info *mii, rtx insn, bool before_mem)
4654 {
4655 rtx pat = single_set (insn);
4656 rtx src, cst;
4657 bool regs_equal;
4658
4659 if (RTX_FRAME_RELATED_P (insn) || !pat)
4660 return false;
4661
4662 /* Result must be single reg. */
4663 if (!REG_P (SET_DEST (pat)))
4664 return false;
4665
4666 if (GET_CODE (SET_SRC (pat)) != PLUS)
4667 return false;
4668
4669 mii->inc_insn = insn;
4670 src = SET_SRC (pat);
4671 mii->inc_input = XEXP (src, 0);
4672
4673 if (!REG_P (XEXP (src, 0)))
4674 return false;
4675
4676 if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
4677 return false;
4678
4679 cst = XEXP (src, 1);
4680 if (!CONST_INT_P (cst))
4681 return false;
4682 mii->inc_constant = INTVAL (cst);
4683
4684 regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);
4685
4686 if (!before_mem)
4687 {
4688 mii->inc_constant = -mii->inc_constant;
4689 if (!regs_equal)
4690 return false;
4691 }
4692
4693 if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
4694 {
4695 /* Note that the sign has already been reversed for !before_mem. */
4696 #ifdef STACK_GROWS_DOWNWARD
4697 return mii->inc_constant > 0;
4698 #else
4699 return mii->inc_constant < 0;
4700 #endif
4701 }
4702 return true;
4703 }
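/* Illustrative examples of increment insns accepted above, where R is
   the address register recorded in MII->mem_reg0 (constants are
   arbitrary):

     (set (reg R) (plus (reg R) (const_int 4)))    before or after mem
     (set (reg R) (plus (reg S) (const_int 4)))    before mem only

   When the increment follows the memory reference, the source and
   destination registers must match and inc_constant is negated.  */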
4704
4705 /* Once a suitable mem reference has been found and the corresponding data
4706 in MII has been filled in, this function is called to find a suitable
4707 add or inc insn involving the register we found in the memory
4708 reference. */
4709
4710 static bool
4711 find_inc (struct mem_inc_info *mii, bool backwards)
4712 {
4713 sd_iterator_def sd_it;
4714 dep_t dep;
4715
4716 sd_it = sd_iterator_start (mii->mem_insn,
4717 backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
4718 while (sd_iterator_cond (&sd_it, &dep))
4719 {
4720 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
4721 rtx pro = DEP_PRO (dep);
4722 rtx con = DEP_CON (dep);
4723 rtx inc_cand = backwards ? pro : con;
4724 if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
4725 goto next;
4726 if (parse_add_or_inc (mii, inc_cand, backwards))
4727 {
4728 struct dep_replacement *desc;
4729 df_ref def;
4730 rtx newaddr, newmem;
4731
4732 if (sched_verbose >= 5)
4733 fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
4734 INSN_UID (mii->mem_insn), INSN_UID (inc_cand));
4735
4736 /* Ensure that none of the operands of the inc
4737 instruction are assigned to by the mem insn. */
4738 FOR_EACH_INSN_DEF (def, mii->mem_insn)
4739 if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
4740 || reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
4741 {
4742 if (sched_verbose >= 5)
4743 fprintf (sched_dump,
4744 "inc conflicts with store failure.\n");
4745 goto next;
4746 }
4747 newaddr = mii->inc_input;
4748 if (mii->mem_index != NULL_RTX)
4749 newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
4750 mii->mem_index);
4751 newaddr = plus_constant (GET_MODE (newaddr), newaddr,
4752 mii->mem_constant + mii->inc_constant);
4753 newmem = attempt_change (mii, newaddr);
4754 if (newmem == NULL_RTX)
4755 goto next;
4756 if (sched_verbose >= 5)
4757 fprintf (sched_dump, "successful address replacement\n");
4758 desc = XCNEW (struct dep_replacement);
4759 DEP_REPLACE (dep) = desc;
4760 desc->loc = mii->mem_loc;
4761 desc->newval = newmem;
4762 desc->orig = *desc->loc;
4763 desc->insn = mii->mem_insn;
4764 move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
4765 INSN_SPEC_BACK_DEPS (con));
4766 if (backwards)
4767 {
4768 FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
4769 add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
4770 REG_DEP_TRUE);
4771 }
4772 else
4773 {
4774 FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
4775 add_dependence_1 (DEP_CON (dep), mii->mem_insn,
4776 REG_DEP_ANTI);
4777 }
4778 return true;
4779 }
4780 next:
4781 sd_iterator_next (&sd_it);
4782 }
4783 return false;
4784 }
4785
4786 /* A recursive function that walks ADDRESS_OF_X to find memory references
4787 which could be modified during scheduling. We call find_inc for each
4788 one we find that has a recognizable form. MII holds information about
4789 the pair of memory/increment instructions.
4790 We ensure that every instruction with a memory reference (which will be
4791 the location of the replacement) is assigned at most one breakable
4792 dependency. */
4793
4794 static bool
4795 find_mem (struct mem_inc_info *mii, rtx *address_of_x)
4796 {
4797 rtx x = *address_of_x;
4798 enum rtx_code code = GET_CODE (x);
4799 const char *const fmt = GET_RTX_FORMAT (code);
4800 int i;
4801
4802 if (code == MEM)
4803 {
4804 rtx reg0 = XEXP (x, 0);
4805
4806 mii->mem_loc = address_of_x;
4807 mii->mem_index = NULL_RTX;
4808 mii->mem_constant = 0;
4809 if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
4810 {
4811 mii->mem_constant = INTVAL (XEXP (reg0, 1));
4812 reg0 = XEXP (reg0, 0);
4813 }
4814 if (GET_CODE (reg0) == PLUS)
4815 {
4816 mii->mem_index = XEXP (reg0, 1);
4817 reg0 = XEXP (reg0, 0);
4818 }
4819 if (REG_P (reg0))
4820 {
4821 df_ref use;
4822 int occurrences = 0;
4823
4824 /* Make sure this reg appears only once in this insn. Can't use
4825 count_occurrences since that only works for pseudos. */
4826 FOR_EACH_INSN_USE (use, mii->mem_insn)
4827 if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
4828 if (++occurrences > 1)
4829 {
4830 if (sched_verbose >= 5)
4831 fprintf (sched_dump, "mem count failure\n");
4832 return false;
4833 }
4834
4835 mii->mem_reg0 = reg0;
4836 return find_inc (mii, true) || find_inc (mii, false);
4837 }
4838 return false;
4839 }
4840
4841 if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
4842 {
4843 /* If REG occurs inside a MEM used in a bit-field reference,
4844 that is unacceptable. */
4845 return false;
4846 }
4847
4848 /* Time for some deep diving. */
4849 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4850 {
4851 if (fmt[i] == 'e')
4852 {
4853 if (find_mem (mii, &XEXP (x, i)))
4854 return true;
4855 }
4856 else if (fmt[i] == 'E')
4857 {
4858 int j;
4859 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4860 if (find_mem (mii, &XVECEXP (x, i, j)))
4861 return true;
4862 }
4863 }
4864 return false;
4865 }
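/* The address shapes recognized above are therefore:

     (mem (reg R))
     (mem (plus (reg R) (reg I)))
     (mem (plus (reg R) (const_int C)))
     (mem (plus (plus (reg R) (reg I)) (const_int C)))

   with R recorded as mem_reg0, I as mem_index and C as mem_constant.  */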
4866
4867
4868 /* Examine the instructions between HEAD and TAIL and try to find
4869 dependencies that can be broken by modifying one of the patterns. */
4870
4871 void
4872 find_modifiable_mems (rtx head, rtx tail)
4873 {
4874 rtx insn, next_tail = NEXT_INSN (tail);
4875 int success_in_block = 0;
4876
4877 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
4878 {
4879 struct mem_inc_info mii;
4880
4881 if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
4882 continue;
4883
4884 mii.mem_insn = insn;
4885 if (find_mem (&mii, &PATTERN (insn)))
4886 success_in_block++;
4887 }
4888 if (success_in_block && sched_verbose >= 5)
4889 fprintf (sched_dump, "%d candidates for address modification found.\n",
4890 success_in_block);
4891 }
4892
4893 #endif /* INSN_SCHEDULING */