1 /* Instruction scheduling pass. This file computes dependencies between
2 instructions.
3 Copyright (C) 1992-2015 Free Software Foundation, Inc.
4    Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
5    and currently maintained by, Jim Wilson (wilson@cygnus.com).
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22 \f
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "diagnostic-core.h"
28 #include "rtl.h"
29 #include "hash-set.h"
30 #include "vec.h"
31 #include "input.h"
32 #include "alias.h"
33 #include "symtab.h"
34 #include "inchash.h"
35 #include "tree.h" /* FIXME: Used by call_may_noreturn_p. */
36 #include "tm_p.h"
37 #include "hard-reg-set.h"
38 #include "regs.h"
39 #include "input.h"
40 #include "function.h"
41 #include "flags.h"
42 #include "insn-config.h"
43 #include "insn-attr.h"
44 #include "except.h"
45 #include "recog.h"
46 #include "emit-rtl.h"
47 #include "dominance.h"
48 #include "cfg.h"
49 #include "cfgbuild.h"
50 #include "predict.h"
51 #include "basic-block.h"
52 #include "sched-int.h"
53 #include "params.h"
54 #include "alloc-pool.h"
55 #include "cselib.h"
56 #include "ira.h"
57 #include "target.h"
58
59 #ifdef INSN_SCHEDULING
60
61 #ifdef ENABLE_CHECKING
62 #define CHECK (true)
63 #else
64 #define CHECK (false)
65 #endif
66
67 /* Holds current parameters for the dependency analyzer. */
68 struct sched_deps_info_def *sched_deps_info;
69
70 /* The data is specific to the Haifa scheduler. */
71 vec<haifa_deps_insn_data_def>
72 h_d_i_d = vNULL;
73
74 /* Return the major type present in the DS. */
75 enum reg_note
76 ds_to_dk (ds_t ds)
77 {
78 if (ds & DEP_TRUE)
79 return REG_DEP_TRUE;
80
81 if (ds & DEP_OUTPUT)
82 return REG_DEP_OUTPUT;
83
84 if (ds & DEP_CONTROL)
85 return REG_DEP_CONTROL;
86
87 gcc_assert (ds & DEP_ANTI);
88
89 return REG_DEP_ANTI;
90 }
91
92 /* Return equivalent dep_status. */
93 ds_t
94 dk_to_ds (enum reg_note dk)
95 {
96 switch (dk)
97 {
98 case REG_DEP_TRUE:
99 return DEP_TRUE;
100
101 case REG_DEP_OUTPUT:
102 return DEP_OUTPUT;
103
104 case REG_DEP_CONTROL:
105 return DEP_CONTROL;
106
107 default:
108 gcc_assert (dk == REG_DEP_ANTI);
109 return DEP_ANTI;
110 }
111 }
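
/* A sketch of the mapping implemented above (illustrative only):
   ds_to_dk and dk_to_ds are inverses on the four major dependence kinds,
   e.g.

     ds_to_dk (dk_to_ds (REG_DEP_OUTPUT)) == REG_DEP_OUTPUT
     dk_to_ds (ds_to_dk (DEP_ANTI)) == DEP_ANTI

   When several type bits are set in DS, ds_to_dk returns the first match
   in the order DEP_TRUE, DEP_OUTPUT, DEP_CONTROL, DEP_ANTI.  */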
112
113 /* Functions to operate with dependence information container - dep_t. */
114
115 /* Init DEP with the arguments. */
116 void
117 init_dep_1 (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note type, ds_t ds)
118 {
119 DEP_PRO (dep) = pro;
120 DEP_CON (dep) = con;
121 DEP_TYPE (dep) = type;
122 DEP_STATUS (dep) = ds;
123 DEP_COST (dep) = UNKNOWN_DEP_COST;
124 DEP_NONREG (dep) = 0;
125 DEP_MULTIPLE (dep) = 0;
126 DEP_REPLACE (dep) = NULL;
127 }
128
129 /* Init DEP with the arguments.
130    While most of the scheduler (including targets) only needs the major type
131 of the dependency, it is convenient to hide full dep_status from them. */
132 void
133 init_dep (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note kind)
134 {
135 ds_t ds;
136
137 if ((current_sched_info->flags & USE_DEPS_LIST))
138 ds = dk_to_ds (kind);
139 else
140 ds = 0;
141
142 init_dep_1 (dep, pro, con, kind, ds);
143 }
144
145 /* Make a copy of FROM in TO. */
146 static void
147 copy_dep (dep_t to, dep_t from)
148 {
149 memcpy (to, from, sizeof (*to));
150 }
151
152 static void dump_ds (FILE *, ds_t);
153
154 /* Define flags for dump_dep (). */
155
156 /* Dump producer of the dependence. */
157 #define DUMP_DEP_PRO (2)
158
159 /* Dump consumer of the dependence. */
160 #define DUMP_DEP_CON (4)
161
162 /* Dump type of the dependence. */
163 #define DUMP_DEP_TYPE (8)
164
165 /* Dump status of the dependence. */
166 #define DUMP_DEP_STATUS (16)
167
168 /* Dump all information about the dependence. */
169 #define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
170 |DUMP_DEP_STATUS)
171
172 /* Dump DEP to DUMP.
173 FLAGS is a bit mask specifying what information about DEP needs
174 to be printed.
175 If FLAGS has the very first bit set, then dump all information about DEP
176 and propagate this bit into the callee dump functions. */
177 static void
178 dump_dep (FILE *dump, dep_t dep, int flags)
179 {
180 if (flags & 1)
181 flags |= DUMP_DEP_ALL;
182
183 fprintf (dump, "<");
184
185 if (flags & DUMP_DEP_PRO)
186 fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));
187
188 if (flags & DUMP_DEP_CON)
189 fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));
190
191 if (flags & DUMP_DEP_TYPE)
192 {
193 char t;
194 enum reg_note type = DEP_TYPE (dep);
195
196 switch (type)
197 {
198 case REG_DEP_TRUE:
199 t = 't';
200 break;
201
202 case REG_DEP_OUTPUT:
203 t = 'o';
204 break;
205
206 case REG_DEP_CONTROL:
207 t = 'c';
208 break;
209
210 case REG_DEP_ANTI:
211 t = 'a';
212 break;
213
214 default:
215 gcc_unreachable ();
216 break;
217 }
218
219 fprintf (dump, "%c; ", t);
220 }
221
222 if (flags & DUMP_DEP_STATUS)
223 {
224 if (current_sched_info->flags & USE_DEPS_LIST)
225 dump_ds (dump, DEP_STATUS (dep));
226 }
227
228 fprintf (dump, ">");
229 }
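
/* Illustration of the format produced above (a sketch, not normative):
   with DUMP_DEP_ALL, a true dependence whose producer is insn 10 and
   whose consumer is insn 14 is printed roughly as

     <10; 14; t; ...>

   where the last field, if present, is emitted by dump_ds.  */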
230
231 /* Default flags for dump_dep (). */
232 static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);
233
234 /* Dump all fields of DEP to STDERR. */
235 void
236 sd_debug_dep (dep_t dep)
237 {
238 dump_dep (stderr, dep, 1);
239 fprintf (stderr, "\n");
240 }
241
242 /* Determine whether DEP is a dependency link of a non-debug insn on a
243 debug insn. */
244
245 static inline bool
246 depl_on_debug_p (dep_link_t dep)
247 {
248 return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
249 && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
250 }
251
252 /* Functions to operate with a single link from the dependencies lists -
253 dep_link_t. */
254
255 /* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
256    PREV_NEXTP.  */
257 static void
258 attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
259 {
260 dep_link_t next = *prev_nextp;
261
262 gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
263 && DEP_LINK_NEXT (l) == NULL);
264
265 /* Init node being inserted. */
266 DEP_LINK_PREV_NEXTP (l) = prev_nextp;
267 DEP_LINK_NEXT (l) = next;
268
269 /* Fix next node. */
270 if (next != NULL)
271 {
272 gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);
273
274 DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
275 }
276
277 /* Fix prev node. */
278 *prev_nextp = l;
279 }
280
281 /* Add dep_link LINK to deps_list L. */
282 static void
283 add_to_deps_list (dep_link_t link, deps_list_t l)
284 {
285 attach_dep_link (link, &DEPS_LIST_FIRST (l));
286
287 /* Don't count debug deps. */
288 if (!depl_on_debug_p (link))
289 ++DEPS_LIST_N_LINKS (l);
290 }
291
292 /* Detach dep_link L from the list. */
293 static void
294 detach_dep_link (dep_link_t l)
295 {
296 dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
297 dep_link_t next = DEP_LINK_NEXT (l);
298
299 *prev_nextp = next;
300
301 if (next != NULL)
302 DEP_LINK_PREV_NEXTP (next) = prev_nextp;
303
304 DEP_LINK_PREV_NEXTP (l) = NULL;
305 DEP_LINK_NEXT (l) = NULL;
306 }
307
308 /* Remove link LINK from list LIST. */
309 static void
310 remove_from_deps_list (dep_link_t link, deps_list_t list)
311 {
312 detach_dep_link (link);
313
314 /* Don't count debug deps. */
315 if (!depl_on_debug_p (link))
316 --DEPS_LIST_N_LINKS (list);
317 }
318
319 /* Move link LINK from list FROM to list TO. */
320 static void
321 move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
322 {
323 remove_from_deps_list (link, from);
324 add_to_deps_list (link, to);
325 }
326
327 /* Return true if LINK is not attached to any list.  */
328 static bool
329 dep_link_is_detached_p (dep_link_t link)
330 {
331 return DEP_LINK_PREV_NEXTP (link) == NULL;
332 }
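
/* Summary of the list representation used above (descriptive note):
   a deps_list is singly linked forward through DEP_LINK_NEXT, while
   DEP_LINK_PREV_NEXTP points back at the field that holds the link
   (either DEPS_LIST_FIRST of the list or DEP_LINK_NEXT of the previous
   link).  This lets attach_dep_link and detach_dep_link splice a link
   in or out in constant time without knowing the containing list.  */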
333
334 /* Pool to hold all dependency nodes (dep_node_t). */
335 static pool_allocator<_dep_node> *dn_pool;
336
337 /* Number of dep_nodes out there. */
338 static int dn_pool_diff = 0;
339
340 /* Create a dep_node. */
341 static dep_node_t
342 create_dep_node (void)
343 {
344 dep_node_t n = dn_pool->allocate ();
345 dep_link_t back = DEP_NODE_BACK (n);
346 dep_link_t forw = DEP_NODE_FORW (n);
347
348 DEP_LINK_NODE (back) = n;
349 DEP_LINK_NEXT (back) = NULL;
350 DEP_LINK_PREV_NEXTP (back) = NULL;
351
352 DEP_LINK_NODE (forw) = n;
353 DEP_LINK_NEXT (forw) = NULL;
354 DEP_LINK_PREV_NEXTP (forw) = NULL;
355
356 ++dn_pool_diff;
357
358 return n;
359 }
360
361 /* Delete dep_node N. N must not be connected to any deps_list. */
362 static void
363 delete_dep_node (dep_node_t n)
364 {
365 gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
366 && dep_link_is_detached_p (DEP_NODE_FORW (n)));
367
368 XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));
369
370 --dn_pool_diff;
371
372 dn_pool->remove (n);
373 }
374
375 /* Pool to hold dependencies lists (deps_list_t). */
376 static pool_allocator<_deps_list> *dl_pool;
377
378 /* Number of deps_lists out there. */
379 static int dl_pool_diff = 0;
380
381 /* Functions to operate with dependences lists - deps_list_t. */
382
383 /* Return true if list L is empty. */
384 static bool
385 deps_list_empty_p (deps_list_t l)
386 {
387 return DEPS_LIST_N_LINKS (l) == 0;
388 }
389
390 /* Create a new deps_list. */
391 static deps_list_t
392 create_deps_list (void)
393 {
394 deps_list_t l = dl_pool->allocate ();
395
396 DEPS_LIST_FIRST (l) = NULL;
397 DEPS_LIST_N_LINKS (l) = 0;
398
399 ++dl_pool_diff;
400 return l;
401 }
402
403 /* Free deps_list L. */
404 static void
405 free_deps_list (deps_list_t l)
406 {
407 gcc_assert (deps_list_empty_p (l));
408
409 --dl_pool_diff;
410
411 dl_pool->remove (l);
412 }
413
414 /* Return true if there are no dep_nodes or deps_lists out there.
415    After the region is scheduled all the dependency nodes and lists
416    should [generally] be returned to the pool.  */
417 bool
418 deps_pools_are_empty_p (void)
419 {
420 return dn_pool_diff == 0 && dl_pool_diff == 0;
421 }
422
423 /* Remove all elements from L. */
424 static void
425 clear_deps_list (deps_list_t l)
426 {
427 do
428 {
429 dep_link_t link = DEPS_LIST_FIRST (l);
430
431 if (link == NULL)
432 break;
433
434 remove_from_deps_list (link, l);
435 }
436 while (1);
437 }
438
439 /* Decide whether a dependency should be treated as a hard or a speculative
440 dependency. */
441 static bool
442 dep_spec_p (dep_t dep)
443 {
444 if (current_sched_info->flags & DO_SPECULATION)
445 {
446 if (DEP_STATUS (dep) & SPECULATIVE)
447 return true;
448 }
449 if (current_sched_info->flags & DO_PREDICATION)
450 {
451 if (DEP_TYPE (dep) == REG_DEP_CONTROL)
452 return true;
453 }
454 if (DEP_REPLACE (dep) != NULL)
455 return true;
456 return false;
457 }
458
459 static regset reg_pending_sets;
460 static regset reg_pending_clobbers;
461 static regset reg_pending_uses;
462 static regset reg_pending_control_uses;
463 static enum reg_pending_barrier_mode reg_pending_barrier;
464
465 /* Hard registers implicitly clobbered or used (or that may be implicitly
466    clobbered or used) by the currently analyzed insn.  For example, an
467    insn may have a constraint that allows only one register class.  Even
468    if there is currently no hard register in the insn, that particular
469    hard register will appear in the insn after the reload pass because
470    the constraint requires it.  */
471 static HARD_REG_SET implicit_reg_pending_clobbers;
472 static HARD_REG_SET implicit_reg_pending_uses;
473
474 /* To speed up the test for duplicate dependency links we keep a
475 record of dependencies created by add_dependence when the average
476 number of instructions in a basic block is very large.
477
478    Studies have shown that there are typically around 5 instructions between
479 branches for typical C code. So we can make a guess that the average
480 basic block is approximately 5 instructions long; we will choose 100X
481 the average size as a very large basic block.
482
483 Each insn has associated bitmaps for its dependencies. Each bitmap
484 has enough entries to represent a dependency on any other insn in
485    the insn chain.  Once the bitmap cache for true dependencies is
486    allocated, the remaining caches are allocated as well.  */
487 static bitmap_head *true_dependency_cache = NULL;
488 static bitmap_head *output_dependency_cache = NULL;
489 static bitmap_head *anti_dependency_cache = NULL;
490 static bitmap_head *control_dependency_cache = NULL;
491 static bitmap_head *spec_dependency_cache = NULL;
492 static int cache_size;
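
/* Indexing convention for the caches above (descriptive note): each cache
   is an array indexed by the LUID of the consumer insn; bit
   INSN_LUID (producer) is set in that bitmap when the corresponding kind
   of dependence has been recorded (see ask_dependency_caches and
   set_dependency_caches below).  */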
493
494 /* True if we should mark added dependencies as non-register deps.  */
495 static bool mark_as_hard;
496
497 static int deps_may_trap_p (const_rtx);
498 static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
499 static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
500 enum reg_note, bool);
501 static void add_dependence_list_and_free (struct deps_desc *, rtx_insn *,
502 rtx_insn_list **, int, enum reg_note,
503 bool);
504 static void delete_all_dependences (rtx_insn *);
505 static void chain_to_prev_insn (rtx_insn *);
506
507 static void flush_pending_lists (struct deps_desc *, rtx_insn *, int, int);
508 static void sched_analyze_1 (struct deps_desc *, rtx, rtx_insn *);
509 static void sched_analyze_2 (struct deps_desc *, rtx, rtx_insn *);
510 static void sched_analyze_insn (struct deps_desc *, rtx, rtx_insn *);
511
512 static bool sched_has_condition_p (const rtx_insn *);
513 static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
514
515 static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
516 rtx, rtx);
517 static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);
518
519 #ifdef ENABLE_CHECKING
520 static void check_dep (dep_t, bool);
521 #endif
522 \f
523 /* Return nonzero if a load of the memory reference MEM can cause a trap. */
524
525 static int
526 deps_may_trap_p (const_rtx mem)
527 {
528 const_rtx addr = XEXP (mem, 0);
529
530 if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
531 {
532 const_rtx t = get_reg_known_value (REGNO (addr));
533 if (t)
534 addr = t;
535 }
536 return rtx_addr_can_trap_p (addr);
537 }
538 \f
539
540 /* Find the condition under which INSN is executed. If REV is not NULL,
541 it is set to TRUE when the returned comparison should be reversed
542 to get the actual condition. */
543 static rtx
544 sched_get_condition_with_rev_uncached (const rtx_insn *insn, bool *rev)
545 {
546 rtx pat = PATTERN (insn);
547 rtx src;
548
549 if (rev)
550 *rev = false;
551
552 if (GET_CODE (pat) == COND_EXEC)
553 return COND_EXEC_TEST (pat);
554
555 if (!any_condjump_p (insn) || !onlyjump_p (insn))
556 return 0;
557
558 src = SET_SRC (pc_set (insn));
559
560 if (XEXP (src, 2) == pc_rtx)
561 return XEXP (src, 0);
562 else if (XEXP (src, 1) == pc_rtx)
563 {
564 rtx cond = XEXP (src, 0);
565 enum rtx_code revcode = reversed_comparison_code (cond, insn);
566
567 if (revcode == UNKNOWN)
568 return 0;
569
570 if (rev)
571 *rev = true;
572 return cond;
573 }
574
575 return 0;
576 }
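
/* A sketch of the two shapes handled above (illustrative): for a
   conditional jump the source of the pc set is either

     (if_then_else COND (label_ref L) (pc))  -> return COND
     (if_then_else COND (pc) (label_ref L))  -> return reversible COND,
                                                setting *REV

   and anything else yields no usable condition.  */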
577
578 /* Return the condition under which INSN does not execute (i.e. the
579 not-taken condition for a conditional branch), or NULL if we cannot
580 find such a condition. The caller should make a copy of the condition
581 before using it. */
582 rtx
583 sched_get_reverse_condition_uncached (const rtx_insn *insn)
584 {
585 bool rev;
586 rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
587 if (cond == NULL_RTX)
588 return cond;
589 if (!rev)
590 {
591 enum rtx_code revcode = reversed_comparison_code (cond, insn);
592 cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
593 XEXP (cond, 0),
594 XEXP (cond, 1));
595 }
596 return cond;
597 }
598
599 /* Caching variant of sched_get_condition_with_rev_uncached.
600 We only do actual work the first time we come here for an insn; the
601 results are cached in INSN_CACHED_COND and INSN_REVERSE_COND. */
602 static rtx
603 sched_get_condition_with_rev (const rtx_insn *insn, bool *rev)
604 {
605 bool tmp;
606
607 if (INSN_LUID (insn) == 0)
608 return sched_get_condition_with_rev_uncached (insn, rev);
609
610 if (INSN_CACHED_COND (insn) == const_true_rtx)
611 return NULL_RTX;
612
613 if (INSN_CACHED_COND (insn) != NULL_RTX)
614 {
615 if (rev)
616 *rev = INSN_REVERSE_COND (insn);
617 return INSN_CACHED_COND (insn);
618 }
619
620 INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
621 INSN_REVERSE_COND (insn) = tmp;
622
623 if (INSN_CACHED_COND (insn) == NULL_RTX)
624 {
625 INSN_CACHED_COND (insn) = const_true_rtx;
626 return NULL_RTX;
627 }
628
629 if (rev)
630 *rev = INSN_REVERSE_COND (insn);
631 return INSN_CACHED_COND (insn);
632 }
633
634 /* True when we can find a condition under which INSN is executed. */
635 static bool
636 sched_has_condition_p (const rtx_insn *insn)
637 {
638 return !! sched_get_condition_with_rev (insn, NULL);
639 }
640
641 \f
642
643 /* Return nonzero if conditions COND1 and COND2 can never both be true.  */
644 static int
645 conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
646 {
647 if (COMPARISON_P (cond1)
648 && COMPARISON_P (cond2)
649 && GET_CODE (cond1) ==
650 (rev1==rev2
651 ? reversed_comparison_code (cond2, NULL)
652 : GET_CODE (cond2))
653 && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
654 && XEXP (cond1, 1) == XEXP (cond2, 1))
655 return 1;
656 return 0;
657 }
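
/* Example of the test above (illustrative): COND1 == (eq (reg CC) (const_int 0))
   and COND2 == (ne (reg CC) (const_int 0)) with REV1 == REV2 are mutually
   exclusive, since reversing NE gives EQ and both operands match.  */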
658
659 /* Return true if insn1 and insn2 can never depend on one another because
660 the conditions under which they are executed are mutually exclusive. */
661 bool
662 sched_insns_conditions_mutex_p (const rtx_insn *insn1, const rtx_insn *insn2)
663 {
664 rtx cond1, cond2;
665 bool rev1 = false, rev2 = false;
666
667 /* df doesn't handle conditional lifetimes entirely correctly;
668 calls mess up the conditional lifetimes. */
669 if (!CALL_P (insn1) && !CALL_P (insn2))
670 {
671 cond1 = sched_get_condition_with_rev (insn1, &rev1);
672 cond2 = sched_get_condition_with_rev (insn2, &rev2);
673 if (cond1 && cond2
674 && conditions_mutex_p (cond1, cond2, rev1, rev2)
675 /* Make sure first instruction doesn't affect condition of second
676 instruction if switched. */
677 && !modified_in_p (cond1, insn2)
678 /* Make sure second instruction doesn't affect condition of first
679 instruction if switched. */
680 && !modified_in_p (cond2, insn1))
681 return true;
682 }
683 return false;
684 }
685 \f
686
687 /* Return true if INSN can potentially be speculated with type DS. */
688 bool
689 sched_insn_is_legitimate_for_speculation_p (const rtx_insn *insn, ds_t ds)
690 {
691 if (HAS_INTERNAL_DEP (insn))
692 return false;
693
694 if (!NONJUMP_INSN_P (insn))
695 return false;
696
697 if (SCHED_GROUP_P (insn))
698 return false;
699
700 if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX_INSN (insn)))
701 return false;
702
703 if (side_effects_p (PATTERN (insn)))
704 return false;
705
706 if (ds & BE_IN_SPEC)
707     /* The following instructions, which depend on a speculatively scheduled
708        instruction, cannot themselves be speculatively scheduled.  */
709 {
710 if (may_trap_or_fault_p (PATTERN (insn)))
711 /* If instruction might fault, it cannot be speculatively scheduled.
712 For control speculation it's obvious why and for data speculation
713 it's because the insn might get wrong input if speculation
714 wasn't successful. */
715 return false;
716
717 if ((ds & BE_IN_DATA)
718 && sched_has_condition_p (insn))
719 /* If this is a predicated instruction, then it cannot be
720 speculatively scheduled. See PR35659. */
721 return false;
722 }
723
724 return true;
725 }
726
727 /* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
728 initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
729 and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
730 This function is used to switch sd_iterator to the next list.
731 !!! For internal use only. Might consider moving it to sched-int.h. */
732 void
733 sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
734 deps_list_t *list_ptr, bool *resolved_p_ptr)
735 {
736 sd_list_types_def types = *types_ptr;
737
738 if (types & SD_LIST_HARD_BACK)
739 {
740 *list_ptr = INSN_HARD_BACK_DEPS (insn);
741 *resolved_p_ptr = false;
742 *types_ptr = types & ~SD_LIST_HARD_BACK;
743 }
744 else if (types & SD_LIST_SPEC_BACK)
745 {
746 *list_ptr = INSN_SPEC_BACK_DEPS (insn);
747 *resolved_p_ptr = false;
748 *types_ptr = types & ~SD_LIST_SPEC_BACK;
749 }
750 else if (types & SD_LIST_FORW)
751 {
752 *list_ptr = INSN_FORW_DEPS (insn);
753 *resolved_p_ptr = false;
754 *types_ptr = types & ~SD_LIST_FORW;
755 }
756 else if (types & SD_LIST_RES_BACK)
757 {
758 *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
759 *resolved_p_ptr = true;
760 *types_ptr = types & ~SD_LIST_RES_BACK;
761 }
762 else if (types & SD_LIST_RES_FORW)
763 {
764 *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
765 *resolved_p_ptr = true;
766 *types_ptr = types & ~SD_LIST_RES_FORW;
767 }
768 else
769 {
770 *list_ptr = NULL;
771 *resolved_p_ptr = false;
772 *types_ptr = SD_LIST_NONE;
773 }
774 }
775
776 /* Return the summary size of INSN's lists defined by LIST_TYPES. */
777 int
778 sd_lists_size (const_rtx insn, sd_list_types_def list_types)
779 {
780 int size = 0;
781
782 while (list_types != SD_LIST_NONE)
783 {
784 deps_list_t list;
785 bool resolved_p;
786
787 sd_next_list (insn, &list_types, &list, &resolved_p);
788 if (list)
789 size += DEPS_LIST_N_LINKS (list);
790 }
791
792 return size;
793 }
794
795 /* Return true if INSN's lists defined by LIST_TYPES are all empty. */
796
797 bool
798 sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
799 {
800 while (list_types != SD_LIST_NONE)
801 {
802 deps_list_t list;
803 bool resolved_p;
804
805 sd_next_list (insn, &list_types, &list, &resolved_p);
806 if (!deps_list_empty_p (list))
807 return false;
808 }
809
810 return true;
811 }
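
/* Usage sketch (mirrors the patterns used later in this file): walking
   every backward dependence of an insn looks like

     sd_iterator_def sd_it;
     dep_t dep;

     FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
       if (DEP_TYPE (dep) == REG_DEP_TRUE)
         ...;

   where SD_LIST_BACK is assumed to combine the hard and speculative
   backward lists.  */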
812
813 /* Initialize data for INSN. */
814 void
815 sd_init_insn (rtx_insn *insn)
816 {
817 INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
818 INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
819 INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
820 INSN_FORW_DEPS (insn) = create_deps_list ();
821 INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();
822
823 /* ??? It would be nice to allocate dependency caches here. */
824 }
825
826 /* Free data for INSN. */
827 void
828 sd_finish_insn (rtx_insn *insn)
829 {
830 /* ??? It would be nice to deallocate dependency caches here. */
831
832 free_deps_list (INSN_HARD_BACK_DEPS (insn));
833 INSN_HARD_BACK_DEPS (insn) = NULL;
834
835 free_deps_list (INSN_SPEC_BACK_DEPS (insn));
836 INSN_SPEC_BACK_DEPS (insn) = NULL;
837
838 free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
839 INSN_RESOLVED_BACK_DEPS (insn) = NULL;
840
841 free_deps_list (INSN_FORW_DEPS (insn));
842 INSN_FORW_DEPS (insn) = NULL;
843
844 free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
845 INSN_RESOLVED_FORW_DEPS (insn) = NULL;
846 }
847
848 /* Find a dependency between producer PRO and consumer CON.
849 Search through resolved dependency lists if RESOLVED_P is true.
850 If no such dependency is found return NULL,
851 otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
852 with an iterator pointing to it. */
853 static dep_t
854 sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
855 sd_iterator_def *sd_it_ptr)
856 {
857 sd_list_types_def pro_list_type;
858 sd_list_types_def con_list_type;
859 sd_iterator_def sd_it;
860 dep_t dep;
861 bool found_p = false;
862
863 if (resolved_p)
864 {
865 pro_list_type = SD_LIST_RES_FORW;
866 con_list_type = SD_LIST_RES_BACK;
867 }
868 else
869 {
870 pro_list_type = SD_LIST_FORW;
871 con_list_type = SD_LIST_BACK;
872 }
873
874   /* Walk through either the back list of the consumer CON or the forw list
875      of the producer PRO, depending on which one is shorter.  */
876 if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
877 {
878 /* Find the dep_link with producer PRO in consumer's back_deps. */
879 FOR_EACH_DEP (con, con_list_type, sd_it, dep)
880 if (DEP_PRO (dep) == pro)
881 {
882 found_p = true;
883 break;
884 }
885 }
886 else
887 {
888 /* Find the dep_link with consumer CON in producer's forw_deps. */
889 FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
890 if (DEP_CON (dep) == con)
891 {
892 found_p = true;
893 break;
894 }
895 }
896
897 if (found_p)
898 {
899 if (sd_it_ptr != NULL)
900 *sd_it_ptr = sd_it;
901
902 return dep;
903 }
904
905 return NULL;
906 }
907
908 /* Find a dependency between producer PRO and consumer CON.
909    Use the dependency cache [if available] to check if a dependency exists.
910    Search through resolved dependency lists if RESOLVED_P is true.
911    Return the dependency, or NULL if none is found.  */
912 dep_t
913 sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
914 {
915 if (true_dependency_cache != NULL)
916 /* Avoiding the list walk below can cut compile times dramatically
917 for some code. */
918 {
919 int elem_luid = INSN_LUID (pro);
920 int insn_luid = INSN_LUID (con);
921
922 if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
923 && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
924 && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
925 && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
926 return NULL;
927 }
928
929 return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
930 }
931
932 /* Add or update a dependence described by DEP.
933 MEM1 and MEM2, if non-null, correspond to memory locations in case of
934 data speculation.
935
936 The function returns a value indicating if an old entry has been changed
937 or a new entry has been added to insn's backward deps.
938
939    This function merely checks whether producer and consumer are the same insn
940 and doesn't create a dep in this case. Actual manipulation of
941 dependence data structures is performed in add_or_update_dep_1. */
942 static enum DEPS_ADJUST_RESULT
943 maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
944 {
945 rtx_insn *elem = DEP_PRO (dep);
946 rtx_insn *insn = DEP_CON (dep);
947
948 gcc_assert (INSN_P (insn) && INSN_P (elem));
949
950 /* Don't depend an insn on itself. */
951 if (insn == elem)
952 {
953 if (sched_deps_info->generate_spec_deps)
954 /* INSN has an internal dependence, which we can't overcome. */
955 HAS_INTERNAL_DEP (insn) = 1;
956
957 return DEP_NODEP;
958 }
959
960 return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
961 }
962
963 /* Ask dependency caches what needs to be done for dependence DEP.
964 Return DEP_CREATED if new dependence should be created and there is no
965 need to try to find one searching the dependencies lists.
966 Return DEP_PRESENT if there already is a dependence described by DEP and
967 hence nothing is to be done.
968 Return DEP_CHANGED if there already is a dependence, but it should be
969 updated to incorporate additional information from DEP. */
970 static enum DEPS_ADJUST_RESULT
971 ask_dependency_caches (dep_t dep)
972 {
973 int elem_luid = INSN_LUID (DEP_PRO (dep));
974 int insn_luid = INSN_LUID (DEP_CON (dep));
975
976 gcc_assert (true_dependency_cache != NULL
977 && output_dependency_cache != NULL
978 && anti_dependency_cache != NULL
979 && control_dependency_cache != NULL);
980
981 if (!(current_sched_info->flags & USE_DEPS_LIST))
982 {
983 enum reg_note present_dep_type;
984
985 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
986 present_dep_type = REG_DEP_TRUE;
987 else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
988 present_dep_type = REG_DEP_OUTPUT;
989 else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
990 present_dep_type = REG_DEP_ANTI;
991 else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
992 present_dep_type = REG_DEP_CONTROL;
993 else
994 /* There is no existing dep so it should be created. */
995 return DEP_CREATED;
996
997 if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
998 /* DEP does not add anything to the existing dependence. */
999 return DEP_PRESENT;
1000 }
1001 else
1002 {
1003 ds_t present_dep_types = 0;
1004
1005 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
1006 present_dep_types |= DEP_TRUE;
1007 if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
1008 present_dep_types |= DEP_OUTPUT;
1009 if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
1010 present_dep_types |= DEP_ANTI;
1011 if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
1012 present_dep_types |= DEP_CONTROL;
1013
1014 if (present_dep_types == 0)
1015 /* There is no existing dep so it should be created. */
1016 return DEP_CREATED;
1017
1018 if (!(current_sched_info->flags & DO_SPECULATION)
1019 || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
1020 {
1021 if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
1022 == present_dep_types)
1023 /* DEP does not add anything to the existing dependence. */
1024 return DEP_PRESENT;
1025 }
1026 else
1027 {
1028 /* Only true dependencies can be data speculative and
1029 only anti dependencies can be control speculative. */
1030 gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
1031 == present_dep_types);
1032
1033 /* if (DEP is SPECULATIVE) then
1034 ..we should update DEP_STATUS
1035 else
1036 ..we should reset existing dep to non-speculative. */
1037 }
1038 }
1039
1040 return DEP_CHANGED;
1041 }
1042
1043 /* Set dependency caches according to DEP. */
1044 static void
1045 set_dependency_caches (dep_t dep)
1046 {
1047 int elem_luid = INSN_LUID (DEP_PRO (dep));
1048 int insn_luid = INSN_LUID (DEP_CON (dep));
1049
1050 if (!(current_sched_info->flags & USE_DEPS_LIST))
1051 {
1052 switch (DEP_TYPE (dep))
1053 {
1054 case REG_DEP_TRUE:
1055 bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
1056 break;
1057
1058 case REG_DEP_OUTPUT:
1059 bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
1060 break;
1061
1062 case REG_DEP_ANTI:
1063 bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
1064 break;
1065
1066 case REG_DEP_CONTROL:
1067 bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
1068 break;
1069
1070 default:
1071 gcc_unreachable ();
1072 }
1073 }
1074 else
1075 {
1076 ds_t ds = DEP_STATUS (dep);
1077
1078 if (ds & DEP_TRUE)
1079 bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
1080 if (ds & DEP_OUTPUT)
1081 bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
1082 if (ds & DEP_ANTI)
1083 bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
1084 if (ds & DEP_CONTROL)
1085 bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
1086
1087 if (ds & SPECULATIVE)
1088 {
1089 gcc_assert (current_sched_info->flags & DO_SPECULATION);
1090 bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
1091 }
1092 }
1093 }
1094
1095 /* The type of dependence DEP has changed from OLD_TYPE.  Update dependency
1096 caches accordingly. */
1097 static void
1098 update_dependency_caches (dep_t dep, enum reg_note old_type)
1099 {
1100 int elem_luid = INSN_LUID (DEP_PRO (dep));
1101 int insn_luid = INSN_LUID (DEP_CON (dep));
1102
1103   /* Clear the corresponding cache entry because the type of the link
1104      may have changed.  Keep the entries if USE_DEPS_LIST is in effect.  */
1105 if (!(current_sched_info->flags & USE_DEPS_LIST))
1106 {
1107 switch (old_type)
1108 {
1109 case REG_DEP_OUTPUT:
1110 bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1111 break;
1112
1113 case REG_DEP_ANTI:
1114 bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1115 break;
1116
1117 case REG_DEP_CONTROL:
1118 bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
1119 break;
1120
1121 default:
1122 gcc_unreachable ();
1123 }
1124 }
1125
1126 set_dependency_caches (dep);
1127 }
1128
1129 /* Convert a dependence pointed to by SD_IT to be non-speculative. */
1130 static void
1131 change_spec_dep_to_hard (sd_iterator_def sd_it)
1132 {
1133 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1134 dep_link_t link = DEP_NODE_BACK (node);
1135 dep_t dep = DEP_NODE_DEP (node);
1136 rtx_insn *elem = DEP_PRO (dep);
1137 rtx_insn *insn = DEP_CON (dep);
1138
1139 move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));
1140
1141 DEP_STATUS (dep) &= ~SPECULATIVE;
1142
1143 if (true_dependency_cache != NULL)
1144 /* Clear the cache entry. */
1145 bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
1146 INSN_LUID (elem));
1147 }
1148
1149 /* Update DEP to incorporate information from NEW_DEP.
1150 SD_IT points to DEP in case it should be moved to another list.
1151    MEM1 and MEM2, if nonnull, correspond to memory locations in case a
1152 data-speculative dependence should be updated. */
1153 static enum DEPS_ADJUST_RESULT
1154 update_dep (dep_t dep, dep_t new_dep,
1155 sd_iterator_def sd_it ATTRIBUTE_UNUSED,
1156 rtx mem1 ATTRIBUTE_UNUSED,
1157 rtx mem2 ATTRIBUTE_UNUSED)
1158 {
1159 enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
1160 enum reg_note old_type = DEP_TYPE (dep);
1161 bool was_spec = dep_spec_p (dep);
1162
1163 DEP_NONREG (dep) |= DEP_NONREG (new_dep);
1164 DEP_MULTIPLE (dep) = 1;
1165
1166 /* If this is a more restrictive type of dependence than the
1167 existing one, then change the existing dependence to this
1168 type. */
1169 if ((int) DEP_TYPE (new_dep) < (int) old_type)
1170 {
1171 DEP_TYPE (dep) = DEP_TYPE (new_dep);
1172 res = DEP_CHANGED;
1173 }
1174
1175 if (current_sched_info->flags & USE_DEPS_LIST)
1176 /* Update DEP_STATUS. */
1177 {
1178 ds_t dep_status = DEP_STATUS (dep);
1179 ds_t ds = DEP_STATUS (new_dep);
1180 ds_t new_status = ds | dep_status;
1181
1182 if (new_status & SPECULATIVE)
1183 {
1184 /* Either existing dep or a dep we're adding or both are
1185 speculative. */
1186 if (!(ds & SPECULATIVE)
1187 || !(dep_status & SPECULATIVE))
1188 /* The new dep can't be speculative. */
1189 new_status &= ~SPECULATIVE;
1190 else
1191 {
1192 /* Both are speculative. Merge probabilities. */
1193 if (mem1 != NULL)
1194 {
1195 dw_t dw;
1196
1197 dw = estimate_dep_weak (mem1, mem2);
1198 ds = set_dep_weak (ds, BEGIN_DATA, dw);
1199 }
1200
1201 new_status = ds_merge (dep_status, ds);
1202 }
1203 }
1204
1205 ds = new_status;
1206
1207 if (dep_status != ds)
1208 {
1209 DEP_STATUS (dep) = ds;
1210 res = DEP_CHANGED;
1211 }
1212 }
1213
1214 if (was_spec && !dep_spec_p (dep))
1215 /* The old dep was speculative, but now it isn't. */
1216 change_spec_dep_to_hard (sd_it);
1217
1218 if (true_dependency_cache != NULL
1219 && res == DEP_CHANGED)
1220 update_dependency_caches (dep, old_type);
1221
1222 return res;
1223 }
1224
1225 /* Add or update a dependence described by DEP.
1226 MEM1 and MEM2, if non-null, correspond to memory locations in case of
1227 data speculation.
1228
1229 The function returns a value indicating if an old entry has been changed
1230 or a new entry has been added to insn's backward deps or nothing has
1231 been updated at all. */
1232 static enum DEPS_ADJUST_RESULT
1233 add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
1234 rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
1235 {
1236 bool maybe_present_p = true;
1237 bool present_p = false;
1238
1239 gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
1240 && DEP_PRO (new_dep) != DEP_CON (new_dep));
1241
1242 #ifdef ENABLE_CHECKING
1243 check_dep (new_dep, mem1 != NULL);
1244 #endif
1245
1246 if (true_dependency_cache != NULL)
1247 {
1248 switch (ask_dependency_caches (new_dep))
1249 {
1250 case DEP_PRESENT:
1251 dep_t present_dep;
1252 sd_iterator_def sd_it;
1253
1254 present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
1255 DEP_CON (new_dep),
1256 resolved_p, &sd_it);
1257 DEP_MULTIPLE (present_dep) = 1;
1258 return DEP_PRESENT;
1259
1260 case DEP_CHANGED:
1261 maybe_present_p = true;
1262 present_p = true;
1263 break;
1264
1265 case DEP_CREATED:
1266 maybe_present_p = false;
1267 present_p = false;
1268 break;
1269
1270 default:
1271 gcc_unreachable ();
1272 break;
1273 }
1274 }
1275
1276 /* Check that we don't already have this dependence. */
1277 if (maybe_present_p)
1278 {
1279 dep_t present_dep;
1280 sd_iterator_def sd_it;
1281
1282 gcc_assert (true_dependency_cache == NULL || present_p);
1283
1284 present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
1285 DEP_CON (new_dep),
1286 resolved_p, &sd_it);
1287
1288 if (present_dep != NULL)
1289 /* We found an existing dependency between ELEM and INSN. */
1290 return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
1291 else
1292	/* We didn't find a dep, so it shouldn't be present in the cache.  */
1293 gcc_assert (!present_p);
1294 }
1295
1296 /* Might want to check one level of transitivity to save conses.
1297 This check should be done in maybe_add_or_update_dep_1.
1298 Since we made it to add_or_update_dep_1, we must create
1299 (or update) a link. */
1300
1301 if (mem1 != NULL_RTX)
1302 {
1303 gcc_assert (sched_deps_info->generate_spec_deps);
1304 DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
1305 estimate_dep_weak (mem1, mem2));
1306 }
1307
1308 sd_add_dep (new_dep, resolved_p);
1309
1310 return DEP_CREATED;
1311 }
1312
1313 /* Initialize BACK_LIST_PTR with consumer's backward list and
1314 FORW_LIST_PTR with producer's forward list. If RESOLVED_P is true
1315 initialize with lists that hold resolved deps. */
1316 static void
1317 get_back_and_forw_lists (dep_t dep, bool resolved_p,
1318 deps_list_t *back_list_ptr,
1319 deps_list_t *forw_list_ptr)
1320 {
1321 rtx_insn *con = DEP_CON (dep);
1322
1323 if (!resolved_p)
1324 {
1325 if (dep_spec_p (dep))
1326 *back_list_ptr = INSN_SPEC_BACK_DEPS (con);
1327 else
1328 *back_list_ptr = INSN_HARD_BACK_DEPS (con);
1329
1330 *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
1331 }
1332 else
1333 {
1334 *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
1335 *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
1336 }
1337 }
1338
1339 /* Add dependence described by DEP.
1340 If RESOLVED_P is true treat the dependence as a resolved one. */
1341 void
1342 sd_add_dep (dep_t dep, bool resolved_p)
1343 {
1344 dep_node_t n = create_dep_node ();
1345 deps_list_t con_back_deps;
1346 deps_list_t pro_forw_deps;
1347 rtx_insn *elem = DEP_PRO (dep);
1348 rtx_insn *insn = DEP_CON (dep);
1349
1350 gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
1351
1352 if ((current_sched_info->flags & DO_SPECULATION) == 0
1353 || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
1354 DEP_STATUS (dep) &= ~SPECULATIVE;
1355
1356 copy_dep (DEP_NODE_DEP (n), dep);
1357
1358 get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);
1359
1360 add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
1361
1362 #ifdef ENABLE_CHECKING
1363 check_dep (dep, false);
1364 #endif
1365
1366 add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1367
1368 /* If we are adding a dependency to INSN's LOG_LINKs, then note that
1369 in the bitmap caches of dependency information. */
1370 if (true_dependency_cache != NULL)
1371 set_dependency_caches (dep);
1372 }
1373
1374 /* Add or update a backward dependence described by DEP.
1375    If RESOLVED_P is true, treat the dependence as a resolved one.
1376    This function is a convenience wrapper around add_or_update_dep_1.  */
1377 enum DEPS_ADJUST_RESULT
1378 sd_add_or_update_dep (dep_t dep, bool resolved_p)
1379 {
1380 return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
1381 }
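
/* Usage sketch (mirrors haifa_note_dep and sd_copy_back_deps in this
   file): recording a new true dependence of CON on PRO typically looks
   like

     dep_def _d, *d = &_d;

     init_dep (d, pro, con, REG_DEP_TRUE);
     sd_add_or_update_dep (d, false);

   which either creates the dependence or strengthens an existing one.  */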
1382
1383 /* Resolve the dependence pointed to by SD_IT.
1384 SD_IT will advance to the next element. */
1385 void
1386 sd_resolve_dep (sd_iterator_def sd_it)
1387 {
1388 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1389 dep_t dep = DEP_NODE_DEP (node);
1390 rtx_insn *pro = DEP_PRO (dep);
1391 rtx_insn *con = DEP_CON (dep);
1392
1393 if (dep_spec_p (dep))
1394 move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
1395 INSN_RESOLVED_BACK_DEPS (con));
1396 else
1397 move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
1398 INSN_RESOLVED_BACK_DEPS (con));
1399
1400 move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
1401 INSN_RESOLVED_FORW_DEPS (pro));
1402 }
1403
1404 /* Perform the inverse operation of sd_resolve_dep. Restore the dependence
1405 pointed to by SD_IT to unresolved state. */
1406 void
1407 sd_unresolve_dep (sd_iterator_def sd_it)
1408 {
1409 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1410 dep_t dep = DEP_NODE_DEP (node);
1411 rtx_insn *pro = DEP_PRO (dep);
1412 rtx_insn *con = DEP_CON (dep);
1413
1414 if (dep_spec_p (dep))
1415 move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
1416 INSN_SPEC_BACK_DEPS (con));
1417 else
1418 move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
1419 INSN_HARD_BACK_DEPS (con));
1420
1421 move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
1422 INSN_FORW_DEPS (pro));
1423 }
1424
1425 /* Make TO depend on all of FROM's producers.
1426 If RESOLVED_P is true add dependencies to the resolved lists. */
1427 void
1428 sd_copy_back_deps (rtx_insn *to, rtx_insn *from, bool resolved_p)
1429 {
1430 sd_list_types_def list_type;
1431 sd_iterator_def sd_it;
1432 dep_t dep;
1433
1434 list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;
1435
1436 FOR_EACH_DEP (from, list_type, sd_it, dep)
1437 {
1438 dep_def _new_dep, *new_dep = &_new_dep;
1439
1440 copy_dep (new_dep, dep);
1441 DEP_CON (new_dep) = to;
1442 sd_add_dep (new_dep, resolved_p);
1443 }
1444 }
1445
1446 /* Remove a dependency referred to by SD_IT.
1447 SD_IT will point to the next dependence after removal. */
1448 void
1449 sd_delete_dep (sd_iterator_def sd_it)
1450 {
1451 dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
1452 dep_t dep = DEP_NODE_DEP (n);
1453 rtx_insn *pro = DEP_PRO (dep);
1454 rtx_insn *con = DEP_CON (dep);
1455 deps_list_t con_back_deps;
1456 deps_list_t pro_forw_deps;
1457
1458 if (true_dependency_cache != NULL)
1459 {
1460 int elem_luid = INSN_LUID (pro);
1461 int insn_luid = INSN_LUID (con);
1462
1463 bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
1464 bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1465 bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
1466 bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1467
1468 if (current_sched_info->flags & DO_SPECULATION)
1469 bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
1470 }
1471
1472 get_back_and_forw_lists (dep, sd_it.resolved_p,
1473 &con_back_deps, &pro_forw_deps);
1474
1475 remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
1476 remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1477
1478 delete_dep_node (n);
1479 }
1480
1481 /* Dump size of the lists. */
1482 #define DUMP_LISTS_SIZE (2)
1483
1484 /* Dump dependencies of the lists. */
1485 #define DUMP_LISTS_DEPS (4)
1486
1487 /* Dump all information about the lists. */
1488 #define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)
1489
1490 /* Dump deps_lists of INSN specified by TYPES to DUMP.
1491 FLAGS is a bit mask specifying what information about the lists needs
1492 to be printed.
1493 If FLAGS has the very first bit set, then dump all information about
1494 the lists and propagate this bit into the callee dump functions. */
1495 static void
1496 dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
1497 {
1498 sd_iterator_def sd_it;
1499 dep_t dep;
1500 int all;
1501
1502 all = (flags & 1);
1503
1504 if (all)
1505 flags |= DUMP_LISTS_ALL;
1506
1507 fprintf (dump, "[");
1508
1509 if (flags & DUMP_LISTS_SIZE)
1510 fprintf (dump, "%d; ", sd_lists_size (insn, types));
1511
1512 if (flags & DUMP_LISTS_DEPS)
1513 {
1514 FOR_EACH_DEP (insn, types, sd_it, dep)
1515 {
1516 dump_dep (dump, dep, dump_dep_flags | all);
1517 fprintf (dump, " ");
1518 }
1519 }
1520 }
1521
1522 /* Dump all information about deps_lists of INSN specified by TYPES
1523 to STDERR. */
1524 void
1525 sd_debug_lists (rtx insn, sd_list_types_def types)
1526 {
1527 dump_lists (stderr, insn, types, 1);
1528 fprintf (stderr, "\n");
1529 }
1530
1531 /* A wrapper around add_dependence_1, to add a dependence of CON on
1532 PRO, with type DEP_TYPE. This function implements special handling
1533 for REG_DEP_CONTROL dependencies. For these, we optionally promote
1534 the type to REG_DEP_ANTI if we can determine that predication is
1535 impossible; otherwise we add additional true dependencies on the
1536 INSN_COND_DEPS list of the jump (which PRO must be). */
1537 void
1538 add_dependence (rtx_insn *con, rtx_insn *pro, enum reg_note dep_type)
1539 {
1540 if (dep_type == REG_DEP_CONTROL
1541 && !(current_sched_info->flags & DO_PREDICATION))
1542 dep_type = REG_DEP_ANTI;
1543
1544 /* A REG_DEP_CONTROL dependence may be eliminated through predication,
1545 so we must also make the insn dependent on the setter of the
1546 condition. */
1547 if (dep_type == REG_DEP_CONTROL)
1548 {
1549 rtx_insn *real_pro = pro;
1550 rtx_insn *other = real_insn_for_shadow (real_pro);
1551 rtx cond;
1552
1553 if (other != NULL_RTX)
1554 real_pro = other;
1555 cond = sched_get_reverse_condition_uncached (real_pro);
1556 /* Verify that the insn does not use a different value in
1557 the condition register than the one that was present at
1558 the jump. */
1559 if (cond == NULL_RTX)
1560 dep_type = REG_DEP_ANTI;
1561 else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
1562 {
1563 HARD_REG_SET uses;
1564 CLEAR_HARD_REG_SET (uses);
1565 note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
1566 if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
1567 dep_type = REG_DEP_ANTI;
1568 }
1569 if (dep_type == REG_DEP_CONTROL)
1570 {
1571 if (sched_verbose >= 5)
1572 fprintf (sched_dump, "making DEP_CONTROL for %d\n",
1573 INSN_UID (real_pro));
1574 add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
1575 REG_DEP_TRUE, false);
1576 }
1577 }
1578
1579 add_dependence_1 (con, pro, dep_type);
1580 }
1581
1582 /* A convenience wrapper to operate on an entire list. HARD should be
1583 true if DEP_NONREG should be set on newly created dependencies. */
1584
1585 static void
1586 add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond,
1587 enum reg_note dep_type, bool hard)
1588 {
1589 mark_as_hard = hard;
1590 for (; list; list = list->next ())
1591 {
1592 if (uncond || ! sched_insns_conditions_mutex_p (insn, list->insn ()))
1593 add_dependence (insn, list->insn (), dep_type);
1594 }
1595 mark_as_hard = false;
1596 }
1597
1598 /* Similar, but free *LISTP at the same time, when the context
1599 is not readonly. HARD should be true if DEP_NONREG should be set on
1600 newly created dependencies. */
1601
1602 static void
1603 add_dependence_list_and_free (struct deps_desc *deps, rtx_insn *insn,
1604 rtx_insn_list **listp,
1605 int uncond, enum reg_note dep_type, bool hard)
1606 {
1607 add_dependence_list (insn, *listp, uncond, dep_type, hard);
1608
1609 /* We don't want to short-circuit dependencies involving debug
1610 insns, because they may cause actual dependencies to be
1611 disregarded. */
1612 if (deps->readonly || DEBUG_INSN_P (insn))
1613 return;
1614
1615 free_INSN_LIST_list (listp);
1616 }
1617
1618 /* Remove all occurrences of INSN from LIST. Return the number of
1619 occurrences removed. */
1620
1621 static int
1622 remove_from_dependence_list (rtx_insn *insn, rtx_insn_list **listp)
1623 {
1624 int removed = 0;
1625
1626 while (*listp)
1627 {
1628 if ((*listp)->insn () == insn)
1629 {
1630 remove_free_INSN_LIST_node (listp);
1631 removed++;
1632 continue;
1633 }
1634
1635 listp = (rtx_insn_list **)&XEXP (*listp, 1);
1636 }
1637
1638 return removed;
1639 }
1640
1641 /* Same as above, but process two lists at once. */
1642 static int
1643 remove_from_both_dependence_lists (rtx_insn *insn,
1644 rtx_insn_list **listp,
1645 rtx_expr_list **exprp)
1646 {
1647 int removed = 0;
1648
1649 while (*listp)
1650 {
1651 if (XEXP (*listp, 0) == insn)
1652 {
1653 remove_free_INSN_LIST_node (listp);
1654 remove_free_EXPR_LIST_node (exprp);
1655 removed++;
1656 continue;
1657 }
1658
1659 listp = (rtx_insn_list **)&XEXP (*listp, 1);
1660 exprp = (rtx_expr_list **)&XEXP (*exprp, 1);
1661 }
1662
1663 return removed;
1664 }
1665
1666 /* Clear all dependencies for an insn. */
1667 static void
1668 delete_all_dependences (rtx_insn *insn)
1669 {
1670 sd_iterator_def sd_it;
1671 dep_t dep;
1672
1673   /* The loop below could be optimized to clear the caches and back_deps
1674      in one call, but that would duplicate code from
1675      sd_delete_dep ().  */
1676
1677 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1678 sd_iterator_cond (&sd_it, &dep);)
1679 sd_delete_dep (sd_it);
1680 }
1681
1682 /* All insns in a scheduling group except the first should only have
1683 dependencies on the previous insn in the group. So we find the
1684 first instruction in the scheduling group by walking the dependence
1685 chains backwards. Then we add the dependencies for the group to
1686 the previous nonnote insn. */
1687
1688 static void
1689 chain_to_prev_insn (rtx_insn *insn)
1690 {
1691 sd_iterator_def sd_it;
1692 dep_t dep;
1693 rtx_insn *prev_nonnote;
1694
1695 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1696 {
1697 rtx_insn *i = insn;
1698 rtx_insn *pro = DEP_PRO (dep);
1699
1700 do
1701 {
1702 i = prev_nonnote_insn (i);
1703
1704 if (pro == i)
1705 goto next_link;
1706 } while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));
1707
1708 if (! sched_insns_conditions_mutex_p (i, pro))
1709 add_dependence (i, pro, DEP_TYPE (dep));
1710 next_link:;
1711 }
1712
1713 delete_all_dependences (insn);
1714
1715 prev_nonnote = prev_nonnote_nondebug_insn (insn);
1716 if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
1717 && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
1718 add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
1719 }
1720 \f
1721 /* Process an insn's memory dependencies. There are four kinds of
1722 dependencies:
1723
1724 (0) read dependence: read follows read
1725 (1) true dependence: read follows write
1726 (2) output dependence: write follows write
1727 (3) anti dependence: write follows read
1728
1729 We are careful to build only dependencies which actually exist, and
1730 use transitivity to avoid building too many links. */
1731
1732 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1733 The MEM is a memory reference contained within INSN, which we are saving
1734 so that we can do memory aliasing on it. */
1735
1736 static void
1737 add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
1738 rtx_insn *insn, rtx mem)
1739 {
1740 rtx_insn_list **insn_list;
1741 rtx_insn_list *insn_node;
1742 rtx_expr_list **mem_list;
1743 rtx_expr_list *mem_node;
1744
1745 gcc_assert (!deps->readonly);
1746 if (read_p)
1747 {
1748 insn_list = &deps->pending_read_insns;
1749 mem_list = &deps->pending_read_mems;
1750 if (!DEBUG_INSN_P (insn))
1751 deps->pending_read_list_length++;
1752 }
1753 else
1754 {
1755 insn_list = &deps->pending_write_insns;
1756 mem_list = &deps->pending_write_mems;
1757 deps->pending_write_list_length++;
1758 }
1759
1760 insn_node = alloc_INSN_LIST (insn, *insn_list);
1761 *insn_list = insn_node;
1762
1763 if (sched_deps_info->use_cselib)
1764 {
1765 mem = shallow_copy_rtx (mem);
1766 XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
1767 GET_MODE (mem), insn);
1768 }
1769 mem_node = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
1770 *mem_list = mem_node;
1771 }
1772
1773 /* Make a dependency between every memory reference on the pending lists
1774 and INSN, thus flushing the pending lists. FOR_READ is true if emitting
1775 dependencies for a read operation, similarly with FOR_WRITE. */
1776
1777 static void
1778 flush_pending_lists (struct deps_desc *deps, rtx_insn *insn, int for_read,
1779 int for_write)
1780 {
1781 if (for_write)
1782 {
1783 add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1784 1, REG_DEP_ANTI, true);
1785 if (!deps->readonly)
1786 {
1787 free_EXPR_LIST_list (&deps->pending_read_mems);
1788 deps->pending_read_list_length = 0;
1789 }
1790 }
1791
1792 add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
1793 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1794 true);
1795
1796 add_dependence_list_and_free (deps, insn,
1797 &deps->last_pending_memory_flush, 1,
1798 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1799 true);
1800
1801 add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
1802 REG_DEP_ANTI, true);
1803
1804 if (DEBUG_INSN_P (insn))
1805 {
1806 if (for_write)
1807 free_INSN_LIST_list (&deps->pending_read_insns);
1808 free_INSN_LIST_list (&deps->pending_write_insns);
1809 free_INSN_LIST_list (&deps->last_pending_memory_flush);
1810 free_INSN_LIST_list (&deps->pending_jump_insns);
1811 }
1812
1813 if (!deps->readonly)
1814 {
1815 free_EXPR_LIST_list (&deps->pending_write_mems);
1816 deps->pending_write_list_length = 0;
1817
1818 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1819 deps->pending_flush_length = 1;
1820 }
1821 mark_as_hard = false;
1822 }
1823 \f
1824 /* Instruction whose dependencies we are analyzing.  */
1825 static rtx_insn *cur_insn = NULL;
1826
1827 /* Implement hooks for haifa scheduler. */
1828
1829 static void
1830 haifa_start_insn (rtx_insn *insn)
1831 {
1832 gcc_assert (insn && !cur_insn);
1833
1834 cur_insn = insn;
1835 }
1836
1837 static void
1838 haifa_finish_insn (void)
1839 {
1840 cur_insn = NULL;
1841 }
1842
1843 void
1844 haifa_note_reg_set (int regno)
1845 {
1846 SET_REGNO_REG_SET (reg_pending_sets, regno);
1847 }
1848
1849 void
1850 haifa_note_reg_clobber (int regno)
1851 {
1852 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1853 }
1854
1855 void
1856 haifa_note_reg_use (int regno)
1857 {
1858 SET_REGNO_REG_SET (reg_pending_uses, regno);
1859 }
1860
1861 static void
1862 haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx_insn *pending_insn, ds_t ds)
1863 {
1864 if (!(ds & SPECULATIVE))
1865 {
1866 mem = NULL_RTX;
1867 pending_mem = NULL_RTX;
1868 }
1869 else
1870 gcc_assert (ds & BEGIN_DATA);
1871
1872 {
1873 dep_def _dep, *dep = &_dep;
1874
1875 init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
1876 current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
1877 DEP_NONREG (dep) = 1;
1878 maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1879 }
1880
1881 }
1882
1883 static void
1884 haifa_note_dep (rtx_insn *elem, ds_t ds)
1885 {
1886 dep_def _dep;
1887 dep_t dep = &_dep;
1888
1889 init_dep (dep, elem, cur_insn, ds_to_dt (ds));
1890 if (mark_as_hard)
1891 DEP_NONREG (dep) = 1;
1892 maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1893 }
1894
1895 static void
1896 note_reg_use (int r)
1897 {
1898 if (sched_deps_info->note_reg_use)
1899 sched_deps_info->note_reg_use (r);
1900 }
1901
1902 static void
1903 note_reg_set (int r)
1904 {
1905 if (sched_deps_info->note_reg_set)
1906 sched_deps_info->note_reg_set (r);
1907 }
1908
1909 static void
1910 note_reg_clobber (int r)
1911 {
1912 if (sched_deps_info->note_reg_clobber)
1913 sched_deps_info->note_reg_clobber (r);
1914 }
1915
1916 static void
1917 note_mem_dep (rtx m1, rtx m2, rtx_insn *e, ds_t ds)
1918 {
1919 if (sched_deps_info->note_mem_dep)
1920 sched_deps_info->note_mem_dep (m1, m2, e, ds);
1921 }
1922
1923 static void
1924 note_dep (rtx_insn *e, ds_t ds)
1925 {
1926 if (sched_deps_info->note_dep)
1927 sched_deps_info->note_dep (e, ds);
1928 }
1929
1930 /* Return the reg_note corresponding to DS.  */
1931 enum reg_note
1932 ds_to_dt (ds_t ds)
1933 {
1934 if (ds & DEP_TRUE)
1935 return REG_DEP_TRUE;
1936 else if (ds & DEP_OUTPUT)
1937 return REG_DEP_OUTPUT;
1938 else if (ds & DEP_ANTI)
1939 return REG_DEP_ANTI;
1940 else
1941 {
1942 gcc_assert (ds & DEP_CONTROL);
1943 return REG_DEP_CONTROL;
1944 }
1945 }
1946
1947 \f
1948
1949 /* Functions for computation of info needed for register pressure
1950 sensitive insn scheduling. */
1951
1952
1953 /* Allocate and return reg_use_data structure for REGNO and INSN. */
1954 static struct reg_use_data *
1955 create_insn_reg_use (int regno, rtx_insn *insn)
1956 {
1957 struct reg_use_data *use;
1958
1959 use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1960 use->regno = regno;
1961 use->insn = insn;
1962 use->next_insn_use = INSN_REG_USE_LIST (insn);
1963 INSN_REG_USE_LIST (insn) = use;
1964 return use;
1965 }
1966
1967 /* Allocate reg_set_data structure for REGNO and INSN. */
1968 static void
1969 create_insn_reg_set (int regno, rtx insn)
1970 {
1971 struct reg_set_data *set;
1972
1973 set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1974 set->regno = regno;
1975 set->insn = insn;
1976 set->next_insn_set = INSN_REG_SET_LIST (insn);
1977 INSN_REG_SET_LIST (insn) = set;
1978 }
1979
1980 /* Set up insn register uses for INSN and dependency context DEPS. */
1981 static void
1982 setup_insn_reg_uses (struct deps_desc *deps, rtx_insn *insn)
1983 {
1984 unsigned i;
1985 reg_set_iterator rsi;
1986 struct reg_use_data *use, *use2, *next;
1987 struct deps_reg *reg_last;
1988
1989 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1990 {
1991 if (i < FIRST_PSEUDO_REGISTER
1992 && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1993 continue;
1994
1995 if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1996 && ! REGNO_REG_SET_P (reg_pending_sets, i)
1997 && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
1998 /* Ignore a use that is not dying. */
1999 continue;
2000
2001 use = create_insn_reg_use (i, insn);
2002 use->next_regno_use = use;
2003 reg_last = &deps->reg_last[i];
2004
2005 /* Create the cycle list of uses. */
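/* All reg_use_data structures for register I (the one just created for
   INSN and one for each insn in reg_last->uses) are linked into a single
   circular list through next_regno_use, so every use of the register
   can be reached from any other.  */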
2006 for (rtx_insn_list *list = reg_last->uses; list; list = list->next ())
2007 {
2008 use2 = create_insn_reg_use (i, list->insn ());
2009 next = use->next_regno_use;
2010 use->next_regno_use = use2;
2011 use2->next_regno_use = next;
2012 }
2013 }
2014 }
2015
2016 /* Register pressure info for the currently processed insn. */
2017 static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
2018
2019 /* Return TRUE if INSN has the use structure for REGNO. */
2020 static bool
2021 insn_use_p (rtx insn, int regno)
2022 {
2023 struct reg_use_data *use;
2024
2025 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2026 if (use->regno == regno)
2027 return true;
2028 return false;
2029 }
2030
2031 /* Update the register pressure info after the birth of pseudo register
2032 REGNO in INSN.  Arguments CLOBBER_P and UNUSED_P say whether the
2033 register is clobbered or unused after the insn. */
2034 static void
2035 mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
2036 {
2037 int incr, new_incr;
2038 enum reg_class cl;
2039
2040 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2041 cl = sched_regno_pressure_class[regno];
2042 if (cl != NO_REGS)
2043 {
2044 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2045 if (clobber_p)
2046 {
2047 new_incr = reg_pressure_info[cl].clobber_increase + incr;
2048 reg_pressure_info[cl].clobber_increase = new_incr;
2049 }
2050 else if (unused_p)
2051 {
2052 new_incr = reg_pressure_info[cl].unused_set_increase + incr;
2053 reg_pressure_info[cl].unused_set_increase = new_incr;
2054 }
2055 else
2056 {
2057 new_incr = reg_pressure_info[cl].set_increase + incr;
2058 reg_pressure_info[cl].set_increase = new_incr;
2059 if (! insn_use_p (insn, regno))
2060 reg_pressure_info[cl].change += incr;
2061 create_insn_reg_set (regno, insn);
2062 }
2063 gcc_assert (new_incr < (1 << INCREASE_BITS));
2064 }
2065 }
2066
2067 /* Like mark_insn_pseudo_birth except that NREGS says how many hard
2068 registers are involved in the birth. */
2069 static void
2070 mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
2071 bool clobber_p, bool unused_p)
2072 {
2073 enum reg_class cl;
2074 int new_incr, last = regno + nregs;
2075
2076 while (regno < last)
2077 {
2078 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2079 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2080 {
2081 cl = sched_regno_pressure_class[regno];
2082 if (cl != NO_REGS)
2083 {
2084 if (clobber_p)
2085 {
2086 new_incr = reg_pressure_info[cl].clobber_increase + 1;
2087 reg_pressure_info[cl].clobber_increase = new_incr;
2088 }
2089 else if (unused_p)
2090 {
2091 new_incr = reg_pressure_info[cl].unused_set_increase + 1;
2092 reg_pressure_info[cl].unused_set_increase = new_incr;
2093 }
2094 else
2095 {
2096 new_incr = reg_pressure_info[cl].set_increase + 1;
2097 reg_pressure_info[cl].set_increase = new_incr;
2098 if (! insn_use_p (insn, regno))
2099 reg_pressure_info[cl].change += 1;
2100 create_insn_reg_set (regno, insn);
2101 }
2102 gcc_assert (new_incr < (1 << INCREASE_BITS));
2103 }
2104 }
2105 regno++;
2106 }
2107 }
2108
2109 /* Update the register pressure info after the birth of pseudo or hard
2110 register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say
2111 whether the register is clobbered or unused after the
2112 insn. */
2113 static void
2114 mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
2115 {
2116 int regno;
2117
2118 if (GET_CODE (reg) == SUBREG)
2119 reg = SUBREG_REG (reg);
2120
2121 if (! REG_P (reg))
2122 return;
2123
2124 regno = REGNO (reg);
2125 if (regno < FIRST_PSEUDO_REGISTER)
2126 mark_insn_hard_regno_birth (insn, regno, REG_NREGS (reg),
2127 clobber_p, unused_p);
2128 else
2129 mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
2130 }
2131
2132 /* Update the register pressure info after death of pseudo register
2133 REGNO. */
2134 static void
2135 mark_pseudo_death (int regno)
2136 {
2137 int incr;
2138 enum reg_class cl;
2139
2140 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2141 cl = sched_regno_pressure_class[regno];
2142 if (cl != NO_REGS)
2143 {
2144 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2145 reg_pressure_info[cl].change -= incr;
2146 }
2147 }
2148
2149 /* Like mark_pseudo_death except that NREGS says how many hard
2150 registers are involved in the death. */
2151 static void
2152 mark_hard_regno_death (int regno, int nregs)
2153 {
2154 enum reg_class cl;
2155 int last = regno + nregs;
2156
2157 while (regno < last)
2158 {
2159 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2160 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2161 {
2162 cl = sched_regno_pressure_class[regno];
2163 if (cl != NO_REGS)
2164 reg_pressure_info[cl].change -= 1;
2165 }
2166 regno++;
2167 }
2168 }
2169
2170 /* Update the register pressure info after death of pseudo or hard
2171 register REG. */
2172 static void
2173 mark_reg_death (rtx reg)
2174 {
2175 int regno;
2176
2177 if (GET_CODE (reg) == SUBREG)
2178 reg = SUBREG_REG (reg);
2179
2180 if (! REG_P (reg))
2181 return;
2182
2183 regno = REGNO (reg);
2184 if (regno < FIRST_PSEUDO_REGISTER)
2185 mark_hard_regno_death (regno, REG_NREGS (reg));
2186 else
2187 mark_pseudo_death (regno);
2188 }
2189
2190 /* Process SETTER of REG. DATA is an insn containing the setter. */
2191 static void
2192 mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
2193 {
2194 if (setter != NULL_RTX && GET_CODE (setter) != SET)
2195 return;
2196 mark_insn_reg_birth
2197 ((rtx) data, reg, false,
2198 find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
2199 }
2200
2201 /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs. */
2202 static void
2203 mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
2204 {
2205 if (GET_CODE (setter) == CLOBBER)
2206 mark_insn_reg_birth ((rtx) data, reg, true, false);
2207 }
2208
2209 /* Set up reg pressure info related to INSN. */
2210 void
2211 init_insn_reg_pressure_info (rtx_insn *insn)
2212 {
2213 int i, len;
2214 enum reg_class cl;
2215 static struct reg_pressure_data *pressure_info;
2216 rtx link;
2217
2218 gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);
2219
2220 if (! INSN_P (insn))
2221 return;
2222
2223 for (i = 0; i < ira_pressure_classes_num; i++)
2224 {
2225 cl = ira_pressure_classes[i];
2226 reg_pressure_info[cl].clobber_increase = 0;
2227 reg_pressure_info[cl].set_increase = 0;
2228 reg_pressure_info[cl].unused_set_increase = 0;
2229 reg_pressure_info[cl].change = 0;
2230 }
2231
2232 note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
2233
2234 note_stores (PATTERN (insn), mark_insn_reg_store, insn);
2235
2236 #ifdef AUTO_INC_DEC
2237 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2238 if (REG_NOTE_KIND (link) == REG_INC)
2239 mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2240 #endif
2241
2242 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2243 if (REG_NOTE_KIND (link) == REG_DEAD)
2244 mark_reg_death (XEXP (link, 0));
2245
2246 len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
2247 pressure_info
2248 = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2249 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2250 INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
2251 * sizeof (int), 1);
2252 for (i = 0; i < ira_pressure_classes_num; i++)
2253 {
2254 cl = ira_pressure_classes[i];
2255 pressure_info[i].clobber_increase
2256 = reg_pressure_info[cl].clobber_increase;
2257 pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2258 pressure_info[i].unused_set_increase
2259 = reg_pressure_info[cl].unused_set_increase;
2260 pressure_info[i].change = reg_pressure_info[cl].change;
2261 }
2262 }
2263
2264
2265 \f
2266
2267 /* Internal variable for the sched_analyze_[12] () functions.
2268 If it is true, it means that sched_analyze_[12] is looking
2269 at the outermost SET of the insn being analyzed. */
2270 static bool can_start_lhs_rhs_p;
2271
2272 /* Extend reg info for the deps context DEPS given that
2273 we have just generated a register numbered REGNO. */
2274 static void
2275 extend_deps_reg_info (struct deps_desc *deps, int regno)
2276 {
2277 int max_regno = regno + 1;
2278
2279 gcc_assert (!reload_completed);
2280
2281 /* In a readonly context, it would not hurt to extend info,
2282 but it should not be needed. */
2283 if (reload_completed && deps->readonly)
2284 {
2285 deps->max_reg = max_regno;
2286 return;
2287 }
2288
2289 if (max_regno > deps->max_reg)
2290 {
2291 deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2292 max_regno);
2293 memset (&deps->reg_last[deps->max_reg],
2294 0, (max_regno - deps->max_reg)
2295 * sizeof (struct deps_reg));
2296 deps->max_reg = max_regno;
2297 }
2298 }
2299
2300 /* Extends REG_INFO_P if needed. */
2301 void
2302 maybe_extend_reg_info_p (void)
2303 {
2304 /* Extend REG_INFO_P, if needed. */
2305 if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2306 {
2307 size_t new_reg_info_p_size = max_regno + 128;
2308
2309 gcc_assert (!reload_completed && sel_sched_p ());
2310
2311 reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2312 new_reg_info_p_size,
2313 reg_info_p_size,
2314 sizeof (*reg_info_p));
2315 reg_info_p_size = new_reg_info_p_size;
2316 }
2317 }
2318
2319 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2320 The type of the reference is specified by REF and can be SET,
2321 CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
2322
2323 static void
2324 sched_analyze_reg (struct deps_desc *deps, int regno, machine_mode mode,
2325 enum rtx_code ref, rtx_insn *insn)
2326 {
2327 /* We could emit new pseudos in renaming. Extend the reg structures. */
2328 if (!reload_completed && sel_sched_p ()
2329 && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2330 extend_deps_reg_info (deps, regno);
2331
2332 maybe_extend_reg_info_p ();
2333
2334 /* A hard reg in a wide mode may really be multiple registers.
2335 If so, mark all of them just like the first. */
2336 if (regno < FIRST_PSEUDO_REGISTER)
2337 {
2338 int i = hard_regno_nregs[regno][mode];
2339 if (ref == SET)
2340 {
2341 while (--i >= 0)
2342 note_reg_set (regno + i);
2343 }
2344 else if (ref == USE)
2345 {
2346 while (--i >= 0)
2347 note_reg_use (regno + i);
2348 }
2349 else
2350 {
2351 while (--i >= 0)
2352 note_reg_clobber (regno + i);
2353 }
2354 }
2355
2356 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2357 it does not reload. Ignore these as they have served their
2358 purpose already. */
2359 else if (regno >= deps->max_reg)
2360 {
2361 enum rtx_code code = GET_CODE (PATTERN (insn));
2362 gcc_assert (code == USE || code == CLOBBER);
2363 }
2364
2365 else
2366 {
2367 if (ref == SET)
2368 note_reg_set (regno);
2369 else if (ref == USE)
2370 note_reg_use (regno);
2371 else
2372 note_reg_clobber (regno);
2373
2374 /* Pseudos that are REG_EQUIV to something may be replaced
2375 by that during reloading. We need only add dependencies for
2376 the address in the REG_EQUIV note. */
2377 if (!reload_completed && get_reg_known_equiv_p (regno))
2378 {
2379 rtx t = get_reg_known_value (regno);
2380 if (MEM_P (t))
2381 sched_analyze_2 (deps, XEXP (t, 0), insn);
2382 }
2383
2384 /* Don't let it cross a call after scheduling if it doesn't
2385 already cross one. */
2386 if (REG_N_CALLS_CROSSED (regno) == 0)
2387 {
2388 if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2389 deps->sched_before_next_call
2390 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2391 else
2392 add_dependence_list (insn, deps->last_function_call, 1,
2393 REG_DEP_ANTI, false);
2394 }
2395 }
2396 }
2397
2398 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2399 rtx, X, creating all dependencies generated by the write to the
2400 destination of X, and reads of everything mentioned. */
2401
2402 static void
2403 sched_analyze_1 (struct deps_desc *deps, rtx x, rtx_insn *insn)
2404 {
2405 rtx dest = XEXP (x, 0);
2406 enum rtx_code code = GET_CODE (x);
2407 bool cslr_p = can_start_lhs_rhs_p;
2408
2409 can_start_lhs_rhs_p = false;
2410
2411 gcc_assert (dest);
2412 if (dest == 0)
2413 return;
2414
2415 if (cslr_p && sched_deps_info->start_lhs)
2416 sched_deps_info->start_lhs (dest);
2417
2418 if (GET_CODE (dest) == PARALLEL)
2419 {
2420 int i;
2421
2422 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2423 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2424 sched_analyze_1 (deps,
2425 gen_rtx_CLOBBER (VOIDmode,
2426 XEXP (XVECEXP (dest, 0, i), 0)),
2427 insn);
2428
2429 if (cslr_p && sched_deps_info->finish_lhs)
2430 sched_deps_info->finish_lhs ();
2431
2432 if (code == SET)
2433 {
2434 can_start_lhs_rhs_p = cslr_p;
2435
2436 sched_analyze_2 (deps, SET_SRC (x), insn);
2437
2438 can_start_lhs_rhs_p = false;
2439 }
2440
2441 return;
2442 }
2443
2444 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2445 || GET_CODE (dest) == ZERO_EXTRACT)
2446 {
2447 if (GET_CODE (dest) == STRICT_LOW_PART
2448 || GET_CODE (dest) == ZERO_EXTRACT
2449 || df_read_modify_subreg_p (dest))
2450 {
2451 /* These both read and modify the result. We must handle
2452 them as writes to get proper dependencies for following
2453 instructions. We must handle them as reads to get proper
2454 dependencies from this to previous instructions.
2455 Thus we need to call sched_analyze_2. */
2456
2457 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2458 }
2459 if (GET_CODE (dest) == ZERO_EXTRACT)
2460 {
2461 /* The second and third arguments are values read by this insn. */
2462 sched_analyze_2 (deps, XEXP (dest, 1), insn);
2463 sched_analyze_2 (deps, XEXP (dest, 2), insn);
2464 }
2465 dest = XEXP (dest, 0);
2466 }
2467
2468 if (REG_P (dest))
2469 {
2470 int regno = REGNO (dest);
2471 machine_mode mode = GET_MODE (dest);
2472
2473 sched_analyze_reg (deps, regno, mode, code, insn);
2474
2475 #ifdef STACK_REGS
2476 /* Treat all writes to a stack register as modifying the TOS. */
2477 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2478 {
2479 /* Avoid analyzing the same register twice. */
2480 if (regno != FIRST_STACK_REG)
2481 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2482
2483 add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
2484 FIRST_STACK_REG);
2485 }
2486 #endif
2487 }
2488 else if (MEM_P (dest))
2489 {
2490 /* Writing memory. */
2491 rtx t = dest;
2492
2493 if (sched_deps_info->use_cselib)
2494 {
2495 machine_mode address_mode = get_address_mode (dest);
2496
2497 t = shallow_copy_rtx (dest);
2498 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2499 GET_MODE (t), insn);
2500 XEXP (t, 0)
2501 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2502 insn);
2503 }
2504 t = canon_rtx (t);
2505
2506 /* Pending lists can't grow any larger in a readonly context. */
2507 if (!deps->readonly
2508 && ((deps->pending_read_list_length + deps->pending_write_list_length)
2509 >= MAX_PENDING_LIST_LENGTH))
2510 {
2511 /* Flush all pending reads and writes to prevent the pending lists
2512 from getting any larger. Insn scheduling runs too slowly when
2513 these lists get long. When compiling GCC with itself,
2514 this flush occurs 8 times for sparc, and 10 times for m88k using
2515 the default value of 32. */
2516 flush_pending_lists (deps, insn, false, true);
2517 }
2518 else
2519 {
2520 rtx_insn_list *pending;
2521 rtx_expr_list *pending_mem;
2522
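/* A store anti-depends on every pending load that reads a location it
   may overwrite, and output-depends on every pending store that may
   write the same location.  */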
2523 pending = deps->pending_read_insns;
2524 pending_mem = deps->pending_read_mems;
2525 while (pending)
2526 {
2527 if (anti_dependence (pending_mem->element (), t)
2528 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2529 note_mem_dep (t, pending_mem->element (), pending->insn (),
2530 DEP_ANTI);
2531
2532 pending = pending->next ();
2533 pending_mem = pending_mem->next ();
2534 }
2535
2536 pending = deps->pending_write_insns;
2537 pending_mem = deps->pending_write_mems;
2538 while (pending)
2539 {
2540 if (output_dependence (pending_mem->element (), t)
2541 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2542 note_mem_dep (t, pending_mem->element (),
2543 pending->insn (),
2544 DEP_OUTPUT);
2545
2546 pending = pending->next ();
2547 pending_mem = pending_mem->next ();
2548 }
2549
2550 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2551 REG_DEP_ANTI, true);
2552 add_dependence_list (insn, deps->pending_jump_insns, 1,
2553 REG_DEP_CONTROL, true);
2554
2555 if (!deps->readonly)
2556 add_insn_mem_dependence (deps, false, insn, dest);
2557 }
2558 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2559 }
2560
2561 if (cslr_p && sched_deps_info->finish_lhs)
2562 sched_deps_info->finish_lhs ();
2563
2564 /* Analyze reads. */
2565 if (GET_CODE (x) == SET)
2566 {
2567 can_start_lhs_rhs_p = cslr_p;
2568
2569 sched_analyze_2 (deps, SET_SRC (x), insn);
2570
2571 can_start_lhs_rhs_p = false;
2572 }
2573 }
2574
2575 /* Analyze the uses of memory and registers in rtx X in INSN. */
2576 static void
2577 sched_analyze_2 (struct deps_desc *deps, rtx x, rtx_insn *insn)
2578 {
2579 int i;
2580 int j;
2581 enum rtx_code code;
2582 const char *fmt;
2583 bool cslr_p = can_start_lhs_rhs_p;
2584
2585 can_start_lhs_rhs_p = false;
2586
2587 gcc_assert (x);
2588 if (x == 0)
2589 return;
2590
2591 if (cslr_p && sched_deps_info->start_rhs)
2592 sched_deps_info->start_rhs (x);
2593
2594 code = GET_CODE (x);
2595
2596 switch (code)
2597 {
2598 CASE_CONST_ANY:
2599 case SYMBOL_REF:
2600 case CONST:
2601 case LABEL_REF:
2602 /* Ignore constants. */
2603 if (cslr_p && sched_deps_info->finish_rhs)
2604 sched_deps_info->finish_rhs ();
2605
2606 return;
2607
2608 case CC0:
2609 if (!HAVE_cc0)
2610 gcc_unreachable ();
2611
2612 /* User of CC0 depends on immediately preceding insn. */
2613 SCHED_GROUP_P (insn) = 1;
2614 /* Don't move the CC0 setter to another block (it can set up the
2615 same flag for previous CC0 users, which is safe). */
2616 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2617
2618 if (cslr_p && sched_deps_info->finish_rhs)
2619 sched_deps_info->finish_rhs ();
2620
2621 return;
2622
2623 case REG:
2624 {
2625 int regno = REGNO (x);
2626 machine_mode mode = GET_MODE (x);
2627
2628 sched_analyze_reg (deps, regno, mode, USE, insn);
2629
2630 #ifdef STACK_REGS
2631 /* Treat all reads of a stack register as modifying the TOS. */
2632 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2633 {
2634 /* Avoid analyzing the same register twice. */
2635 if (regno != FIRST_STACK_REG)
2636 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2637 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2638 }
2639 #endif
2640
2641 if (cslr_p && sched_deps_info->finish_rhs)
2642 sched_deps_info->finish_rhs ();
2643
2644 return;
2645 }
2646
2647 case MEM:
2648 {
2649 /* Reading memory. */
2650 rtx_insn_list *u;
2651 rtx_insn_list *pending;
2652 rtx_expr_list *pending_mem;
2653 rtx t = x;
2654
2655 if (sched_deps_info->use_cselib)
2656 {
2657 machine_mode address_mode = get_address_mode (t);
2658
2659 t = shallow_copy_rtx (t);
2660 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2661 GET_MODE (t), insn);
2662 XEXP (t, 0)
2663 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2664 insn);
2665 }
2666
2667 if (!DEBUG_INSN_P (insn))
2668 {
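/* A load may conflict with pending loads (read_dependence, e.g. for
   volatile accesses) and with pending stores (true dependence); the
   latter can be recorded as a speculative BEGIN_DATA dependence when
   data speculation is enabled.  */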
2669 t = canon_rtx (t);
2670 pending = deps->pending_read_insns;
2671 pending_mem = deps->pending_read_mems;
2672 while (pending)
2673 {
2674 if (read_dependence (pending_mem->element (), t)
2675 && ! sched_insns_conditions_mutex_p (insn,
2676 pending->insn ()))
2677 note_mem_dep (t, pending_mem->element (),
2678 pending->insn (),
2679 DEP_ANTI);
2680
2681 pending = pending->next ();
2682 pending_mem = pending_mem->next ();
2683 }
2684
2685 pending = deps->pending_write_insns;
2686 pending_mem = deps->pending_write_mems;
2687 while (pending)
2688 {
2689 if (true_dependence (pending_mem->element (), VOIDmode, t)
2690 && ! sched_insns_conditions_mutex_p (insn,
2691 pending->insn ()))
2692 note_mem_dep (t, pending_mem->element (),
2693 pending->insn (),
2694 sched_deps_info->generate_spec_deps
2695 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2696
2697 pending = pending->next ();
2698 pending_mem = pending_mem->next ();
2699 }
2700
2701 for (u = deps->last_pending_memory_flush; u; u = u->next ())
2702 add_dependence (insn, u->insn (), REG_DEP_ANTI);
2703
2704 for (u = deps->pending_jump_insns; u; u = u->next ())
2705 if (deps_may_trap_p (x))
2706 {
2707 if ((sched_deps_info->generate_spec_deps)
2708 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2709 {
2710 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2711 MAX_DEP_WEAK);
2712
2713 note_dep (u->insn (), ds);
2714 }
2715 else
2716 add_dependence (insn, u->insn (), REG_DEP_CONTROL);
2717 }
2718 }
2719
2720 /* Always add these dependencies to pending_reads, since
2721 this insn may be followed by a write. */
2722 if (!deps->readonly)
2723 {
2724 if ((deps->pending_read_list_length
2725 + deps->pending_write_list_length)
2726 >= MAX_PENDING_LIST_LENGTH
2727 && !DEBUG_INSN_P (insn))
2728 flush_pending_lists (deps, insn, true, true);
2729 add_insn_mem_dependence (deps, true, insn, x);
2730 }
2731
2732 sched_analyze_2 (deps, XEXP (x, 0), insn);
2733
2734 if (cslr_p && sched_deps_info->finish_rhs)
2735 sched_deps_info->finish_rhs ();
2736
2737 return;
2738 }
2739
2740 /* Force pending stores to memory in case a trap handler needs them. */
2741 case TRAP_IF:
2742 flush_pending_lists (deps, insn, true, false);
2743 break;
2744
2745 case PREFETCH:
2746 if (PREFETCH_SCHEDULE_BARRIER_P (x))
2747 reg_pending_barrier = TRUE_BARRIER;
2748 /* A prefetch insn contains only addresses, so if the prefetch
2749 address has no registers, there would be no dependencies on
2750 the prefetch insn.  That is wrong from a correctness point of
2751 view, because such a prefetch could be moved below a jump insn,
2752 which usually generates a MOVE_BARRIER that prevents insns
2753 containing registers or memory references from being moved
2754 across the barrier.  It is also wrong from a performance point
2755 of view, because a prefetch without dependencies tends to be
2756 issued later instead of earlier.  It is hard to generate
2757 accurate dependencies for prefetch insns, since a prefetch has
2758 only the start address, but it is better to have something
2759 than nothing. */
2760 if (!deps->readonly)
2761 {
2762 rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
2763 if (sched_deps_info->use_cselib)
2764 cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
2765 add_insn_mem_dependence (deps, true, insn, x);
2766 }
2767 break;
2768
2769 case UNSPEC_VOLATILE:
2770 flush_pending_lists (deps, insn, true, true);
2771 /* FALLTHRU */
2772
2773 case ASM_OPERANDS:
2774 case ASM_INPUT:
2775 {
2776 /* Traditional and volatile asm instructions must be considered to use
2777 and clobber all hard registers, all pseudo-registers and all of
2778 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2779
2780 Consider for instance a volatile asm that changes the fpu rounding
2781 mode. An insn should not be moved across this even if it only uses
2782 pseudo-regs because it might give an incorrectly rounded result. */
2783 if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2784 && !DEBUG_INSN_P (insn))
2785 reg_pending_barrier = TRUE_BARRIER;
2786
2787 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2788 We cannot just fall through here, since then we would be confused
2789 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2790 a traditional asm, unlike its normal usage. */
2791
2792 if (code == ASM_OPERANDS)
2793 {
2794 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2795 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2796
2797 if (cslr_p && sched_deps_info->finish_rhs)
2798 sched_deps_info->finish_rhs ();
2799
2800 return;
2801 }
2802 break;
2803 }
2804
2805 case PRE_DEC:
2806 case POST_DEC:
2807 case PRE_INC:
2808 case POST_INC:
2809 /* These both read and modify the result. We must handle them as writes
2810 to get proper dependencies for following instructions. We must handle
2811 them as reads to get proper dependencies from this to previous
2812 instructions. Thus we need to pass them to both sched_analyze_1
2813 and sched_analyze_2. We must call sched_analyze_2 first in order
2814 to get the proper antecedent for the read. */
2815 sched_analyze_2 (deps, XEXP (x, 0), insn);
2816 sched_analyze_1 (deps, x, insn);
2817
2818 if (cslr_p && sched_deps_info->finish_rhs)
2819 sched_deps_info->finish_rhs ();
2820
2821 return;
2822
2823 case POST_MODIFY:
2824 case PRE_MODIFY:
2825 /* op0 = op0 + op1 */
2826 sched_analyze_2 (deps, XEXP (x, 0), insn);
2827 sched_analyze_2 (deps, XEXP (x, 1), insn);
2828 sched_analyze_1 (deps, x, insn);
2829
2830 if (cslr_p && sched_deps_info->finish_rhs)
2831 sched_deps_info->finish_rhs ();
2832
2833 return;
2834
2835 default:
2836 break;
2837 }
2838
2839 /* Other cases: walk the insn. */
2840 fmt = GET_RTX_FORMAT (code);
2841 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2842 {
2843 if (fmt[i] == 'e')
2844 sched_analyze_2 (deps, XEXP (x, i), insn);
2845 else if (fmt[i] == 'E')
2846 for (j = 0; j < XVECLEN (x, i); j++)
2847 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2848 }
2849
2850 if (cslr_p && sched_deps_info->finish_rhs)
2851 sched_deps_info->finish_rhs ();
2852 }
2853
2854 /* Try to group two fusible insns together to prevent the scheduler
2855 from scheduling them apart. */
2856
2857 static void
2858 sched_macro_fuse_insns (rtx_insn *insn)
2859 {
2860 rtx_insn *prev;
2861
2862 if (any_condjump_p (insn))
2863 {
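/* For a conditional jump, only try fusion when the jump reads the
   target's fixed condition-code register and the previous insn
   modifies that register.  */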
2864 unsigned int condreg1, condreg2;
2865 rtx cc_reg_1;
2866 targetm.fixed_condition_code_regs (&condreg1, &condreg2);
2867 cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
2868 prev = prev_nonnote_nondebug_insn (insn);
2869 if (!reg_referenced_p (cc_reg_1, PATTERN (insn))
2870 || !prev
2871 || !modified_in_p (cc_reg_1, prev))
2872 return;
2873 }
2874 else
2875 {
2876 rtx insn_set = single_set (insn);
2877
2878 prev = prev_nonnote_nondebug_insn (insn);
2879 if (!prev
2880 || !insn_set
2881 || !single_set (prev))
2882 return;
2883
2884 }
2885
2886 if (targetm.sched.macro_fusion_pair_p (prev, insn))
2887 SCHED_GROUP_P (insn) = 1;
2888
2889 }
2890
2891 /* Analyze an INSN with pattern X to find all dependencies. */
2892 static void
2893 sched_analyze_insn (struct deps_desc *deps, rtx x, rtx_insn *insn)
2894 {
2895 RTX_CODE code = GET_CODE (x);
2896 rtx link;
2897 unsigned i;
2898 reg_set_iterator rsi;
2899
2900 if (! reload_completed)
2901 {
2902 HARD_REG_SET temp;
2903
2904 extract_insn (insn);
2905 preprocess_constraints (insn);
2906 ira_implicitly_set_insn_hard_regs (&temp);
2907 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
2908 IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2909 }
2910
2911 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2912 && code == SET);
2913
2914 /* Group compare and branch insns for macro-fusion. */
2915 if (targetm.sched.macro_fusion_p
2916 && targetm.sched.macro_fusion_p ())
2917 sched_macro_fuse_insns (insn);
2918
2919 if (may_trap_p (x))
2920 /* Avoid moving trapping instructions across function calls that might
2921 not always return. */
2922 add_dependence_list (insn, deps->last_function_call_may_noreturn,
2923 1, REG_DEP_ANTI, true);
2924
2925 /* We must avoid creating a situation in which two successors of the
2926 current block have different unwind info after scheduling. If at any
2927 point the two paths re-join this leads to incorrect unwind info. */
2928 /* ??? There are certain situations involving a forced frame pointer in
2929 which, with extra effort, we could fix up the unwind info at a later
2930 CFG join. However, it seems better to notice these cases earlier
2931 during prologue generation and avoid marking the frame pointer setup
2932 as frame-related at all. */
2933 if (RTX_FRAME_RELATED_P (insn))
2934 {
2935 /* Make sure prologue insn is scheduled before next jump. */
2936 deps->sched_before_next_jump
2937 = alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2938
2939 /* Make sure epilogue insn is scheduled after preceding jumps. */
2940 add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
2941 true);
2942 }
2943
2944 if (code == COND_EXEC)
2945 {
2946 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2947
2948 /* ??? Should be recording conditions so we reduce the number of
2949 false dependencies. */
2950 x = COND_EXEC_CODE (x);
2951 code = GET_CODE (x);
2952 }
2953 if (code == SET || code == CLOBBER)
2954 {
2955 sched_analyze_1 (deps, x, insn);
2956
2957 /* Bare clobber insns are used for letting life analysis, reg-stack
2958 and others know that a value is dead. Depend on the last call
2959 instruction so that reg-stack won't get confused. */
2960 if (code == CLOBBER)
2961 add_dependence_list (insn, deps->last_function_call, 1,
2962 REG_DEP_OUTPUT, true);
2963 }
2964 else if (code == PARALLEL)
2965 {
2966 for (i = XVECLEN (x, 0); i--;)
2967 {
2968 rtx sub = XVECEXP (x, 0, i);
2969 code = GET_CODE (sub);
2970
2971 if (code == COND_EXEC)
2972 {
2973 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2974 sub = COND_EXEC_CODE (sub);
2975 code = GET_CODE (sub);
2976 }
2977 if (code == SET || code == CLOBBER)
2978 sched_analyze_1 (deps, sub, insn);
2979 else
2980 sched_analyze_2 (deps, sub, insn);
2981 }
2982 }
2983 else
2984 sched_analyze_2 (deps, x, insn);
2985
2986 /* Mark registers clobbered or used by the called function. */
2987 if (CALL_P (insn))
2988 {
2989 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2990 {
2991 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2992 sched_analyze_1 (deps, XEXP (link, 0), insn);
2993 else if (GET_CODE (XEXP (link, 0)) != SET)
2994 sched_analyze_2 (deps, XEXP (link, 0), insn);
2995 }
2996 /* Don't schedule anything after a tail call; a tail call needs
2997 to use at least all call-saved registers. */
2998 if (SIBLING_CALL_P (insn))
2999 reg_pending_barrier = TRUE_BARRIER;
3000 else if (find_reg_note (insn, REG_SETJMP, NULL))
3001 reg_pending_barrier = MOVE_BARRIER;
3002 }
3003
3004 if (JUMP_P (insn))
3005 {
3006 rtx next;
3007 next = next_nonnote_nondebug_insn (insn);
3008 if (next && BARRIER_P (next))
3009 reg_pending_barrier = MOVE_BARRIER;
3010 else
3011 {
3012 rtx_insn_list *pending;
3013 rtx_expr_list *pending_mem;
3014
3015 if (sched_deps_info->compute_jump_reg_dependencies)
3016 {
3017 (*sched_deps_info->compute_jump_reg_dependencies)
3018 (insn, reg_pending_control_uses);
3019
3020 /* Make latency of jump equal to 0 by using anti-dependence. */
3021 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3022 {
3023 struct deps_reg *reg_last = &deps->reg_last[i];
3024 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
3025 false);
3026 add_dependence_list (insn, reg_last->implicit_sets,
3027 0, REG_DEP_ANTI, false);
3028 add_dependence_list (insn, reg_last->clobbers, 0,
3029 REG_DEP_ANTI, false);
3030 }
3031 }
3032
3033 /* All memory writes and volatile reads must happen before the
3034 jump.  Non-volatile reads must happen before the jump iff their
3035 result is needed by the register use mask computed above. */
3036
3037 pending = deps->pending_write_insns;
3038 pending_mem = deps->pending_write_mems;
3039 while (pending)
3040 {
3041 if (! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3042 add_dependence (insn, pending->insn (),
3043 REG_DEP_OUTPUT);
3044 pending = pending->next ();
3045 pending_mem = pending_mem->next ();
3046 }
3047
3048 pending = deps->pending_read_insns;
3049 pending_mem = deps->pending_read_mems;
3050 while (pending)
3051 {
3052 if (MEM_VOLATILE_P (pending_mem->element ())
3053 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3054 add_dependence (insn, pending->insn (),
3055 REG_DEP_OUTPUT);
3056 pending = pending->next ();
3057 pending_mem = pending_mem->next ();
3058 }
3059
3060 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
3061 REG_DEP_ANTI, true);
3062 add_dependence_list (insn, deps->pending_jump_insns, 1,
3063 REG_DEP_ANTI, true);
3064 }
3065 }
3066
3067 /* If this instruction can throw an exception, then moving it changes
3068 where block boundaries fall. This is mighty confusing elsewhere.
3069 Therefore, prevent such an instruction from being moved. Same for
3070 non-jump instructions that define block boundaries.
3071 ??? Unclear whether this is still necessary in EBB mode. If not,
3072 add_branch_dependences should be adjusted for RGN mode instead. */
3073 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
3074 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
3075 reg_pending_barrier = MOVE_BARRIER;
3076
3077 if (sched_pressure != SCHED_PRESSURE_NONE)
3078 {
3079 setup_insn_reg_uses (deps, insn);
3080 init_insn_reg_pressure_info (insn);
3081 }
3082
3083 /* Add register dependencies for insn. */
3084 if (DEBUG_INSN_P (insn))
3085 {
3086 rtx_insn *prev = deps->last_debug_insn;
3087 rtx_insn_list *u;
3088
3089 if (!deps->readonly)
3090 deps->last_debug_insn = insn;
3091
3092 if (prev)
3093 add_dependence (insn, prev, REG_DEP_ANTI);
3094
3095 add_dependence_list (insn, deps->last_function_call, 1,
3096 REG_DEP_ANTI, false);
3097
3098 if (!sel_sched_p ())
3099 for (u = deps->last_pending_memory_flush; u; u = u->next ())
3100 add_dependence (insn, u->insn (), REG_DEP_ANTI);
3101
3102 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3103 {
3104 struct deps_reg *reg_last = &deps->reg_last[i];
3105 add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
3106 /* There's no point in making REG_DEP_CONTROL dependencies for
3107 debug insns. */
3108 add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
3109 false);
3110
3111 if (!deps->readonly)
3112 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3113 }
3114 CLEAR_REG_SET (reg_pending_uses);
3115
3116 /* Quite often, a debug insn will refer to stuff in the
3117 previous instruction, but the reason we want this
3118 dependency here is to make sure the scheduler doesn't
3119 gratuitously move a debug insn ahead. This could dirty
3120 DF flags and cause additional analysis that wouldn't have
3121 occurred in compilation without debug insns, and such
3122 additional analysis can modify the generated code. */
3123 prev = PREV_INSN (insn);
3124
3125 if (prev && NONDEBUG_INSN_P (prev))
3126 add_dependence (insn, prev, REG_DEP_ANTI);
3127 }
3128 else
3129 {
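/* A non-debug insn: turn the register uses, sets and clobbers
   collected by sched_analyze_1/2 into dependencies against the last
   recorded producers and consumers of each register.  */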
3130 regset_head set_or_clobbered;
3131
3132 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3133 {
3134 struct deps_reg *reg_last = &deps->reg_last[i];
3135 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3136 add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
3137 false);
3138 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3139 false);
3140
3141 if (!deps->readonly)
3142 {
3143 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3144 reg_last->uses_length++;
3145 }
3146 }
3147
3148 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3149 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3150 {
3151 struct deps_reg *reg_last = &deps->reg_last[i];
3152 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3153 add_dependence_list (insn, reg_last->implicit_sets, 0,
3154 REG_DEP_ANTI, false);
3155 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3156 false);
3157
3158 if (!deps->readonly)
3159 {
3160 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3161 reg_last->uses_length++;
3162 }
3163 }
3164
3165 if (targetm.sched.exposed_pipeline)
3166 {
3167 INIT_REG_SET (&set_or_clobbered);
3168 bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3169 reg_pending_sets);
3170 EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3171 {
3172 struct deps_reg *reg_last = &deps->reg_last[i];
3173 rtx list;
3174 for (list = reg_last->uses; list; list = XEXP (list, 1))
3175 {
3176 rtx other = XEXP (list, 0);
3177 if (INSN_CACHED_COND (other) != const_true_rtx
3178 && refers_to_regno_p (i, INSN_CACHED_COND (other)))
3179 INSN_CACHED_COND (other) = const_true_rtx;
3180 }
3181 }
3182 }
3183
3184 /* If the current insn is conditional, we can't free any
3185 of the lists. */
3186 if (sched_has_condition_p (insn))
3187 {
3188 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3189 {
3190 struct deps_reg *reg_last = &deps->reg_last[i];
3191 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3192 false);
3193 add_dependence_list (insn, reg_last->implicit_sets, 0,
3194 REG_DEP_ANTI, false);
3195 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3196 false);
3197 add_dependence_list (insn, reg_last->control_uses, 0,
3198 REG_DEP_CONTROL, false);
3199
3200 if (!deps->readonly)
3201 {
3202 reg_last->clobbers
3203 = alloc_INSN_LIST (insn, reg_last->clobbers);
3204 reg_last->clobbers_length++;
3205 }
3206 }
3207 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3208 {
3209 struct deps_reg *reg_last = &deps->reg_last[i];
3210 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3211 false);
3212 add_dependence_list (insn, reg_last->implicit_sets, 0,
3213 REG_DEP_ANTI, false);
3214 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
3215 false);
3216 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3217 false);
3218 add_dependence_list (insn, reg_last->control_uses, 0,
3219 REG_DEP_CONTROL, false);
3220
3221 if (!deps->readonly)
3222 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3223 }
3224 }
3225 else
3226 {
3227 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3228 {
3229 struct deps_reg *reg_last = &deps->reg_last[i];
3230 if (reg_last->uses_length >= MAX_PENDING_LIST_LENGTH
3231 || reg_last->clobbers_length >= MAX_PENDING_LIST_LENGTH)
3232 {
3233 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3234 REG_DEP_OUTPUT, false);
3235 add_dependence_list_and_free (deps, insn,
3236 &reg_last->implicit_sets, 0,
3237 REG_DEP_ANTI, false);
3238 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3239 REG_DEP_ANTI, false);
3240 add_dependence_list_and_free (deps, insn,
3241 &reg_last->control_uses, 0,
3242 REG_DEP_ANTI, false);
3243 add_dependence_list_and_free (deps, insn,
3244 &reg_last->clobbers, 0,
3245 REG_DEP_OUTPUT, false);
3246
3247 if (!deps->readonly)
3248 {
3249 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3250 reg_last->clobbers_length = 0;
3251 reg_last->uses_length = 0;
3252 }
3253 }
3254 else
3255 {
3256 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3257 false);
3258 add_dependence_list (insn, reg_last->implicit_sets, 0,
3259 REG_DEP_ANTI, false);
3260 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3261 false);
3262 add_dependence_list (insn, reg_last->control_uses, 0,
3263 REG_DEP_CONTROL, false);
3264 }
3265
3266 if (!deps->readonly)
3267 {
3268 reg_last->clobbers_length++;
3269 reg_last->clobbers
3270 = alloc_INSN_LIST (insn, reg_last->clobbers);
3271 }
3272 }
3273 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3274 {
3275 struct deps_reg *reg_last = &deps->reg_last[i];
3276
3277 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3278 REG_DEP_OUTPUT, false);
3279 add_dependence_list_and_free (deps, insn,
3280 &reg_last->implicit_sets,
3281 0, REG_DEP_ANTI, false);
3282 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3283 REG_DEP_OUTPUT, false);
3284 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3285 REG_DEP_ANTI, false);
3286 add_dependence_list (insn, reg_last->control_uses, 0,
3287 REG_DEP_CONTROL, false);
3288
3289 if (!deps->readonly)
3290 {
3291 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3292 reg_last->uses_length = 0;
3293 reg_last->clobbers_length = 0;
3294 }
3295 }
3296 }
3297 if (!deps->readonly)
3298 {
3299 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3300 {
3301 struct deps_reg *reg_last = &deps->reg_last[i];
3302 reg_last->control_uses
3303 = alloc_INSN_LIST (insn, reg_last->control_uses);
3304 }
3305 }
3306 }
3307
3308 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3309 if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3310 {
3311 struct deps_reg *reg_last = &deps->reg_last[i];
3312 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
3313 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
3314 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
3315 add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
3316 false);
3317
3318 if (!deps->readonly)
3319 reg_last->implicit_sets
3320 = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3321 }
3322
3323 if (!deps->readonly)
3324 {
3325 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3326 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3327 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
3328 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3329 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
3330 || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3331 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3332
3333 /* Set up the pending barrier found. */
3334 deps->last_reg_pending_barrier = reg_pending_barrier;
3335 }
3336
3337 CLEAR_REG_SET (reg_pending_uses);
3338 CLEAR_REG_SET (reg_pending_clobbers);
3339 CLEAR_REG_SET (reg_pending_sets);
3340 CLEAR_REG_SET (reg_pending_control_uses);
3341 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3342 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3343
3344 /* Add dependencies if a scheduling barrier was found. */
3345 if (reg_pending_barrier)
3346 {
3347 /* In the case of a barrier, most of the added dependencies are not
3348 real, so we use anti-dependences here. */
3349 if (sched_has_condition_p (insn))
3350 {
3351 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3352 {
3353 struct deps_reg *reg_last = &deps->reg_last[i];
3354 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3355 true);
3356 add_dependence_list (insn, reg_last->sets, 0,
3357 reg_pending_barrier == TRUE_BARRIER
3358 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3359 add_dependence_list (insn, reg_last->implicit_sets, 0,
3360 REG_DEP_ANTI, true);
3361 add_dependence_list (insn, reg_last->clobbers, 0,
3362 reg_pending_barrier == TRUE_BARRIER
3363 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3364 }
3365 }
3366 else
3367 {
3368 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3369 {
3370 struct deps_reg *reg_last = &deps->reg_last[i];
3371 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3372 REG_DEP_ANTI, true);
3373 add_dependence_list_and_free (deps, insn,
3374 &reg_last->control_uses, 0,
3375 REG_DEP_CONTROL, true);
3376 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3377 reg_pending_barrier == TRUE_BARRIER
3378 ? REG_DEP_TRUE : REG_DEP_ANTI,
3379 true);
3380 add_dependence_list_and_free (deps, insn,
3381 &reg_last->implicit_sets, 0,
3382 REG_DEP_ANTI, true);
3383 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3384 reg_pending_barrier == TRUE_BARRIER
3385 ? REG_DEP_TRUE : REG_DEP_ANTI,
3386 true);
3387
3388 if (!deps->readonly)
3389 {
3390 reg_last->uses_length = 0;
3391 reg_last->clobbers_length = 0;
3392 }
3393 }
3394 }
3395
3396 if (!deps->readonly)
3397 for (i = 0; i < (unsigned)deps->max_reg; i++)
3398 {
3399 struct deps_reg *reg_last = &deps->reg_last[i];
3400 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3401 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3402 }
3403
3404 /* Don't flush pending lists on speculative checks for
3405 selective scheduling. */
3406 if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
3407 flush_pending_lists (deps, insn, true, true);
3408
3409 reg_pending_barrier = NOT_A_BARRIER;
3410 }
3411
3412 /* If a post-call group is still open, see if it should remain so.
3413 This insn must be a simple move of a hard reg to a pseudo or
3414 vice-versa.
3415
3416 We must avoid moving these insns for correctness on targets
3417 with small register classes, and for special registers like
3418 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
3419 hard regs for all targets. */
3420
3421 if (deps->in_post_call_group_p)
3422 {
3423 rtx tmp, set = single_set (insn);
3424 int src_regno, dest_regno;
3425
3426 if (set == NULL)
3427 {
3428 if (DEBUG_INSN_P (insn))
3429 /* We don't want to mark debug insns as part of the same
3430 sched group. We know they really aren't, but if we use
3431 debug insns to tell that a call group is over, we'll
3432 get different code if debug insns are not there and
3433 instructions that follow seem like they should be part
3434 of the call group.
3435
3436 Also, if we did, chain_to_prev_insn would move the
3437 deps of the debug insn to the call insn, modifying
3438 non-debug post-dependency counts of the debug insn
3439 dependencies and otherwise messing with the scheduling
3440 order.
3441
3442 Instead, let such debug insns be scheduled freely, but
3443 keep the call group open in case there are insns that
3444 should be part of it afterwards. Since we grant debug
3445 insns higher priority than even sched group insns, it
3446 will all turn out all right. */
3447 goto debug_dont_end_call_group;
3448 else
3449 goto end_call_group;
3450 }
3451
3452 tmp = SET_DEST (set);
3453 if (GET_CODE (tmp) == SUBREG)
3454 tmp = SUBREG_REG (tmp);
3455 if (REG_P (tmp))
3456 dest_regno = REGNO (tmp);
3457 else
3458 goto end_call_group;
3459
3460 tmp = SET_SRC (set);
3461 if (GET_CODE (tmp) == SUBREG)
3462 tmp = SUBREG_REG (tmp);
3463 if ((GET_CODE (tmp) == PLUS
3464 || GET_CODE (tmp) == MINUS)
3465 && REG_P (XEXP (tmp, 0))
3466 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3467 && dest_regno == STACK_POINTER_REGNUM)
3468 src_regno = STACK_POINTER_REGNUM;
3469 else if (REG_P (tmp))
3470 src_regno = REGNO (tmp);
3471 else
3472 goto end_call_group;
3473
3474 if (src_regno < FIRST_PSEUDO_REGISTER
3475 || dest_regno < FIRST_PSEUDO_REGISTER)
3476 {
3477 if (!deps->readonly
3478 && deps->in_post_call_group_p == post_call_initial)
3479 deps->in_post_call_group_p = post_call;
3480
3481 if (!sel_sched_p () || sched_emulate_haifa_p)
3482 {
3483 SCHED_GROUP_P (insn) = 1;
3484 CANT_MOVE (insn) = 1;
3485 }
3486 }
3487 else
3488 {
3489 end_call_group:
3490 if (!deps->readonly)
3491 deps->in_post_call_group_p = not_post_call;
3492 }
3493 }
3494
3495 debug_dont_end_call_group:
3496 if ((current_sched_info->flags & DO_SPECULATION)
3497 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3498 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3499 be speculated. */
3500 {
3501 if (sel_sched_p ())
3502 sel_mark_hard_insn (insn);
3503 else
3504 {
3505 sd_iterator_def sd_it;
3506 dep_t dep;
3507
3508 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3509 sd_iterator_cond (&sd_it, &dep);)
3510 change_spec_dep_to_hard (sd_it);
3511 }
3512 }
3513
3514 /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
3515 honor their original ordering. */
3516 if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
3517 {
3518 if (deps->last_args_size)
3519 add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
3520 deps->last_args_size = insn;
3521 }
3522 }
3523
3524 /* Return TRUE if INSN might not always return normally (e.g. call exit,
3525 longjmp, loop forever, ...). */
3526 /* FIXME: Why can't this function just use flags_from_decl_or_type and
3527 test for ECF_NORETURN? */
3528 static bool
3529 call_may_noreturn_p (rtx_insn *insn)
3530 {
3531 rtx call;
3532
3533 /* const or pure calls that aren't looping will always return. */
3534 if (RTL_CONST_OR_PURE_CALL_P (insn)
3535 && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3536 return false;
3537
3538 call = get_call_rtx_from (insn);
3539 if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3540 {
3541 rtx symbol = XEXP (XEXP (call, 0), 0);
3542 if (SYMBOL_REF_DECL (symbol)
3543 && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3544 {
3545 if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3546 == BUILT_IN_NORMAL)
3547 switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3548 {
3549 case BUILT_IN_BCMP:
3550 case BUILT_IN_BCOPY:
3551 case BUILT_IN_BZERO:
3552 case BUILT_IN_INDEX:
3553 case BUILT_IN_MEMCHR:
3554 case BUILT_IN_MEMCMP:
3555 case BUILT_IN_MEMCPY:
3556 case BUILT_IN_MEMMOVE:
3557 case BUILT_IN_MEMPCPY:
3558 case BUILT_IN_MEMSET:
3559 case BUILT_IN_RINDEX:
3560 case BUILT_IN_STPCPY:
3561 case BUILT_IN_STPNCPY:
3562 case BUILT_IN_STRCAT:
3563 case BUILT_IN_STRCHR:
3564 case BUILT_IN_STRCMP:
3565 case BUILT_IN_STRCPY:
3566 case BUILT_IN_STRCSPN:
3567 case BUILT_IN_STRLEN:
3568 case BUILT_IN_STRNCAT:
3569 case BUILT_IN_STRNCMP:
3570 case BUILT_IN_STRNCPY:
3571 case BUILT_IN_STRPBRK:
3572 case BUILT_IN_STRRCHR:
3573 case BUILT_IN_STRSPN:
3574 case BUILT_IN_STRSTR:
3575 /* Assume certain string/memory builtins always return. */
3576 return false;
3577 default:
3578 break;
3579 }
3580 }
3581 }
3582
3583 /* For all other calls assume that they might not always return. */
3584 return true;
3585 }
3586
3587 /* Return true if INSN should be made dependent on the previous instruction
3588 group, and if all INSN's dependencies should be moved to the first
3589 instruction of that group. */
3590
3591 static bool
3592 chain_to_prev_insn_p (rtx_insn *insn)
3593 {
3594 rtx prev, x;
3595
3596 /* INSN forms a group with the previous instruction. */
3597 if (SCHED_GROUP_P (insn))
3598 return true;
3599
3600 /* If the previous instruction clobbers a register R and this one sets
3601 part of R, the clobber was added specifically to help us track the
3602 liveness of R. There's no point scheduling the clobber and leaving
3603 INSN behind, especially if we move the clobber to another block. */
3604 prev = prev_nonnote_nondebug_insn (insn);
3605 if (prev
3606 && INSN_P (prev)
3607 && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
3608 && GET_CODE (PATTERN (prev)) == CLOBBER)
3609 {
3610 x = XEXP (PATTERN (prev), 0);
3611 if (set_of (x, insn))
3612 return true;
3613 }
3614
3615 return false;
3616 }
3617
3618 /* Analyze INSN with DEPS as a context. */
3619 void
3620 deps_analyze_insn (struct deps_desc *deps, rtx_insn *insn)
3621 {
3622 if (sched_deps_info->start_insn)
3623 sched_deps_info->start_insn (insn);
3624
3625 /* Record the condition for this insn. */
3626 if (NONDEBUG_INSN_P (insn))
3627 {
3628 rtx t;
3629 sched_get_condition_with_rev (insn, NULL);
3630 t = INSN_CACHED_COND (insn);
3631 INSN_COND_DEPS (insn) = NULL;
3632 if (reload_completed
3633 && (current_sched_info->flags & DO_PREDICATION)
3634 && COMPARISON_P (t)
3635 && REG_P (XEXP (t, 0))
3636 && CONSTANT_P (XEXP (t, 1)))
3637 {
3638 unsigned int regno;
3639 int nregs;
3640 rtx_insn_list *cond_deps = NULL;
3641 t = XEXP (t, 0);
3642 regno = REGNO (t);
3643 nregs = REG_NREGS (t);
3644 while (nregs-- > 0)
3645 {
3646 struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
3647 cond_deps = concat_INSN_LIST (reg_last->sets, cond_deps);
3648 cond_deps = concat_INSN_LIST (reg_last->clobbers, cond_deps);
3649 cond_deps = concat_INSN_LIST (reg_last->implicit_sets, cond_deps);
3650 }
3651 INSN_COND_DEPS (insn) = cond_deps;
3652 }
3653 }
3654
3655 if (JUMP_P (insn))
3656 {
3657 /* Make each JUMP_INSN (but not a speculative check)
3658 a scheduling barrier for memory references. */
3659 if (!deps->readonly
3660 && !(sel_sched_p ()
3661 && sel_insn_is_speculation_check (insn)))
3662 {
3663 /* Keep the list a reasonable size. */
3664 if (deps->pending_flush_length++ >= MAX_PENDING_LIST_LENGTH)
3665 flush_pending_lists (deps, insn, true, true);
3666 else
3667 deps->pending_jump_insns
3668 = alloc_INSN_LIST (insn, deps->pending_jump_insns);
3669 }
3670
3671 /* For each insn which shouldn't cross a jump, add a dependence. */
3672 add_dependence_list_and_free (deps, insn,
3673 &deps->sched_before_next_jump, 1,
3674 REG_DEP_ANTI, true);
3675
3676 sched_analyze_insn (deps, PATTERN (insn), insn);
3677 }
3678 else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
3679 {
3680 sched_analyze_insn (deps, PATTERN (insn), insn);
3681 }
3682 else if (CALL_P (insn))
3683 {
3684 int i;
3685
3686 CANT_MOVE (insn) = 1;
3687
3688 if (find_reg_note (insn, REG_SETJMP, NULL))
3689 {
3690 /* This is setjmp. Assume that all registers, not just
3691 hard registers, may be clobbered by this call. */
3692 reg_pending_barrier = MOVE_BARRIER;
3693 }
3694 else
3695 {
3696 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3697 /* A call may read and modify global register variables. */
3698 if (global_regs[i])
3699 {
3700 SET_REGNO_REG_SET (reg_pending_sets, i);
3701 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3702 }
3703 /* Other call-clobbered hard regs may be clobbered.
3704 Since we only have a choice between 'might be clobbered'
3705 and 'definitely not clobbered', we must include all
3706 partly call-clobbered registers here. */
3707 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3708 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3709 SET_REGNO_REG_SET (reg_pending_clobbers, i);
3710 /* We don't know what set of fixed registers might be used
3711 by the function, but the stack pointer is certainly among
3712 them, so be conservative. */
3713 else if (fixed_regs[i])
3714 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3715 /* The frame pointer is normally not used by the function
3716 itself, but by the debugger. */
3717 /* ??? MIPS o32 is an exception. It uses the frame pointer
3718 in the macro expansion of jal but does not represent this
3719 fact in the call_insn rtl. */
3720 else if (i == FRAME_POINTER_REGNUM
3721 || (i == HARD_FRAME_POINTER_REGNUM
3722 && (! reload_completed || frame_pointer_needed)))
3723 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3724 }
3725
3726 /* For each insn which shouldn't cross a call, add a dependence
3727 between that insn and this call insn. */
3728 add_dependence_list_and_free (deps, insn,
3729 &deps->sched_before_next_call, 1,
3730 REG_DEP_ANTI, true);
3731
3732 sched_analyze_insn (deps, PATTERN (insn), insn);
3733
3734 /* If the CALL were in a sched group, this would violate the
3735 convention that sched group insns have dependencies only on the
3736 previous instruction.
3737
3738 Of course one can say: "Hey! What about the head of the sched
3739 group?"  And I will answer: "Basic principles (one dep per insn)
3740 are always the same." */
3741 gcc_assert (!SCHED_GROUP_P (insn));
3742
3743 /* In the absence of interprocedural alias analysis, we must flush
3744 all pending reads and writes, and start new dependencies starting
3745 from here. But only flush writes for constant calls (which may
3746 be passed a pointer to something we haven't written yet). */
3747 flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3748
3749 if (!deps->readonly)
3750 {
3751 /* Remember the last function call for limiting lifetimes. */
3752 free_INSN_LIST_list (&deps->last_function_call);
3753 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3754
3755 if (call_may_noreturn_p (insn))
3756 {
3757 /* Remember the last function call that might not always return
3758 normally for limiting moves of trapping insns. */
3759 free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3760 deps->last_function_call_may_noreturn
3761 = alloc_INSN_LIST (insn, NULL_RTX);
3762 }
3763
3764 /* Before reload, begin a post-call group, so as to keep the
3765 lifetimes of hard registers correct. */
3766 if (! reload_completed)
3767 deps->in_post_call_group_p = post_call;
3768 }
3769 }
3770
3771 if (sched_deps_info->use_cselib)
3772 cselib_process_insn (insn);
3773
3774 if (sched_deps_info->finish_insn)
3775 sched_deps_info->finish_insn ();
3776
3777 /* Fix up the dependencies in the sched group. */
3778 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3779 && chain_to_prev_insn_p (insn)
3780 && !sel_sched_p ())
3781 chain_to_prev_insn (insn);
3782 }
3783
3784 /* Initialize DEPS for the new block beginning with HEAD. */
3785 void
3786 deps_start_bb (struct deps_desc *deps, rtx_insn *head)
3787 {
3788 gcc_assert (!deps->readonly);
3789
3790 /* Before reload, if the previous block ended in a call, show that
3791 we are inside a post-call group, so as to keep the lifetimes of
3792 hard registers correct. */
3793 if (! reload_completed && !LABEL_P (head))
3794 {
3795 rtx_insn *insn = prev_nonnote_nondebug_insn (head);
3796
3797 if (insn && CALL_P (insn))
3798 deps->in_post_call_group_p = post_call_initial;
3799 }
3800 }
3801
3802 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3803 dependencies for each insn. */
3804 void
3805 sched_analyze (struct deps_desc *deps, rtx_insn *head, rtx_insn *tail)
3806 {
3807 rtx_insn *insn;
3808
3809 if (sched_deps_info->use_cselib)
3810 cselib_init (CSELIB_RECORD_MEMORY);
3811
3812 deps_start_bb (deps, head);
3813
3814 for (insn = head;; insn = NEXT_INSN (insn))
3815 {
3816
3817 if (INSN_P (insn))
3818 {
3819 /* And initialize deps_lists. */
3820 sd_init_insn (insn);
3821 /* Clean up SCHED_GROUP_P, which may have been set by the
3822 previous scheduler pass. */
3823 if (SCHED_GROUP_P (insn))
3824 SCHED_GROUP_P (insn) = 0;
3825 }
3826
3827 deps_analyze_insn (deps, insn);
3828
3829 if (insn == tail)
3830 {
3831 if (sched_deps_info->use_cselib)
3832 cselib_finish ();
3833 return;
3834 }
3835 }
3836 gcc_unreachable ();
3837 }
3838
3839 /* Helper for sched_free_deps ().
3840 Delete INSN's (RESOLVED_P) backward dependencies. */
3841 static void
3842 delete_dep_nodes_in_back_deps (rtx_insn *insn, bool resolved_p)
3843 {
3844 sd_iterator_def sd_it;
3845 dep_t dep;
3846 sd_list_types_def types;
3847
3848 if (resolved_p)
3849 types = SD_LIST_RES_BACK;
3850 else
3851 types = SD_LIST_BACK;
3852
3853 for (sd_it = sd_iterator_start (insn, types);
3854 sd_iterator_cond (&sd_it, &dep);)
3855 {
3856 dep_link_t link = *sd_it.linkp;
3857 dep_node_t node = DEP_LINK_NODE (link);
3858 deps_list_t back_list;
3859 deps_list_t forw_list;
3860
3861 get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3862 remove_from_deps_list (link, back_list);
3863 delete_dep_node (node);
3864 }
3865 }
3866
3867 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3868 deps_lists. */
3869 void
3870 sched_free_deps (rtx_insn *head, rtx_insn *tail, bool resolved_p)
3871 {
3872 rtx_insn *insn;
3873 rtx_insn *next_tail = NEXT_INSN (tail);
3874
3875 /* We make two passes since some insns may be scheduled before their
3876 dependencies are resolved. */
3877 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3878 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3879 {
3880 /* Clear forward deps and leave the dep_nodes to the
3881 corresponding back_deps list. */
3882 if (resolved_p)
3883 clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3884 else
3885 clear_deps_list (INSN_FORW_DEPS (insn));
3886 }
3887 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3888 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3889 {
3890 /* Clear resolved back deps together with their dep_nodes. */
3891 delete_dep_nodes_in_back_deps (insn, resolved_p);
3892
3893 sd_finish_insn (insn);
3894 }
3895 }
3896 \f
3897 /* Initialize variables for region data dependence analysis.
3898 When LAZY_REG_LAST is true, do not allocate reg_last array
3899 of struct deps_desc immediately. */
3900
3901 void
3902 init_deps (struct deps_desc *deps, bool lazy_reg_last)
3903 {
3904 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3905
3906 deps->max_reg = max_reg;
3907 if (lazy_reg_last)
3908 deps->reg_last = NULL;
3909 else
3910 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3911 INIT_REG_SET (&deps->reg_last_in_use);
3912
3913 deps->pending_read_insns = 0;
3914 deps->pending_read_mems = 0;
3915 deps->pending_write_insns = 0;
3916 deps->pending_write_mems = 0;
3917 deps->pending_jump_insns = 0;
3918 deps->pending_read_list_length = 0;
3919 deps->pending_write_list_length = 0;
3920 deps->pending_flush_length = 0;
3921 deps->last_pending_memory_flush = 0;
3922 deps->last_function_call = 0;
3923 deps->last_function_call_may_noreturn = 0;
3924 deps->sched_before_next_call = 0;
3925 deps->sched_before_next_jump = 0;
3926 deps->in_post_call_group_p = not_post_call;
3927 deps->last_debug_insn = 0;
3928 deps->last_args_size = 0;
3929 deps->last_reg_pending_barrier = NOT_A_BARRIER;
3930 deps->readonly = 0;
3931 }
3932
3933 /* Init only the reg_last field of DEPS, which was not allocated
3934 before because DEPS was initialized lazily. */
3935 void
3936 init_deps_reg_last (struct deps_desc *deps)
3937 {
3938 gcc_assert (deps && deps->max_reg > 0);
3939 gcc_assert (deps->reg_last == NULL);
3940
3941 deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3942 }
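
/* A minimal usage sketch of the lazy protocol above (hypothetical caller,
   not code from this file):

     struct deps_desc deps;
     init_deps (&deps, true);         -- reg_last stays NULL for now
     ...
     init_deps_reg_last (&deps);      -- allocate before reg_last is used
     ...
     free_deps (&deps);               -- copes with reg_last == NULL too

   free_deps tolerates a never-allocated reg_last, so the middle call can
   be omitted when no register dependencies end up being recorded.  */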
3943
3944
3945 /* Free insn lists found in DEPS. */
3946
3947 void
3948 free_deps (struct deps_desc *deps)
3949 {
3950 unsigned i;
3951 reg_set_iterator rsi;
3952
3953 /* max_reg == 0 means this context has already been freed. */
3954 if (deps->max_reg == 0)
3955 {
3956 gcc_assert (deps->reg_last == NULL);
3957 return;
3958 }
3959 deps->max_reg = 0;
3960
3961 free_INSN_LIST_list (&deps->pending_read_insns);
3962 free_EXPR_LIST_list (&deps->pending_read_mems);
3963 free_INSN_LIST_list (&deps->pending_write_insns);
3964 free_EXPR_LIST_list (&deps->pending_write_mems);
3965 free_INSN_LIST_list (&deps->last_pending_memory_flush);
3966
3967 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3968 times. For a testcase with 42000 regs and 8000 small basic blocks,
3969 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
3970 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3971 {
3972 struct deps_reg *reg_last = &deps->reg_last[i];
3973 if (reg_last->uses)
3974 free_INSN_LIST_list (&reg_last->uses);
3975 if (reg_last->sets)
3976 free_INSN_LIST_list (&reg_last->sets);
3977 if (reg_last->implicit_sets)
3978 free_INSN_LIST_list (&reg_last->implicit_sets);
3979 if (reg_last->control_uses)
3980 free_INSN_LIST_list (&reg_last->control_uses);
3981 if (reg_last->clobbers)
3982 free_INSN_LIST_list (&reg_last->clobbers);
3983 }
3984 CLEAR_REG_SET (&deps->reg_last_in_use);
3985
3986 /* As we initialize reg_last lazily, it is possible that we didn't allocate
3987 it at all. */
3988 free (deps->reg_last);
3989 deps->reg_last = NULL;
3990
3991 deps = NULL;
3992 }
3993
3994 /* Remove INSN from dependence contexts DEPS. */
3995 void
3996 remove_from_deps (struct deps_desc *deps, rtx_insn *insn)
3997 {
3998 int removed;
3999 unsigned i;
4000 reg_set_iterator rsi;
4001
4002 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
4003 &deps->pending_read_mems);
4004 if (!DEBUG_INSN_P (insn))
4005 deps->pending_read_list_length -= removed;
4006 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
4007 &deps->pending_write_mems);
4008 deps->pending_write_list_length -= removed;
4009
4010 removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
4011 deps->pending_flush_length -= removed;
4012 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
4013 deps->pending_flush_length -= removed;
4014
4015 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
4016 {
4017 struct deps_reg *reg_last = &deps->reg_last[i];
4018 if (reg_last->uses)
4019 remove_from_dependence_list (insn, &reg_last->uses);
4020 if (reg_last->sets)
4021 remove_from_dependence_list (insn, &reg_last->sets);
4022 if (reg_last->implicit_sets)
4023 remove_from_dependence_list (insn, &reg_last->implicit_sets);
4024 if (reg_last->clobbers)
4025 remove_from_dependence_list (insn, &reg_last->clobbers);
4026 if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
4027 && !reg_last->clobbers)
4028 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
4029 }
4030
4031 if (CALL_P (insn))
4032 {
4033 remove_from_dependence_list (insn, &deps->last_function_call);
4034 remove_from_dependence_list (insn,
4035 &deps->last_function_call_may_noreturn);
4036 }
4037 remove_from_dependence_list (insn, &deps->sched_before_next_call);
4038 }
4039
4040 /* Init deps data vector. */
4041 static void
4042 init_deps_data_vector (void)
4043 {
4044 int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
4045 if (reserve > 0 && ! h_d_i_d.space (reserve))
4046 h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2);
4047 }
4048
4049 /* If it is profitable to use them, initialize or extend (depending on
4050 GLOBAL_P) dependency data. */
4051 void
4052 sched_deps_init (bool global_p)
4053 {
4054 /* Average number of insns per basic block.
4055 '+ 1' is used to make it nonzero. */
4056 int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;
4057
4058 init_deps_data_vector ();
4059
4060 /* We use another caching mechanism for selective scheduling, so
4061 we don't use this one. */
4062 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
4063 {
4064 /* ?!? We could save some memory by computing a per-region luid mapping
4065 which could reduce both the number of vectors in the cache and the
4066 size of each vector. Instead we just avoid the cache entirely unless
4067 the average number of instructions in a basic block is very high. See
4068 the comment before the declaration of true_dependency_cache for
4069 what we consider "very high". */
4070 cache_size = 0;
4071 extend_dependency_caches (sched_max_luid, true);
4072 }
4073
4074 if (global_p)
4075 {
4076 dl_pool = new pool_allocator<_deps_list> ("deps_list",
4077 /* Allocate lists for one block at a time. */
4078 insns_in_block);
4079 dn_pool = new pool_allocator<_dep_node> ("dep_node",
4080 /* Allocate nodes for one block at a time.
4081 We assume that average insn has
4082 5 producers. */
4083 5 * insns_in_block);
4084 }
4085 }
4086
4087
4088 /* Create or extend (depending on CREATE_P) dependency caches to
4089 size N. */
4090 void
4091 extend_dependency_caches (int n, bool create_p)
4092 {
4093 if (create_p || true_dependency_cache)
4094 {
4095 int i, luid = cache_size + n;
4096
4097 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
4098 luid);
4099 output_dependency_cache = XRESIZEVEC (bitmap_head,
4100 output_dependency_cache, luid);
4101 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
4102 luid);
4103 control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
4104 luid);
4105
4106 if (current_sched_info->flags & DO_SPECULATION)
4107 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
4108 luid);
4109
4110 for (i = cache_size; i < luid; i++)
4111 {
4112 bitmap_initialize (&true_dependency_cache[i], 0);
4113 bitmap_initialize (&output_dependency_cache[i], 0);
4114 bitmap_initialize (&anti_dependency_cache[i], 0);
4115 bitmap_initialize (&control_dependency_cache[i], 0);
4116
4117 if (current_sched_info->flags & DO_SPECULATION)
4118 bitmap_initialize (&spec_dependency_cache[i], 0);
4119 }
4120 cache_size = luid;
4121 }
4122 }
4123
4124 /* Finalize dependency information for the whole function. */
4125 void
4126 sched_deps_finish (void)
4127 {
4128 gcc_assert (deps_pools_are_empty_p ());
4129 dn_pool->release_if_empty ();
4130 dn_pool = NULL;
4131 dl_pool->release_if_empty ();
4132 dl_pool = NULL;
4133
4134 h_d_i_d.release ();
4136
4137 if (true_dependency_cache)
4138 {
4139 int i;
4140
4141 for (i = 0; i < cache_size; i++)
4142 {
4143 bitmap_clear (&true_dependency_cache[i]);
4144 bitmap_clear (&output_dependency_cache[i]);
4145 bitmap_clear (&anti_dependency_cache[i]);
4146 bitmap_clear (&control_dependency_cache[i]);
4147
4148 if (sched_deps_info->generate_spec_deps)
4149 bitmap_clear (&spec_dependency_cache[i]);
4150 }
4151 free (true_dependency_cache);
4152 true_dependency_cache = NULL;
4153 free (output_dependency_cache);
4154 output_dependency_cache = NULL;
4155 free (anti_dependency_cache);
4156 anti_dependency_cache = NULL;
4157 free (control_dependency_cache);
4158 control_dependency_cache = NULL;
4159
4160 if (sched_deps_info->generate_spec_deps)
4161 {
4162 free (spec_dependency_cache);
4163 spec_dependency_cache = NULL;
4164 }
4165
4166     }

  /* Reset the size now that the per-entry bitmaps have been cleared.  */
  cache_size = 0;
4167 }
4168
4169 /* Initialize some global variables needed by the dependency analysis
4170 code. */
4171
4172 void
4173 init_deps_global (void)
4174 {
4175 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
4176 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
4177 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
4178 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
4179 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
4180 reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
4181 reg_pending_barrier = NOT_A_BARRIER;
4182
4183 if (!sel_sched_p () || sched_emulate_haifa_p)
4184 {
4185 sched_deps_info->start_insn = haifa_start_insn;
4186 sched_deps_info->finish_insn = haifa_finish_insn;
4187
4188 sched_deps_info->note_reg_set = haifa_note_reg_set;
4189 sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
4190 sched_deps_info->note_reg_use = haifa_note_reg_use;
4191
4192 sched_deps_info->note_mem_dep = haifa_note_mem_dep;
4193 sched_deps_info->note_dep = haifa_note_dep;
4194 }
4195 }
4196
4197 /* Free everything used by the dependency analysis code. */
4198
4199 void
4200 finish_deps_global (void)
4201 {
4202 FREE_REG_SET (reg_pending_sets);
4203 FREE_REG_SET (reg_pending_clobbers);
4204 FREE_REG_SET (reg_pending_uses);
4205 FREE_REG_SET (reg_pending_control_uses);
4206 }
4207
4208 /* Estimate the weakness of dependence between MEM1 and MEM2. */
4209 dw_t
4210 estimate_dep_weak (rtx mem1, rtx mem2)
4211 {
4212 rtx r1, r2;
4213
4214 if (mem1 == mem2)
4215 /* MEMs are the same - don't speculate. */
4216 return MIN_DEP_WEAK;
4217
4218 r1 = XEXP (mem1, 0);
4219 r2 = XEXP (mem2, 0);
4220
4221 if (r1 == r2
4222 || (REG_P (r1) && REG_P (r2)
4223 && REGNO (r1) == REGNO (r2)))
4224 /* Again, MEMs are the same. */
4225 return MIN_DEP_WEAK;
4226 else if ((REG_P (r1) && !REG_P (r2))
4227 || (!REG_P (r1) && REG_P (r2)))
4228 /* Different addressing modes - a reason to be more speculative
4229 than usual. */
4230 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
4231 else
4232 /* We can't say anything about the dependence. */
4233 return UNCERTAIN_DEP_WEAK;
4234 }
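
/* Illustration of the scale used above (symbolic only; the concrete values
   come from the *_DEP_WEAK macros):

     identical MEMs, or identical address registers  -> MIN_DEP_WEAK
     one REG address vs. one non-REG address         -> halfway between
                                                        UNCERTAIN_DEP_WEAK
                                                        and NO_DEP_WEAK
     anything else                                   -> UNCERTAIN_DEP_WEAK

   Larger values mean a weaker dependence, i.e. a better chance that the
   two references do not actually conflict.  */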
4235
4236 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
4237 This function can handle same INSN and ELEM (INSN == ELEM).
4238 It is a convenience wrapper. */
4239 static void
4240 add_dependence_1 (rtx_insn *insn, rtx_insn *elem, enum reg_note dep_type)
4241 {
4242 ds_t ds;
4243 bool internal;
4244
4245 if (dep_type == REG_DEP_TRUE)
4246 ds = DEP_TRUE;
4247 else if (dep_type == REG_DEP_OUTPUT)
4248 ds = DEP_OUTPUT;
4249 else if (dep_type == REG_DEP_CONTROL)
4250 ds = DEP_CONTROL;
4251 else
4252 {
4253 gcc_assert (dep_type == REG_DEP_ANTI);
4254 ds = DEP_ANTI;
4255 }
4256
4257 /* When add_dependence is called from inside sched-deps.c, we expect
4258 cur_insn to be non-null. */
4259 internal = cur_insn != NULL;
4260 if (internal)
4261 gcc_assert (insn == cur_insn);
4262 else
4263 cur_insn = insn;
4264
4265 note_dep (elem, ds);
4266 if (!internal)
4267 cur_insn = NULL;
4268 }
4269
4270 /* Return the weakness of speculative type TYPE in the dep_status DS,
4271 without the range check, so that malformed input does not cause an ICE. */
4272 static dw_t
4273 get_dep_weak_1 (ds_t ds, ds_t type)
4274 {
4275 ds = ds & type;
4276
4277 switch (type)
4278 {
4279 case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
4280 case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
4281 case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
4282 case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
4283 default: gcc_unreachable ();
4284 }
4285
4286 return (dw_t) ds;
4287 }
4288
4289 /* Return weakness of speculative type TYPE in the dep_status DS. */
4290 dw_t
4291 get_dep_weak (ds_t ds, ds_t type)
4292 {
4293 dw_t dw = get_dep_weak_1 (ds, type);
4294
4295 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4296 return dw;
4297 }
4298
4299 /* Return a dep_status that is identical to DS, except that
4300 speculative type TYPE will have weakness DW. */
4301 ds_t
4302 set_dep_weak (ds_t ds, ds_t type, dw_t dw)
4303 {
4304 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4305
4306 ds &= ~type;
4307 switch (type)
4308 {
4309 case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
4310 case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
4311 case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
4312 case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
4313 default: gcc_unreachable ();
4314 }
4315 return ds;
4316 }
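
/* The two functions above rely on each speculative type occupying its own
   bit field within a ds_t.  A round-trip property that can serve as a
   sanity check (illustrative only):

     ds_t ds = set_dep_weak (0, BEGIN_DATA, w);
     gcc_assert (get_dep_weak (ds, BEGIN_DATA) == w);

   holds for any w in [MIN_DEP_WEAK, MAX_DEP_WEAK], and setting the
   weakness of one type leaves the fields of the other types untouched.  */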
4317
4318 /* Return the join of two dep_statuses DS1 and DS2.
4319 If MAX_P is true then choose the greater probability,
4320 otherwise multiply probabilities.
4321 This function assumes that both DS1 and DS2 contain speculative bits. */
4322 static ds_t
4323 ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
4324 {
4325 ds_t ds, t;
4326
4327 gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
4328
4329 ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
4330
4331 t = FIRST_SPEC_TYPE;
4332 do
4333 {
4334 if ((ds1 & t) && !(ds2 & t))
4335 ds |= ds1 & t;
4336 else if (!(ds1 & t) && (ds2 & t))
4337 ds |= ds2 & t;
4338 else if ((ds1 & t) && (ds2 & t))
4339 {
4340 dw_t dw1 = get_dep_weak (ds1, t);
4341 dw_t dw2 = get_dep_weak (ds2, t);
4342 ds_t dw;
4343
4344 if (!max_p)
4345 {
4346 dw = ((ds_t) dw1) * ((ds_t) dw2);
4347 dw /= MAX_DEP_WEAK;
4348 if (dw < MIN_DEP_WEAK)
4349 dw = MIN_DEP_WEAK;
4350 }
4351 else
4352 {
4353 if (dw1 >= dw2)
4354 dw = dw1;
4355 else
4356 dw = dw2;
4357 }
4358
4359 ds = set_dep_weak (ds, t, (dw_t) dw);
4360 }
4361
4362 if (t == LAST_SPEC_TYPE)
4363 break;
4364 t <<= SPEC_TYPE_SHIFT;
4365 }
4366 while (1);
4367
4368 return ds;
4369 }
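
/* Worked example of the two merge modes above (weaknesses are abstract
   values on the MAX_DEP_WEAK scale, not taken from any target): if both
   statuses carry BEGIN_DATA with weaknesses dw1 and dw2, then

     !MAX_P:  dw = dw1 * dw2 / MAX_DEP_WEAK   -- multiply probabilities
      MAX_P:  dw = MAX (dw1, dw2)             -- keep the greater one

   with the !MAX_P result clamped below at MIN_DEP_WEAK.  A type present in
   only one of the two statuses keeps its weakness unchanged.  */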
4370
4371 /* Return the join of two dep_statuses DS1 and DS2.
4372 This function assumes that both DS1 and DS2 contain speculative bits. */
4373 ds_t
4374 ds_merge (ds_t ds1, ds_t ds2)
4375 {
4376 return ds_merge_1 (ds1, ds2, false);
4377 }
4378
4379 /* Return the join of two dep_statuses DS1 and DS2. */
4380 ds_t
4381 ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
4382 {
4383 ds_t new_status = ds | ds2;
4384
4385 if (new_status & SPECULATIVE)
4386 {
4387 if ((ds && !(ds & SPECULATIVE))
4388 || (ds2 && !(ds2 & SPECULATIVE)))
4389 /* Then this dep can't be speculative. */
4390 new_status &= ~SPECULATIVE;
4391 else
4392 {
4393 /* Both are speculative. Merging probabilities. */
4394 if (mem1)
4395 {
4396 dw_t dw;
4397
4398 dw = estimate_dep_weak (mem1, mem2);
4399 ds = set_dep_weak (ds, BEGIN_DATA, dw);
4400 }
4401
4402 if (!ds)
4403 new_status = ds2;
4404 else if (!ds2)
4405 new_status = ds;
4406 else
4407 new_status = ds_merge (ds2, ds);
4408 }
4409 }
4410
4411 return new_status;
4412 }
4413
4414 /* Return the join of DS1 and DS2. Use maximum instead of multiplying
4415 probabilities. */
4416 ds_t
4417 ds_max_merge (ds_t ds1, ds_t ds2)
4418 {
4419 if (ds1 == 0 && ds2 == 0)
4420 return 0;
4421
4422 if (ds1 == 0 && ds2 != 0)
4423 return ds2;
4424
4425 if (ds1 != 0 && ds2 == 0)
4426 return ds1;
4427
4428 return ds_merge_1 (ds1, ds2, true);
4429 }
4430
4431 /* Return the probability of speculation success for the speculation
4432 status DS. */
4433 dw_t
4434 ds_weak (ds_t ds)
4435 {
4436 ds_t res = 1, dt;
4437 int n = 0;
4438
4439 dt = FIRST_SPEC_TYPE;
4440 do
4441 {
4442 if (ds & dt)
4443 {
4444 res *= (ds_t) get_dep_weak (ds, dt);
4445 n++;
4446 }
4447
4448 if (dt == LAST_SPEC_TYPE)
4449 break;
4450 dt <<= SPEC_TYPE_SHIFT;
4451 }
4452 while (1);
4453
4454 gcc_assert (n);
4455 while (--n)
4456 res /= MAX_DEP_WEAK;
4457
4458 if (res < MIN_DEP_WEAK)
4459 res = MIN_DEP_WEAK;
4460
4461 gcc_assert (res <= MAX_DEP_WEAK);
4462
4463 return (dw_t) res;
4464 }
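
/* For instance (a sketch of the arithmetic only): a status carrying
   BEGIN_DATA with weakness w1 and BEGIN_CONTROL with weakness w2 yields

     w1 * w2 / MAX_DEP_WEAK

   clamped to [MIN_DEP_WEAK, MAX_DEP_WEAK], while a status with a single
   speculative type simply yields that type's weakness.  */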
4465
4466 /* Return a dep status that contains all speculation types of DS. */
4467 ds_t
4468 ds_get_speculation_types (ds_t ds)
4469 {
4470 if (ds & BEGIN_DATA)
4471 ds |= BEGIN_DATA;
4472 if (ds & BE_IN_DATA)
4473 ds |= BE_IN_DATA;
4474 if (ds & BEGIN_CONTROL)
4475 ds |= BEGIN_CONTROL;
4476 if (ds & BE_IN_CONTROL)
4477 ds |= BE_IN_CONTROL;
4478
4479 return ds & SPECULATIVE;
4480 }
4481
4482 /* Return a dep status that contains maximal weakness for each speculation
4483 type present in DS. */
4484 ds_t
4485 ds_get_max_dep_weak (ds_t ds)
4486 {
4487 if (ds & BEGIN_DATA)
4488 ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4489 if (ds & BE_IN_DATA)
4490 ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4491 if (ds & BEGIN_CONTROL)
4492 ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4493 if (ds & BE_IN_CONTROL)
4494 ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4495
4496 return ds;
4497 }
4498
4499 /* Dump information about the dependence status S. */
4500 static void
4501 dump_ds (FILE *f, ds_t s)
4502 {
4503 fprintf (f, "{");
4504
4505 if (s & BEGIN_DATA)
4506 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4507 if (s & BE_IN_DATA)
4508 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4509 if (s & BEGIN_CONTROL)
4510 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4511 if (s & BE_IN_CONTROL)
4512 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4513
4514 if (s & HARD_DEP)
4515 fprintf (f, "HARD_DEP; ");
4516
4517 if (s & DEP_TRUE)
4518 fprintf (f, "DEP_TRUE; ");
4519 if (s & DEP_OUTPUT)
4520 fprintf (f, "DEP_OUTPUT; ");
4521 if (s & DEP_ANTI)
4522 fprintf (f, "DEP_ANTI; ");
4523 if (s & DEP_CONTROL)
4524 fprintf (f, "DEP_CONTROL; ");
4525
4526 fprintf (f, "}");
4527 }
4528
4529 DEBUG_FUNCTION void
4530 debug_ds (ds_t s)
4531 {
4532 dump_ds (stderr, s);
4533 fprintf (stderr, "\n");
4534 }
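
/* Reading aid: given the format strings above, a data-speculative true
   dependence with BEGIN_DATA weakness 3 would be dumped as

     {BEGIN_DATA: 3; DEP_TRUE; }

   (the weakness value itself is of course status-dependent).  */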
4535
4536 #ifdef ENABLE_CHECKING
4537 /* Verify that dependence type and status are consistent.
4538 If RELAXED_P is true, then skip dep_weakness checks. */
4539 static void
4540 check_dep (dep_t dep, bool relaxed_p)
4541 {
4542 enum reg_note dt = DEP_TYPE (dep);
4543 ds_t ds = DEP_STATUS (dep);
4544
4545 gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4546
4547 if (!(current_sched_info->flags & USE_DEPS_LIST))
4548 {
4549 gcc_assert (ds == 0);
4550 return;
4551 }
4552
4553 /* Check that dependence type contains the same bits as the status. */
4554 if (dt == REG_DEP_TRUE)
4555 gcc_assert (ds & DEP_TRUE);
4556 else if (dt == REG_DEP_OUTPUT)
4557 gcc_assert ((ds & DEP_OUTPUT)
4558 && !(ds & DEP_TRUE));
4559 else if (dt == REG_DEP_ANTI)
4560 gcc_assert ((ds & DEP_ANTI)
4561 && !(ds & (DEP_OUTPUT | DEP_TRUE)));
4562 else
4563 gcc_assert (dt == REG_DEP_CONTROL
4564 && (ds & DEP_CONTROL)
4565 && !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));
4566
4567 /* HARD_DEP cannot appear in the dep_status of a link. */
4568 gcc_assert (!(ds & HARD_DEP));
4569
4570 /* Check that dependence status is set correctly when speculation is not
4571 supported. */
4572 if (!sched_deps_info->generate_spec_deps)
4573 gcc_assert (!(ds & SPECULATIVE));
4574 else if (ds & SPECULATIVE)
4575 {
4576 if (!relaxed_p)
4577 {
4578 ds_t type = FIRST_SPEC_TYPE;
4579
4580 /* Check that dependence weakness is in proper range. */
4581 do
4582 {
4583 if (ds & type)
4584 get_dep_weak (ds, type);
4585
4586 if (type == LAST_SPEC_TYPE)
4587 break;
4588 type <<= SPEC_TYPE_SHIFT;
4589 }
4590 while (1);
4591 }
4592
4593 if (ds & BEGIN_SPEC)
4594 {
4595 /* Only true dependence can be data speculative. */
4596 if (ds & BEGIN_DATA)
4597 gcc_assert (ds & DEP_TRUE);
4598
4599 /* Control dependencies in the insn scheduler are represented by
4600 anti-dependencies, therefore only anti dependence can be
4601 control speculative. */
4602 if (ds & BEGIN_CONTROL)
4603 gcc_assert (ds & DEP_ANTI);
4604 }
4605 else
4606 {
4607 /* Subsequent speculations should resolve true dependencies. */
4608 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4609 }
4610
4611 /* Check that true and anti dependencies can't have other speculative
4612 statuses. */
4613 if (ds & DEP_TRUE)
4614 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4615 /* An output dependence can't be speculative at all. */
4616 gcc_assert (!(ds & DEP_OUTPUT));
4617 if (ds & DEP_ANTI)
4618 gcc_assert (ds & BEGIN_CONTROL);
4619 }
4620 }
4621 #endif /* ENABLE_CHECKING */
4622
4623 /* The following code discovers opportunities to switch a memory reference
4624 and an increment by modifying the address. We ensure that this is done
4625 only for dependencies that represent a single register dependence
4626 (using DEP_NONREG and DEP_MULTIPLE), and only when every memory
4627 instruction involved is subject to just one dep that can cause a pattern
4628 change.
4629
4630 When we discover a suitable dependency, we fill in the dep_replacement
4631 structure to show how to modify the memory reference. */
4632
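/* As an illustration (register number and offset are made up), consider a
   backwards dependence of a load on a preceding increment:

     (set (reg 100) (plus (reg 100) (const_int 4)))    <- inc_insn
     ... (mem (reg 100)) ...                           <- mem_insn

   Rewriting the memory address as (plus (reg 100) (const_int 4)) lets
   mem_insn compute the same address from the pre-increment value of
   (reg 100), so the load may be scheduled ahead of the increment.  The
   rewrite is recorded in a dep_replacement and is intended to take effect
   only if the scheduler actually breaks this dependence.  */
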
4633 /* Holds information about a pair of memory reference and register increment
4634 insns which depend on each other, but could possibly be interchanged. */
4635 struct mem_inc_info
4636 {
4637 rtx_insn *inc_insn;
4638 rtx_insn *mem_insn;
4639
4640 rtx *mem_loc;
4641 /* A register occurring in the memory address for which we wish to break
4642 the dependence. This must be identical to the destination register of
4643 the increment. */
4644 rtx mem_reg0;
4645 /* Any kind of index that is added to that register. */
4646 rtx mem_index;
4647 /* The constant offset used in the memory address. */
4648 HOST_WIDE_INT mem_constant;
4649 /* The constant added in the increment insn. Negated if the increment is
4650 after the memory address. */
4651 HOST_WIDE_INT inc_constant;
4652 /* The source register used in the increment. May be different from mem_reg0
4653 if the increment occurs before the memory address. */
4654 rtx inc_input;
4655 };
4656
4657 /* Verify that the memory location described in MII can be replaced with
4658 one using NEW_ADDR. Return the new memory reference or NULL_RTX. The
4659 insn remains unchanged by this function. */
4660
4661 static rtx
4662 attempt_change (struct mem_inc_info *mii, rtx new_addr)
4663 {
4664 rtx mem = *mii->mem_loc;
4665 rtx new_mem;
4666
4667 /* Jump through a lot of hoops to keep the attributes up to date. We
4668 do not want to call one of the change address variants that take
4669 an offset even though we know the offset in many cases. These
4670 assume you are changing where the address is pointing by the
4671 offset. */
4672 new_mem = replace_equiv_address_nv (mem, new_addr);
4673 if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
4674 {
4675 if (sched_verbose >= 5)
4676 fprintf (sched_dump, "validation failure\n");
4677 return NULL_RTX;
4678 }
4679
4680 /* Put back the old one. */
4681 validate_change (mii->mem_insn, mii->mem_loc, mem, 0);
4682
4683 return new_mem;
4684 }
4685
4686 /* Return true if INSN has the form "a = b + c", where a and b are
4687 regs and c is a const_int (the constant may be negative). Fill in
4688 information in MII about what is found.
4689 BEFORE_MEM indicates whether the increment is found before or after
4690 a corresponding memory reference. */
4691
4692 static bool
4693 parse_add_or_inc (struct mem_inc_info *mii, rtx_insn *insn, bool before_mem)
4694 {
4695 rtx pat = single_set (insn);
4696 rtx src, cst;
4697 bool regs_equal;
4698
4699 if (RTX_FRAME_RELATED_P (insn) || !pat)
4700 return false;
4701
4702 /* Result must be single reg. */
4703 if (!REG_P (SET_DEST (pat)))
4704 return false;
4705
4706 if (GET_CODE (SET_SRC (pat)) != PLUS)
4707 return false;
4708
4709 mii->inc_insn = insn;
4710 src = SET_SRC (pat);
4711 mii->inc_input = XEXP (src, 0);
4712
4713 if (!REG_P (XEXP (src, 0)))
4714 return false;
4715
4716 if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
4717 return false;
4718
4719 cst = XEXP (src, 1);
4720 if (!CONST_INT_P (cst))
4721 return false;
4722 mii->inc_constant = INTVAL (cst);
4723
4724 regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);
4725
4726 if (!before_mem)
4727 {
4728 mii->inc_constant = -mii->inc_constant;
4729 if (!regs_equal)
4730 return false;
4731 }
4732
4733 if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
4734 {
4735 /* Note that the sign has already been reversed for !before_mem. */
4736 if (STACK_GROWS_DOWNWARD)
4737 return mii->inc_constant > 0;
4738 else
4739 return mii->inc_constant < 0;
4740 }
4741 return true;
4742 }
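
/* Examples of increments accepted above (register numbers are made up):

     (set (reg 100) (plus (reg 100) (const_int 8)))

   is acceptable whether it appears before or after the memory reference,
   whereas

     (set (reg 100) (plus (reg 101) (const_int 8)))

   is acceptable only before it (BEFORE_MEM), because afterwards the
   address used by the memory insn could not be rebuilt from the
   increment's input register.  */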
4743
4744 /* Once a suitable mem reference has been found and the corresponding data
4745 in MII has been filled in, this function is called to find a suitable
4746 add or inc insn involving the register we found in the memory
4747 reference. */
4748
4749 static bool
4750 find_inc (struct mem_inc_info *mii, bool backwards)
4751 {
4752 sd_iterator_def sd_it;
4753 dep_t dep;
4754
4755 sd_it = sd_iterator_start (mii->mem_insn,
4756 backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
4757 while (sd_iterator_cond (&sd_it, &dep))
4758 {
4759 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
4760 rtx_insn *pro = DEP_PRO (dep);
4761 rtx_insn *con = DEP_CON (dep);
4762 rtx_insn *inc_cand = backwards ? pro : con;
4763 if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
4764 goto next;
4765 if (parse_add_or_inc (mii, inc_cand, backwards))
4766 {
4767 struct dep_replacement *desc;
4768 df_ref def;
4769 rtx newaddr, newmem;
4770
4771 if (sched_verbose >= 5)
4772 fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
4773 INSN_UID (mii->mem_insn), INSN_UID (inc_cand));
4774
4775 /* We need to ensure that none of the operands of the inc
4776 instruction are assigned to by the mem insn. */
4777 FOR_EACH_INSN_DEF (def, mii->mem_insn)
4778 if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
4779 || reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
4780 {
4781 if (sched_verbose >= 5)
4782 fprintf (sched_dump,
4783 "inc conflicts with store failure.\n");
4784 goto next;
4785 }
4786
4787 newaddr = mii->inc_input;
4788 if (mii->mem_index != NULL_RTX)
4789 newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
4790 mii->mem_index);
4791 newaddr = plus_constant (GET_MODE (newaddr), newaddr,
4792 mii->mem_constant + mii->inc_constant);
4793 newmem = attempt_change (mii, newaddr);
4794 if (newmem == NULL_RTX)
4795 goto next;
4796 if (sched_verbose >= 5)
4797 fprintf (sched_dump, "successful address replacement\n");
4798 desc = XCNEW (struct dep_replacement);
4799 DEP_REPLACE (dep) = desc;
4800 desc->loc = mii->mem_loc;
4801 desc->newval = newmem;
4802 desc->orig = *desc->loc;
4803 desc->insn = mii->mem_insn;
4804 move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
4805 INSN_SPEC_BACK_DEPS (con));
4806 if (backwards)
4807 {
4808 FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
4809 add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
4810 REG_DEP_TRUE);
4811 }
4812 else
4813 {
4814 FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
4815 add_dependence_1 (DEP_CON (dep), mii->mem_insn,
4816 REG_DEP_ANTI);
4817 }
4818 return true;
4819 }
4820 next:
4821 sd_iterator_next (&sd_it);
4822 }
4823 return false;
4824 }
4825
4826 /* A recursive function that walks ADDRESS_OF_X to find memory references
4827 which could be modified during scheduling. We call find_inc for each
4828 one we find that has a recognizable form. MII holds information about
4829 the pair of memory/increment instructions.
4830 We ensure that every instruction with a memory reference (which will be
4831 the location of the replacement) is assigned at most one breakable
4832 dependency. */
4833
4834 static bool
4835 find_mem (struct mem_inc_info *mii, rtx *address_of_x)
4836 {
4837 rtx x = *address_of_x;
4838 enum rtx_code code = GET_CODE (x);
4839 const char *const fmt = GET_RTX_FORMAT (code);
4840 int i;
4841
4842 if (code == MEM)
4843 {
4844 rtx reg0 = XEXP (x, 0);
4845
4846 mii->mem_loc = address_of_x;
4847 mii->mem_index = NULL_RTX;
4848 mii->mem_constant = 0;
4849 if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
4850 {
4851 mii->mem_constant = INTVAL (XEXP (reg0, 1));
4852 reg0 = XEXP (reg0, 0);
4853 }
4854 if (GET_CODE (reg0) == PLUS)
4855 {
4856 mii->mem_index = XEXP (reg0, 1);
4857 reg0 = XEXP (reg0, 0);
4858 }
4859 if (REG_P (reg0))
4860 {
4861 df_ref use;
4862 int occurrences = 0;
4863
4864 /* Make sure this reg appears only once in this insn. Can't use
4865 count_occurrences since that only works for pseudos. */
4866 FOR_EACH_INSN_USE (use, mii->mem_insn)
4867 if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
4868 if (++occurrences > 1)
4869 {
4870 if (sched_verbose >= 5)
4871 fprintf (sched_dump, "mem count failure\n");
4872 return false;
4873 }
4874
4875 mii->mem_reg0 = reg0;
4876 return find_inc (mii, true) || find_inc (mii, false);
4877 }
4878 return false;
4879 }
4880
4881 if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
4882 {
4883 /* If REG occurs inside a MEM used in a bit-field reference,
4884 that is unacceptable. */
4885 return false;
4886 }
4887
4888 /* Time for some deep diving. */
4889 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4890 {
4891 if (fmt[i] == 'e')
4892 {
4893 if (find_mem (mii, &XEXP (x, i)))
4894 return true;
4895 }
4896 else if (fmt[i] == 'E')
4897 {
4898 int j;
4899 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4900 if (find_mem (mii, &XVECEXP (x, i, j)))
4901 return true;
4902 }
4903 }
4904 return false;
4905 }
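
/* Decomposition example (registers are made up): an address of the form

     (plus (plus (reg 100) (reg 101)) (const_int 12))

   is split above into mem_reg0 == (reg 100), mem_index == (reg 101) and
   mem_constant == 12, after which find_inc looks for a matching increment
   of (reg 100) among the dependencies of the memory insn.  */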
4906
4907
4908 /* Examine the instructions between HEAD and TAIL and try to find
4909 dependencies that can be broken by modifying one of the patterns. */
4910
4911 void
4912 find_modifiable_mems (rtx_insn *head, rtx_insn *tail)
4913 {
4914 rtx_insn *insn, *next_tail = NEXT_INSN (tail);
4915 int success_in_block = 0;
4916
4917 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
4918 {
4919 struct mem_inc_info mii;
4920
4921 if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
4922 continue;
4923
4924 mii.mem_insn = insn;
4925 if (find_mem (&mii, &PATTERN (insn)))
4926 success_in_block++;
4927 }
4928 if (success_in_block && sched_verbose >= 5)
4929 fprintf (sched_dump, "%d candidates for address modification found.\n",
4930 success_in_block);
4931 }
4932
4933 #endif /* INSN_SCHEDULING */