gcc/sel-sched-ir.c
1 /* Instruction scheduling pass. Selective scheduler and pipeliner.
2 Copyright (C) 2006-2014 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "diagnostic-core.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "hard-reg-set.h"
28 #include "regs.h"
29 #include "function.h"
30 #include "flags.h"
31 #include "insn-config.h"
32 #include "insn-attr.h"
33 #include "except.h"
34 #include "recog.h"
35 #include "params.h"
36 #include "target.h"
37 #include "sched-int.h"
38 #include "ggc.h"
39 #include "tree.h"
40 #include "vec.h"
41 #include "langhooks.h"
42 #include "rtlhooks-def.h"
43 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
44
45 #ifdef INSN_SCHEDULING
46 #include "sel-sched-ir.h"
47 /* We only need this include for sel_print_insn. */
48 #include "sel-sched-dump.h"
49
50 /* A vector holding bb info for whole scheduling pass. */
51 vec<sel_global_bb_info_def>
52 sel_global_bb_info = vNULL;
53
54 /* A vector holding bb info. */
55 vec<sel_region_bb_info_def>
56 sel_region_bb_info = vNULL;
57
58 /* A pool for allocating all lists. */
59 alloc_pool sched_lists_pool;
60
61 /* This contains information about successors for compute_av_set. */
62 struct succs_info current_succs;
63
64 /* Data structure to describe interaction with the generic scheduler utils. */
65 static struct common_sched_info_def sel_common_sched_info;
66
67 /* The loop nest being pipelined. */
68 struct loop *current_loop_nest;
69
70 /* LOOP_NESTS is a vector containing the corresponding loop nest for
71 each region. */
72 static vec<loop_p> loop_nests = vNULL;
73
74 /* Saves blocks already in loop regions, indexed by bb->index. */
75 static sbitmap bbs_in_loop_rgns = NULL;
76
77 /* CFG hooks that are saved before changing create_basic_block hook. */
78 static struct cfg_hooks orig_cfg_hooks;
79 \f
80
81 /* Array containing reverse topological index of function basic blocks,
82 indexed by BB->INDEX. */
83 static int *rev_top_order_index = NULL;
84
85 /* Length of the above array. */
86 static int rev_top_order_index_len = -1;
87
88 /* A regset pool structure. */
89 static struct
90 {
91 /* The stack to which regsets are returned. */
92 regset *v;
93
94 /* Its pointer. */
95 int n;
96
97 /* Its size. */
98 int s;
99
100 /* In VV we save all generated regsets so that, when destroying the
101 pool, we can compare it with V and check that every regset was
102 returned to the pool. */
103 regset *vv;
104
105 /* The pointer of VV stack. */
106 int nn;
107
108 /* Its size. */
109 int ss;
110
111 /* The difference between allocated and returned regsets. */
112 int diff;
113 } regset_pool = { NULL, 0, 0, NULL, 0, 0, 0 };
114
115 /* This represents the nop pool. */
116 static struct
117 {
118 /* The vector which holds previously emitted nops. */
119 insn_t *v;
120
121 /* Its pointer. */
122 int n;
123
124 /* Its size. */
125 int s;
126 } nop_pool = { NULL, 0, 0 };
127
128 /* The pool for basic block notes. */
129 static rtx_vec_t bb_note_pool;
130
131 /* A NOP pattern used to emit placeholder insns. */
132 rtx nop_pattern = NULL_RTX;
133 /* A special instruction that resides in EXIT_BLOCK.
134 EXIT_INSN is successor of the insns that lead to EXIT_BLOCK. */
135 rtx exit_insn = NULL_RTX;
136
137 /* TRUE if the preheader of the loop forming the current region was
138 removed while the region was being scheduled. */
139 bool preheader_removed = false;
140 \f
141
142 /* Forward static declarations. */
143 static void fence_clear (fence_t);
144
145 static void deps_init_id (idata_t, insn_t, bool);
146 static void init_id_from_df (idata_t, insn_t, bool);
147 static expr_t set_insn_init (expr_t, vinsn_t, int);
148
149 static void cfg_preds (basic_block, insn_t **, int *);
150 static void prepare_insn_expr (insn_t, int);
151 static void free_history_vect (vec<expr_history_def> &);
152
153 static void move_bb_info (basic_block, basic_block);
154 static void remove_empty_bb (basic_block, bool);
155 static void sel_merge_blocks (basic_block, basic_block);
156 static void sel_remove_loop_preheader (void);
157 static bool bb_has_removable_jump_to_p (basic_block, basic_block);
158
159 static bool insn_is_the_only_one_in_bb_p (insn_t);
160 static void create_initial_data_sets (basic_block);
161
162 static void free_av_set (basic_block);
163 static void invalidate_av_set (basic_block);
164 static void extend_insn_data (void);
165 static void sel_init_new_insn (insn_t, int);
166 static void finish_insns (void);
167 \f
168 /* Various list functions. */
169
170 /* Copy an instruction list L. */
171 ilist_t
172 ilist_copy (ilist_t l)
173 {
174 ilist_t head = NULL, *tailp = &head;
175
176 while (l)
177 {
178 ilist_add (tailp, ILIST_INSN (l));
179 tailp = &ILIST_NEXT (*tailp);
180 l = ILIST_NEXT (l);
181 }
182
183 return head;
184 }
185
186 /* Invert an instruction list L. */
187 ilist_t
188 ilist_invert (ilist_t l)
189 {
190 ilist_t res = NULL;
191
192 while (l)
193 {
194 ilist_add (&res, ILIST_INSN (l));
195 l = ILIST_NEXT (l);
196 }
197
198 return res;
199 }
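
/* Illustrative sketch (not part of the sources): for a list L = (i1 i2 i3),
   ilist_copy returns a fresh list (i1 i2 i3) referencing the same insns,
   while ilist_invert returns (i3 i2 i1), since it pushes each element onto
   the front of the result:

     ilist_t l2 = ilist_copy (l);    -- (i1 i2 i3)
     ilist_t l3 = ilist_invert (l);  -- (i3 i2 i1)
     ilist_clear (&l2);
     ilist_clear (&l3);  */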
200
201 /* Add a new boundary to the LP list with parameters TO, PTR, and DC. */
202 void
203 blist_add (blist_t *lp, insn_t to, ilist_t ptr, deps_t dc)
204 {
205 bnd_t bnd;
206
207 _list_add (lp);
208 bnd = BLIST_BND (*lp);
209
210 BND_TO (bnd) = to;
211 BND_PTR (bnd) = ptr;
212 BND_AV (bnd) = NULL;
213 BND_AV1 (bnd) = NULL;
214 BND_DC (bnd) = dc;
215 }
216
217 /* Remove the boundary list node pointed to by LP. */
218 void
219 blist_remove (blist_t *lp)
220 {
221 bnd_t b = BLIST_BND (*lp);
222
223 av_set_clear (&BND_AV (b));
224 av_set_clear (&BND_AV1 (b));
225 ilist_clear (&BND_PTR (b));
226
227 _list_remove (lp);
228 }
229
230 /* Init a fence tail L. */
231 void
232 flist_tail_init (flist_tail_t l)
233 {
234 FLIST_TAIL_HEAD (l) = NULL;
235 FLIST_TAIL_TAILP (l) = &FLIST_TAIL_HEAD (l);
236 }
237
238 /* Try to find fence corresponding to INSN in L. */
239 fence_t
240 flist_lookup (flist_t l, insn_t insn)
241 {
242 while (l)
243 {
244 if (FENCE_INSN (FLIST_FENCE (l)) == insn)
245 return FLIST_FENCE (l);
246
247 l = FLIST_NEXT (l);
248 }
249
250 return NULL;
251 }
252
253 /* Init the fields of F before running fill_insns. */
254 static void
255 init_fence_for_scheduling (fence_t f)
256 {
257 FENCE_BNDS (f) = NULL;
258 FENCE_PROCESSED_P (f) = false;
259 FENCE_SCHEDULED_P (f) = false;
260 }
261
262 /* Add new fence consisting of INSN and STATE to the list pointed to by LP. */
263 static void
264 flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc,
265 insn_t last_scheduled_insn, vec<rtx, va_gc> *executing_insns,
266 int *ready_ticks, int ready_ticks_size, insn_t sched_next,
267 int cycle, int cycle_issued_insns, int issue_more,
268 bool starts_cycle_p, bool after_stall_p)
269 {
270 fence_t f;
271
272 _list_add (lp);
273 f = FLIST_FENCE (*lp);
274
275 FENCE_INSN (f) = insn;
276
277 gcc_assert (state != NULL);
278 FENCE_STATE (f) = state;
279
280 FENCE_CYCLE (f) = cycle;
281 FENCE_ISSUED_INSNS (f) = cycle_issued_insns;
282 FENCE_STARTS_CYCLE_P (f) = starts_cycle_p;
283 FENCE_AFTER_STALL_P (f) = after_stall_p;
284
285 gcc_assert (dc != NULL);
286 FENCE_DC (f) = dc;
287
288 gcc_assert (tc != NULL || targetm.sched.alloc_sched_context == NULL);
289 FENCE_TC (f) = tc;
290
291 FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
292 FENCE_ISSUE_MORE (f) = issue_more;
293 FENCE_EXECUTING_INSNS (f) = executing_insns;
294 FENCE_READY_TICKS (f) = ready_ticks;
295 FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
296 FENCE_SCHED_NEXT (f) = sched_next;
297
298 init_fence_for_scheduling (f);
299 }
300
301 /* Remove the head node of the list pointed to by LP. */
302 static void
303 flist_remove (flist_t *lp)
304 {
305 if (FENCE_INSN (FLIST_FENCE (*lp)))
306 fence_clear (FLIST_FENCE (*lp));
307 _list_remove (lp);
308 }
309
310 /* Clear the fence list pointed to by LP. */
311 void
312 flist_clear (flist_t *lp)
313 {
314 while (*lp)
315 flist_remove (lp);
316 }
317
318 /* Add ORIGINAL_INSN to the def list DL, honoring CROSSES_CALL. */
319 void
320 def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call)
321 {
322 def_t d;
323
324 _list_add (dl);
325 d = DEF_LIST_DEF (*dl);
326
327 d->orig_insn = original_insn;
328 d->crosses_call = crosses_call;
329 }
330 \f
331
332 /* Functions to work with target contexts. */
333
334 /* Bulk target context. It is convenient for debugging purposes to ensure
335 that there are no uninitialized (null) target contexts. */
336 static tc_t bulk_tc = (tc_t) 1;
337
338 /* Target hooks wrappers. In the future we can provide some default
339 implementations for them. */
340
341 /* Allocate a store for the target context. */
342 static tc_t
343 alloc_target_context (void)
344 {
345 return (targetm.sched.alloc_sched_context
346 ? targetm.sched.alloc_sched_context () : bulk_tc);
347 }
348
349 /* Init target context TC.
350 If CLEAN_P is true, then initialize TC as if at the beginning of the
351 scheduler. Otherwise, copy the current backend context to TC. */
352 static void
353 init_target_context (tc_t tc, bool clean_p)
354 {
355 if (targetm.sched.init_sched_context)
356 targetm.sched.init_sched_context (tc, clean_p);
357 }
358
359 /* Allocate and initialize a target context. The meaning of CLEAN_P is
360 the same as in init_target_context (). */
361 tc_t
362 create_target_context (bool clean_p)
363 {
364 tc_t tc = alloc_target_context ();
365
366 init_target_context (tc, clean_p);
367 return tc;
368 }
369
370 /* Copy TC to the current backend context. */
371 void
372 set_target_context (tc_t tc)
373 {
374 if (targetm.sched.set_sched_context)
375 targetm.sched.set_sched_context (tc);
376 }
377
378 /* TC is about to be destroyed. Free any internal data. */
379 static void
380 clear_target_context (tc_t tc)
381 {
382 if (targetm.sched.clear_sched_context)
383 targetm.sched.clear_sched_context (tc);
384 }
385
386 /* Clear and free it. */
387 static void
388 delete_target_context (tc_t tc)
389 {
390 clear_target_context (tc);
391
392 if (targetm.sched.free_sched_context)
393 targetm.sched.free_sched_context (tc);
394 }
395
396 /* Make a copy of FROM in TO.
397 NB: Maybe this should be a hook. */
398 static void
399 copy_target_context (tc_t to, tc_t from)
400 {
401 tc_t tmp = create_target_context (false);
402
403 set_target_context (from);
404 init_target_context (to, false);
405
406 set_target_context (tmp);
407 delete_target_context (tmp);
408 }
409
410 /* Create a copy of TC. */
411 static tc_t
412 create_copy_of_target_context (tc_t tc)
413 {
414 tc_t copy = alloc_target_context ();
415
416 copy_target_context (copy, tc);
417
418 return copy;
419 }
420
421 /* Clear TC and initialize it according to CLEAN_P. The meaning of CLEAN_P
422 is the same as in init_target_context (). */
423 void
424 reset_target_context (tc_t tc, bool clean_p)
425 {
426 clear_target_context (tc);
427 init_target_context (tc, clean_p);
428 }
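
/* A minimal usage sketch (not part of the sources): a target context is
   typically snapshotted and later restored around tentative scheduling,
   assuming the backend provides the sched context hooks:

     tc_t tc = create_target_context (false);  -- snapshot backend state
     ...                                       -- schedule tentatively
     set_target_context (tc);                  -- roll backend state back
     delete_target_context (tc);  */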
429 \f
430 /* Functions to work with dependence contexts.
431 DC (aka deps context, aka deps_t, aka struct deps_desc *) is a
432 dependence context. It accumulates information about processed insns
433 to decide if the current insn is dependent on the processed ones. */
434
435 /* Make a copy of FROM in TO. */
436 static void
437 copy_deps_context (deps_t to, deps_t from)
438 {
439 init_deps (to, false);
440 deps_join (to, from);
441 }
442
443 /* Allocate store for dep context. */
444 static deps_t
445 alloc_deps_context (void)
446 {
447 return XNEW (struct deps_desc);
448 }
449
450 /* Allocate and initialize dep context. */
451 static deps_t
452 create_deps_context (void)
453 {
454 deps_t dc = alloc_deps_context ();
455
456 init_deps (dc, false);
457 return dc;
458 }
459
460 /* Create a copy of FROM. */
461 static deps_t
462 create_copy_of_deps_context (deps_t from)
463 {
464 deps_t to = alloc_deps_context ();
465
466 copy_deps_context (to, from);
467 return to;
468 }
469
470 /* Clean up internal data of DC. */
471 static void
472 clear_deps_context (deps_t dc)
473 {
474 free_deps (dc);
475 }
476
477 /* Clear and free DC. */
478 static void
479 delete_deps_context (deps_t dc)
480 {
481 clear_deps_context (dc);
482 free (dc);
483 }
484
485 /* Clear and init DC. */
486 static void
487 reset_deps_context (deps_t dc)
488 {
489 clear_deps_context (dc);
490 init_deps (dc, false);
491 }
492
493 /* This structure describes the dependence analysis hooks for advancing
494 dependence context. */
495 static struct sched_deps_info_def advance_deps_context_sched_deps_info =
496 {
497 NULL,
498
499 NULL, /* start_insn */
500 NULL, /* finish_insn */
501 NULL, /* start_lhs */
502 NULL, /* finish_lhs */
503 NULL, /* start_rhs */
504 NULL, /* finish_rhs */
505 haifa_note_reg_set,
506 haifa_note_reg_clobber,
507 haifa_note_reg_use,
508 NULL, /* note_mem_dep */
509 NULL, /* note_dep */
510
511 0, 0, 0
512 };
513
514 /* Process INSN and add its impact on DC. */
515 void
516 advance_deps_context (deps_t dc, insn_t insn)
517 {
518 sched_deps_info = &advance_deps_context_sched_deps_info;
519 deps_analyze_insn (dc, insn);
520 }
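
/* A minimal sketch of the deps context lifecycle (hypothetical usage, not
   from the sources): a context accumulates the effects of insns one by one
   and is discarded when no longer needed:

     deps_t dc = create_deps_context ();
     for (insn = from; insn != to; insn = NEXT_INSN (insn))
       advance_deps_context (dc, insn);
     delete_deps_context (dc);  */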
521 \f
522
523 /* Functions to work with DFA states. */
524
525 /* Allocate store for a DFA state. */
526 static state_t
527 state_alloc (void)
528 {
529 return xmalloc (dfa_state_size);
530 }
531
532 /* Allocate and initialize DFA state. */
533 static state_t
534 state_create (void)
535 {
536 state_t state = state_alloc ();
537
538 state_reset (state);
539 advance_state (state);
540 return state;
541 }
542
543 /* Free DFA state. */
544 static void
545 state_free (state_t state)
546 {
547 free (state);
548 }
549
550 /* Make a copy of FROM in TO. */
551 static void
552 state_copy (state_t to, state_t from)
553 {
554 memcpy (to, from, dfa_state_size);
555 }
556
557 /* Create a copy of FROM. */
558 static state_t
559 state_create_copy (state_t from)
560 {
561 state_t to = state_alloc ();
562
563 state_copy (to, from);
564 return to;
565 }
566 \f
567
568 /* Functions to work with fences. */
569
570 /* Clear the fence. */
571 static void
572 fence_clear (fence_t f)
573 {
574 state_t s = FENCE_STATE (f);
575 deps_t dc = FENCE_DC (f);
576 void *tc = FENCE_TC (f);
577
578 ilist_clear (&FENCE_BNDS (f));
579
580 gcc_assert ((s != NULL && dc != NULL && tc != NULL)
581 || (s == NULL && dc == NULL && tc == NULL));
582
583 free (s);
584
585 if (dc != NULL)
586 delete_deps_context (dc);
587
588 if (tc != NULL)
589 delete_target_context (tc);
590 vec_free (FENCE_EXECUTING_INSNS (f));
591 free (FENCE_READY_TICKS (f));
592 FENCE_READY_TICKS (f) = NULL;
593 }
594
595 /* Init a list of fences with successors of OLD_FENCE. */
596 void
597 init_fences (insn_t old_fence)
598 {
599 insn_t succ;
600 succ_iterator si;
601 bool first = true;
602 int ready_ticks_size = get_max_uid () + 1;
603
604 FOR_EACH_SUCC_1 (succ, si, old_fence,
605 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
606 {
607
608 if (first)
609 first = false;
610 else
611 gcc_assert (flag_sel_sched_pipelining_outer_loops);
612
613 flist_add (&fences, succ,
614 state_create (),
615 create_deps_context () /* dc */,
616 create_target_context (true) /* tc */,
617 NULL_RTX /* last_scheduled_insn */,
618 NULL, /* executing_insns */
619 XCNEWVEC (int, ready_ticks_size), /* ready_ticks */
620 ready_ticks_size,
621 NULL_RTX /* sched_next */,
622 1 /* cycle */, 0 /* cycle_issued_insns */,
623 issue_rate, /* issue_more */
624 1 /* starts_cycle_p */, 0 /* after_stall_p */);
625 }
626 }
627
628 /* Merge two fences (filling fields of fence F with resulting values) by the
629 following rules: 1) state, target context and last scheduled insn are
630 propagated from the fallthrough edge if it is available;
631 2) deps context and cycle are propagated from the more probable edge;
632 3) all other fields are set to the corresponding constant values.
633
634 INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS,
635 READY_TICKS, READY_TICKS_SIZE, SCHED_NEXT, CYCLE, ISSUE_MORE
636 and AFTER_STALL_P are the corresponding fields of the second fence. */
637 static void
638 merge_fences (fence_t f, insn_t insn,
639 state_t state, deps_t dc, void *tc,
640 rtx last_scheduled_insn, vec<rtx, va_gc> *executing_insns,
641 int *ready_ticks, int ready_ticks_size,
642 rtx sched_next, int cycle, int issue_more, bool after_stall_p)
643 {
644 insn_t last_scheduled_insn_old = FENCE_LAST_SCHEDULED_INSN (f);
645
646 gcc_assert (sel_bb_head_p (FENCE_INSN (f))
647 && !sched_next && !FENCE_SCHED_NEXT (f));
648
649 /* Check if we can decide from which path the fences came.
650 If we can't (or don't want to), reset all. */
651 if (last_scheduled_insn == NULL
652 || last_scheduled_insn_old == NULL
653 /* This is the case when INSN is reachable on several paths from
654 one insn (this can happen when pipelining of outer loops is on and
655 there are two edges: one going around the inner loop and the other
656 going right through it; in such a case just reset everything). */
657 || last_scheduled_insn == last_scheduled_insn_old)
658 {
659 state_reset (FENCE_STATE (f));
660 state_free (state);
661
662 reset_deps_context (FENCE_DC (f));
663 delete_deps_context (dc);
664
665 reset_target_context (FENCE_TC (f), true);
666 delete_target_context (tc);
667
668 if (cycle > FENCE_CYCLE (f))
669 FENCE_CYCLE (f) = cycle;
670
671 FENCE_LAST_SCHEDULED_INSN (f) = NULL;
672 FENCE_ISSUE_MORE (f) = issue_rate;
673 vec_free (executing_insns);
674 free (ready_ticks);
675 if (FENCE_EXECUTING_INSNS (f))
676 FENCE_EXECUTING_INSNS (f)->block_remove (0,
677 FENCE_EXECUTING_INSNS (f)->length ());
678 if (FENCE_READY_TICKS (f))
679 memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
680 }
681 else
682 {
683 edge edge_old = NULL, edge_new = NULL;
684 edge candidate;
685 succ_iterator si;
686 insn_t succ;
687
688 /* Find fallthrough edge. */
689 gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb);
690 candidate = find_fallthru_edge_from (BLOCK_FOR_INSN (insn)->prev_bb);
691
692 if (!candidate
693 || (candidate->src != BLOCK_FOR_INSN (last_scheduled_insn)
694 && candidate->src != BLOCK_FOR_INSN (last_scheduled_insn_old)))
695 {
696 /* No fallthrough edge leading to basic block of INSN. */
697 state_reset (FENCE_STATE (f));
698 state_free (state);
699
700 reset_target_context (FENCE_TC (f), true);
701 delete_target_context (tc);
702
703 FENCE_LAST_SCHEDULED_INSN (f) = NULL;
704 FENCE_ISSUE_MORE (f) = issue_rate;
705 }
706 else
707 if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn))
708 {
709 /* It would be weird if the same insn were the successor of several
710 fallthrough edges. */
711 gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
712 != BLOCK_FOR_INSN (last_scheduled_insn_old));
713
714 state_free (FENCE_STATE (f));
715 FENCE_STATE (f) = state;
716
717 delete_target_context (FENCE_TC (f));
718 FENCE_TC (f) = tc;
719
720 FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
721 FENCE_ISSUE_MORE (f) = issue_more;
722 }
723 else
724 {
725 /* Leave STATE, TC and LAST_SCHEDULED_INSN fields untouched. */
726 state_free (state);
727 delete_target_context (tc);
728
729 gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
730 != BLOCK_FOR_INSN (last_scheduled_insn));
731 }
732
733 /* Find edge of first predecessor (last_scheduled_insn_old->insn). */
734 FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn_old,
735 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
736 {
737 if (succ == insn)
738 {
739 /* No same successor allowed from several edges. */
740 gcc_assert (!edge_old);
741 edge_old = si.e1;
742 }
743 }
744 /* Find edge of second predecessor (last_scheduled_insn->insn). */
745 FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn,
746 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
747 {
748 if (succ == insn)
749 {
750 /* No same successor allowed from several edges. */
751 gcc_assert (!edge_new);
752 edge_new = si.e1;
753 }
754 }
755
756 /* Check if we can choose most probable predecessor. */
757 if (edge_old == NULL || edge_new == NULL)
758 {
759 reset_deps_context (FENCE_DC (f));
760 delete_deps_context (dc);
761 vec_free (executing_insns);
762 free (ready_ticks);
763
764 FENCE_CYCLE (f) = MAX (FENCE_CYCLE (f), cycle);
765 if (FENCE_EXECUTING_INSNS (f))
766 FENCE_EXECUTING_INSNS (f)->block_remove (0,
767 FENCE_EXECUTING_INSNS (f)->length ());
768 if (FENCE_READY_TICKS (f))
769 memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
770 }
771 else
772 if (edge_new->probability > edge_old->probability)
773 {
774 delete_deps_context (FENCE_DC (f));
775 FENCE_DC (f) = dc;
776 vec_free (FENCE_EXECUTING_INSNS (f));
777 FENCE_EXECUTING_INSNS (f) = executing_insns;
778 free (FENCE_READY_TICKS (f));
779 FENCE_READY_TICKS (f) = ready_ticks;
780 FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
781 FENCE_CYCLE (f) = cycle;
782 }
783 else
784 {
785 /* Leave DC and CYCLE untouched. */
786 delete_deps_context (dc);
787 vec_free (executing_insns);
788 free (ready_ticks);
789 }
790 }
791
792 /* Fill remaining invariant fields. */
793 if (after_stall_p)
794 FENCE_AFTER_STALL_P (f) = 1;
795
796 FENCE_ISSUED_INSNS (f) = 0;
797 FENCE_STARTS_CYCLE_P (f) = 1;
798 FENCE_SCHED_NEXT (f) = NULL;
799 }
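
/* A worked example of the rules above (illustrative, not from the sources):
   suppose fences from blocks A and B meet at insn I, A->I is the fallthrough
   edge and B->I is the more probable one.  Then F keeps A's state, target
   context and last scheduled insn (rule 1), takes B's deps context and cycle
   (rule 2), and gets ISSUED_INSNS = 0, STARTS_CYCLE_P = 1 and
   SCHED_NEXT = NULL (rule 3).  */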
800
801 /* Add a new fence to NEW_FENCES list, initializing it from all
802 other parameters. */
803 static void
804 add_to_fences (flist_tail_t new_fences, insn_t insn,
805 state_t state, deps_t dc, void *tc, rtx last_scheduled_insn,
806 vec<rtx, va_gc> *executing_insns, int *ready_ticks,
807 int ready_ticks_size, rtx sched_next, int cycle,
808 int cycle_issued_insns, int issue_rate,
809 bool starts_cycle_p, bool after_stall_p)
810 {
811 fence_t f = flist_lookup (FLIST_TAIL_HEAD (new_fences), insn);
812
813 if (! f)
814 {
815 flist_add (FLIST_TAIL_TAILP (new_fences), insn, state, dc, tc,
816 last_scheduled_insn, executing_insns, ready_ticks,
817 ready_ticks_size, sched_next, cycle, cycle_issued_insns,
818 issue_rate, starts_cycle_p, after_stall_p);
819
820 FLIST_TAIL_TAILP (new_fences)
821 = &FLIST_NEXT (*FLIST_TAIL_TAILP (new_fences));
822 }
823 else
824 {
825 merge_fences (f, insn, state, dc, tc, last_scheduled_insn,
826 executing_insns, ready_ticks, ready_ticks_size,
827 sched_next, cycle, issue_rate, after_stall_p);
828 }
829 }
830
831 /* Move the first fence in the OLD_FENCES list to NEW_FENCES. */
832 void
833 move_fence_to_fences (flist_t old_fences, flist_tail_t new_fences)
834 {
835 fence_t f, old;
836 flist_t *tailp = FLIST_TAIL_TAILP (new_fences);
837
838 old = FLIST_FENCE (old_fences);
839 f = flist_lookup (FLIST_TAIL_HEAD (new_fences),
840 FENCE_INSN (FLIST_FENCE (old_fences)));
841 if (f)
842 {
843 merge_fences (f, old->insn, old->state, old->dc, old->tc,
844 old->last_scheduled_insn, old->executing_insns,
845 old->ready_ticks, old->ready_ticks_size,
846 old->sched_next, old->cycle, old->issue_more,
847 old->after_stall_p);
848 }
849 else
850 {
851 _list_add (tailp);
852 FLIST_TAIL_TAILP (new_fences) = &FLIST_NEXT (*tailp);
853 *FLIST_FENCE (*tailp) = *old;
854 init_fence_for_scheduling (FLIST_FENCE (*tailp));
855 }
856 FENCE_INSN (old) = NULL;
857 }
858
859 /* Add a new fence to NEW_FENCES list and initialize most of its data
860 as a clean one. */
861 void
862 add_clean_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
863 {
864 int ready_ticks_size = get_max_uid () + 1;
865
866 add_to_fences (new_fences,
867 succ, state_create (), create_deps_context (),
868 create_target_context (true),
869 NULL_RTX, NULL,
870 XCNEWVEC (int, ready_ticks_size), ready_ticks_size,
871 NULL_RTX, FENCE_CYCLE (fence) + 1,
872 0, issue_rate, 1, FENCE_AFTER_STALL_P (fence));
873 }
874
875 /* Add a new fence to NEW_FENCES list and initialize all of its data
876 from FENCE and SUCC. */
877 void
878 add_dirty_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
879 {
880 int * new_ready_ticks
881 = XNEWVEC (int, FENCE_READY_TICKS_SIZE (fence));
882
883 memcpy (new_ready_ticks, FENCE_READY_TICKS (fence),
884 FENCE_READY_TICKS_SIZE (fence) * sizeof (int));
885 add_to_fences (new_fences,
886 succ, state_create_copy (FENCE_STATE (fence)),
887 create_copy_of_deps_context (FENCE_DC (fence)),
888 create_copy_of_target_context (FENCE_TC (fence)),
889 FENCE_LAST_SCHEDULED_INSN (fence),
890 vec_safe_copy (FENCE_EXECUTING_INSNS (fence)),
891 new_ready_ticks,
892 FENCE_READY_TICKS_SIZE (fence),
893 FENCE_SCHED_NEXT (fence),
894 FENCE_CYCLE (fence),
895 FENCE_ISSUED_INSNS (fence),
896 FENCE_ISSUE_MORE (fence),
897 FENCE_STARTS_CYCLE_P (fence),
898 FENCE_AFTER_STALL_P (fence));
899 }
900 \f
901
902 /* Functions to work with regset and nop pools. */
903
904 /* Return a regset from the pool. It might have some of the bits set
905 from previous usage. */
906 regset
907 get_regset_from_pool (void)
908 {
909 regset rs;
910
911 if (regset_pool.n != 0)
912 rs = regset_pool.v[--regset_pool.n];
913 else
914 /* We need to create the regset. */
915 {
916 rs = ALLOC_REG_SET (&reg_obstack);
917
918 if (regset_pool.nn == regset_pool.ss)
919 regset_pool.vv = XRESIZEVEC (regset, regset_pool.vv,
920 (regset_pool.ss = 2 * regset_pool.ss + 1));
921 regset_pool.vv[regset_pool.nn++] = rs;
922 }
923
924 regset_pool.diff++;
925
926 return rs;
927 }
928
929 /* Same as above, but returns an empty regset. */
930 regset
931 get_clear_regset_from_pool (void)
932 {
933 regset rs = get_regset_from_pool ();
934
935 CLEAR_REG_SET (rs);
936 return rs;
937 }
938
939 /* Return regset RS to the pool for future use. */
940 void
941 return_regset_to_pool (regset rs)
942 {
943 gcc_assert (rs);
944 regset_pool.diff--;
945
946 if (regset_pool.n == regset_pool.s)
947 regset_pool.v = XRESIZEVEC (regset, regset_pool.v,
948 (regset_pool.s = 2 * regset_pool.s + 1));
949 regset_pool.v[regset_pool.n++] = rs;
950 }
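
/* A typical pooled-regset lifetime (illustrative sketch, not part of the
   sources).  Every regset taken from the pool must eventually be returned,
   or the leak check on regset_pool.diff in free_regset_pool will fail:

     regset used = get_clear_regset_from_pool ();
     ...                                -- accumulate registers into USED
     return_regset_to_pool (used);  */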
951
952 #ifdef ENABLE_CHECKING
953 /* This is used as a qsort callback for sorting regset pool stacks.
954 X and XX are addresses of two regsets. They are never equal. */
955 static int
956 cmp_v_in_regset_pool (const void *x, const void *xx)
957 {
958 uintptr_t r1 = (uintptr_t) *((const regset *) x);
959 uintptr_t r2 = (uintptr_t) *((const regset *) xx);
960 if (r1 > r2)
961 return 1;
962 else if (r1 < r2)
963 return -1;
964 gcc_unreachable ();
965 }
966 #endif
967
968 /* Free the regset pool possibly checking for memory leaks. */
969 void
970 free_regset_pool (void)
971 {
972 #ifdef ENABLE_CHECKING
973 {
974 regset *v = regset_pool.v;
975 int i = 0;
976 int n = regset_pool.n;
977
978 regset *vv = regset_pool.vv;
979 int ii = 0;
980 int nn = regset_pool.nn;
981
982 int diff = 0;
983
984 gcc_assert (n <= nn);
985
986 /* Sort both vectors so it will be possible to compare them. */
987 qsort (v, n, sizeof (*v), cmp_v_in_regset_pool);
988 qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool);
989
990 while (ii < nn)
991 {
992 if (v[i] == vv[ii])
993 i++;
994 else
995 /* VV[II] was lost. */
996 diff++;
997
998 ii++;
999 }
1000
1001 gcc_assert (diff == regset_pool.diff);
1002 }
1003 #endif
1004
1005 /* If not true - we have a memory leak. */
1006 gcc_assert (regset_pool.diff == 0);
1007
1008 while (regset_pool.n)
1009 {
1010 --regset_pool.n;
1011 FREE_REG_SET (regset_pool.v[regset_pool.n]);
1012 }
1013
1014 free (regset_pool.v);
1015 regset_pool.v = NULL;
1016 regset_pool.s = 0;
1017
1018 free (regset_pool.vv);
1019 regset_pool.vv = NULL;
1020 regset_pool.nn = 0;
1021 regset_pool.ss = 0;
1022
1023 regset_pool.diff = 0;
1024 }
1025 \f
1026
1027 /* Functions to work with nop pools. NOP insns are used as temporary
1028 placeholders of the insns being scheduled to allow correct update of
1029 the data sets. When update is finished, NOPs are deleted. */
1030
1031 /* A vinsn that is used to represent a nop. This vinsn is shared among all
1032 nops sel-sched generates. */
1033 static vinsn_t nop_vinsn = NULL;
1034
1035 /* Emit a nop before INSN, taking it from the pool. */
1036 insn_t
1037 get_nop_from_pool (insn_t insn)
1038 {
1039 insn_t nop;
1040 bool old_p = nop_pool.n != 0;
1041 int flags;
1042
1043 if (old_p)
1044 nop = nop_pool.v[--nop_pool.n];
1045 else
1046 nop = nop_pattern;
1047
1048 nop = emit_insn_before (nop, insn);
1049
1050 if (old_p)
1051 flags = INSN_INIT_TODO_SSID;
1052 else
1053 flags = INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID;
1054
1055 set_insn_init (INSN_EXPR (insn), nop_vinsn, INSN_SEQNO (insn));
1056 sel_init_new_insn (nop, flags);
1057
1058 return nop;
1059 }
1060
1061 /* Remove NOP from the instruction stream and return it to the pool. */
1062 void
1063 return_nop_to_pool (insn_t nop, bool full_tidying)
1064 {
1065 gcc_assert (INSN_IN_STREAM_P (nop));
1066 sel_remove_insn (nop, false, full_tidying);
1067
1068 /* We'll recycle this nop. */
1069 INSN_DELETED_P (nop) = 0;
1070
1071 if (nop_pool.n == nop_pool.s)
1072 nop_pool.v = XRESIZEVEC (rtx, nop_pool.v,
1073 (nop_pool.s = 2 * nop_pool.s + 1));
1074 nop_pool.v[nop_pool.n++] = nop;
1075 }
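
/* An illustrative pairing of the two functions above (hypothetical usage,
   not from the sources): a nop temporarily stands in for a moved insn while
   the data sets are updated, and is then recycled:

     insn_t nop = get_nop_from_pool (insn);
     ...                                 -- update the data sets
     return_nop_to_pool (nop, true);  */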
1076
1077 /* Free the nop pool. */
1078 void
1079 free_nop_pool (void)
1080 {
1081 nop_pool.n = 0;
1082 nop_pool.s = 0;
1083 free (nop_pool.v);
1084 nop_pool.v = NULL;
1085 }
1086 \f
1087
1088 /* Skip unspec to support ia64 speculation. Called from rtx_equal_p_cb.
1089 The callback is given two rtxes XX and YY and writes the new rtxes
1090 to NX and NY in case something needs to be skipped. */
1091 static int
1092 skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx* ny)
1093 {
1094 const_rtx x = *xx;
1095 const_rtx y = *yy;
1096
1097 if (GET_CODE (x) == UNSPEC
1098 && (targetm.sched.skip_rtx_p == NULL
1099 || targetm.sched.skip_rtx_p (x)))
1100 {
1101 *nx = XVECEXP (x, 0, 0);
1102 *ny = CONST_CAST_RTX (y);
1103 return 1;
1104 }
1105
1106 if (GET_CODE (y) == UNSPEC
1107 && (targetm.sched.skip_rtx_p == NULL
1108 || targetm.sched.skip_rtx_p (y)))
1109 {
1110 *nx = CONST_CAST_RTX (x);
1111 *ny = XVECEXP (y, 0, 0);
1112 return 1;
1113 }
1114
1115 return 0;
1116 }
1117
1118 /* Callback, called from hash_rtx_cb. Helps to hash UNSPEC rtx X correctly
1119 to support ia64 speculation. When changes are needed, the new rtx is
1120 written to NX and the new mode to NMODE, and the callback returns true. */
1121 static int
1122 hash_with_unspec_callback (const_rtx x, enum machine_mode mode ATTRIBUTE_UNUSED,
1123 rtx *nx, enum machine_mode* nmode)
1124 {
1125 if (GET_CODE (x) == UNSPEC
1126 && targetm.sched.skip_rtx_p
1127 && targetm.sched.skip_rtx_p (x))
1128 {
1129 *nx = XVECEXP (x, 0, 0);
1130 *nmode = VOIDmode;
1131 return 1;
1132 }
1133
1134 return 0;
1135 }
1136
1137 /* Return true if LHS and RHS are ok to be scheduled separately. */
1138 static bool
1139 lhs_and_rhs_separable_p (rtx lhs, rtx rhs)
1140 {
1141 if (lhs == NULL || rhs == NULL)
1142 return false;
1143
1144 /* Do not schedule CONST, CONST_INT, CONST_DOUBLE etc. as rhs: no point
1145 using a reg if a const can be used. Moreover, scheduling a const as rhs
1146 may lead to a mode mismatch, because consts don't have modes but they
1147 could be merged from branches where the same const is used in different modes. */
1148 if (CONSTANT_P (rhs))
1149 return false;
1150
1151 /* ??? Do not rename predicate registers to avoid ICEs in bundling. */
1152 if (COMPARISON_P (rhs))
1153 return false;
1154
1155 /* Do not allow single REG to be an rhs. */
1156 if (REG_P (rhs))
1157 return false;
1158
1159 /* See comment at find_used_regs_1 (*1) for explanation of this
1160 restriction. */
1161 /* FIXME: remove this later. */
1162 if (MEM_P (lhs))
1163 return false;
1164
1165 /* This will filter all tricky things like ZERO_EXTRACT etc.
1166 For now we don't handle it. */
1167 if (!REG_P (lhs) && !MEM_P (lhs))
1168 return false;
1169
1170 return true;
1171 }
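
/* Worked examples for the predicate above (illustrative, not from the
   sources): "r1 = r2 + r3" is separable (REG lhs, non-trivial rhs);
   "r1 = 5" is not (constant rhs); "r1 = r2" is not (single REG rhs);
   "[r1] = r2 + r3" is not (MEM lhs, see find_used_regs_1).  */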
1172
1173 /* Initialize vinsn VI for INSN. Only for use from vinsn_create (). When
1174 FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable. This is
1175 used e.g. for insns from recovery blocks. */
1176 static void
1177 vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p)
1178 {
1179 hash_rtx_callback_function hrcf;
1180 int insn_class;
1181
1182 VINSN_INSN_RTX (vi) = insn;
1183 VINSN_COUNT (vi) = 0;
1184 vi->cost = -1;
1185
1186 if (INSN_NOP_P (insn))
1187 return;
1188
1189 if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL)
1190 init_id_from_df (VINSN_ID (vi), insn, force_unique_p);
1191 else
1192 deps_init_id (VINSN_ID (vi), insn, force_unique_p);
1193
1194 /* Hash vinsn depending on whether it is separable or not. */
1195 hrcf = targetm.sched.skip_rtx_p ? hash_with_unspec_callback : NULL;
1196 if (VINSN_SEPARABLE_P (vi))
1197 {
1198 rtx rhs = VINSN_RHS (vi);
1199
1200 VINSN_HASH (vi) = hash_rtx_cb (rhs, GET_MODE (rhs),
1201 NULL, NULL, false, hrcf);
1202 VINSN_HASH_RTX (vi) = hash_rtx_cb (VINSN_PATTERN (vi),
1203 VOIDmode, NULL, NULL,
1204 false, hrcf);
1205 }
1206 else
1207 {
1208 VINSN_HASH (vi) = hash_rtx_cb (VINSN_PATTERN (vi), VOIDmode,
1209 NULL, NULL, false, hrcf);
1210 VINSN_HASH_RTX (vi) = VINSN_HASH (vi);
1211 }
1212
1213 insn_class = haifa_classify_insn (insn);
1214 if (insn_class >= 2
1215 && (!targetm.sched.get_insn_spec_ds
1216 || ((targetm.sched.get_insn_spec_ds (insn) & BEGIN_CONTROL)
1217 == 0)))
1218 VINSN_MAY_TRAP_P (vi) = true;
1219 else
1220 VINSN_MAY_TRAP_P (vi) = false;
1221 }
1222
1223 /* Indicate that VI has become the part of an rtx object. */
1224 void
1225 vinsn_attach (vinsn_t vi)
1226 {
1227 /* Assert that VI is not pending for deletion. */
1228 gcc_assert (VINSN_INSN_RTX (vi));
1229
1230 VINSN_COUNT (vi)++;
1231 }
1232
1233 /* Create and init VI from INSN. Use FORCE_UNIQUE_P for determining the
1234 correct VINSN_TYPE (VI). */
1235 static vinsn_t
1236 vinsn_create (insn_t insn, bool force_unique_p)
1237 {
1238 vinsn_t vi = XCNEW (struct vinsn_def);
1239
1240 vinsn_init (vi, insn, force_unique_p);
1241 return vi;
1242 }
1243
1244 /* Return a copy of VI. When REATTACH_P is true, detach VI and attach
1245 the copy. */
1246 vinsn_t
1247 vinsn_copy (vinsn_t vi, bool reattach_p)
1248 {
1249 rtx copy;
1250 bool unique = VINSN_UNIQUE_P (vi);
1251 vinsn_t new_vi;
1252
1253 copy = create_copy_of_insn_rtx (VINSN_INSN_RTX (vi));
1254 new_vi = create_vinsn_from_insn_rtx (copy, unique);
1255 if (reattach_p)
1256 {
1257 vinsn_detach (vi);
1258 vinsn_attach (new_vi);
1259 }
1260
1261 return new_vi;
1262 }
1263
1264 /* Delete the VI vinsn and free its data. */
1265 static void
1266 vinsn_delete (vinsn_t vi)
1267 {
1268 gcc_assert (VINSN_COUNT (vi) == 0);
1269
1270 if (!INSN_NOP_P (VINSN_INSN_RTX (vi)))
1271 {
1272 return_regset_to_pool (VINSN_REG_SETS (vi));
1273 return_regset_to_pool (VINSN_REG_USES (vi));
1274 return_regset_to_pool (VINSN_REG_CLOBBERS (vi));
1275 }
1276
1277 free (vi);
1278 }
1279
1280 /* Indicate that VI is no longer a part of some rtx object.
1281 Remove VI if it is no longer needed. */
1282 void
1283 vinsn_detach (vinsn_t vi)
1284 {
1285 gcc_assert (VINSN_COUNT (vi) > 0);
1286
1287 if (--VINSN_COUNT (vi) == 0)
1288 vinsn_delete (vi);
1289 }
1290
1291 /* Returns TRUE if VI is a branch. */
1292 bool
1293 vinsn_cond_branch_p (vinsn_t vi)
1294 {
1295 insn_t insn;
1296
1297 if (!VINSN_UNIQUE_P (vi))
1298 return false;
1299
1300 insn = VINSN_INSN_RTX (vi);
1301 if (BB_END (BLOCK_FOR_INSN (insn)) != insn)
1302 return false;
1303
1304 return control_flow_insn_p (insn);
1305 }
1306
1307 /* Return latency of INSN. */
1308 static int
1309 sel_insn_rtx_cost (rtx insn)
1310 {
1311 int cost;
1312
1313 /* A USE insn, or something else we don't need to
1314 understand. We can't pass these directly to
1315 result_ready_cost or insn_default_latency because it will
1316 trigger a fatal error for unrecognizable insns. */
1317 if (recog_memoized (insn) < 0)
1318 cost = 0;
1319 else
1320 {
1321 cost = insn_default_latency (insn);
1322
1323 if (cost < 0)
1324 cost = 0;
1325 }
1326
1327 return cost;
1328 }
1329
1330 /* Return the cost of the VI.
1331 !!! FIXME: Unify with haifa-sched.c: insn_cost (). */
1332 int
1333 sel_vinsn_cost (vinsn_t vi)
1334 {
1335 int cost = vi->cost;
1336
1337 if (cost < 0)
1338 {
1339 cost = sel_insn_rtx_cost (VINSN_INSN_RTX (vi));
1340 vi->cost = cost;
1341 }
1342
1343 return cost;
1344 }
1345 \f
1346
1347 /* Functions for insn emitting. */
1348
1349 /* Emit new insn after AFTER based on PATTERN and initialize its data from
1350 EXPR and SEQNO. */
1351 insn_t
1352 sel_gen_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, insn_t after)
1353 {
1354 insn_t new_insn;
1355
1356 gcc_assert (EXPR_TARGET_AVAILABLE (expr) == true);
1357
1358 new_insn = emit_insn_after (pattern, after);
1359 set_insn_init (expr, NULL, seqno);
1360 sel_init_new_insn (new_insn, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID);
1361
1362 return new_insn;
1363 }
1364
1365 /* Force newly generated vinsns to be unique. */
1366 static bool init_insn_force_unique_p = false;
1367
1368 /* Emit new speculation recovery insn after AFTER based on PATTERN and
1369 initialize its data from EXPR and SEQNO. */
1370 insn_t
1371 sel_gen_recovery_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno,
1372 insn_t after)
1373 {
1374 insn_t insn;
1375
1376 gcc_assert (!init_insn_force_unique_p);
1377
1378 init_insn_force_unique_p = true;
1379 insn = sel_gen_insn_from_rtx_after (pattern, expr, seqno, after);
1380 CANT_MOVE (insn) = 1;
1381 init_insn_force_unique_p = false;
1382
1383 return insn;
1384 }
1385
1386 /* Emit new insn after AFTER based on EXPR and SEQNO. If VINSN is not NULL,
1387 take it as a new vinsn instead of EXPR's vinsn.
1388 We simplify insns later, after scheduling region in
1389 simplify_changed_insns. */
1390 insn_t
1391 sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
1392 insn_t after)
1393 {
1394 expr_t emit_expr;
1395 insn_t insn;
1396 int flags;
1397
1398 emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr),
1399 seqno);
1400 insn = EXPR_INSN_RTX (emit_expr);
1401
1402 /* The insn may come from the transformation cache, which may hold already
1403 deleted insns, so mark it as not deleted. */
1404 INSN_DELETED_P (insn) = 0;
1405
1406 add_insn_after (insn, after, BLOCK_FOR_INSN (insn));
1407
1408 flags = INSN_INIT_TODO_SSID;
1409 if (INSN_LUID (insn) == 0)
1410 flags |= INSN_INIT_TODO_LUID;
1411 sel_init_new_insn (insn, flags);
1412
1413 return insn;
1414 }
1415
1416 /* Move insn from EXPR after AFTER. */
1417 insn_t
1418 sel_move_insn (expr_t expr, int seqno, insn_t after)
1419 {
1420 insn_t insn = EXPR_INSN_RTX (expr);
1421 basic_block bb = BLOCK_FOR_INSN (after);
1422 insn_t next = NEXT_INSN (after);
1423
1424 /* Assert that in move_op we disconnected this insn properly. */
1425 gcc_assert (EXPR_VINSN (INSN_EXPR (insn)) != NULL);
1426 PREV_INSN (insn) = after;
1427 NEXT_INSN (insn) = next;
1428
1429 NEXT_INSN (after) = insn;
1430 PREV_INSN (next) = insn;
1431
1432 /* Update links from insn to bb and vice versa. */
1433 df_insn_change_bb (insn, bb);
1434 if (BB_END (bb) == after)
1435 BB_END (bb) = insn;
1436
1437 prepare_insn_expr (insn, seqno);
1438 return insn;
1439 }
1440
1441 \f
1442 /* Functions to work with right-hand sides. */
1443
1444 /* Search for a hash value determined by UID/NEW_VINSN in a sorted vector
1445 VECT and return true when found. Use NEW_VINSN for comparison only when
1446 COMPARE_VINSNS is true. Write to INDP the index on which
1447 the search has stopped, such that inserting the new element at INDP will
1448 retain VECT's sort order. */
1449 static bool
1450 find_in_history_vect_1 (vec<expr_history_def> vect,
1451 unsigned uid, vinsn_t new_vinsn,
1452 bool compare_vinsns, int *indp)
1453 {
1454 expr_history_def *arr;
1455 int i, j, len = vect.length ();
1456
1457 if (len == 0)
1458 {
1459 *indp = 0;
1460 return false;
1461 }
1462
1463 arr = vect.address ();
1464 i = 0, j = len - 1;
1465
1466 while (i <= j)
1467 {
1468 unsigned auid = arr[i].uid;
1469 vinsn_t avinsn = arr[i].new_expr_vinsn;
1470
1471 if (auid == uid
1472 /* When undoing transformation on a bookkeeping copy, the new vinsn
1473 may not be exactly equal to the one that is saved in the vector.
1474 This is because the insn whose copy we're checking was possibly
1475 substituted itself. */
1476 && (! compare_vinsns
1477 || vinsn_equal_p (avinsn, new_vinsn)))
1478 {
1479 *indp = i;
1480 return true;
1481 }
1482 else if (auid > uid)
1483 break;
1484 i++;
1485 }
1486
1487 *indp = i;
1488 return false;
1489 }
1490
1491 /* Search for a uid of INSN and NEW_VINSN in a sorted vector VECT. Return
1492 the position found or -1 if no such value is in the vector.
1493 Search also for UIDs of insn's originators, if ORIGINATORS_P is true. */
1494 int
1495 find_in_history_vect (vec<expr_history_def> vect, rtx insn,
1496 vinsn_t new_vinsn, bool originators_p)
1497 {
1498 int ind;
1499
1500 if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn,
1501 false, &ind))
1502 return ind;
1503
1504 if (INSN_ORIGINATORS (insn) && originators_p)
1505 {
1506 unsigned uid;
1507 bitmap_iterator bi;
1508
1509 EXECUTE_IF_SET_IN_BITMAP (INSN_ORIGINATORS (insn), 0, uid, bi)
1510 if (find_in_history_vect_1 (vect, uid, new_vinsn, false, &ind))
1511 return ind;
1512 }
1513
1514 return -1;
1515 }
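
/* A worked example for the search above (illustrative, not from the
   sources): with VECT holding entries for uids {3, 7, 9} in sorted order,
   looking up uid 7 returns index 1, while uid 5 is reported as absent with
   *INDP set to 1 by find_in_history_vect_1 -- the position at which an
   entry for uid 5 would have to be inserted to keep VECT sorted.  */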
1516
1517 /* Insert new element in a sorted history vector pointed to by PVECT,
1518 if it is not there already. The element is searched using
1519 UID/NEW_EXPR_VINSN pair. TYPE, OLD_EXPR_VINSN and SPEC_DS save
1520 the history of a transformation. */
1521 void
1522 insert_in_history_vect (vec<expr_history_def> *pvect,
1523 unsigned uid, enum local_trans_type type,
1524 vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn,
1525 ds_t spec_ds)
1526 {
1527 vec<expr_history_def> vect = *pvect;
1528 expr_history_def temp;
1529 bool res;
1530 int ind;
1531
1532 res = find_in_history_vect_1 (vect, uid, new_expr_vinsn, true, &ind);
1533
1534 if (res)
1535 {
1536 expr_history_def *phist = &vect[ind];
1537
1538 /* It is possible that speculation types of expressions that were
1539 propagated through different paths will be different here. In this
1540 case, merge the status to get the correct check later. */
1541 if (phist->spec_ds != spec_ds)
1542 phist->spec_ds = ds_max_merge (phist->spec_ds, spec_ds);
1543 return;
1544 }
1545
1546 temp.uid = uid;
1547 temp.old_expr_vinsn = old_expr_vinsn;
1548 temp.new_expr_vinsn = new_expr_vinsn;
1549 temp.spec_ds = spec_ds;
1550 temp.type = type;
1551
1552 vinsn_attach (old_expr_vinsn);
1553 vinsn_attach (new_expr_vinsn);
1554 vect.safe_insert (ind, temp);
1555 *pvect = vect;
1556 }
1557
1558 /* Free history vector PVECT. */
1559 static void
1560 free_history_vect (vec<expr_history_def> &pvect)
1561 {
1562 unsigned i;
1563 expr_history_def *phist;
1564
1565 if (! pvect.exists ())
1566 return;
1567
1568 for (i = 0; pvect.iterate (i, &phist); i++)
1569 {
1570 vinsn_detach (phist->old_expr_vinsn);
1571 vinsn_detach (phist->new_expr_vinsn);
1572 }
1573
1574 pvect.release ();
1575 }
1576
1577 /* Merge vector FROM to PVECT. */
1578 static void
1579 merge_history_vect (vec<expr_history_def> *pvect,
1580 vec<expr_history_def> from)
1581 {
1582 expr_history_def *phist;
1583 int i;
1584
1585 /* We keep this vector sorted. */
1586 for (i = 0; from.iterate (i, &phist); i++)
1587 insert_in_history_vect (pvect, phist->uid, phist->type,
1588 phist->old_expr_vinsn, phist->new_expr_vinsn,
1589 phist->spec_ds);
1590 }
1591
1592 /* Compare two vinsns as rhses if possible and as vinsns otherwise. */
1593 bool
1594 vinsn_equal_p (vinsn_t x, vinsn_t y)
1595 {
1596 rtx_equal_p_callback_function repcf;
1597
1598 if (x == y)
1599 return true;
1600
1601 if (VINSN_TYPE (x) != VINSN_TYPE (y))
1602 return false;
1603
1604 if (VINSN_HASH (x) != VINSN_HASH (y))
1605 return false;
1606
1607 repcf = targetm.sched.skip_rtx_p ? skip_unspecs_callback : NULL;
1608 if (VINSN_SEPARABLE_P (x))
1609 {
1610 /* Compare RHSes of VINSNs. */
1611 gcc_assert (VINSN_RHS (x));
1612 gcc_assert (VINSN_RHS (y));
1613
1614 return rtx_equal_p_cb (VINSN_RHS (x), VINSN_RHS (y), repcf);
1615 }
1616
1617 return rtx_equal_p_cb (VINSN_PATTERN (x), VINSN_PATTERN (y), repcf);
1618 }
1619 \f
1620
1621 /* Functions for working with expressions. */
1622
1623 /* Initialize EXPR. */
1624 static void
1625 init_expr (expr_t expr, vinsn_t vi, int spec, int use, int priority,
1626 int sched_times, int orig_bb_index, ds_t spec_done_ds,
1627 ds_t spec_to_check_ds, int orig_sched_cycle,
1628 vec<expr_history_def> history,
1629 signed char target_available,
1630 bool was_substituted, bool was_renamed, bool needs_spec_check_p,
1631 bool cant_move)
1632 {
1633 vinsn_attach (vi);
1634
1635 EXPR_VINSN (expr) = vi;
1636 EXPR_SPEC (expr) = spec;
1637 EXPR_USEFULNESS (expr) = use;
1638 EXPR_PRIORITY (expr) = priority;
1639 EXPR_PRIORITY_ADJ (expr) = 0;
1640 EXPR_SCHED_TIMES (expr) = sched_times;
1641 EXPR_ORIG_BB_INDEX (expr) = orig_bb_index;
1642 EXPR_ORIG_SCHED_CYCLE (expr) = orig_sched_cycle;
1643 EXPR_SPEC_DONE_DS (expr) = spec_done_ds;
1644 EXPR_SPEC_TO_CHECK_DS (expr) = spec_to_check_ds;
1645
1646 if (history.exists ())
1647 EXPR_HISTORY_OF_CHANGES (expr) = history;
1648 else
1649 EXPR_HISTORY_OF_CHANGES (expr).create (0);
1650
1651 EXPR_TARGET_AVAILABLE (expr) = target_available;
1652 EXPR_WAS_SUBSTITUTED (expr) = was_substituted;
1653 EXPR_WAS_RENAMED (expr) = was_renamed;
1654 EXPR_NEEDS_SPEC_CHECK_P (expr) = needs_spec_check_p;
1655 EXPR_CANT_MOVE (expr) = cant_move;
1656 }
1657
1658 /* Make a copy of the expr FROM into the expr TO. */
1659 void
1660 copy_expr (expr_t to, expr_t from)
1661 {
1662 vec<expr_history_def> temp = vNULL;
1663
1664 if (EXPR_HISTORY_OF_CHANGES (from).exists ())
1665 {
1666 unsigned i;
1667 expr_history_def *phist;
1668
1669 temp = EXPR_HISTORY_OF_CHANGES (from).copy ();
1670 for (i = 0;
1671 temp.iterate (i, &phist);
1672 i++)
1673 {
1674 vinsn_attach (phist->old_expr_vinsn);
1675 vinsn_attach (phist->new_expr_vinsn);
1676 }
1677 }
1678
1679 init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from),
1680 EXPR_USEFULNESS (from), EXPR_PRIORITY (from),
1681 EXPR_SCHED_TIMES (from), EXPR_ORIG_BB_INDEX (from),
1682 EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from),
1683 EXPR_ORIG_SCHED_CYCLE (from), temp,
1684 EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
1685 EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
1686 EXPR_CANT_MOVE (from));
1687 }
1688
1689 /* Same, but the final expr will never be in av sets, so don't copy
1690 "uninteresting" data such as bitmap cache. */
1691 void
1692 copy_expr_onside (expr_t to, expr_t from)
1693 {
1694 init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), EXPR_USEFULNESS (from),
1695 EXPR_PRIORITY (from), EXPR_SCHED_TIMES (from), 0,
1696 EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), 0,
1697 vNULL,
1698 EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
1699 EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
1700 EXPR_CANT_MOVE (from));
1701 }
1702
1703 /* Prepare the expr of INSN for scheduling. Used when moving insn and when
1704 initializing new insns. */
1705 static void
1706 prepare_insn_expr (insn_t insn, int seqno)
1707 {
1708 expr_t expr = INSN_EXPR (insn);
1709 ds_t ds;
1710
1711 INSN_SEQNO (insn) = seqno;
1712 EXPR_ORIG_BB_INDEX (expr) = BLOCK_NUM (insn);
1713 EXPR_SPEC (expr) = 0;
1714 EXPR_ORIG_SCHED_CYCLE (expr) = 0;
1715 EXPR_WAS_SUBSTITUTED (expr) = 0;
1716 EXPR_WAS_RENAMED (expr) = 0;
1717 EXPR_TARGET_AVAILABLE (expr) = 1;
1718 INSN_LIVE_VALID_P (insn) = false;
1719
1720 /* ??? If this expression is speculative, make its dependence
1721 as weak as possible. We can filter this expression later
1722 in process_spec_exprs, because we do not distinguish
1723 between the status we got during compute_av_set and the
1724 existing status. To be fixed. */
1725 ds = EXPR_SPEC_DONE_DS (expr);
1726 if (ds)
1727 EXPR_SPEC_DONE_DS (expr) = ds_get_max_dep_weak (ds);
1728
1729 free_history_vect (EXPR_HISTORY_OF_CHANGES (expr));
1730 }
1731
1732 /* Update target_available bits when merging exprs TO and FROM. SPLIT_POINT
1733 is non-null when expressions are merged from different successors at
1734 a split point. */
1735 static void
1736 update_target_availability (expr_t to, expr_t from, insn_t split_point)
1737 {
1738 if (EXPR_TARGET_AVAILABLE (to) < 0
1739 || EXPR_TARGET_AVAILABLE (from) < 0)
1740 EXPR_TARGET_AVAILABLE (to) = -1;
1741 else
1742 {
1743 /* We try to detect the case when one of the expressions
1744 can only be reached through another one. In this case,
1745 we can do better. */
1746 if (split_point == NULL)
1747 {
1748 int toind, fromind;
1749
1750 toind = EXPR_ORIG_BB_INDEX (to);
1751 fromind = EXPR_ORIG_BB_INDEX (from);
1752
1753 if (toind && toind == fromind)
1754 /* Do nothing -- everything is done in
1755 merge_with_other_exprs. */
1756 ;
1757 else
1758 EXPR_TARGET_AVAILABLE (to) = -1;
1759 }
1760 else if (EXPR_TARGET_AVAILABLE (from) == 0
1761 && EXPR_LHS (from)
1762 && REG_P (EXPR_LHS (from))
1763 && REGNO (EXPR_LHS (to)) != REGNO (EXPR_LHS (from)))
1764 EXPR_TARGET_AVAILABLE (to) = -1;
1765 else
1766 EXPR_TARGET_AVAILABLE (to) &= EXPR_TARGET_AVAILABLE (from);
1767 }
1768 }
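
/* A summary of the merge above (illustrative, not from the sources): if
   either availability is unknown (-1), the result is unknown.  Without a
   split point, the value is kept only when both exprs originate in the same
   block and becomes unknown otherwise.  With a split point, an unavailable
   FROM whose lhs register differs from TO's makes the result unknown; in
   all remaining cases the two flags are simply ANDed.  */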
1769
1770 /* Update speculation bits when merging exprs TO and FROM. SPLIT_POINT
1771 is non-null when expressions are merged from different successors at
1772 a split point. */
1773 static void
1774 update_speculative_bits (expr_t to, expr_t from, insn_t split_point)
1775 {
1776 ds_t old_to_ds, old_from_ds;
1777
1778 old_to_ds = EXPR_SPEC_DONE_DS (to);
1779 old_from_ds = EXPR_SPEC_DONE_DS (from);
1780
1781 EXPR_SPEC_DONE_DS (to) = ds_max_merge (old_to_ds, old_from_ds);
1782 EXPR_SPEC_TO_CHECK_DS (to) |= EXPR_SPEC_TO_CHECK_DS (from);
1783 EXPR_NEEDS_SPEC_CHECK_P (to) |= EXPR_NEEDS_SPEC_CHECK_P (from);
1784
1785 /* When merging e.g. control & data speculative exprs, or a control
1786 speculative with a control&data speculative one, we really have
1787 to change vinsn too. Also, when speculative status is changed,
1788 we also need to record this as a transformation in expr's history. */
1789 if ((old_to_ds & SPECULATIVE) || (old_from_ds & SPECULATIVE))
1790 {
1791 old_to_ds = ds_get_speculation_types (old_to_ds);
1792 old_from_ds = ds_get_speculation_types (old_from_ds);
1793
1794 if (old_to_ds != old_from_ds)
1795 {
1796 ds_t record_ds;
1797
1798 /* When both expressions are speculative, we need to change
1799 the vinsn first. */
1800 if ((old_to_ds & SPECULATIVE) && (old_from_ds & SPECULATIVE))
1801 {
1802 int res;
1803
1804 res = speculate_expr (to, EXPR_SPEC_DONE_DS (to));
1805 gcc_assert (res >= 0);
1806 }
1807
1808 if (split_point != NULL)
1809 {
1810 /* Record the change with proper status. */
1811 record_ds = EXPR_SPEC_DONE_DS (to) & SPECULATIVE;
1812 record_ds &= ~(old_to_ds & SPECULATIVE);
1813 record_ds &= ~(old_from_ds & SPECULATIVE);
1814
1815 insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
1816 INSN_UID (split_point), TRANS_SPECULATION,
1817 EXPR_VINSN (from), EXPR_VINSN (to),
1818 record_ds);
1819 }
1820 }
1821 }
1822 }
1823
1824
1825 /* Merge bits of FROM expr to TO expr. When SPLIT_POINT is not NULL,
1826 this is done along different paths. */
1827 void
1828 merge_expr_data (expr_t to, expr_t from, insn_t split_point)
1829 {
1830 /* Choose the maximum of the specs of merged exprs. This is required
1831 for correctness of bookkeeping. */
1832 if (EXPR_SPEC (to) < EXPR_SPEC (from))
1833 EXPR_SPEC (to) = EXPR_SPEC (from);
1834
1835 if (split_point)
1836 EXPR_USEFULNESS (to) += EXPR_USEFULNESS (from);
1837 else
1838 EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to),
1839 EXPR_USEFULNESS (from));
1840
1841 if (EXPR_PRIORITY (to) < EXPR_PRIORITY (from))
1842 EXPR_PRIORITY (to) = EXPR_PRIORITY (from);
1843
1844 if (EXPR_SCHED_TIMES (to) > EXPR_SCHED_TIMES (from))
1845 EXPR_SCHED_TIMES (to) = EXPR_SCHED_TIMES (from);
1846
1847 if (EXPR_ORIG_BB_INDEX (to) != EXPR_ORIG_BB_INDEX (from))
1848 EXPR_ORIG_BB_INDEX (to) = 0;
1849
1850 EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to),
1851 EXPR_ORIG_SCHED_CYCLE (from));
1852
1853 EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from);
1854 EXPR_WAS_RENAMED (to) |= EXPR_WAS_RENAMED (from);
1855 EXPR_CANT_MOVE (to) |= EXPR_CANT_MOVE (from);
1856
1857 merge_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
1858 EXPR_HISTORY_OF_CHANGES (from));
1859 update_target_availability (to, from, split_point);
1860 update_speculative_bits (to, from, split_point);
1861 }
1862
1863 /* Merge bits of FROM expr to TO expr. Vinsns in the exprs should be equal
1864 in terms of vinsn_equal_p. SPLIT_POINT is non-null when expressions
1865 are merged from different successors at a split point. */
1866 void
1867 merge_expr (expr_t to, expr_t from, insn_t split_point)
1868 {
1869 vinsn_t to_vi = EXPR_VINSN (to);
1870 vinsn_t from_vi = EXPR_VINSN (from);
1871
1872 gcc_assert (vinsn_equal_p (to_vi, from_vi));
1873
1874 /* Make sure that speculative pattern is propagated into exprs that
1875 have non-speculative one. This will provide us with consistent
1876 speculative bits and speculative patterns inside expr. */
1877 if ((EXPR_SPEC_DONE_DS (from) != 0
1878 && EXPR_SPEC_DONE_DS (to) == 0)
1879 /* Do likewise for volatile insns, so that we always retain
1880 the may_trap_p bit on the resulting expression. */
1881 || (VINSN_MAY_TRAP_P (EXPR_VINSN (from))
1882 && !VINSN_MAY_TRAP_P (EXPR_VINSN (to))))
1883 change_vinsn_in_expr (to, EXPR_VINSN (from));
1884
1885 merge_expr_data (to, from, split_point);
1886 gcc_assert (EXPR_USEFULNESS (to) <= REG_BR_PROB_BASE);
1887 }
1888
1889 /* Clear the information of this EXPR. */
1890 void
1891 clear_expr (expr_t expr)
1892 {
1893
1894 vinsn_detach (EXPR_VINSN (expr));
1895 EXPR_VINSN (expr) = NULL;
1896
1897 free_history_vect (EXPR_HISTORY_OF_CHANGES (expr));
1898 }
1899
1900 /* For a given LV_SET, mark EXPR as having an unavailable target register. */
1901 static void
1902 set_unavailable_target_for_expr (expr_t expr, regset lv_set)
1903 {
1904 if (EXPR_SEPARABLE_P (expr))
1905 {
1906 if (REG_P (EXPR_LHS (expr))
1907 && register_unavailable_p (lv_set, EXPR_LHS (expr)))
1908 {
1909 /* If it's an insn like r1 = use (r1, ...), and it exists in
1910 different forms in each of the av_sets being merged, we can't say
1911 whether original destination register is available or not.
1912 However, this still works if destination register is not used
1913 in the original expression: if the branch at which LV_SET we're
1914 looking here is not actually 'other branch' in sense that same
1915 expression is available through it (but it can't be determined
1916 at computation stage because of transformations on one of the
1917 branches), it still won't affect the availability.
1918 Liveness of a register somewhere on a code motion path means
1919 it's either read somewhere on a codemotion path, live on
1920 'other' branch, live at the point immediately following
1921 the original operation, or is read by the original operation.
1922 The latter case is filtered out in the condition below.
1923 It still doesn't cover the case when register is defined and used
1924 somewhere within the code motion path, and in this case we could
1925 miss a unifying code motion along both branches using a renamed
1926 register, but it won't affect a code correctness since upon
1927 an actual code motion a bookkeeping code would be generated. */
1928 if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
1929 EXPR_LHS (expr)))
1930 EXPR_TARGET_AVAILABLE (expr) = -1;
1931 else
1932 EXPR_TARGET_AVAILABLE (expr) = false;
1933 }
1934 }
1935 else
1936 {
1937 unsigned regno;
1938 reg_set_iterator rsi;
1939
1940 EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)),
1941 0, regno, rsi)
1942 if (bitmap_bit_p (lv_set, regno))
1943 {
1944 EXPR_TARGET_AVAILABLE (expr) = false;
1945 break;
1946 }
1947
1948 EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (EXPR_VINSN (expr)),
1949 0, regno, rsi)
1950 if (bitmap_bit_p (lv_set, regno))
1951 {
1952 EXPR_TARGET_AVAILABLE (expr) = false;
1953 break;
1954 }
1955 }
1956 }
1957
1958 /* Try to make EXPR speculative. Return 1 when EXPR's pattern or
1959 dependence status have changed, 2 when also the target register became
1960 unavailable, 0 if nothing had to be changed, -1 if speculation failed. */
1961 int
1962 speculate_expr (expr_t expr, ds_t ds)
1963 {
1964 int res;
1965 rtx orig_insn_rtx;
1966 rtx spec_pat;
1967 ds_t target_ds, current_ds;
1968
1969 /* Obtain the status we need to put on EXPR. */
1970 target_ds = (ds & SPECULATIVE);
1971 current_ds = EXPR_SPEC_DONE_DS (expr);
1972 ds = ds_full_merge (current_ds, target_ds, NULL_RTX, NULL_RTX);
1973
1974 orig_insn_rtx = EXPR_INSN_RTX (expr);
1975
1976 res = sched_speculate_insn (orig_insn_rtx, ds, &spec_pat);
1977
1978 switch (res)
1979 {
1980 case 0:
1981 EXPR_SPEC_DONE_DS (expr) = ds;
1982 return current_ds != ds ? 1 : 0;
1983
1984 case 1:
1985 {
1986 rtx spec_insn_rtx = create_insn_rtx_from_pattern (spec_pat, NULL_RTX);
1987 vinsn_t spec_vinsn = create_vinsn_from_insn_rtx (spec_insn_rtx, false);
1988
1989 change_vinsn_in_expr (expr, spec_vinsn);
1990 EXPR_SPEC_DONE_DS (expr) = ds;
1991 EXPR_NEEDS_SPEC_CHECK_P (expr) = true;
1992
1993 /* Do not allow clobbering the address register of speculative
1994 insns. */
1995 if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
1996 expr_dest_reg (expr)))
1997 {
1998 EXPR_TARGET_AVAILABLE (expr) = false;
1999 return 2;
2000 }
2001
2002 return 1;
2003 }
2004
2005 case -1:
2006 return -1;
2007
2008 default:
2009 gcc_unreachable ();
2010 return -1;
2011 }
2012 }
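
/* Usage sketch (hypothetical caller, for illustration only; not part
   of the original sources):

     switch (speculate_expr (expr, ds))
       {
       case -1: break;   -- speculation failed, give up on this expr
       case 0:  break;   -- nothing had to be changed
       case 1:  break;   -- pattern or status changed, a check insn
                            will be needed (EXPR_NEEDS_SPEC_CHECK_P)
       case 2:  break;   -- as for 1, and additionally the target
                            register became unavailable
       }
*/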
2013
2014 /* Return a destination register, if any, of EXPR. */
2015 rtx
2016 expr_dest_reg (expr_t expr)
2017 {
2018 rtx dest = VINSN_LHS (EXPR_VINSN (expr));
2019
2020 if (dest != NULL_RTX && REG_P (dest))
2021 return dest;
2022
2023 return NULL_RTX;
2024 }
2025
2026 /* Return the REGNO of EXPR's destination.  */
2027 unsigned
2028 expr_dest_regno (expr_t expr)
2029 {
2030 rtx dest = expr_dest_reg (expr);
2031
2032 gcc_assert (dest != NULL_RTX);
2033 return REGNO (dest);
2034 }
2035
2036 /* For a given LV_SET, mark all expressions in JOIN_SET that are not
2037 present in AV_SET as having an unavailable target register.  */
2038 void
2039 mark_unavailable_targets (av_set_t join_set, av_set_t av_set, regset lv_set)
2040 {
2041 expr_t expr;
2042 av_set_iterator avi;
2043
2044 FOR_EACH_EXPR (expr, avi, join_set)
2045 if (av_set_lookup (av_set, EXPR_VINSN (expr)) == NULL)
2046 set_unavailable_target_for_expr (expr, lv_set);
2047 }
2048 \f
2049
2050 /* Return true if REG is (at least partially) present in REGS.  */
2051 bool
2052 register_unavailable_p (regset regs, rtx reg)
2053 {
2054 unsigned regno, end_regno;
2055
2056 regno = REGNO (reg);
2057 if (bitmap_bit_p (regs, regno))
2058 return true;
2059
2060 end_regno = END_REGNO (reg);
2061
2062 while (++regno < end_regno)
2063 if (bitmap_bit_p (regs, regno))
2064 return true;
2065
2066 return false;
2067 }
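
/* Example (illustrative only, assuming a hypothetical 32-bit target
   where DImode occupies two hard registers): a DImode value in hard
   reg 4 has END_REGNO 6, so unavailability is reported when either
   regno 4 or regno 5 is in REGS:

     regset regs = get_clear_regset_from_pool ();
     SET_REGNO_REG_SET (regs, 5);
     gcc_assert (register_unavailable_p (regs, gen_rtx_REG (DImode, 4)));
     return_regset_to_pool (regs);
*/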
2068
2069 /* Av set functions. */
2070
2071 /* Add a new element to av set SETP.
2072 Return the element added. */
2073 static av_set_t
2074 av_set_add_element (av_set_t *setp)
2075 {
2076 /* Insert at the beginning of the list. */
2077 _list_add (setp);
2078 return *setp;
2079 }
2080
2081 /* Add EXPR to SETP. */
2082 void
2083 av_set_add (av_set_t *setp, expr_t expr)
2084 {
2085 av_set_t elem;
2086
2087 gcc_assert (!INSN_NOP_P (EXPR_INSN_RTX (expr)));
2088 elem = av_set_add_element (setp);
2089 copy_expr (_AV_SET_EXPR (elem), expr);
2090 }
2091
2092 /* Same, but do not copy EXPR. */
2093 static void
2094 av_set_add_nocopy (av_set_t *setp, expr_t expr)
2095 {
2096 av_set_t elem;
2097
2098 elem = av_set_add_element (setp);
2099 *_AV_SET_EXPR (elem) = *expr;
2100 }
2101
2102 /* Remove expr pointed to by IP from the av_set. */
2103 void
2104 av_set_iter_remove (av_set_iterator *ip)
2105 {
2106 clear_expr (_AV_SET_EXPR (*ip->lp));
2107 _list_iter_remove (ip);
2108 }
2109
2110 /* Search for an expr in SET that is equivalent to SOUGHT_VINSN in the
2111 sense of vinsn_equal_p.  Return NULL if no such expr was found
2112 in SET.  */
2113 expr_t
2114 av_set_lookup (av_set_t set, vinsn_t sought_vinsn)
2115 {
2116 expr_t expr;
2117 av_set_iterator i;
2118
2119 FOR_EACH_EXPR (expr, i, set)
2120 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2121 return expr;
2122 return NULL;
2123 }
2124
2125 /* Same, but also remove the EXPR found. */
2126 static expr_t
2127 av_set_lookup_and_remove (av_set_t *setp, vinsn_t sought_vinsn)
2128 {
2129 expr_t expr;
2130 av_set_iterator i;
2131
2132 FOR_EACH_EXPR_1 (expr, i, setp)
2133 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2134 {
2135 _list_iter_remove_nofree (&i);
2136 return expr;
2137 }
2138 return NULL;
2139 }
2140
2141 /* Search for an expr in SET that is equivalent to EXPR in the sense
2142 of vinsn_equal_p on their vinsns, but is not EXPR itself.
2143 Return NULL if no such expr was found in SET.  */
2144 static expr_t
2145 av_set_lookup_other_equiv_expr (av_set_t set, expr_t expr)
2146 {
2147 expr_t cur_expr;
2148 av_set_iterator i;
2149
2150 FOR_EACH_EXPR (cur_expr, i, set)
2151 {
2152 if (cur_expr == expr)
2153 continue;
2154 if (vinsn_equal_p (EXPR_VINSN (cur_expr), EXPR_VINSN (expr)))
2155 return cur_expr;
2156 }
2157
2158 return NULL;
2159 }
2160
2161 /* If an equivalent expression is already in AVP, merge and remove one of them, returning the kept one.  */
2162 expr_t
2163 merge_with_other_exprs (av_set_t *avp, av_set_iterator *ip, expr_t expr)
2164 {
2165 expr_t expr2;
2166
2167 expr2 = av_set_lookup_other_equiv_expr (*avp, expr);
2168 if (expr2 != NULL)
2169 {
2170 /* Reset target availability on merge, since taking it only from
2171 one of the exprs would be wrong for code from the other one.  */
2172 EXPR_TARGET_AVAILABLE (expr2) = -1;
2173 EXPR_USEFULNESS (expr2) = 0;
2174
2175 merge_expr (expr2, expr, NULL);
2176
2177 /* Fix usefulness as it should be now REG_BR_PROB_BASE. */
2178 EXPR_USEFULNESS (expr2) = REG_BR_PROB_BASE;
2179
2180 av_set_iter_remove (ip);
2181 return expr2;
2182 }
2183
2184 return expr;
2185 }
2186
2187 /* Return true if there is an expr that corresponds to VI in SET.  */
2188 bool
2189 av_set_is_in_p (av_set_t set, vinsn_t vi)
2190 {
2191 return av_set_lookup (set, vi) != NULL;
2192 }
2193
2194 /* Return a copy of SET. */
2195 av_set_t
2196 av_set_copy (av_set_t set)
2197 {
2198 expr_t expr;
2199 av_set_iterator i;
2200 av_set_t res = NULL;
2201
2202 FOR_EACH_EXPR (expr, i, set)
2203 av_set_add (&res, expr);
2204
2205 return res;
2206 }
2207
2208 /* Join two av sets that do not have common elements by attaching second set
2209 (pointed to by FROMP) to the end of first set (TO_TAILP must point to
2210 _AV_SET_NEXT of first set's last element). */
2211 static void
2212 join_distinct_sets (av_set_t *to_tailp, av_set_t *fromp)
2213 {
2214 gcc_assert (*to_tailp == NULL);
2215 *to_tailp = *fromp;
2216 *fromp = NULL;
2217 }
2218
2219 /* Make the set pointed to by TOP the union of *TOP and *FROMP.  Clear
2220 the av_set pointed to by FROMP afterwards.  */
2221 void
2222 av_set_union_and_clear (av_set_t *top, av_set_t *fromp, insn_t insn)
2223 {
2224 expr_t expr1;
2225 av_set_iterator i;
2226
2227 /* Delete from TOP all exprs that are also present in FROMP, merging data.  */
2228 FOR_EACH_EXPR_1 (expr1, i, top)
2229 {
2230 expr_t expr2 = av_set_lookup (*fromp, EXPR_VINSN (expr1));
2231
2232 if (expr2)
2233 {
2234 merge_expr (expr2, expr1, insn);
2235 av_set_iter_remove (&i);
2236 }
2237 }
2238
2239 join_distinct_sets (i.lp, fromp);
2240 }
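
/* Illustration (hypothetical contents): if *TOP is { e1, e2 } and
   *FROMP is { e2', e3 }, where e2 and e2' share a vinsn, the loop
   merges e2's data into e2' and removes e2 from *TOP; the join then
   leaves *TOP as { e1, e2', e3 } and *FROMP as NULL.  A minimal
   caller sketch:

     av_set_t to = av_set_copy (set_a), from = av_set_copy (set_b);
     av_set_union_and_clear (&to, &from, insn);
     gcc_assert (from == NULL);
*/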
2241
2242 /* Same as above, but also update availability of target register in
2243 TOP judging by TO_LV_SET and FROM_LV_SET. */
2244 void
2245 av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set,
2246 regset from_lv_set, insn_t insn)
2247 {
2248 expr_t expr1;
2249 av_set_iterator i;
2250 av_set_t *to_tailp, in_both_set = NULL;
2251
2252 /* Delete from TOP all exprs that are also present in FROMP.  */
2253 FOR_EACH_EXPR_1 (expr1, i, top)
2254 {
2255 expr_t expr2 = av_set_lookup_and_remove (fromp, EXPR_VINSN (expr1));
2256
2257 if (expr2)
2258 {
2259 /* It may be that the expressions have different destination
2260 registers, in which case we need to check liveness here. */
2261 if (EXPR_SEPARABLE_P (expr1))
2262 {
2263 int regno1 = (REG_P (EXPR_LHS (expr1))
2264 ? (int) expr_dest_regno (expr1) : -1);
2265 int regno2 = (REG_P (EXPR_LHS (expr2))
2266 ? (int) expr_dest_regno (expr2) : -1);
2267
2268 /* ??? We don't have a way to check restrictions for
2269 *other* register on the current path, we did it only
2270 for the current target register. Give up. */
2271 if (regno1 != regno2)
2272 EXPR_TARGET_AVAILABLE (expr2) = -1;
2273 }
2274 else if (EXPR_INSN_RTX (expr1) != EXPR_INSN_RTX (expr2))
2275 EXPR_TARGET_AVAILABLE (expr2) = -1;
2276
2277 merge_expr (expr2, expr1, insn);
2278 av_set_add_nocopy (&in_both_set, expr2);
2279 av_set_iter_remove (&i);
2280 }
2281 else
2282 /* EXPR1 is present in TOP, but not in FROMP. Check it on
2283 FROM_LV_SET. */
2284 set_unavailable_target_for_expr (expr1, from_lv_set);
2285 }
2286 to_tailp = i.lp;
2287
2288 /* These expressions are not present in TOP. Check liveness
2289 restrictions on TO_LV_SET. */
2290 FOR_EACH_EXPR (expr1, i, *fromp)
2291 set_unavailable_target_for_expr (expr1, to_lv_set);
2292
2293 join_distinct_sets (i.lp, &in_both_set);
2294 join_distinct_sets (to_tailp, fromp);
2295 }
2296
2297 /* Clear av_set pointed to by SETP. */
2298 void
2299 av_set_clear (av_set_t *setp)
2300 {
2301 expr_t expr;
2302 av_set_iterator i;
2303
2304 FOR_EACH_EXPR_1 (expr, i, setp)
2305 av_set_iter_remove (&i);
2306
2307 gcc_assert (*setp == NULL);
2308 }
2309
2310 /* Leave only one non-speculative element in the set pointed to by SETP.  */
2311 void
2312 av_set_leave_one_nonspec (av_set_t *setp)
2313 {
2314 expr_t expr;
2315 av_set_iterator i;
2316 bool has_one_nonspec = false;
2317
2318 /* Keep all speculative exprs, and leave one non-speculative
2319 (the first one). */
2320 FOR_EACH_EXPR_1 (expr, i, setp)
2321 {
2322 if (!EXPR_SPEC_DONE_DS (expr))
2323 {
2324 if (has_one_nonspec)
2325 av_set_iter_remove (&i);
2326 else
2327 has_one_nonspec = true;
2328 }
2329 }
2330 }
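
/* Illustration (hypothetical set contents): { spec1, nonspec1, spec2,
   nonspec2 } becomes { spec1, nonspec1, spec2 } -- every speculative
   expr is kept, and only the first non-speculative one survives.  */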
2331
2332 /* Return the N'th element of the SET. */
2333 expr_t
2334 av_set_element (av_set_t set, int n)
2335 {
2336 expr_t expr;
2337 av_set_iterator i;
2338
2339 FOR_EACH_EXPR (expr, i, set)
2340 if (n-- == 0)
2341 return expr;
2342
2343 gcc_unreachable ();
2344 return NULL;
2345 }
2346
2347 /* Delete all expressions that are conditional branches (IFs) from AVP.  */
2348 void
2349 av_set_substract_cond_branches (av_set_t *avp)
2350 {
2351 av_set_iterator i;
2352 expr_t expr;
2353
2354 FOR_EACH_EXPR_1 (expr, i, avp)
2355 if (vinsn_cond_branch_p (EXPR_VINSN (expr)))
2356 av_set_iter_remove (&i);
2357 }
2358
2359 /* Multiply the usefulness attribute of each member of av-set AV by
2360 PROB / ALL_PROB.  */
2361 void
2362 av_set_split_usefulness (av_set_t av, int prob, int all_prob)
2363 {
2364 av_set_iterator i;
2365 expr_t expr;
2366
2367 FOR_EACH_EXPR (expr, i, av)
2368 EXPR_USEFULNESS (expr) = (all_prob
2369 ? (EXPR_USEFULNESS (expr) * prob) / all_prob
2370 : 0);
2371 }
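
/* Example (REG_BR_PROB_BASE is 10000): an expr with usefulness 10000
   seen through a branch taken with PROB 4000 out of ALL_PROB 10000
   gets usefulness (10000 * 4000) / 10000 = 4000.  When ALL_PROB is 0,
   usefulness is simply reset to 0.  */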
2372
2373 /* Leave in *AVP only those expressions that are present in AV,
2374 merging the history of changes from the equivalent AV expressions.  */
2375 void
2376 av_set_code_motion_filter (av_set_t *avp, av_set_t av)
2377 {
2378 av_set_iterator i;
2379 expr_t expr, expr2;
2380
2381 FOR_EACH_EXPR_1 (expr, i, avp)
2382 if ((expr2 = av_set_lookup (av, EXPR_VINSN (expr))) == NULL)
2383 av_set_iter_remove (&i);
2384 else
2385 /* When updating av sets in bookkeeping blocks, we can add more insns
2386 there which will be transformed but the upper av sets will not
2387 reflect those transformations. We then fail to undo those
2388 when searching for such insns. So merge the history saved
2389 in the av set of the block we are processing. */
2390 merge_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
2391 EXPR_HISTORY_OF_CHANGES (expr2));
2392 }
2393
2394 \f
2395
2396 /* Dependence hooks to initialize insn data. */
2397
2398 /* This is used in hooks callable from dependence analysis when
2399 initializing an instruction's data.  */
2400 static struct
2401 {
2402 /* Where the dependence was found (lhs/rhs). */
2403 deps_where_t where;
2404
2405 /* The actual data object to initialize. */
2406 idata_t id;
2407
2408 /* True when the insn should not be made clonable. */
2409 bool force_unique_p;
2410
2411 /* True when insn should be treated as of type USE, i.e. never renamed. */
2412 bool force_use_p;
2413 } deps_init_id_data;
2414
2415
2416 /* Setup ID for INSN. FORCE_UNIQUE_P is true when INSN should not be
2417 clonable. */
2418 static void
2419 setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p)
2420 {
2421 int type;
2422
2423 /* Determine whether INSN could be cloned and return the appropriate vinsn
2424 type.  Those clonable insns which can be separated into lhs and rhs have
2425 type SET; other clonable insns have type USE.  */
2426 type = GET_CODE (insn);
2427
2428 /* Only regular insns can be cloned.  */
2429 if (type == INSN && !force_unique_p)
2430 type = SET;
2431 else if (type == JUMP_INSN && simplejump_p (insn))
2432 type = PC;
2433 else if (type == DEBUG_INSN)
2434 type = !force_unique_p ? USE : INSN;
2435
2436 IDATA_TYPE (id) = type;
2437 IDATA_REG_SETS (id) = get_clear_regset_from_pool ();
2438 IDATA_REG_USES (id) = get_clear_regset_from_pool ();
2439 IDATA_REG_CLOBBERS (id) = get_clear_regset_from_pool ();
2440 }
2441
2442 /* Start initializing insn data. */
2443 static void
2444 deps_init_id_start_insn (insn_t insn)
2445 {
2446 gcc_assert (deps_init_id_data.where == DEPS_IN_NOWHERE);
2447
2448 setup_id_for_insn (deps_init_id_data.id, insn,
2449 deps_init_id_data.force_unique_p);
2450 deps_init_id_data.where = DEPS_IN_INSN;
2451 }
2452
2453 /* Start initializing lhs data. */
2454 static void
2455 deps_init_id_start_lhs (rtx lhs)
2456 {
2457 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2458 gcc_assert (IDATA_LHS (deps_init_id_data.id) == NULL);
2459
2460 if (IDATA_TYPE (deps_init_id_data.id) == SET)
2461 {
2462 IDATA_LHS (deps_init_id_data.id) = lhs;
2463 deps_init_id_data.where = DEPS_IN_LHS;
2464 }
2465 }
2466
2467 /* Finish initializing lhs data. */
2468 static void
2469 deps_init_id_finish_lhs (void)
2470 {
2471 deps_init_id_data.where = DEPS_IN_INSN;
2472 }
2473
2474 /* Note a set of REGNO. */
2475 static void
2476 deps_init_id_note_reg_set (int regno)
2477 {
2478 haifa_note_reg_set (regno);
2479
2480 if (deps_init_id_data.where == DEPS_IN_RHS)
2481 deps_init_id_data.force_use_p = true;
2482
2483 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2484 SET_REGNO_REG_SET (IDATA_REG_SETS (deps_init_id_data.id), regno);
2485
2486 #ifdef STACK_REGS
2487 /* Make instructions that set stack registers to be ineligible for
2488 renaming to avoid issues with find_used_regs. */
2489 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2490 deps_init_id_data.force_use_p = true;
2491 #endif
2492 }
2493
2494 /* Note a clobber of REGNO. */
2495 static void
2496 deps_init_id_note_reg_clobber (int regno)
2497 {
2498 haifa_note_reg_clobber (regno);
2499
2500 if (deps_init_id_data.where == DEPS_IN_RHS)
2501 deps_init_id_data.force_use_p = true;
2502
2503 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2504 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (deps_init_id_data.id), regno);
2505 }
2506
2507 /* Note a use of REGNO. */
2508 static void
2509 deps_init_id_note_reg_use (int regno)
2510 {
2511 haifa_note_reg_use (regno);
2512
2513 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2514 SET_REGNO_REG_SET (IDATA_REG_USES (deps_init_id_data.id), regno);
2515 }
2516
2517 /* Start initializing rhs data. */
2518 static void
2519 deps_init_id_start_rhs (rtx rhs)
2520 {
2521 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2522
2523 /* And there was no sel_deps_reset_to_insn (). */
2524 if (IDATA_LHS (deps_init_id_data.id) != NULL)
2525 {
2526 IDATA_RHS (deps_init_id_data.id) = rhs;
2527 deps_init_id_data.where = DEPS_IN_RHS;
2528 }
2529 }
2530
2531 /* Finish initializing rhs data. */
2532 static void
2533 deps_init_id_finish_rhs (void)
2534 {
2535 gcc_assert (deps_init_id_data.where == DEPS_IN_RHS
2536 || deps_init_id_data.where == DEPS_IN_INSN);
2537 deps_init_id_data.where = DEPS_IN_INSN;
2538 }
2539
2540 /* Finish initializing insn data. */
2541 static void
2542 deps_init_id_finish_insn (void)
2543 {
2544 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2545
2546 if (IDATA_TYPE (deps_init_id_data.id) == SET)
2547 {
2548 rtx lhs = IDATA_LHS (deps_init_id_data.id);
2549 rtx rhs = IDATA_RHS (deps_init_id_data.id);
2550
2551 if (lhs == NULL || rhs == NULL || !lhs_and_rhs_separable_p (lhs, rhs)
2552 || deps_init_id_data.force_use_p)
2553 {
2554 /* This should be a USE, as we don't want to schedule its RHS
2555 separately.  However, we still want the lhs and rhs recorded
2556 for the purposes of substitution.  That's why we don't
2557 simply call downgrade_to_use () here.  */
2558 gcc_assert (IDATA_TYPE (deps_init_id_data.id) == SET);
2559 gcc_assert (!lhs == !rhs);
2560
2561 IDATA_TYPE (deps_init_id_data.id) = USE;
2562 }
2563 }
2564
2565 deps_init_id_data.where = DEPS_IN_NOWHERE;
2566 }
2567
2568 /* This is dependence info used for initializing insn's data. */
2569 static struct sched_deps_info_def deps_init_id_sched_deps_info;
2570
2571 /* This initializes most of the static part of the above structure. */
2572 static const struct sched_deps_info_def const_deps_init_id_sched_deps_info =
2573 {
2574 NULL,
2575
2576 deps_init_id_start_insn,
2577 deps_init_id_finish_insn,
2578 deps_init_id_start_lhs,
2579 deps_init_id_finish_lhs,
2580 deps_init_id_start_rhs,
2581 deps_init_id_finish_rhs,
2582 deps_init_id_note_reg_set,
2583 deps_init_id_note_reg_clobber,
2584 deps_init_id_note_reg_use,
2585 NULL, /* note_mem_dep */
2586 NULL, /* note_dep */
2587
2588 0, /* use_cselib */
2589 0, /* use_deps_list */
2590 0 /* generate_spec_deps */
2591 };
2592
2593 /* Initialize INSN's lhs and rhs in ID. When FORCE_UNIQUE_P is true,
2594 we don't actually need information about lhs and rhs. */
2595 static void
2596 setup_id_lhs_rhs (idata_t id, insn_t insn, bool force_unique_p)
2597 {
2598 rtx pat = PATTERN (insn);
2599
2600 if (NONJUMP_INSN_P (insn)
2601 && GET_CODE (pat) == SET
2602 && !force_unique_p)
2603 {
2604 IDATA_RHS (id) = SET_SRC (pat);
2605 IDATA_LHS (id) = SET_DEST (pat);
2606 }
2607 else
2608 IDATA_LHS (id) = IDATA_RHS (id) = NULL;
2609 }
2610
2611 /* Possibly downgrade INSN to USE. */
2612 static void
2613 maybe_downgrade_id_to_use (idata_t id, insn_t insn)
2614 {
2615 bool must_be_use = false;
2616 unsigned uid = INSN_UID (insn);
2617 df_ref *rec;
2618 rtx lhs = IDATA_LHS (id);
2619 rtx rhs = IDATA_RHS (id);
2620
2621 /* We downgrade only SETs. */
2622 if (IDATA_TYPE (id) != SET)
2623 return;
2624
2625 if (!lhs || !lhs_and_rhs_separable_p (lhs, rhs))
2626 {
2627 IDATA_TYPE (id) = USE;
2628 return;
2629 }
2630
2631 for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++)
2632 {
2633 df_ref def = *rec;
2634
2635 if (DF_REF_INSN (def)
2636 && DF_REF_FLAGS_IS_SET (def, DF_REF_PRE_POST_MODIFY)
2637 && loc_mentioned_in_p (DF_REF_LOC (def), IDATA_RHS (id)))
2638 {
2639 must_be_use = true;
2640 break;
2641 }
2642
2643 #ifdef STACK_REGS
2644 /* Make instructions that set stack registers to be ineligible for
2645 renaming to avoid issues with find_used_regs. */
2646 if (IN_RANGE (DF_REF_REGNO (def), FIRST_STACK_REG, LAST_STACK_REG))
2647 {
2648 must_be_use = true;
2649 break;
2650 }
2651 #endif
2652 }
2653
2654 if (must_be_use)
2655 IDATA_TYPE (id) = USE;
2656 }
2657
2658 /* Setup register sets describing INSN in ID. */
2659 static void
2660 setup_id_reg_sets (idata_t id, insn_t insn)
2661 {
2662 unsigned uid = INSN_UID (insn);
2663 df_ref *rec;
2664 regset tmp = get_clear_regset_from_pool ();
2665
2666 for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++)
2667 {
2668 df_ref def = *rec;
2669 unsigned int regno = DF_REF_REGNO (def);
2670
2671 /* Post modifies are treated like clobbers by sched-deps.c. */
2672 if (DF_REF_FLAGS_IS_SET (def, (DF_REF_MUST_CLOBBER
2673 | DF_REF_PRE_POST_MODIFY)))
2674 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (id), regno);
2675 else if (! DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
2676 {
2677 SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno);
2678
2679 #ifdef STACK_REGS
2680 /* For stack registers, treat writes to them as writes
2681 to the first one to be consistent with sched-deps.c. */
2682 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2683 SET_REGNO_REG_SET (IDATA_REG_SETS (id), FIRST_STACK_REG);
2684 #endif
2685 }
2686 /* Mark special refs that generate read/write def pair. */
2687 if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL)
2688 || regno == STACK_POINTER_REGNUM)
2689 bitmap_set_bit (tmp, regno);
2690 }
2691
2692 for (rec = DF_INSN_UID_USES (uid); *rec; rec++)
2693 {
2694 df_ref use = *rec;
2695 unsigned int regno = DF_REF_REGNO (use);
2696
2697 /* When these refs are met for the first time, skip them, as
2698 these uses are just counterparts of some defs. */
2699 if (bitmap_bit_p (tmp, regno))
2700 bitmap_clear_bit (tmp, regno);
2701 else if (! DF_REF_FLAGS_IS_SET (use, DF_REF_CALL_STACK_USAGE))
2702 {
2703 SET_REGNO_REG_SET (IDATA_REG_USES (id), regno);
2704
2705 #ifdef STACK_REGS
2706 /* For stack registers, treat reads from them as reads from
2707 the first one to be consistent with sched-deps.c. */
2708 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2709 SET_REGNO_REG_SET (IDATA_REG_USES (id), FIRST_STACK_REG);
2710 #endif
2711 }
2712 }
2713
2714 return_regset_to_pool (tmp);
2715 }
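
/* Example of a read/write def pair handled above (illustrative RTL):

     (cond_exec (ne (reg:CC 17) (const_int 0))
                (set (reg:SI 0) (reg:SI 1)))

   A conditional def like the one of reg 0 may leave the old value
   intact, so DF records a DF_REF_CONDITIONAL def plus a compensating
   use of the same register.  The regno is noted in TMP above, and the
   first matching use is then skipped instead of being added to
   REG_USES.  */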
2716
2717 /* Initialize instruction data for INSN in ID using DF's data. */
2718 static void
2719 init_id_from_df (idata_t id, insn_t insn, bool force_unique_p)
2720 {
2721 gcc_assert (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL);
2722
2723 setup_id_for_insn (id, insn, force_unique_p);
2724 setup_id_lhs_rhs (id, insn, force_unique_p);
2725
2726 if (INSN_NOP_P (insn))
2727 return;
2728
2729 maybe_downgrade_id_to_use (id, insn);
2730 setup_id_reg_sets (id, insn);
2731 }
2732
2733 /* Initialize instruction data for INSN in ID. */
2734 static void
2735 deps_init_id (idata_t id, insn_t insn, bool force_unique_p)
2736 {
2737 struct deps_desc _dc, *dc = &_dc;
2738
2739 deps_init_id_data.where = DEPS_IN_NOWHERE;
2740 deps_init_id_data.id = id;
2741 deps_init_id_data.force_unique_p = force_unique_p;
2742 deps_init_id_data.force_use_p = false;
2743
2744 init_deps (dc, false);
2745
2746 memcpy (&deps_init_id_sched_deps_info,
2747 &const_deps_init_id_sched_deps_info,
2748 sizeof (deps_init_id_sched_deps_info));
2749
2750 if (spec_info != NULL)
2751 deps_init_id_sched_deps_info.generate_spec_deps = 1;
2752
2753 sched_deps_info = &deps_init_id_sched_deps_info;
2754
2755 deps_analyze_insn (dc, insn);
2756
2757 free_deps (dc);
2758
2759 deps_init_id_data.id = NULL;
2760 }
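
/* Note: deps_analyze_insn above walks INSN's pattern and calls back
   into the deps_init_id_* hooks, which fill IDATA_LHS, IDATA_RHS and
   the register sets of ID, possibly downgrading its type from SET to
   USE along the way.  */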
2761
2762 \f
2763 struct sched_scan_info_def
2764 {
2765 /* This hook notifies the scheduler frontend to extend its internal per
2766 basic block data structures.  It should be called once before a series
2767 of calls to bb_init ().  */
2768 void (*extend_bb) (void);
2769
2770 /* This hook makes the scheduler frontend initialize its internal data
2771 structures for the passed basic block.  */
2772 void (*init_bb) (basic_block);
2773
2774 /* This hook notifies the scheduler frontend to extend its internal per
2775 insn data structures.  It should be called once before a series of
2776 calls to insn_init ().  */
2777 void (*extend_insn) (void);
2778
2779 /* This hook makes the scheduler frontend initialize its internal data
2780 structures for the passed insn.  */
2781 void (*init_insn) (rtx);
2782 };
2783
2784 /* A driver function to add a set of basic blocks (BBS) to the
2785 scheduling region. */
2786 static void
2787 sched_scan (const struct sched_scan_info_def *ssi, bb_vec_t bbs)
2788 {
2789 unsigned i;
2790 basic_block bb;
2791
2792 if (ssi->extend_bb)
2793 ssi->extend_bb ();
2794
2795 if (ssi->init_bb)
2796 FOR_EACH_VEC_ELT (bbs, i, bb)
2797 ssi->init_bb (bb);
2798
2799 if (ssi->extend_insn)
2800 ssi->extend_insn ();
2801
2802 if (ssi->init_insn)
2803 FOR_EACH_VEC_ELT (bbs, i, bb)
2804 {
2805 rtx insn;
2806
2807 FOR_BB_INSNS (bb, insn)
2808 ssi->init_insn (insn);
2809 }
2810 }
2811
2812 /* Implement hooks for collecting fundamental insn properties, such as
2813 whether the insn is an ASM or is within a SCHED_GROUP.  */
2814
2815 /* True when the "one-time init" data for INSN has already been set up.  */
2816 static bool
2817 first_time_insn_init (insn_t insn)
2818 {
2819 return INSN_LIVE (insn) == NULL;
2820 }
2821
2822 /* Hash an entry in a transformed_insns hashtable. */
2823 static hashval_t
2824 hash_transformed_insns (const void *p)
2825 {
2826 return VINSN_HASH_RTX (((const struct transformed_insns *) p)->vinsn_old);
2827 }
2828
2829 /* Compare the entries in a transformed_insns hashtable. */
2830 static int
2831 eq_transformed_insns (const void *p, const void *q)
2832 {
2833 rtx i1 = VINSN_INSN_RTX (((const struct transformed_insns *) p)->vinsn_old);
2834 rtx i2 = VINSN_INSN_RTX (((const struct transformed_insns *) q)->vinsn_old);
2835
2836 if (INSN_UID (i1) == INSN_UID (i2))
2837 return 1;
2838 return rtx_equal_p (PATTERN (i1), PATTERN (i2));
2839 }
2840
2841 /* Free an entry in a transformed_insns hashtable. */
2842 static void
2843 free_transformed_insns (void *p)
2844 {
2845 struct transformed_insns *pti = (struct transformed_insns *) p;
2846
2847 vinsn_detach (pti->vinsn_old);
2848 vinsn_detach (pti->vinsn_new);
2849 free (pti);
2850 }
2851
2852 /* Initialize the s_i_d data for INSN that should be set up just once,
2853 when we first see the insn.  */
2854 static void
2855 init_first_time_insn_data (insn_t insn)
2856 {
2857 /* INSN_LIVE should not be set if this is the first time we init data
2858 for the insn.  */
2859 gcc_assert (first_time_insn_init (insn));
2860
2861 /* These are needed for nops too. */
2862 INSN_LIVE (insn) = get_regset_from_pool ();
2863 INSN_LIVE_VALID_P (insn) = false;
2864
2865 if (!INSN_NOP_P (insn))
2866 {
2867 INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL);
2868 INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL);
2869 INSN_TRANSFORMED_INSNS (insn)
2870 = htab_create (16, hash_transformed_insns,
2871 eq_transformed_insns, free_transformed_insns);
2872 init_deps (&INSN_DEPS_CONTEXT (insn), true);
2873 }
2874 }
2875
2876 /* Free almost all above data for INSN that is scheduled already.
2877 Used for extra-large basic blocks. */
2878 void
2879 free_data_for_scheduled_insn (insn_t insn)
2880 {
2881 gcc_assert (! first_time_insn_init (insn));
2882
2883 if (! INSN_ANALYZED_DEPS (insn))
2884 return;
2885
2886 BITMAP_FREE (INSN_ANALYZED_DEPS (insn));
2887 BITMAP_FREE (INSN_FOUND_DEPS (insn));
2888 htab_delete (INSN_TRANSFORMED_INSNS (insn));
2889
2890 /* This is allocated only for bookkeeping insns. */
2891 if (INSN_ORIGINATORS (insn))
2892 BITMAP_FREE (INSN_ORIGINATORS (insn));
2893 free_deps (&INSN_DEPS_CONTEXT (insn));
2894
2895 INSN_ANALYZED_DEPS (insn) = NULL;
2896
2897 /* Clear the readonly flag so we would ICE when trying to recalculate
2898 the deps context (as we believe that it should not happen). */
2899 (&INSN_DEPS_CONTEXT (insn))->readonly = 0;
2900 }
2901
2902 /* Free the same data as above for INSN. */
2903 static void
2904 free_first_time_insn_data (insn_t insn)
2905 {
2906 gcc_assert (! first_time_insn_init (insn));
2907
2908 free_data_for_scheduled_insn (insn);
2909 return_regset_to_pool (INSN_LIVE (insn));
2910 INSN_LIVE (insn) = NULL;
2911 INSN_LIVE_VALID_P (insn) = false;
2912 }
2913
2914 /* Initialize region-scope data structures for basic blocks. */
2915 static void
2916 init_global_and_expr_for_bb (basic_block bb)
2917 {
2918 if (sel_bb_empty_p (bb))
2919 return;
2920
2921 invalidate_av_set (bb);
2922 }
2923
2924 /* Data for global dependency analysis (to initialize CANT_MOVE and
2925 SCHED_GROUP_P). */
2926 static struct
2927 {
2928 /* Previous insn. */
2929 insn_t prev_insn;
2930 } init_global_data;
2931
2932 /* Determine if INSN is in the sched_group, is an asm, or should not
2933 be cloned.  After that, initialize its expr.  */
2934 static void
2935 init_global_and_expr_for_insn (insn_t insn)
2936 {
2937 if (LABEL_P (insn))
2938 return;
2939
2940 if (NOTE_INSN_BASIC_BLOCK_P (insn))
2941 {
2942 init_global_data.prev_insn = NULL_RTX;
2943 return;
2944 }
2945
2946 gcc_assert (INSN_P (insn));
2947
2948 if (SCHED_GROUP_P (insn))
2949 /* Setup a sched_group. */
2950 {
2951 insn_t prev_insn = init_global_data.prev_insn;
2952
2953 if (prev_insn)
2954 INSN_SCHED_NEXT (prev_insn) = insn;
2955
2956 init_global_data.prev_insn = insn;
2957 }
2958 else
2959 init_global_data.prev_insn = NULL_RTX;
2960
2961 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
2962 || asm_noperands (PATTERN (insn)) >= 0)
2963 /* Mark INSN as an asm. */
2964 INSN_ASM_P (insn) = true;
2965
2966 {
2967 bool force_unique_p;
2968 ds_t spec_done_ds;
2969
2970 /* Certain instructions cannot be cloned, and frame related insns and
2971 the insn adjacent to NOTE_INSN_EPILOGUE_BEG cannot be moved out of
2972 their block. */
2973 if (prologue_epilogue_contains (insn))
2974 {
2975 if (RTX_FRAME_RELATED_P (insn))
2976 CANT_MOVE (insn) = 1;
2977 else
2978 {
2979 rtx note;
2980 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2981 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE
2982 && ((enum insn_note) INTVAL (XEXP (note, 0))
2983 == NOTE_INSN_EPILOGUE_BEG))
2984 {
2985 CANT_MOVE (insn) = 1;
2986 break;
2987 }
2988 }
2989 force_unique_p = true;
2990 }
2991 else
2992 if (CANT_MOVE (insn)
2993 || INSN_ASM_P (insn)
2994 || SCHED_GROUP_P (insn)
2995 || CALL_P (insn)
2996 /* Exception handling insns are always unique. */
2997 || (cfun->can_throw_non_call_exceptions && can_throw_internal (insn))
2998 /* TRAP_IF, though it has an INSN code, is control_flow_insn_p ().  */
2999 || control_flow_insn_p (insn)
3000 || volatile_insn_p (PATTERN (insn))
3001 || (targetm.cannot_copy_insn_p
3002 && targetm.cannot_copy_insn_p (insn)))
3003 force_unique_p = true;
3004 else
3005 force_unique_p = false;
3006
3007 if (targetm.sched.get_insn_spec_ds)
3008 {
3009 spec_done_ds = targetm.sched.get_insn_spec_ds (insn);
3010 spec_done_ds = ds_get_max_dep_weak (spec_done_ds);
3011 }
3012 else
3013 spec_done_ds = 0;
3014
3015 /* Initialize INSN's expr. */
3016 init_expr (INSN_EXPR (insn), vinsn_create (insn, force_unique_p), 0,
3017 REG_BR_PROB_BASE, INSN_PRIORITY (insn), 0, BLOCK_NUM (insn),
3018 spec_done_ds, 0, 0, vNULL, true,
3019 false, false, false, CANT_MOVE (insn));
3020 }
3021
3022 init_first_time_insn_data (insn);
3023 }
3024
3025 /* Scan the region and initialize instruction data for basic blocks BBS. */
3026 void
3027 sel_init_global_and_expr (bb_vec_t bbs)
3028 {
3029 /* ??? It would be nice to implement push / pop scheme for sched_infos. */
3030 const struct sched_scan_info_def ssi =
3031 {
3032 NULL, /* extend_bb */
3033 init_global_and_expr_for_bb, /* init_bb */
3034 extend_insn_data, /* extend_insn */
3035 init_global_and_expr_for_insn /* init_insn */
3036 };
3037
3038 sched_scan (&ssi, bbs);
3039 }
3040
3041 /* Finalize region-scope data structures for basic blocks. */
3042 static void
3043 finish_global_and_expr_for_bb (basic_block bb)
3044 {
3045 av_set_clear (&BB_AV_SET (bb));
3046 BB_AV_LEVEL (bb) = 0;
3047 }
3048
3049 /* Finalize INSN's data. */
3050 static void
3051 finish_global_and_expr_insn (insn_t insn)
3052 {
3053 if (LABEL_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn))
3054 return;
3055
3056 gcc_assert (INSN_P (insn));
3057
3058 if (INSN_LUID (insn) > 0)
3059 {
3060 free_first_time_insn_data (insn);
3061 INSN_WS_LEVEL (insn) = 0;
3062 CANT_MOVE (insn) = 0;
3063
3064 /* We can no longer assert this, as vinsns of this insn could be
3065 easily live in other insn's caches. This should be changed to
3066 a counter-like approach among all vinsns. */
3067 gcc_assert (true || VINSN_COUNT (INSN_VINSN (insn)) == 1);
3068 clear_expr (INSN_EXPR (insn));
3069 }
3070 }
3071
3072 /* Finalize per instruction data for the whole region. */
3073 void
3074 sel_finish_global_and_expr (void)
3075 {
3076 {
3077 bb_vec_t bbs;
3078 int i;
3079
3080 bbs.create (current_nr_blocks);
3081
3082 for (i = 0; i < current_nr_blocks; i++)
3083 bbs.quick_push (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i)));
3084
3085 /* Clear AV_SETs and INSN_EXPRs. */
3086 {
3087 const struct sched_scan_info_def ssi =
3088 {
3089 NULL, /* extend_bb */
3090 finish_global_and_expr_for_bb, /* init_bb */
3091 NULL, /* extend_insn */
3092 finish_global_and_expr_insn /* init_insn */
3093 };
3094
3095 sched_scan (&ssi, bbs);
3096 }
3097
3098 bbs.release ();
3099 }
3100
3101 finish_insns ();
3102 }
3103 \f
3104
3105 /* In the hooks below, we merely calculate whether or not a dependence
3106 exists, and in what part of the insn.  However, we will need more data
3107 when we start caching dependence requests.  */
3108
3109 /* Container to hold information for dependency analysis. */
3110 static struct
3111 {
3112 deps_t dc;
3113
3114 /* A variable to track which part of rtx we are scanning in
3115 sched-deps.c: sched_analyze_insn (). */
3116 deps_where_t where;
3117
3118 /* Current producer. */
3119 insn_t pro;
3120
3121 /* Current consumer. */
3122 vinsn_t con;
3123
3124 /* If HAS_DEP_P[DEPS_IN_X] is nonzero, then X has a dependence, where
3125 X is from { INSN, LHS, RHS }.  */
3126 ds_t has_dep_p[DEPS_IN_NOWHERE];
3127 } has_dependence_data;
3128
3129 /* Start analyzing dependencies of INSN. */
3130 static void
3131 has_dependence_start_insn (insn_t insn ATTRIBUTE_UNUSED)
3132 {
3133 gcc_assert (has_dependence_data.where == DEPS_IN_NOWHERE);
3134
3135 has_dependence_data.where = DEPS_IN_INSN;
3136 }
3137
3138 /* Finish analyzing dependencies of an insn. */
3139 static void
3140 has_dependence_finish_insn (void)
3141 {
3142 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3143
3144 has_dependence_data.where = DEPS_IN_NOWHERE;
3145 }
3146
3147 /* Start analyzing dependencies of LHS. */
3148 static void
3149 has_dependence_start_lhs (rtx lhs ATTRIBUTE_UNUSED)
3150 {
3151 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3152
3153 if (VINSN_LHS (has_dependence_data.con) != NULL)
3154 has_dependence_data.where = DEPS_IN_LHS;
3155 }
3156
3157 /* Finish analyzing dependencies of an lhs. */
3158 static void
3159 has_dependence_finish_lhs (void)
3160 {
3161 has_dependence_data.where = DEPS_IN_INSN;
3162 }
3163
3164 /* Start analyzing dependencies of RHS. */
3165 static void
3166 has_dependence_start_rhs (rtx rhs ATTRIBUTE_UNUSED)
3167 {
3168 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3169
3170 if (VINSN_RHS (has_dependence_data.con) != NULL)
3171 has_dependence_data.where = DEPS_IN_RHS;
3172 }
3173
3174 /* Finish analyzing dependencies of an rhs.  */
3175 static void
3176 has_dependence_finish_rhs (void)
3177 {
3178 gcc_assert (has_dependence_data.where == DEPS_IN_RHS
3179 || has_dependence_data.where == DEPS_IN_INSN);
3180
3181 has_dependence_data.where = DEPS_IN_INSN;
3182 }
3183
3184 /* Note a set of REGNO. */
3185 static void
3186 has_dependence_note_reg_set (int regno)
3187 {
3188 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3189
3190 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3191 VINSN_INSN_RTX
3192 (has_dependence_data.con)))
3193 {
3194 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3195
3196 if (reg_last->sets != NULL
3197 || reg_last->clobbers != NULL)
3198 *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3199
3200 if (reg_last->uses || reg_last->implicit_sets)
3201 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3202 }
3203 }
3204
3205 /* Note a clobber of REGNO. */
3206 static void
3207 has_dependence_note_reg_clobber (int regno)
3208 {
3209 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3210
3211 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3212 VINSN_INSN_RTX
3213 (has_dependence_data.con)))
3214 {
3215 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3216
3217 if (reg_last->sets)
3218 *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3219
3220 if (reg_last->uses || reg_last->implicit_sets)
3221 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3222 }
3223 }
3224
3225 /* Note a use of REGNO. */
3226 static void
3227 has_dependence_note_reg_use (int regno)
3228 {
3229 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3230
3231 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3232 VINSN_INSN_RTX
3233 (has_dependence_data.con)))
3234 {
3235 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3236
3237 if (reg_last->sets)
3238 *dsp = (*dsp & ~SPECULATIVE) | DEP_TRUE;
3239
3240 if (reg_last->clobbers || reg_last->implicit_sets)
3241 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3242
3243 /* Merge BE_IN_SPEC bits into *DSP when the dependency producer
3244 is actually a check insn. We need to do this for any register
3245 read-read dependency with the check unless we track properly
3246 all registers written by BE_IN_SPEC-speculated insns, as
3247 we don't have explicit dependence lists. See PR 53975. */
3248 if (reg_last->uses)
3249 {
3250 ds_t pro_spec_checked_ds;
3251
3252 pro_spec_checked_ds = INSN_SPEC_CHECKED_DS (has_dependence_data.pro);
3253 pro_spec_checked_ds = ds_get_max_dep_weak (pro_spec_checked_ds);
3254
3255 if (pro_spec_checked_ds != 0)
3256 *dsp = ds_full_merge (*dsp, pro_spec_checked_ds,
3257 NULL_RTX, NULL_RTX);
3258 }
3259 }
3260 }
3261
3262 /* Note a memory dependence. */
3263 static void
3264 has_dependence_note_mem_dep (rtx mem ATTRIBUTE_UNUSED,
3265 rtx pending_mem ATTRIBUTE_UNUSED,
3266 insn_t pending_insn ATTRIBUTE_UNUSED,
3267 ds_t ds ATTRIBUTE_UNUSED)
3268 {
3269 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3270 VINSN_INSN_RTX (has_dependence_data.con)))
3271 {
3272 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3273
3274 *dsp = ds_full_merge (ds, *dsp, pending_mem, mem);
3275 }
3276 }
3277
3278 /* Note a dependence. */
3279 static void
3280 has_dependence_note_dep (insn_t pro ATTRIBUTE_UNUSED,
3281 ds_t ds ATTRIBUTE_UNUSED)
3282 {
3283 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3284 VINSN_INSN_RTX (has_dependence_data.con)))
3285 {
3286 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3287
3288 *dsp = ds_full_merge (ds, *dsp, NULL_RTX, NULL_RTX);
3289 }
3290 }
3291
3292 /* Mark the insn as having a hard dependence that prevents speculation. */
3293 void
3294 sel_mark_hard_insn (rtx insn)
3295 {
3296 int i;
3297
3298 /* Only work when we're in has_dependence_p mode.
3299 ??? This is a hack, this should actually be a hook. */
3300 if (!has_dependence_data.dc || !has_dependence_data.pro)
3301 return;
3302
3303 gcc_assert (insn == VINSN_INSN_RTX (has_dependence_data.con));
3304 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3305
3306 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3307 has_dependence_data.has_dep_p[i] &= ~SPECULATIVE;
3308 }
3309
3310 /* This structure holds the hooks for the dependency analysis used when
3311 actually processing dependencies in the scheduler. */
3312 static struct sched_deps_info_def has_dependence_sched_deps_info;
3313
3314 /* This initializes most of the fields of the above structure. */
3315 static const struct sched_deps_info_def const_has_dependence_sched_deps_info =
3316 {
3317 NULL,
3318
3319 has_dependence_start_insn,
3320 has_dependence_finish_insn,
3321 has_dependence_start_lhs,
3322 has_dependence_finish_lhs,
3323 has_dependence_start_rhs,
3324 has_dependence_finish_rhs,
3325 has_dependence_note_reg_set,
3326 has_dependence_note_reg_clobber,
3327 has_dependence_note_reg_use,
3328 has_dependence_note_mem_dep,
3329 has_dependence_note_dep,
3330
3331 0, /* use_cselib */
3332 0, /* use_deps_list */
3333 0 /* generate_spec_deps */
3334 };
3335
3336 /* Initialize has_dependence_sched_deps_info with extra spec field. */
3337 static void
3338 setup_has_dependence_sched_deps_info (void)
3339 {
3340 memcpy (&has_dependence_sched_deps_info,
3341 &const_has_dependence_sched_deps_info,
3342 sizeof (has_dependence_sched_deps_info));
3343
3344 if (spec_info != NULL)
3345 has_dependence_sched_deps_info.generate_spec_deps = 1;
3346
3347 sched_deps_info = &has_dependence_sched_deps_info;
3348 }
3349
3350 /* Remove all dependences found and recorded in has_dependence_data array. */
3351 void
3352 sel_clear_has_dependence (void)
3353 {
3354 int i;
3355
3356 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3357 has_dependence_data.has_dep_p[i] = 0;
3358 }
3359
3360 /* Return nonzero if EXPR is dependent upon PRED.  Return a pointer
3361 to the dependence information array in HAS_DEP_PP.  */
3362 ds_t
3363 has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
3364 {
3365 int i;
3366 ds_t ds;
3367 struct deps_desc *dc;
3368
3369 if (INSN_SIMPLEJUMP_P (pred))
3370 /* Unconditional jump is just a transfer of control flow.
3371 Ignore it. */
3372 return false;
3373
3374 dc = &INSN_DEPS_CONTEXT (pred);
3375
3376 /* We init this field lazily. */
3377 if (dc->reg_last == NULL)
3378 init_deps_reg_last (dc);
3379
3380 if (!dc->readonly)
3381 {
3382 has_dependence_data.pro = NULL;
3383 /* Initialize empty dep context with information about PRED. */
3384 advance_deps_context (dc, pred);
3385 dc->readonly = 1;
3386 }
3387
3388 has_dependence_data.where = DEPS_IN_NOWHERE;
3389 has_dependence_data.pro = pred;
3390 has_dependence_data.con = EXPR_VINSN (expr);
3391 has_dependence_data.dc = dc;
3392
3393 sel_clear_has_dependence ();
3394
3395 /* Now catch all dependencies that would be generated between PRED and
3396 INSN. */
3397 setup_has_dependence_sched_deps_info ();
3398 deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3399 has_dependence_data.dc = NULL;
3400
3401 /* When a barrier was found, set DEPS_IN_INSN bits. */
3402 if (dc->last_reg_pending_barrier == TRUE_BARRIER)
3403 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_TRUE;
3404 else if (dc->last_reg_pending_barrier == MOVE_BARRIER)
3405 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3406
3407 /* Do not allow stores to memory to move through checks. Currently
3408 we don't move this to sched-deps.c as the check doesn't have
3409 obvious places to which this dependence can be attached.
3410 FIXME: this should go to a hook.  */
3411 if (EXPR_LHS (expr)
3412 && MEM_P (EXPR_LHS (expr))
3413 && sel_insn_is_speculation_check (pred))
3414 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3415
3416 *has_dep_pp = has_dependence_data.has_dep_p;
3417 ds = 0;
3418 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3419 ds = ds_full_merge (ds, has_dependence_data.has_dep_p[i],
3420 NULL_RTX, NULL_RTX);
3421
3422 return ds;
3423 }
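
/* A minimal caller sketch (hypothetical, for illustration only):

     ds_t *has_dep_p;
     ds_t full_ds = has_dependence_p (expr, pred, &has_dep_p);

     if (full_ds == 0)
       ;   -- EXPR can be moved through PRED freely
     else if (full_ds & SPECULATIVE)
       ;   -- the dependence may be overcome by speculation, e.g.
            via speculate_expr (expr, full_ds)
     else
       ;   -- a hard dependence; has_dep_p[DEPS_IN_LHS] etc. tell
            in which part of the insn it was found
*/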
3424 \f
3425
3426 /* Dependence hooks implementation that checks dependence latency constraints
3427 on the insns being scheduled. The entry point for these routines is
3428 tick_check_p predicate. */
3429
3430 static struct
3431 {
3432 /* An expr we are currently checking. */
3433 expr_t expr;
3434
3435 /* A minimal cycle for its scheduling. */
3436 int cycle;
3437
3438 /* Whether we have seen a true dependence while checking. */
3439 bool seen_true_dep_p;
3440 } tick_check_data;
3441
3442 /* Update the minimal scheduling cycle for the expr being checked,
3443 given that it depends on PRO_INSN with status DS and weight DW.  */
3444 static void
3445 tick_check_dep_with_dw (insn_t pro_insn, ds_t ds, dw_t dw)
3446 {
3447 expr_t con_expr = tick_check_data.expr;
3448 insn_t con_insn = EXPR_INSN_RTX (con_expr);
3449
3450 if (con_insn != pro_insn)
3451 {
3452 enum reg_note dt;
3453 int tick;
3454
3455 if (/* PROducer was removed from above due to pipelining. */
3456 !INSN_IN_STREAM_P (pro_insn)
3457 /* Or PROducer was originally on the next iteration regarding the
3458 CONsumer. */
3459 || (INSN_SCHED_TIMES (pro_insn)
3460 - EXPR_SCHED_TIMES (con_expr)) > 1)
3461 /* Don't count this dependence. */
3462 return;
3463
3464 dt = ds_to_dt (ds);
3465 if (dt == REG_DEP_TRUE)
3466 tick_check_data.seen_true_dep_p = true;
3467
3468 gcc_assert (INSN_SCHED_CYCLE (pro_insn) > 0);
3469
3470 {
3471 dep_def _dep, *dep = &_dep;
3472
3473 init_dep (dep, pro_insn, con_insn, dt);
3474
3475 tick = INSN_SCHED_CYCLE (pro_insn) + dep_cost_1 (dep, dw);
3476 }
3477
3478 /* When there are several kinds of dependencies between pro and con,
3479 only REG_DEP_TRUE should be taken into account. */
3480 if (tick > tick_check_data.cycle
3481 && (dt == REG_DEP_TRUE || !tick_check_data.seen_true_dep_p))
3482 tick_check_data.cycle = tick;
3483 }
3484 }
3485
3486 /* An implementation of note_dep hook. */
3487 static void
3488 tick_check_note_dep (insn_t pro, ds_t ds)
3489 {
3490 tick_check_dep_with_dw (pro, ds, 0);
3491 }
3492
3493 /* An implementation of note_mem_dep hook. */
3494 static void
3495 tick_check_note_mem_dep (rtx mem1, rtx mem2, insn_t pro, ds_t ds)
3496 {
3497 dw_t dw;
3498
3499 dw = (ds_to_dt (ds) == REG_DEP_TRUE
3500 ? estimate_dep_weak (mem1, mem2)
3501 : 0);
3502
3503 tick_check_dep_with_dw (pro, ds, dw);
3504 }
3505
3506 /* This structure contains hooks for dependence analysis used when determining
3507 whether an insn is ready for scheduling. */
3508 static struct sched_deps_info_def tick_check_sched_deps_info =
3509 {
3510 NULL,
3511
3512 NULL,
3513 NULL,
3514 NULL,
3515 NULL,
3516 NULL,
3517 NULL,
3518 haifa_note_reg_set,
3519 haifa_note_reg_clobber,
3520 haifa_note_reg_use,
3521 tick_check_note_mem_dep,
3522 tick_check_note_dep,
3523
3524 0, 0, 0
3525 };
3526
3527 /* Estimate number of cycles from the current cycle of FENCE until EXPR can be
3528 scheduled. Return 0 if all data from producers in DC is ready. */
3529 int
3530 tick_check_p (expr_t expr, deps_t dc, fence_t fence)
3531 {
3532 int cycles_left;
3533 /* Initialize variables. */
3534 tick_check_data.expr = expr;
3535 tick_check_data.cycle = 0;
3536 tick_check_data.seen_true_dep_p = false;
3537 sched_deps_info = &tick_check_sched_deps_info;
3538
3539 gcc_assert (!dc->readonly);
3540 dc->readonly = 1;
3541 deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3542 dc->readonly = 0;
3543
3544 cycles_left = tick_check_data.cycle - FENCE_CYCLE (fence);
3545
3546 return cycles_left >= 0 ? cycles_left : 0;
3547 }
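
/* Example (hypothetical numbers): if the latest producer of EXPR's
   sources was scheduled on cycle 3 and the dependence costs 2 cycles,
   tick_check_data.cycle becomes 5; with FENCE_CYCLE (fence) == 4 the
   function returns 1, i.e. EXPR becomes ready one cycle from now.  */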
3548 \f
3549
3550 /* Functions to work with insns. */
3551
3552 /* Returns true if LHS of INSN is the same as DEST of an insn
3553 being moved. */
3554 bool
3555 lhs_of_insn_equals_to_dest_p (insn_t insn, rtx dest)
3556 {
3557 rtx lhs = INSN_LHS (insn);
3558
3559 if (lhs == NULL || dest == NULL)
3560 return false;
3561
3562 return rtx_equal_p (lhs, dest);
3563 }
3564
3565 /* Return s_i_d entry of INSN. Callable from debugger. */
3566 sel_insn_data_def
3567 insn_sid (insn_t insn)
3568 {
3569 return *SID (insn);
3570 }
3571
3572 /* True when INSN is a speculative check. We can tell this by looking
3573 at the data structures of the selective scheduler, not by examining
3574 the pattern. */
3575 bool
3576 sel_insn_is_speculation_check (rtx insn)
3577 {
3578 return s_i_d.exists () && !! INSN_SPEC_CHECKED_DS (insn);
3579 }
3580
3581 /* Extracts machine mode MODE and destination location DST_LOC
3582 for given INSN. */
3583 void
3584 get_dest_and_mode (rtx insn, rtx *dst_loc, enum machine_mode *mode)
3585 {
3586 rtx pat = PATTERN (insn);
3587
3588 gcc_assert (dst_loc);
3589 gcc_assert (GET_CODE (pat) == SET);
3590
3591 *dst_loc = SET_DEST (pat);
3592
3593 gcc_assert (*dst_loc);
3594 gcc_assert (MEM_P (*dst_loc) || REG_P (*dst_loc));
3595
3596 if (mode)
3597 *mode = GET_MODE (*dst_loc);
3598 }
3599
3600 /* Returns true when moving through JUMP will result in bookkeeping
3601 creation. */
3602 bool
3603 bookkeeping_can_be_created_if_moved_through_p (insn_t jump)
3604 {
3605 insn_t succ;
3606 succ_iterator si;
3607
3608 FOR_EACH_SUCC (succ, si, jump)
3609 if (sel_num_cfg_preds_gt_1 (succ))
3610 return true;
3611
3612 return false;
3613 }
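
/* Illustration: in the CFG below, moving an expr up through the jump
   ending bb1 requires bookkeeping, because the successor bb3 is also
   reachable from bb2, i.e. sel_num_cfg_preds_gt_1 holds for it:

       bb1   bb2
         \   /
          bb3
*/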
3614
3615 /* Return 'true' if INSN is the only one in its basic block. */
3616 static bool
3617 insn_is_the_only_one_in_bb_p (insn_t insn)
3618 {
3619 return sel_bb_head_p (insn) && sel_bb_end_p (insn);
3620 }
3621
3622 #ifdef ENABLE_CHECKING
3623 /* Check that the region we're scheduling still has at most one
3624 backedge. */
3625 static void
3626 verify_backedges (void)
3627 {
3628 if (pipelining_p)
3629 {
3630 int i, n = 0;
3631 edge e;
3632 edge_iterator ei;
3633
3634 for (i = 0; i < current_nr_blocks; i++)
3635 FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i))->succs)
3636 if (in_current_region_p (e->dest)
3637 && BLOCK_TO_BB (e->dest->index) < i)
3638 n++;
3639
3640 gcc_assert (n <= 1);
3641 }
3642 }
3643 #endif
3644 \f
3645
3646 /* Functions to work with control flow. */
3647
3648 /* Recompute BLOCK_TO_BB and BB_TO_BLOCK for the current region so that
3649 blocks are sorted in topological order (the order might have been
3650 invalidated by redirecting an edge).  */
3651 static void
3652 sel_recompute_toporder (void)
3653 {
3654 int i, n, rgn;
3655 int *postorder, n_blocks;
3656
3657 postorder = XALLOCAVEC (int, n_basic_blocks_for_fn (cfun));
3658 n_blocks = post_order_compute (postorder, false, false);
3659
3660 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
3661 for (n = 0, i = n_blocks - 1; i >= 0; i--)
3662 if (CONTAINING_RGN (postorder[i]) == rgn)
3663 {
3664 BLOCK_TO_BB (postorder[i]) = n;
3665 BB_TO_BLOCK (n) = postorder[i];
3666 n++;
3667 }
3668
3669 /* Assert that we updated info for all blocks. We may miss some blocks if
3670 this function is called when redirecting an edge made a block
3671 unreachable, but that block is not deleted yet. */
3672 gcc_assert (n == RGN_NR_BLOCKS (rgn));
3673 }
3674
3675 /* Tidy the possibly empty block BB. */
3676 static bool
3677 maybe_tidy_empty_bb (basic_block bb)
3678 {
3679 basic_block succ_bb, pred_bb, note_bb;
3680 vec<basic_block> dom_bbs;
3681 edge e;
3682 edge_iterator ei;
3683 bool rescan_p;
3684
3685 /* Keep empty bb only if this block immediately precedes EXIT and
3686 has incoming non-fallthrough edge, or it has no predecessors or
3687 successors. Otherwise remove it. */
3688 if (!sel_bb_empty_p (bb)
3689 || (single_succ_p (bb)
3690 && single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
3691 && (!single_pred_p (bb)
3692 || !(single_pred_edge (bb)->flags & EDGE_FALLTHRU)))
3693 || EDGE_COUNT (bb->preds) == 0
3694 || EDGE_COUNT (bb->succs) == 0)
3695 return false;
3696
3697 /* Do not attempt to redirect complex edges. */
3698 FOR_EACH_EDGE (e, ei, bb->preds)
3699 if (e->flags & EDGE_COMPLEX)
3700 return false;
3701 else if (e->flags & EDGE_FALLTHRU)
3702 {
3703 rtx note;
3704 /* If prev bb ends with asm goto, see if any of the
3705 ASM_OPERANDS_LABELs don't point to the fallthru
3706 label. Do not attempt to redirect it in that case. */
3707 if (JUMP_P (BB_END (e->src))
3708 && (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
3709 {
3710 int i, n = ASM_OPERANDS_LABEL_LENGTH (note);
3711
3712 for (i = 0; i < n; ++i)
3713 if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (bb))
3714 return false;
3715 }
3716 }
3717
3718 free_data_sets (bb);
3719
3720 /* Do not delete BB if it has more than one successor.
3721 That can occur when we are moving a jump.  */
3722 if (!single_succ_p (bb))
3723 {
3724 gcc_assert (can_merge_blocks_p (bb->prev_bb, bb));
3725 sel_merge_blocks (bb->prev_bb, bb);
3726 return true;
3727 }
3728
3729 succ_bb = single_succ (bb);
3730 rescan_p = true;
3731 pred_bb = NULL;
3732 dom_bbs.create (0);
3733
3734 /* Save a pred/succ from the current region to attach the notes to. */
3735 note_bb = NULL;
3736 FOR_EACH_EDGE (e, ei, bb->preds)
3737 if (in_current_region_p (e->src))
3738 {
3739 note_bb = e->src;
3740 break;
3741 }
3742 if (note_bb == NULL)
3743 note_bb = succ_bb;
3744
3745 /* Redirect all non-fallthru edges to the next bb. */
3746 while (rescan_p)
3747 {
3748 rescan_p = false;
3749
3750 FOR_EACH_EDGE (e, ei, bb->preds)
3751 {
3752 pred_bb = e->src;
3753
3754 if (!(e->flags & EDGE_FALLTHRU))
3755 {
3756 /* We cannot invalidate the computed topological order by moving
3757 the edge destination block (E->DEST) along a fallthru edge.
3758
3759 We will update dominators here only when we get
3760 an unreachable block while redirecting; otherwise
3761 sel_redirect_edge_and_branch will take care of it. */
3762 if (e->dest != bb
3763 && single_pred_p (e->dest))
3764 dom_bbs.safe_push (e->dest);
3765 sel_redirect_edge_and_branch (e, succ_bb);
3766 rescan_p = true;
3767 break;
3768 }
3769 /* If the edge is fallthru, but PRED_BB ends in a conditional jump
3770 to BB (so there is no non-fallthru edge from PRED_BB to BB), we
3771 still have to adjust it. */
3772 else if (single_succ_p (pred_bb) && any_condjump_p (BB_END (pred_bb)))
3773 {
3774 /* If possible, try to remove the unneeded conditional jump. */
3775 if (INSN_SCHED_TIMES (BB_END (pred_bb)) == 0
3776 && !IN_CURRENT_FENCE_P (BB_END (pred_bb)))
3777 {
3778 if (!sel_remove_insn (BB_END (pred_bb), false, false))
3779 tidy_fallthru_edge (e);
3780 }
3781 else
3782 sel_redirect_edge_and_branch (e, succ_bb);
3783 rescan_p = true;
3784 break;
3785 }
3786 }
3787 }
3788
3789 if (can_merge_blocks_p (bb->prev_bb, bb))
3790 sel_merge_blocks (bb->prev_bb, bb);
3791 else
3792 {
3793 /* This is a block without fallthru predecessor. Just delete it. */
3794 gcc_assert (note_bb);
3795 move_bb_info (note_bb, bb);
3796 remove_empty_bb (bb, true);
3797 }
3798
3799 if (!dom_bbs.is_empty ())
3800 {
3801 dom_bbs.safe_push (succ_bb);
3802 iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);
3803 dom_bbs.release ();
3804 }
3805
3806 return true;
3807 }
3808
3809 /* Tidy the control flow after we have removed the original insn from
3810 XBB.  Return true if we have removed some blocks.  When FULL_TIDYING
3811 is true, also try to optimize control flow on non-empty blocks.  */
3812 bool
3813 tidy_control_flow (basic_block xbb, bool full_tidying)
3814 {
3815 bool changed = true;
3816 insn_t first, last;
3817
3818 /* First check whether XBB is empty. */
3819 changed = maybe_tidy_empty_bb (xbb);
3820 if (changed || !full_tidying)
3821 return changed;
3822
3823 /* Check if there is an unnecessary jump left after the insn.  */
3824 if (bb_has_removable_jump_to_p (xbb, xbb->next_bb)
3825 && INSN_SCHED_TIMES (BB_END (xbb)) == 0
3826 && !IN_CURRENT_FENCE_P (BB_END (xbb)))
3827 {
3828 if (sel_remove_insn (BB_END (xbb), false, false))
3829 return true;
3830 tidy_fallthru_edge (EDGE_SUCC (xbb, 0));
3831 }
3832
3833 first = sel_bb_head (xbb);
3834 last = sel_bb_end (xbb);
3835 if (MAY_HAVE_DEBUG_INSNS)
3836 {
3837 if (first != last && DEBUG_INSN_P (first))
3838 do
3839 first = NEXT_INSN (first);
3840 while (first != last && (DEBUG_INSN_P (first) || NOTE_P (first)));
3841
3842 if (first != last && DEBUG_INSN_P (last))
3843 do
3844 last = PREV_INSN (last);
3845 while (first != last && (DEBUG_INSN_P (last) || NOTE_P (last)));
3846 }
3847 /* Check if there is an unnecessary jump in the previous basic block,
3848 leading to the next basic block, left after removing INSN from the
3849 stream.  If so, remove that jump and redirect the edge to the current
3850 basic block (where INSN was before deletion).  This way, when the NOP
3851 is deleted several instructions later together with its basic block,
3852 we will not get a jump to the next instruction, which can be
3853 harmful. */
3854 if (first == last
3855 && !sel_bb_empty_p (xbb)
3856 && INSN_NOP_P (last)
3857 /* Flow goes fallthru from current block to the next. */
3858 && EDGE_COUNT (xbb->succs) == 1
3859 && (EDGE_SUCC (xbb, 0)->flags & EDGE_FALLTHRU)
3860 /* When successor is an EXIT block, it may not be the next block. */
3861 && single_succ (xbb) != EXIT_BLOCK_PTR_FOR_FN (cfun)
3862 /* And the unconditional jump in the previous basic block leads to
3863 the next basic block of XBB, and this jump can be safely removed. */
3864 && in_current_region_p (xbb->prev_bb)
3865 && bb_has_removable_jump_to_p (xbb->prev_bb, xbb->next_bb)
3866 && INSN_SCHED_TIMES (BB_END (xbb->prev_bb)) == 0
3867 /* Also this jump is not at the scheduling boundary. */
3868 && !IN_CURRENT_FENCE_P (BB_END (xbb->prev_bb)))
3869 {
3870 bool recompute_toporder_p;
3871 /* Clear data structures of jump - jump itself will be removed
3872 by sel_redirect_edge_and_branch. */
3873 clear_expr (INSN_EXPR (BB_END (xbb->prev_bb)));
3874 recompute_toporder_p
3875 = sel_redirect_edge_and_branch (EDGE_SUCC (xbb->prev_bb, 0), xbb);
3876
3877 gcc_assert (EDGE_SUCC (xbb->prev_bb, 0)->flags & EDGE_FALLTHRU);
3878
3879 /* It can turn out that, after removing the unused jump, the basic
3880 block that contained it becomes empty too.  In such a case
3881 remove it as well.  */
3882 if (sel_bb_empty_p (xbb->prev_bb))
3883 changed = maybe_tidy_empty_bb (xbb->prev_bb);
3884 if (recompute_toporder_p)
3885 sel_recompute_toporder ();
3886 }
3887
3888 #ifdef ENABLE_CHECKING
3889 verify_backedges ();
3890 verify_dominators (CDI_DOMINATORS);
3891 #endif
3892
3893 return changed;
3894 }
3895
3896 /* Purge meaningless empty blocks in the middle of a region. */
3897 void
3898 purge_empty_blocks (void)
3899 {
3900 int i;
3901
3902 /* Do not attempt to delete the first basic block in the region. */
3903 for (i = 1; i < current_nr_blocks; )
3904 {
3905 basic_block b = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i));
3906
3907 if (maybe_tidy_empty_bb (b))
3908 continue;
3909
3910 i++;
3911 }
3912 }
3913
3914 /* Rip INSN off the insn stream.  When ONLY_DISCONNECT is true,
3915 do not delete the insn's data, because it will be re-emitted later.
3916 Return true if we have removed some blocks afterwards. */
3917 bool
3918 sel_remove_insn (insn_t insn, bool only_disconnect, bool full_tidying)
3919 {
3920 basic_block bb = BLOCK_FOR_INSN (insn);
3921
3922 gcc_assert (INSN_IN_STREAM_P (insn));
3923
3924 if (DEBUG_INSN_P (insn) && BB_AV_SET_VALID_P (bb))
3925 {
3926 expr_t expr;
3927 av_set_iterator i;
3928
3929 /* When we remove a debug insn that is head of a BB, it remains
3930 in the AV_SET of the block, but it shouldn't. */
3931 FOR_EACH_EXPR_1 (expr, i, &BB_AV_SET (bb))
3932 if (EXPR_INSN_RTX (expr) == insn)
3933 {
3934 av_set_iter_remove (&i);
3935 break;
3936 }
3937 }
3938
3939 if (only_disconnect)
3940 remove_insn (insn);
3941 else
3942 {
3943 delete_insn (insn);
3944 clear_expr (INSN_EXPR (insn));
3945 }
3946
  /* It is necessary to NULL these fields in case we are going to re-insert
     INSN into the insn stream, as will usually happen in the ONLY_DISCONNECT
     case, but also for NOPs that we will return to the nop pool.  */
3950 PREV_INSN (insn) = NULL_RTX;
3951 NEXT_INSN (insn) = NULL_RTX;
3952 set_block_for_insn (insn, NULL);
3953
3954 return tidy_control_flow (bb, full_tidying);
3955 }
3956
/* Estimate the number of insns in BB.  */
3958 static int
3959 sel_estimate_number_of_insns (basic_block bb)
3960 {
3961 int res = 0;
3962 insn_t insn = NEXT_INSN (BB_HEAD (bb)), next_tail = NEXT_INSN (BB_END (bb));
3963
3964 for (; insn != next_tail; insn = NEXT_INSN (insn))
3965 if (NONDEBUG_INSN_P (insn))
3966 res++;
3967
3968 return res;
3969 }
3970
3971 /* We don't need separate luids for notes or labels. */
3972 static int
3973 sel_luid_for_non_insn (rtx x)
3974 {
3975 gcc_assert (NOTE_P (x) || LABEL_P (x));
3976
3977 return -1;
3978 }
3979
3980 /* Find the proper seqno for inserting at INSN by successors.
3981 Return -1 if no successors with positive seqno exist. */
3982 static int
3983 get_seqno_by_succs (rtx insn)
3984 {
3985 basic_block bb = BLOCK_FOR_INSN (insn);
3986 rtx tmp = insn, end = BB_END (bb);
3987 int seqno;
3988 insn_t succ = NULL;
3989 succ_iterator si;
3990
3991 while (tmp != end)
3992 {
3993 tmp = NEXT_INSN (tmp);
3994 if (INSN_P (tmp))
3995 return INSN_SEQNO (tmp);
3996 }
3997
3998 seqno = INT_MAX;
3999
4000 FOR_EACH_SUCC_1 (succ, si, end, SUCCS_NORMAL)
4001 if (INSN_SEQNO (succ) > 0)
4002 seqno = MIN (seqno, INSN_SEQNO (succ));
4003
4004 if (seqno == INT_MAX)
4005 return -1;
4006
4007 return seqno;
4008 }
4009
4010 /* Compute seqno for INSN by its preds or succs. */
4011 static int
4012 get_seqno_for_a_jump (insn_t insn)
4013 {
4014 int seqno;
4015
4016 gcc_assert (INSN_SIMPLEJUMP_P (insn));
4017
4018 if (!sel_bb_head_p (insn))
4019 seqno = INSN_SEQNO (PREV_INSN (insn));
4020 else
4021 {
4022 basic_block bb = BLOCK_FOR_INSN (insn);
4023
4024 if (single_pred_p (bb)
4025 && !in_current_region_p (single_pred (bb)))
4026 {
4027 /* We can have preds outside a region when splitting edges
4028 for pipelining of an outer loop. Use succ instead.
4029 There should be only one of them. */
4030 insn_t succ = NULL;
4031 succ_iterator si;
4032 bool first = true;
4033
4034 gcc_assert (flag_sel_sched_pipelining_outer_loops
4035 && current_loop_nest);
4036 FOR_EACH_SUCC_1 (succ, si, insn,
4037 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
4038 {
4039 gcc_assert (first);
4040 first = false;
4041 }
4042
4043 gcc_assert (succ != NULL);
4044 seqno = INSN_SEQNO (succ);
4045 }
4046 else
4047 {
4048 insn_t *preds;
4049 int n;
4050
4051 cfg_preds (BLOCK_FOR_INSN (insn), &preds, &n);
4052
4053 gcc_assert (n > 0);
4054 /* For one predecessor, use simple method. */
4055 if (n == 1)
4056 seqno = INSN_SEQNO (preds[0]);
4057 else
4058 seqno = get_seqno_by_preds (insn);
4059
4060 free (preds);
4061 }
4062 }
4063
4064 /* We were unable to find a good seqno among preds. */
4065 if (seqno < 0)
4066 seqno = get_seqno_by_succs (insn);
4067
4068 gcc_assert (seqno >= 0);
4069
4070 return seqno;
4071 }
4072
4073 /* Find the proper seqno for inserting at INSN. Returns -1 if no predecessors
4074 with positive seqno exist. */
4075 int
4076 get_seqno_by_preds (rtx insn)
4077 {
4078 basic_block bb = BLOCK_FOR_INSN (insn);
4079 rtx tmp = insn, head = BB_HEAD (bb);
4080 insn_t *preds;
4081 int n, i, seqno;
4082
4083 while (tmp != head)
4084 {
4085 tmp = PREV_INSN (tmp);
4086 if (INSN_P (tmp))
4087 return INSN_SEQNO (tmp);
4088 }
4089
4090 cfg_preds (bb, &preds, &n);
4091 for (i = 0, seqno = -1; i < n; i++)
4092 seqno = MAX (seqno, INSN_SEQNO (preds[i]));

  free (preds);
  return seqno;
4095 }
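
/* A minimal usage sketch of the two seqno helpers above (a hypothetical
   caller, shown for illustration only):

     int seqno = get_seqno_by_preds (insn);
     if (seqno < 0)
       seqno = get_seqno_by_succs (insn);
     gcc_assert (seqno >= 0);
     INSN_SEQNO (insn) = seqno;

   This mirrors the fallback order used by get_seqno_for_a_jump above:
   both helpers first scan the insn's own block before consulting CFG
   neighbors.  */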
4096
4097 \f
4098
4099 /* Extend pass-scope data structures for basic blocks. */
4100 void
4101 sel_extend_global_bb_info (void)
4102 {
4103 sel_global_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun));
4104 }
4105
4106 /* Extend region-scope data structures for basic blocks. */
4107 static void
4108 extend_region_bb_info (void)
4109 {
4110 sel_region_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun));
4111 }
4112
4113 /* Extend all data structures to fit for all basic blocks. */
4114 static void
4115 extend_bb_info (void)
4116 {
4117 sel_extend_global_bb_info ();
4118 extend_region_bb_info ();
4119 }
4120
4121 /* Finalize pass-scope data structures for basic blocks. */
4122 void
4123 sel_finish_global_bb_info (void)
4124 {
4125 sel_global_bb_info.release ();
4126 }
4127
4128 /* Finalize region-scope data structures for basic blocks. */
4129 static void
4130 finish_region_bb_info (void)
4131 {
4132 sel_region_bb_info.release ();
4133 }
4134 \f
4135
4136 /* Data for each insn in current region. */
4137 vec<sel_insn_data_def> s_i_d = vNULL;
4138
4139 /* Extend data structures for insns from current region. */
4140 static void
4141 extend_insn_data (void)
4142 {
4143 int reserve;
4144
4145 sched_extend_target ();
4146 sched_deps_init (false);
4147
4148 /* Extend data structures for insns from current region. */
4149 reserve = (sched_max_luid + 1 - s_i_d.length ());
4150 if (reserve > 0 && ! s_i_d.space (reserve))
4151 {
4152 int size;
4153
4154 if (sched_max_luid / 2 > 1024)
4155 size = sched_max_luid + 1024;
4156 else
4157 size = 3 * sched_max_luid / 2;
4158
4159
4160 s_i_d.safe_grow_cleared (size);
4161 }
4162 }
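
/* A worked example of the growth policy above (illustrative numbers):
   for sched_max_luid = 1000 the else branch grows the vector to
   3 * 1000 / 2 = 1500 entries, while for sched_max_luid = 10000 the
   first branch yields 10000 + 1024 = 11024.  That is, growth is
   proportional for small regions and switches to constant headroom
   once luids exceed ~2048.  */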
4163
4164 /* Finalize data structures for insns from current region. */
4165 static void
4166 finish_insns (void)
4167 {
4168 unsigned i;
4169
  /* Clear all dependence contexts that may have been left over from insns
     removed during scheduling.  */
4172 for (i = 0; i < s_i_d.length (); i++)
4173 {
4174 sel_insn_data_def *sid_entry = &s_i_d[i];
4175
4176 if (sid_entry->live)
4177 return_regset_to_pool (sid_entry->live);
4178 if (sid_entry->analyzed_deps)
4179 {
4180 BITMAP_FREE (sid_entry->analyzed_deps);
4181 BITMAP_FREE (sid_entry->found_deps);
4182 htab_delete (sid_entry->transformed_insns);
4183 free_deps (&sid_entry->deps_context);
4184 }
4185 if (EXPR_VINSN (&sid_entry->expr))
4186 {
4187 clear_expr (&sid_entry->expr);
4188
4189 /* Also, clear CANT_MOVE bit here, because we really don't want it
4190 to be passed to the next region. */
4191 CANT_MOVE_BY_LUID (i) = 0;
4192 }
4193 }
4194
4195 s_i_d.release ();
4196 }
4197
/* A proxy to pass initialization data to init_insn_data ().  */
4199 static sel_insn_data_def _insn_init_ssid;
4200 static sel_insn_data_t insn_init_ssid = &_insn_init_ssid;
4201
4202 /* If true create a new vinsn. Otherwise use the one from EXPR. */
4203 static bool insn_init_create_new_vinsn_p;
4204
4205 /* Set all necessary data for initialization of the new insn[s]. */
4206 static expr_t
4207 set_insn_init (expr_t expr, vinsn_t vi, int seqno)
4208 {
4209 expr_t x = &insn_init_ssid->expr;
4210
4211 copy_expr_onside (x, expr);
4212 if (vi != NULL)
4213 {
4214 insn_init_create_new_vinsn_p = false;
4215 change_vinsn_in_expr (x, vi);
4216 }
4217 else
4218 insn_init_create_new_vinsn_p = true;
4219
4220 insn_init_ssid->seqno = seqno;
4221 return x;
4222 }
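
/* An illustrative sketch of the deferred-init protocol (a hypothetical
   caller; emit_insn_after is ordinary rtl emission):

     set_insn_init (expr, vi, seqno);
     insn = emit_insn_after (PATTERN (EXPR_INSN_RTX (expr)), after);
     sel_init_new_insn (insn, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID);

   sel_init_new_insn () below picks up the data stashed here via
   init_insn_data ().  */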
4223
4224 /* Init data for INSN. */
4225 static void
4226 init_insn_data (insn_t insn)
4227 {
4228 expr_t expr;
4229 sel_insn_data_t ssid = insn_init_ssid;
4230
4231 /* The fields mentioned below are special and hence are not being
4232 propagated to the new insns. */
4233 gcc_assert (!ssid->asm_p && ssid->sched_next == NULL
4234 && !ssid->after_stall_p && ssid->sched_cycle == 0);
4235 gcc_assert (INSN_P (insn) && INSN_LUID (insn) > 0);
4236
4237 expr = INSN_EXPR (insn);
4238 copy_expr (expr, &ssid->expr);
4239 prepare_insn_expr (insn, ssid->seqno);
4240
4241 if (insn_init_create_new_vinsn_p)
4242 change_vinsn_in_expr (expr, vinsn_create (insn, init_insn_force_unique_p));
4243
4244 if (first_time_insn_init (insn))
4245 init_first_time_insn_data (insn);
4246 }
4247
/* This is used to initialize spurious jumps generated when redirecting
   edges (see sel_redirect_edge_and_branch and its _force variant below).  */
4250 static void
4251 init_simplejump_data (insn_t insn)
4252 {
4253 init_expr (INSN_EXPR (insn), vinsn_create (insn, false), 0,
4254 REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0,
4255 vNULL, true, false, false,
4256 false, true);
4257 INSN_SEQNO (insn) = get_seqno_for_a_jump (insn);
4258 init_first_time_insn_data (insn);
4259 }
4260
4261 /* Perform deferred initialization of insns. This is used to process
4262 a new jump that may be created by redirect_edge. */
4263 void
4264 sel_init_new_insn (insn_t insn, int flags)
4265 {
4266 /* We create data structures for bb when the first insn is emitted in it. */
4267 if (INSN_P (insn)
4268 && INSN_IN_STREAM_P (insn)
4269 && insn_is_the_only_one_in_bb_p (insn))
4270 {
4271 extend_bb_info ();
4272 create_initial_data_sets (BLOCK_FOR_INSN (insn));
4273 }
4274
4275 if (flags & INSN_INIT_TODO_LUID)
4276 {
4277 sched_extend_luids ();
4278 sched_init_insn_luid (insn);
4279 }
4280
4281 if (flags & INSN_INIT_TODO_SSID)
4282 {
4283 extend_insn_data ();
4284 init_insn_data (insn);
4285 clear_expr (&insn_init_ssid->expr);
4286 }
4287
4288 if (flags & INSN_INIT_TODO_SIMPLEJUMP)
4289 {
4290 extend_insn_data ();
4291 init_simplejump_data (insn);
4292 }
4293
4294 gcc_assert (CONTAINING_RGN (BLOCK_NUM (insn))
4295 == CONTAINING_RGN (BB_TO_BLOCK (0)));
4296 }
4297 \f
4298
4299 /* Functions to init/finish work with lv sets. */
4300
4301 /* Init BB_LV_SET of BB from DF_LR_IN set of BB. */
4302 static void
4303 init_lv_set (basic_block bb)
4304 {
4305 gcc_assert (!BB_LV_SET_VALID_P (bb));
4306
4307 BB_LV_SET (bb) = get_regset_from_pool ();
4308 COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb));
4309 BB_LV_SET_VALID_P (bb) = true;
4310 }
4311
4312 /* Copy liveness information to BB from FROM_BB. */
4313 static void
4314 copy_lv_set_from (basic_block bb, basic_block from_bb)
4315 {
4316 gcc_assert (!BB_LV_SET_VALID_P (bb));
4317
4318 COPY_REG_SET (BB_LV_SET (bb), BB_LV_SET (from_bb));
4319 BB_LV_SET_VALID_P (bb) = true;
4320 }
4321
4322 /* Initialize lv set of all bb headers. */
4323 void
4324 init_lv_sets (void)
4325 {
4326 basic_block bb;
4327
  /* Initialize LV sets.  */
4329 FOR_EACH_BB_FN (bb, cfun)
4330 init_lv_set (bb);
4331
4332 /* Don't forget EXIT_BLOCK. */
4333 init_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun));
4334 }
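
/* A lifecycle sketch (illustrative): between the two calls every bb
   header has a valid lv set backed by the regset pool:

     init_lv_sets ();
     ... query BB_LV_SET (bb) while scheduling ...
     free_lv_sets ();  */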
4335
/* Release the lv set of BB.  */
4337 static void
4338 free_lv_set (basic_block bb)
4339 {
4340 gcc_assert (BB_LV_SET (bb) != NULL);
4341
4342 return_regset_to_pool (BB_LV_SET (bb));
4343 BB_LV_SET (bb) = NULL;
4344 BB_LV_SET_VALID_P (bb) = false;
4345 }
4346
4347 /* Finalize lv sets of all bb headers. */
4348 void
4349 free_lv_sets (void)
4350 {
4351 basic_block bb;
4352
4353 /* Don't forget EXIT_BLOCK. */
4354 free_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun));
4355
4356 /* Free LV sets. */
4357 FOR_EACH_BB_FN (bb, cfun)
4358 if (BB_LV_SET (bb))
4359 free_lv_set (bb);
4360 }
4361
4362 /* Mark AV_SET for BB as invalid, so this set will be updated the next time
4363 compute_av() processes BB. This function is called when creating new basic
4364 blocks, as well as for blocks (either new or existing) where new jumps are
4365 created when the control flow is being updated. */
4366 static void
4367 invalidate_av_set (basic_block bb)
4368 {
4369 BB_AV_LEVEL (bb) = -1;
4370 }
4371
4372 /* Create initial data sets for BB (they will be invalid). */
4373 static void
4374 create_initial_data_sets (basic_block bb)
4375 {
4376 if (BB_LV_SET (bb))
4377 BB_LV_SET_VALID_P (bb) = false;
4378 else
4379 BB_LV_SET (bb) = get_regset_from_pool ();
4380 invalidate_av_set (bb);
4381 }
4382
4383 /* Free av set of BB. */
4384 static void
4385 free_av_set (basic_block bb)
4386 {
4387 av_set_clear (&BB_AV_SET (bb));
4388 BB_AV_LEVEL (bb) = 0;
4389 }
4390
4391 /* Free data sets of BB. */
4392 void
4393 free_data_sets (basic_block bb)
4394 {
4395 free_lv_set (bb);
4396 free_av_set (bb);
4397 }
4398
4399 /* Exchange lv sets of TO and FROM. */
4400 static void
4401 exchange_lv_sets (basic_block to, basic_block from)
4402 {
4403 {
4404 regset to_lv_set = BB_LV_SET (to);
4405
4406 BB_LV_SET (to) = BB_LV_SET (from);
4407 BB_LV_SET (from) = to_lv_set;
4408 }
4409
4410 {
4411 bool to_lv_set_valid_p = BB_LV_SET_VALID_P (to);
4412
4413 BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);
4414 BB_LV_SET_VALID_P (from) = to_lv_set_valid_p;
4415 }
4416 }
4417
4418
4419 /* Exchange av sets of TO and FROM. */
4420 static void
4421 exchange_av_sets (basic_block to, basic_block from)
4422 {
4423 {
4424 av_set_t to_av_set = BB_AV_SET (to);
4425
4426 BB_AV_SET (to) = BB_AV_SET (from);
4427 BB_AV_SET (from) = to_av_set;
4428 }
4429
4430 {
4431 int to_av_level = BB_AV_LEVEL (to);
4432
4433 BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
4434 BB_AV_LEVEL (from) = to_av_level;
4435 }
4436 }
4437
4438 /* Exchange data sets of TO and FROM. */
4439 void
4440 exchange_data_sets (basic_block to, basic_block from)
4441 {
4442 exchange_lv_sets (to, from);
4443 exchange_av_sets (to, from);
4444 }
4445
4446 /* Copy data sets of FROM to TO. */
4447 void
4448 copy_data_sets (basic_block to, basic_block from)
4449 {
4450 gcc_assert (!BB_LV_SET_VALID_P (to) && !BB_AV_SET_VALID_P (to));
4451 gcc_assert (BB_AV_SET (to) == NULL);
4452
4453 BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
4454 BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);
4455
4456 if (BB_AV_SET_VALID_P (from))
4457 {
4458 BB_AV_SET (to) = av_set_copy (BB_AV_SET (from));
4459 }
4460 if (BB_LV_SET_VALID_P (from))
4461 {
4462 gcc_assert (BB_LV_SET (to) != NULL);
4463 COPY_REG_SET (BB_LV_SET (to), BB_LV_SET (from));
4464 }
4465 }
4466
4467 /* Return an av set for INSN, if any. */
4468 av_set_t
4469 get_av_set (insn_t insn)
4470 {
4471 av_set_t av_set;
4472
4473 gcc_assert (AV_SET_VALID_P (insn));
4474
4475 if (sel_bb_head_p (insn))
4476 av_set = BB_AV_SET (BLOCK_FOR_INSN (insn));
4477 else
4478 av_set = NULL;
4479
4480 return av_set;
4481 }
4482
4483 /* Implementation of AV_LEVEL () macro. Return AV_LEVEL () of INSN. */
4484 int
4485 get_av_level (insn_t insn)
4486 {
4487 int av_level;
4488
4489 gcc_assert (INSN_P (insn));
4490
4491 if (sel_bb_head_p (insn))
4492 av_level = BB_AV_LEVEL (BLOCK_FOR_INSN (insn));
4493 else
4494 av_level = INSN_WS_LEVEL (insn);
4495
4496 return av_level;
4497 }
4498
4499 \f
4500
4501 /* Variables to work with control-flow graph. */
4502
/* Basic blocks that have already been processed by sched_data_update (),
   but haven't been passed to sel_add_bb () yet.  */
4505 static vec<basic_block>
4506 last_added_blocks = vNULL;
4507
4508 /* A pool for allocating successor infos. */
4509 static struct
4510 {
4511 /* A stack for saving succs_info structures. */
4512 struct succs_info *stack;
4513
4514 /* Its size. */
4515 int size;
4516
4517 /* Top of the stack. */
4518 int top;
4519
4520 /* Maximal value of the top. */
4521 int max_top;
4522 } succs_info_pool;
4523
4524 /* Functions to work with control-flow graph. */
4525
/* Return the head insn of BB, i.e. the first real insn after the bb note,
   or NULL if the block is empty.  For the EXIT block, return exit_insn.  */
4527 insn_t
4528 sel_bb_head (basic_block bb)
4529 {
4530 insn_t head;
4531
4532 if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
4533 {
4534 gcc_assert (exit_insn != NULL_RTX);
4535 head = exit_insn;
4536 }
4537 else
4538 {
4539 insn_t note;
4540
4541 note = bb_note (bb);
4542 head = next_nonnote_insn (note);
4543
4544 if (head && (BARRIER_P (head) || BLOCK_FOR_INSN (head) != bb))
4545 head = NULL_RTX;
4546 }
4547
4548 return head;
4549 }
4550
4551 /* Return true if INSN is a basic block header. */
4552 bool
4553 sel_bb_head_p (insn_t insn)
4554 {
4555 return sel_bb_head (BLOCK_FOR_INSN (insn)) == insn;
4556 }
4557
4558 /* Return last insn of BB. */
4559 insn_t
4560 sel_bb_end (basic_block bb)
4561 {
4562 if (sel_bb_empty_p (bb))
4563 return NULL_RTX;
4564
4565 gcc_assert (bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
4566
4567 return BB_END (bb);
4568 }
4569
4570 /* Return true if INSN is the last insn in its basic block. */
4571 bool
4572 sel_bb_end_p (insn_t insn)
4573 {
4574 return insn == sel_bb_end (BLOCK_FOR_INSN (insn));
4575 }
4576
/* Return true if BB consists of a single NOTE_INSN_BASIC_BLOCK.  */
4578 bool
4579 sel_bb_empty_p (basic_block bb)
4580 {
4581 return sel_bb_head (bb) == NULL;
4582 }
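
/* An illustrative traversal of the real insns of BB using the helpers
   above (process_insn () is a hypothetical callback):

     insn_t insn = sel_bb_head (bb);
     if (insn != NULL_RTX)
       while (1)
         {
           process_insn (insn);
           if (sel_bb_end_p (insn))
             break;
           insn = NEXT_INSN (insn);
         }  */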
4583
4584 /* True when BB belongs to the current scheduling region. */
4585 bool
4586 in_current_region_p (basic_block bb)
4587 {
4588 if (bb->index < NUM_FIXED_BLOCKS)
4589 return false;
4590
4591 return CONTAINING_RGN (bb->index) == CONTAINING_RGN (BB_TO_BLOCK (0));
4592 }
4593
4594 /* Return the block which is a fallthru bb of a conditional jump JUMP. */
4595 basic_block
4596 fallthru_bb_of_jump (rtx jump)
4597 {
4598 if (!JUMP_P (jump))
4599 return NULL;
4600
4601 if (!any_condjump_p (jump))
4602 return NULL;
4603
  /* A basic block that ends with a conditional jump may still have one
     successor (and be followed by a barrier); we are not interested in
     that case.  */
4606 if (single_succ_p (BLOCK_FOR_INSN (jump)))
4607 return NULL;
4608
4609 return FALLTHRU_EDGE (BLOCK_FOR_INSN (jump))->dest;
4610 }
4611
/* Remove all notes from BB, saving them in the bb's note list.  */
4613 static void
4614 init_bb (basic_block bb)
4615 {
4616 remove_notes (bb_note (bb), BB_END (bb));
4617 BB_NOTE_LIST (bb) = note_list;
4618 }
4619
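/* Initialize per-bb data structures for each basic block in BBS.  */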
4620 void
4621 sel_init_bbs (bb_vec_t bbs)
4622 {
4623 const struct sched_scan_info_def ssi =
4624 {
4625 extend_bb_info, /* extend_bb */
4626 init_bb, /* init_bb */
4627 NULL, /* extend_insn */
4628 NULL /* init_insn */
4629 };
4630
4631 sched_scan (&ssi, bbs);
4632 }
4633
4634 /* Restore notes for the whole region. */
4635 static void
4636 sel_restore_notes (void)
4637 {
4638 int bb;
4639 insn_t insn;
4640
4641 for (bb = 0; bb < current_nr_blocks; bb++)
4642 {
4643 basic_block first, last;
4644
4645 first = EBB_FIRST_BB (bb);
4646 last = EBB_LAST_BB (bb)->next_bb;
4647
4648 do
4649 {
4650 note_list = BB_NOTE_LIST (first);
4651 restore_other_notes (NULL, first);
4652 BB_NOTE_LIST (first) = NULL_RTX;
4653
4654 FOR_BB_INSNS (first, insn)
4655 if (NONDEBUG_INSN_P (insn))
4656 reemit_notes (insn);
4657
4658 first = first->next_bb;
4659 }
4660 while (first != last);
4661 }
4662 }
4663
4664 /* Free per-bb data structures. */
4665 void
4666 sel_finish_bbs (void)
4667 {
4668 sel_restore_notes ();
4669
4670 /* Remove current loop preheader from this loop. */
4671 if (current_loop_nest)
4672 sel_remove_loop_preheader ();
4673
4674 finish_region_bb_info ();
4675 }
4676
4677 /* Return true if INSN has a single successor of type FLAGS. */
4678 bool
4679 sel_insn_has_single_succ_p (insn_t insn, int flags)
4680 {
4681 insn_t succ;
4682 succ_iterator si;
4683 bool first_p = true;
4684
4685 FOR_EACH_SUCC_1 (succ, si, insn, flags)
4686 {
4687 if (first_p)
4688 first_p = false;
4689 else
4690 return false;
4691 }
4692
4693 return true;
4694 }
4695
4696 /* Allocate successor's info. */
4697 static struct succs_info *
4698 alloc_succs_info (void)
4699 {
4700 if (succs_info_pool.top == succs_info_pool.max_top)
4701 {
4702 int i;
4703
4704 if (++succs_info_pool.max_top >= succs_info_pool.size)
4705 gcc_unreachable ();
4706
4707 i = ++succs_info_pool.top;
4708 succs_info_pool.stack[i].succs_ok.create (10);
4709 succs_info_pool.stack[i].succs_other.create (10);
4710 succs_info_pool.stack[i].probs_ok.create (10);
4711 }
4712 else
4713 succs_info_pool.top++;
4714
4715 return &succs_info_pool.stack[succs_info_pool.top];
4716 }
4717
4718 /* Free successor's info. */
4719 void
4720 free_succs_info (struct succs_info * sinfo)
4721 {
4722 gcc_assert (succs_info_pool.top >= 0
4723 && &succs_info_pool.stack[succs_info_pool.top] == sinfo);
4724 succs_info_pool.top--;
4725
4726 /* Clear stale info. */
4727 sinfo->succs_ok.block_remove (0, sinfo->succs_ok.length ());
4728 sinfo->succs_other.block_remove (0, sinfo->succs_other.length ());
4729 sinfo->probs_ok.block_remove (0, sinfo->probs_ok.length ());
4730 sinfo->all_prob = 0;
4731 sinfo->succs_ok_n = 0;
4732 sinfo->all_succs_n = 0;
4733 }
4734
4735 /* Compute successor info for INSN. FLAGS are the flags passed
4736 to the FOR_EACH_SUCC_1 iterator. */
4737 struct succs_info *
4738 compute_succs_info (insn_t insn, short flags)
4739 {
4740 succ_iterator si;
4741 insn_t succ;
4742 struct succs_info *sinfo = alloc_succs_info ();
4743
4744 /* Traverse *all* successors and decide what to do with each. */
4745 FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
4746 {
4747 /* FIXME: this doesn't work for skipping to loop exits, as we don't
4748 perform code motion through inner loops. */
4749 short current_flags = si.current_flags & ~SUCCS_SKIP_TO_LOOP_EXITS;
4750
4751 if (current_flags & flags)
4752 {
4753 sinfo->succs_ok.safe_push (succ);
4754 sinfo->probs_ok.safe_push (
4755 /* FIXME: Improve calculation when skipping
4756 inner loop to exits. */
4757 si.bb_end ? si.e1->probability : REG_BR_PROB_BASE);
4758 sinfo->succs_ok_n++;
4759 }
4760 else
4761 sinfo->succs_other.safe_push (succ);
4762
4763 /* Compute all_prob. */
4764 if (!si.bb_end)
4765 sinfo->all_prob = REG_BR_PROB_BASE;
4766 else
4767 sinfo->all_prob += si.e1->probability;
4768
4769 sinfo->all_succs_n++;
4770 }
4771
4772 return sinfo;
4773 }
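
/* The pool behind these structures imposes LIFO discipline, so a typical
   (sketched) use pairs the two calls around the traversal:

     struct succs_info *sinfo = compute_succs_info (insn, SUCCS_NORMAL);
     for (int i = 0; i < sinfo->succs_ok_n; i++)
       visit (sinfo->succs_ok[i]);
     free_succs_info (sinfo);

   where visit () is a hypothetical consumer; free_succs_info asserts
   that releases happen in stack order.  */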
4774
4775 /* Return the predecessors of BB in PREDS and their number in N.
4776 Empty blocks are skipped. SIZE is used to allocate PREDS. */
4777 static void
4778 cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size)
4779 {
4780 edge e;
4781 edge_iterator ei;
4782
4783 gcc_assert (BLOCK_TO_BB (bb->index) != 0);
4784
4785 FOR_EACH_EDGE (e, ei, bb->preds)
4786 {
4787 basic_block pred_bb = e->src;
4788 insn_t bb_end = BB_END (pred_bb);
4789
4790 if (!in_current_region_p (pred_bb))
4791 {
4792 gcc_assert (flag_sel_sched_pipelining_outer_loops
4793 && current_loop_nest);
4794 continue;
4795 }
4796
4797 if (sel_bb_empty_p (pred_bb))
4798 cfg_preds_1 (pred_bb, preds, n, size);
4799 else
4800 {
4801 if (*n == *size)
4802 *preds = XRESIZEVEC (insn_t, *preds,
4803 (*size = 2 * *size + 1));
4804 (*preds)[(*n)++] = bb_end;
4805 }
4806 }
4807
4808 gcc_assert (*n != 0
4809 || (flag_sel_sched_pipelining_outer_loops
4810 && current_loop_nest));
4811 }
4812
4813 /* Find all predecessors of BB and record them in PREDS and their number
4814 in N. Empty blocks are skipped, and only normal (forward in-region)
4815 edges are processed. */
4816 static void
4817 cfg_preds (basic_block bb, insn_t **preds, int *n)
4818 {
4819 int size = 0;
4820
4821 *preds = NULL;
4822 *n = 0;
4823 cfg_preds_1 (bb, preds, n, &size);
4824 }
4825
/* Returns true if we are moving INSN through a join point.  */
4827 bool
4828 sel_num_cfg_preds_gt_1 (insn_t insn)
4829 {
4830 basic_block bb;
4831
4832 if (!sel_bb_head_p (insn) || INSN_BB (insn) == 0)
4833 return false;
4834
4835 bb = BLOCK_FOR_INSN (insn);
4836
4837 while (1)
4838 {
4839 if (EDGE_COUNT (bb->preds) > 1)
4840 return true;
4841
4842 gcc_assert (EDGE_PRED (bb, 0)->dest == bb);
4843 bb = EDGE_PRED (bb, 0)->src;
4844
4845 if (!sel_bb_empty_p (bb))
4846 break;
4847 }
4848
4849 return false;
4850 }
4851
4852 /* Returns true when BB should be the end of an ebb. Adapted from the
4853 code in sched-ebb.c. */
4854 bool
4855 bb_ends_ebb_p (basic_block bb)
4856 {
4857 basic_block next_bb = bb_next_bb (bb);
4858 edge e;
4859
4860 if (next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4861 || bitmap_bit_p (forced_ebb_heads, next_bb->index)
4862 || (LABEL_P (BB_HEAD (next_bb))
4863 /* NB: LABEL_NUSES () is not maintained outside of jump.c.
4864 Work around that. */
4865 && !single_pred_p (next_bb)))
4866 return true;
4867
4868 if (!in_current_region_p (next_bb))
4869 return true;
4870
4871 e = find_fallthru_edge (bb->succs);
4872 if (e)
4873 {
4874 gcc_assert (e->dest == next_bb);
4875
4876 return false;
4877 }
4878
4879 return true;
4880 }
4881
4882 /* Returns true when INSN and SUCC are in the same EBB, given that SUCC is a
4883 successor of INSN. */
4884 bool
4885 in_same_ebb_p (insn_t insn, insn_t succ)
4886 {
4887 basic_block ptr = BLOCK_FOR_INSN (insn);
4888
4889 for (;;)
4890 {
4891 if (ptr == BLOCK_FOR_INSN (succ))
4892 return true;
4893
4894 if (bb_ends_ebb_p (ptr))
4895 return false;
4896
4897 ptr = bb_next_bb (ptr);
4898 }
4899
4900 gcc_unreachable ();
4901 return false;
4902 }
4903
4904 /* Recomputes the reverse topological order for the function and
4905 saves it in REV_TOP_ORDER_INDEX. REV_TOP_ORDER_INDEX_LEN is also
4906 modified appropriately. */
4907 static void
4908 recompute_rev_top_order (void)
4909 {
4910 int *postorder;
4911 int n_blocks, i;
4912
4913 if (!rev_top_order_index
4914 || rev_top_order_index_len < last_basic_block_for_fn (cfun))
4915 {
4916 rev_top_order_index_len = last_basic_block_for_fn (cfun);
4917 rev_top_order_index = XRESIZEVEC (int, rev_top_order_index,
4918 rev_top_order_index_len);
4919 }
4920
4921 postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
4922
4923 n_blocks = post_order_compute (postorder, true, false);
4924 gcc_assert (n_basic_blocks_for_fn (cfun) == n_blocks);
4925
  /* Build the reverse function: for each basic block with BB->INDEX == K,
     rev_top_order_index[K] is its reverse topological sort number.  */
4928 for (i = 0; i < n_blocks; i++)
4929 {
4930 gcc_assert (postorder[i] < rev_top_order_index_len);
4931 rev_top_order_index[postorder[i]] = i;
4932 }
4933
4934 free (postorder);
4935 }
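
/* For example (a sketch ignoring the fixed entry/exit blocks): on a
   diamond CFG A -> {B, C} -> D, one possible postorder is D, C, B, A,
   giving rev_top_order_index[D] = 0, [C] = 1, [B] = 2, [A] = 3.  So a
   greater index means "earlier in topological order" -- the property
   that bb_top_order_comparator below relies on.  */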
4936
4937 /* Clear all flags from insns in BB that could spoil its rescheduling. */
4938 void
4939 clear_outdated_rtx_info (basic_block bb)
4940 {
4941 rtx insn;
4942
4943 FOR_BB_INSNS (bb, insn)
4944 if (INSN_P (insn))
4945 {
4946 SCHED_GROUP_P (insn) = 0;
4947 INSN_AFTER_STALL_P (insn) = 0;
4948 INSN_SCHED_TIMES (insn) = 0;
4949 EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) = 0;
4950
      /* We cannot reuse the transformation caches: previously, with
         renaming enabled, we could ignore an LHS dependence and transform
         the expression, which may no longer be possible now.  */
4954 htab_empty (INSN_TRANSFORMED_INSNS (insn));
4955 }
4956 }
4957
4958 /* Add BB_NOTE to the pool of available basic block notes. */
4959 static void
4960 return_bb_to_pool (basic_block bb)
4961 {
4962 rtx note = bb_note (bb);
4963
4964 gcc_assert (NOTE_BASIC_BLOCK (note) == bb
4965 && bb->aux == NULL);
4966
4967 /* It turns out that current cfg infrastructure does not support
4968 reuse of basic blocks. Don't bother for now. */
4969 /*bb_note_pool.safe_push (note);*/
4970 }
4971
4972 /* Get a bb_note from pool or return NULL_RTX if pool is empty. */
4973 static rtx
4974 get_bb_note_from_pool (void)
4975 {
4976 if (bb_note_pool.is_empty ())
4977 return NULL_RTX;
4978 else
4979 {
4980 rtx note = bb_note_pool.pop ();
4981
4982 PREV_INSN (note) = NULL_RTX;
4983 NEXT_INSN (note) = NULL_RTX;
4984
4985 return note;
4986 }
4987 }
4988
4989 /* Free bb_note_pool. */
4990 void
4991 free_bb_note_pool (void)
4992 {
4993 bb_note_pool.release ();
4994 }
4995
4996 /* Setup scheduler pool and successor structure. */
4997 void
4998 alloc_sched_pools (void)
4999 {
5000 int succs_size;
5001
5002 succs_size = MAX_WS + 1;
5003 succs_info_pool.stack = XCNEWVEC (struct succs_info, succs_size);
5004 succs_info_pool.size = succs_size;
5005 succs_info_pool.top = -1;
5006 succs_info_pool.max_top = -1;
5007
5008 sched_lists_pool = create_alloc_pool ("sel-sched-lists",
5009 sizeof (struct _list_node), 500);
5010 }
5011
5012 /* Free the pools. */
5013 void
5014 free_sched_pools (void)
5015 {
5016 int i;
5017
5018 free_alloc_pool (sched_lists_pool);
5019 gcc_assert (succs_info_pool.top == -1);
5020 for (i = 0; i <= succs_info_pool.max_top; i++)
5021 {
5022 succs_info_pool.stack[i].succs_ok.release ();
5023 succs_info_pool.stack[i].succs_other.release ();
5024 succs_info_pool.stack[i].probs_ok.release ();
5025 }
5026 free (succs_info_pool.stack);
5027 }
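
/* A lifecycle sketch (illustrative): the pools bracket the whole pass,
   and free_sched_pools checks that every succs_info was returned:

     alloc_sched_pools ();
     ... schedule all regions ...
     free_sched_pools ();  */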
5028 \f
5029
5030 /* Returns a position in RGN where BB can be inserted retaining
5031 topological order. */
5032 static int
5033 find_place_to_insert_bb (basic_block bb, int rgn)
5034 {
5035 bool has_preds_outside_rgn = false;
5036 edge e;
5037 edge_iterator ei;
5038
5039 /* Find whether we have preds outside the region. */
5040 FOR_EACH_EDGE (e, ei, bb->preds)
5041 if (!in_current_region_p (e->src))
5042 {
5043 has_preds_outside_rgn = true;
5044 break;
5045 }
5046
  /* Recompute the topological order -- needed when we have more than one
     pred or when we have preds outside the region.  */
5049 if (flag_sel_sched_pipelining_outer_loops
5050 && (has_preds_outside_rgn || EDGE_COUNT (bb->preds) > 1))
5051 {
5052 int i, bbi = bb->index, cur_bbi;
5053
5054 recompute_rev_top_order ();
5055 for (i = RGN_NR_BLOCKS (rgn) - 1; i >= 0; i--)
5056 {
5057 cur_bbi = BB_TO_BLOCK (i);
5058 if (rev_top_order_index[bbi]
5059 < rev_top_order_index[cur_bbi])
5060 break;
5061 }
5062
      /* We skipped past the right block, so BB should be placed directly
         after it; the caller adds 1 to the result itself, so compensate
         by subtracting 1.  */
5065 return (i + 1) - 1;
5066 }
5067 else if (has_preds_outside_rgn)
5068 {
5069 /* This is the case when we generate an extra empty block
5070 to serve as region head during pipelining. */
5071 e = EDGE_SUCC (bb, 0);
5072 gcc_assert (EDGE_COUNT (bb->succs) == 1
5073 && in_current_region_p (EDGE_SUCC (bb, 0)->dest)
5074 && (BLOCK_TO_BB (e->dest->index) == 0));
5075 return -1;
5076 }
5077
  /* We don't have preds outside the region.  We should have exactly one
     pred, because the multiple-preds case comes from pipelining of outer
     loops, and that is handled above.  Just take the bbi of this single
     pred.  */
5082 if (EDGE_COUNT (bb->succs) > 0)
5083 {
5084 int pred_bbi;
5085
5086 gcc_assert (EDGE_COUNT (bb->preds) == 1);
5087
5088 pred_bbi = EDGE_PRED (bb, 0)->src->index;
5089 return BLOCK_TO_BB (pred_bbi);
5090 }
5091 else
5092 /* BB has no successors. It is safe to put it in the end. */
5093 return current_nr_blocks - 1;
5094 }
5095
5096 /* Deletes an empty basic block freeing its data. */
5097 static void
5098 delete_and_free_basic_block (basic_block bb)
5099 {
5100 gcc_assert (sel_bb_empty_p (bb));
5101
5102 if (BB_LV_SET (bb))
5103 free_lv_set (bb);
5104
5105 bitmap_clear_bit (blocks_to_reschedule, bb->index);
5106
  /* Can't assert av_set properties because we use sel_remove_bb
     when removing a loop preheader from the region.  At the point of
     removing the preheader we have already deallocated sel_region_bb_info.  */
5110 gcc_assert (BB_LV_SET (bb) == NULL
5111 && !BB_LV_SET_VALID_P (bb)
5112 && BB_AV_LEVEL (bb) == 0
5113 && BB_AV_SET (bb) == NULL);
5114
5115 delete_basic_block (bb);
5116 }
5117
5118 /* Add BB to the current region and update the region data. */
5119 static void
5120 add_block_to_current_region (basic_block bb)
5121 {
5122 int i, pos, bbi = -2, rgn;
5123
5124 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
5125 bbi = find_place_to_insert_bb (bb, rgn);
5126 bbi += 1;
5127 pos = RGN_BLOCKS (rgn) + bbi;
5128
5129 gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
5130 && ebb_head[bbi] == pos);
5131
5132 /* Make a place for the new block. */
5133 extend_regions ();
5134
5135 for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
5136 BLOCK_TO_BB (rgn_bb_table[i])++;
5137
5138 memmove (rgn_bb_table + pos + 1,
5139 rgn_bb_table + pos,
5140 (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
5141
5142 /* Initialize data for BB. */
5143 rgn_bb_table[pos] = bb->index;
5144 BLOCK_TO_BB (bb->index) = bbi;
5145 CONTAINING_RGN (bb->index) = rgn;
5146
5147 RGN_NR_BLOCKS (rgn)++;
5148
5149 for (i = rgn + 1; i <= nr_regions; i++)
5150 RGN_BLOCKS (i)++;
5151 }
5152
5153 /* Remove BB from the current region and update the region data. */
5154 static void
5155 remove_bb_from_region (basic_block bb)
5156 {
5157 int i, pos, bbi = -2, rgn;
5158
5159 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
5160 bbi = BLOCK_TO_BB (bb->index);
5161 pos = RGN_BLOCKS (rgn) + bbi;
5162
5163 gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
5164 && ebb_head[bbi] == pos);
5165
5166 for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
5167 BLOCK_TO_BB (rgn_bb_table[i])--;
5168
5169 memmove (rgn_bb_table + pos,
5170 rgn_bb_table + pos + 1,
5171 (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
5172
5173 RGN_NR_BLOCKS (rgn)--;
5174 for (i = rgn + 1; i <= nr_regions; i++)
5175 RGN_BLOCKS (i)--;
5176 }
5177
5178 /* Add BB to the current region and update all data. If BB is NULL, add all
5179 blocks from last_added_blocks vector. */
5180 static void
5181 sel_add_bb (basic_block bb)
5182 {
5183 /* Extend luids so that new notes will receive zero luids. */
5184 sched_extend_luids ();
5185 sched_init_bbs ();
5186 sel_init_bbs (last_added_blocks);
5187
  /* When BB is passed explicitly, the vector should contain a single
     element equal to BB; otherwise, the vector must not be empty.  */
5191 gcc_assert (last_added_blocks.exists ());
5192
5193 if (bb != NULL)
5194 {
5195 gcc_assert (last_added_blocks.length () == 1
5196 && last_added_blocks[0] == bb);
5197 add_block_to_current_region (bb);
5198
5199 /* We associate creating/deleting data sets with the first insn
5200 appearing / disappearing in the bb. */
5201 if (!sel_bb_empty_p (bb) && BB_LV_SET (bb) == NULL)
5202 create_initial_data_sets (bb);
5203
5204 last_added_blocks.release ();
5205 }
5206 else
5207 /* BB is NULL - process LAST_ADDED_BLOCKS instead. */
5208 {
5209 int i;
5210 basic_block temp_bb = NULL;
5211
5212 for (i = 0;
5213 last_added_blocks.iterate (i, &bb); i++)
5214 {
5215 add_block_to_current_region (bb);
5216 temp_bb = bb;
5217 }
5218
5219 /* We need to fetch at least one bb so we know the region
5220 to update. */
5221 gcc_assert (temp_bb != NULL);
5222 bb = temp_bb;
5223
5224 last_added_blocks.release ();
5225 }
5226
5227 rgn_setup_region (CONTAINING_RGN (bb->index));
5228 }
5229
/* Remove BB from the current region and update all data.
   If REMOVE_FROM_CFG_P is true, also remove the block from the CFG.  */
5232 static void
5233 sel_remove_bb (basic_block bb, bool remove_from_cfg_p)
5234 {
5235 unsigned idx = bb->index;
5236
5237 gcc_assert (bb != NULL && BB_NOTE_LIST (bb) == NULL_RTX);
5238
5239 remove_bb_from_region (bb);
5240 return_bb_to_pool (bb);
5241 bitmap_clear_bit (blocks_to_reschedule, idx);
5242
5243 if (remove_from_cfg_p)
5244 {
5245 basic_block succ = single_succ (bb);
5246 delete_and_free_basic_block (bb);
5247 set_immediate_dominator (CDI_DOMINATORS, succ,
5248 recompute_dominator (CDI_DOMINATORS, succ));
5249 }
5250
5251 rgn_setup_region (CONTAINING_RGN (idx));
5252 }
5253
5254 /* Concatenate info of EMPTY_BB to info of MERGE_BB. */
5255 static void
5256 move_bb_info (basic_block merge_bb, basic_block empty_bb)
5257 {
5258 if (in_current_region_p (merge_bb))
5259 concat_note_lists (BB_NOTE_LIST (empty_bb),
5260 &BB_NOTE_LIST (merge_bb));
5261 BB_NOTE_LIST (empty_bb) = NULL_RTX;
5262
5263 }
5264
/* Remove EMPTY_BB.  If REMOVE_FROM_CFG_P is false, remove EMPTY_BB from
   the region but keep it in the CFG.  */
5267 static void
5268 remove_empty_bb (basic_block empty_bb, bool remove_from_cfg_p)
5269 {
5270 /* The block should contain just a note or a label.
5271 We try to check whether it is unused below. */
5272 gcc_assert (BB_HEAD (empty_bb) == BB_END (empty_bb)
5273 || LABEL_P (BB_HEAD (empty_bb)));
5274
5275 /* If basic block has predecessors or successors, redirect them. */
5276 if (remove_from_cfg_p
5277 && (EDGE_COUNT (empty_bb->preds) > 0
5278 || EDGE_COUNT (empty_bb->succs) > 0))
5279 {
5280 basic_block pred;
5281 basic_block succ;
5282
5283 /* We need to init PRED and SUCC before redirecting edges. */
5284 if (EDGE_COUNT (empty_bb->preds) > 0)
5285 {
5286 edge e;
5287
5288 gcc_assert (EDGE_COUNT (empty_bb->preds) == 1);
5289
5290 e = EDGE_PRED (empty_bb, 0);
5291 gcc_assert (e->src == empty_bb->prev_bb
5292 && (e->flags & EDGE_FALLTHRU));
5293
5294 pred = empty_bb->prev_bb;
5295 }
5296 else
5297 pred = NULL;
5298
5299 if (EDGE_COUNT (empty_bb->succs) > 0)
5300 {
          /* We do not check fallthruness here as above, because after
             removing a jump the edge may no longer be fallthru.  */
5303 gcc_assert (EDGE_COUNT (empty_bb->succs) == 1);
5304 succ = EDGE_SUCC (empty_bb, 0)->dest;
5305 }
5306 else
5307 succ = NULL;
5308
5309 if (EDGE_COUNT (empty_bb->preds) > 0 && succ != NULL)
5310 {
5311 edge e = EDGE_PRED (empty_bb, 0);
5312
5313 if (e->flags & EDGE_FALLTHRU)
5314 redirect_edge_succ_nodup (e, succ);
5315 else
5316 sel_redirect_edge_and_branch (EDGE_PRED (empty_bb, 0), succ);
5317 }
5318
5319 if (EDGE_COUNT (empty_bb->succs) > 0 && pred != NULL)
5320 {
5321 edge e = EDGE_SUCC (empty_bb, 0);
5322
5323 if (find_edge (pred, e->dest) == NULL)
5324 redirect_edge_pred (e, pred);
5325 }
5326 }
5327
5328 /* Finish removing. */
5329 sel_remove_bb (empty_bb, remove_from_cfg_p);
5330 }
5331
5332 /* An implementation of create_basic_block hook, which additionally updates
5333 per-bb data structures. */
5334 static basic_block
5335 sel_create_basic_block (void *headp, void *endp, basic_block after)
5336 {
5337 basic_block new_bb;
5338 insn_t new_bb_note;
5339
5340 gcc_assert (flag_sel_sched_pipelining_outer_loops
5341 || !last_added_blocks.exists ());
5342
5343 new_bb_note = get_bb_note_from_pool ();
5344
5345 if (new_bb_note == NULL_RTX)
5346 new_bb = orig_cfg_hooks.create_basic_block (headp, endp, after);
5347 else
5348 {
5349 new_bb = create_basic_block_structure ((rtx) headp, (rtx) endp,
5350 new_bb_note, after);
5351 new_bb->aux = NULL;
5352 }
5353
5354 last_added_blocks.safe_push (new_bb);
5355
5356 return new_bb;
5357 }
5358
5359 /* Implement sched_init_only_bb (). */
5360 static void
5361 sel_init_only_bb (basic_block bb, basic_block after)
5362 {
5363 gcc_assert (after == NULL);
5364
5365 extend_regions ();
5366 rgn_make_new_region_out_of_new_block (bb);
5367 }
5368
/* Update the latch when we've split or merged it from block FROM to block
   TO.  This should be checked for all outer loops, too.  */
5371 static void
5372 change_loops_latches (basic_block from, basic_block to)
5373 {
5374 gcc_assert (from != to);
5375
5376 if (current_loop_nest)
5377 {
5378 struct loop *loop;
5379
5380 for (loop = current_loop_nest; loop; loop = loop_outer (loop))
5381 if (considered_for_pipelining_p (loop) && loop->latch == from)
5382 {
5383 gcc_assert (loop == current_loop_nest);
5384 loop->latch = to;
5385 gcc_assert (loop_latch_edge (loop));
5386 }
5387 }
5388 }
5389
/* Splits BB into two basic blocks, adding the new one to the region and
   extending per-bb data structures.  Returns the newly created bb.  */
5392 static basic_block
5393 sel_split_block (basic_block bb, rtx after)
5394 {
5395 basic_block new_bb;
5396 insn_t insn;
5397
5398 new_bb = sched_split_block_1 (bb, after);
5399 sel_add_bb (new_bb);
5400
5401 /* This should be called after sel_add_bb, because this uses
5402 CONTAINING_RGN for the new block, which is not yet initialized.
5403 FIXME: this function may be a no-op now. */
5404 change_loops_latches (bb, new_bb);
5405
5406 /* Update ORIG_BB_INDEX for insns moved into the new block. */
5407 FOR_BB_INSNS (new_bb, insn)
5408 if (INSN_P (insn))
5409 EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index;
5410
5411 if (sel_bb_empty_p (bb))
5412 {
5413 gcc_assert (!sel_bb_empty_p (new_bb));
5414
5415 /* NEW_BB has data sets that need to be updated and BB holds
5416 data sets that should be removed. Exchange these data sets
5417 so that we won't lose BB's valid data sets. */
5418 exchange_data_sets (new_bb, bb);
5419 free_data_sets (bb);
5420 }
5421
5422 if (!sel_bb_empty_p (new_bb)
5423 && bitmap_bit_p (blocks_to_reschedule, bb->index))
5424 bitmap_set_bit (blocks_to_reschedule, new_bb->index);
5425
5426 return new_bb;
5427 }
5428
/* If BB ends with a jump insn whose UID is at least PREV_MAX_UID, return
   it.  Otherwise return NULL.  */
5431 static rtx
5432 check_for_new_jump (basic_block bb, int prev_max_uid)
5433 {
5434 rtx end;
5435
5436 end = sel_bb_end (bb);
5437 if (end && INSN_UID (end) >= prev_max_uid)
5438 return end;
5439 return NULL;
5440 }
5441
/* Look for a new jump either in block FROM or in the newly created block
   JUMP_BB.  New means having a UID at least equal to PREV_MAX_UID.  */
5444 static rtx
5445 find_new_jump (basic_block from, basic_block jump_bb, int prev_max_uid)
5446 {
5447 rtx jump;
5448
5449 /* Return immediately if no new insns were emitted. */
5450 if (get_max_uid () == prev_max_uid)
5451 return NULL;
5452
  /* Now check both blocks for new jumps.  There will only ever be one.  */
5454 if ((jump = check_for_new_jump (from, prev_max_uid)))
5455 return jump;
5456
5457 if (jump_bb != NULL
5458 && (jump = check_for_new_jump (jump_bb, prev_max_uid)))
5459 return jump;
5460 return NULL;
5461 }
5462
5463 /* Splits E and adds the newly created basic block to the current region.
5464 Returns this basic block. */
5465 basic_block
5466 sel_split_edge (edge e)
5467 {
5468 basic_block new_bb, src, other_bb = NULL;
5469 int prev_max_uid;
5470 rtx jump;
5471
5472 src = e->src;
5473 prev_max_uid = get_max_uid ();
5474 new_bb = split_edge (e);
5475
5476 if (flag_sel_sched_pipelining_outer_loops
5477 && current_loop_nest)
5478 {
5479 int i;
5480 basic_block bb;
5481
5482 /* Some of the basic blocks might not have been added to the loop.
5483 Add them here, until this is fixed in force_fallthru. */
5484 for (i = 0;
5485 last_added_blocks.iterate (i, &bb); i++)
5486 if (!bb->loop_father)
5487 {
5488 add_bb_to_loop (bb, e->dest->loop_father);
5489
5490 gcc_assert (!other_bb && (new_bb->index != bb->index));
5491 other_bb = bb;
5492 }
5493 }
5494
5495 /* Add all last_added_blocks to the region. */
5496 sel_add_bb (NULL);
5497
5498 jump = find_new_jump (src, new_bb, prev_max_uid);
5499 if (jump)
5500 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5501
5502 /* Put the correct lv set on this block. */
5503 if (other_bb && !sel_bb_empty_p (other_bb))
5504 compute_live (sel_bb_head (other_bb));
5505
5506 return new_bb;
5507 }
5508
5509 /* Implement sched_create_empty_bb (). */
5510 static basic_block
5511 sel_create_empty_bb (basic_block after)
5512 {
5513 basic_block new_bb;
5514
5515 new_bb = sched_create_empty_bb_1 (after);
5516
5517 /* We'll explicitly initialize NEW_BB via sel_init_only_bb () a bit
5518 later. */
5519 gcc_assert (last_added_blocks.length () == 1
5520 && last_added_blocks[0] == new_bb);
5521
5522 last_added_blocks.release ();
5523 return new_bb;
5524 }
5525
/* Implement sched_create_recovery_block.  ORIG_INSN is the insn at which
   the block will be split to insert a check.  */
5528 basic_block
5529 sel_create_recovery_block (insn_t orig_insn)
5530 {
5531 basic_block first_bb, second_bb, recovery_block;
5532 basic_block before_recovery = NULL;
5533 rtx jump;
5534
5535 first_bb = BLOCK_FOR_INSN (orig_insn);
5536 if (sel_bb_end_p (orig_insn))
5537 {
5538 /* Avoid introducing an empty block while splitting. */
5539 gcc_assert (single_succ_p (first_bb));
5540 second_bb = single_succ (first_bb);
5541 }
5542 else
5543 second_bb = sched_split_block (first_bb, orig_insn);
5544
5545 recovery_block = sched_create_recovery_block (&before_recovery);
5546 if (before_recovery)
5547 copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR_FOR_FN (cfun));
5548
5549 gcc_assert (sel_bb_empty_p (recovery_block));
5550 sched_create_recovery_edges (first_bb, recovery_block, second_bb);
5551 if (current_loops != NULL)
5552 add_bb_to_loop (recovery_block, first_bb->loop_father);
5553
5554 sel_add_bb (recovery_block);
5555
5556 jump = BB_END (recovery_block);
5557 gcc_assert (sel_bb_head (recovery_block) == jump);
5558 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5559
5560 return recovery_block;
5561 }
5562
5563 /* Merge basic block B into basic block A. */
5564 static void
5565 sel_merge_blocks (basic_block a, basic_block b)
5566 {
5567 gcc_assert (sel_bb_empty_p (b)
5568 && EDGE_COUNT (b->preds) == 1
5569 && EDGE_PRED (b, 0)->src == b->prev_bb);
5570
5571 move_bb_info (b->prev_bb, b);
5572 remove_empty_bb (b, false);
5573 merge_blocks (a, b);
5574 change_loops_latches (b, a);
5575 }
5576
/* A wrapper for redirect_edge_and_branch_force, which also initializes
   data structures for any bb and insns created along the way.  */
5580 void
5581 sel_redirect_edge_and_branch_force (edge e, basic_block to)
5582 {
5583 basic_block jump_bb, src, orig_dest = e->dest;
5584 int prev_max_uid;
5585 rtx jump;
5586
5587 /* This function is now used only for bookkeeping code creation, where
5588 we'll never get the single pred of orig_dest block and thus will not
5589 hit unreachable blocks when updating dominator info. */
5590 gcc_assert (!sel_bb_empty_p (e->src)
5591 && !single_pred_p (orig_dest));
5592 src = e->src;
5593 prev_max_uid = get_max_uid ();
5594 jump_bb = redirect_edge_and_branch_force (e, to);
5595
5596 if (jump_bb != NULL)
5597 sel_add_bb (jump_bb);
5598
  /* So far this function cannot spoil the loop structure, so there is
     nothing to update.  But assert that the latch edge is intact, just
     to be sure.  */
5601 if (current_loop_nest
5602 && pipelining_p)
5603 gcc_assert (loop_latch_edge (current_loop_nest));
5604
5605 jump = find_new_jump (src, jump_bb, prev_max_uid);
5606 if (jump)
5607 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5608 set_immediate_dominator (CDI_DOMINATORS, to,
5609 recompute_dominator (CDI_DOMINATORS, to));
5610 set_immediate_dominator (CDI_DOMINATORS, orig_dest,
5611 recompute_dominator (CDI_DOMINATORS, orig_dest));
5612 }
5613
/* A wrapper for redirect_edge_and_branch.  Return TRUE if the blocks
   connected by the redirected edge ended up in reverse topological order,
   meaning that the order needs to be recomputed.  */
5616 bool
5617 sel_redirect_edge_and_branch (edge e, basic_block to)
5618 {
5619 bool latch_edge_p;
5620 basic_block src, orig_dest = e->dest;
5621 int prev_max_uid;
5622 rtx jump;
5623 edge redirected;
5624 bool recompute_toporder_p = false;
5625 bool maybe_unreachable = single_pred_p (orig_dest);
5626
5627 latch_edge_p = (pipelining_p
5628 && current_loop_nest
5629 && e == loop_latch_edge (current_loop_nest));
5630
5631 src = e->src;
5632 prev_max_uid = get_max_uid ();
5633
5634 redirected = redirect_edge_and_branch (e, to);
5635
5636 gcc_assert (redirected && !last_added_blocks.exists ());
5637
5638 /* When we've redirected a latch edge, update the header. */
5639 if (latch_edge_p)
5640 {
5641 current_loop_nest->header = to;
5642 gcc_assert (loop_latch_edge (current_loop_nest));
5643 }
5644
5645 /* In rare situations, the topological relation between the blocks connected
5646 by the redirected edge can change (see PR42245 for an example). Update
5647 block_to_bb/bb_to_block. */
5648 if (CONTAINING_RGN (e->src->index) == CONTAINING_RGN (to->index)
5649 && BLOCK_TO_BB (e->src->index) > BLOCK_TO_BB (to->index))
5650 recompute_toporder_p = true;
5651
5652 jump = find_new_jump (src, NULL, prev_max_uid);
5653 if (jump)
5654 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5655
5656 /* Only update dominator info when we don't have unreachable blocks.
5657 Otherwise we'll update in maybe_tidy_empty_bb. */
5658 if (!maybe_unreachable)
5659 {
5660 set_immediate_dominator (CDI_DOMINATORS, to,
5661 recompute_dominator (CDI_DOMINATORS, to));
5662 set_immediate_dominator (CDI_DOMINATORS, orig_dest,
5663 recompute_dominator (CDI_DOMINATORS, orig_dest));
5664 }
5665 return recompute_toporder_p;
5666 }
5667
5668 /* This variable holds the cfg hooks used by the selective scheduler. */
5669 static struct cfg_hooks sel_cfg_hooks;
5670
5671 /* Register sel-sched cfg hooks. */
5672 void
5673 sel_register_cfg_hooks (void)
5674 {
5675 sched_split_block = sel_split_block;
5676
5677 orig_cfg_hooks = get_cfg_hooks ();
5678 sel_cfg_hooks = orig_cfg_hooks;
5679
5680 sel_cfg_hooks.create_basic_block = sel_create_basic_block;
5681
5682 set_cfg_hooks (sel_cfg_hooks);
5683
5684 sched_init_only_bb = sel_init_only_bb;
5685 sched_split_block = sel_split_block;
5686 sched_create_empty_bb = sel_create_empty_bb;
5687 }
5688
5689 /* Unregister sel-sched cfg hooks. */
5690 void
5691 sel_unregister_cfg_hooks (void)
5692 {
5693 sched_create_empty_bb = NULL;
5694 sched_split_block = NULL;
5695 sched_init_only_bb = NULL;
5696
5697 set_cfg_hooks (orig_cfg_hooks);
5698 }
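
/* An illustrative pairing (sketch): CFG transformations made on behalf of
   the selective scheduler run between the two calls, so that newly created
   blocks go through sel_create_basic_block:

     sel_register_cfg_hooks ();
     ... split edges, redirect branches, etc. ...
     sel_unregister_cfg_hooks ();  */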
5699 \f
5700
5701 /* Emit an insn rtx based on PATTERN. If a jump insn is wanted,
5702 LABEL is where this jump should be directed. */
5703 rtx
5704 create_insn_rtx_from_pattern (rtx pattern, rtx label)
5705 {
5706 rtx insn_rtx;
5707
5708 gcc_assert (!INSN_P (pattern));
5709
5710 start_sequence ();
5711
5712 if (label == NULL_RTX)
5713 insn_rtx = emit_insn (pattern);
5714 else if (DEBUG_INSN_P (label))
5715 insn_rtx = emit_debug_insn (pattern);
5716 else
5717 {
5718 insn_rtx = emit_jump_insn (pattern);
5719 JUMP_LABEL (insn_rtx) = label;
5720 ++LABEL_NUSES (label);
5721 }
5722
5723 end_sequence ();
5724
5725 sched_extend_luids ();
5726 sched_extend_target ();
5727 sched_deps_init (false);
5728
5729 /* Initialize INSN_CODE now. */
5730 recog_memoized (insn_rtx);
5731 return insn_rtx;
5732 }
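
/* E.g. (an illustrative sketch; dest_reg and src_reg are caller-provided
   REG rtxes):

     rtx pat = gen_rtx_SET (VOIDmode, dest_reg, src_reg);
     rtx move_rtx = create_insn_rtx_from_pattern (pat, NULL_RTX);

   The result is recognized but not yet in the insn stream; it can be
   passed to create_vinsn_from_insn_rtx below.  */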
5733
5734 /* Create a new vinsn for INSN_RTX. FORCE_UNIQUE_P is true when the vinsn
5735 must not be clonable. */
5736 vinsn_t
5737 create_vinsn_from_insn_rtx (rtx insn_rtx, bool force_unique_p)
5738 {
5739 gcc_assert (INSN_P (insn_rtx) && !INSN_IN_STREAM_P (insn_rtx));
5740
5741 /* If VINSN_TYPE is not USE, retain its uniqueness. */
5742 return vinsn_create (insn_rtx, force_unique_p);
5743 }
5744
5745 /* Create a copy of INSN_RTX. */
5746 rtx
5747 create_copy_of_insn_rtx (rtx insn_rtx)
5748 {
5749 rtx res, link;
5750
5751 if (DEBUG_INSN_P (insn_rtx))
5752 return create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5753 insn_rtx);
5754
5755 gcc_assert (NONJUMP_INSN_P (insn_rtx));
5756
5757 res = create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5758 NULL_RTX);
5759
5760 /* Copy all REG_NOTES except REG_EQUAL/REG_EQUIV and REG_LABEL_OPERAND
5761 since mark_jump_label will make them. REG_LABEL_TARGETs are created
5762 there too, but are supposed to be sticky, so we copy them. */
5763 for (link = REG_NOTES (insn_rtx); link; link = XEXP (link, 1))
5764 if (REG_NOTE_KIND (link) != REG_LABEL_OPERAND
5765 && REG_NOTE_KIND (link) != REG_EQUAL
5766 && REG_NOTE_KIND (link) != REG_EQUIV)
5767 {
5768 if (GET_CODE (link) == EXPR_LIST)
5769 add_reg_note (res, REG_NOTE_KIND (link),
5770 copy_insn_1 (XEXP (link, 0)));
5771 else
5772 add_reg_note (res, REG_NOTE_KIND (link), XEXP (link, 0));
5773 }
5774
5775 return res;
5776 }
5777
5778 /* Change vinsn field of EXPR to hold NEW_VINSN. */
5779 void
5780 change_vinsn_in_expr (expr_t expr, vinsn_t new_vinsn)
5781 {
5782 vinsn_detach (EXPR_VINSN (expr));
5783
5784 EXPR_VINSN (expr) = new_vinsn;
5785 vinsn_attach (new_vinsn);
5786 }
5787
5788 /* Helpers for global init. */
/* This structure makes it possible to call the existing bundling mechanism
   and to calculate insn priorities.  */
5791 static struct haifa_sched_info sched_sel_haifa_sched_info =
5792 {
5793 NULL, /* init_ready_list */
5794 NULL, /* can_schedule_ready_p */
5795 NULL, /* schedule_more_p */
5796 NULL, /* new_ready */
5797 NULL, /* rgn_rank */
5798 sel_print_insn, /* rgn_print_insn */
5799 contributes_to_priority,
5800 NULL, /* insn_finishes_block_p */
5801
5802 NULL, NULL,
5803 NULL, NULL,
5804 0, 0,
5805
5806 NULL, /* add_remove_insn */
5807 NULL, /* begin_schedule_ready */
5808 NULL, /* begin_move_insn */
5809 NULL, /* advance_target_bb */
5810
5811 NULL,
5812 NULL,
5813
5814 SEL_SCHED | NEW_BBS
5815 };
5816
5817 /* Setup special insns used in the scheduler. */
5818 void
5819 setup_nop_and_exit_insns (void)
5820 {
5821 gcc_assert (nop_pattern == NULL_RTX
5822 && exit_insn == NULL_RTX);
5823
5824 nop_pattern = constm1_rtx;
5825
5826 start_sequence ();
5827 emit_insn (nop_pattern);
5828 exit_insn = get_insns ();
5829 end_sequence ();
5830 set_block_for_insn (exit_insn, EXIT_BLOCK_PTR_FOR_FN (cfun));
5831 }
5832
5833 /* Free special insns used in the scheduler. */
5834 void
5835 free_nop_and_exit_insns (void)
5836 {
5837 exit_insn = NULL_RTX;
5838 nop_pattern = NULL_RTX;
5839 }
5840
5841 /* Setup a special vinsn used in new insns initialization. */
5842 void
5843 setup_nop_vinsn (void)
5844 {
5845 nop_vinsn = vinsn_create (exit_insn, false);
5846 vinsn_attach (nop_vinsn);
5847 }
5848
5849 /* Free a special vinsn used in new insns initialization. */
5850 void
5851 free_nop_vinsn (void)
5852 {
5853 gcc_assert (VINSN_COUNT (nop_vinsn) == 1);
5854 vinsn_detach (nop_vinsn);
5855 nop_vinsn = NULL;
5856 }
5857
5858 /* Call a set_sched_flags hook. */
5859 void
5860 sel_set_sched_flags (void)
5861 {
  /* ??? This means that set_sched_flags was called, and we decided to
     support speculation.  However, set_sched_flags also modifies flags
     on current_sched_info, doing this only at global init.  And we
     sometimes change current_sched_info later.  So set the correct flags
     again.  */
5866 if (spec_info && targetm.sched.set_sched_flags)
5867 targetm.sched.set_sched_flags (spec_info);
5868 }
5869
5870 /* Setup pointers to global sched info structures. */
5871 void
5872 sel_setup_sched_infos (void)
5873 {
5874 rgn_setup_common_sched_info ();
5875
5876 memcpy (&sel_common_sched_info, common_sched_info,
5877 sizeof (sel_common_sched_info));
5878
5879 sel_common_sched_info.fix_recovery_cfg = NULL;
5880 sel_common_sched_info.add_block = NULL;
5881 sel_common_sched_info.estimate_number_of_insns
5882 = sel_estimate_number_of_insns;
5883 sel_common_sched_info.luid_for_non_insn = sel_luid_for_non_insn;
5884 sel_common_sched_info.sched_pass_id = SCHED_SEL_PASS;
5885
5886 common_sched_info = &sel_common_sched_info;
5887
5888 current_sched_info = &sched_sel_haifa_sched_info;
5889 current_sched_info->sched_max_insns_priority =
5890 get_rgn_sched_max_insns_priority ();
5891
5892 sel_set_sched_flags ();
5893 }
5894 \f
5895
/* Add basic block BB to region RGN at position *BB_ORD_INDEX, then
   increment *BB_ORD_INDEX.  */
5898 static void
5899 sel_add_block_to_region (basic_block bb, int *bb_ord_index, int rgn)
5900 {
5901 RGN_NR_BLOCKS (rgn) += 1;
5902 RGN_DONT_CALC_DEPS (rgn) = 0;
5903 RGN_HAS_REAL_EBB (rgn) = 0;
5904 CONTAINING_RGN (bb->index) = rgn;
5905 BLOCK_TO_BB (bb->index) = *bb_ord_index;
5906 rgn_bb_table[RGN_BLOCKS (rgn) + *bb_ord_index] = bb->index;
5907 (*bb_ord_index)++;
5908
5909 /* FIXME: it is true only when not scheduling ebbs. */
5910 RGN_BLOCKS (rgn + 1) = RGN_BLOCKS (rgn) + RGN_NR_BLOCKS (rgn);
5911 }
5912
5913 /* Functions to support pipelining of outer loops. */
5914
/* Creates a new empty region and returns its number.  */
5916 static int
5917 sel_create_new_region (void)
5918 {
5919 int new_rgn_number = nr_regions;
5920
5921 RGN_NR_BLOCKS (new_rgn_number) = 0;
5922
5923 /* FIXME: This will work only when EBBs are not created. */
5924 if (new_rgn_number != 0)
5925 RGN_BLOCKS (new_rgn_number) = RGN_BLOCKS (new_rgn_number - 1) +
5926 RGN_NR_BLOCKS (new_rgn_number - 1);
5927 else
5928 RGN_BLOCKS (new_rgn_number) = 0;
5929
5930 /* Set the blocks of the next region so the other functions may
5931 calculate the number of blocks in the region. */
5932 RGN_BLOCKS (new_rgn_number + 1) = RGN_BLOCKS (new_rgn_number) +
5933 RGN_NR_BLOCKS (new_rgn_number);
5934
5935 nr_regions++;
5936
5937 return new_rgn_number;
5938 }
5939
5940 /* If X has a smaller topological sort number than Y, returns -1;
5941 if greater, returns 1. */
5942 static int
5943 bb_top_order_comparator (const void *x, const void *y)
5944 {
5945 basic_block bb1 = *(const basic_block *) x;
5946 basic_block bb2 = *(const basic_block *) y;
5947
5948 gcc_assert (bb1 == bb2
5949 || rev_top_order_index[bb1->index]
5950 != rev_top_order_index[bb2->index]);
5951
5952 /* It's a reverse topological order in REV_TOP_ORDER_INDEX, so
5953 bbs with greater number should go earlier. */
5954 if (rev_top_order_index[bb1->index] > rev_top_order_index[bb2->index])
5955 return -1;
5956 else
5957 return 1;
5958 }

/* Create a region for LOOP and return its number.  If we don't want
   to pipeline LOOP, return -1.  */
static int
make_region_from_loop (struct loop *loop)
{
  unsigned int i;
  int new_rgn_number = -1;
  struct loop *inner;

  /* Basic block index, to be assigned to BLOCK_TO_BB.  */
  int bb_ord_index = 0;
  basic_block *loop_blocks;
  basic_block preheader_block;

  if (loop->num_nodes
      > (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS))
    return -1;

  /* Don't pipeline loops whose latch belongs to some of its inner loops.  */
  for (inner = loop->inner; inner; inner = inner->inner)
    if (flow_bb_inside_loop_p (inner, loop->latch))
      return -1;

  loop->ninsns = num_loop_insns (loop);
  if ((int) loop->ninsns > PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_INSNS))
    return -1;

  loop_blocks = get_loop_body_in_custom_order (loop, bb_top_order_comparator);
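  /* bb_top_order_comparator sorts the body into topological order, so
     the loop header is expected to come first (asserted below).  */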

  for (i = 0; i < loop->num_nodes; i++)
    if (loop_blocks[i]->flags & BB_IRREDUCIBLE_LOOP)
      {
        free (loop_blocks);
        return -1;
      }

  preheader_block = loop_preheader_edge (loop)->src;
  gcc_assert (preheader_block);
  gcc_assert (loop_blocks[0] == loop->header);

  new_rgn_number = sel_create_new_region ();

  sel_add_block_to_region (preheader_block, &bb_ord_index, new_rgn_number);
  bitmap_set_bit (bbs_in_loop_rgns, preheader_block->index);
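  /* The preheader is thus block 0 of the new region; this is what
     sel_is_loop_preheader_p relies on to identify preheaders.  */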

  for (i = 0; i < loop->num_nodes; i++)
    {
      /* Add only those blocks that haven't been scheduled in the inner loop.
         The exception is the basic blocks with bookkeeping code - they should
         be added to the region (and they actually don't belong to the loop
         body, but to the region containing that loop body).  */

      gcc_assert (new_rgn_number >= 0);

      if (! bitmap_bit_p (bbs_in_loop_rgns, loop_blocks[i]->index))
        {
          sel_add_block_to_region (loop_blocks[i], &bb_ord_index,
                                   new_rgn_number);
          bitmap_set_bit (bbs_in_loop_rgns, loop_blocks[i]->index);
        }
    }

  free (loop_blocks);
  MARK_LOOP_FOR_PIPELINING (loop);

  return new_rgn_number;
}

/* Create a new region from preheader blocks LOOP_BLOCKS and free
   the vector afterwards.  */
void
make_region_from_loop_preheader (vec<basic_block> *&loop_blocks)
{
  unsigned int i;
  int new_rgn_number = -1;
  basic_block bb;

  /* Basic block index, to be assigned to BLOCK_TO_BB.  */
  int bb_ord_index = 0;

  new_rgn_number = sel_create_new_region ();

  FOR_EACH_VEC_ELT (*loop_blocks, i, bb)
    {
      gcc_assert (new_rgn_number >= 0);

      sel_add_block_to_region (bb, &bb_ord_index, new_rgn_number);
    }

  vec_free (loop_blocks);
}


/* Create region(s) from loop nest LOOP, such that inner loops will be
   pipelined before outer loops.  Returns true when a region for LOOP
   is created.  */
static bool
make_regions_from_loop_nest (struct loop *loop)
{
  struct loop *cur_loop;
  int rgn_number;

  /* Traverse all immediate inner loops; if any of them has not been
     placed into a region yet, postpone LOOP as well.  */
  for (cur_loop = loop->inner; cur_loop; cur_loop = cur_loop->next)
    if (! bitmap_bit_p (bbs_in_loop_rgns, cur_loop->header->index))
      return false;

  /* At this moment all regular inner loops should have been pipelined.
     Try to create a region from this loop.  */
  rgn_number = make_region_from_loop (loop);

  if (rgn_number < 0)
    return false;

  loop_nests.safe_push (loop);
  return true;
}

/* Initialize the data structures needed for pipelining.  */
void
sel_init_pipelining (void)
{
  /* Collect loop information to be used in outer loops pipelining.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS
                       | LOOPS_HAVE_FALLTHRU_PREHEADERS
                       | LOOPS_HAVE_RECORDED_EXITS
                       | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);
  current_loop_nest = NULL;

  bbs_in_loop_rgns = sbitmap_alloc (last_basic_block_for_fn (cfun));
  bitmap_clear (bbs_in_loop_rgns);

  recompute_rev_top_order ();
}

/* Return the struct loop for region RGN.  */
loop_p
get_loop_nest_for_rgn (unsigned int rgn)
{
  /* Regions created with extend_rgns don't have corresponding loop nests,
     because they don't represent loops.  */
  if (rgn < loop_nests.length ())
    return loop_nests[rgn];
  else
    return NULL;
}

/* True when LOOP was included into pipelining regions.  */
bool
considered_for_pipelining_p (struct loop *loop)
{
  if (loop_depth (loop) == 0)
    return false;

  /* Now, the loop could be too large or irreducible.  Check whether its
     region is in LOOP_NESTS.
     We determine the region number of LOOP as the region number of its
     latch.  The header can't be used here, because it could be a
     just-removed preheader, which would give us the wrong region number.  */
  if (LOOP_MARKED_FOR_PIPELINING_P (loop))
    {
      int rgn = CONTAINING_RGN (loop->latch->index);

      gcc_assert ((unsigned) rgn < loop_nests.length ());
      return true;
    }

  return false;
}

/* Make regions from the remaining blocks, after loops have been chosen
   for pipelining.  */
static void
make_regions_from_the_rest (void)
{
  int cur_rgn_blocks;
  int *loop_hdr;
  int i;

  basic_block bb;
  edge e;
  edge_iterator ei;
  int *degree;

  /* Index in rgn_bb_table where to start allocating new regions.  */
  cur_rgn_blocks = nr_regions ? RGN_BLOCKS (nr_regions) : 0;

  /* Make regions from all the remaining basic blocks - those that don't
     belong to any loop or belong to irreducible loops.  Prepare the data
     structures for extend_rgns.  */

  /* LOOP_HDR[I] == -1 if I-th bb doesn't belong to any loop,
     LOOP_HDR[I] == LOOP_HDR[J] iff basic blocks I and J reside within the same
     loop.  */
  loop_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun));
  degree = XCNEWVEC (int, last_basic_block_for_fn (cfun));


  /* For each basic block that belongs to some loop, assign the number
     of the innermost loop it belongs to.  */
  for (i = 0; i < last_basic_block_for_fn (cfun); i++)
    loop_hdr[i] = -1;

  FOR_EACH_BB_FN (bb, cfun)
    {
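      /* Loop number 0 is the root of the loop tree and stands for the
         whole function rather than a real loop, so it is skipped.  */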
      if (bb->loop_father && bb->loop_father->num != 0
          && !(bb->flags & BB_IRREDUCIBLE_LOOP))
        loop_hdr[bb->index] = bb->loop_father->num;
    }

  /* For each basic block, degree is calculated as the number of incoming
     edges that come from basic blocks not yet placed into any region.
     Blocks already placed into loop regions are marked with -1 instead.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      degree[bb->index] = 0;

      if (!bitmap_bit_p (bbs_in_loop_rgns, bb->index))
        {
          FOR_EACH_EDGE (e, ei, bb->preds)
            if (!bitmap_bit_p (bbs_in_loop_rgns, e->src->index))
              degree[bb->index]++;
        }
      else
        degree[bb->index] = -1;
    }

  extend_rgns (degree, &cur_rgn_blocks, bbs_in_loop_rgns, loop_hdr);
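  /* extend_rgns (see sched-rgn.c) forms regions from the remaining
     blocks; a block it places into a region is expected to end up with
     a negative degree, which the fallback loop below relies on.  */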

  /* Any block that did not end up in a region is placed into a region
     by itself.  */
  FOR_EACH_BB_FN (bb, cfun)
    if (degree[bb->index] >= 0)
      {
        rgn_bb_table[cur_rgn_blocks] = bb->index;
        RGN_NR_BLOCKS (nr_regions) = 1;
        RGN_BLOCKS (nr_regions) = cur_rgn_blocks++;
        RGN_DONT_CALC_DEPS (nr_regions) = 0;
        RGN_HAS_REAL_EBB (nr_regions) = 0;
        CONTAINING_RGN (bb->index) = nr_regions++;
        BLOCK_TO_BB (bb->index) = 0;
      }

  free (degree);
  free (loop_hdr);
}

/* Free data structures used in pipelining of loops.  */
void
sel_finish_pipelining (void)
{
  struct loop *loop;

  /* Release aux fields so we don't free them later by mistake.  */
  FOR_EACH_LOOP (loop, 0)
    loop->aux = NULL;

  loop_optimizer_finalize ();

  loop_nests.release ();

  free (rev_top_order_index);
  rev_top_order_index = NULL;
}

/* This function replaces find_rgns when
   FLAG_SEL_SCHED_PIPELINING_OUTER_LOOPS is set.  */
void
sel_find_rgns (void)
{
  sel_init_pipelining ();
  extend_regions ();

  if (current_loops)
    {
      loop_p loop;

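      /* With -fsel-sched-pipelining-outer-loops we walk from the
         innermost loops outwards, so each inner loop is placed into a
         region before its enclosing loop is tried, which is what
         make_regions_from_loop_nest expects; otherwise only the
         innermost loops are considered.  */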
      FOR_EACH_LOOP (loop, (flag_sel_sched_pipelining_outer_loops
                            ? LI_FROM_INNERMOST
                            : LI_ONLY_INNERMOST))
        make_regions_from_loop_nest (loop);
    }

  /* Make regions from all the remaining basic blocks and schedule them.
     These blocks include blocks that don't belong to any loop or belong
     to irreducible loops.  */
  make_regions_from_the_rest ();

  /* We don't need bbs_in_loop_rgns anymore.  */
  sbitmap_free (bbs_in_loop_rgns);
  bbs_in_loop_rgns = NULL;
}

/* Add the preheader blocks of the previous loop, taken from
   LOOP_PREHEADER_BLOCKS (current_loop_nest), to the current region and
   record them in *BBS.  This function is only used with
   -fsel-sched-pipelining-outer-loops.  */
void
sel_add_loop_preheaders (bb_vec_t *bbs)
{
  int i;
  basic_block bb;
  vec<basic_block> *preheader_blocks
    = LOOP_PREHEADER_BLOCKS (current_loop_nest);

  if (!preheader_blocks)
    return;

  for (i = 0; preheader_blocks->iterate (i, &bb); i++)
    {
      bbs->safe_push (bb);
      last_added_blocks.safe_push (bb);
      sel_add_bb (bb);
    }

  vec_free (preheader_blocks);
}

/* While pipelining outer loops, return TRUE if BB is a loop preheader.
   Please note that the function should also work when pipelining_p is
   false, because it is used when deciding whether we should or should
   not reschedule pipelined code.  */
bool
sel_is_loop_preheader_p (basic_block bb)
{
  if (current_loop_nest)
    {
      struct loop *outer;

      if (preheader_removed)
        return false;

      /* The preheader is the first block in the region.  */
      if (BLOCK_TO_BB (bb->index) == 0)
        return true;

      /* We used to find a preheader with the topological information.
         Check that the above code is equivalent to what we did before.  */

      if (in_current_region_p (current_loop_nest->header))
        gcc_assert (!(BLOCK_TO_BB (bb->index)
                      < BLOCK_TO_BB (current_loop_nest->header->index)));

      /* Make sure that BB is not the latch of an outer loop that was
         considered for pipelining - that must never happen here.  */
      for (outer = loop_outer (current_loop_nest);
           outer;
           outer = loop_outer (outer))
        if (considered_for_pipelining_p (outer) && outer->latch == bb)
          gcc_unreachable ();
    }

  return false;
}

/* Check whether JUMP_BB ends with a jump insn that leads only to DEST_BB and
   can be removed, making the corresponding edge fallthrough (assuming that
   all basic blocks between JUMP_BB and DEST_BB are empty).  */
static bool
bb_has_removable_jump_to_p (basic_block jump_bb, basic_block dest_bb)
{
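  /* The jump must have no side effects beyond the jump itself
     (onlyjump_p) and must not be a jump-table dispatch (tablejump_p).  */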
  if (!onlyjump_p (BB_END (jump_bb))
      || tablejump_p (BB_END (jump_bb), NULL, NULL))
    return false;

  /* Fail if there are several outgoing edges, an abnormal edge, or the
     destination of the jump is not DEST_BB.  */
  if (EDGE_COUNT (jump_bb->succs) != 1
      || EDGE_SUCC (jump_bb, 0)->flags & (EDGE_ABNORMAL | EDGE_CROSSING)
      || EDGE_SUCC (jump_bb, 0)->dest != dest_bb)
    return false;

  /* Otherwise, the jump can be removed.  */
  return true;
}

/* Remove the loop preheader blocks from the current region and save them
   in PREHEADER_BLOCKS of the father loop, so they will be added later to
   the region that represents the outer loop.  */
static void
sel_remove_loop_preheader (void)
{
  int i, old_len;
  int cur_rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
  basic_block bb;
  bool all_empty_p = true;
  vec<basic_block> *preheader_blocks
    = LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest));

  vec_check_alloc (preheader_blocks, 0);

  gcc_assert (current_loop_nest);
  old_len = preheader_blocks->length ();

  /* Add blocks that aren't within the current loop to PREHEADER_BLOCKS.  */
  for (i = 0; i < RGN_NR_BLOCKS (cur_rgn); i++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i));

      /* If the basic block belongs to the region but not to the
         corresponding loop, then it must be a preheader.  */
      if (sel_is_loop_preheader_p (bb))
        {
          preheader_blocks->safe_push (bb);
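          /* BB_END equal to the basic block note means that BB contains
             no real insns, i.e. it is empty.  */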
          if (BB_END (bb) != bb_note (bb))
            all_empty_p = false;
        }
    }

  /* Remove these blocks only after iterating over the whole region.  */
  for (i = preheader_blocks->length () - 1; i >= old_len; i--)
    {
      bb = (*preheader_blocks)[i];
      sel_remove_bb (bb, false);
    }

  if (!considered_for_pipelining_p (loop_outer (current_loop_nest)))
    {
      if (!all_empty_p)
        /* Immediately create a new region from the preheader.  */
        make_region_from_loop_preheader (preheader_blocks);
      else
        {
          /* If all preheader blocks are empty, don't create a new empty
             region.  Instead, remove them completely.  */
          FOR_EACH_VEC_ELT (*preheader_blocks, i, bb)
            {
              edge e;
              edge_iterator ei;
              basic_block prev_bb = bb->prev_bb, next_bb = bb->next_bb;

              /* Redirect all incoming edges to the next basic block.  */
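              /* A fallthru edge has no jump insn to rewrite, so only its
                 successor needs updating; any other edge also needs its
                 branch redirected.  */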
              for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
                {
                  if (! (e->flags & EDGE_FALLTHRU))
                    redirect_edge_and_branch (e, bb->next_bb);
                  else
                    redirect_edge_succ (e, bb->next_bb);
                }
              gcc_assert (BB_NOTE_LIST (bb) == NULL);
              delete_and_free_basic_block (bb);

              /* Check if, after deleting the preheader, there is an
                 unconditional jump in PREV_BB that leads to the next basic
                 block NEXT_BB.  If so, delete this jump and clear the data
                 sets of its basic block if it becomes empty.  */
              if (next_bb->prev_bb == prev_bb
                  && prev_bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
                  && bb_has_removable_jump_to_p (prev_bb, next_bb))
                {
                  redirect_edge_and_branch (EDGE_SUCC (prev_bb, 0), next_bb);
                  if (BB_END (prev_bb) == bb_note (prev_bb))
                    free_data_sets (prev_bb);
                }

              set_immediate_dominator (CDI_DOMINATORS, next_bb,
                                       recompute_dominator (CDI_DOMINATORS,
                                                            next_bb));
            }
        }
      vec_free (preheader_blocks);
    }
  else
    /* Store the preheader within the father's loop structure.  */
    SET_LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest),
                               preheader_blocks);
}
#endif