/* Instruction scheduling pass.  Selective scheduler and pipeliner.
   Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "params.h"
#include "target.h"
#include "timevar.h"
#include "tree-pass.h"
#include "sched-int.h"
#include "ggc.h"
#include "tree.h"
#include "vec.h"
#include "langhooks.h"
#include "rtlhooks-def.h"

#ifdef INSN_SCHEDULING
#include "sel-sched-ir.h"
/* We don't have to use it except for sel_print_insn.  */
#include "sel-sched-dump.h"

/* A vector holding bb info for whole scheduling pass.  */
VEC(sel_global_bb_info_def, heap) *sel_global_bb_info = NULL;

/* A vector holding bb info.  */
VEC(sel_region_bb_info_def, heap) *sel_region_bb_info = NULL;

/* A pool for allocating all lists.  */
alloc_pool sched_lists_pool;

/* This contains information about successors for compute_av_set.  */
struct succs_info current_succs;

/* Data structure to describe interaction with the generic scheduler utils.  */
static struct common_sched_info_def sel_common_sched_info;

/* The loop nest being pipelined.  */
struct loop *current_loop_nest;

/* LOOP_NESTS is a vector containing the corresponding loop nest for
   each region.  */
static VEC(loop_p, heap) *loop_nests = NULL;

/* Saves blocks already in loop regions, indexed by bb->index.  */
static sbitmap bbs_in_loop_rgns = NULL;

/* CFG hooks that are saved before changing create_basic_block hook.  */
static struct cfg_hooks orig_cfg_hooks;
\f

/* Array containing reverse topological index of function basic blocks,
   indexed by BB->INDEX.  */
static int *rev_top_order_index = NULL;

/* Length of the above array.  */
static int rev_top_order_index_len = -1;

/* A regset pool structure.  */
static struct
{
  /* The stack to which regsets are returned.  */
  regset *v;

  /* Its pointer.  */
  int n;

  /* Its size.  */
  int s;

  /* In VV we save all generated regsets so that, when destructing the
     pool, we can compare it with V and check that every regset was returned
     back to pool.  */
  regset *vv;

  /* The pointer of VV stack.  */
  int nn;

  /* Its size.  */
  int ss;

  /* The difference between allocated and returned regsets.  */
  int diff;
} regset_pool = { NULL, 0, 0, NULL, 0, 0, 0 };

/* This represents the nop pool.  */
static struct
{
  /* The vector which holds previously emitted nops.  */
  insn_t *v;

  /* Its pointer.  */
  int n;

  /* Its size.  */
  int s;
} nop_pool = { NULL, 0, 0 };
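
/* Nops taken from this pool are emitted by get_nop_from_pool and given
   back by return_nop_to_pool (see below).  */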

/* The pool for basic block notes.  */
static rtx_vec_t bb_note_pool;

/* A NOP pattern used to emit placeholder insns.  */
rtx nop_pattern = NULL_RTX;
/* A special instruction that resides in EXIT_BLOCK.
   EXIT_INSN is successor of the insns that lead to EXIT_BLOCK.  */
rtx exit_insn = NULL_RTX;

/* TRUE if, while scheduling the current region (which is a loop), its
   preheader was removed.  */
bool preheader_removed = false;
\f

/* Forward static declarations.  */
static void fence_clear (fence_t);

static void deps_init_id (idata_t, insn_t, bool);
static void init_id_from_df (idata_t, insn_t, bool);
static expr_t set_insn_init (expr_t, vinsn_t, int);

static void cfg_preds (basic_block, insn_t **, int *);
static void prepare_insn_expr (insn_t, int);
static void free_history_vect (VEC (expr_history_def, heap) **);

static void move_bb_info (basic_block, basic_block);
static void remove_empty_bb (basic_block, bool);
static void sel_remove_loop_preheader (void);

static bool insn_is_the_only_one_in_bb_p (insn_t);
static void create_initial_data_sets (basic_block);

static void free_av_set (basic_block);
static void invalidate_av_set (basic_block);
static void extend_insn_data (void);
static void sel_init_new_insn (insn_t, int);
static void finish_insns (void);
\f
/* Various list functions.  */

/* Copy an instruction list L.  */
ilist_t
ilist_copy (ilist_t l)
{
  ilist_t head = NULL, *tailp = &head;

  while (l)
    {
      ilist_add (tailp, ILIST_INSN (l));
      tailp = &ILIST_NEXT (*tailp);
      l = ILIST_NEXT (l);
    }

  return head;
}

/* Invert an instruction list L.  */
ilist_t
ilist_invert (ilist_t l)
{
  ilist_t res = NULL;

  while (l)
    {
      ilist_add (&res, ILIST_INSN (l));
      l = ILIST_NEXT (l);
    }

  return res;
}

/* Add a new boundary to the LP list with parameters TO, PTR, and DC.  */
void
blist_add (blist_t *lp, insn_t to, ilist_t ptr, deps_t dc)
{
  bnd_t bnd;

  _list_add (lp);
  bnd = BLIST_BND (*lp);

  BND_TO (bnd) = to;
  BND_PTR (bnd) = ptr;
  BND_AV (bnd) = NULL;
  BND_AV1 (bnd) = NULL;
  BND_DC (bnd) = dc;
}

/* Remove the list note pointed to by LP.  */
void
blist_remove (blist_t *lp)
{
  bnd_t b = BLIST_BND (*lp);

  av_set_clear (&BND_AV (b));
  av_set_clear (&BND_AV1 (b));
  ilist_clear (&BND_PTR (b));

  _list_remove (lp);
}

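/* A fence tail keeps both the head of the fence list and the address of
   the last `next' field, so new fences can be appended in constant time
   (see add_to_fences).  */
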
/* Init a fence tail L.  */
void
flist_tail_init (flist_tail_t l)
{
  FLIST_TAIL_HEAD (l) = NULL;
  FLIST_TAIL_TAILP (l) = &FLIST_TAIL_HEAD (l);
}

/* Try to find fence corresponding to INSN in L.  */
fence_t
flist_lookup (flist_t l, insn_t insn)
{
  while (l)
    {
      if (FENCE_INSN (FLIST_FENCE (l)) == insn)
        return FLIST_FENCE (l);

      l = FLIST_NEXT (l);
    }

  return NULL;
}

/* Init the fields of F before running fill_insns.  */
static void
init_fence_for_scheduling (fence_t f)
{
  FENCE_BNDS (f) = NULL;
  FENCE_PROCESSED_P (f) = false;
  FENCE_SCHEDULED_P (f) = false;
}

/* Add new fence consisting of INSN and STATE to the list pointed to by LP.  */
static void
flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc,
           insn_t last_scheduled_insn, VEC(rtx,gc) *executing_insns,
           int *ready_ticks, int ready_ticks_size, insn_t sched_next,
           int cycle, int cycle_issued_insns,
           bool starts_cycle_p, bool after_stall_p)
{
  fence_t f;

  _list_add (lp);
  f = FLIST_FENCE (*lp);

  FENCE_INSN (f) = insn;

  gcc_assert (state != NULL);
  FENCE_STATE (f) = state;

  FENCE_CYCLE (f) = cycle;
  FENCE_ISSUED_INSNS (f) = cycle_issued_insns;
  FENCE_STARTS_CYCLE_P (f) = starts_cycle_p;
  FENCE_AFTER_STALL_P (f) = after_stall_p;

  gcc_assert (dc != NULL);
  FENCE_DC (f) = dc;

  gcc_assert (tc != NULL || targetm.sched.alloc_sched_context == NULL);
  FENCE_TC (f) = tc;

  FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
  FENCE_EXECUTING_INSNS (f) = executing_insns;
  FENCE_READY_TICKS (f) = ready_ticks;
  FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
  FENCE_SCHED_NEXT (f) = sched_next;

  init_fence_for_scheduling (f);
}

/* Remove the head node of the list pointed to by LP.  */
static void
flist_remove (flist_t *lp)
{
  if (FENCE_INSN (FLIST_FENCE (*lp)))
    fence_clear (FLIST_FENCE (*lp));
  _list_remove (lp);
}

/* Clear the fence list pointed to by LP.  */
void
flist_clear (flist_t *lp)
{
  while (*lp)
    flist_remove (lp);
}

/* Add ORIGINAL_INSN to the def list DL, honoring CROSSES_CALL.  */
void
def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call)
{
  def_t d;

  _list_add (dl);
  d = DEF_LIST_DEF (*dl);

  d->orig_insn = original_insn;
  d->crosses_call = crosses_call;
}
\f

/* Functions to work with target contexts.  */

/* Bulk target context.  It is convenient for debugging purposes to ensure
   that there are no uninitialized (null) target contexts.  */
static tc_t bulk_tc = (tc_t) 1;

/* Target hooks wrappers.  In the future we can provide some default
   implementations for them.  */

/* Allocate a store for the target context.  */
static tc_t
alloc_target_context (void)
{
  return (targetm.sched.alloc_sched_context
          ? targetm.sched.alloc_sched_context () : bulk_tc);
}
/* Init target context TC.
   If CLEAN_P is true, then make TC as it is at the beginning of the
   scheduler.  Otherwise, copy the current backend context to TC.  */
static void
init_target_context (tc_t tc, bool clean_p)
{
  if (targetm.sched.init_sched_context)
    targetm.sched.init_sched_context (tc, clean_p);
}

/* Allocate and initialize a target context.  The meaning of CLEAN_P is
   the same as in init_target_context ().  */
tc_t
create_target_context (bool clean_p)
{
  tc_t tc = alloc_target_context ();

  init_target_context (tc, clean_p);
  return tc;
}

/* Copy TC to the current backend context.  */
void
set_target_context (tc_t tc)
{
  if (targetm.sched.set_sched_context)
    targetm.sched.set_sched_context (tc);
}

/* TC is about to be destroyed.  Free any internal data.  */
static void
clear_target_context (tc_t tc)
{
  if (targetm.sched.clear_sched_context)
    targetm.sched.clear_sched_context (tc);
}

/* Clear and free it.  */
static void
delete_target_context (tc_t tc)
{
  clear_target_context (tc);

  if (targetm.sched.free_sched_context)
    targetm.sched.free_sched_context (tc);
}

/* Make a copy of FROM in TO.
   NB: Maybe this should be a hook.  */
static void
copy_target_context (tc_t to, tc_t from)
{
  tc_t tmp = create_target_context (false);

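  /* TMP holds a copy of the backend context that was current on entry.
     Switch the backend to FROM so that init_target_context can snapshot
     it into TO, then restore the saved context.  */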
  set_target_context (from);
  init_target_context (to, false);

  set_target_context (tmp);
  delete_target_context (tmp);
}

/* Create a copy of TC.  */
static tc_t
create_copy_of_target_context (tc_t tc)
{
  tc_t copy = alloc_target_context ();

  copy_target_context (copy, tc);

  return copy;
}

/* Clear TC and initialize it according to CLEAN_P.  The meaning of CLEAN_P
   is the same as in init_target_context ().  */
void
reset_target_context (tc_t tc, bool clean_p)
{
  clear_target_context (tc);
  init_target_context (tc, clean_p);
}
\f
/* Functions to work with dependence contexts.
   Dc (aka deps context, aka deps_t, aka struct deps *) is short for dependence
   context.  It accumulates information about processed insns to decide if
   current insn is dependent on the processed ones.  */

/* Make a copy of FROM in TO.  */
static void
copy_deps_context (deps_t to, deps_t from)
{
  init_deps (to, false);
  deps_join (to, from);
}

/* Allocate store for dep context.  */
static deps_t
alloc_deps_context (void)
{
  return XNEW (struct deps);
}

/* Allocate and initialize dep context.  */
static deps_t
create_deps_context (void)
{
  deps_t dc = alloc_deps_context ();

  init_deps (dc, false);
  return dc;
}

/* Create a copy of FROM.  */
static deps_t
create_copy_of_deps_context (deps_t from)
{
  deps_t to = alloc_deps_context ();

  copy_deps_context (to, from);
  return to;
}

/* Clean up internal data of DC.  */
static void
clear_deps_context (deps_t dc)
{
  free_deps (dc);
}

/* Clear and free DC.  */
static void
delete_deps_context (deps_t dc)
{
  clear_deps_context (dc);
  free (dc);
}

/* Clear and init DC.  */
static void
reset_deps_context (deps_t dc)
{
  clear_deps_context (dc);
  init_deps (dc, false);
}

/* This structure describes the dependence analysis hooks for advancing
   dependence context.  */
static struct sched_deps_info_def advance_deps_context_sched_deps_info =
  {
    NULL,

    NULL, /* start_insn */
    NULL, /* finish_insn */
    NULL, /* start_lhs */
    NULL, /* finish_lhs */
    NULL, /* start_rhs */
    NULL, /* finish_rhs */
    haifa_note_reg_set,
    haifa_note_reg_clobber,
    haifa_note_reg_use,
    NULL, /* note_mem_dep */
    NULL, /* note_dep */

    0, 0, 0
  };

/* Process INSN and add its impact on DC.  */
void
advance_deps_context (deps_t dc, insn_t insn)
{
  sched_deps_info = &advance_deps_context_sched_deps_info;
  deps_analyze_insn (dc, insn);
}
\f

/* Functions to work with DFA states.  */

/* Allocate store for a DFA state.  */
static state_t
state_alloc (void)
{
  return xmalloc (dfa_state_size);
}

/* Allocate and initialize DFA state.  */
static state_t
state_create (void)
{
  state_t state = state_alloc ();

  state_reset (state);
  advance_state (state);
  return state;
}

/* Free DFA state.  */
static void
state_free (state_t state)
{
  free (state);
}

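/* A DFA state is an opaque buffer of dfa_state_size bytes maintained by
   the target automaton, so a plain memcpy is enough to copy it.  */
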
/* Make a copy of FROM in TO.  */
static void
state_copy (state_t to, state_t from)
{
  memcpy (to, from, dfa_state_size);
}

/* Create a copy of FROM.  */
static state_t
state_create_copy (state_t from)
{
  state_t to = state_alloc ();

  state_copy (to, from);
  return to;
}
\f

/* Functions to work with fences.  */

/* Clear the fence.  */
static void
fence_clear (fence_t f)
{
  state_t s = FENCE_STATE (f);
  deps_t dc = FENCE_DC (f);
  void *tc = FENCE_TC (f);

  ilist_clear (&FENCE_BNDS (f));

  gcc_assert ((s != NULL && dc != NULL && tc != NULL)
              || (s == NULL && dc == NULL && tc == NULL));

  if (s != NULL)
    free (s);

  if (dc != NULL)
    delete_deps_context (dc);

  if (tc != NULL)
    delete_target_context (tc);
  VEC_free (rtx, gc, FENCE_EXECUTING_INSNS (f));
  free (FENCE_READY_TICKS (f));
  FENCE_READY_TICKS (f) = NULL;
}

/* Init a list of fences with successors of OLD_FENCE.  */
void
init_fences (insn_t old_fence)
{
  insn_t succ;
  succ_iterator si;
  bool first = true;
  int ready_ticks_size = get_max_uid () + 1;

  FOR_EACH_SUCC_1 (succ, si, old_fence,
                   SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
    {
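      /* More than one successor here is only possible when pipelining
         outer loops (enforced by the assert below).  */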
      if (first)
        first = false;
      else
        gcc_assert (flag_sel_sched_pipelining_outer_loops);

      flist_add (&fences, succ,
                 state_create (),
                 create_deps_context () /* dc */,
                 create_target_context (true) /* tc */,
                 NULL_RTX /* last_scheduled_insn */,
                 NULL, /* executing_insns */
                 XCNEWVEC (int, ready_ticks_size), /* ready_ticks */
                 ready_ticks_size,
                 NULL_RTX /* sched_next */,
                 1 /* cycle */, 0 /* cycle_issued_insns */,
                 1 /* starts_cycle_p */, 0 /* after_stall_p */);
    }
}

/* Merges two fences (filling fields of fence F with resulting values) by
   the following rules: 1) state, target context and last scheduled insn are
   propagated from the fallthrough edge if it is available;
   2) deps context and cycle are propagated from the more probable edge;
   3) all other fields are set to their corresponding constant values.

   INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS,
   READY_TICKS, READY_TICKS_SIZE, SCHED_NEXT, CYCLE and AFTER_STALL_P
   are the corresponding fields of the second fence.  */
static void
merge_fences (fence_t f, insn_t insn,
              state_t state, deps_t dc, void *tc,
              rtx last_scheduled_insn, VEC(rtx, gc) *executing_insns,
              int *ready_ticks, int ready_ticks_size,
              rtx sched_next, int cycle, bool after_stall_p)
{
  insn_t last_scheduled_insn_old = FENCE_LAST_SCHEDULED_INSN (f);

  gcc_assert (sel_bb_head_p (FENCE_INSN (f))
              && !sched_next && !FENCE_SCHED_NEXT (f));

  /* Check if we can decide which path the fences came from.
     If we can't (or don't want to), reset all.  */
  if (last_scheduled_insn == NULL
      || last_scheduled_insn_old == NULL
      /* This is the case when INSN is reachable on several paths from
         one insn (this can happen when pipelining of outer loops is on and
         there are two edges: one going around the inner loop and the other
         going right through it; in such a case just reset everything).  */
      || last_scheduled_insn == last_scheduled_insn_old)
    {
      state_reset (FENCE_STATE (f));
      state_free (state);

      reset_deps_context (FENCE_DC (f));
      delete_deps_context (dc);

      reset_target_context (FENCE_TC (f), true);
      delete_target_context (tc);

      if (cycle > FENCE_CYCLE (f))
        FENCE_CYCLE (f) = cycle;

      FENCE_LAST_SCHEDULED_INSN (f) = NULL;
      VEC_free (rtx, gc, executing_insns);
      free (ready_ticks);
      if (FENCE_EXECUTING_INSNS (f))
        VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
                          VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
      if (FENCE_READY_TICKS (f))
        memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
    }
  else
    {
      edge edge_old = NULL, edge_new = NULL;
      edge candidate;
      succ_iterator si;
      insn_t succ;

      /* Find fallthrough edge.  */
      gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb);
      candidate = find_fallthru_edge (BLOCK_FOR_INSN (insn)->prev_bb);

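      /* Three cases follow: no suitable fallthru edge - reset F's state
         and target context; the fallthru edge belongs to the new fence's
         path - take the new STATE and TC; otherwise keep F's own STATE
         and TC.  */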
      if (!candidate
          || (candidate->src != BLOCK_FOR_INSN (last_scheduled_insn)
              && candidate->src != BLOCK_FOR_INSN (last_scheduled_insn_old)))
        {
          /* No fallthrough edge leading to basic block of INSN.  */
          state_reset (FENCE_STATE (f));
          state_free (state);

          reset_target_context (FENCE_TC (f), true);
          delete_target_context (tc);

          FENCE_LAST_SCHEDULED_INSN (f) = NULL;
        }
      else
        if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn))
          {
            /* Would be weird if same insn is successor of several fallthrough
               edges.  */
            gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
                        != BLOCK_FOR_INSN (last_scheduled_insn_old));

            state_free (FENCE_STATE (f));
            FENCE_STATE (f) = state;

            delete_target_context (FENCE_TC (f));
            FENCE_TC (f) = tc;

            FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
          }
        else
          {
            /* Leave STATE, TC and LAST_SCHEDULED_INSN fields untouched.  */
            state_free (state);
            delete_target_context (tc);

            gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
                        != BLOCK_FOR_INSN (last_scheduled_insn));
          }

      /* Find edge of first predecessor (last_scheduled_insn_old->insn).  */
      FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn_old,
                       SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
        {
          if (succ == insn)
            {
              /* No same successor allowed from several edges.  */
              gcc_assert (!edge_old);
              edge_old = si.e1;
            }
        }
      /* Find edge of second predecessor (last_scheduled_insn->insn).  */
      FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn,
                       SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
        {
          if (succ == insn)
            {
              /* No same successor allowed from several edges.  */
              gcc_assert (!edge_new);
              edge_new = si.e1;
            }
        }

      /* Check if we can choose most probable predecessor.  */
      if (edge_old == NULL || edge_new == NULL)
        {
          reset_deps_context (FENCE_DC (f));
          delete_deps_context (dc);
          VEC_free (rtx, gc, executing_insns);
          free (ready_ticks);

          FENCE_CYCLE (f) = MAX (FENCE_CYCLE (f), cycle);
          if (FENCE_EXECUTING_INSNS (f))
            VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
                              VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
          if (FENCE_READY_TICKS (f))
            memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
        }
      else
        if (edge_new->probability > edge_old->probability)
          {
            delete_deps_context (FENCE_DC (f));
            FENCE_DC (f) = dc;
            VEC_free (rtx, gc, FENCE_EXECUTING_INSNS (f));
            FENCE_EXECUTING_INSNS (f) = executing_insns;
            free (FENCE_READY_TICKS (f));
            FENCE_READY_TICKS (f) = ready_ticks;
            FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
            FENCE_CYCLE (f) = cycle;
          }
        else
          {
            /* Leave DC and CYCLE untouched.  */
            delete_deps_context (dc);
            VEC_free (rtx, gc, executing_insns);
            free (ready_ticks);
          }
    }

  /* Fill remaining invariant fields.  */
  if (after_stall_p)
    FENCE_AFTER_STALL_P (f) = 1;

  FENCE_ISSUED_INSNS (f) = 0;
  FENCE_STARTS_CYCLE_P (f) = 1;
  FENCE_SCHED_NEXT (f) = NULL;
}

/* Add a new fence to NEW_FENCES list, initializing it from all
   other parameters.  */
static void
add_to_fences (flist_tail_t new_fences, insn_t insn,
               state_t state, deps_t dc, void *tc, rtx last_scheduled_insn,
               VEC(rtx, gc) *executing_insns, int *ready_ticks,
               int ready_ticks_size, rtx sched_next, int cycle,
               int cycle_issued_insns, bool starts_cycle_p, bool after_stall_p)
{
  fence_t f = flist_lookup (FLIST_TAIL_HEAD (new_fences), insn);

  if (! f)
    {
      flist_add (FLIST_TAIL_TAILP (new_fences), insn, state, dc, tc,
                 last_scheduled_insn, executing_insns, ready_ticks,
                 ready_ticks_size, sched_next, cycle, cycle_issued_insns,
                 starts_cycle_p, after_stall_p);

      FLIST_TAIL_TAILP (new_fences)
        = &FLIST_NEXT (*FLIST_TAIL_TAILP (new_fences));
    }
  else
    {
      merge_fences (f, insn, state, dc, tc, last_scheduled_insn,
                    executing_insns, ready_ticks, ready_ticks_size,
                    sched_next, cycle, after_stall_p);
    }
}

/* Move the first fence in the OLD_FENCES list to NEW_FENCES.  */
void
move_fence_to_fences (flist_t old_fences, flist_tail_t new_fences)
{
  fence_t f, old;
  flist_t *tailp = FLIST_TAIL_TAILP (new_fences);

  old = FLIST_FENCE (old_fences);
  f = flist_lookup (FLIST_TAIL_HEAD (new_fences),
                    FENCE_INSN (FLIST_FENCE (old_fences)));
  if (f)
    {
      merge_fences (f, old->insn, old->state, old->dc, old->tc,
                    old->last_scheduled_insn, old->executing_insns,
                    old->ready_ticks, old->ready_ticks_size,
                    old->sched_next, old->cycle,
                    old->after_stall_p);
    }
  else
    {
      _list_add (tailp);
      FLIST_TAIL_TAILP (new_fences) = &FLIST_NEXT (*tailp);
      *FLIST_FENCE (*tailp) = *old;
      init_fence_for_scheduling (FLIST_FENCE (*tailp));
    }
  FENCE_INSN (old) = NULL;
}

/* Add a new fence to NEW_FENCES list and initialize most of its data
   as a clean one.  */
void
add_clean_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
{
  int ready_ticks_size = get_max_uid () + 1;

  add_to_fences (new_fences,
                 succ, state_create (), create_deps_context (),
                 create_target_context (true),
                 NULL_RTX, NULL,
                 XCNEWVEC (int, ready_ticks_size), ready_ticks_size,
                 NULL_RTX, FENCE_CYCLE (fence) + 1,
                 0, 1, FENCE_AFTER_STALL_P (fence));
}

/* Add a new fence to NEW_FENCES list and initialize all of its data
   from FENCE and SUCC.  */
void
add_dirty_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
{
  int *new_ready_ticks
    = XNEWVEC (int, FENCE_READY_TICKS_SIZE (fence));

  memcpy (new_ready_ticks, FENCE_READY_TICKS (fence),
          FENCE_READY_TICKS_SIZE (fence) * sizeof (int));
  add_to_fences (new_fences,
                 succ, state_create_copy (FENCE_STATE (fence)),
                 create_copy_of_deps_context (FENCE_DC (fence)),
                 create_copy_of_target_context (FENCE_TC (fence)),
                 FENCE_LAST_SCHEDULED_INSN (fence),
                 VEC_copy (rtx, gc, FENCE_EXECUTING_INSNS (fence)),
                 new_ready_ticks,
                 FENCE_READY_TICKS_SIZE (fence),
                 FENCE_SCHED_NEXT (fence),
                 FENCE_CYCLE (fence),
                 FENCE_ISSUED_INSNS (fence),
                 FENCE_STARTS_CYCLE_P (fence),
                 FENCE_AFTER_STALL_P (fence));
}
\f

/* Functions to work with regset and nop pools.  */

/* Return a new regset from the pool.  It might have some of the bits set
   from the previous usage.  */
regset
get_regset_from_pool (void)
{
  regset rs;

  if (regset_pool.n != 0)
    rs = regset_pool.v[--regset_pool.n];
  else
    /* We need to create the regset.  */
    {
      rs = ALLOC_REG_SET (&reg_obstack);

      if (regset_pool.nn == regset_pool.ss)
        regset_pool.vv = XRESIZEVEC (regset, regset_pool.vv,
                                     (regset_pool.ss = 2 * regset_pool.ss + 1));
      regset_pool.vv[regset_pool.nn++] = rs;
    }

  regset_pool.diff++;

  return rs;
}

/* Same as above, but returns the empty regset.  */
regset
get_clear_regset_from_pool (void)
{
  regset rs = get_regset_from_pool ();

  CLEAR_REG_SET (rs);
  return rs;
}

/* Return regset RS to the pool for future use.  */
void
return_regset_to_pool (regset rs)
{
  regset_pool.diff--;

  if (regset_pool.n == regset_pool.s)
    regset_pool.v = XRESIZEVEC (regset, regset_pool.v,
                                (regset_pool.s = 2 * regset_pool.s + 1));
  regset_pool.v[regset_pool.n++] = rs;
}
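
/* A typical usage pattern for the pool looks like this (illustrative
   sketch, not code from this file):

     regset tmp = get_clear_regset_from_pool ();
     ... use TMP ...
     return_regset_to_pool (tmp);

   Every regset taken from the pool must eventually be returned:
   free_regset_pool asserts that REGSET_POOL.DIFF is zero to catch
   leaks.  */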

#ifdef ENABLE_CHECKING
/* This is used as a qsort callback for sorting regset pool stacks.
   X and XX are addresses of two regsets.  They are never equal.  */
static int
cmp_v_in_regset_pool (const void *x, const void *xx)
{
  return *((const regset *) x) - *((const regset *) xx);
}
#endif

/* Free the regset pool possibly checking for memory leaks.  */
void
free_regset_pool (void)
{
#ifdef ENABLE_CHECKING
  {
    regset *v = regset_pool.v;
    int i = 0;
    int n = regset_pool.n;

    regset *vv = regset_pool.vv;
    int ii = 0;
    int nn = regset_pool.nn;

    int diff = 0;

    gcc_assert (n <= nn);

    /* Sort both vectors so it will be possible to compare them.  */
    qsort (v, n, sizeof (*v), cmp_v_in_regset_pool);
    qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool);

    while (ii < nn)
      {
        if (v[i] == vv[ii])
          i++;
        else
          /* VV[II] was lost.  */
          diff++;

        ii++;
      }

    gcc_assert (diff == regset_pool.diff);
  }
#endif

  /* If not true - we have a memory leak.  */
  gcc_assert (regset_pool.diff == 0);

  while (regset_pool.n)
    {
      --regset_pool.n;
      FREE_REG_SET (regset_pool.v[regset_pool.n]);
    }

  free (regset_pool.v);
  regset_pool.v = NULL;
  regset_pool.s = 0;

  free (regset_pool.vv);
  regset_pool.vv = NULL;
  regset_pool.nn = 0;
  regset_pool.ss = 0;

  regset_pool.diff = 0;
}
\f

/* Functions to work with nop pools.  NOP insns are used as temporary
   placeholders of the insns being scheduled to allow correct update of
   the data sets.  When update is finished, NOPs are deleted.  */

/* A vinsn that is used to represent a nop.  This vinsn is shared among all
   nops sel-sched generates.  */
static vinsn_t nop_vinsn = NULL;

/* Emit a nop before INSN, taking it from pool.  */
insn_t
get_nop_from_pool (insn_t insn)
{
  insn_t nop;
  bool old_p = nop_pool.n != 0;
  int flags;

  if (old_p)
    nop = nop_pool.v[--nop_pool.n];
  else
    nop = nop_pattern;

  nop = emit_insn_before (nop, insn);

  if (old_p)
    flags = INSN_INIT_TODO_SSID;
  else
    flags = INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID;

  set_insn_init (INSN_EXPR (insn), nop_vinsn, INSN_SEQNO (insn));
  sel_init_new_insn (nop, flags);

  return nop;
}

/* Remove NOP from the instruction stream and return it to the pool.  */
void
return_nop_to_pool (insn_t nop, bool full_tidying)
{
  gcc_assert (INSN_IN_STREAM_P (nop));
  sel_remove_insn (nop, false, full_tidying);

  if (nop_pool.n == nop_pool.s)
    nop_pool.v = XRESIZEVEC (rtx, nop_pool.v,
                             (nop_pool.s = 2 * nop_pool.s + 1));
  nop_pool.v[nop_pool.n++] = nop;
}

/* Free the nop pool.  */
void
free_nop_pool (void)
{
  nop_pool.n = 0;
  nop_pool.s = 0;
  free (nop_pool.v);
  nop_pool.v = NULL;
}
\f

/* Skip unspec to support ia64 speculation.  Called from rtx_equal_p_cb.
   The callback is given two rtxes XX and YY and writes the new rtxes
   to NX and NY in case one of them needs to be skipped.  */
static int
skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx *ny)
{
  const_rtx x = *xx;
  const_rtx y = *yy;

  if (GET_CODE (x) == UNSPEC
      && (targetm.sched.skip_rtx_p == NULL
          || targetm.sched.skip_rtx_p (x)))
    {
      *nx = XVECEXP (x, 0, 0);
      *ny = CONST_CAST_RTX (y);
      return 1;
    }

  if (GET_CODE (y) == UNSPEC
      && (targetm.sched.skip_rtx_p == NULL
          || targetm.sched.skip_rtx_p (y)))
    {
      *nx = CONST_CAST_RTX (x);
      *ny = XVECEXP (y, 0, 0);
      return 1;
    }

  return 0;
}

/* Callback, called from hash_rtx_cb.  Helps to hash UNSPEC rtx X in a correct
   way to support ia64 speculation.  When changes are needed, the new rtx is
   written to NX and the new mode to NMODE, and the callback returns true.  */
static int
hash_with_unspec_callback (const_rtx x, enum machine_mode mode ATTRIBUTE_UNUSED,
                           rtx *nx, enum machine_mode *nmode)
{
  if (GET_CODE (x) == UNSPEC
      && targetm.sched.skip_rtx_p
      && targetm.sched.skip_rtx_p (x))
    {
      *nx = XVECEXP (x, 0, 0);
      *nmode = VOIDmode;
      return 1;
    }

  return 0;
}

/* Return true if LHS and RHS are ok to be scheduled separately.  */
static bool
lhs_and_rhs_separable_p (rtx lhs, rtx rhs)
{
  if (lhs == NULL || rhs == NULL)
    return false;

  /* Do not schedule CONST, CONST_INT, CONST_DOUBLE etc as rhs: there is no
     point in using a reg if a const can be used.  Moreover, scheduling a
     const as the rhs may lead to mode mismatches, because consts don't have
     modes but could be merged from branches where the same const is used in
     different modes.  */
  if (CONSTANT_P (rhs))
    return false;

  /* ??? Do not rename predicate registers to avoid ICEs in bundling.  */
  if (COMPARISON_P (rhs))
    return false;

  /* Do not allow single REG to be an rhs.  */
  if (REG_P (rhs))
    return false;

  /* See comment at find_used_regs_1 (*1) for explanation of this
     restriction.  */
  /* FIXME: remove this later.  */
  if (MEM_P (lhs))
    return false;

  /* This will filter all tricky things like ZERO_EXTRACT etc.
     For now we don't handle it.  */
  if (!REG_P (lhs) && !MEM_P (lhs))
    return false;

  return true;
}

/* Initialize vinsn VI for INSN.  Only for use from vinsn_create ().  When
   FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable.  This is
   used e.g. for insns from recovery blocks.  */
static void
vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p)
{
  hash_rtx_callback_function hrcf;
  int insn_class;

  VINSN_INSN_RTX (vi) = insn;
  VINSN_COUNT (vi) = 0;
  vi->cost = -1;

  if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL)
    init_id_from_df (VINSN_ID (vi), insn, force_unique_p);
  else
    deps_init_id (VINSN_ID (vi), insn, force_unique_p);

  /* Hash vinsn depending on whether it is separable or not.  */
  hrcf = targetm.sched.skip_rtx_p ? hash_with_unspec_callback : NULL;
  if (VINSN_SEPARABLE_P (vi))
    {
      rtx rhs = VINSN_RHS (vi);

      VINSN_HASH (vi) = hash_rtx_cb (rhs, GET_MODE (rhs),
                                     NULL, NULL, false, hrcf);
      VINSN_HASH_RTX (vi) = hash_rtx_cb (VINSN_PATTERN (vi),
                                         VOIDmode, NULL, NULL,
                                         false, hrcf);
    }
  else
    {
      VINSN_HASH (vi) = hash_rtx_cb (VINSN_PATTERN (vi), VOIDmode,
                                     NULL, NULL, false, hrcf);
      VINSN_HASH_RTX (vi) = VINSN_HASH (vi);
    }

  insn_class = haifa_classify_insn (insn);
  if (insn_class >= 2
      && (!targetm.sched.get_insn_spec_ds
          || ((targetm.sched.get_insn_spec_ds (insn) & BEGIN_CONTROL)
              == 0)))
    VINSN_MAY_TRAP_P (vi) = true;
  else
    VINSN_MAY_TRAP_P (vi) = false;
}

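/* Vinsns are reference-counted through VINSN_COUNT: vinsn_attach and
   vinsn_detach adjust the count, and the vinsn is deleted once the
   count drops to zero.  */
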
/* Indicate that VI has become the part of an rtx object.  */
void
vinsn_attach (vinsn_t vi)
{
  /* Assert that VI is not pending for deletion.  */
  gcc_assert (VINSN_INSN_RTX (vi));

  VINSN_COUNT (vi)++;
}

/* Create and init VI from the INSN.  Use FORCE_UNIQUE_P for determining
   the correct VINSN_TYPE (VI).  */
static vinsn_t
vinsn_create (insn_t insn, bool force_unique_p)
{
  vinsn_t vi = XCNEW (struct vinsn_def);

  vinsn_init (vi, insn, force_unique_p);
  return vi;
}

/* Return a copy of VI.  When REATTACH_P is true, detach VI and attach
   the copy.  */
vinsn_t
vinsn_copy (vinsn_t vi, bool reattach_p)
{
  rtx copy;
  bool unique = VINSN_UNIQUE_P (vi);
  vinsn_t new_vi;

  copy = create_copy_of_insn_rtx (VINSN_INSN_RTX (vi));
  new_vi = create_vinsn_from_insn_rtx (copy, unique);
  if (reattach_p)
    {
      vinsn_detach (vi);
      vinsn_attach (new_vi);
    }

  return new_vi;
}

/* Delete the VI vinsn and free its data.  */
static void
vinsn_delete (vinsn_t vi)
{
  gcc_assert (VINSN_COUNT (vi) == 0);

  return_regset_to_pool (VINSN_REG_SETS (vi));
  return_regset_to_pool (VINSN_REG_USES (vi));
  return_regset_to_pool (VINSN_REG_CLOBBERS (vi));

  free (vi);
}

/* Indicate that VI is no longer a part of some rtx object.
   Remove VI if it is no longer needed.  */
void
vinsn_detach (vinsn_t vi)
{
  gcc_assert (VINSN_COUNT (vi) > 0);

  if (--VINSN_COUNT (vi) == 0)
    vinsn_delete (vi);
}

/* Return TRUE if VI is a conditional branch.  */
bool
vinsn_cond_branch_p (vinsn_t vi)
{
  insn_t insn;

  if (!VINSN_UNIQUE_P (vi))
    return false;

  insn = VINSN_INSN_RTX (vi);
  if (BB_END (BLOCK_FOR_INSN (insn)) != insn)
    return false;

  return control_flow_insn_p (insn);
}

/* Return latency of INSN.  */
static int
sel_insn_rtx_cost (rtx insn)
{
  int cost;

  /* A USE insn, or something else we don't need to
     understand.  We can't pass these directly to
     result_ready_cost or insn_default_latency because it will
     trigger a fatal error for unrecognizable insns.  */
  if (recog_memoized (insn) < 0)
    cost = 0;
  else
    {
      cost = insn_default_latency (insn);

      if (cost < 0)
        cost = 0;
    }

  return cost;
}

/* Return the cost of the VI.
   !!! FIXME: Unify with haifa-sched.c: insn_cost ().  */
int
sel_vinsn_cost (vinsn_t vi)
{
  int cost = vi->cost;

  if (cost < 0)
    {
      cost = sel_insn_rtx_cost (VINSN_INSN_RTX (vi));
      vi->cost = cost;
    }

  return cost;
}
\f

/* Functions for insn emitting.  */

/* Emit new insn after AFTER based on PATTERN and initialize its data from
   EXPR and SEQNO.  */
insn_t
sel_gen_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, insn_t after)
{
  insn_t new_insn;

  gcc_assert (EXPR_TARGET_AVAILABLE (expr) == true);

  new_insn = emit_insn_after (pattern, after);
  set_insn_init (expr, NULL, seqno);
  sel_init_new_insn (new_insn, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID);

  return new_insn;
}

/* Force newly generated vinsns to be unique.  */
static bool init_insn_force_unique_p = false;

/* Emit new speculation recovery insn after AFTER based on PATTERN and
   initialize its data from EXPR and SEQNO.  */
insn_t
sel_gen_recovery_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno,
                                      insn_t after)
{
  insn_t insn;

  gcc_assert (!init_insn_force_unique_p);

  init_insn_force_unique_p = true;
  insn = sel_gen_insn_from_rtx_after (pattern, expr, seqno, after);
  CANT_MOVE (insn) = 1;
  init_insn_force_unique_p = false;

  return insn;
}

/* Emit new insn after AFTER based on EXPR and SEQNO.  If VINSN is not NULL,
   take it as a new vinsn instead of EXPR's vinsn.
   We simplify insns later, after scheduling region in
   simplify_changed_insns.  */
insn_t
sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
                              insn_t after)
{
  expr_t emit_expr;
  insn_t insn;
  int flags;

  emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr),
                             seqno);
  insn = EXPR_INSN_RTX (emit_expr);
  add_insn_after (insn, after, BLOCK_FOR_INSN (insn));

  flags = INSN_INIT_TODO_SSID;
  if (INSN_LUID (insn) == 0)
    flags |= INSN_INIT_TODO_LUID;
  sel_init_new_insn (insn, flags);

  return insn;
}

/* Move insn from EXPR after AFTER.  */
insn_t
sel_move_insn (expr_t expr, int seqno, insn_t after)
{
  insn_t insn = EXPR_INSN_RTX (expr);
  basic_block bb = BLOCK_FOR_INSN (after);
  insn_t next = NEXT_INSN (after);

  /* Assert that in move_op we disconnected this insn properly.  */
  gcc_assert (EXPR_VINSN (INSN_EXPR (insn)) != NULL);
  PREV_INSN (insn) = after;
  NEXT_INSN (insn) = next;

  NEXT_INSN (after) = insn;
  PREV_INSN (next) = insn;

  /* Update links from insn to bb and vice versa.  */
  df_insn_change_bb (insn, bb);
  if (BB_END (bb) == after)
    BB_END (bb) = insn;

  prepare_insn_expr (insn, seqno);
  return insn;
}

\f
/* Functions to work with right-hand sides.  */

/* Search for a hash value determined by UID/NEW_VINSN in a sorted vector
   VECT and return true when found.  Use NEW_VINSN for comparison only when
   COMPARE_VINSNS is true.  Write to INDP the index on which
   the search has stopped, such that inserting the new element at INDP will
   retain VECT's sort order.  */
static bool
find_in_history_vect_1 (VEC(expr_history_def, heap) *vect,
                        unsigned uid, vinsn_t new_vinsn,
                        bool compare_vinsns, int *indp)
{
  expr_history_def *arr;
  int i, j, len = VEC_length (expr_history_def, vect);

  if (len == 0)
    {
      *indp = 0;
      return false;
    }

  arr = VEC_address (expr_history_def, vect);
  i = 0, j = len - 1;

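  /* This is a linear scan over the UID-sorted vector; it stops early
     once the UIDs seen exceed UID.  */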
  while (i <= j)
    {
      unsigned auid = arr[i].uid;
      vinsn_t avinsn = arr[i].new_expr_vinsn;

      if (auid == uid
          /* When undoing transformation on a bookkeeping copy, the new vinsn
             may not be exactly equal to the one that is saved in the vector.
             This is because the insn whose copy we're checking was possibly
             substituted itself.  */
          && (! compare_vinsns
              || vinsn_equal_p (avinsn, new_vinsn)))
        {
          *indp = i;
          return true;
        }
      else if (auid > uid)
        break;
      i++;
    }

  *indp = i;
  return false;
}

/* Search for a uid of INSN and NEW_VINSN in a sorted vector VECT.  Return
   the position found, or -1 if no such value is in the vector.
   Search also for UIDs of insn's originators, if ORIGINATORS_P is true.  */
int
find_in_history_vect (VEC(expr_history_def, heap) *vect, rtx insn,
                      vinsn_t new_vinsn, bool originators_p)
{
  int ind;

  if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn,
                              false, &ind))
    return ind;

  if (INSN_ORIGINATORS (insn) && originators_p)
    {
      unsigned uid;
      bitmap_iterator bi;

      EXECUTE_IF_SET_IN_BITMAP (INSN_ORIGINATORS (insn), 0, uid, bi)
        if (find_in_history_vect_1 (vect, uid, new_vinsn, false, &ind))
          return ind;
    }

  return -1;
}

/* Insert new element in a sorted history vector pointed to by PVECT,
   if it is not there already.  The element is searched using
   UID/NEW_EXPR_VINSN pair.  TYPE, OLD_EXPR_VINSN and SPEC_DS save
   the history of a transformation.  */
void
insert_in_history_vect (VEC (expr_history_def, heap) **pvect,
                        unsigned uid, enum local_trans_type type,
                        vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn,
                        ds_t spec_ds)
{
  VEC(expr_history_def, heap) *vect = *pvect;
  expr_history_def temp;
  bool res;
  int ind;

  res = find_in_history_vect_1 (vect, uid, new_expr_vinsn, true, &ind);

  if (res)
    {
      expr_history_def *phist = VEC_index (expr_history_def, vect, ind);

      /* It is possible that speculation types of expressions that were
         propagated through different paths will be different here.  In this
         case, merge the status to get the correct check later.  */
      if (phist->spec_ds != spec_ds)
        phist->spec_ds = ds_max_merge (phist->spec_ds, spec_ds);
      return;
    }

  temp.uid = uid;
  temp.old_expr_vinsn = old_expr_vinsn;
  temp.new_expr_vinsn = new_expr_vinsn;
  temp.spec_ds = spec_ds;
  temp.type = type;

  vinsn_attach (old_expr_vinsn);
  vinsn_attach (new_expr_vinsn);
  VEC_safe_insert (expr_history_def, heap, vect, ind, &temp);
  *pvect = vect;
}

/* Free history vector PVECT.  */
static void
free_history_vect (VEC (expr_history_def, heap) **pvect)
{
  unsigned i;
  expr_history_def *phist;

  if (! *pvect)
    return;

  for (i = 0;
       VEC_iterate (expr_history_def, *pvect, i, phist);
       i++)
    {
      vinsn_detach (phist->old_expr_vinsn);
      vinsn_detach (phist->new_expr_vinsn);
    }

  VEC_free (expr_history_def, heap, *pvect);
  *pvect = NULL;
}


/* Compare two vinsns as rhses if possible and as vinsns otherwise.  */
bool
vinsn_equal_p (vinsn_t x, vinsn_t y)
{
  rtx_equal_p_callback_function repcf;

  if (x == y)
    return true;

  if (VINSN_TYPE (x) != VINSN_TYPE (y))
    return false;

  if (VINSN_HASH (x) != VINSN_HASH (y))
    return false;

  repcf = targetm.sched.skip_rtx_p ? skip_unspecs_callback : NULL;
  if (VINSN_SEPARABLE_P (x))
    {
      /* Compare RHSes of VINSNs.  */
      gcc_assert (VINSN_RHS (x));
      gcc_assert (VINSN_RHS (y));

      return rtx_equal_p_cb (VINSN_RHS (x), VINSN_RHS (y), repcf);
    }

  return rtx_equal_p_cb (VINSN_PATTERN (x), VINSN_PATTERN (y), repcf);
}
\f

/* Functions for working with expressions.  */

/* Initialize EXPR.  */
static void
init_expr (expr_t expr, vinsn_t vi, int spec, int use, int priority,
           int sched_times, int orig_bb_index, ds_t spec_done_ds,
           ds_t spec_to_check_ds, int orig_sched_cycle,
           VEC(expr_history_def, heap) *history, bool target_available,
           bool was_substituted, bool was_renamed, bool needs_spec_check_p,
           bool cant_move)
{
  vinsn_attach (vi);

  EXPR_VINSN (expr) = vi;
  EXPR_SPEC (expr) = spec;
  EXPR_USEFULNESS (expr) = use;
  EXPR_PRIORITY (expr) = priority;
  EXPR_PRIORITY_ADJ (expr) = 0;
  EXPR_SCHED_TIMES (expr) = sched_times;
  EXPR_ORIG_BB_INDEX (expr) = orig_bb_index;
  EXPR_ORIG_SCHED_CYCLE (expr) = orig_sched_cycle;
  EXPR_SPEC_DONE_DS (expr) = spec_done_ds;
  EXPR_SPEC_TO_CHECK_DS (expr) = spec_to_check_ds;

  if (history)
    EXPR_HISTORY_OF_CHANGES (expr) = history;
  else
    EXPR_HISTORY_OF_CHANGES (expr) = NULL;

  EXPR_TARGET_AVAILABLE (expr) = target_available;
  EXPR_WAS_SUBSTITUTED (expr) = was_substituted;
  EXPR_WAS_RENAMED (expr) = was_renamed;
  EXPR_NEEDS_SPEC_CHECK_P (expr) = needs_spec_check_p;
  EXPR_CANT_MOVE (expr) = cant_move;
}

/* Make a copy of the expr FROM into the expr TO.  */
void
copy_expr (expr_t to, expr_t from)
{
  VEC(expr_history_def, heap) *temp = NULL;

  if (EXPR_HISTORY_OF_CHANGES (from))
    {
      unsigned i;
      expr_history_def *phist;

      temp = VEC_copy (expr_history_def, heap, EXPR_HISTORY_OF_CHANGES (from));
      for (i = 0;
           VEC_iterate (expr_history_def, temp, i, phist);
           i++)
        {
          vinsn_attach (phist->old_expr_vinsn);
          vinsn_attach (phist->new_expr_vinsn);
        }
    }

  init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from),
             EXPR_USEFULNESS (from), EXPR_PRIORITY (from),
             EXPR_SCHED_TIMES (from), EXPR_ORIG_BB_INDEX (from),
             EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from),
             EXPR_ORIG_SCHED_CYCLE (from), temp,
             EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
             EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
             EXPR_CANT_MOVE (from));
}

/* Same, but the final expr will never be in av sets, so don't copy
   "uninteresting" data such as bitmap cache.  */
void
copy_expr_onside (expr_t to, expr_t from)
{
  init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), EXPR_USEFULNESS (from),
             EXPR_PRIORITY (from), EXPR_SCHED_TIMES (from), 0,
             EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), 0, NULL,
             EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
             EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
             EXPR_CANT_MOVE (from));
}

/* Prepare the expr of INSN for scheduling.  Used when moving insn and when
   initializing new insns.  */
static void
prepare_insn_expr (insn_t insn, int seqno)
{
  expr_t expr = INSN_EXPR (insn);
  ds_t ds;

  INSN_SEQNO (insn) = seqno;
  EXPR_ORIG_BB_INDEX (expr) = BLOCK_NUM (insn);
  EXPR_SPEC (expr) = 0;
  EXPR_ORIG_SCHED_CYCLE (expr) = 0;
  EXPR_WAS_SUBSTITUTED (expr) = 0;
  EXPR_WAS_RENAMED (expr) = 0;
  EXPR_TARGET_AVAILABLE (expr) = 1;
  INSN_LIVE_VALID_P (insn) = false;

  /* ??? If this expression is speculative, make its dependence
     as weak as possible.  We can filter this expression later
     in process_spec_exprs, because we do not distinguish
     between the status we got during compute_av_set and the
     existing status.  To be fixed.  */
  ds = EXPR_SPEC_DONE_DS (expr);
  if (ds)
    EXPR_SPEC_DONE_DS (expr) = ds_get_max_dep_weak (ds);

  free_history_vect (&EXPR_HISTORY_OF_CHANGES (expr));
}

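/* EXPR_TARGET_AVAILABLE is used as a tri-state below: true when the
   target register is known to be available, false when it is known not
   to be, and -1 when this cannot be determined.  */
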
/* Update target_available bits when merging exprs TO and FROM.  SPLIT_POINT
   is non-null when expressions are merged from different successors at
   a split point.  */
static void
update_target_availability (expr_t to, expr_t from, insn_t split_point)
{
  if (EXPR_TARGET_AVAILABLE (to) < 0
      || EXPR_TARGET_AVAILABLE (from) < 0)
    EXPR_TARGET_AVAILABLE (to) = -1;
  else
    {
      /* We try to detect the case when one of the expressions
         can only be reached through another one.  In this case,
         we can do better.  */
      if (split_point == NULL)
        {
          int toind, fromind;

          toind = EXPR_ORIG_BB_INDEX (to);
          fromind = EXPR_ORIG_BB_INDEX (from);

          if (toind && toind == fromind)
            /* Do nothing -- everything is done in
               merge_with_other_exprs.  */
            ;
          else
            EXPR_TARGET_AVAILABLE (to) = -1;
        }
      else
        EXPR_TARGET_AVAILABLE (to) &= EXPR_TARGET_AVAILABLE (from);
    }
}

/* Update speculation bits when merging exprs TO and FROM.  SPLIT_POINT
   is non-null when expressions are merged from different successors at
   a split point.  */
static void
update_speculative_bits (expr_t to, expr_t from, insn_t split_point)
{
  ds_t old_to_ds, old_from_ds;

  old_to_ds = EXPR_SPEC_DONE_DS (to);
  old_from_ds = EXPR_SPEC_DONE_DS (from);

  EXPR_SPEC_DONE_DS (to) = ds_max_merge (old_to_ds, old_from_ds);
  EXPR_SPEC_TO_CHECK_DS (to) |= EXPR_SPEC_TO_CHECK_DS (from);
  EXPR_NEEDS_SPEC_CHECK_P (to) |= EXPR_NEEDS_SPEC_CHECK_P (from);

  /* When merging e.g. control & data speculative exprs, or a control
     speculative with a control&data speculative one, we really have
     to change vinsn too.  Also, when speculative status is changed,
     we also need to record this as a transformation in expr's history.  */
  if ((old_to_ds & SPECULATIVE) || (old_from_ds & SPECULATIVE))
    {
      old_to_ds = ds_get_speculation_types (old_to_ds);
      old_from_ds = ds_get_speculation_types (old_from_ds);

      if (old_to_ds != old_from_ds)
        {
          ds_t record_ds;

          /* When both expressions are speculative, we need to change
             the vinsn first.  */
          if ((old_to_ds & SPECULATIVE) && (old_from_ds & SPECULATIVE))
            {
              int res;

              res = speculate_expr (to, EXPR_SPEC_DONE_DS (to));
              gcc_assert (res >= 0);
            }

          if (split_point != NULL)
            {
              /* Record the change with proper status.  */
              record_ds = EXPR_SPEC_DONE_DS (to) & SPECULATIVE;
              record_ds &= ~(old_to_ds & SPECULATIVE);
              record_ds &= ~(old_from_ds & SPECULATIVE);

              insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
                                      INSN_UID (split_point), TRANS_SPECULATION,
                                      EXPR_VINSN (from), EXPR_VINSN (to),
                                      record_ds);
            }
        }
    }
}


/* Merge bits of FROM expr to TO expr.  When SPLIT_POINT is not NULL,
   this is done along different paths.  */
void
merge_expr_data (expr_t to, expr_t from, insn_t split_point)
{
  int i;
  expr_history_def *phist;

  /* For now, we just set the spec of the resulting expr to be the minimum
     of the specs of the merged exprs.  */
  if (EXPR_SPEC (to) > EXPR_SPEC (from))
    EXPR_SPEC (to) = EXPR_SPEC (from);

  if (split_point)
    EXPR_USEFULNESS (to) += EXPR_USEFULNESS (from);
  else
    EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to),
                                EXPR_USEFULNESS (from));

  if (EXPR_PRIORITY (to) < EXPR_PRIORITY (from))
    EXPR_PRIORITY (to) = EXPR_PRIORITY (from);

  if (EXPR_SCHED_TIMES (to) > EXPR_SCHED_TIMES (from))
    EXPR_SCHED_TIMES (to) = EXPR_SCHED_TIMES (from);

  if (EXPR_ORIG_BB_INDEX (to) != EXPR_ORIG_BB_INDEX (from))
    EXPR_ORIG_BB_INDEX (to) = 0;

  EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to),
                                    EXPR_ORIG_SCHED_CYCLE (from));

  /* We keep this vector sorted.  */
  for (i = 0;
       VEC_iterate (expr_history_def, EXPR_HISTORY_OF_CHANGES (from),
                    i, phist);
       i++)
    insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
                            phist->uid, phist->type,
                            phist->old_expr_vinsn, phist->new_expr_vinsn,
                            phist->spec_ds);

  EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from);
  EXPR_WAS_RENAMED (to) |= EXPR_WAS_RENAMED (from);
  EXPR_CANT_MOVE (to) |= EXPR_CANT_MOVE (from);

  update_target_availability (to, from, split_point);
  update_speculative_bits (to, from, split_point);
}

/* Merge bits of FROM expr to TO expr.  Vinsns in the exprs should be equal
   in terms of vinsn_equal_p.  SPLIT_POINT is non-null when expressions
   are merged from different successors at a split point.  */
void
merge_expr (expr_t to, expr_t from, insn_t split_point)
{
  vinsn_t to_vi = EXPR_VINSN (to);
  vinsn_t from_vi = EXPR_VINSN (from);

  gcc_assert (vinsn_equal_p (to_vi, from_vi));

  /* Make sure that speculative pattern is propagated into exprs that
     have non-speculative one.  This will provide us with consistent
     speculative bits and speculative patterns inside expr.  */
  if (EXPR_SPEC_DONE_DS (to) == 0
      && EXPR_SPEC_DONE_DS (from) != 0)
    change_vinsn_in_expr (to, EXPR_VINSN (from));

  merge_expr_data (to, from, split_point);
  gcc_assert (EXPR_USEFULNESS (to) <= REG_BR_PROB_BASE);
}

/* Clear the information of this EXPR.  */
void
clear_expr (expr_t expr)
{
  vinsn_detach (EXPR_VINSN (expr));
  EXPR_VINSN (expr) = NULL;

  free_history_vect (&EXPR_HISTORY_OF_CHANGES (expr));
}

/* For a given LV_SET, mark EXPR having unavailable target register.  */
static void
set_unavailable_target_for_expr (expr_t expr, regset lv_set)
{
  if (EXPR_SEPARABLE_P (expr))
    {
      if (REG_P (EXPR_LHS (expr))
          && bitmap_bit_p (lv_set, REGNO (EXPR_LHS (expr))))
        {
          /* If it's an insn like r1 = use (r1, ...), and it exists in
             different forms in each of the av_sets being merged, we can't say
             whether the original destination register is available or not.
             However, this still works if the destination register is not used
             in the original expression: if the branch at whose LV_SET we're
             looking here is not actually the 'other branch' in the sense that
             the same expression is available through it (but that can't be
             determined at computation stage because of transformations on one
             of the branches), it still won't affect the availability.
             Liveness of a register somewhere on a code motion path means
             it's either read somewhere on the code motion path, live on
             the 'other' branch, live at the point immediately following
             the original operation, or is read by the original operation.
             The latter case is filtered out in the condition below.
             It still doesn't cover the case when a register is defined and
             used somewhere within the code motion path, and in this case we
             could miss a unifying code motion along both branches using a
             renamed register, but it won't affect code correctness since upon
             actual code motion bookkeeping code would be generated.  */
          if (bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)),
                            REGNO (EXPR_LHS (expr))))
            EXPR_TARGET_AVAILABLE (expr) = -1;
          else
            EXPR_TARGET_AVAILABLE (expr) = false;
        }
    }
  else
    {
      unsigned regno;
      reg_set_iterator rsi;

      EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)),
                                 0, regno, rsi)
        if (bitmap_bit_p (lv_set, regno))
          {
            EXPR_TARGET_AVAILABLE (expr) = false;
            break;
          }

      EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (EXPR_VINSN (expr)),
                                 0, regno, rsi)
        if (bitmap_bit_p (lv_set, regno))
          {
            EXPR_TARGET_AVAILABLE (expr) = false;
            break;
          }
    }
}

/* Try to make EXPR speculative.  Return 1 when EXPR's pattern
   or dependence status have changed, 2 when also the target register
   became unavailable, 0 if nothing had to be changed, and -1 when the
   insn cannot be speculated.  */
int
speculate_expr (expr_t expr, ds_t ds)
{
  int res;
  rtx orig_insn_rtx;
  rtx spec_pat;
  ds_t target_ds, current_ds;

  /* Obtain the status we need to put on EXPR.  */
  target_ds = (ds & SPECULATIVE);
  current_ds = EXPR_SPEC_DONE_DS (expr);
  ds = ds_full_merge (current_ds, target_ds, NULL_RTX, NULL_RTX);

  orig_insn_rtx = EXPR_INSN_RTX (expr);

  res = sched_speculate_insn (orig_insn_rtx, ds, &spec_pat);

  switch (res)
    {
    case 0:
      EXPR_SPEC_DONE_DS (expr) = ds;
      return current_ds != ds ? 1 : 0;

    case 1:
      {
        rtx spec_insn_rtx = create_insn_rtx_from_pattern (spec_pat, NULL_RTX);
        vinsn_t spec_vinsn = create_vinsn_from_insn_rtx (spec_insn_rtx, false);

        change_vinsn_in_expr (expr, spec_vinsn);
        EXPR_SPEC_DONE_DS (expr) = ds;
        EXPR_NEEDS_SPEC_CHECK_P (expr) = true;

        /* Do not allow clobbering the address register of speculative
           insns.  */
        if (bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)),
                          expr_dest_regno (expr)))
          {
            EXPR_TARGET_AVAILABLE (expr) = false;
            return 2;
          }

        return 1;
      }

    case -1:
      return -1;

    default:
      gcc_unreachable ();
      return -1;
    }
}

/* Return a destination register, if any, of EXPR.  */
rtx
expr_dest_reg (expr_t expr)
{
  rtx dest = VINSN_LHS (EXPR_VINSN (expr));

  if (dest != NULL_RTX && REG_P (dest))
    return dest;

  return NULL_RTX;
}

/* Returns the REGNO of EXPR's destination.  */
1987 unsigned
1988 expr_dest_regno (expr_t expr)
1989 {
1990 rtx dest = expr_dest_reg (expr);
1991
1992 gcc_assert (dest != NULL_RTX);
1993 return REGNO (dest);
1994 }
1995
/* For a given LV_SET, mark all expressions in JOIN_SET that are not present
   in AV_SET as having an unavailable target register.  */
1998 void
1999 mark_unavailable_targets (av_set_t join_set, av_set_t av_set, regset lv_set)
2000 {
2001 expr_t expr;
2002 av_set_iterator avi;
2003
2004 FOR_EACH_EXPR (expr, avi, join_set)
2005 if (av_set_lookup (av_set, EXPR_VINSN (expr)) == NULL)
2006 set_unavailable_target_for_expr (expr, lv_set);
2007 }
2008 \f
2009
2010 /* Av set functions. */
2011
2012 /* Add a new element to av set SETP.
2013 Return the element added. */
2014 static av_set_t
2015 av_set_add_element (av_set_t *setp)
2016 {
2017 /* Insert at the beginning of the list. */
2018 _list_add (setp);
2019 return *setp;
2020 }
2021
2022 /* Add EXPR to SETP. */
2023 void
2024 av_set_add (av_set_t *setp, expr_t expr)
2025 {
2026 av_set_t elem;
2027
2028 gcc_assert (!INSN_NOP_P (EXPR_INSN_RTX (expr)));
2029 elem = av_set_add_element (setp);
2030 copy_expr (_AV_SET_EXPR (elem), expr);
2031 }
2032
2033 /* Same, but do not copy EXPR. */
2034 static void
2035 av_set_add_nocopy (av_set_t *setp, expr_t expr)
2036 {
2037 av_set_t elem;
2038
2039 elem = av_set_add_element (setp);
2040 *_AV_SET_EXPR (elem) = *expr;
2041 }
2042
2043 /* Remove expr pointed to by IP from the av_set. */
2044 void
2045 av_set_iter_remove (av_set_iterator *ip)
2046 {
2047 clear_expr (_AV_SET_EXPR (*ip->lp));
2048 _list_iter_remove (ip);
2049 }
2050
/* Search for an expr in SET such that it's equivalent to SOUGHT_VINSN in the
   sense of the vinsn_equal_p function.  Return NULL if no such expr
   was found in SET.  */
2054 expr_t
2055 av_set_lookup (av_set_t set, vinsn_t sought_vinsn)
2056 {
2057 expr_t expr;
2058 av_set_iterator i;
2059
2060 FOR_EACH_EXPR (expr, i, set)
2061 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2062 return expr;
2063 return NULL;
2064 }
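
/* An illustrative sketch of the basic av_set API defined here (not part
   of the pass; EXPR1 and EXPR2 are assumed to be valid expr_t values):
   build a set, query it by vinsn, and dispose of it.  */
#if 0
  av_set_t set = NULL;

  av_set_add (&set, expr1);     /* Copies EXPR1 into a new element.  */
  av_set_add (&set, expr2);

  if (av_set_is_in_p (set, EXPR_VINSN (expr1)))
    {
      expr_t found = av_set_lookup (set, EXPR_VINSN (expr1));
      gcc_assert (found != NULL);
    }

  av_set_clear (&set);          /* Frees all elements.  */
#endif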
2065
2066 /* Same, but also remove the EXPR found. */
2067 static expr_t
2068 av_set_lookup_and_remove (av_set_t *setp, vinsn_t sought_vinsn)
2069 {
2070 expr_t expr;
2071 av_set_iterator i;
2072
2073 FOR_EACH_EXPR_1 (expr, i, setp)
2074 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2075 {
2076 _list_iter_remove_nofree (&i);
2077 return expr;
2078 }
2079 return NULL;
2080 }
2081
/* Search for an expr in SET such that it's equivalent to EXPR in the
   sense of vinsn_equal_p applied to their vinsns, but is not EXPR itself.
   Return NULL if no such expr was found in SET.  */
2085 static expr_t
2086 av_set_lookup_other_equiv_expr (av_set_t set, expr_t expr)
2087 {
2088 expr_t cur_expr;
2089 av_set_iterator i;
2090
2091 FOR_EACH_EXPR (cur_expr, i, set)
2092 {
2093 if (cur_expr == expr)
2094 continue;
2095 if (vinsn_equal_p (EXPR_VINSN (cur_expr), EXPR_VINSN (expr)))
2096 return cur_expr;
2097 }
2098
2099 return NULL;
2100 }
2101
/* If an expression equivalent to EXPR is already in AVP, merge EXPR
   into it, remove the element pointed to by IP, and return the merged
   expression; otherwise return EXPR.  */
2103 expr_t
2104 merge_with_other_exprs (av_set_t *avp, av_set_iterator *ip, expr_t expr)
2105 {
2106 expr_t expr2;
2107
2108 expr2 = av_set_lookup_other_equiv_expr (*avp, expr);
2109 if (expr2 != NULL)
2110 {
/* Reset target availability on merge, since taking it only from one
   of the exprs would be wrong when their code differs.  */
2113 EXPR_TARGET_AVAILABLE (expr2) = -1;
2114 EXPR_USEFULNESS (expr2) = 0;
2115
2116 merge_expr (expr2, expr, NULL);
2117
2118 /* Fix usefulness as it should be now REG_BR_PROB_BASE. */
2119 EXPR_USEFULNESS (expr2) = REG_BR_PROB_BASE;
2120
2121 av_set_iter_remove (ip);
2122 return expr2;
2123 }
2124
2125 return expr;
2126 }
2127
/* Return true if there is an expr equivalent to VI in SET.  */
2129 bool
2130 av_set_is_in_p (av_set_t set, vinsn_t vi)
2131 {
2132 return av_set_lookup (set, vi) != NULL;
2133 }
2134
2135 /* Return a copy of SET. */
2136 av_set_t
2137 av_set_copy (av_set_t set)
2138 {
2139 expr_t expr;
2140 av_set_iterator i;
2141 av_set_t res = NULL;
2142
2143 FOR_EACH_EXPR (expr, i, set)
2144 av_set_add (&res, expr);
2145
2146 return res;
2147 }
2148
/* Join two av sets that do not have common elements by attaching the second
   set (pointed to by FROMP) to the end of the first set (TO_TAILP must point
   to _AV_SET_NEXT of the first set's last element).  */
2152 static void
2153 join_distinct_sets (av_set_t *to_tailp, av_set_t *fromp)
2154 {
2155 gcc_assert (*to_tailp == NULL);
2156 *to_tailp = *fromp;
2157 *fromp = NULL;
2158 }
2159
/* Make the set pointed to by TOP the union of TOP and FROMP.  Clear the
   av_set pointed to by FROMP afterwards.  */
2162 void
2163 av_set_union_and_clear (av_set_t *top, av_set_t *fromp, insn_t insn)
2164 {
2165 expr_t expr1;
2166 av_set_iterator i;
2167
/* Delete from TOP all exprs that are present in FROMP.  */
2169 FOR_EACH_EXPR_1 (expr1, i, top)
2170 {
2171 expr_t expr2 = av_set_lookup (*fromp, EXPR_VINSN (expr1));
2172
2173 if (expr2)
2174 {
2175 merge_expr (expr2, expr1, insn);
2176 av_set_iter_remove (&i);
2177 }
2178 }
2179
2180 join_distinct_sets (i.lp, fromp);
2181 }
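
/* A sketch of the union semantics above (illustrative only; SET1, SET2
   and INSN are assumed to come from the caller): exprs present in both
   sets are merged in place in SET1, the remaining elements of SET2 are
   spliced onto SET1's tail, and SET2 ends up empty.  */
#if 0
  av_set_union_and_clear (&set1, &set2, insn);
  gcc_assert (set2 == NULL);
#endif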
2182
2183 /* Same as above, but also update availability of target register in
2184 TOP judging by TO_LV_SET and FROM_LV_SET. */
2185 void
2186 av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set,
2187 regset from_lv_set, insn_t insn)
2188 {
2189 expr_t expr1;
2190 av_set_iterator i;
2191 av_set_t *to_tailp, in_both_set = NULL;
2192
/* Delete from TOP all exprs that are present in FROMP.  */
2194 FOR_EACH_EXPR_1 (expr1, i, top)
2195 {
2196 expr_t expr2 = av_set_lookup_and_remove (fromp, EXPR_VINSN (expr1));
2197
2198 if (expr2)
2199 {
2200 /* It may be that the expressions have different destination
2201 registers, in which case we need to check liveness here. */
2202 if (EXPR_SEPARABLE_P (expr1))
2203 {
2204 int regno1 = (REG_P (EXPR_LHS (expr1))
2205 ? (int) expr_dest_regno (expr1) : -1);
2206 int regno2 = (REG_P (EXPR_LHS (expr2))
2207 ? (int) expr_dest_regno (expr2) : -1);
2208
/* ??? We don't have a way to check restrictions for the
   *other* register on the current path; we did it only
   for the current target register.  Give up.  */
2212 if (regno1 != regno2)
2213 EXPR_TARGET_AVAILABLE (expr2) = -1;
2214 }
2215 else if (EXPR_INSN_RTX (expr1) != EXPR_INSN_RTX (expr2))
2216 EXPR_TARGET_AVAILABLE (expr2) = -1;
2217
2218 merge_expr (expr2, expr1, insn);
2219 av_set_add_nocopy (&in_both_set, expr2);
2220 av_set_iter_remove (&i);
2221 }
2222 else
2223 /* EXPR1 is present in TOP, but not in FROMP. Check it on
2224 FROM_LV_SET. */
2225 set_unavailable_target_for_expr (expr1, from_lv_set);
2226 }
2227 to_tailp = i.lp;
2228
2229 /* These expressions are not present in TOP. Check liveness
2230 restrictions on TO_LV_SET. */
2231 FOR_EACH_EXPR (expr1, i, *fromp)
2232 set_unavailable_target_for_expr (expr1, to_lv_set);
2233
2234 join_distinct_sets (i.lp, &in_both_set);
2235 join_distinct_sets (to_tailp, fromp);
2236 }
2237
2238 /* Clear av_set pointed to by SETP. */
2239 void
2240 av_set_clear (av_set_t *setp)
2241 {
2242 expr_t expr;
2243 av_set_iterator i;
2244
2245 FOR_EACH_EXPR_1 (expr, i, setp)
2246 av_set_iter_remove (&i);
2247
2248 gcc_assert (*setp == NULL);
2249 }
2250
/* Leave at most one non-speculative element in SETP.  */
2252 void
2253 av_set_leave_one_nonspec (av_set_t *setp)
2254 {
2255 expr_t expr;
2256 av_set_iterator i;
2257 bool has_one_nonspec = false;
2258
2259 /* Keep all speculative exprs, and leave one non-speculative
2260 (the first one). */
2261 FOR_EACH_EXPR_1 (expr, i, setp)
2262 {
2263 if (!EXPR_SPEC_DONE_DS (expr))
2264 {
2265 if (has_one_nonspec)
2266 av_set_iter_remove (&i);
2267 else
2268 has_one_nonspec = true;
2269 }
2270 }
2271 }
2272
2273 /* Return the N'th element of the SET. */
2274 expr_t
2275 av_set_element (av_set_t set, int n)
2276 {
2277 expr_t expr;
2278 av_set_iterator i;
2279
2280 FOR_EACH_EXPR (expr, i, set)
2281 if (n-- == 0)
2282 return expr;
2283
2284 gcc_unreachable ();
2285 return NULL;
2286 }
2287
2288 /* Deletes all expressions from AVP that are conditional branches (IFs). */
2289 void
2290 av_set_substract_cond_branches (av_set_t *avp)
2291 {
2292 av_set_iterator i;
2293 expr_t expr;
2294
2295 FOR_EACH_EXPR_1 (expr, i, avp)
2296 if (vinsn_cond_branch_p (EXPR_VINSN (expr)))
2297 av_set_iter_remove (&i);
2298 }
2299
/* Multiply the usefulness attribute of each member of av-set AV by
   PROB / ALL_PROB.  */
2302 void
2303 av_set_split_usefulness (av_set_t av, int prob, int all_prob)
2304 {
2305 av_set_iterator i;
2306 expr_t expr;
2307
2308 FOR_EACH_EXPR (expr, i, av)
2309 EXPR_USEFULNESS (expr) = (all_prob
2310 ? (EXPR_USEFULNESS (expr) * prob) / all_prob
2311 : 0);
2312 }
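
/* A worked example of the scaling above (illustrative only): with the
   usual REG_BR_PROB_BASE of 10000, an expr whose usefulness is 10000
   and which reaches a join point through an edge with PROB = 4000 out
   of ALL_PROB = 10000 ends up with usefulness
   (10000 * 4000) / 10000 = 4000.  */
#if 0
  av_set_split_usefulness (av, 4000, 10000);
#endif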
2313
/* Leave in AVP only those expressions that are also present in AV.  */
2316 void
2317 av_set_intersect (av_set_t *avp, av_set_t av)
2318 {
2319 av_set_iterator i;
2320 expr_t expr;
2321
2322 FOR_EACH_EXPR_1 (expr, i, avp)
2323 if (av_set_lookup (av, EXPR_VINSN (expr)) == NULL)
2324 av_set_iter_remove (&i);
2325 }
2326
2327 \f
2328
2329 /* Dependence hooks to initialize insn data. */
2330
/* This is used in hooks callable from dependence analysis when initializing
   an instruction's data.  */
2333 static struct
2334 {
2335 /* Where the dependence was found (lhs/rhs). */
2336 deps_where_t where;
2337
2338 /* The actual data object to initialize. */
2339 idata_t id;
2340
2341 /* True when the insn should not be made clonable. */
2342 bool force_unique_p;
2343
2344 /* True when insn should be treated as of type USE, i.e. never renamed. */
2345 bool force_use_p;
2346 } deps_init_id_data;
2347
2348
2349 /* Setup ID for INSN. FORCE_UNIQUE_P is true when INSN should not be
2350 clonable. */
2351 static void
2352 setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p)
2353 {
2354 int type;
2355
/* Determine whether INSN can be cloned and return the appropriate vinsn type.
   Clonable insns which can be separated into lhs and rhs have type SET.
   Other clonable insns have type USE.  */
2359 type = GET_CODE (insn);
2360
/* Only regular insns can be cloned.  */
2362 if (type == INSN && !force_unique_p)
2363 type = SET;
2364 else if (type == JUMP_INSN && simplejump_p (insn))
2365 type = PC;
2366 else if (type == DEBUG_INSN)
2367 type = !force_unique_p ? USE : INSN;
2368
2369 IDATA_TYPE (id) = type;
2370 IDATA_REG_SETS (id) = get_clear_regset_from_pool ();
2371 IDATA_REG_USES (id) = get_clear_regset_from_pool ();
2372 IDATA_REG_CLOBBERS (id) = get_clear_regset_from_pool ();
2373 }
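
/* Illustrative summary of the classification above (assumed examples,
   not code from the pass): an ordinary INSN with !FORCE_UNIQUE_P gets
   type SET; a simplejump gets type PC; a DEBUG_INSN gets type USE, or
   INSN when forced unique; everything else keeps its GET_CODE as the
   type.  */
#if 0
  setup_id_for_insn (id, insn, false);
  if (NONJUMP_INSN_P (insn))
    gcc_assert (IDATA_TYPE (id) == SET);
#endif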
2374
2375 /* Start initializing insn data. */
2376 static void
2377 deps_init_id_start_insn (insn_t insn)
2378 {
2379 gcc_assert (deps_init_id_data.where == DEPS_IN_NOWHERE);
2380
2381 setup_id_for_insn (deps_init_id_data.id, insn,
2382 deps_init_id_data.force_unique_p);
2383 deps_init_id_data.where = DEPS_IN_INSN;
2384 }
2385
2386 /* Start initializing lhs data. */
2387 static void
2388 deps_init_id_start_lhs (rtx lhs)
2389 {
2390 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2391 gcc_assert (IDATA_LHS (deps_init_id_data.id) == NULL);
2392
2393 if (IDATA_TYPE (deps_init_id_data.id) == SET)
2394 {
2395 IDATA_LHS (deps_init_id_data.id) = lhs;
2396 deps_init_id_data.where = DEPS_IN_LHS;
2397 }
2398 }
2399
2400 /* Finish initializing lhs data. */
2401 static void
2402 deps_init_id_finish_lhs (void)
2403 {
2404 deps_init_id_data.where = DEPS_IN_INSN;
2405 }
2406
2407 /* Note a set of REGNO. */
2408 static void
2409 deps_init_id_note_reg_set (int regno)
2410 {
2411 haifa_note_reg_set (regno);
2412
2413 if (deps_init_id_data.where == DEPS_IN_RHS)
2414 deps_init_id_data.force_use_p = true;
2415
2416 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2417 SET_REGNO_REG_SET (IDATA_REG_SETS (deps_init_id_data.id), regno);
2418
2419 #ifdef STACK_REGS
2420 /* Make instructions that set stack registers to be ineligible for
2421 renaming to avoid issues with find_used_regs. */
2422 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2423 deps_init_id_data.force_use_p = true;
2424 #endif
2425 }
2426
2427 /* Note a clobber of REGNO. */
2428 static void
2429 deps_init_id_note_reg_clobber (int regno)
2430 {
2431 haifa_note_reg_clobber (regno);
2432
2433 if (deps_init_id_data.where == DEPS_IN_RHS)
2434 deps_init_id_data.force_use_p = true;
2435
2436 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2437 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (deps_init_id_data.id), regno);
2438 }
2439
2440 /* Note a use of REGNO. */
2441 static void
2442 deps_init_id_note_reg_use (int regno)
2443 {
2444 haifa_note_reg_use (regno);
2445
2446 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2447 SET_REGNO_REG_SET (IDATA_REG_USES (deps_init_id_data.id), regno);
2448 }
2449
2450 /* Start initializing rhs data. */
2451 static void
2452 deps_init_id_start_rhs (rtx rhs)
2453 {
2454 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2455
2456 /* And there was no sel_deps_reset_to_insn (). */
2457 if (IDATA_LHS (deps_init_id_data.id) != NULL)
2458 {
2459 IDATA_RHS (deps_init_id_data.id) = rhs;
2460 deps_init_id_data.where = DEPS_IN_RHS;
2461 }
2462 }
2463
2464 /* Finish initializing rhs data. */
2465 static void
2466 deps_init_id_finish_rhs (void)
2467 {
2468 gcc_assert (deps_init_id_data.where == DEPS_IN_RHS
2469 || deps_init_id_data.where == DEPS_IN_INSN);
2470 deps_init_id_data.where = DEPS_IN_INSN;
2471 }
2472
2473 /* Finish initializing insn data. */
2474 static void
2475 deps_init_id_finish_insn (void)
2476 {
2477 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2478
2479 if (IDATA_TYPE (deps_init_id_data.id) == SET)
2480 {
2481 rtx lhs = IDATA_LHS (deps_init_id_data.id);
2482 rtx rhs = IDATA_RHS (deps_init_id_data.id);
2483
2484 if (lhs == NULL || rhs == NULL || !lhs_and_rhs_separable_p (lhs, rhs)
2485 || deps_init_id_data.force_use_p)
2486 {
/* This should be a USE, as we don't want to schedule its RHS
   separately.  However, we still want the lhs and rhs recorded
   for the purposes of substitution.  That's why we don't
   simply call downgrade_to_use () here.  */
2491 gcc_assert (IDATA_TYPE (deps_init_id_data.id) == SET);
2492 gcc_assert (!lhs == !rhs);
2493
2494 IDATA_TYPE (deps_init_id_data.id) = USE;
2495 }
2496 }
2497
2498 deps_init_id_data.where = DEPS_IN_NOWHERE;
2499 }
2500
2501 /* This is dependence info used for initializing insn's data. */
2502 static struct sched_deps_info_def deps_init_id_sched_deps_info;
2503
2504 /* This initializes most of the static part of the above structure. */
2505 static const struct sched_deps_info_def const_deps_init_id_sched_deps_info =
2506 {
2507 NULL,
2508
2509 deps_init_id_start_insn,
2510 deps_init_id_finish_insn,
2511 deps_init_id_start_lhs,
2512 deps_init_id_finish_lhs,
2513 deps_init_id_start_rhs,
2514 deps_init_id_finish_rhs,
2515 deps_init_id_note_reg_set,
2516 deps_init_id_note_reg_clobber,
2517 deps_init_id_note_reg_use,
2518 NULL, /* note_mem_dep */
2519 NULL, /* note_dep */
2520
2521 0, /* use_cselib */
2522 0, /* use_deps_list */
2523 0 /* generate_spec_deps */
2524 };
2525
2526 /* Initialize INSN's lhs and rhs in ID. When FORCE_UNIQUE_P is true,
2527 we don't actually need information about lhs and rhs. */
2528 static void
2529 setup_id_lhs_rhs (idata_t id, insn_t insn, bool force_unique_p)
2530 {
2531 rtx pat = PATTERN (insn);
2532
2533 if (NONJUMP_INSN_P (insn)
2534 && GET_CODE (pat) == SET
2535 && !force_unique_p)
2536 {
2537 IDATA_RHS (id) = SET_SRC (pat);
2538 IDATA_LHS (id) = SET_DEST (pat);
2539 }
2540 else
2541 IDATA_LHS (id) = IDATA_RHS (id) = NULL;
2542 }
2543
2544 /* Possibly downgrade INSN to USE. */
2545 static void
2546 maybe_downgrade_id_to_use (idata_t id, insn_t insn)
2547 {
2548 bool must_be_use = false;
2549 unsigned uid = INSN_UID (insn);
2550 df_ref *rec;
2551 rtx lhs = IDATA_LHS (id);
2552 rtx rhs = IDATA_RHS (id);
2553
2554 /* We downgrade only SETs. */
2555 if (IDATA_TYPE (id) != SET)
2556 return;
2557
2558 if (!lhs || !lhs_and_rhs_separable_p (lhs, rhs))
2559 {
2560 IDATA_TYPE (id) = USE;
2561 return;
2562 }
2563
2564 for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++)
2565 {
2566 df_ref def = *rec;
2567
2568 if (DF_REF_INSN (def)
2569 && DF_REF_FLAGS_IS_SET (def, DF_REF_PRE_POST_MODIFY)
2570 && loc_mentioned_in_p (DF_REF_LOC (def), IDATA_RHS (id)))
2571 {
2572 must_be_use = true;
2573 break;
2574 }
2575
2576 #ifdef STACK_REGS
2577 /* Make instructions that set stack registers to be ineligible for
2578 renaming to avoid issues with find_used_regs. */
2579 if (IN_RANGE (DF_REF_REGNO (def), FIRST_STACK_REG, LAST_STACK_REG))
2580 {
2581 must_be_use = true;
2582 break;
2583 }
2584 #endif
2585 }
2586
2587 if (must_be_use)
2588 IDATA_TYPE (id) = USE;
2589 }
2590
2591 /* Setup register sets describing INSN in ID. */
2592 static void
2593 setup_id_reg_sets (idata_t id, insn_t insn)
2594 {
2595 unsigned uid = INSN_UID (insn);
2596 df_ref *rec;
2597 regset tmp = get_clear_regset_from_pool ();
2598
2599 for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++)
2600 {
2601 df_ref def = *rec;
2602 unsigned int regno = DF_REF_REGNO (def);
2603
2604 /* Post modifies are treated like clobbers by sched-deps.c. */
2605 if (DF_REF_FLAGS_IS_SET (def, (DF_REF_MUST_CLOBBER
2606 | DF_REF_PRE_POST_MODIFY)))
2607 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (id), regno);
2608 else if (! DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
2609 {
2610 SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno);
2611
2612 #ifdef STACK_REGS
2613 /* For stack registers, treat writes to them as writes
2614 to the first one to be consistent with sched-deps.c. */
2615 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2616 SET_REGNO_REG_SET (IDATA_REG_SETS (id), FIRST_STACK_REG);
2617 #endif
2618 }
/* Mark special refs that generate a read/write def pair.  */
2620 if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL)
2621 || regno == STACK_POINTER_REGNUM)
2622 bitmap_set_bit (tmp, regno);
2623 }
2624
2625 for (rec = DF_INSN_UID_USES (uid); *rec; rec++)
2626 {
2627 df_ref use = *rec;
2628 unsigned int regno = DF_REF_REGNO (use);
2629
2630 /* When these refs are met for the first time, skip them, as
2631 these uses are just counterparts of some defs. */
2632 if (bitmap_bit_p (tmp, regno))
2633 bitmap_clear_bit (tmp, regno);
2634 else if (! DF_REF_FLAGS_IS_SET (use, DF_REF_CALL_STACK_USAGE))
2635 {
2636 SET_REGNO_REG_SET (IDATA_REG_USES (id), regno);
2637
2638 #ifdef STACK_REGS
2639 /* For stack registers, treat reads from them as reads from
2640 the first one to be consistent with sched-deps.c. */
2641 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2642 SET_REGNO_REG_SET (IDATA_REG_USES (id), FIRST_STACK_REG);
2643 #endif
2644 }
2645 }
2646
2647 return_regset_to_pool (tmp);
2648 }
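
/* An illustrative example for the classification above (assumed insn
   pattern, not from the pass): for an insn like
     (set (mem (pre_dec (reg sp))) (reg r1))
   the pre-modify def of sp is recorded in IDATA_REG_CLOBBERS, the
   matching use of sp is skipped via TMP as the counterpart of that def,
   and the read of r1 lands in IDATA_REG_USES.  */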
2649
2650 /* Initialize instruction data for INSN in ID using DF's data. */
2651 static void
2652 init_id_from_df (idata_t id, insn_t insn, bool force_unique_p)
2653 {
2654 gcc_assert (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL);
2655
2656 setup_id_for_insn (id, insn, force_unique_p);
2657 setup_id_lhs_rhs (id, insn, force_unique_p);
2658
2659 if (INSN_NOP_P (insn))
2660 return;
2661
2662 maybe_downgrade_id_to_use (id, insn);
2663 setup_id_reg_sets (id, insn);
2664 }
2665
2666 /* Initialize instruction data for INSN in ID. */
2667 static void
2668 deps_init_id (idata_t id, insn_t insn, bool force_unique_p)
2669 {
2670 struct deps _dc, *dc = &_dc;
2671
2672 deps_init_id_data.where = DEPS_IN_NOWHERE;
2673 deps_init_id_data.id = id;
2674 deps_init_id_data.force_unique_p = force_unique_p;
2675 deps_init_id_data.force_use_p = false;
2676
2677 init_deps (dc, false);
2678
2679 memcpy (&deps_init_id_sched_deps_info,
2680 &const_deps_init_id_sched_deps_info,
2681 sizeof (deps_init_id_sched_deps_info));
2682
2683 if (spec_info != NULL)
2684 deps_init_id_sched_deps_info.generate_spec_deps = 1;
2685
2686 sched_deps_info = &deps_init_id_sched_deps_info;
2687
2688 deps_analyze_insn (dc, insn);
2689
2690 free_deps (dc);
2691
2692 deps_init_id_data.id = NULL;
2693 }
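
/* A rough sketch of how the two initializers above are chosen
   (illustrative only): when DF data is already available for INSN, the
   cheap DF-based path can fill in ID; otherwise a full dependence
   analysis walk is needed.  */
#if 0
  if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL)
    init_id_from_df (id, insn, force_unique_p);
  else
    deps_init_id (id, insn, force_unique_p);
#endif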
2694
2695 \f
2696
/* Implement hooks for collecting fundamental insn properties, such as
   whether an insn is an ASM or is within a SCHED_GROUP.  */
2699
/* Return true when the "one-time init" data for INSN has not been
   initialized yet, i.e. we are seeing INSN for the first time.  */
2701 static bool
2702 first_time_insn_init (insn_t insn)
2703 {
2704 return INSN_LIVE (insn) == NULL;
2705 }
2706
2707 /* Hash an entry in a transformed_insns hashtable. */
2708 static hashval_t
2709 hash_transformed_insns (const void *p)
2710 {
2711 return VINSN_HASH_RTX (((const struct transformed_insns *) p)->vinsn_old);
2712 }
2713
2714 /* Compare the entries in a transformed_insns hashtable. */
2715 static int
2716 eq_transformed_insns (const void *p, const void *q)
2717 {
2718 rtx i1 = VINSN_INSN_RTX (((const struct transformed_insns *) p)->vinsn_old);
2719 rtx i2 = VINSN_INSN_RTX (((const struct transformed_insns *) q)->vinsn_old);
2720
2721 if (INSN_UID (i1) == INSN_UID (i2))
2722 return 1;
2723 return rtx_equal_p (PATTERN (i1), PATTERN (i2));
2724 }
2725
2726 /* Free an entry in a transformed_insns hashtable. */
2727 static void
2728 free_transformed_insns (void *p)
2729 {
2730 struct transformed_insns *pti = (struct transformed_insns *) p;
2731
2732 vinsn_detach (pti->vinsn_old);
2733 vinsn_detach (pti->vinsn_new);
2734 free (pti);
2735 }
2736
2737 /* Init the s_i_d data for INSN which should be inited just once, when
2738 we first see the insn. */
2739 static void
2740 init_first_time_insn_data (insn_t insn)
2741 {
/* INSN_LIVE should not be allocated yet if this really is the first
   time we init data for INSN.  */
2744 gcc_assert (first_time_insn_init (insn));
2745
2746 /* These are needed for nops too. */
2747 INSN_LIVE (insn) = get_regset_from_pool ();
2748 INSN_LIVE_VALID_P (insn) = false;
2749
2750 if (!INSN_NOP_P (insn))
2751 {
2752 INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL);
2753 INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL);
2754 INSN_TRANSFORMED_INSNS (insn)
2755 = htab_create (16, hash_transformed_insns,
2756 eq_transformed_insns, free_transformed_insns);
2757 init_deps (&INSN_DEPS_CONTEXT (insn), true);
2758 }
2759 }
2760
2761 /* Free almost all above data for INSN that is scheduled already.
2762 Used for extra-large basic blocks. */
2763 void
2764 free_data_for_scheduled_insn (insn_t insn)
2765 {
2766 gcc_assert (! first_time_insn_init (insn));
2767
2768 if (! INSN_ANALYZED_DEPS (insn))
2769 return;
2770
2771 BITMAP_FREE (INSN_ANALYZED_DEPS (insn));
2772 BITMAP_FREE (INSN_FOUND_DEPS (insn));
2773 htab_delete (INSN_TRANSFORMED_INSNS (insn));
2774
2775 /* This is allocated only for bookkeeping insns. */
2776 if (INSN_ORIGINATORS (insn))
2777 BITMAP_FREE (INSN_ORIGINATORS (insn));
2778 free_deps (&INSN_DEPS_CONTEXT (insn));
2779
2780 INSN_ANALYZED_DEPS (insn) = NULL;
2781
/* Clear the readonly flag so that we ICE if something tries to
   recalculate the deps context (we believe that should not happen).  */
2784 (&INSN_DEPS_CONTEXT (insn))->readonly = 0;
2785 }
2786
2787 /* Free the same data as above for INSN. */
2788 static void
2789 free_first_time_insn_data (insn_t insn)
2790 {
2791 gcc_assert (! first_time_insn_init (insn));
2792
2793 free_data_for_scheduled_insn (insn);
2794 return_regset_to_pool (INSN_LIVE (insn));
2795 INSN_LIVE (insn) = NULL;
2796 INSN_LIVE_VALID_P (insn) = false;
2797 }
2798
2799 /* Initialize region-scope data structures for basic blocks. */
2800 static void
2801 init_global_and_expr_for_bb (basic_block bb)
2802 {
2803 if (sel_bb_empty_p (bb))
2804 return;
2805
2806 invalidate_av_set (bb);
2807 }
2808
2809 /* Data for global dependency analysis (to initialize CANT_MOVE and
2810 SCHED_GROUP_P). */
2811 static struct
2812 {
2813 /* Previous insn. */
2814 insn_t prev_insn;
2815 } init_global_data;
2816
2817 /* Determine if INSN is in the sched_group, is an asm or should not be
2818 cloned. After that initialize its expr. */
2819 static void
2820 init_global_and_expr_for_insn (insn_t insn)
2821 {
2822 if (LABEL_P (insn))
2823 return;
2824
2825 if (NOTE_INSN_BASIC_BLOCK_P (insn))
2826 {
2827 init_global_data.prev_insn = NULL_RTX;
2828 return;
2829 }
2830
2831 gcc_assert (INSN_P (insn));
2832
2833 if (SCHED_GROUP_P (insn))
2834 /* Setup a sched_group. */
2835 {
2836 insn_t prev_insn = init_global_data.prev_insn;
2837
2838 if (prev_insn)
2839 INSN_SCHED_NEXT (prev_insn) = insn;
2840
2841 init_global_data.prev_insn = insn;
2842 }
2843 else
2844 init_global_data.prev_insn = NULL_RTX;
2845
2846 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
2847 || asm_noperands (PATTERN (insn)) >= 0)
2848 /* Mark INSN as an asm. */
2849 INSN_ASM_P (insn) = true;
2850
2851 {
2852 bool force_unique_p;
2853 ds_t spec_done_ds;
2854
2855 /* Certain instructions cannot be cloned. */
2856 if (CANT_MOVE (insn)
2857 || INSN_ASM_P (insn)
2858 || SCHED_GROUP_P (insn)
2859 || prologue_epilogue_contains (insn)
2860 /* Exception handling insns are always unique. */
2861 || (flag_non_call_exceptions && can_throw_internal (insn))
/* TRAP_IF, though it has an INSN code, is control_flow_insn_p ().  */
2863 || control_flow_insn_p (insn))
2864 force_unique_p = true;
2865 else
2866 force_unique_p = false;
2867
2868 if (targetm.sched.get_insn_spec_ds)
2869 {
2870 spec_done_ds = targetm.sched.get_insn_spec_ds (insn);
2871 spec_done_ds = ds_get_max_dep_weak (spec_done_ds);
2872 }
2873 else
2874 spec_done_ds = 0;
2875
2876 /* Initialize INSN's expr. */
2877 init_expr (INSN_EXPR (insn), vinsn_create (insn, force_unique_p), 0,
2878 REG_BR_PROB_BASE, INSN_PRIORITY (insn), 0, BLOCK_NUM (insn),
2879 spec_done_ds, 0, 0, NULL, true, false, false, false,
2880 CANT_MOVE (insn));
2881 }
2882
2883 init_first_time_insn_data (insn);
2884 }
2885
2886 /* Scan the region and initialize instruction data for basic blocks BBS. */
2887 void
2888 sel_init_global_and_expr (bb_vec_t bbs)
2889 {
2890 /* ??? It would be nice to implement push / pop scheme for sched_infos. */
2891 const struct sched_scan_info_def ssi =
2892 {
2893 NULL, /* extend_bb */
2894 init_global_and_expr_for_bb, /* init_bb */
2895 extend_insn_data, /* extend_insn */
2896 init_global_and_expr_for_insn /* init_insn */
2897 };
2898
2899 sched_scan (&ssi, bbs, NULL, NULL, NULL);
2900 }
2901
2902 /* Finalize region-scope data structures for basic blocks. */
2903 static void
2904 finish_global_and_expr_for_bb (basic_block bb)
2905 {
2906 av_set_clear (&BB_AV_SET (bb));
2907 BB_AV_LEVEL (bb) = 0;
2908 }
2909
2910 /* Finalize INSN's data. */
2911 static void
2912 finish_global_and_expr_insn (insn_t insn)
2913 {
2914 if (LABEL_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn))
2915 return;
2916
2917 gcc_assert (INSN_P (insn));
2918
2919 if (INSN_LUID (insn) > 0)
2920 {
2921 free_first_time_insn_data (insn);
2922 INSN_WS_LEVEL (insn) = 0;
2923 CANT_MOVE (insn) = 0;
2924
/* We can no longer assert this, as vinsns of this insn could
   easily be live in other insns' caches.  This should be changed to
   a counter-like approach among all vinsns.  */
2928 gcc_assert (true || VINSN_COUNT (INSN_VINSN (insn)) == 1);
2929 clear_expr (INSN_EXPR (insn));
2930 }
2931 }
2932
2933 /* Finalize per instruction data for the whole region. */
2934 void
2935 sel_finish_global_and_expr (void)
2936 {
2937 {
2938 bb_vec_t bbs;
2939 int i;
2940
2941 bbs = VEC_alloc (basic_block, heap, current_nr_blocks);
2942
2943 for (i = 0; i < current_nr_blocks; i++)
2944 VEC_quick_push (basic_block, bbs, BASIC_BLOCK (BB_TO_BLOCK (i)));
2945
2946 /* Clear AV_SETs and INSN_EXPRs. */
2947 {
2948 const struct sched_scan_info_def ssi =
2949 {
2950 NULL, /* extend_bb */
2951 finish_global_and_expr_for_bb, /* init_bb */
2952 NULL, /* extend_insn */
2953 finish_global_and_expr_insn /* init_insn */
2954 };
2955
2956 sched_scan (&ssi, bbs, NULL, NULL, NULL);
2957 }
2958
2959 VEC_free (basic_block, heap, bbs);
2960 }
2961
2962 finish_insns ();
2963 }
2964 \f
2965
/* In the below hooks, we merely calculate whether or not a dependence
   exists, and in what part of the insn.  However, we will need more data
   when we start caching dependence requests.  */
2969
2970 /* Container to hold information for dependency analysis. */
2971 static struct
2972 {
2973 deps_t dc;
2974
2975 /* A variable to track which part of rtx we are scanning in
2976 sched-deps.c: sched_analyze_insn (). */
2977 deps_where_t where;
2978
2979 /* Current producer. */
2980 insn_t pro;
2981
2982 /* Current consumer. */
2983 vinsn_t con;
2984
/* If HAS_DEP_P[DEPS_IN_X] is true, then X has a dependence.
   X is from { INSN, LHS, RHS }.  */
2987 ds_t has_dep_p[DEPS_IN_NOWHERE];
2988 } has_dependence_data;
2989
2990 /* Start analyzing dependencies of INSN. */
2991 static void
2992 has_dependence_start_insn (insn_t insn ATTRIBUTE_UNUSED)
2993 {
2994 gcc_assert (has_dependence_data.where == DEPS_IN_NOWHERE);
2995
2996 has_dependence_data.where = DEPS_IN_INSN;
2997 }
2998
2999 /* Finish analyzing dependencies of an insn. */
3000 static void
3001 has_dependence_finish_insn (void)
3002 {
3003 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3004
3005 has_dependence_data.where = DEPS_IN_NOWHERE;
3006 }
3007
3008 /* Start analyzing dependencies of LHS. */
3009 static void
3010 has_dependence_start_lhs (rtx lhs ATTRIBUTE_UNUSED)
3011 {
3012 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3013
3014 if (VINSN_LHS (has_dependence_data.con) != NULL)
3015 has_dependence_data.where = DEPS_IN_LHS;
3016 }
3017
3018 /* Finish analyzing dependencies of an lhs. */
3019 static void
3020 has_dependence_finish_lhs (void)
3021 {
3022 has_dependence_data.where = DEPS_IN_INSN;
3023 }
3024
3025 /* Start analyzing dependencies of RHS. */
3026 static void
3027 has_dependence_start_rhs (rtx rhs ATTRIBUTE_UNUSED)
3028 {
3029 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3030
3031 if (VINSN_RHS (has_dependence_data.con) != NULL)
3032 has_dependence_data.where = DEPS_IN_RHS;
3033 }
3034
/* Finish analyzing dependencies of an rhs.  */
3036 static void
3037 has_dependence_finish_rhs (void)
3038 {
3039 gcc_assert (has_dependence_data.where == DEPS_IN_RHS
3040 || has_dependence_data.where == DEPS_IN_INSN);
3041
3042 has_dependence_data.where = DEPS_IN_INSN;
3043 }
3044
3045 /* Note a set of REGNO. */
3046 static void
3047 has_dependence_note_reg_set (int regno)
3048 {
3049 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3050
3051 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3052 VINSN_INSN_RTX
3053 (has_dependence_data.con)))
3054 {
3055 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3056
3057 if (reg_last->sets != NULL
3058 || reg_last->clobbers != NULL)
3059 *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3060
3061 if (reg_last->uses)
3062 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3063 }
3064 }
3065
3066 /* Note a clobber of REGNO. */
3067 static void
3068 has_dependence_note_reg_clobber (int regno)
3069 {
3070 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3071
3072 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3073 VINSN_INSN_RTX
3074 (has_dependence_data.con)))
3075 {
3076 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3077
3078 if (reg_last->sets)
3079 *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3080
3081 if (reg_last->uses)
3082 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3083 }
3084 }
3085
3086 /* Note a use of REGNO. */
3087 static void
3088 has_dependence_note_reg_use (int regno)
3089 {
3090 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3091
3092 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3093 VINSN_INSN_RTX
3094 (has_dependence_data.con)))
3095 {
3096 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3097
3098 if (reg_last->sets)
3099 *dsp = (*dsp & ~SPECULATIVE) | DEP_TRUE;
3100
3101 if (reg_last->clobbers)
3102 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3103
3104 /* Handle BE_IN_SPEC. */
3105 if (reg_last->uses)
3106 {
3107 ds_t pro_spec_checked_ds;
3108
3109 pro_spec_checked_ds = INSN_SPEC_CHECKED_DS (has_dependence_data.pro);
3110 pro_spec_checked_ds = ds_get_max_dep_weak (pro_spec_checked_ds);
3111
3112 if (pro_spec_checked_ds != 0)
3113 /* Merge BE_IN_SPEC bits into *DSP. */
3114 *dsp = ds_full_merge (*dsp, pro_spec_checked_ds,
3115 NULL_RTX, NULL_RTX);
3116 }
3117 }
3118 }
3119
3120 /* Note a memory dependence. */
3121 static void
3122 has_dependence_note_mem_dep (rtx mem ATTRIBUTE_UNUSED,
3123 rtx pending_mem ATTRIBUTE_UNUSED,
3124 insn_t pending_insn ATTRIBUTE_UNUSED,
3125 ds_t ds ATTRIBUTE_UNUSED)
3126 {
3127 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3128 VINSN_INSN_RTX (has_dependence_data.con)))
3129 {
3130 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3131
3132 *dsp = ds_full_merge (ds, *dsp, pending_mem, mem);
3133 }
3134 }
3135
3136 /* Note a dependence. */
3137 static void
3138 has_dependence_note_dep (insn_t pro ATTRIBUTE_UNUSED,
3139 ds_t ds ATTRIBUTE_UNUSED)
3140 {
3141 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3142 VINSN_INSN_RTX (has_dependence_data.con)))
3143 {
3144 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3145
3146 *dsp = ds_full_merge (ds, *dsp, NULL_RTX, NULL_RTX);
3147 }
3148 }
3149
3150 /* Mark the insn as having a hard dependence that prevents speculation. */
3151 void
3152 sel_mark_hard_insn (rtx insn)
3153 {
3154 int i;
3155
3156 /* Only work when we're in has_dependence_p mode.
3157 ??? This is a hack, this should actually be a hook. */
3158 if (!has_dependence_data.dc || !has_dependence_data.pro)
3159 return;
3160
3161 gcc_assert (insn == VINSN_INSN_RTX (has_dependence_data.con));
3162 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3163
3164 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3165 has_dependence_data.has_dep_p[i] &= ~SPECULATIVE;
3166 }
3167
3168 /* This structure holds the hooks for the dependency analysis used when
3169 actually processing dependencies in the scheduler. */
3170 static struct sched_deps_info_def has_dependence_sched_deps_info;
3171
3172 /* This initializes most of the fields of the above structure. */
3173 static const struct sched_deps_info_def const_has_dependence_sched_deps_info =
3174 {
3175 NULL,
3176
3177 has_dependence_start_insn,
3178 has_dependence_finish_insn,
3179 has_dependence_start_lhs,
3180 has_dependence_finish_lhs,
3181 has_dependence_start_rhs,
3182 has_dependence_finish_rhs,
3183 has_dependence_note_reg_set,
3184 has_dependence_note_reg_clobber,
3185 has_dependence_note_reg_use,
3186 has_dependence_note_mem_dep,
3187 has_dependence_note_dep,
3188
3189 0, /* use_cselib */
3190 0, /* use_deps_list */
3191 0 /* generate_spec_deps */
3192 };
3193
3194 /* Initialize has_dependence_sched_deps_info with extra spec field. */
3195 static void
3196 setup_has_dependence_sched_deps_info (void)
3197 {
3198 memcpy (&has_dependence_sched_deps_info,
3199 &const_has_dependence_sched_deps_info,
3200 sizeof (has_dependence_sched_deps_info));
3201
3202 if (spec_info != NULL)
3203 has_dependence_sched_deps_info.generate_spec_deps = 1;
3204
3205 sched_deps_info = &has_dependence_sched_deps_info;
3206 }
3207
3208 /* Remove all dependences found and recorded in has_dependence_data array. */
3209 void
3210 sel_clear_has_dependence (void)
3211 {
3212 int i;
3213
3214 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3215 has_dependence_data.has_dep_p[i] = 0;
3216 }
3217
/* Return nonzero if EXPR is dependent upon PRED.  Return the pointer
   to the dependence information array in HAS_DEP_PP.  */
3220 ds_t
3221 has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
3222 {
3223 int i;
3224 ds_t ds;
3225 struct deps *dc;
3226
3227 if (INSN_SIMPLEJUMP_P (pred))
3228 /* Unconditional jump is just a transfer of control flow.
3229 Ignore it. */
3230 return false;
3231
3232 dc = &INSN_DEPS_CONTEXT (pred);
3233
3234 /* We init this field lazily. */
3235 if (dc->reg_last == NULL)
3236 init_deps_reg_last (dc);
3237
3238 if (!dc->readonly)
3239 {
3240 has_dependence_data.pro = NULL;
3241 /* Initialize empty dep context with information about PRED. */
3242 advance_deps_context (dc, pred);
3243 dc->readonly = 1;
3244 }
3245
3246 has_dependence_data.where = DEPS_IN_NOWHERE;
3247 has_dependence_data.pro = pred;
3248 has_dependence_data.con = EXPR_VINSN (expr);
3249 has_dependence_data.dc = dc;
3250
3251 sel_clear_has_dependence ();
3252
3253 /* Now catch all dependencies that would be generated between PRED and
3254 INSN. */
3255 setup_has_dependence_sched_deps_info ();
3256 deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3257 has_dependence_data.dc = NULL;
3258
3259 /* When a barrier was found, set DEPS_IN_INSN bits. */
3260 if (dc->last_reg_pending_barrier == TRUE_BARRIER)
3261 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_TRUE;
3262 else if (dc->last_reg_pending_barrier == MOVE_BARRIER)
3263 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3264
3265 /* Do not allow stores to memory to move through checks. Currently
3266 we don't move this to sched-deps.c as the check doesn't have
3267 obvious places to which this dependence can be attached.
FIXME: this should go to a hook.  */
3269 if (EXPR_LHS (expr)
3270 && MEM_P (EXPR_LHS (expr))
3271 && sel_insn_is_speculation_check (pred))
3272 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3273
3274 *has_dep_pp = has_dependence_data.has_dep_p;
3275 ds = 0;
3276 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3277 ds = ds_full_merge (ds, has_dependence_data.has_dep_p[i],
3278 NULL_RTX, NULL_RTX);
3279
3280 return ds;
3281 }
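
/* A caller-side sketch for the predicate above (illustrative only; EXPR
   and PRED are assumed valid): the returned ds_t is nonzero when any
   dependence exists, and HAS_DEP_PP allows inspecting in which part of
   the insn it was found.  */
#if 0
  ds_t *has_dep_p;
  ds_t ds = has_dependence_p (expr, pred, &has_dep_p);

  if (ds != 0 && (has_dep_p[DEPS_IN_RHS] & DEP_TRUE))
    {
      /* A true dependence was found in the rhs; a transformation
         such as substitution could be attempted here.  */
    }
#endif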
3282 \f
3283
3284 /* Dependence hooks implementation that checks dependence latency constraints
3285 on the insns being scheduled. The entry point for these routines is
3286 tick_check_p predicate. */
3287
3288 static struct
3289 {
3290 /* An expr we are currently checking. */
3291 expr_t expr;
3292
3293 /* A minimal cycle for its scheduling. */
3294 int cycle;
3295
3296 /* Whether we have seen a true dependence while checking. */
3297 bool seen_true_dep_p;
3298 } tick_check_data;
3299
/* Update the minimal scheduling cycle of the insn being checked, given that
   it depends on PRO_INSN with status DS and weight DW.  */
3302 static void
3303 tick_check_dep_with_dw (insn_t pro_insn, ds_t ds, dw_t dw)
3304 {
3305 expr_t con_expr = tick_check_data.expr;
3306 insn_t con_insn = EXPR_INSN_RTX (con_expr);
3307
3308 if (con_insn != pro_insn)
3309 {
3310 enum reg_note dt;
3311 int tick;
3312
3313 if (/* PROducer was removed from above due to pipelining. */
3314 !INSN_IN_STREAM_P (pro_insn)
3315 /* Or PROducer was originally on the next iteration regarding the
3316 CONsumer. */
3317 || (INSN_SCHED_TIMES (pro_insn)
3318 - EXPR_SCHED_TIMES (con_expr)) > 1)
3319 /* Don't count this dependence. */
3320 return;
3321
3322 dt = ds_to_dt (ds);
3323 if (dt == REG_DEP_TRUE)
3324 tick_check_data.seen_true_dep_p = true;
3325
3326 gcc_assert (INSN_SCHED_CYCLE (pro_insn) > 0);
3327
3328 {
3329 dep_def _dep, *dep = &_dep;
3330
3331 init_dep (dep, pro_insn, con_insn, dt);
3332
3333 tick = INSN_SCHED_CYCLE (pro_insn) + dep_cost_1 (dep, dw);
3334 }
3335
3336 /* When there are several kinds of dependencies between pro and con,
3337 only REG_DEP_TRUE should be taken into account. */
3338 if (tick > tick_check_data.cycle
3339 && (dt == REG_DEP_TRUE || !tick_check_data.seen_true_dep_p))
3340 tick_check_data.cycle = tick;
3341 }
3342 }
3343
3344 /* An implementation of note_dep hook. */
3345 static void
3346 tick_check_note_dep (insn_t pro, ds_t ds)
3347 {
3348 tick_check_dep_with_dw (pro, ds, 0);
3349 }
3350
3351 /* An implementation of note_mem_dep hook. */
3352 static void
3353 tick_check_note_mem_dep (rtx mem1, rtx mem2, insn_t pro, ds_t ds)
3354 {
3355 dw_t dw;
3356
3357 dw = (ds_to_dt (ds) == REG_DEP_TRUE
3358 ? estimate_dep_weak (mem1, mem2)
3359 : 0);
3360
3361 tick_check_dep_with_dw (pro, ds, dw);
3362 }
3363
3364 /* This structure contains hooks for dependence analysis used when determining
3365 whether an insn is ready for scheduling. */
3366 static struct sched_deps_info_def tick_check_sched_deps_info =
3367 {
3368 NULL,
3369
3370 NULL,
3371 NULL,
3372 NULL,
3373 NULL,
3374 NULL,
3375 NULL,
3376 haifa_note_reg_set,
3377 haifa_note_reg_clobber,
3378 haifa_note_reg_use,
3379 tick_check_note_mem_dep,
3380 tick_check_note_dep,
3381
3382 0, 0, 0
3383 };
3384
3385 /* Estimate number of cycles from the current cycle of FENCE until EXPR can be
3386 scheduled. Return 0 if all data from producers in DC is ready. */
3387 int
3388 tick_check_p (expr_t expr, deps_t dc, fence_t fence)
3389 {
3390 int cycles_left;
3391 /* Initialize variables. */
3392 tick_check_data.expr = expr;
3393 tick_check_data.cycle = 0;
3394 tick_check_data.seen_true_dep_p = false;
3395 sched_deps_info = &tick_check_sched_deps_info;
3396
3397 gcc_assert (!dc->readonly);
3398 dc->readonly = 1;
3399 deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3400 dc->readonly = 0;
3401
3402 cycles_left = tick_check_data.cycle - FENCE_CYCLE (fence);
3403
3404 return cycles_left >= 0 ? cycles_left : 0;
3405 }
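
/* A sketch of how the estimate above might be used by a caller
   (illustrative only): a nonzero result means EXPR's producers are not
   ready yet, and issuing EXPR should be delayed by that many cycles.  */
#if 0
  int stall = tick_check_p (expr, dc, fence);

  if (stall > 0)
    {
      /* EXPR is not ready on FENCE's current cycle; retry after
         STALL cycles.  */
    }
#endif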
3406 \f
3407
3408 /* Functions to work with insns. */
3409
3410 /* Returns true if LHS of INSN is the same as DEST of an insn
3411 being moved. */
3412 bool
3413 lhs_of_insn_equals_to_dest_p (insn_t insn, rtx dest)
3414 {
3415 rtx lhs = INSN_LHS (insn);
3416
3417 if (lhs == NULL || dest == NULL)
3418 return false;
3419
3420 return rtx_equal_p (lhs, dest);
3421 }
3422
3423 /* Return s_i_d entry of INSN. Callable from debugger. */
3424 sel_insn_data_def
3425 insn_sid (insn_t insn)
3426 {
3427 return *SID (insn);
3428 }
3429
3430 /* True when INSN is a speculative check. We can tell this by looking
3431 at the data structures of the selective scheduler, not by examining
3432 the pattern. */
3433 bool
3434 sel_insn_is_speculation_check (rtx insn)
3435 {
3436 return s_i_d && !! INSN_SPEC_CHECKED_DS (insn);
3437 }
3438
3439 /* Extracts machine mode MODE and destination location DST_LOC
3440 for given INSN. */
3441 void
3442 get_dest_and_mode (rtx insn, rtx *dst_loc, enum machine_mode *mode)
3443 {
3444 rtx pat = PATTERN (insn);
3445
3446 gcc_assert (dst_loc);
3447 gcc_assert (GET_CODE (pat) == SET);
3448
3449 *dst_loc = SET_DEST (pat);
3450
3451 gcc_assert (*dst_loc);
3452 gcc_assert (MEM_P (*dst_loc) || REG_P (*dst_loc));
3453
3454 if (mode)
3455 *mode = GET_MODE (*dst_loc);
3456 }
3457
3458 /* Returns true when moving through JUMP will result in bookkeeping
3459 creation. */
3460 bool
3461 bookkeeping_can_be_created_if_moved_through_p (insn_t jump)
3462 {
3463 insn_t succ;
3464 succ_iterator si;
3465
3466 FOR_EACH_SUCC (succ, si, jump)
3467 if (sel_num_cfg_preds_gt_1 (succ))
3468 return true;
3469
3470 return false;
3471 }
3472
3473 /* Return 'true' if INSN is the only one in its basic block. */
3474 static bool
3475 insn_is_the_only_one_in_bb_p (insn_t insn)
3476 {
3477 return sel_bb_head_p (insn) && sel_bb_end_p (insn);
3478 }
3479
3480 #ifdef ENABLE_CHECKING
3481 /* Check that the region we're scheduling still has at most one
3482 backedge. */
3483 static void
3484 verify_backedges (void)
3485 {
3486 if (pipelining_p)
3487 {
3488 int i, n = 0;
3489 edge e;
3490 edge_iterator ei;
3491
3492 for (i = 0; i < current_nr_blocks; i++)
3493 FOR_EACH_EDGE (e, ei, BASIC_BLOCK (BB_TO_BLOCK (i))->succs)
3494 if (in_current_region_p (e->dest)
3495 && BLOCK_TO_BB (e->dest->index) < i)
3496 n++;
3497
3498 gcc_assert (n <= 1);
3499 }
3500 }
3501 #endif
3502 \f
3503
3504 /* Functions to work with control flow. */
3505
3506 /* Recompute BLOCK_TO_BB and BB_FOR_BLOCK for current region so that blocks
3507 are sorted in topological order (it might have been invalidated by
3508 redirecting an edge). */
3509 static void
3510 sel_recompute_toporder (void)
3511 {
3512 int i, n, rgn;
3513 int *postorder, n_blocks;
3514
3515 postorder = XALLOCAVEC (int, n_basic_blocks);
3516 n_blocks = post_order_compute (postorder, false, false);
3517
3518 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
3519 for (n = 0, i = n_blocks - 1; i >= 0; i--)
3520 if (CONTAINING_RGN (postorder[i]) == rgn)
3521 {
3522 BLOCK_TO_BB (postorder[i]) = n;
3523 BB_TO_BLOCK (n) = postorder[i];
3524 n++;
3525 }
3526
/* Assert that we updated info for all blocks.  We may miss some blocks if
   this function is called when redirecting an edge has made a block
   unreachable, but that block has not been deleted yet.  */
3530 gcc_assert (n == RGN_NR_BLOCKS (rgn));
3531 }
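
/* A small worked example for the recomputation above (illustrative
   only): reading the region's blocks out of POSTORDER back to front
   yields a reverse postorder, i.e. a topological order ignoring back
   edges; if that order is B0, B1, B3, then BLOCK_TO_BB maps B0 -> 0,
   B1 -> 1, B3 -> 2, and BB_TO_BLOCK is the inverse mapping.  */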
3532
3533 /* Tidy the possibly empty block BB. */
3534 static bool
3535 maybe_tidy_empty_bb (basic_block bb, bool recompute_toporder_p)
3536 {
3537 basic_block succ_bb, pred_bb;
3538 edge e;
3539 edge_iterator ei;
3540 bool rescan_p;
3541
/* Keep an empty bb only if it immediately precedes EXIT and has an
   incoming non-fallthrough edge, or if it has no predecessors or
   successors.  Otherwise remove it.  */
3545 if (!sel_bb_empty_p (bb)
3546 || (single_succ_p (bb)
3547 && single_succ (bb) == EXIT_BLOCK_PTR
3548 && (!single_pred_p (bb)
3549 || !(single_pred_edge (bb)->flags & EDGE_FALLTHRU)))
3550 || EDGE_COUNT (bb->preds) == 0
3551 || EDGE_COUNT (bb->succs) == 0)
3552 return false;
3553
3554 /* Do not attempt to redirect complex edges. */
3555 FOR_EACH_EDGE (e, ei, bb->preds)
3556 if (e->flags & EDGE_COMPLEX)
3557 return false;
3558
3559 free_data_sets (bb);
3560
/* Do not delete BB if it has more than one successor.
   That can occur when we are moving a jump.  */
3563 if (!single_succ_p (bb))
3564 {
3565 gcc_assert (can_merge_blocks_p (bb->prev_bb, bb));
3566 sel_merge_blocks (bb->prev_bb, bb);
3567 return true;
3568 }
3569
3570 succ_bb = single_succ (bb);
3571 rescan_p = true;
3572 pred_bb = NULL;
3573
3574 /* Redirect all non-fallthru edges to the next bb. */
3575 while (rescan_p)
3576 {
3577 rescan_p = false;
3578
3579 FOR_EACH_EDGE (e, ei, bb->preds)
3580 {
3581 pred_bb = e->src;
3582
3583 if (!(e->flags & EDGE_FALLTHRU))
3584 {
3585 recompute_toporder_p |= sel_redirect_edge_and_branch (e, succ_bb);
3586 rescan_p = true;
3587 break;
3588 }
3589 }
3590 }
3591
/* If possible, merge BB with its predecessor.  */
3593 if (can_merge_blocks_p (bb->prev_bb, bb))
3594 sel_merge_blocks (bb->prev_bb, bb);
3595 else
/* Otherwise this is a block without a fallthru predecessor.
   Just delete it.  */
3598 {
3599 gcc_assert (pred_bb != NULL);
3600
3601 if (in_current_region_p (pred_bb))
3602 move_bb_info (pred_bb, bb);
3603 remove_empty_bb (bb, true);
3604 }
3605
3606 if (recompute_toporder_p)
3607 sel_recompute_toporder ();
3608
3609 #ifdef ENABLE_CHECKING
3610 verify_backedges ();
3611 #endif
3612
3613 return true;
3614 }
3615
3616 /* Tidy the control flow after we have removed original insn from
3617 XBB. Return true if we have removed some blocks. When FULL_TIDYING
3618 is true, also try to optimize control flow on non-empty blocks. */
3619 bool
3620 tidy_control_flow (basic_block xbb, bool full_tidying)
3621 {
3622 bool changed = true;
3623 insn_t first, last;
3624
3625 /* First check whether XBB is empty. */
3626 changed = maybe_tidy_empty_bb (xbb, false);
3627 if (changed || !full_tidying)
3628 return changed;
3629
/* Check whether an unnecessary jump was left after removing the insn.  */
3631 if (jump_leads_only_to_bb_p (BB_END (xbb), xbb->next_bb)
3632 && INSN_SCHED_TIMES (BB_END (xbb)) == 0
3633 && !IN_CURRENT_FENCE_P (BB_END (xbb)))
3634 {
3635 if (sel_remove_insn (BB_END (xbb), false, false))
3636 return true;
3637 tidy_fallthru_edge (EDGE_SUCC (xbb, 0));
3638 }
3639
3640 first = sel_bb_head (xbb);
3641 last = sel_bb_end (xbb);
3642 if (MAY_HAVE_DEBUG_INSNS)
3643 {
3644 if (first != last && DEBUG_INSN_P (first))
3645 do
3646 first = NEXT_INSN (first);
3647 while (first != last && (DEBUG_INSN_P (first) || NOTE_P (first)));
3648
3649 if (first != last && DEBUG_INSN_P (last))
3650 do
3651 last = PREV_INSN (last);
3652 while (first != last && (DEBUG_INSN_P (last) || NOTE_P (last)));
3653 }
/* Check whether, after removing INSN from the stream, an unnecessary
   jump is left in the previous basic block leading to the next one.
   If so, remove that jump and redirect the edge to the current
   basic block (where INSN was before deletion).  This way, when the
   NOP is later deleted together with its basic block, we will not
   end up with a jump to the next instruction, which can be harmful.  */
3661 if (first == last
3662 && !sel_bb_empty_p (xbb)
3663 && INSN_NOP_P (last)
3664 /* Flow goes fallthru from current block to the next. */
3665 && EDGE_COUNT (xbb->succs) == 1
3666 && (EDGE_SUCC (xbb, 0)->flags & EDGE_FALLTHRU)
3667 /* When successor is an EXIT block, it may not be the next block. */
3668 && single_succ (xbb) != EXIT_BLOCK_PTR
/* And the unconditional jump in the previous basic block leads to
   XBB's next basic block and can be safely removed.  */
3671 && in_current_region_p (xbb->prev_bb)
3672 && jump_leads_only_to_bb_p (BB_END (xbb->prev_bb), xbb->next_bb)
3673 && INSN_SCHED_TIMES (BB_END (xbb->prev_bb)) == 0
3674 /* Also this jump is not at the scheduling boundary. */
3675 && !IN_CURRENT_FENCE_P (BB_END (xbb->prev_bb)))
3676 {
3677 bool recompute_toporder_p;
3678 /* Clear data structures of jump - jump itself will be removed
3679 by sel_redirect_edge_and_branch. */
3680 clear_expr (INSN_EXPR (BB_END (xbb->prev_bb)));
3681 recompute_toporder_p
3682 = sel_redirect_edge_and_branch (EDGE_SUCC (xbb->prev_bb, 0), xbb);
3683
3684 gcc_assert (EDGE_SUCC (xbb->prev_bb, 0)->flags & EDGE_FALLTHRU);
3685
/* It can turn out that, after removing the unused jump, the basic
   block that contained it becomes empty too.  In such a case,
   remove that block as well.  */
3689 if (sel_bb_empty_p (xbb->prev_bb))
3690 changed = maybe_tidy_empty_bb (xbb->prev_bb, recompute_toporder_p);
3691 else if (recompute_toporder_p)
3692 sel_recompute_toporder ();
3693 }
3694
3695 return changed;
3696 }
3697
3698 /* Purge meaningless empty blocks in the middle of a region. */
3699 void
3700 purge_empty_blocks (void)
3701 {
3702 /* Do not attempt to delete preheader. */
3703 int i = sel_is_loop_preheader_p (BASIC_BLOCK (BB_TO_BLOCK (0))) ? 1 : 0;
3704
3705 while (i < current_nr_blocks)
3706 {
3707 basic_block b = BASIC_BLOCK (BB_TO_BLOCK (i));
3708
3709 if (maybe_tidy_empty_bb (b, false))
3710 continue;
3711
3712 i++;
3713 }
3714 }
3715
/* Rip INSN off the insn stream.  When ONLY_DISCONNECT is true,
   do not delete the insn's data, because it will be re-emitted later.
   Return true if we have removed some blocks afterwards.  */
3719 bool
3720 sel_remove_insn (insn_t insn, bool only_disconnect, bool full_tidying)
3721 {
3722 basic_block bb = BLOCK_FOR_INSN (insn);
3723
3724 gcc_assert (INSN_IN_STREAM_P (insn));
3725
3726 if (DEBUG_INSN_P (insn) && BB_AV_SET_VALID_P (bb))
3727 {
3728 expr_t expr;
3729 av_set_iterator i;
3730
/* When we remove a debug insn that is the head of a BB, it remains
   in the AV_SET of the block, but it shouldn't.  */
3733 FOR_EACH_EXPR_1 (expr, i, &BB_AV_SET (bb))
3734 if (EXPR_INSN_RTX (expr) == insn)
3735 {
3736 av_set_iter_remove (&i);
3737 break;
3738 }
3739 }
3740
3741 if (only_disconnect)
3742 {
3743 insn_t prev = PREV_INSN (insn);
3744 insn_t next = NEXT_INSN (insn);
3745 basic_block bb = BLOCK_FOR_INSN (insn);
3746
3747 NEXT_INSN (prev) = next;
3748 PREV_INSN (next) = prev;
3749
3750 if (BB_HEAD (bb) == insn)
3751 {
3752 gcc_assert (BLOCK_FOR_INSN (prev) == bb);
3753 BB_HEAD (bb) = prev;
3754 }
3755 if (BB_END (bb) == insn)
3756 BB_END (bb) = prev;
3757 }
3758 else
3759 {
3760 remove_insn (insn);
3761 clear_expr (INSN_EXPR (insn));
3762 }
3763
/* It is necessary to null these fields before calling add_insn ().  */
3765 PREV_INSN (insn) = NULL_RTX;
3766 NEXT_INSN (insn) = NULL_RTX;
3767
3768 return tidy_control_flow (bb, full_tidying);
3769 }
3770
/* Estimate the number of insns in BB.  */
3772 static int
3773 sel_estimate_number_of_insns (basic_block bb)
3774 {
3775 int res = 0;
3776 insn_t insn = NEXT_INSN (BB_HEAD (bb)), next_tail = NEXT_INSN (BB_END (bb));
3777
3778 for (; insn != next_tail; insn = NEXT_INSN (insn))
3779 if (NONDEBUG_INSN_P (insn))
3780 res++;
3781
3782 return res;
3783 }
3784
3785 /* We don't need separate luids for notes or labels. */
3786 static int
3787 sel_luid_for_non_insn (rtx x)
3788 {
3789 gcc_assert (NOTE_P (x) || LABEL_P (x));
3790
3791 return -1;
3792 }
3793
3794 /* Return seqno of the only predecessor of INSN. */
3795 static int
3796 get_seqno_of_a_pred (insn_t insn)
3797 {
3798 int seqno;
3799
3800 gcc_assert (INSN_SIMPLEJUMP_P (insn));
3801
3802 if (!sel_bb_head_p (insn))
3803 seqno = INSN_SEQNO (PREV_INSN (insn));
3804 else
3805 {
3806 basic_block bb = BLOCK_FOR_INSN (insn);
3807
3808 if (single_pred_p (bb)
3809 && !in_current_region_p (single_pred (bb)))
3810 {
3811 /* We can have preds outside a region when splitting edges
3812 for pipelining of an outer loop. Use succ instead.
3813 There should be only one of them. */
3814 insn_t succ = NULL;
3815 succ_iterator si;
3816 bool first = true;
3817
3818 gcc_assert (flag_sel_sched_pipelining_outer_loops
3819 && current_loop_nest);
3820 FOR_EACH_SUCC_1 (succ, si, insn,
3821 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
3822 {
3823 gcc_assert (first);
3824 first = false;
3825 }
3826
3827 gcc_assert (succ != NULL);
3828 seqno = INSN_SEQNO (succ);
3829 }
3830 else
3831 {
3832 insn_t *preds;
3833 int n;
3834
3835 cfg_preds (BLOCK_FOR_INSN (insn), &preds, &n);
3836 gcc_assert (n == 1);
3837
3838 seqno = INSN_SEQNO (preds[0]);
3839
3840 free (preds);
3841 }
3842 }
3843
3844 return seqno;
3845 }
3846
3847 /* Find the proper seqno for inserting at INSN. Returns -1 if no predecessors
3848 with positive seqno exist. */
3849 int
3850 get_seqno_by_preds (rtx insn)
3851 {
3852 basic_block bb = BLOCK_FOR_INSN (insn);
3853 rtx tmp = insn, head = BB_HEAD (bb);
3854 insn_t *preds;
3855 int n, i, seqno;
3856
3857 while (tmp != head)
3858 if (INSN_P (tmp))
3859 return INSN_SEQNO (tmp);
3860 else
3861 tmp = PREV_INSN (tmp);
3862
3863 cfg_preds (bb, &preds, &n);
3864 for (i = 0, seqno = -1; i < n; i++)
3865 seqno = MAX (seqno, INSN_SEQNO (preds[i]));
3866
3867 return seqno;
3868 }
3869
3870 \f
3871
3872 /* Extend pass-scope data structures for basic blocks. */
3873 void
3874 sel_extend_global_bb_info (void)
3875 {
3876 VEC_safe_grow_cleared (sel_global_bb_info_def, heap, sel_global_bb_info,
3877 last_basic_block);
3878 }
3879
3880 /* Extend region-scope data structures for basic blocks. */
3881 static void
3882 extend_region_bb_info (void)
3883 {
3884 VEC_safe_grow_cleared (sel_region_bb_info_def, heap, sel_region_bb_info,
3885 last_basic_block);
3886 }
3887
3888 /* Extend all data structures to fit for all basic blocks. */
3889 static void
3890 extend_bb_info (void)
3891 {
3892 sel_extend_global_bb_info ();
3893 extend_region_bb_info ();
3894 }
3895
3896 /* Finalize pass-scope data structures for basic blocks. */
3897 void
3898 sel_finish_global_bb_info (void)
3899 {
3900 VEC_free (sel_global_bb_info_def, heap, sel_global_bb_info);
3901 }
3902
3903 /* Finalize region-scope data structures for basic blocks. */
3904 static void
3905 finish_region_bb_info (void)
3906 {
3907 VEC_free (sel_region_bb_info_def, heap, sel_region_bb_info);
3908 }
3909 \f
3910
3911 /* Data for each insn in current region. */
3912 VEC (sel_insn_data_def, heap) *s_i_d = NULL;
3913
3914 /* A vector for the insns we've emitted. */
3915 static insn_vec_t new_insns = NULL;
3916
3917 /* Extend data structures for insns from current region. */
3918 static void
3919 extend_insn_data (void)
3920 {
3921 int reserve;
3922
3923 sched_extend_target ();
3924 sched_deps_init (false);
3925
3926 /* Extend data structures for insns from current region. */
3927 reserve = (sched_max_luid + 1
3928 - VEC_length (sel_insn_data_def, s_i_d));
3929 if (reserve > 0
3930 && ! VEC_space (sel_insn_data_def, s_i_d, reserve))
3931 {
3932 int size;
3933
3934 if (sched_max_luid / 2 > 1024)
3935 size = sched_max_luid + 1024;
3936 else
3937 size = 3 * sched_max_luid / 2;
3938
3939
3940 VEC_safe_grow_cleared (sel_insn_data_def, heap, s_i_d, size);
3941 }
3942 }
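
/* A worked example of the growth policy above (illustrative only):
   with sched_max_luid = 1000, the vector grows to 3 * 1000 / 2 = 1500
   entries; with sched_max_luid = 10000, it grows to 10000 + 1024 =
   11024 entries, so large regions trade a smaller relative headroom
   for fewer reallocations.  */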
3943
3944 /* Finalize data structures for insns from current region. */
3945 static void
3946 finish_insns (void)
3947 {
3948 unsigned i;
3949
/* Clear here all dependence contexts that may have been left over from
   insns removed during scheduling.  */
3952 for (i = 0; i < VEC_length (sel_insn_data_def, s_i_d); i++)
3953 {
3954 sel_insn_data_def *sid_entry = VEC_index (sel_insn_data_def, s_i_d, i);
3955
3956 if (sid_entry->live)
3957 return_regset_to_pool (sid_entry->live);
3958 if (sid_entry->analyzed_deps)
3959 {
3960 BITMAP_FREE (sid_entry->analyzed_deps);
3961 BITMAP_FREE (sid_entry->found_deps);
3962 htab_delete (sid_entry->transformed_insns);
3963 free_deps (&sid_entry->deps_context);
3964 }
3965 if (EXPR_VINSN (&sid_entry->expr))
3966 {
3967 clear_expr (&sid_entry->expr);
3968
3969 /* Also, clear CANT_MOVE bit here, because we really don't want it
3970 to be passed to the next region. */
3971 CANT_MOVE_BY_LUID (i) = 0;
3972 }
3973 }
3974
3975 VEC_free (sel_insn_data_def, heap, s_i_d);
3976 }
3977
3978 /* A proxy to pass initialization data to init_insn (). */
3979 static sel_insn_data_def _insn_init_ssid;
3980 static sel_insn_data_t insn_init_ssid = &_insn_init_ssid;
3981
3982 /* If true create a new vinsn. Otherwise use the one from EXPR. */
3983 static bool insn_init_create_new_vinsn_p;
3984
3985 /* Set all necessary data for initialization of the new insn[s]. */
3986 static expr_t
3987 set_insn_init (expr_t expr, vinsn_t vi, int seqno)
3988 {
3989 expr_t x = &insn_init_ssid->expr;
3990
3991 copy_expr_onside (x, expr);
3992 if (vi != NULL)
3993 {
3994 insn_init_create_new_vinsn_p = false;
3995 change_vinsn_in_expr (x, vi);
3996 }
3997 else
3998 insn_init_create_new_vinsn_p = true;
3999
4000 insn_init_ssid->seqno = seqno;
4001 return x;
4002 }
4003
4004 /* Init data for INSN. */
4005 static void
4006 init_insn_data (insn_t insn)
4007 {
4008 expr_t expr;
4009 sel_insn_data_t ssid = insn_init_ssid;
4010
4011 /* The fields mentioned below are special and hence are not being
4012 propagated to the new insns. */
4013 gcc_assert (!ssid->asm_p && ssid->sched_next == NULL
4014 && !ssid->after_stall_p && ssid->sched_cycle == 0);
4015 gcc_assert (INSN_P (insn) && INSN_LUID (insn) > 0);
4016
4017 expr = INSN_EXPR (insn);
4018 copy_expr (expr, &ssid->expr);
4019 prepare_insn_expr (insn, ssid->seqno);
4020
4021 if (insn_init_create_new_vinsn_p)
4022 change_vinsn_in_expr (expr, vinsn_create (insn, init_insn_force_unique_p));
4023
4024 if (first_time_insn_init (insn))
4025 init_first_time_insn_data (insn);
4026 }
4027
/* This is used to initialize spurious jumps generated by
   sel_redirect_edge_and_branch () and similar routines.  */
4030 static void
4031 init_simplejump_data (insn_t insn)
4032 {
4033 init_expr (INSN_EXPR (insn), vinsn_create (insn, false), 0,
4034 REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0, NULL, true, false, false,
4035 false, true);
4036 INSN_SEQNO (insn) = get_seqno_of_a_pred (insn);
4037 init_first_time_insn_data (insn);
4038 }
4039
/* Perform deferred initialization of insns.  This is used to process
   a new jump that may be created by redirect_edge_and_branch ().  */
4042 void
4043 sel_init_new_insn (insn_t insn, int flags)
4044 {
  /* We create data structures for a bb when its first insn is emitted
     into it.  */
4046 if (INSN_P (insn)
4047 && INSN_IN_STREAM_P (insn)
4048 && insn_is_the_only_one_in_bb_p (insn))
4049 {
4050 extend_bb_info ();
4051 create_initial_data_sets (BLOCK_FOR_INSN (insn));
4052 }
4053
4054 if (flags & INSN_INIT_TODO_LUID)
4055 sched_init_luids (NULL, NULL, NULL, insn);
4056
4057 if (flags & INSN_INIT_TODO_SSID)
4058 {
4059 extend_insn_data ();
4060 init_insn_data (insn);
4061 clear_expr (&insn_init_ssid->expr);
4062 }
4063
4064 if (flags & INSN_INIT_TODO_SIMPLEJUMP)
4065 {
4066 extend_insn_data ();
4067 init_simplejump_data (insn);
4068 }
4069
4070 gcc_assert (CONTAINING_RGN (BLOCK_NUM (insn))
4071 == CONTAINING_RGN (BB_TO_BLOCK (0)));
4072 }
4073 \f
4074
4075 /* Functions to init/finish work with lv sets. */
4076
4077 /* Init BB_LV_SET of BB from DF_LR_IN set of BB. */
4078 static void
4079 init_lv_set (basic_block bb)
4080 {
4081 gcc_assert (!BB_LV_SET_VALID_P (bb));
4082
4083 BB_LV_SET (bb) = get_regset_from_pool ();
4084 COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb));
4085 BB_LV_SET_VALID_P (bb) = true;
4086 }
4087
4088 /* Copy liveness information to BB from FROM_BB. */
4089 static void
4090 copy_lv_set_from (basic_block bb, basic_block from_bb)
4091 {
4092 gcc_assert (!BB_LV_SET_VALID_P (bb));
4093
4094 COPY_REG_SET (BB_LV_SET (bb), BB_LV_SET (from_bb));
4095 BB_LV_SET_VALID_P (bb) = true;
4096 }
4097
4098 /* Initialize lv set of all bb headers. */
4099 void
4100 init_lv_sets (void)
4101 {
4102 basic_block bb;
4103
  /* Initialize LV sets.  */
4105 FOR_EACH_BB (bb)
4106 init_lv_set (bb);
4107
4108 /* Don't forget EXIT_BLOCK. */
4109 init_lv_set (EXIT_BLOCK_PTR);
4110 }
4111
/* Release the lv set of BB.  */
4113 static void
4114 free_lv_set (basic_block bb)
4115 {
4116 gcc_assert (BB_LV_SET (bb) != NULL);
4117
4118 return_regset_to_pool (BB_LV_SET (bb));
4119 BB_LV_SET (bb) = NULL;
4120 BB_LV_SET_VALID_P (bb) = false;
4121 }
4122
4123 /* Finalize lv sets of all bb headers. */
4124 void
4125 free_lv_sets (void)
4126 {
4127 basic_block bb;
4128
4129 /* Don't forget EXIT_BLOCK. */
4130 free_lv_set (EXIT_BLOCK_PTR);
4131
4132 /* Free LV sets. */
4133 FOR_EACH_BB (bb)
4134 if (BB_LV_SET (bb))
4135 free_lv_set (bb);
4136 }
4137
/* Initialize an invalid AV_SET for BB.
   This set will be updated the next time compute_av () processes BB.  */
4140 static void
4141 invalidate_av_set (basic_block bb)
4142 {
4143 gcc_assert (BB_AV_LEVEL (bb) <= 0
4144 && BB_AV_SET (bb) == NULL);
4145
4146 BB_AV_LEVEL (bb) = -1;
4147 }
4148
4149 /* Create initial data sets for BB (they will be invalid). */
4150 static void
4151 create_initial_data_sets (basic_block bb)
4152 {
4153 if (BB_LV_SET (bb))
4154 BB_LV_SET_VALID_P (bb) = false;
4155 else
4156 BB_LV_SET (bb) = get_regset_from_pool ();
4157 invalidate_av_set (bb);
4158 }
4159
4160 /* Free av set of BB. */
4161 static void
4162 free_av_set (basic_block bb)
4163 {
4164 av_set_clear (&BB_AV_SET (bb));
4165 BB_AV_LEVEL (bb) = 0;
4166 }
4167
4168 /* Free data sets of BB. */
4169 void
4170 free_data_sets (basic_block bb)
4171 {
4172 free_lv_set (bb);
4173 free_av_set (bb);
4174 }
4175
4176 /* Exchange lv sets of TO and FROM. */
4177 static void
4178 exchange_lv_sets (basic_block to, basic_block from)
4179 {
4180 {
4181 regset to_lv_set = BB_LV_SET (to);
4182
4183 BB_LV_SET (to) = BB_LV_SET (from);
4184 BB_LV_SET (from) = to_lv_set;
4185 }
4186
4187 {
4188 bool to_lv_set_valid_p = BB_LV_SET_VALID_P (to);
4189
4190 BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);
4191 BB_LV_SET_VALID_P (from) = to_lv_set_valid_p;
4192 }
4193 }
4194
4195
4196 /* Exchange av sets of TO and FROM. */
4197 static void
4198 exchange_av_sets (basic_block to, basic_block from)
4199 {
4200 {
4201 av_set_t to_av_set = BB_AV_SET (to);
4202
4203 BB_AV_SET (to) = BB_AV_SET (from);
4204 BB_AV_SET (from) = to_av_set;
4205 }
4206
4207 {
4208 int to_av_level = BB_AV_LEVEL (to);
4209
4210 BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
4211 BB_AV_LEVEL (from) = to_av_level;
4212 }
4213 }
4214
4215 /* Exchange data sets of TO and FROM. */
4216 void
4217 exchange_data_sets (basic_block to, basic_block from)
4218 {
4219 exchange_lv_sets (to, from);
4220 exchange_av_sets (to, from);
4221 }
4222
4223 /* Copy data sets of FROM to TO. */
4224 void
4225 copy_data_sets (basic_block to, basic_block from)
4226 {
4227 gcc_assert (!BB_LV_SET_VALID_P (to) && !BB_AV_SET_VALID_P (to));
4228 gcc_assert (BB_AV_SET (to) == NULL);
4229
4230 BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
4231 BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);
4232
4233 if (BB_AV_SET_VALID_P (from))
4234 {
4235 BB_AV_SET (to) = av_set_copy (BB_AV_SET (from));
4236 }
4237 if (BB_LV_SET_VALID_P (from))
4238 {
4239 gcc_assert (BB_LV_SET (to) != NULL);
4240 COPY_REG_SET (BB_LV_SET (to), BB_LV_SET (from));
4241 }
4242 }
4243
4244 /* Return an av set for INSN, if any. */
4245 av_set_t
4246 get_av_set (insn_t insn)
4247 {
4248 av_set_t av_set;
4249
4250 gcc_assert (AV_SET_VALID_P (insn));
4251
4252 if (sel_bb_head_p (insn))
4253 av_set = BB_AV_SET (BLOCK_FOR_INSN (insn));
4254 else
4255 av_set = NULL;
4256
4257 return av_set;
4258 }
4259
4260 /* Implementation of AV_LEVEL () macro. Return AV_LEVEL () of INSN. */
4261 int
4262 get_av_level (insn_t insn)
4263 {
4264 int av_level;
4265
4266 gcc_assert (INSN_P (insn));
4267
4268 if (sel_bb_head_p (insn))
4269 av_level = BB_AV_LEVEL (BLOCK_FOR_INSN (insn));
4270 else
4271 av_level = INSN_WS_LEVEL (insn);
4272
4273 return av_level;
4274 }
4275
4276 \f
4277
4278 /* Variables to work with control-flow graph. */
4279
/* The basic blocks that have already been processed by sched_data_update (),
   but haven't yet been added via sel_add_bb ().  */
4282 static VEC (basic_block, heap) *last_added_blocks = NULL;
4283
4284 /* A pool for allocating successor infos. */
4285 static struct
4286 {
4287 /* A stack for saving succs_info structures. */
4288 struct succs_info *stack;
4289
4290 /* Its size. */
4291 int size;
4292
4293 /* Top of the stack. */
4294 int top;
4295
4296 /* Maximal value of the top. */
4297 int max_top;
4298 } succs_info_pool;
4299
4300 /* Functions to work with control-flow graph. */
4301
/* Return the head insn of BB: the first real insn after the basic block
   note, or NULL_RTX if the block is empty.  For the exit block, return
   the special exit_insn.  */
4303 insn_t
4304 sel_bb_head (basic_block bb)
4305 {
4306 insn_t head;
4307
4308 if (bb == EXIT_BLOCK_PTR)
4309 {
4310 gcc_assert (exit_insn != NULL_RTX);
4311 head = exit_insn;
4312 }
4313 else
4314 {
4315 insn_t note;
4316
4317 note = bb_note (bb);
4318 head = next_nonnote_insn (note);
4319
4320 if (head && BLOCK_FOR_INSN (head) != bb)
4321 head = NULL_RTX;
4322 }
4323
4324 return head;
4325 }
4326
4327 /* Return true if INSN is a basic block header. */
4328 bool
4329 sel_bb_head_p (insn_t insn)
4330 {
4331 return sel_bb_head (BLOCK_FOR_INSN (insn)) == insn;
4332 }
4333
4334 /* Return last insn of BB. */
4335 insn_t
4336 sel_bb_end (basic_block bb)
4337 {
4338 if (sel_bb_empty_p (bb))
4339 return NULL_RTX;
4340
4341 gcc_assert (bb != EXIT_BLOCK_PTR);
4342
4343 return BB_END (bb);
4344 }
4345
4346 /* Return true if INSN is the last insn in its basic block. */
4347 bool
4348 sel_bb_end_p (insn_t insn)
4349 {
4350 return insn == sel_bb_end (BLOCK_FOR_INSN (insn));
4351 }
4352
/* Return true if BB consists of a single NOTE_INSN_BASIC_BLOCK.  */
4354 bool
4355 sel_bb_empty_p (basic_block bb)
4356 {
4357 return sel_bb_head (bb) == NULL;
4358 }
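
/* The accessors above are tied together; a sketch of the invariants they
   maintain for a regular (non-exit) block BB (illustrative only):

     if (sel_bb_empty_p (bb))
       gcc_assert (sel_bb_head (bb) == NULL_RTX
                   && sel_bb_end (bb) == NULL_RTX);
     else
       gcc_assert (BLOCK_FOR_INSN (sel_bb_head (bb)) == bb
                   && sel_bb_end_p (sel_bb_end (bb)));
*/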
4359
4360 /* True when BB belongs to the current scheduling region. */
4361 bool
4362 in_current_region_p (basic_block bb)
4363 {
4364 if (bb->index < NUM_FIXED_BLOCKS)
4365 return false;
4366
4367 return CONTAINING_RGN (bb->index) == CONTAINING_RGN (BB_TO_BLOCK (0));
4368 }
4369
/* Return the fallthru block of JUMP: the single successor for an
   unconditional jump, the destination of the fallthru edge for a
   conditional jump, and NULL otherwise.  */
4371 basic_block
4372 fallthru_bb_of_jump (rtx jump)
4373 {
4374 if (!JUMP_P (jump))
4375 return NULL;
4376
4377 if (any_uncondjump_p (jump))
4378 return single_succ (BLOCK_FOR_INSN (jump));
4379
4380 if (!any_condjump_p (jump))
4381 return NULL;
4382
  /* A basic block that ends with a conditional jump may still have one
     successor (and be followed by a barrier); we are not interested in
     such blocks.  */
4385 if (single_succ_p (BLOCK_FOR_INSN (jump)))
4386 return NULL;
4387
4388 return FALLTHRU_EDGE (BLOCK_FOR_INSN (jump))->dest;
4389 }
4390
4391 /* Remove all notes from BB. */
4392 static void
4393 init_bb (basic_block bb)
4394 {
4395 remove_notes (bb_note (bb), BB_END (bb));
4396 BB_NOTE_LIST (bb) = note_list;
4397 }
4398
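/* Extend the per-bb data structures and initialize the note lists for
   the basic blocks in BBS.  */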
4399 void
4400 sel_init_bbs (bb_vec_t bbs, basic_block bb)
4401 {
4402 const struct sched_scan_info_def ssi =
4403 {
4404 extend_bb_info, /* extend_bb */
4405 init_bb, /* init_bb */
4406 NULL, /* extend_insn */
4407 NULL /* init_insn */
4408 };
4409
4410 sched_scan (&ssi, bbs, bb, new_insns, NULL);
4411 }
4412
4413 /* Restore other notes for the whole region. */
4414 static void
4415 sel_restore_other_notes (void)
4416 {
4417 int bb;
4418
4419 for (bb = 0; bb < current_nr_blocks; bb++)
4420 {
4421 basic_block first, last;
4422
4423 first = EBB_FIRST_BB (bb);
4424 last = EBB_LAST_BB (bb)->next_bb;
4425
4426 do
4427 {
4428 note_list = BB_NOTE_LIST (first);
4429 restore_other_notes (NULL, first);
4430 BB_NOTE_LIST (first) = NULL_RTX;
4431
4432 first = first->next_bb;
4433 }
4434 while (first != last);
4435 }
4436 }
4437
4438 /* Free per-bb data structures. */
4439 void
4440 sel_finish_bbs (void)
4441 {
4442 sel_restore_other_notes ();
4443
4444 /* Remove current loop preheader from this loop. */
4445 if (current_loop_nest)
4446 sel_remove_loop_preheader ();
4447
4448 finish_region_bb_info ();
4449 }
4450
/* Return true if INSN has at most one successor of type FLAGS.  */
4452 bool
4453 sel_insn_has_single_succ_p (insn_t insn, int flags)
4454 {
4455 insn_t succ;
4456 succ_iterator si;
4457 bool first_p = true;
4458
4459 FOR_EACH_SUCC_1 (succ, si, insn, flags)
4460 {
4461 if (first_p)
4462 first_p = false;
4463 else
4464 return false;
4465 }
4466
4467 return true;
4468 }
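
/* A usage sketch (illustrative only; SUCCS_NORMAL is one of the FLAGS
   values understood by FOR_EACH_SUCC_1):

     bool single_p = sel_insn_has_single_succ_p (insn, SUCCS_NORMAL);

   A true result means that at most one successor matched SUCCS_NORMAL.  */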
4469
/* Allocate a successors info structure from the pool.  */
4471 static struct succs_info *
4472 alloc_succs_info (void)
4473 {
4474 if (succs_info_pool.top == succs_info_pool.max_top)
4475 {
4476 int i;
4477
4478 if (++succs_info_pool.max_top >= succs_info_pool.size)
4479 gcc_unreachable ();
4480
4481 i = ++succs_info_pool.top;
4482 succs_info_pool.stack[i].succs_ok = VEC_alloc (rtx, heap, 10);
4483 succs_info_pool.stack[i].succs_other = VEC_alloc (rtx, heap, 10);
4484 succs_info_pool.stack[i].probs_ok = VEC_alloc (int, heap, 10);
4485 }
4486 else
4487 succs_info_pool.top++;
4488
4489 return &succs_info_pool.stack[succs_info_pool.top];
4490 }
4491
/* Return the successors info SINFO to the pool.  */
4493 void
4494 free_succs_info (struct succs_info * sinfo)
4495 {
4496 gcc_assert (succs_info_pool.top >= 0
4497 && &succs_info_pool.stack[succs_info_pool.top] == sinfo);
4498 succs_info_pool.top--;
4499
4500 /* Clear stale info. */
4501 VEC_block_remove (rtx, sinfo->succs_ok,
4502 0, VEC_length (rtx, sinfo->succs_ok));
4503 VEC_block_remove (rtx, sinfo->succs_other,
4504 0, VEC_length (rtx, sinfo->succs_other));
4505 VEC_block_remove (int, sinfo->probs_ok,
4506 0, VEC_length (int, sinfo->probs_ok));
4507 sinfo->all_prob = 0;
4508 sinfo->succs_ok_n = 0;
4509 sinfo->all_succs_n = 0;
4510 }
4511
4512 /* Compute successor info for INSN. FLAGS are the flags passed
4513 to the FOR_EACH_SUCC_1 iterator. */
4514 struct succs_info *
4515 compute_succs_info (insn_t insn, short flags)
4516 {
4517 succ_iterator si;
4518 insn_t succ;
4519 struct succs_info *sinfo = alloc_succs_info ();
4520
4521 /* Traverse *all* successors and decide what to do with each. */
4522 FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
4523 {
4524 /* FIXME: this doesn't work for skipping to loop exits, as we don't
4525 perform code motion through inner loops. */
4526 short current_flags = si.current_flags & ~SUCCS_SKIP_TO_LOOP_EXITS;
4527
4528 if (current_flags & flags)
4529 {
4530 VEC_safe_push (rtx, heap, sinfo->succs_ok, succ);
4531 VEC_safe_push (int, heap, sinfo->probs_ok,
4532 /* FIXME: Improve calculation when skipping
4533 inner loop to exits. */
4534 (si.bb_end
4535 ? si.e1->probability
4536 : REG_BR_PROB_BASE));
4537 sinfo->succs_ok_n++;
4538 }
4539 else
4540 VEC_safe_push (rtx, heap, sinfo->succs_other, succ);
4541
4542 /* Compute all_prob. */
4543 if (!si.bb_end)
4544 sinfo->all_prob = REG_BR_PROB_BASE;
4545 else
4546 sinfo->all_prob += si.e1->probability;
4547
4548 sinfo->all_succs_n++;
4549 }
4550
4551 return sinfo;
4552 }
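
/* The pool above behaves as a LIFO stack, so successor infos must be
   freed in reverse order of allocation.  A minimal usage sketch
   (illustrative only):

     struct succs_info *sinfo = compute_succs_info (insn, SUCCS_NORMAL);

     ... walk sinfo->succs_ok / sinfo->probs_ok ...

     free_succs_info (sinfo);

   free_succs_info asserts that SINFO is the most recent allocation.  */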
4553
4554 /* Return the predecessors of BB in PREDS and their number in N.
4555 Empty blocks are skipped. SIZE is used to allocate PREDS. */
4556 static void
4557 cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size)
4558 {
4559 edge e;
4560 edge_iterator ei;
4561
4562 gcc_assert (BLOCK_TO_BB (bb->index) != 0);
4563
4564 FOR_EACH_EDGE (e, ei, bb->preds)
4565 {
4566 basic_block pred_bb = e->src;
4567 insn_t bb_end = BB_END (pred_bb);
4568
4569 /* ??? This code is not supposed to walk out of a region. */
4570 gcc_assert (in_current_region_p (pred_bb));
4571
4572 if (sel_bb_empty_p (pred_bb))
4573 cfg_preds_1 (pred_bb, preds, n, size);
4574 else
4575 {
4576 if (*n == *size)
4577 *preds = XRESIZEVEC (insn_t, *preds,
4578 (*size = 2 * *size + 1));
4579 (*preds)[(*n)++] = bb_end;
4580 }
4581 }
4582
4583 gcc_assert (*n != 0);
4584 }
4585
4586 /* Find all predecessors of BB and record them in PREDS and their number
4587 in N. Empty blocks are skipped, and only normal (forward in-region)
4588 edges are processed. */
4589 static void
4590 cfg_preds (basic_block bb, insn_t **preds, int *n)
4591 {
4592 int size = 0;
4593
4594 *preds = NULL;
4595 *n = 0;
4596 cfg_preds_1 (bb, preds, n, &size);
4597 }
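
/* cfg_preds allocates *PREDS on the heap, so the caller owns the array.
   A minimal usage sketch (illustrative only), mirroring what
   get_seqno_by_preds does above:

     insn_t *preds;
     int n, i, seqno = -1;

     cfg_preds (bb, &preds, &n);
     for (i = 0; i < n; i++)
       seqno = MAX (seqno, INSN_SEQNO (preds[i]));
     free (preds);
*/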
4598
/* Returns true if we are moving INSN through a join point.  */
4600 bool
4601 sel_num_cfg_preds_gt_1 (insn_t insn)
4602 {
4603 basic_block bb;
4604
4605 if (!sel_bb_head_p (insn) || INSN_BB (insn) == 0)
4606 return false;
4607
4608 bb = BLOCK_FOR_INSN (insn);
4609
4610 while (1)
4611 {
4612 if (EDGE_COUNT (bb->preds) > 1)
4613 return true;
4614
4615 gcc_assert (EDGE_PRED (bb, 0)->dest == bb);
4616 bb = EDGE_PRED (bb, 0)->src;
4617
4618 if (!sel_bb_empty_p (bb))
4619 break;
4620 }
4621
4622 return false;
4623 }
4624
4625 /* Returns true when BB should be the end of an ebb. Adapted from the
4626 code in sched-ebb.c. */
4627 bool
4628 bb_ends_ebb_p (basic_block bb)
4629 {
4630 basic_block next_bb = bb_next_bb (bb);
4631 edge e;
4632 edge_iterator ei;
4633
4634 if (next_bb == EXIT_BLOCK_PTR
4635 || bitmap_bit_p (forced_ebb_heads, next_bb->index)
4636 || (LABEL_P (BB_HEAD (next_bb))
4637 /* NB: LABEL_NUSES () is not maintained outside of jump.c.
4638 Work around that. */
4639 && !single_pred_p (next_bb)))
4640 return true;
4641
4642 if (!in_current_region_p (next_bb))
4643 return true;
4644
4645 FOR_EACH_EDGE (e, ei, bb->succs)
4646 if ((e->flags & EDGE_FALLTHRU) != 0)
4647 {
4648 gcc_assert (e->dest == next_bb);
4649
4650 return false;
4651 }
4652
4653 return true;
4654 }
4655
4656 /* Returns true when INSN and SUCC are in the same EBB, given that SUCC is a
4657 successor of INSN. */
4658 bool
4659 in_same_ebb_p (insn_t insn, insn_t succ)
4660 {
4661 basic_block ptr = BLOCK_FOR_INSN (insn);
4662
  for (;;)
4664 {
4665 if (ptr == BLOCK_FOR_INSN (succ))
4666 return true;
4667
4668 if (bb_ends_ebb_p (ptr))
4669 return false;
4670
4671 ptr = bb_next_bb (ptr);
4672 }
4673
4674 gcc_unreachable ();
4675 return false;
4676 }
4677
4678 /* Recomputes the reverse topological order for the function and
4679 saves it in REV_TOP_ORDER_INDEX. REV_TOP_ORDER_INDEX_LEN is also
4680 modified appropriately. */
4681 static void
4682 recompute_rev_top_order (void)
4683 {
4684 int *postorder;
4685 int n_blocks, i;
4686
4687 if (!rev_top_order_index || rev_top_order_index_len < last_basic_block)
4688 {
4689 rev_top_order_index_len = last_basic_block;
4690 rev_top_order_index = XRESIZEVEC (int, rev_top_order_index,
4691 rev_top_order_index_len);
4692 }
4693
4694 postorder = XNEWVEC (int, n_basic_blocks);
4695
4696 n_blocks = post_order_compute (postorder, true, false);
4697 gcc_assert (n_basic_blocks == n_blocks);
4698
  /* Build the reverse function: for each basic block with BB->INDEX == K,
     rev_top_order_index[K] is its reverse topological sort number.  */
4701 for (i = 0; i < n_blocks; i++)
4702 {
4703 gcc_assert (postorder[i] < rev_top_order_index_len);
4704 rev_top_order_index[postorder[i]] = i;
4705 }
4706
4707 free (postorder);
4708 }
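
/* A worked example (illustrative only): for a function with blocks
   A -> B -> C, post_order_compute returns C, B, A, so
   rev_top_order_index becomes A:2, B:1, C:0.  Larger values thus mean
   topologically earlier blocks, which is what bb_top_order_comparator
   below relies on.  */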
4709
4710 /* Clear all flags from insns in BB that could spoil its rescheduling. */
4711 void
4712 clear_outdated_rtx_info (basic_block bb)
4713 {
4714 rtx insn;
4715
4716 FOR_BB_INSNS (bb, insn)
4717 if (INSN_P (insn))
4718 {
4719 SCHED_GROUP_P (insn) = 0;
4720 INSN_AFTER_STALL_P (insn) = 0;
4721 INSN_SCHED_TIMES (insn) = 0;
4722 EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) = 0;
4723
	/* We cannot reuse the transformed-insns cache: previously we
	   could ignore the LHS dependence because renaming was enabled,
	   and transform the expression; now we would be unable to do
	   so.  */
4727 htab_empty (INSN_TRANSFORMED_INSNS (insn));
4728 }
4729 }
4730
4731 /* Add BB_NOTE to the pool of available basic block notes. */
4732 static void
4733 return_bb_to_pool (basic_block bb)
4734 {
4735 rtx note = bb_note (bb);
4736
4737 gcc_assert (NOTE_BASIC_BLOCK (note) == bb
4738 && bb->aux == NULL);
4739
4740 /* It turns out that current cfg infrastructure does not support
4741 reuse of basic blocks. Don't bother for now. */
4742 /*VEC_safe_push (rtx, heap, bb_note_pool, note);*/
4743 }
4744
4745 /* Get a bb_note from pool or return NULL_RTX if pool is empty. */
4746 static rtx
4747 get_bb_note_from_pool (void)
4748 {
4749 if (VEC_empty (rtx, bb_note_pool))
4750 return NULL_RTX;
4751 else
4752 {
4753 rtx note = VEC_pop (rtx, bb_note_pool);
4754
4755 PREV_INSN (note) = NULL_RTX;
4756 NEXT_INSN (note) = NULL_RTX;
4757
4758 return note;
4759 }
4760 }
4761
4762 /* Free bb_note_pool. */
4763 void
4764 free_bb_note_pool (void)
4765 {
4766 VEC_free (rtx, heap, bb_note_pool);
4767 }
4768
4769 /* Setup scheduler pool and successor structure. */
4770 void
4771 alloc_sched_pools (void)
4772 {
4773 int succs_size;
4774
4775 succs_size = MAX_WS + 1;
4776 succs_info_pool.stack = XCNEWVEC (struct succs_info, succs_size);
4777 succs_info_pool.size = succs_size;
4778 succs_info_pool.top = -1;
4779 succs_info_pool.max_top = -1;
4780
4781 sched_lists_pool = create_alloc_pool ("sel-sched-lists",
4782 sizeof (struct _list_node), 500);
4783 }
4784
4785 /* Free the pools. */
4786 void
4787 free_sched_pools (void)
4788 {
4789 int i;
4790
4791 free_alloc_pool (sched_lists_pool);
4792 gcc_assert (succs_info_pool.top == -1);
4793 for (i = 0; i < succs_info_pool.max_top; i++)
4794 {
4795 VEC_free (rtx, heap, succs_info_pool.stack[i].succs_ok);
4796 VEC_free (rtx, heap, succs_info_pool.stack[i].succs_other);
4797 VEC_free (int, heap, succs_info_pool.stack[i].probs_ok);
4798 }
4799 free (succs_info_pool.stack);
4800 }
4801 \f
4802
/* Returns a position in RGN where BB can be inserted while retaining
   topological order.  */
4805 static int
4806 find_place_to_insert_bb (basic_block bb, int rgn)
4807 {
4808 bool has_preds_outside_rgn = false;
4809 edge e;
4810 edge_iterator ei;
4811
4812 /* Find whether we have preds outside the region. */
4813 FOR_EACH_EDGE (e, ei, bb->preds)
4814 if (!in_current_region_p (e->src))
4815 {
4816 has_preds_outside_rgn = true;
4817 break;
4818 }
4819
  /* Recompute the topological order -- needed when BB has more than
     one predecessor, or when it has predecessors outside the region.  */
4822 if (flag_sel_sched_pipelining_outer_loops
4823 && (has_preds_outside_rgn || EDGE_COUNT (bb->preds) > 1))
4824 {
4825 int i, bbi = bb->index, cur_bbi;
4826
4827 recompute_rev_top_order ();
4828 for (i = RGN_NR_BLOCKS (rgn) - 1; i >= 0; i--)
4829 {
4830 cur_bbi = BB_TO_BLOCK (i);
4831 if (rev_top_order_index[bbi]
4832 < rev_top_order_index[cur_bbi])
4833 break;
4834 }
4835
      /* We skipped the right block, so increase I.  The caller will
	 increment the result by one, so compensate by decreasing I here;
	 the net effect is returning I.  */
      return (i + 1) - 1;
4839 }
4840 else if (has_preds_outside_rgn)
4841 {
4842 /* This is the case when we generate an extra empty block
4843 to serve as region head during pipelining. */
4844 e = EDGE_SUCC (bb, 0);
4845 gcc_assert (EDGE_COUNT (bb->succs) == 1
4846 && in_current_region_p (EDGE_SUCC (bb, 0)->dest)
4847 && (BLOCK_TO_BB (e->dest->index) == 0));
4848 return -1;
4849 }
4850
  /* We don't have preds outside the region.  We should have a single
     pred, because the multiple-preds case comes from the pipelining of
     outer loops, and that is handled above.  Just take the bbi of this
     single pred.  */
4855 if (EDGE_COUNT (bb->succs) > 0)
4856 {
4857 int pred_bbi;
4858
4859 gcc_assert (EDGE_COUNT (bb->preds) == 1);
4860
4861 pred_bbi = EDGE_PRED (bb, 0)->src->index;
4862 return BLOCK_TO_BB (pred_bbi);
4863 }
4864 else
    /* BB has no successors.  It is safe to put it at the end.  */
4866 return current_nr_blocks - 1;
4867 }
4868
/* Delete an empty basic block, freeing its data.  */
4870 static void
4871 delete_and_free_basic_block (basic_block bb)
4872 {
4873 gcc_assert (sel_bb_empty_p (bb));
4874
4875 if (BB_LV_SET (bb))
4876 free_lv_set (bb);
4877
4878 bitmap_clear_bit (blocks_to_reschedule, bb->index);
4879
  /* Can't assert av_set properties because we use sel_remove_bb
     when removing a loop preheader from the region.  At that point we
     have already deallocated sel_region_bb_info.  */
4883 gcc_assert (BB_LV_SET (bb) == NULL
4884 && !BB_LV_SET_VALID_P (bb)
4885 && BB_AV_LEVEL (bb) == 0
4886 && BB_AV_SET (bb) == NULL);
4887
4888 delete_basic_block (bb);
4889 }
4890
4891 /* Add BB to the current region and update the region data. */
4892 static void
4893 add_block_to_current_region (basic_block bb)
4894 {
4895 int i, pos, bbi = -2, rgn;
4896
4897 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
4898 bbi = find_place_to_insert_bb (bb, rgn);
4899 bbi += 1;
4900 pos = RGN_BLOCKS (rgn) + bbi;
4901
4902 gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
4903 && ebb_head[bbi] == pos);
4904
4905 /* Make a place for the new block. */
4906 extend_regions ();
4907
4908 for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
4909 BLOCK_TO_BB (rgn_bb_table[i])++;
4910
4911 memmove (rgn_bb_table + pos + 1,
4912 rgn_bb_table + pos,
4913 (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
4914
4915 /* Initialize data for BB. */
4916 rgn_bb_table[pos] = bb->index;
4917 BLOCK_TO_BB (bb->index) = bbi;
4918 CONTAINING_RGN (bb->index) = rgn;
4919
4920 RGN_NR_BLOCKS (rgn)++;
4921
4922 for (i = rgn + 1; i <= nr_regions; i++)
4923 RGN_BLOCKS (i)++;
4924 }
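
/* A worked example of the bookkeeping above (illustrative only):
   suppose region RGN occupies rgn_bb_table[10..13] and BBI comes out
   as 2.  Then POS == 12, entries from 12 onwards are shifted one slot
   to the right, the new block's index is stored at rgn_bb_table[12],
   and RGN_BLOCKS of every following region is incremented so that the
   region offsets stay consistent.  */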
4925
4926 /* Remove BB from the current region and update the region data. */
4927 static void
4928 remove_bb_from_region (basic_block bb)
4929 {
4930 int i, pos, bbi = -2, rgn;
4931
4932 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
4933 bbi = BLOCK_TO_BB (bb->index);
4934 pos = RGN_BLOCKS (rgn) + bbi;
4935
4936 gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
4937 && ebb_head[bbi] == pos);
4938
4939 for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
4940 BLOCK_TO_BB (rgn_bb_table[i])--;
4941
4942 memmove (rgn_bb_table + pos,
4943 rgn_bb_table + pos + 1,
4944 (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
4945
4946 RGN_NR_BLOCKS (rgn)--;
4947 for (i = rgn + 1; i <= nr_regions; i++)
4948 RGN_BLOCKS (i)--;
4949 }
4950
4951 /* Add BB to the current region and update all data. If BB is NULL, add all
4952 blocks from last_added_blocks vector. */
4953 static void
4954 sel_add_bb (basic_block bb)
4955 {
4956 /* Extend luids so that new notes will receive zero luids. */
4957 sched_init_luids (NULL, NULL, NULL, NULL);
4958 sched_init_bbs ();
4959 sel_init_bbs (last_added_blocks, NULL);
4960
  /* When BB is passed explicitly, the vector should contain a single
     element equal to BB; otherwise, the vector should not be NULL.  */
4964 gcc_assert (last_added_blocks != NULL);
4965
4966 if (bb != NULL)
4967 {
4968 gcc_assert (VEC_length (basic_block, last_added_blocks) == 1
4969 && VEC_index (basic_block,
4970 last_added_blocks, 0) == bb);
4971 add_block_to_current_region (bb);
4972
4973 /* We associate creating/deleting data sets with the first insn
4974 appearing / disappearing in the bb. */
4975 if (!sel_bb_empty_p (bb) && BB_LV_SET (bb) == NULL)
4976 create_initial_data_sets (bb);
4977
4978 VEC_free (basic_block, heap, last_added_blocks);
4979 }
4980 else
4981 /* BB is NULL - process LAST_ADDED_BLOCKS instead. */
4982 {
4983 int i;
4984 basic_block temp_bb = NULL;
4985
4986 for (i = 0;
4987 VEC_iterate (basic_block, last_added_blocks, i, bb); i++)
4988 {
4989 add_block_to_current_region (bb);
4990 temp_bb = bb;
4991 }
4992
4993 /* We need to fetch at least one bb so we know the region
4994 to update. */
4995 gcc_assert (temp_bb != NULL);
4996 bb = temp_bb;
4997
4998 VEC_free (basic_block, heap, last_added_blocks);
4999 }
5000
5001 rgn_setup_region (CONTAINING_RGN (bb->index));
5002 }
5003
/* Remove BB from the current region and update all data.
   If REMOVE_FROM_CFG_P is true, also remove the block from the CFG.  */
5006 static void
5007 sel_remove_bb (basic_block bb, bool remove_from_cfg_p)
5008 {
5009 gcc_assert (bb != NULL && BB_NOTE_LIST (bb) == NULL_RTX);
5010
5011 remove_bb_from_region (bb);
5012 return_bb_to_pool (bb);
5013 bitmap_clear_bit (blocks_to_reschedule, bb->index);
5014
5015 if (remove_from_cfg_p)
5016 delete_and_free_basic_block (bb);
5017
5018 rgn_setup_region (CONTAINING_RGN (bb->index));
5019 }
5020
5021 /* Concatenate info of EMPTY_BB to info of MERGE_BB. */
5022 static void
5023 move_bb_info (basic_block merge_bb, basic_block empty_bb)
5024 {
5025 gcc_assert (in_current_region_p (merge_bb));
5026
5027 concat_note_lists (BB_NOTE_LIST (empty_bb),
5028 &BB_NOTE_LIST (merge_bb));
5029 BB_NOTE_LIST (empty_bb) = NULL_RTX;
5030
5031 }
5032
5033 /* Remove an empty basic block EMPTY_BB. When MERGE_UP_P is true, we put
5034 EMPTY_BB's note lists into its predecessor instead of putting them
5035 into the successor. When REMOVE_FROM_CFG_P is true, also remove
5036 the empty block. */
5037 void
5038 sel_remove_empty_bb (basic_block empty_bb, bool merge_up_p,
5039 bool remove_from_cfg_p)
5040 {
5041 basic_block merge_bb;
5042
5043 gcc_assert (sel_bb_empty_p (empty_bb));
5044
5045 if (merge_up_p)
5046 {
5047 merge_bb = empty_bb->prev_bb;
5048 gcc_assert (EDGE_COUNT (empty_bb->preds) == 1
5049 && EDGE_PRED (empty_bb, 0)->src == merge_bb);
5050 }
5051 else
5052 {
5053 edge e;
5054 edge_iterator ei;
5055
5056 merge_bb = bb_next_bb (empty_bb);
5057
      /* Redirect incoming edges (except the fallthru one) of EMPTY_BB to
	 its successor block.  */
5060 for (ei = ei_start (empty_bb->preds);
5061 (e = ei_safe_edge (ei)); )
5062 {
5063 if (! (e->flags & EDGE_FALLTHRU))
5064 sel_redirect_edge_and_branch (e, merge_bb);
5065 else
5066 ei_next (&ei);
5067 }
5068
5069 gcc_assert (EDGE_COUNT (empty_bb->succs) == 1
5070 && EDGE_SUCC (empty_bb, 0)->dest == merge_bb);
5071 }
5072
5073 move_bb_info (merge_bb, empty_bb);
5074 remove_empty_bb (empty_bb, remove_from_cfg_p);
5075 }
5076
5077 /* Remove EMPTY_BB. If REMOVE_FROM_CFG_P is false, remove EMPTY_BB from
5078 region, but keep it in CFG. */
5079 static void
5080 remove_empty_bb (basic_block empty_bb, bool remove_from_cfg_p)
5081 {
  /* The block should contain just a note or a label.
     We check below whether it is actually unused.  */
5084 gcc_assert (BB_HEAD (empty_bb) == BB_END (empty_bb)
5085 || LABEL_P (BB_HEAD (empty_bb)));
5086
5087 /* If basic block has predecessors or successors, redirect them. */
5088 if (remove_from_cfg_p
5089 && (EDGE_COUNT (empty_bb->preds) > 0
5090 || EDGE_COUNT (empty_bb->succs) > 0))
5091 {
5092 basic_block pred;
5093 basic_block succ;
5094
5095 /* We need to init PRED and SUCC before redirecting edges. */
5096 if (EDGE_COUNT (empty_bb->preds) > 0)
5097 {
5098 edge e;
5099
5100 gcc_assert (EDGE_COUNT (empty_bb->preds) == 1);
5101
5102 e = EDGE_PRED (empty_bb, 0);
5103 gcc_assert (e->src == empty_bb->prev_bb
5104 && (e->flags & EDGE_FALLTHRU));
5105
5106 pred = empty_bb->prev_bb;
5107 }
5108 else
5109 pred = NULL;
5110
5111 if (EDGE_COUNT (empty_bb->succs) > 0)
5112 {
	  /* We do not check fallthruness here as above, because after
	     removing a jump the edge may no longer be a fallthru edge.  */
5115 gcc_assert (EDGE_COUNT (empty_bb->succs) == 1);
5116 succ = EDGE_SUCC (empty_bb, 0)->dest;
5117 }
5118 else
5119 succ = NULL;
5120
5121 if (EDGE_COUNT (empty_bb->preds) > 0 && succ != NULL)
5122 {
5123 edge e = EDGE_PRED (empty_bb, 0);
5124
5125 if (e->flags & EDGE_FALLTHRU)
5126 redirect_edge_succ_nodup (e, succ);
5127 else
5128 sel_redirect_edge_and_branch (EDGE_PRED (empty_bb, 0), succ);
5129 }
5130
5131 if (EDGE_COUNT (empty_bb->succs) > 0 && pred != NULL)
5132 {
5133 edge e = EDGE_SUCC (empty_bb, 0);
5134
5135 if (find_edge (pred, e->dest) == NULL)
5136 redirect_edge_pred (e, pred);
5137 }
5138 }
5139
5140 /* Finish removing. */
5141 sel_remove_bb (empty_bb, remove_from_cfg_p);
5142 }
5143
5144 /* An implementation of create_basic_block hook, which additionally updates
5145 per-bb data structures. */
5146 static basic_block
5147 sel_create_basic_block (void *headp, void *endp, basic_block after)
5148 {
5149 basic_block new_bb;
5150 insn_t new_bb_note;
5151
5152 gcc_assert (flag_sel_sched_pipelining_outer_loops
5153 || last_added_blocks == NULL);
5154
5155 new_bb_note = get_bb_note_from_pool ();
5156
5157 if (new_bb_note == NULL_RTX)
5158 new_bb = orig_cfg_hooks.create_basic_block (headp, endp, after);
5159 else
5160 {
5161 new_bb = create_basic_block_structure ((rtx) headp, (rtx) endp,
5162 new_bb_note, after);
5163 new_bb->aux = NULL;
5164 }
5165
5166 VEC_safe_push (basic_block, heap, last_added_blocks, new_bb);
5167
5168 return new_bb;
5169 }
5170
5171 /* Implement sched_init_only_bb (). */
5172 static void
5173 sel_init_only_bb (basic_block bb, basic_block after)
5174 {
5175 gcc_assert (after == NULL);
5176
5177 extend_regions ();
5178 rgn_make_new_region_out_of_new_block (bb);
5179 }
5180
/* Update the loop latch when we have split or merged it from FROM block
   to TO.  This should be checked for all outer loops, too.  */
5183 static void
5184 change_loops_latches (basic_block from, basic_block to)
5185 {
5186 gcc_assert (from != to);
5187
5188 if (current_loop_nest)
5189 {
5190 struct loop *loop;
5191
5192 for (loop = current_loop_nest; loop; loop = loop_outer (loop))
5193 if (considered_for_pipelining_p (loop) && loop->latch == from)
5194 {
5195 gcc_assert (loop == current_loop_nest);
5196 loop->latch = to;
5197 gcc_assert (loop_latch_edge (loop));
5198 }
5199 }
5200 }
5201
/* Splits BB into two basic blocks, adding the new one to the region and
   extending per-bb data structures.  Returns the newly created bb.  */
5204 static basic_block
5205 sel_split_block (basic_block bb, rtx after)
5206 {
5207 basic_block new_bb;
5208 insn_t insn;
5209
5210 new_bb = sched_split_block_1 (bb, after);
5211 sel_add_bb (new_bb);
5212
  /* This should be called after sel_add_bb, because it uses
     CONTAINING_RGN for the new block, which is not initialized before
     that point.  FIXME: this function may be a no-op now.  */
5216 change_loops_latches (bb, new_bb);
5217
5218 /* Update ORIG_BB_INDEX for insns moved into the new block. */
5219 FOR_BB_INSNS (new_bb, insn)
5220 if (INSN_P (insn))
5221 EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index;
5222
5223 if (sel_bb_empty_p (bb))
5224 {
5225 gcc_assert (!sel_bb_empty_p (new_bb));
5226
5227 /* NEW_BB has data sets that need to be updated and BB holds
5228 data sets that should be removed. Exchange these data sets
5229 so that we won't lose BB's valid data sets. */
5230 exchange_data_sets (new_bb, bb);
5231 free_data_sets (bb);
5232 }
5233
5234 if (!sel_bb_empty_p (new_bb)
5235 && bitmap_bit_p (blocks_to_reschedule, bb->index))
5236 bitmap_set_bit (blocks_to_reschedule, new_bb->index);
5237
5238 return new_bb;
5239 }
5240
/* If BB ends with a jump insn whose UID is not less than PREV_MAX_UID,
   return it.  Otherwise return NULL.  */
5243 static rtx
5244 check_for_new_jump (basic_block bb, int prev_max_uid)
5245 {
5246 rtx end;
5247
5248 end = sel_bb_end (bb);
5249 if (end && INSN_UID (end) >= prev_max_uid)
5250 return end;
5251 return NULL;
5252 }
5253
/* Look for a new jump either in the FROM block or in the newly created
   JUMP_BB block.  New means having a UID at least equal to PREV_MAX_UID.  */
5256 static rtx
5257 find_new_jump (basic_block from, basic_block jump_bb, int prev_max_uid)
5258 {
5259 rtx jump;
5260
5261 /* Return immediately if no new insns were emitted. */
5262 if (get_max_uid () == prev_max_uid)
5263 return NULL;
5264
  /* Now check both blocks for new jumps.  There can only ever be one.  */
5266 if ((jump = check_for_new_jump (from, prev_max_uid)))
5267 return jump;
5268
5269 if (jump_bb != NULL
5270 && (jump = check_for_new_jump (jump_bb, prev_max_uid)))
5271 return jump;
5272 return NULL;
5273 }
5274
5275 /* Splits E and adds the newly created basic block to the current region.
5276 Returns this basic block. */
5277 basic_block
5278 sel_split_edge (edge e)
5279 {
5280 basic_block new_bb, src, other_bb = NULL;
5281 int prev_max_uid;
5282 rtx jump;
5283
5284 src = e->src;
5285 prev_max_uid = get_max_uid ();
5286 new_bb = split_edge (e);
5287
5288 if (flag_sel_sched_pipelining_outer_loops
5289 && current_loop_nest)
5290 {
5291 int i;
5292 basic_block bb;
5293
5294 /* Some of the basic blocks might not have been added to the loop.
5295 Add them here, until this is fixed in force_fallthru. */
5296 for (i = 0;
5297 VEC_iterate (basic_block, last_added_blocks, i, bb); i++)
5298 if (!bb->loop_father)
5299 {
5300 add_bb_to_loop (bb, e->dest->loop_father);
5301
5302 gcc_assert (!other_bb && (new_bb->index != bb->index));
5303 other_bb = bb;
5304 }
5305 }
5306
5307 /* Add all last_added_blocks to the region. */
5308 sel_add_bb (NULL);
5309
5310 jump = find_new_jump (src, new_bb, prev_max_uid);
5311 if (jump)
5312 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5313
5314 /* Put the correct lv set on this block. */
5315 if (other_bb && !sel_bb_empty_p (other_bb))
5316 compute_live (sel_bb_head (other_bb));
5317
5318 return new_bb;
5319 }
5320
5321 /* Implement sched_create_empty_bb (). */
5322 static basic_block
5323 sel_create_empty_bb (basic_block after)
5324 {
5325 basic_block new_bb;
5326
5327 new_bb = sched_create_empty_bb_1 (after);
5328
5329 /* We'll explicitly initialize NEW_BB via sel_init_only_bb () a bit
5330 later. */
5331 gcc_assert (VEC_length (basic_block, last_added_blocks) == 1
5332 && VEC_index (basic_block, last_added_blocks, 0) == new_bb);
5333
5334 VEC_free (basic_block, heap, last_added_blocks);
5335 return new_bb;
5336 }
5337
/* Implement sched_create_recovery_block.  ORIG_INSN is where the block
   will be split to insert a check.  */
5340 basic_block
5341 sel_create_recovery_block (insn_t orig_insn)
5342 {
5343 basic_block first_bb, second_bb, recovery_block;
5344 basic_block before_recovery = NULL;
5345 rtx jump;
5346
5347 first_bb = BLOCK_FOR_INSN (orig_insn);
5348 if (sel_bb_end_p (orig_insn))
5349 {
5350 /* Avoid introducing an empty block while splitting. */
5351 gcc_assert (single_succ_p (first_bb));
5352 second_bb = single_succ (first_bb);
5353 }
5354 else
5355 second_bb = sched_split_block (first_bb, orig_insn);
5356
5357 recovery_block = sched_create_recovery_block (&before_recovery);
5358 if (before_recovery)
5359 copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR);
5360
5361 gcc_assert (sel_bb_empty_p (recovery_block));
5362 sched_create_recovery_edges (first_bb, recovery_block, second_bb);
5363 if (current_loops != NULL)
5364 add_bb_to_loop (recovery_block, first_bb->loop_father);
5365
5366 sel_add_bb (recovery_block);
5367
5368 jump = BB_END (recovery_block);
5369 gcc_assert (sel_bb_head (recovery_block) == jump);
5370 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5371
5372 return recovery_block;
5373 }
5374
5375 /* Merge basic block B into basic block A. */
5376 void
5377 sel_merge_blocks (basic_block a, basic_block b)
5378 {
5379 sel_remove_empty_bb (b, true, false);
5380 merge_blocks (a, b);
5381
5382 change_loops_latches (b, a);
5383 }
5384
/* A wrapper for redirect_edge_and_branch_force, which also initializes
   data structures for a possibly created bb and its insns.  */
5388 void
5389 sel_redirect_edge_and_branch_force (edge e, basic_block to)
5390 {
5391 basic_block jump_bb, src;
5392 int prev_max_uid;
5393 rtx jump;
5394
5395 gcc_assert (!sel_bb_empty_p (e->src));
5396
5397 src = e->src;
5398 prev_max_uid = get_max_uid ();
5399 jump_bb = redirect_edge_and_branch_force (e, to);
5400
5401 if (jump_bb != NULL)
5402 sel_add_bb (jump_bb);
5403
  /* So far this function cannot spoil the loop structure, so we do not
     need to update anything here; but assert that the latch edge is
     still intact, to be sure.  */
5406 if (current_loop_nest
5407 && pipelining_p)
5408 gcc_assert (loop_latch_edge (current_loop_nest));
5409
5410 jump = find_new_jump (src, jump_bb, prev_max_uid);
5411 if (jump)
5412 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5413 }
5414
/* A wrapper for redirect_edge_and_branch.  Return TRUE if the blocks
   connected by the redirected edge ended up in reverse topological order,
   so that the order needs to be recomputed.  */
5417 bool
5418 sel_redirect_edge_and_branch (edge e, basic_block to)
5419 {
5420 bool latch_edge_p;
5421 basic_block src;
5422 int prev_max_uid;
5423 rtx jump;
5424 edge redirected;
5425 bool recompute_toporder_p = false;
5426
5427 latch_edge_p = (pipelining_p
5428 && current_loop_nest
5429 && e == loop_latch_edge (current_loop_nest));
5430
5431 src = e->src;
5432 prev_max_uid = get_max_uid ();
5433
5434 redirected = redirect_edge_and_branch (e, to);
5435
5436 gcc_assert (redirected && last_added_blocks == NULL);
5437
5438 /* When we've redirected a latch edge, update the header. */
5439 if (latch_edge_p)
5440 {
5441 current_loop_nest->header = to;
5442 gcc_assert (loop_latch_edge (current_loop_nest));
5443 }
5444
5445 /* In rare situations, the topological relation between the blocks connected
5446 by the redirected edge can change (see PR42245 for an example). Update
5447 block_to_bb/bb_to_block. */
5448 if (CONTAINING_RGN (e->src->index) == CONTAINING_RGN (to->index)
5449 && BLOCK_TO_BB (e->src->index) > BLOCK_TO_BB (to->index))
5450 recompute_toporder_p = true;
5451
5452 jump = find_new_jump (src, NULL, prev_max_uid);
5453 if (jump)
5454 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5455
5456 return recompute_toporder_p;
5457 }
5458
5459 /* This variable holds the cfg hooks used by the selective scheduler. */
5460 static struct cfg_hooks sel_cfg_hooks;
5461
5462 /* Register sel-sched cfg hooks. */
5463 void
5464 sel_register_cfg_hooks (void)
5465 {
5466 sched_split_block = sel_split_block;
5467
5468 orig_cfg_hooks = get_cfg_hooks ();
5469 sel_cfg_hooks = orig_cfg_hooks;
5470
5471 sel_cfg_hooks.create_basic_block = sel_create_basic_block;
5472
5473 set_cfg_hooks (sel_cfg_hooks);
5474
5475 sched_init_only_bb = sel_init_only_bb;
5476 sched_split_block = sel_split_block;
5477 sched_create_empty_bb = sel_create_empty_bb;
5478 }
5479
5480 /* Unregister sel-sched cfg hooks. */
5481 void
5482 sel_unregister_cfg_hooks (void)
5483 {
5484 sched_create_empty_bb = NULL;
5485 sched_split_block = NULL;
5486 sched_init_only_bb = NULL;
5487
5488 set_cfg_hooks (orig_cfg_hooks);
5489 }
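
/* The two functions above must bracket any CFG manipulation done on
   behalf of the selective scheduler; a minimal sketch (illustrative
   only):

     sel_register_cfg_hooks ();

     ... split edges or blocks; newly created bbs are collected in
	 last_added_blocks and picked up by sel_add_bb ...

     sel_unregister_cfg_hooks ();
*/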
5490 \f
5491
5492 /* Emit an insn rtx based on PATTERN. If a jump insn is wanted,
5493 LABEL is where this jump should be directed. */
5494 rtx
5495 create_insn_rtx_from_pattern (rtx pattern, rtx label)
5496 {
5497 rtx insn_rtx;
5498
5499 gcc_assert (!INSN_P (pattern));
5500
5501 start_sequence ();
5502
5503 if (label == NULL_RTX)
5504 insn_rtx = emit_insn (pattern);
5505 else if (DEBUG_INSN_P (label))
5506 insn_rtx = emit_debug_insn (pattern);
5507 else
5508 {
5509 insn_rtx = emit_jump_insn (pattern);
5510 JUMP_LABEL (insn_rtx) = label;
5511 ++LABEL_NUSES (label);
5512 }
5513
5514 end_sequence ();
5515
5516 sched_init_luids (NULL, NULL, NULL, NULL);
5517 sched_extend_target ();
5518 sched_deps_init (false);
5519
5520 /* Initialize INSN_CODE now. */
5521 recog_memoized (insn_rtx);
5522 return insn_rtx;
5523 }
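
/* A minimal usage sketch (illustrative only) -- this is exactly how
   create_copy_of_insn_rtx below duplicates a non-jump insn:

     rtx copy = create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn)),
					      NULL_RTX);

   Passing a non-debug LABEL instead emits a jump insn directed to that
   label, with LABEL_NUSES updated accordingly.  */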
5524
5525 /* Create a new vinsn for INSN_RTX. FORCE_UNIQUE_P is true when the vinsn
5526 must not be clonable. */
5527 vinsn_t
5528 create_vinsn_from_insn_rtx (rtx insn_rtx, bool force_unique_p)
5529 {
5530 gcc_assert (INSN_P (insn_rtx) && !INSN_IN_STREAM_P (insn_rtx));
5531
5532 /* If VINSN_TYPE is not USE, retain its uniqueness. */
5533 return vinsn_create (insn_rtx, force_unique_p);
5534 }
5535
5536 /* Create a copy of INSN_RTX. */
5537 rtx
5538 create_copy_of_insn_rtx (rtx insn_rtx)
5539 {
5540 rtx res;
5541
5542 if (DEBUG_INSN_P (insn_rtx))
5543 return create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5544 insn_rtx);
5545
5546 gcc_assert (NONJUMP_INSN_P (insn_rtx));
5547
5548 res = create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5549 NULL_RTX);
5550 return res;
5551 }
5552
5553 /* Change vinsn field of EXPR to hold NEW_VINSN. */
5554 void
5555 change_vinsn_in_expr (expr_t expr, vinsn_t new_vinsn)
5556 {
5557 vinsn_detach (EXPR_VINSN (expr));
5558
5559 EXPR_VINSN (expr) = new_vinsn;
5560 vinsn_attach (new_vinsn);
5561 }
5562
5563 /* Helpers for global init. */
/* This structure is used to call the existing bundling mechanism
   and to calculate insn priorities.  */
5566 static struct haifa_sched_info sched_sel_haifa_sched_info =
5567 {
5568 NULL, /* init_ready_list */
5569 NULL, /* can_schedule_ready_p */
5570 NULL, /* schedule_more_p */
5571 NULL, /* new_ready */
5572 NULL, /* rgn_rank */
5573 sel_print_insn, /* rgn_print_insn */
5574 contributes_to_priority,
5575 NULL, /* insn_finishes_block_p */
5576
5577 NULL, NULL,
5578 NULL, NULL,
5579 0, 0,
5580
5581 NULL, /* add_remove_insn */
5582 NULL, /* begin_schedule_ready */
5583 NULL, /* advance_target_bb */
5584 SEL_SCHED | NEW_BBS
5585 };
5586
5587 /* Setup special insns used in the scheduler. */
5588 void
5589 setup_nop_and_exit_insns (void)
5590 {
5591 gcc_assert (nop_pattern == NULL_RTX
5592 && exit_insn == NULL_RTX);
5593
5594 nop_pattern = gen_nop ();
5595
5596 start_sequence ();
5597 emit_insn (nop_pattern);
5598 exit_insn = get_insns ();
5599 end_sequence ();
5600 set_block_for_insn (exit_insn, EXIT_BLOCK_PTR);
5601 }
5602
5603 /* Free special insns used in the scheduler. */
5604 void
5605 free_nop_and_exit_insns (void)
5606 {
5607 exit_insn = NULL_RTX;
5608 nop_pattern = NULL_RTX;
5609 }
5610
5611 /* Setup a special vinsn used in new insns initialization. */
5612 void
5613 setup_nop_vinsn (void)
5614 {
5615 nop_vinsn = vinsn_create (exit_insn, false);
5616 vinsn_attach (nop_vinsn);
5617 }
5618
5619 /* Free a special vinsn used in new insns initialization. */
5620 void
5621 free_nop_vinsn (void)
5622 {
5623 gcc_assert (VINSN_COUNT (nop_vinsn) == 1);
5624 vinsn_detach (nop_vinsn);
5625 nop_vinsn = NULL;
5626 }
5627
5628 /* Call a set_sched_flags hook. */
5629 void
5630 sel_set_sched_flags (void)
5631 {
  /* ??? This means that set_sched_flags was called, and we decided to
     support speculation.  However, set_sched_flags also modifies flags
     on current_sched_info, and does this only at global init.  We
     sometimes change c_s_i later, so set the correct flags again.  */
5636 if (spec_info && targetm.sched.set_sched_flags)
5637 targetm.sched.set_sched_flags (spec_info);
5638 }
5639
5640 /* Setup pointers to global sched info structures. */
5641 void
5642 sel_setup_sched_infos (void)
5643 {
5644 rgn_setup_common_sched_info ();
5645
5646 memcpy (&sel_common_sched_info, common_sched_info,
5647 sizeof (sel_common_sched_info));
5648
5649 sel_common_sched_info.fix_recovery_cfg = NULL;
5650 sel_common_sched_info.add_block = NULL;
5651 sel_common_sched_info.estimate_number_of_insns
5652 = sel_estimate_number_of_insns;
5653 sel_common_sched_info.luid_for_non_insn = sel_luid_for_non_insn;
5654 sel_common_sched_info.sched_pass_id = SCHED_SEL_PASS;
5655
5656 common_sched_info = &sel_common_sched_info;
5657
5658 current_sched_info = &sched_sel_haifa_sched_info;
5659 current_sched_info->sched_max_insns_priority =
5660 get_rgn_sched_max_insns_priority ();
5661
5662 sel_set_sched_flags ();
5663 }
5664 \f
5665
/* Add basic block BB to region RGN at position *BB_ORD_INDEX, and
   increment *BB_ORD_INDEX afterwards.  */
5668 static void
5669 sel_add_block_to_region (basic_block bb, int *bb_ord_index, int rgn)
5670 {
5671 RGN_NR_BLOCKS (rgn) += 1;
5672 RGN_DONT_CALC_DEPS (rgn) = 0;
5673 RGN_HAS_REAL_EBB (rgn) = 0;
5674 CONTAINING_RGN (bb->index) = rgn;
5675 BLOCK_TO_BB (bb->index) = *bb_ord_index;
5676 rgn_bb_table[RGN_BLOCKS (rgn) + *bb_ord_index] = bb->index;
5677 (*bb_ord_index)++;
5678
5679 /* FIXME: it is true only when not scheduling ebbs. */
5680 RGN_BLOCKS (rgn + 1) = RGN_BLOCKS (rgn) + RGN_NR_BLOCKS (rgn);
5681 }
5682
5683 /* Functions to support pipelining of outer loops. */
5684
/* Creates a new empty region and returns its number.  */
5686 static int
5687 sel_create_new_region (void)
5688 {
5689 int new_rgn_number = nr_regions;
5690
5691 RGN_NR_BLOCKS (new_rgn_number) = 0;
5692
5693 /* FIXME: This will work only when EBBs are not created. */
5694 if (new_rgn_number != 0)
5695 RGN_BLOCKS (new_rgn_number) = RGN_BLOCKS (new_rgn_number - 1) +
5696 RGN_NR_BLOCKS (new_rgn_number - 1);
5697 else
5698 RGN_BLOCKS (new_rgn_number) = 0;
5699
5700 /* Set the blocks of the next region so the other functions may
5701 calculate the number of blocks in the region. */
5702 RGN_BLOCKS (new_rgn_number + 1) = RGN_BLOCKS (new_rgn_number) +
5703 RGN_NR_BLOCKS (new_rgn_number);
5704
5705 nr_regions++;
5706
5707 return new_rgn_number;
5708 }
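
/* A worked example of the offsets above (illustrative only): if region 0
   holds 3 blocks, then RGN_BLOCKS (0) == 0 and a newly created region 1
   gets RGN_BLOCKS (1) == 3.  RGN_BLOCKS (2) is provisionally set to 3 as
   well, and grows as blocks are added via sel_add_block_to_region.  */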
5709
5710 /* If X has a smaller topological sort number than Y, returns -1;
5711 if greater, returns 1. */
5712 static int
5713 bb_top_order_comparator (const void *x, const void *y)
5714 {
5715 basic_block bb1 = *(const basic_block *) x;
5716 basic_block bb2 = *(const basic_block *) y;
5717
5718 gcc_assert (bb1 == bb2
5719 || rev_top_order_index[bb1->index]
5720 != rev_top_order_index[bb2->index]);
5721
5722 /* It's a reverse topological order in REV_TOP_ORDER_INDEX, so
5723 bbs with greater number should go earlier. */
5724 if (rev_top_order_index[bb1->index] > rev_top_order_index[bb2->index])
5725 return -1;
5726 else
5727 return 1;
5728 }
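
/* A usage sketch (illustrative only): the comparator above is meant for
   get_loop_body_in_custom_order, as in make_region_from_loop below:

     basic_block *blocks
       = get_loop_body_in_custom_order (loop, bb_top_order_comparator);

   The resulting array lists the loop body in topological order, header
   first.  */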
5729
5730 /* Create a region for LOOP and return its number. If we don't want
5731 to pipeline LOOP, return -1. */
5732 static int
5733 make_region_from_loop (struct loop *loop)
5734 {
5735 unsigned int i;
5736 int new_rgn_number = -1;
5737 struct loop *inner;
5738
5739 /* Basic block index, to be assigned to BLOCK_TO_BB. */
5740 int bb_ord_index = 0;
5741 basic_block *loop_blocks;
5742 basic_block preheader_block;
5743
5744 if (loop->num_nodes
5745 > (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS))
5746 return -1;
5747
  /* Don't pipeline loops whose latch belongs to one of their inner loops.  */
5749 for (inner = loop->inner; inner; inner = inner->inner)
5750 if (flow_bb_inside_loop_p (inner, loop->latch))
5751 return -1;
5752
5753 loop->ninsns = num_loop_insns (loop);
5754 if ((int) loop->ninsns > PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_INSNS))
5755 return -1;
5756
5757 loop_blocks = get_loop_body_in_custom_order (loop, bb_top_order_comparator);
5758
5759 for (i = 0; i < loop->num_nodes; i++)
5760 if (loop_blocks[i]->flags & BB_IRREDUCIBLE_LOOP)
5761 {
5762 free (loop_blocks);
5763 return -1;
5764 }
5765
5766 preheader_block = loop_preheader_edge (loop)->src;
5767 gcc_assert (preheader_block);
5768 gcc_assert (loop_blocks[0] == loop->header);
5769
5770 new_rgn_number = sel_create_new_region ();
5771
5772 sel_add_block_to_region (preheader_block, &bb_ord_index, new_rgn_number);
5773 SET_BIT (bbs_in_loop_rgns, preheader_block->index);
5774
5775 for (i = 0; i < loop->num_nodes; i++)
5776 {
5777 /* Add only those blocks that haven't been scheduled in the inner loop.
5778 The exception is the basic blocks with bookkeeping code - they should
5779 be added to the region (and they actually don't belong to the loop
5780 body, but to the region containing that loop body). */
5781
5782 gcc_assert (new_rgn_number >= 0);
5783
5784 if (! TEST_BIT (bbs_in_loop_rgns, loop_blocks[i]->index))
5785 {
5786 sel_add_block_to_region (loop_blocks[i], &bb_ord_index,
5787 new_rgn_number);
5788 SET_BIT (bbs_in_loop_rgns, loop_blocks[i]->index);
5789 }
5790 }
5791
5792 free (loop_blocks);
5793 MARK_LOOP_FOR_PIPELINING (loop);
5794
5795 return new_rgn_number;
5796 }
5797
5798 /* Create a new region from preheader blocks LOOP_BLOCKS. */
5799 void
5800 make_region_from_loop_preheader (VEC(basic_block, heap) **loop_blocks)
5801 {
5802 unsigned int i;
5803 int new_rgn_number = -1;
5804 basic_block bb;
5805
5806 /* Basic block index, to be assigned to BLOCK_TO_BB. */
5807 int bb_ord_index = 0;
5808
5809 new_rgn_number = sel_create_new_region ();
5810
5811 for (i = 0; VEC_iterate (basic_block, *loop_blocks, i, bb); i++)
5812 {
5813 gcc_assert (new_rgn_number >= 0);
5814
5815 sel_add_block_to_region (bb, &bb_ord_index, new_rgn_number);
5816 }
5817
5818 VEC_free (basic_block, heap, *loop_blocks);
5819 gcc_assert (*loop_blocks == NULL);
5820 }
5821
5822
5823 /* Create region(s) from loop nest LOOP, such that inner loops will be
5824 pipelined before outer loops. Returns true when a region for LOOP
5825 is created. */
5826 static bool
5827 make_regions_from_loop_nest (struct loop *loop)
5828 {
5829 struct loop *cur_loop;
5830 int rgn_number;
5831
5832 /* Traverse all inner nodes of the loop. */
5833 for (cur_loop = loop->inner; cur_loop; cur_loop = cur_loop->next)
5834 if (! TEST_BIT (bbs_in_loop_rgns, cur_loop->header->index))
5835 return false;
5836
5837 /* At this moment all regular inner loops should have been pipelined.
5838 Try to create a region from this loop. */
5839 rgn_number = make_region_from_loop (loop);
5840
5841 if (rgn_number < 0)
5842 return false;
5843
5844 VEC_safe_push (loop_p, heap, loop_nests, loop);
5845 return true;
5846 }
5847
/* Initialize the needed data structures.  */
5849 void
5850 sel_init_pipelining (void)
5851 {
5852 /* Collect loop information to be used in outer loops pipelining. */
5853 loop_optimizer_init (LOOPS_HAVE_PREHEADERS
5854 | LOOPS_HAVE_FALLTHRU_PREHEADERS
5855 | LOOPS_HAVE_RECORDED_EXITS
5856 | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);
5857 current_loop_nest = NULL;
5858
5859 bbs_in_loop_rgns = sbitmap_alloc (last_basic_block);
5860 sbitmap_zero (bbs_in_loop_rgns);
5861
5862 recompute_rev_top_order ();
5863 }
5864
5865 /* Returns a struct loop for region RGN. */
5866 loop_p
5867 get_loop_nest_for_rgn (unsigned int rgn)
5868 {
5869 /* Regions created with extend_rgns don't have corresponding loop nests,
5870 because they don't represent loops. */
5871 if (rgn < VEC_length (loop_p, loop_nests))
5872 return VEC_index (loop_p, loop_nests, rgn);
5873 else
5874 return NULL;
5875 }
5876
5877 /* True when LOOP was included into pipelining regions. */
5878 bool
5879 considered_for_pipelining_p (struct loop *loop)
5880 {
5881 if (loop_depth (loop) == 0)
5882 return false;
5883
  /* Now, the loop could be too large or irreducible.  Check whether its
     region is in LOOP_NESTS.
     We determine the region number of LOOP as the region number of its
     latch.  We can't use the header here, because it could be a
     just-removed preheader, which would give us the wrong region number.
     The latch can't always be relied upon either, because it could be
     in an inner loop too.  */
5890 if (LOOP_MARKED_FOR_PIPELINING_P (loop))
5891 {
5892 int rgn = CONTAINING_RGN (loop->latch->index);
5893
5894 gcc_assert ((unsigned) rgn < VEC_length (loop_p, loop_nests));
5895 return true;
5896 }
5897
5898 return false;
5899 }
5900
5901 /* Makes regions from the rest of the blocks, after loops are chosen
5902 for pipelining. */
5903 static void
5904 make_regions_from_the_rest (void)
5905 {
5906 int cur_rgn_blocks;
5907 int *loop_hdr;
5908 int i;
5909
5910 basic_block bb;
5911 edge e;
5912 edge_iterator ei;
5913 int *degree;
5914
5915 /* Index in rgn_bb_table where to start allocating new regions. */
5916 cur_rgn_blocks = nr_regions ? RGN_BLOCKS (nr_regions) : 0;
5917
  /* Make regions from all the remaining basic blocks -- those that do not
     belong to any loop, or belong to irreducible loops.  Prepare the data
     structures for extend_rgns.  */
5921
5922 /* LOOP_HDR[I] == -1 if I-th bb doesn't belong to any loop,
5923 LOOP_HDR[I] == LOOP_HDR[J] iff basic blocks I and J reside within the same
5924 loop. */
5925 loop_hdr = XNEWVEC (int, last_basic_block);
5926 degree = XCNEWVEC (int, last_basic_block);
5927
5928
  /* For each basic block that belongs to some loop, assign the number
     of the innermost loop it belongs to.  */
5931 for (i = 0; i < last_basic_block; i++)
5932 loop_hdr[i] = -1;
5933
  FOR_EACH_BB (bb)
    {
      if (bb->loop_father && bb->loop_father->num != 0
	  && !(bb->flags & BB_IRREDUCIBLE_LOOP))
	loop_hdr[bb->index] = bb->loop_father->num;
    }
5940
  /* For each basic block, degree is calculated as the number of incoming
     edges that come from basic blocks that are not yet scheduled.  Basic
     blocks that have already been scheduled get a degree of -1.  */
5944 FOR_EACH_BB (bb)
5945 {
5946 degree[bb->index] = 0;
5947
5948 if (!TEST_BIT (bbs_in_loop_rgns, bb->index))
5949 {
5950 FOR_EACH_EDGE (e, ei, bb->preds)
5951 if (!TEST_BIT (bbs_in_loop_rgns, e->src->index))
5952 degree[bb->index]++;
5953 }
5954 else
5955 degree[bb->index] = -1;
5956 }
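
  /* A small worked example (illustrative only): if block B has
     predecessors P1 (already placed in a loop region) and P2 (not yet
     placed), then degree[B] == 1 -- only the edge from P2 counts --
     while blocks already inside loop regions get degree -1 so that
     extend_rgns skips them.  */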
5957
5958 extend_rgns (degree, &cur_rgn_blocks, bbs_in_loop_rgns, loop_hdr);
5959
5960 /* Any block that did not end up in a region is placed into a region
5961 by itself. */
5962 FOR_EACH_BB (bb)
5963 if (degree[bb->index] >= 0)
5964 {
5965 rgn_bb_table[cur_rgn_blocks] = bb->index;
5966 RGN_NR_BLOCKS (nr_regions) = 1;
5967 RGN_BLOCKS (nr_regions) = cur_rgn_blocks++;
5968 RGN_DONT_CALC_DEPS (nr_regions) = 0;
5969 RGN_HAS_REAL_EBB (nr_regions) = 0;
5970 CONTAINING_RGN (bb->index) = nr_regions++;
5971 BLOCK_TO_BB (bb->index) = 0;
5972 }
5973
5974 free (degree);
5975 free (loop_hdr);
5976 }
5977
/* Free data structures used in pipelining of loops.  */
void
sel_finish_pipelining (void)
5980 {
5981 loop_iterator li;
5982 struct loop *loop;
5983
5984 /* Release aux fields so we don't free them later by mistake. */
5985 FOR_EACH_LOOP (li, loop, 0)
5986 loop->aux = NULL;
5987
5988 loop_optimizer_finalize ();
5989
5990 VEC_free (loop_p, heap, loop_nests);
5991
5992 free (rev_top_order_index);
5993 rev_top_order_index = NULL;
5994 }
5995
5996 /* This function replaces find_rgns when
5997 flag_sel_sched_pipelining_outer_loops is set. */
5998 void
5999 sel_find_rgns (void)
6000 {
6001 sel_init_pipelining ();
6002 extend_regions ();
6003
6004 if (current_loops)
6005 {
6006 loop_p loop;
6007 loop_iterator li;
6008
6009 FOR_EACH_LOOP (li, loop, (flag_sel_sched_pipelining_outer_loops
6010 ? LI_FROM_INNERMOST
6011 : LI_ONLY_INNERMOST))
6012 make_regions_from_loop_nest (loop);
6013 }
6014
6015 /* Make regions from all the remaining basic blocks and schedule them.
6016 These are the blocks that don't belong to any loop, or belong to
6017 irreducible loops. */
6018 make_regions_from_the_rest ();
6019
6020 /* We don't need bbs_in_loop_rgns anymore. */
6021 sbitmap_free (bbs_in_loop_rgns);
6022 bbs_in_loop_rgns = NULL;
6023 }
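
/* For illustration (a sketch; the counting helper is hypothetical):
   the LI_* flag passed to FOR_EACH_LOOP above only changes the order
   in which loops are offered to make_regions_from_loop_nest.
   LI_ONLY_INNERMOST visits just the leaves of the loop tree.  */
#if 0
static int
count_innermost_loops (void)
{
  loop_iterator li;
  loop_p loop;
  int n = 0;

  /* Each iteration sees a loop with no inner loops.  */
  FOR_EACH_LOOP (li, loop, LI_ONLY_INNERMOST)
    n++;

  return n;
}
#endif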
6024
6025 /* Add the preheader blocks of the previous loop to the current region,
6026 taking them from LOOP_PREHEADER_BLOCKS (current_loop_nest).
6027 This function is only used with -fsel-sched-pipelining-outer-loops. */
6028 void
6029 sel_add_loop_preheaders (void)
6030 {
6031 int i;
6032 basic_block bb;
6033 VEC(basic_block, heap) *preheader_blocks
6034 = LOOP_PREHEADER_BLOCKS (current_loop_nest);
6035
6036 for (i = 0;
6037 VEC_iterate (basic_block, preheader_blocks, i, bb);
6038 i++)
6039 {
6040 VEC_safe_push (basic_block, heap, last_added_blocks, bb);
6041 sel_add_bb (bb);
6042 }
6043
6044 VEC_free (basic_block, heap, preheader_blocks);
6045 }
6046
6047 /* While pipelining outer loops, return TRUE if BB is a loop preheader.
6048 Note that the function must also work when pipelining_p is false,
6049 because it is used when deciding whether we should or should not
6050 reschedule pipelined code. */
6051 bool
6052 sel_is_loop_preheader_p (basic_block bb)
6053 {
6054 if (current_loop_nest)
6055 {
6056 struct loop *outer;
6057
6058 if (preheader_removed)
6059 return false;
6060
6061 /* Preheader is the first block in the region. */
6062 if (BLOCK_TO_BB (bb->index) == 0)
6063 return true;
6064
6065 /* We used to find a preheader with the topological information.
6066 Check that the above code is equivalent to what we did before. */
6067
6068 if (in_current_region_p (current_loop_nest->header))
6069 gcc_assert (!(BLOCK_TO_BB (bb->index)
6070 < BLOCK_TO_BB (current_loop_nest->header->index)));
6071
6072 /* The latch block of a pipelined outer loop must never end up
6073 here; assert that this situation cannot arise. */
6074 for (outer = loop_outer (current_loop_nest);
6075 outer;
6076 outer = loop_outer (outer))
6077 if (considered_for_pipelining_p (outer) && outer->latch == bb)
6078 gcc_unreachable ();
6079 }
6080
6081 return false;
6082 }
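
/* Usage sketch (hypothetical helper, mirroring the region walk done in
   sel_remove_loop_preheader below): count the preheader blocks of the
   current region, i.e. the blocks that precede the loop proper.  */
#if 0
static int
count_region_preheaders (void)
{
  int rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
  int i, n = 0;

  for (i = 0; i < RGN_NR_BLOCKS (rgn); i++)
    if (sel_is_loop_preheader_p (BASIC_BLOCK (BB_TO_BLOCK (i))))
      n++;

  return n;
}
#endif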
6083
6084 /* Checks whether JUMP leads to basic block DEST_BB and no other blocks. */
6085 bool
6086 jump_leads_only_to_bb_p (insn_t jump, basic_block dest_bb)
6087 {
6088 basic_block jump_bb = BLOCK_FOR_INSN (jump);
6089
6090 /* Bail out if JUMP is not a jump at all, has side effects, or is a
6091 conditional jump that can lead to several basic blocks. */
6092 if (!onlyjump_p (jump)
6093 || !any_uncondjump_p (jump))
6094 return false;
6095
6096 /* Bail out if there are several outgoing edges, the edge is abnormal,
6097 or the destination of the jump is not DEST_BB. */
6098 if (EDGE_COUNT (jump_bb->succs) != 1
6099 || EDGE_SUCC (jump_bb, 0)->flags & EDGE_ABNORMAL
6100 || EDGE_SUCC (jump_bb, 0)->dest != dest_bb)
6101 return false;
6102
6103 /* Otherwise, JUMP leads only to DEST_BB. */
6104 return true;
6105 }
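
/* Usage sketch (hypothetical helper; the real caller is
   sel_remove_loop_preheader below): once two blocks become adjacent,
   a trailing unconditional jump from one to the other is redundant.  */
#if 0
static bool
ends_in_redundant_jump_p (basic_block prev_bb)
{
  insn_t jump = BB_END (prev_bb);

  /* The jump is redundant if it can only reach the very next block.  */
  return (JUMP_P (jump)
          && jump_leads_only_to_bb_p (jump, prev_bb->next_bb));
}
#endif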
6106
6107 /* Remove the loop preheader blocks from the current region and save
6108 them in PREHEADER_BLOCKS of the parent loop, so they can be added
6109 later to the region that represents the outer loop. */
6110 static void
6111 sel_remove_loop_preheader (void)
6112 {
6113 int i, old_len;
6114 int cur_rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
6115 basic_block bb;
6116 bool all_empty_p = true;
6117 VEC(basic_block, heap) *preheader_blocks
6118 = LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest));
6119
6120 gcc_assert (current_loop_nest);
6121 old_len = VEC_length (basic_block, preheader_blocks);
6122
6123 /* Add blocks that aren't within the current loop to PREHEADER_BLOCKS. */
6124 for (i = 0; i < RGN_NR_BLOCKS (cur_rgn); i++)
6125 {
6126 bb = BASIC_BLOCK (BB_TO_BLOCK (i));
6127
6128 /* If the basic block belongs to the region, but not to the
6129 corresponding loop, then it must be a preheader. */
6130 if (sel_is_loop_preheader_p (bb))
6131 {
6132 VEC_safe_push (basic_block, heap, preheader_blocks, bb);
6133 if (BB_END (bb) != bb_note (bb))
6134 all_empty_p = false;
6135 }
6136 }
6137
6138 /* Remove these blocks only after iterating over the whole region. */
6139 for (i = VEC_length (basic_block, preheader_blocks) - 1;
6140 i >= old_len;
6141 i--)
6142 {
6143 bb = VEC_index (basic_block, preheader_blocks, i);
6144 sel_remove_bb (bb, false);
6145 }
6146
6147 if (!considered_for_pipelining_p (loop_outer (current_loop_nest)))
6148 {
6149 if (!all_empty_p)
6150 /* Immediately create new region from preheader. */
6151 make_region_from_loop_preheader (&preheader_blocks);
6152 else
6153 {
6154 /* If all preheader blocks are empty, don't create a new empty
6155 region; instead, remove them completely. */
6156 for (i = 0; VEC_iterate (basic_block, preheader_blocks, i, bb); i++)
6157 {
6158 edge e;
6159 edge_iterator ei;
6160 basic_block prev_bb = bb->prev_bb, next_bb = bb->next_bb;
6161
6162 /* Redirect all incoming edges to the next basic block. */
6163 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
6164 {
6165 if (! (e->flags & EDGE_FALLTHRU))
6166 redirect_edge_and_branch (e, bb->next_bb);
6167 else
6168 redirect_edge_succ (e, bb->next_bb);
6169 }
6170 gcc_assert (BB_NOTE_LIST (bb) == NULL);
6171 delete_and_free_basic_block (bb);
6172
6173 /* Check whether, after deleting the preheader, there is an
6174 unconditional jump in PREV_BB that leads only to the next basic
6175 block NEXT_BB. If so, remove the now-redundant jump and free the
6176 data sets of PREV_BB if it becomes empty. */
6177 if (next_bb->prev_bb == prev_bb
6178 && prev_bb != ENTRY_BLOCK_PTR
6179 && jump_leads_only_to_bb_p (BB_END (prev_bb), next_bb))
6180 {
6181 redirect_edge_and_branch (EDGE_SUCC (prev_bb, 0), next_bb);
6182 if (BB_END (prev_bb) == bb_note (prev_bb))
6183 free_data_sets (prev_bb);
6184 }
6185 }
6186 }
6187 VEC_free (basic_block, heap, preheader_blocks);
6188 }
6189 else
6190 /* Store preheader within the father's loop structure. */
6191 SET_LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest),
6192 preheader_blocks);
6193 }
6194 #endif