1 /* Instruction scheduling pass. Selective scheduler and pipeliner.
2 Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011
3 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "diagnostic-core.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "hard-reg-set.h"
29 #include "regs.h"
30 #include "function.h"
31 #include "flags.h"
32 #include "insn-config.h"
33 #include "insn-attr.h"
34 #include "except.h"
35 #include "recog.h"
36 #include "params.h"
37 #include "target.h"
38 #include "timevar.h"
39 #include "tree-pass.h"
40 #include "sched-int.h"
41 #include "ggc.h"
42 #include "tree.h"
43 #include "vec.h"
44 #include "langhooks.h"
45 #include "rtlhooks-def.h"
46 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
47
48 #ifdef INSN_SCHEDULING
49 #include "sel-sched-ir.h"
50 /* We don't have to use it except for sel_print_insn. */
51 #include "sel-sched-dump.h"
52
53 /* A vector holding bb info for the whole scheduling pass. */
54 VEC(sel_global_bb_info_def, heap) *sel_global_bb_info = NULL;
55
56 /* A vector holding bb info for the current region. */
57 VEC(sel_region_bb_info_def, heap) *sel_region_bb_info = NULL;
58
59 /* A pool for allocating all lists. */
60 alloc_pool sched_lists_pool;
61
62 /* This contains information about successors for compute_av_set. */
63 struct succs_info current_succs;
64
65 /* Data structure to describe interaction with the generic scheduler utils. */
66 static struct common_sched_info_def sel_common_sched_info;
67
68 /* The loop nest being pipelined. */
69 struct loop *current_loop_nest;
70
71 /* LOOP_NESTS is a vector containing the corresponding loop nest for
72 each region. */
73 static VEC(loop_p, heap) *loop_nests = NULL;
74
75 /* Saves blocks already in loop regions, indexed by bb->index. */
76 static sbitmap bbs_in_loop_rgns = NULL;
77
78 /* CFG hooks that are saved before changing create_basic_block hook. */
79 static struct cfg_hooks orig_cfg_hooks;
80 \f
81
82 /* Array containing reverse topological index of function basic blocks,
83 indexed by BB->INDEX. */
84 static int *rev_top_order_index = NULL;
85
86 /* Length of the above array. */
87 static int rev_top_order_index_len = -1;
88
89 /* A regset pool structure. */
90 static struct
91 {
92 /* The stack to which regsets are returned. */
93 regset *v;
94
95 /* Its stack pointer, i.e. the number of regsets in V. */
96 int n;
97
98 /* Its size. */
99 int s;
100
101 /* In VV we save all generated regsets so that, when destroying the
102 pool, we can compare it with V and check that every regset was
103 returned to the pool. */
104 regset *vv;
105
106 /* The stack pointer of VV. */
107 int nn;
108
109 /* Its size. */
110 int ss;
111
112 /* The difference between allocated and returned regsets. */
113 int diff;
114 } regset_pool = { NULL, 0, 0, NULL, 0, 0, 0 };
115
116 /* This represents the nop pool. */
117 static struct
118 {
119 /* The vector which holds previously emitted nops. */
120 insn_t *v;
121
122 /* Its stack pointer. */
123 int n;
124
125 /* Its size. */
126 int s;
127 } nop_pool = { NULL, 0, 0 };
128
129 /* The pool for basic block notes. */
130 static rtx_vec_t bb_note_pool;
131
132 /* A NOP pattern used to emit placeholder insns. */
133 rtx nop_pattern = NULL_RTX;
134 /* A special instruction that resides in EXIT_BLOCK.
135 EXIT_INSN is the successor of the insns that lead to EXIT_BLOCK. */
136 rtx exit_insn = NULL_RTX;
137
138 /* TRUE if, while scheduling the current region (which is a loop), its
139 preheader was removed. */
140 bool preheader_removed = false;
141 \f
142
143 /* Forward static declarations. */
144 static void fence_clear (fence_t);
145
146 static void deps_init_id (idata_t, insn_t, bool);
147 static void init_id_from_df (idata_t, insn_t, bool);
148 static expr_t set_insn_init (expr_t, vinsn_t, int);
149
150 static void cfg_preds (basic_block, insn_t **, int *);
151 static void prepare_insn_expr (insn_t, int);
152 static void free_history_vect (VEC (expr_history_def, heap) **);
153
154 static void move_bb_info (basic_block, basic_block);
155 static void remove_empty_bb (basic_block, bool);
156 static void sel_merge_blocks (basic_block, basic_block);
157 static void sel_remove_loop_preheader (void);
158 static bool bb_has_removable_jump_to_p (basic_block, basic_block);
159
160 static bool insn_is_the_only_one_in_bb_p (insn_t);
161 static void create_initial_data_sets (basic_block);
162
163 static void free_av_set (basic_block);
164 static void invalidate_av_set (basic_block);
165 static void extend_insn_data (void);
166 static void sel_init_new_insn (insn_t, int);
167 static void finish_insns (void);
168 \f
169 /* Various list functions. */
170
171 /* Copy an instruction list L. */
172 ilist_t
173 ilist_copy (ilist_t l)
174 {
175 ilist_t head = NULL, *tailp = &head;
176
177 while (l)
178 {
179 ilist_add (tailp, ILIST_INSN (l));
180 tailp = &ILIST_NEXT (*tailp);
181 l = ILIST_NEXT (l);
182 }
183
184 return head;
185 }
186
187 /* Invert an instruction list L. */
188 ilist_t
189 ilist_invert (ilist_t l)
190 {
191 ilist_t res = NULL;
192
193 while (l)
194 {
195 ilist_add (&res, ILIST_INSN (l));
196 l = ILIST_NEXT (l);
197 }
198
199 return res;
200 }
201
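/* A minimal usage sketch of the two helpers above (illustrative only;
   the function name is hypothetical and the block is kept out of the
   build): copying preserves the original order, inverting reverses it.  */
#if 0
static void
ilist_example (ilist_t orig)
{
  ilist_t same_order = ilist_copy (orig);  /* A, B, C -> A, B, C.  */
  ilist_t reversed = ilist_invert (orig);  /* A, B, C -> C, B, A.  */

  ilist_clear (&same_order);
  ilist_clear (&reversed);
}
#endif
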
202 /* Add a new boundary to the LP list with parameters TO, PTR, and DC. */
203 void
204 blist_add (blist_t *lp, insn_t to, ilist_t ptr, deps_t dc)
205 {
206 bnd_t bnd;
207
208 _list_add (lp);
209 bnd = BLIST_BND (*lp);
210
211 BND_TO (bnd) = to;
212 BND_PTR (bnd) = ptr;
213 BND_AV (bnd) = NULL;
214 BND_AV1 (bnd) = NULL;
215 BND_DC (bnd) = dc;
216 }
217
218 /* Remove the list node pointed to by LP. */
219 void
220 blist_remove (blist_t *lp)
221 {
222 bnd_t b = BLIST_BND (*lp);
223
224 av_set_clear (&BND_AV (b));
225 av_set_clear (&BND_AV1 (b));
226 ilist_clear (&BND_PTR (b));
227
228 _list_remove (lp);
229 }
230
231 /* Init a fence tail L. */
232 void
233 flist_tail_init (flist_tail_t l)
234 {
235 FLIST_TAIL_HEAD (l) = NULL;
236 FLIST_TAIL_TAILP (l) = &FLIST_TAIL_HEAD (l);
237 }
238
239 /* Try to find the fence corresponding to INSN in L. */
240 fence_t
241 flist_lookup (flist_t l, insn_t insn)
242 {
243 while (l)
244 {
245 if (FENCE_INSN (FLIST_FENCE (l)) == insn)
246 return FLIST_FENCE (l);
247
248 l = FLIST_NEXT (l);
249 }
250
251 return NULL;
252 }
253
254 /* Init the fields of F before running fill_insns. */
255 static void
256 init_fence_for_scheduling (fence_t f)
257 {
258 FENCE_BNDS (f) = NULL;
259 FENCE_PROCESSED_P (f) = false;
260 FENCE_SCHEDULED_P (f) = false;
261 }
262
263 /* Add new fence consisting of INSN and STATE to the list pointed to by LP. */
264 static void
265 flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc,
266 insn_t last_scheduled_insn, VEC(rtx,gc) *executing_insns,
267 int *ready_ticks, int ready_ticks_size, insn_t sched_next,
268 int cycle, int cycle_issued_insns, int issue_more,
269 bool starts_cycle_p, bool after_stall_p)
270 {
271 fence_t f;
272
273 _list_add (lp);
274 f = FLIST_FENCE (*lp);
275
276 FENCE_INSN (f) = insn;
277
278 gcc_assert (state != NULL);
279 FENCE_STATE (f) = state;
280
281 FENCE_CYCLE (f) = cycle;
282 FENCE_ISSUED_INSNS (f) = cycle_issued_insns;
283 FENCE_STARTS_CYCLE_P (f) = starts_cycle_p;
284 FENCE_AFTER_STALL_P (f) = after_stall_p;
285
286 gcc_assert (dc != NULL);
287 FENCE_DC (f) = dc;
288
289 gcc_assert (tc != NULL || targetm.sched.alloc_sched_context == NULL);
290 FENCE_TC (f) = tc;
291
292 FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
293 FENCE_ISSUE_MORE (f) = issue_more;
294 FENCE_EXECUTING_INSNS (f) = executing_insns;
295 FENCE_READY_TICKS (f) = ready_ticks;
296 FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
297 FENCE_SCHED_NEXT (f) = sched_next;
298
299 init_fence_for_scheduling (f);
300 }
301
302 /* Remove the head node of the list pointed to by LP. */
303 static void
304 flist_remove (flist_t *lp)
305 {
306 if (FENCE_INSN (FLIST_FENCE (*lp)))
307 fence_clear (FLIST_FENCE (*lp));
308 _list_remove (lp);
309 }
310
311 /* Clear the fence list pointed to by LP. */
312 void
313 flist_clear (flist_t *lp)
314 {
315 while (*lp)
316 flist_remove (lp);
317 }
318
319 /* Add ORIGINAL_INSN to the def list DL, honoring CROSSES_CALL. */
320 void
321 def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call)
322 {
323 def_t d;
324
325 _list_add (dl);
326 d = DEF_LIST_DEF (*dl);
327
328 d->orig_insn = original_insn;
329 d->crosses_call = crosses_call;
330 }
331 \f
332
333 /* Functions to work with target contexts. */
334
335 /* Bulk target context. It is convenient for debugging purposes to ensure
336 that there are no uninitialized (null) target contexts. */
337 static tc_t bulk_tc = (tc_t) 1;
338
339 /* Target hooks wrappers. In the future we can provide some default
340 implementations for them. */
341
342 /* Allocate a store for the target context. */
343 static tc_t
344 alloc_target_context (void)
345 {
346 return (targetm.sched.alloc_sched_context
347 ? targetm.sched.alloc_sched_context () : bulk_tc);
348 }
349
350 /* Init target context TC.
351 If CLEAN_P is true, then initialize TC as if it were at the beginning
352 of the scheduler's work. Otherwise, copy the current backend context to TC. */
353 static void
354 init_target_context (tc_t tc, bool clean_p)
355 {
356 if (targetm.sched.init_sched_context)
357 targetm.sched.init_sched_context (tc, clean_p);
358 }
359
360 /* Allocate and initialize a target context. The meaning of CLEAN_P is the
361 same as in init_target_context (). */
362 tc_t
363 create_target_context (bool clean_p)
364 {
365 tc_t tc = alloc_target_context ();
366
367 init_target_context (tc, clean_p);
368 return tc;
369 }
370
371 /* Copy TC to the current backend context. */
372 void
373 set_target_context (tc_t tc)
374 {
375 if (targetm.sched.set_sched_context)
376 targetm.sched.set_sched_context (tc);
377 }
378
379 /* TC is about to be destroyed. Free any internal data. */
380 static void
381 clear_target_context (tc_t tc)
382 {
383 if (targetm.sched.clear_sched_context)
384 targetm.sched.clear_sched_context (tc);
385 }
386
387 /* Clear and free TC. */
388 static void
389 delete_target_context (tc_t tc)
390 {
391 clear_target_context (tc);
392
393 if (targetm.sched.free_sched_context)
394 targetm.sched.free_sched_context (tc);
395 }
396
397 /* Make a copy of FROM in TO.
398 NB: Maybe this should be a hook. */
399 static void
400 copy_target_context (tc_t to, tc_t from)
401 {
402 tc_t tmp = create_target_context (false);
403
404 set_target_context (from);
405 init_target_context (to, false);
406
407 set_target_context (tmp);
408 delete_target_context (tmp);
409 }
410
411 /* Create a copy of TC. */
412 static tc_t
413 create_copy_of_target_context (tc_t tc)
414 {
415 tc_t copy = alloc_target_context ();
416
417 copy_target_context (copy, tc);
418
419 return copy;
420 }
421
422 /* Clear TC and initialize it according to CLEAN_P. The meaning of CLEAN_P
423 is the same as in init_target_context (). */
424 void
425 reset_target_context (tc_t tc, bool clean_p)
426 {
427 clear_target_context (tc);
428 init_target_context (tc, clean_p);
429 }
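
/* A sketch of the target context lifecycle (illustrative only; the
   function name is hypothetical and the block is kept out of the build):
   contexts are created clean, copied when a fence is duplicated, and
   must be deleted to release any backend data.  */
#if 0
static void
target_context_example (void)
{
  tc_t tc = create_target_context (true);  /* Clean context.  */
  tc_t copy = create_copy_of_target_context (tc);

  /* Reinitialize COPY from the current backend context.  */
  reset_target_context (copy, false);

  delete_target_context (copy);
  delete_target_context (tc);
}
#endif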
430 \f
431 /* Functions to work with dependence contexts.
432 A deps context (aka deps_t, aka struct deps_desc *) is short for a
433 dependence context. It accumulates information about processed insns
434 to decide whether the current insn is dependent on the processed ones. */
435
436 /* Make a copy of FROM in TO. */
437 static void
438 copy_deps_context (deps_t to, deps_t from)
439 {
440 init_deps (to, false);
441 deps_join (to, from);
442 }
443
444 /* Allocate store for dep context. */
445 static deps_t
446 alloc_deps_context (void)
447 {
448 return XNEW (struct deps_desc);
449 }
450
451 /* Allocate and initialize dep context. */
452 static deps_t
453 create_deps_context (void)
454 {
455 deps_t dc = alloc_deps_context ();
456
457 init_deps (dc, false);
458 return dc;
459 }
460
461 /* Create a copy of FROM. */
462 static deps_t
463 create_copy_of_deps_context (deps_t from)
464 {
465 deps_t to = alloc_deps_context ();
466
467 copy_deps_context (to, from);
468 return to;
469 }
470
471 /* Clean up internal data of DC. */
472 static void
473 clear_deps_context (deps_t dc)
474 {
475 free_deps (dc);
476 }
477
478 /* Clear and free DC. */
479 static void
480 delete_deps_context (deps_t dc)
481 {
482 clear_deps_context (dc);
483 free (dc);
484 }
485
486 /* Clear and init DC. */
487 static void
488 reset_deps_context (deps_t dc)
489 {
490 clear_deps_context (dc);
491 init_deps (dc, false);
492 }
493
494 /* This structure describes the dependence analysis hooks for advancing
495 dependence context. */
496 static struct sched_deps_info_def advance_deps_context_sched_deps_info =
497 {
498 NULL,
499
500 NULL, /* start_insn */
501 NULL, /* finish_insn */
502 NULL, /* start_lhs */
503 NULL, /* finish_lhs */
504 NULL, /* start_rhs */
505 NULL, /* finish_rhs */
506 haifa_note_reg_set,
507 haifa_note_reg_clobber,
508 haifa_note_reg_use,
509 NULL, /* note_mem_dep */
510 NULL, /* note_dep */
511
512 0, 0, 0
513 };
514
515 /* Process INSN and add its impact on DC. */
516 void
517 advance_deps_context (deps_t dc, insn_t insn)
518 {
519 sched_deps_info = &advance_deps_context_sched_deps_info;
520 deps_analyze_insn (dc, insn);
521 }
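
/* A sketch of typical deps context usage (illustrative only; the
   function name is hypothetical and the block is kept out of the build):
   accumulate the effects of already-processed insns so that later
   dependence queries against the context see them.  */
#if 0
static void
deps_context_example (insn_t first, insn_t last)
{
  deps_t dc = create_deps_context ();
  insn_t insn;

  for (insn = first; insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      advance_deps_context (dc, insn);

  delete_deps_context (dc);
}
#endif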
522 \f
523
524 /* Functions to work with DFA states. */
525
526 /* Allocate store for a DFA state. */
527 static state_t
528 state_alloc (void)
529 {
530 return xmalloc (dfa_state_size);
531 }
532
533 /* Allocate and initialize DFA state. */
534 static state_t
535 state_create (void)
536 {
537 state_t state = state_alloc ();
538
539 state_reset (state);
540 advance_state (state);
541 return state;
542 }
543
544 /* Free DFA state. */
545 static void
546 state_free (state_t state)
547 {
548 free (state);
549 }
550
551 /* Make a copy of FROM in TO. */
552 static void
553 state_copy (state_t to, state_t from)
554 {
555 memcpy (to, from, dfa_state_size);
556 }
557
558 /* Create a copy of FROM. */
559 static state_t
560 state_create_copy (state_t from)
561 {
562 state_t to = state_alloc ();
563
564 state_copy (to, from);
565 return to;
566 }
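
/* A sketch of DFA state handling (illustrative only; the function name
   is hypothetical and the block is kept out of the build): states are
   raw buffers of dfa_state_size bytes, so a scratch copy can be probed
   without disturbing the original.  */
#if 0
static void
dfa_state_example (state_t fence_state)
{
  state_t scratch = state_create_copy (fence_state);

  /* ... try issuing insns against SCRATCH without touching the
     fence's own state ...  */

  state_free (scratch);
}
#endif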
567 \f
568
569 /* Functions to work with fences. */
570
571 /* Clear the fence. */
572 static void
573 fence_clear (fence_t f)
574 {
575 state_t s = FENCE_STATE (f);
576 deps_t dc = FENCE_DC (f);
577 void *tc = FENCE_TC (f);
578
579 ilist_clear (&FENCE_BNDS (f));
580
581 gcc_assert ((s != NULL && dc != NULL && tc != NULL)
582 || (s == NULL && dc == NULL && tc == NULL));
583
584 free (s);
585
586 if (dc != NULL)
587 delete_deps_context (dc);
588
589 if (tc != NULL)
590 delete_target_context (tc);
591 VEC_free (rtx, gc, FENCE_EXECUTING_INSNS (f));
592 free (FENCE_READY_TICKS (f));
593 FENCE_READY_TICKS (f) = NULL;
594 }
595
596 /* Init a list of fences with successors of OLD_FENCE. */
597 void
598 init_fences (insn_t old_fence)
599 {
600 insn_t succ;
601 succ_iterator si;
602 bool first = true;
603 int ready_ticks_size = get_max_uid () + 1;
604
605 FOR_EACH_SUCC_1 (succ, si, old_fence,
606 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
607 {
608
609 if (first)
610 first = false;
611 else
612 gcc_assert (flag_sel_sched_pipelining_outer_loops);
613
614 flist_add (&fences, succ,
615 state_create (),
616 create_deps_context () /* dc */,
617 create_target_context (true) /* tc */,
618 NULL_RTX /* last_scheduled_insn */,
619 NULL, /* executing_insns */
620 XCNEWVEC (int, ready_ticks_size), /* ready_ticks */
621 ready_ticks_size,
622 NULL_RTX /* sched_next */,
623 1 /* cycle */, 0 /* cycle_issued_insns */,
624 issue_rate, /* issue_more */
625 1 /* starts_cycle_p */, 0 /* after_stall_p */);
626 }
627 }
628
629 /* Merge two fences (filling the fields of fence F with the resulting
630 values) by the following rules: 1) state, target context and last
631 scheduled insn are propagated from the fallthrough edge if available;
632 2) deps context and cycle are propagated from the more probable edge;
633 3) all other fields are set to the corresponding constant values.
634
635 INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS,
636 READY_TICKS, READY_TICKS_SIZE, SCHED_NEXT, CYCLE, ISSUE_MORE
637 and AFTER_STALL_P are the corresponding fields of the second fence. */
638 static void
639 merge_fences (fence_t f, insn_t insn,
640 state_t state, deps_t dc, void *tc,
641 rtx last_scheduled_insn, VEC(rtx, gc) *executing_insns,
642 int *ready_ticks, int ready_ticks_size,
643 rtx sched_next, int cycle, int issue_more, bool after_stall_p)
644 {
645 insn_t last_scheduled_insn_old = FENCE_LAST_SCHEDULED_INSN (f);
646
647 gcc_assert (sel_bb_head_p (FENCE_INSN (f))
648 && !sched_next && !FENCE_SCHED_NEXT (f));
649
650 /* Check if we can decide along which path the fences came.
651 If we can't (or don't want to), reset everything. */
652 if (last_scheduled_insn == NULL
653 || last_scheduled_insn_old == NULL
654 /* This is the case when INSN is reachable on several paths from
655 one insn (this can happen when pipelining of outer loops is on and
656 there are two edges: one going around the inner loop and the other
657 going right through it; in such a case just reset everything). */
658 || last_scheduled_insn == last_scheduled_insn_old)
659 {
660 state_reset (FENCE_STATE (f));
661 state_free (state);
662
663 reset_deps_context (FENCE_DC (f));
664 delete_deps_context (dc);
665
666 reset_target_context (FENCE_TC (f), true);
667 delete_target_context (tc);
668
669 if (cycle > FENCE_CYCLE (f))
670 FENCE_CYCLE (f) = cycle;
671
672 FENCE_LAST_SCHEDULED_INSN (f) = NULL;
673 FENCE_ISSUE_MORE (f) = issue_rate;
674 VEC_free (rtx, gc, executing_insns);
675 free (ready_ticks);
676 if (FENCE_EXECUTING_INSNS (f))
677 VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
678 VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
679 if (FENCE_READY_TICKS (f))
680 memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
681 }
682 else
683 {
684 edge edge_old = NULL, edge_new = NULL;
685 edge candidate;
686 succ_iterator si;
687 insn_t succ;
688
689 /* Find fallthrough edge. */
690 gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb);
691 candidate = find_fallthru_edge_from (BLOCK_FOR_INSN (insn)->prev_bb);
692
693 if (!candidate
694 || (candidate->src != BLOCK_FOR_INSN (last_scheduled_insn)
695 && candidate->src != BLOCK_FOR_INSN (last_scheduled_insn_old)))
696 {
697 /* No fallthrough edge leading to basic block of INSN. */
698 state_reset (FENCE_STATE (f));
699 state_free (state);
700
701 reset_target_context (FENCE_TC (f), true);
702 delete_target_context (tc);
703
704 FENCE_LAST_SCHEDULED_INSN (f) = NULL;
705 FENCE_ISSUE_MORE (f) = issue_rate;
706 }
707 else
708 if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn))
709 {
710 /* It would be weird if the same insn were the successor of several
711 fallthrough edges. */
712 gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
713 != BLOCK_FOR_INSN (last_scheduled_insn_old));
714
715 state_free (FENCE_STATE (f));
716 FENCE_STATE (f) = state;
717
718 delete_target_context (FENCE_TC (f));
719 FENCE_TC (f) = tc;
720
721 FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
722 FENCE_ISSUE_MORE (f) = issue_more;
723 }
724 else
725 {
726 /* Leave STATE, TC and LAST_SCHEDULED_INSN fields untouched. */
727 state_free (state);
728 delete_target_context (tc);
729
730 gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
731 != BLOCK_FOR_INSN (last_scheduled_insn));
732 }
733
734 /* Find edge of first predecessor (last_scheduled_insn_old->insn). */
735 FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn_old,
736 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
737 {
738 if (succ == insn)
739 {
740 /* No same successor allowed from several edges. */
741 gcc_assert (!edge_old);
742 edge_old = si.e1;
743 }
744 }
745 /* Find edge of second predecessor (last_scheduled_insn->insn). */
746 FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn,
747 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
748 {
749 if (succ == insn)
750 {
751 /* No same successor allowed from several edges. */
752 gcc_assert (!edge_new);
753 edge_new = si.e1;
754 }
755 }
756
757 /* Check if we can choose the most probable predecessor. */
758 if (edge_old == NULL || edge_new == NULL)
759 {
760 reset_deps_context (FENCE_DC (f));
761 delete_deps_context (dc);
762 VEC_free (rtx, gc, executing_insns);
763 free (ready_ticks);
764
765 FENCE_CYCLE (f) = MAX (FENCE_CYCLE (f), cycle);
766 if (FENCE_EXECUTING_INSNS (f))
767 VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
768 VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
769 if (FENCE_READY_TICKS (f))
770 memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
771 }
772 else
773 if (edge_new->probability > edge_old->probability)
774 {
775 delete_deps_context (FENCE_DC (f));
776 FENCE_DC (f) = dc;
777 VEC_free (rtx, gc, FENCE_EXECUTING_INSNS (f));
778 FENCE_EXECUTING_INSNS (f) = executing_insns;
779 free (FENCE_READY_TICKS (f));
780 FENCE_READY_TICKS (f) = ready_ticks;
781 FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
782 FENCE_CYCLE (f) = cycle;
783 }
784 else
785 {
786 /* Leave DC and CYCLE untouched. */
787 delete_deps_context (dc);
788 VEC_free (rtx, gc, executing_insns);
789 free (ready_ticks);
790 }
791 }
792
793 /* Fill remaining invariant fields. */
794 if (after_stall_p)
795 FENCE_AFTER_STALL_P (f) = 1;
796
797 FENCE_ISSUED_INSNS (f) = 0;
798 FENCE_STARTS_CYCLE_P (f) = 1;
799 FENCE_SCHED_NEXT (f) = NULL;
800 }
801
802 /* Add a new fence to NEW_FENCES list, initializing it from all
803 other parameters. */
804 static void
805 add_to_fences (flist_tail_t new_fences, insn_t insn,
806 state_t state, deps_t dc, void *tc, rtx last_scheduled_insn,
807 VEC(rtx, gc) *executing_insns, int *ready_ticks,
808 int ready_ticks_size, rtx sched_next, int cycle,
809 int cycle_issued_insns, int issue_rate,
810 bool starts_cycle_p, bool after_stall_p)
811 {
812 fence_t f = flist_lookup (FLIST_TAIL_HEAD (new_fences), insn);
813
814 if (! f)
815 {
816 flist_add (FLIST_TAIL_TAILP (new_fences), insn, state, dc, tc,
817 last_scheduled_insn, executing_insns, ready_ticks,
818 ready_ticks_size, sched_next, cycle, cycle_issued_insns,
819 issue_rate, starts_cycle_p, after_stall_p);
820
821 FLIST_TAIL_TAILP (new_fences)
822 = &FLIST_NEXT (*FLIST_TAIL_TAILP (new_fences));
823 }
824 else
825 {
826 merge_fences (f, insn, state, dc, tc, last_scheduled_insn,
827 executing_insns, ready_ticks, ready_ticks_size,
828 sched_next, cycle, issue_rate, after_stall_p);
829 }
830 }
831
832 /* Move the first fence in the OLD_FENCES list to NEW_FENCES. */
833 void
834 move_fence_to_fences (flist_t old_fences, flist_tail_t new_fences)
835 {
836 fence_t f, old;
837 flist_t *tailp = FLIST_TAIL_TAILP (new_fences);
838
839 old = FLIST_FENCE (old_fences);
840 f = flist_lookup (FLIST_TAIL_HEAD (new_fences),
841 FENCE_INSN (FLIST_FENCE (old_fences)));
842 if (f)
843 {
844 merge_fences (f, old->insn, old->state, old->dc, old->tc,
845 old->last_scheduled_insn, old->executing_insns,
846 old->ready_ticks, old->ready_ticks_size,
847 old->sched_next, old->cycle, old->issue_more,
848 old->after_stall_p);
849 }
850 else
851 {
852 _list_add (tailp);
853 FLIST_TAIL_TAILP (new_fences) = &FLIST_NEXT (*tailp);
854 *FLIST_FENCE (*tailp) = *old;
855 init_fence_for_scheduling (FLIST_FENCE (*tailp));
856 }
857 FENCE_INSN (old) = NULL;
858 }
859
860 /* Add a new fence to the NEW_FENCES list and initialize most of its
861 data as clean. */
862 void
863 add_clean_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
864 {
865 int ready_ticks_size = get_max_uid () + 1;
866
867 add_to_fences (new_fences,
868 succ, state_create (), create_deps_context (),
869 create_target_context (true),
870 NULL_RTX, NULL,
871 XCNEWVEC (int, ready_ticks_size), ready_ticks_size,
872 NULL_RTX, FENCE_CYCLE (fence) + 1,
873 0, issue_rate, 1, FENCE_AFTER_STALL_P (fence));
874 }
875
876 /* Add a new fence to NEW_FENCES list and initialize all of its data
877 from FENCE and SUCC. */
878 void
879 add_dirty_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
880 {
881 int * new_ready_ticks
882 = XNEWVEC (int, FENCE_READY_TICKS_SIZE (fence));
883
884 memcpy (new_ready_ticks, FENCE_READY_TICKS (fence),
885 FENCE_READY_TICKS_SIZE (fence) * sizeof (int));
886 add_to_fences (new_fences,
887 succ, state_create_copy (FENCE_STATE (fence)),
888 create_copy_of_deps_context (FENCE_DC (fence)),
889 create_copy_of_target_context (FENCE_TC (fence)),
890 FENCE_LAST_SCHEDULED_INSN (fence),
891 VEC_copy (rtx, gc, FENCE_EXECUTING_INSNS (fence)),
892 new_ready_ticks,
893 FENCE_READY_TICKS_SIZE (fence),
894 FENCE_SCHED_NEXT (fence),
895 FENCE_CYCLE (fence),
896 FENCE_ISSUED_INSNS (fence),
897 FENCE_ISSUE_MORE (fence),
898 FENCE_STARTS_CYCLE_P (fence),
899 FENCE_AFTER_STALL_P (fence));
900 }
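
/* A sketch contrasting the two helpers above (illustrative only; the
   function name is hypothetical and the block is kept out of the build):
   a clean fence restarts the scheduling state at SUCC, while a dirty
   fence carries FENCE's accumulated state across the edge.  */
#if 0
static void
fence_succ_example (flist_tail_t new_fences, fence_t fence, insn_t succ,
                    bool state_is_valid_at_succ)
{
  if (state_is_valid_at_succ)
    add_dirty_fence_to_fences (new_fences, succ, fence);
  else
    add_clean_fence_to_fences (new_fences, succ, fence);
}
#endif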
901 \f
902
903 /* Functions to work with regset and nop pools. */
904
905 /* Return a new regset from the pool. It might have some of the bits set
906 from previous usage. */
907 regset
908 get_regset_from_pool (void)
909 {
910 regset rs;
911
912 if (regset_pool.n != 0)
913 rs = regset_pool.v[--regset_pool.n];
914 else
915 /* We need to create the regset. */
916 {
917 rs = ALLOC_REG_SET (&reg_obstack);
918
919 if (regset_pool.nn == regset_pool.ss)
920 regset_pool.vv = XRESIZEVEC (regset, regset_pool.vv,
921 (regset_pool.ss = 2 * regset_pool.ss + 1));
922 regset_pool.vv[regset_pool.nn++] = rs;
923 }
924
925 regset_pool.diff++;
926
927 return rs;
928 }
929
930 /* Same as above, but returns an empty regset. */
931 regset
932 get_clear_regset_from_pool (void)
933 {
934 regset rs = get_regset_from_pool ();
935
936 CLEAR_REG_SET (rs);
937 return rs;
938 }
939
940 /* Return regset RS to the pool for future use. */
941 void
942 return_regset_to_pool (regset rs)
943 {
944 gcc_assert (rs);
945 regset_pool.diff--;
946
947 if (regset_pool.n == regset_pool.s)
948 regset_pool.v = XRESIZEVEC (regset, regset_pool.v,
949 (regset_pool.s = 2 * regset_pool.s + 1));
950 regset_pool.v[regset_pool.n++] = rs;
951 }
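
/* A sketch of the pool discipline (illustrative only; the function name
   is hypothetical and the block is kept out of the build): every regset
   taken from the pool must be returned to it, otherwise free_regset_pool
   will assert on the leak in checking builds.  */
#if 0
static void
regset_pool_example (regset src)
{
  regset tmp = get_clear_regset_from_pool ();

  COPY_REG_SET (tmp, src);
  /* ... use TMP ...  */
  return_regset_to_pool (tmp);
}
#endif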
952
953 #ifdef ENABLE_CHECKING
954 /* This is used as a qsort callback for sorting regset pool stacks.
955 X and XX are addresses of two regsets. They are never equal. */
956 static int
957 cmp_v_in_regset_pool (const void *x, const void *xx)
958 {
959 return *((const regset *) x) - *((const regset *) xx);
960 }
961 #endif
962
963 /* Free the regset pool possibly checking for memory leaks. */
964 void
965 free_regset_pool (void)
966 {
967 #ifdef ENABLE_CHECKING
968 {
969 regset *v = regset_pool.v;
970 int i = 0;
971 int n = regset_pool.n;
972
973 regset *vv = regset_pool.vv;
974 int ii = 0;
975 int nn = regset_pool.nn;
976
977 int diff = 0;
978
979 gcc_assert (n <= nn);
980
981 /* Sort both vectors so it will be possible to compare them. */
982 qsort (v, n, sizeof (*v), cmp_v_in_regset_pool);
983 qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool);
984
985 while (ii < nn)
986 {
987 if (v[i] == vv[ii])
988 i++;
989 else
990 /* VV[II] was lost. */
991 diff++;
992
993 ii++;
994 }
995
996 gcc_assert (diff == regset_pool.diff);
997 }
998 #endif
999
1000 /* If this does not hold, we have a memory leak. */
1001 gcc_assert (regset_pool.diff == 0);
1002
1003 while (regset_pool.n)
1004 {
1005 --regset_pool.n;
1006 FREE_REG_SET (regset_pool.v[regset_pool.n]);
1007 }
1008
1009 free (regset_pool.v);
1010 regset_pool.v = NULL;
1011 regset_pool.s = 0;
1012
1013 free (regset_pool.vv);
1014 regset_pool.vv = NULL;
1015 regset_pool.nn = 0;
1016 regset_pool.ss = 0;
1017
1018 regset_pool.diff = 0;
1019 }
1020 \f
1021
1022 /* Functions to work with nop pools. NOP insns are used as temporary
1023 placeholders of the insns being scheduled to allow correct update of
1024 the data sets. When update is finished, NOPs are deleted. */
1025
1026 /* A vinsn that is used to represent a nop. This vinsn is shared among all
1027 nops sel-sched generates. */
1028 static vinsn_t nop_vinsn = NULL;
1029
1030 /* Emit a nop before INSN, taking it from pool. */
1031 insn_t
1032 get_nop_from_pool (insn_t insn)
1033 {
1034 insn_t nop;
1035 bool old_p = nop_pool.n != 0;
1036 int flags;
1037
1038 if (old_p)
1039 nop = nop_pool.v[--nop_pool.n];
1040 else
1041 nop = nop_pattern;
1042
1043 nop = emit_insn_before (nop, insn);
1044
1045 if (old_p)
1046 flags = INSN_INIT_TODO_SSID;
1047 else
1048 flags = INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID;
1049
1050 set_insn_init (INSN_EXPR (insn), nop_vinsn, INSN_SEQNO (insn));
1051 sel_init_new_insn (nop, flags);
1052
1053 return nop;
1054 }
1055
1056 /* Remove NOP from the instruction stream and return it to the pool. */
1057 void
1058 return_nop_to_pool (insn_t nop, bool full_tidying)
1059 {
1060 gcc_assert (INSN_IN_STREAM_P (nop));
1061 sel_remove_insn (nop, false, full_tidying);
1062
1063 if (nop_pool.n == nop_pool.s)
1064 nop_pool.v = XRESIZEVEC (rtx, nop_pool.v,
1065 (nop_pool.s = 2 * nop_pool.s + 1));
1066 nop_pool.v[nop_pool.n++] = nop;
1067 }
1068
1069 /* Free the nop pool. */
1070 void
1071 free_nop_pool (void)
1072 {
1073 nop_pool.n = 0;
1074 nop_pool.s = 0;
1075 free (nop_pool.v);
1076 nop_pool.v = NULL;
1077 }
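
/* A sketch of nop pool usage (illustrative only; the function name is
   hypothetical and the block is kept out of the build): a nop temporarily
   stands in for a moved insn and is returned once the data sets have
   been updated.  */
#if 0
static void
nop_pool_example (insn_t moved_insn)
{
  insn_t nop = get_nop_from_pool (moved_insn);

  /* ... update the data sets while NOP holds the place ...  */

  return_nop_to_pool (nop, true);
}
#endif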
1078 \f
1079
1080 /* Skip an unspec to support ia64 speculation. Called from rtx_equal_p_cb.
1081 The callback is given two rtxes XX and YY and writes the new rtxes
1082 to NX and NY in case something needs to be skipped. */
1083 static int
1084 skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx* ny)
1085 {
1086 const_rtx x = *xx;
1087 const_rtx y = *yy;
1088
1089 if (GET_CODE (x) == UNSPEC
1090 && (targetm.sched.skip_rtx_p == NULL
1091 || targetm.sched.skip_rtx_p (x)))
1092 {
1093 *nx = XVECEXP (x, 0, 0);
1094 *ny = CONST_CAST_RTX (y);
1095 return 1;
1096 }
1097
1098 if (GET_CODE (y) == UNSPEC
1099 && (targetm.sched.skip_rtx_p == NULL
1100 || targetm.sched.skip_rtx_p (y)))
1101 {
1102 *nx = CONST_CAST_RTX (x);
1103 *ny = XVECEXP (y, 0, 0);
1104 return 1;
1105 }
1106
1107 return 0;
1108 }
1109
1110 /* Callback, called from hash_rtx_cb. Helps to hash an UNSPEC rtx X in a
1111 correct way to support ia64 speculation. When changes are needed, the new
1112 rtx NX and new mode NMODE are written, and the callback returns true. */
1113 static int
1114 hash_with_unspec_callback (const_rtx x, enum machine_mode mode ATTRIBUTE_UNUSED,
1115 rtx *nx, enum machine_mode* nmode)
1116 {
1117 if (GET_CODE (x) == UNSPEC
1118 && targetm.sched.skip_rtx_p
1119 && targetm.sched.skip_rtx_p (x))
1120 {
1121 *nx = XVECEXP (x, 0 ,0);
1122 *nmode = VOIDmode;
1123 return 1;
1124 }
1125
1126 return 0;
1127 }
1128
1129 /* Return true if LHS and RHS are ok to be scheduled separately. */
1130 static bool
1131 lhs_and_rhs_separable_p (rtx lhs, rtx rhs)
1132 {
1133 if (lhs == NULL || rhs == NULL)
1134 return false;
1135
1136 /* Do not schedule CONST, CONST_INT, CONST_DOUBLE etc. as an rhs: no point
1137 in using a reg if a const can be used. Moreover, scheduling a const as
1138 an rhs may lead to a mode mismatch, because consts don't have modes but
1139 could be merged from branches where the same const is used in different modes. */
1140 if (CONSTANT_P (rhs))
1141 return false;
1142
1143 /* ??? Do not rename predicate registers to avoid ICEs in bundling. */
1144 if (COMPARISON_P (rhs))
1145 return false;
1146
1147 /* Do not allow single REG to be an rhs. */
1148 if (REG_P (rhs))
1149 return false;
1150
1151 /* See comment at find_used_regs_1 (*1) for explanation of this
1152 restriction. */
1153 /* FIXME: remove this later. */
1154 if (MEM_P (lhs))
1155 return false;
1156
1157 /* This will filter all tricky things like ZERO_EXTRACT etc.
1158 For now we don't handle it. */
1159 if (!REG_P (lhs) && !MEM_P (lhs))
1160 return false;
1161
1162 return true;
1163 }
1164
1165 /* Initialize vinsn VI for INSN. Only for use from vinsn_create (). When
1166 FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable. This is
1167 used e.g. for insns from recovery blocks. */
1168 static void
1169 vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p)
1170 {
1171 hash_rtx_callback_function hrcf;
1172 int insn_class;
1173
1174 VINSN_INSN_RTX (vi) = insn;
1175 VINSN_COUNT (vi) = 0;
1176 vi->cost = -1;
1177
1178 if (INSN_NOP_P (insn))
1179 return;
1180
1181 if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL)
1182 init_id_from_df (VINSN_ID (vi), insn, force_unique_p);
1183 else
1184 deps_init_id (VINSN_ID (vi), insn, force_unique_p);
1185
1186 /* Hash vinsn depending on whether it is separable or not. */
1187 hrcf = targetm.sched.skip_rtx_p ? hash_with_unspec_callback : NULL;
1188 if (VINSN_SEPARABLE_P (vi))
1189 {
1190 rtx rhs = VINSN_RHS (vi);
1191
1192 VINSN_HASH (vi) = hash_rtx_cb (rhs, GET_MODE (rhs),
1193 NULL, NULL, false, hrcf);
1194 VINSN_HASH_RTX (vi) = hash_rtx_cb (VINSN_PATTERN (vi),
1195 VOIDmode, NULL, NULL,
1196 false, hrcf);
1197 }
1198 else
1199 {
1200 VINSN_HASH (vi) = hash_rtx_cb (VINSN_PATTERN (vi), VOIDmode,
1201 NULL, NULL, false, hrcf);
1202 VINSN_HASH_RTX (vi) = VINSN_HASH (vi);
1203 }
1204
1205 insn_class = haifa_classify_insn (insn);
1206 if (insn_class >= 2
1207 && (!targetm.sched.get_insn_spec_ds
1208 || ((targetm.sched.get_insn_spec_ds (insn) & BEGIN_CONTROL)
1209 == 0)))
1210 VINSN_MAY_TRAP_P (vi) = true;
1211 else
1212 VINSN_MAY_TRAP_P (vi) = false;
1213 }
1214
1215 /* Indicate that VI has become a part of an rtx object. */
1216 void
1217 vinsn_attach (vinsn_t vi)
1218 {
1219 /* Assert that VI is not pending for deletion. */
1220 gcc_assert (VINSN_INSN_RTX (vi));
1221
1222 VINSN_COUNT (vi)++;
1223 }
1224
1225 /* Create and init VI from INSN. Use FORCE_UNIQUE_P for determining the
1226 correct VINSN_TYPE (VI). */
1227 static vinsn_t
1228 vinsn_create (insn_t insn, bool force_unique_p)
1229 {
1230 vinsn_t vi = XCNEW (struct vinsn_def);
1231
1232 vinsn_init (vi, insn, force_unique_p);
1233 return vi;
1234 }
1235
1236 /* Return a copy of VI. When REATTACH_P is true, detach VI and attach
1237 the copy. */
1238 vinsn_t
1239 vinsn_copy (vinsn_t vi, bool reattach_p)
1240 {
1241 rtx copy;
1242 bool unique = VINSN_UNIQUE_P (vi);
1243 vinsn_t new_vi;
1244
1245 copy = create_copy_of_insn_rtx (VINSN_INSN_RTX (vi));
1246 new_vi = create_vinsn_from_insn_rtx (copy, unique);
1247 if (reattach_p)
1248 {
1249 vinsn_detach (vi);
1250 vinsn_attach (new_vi);
1251 }
1252
1253 return new_vi;
1254 }
1255
1256 /* Delete the VI vinsn and free its data. */
1257 static void
1258 vinsn_delete (vinsn_t vi)
1259 {
1260 gcc_assert (VINSN_COUNT (vi) == 0);
1261
1262 if (!INSN_NOP_P (VINSN_INSN_RTX (vi)))
1263 {
1264 return_regset_to_pool (VINSN_REG_SETS (vi));
1265 return_regset_to_pool (VINSN_REG_USES (vi));
1266 return_regset_to_pool (VINSN_REG_CLOBBERS (vi));
1267 }
1268
1269 free (vi);
1270 }
1271
1272 /* Indicate that VI is no longer a part of some rtx object.
1273 Remove VI if it is no longer needed. */
1274 void
1275 vinsn_detach (vinsn_t vi)
1276 {
1277 gcc_assert (VINSN_COUNT (vi) > 0);
1278
1279 if (--VINSN_COUNT (vi) == 0)
1280 vinsn_delete (vi);
1281 }
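
/* A refcounting sketch (illustrative only; the function name is
   hypothetical and the block is kept out of the build): a vinsn stays
   alive while at least one attach is outstanding, and the final detach
   deletes it.  */
#if 0
static void
vinsn_refcount_example (insn_t insn)
{
  vinsn_t vi = vinsn_create (insn, false);  /* Count starts at 0.  */

  vinsn_attach (vi);  /* Count becomes 1.  */
  vinsn_attach (vi);  /* Count becomes 2.  */
  vinsn_detach (vi);  /* Count drops to 1; VI survives.  */
  vinsn_detach (vi);  /* Count drops to 0; VI is deleted.  */
}
#endif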
1282
1283 /* Return TRUE if VI is a conditional branch. */
1284 bool
1285 vinsn_cond_branch_p (vinsn_t vi)
1286 {
1287 insn_t insn;
1288
1289 if (!VINSN_UNIQUE_P (vi))
1290 return false;
1291
1292 insn = VINSN_INSN_RTX (vi);
1293 if (BB_END (BLOCK_FOR_INSN (insn)) != insn)
1294 return false;
1295
1296 return control_flow_insn_p (insn);
1297 }
1298
1299 /* Return latency of INSN. */
1300 static int
1301 sel_insn_rtx_cost (rtx insn)
1302 {
1303 int cost;
1304
1305 /* A USE insn, or something else we don't need to
1306 understand. We can't pass these directly to
1307 result_ready_cost or insn_default_latency because it will
1308 trigger a fatal error for unrecognizable insns. */
1309 if (recog_memoized (insn) < 0)
1310 cost = 0;
1311 else
1312 {
1313 cost = insn_default_latency (insn);
1314
1315 if (cost < 0)
1316 cost = 0;
1317 }
1318
1319 return cost;
1320 }
1321
1322 /* Return the cost of the VI.
1323 !!! FIXME: Unify with haifa-sched.c: insn_cost (). */
1324 int
1325 sel_vinsn_cost (vinsn_t vi)
1326 {
1327 int cost = vi->cost;
1328
1329 if (cost < 0)
1330 {
1331 cost = sel_insn_rtx_cost (VINSN_INSN_RTX (vi));
1332 vi->cost = cost;
1333 }
1334
1335 return cost;
1336 }
1337 \f
1338
1339 /* Functions for insn emitting. */
1340
1341 /* Emit new insn after AFTER based on PATTERN and initialize its data from
1342 EXPR and SEQNO. */
1343 insn_t
1344 sel_gen_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, insn_t after)
1345 {
1346 insn_t new_insn;
1347
1348 gcc_assert (EXPR_TARGET_AVAILABLE (expr) == true);
1349
1350 new_insn = emit_insn_after (pattern, after);
1351 set_insn_init (expr, NULL, seqno);
1352 sel_init_new_insn (new_insn, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID);
1353
1354 return new_insn;
1355 }
1356
1357 /* Force newly generated vinsns to be unique. */
1358 static bool init_insn_force_unique_p = false;
1359
1360 /* Emit new speculation recovery insn after AFTER based on PATTERN and
1361 initialize its data from EXPR and SEQNO. */
1362 insn_t
1363 sel_gen_recovery_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno,
1364 insn_t after)
1365 {
1366 insn_t insn;
1367
1368 gcc_assert (!init_insn_force_unique_p);
1369
1370 init_insn_force_unique_p = true;
1371 insn = sel_gen_insn_from_rtx_after (pattern, expr, seqno, after);
1372 CANT_MOVE (insn) = 1;
1373 init_insn_force_unique_p = false;
1374
1375 return insn;
1376 }
1377
1378 /* Emit new insn after AFTER based on EXPR and SEQNO. If VINSN is not NULL,
1379 take it as a new vinsn instead of EXPR's vinsn.
1380 We simplify insns later, after scheduling region in
1381 simplify_changed_insns. */
1382 insn_t
1383 sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
1384 insn_t after)
1385 {
1386 expr_t emit_expr;
1387 insn_t insn;
1388 int flags;
1389
1390 emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr),
1391 seqno);
1392 insn = EXPR_INSN_RTX (emit_expr);
1393 add_insn_after (insn, after, BLOCK_FOR_INSN (insn));
1394
1395 flags = INSN_INIT_TODO_SSID;
1396 if (INSN_LUID (insn) == 0)
1397 flags |= INSN_INIT_TODO_LUID;
1398 sel_init_new_insn (insn, flags);
1399
1400 return insn;
1401 }
1402
1403 /* Move insn from EXPR after AFTER. */
1404 insn_t
1405 sel_move_insn (expr_t expr, int seqno, insn_t after)
1406 {
1407 insn_t insn = EXPR_INSN_RTX (expr);
1408 basic_block bb = BLOCK_FOR_INSN (after);
1409 insn_t next = NEXT_INSN (after);
1410
1411 /* Assert that in move_op we disconnected this insn properly. */
1412 gcc_assert (EXPR_VINSN (INSN_EXPR (insn)) != NULL);
1413 PREV_INSN (insn) = after;
1414 NEXT_INSN (insn) = next;
1415
1416 NEXT_INSN (after) = insn;
1417 PREV_INSN (next) = insn;
1418
1419 /* Update links from insn to bb and vice versa. */
1420 df_insn_change_bb (insn, bb);
1421 if (BB_END (bb) == after)
1422 BB_END (bb) = insn;
1423
1424 prepare_insn_expr (insn, seqno);
1425 return insn;
1426 }
1427
1428 \f
1429 /* Functions to work with right-hand sides. */
1430
1431 /* Search for a hash value determined by UID/NEW_VINSN in a sorted vector
1432 VECT and return true when found. Use NEW_VINSN for comparison only when
1433 COMPARE_VINSNS is true. Write to INDP the index on which
1434 the search has stopped, such that inserting the new element at INDP will
1435 retain VECT's sort order. */
1436 static bool
1437 find_in_history_vect_1 (VEC(expr_history_def, heap) *vect,
1438 unsigned uid, vinsn_t new_vinsn,
1439 bool compare_vinsns, int *indp)
1440 {
1441 expr_history_def *arr;
1442 int i, j, len = VEC_length (expr_history_def, vect);
1443
1444 if (len == 0)
1445 {
1446 *indp = 0;
1447 return false;
1448 }
1449
1450 arr = VEC_address (expr_history_def, vect);
1451 i = 0, j = len - 1;
1452
1453 while (i <= j)
1454 {
1455 unsigned auid = arr[i].uid;
1456 vinsn_t avinsn = arr[i].new_expr_vinsn;
1457
1458 if (auid == uid
1459 /* When undoing transformation on a bookkeeping copy, the new vinsn
1460 may not be exactly equal to the one that is saved in the vector.
1461 This is because the insn whose copy we're checking was possibly
1462 substituted itself. */
1463 && (! compare_vinsns
1464 || vinsn_equal_p (avinsn, new_vinsn)))
1465 {
1466 *indp = i;
1467 return true;
1468 }
1469 else if (auid > uid)
1470 break;
1471 i++;
1472 }
1473
1474 *indp = i;
1475 return false;
1476 }
1477
1478 /* Search for a uid of INSN and NEW_VINSN in a sorted vector VECT. Return
1479 the position found or -1, if no such value is in the vector.
1480 Search also for UIDs of insn's originators, if ORIGINATORS_P is true. */
1481 int
1482 find_in_history_vect (VEC(expr_history_def, heap) *vect, rtx insn,
1483 vinsn_t new_vinsn, bool originators_p)
1484 {
1485 int ind;
1486
1487 if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn,
1488 false, &ind))
1489 return ind;
1490
1491 if (INSN_ORIGINATORS (insn) && originators_p)
1492 {
1493 unsigned uid;
1494 bitmap_iterator bi;
1495
1496 EXECUTE_IF_SET_IN_BITMAP (INSN_ORIGINATORS (insn), 0, uid, bi)
1497 if (find_in_history_vect_1 (vect, uid, new_vinsn, false, &ind))
1498 return ind;
1499 }
1500
1501 return -1;
1502 }
1503
1504 /* Insert new element in a sorted history vector pointed to by PVECT,
1505 if it is not there already. The element is searched using
1506 UID/NEW_EXPR_VINSN pair. TYPE, OLD_EXPR_VINSN and SPEC_DS save
1507 the history of a transformation. */
1508 void
1509 insert_in_history_vect (VEC (expr_history_def, heap) **pvect,
1510 unsigned uid, enum local_trans_type type,
1511 vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn,
1512 ds_t spec_ds)
1513 {
1514 VEC(expr_history_def, heap) *vect = *pvect;
1515 expr_history_def temp;
1516 bool res;
1517 int ind;
1518
1519 res = find_in_history_vect_1 (vect, uid, new_expr_vinsn, true, &ind);
1520
1521 if (res)
1522 {
1523 expr_history_def *phist = VEC_index (expr_history_def, vect, ind);
1524
1525 /* It is possible that speculation types of expressions that were
1526 propagated through different paths will be different here. In this
1527 case, merge the status to get the correct check later. */
1528 if (phist->spec_ds != spec_ds)
1529 phist->spec_ds = ds_max_merge (phist->spec_ds, spec_ds);
1530 return;
1531 }
1532
1533 temp.uid = uid;
1534 temp.old_expr_vinsn = old_expr_vinsn;
1535 temp.new_expr_vinsn = new_expr_vinsn;
1536 temp.spec_ds = spec_ds;
1537 temp.type = type;
1538
1539 vinsn_attach (old_expr_vinsn);
1540 vinsn_attach (new_expr_vinsn);
1541 VEC_safe_insert (expr_history_def, heap, vect, ind, &temp);
1542 *pvect = vect;
1543 }
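
/* A sketch of recording a transformation and finding it again
   (illustrative only; the function name is hypothetical, the block is
   kept out of the build, and TRANS_SUBSTITUTION is assumed to be the
   relevant local_trans_type value).  */
#if 0
static void
history_vect_example (expr_t expr, insn_t at_insn,
                      vinsn_t old_vi, vinsn_t new_vi)
{
  int ind;

  insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
                          INSN_UID (at_insn), TRANS_SUBSTITUTION,
                          old_vi, new_vi, 0);

  /* The element is now present in the sorted vector.  */
  ind = find_in_history_vect (EXPR_HISTORY_OF_CHANGES (expr), at_insn,
                              new_vi, false);
  gcc_assert (ind >= 0);
}
#endif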
1544
1545 /* Free history vector PVECT. */
1546 static void
1547 free_history_vect (VEC (expr_history_def, heap) **pvect)
1548 {
1549 unsigned i;
1550 expr_history_def *phist;
1551
1552 if (! *pvect)
1553 return;
1554
1555 for (i = 0;
1556 VEC_iterate (expr_history_def, *pvect, i, phist);
1557 i++)
1558 {
1559 vinsn_detach (phist->old_expr_vinsn);
1560 vinsn_detach (phist->new_expr_vinsn);
1561 }
1562
1563 VEC_free (expr_history_def, heap, *pvect);
1564 *pvect = NULL;
1565 }
1566
1567 /* Merge vector FROM into PVECT. */
1568 static void
1569 merge_history_vect (VEC (expr_history_def, heap) **pvect,
1570 VEC (expr_history_def, heap) *from)
1571 {
1572 expr_history_def *phist;
1573 int i;
1574
1575 /* We keep this vector sorted. */
1576 for (i = 0; VEC_iterate (expr_history_def, from, i, phist); i++)
1577 insert_in_history_vect (pvect, phist->uid, phist->type,
1578 phist->old_expr_vinsn, phist->new_expr_vinsn,
1579 phist->spec_ds);
1580 }
1581
1582 /* Compare two vinsns as rhses if possible and as vinsns otherwise. */
1583 bool
1584 vinsn_equal_p (vinsn_t x, vinsn_t y)
1585 {
1586 rtx_equal_p_callback_function repcf;
1587
1588 if (x == y)
1589 return true;
1590
1591 if (VINSN_TYPE (x) != VINSN_TYPE (y))
1592 return false;
1593
1594 if (VINSN_HASH (x) != VINSN_HASH (y))
1595 return false;
1596
1597 repcf = targetm.sched.skip_rtx_p ? skip_unspecs_callback : NULL;
1598 if (VINSN_SEPARABLE_P (x))
1599 {
1600 /* Compare RHSes of VINSNs. */
1601 gcc_assert (VINSN_RHS (x));
1602 gcc_assert (VINSN_RHS (y));
1603
1604 return rtx_equal_p_cb (VINSN_RHS (x), VINSN_RHS (y), repcf);
1605 }
1606
1607 return rtx_equal_p_cb (VINSN_PATTERN (x), VINSN_PATTERN (y), repcf);
1608 }
1609 \f
1610
1611 /* Functions for working with expressions. */
1612
1613 /* Initialize EXPR. */
1614 static void
1615 init_expr (expr_t expr, vinsn_t vi, int spec, int use, int priority,
1616 int sched_times, int orig_bb_index, ds_t spec_done_ds,
1617 ds_t spec_to_check_ds, int orig_sched_cycle,
1618 VEC(expr_history_def, heap) *history, signed char target_available,
1619 bool was_substituted, bool was_renamed, bool needs_spec_check_p,
1620 bool cant_move)
1621 {
1622 vinsn_attach (vi);
1623
1624 EXPR_VINSN (expr) = vi;
1625 EXPR_SPEC (expr) = spec;
1626 EXPR_USEFULNESS (expr) = use;
1627 EXPR_PRIORITY (expr) = priority;
1628 EXPR_PRIORITY_ADJ (expr) = 0;
1629 EXPR_SCHED_TIMES (expr) = sched_times;
1630 EXPR_ORIG_BB_INDEX (expr) = orig_bb_index;
1631 EXPR_ORIG_SCHED_CYCLE (expr) = orig_sched_cycle;
1632 EXPR_SPEC_DONE_DS (expr) = spec_done_ds;
1633 EXPR_SPEC_TO_CHECK_DS (expr) = spec_to_check_ds;
1634
1635 if (history)
1636 EXPR_HISTORY_OF_CHANGES (expr) = history;
1637 else
1638 EXPR_HISTORY_OF_CHANGES (expr) = NULL;
1639
1640 EXPR_TARGET_AVAILABLE (expr) = target_available;
1641 EXPR_WAS_SUBSTITUTED (expr) = was_substituted;
1642 EXPR_WAS_RENAMED (expr) = was_renamed;
1643 EXPR_NEEDS_SPEC_CHECK_P (expr) = needs_spec_check_p;
1644 EXPR_CANT_MOVE (expr) = cant_move;
1645 }
1646
1647 /* Make a copy of the expr FROM into the expr TO. */
1648 void
1649 copy_expr (expr_t to, expr_t from)
1650 {
1651 VEC(expr_history_def, heap) *temp = NULL;
1652
1653 if (EXPR_HISTORY_OF_CHANGES (from))
1654 {
1655 unsigned i;
1656 expr_history_def *phist;
1657
1658 temp = VEC_copy (expr_history_def, heap, EXPR_HISTORY_OF_CHANGES (from));
1659 for (i = 0;
1660 VEC_iterate (expr_history_def, temp, i, phist);
1661 i++)
1662 {
1663 vinsn_attach (phist->old_expr_vinsn);
1664 vinsn_attach (phist->new_expr_vinsn);
1665 }
1666 }
1667
1668 init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from),
1669 EXPR_USEFULNESS (from), EXPR_PRIORITY (from),
1670 EXPR_SCHED_TIMES (from), EXPR_ORIG_BB_INDEX (from),
1671 EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from),
1672 EXPR_ORIG_SCHED_CYCLE (from), temp,
1673 EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
1674 EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
1675 EXPR_CANT_MOVE (from));
1676 }
1677
1678 /* Same, but the final expr will never be in av sets, so don't copy
1679 "uninteresting" data such as the bitmap cache. */
1680 void
1681 copy_expr_onside (expr_t to, expr_t from)
1682 {
1683 init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), EXPR_USEFULNESS (from),
1684 EXPR_PRIORITY (from), EXPR_SCHED_TIMES (from), 0,
1685 EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), 0, NULL,
1686 EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
1687 EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
1688 EXPR_CANT_MOVE (from));
1689 }
1690
1691 /* Prepare the expr of INSN for scheduling. Used when moving insn and when
1692 initializing new insns. */
1693 static void
1694 prepare_insn_expr (insn_t insn, int seqno)
1695 {
1696 expr_t expr = INSN_EXPR (insn);
1697 ds_t ds;
1698
1699 INSN_SEQNO (insn) = seqno;
1700 EXPR_ORIG_BB_INDEX (expr) = BLOCK_NUM (insn);
1701 EXPR_SPEC (expr) = 0;
1702 EXPR_ORIG_SCHED_CYCLE (expr) = 0;
1703 EXPR_WAS_SUBSTITUTED (expr) = 0;
1704 EXPR_WAS_RENAMED (expr) = 0;
1705 EXPR_TARGET_AVAILABLE (expr) = 1;
1706 INSN_LIVE_VALID_P (insn) = false;
1707
1708 /* ??? If this expression is speculative, make its dependence
1709 as weak as possible. We can filter this expression later
1710 in process_spec_exprs, because we do not distinguish
1711 between the status we got during compute_av_set and the
1712 existing status. To be fixed. */
1713 ds = EXPR_SPEC_DONE_DS (expr);
1714 if (ds)
1715 EXPR_SPEC_DONE_DS (expr) = ds_get_max_dep_weak (ds);
1716
1717 free_history_vect (&EXPR_HISTORY_OF_CHANGES (expr));
1718 }
1719
1720 /* Update target_available bits when merging exprs TO and FROM. SPLIT_POINT
1721 is non-null when expressions are merged from different successors at
1722 a split point. */
1723 static void
1724 update_target_availability (expr_t to, expr_t from, insn_t split_point)
1725 {
1726 if (EXPR_TARGET_AVAILABLE (to) < 0
1727 || EXPR_TARGET_AVAILABLE (from) < 0)
1728 EXPR_TARGET_AVAILABLE (to) = -1;
1729 else
1730 {
1731 /* We try to detect the case when one of the expressions
1732 can only be reached through another one. In this case,
1733 we can do better. */
1734 if (split_point == NULL)
1735 {
1736 int toind, fromind;
1737
1738 toind = EXPR_ORIG_BB_INDEX (to);
1739 fromind = EXPR_ORIG_BB_INDEX (from);
1740
1741 if (toind && toind == fromind)
1742 /* Do nothing -- everything is done in
1743 merge_with_other_exprs. */
1744 ;
1745 else
1746 EXPR_TARGET_AVAILABLE (to) = -1;
1747 }
1748 else if (EXPR_TARGET_AVAILABLE (from) == 0
1749 && EXPR_LHS (from)
1750 && REG_P (EXPR_LHS (from))
1751 && REGNO (EXPR_LHS (to)) != REGNO (EXPR_LHS (from)))
1752 EXPR_TARGET_AVAILABLE (to) = -1;
1753 else
1754 EXPR_TARGET_AVAILABLE (to) &= EXPR_TARGET_AVAILABLE (from);
1755 }
1756 }
1757
1758 /* Update speculation bits when merging exprs TO and FROM. SPLIT_POINT
1759 is non-null when expressions are merged from different successors at
1760 a split point. */
1761 static void
1762 update_speculative_bits (expr_t to, expr_t from, insn_t split_point)
1763 {
1764 ds_t old_to_ds, old_from_ds;
1765
1766 old_to_ds = EXPR_SPEC_DONE_DS (to);
1767 old_from_ds = EXPR_SPEC_DONE_DS (from);
1768
1769 EXPR_SPEC_DONE_DS (to) = ds_max_merge (old_to_ds, old_from_ds);
1770 EXPR_SPEC_TO_CHECK_DS (to) |= EXPR_SPEC_TO_CHECK_DS (from);
1771 EXPR_NEEDS_SPEC_CHECK_P (to) |= EXPR_NEEDS_SPEC_CHECK_P (from);
1772
1773 /* When merging e.g. control & data speculative exprs, or a control
1774 speculative with a control&data speculative one, we really have
1775 to change vinsn too. Also, when speculative status is changed,
1776 we also need to record this as a transformation in expr's history. */
1777 if ((old_to_ds & SPECULATIVE) || (old_from_ds & SPECULATIVE))
1778 {
1779 old_to_ds = ds_get_speculation_types (old_to_ds);
1780 old_from_ds = ds_get_speculation_types (old_from_ds);
1781
1782 if (old_to_ds != old_from_ds)
1783 {
1784 ds_t record_ds;
1785
1786 /* When both expressions are speculative, we need to change
1787 the vinsn first. */
1788 if ((old_to_ds & SPECULATIVE) && (old_from_ds & SPECULATIVE))
1789 {
1790 int res;
1791
1792 res = speculate_expr (to, EXPR_SPEC_DONE_DS (to));
1793 gcc_assert (res >= 0);
1794 }
1795
1796 if (split_point != NULL)
1797 {
1798 /* Record the change with proper status. */
1799 record_ds = EXPR_SPEC_DONE_DS (to) & SPECULATIVE;
1800 record_ds &= ~(old_to_ds & SPECULATIVE);
1801 record_ds &= ~(old_from_ds & SPECULATIVE);
1802
1803 insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
1804 INSN_UID (split_point), TRANS_SPECULATION,
1805 EXPR_VINSN (from), EXPR_VINSN (to),
1806 record_ds);
1807 }
1808 }
1809 }
1810 }
1811
1812
1813 /* Merge bits of FROM expr to TO expr. When SPLIT_POINT is not NULL,
1814 this is done along different paths. */
1815 void
1816 merge_expr_data (expr_t to, expr_t from, insn_t split_point)
1817 {
1818 /* Choose the maximum of the specs of merged exprs. This is required
1819 for correctness of bookkeeping. */
1820 if (EXPR_SPEC (to) < EXPR_SPEC (from))
1821 EXPR_SPEC (to) = EXPR_SPEC (from);
1822
1823 if (split_point)
1824 EXPR_USEFULNESS (to) += EXPR_USEFULNESS (from);
1825 else
1826 EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to),
1827 EXPR_USEFULNESS (from));
1828
1829 if (EXPR_PRIORITY (to) < EXPR_PRIORITY (from))
1830 EXPR_PRIORITY (to) = EXPR_PRIORITY (from);
1831
1832 if (EXPR_SCHED_TIMES (to) > EXPR_SCHED_TIMES (from))
1833 EXPR_SCHED_TIMES (to) = EXPR_SCHED_TIMES (from);
1834
1835 if (EXPR_ORIG_BB_INDEX (to) != EXPR_ORIG_BB_INDEX (from))
1836 EXPR_ORIG_BB_INDEX (to) = 0;
1837
1838 EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to),
1839 EXPR_ORIG_SCHED_CYCLE (from));
1840
1841 EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from);
1842 EXPR_WAS_RENAMED (to) |= EXPR_WAS_RENAMED (from);
1843 EXPR_CANT_MOVE (to) |= EXPR_CANT_MOVE (from);
1844
1845 merge_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
1846 EXPR_HISTORY_OF_CHANGES (from));
1847 update_target_availability (to, from, split_point);
1848 update_speculative_bits (to, from, split_point);
1849 }
1850
1851 /* Merge bits of FROM expr to TO expr. Vinsns in the exprs should be equal
1852 in terms of vinsn_equal_p. SPLIT_POINT is non-null when expressions
1853 are merged from different successors at a split point. */
1854 void
1855 merge_expr (expr_t to, expr_t from, insn_t split_point)
1856 {
1857 vinsn_t to_vi = EXPR_VINSN (to);
1858 vinsn_t from_vi = EXPR_VINSN (from);
1859
1860 gcc_assert (vinsn_equal_p (to_vi, from_vi));
1861
1862 /* Make sure that the speculative pattern is propagated into exprs that
1863 have a non-speculative one. This will provide us with consistent
1864 speculative bits and speculative patterns inside the expr. */
1865 if (EXPR_SPEC_DONE_DS (to) == 0
1866 && EXPR_SPEC_DONE_DS (from) != 0)
1867 change_vinsn_in_expr (to, EXPR_VINSN (from));
1868
1869 merge_expr_data (to, from, split_point);
1870 gcc_assert (EXPR_USEFULNESS (to) <= REG_BR_PROB_BASE);
1871 }
1872
1873 /* Clear the information of this EXPR. */
1874 void
1875 clear_expr (expr_t expr)
1876 {
1877
1878 vinsn_detach (EXPR_VINSN (expr));
1879 EXPR_VINSN (expr) = NULL;
1880
1881 free_history_vect (&EXPR_HISTORY_OF_CHANGES (expr));
1882 }
1883
1884 /* For a given LV_SET, mark EXPR as having an unavailable target register. */
1885 static void
1886 set_unavailable_target_for_expr (expr_t expr, regset lv_set)
1887 {
1888 if (EXPR_SEPARABLE_P (expr))
1889 {
1890 if (REG_P (EXPR_LHS (expr))
1891 && register_unavailable_p (lv_set, EXPR_LHS (expr)))
1892 {
1893 /* If it's an insn like r1 = use (r1, ...), and it exists in
1894 different forms in each of the av_sets being merged, we can't say
1895 whether original destination register is available or not.
1896 However, this still works if destination register is not used
1897 in the original expression: if the branch at which LV_SET we're
1898 looking here is not actually 'other branch' in sense that same
1899 expression is available through it (but it can't be determined
1900 at computation stage because of transformations on one of the
1901 branches), it still won't affect the availability.
1902 Liveness of a register somewhere on a code motion path means
1903 it's either read somewhere on a code motion path, live on
1904 'other' branch, live at the point immediately following
1905 the original operation, or is read by the original operation.
1906 The latter case is filtered out in the condition below.
1907 It still doesn't cover the case when register is defined and used
1908 somewhere within the code motion path, and in this case we could
1909 miss a unifying code motion along both branches using a renamed
1910 register, but it won't affect a code correctness since upon
1911 an actual code motion a bookkeeping code would be generated. */
1912 if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
1913 EXPR_LHS (expr)))
1914 EXPR_TARGET_AVAILABLE (expr) = -1;
1915 else
1916 EXPR_TARGET_AVAILABLE (expr) = false;
1917 }
1918 }
1919 else
1920 {
1921 unsigned regno;
1922 reg_set_iterator rsi;
1923
1924 EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)),
1925 0, regno, rsi)
1926 if (bitmap_bit_p (lv_set, regno))
1927 {
1928 EXPR_TARGET_AVAILABLE (expr) = false;
1929 break;
1930 }
1931
1932 EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (EXPR_VINSN (expr)),
1933 0, regno, rsi)
1934 if (bitmap_bit_p (lv_set, regno))
1935 {
1936 EXPR_TARGET_AVAILABLE (expr) = false;
1937 break;
1938 }
1939 }
1940 }
1941
1942 /* Try to make EXPR speculative. Return 1 when EXPR's pattern
1943 or dependence status have changed, 2 when also the target register
1944 became unavailable, 0 if nothing had to be changed. */
1945 int
1946 speculate_expr (expr_t expr, ds_t ds)
1947 {
1948 int res;
1949 rtx orig_insn_rtx;
1950 rtx spec_pat;
1951 ds_t target_ds, current_ds;
1952
1953 /* Obtain the status we need to put on EXPR. */
1954 target_ds = (ds & SPECULATIVE);
1955 current_ds = EXPR_SPEC_DONE_DS (expr);
1956 ds = ds_full_merge (current_ds, target_ds, NULL_RTX, NULL_RTX);
1957
1958 orig_insn_rtx = EXPR_INSN_RTX (expr);
1959
1960 res = sched_speculate_insn (orig_insn_rtx, ds, &spec_pat);
1961
1962 switch (res)
1963 {
1964 case 0:
1965 EXPR_SPEC_DONE_DS (expr) = ds;
1966 return current_ds != ds ? 1 : 0;
1967
1968 case 1:
1969 {
1970 rtx spec_insn_rtx = create_insn_rtx_from_pattern (spec_pat, NULL_RTX);
1971 vinsn_t spec_vinsn = create_vinsn_from_insn_rtx (spec_insn_rtx, false);
1972
1973 change_vinsn_in_expr (expr, spec_vinsn);
1974 EXPR_SPEC_DONE_DS (expr) = ds;
1975 EXPR_NEEDS_SPEC_CHECK_P (expr) = true;
1976
1977 /* Do not allow clobbering the address register of speculative
1978 insns. */
1979 if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
1980 expr_dest_reg (expr)))
1981 {
1982 EXPR_TARGET_AVAILABLE (expr) = false;
1983 return 2;
1984 }
1985
1986 return 1;
1987 }
1988
1989 case -1:
1990 return -1;
1991
1992 default:
1993 gcc_unreachable ();
1994 return -1;
1995 }
1996 }
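
/* A minimal usage sketch for speculate_expr above (illustrative only,
   not part of the scheduler; the wrapper name try_speculate_expr is
   hypothetical).  It shows how a caller is expected to react to each
   documented return value.  */
#if 0
static bool
try_speculate_expr (expr_t expr, ds_t ds)
{
  switch (speculate_expr (expr, ds))
    {
    case 0:
      /* Nothing had to be changed; EXPR already carries the status.  */
      return true;

    case 1:
      /* The pattern and/or dependence status changed; EXPR now needs
         a speculation check (EXPR_NEEDS_SPEC_CHECK_P is set).  */
      return true;

    case 2:
      /* As above, but the target register also became unavailable,
         so a renaming caller must pick another register.  */
      return true;

    case -1:
      /* EXPR cannot be speculated with this status.  */
      return false;

    default:
      gcc_unreachable ();
    }
}
#endif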
1997
1998 /* Return a destination register, if any, of EXPR. */
1999 rtx
2000 expr_dest_reg (expr_t expr)
2001 {
2002 rtx dest = VINSN_LHS (EXPR_VINSN (expr));
2003
2004 if (dest != NULL_RTX && REG_P (dest))
2005 return dest;
2006
2007 return NULL_RTX;
2008 }
2009
2010 /* Returns the REGNO of EXPR's destination.  */
2011 unsigned
2012 expr_dest_regno (expr_t expr)
2013 {
2014 rtx dest = expr_dest_reg (expr);
2015
2016 gcc_assert (dest != NULL_RTX);
2017 return REGNO (dest);
2018 }
2019
2020 /* For a given LV_SET, mark all expressions that are in JOIN_SET but not
2021    in AV_SET as having an unavailable target register.  */
2022 void
2023 mark_unavailable_targets (av_set_t join_set, av_set_t av_set, regset lv_set)
2024 {
2025 expr_t expr;
2026 av_set_iterator avi;
2027
2028 FOR_EACH_EXPR (expr, avi, join_set)
2029 if (av_set_lookup (av_set, EXPR_VINSN (expr)) == NULL)
2030 set_unavailable_target_for_expr (expr, lv_set);
2031 }
2032 \f
2033
2034 /* Returns true if REG (at least partially) is present in REGS. */
2035 bool
2036 register_unavailable_p (regset regs, rtx reg)
2037 {
2038 unsigned regno, end_regno;
2039
2040 regno = REGNO (reg);
2041 if (bitmap_bit_p (regs, regno))
2042 return true;
2043
2044 end_regno = END_REGNO (reg);
2045
2046 while (++regno < end_regno)
2047 if (bitmap_bit_p (regs, regno))
2048 return true;
2049
2050 return false;
2051 }
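
/* For example (an illustrative note): if REG is a double-word hard
   register occupying regnos 10 and 11, END_REGNO yields 12, so the
   loop above probes bits 10 and 11 of REGS, and the function returns
   true when either word is live.  */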
2052
2053 /* Av set functions. */
2054
2055 /* Add a new element to av set SETP.
2056 Return the element added. */
2057 static av_set_t
2058 av_set_add_element (av_set_t *setp)
2059 {
2060 /* Insert at the beginning of the list. */
2061 _list_add (setp);
2062 return *setp;
2063 }
2064
2065 /* Add EXPR to SETP. */
2066 void
2067 av_set_add (av_set_t *setp, expr_t expr)
2068 {
2069 av_set_t elem;
2070
2071 gcc_assert (!INSN_NOP_P (EXPR_INSN_RTX (expr)));
2072 elem = av_set_add_element (setp);
2073 copy_expr (_AV_SET_EXPR (elem), expr);
2074 }
2075
2076 /* Same, but do not copy EXPR. */
2077 static void
2078 av_set_add_nocopy (av_set_t *setp, expr_t expr)
2079 {
2080 av_set_t elem;
2081
2082 elem = av_set_add_element (setp);
2083 *_AV_SET_EXPR (elem) = *expr;
2084 }
2085
2086 /* Remove expr pointed to by IP from the av_set. */
2087 void
2088 av_set_iter_remove (av_set_iterator *ip)
2089 {
2090 clear_expr (_AV_SET_EXPR (*ip->lp));
2091 _list_iter_remove (ip);
2092 }
2093
2094 /* Search for an expr in SET that is equivalent to SOUGHT_VINSN in the
2095    sense of the vinsn_equal_p function.  Return NULL if no such expr
2096    was found in SET.  */
2097 expr_t
2098 av_set_lookup (av_set_t set, vinsn_t sought_vinsn)
2099 {
2100 expr_t expr;
2101 av_set_iterator i;
2102
2103 FOR_EACH_EXPR (expr, i, set)
2104 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2105 return expr;
2106 return NULL;
2107 }
2108
2109 /* Same, but also remove the EXPR found. */
2110 static expr_t
2111 av_set_lookup_and_remove (av_set_t *setp, vinsn_t sought_vinsn)
2112 {
2113 expr_t expr;
2114 av_set_iterator i;
2115
2116 FOR_EACH_EXPR_1 (expr, i, setp)
2117 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2118 {
2119 _list_iter_remove_nofree (&i);
2120 return expr;
2121 }
2122 return NULL;
2123 }
2124
2125 /* Search for an expr in SET whose vinsn is equivalent to EXPR's vinsn
2126    in the sense of the vinsn_equal_p function, but which is not EXPR
2127    itself.  Return NULL if no such expr was found in SET.  */
2128 static expr_t
2129 av_set_lookup_other_equiv_expr (av_set_t set, expr_t expr)
2130 {
2131 expr_t cur_expr;
2132 av_set_iterator i;
2133
2134 FOR_EACH_EXPR (cur_expr, i, set)
2135 {
2136 if (cur_expr == expr)
2137 continue;
2138 if (vinsn_equal_p (EXPR_VINSN (cur_expr), EXPR_VINSN (expr)))
2139 return cur_expr;
2140 }
2141
2142 return NULL;
2143 }
2144
2145 /* If an equivalent expression is already in AVP, merge EXPR into it, remove the expr IP points to, and return the result; else return EXPR.  */
2146 expr_t
2147 merge_with_other_exprs (av_set_t *avp, av_set_iterator *ip, expr_t expr)
2148 {
2149 expr_t expr2;
2150
2151 expr2 = av_set_lookup_other_equiv_expr (*avp, expr);
2152 if (expr2 != NULL)
2153 {
2154       /* Reset target availability on merge, since taking it only from one
2155          of the exprs would be arbitrary for the merged expression.  */
2156 EXPR_TARGET_AVAILABLE (expr2) = -1;
2157 EXPR_USEFULNESS (expr2) = 0;
2158
2159 merge_expr (expr2, expr, NULL);
2160
2161 /* Fix usefulness as it should be now REG_BR_PROB_BASE. */
2162 EXPR_USEFULNESS (expr2) = REG_BR_PROB_BASE;
2163
2164 av_set_iter_remove (ip);
2165 return expr2;
2166 }
2167
2168 return expr;
2169 }
2170
2171 /* Return true if there is an expr that correlates to VI in SET. */
2172 bool
2173 av_set_is_in_p (av_set_t set, vinsn_t vi)
2174 {
2175 return av_set_lookup (set, vi) != NULL;
2176 }
2177
2178 /* Return a copy of SET. */
2179 av_set_t
2180 av_set_copy (av_set_t set)
2181 {
2182 expr_t expr;
2183 av_set_iterator i;
2184 av_set_t res = NULL;
2185
2186 FOR_EACH_EXPR (expr, i, set)
2187 av_set_add (&res, expr);
2188
2189 return res;
2190 }
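
/* A hedged sketch of the av-set API above (illustrative only; the
   function name av_set_api_sketch is hypothetical).  It shows the
   ownership rules: av_set_add stores its own copy of EXPR, av_set_copy
   deep-copies a whole set, and av_set_clear releases every element.  */
#if 0
static void
av_set_api_sketch (expr_t expr)
{
  av_set_t set = NULL, copy;

  av_set_add (&set, expr);      /* SET now holds its own copy of EXPR.  */
  gcc_assert (av_set_is_in_p (set, EXPR_VINSN (expr)));

  copy = av_set_copy (set);     /* Element-wise deep copy.  */

  av_set_clear (&set);          /* The two sets are freed ...  */
  av_set_clear (&copy);         /* ... independently of each other.  */
}
#endif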
2191
2192 /* Join two av sets that do not have common elements by attaching second set
2193 (pointed to by FROMP) to the end of first set (TO_TAILP must point to
2194 _AV_SET_NEXT of first set's last element). */
2195 static void
2196 join_distinct_sets (av_set_t *to_tailp, av_set_t *fromp)
2197 {
2198 gcc_assert (*to_tailp == NULL);
2199 *to_tailp = *fromp;
2200 *fromp = NULL;
2201 }
2202
2203 /* Make the set pointed to by TOP the union of TOP and FROMP.  Clear the
2204    av_set pointed to by FROMP afterwards.  */
2205 void
2206 av_set_union_and_clear (av_set_t *top, av_set_t *fromp, insn_t insn)
2207 {
2208 expr_t expr1;
2209 av_set_iterator i;
2210
2211   /* Delete from TOP all exprs that are present in FROMP.  */
2212 FOR_EACH_EXPR_1 (expr1, i, top)
2213 {
2214 expr_t expr2 = av_set_lookup (*fromp, EXPR_VINSN (expr1));
2215
2216 if (expr2)
2217 {
2218 merge_expr (expr2, expr1, insn);
2219 av_set_iter_remove (&i);
2220 }
2221 }
2222
2223 join_distinct_sets (i.lp, fromp);
2224 }
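
/* For instance (a hedged example): if *TOP is {a, b} and *FROMP is
   {b', c}, where b and b' are equal in the vinsn_equal_p sense, then
   after the call *TOP is {a, merge (b', b), c} and *FROMP is NULL:
   the copy of b coming from *TOP is merged into b' and removed, and
   what remains of *FROMP is spliced onto the tail of *TOP.  */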
2225
2226 /* Same as above, but also update availability of target register in
2227 TOP judging by TO_LV_SET and FROM_LV_SET. */
2228 void
2229 av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set,
2230 regset from_lv_set, insn_t insn)
2231 {
2232 expr_t expr1;
2233 av_set_iterator i;
2234 av_set_t *to_tailp, in_both_set = NULL;
2235
2236   /* Delete from TOP all exprs that are present in FROMP.  */
2237 FOR_EACH_EXPR_1 (expr1, i, top)
2238 {
2239 expr_t expr2 = av_set_lookup_and_remove (fromp, EXPR_VINSN (expr1));
2240
2241 if (expr2)
2242 {
2243 /* It may be that the expressions have different destination
2244 registers, in which case we need to check liveness here. */
2245 if (EXPR_SEPARABLE_P (expr1))
2246 {
2247 int regno1 = (REG_P (EXPR_LHS (expr1))
2248 ? (int) expr_dest_regno (expr1) : -1);
2249 int regno2 = (REG_P (EXPR_LHS (expr2))
2250 ? (int) expr_dest_regno (expr2) : -1);
2251
2252              /* ??? We don't have a way to check restrictions for the
2253                 *other* register on the current path; we did it only
2254                 for the current target register.  Give up.  */
2255 if (regno1 != regno2)
2256 EXPR_TARGET_AVAILABLE (expr2) = -1;
2257 }
2258 else if (EXPR_INSN_RTX (expr1) != EXPR_INSN_RTX (expr2))
2259 EXPR_TARGET_AVAILABLE (expr2) = -1;
2260
2261 merge_expr (expr2, expr1, insn);
2262 av_set_add_nocopy (&in_both_set, expr2);
2263 av_set_iter_remove (&i);
2264 }
2265 else
2266 /* EXPR1 is present in TOP, but not in FROMP. Check it on
2267 FROM_LV_SET. */
2268 set_unavailable_target_for_expr (expr1, from_lv_set);
2269 }
2270 to_tailp = i.lp;
2271
2272 /* These expressions are not present in TOP. Check liveness
2273 restrictions on TO_LV_SET. */
2274 FOR_EACH_EXPR (expr1, i, *fromp)
2275 set_unavailable_target_for_expr (expr1, to_lv_set);
2276
2277 join_distinct_sets (i.lp, &in_both_set);
2278 join_distinct_sets (to_tailp, fromp);
2279 }
2280
2281 /* Clear av_set pointed to by SETP. */
2282 void
2283 av_set_clear (av_set_t *setp)
2284 {
2285 expr_t expr;
2286 av_set_iterator i;
2287
2288 FOR_EACH_EXPR_1 (expr, i, setp)
2289 av_set_iter_remove (&i);
2290
2291 gcc_assert (*setp == NULL);
2292 }
2293
2294 /* Leave only one non-speculative element in SETP.  */
2295 void
2296 av_set_leave_one_nonspec (av_set_t *setp)
2297 {
2298 expr_t expr;
2299 av_set_iterator i;
2300 bool has_one_nonspec = false;
2301
2302 /* Keep all speculative exprs, and leave one non-speculative
2303 (the first one). */
2304 FOR_EACH_EXPR_1 (expr, i, setp)
2305 {
2306 if (!EXPR_SPEC_DONE_DS (expr))
2307 {
2308 if (has_one_nonspec)
2309 av_set_iter_remove (&i);
2310 else
2311 has_one_nonspec = true;
2312 }
2313 }
2314 }
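
/* E.g. (an illustrative example): applied to {s1, n1, n2, s2}, where
   the s's are speculative and the n's are not, the set becomes
   {s1, n1, s2}: every speculative expr survives, and only the first
   non-speculative one is kept.  */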
2315
2316 /* Return the N'th element of SET.  */
2317 expr_t
2318 av_set_element (av_set_t set, int n)
2319 {
2320 expr_t expr;
2321 av_set_iterator i;
2322
2323 FOR_EACH_EXPR (expr, i, set)
2324 if (n-- == 0)
2325 return expr;
2326
2327 gcc_unreachable ();
2328 return NULL;
2329 }
2330
2331 /* Deletes all expressions from AVP that are conditional branches (IFs). */
2332 void
2333 av_set_substract_cond_branches (av_set_t *avp)
2334 {
2335 av_set_iterator i;
2336 expr_t expr;
2337
2338 FOR_EACH_EXPR_1 (expr, i, avp)
2339 if (vinsn_cond_branch_p (EXPR_VINSN (expr)))
2340 av_set_iter_remove (&i);
2341 }
2342
2343 /* Multiply the usefulness attribute of each member of av-set AV by
2344    PROB / ALL_PROB.  */
2345 void
2346 av_set_split_usefulness (av_set_t av, int prob, int all_prob)
2347 {
2348 av_set_iterator i;
2349 expr_t expr;
2350
2351 FOR_EACH_EXPR (expr, i, av)
2352 EXPR_USEFULNESS (expr) = (all_prob
2353 ? (EXPR_USEFULNESS (expr) * prob) / all_prob
2354 : 0);
2355 }
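
/* For example (illustrative arithmetic): with REG_BR_PROB_BASE == 10000,
   an expr of usefulness 10000 on a branch with PROB == 4000 out of
   ALL_PROB == 10000 ends up with usefulness (10000 * 4000) / 10000
   == 4000 after the split.  */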
2356
2357 /* Leave in AVP only those expressions that are also present in AV,
2358    merging their history of transformations.  */
2359 void
2360 av_set_code_motion_filter (av_set_t *avp, av_set_t av)
2361 {
2362 av_set_iterator i;
2363 expr_t expr, expr2;
2364
2365 FOR_EACH_EXPR_1 (expr, i, avp)
2366 if ((expr2 = av_set_lookup (av, EXPR_VINSN (expr))) == NULL)
2367 av_set_iter_remove (&i);
2368 else
2369 /* When updating av sets in bookkeeping blocks, we can add more insns
2370 there which will be transformed but the upper av sets will not
2371 reflect those transformations. We then fail to undo those
2372 when searching for such insns. So merge the history saved
2373 in the av set of the block we are processing. */
2374 merge_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
2375 EXPR_HISTORY_OF_CHANGES (expr2));
2376 }
2377
2378 \f
2379
2380 /* Dependence hooks to initialize insn data. */
2381
2382 /* This is used in hooks callable from dependence analysis when initializing
2383    an instruction's data.  */
2384 static struct
2385 {
2386 /* Where the dependence was found (lhs/rhs). */
2387 deps_where_t where;
2388
2389 /* The actual data object to initialize. */
2390 idata_t id;
2391
2392 /* True when the insn should not be made clonable. */
2393 bool force_unique_p;
2394
2395 /* True when insn should be treated as of type USE, i.e. never renamed. */
2396 bool force_use_p;
2397 } deps_init_id_data;
2398
2399
2400 /* Setup ID for INSN. FORCE_UNIQUE_P is true when INSN should not be
2401 clonable. */
2402 static void
2403 setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p)
2404 {
2405 int type;
2406
2407   /* Determine whether INSN could be cloned and return the appropriate vinsn
2408      type.  Clonable insns which can be separated into lhs and rhs have type
2409      SET.  Other clonable insns have type USE.  */
2410 type = GET_CODE (insn);
2411
2412   /* Only regular insns can be cloned.  */
2413 if (type == INSN && !force_unique_p)
2414 type = SET;
2415 else if (type == JUMP_INSN && simplejump_p (insn))
2416 type = PC;
2417 else if (type == DEBUG_INSN)
2418 type = !force_unique_p ? USE : INSN;
2419
2420 IDATA_TYPE (id) = type;
2421 IDATA_REG_SETS (id) = get_clear_regset_from_pool ();
2422 IDATA_REG_USES (id) = get_clear_regset_from_pool ();
2423 IDATA_REG_CLOBBERS (id) = get_clear_regset_from_pool ();
2424 }
2425
2426 /* Start initializing insn data. */
2427 static void
2428 deps_init_id_start_insn (insn_t insn)
2429 {
2430 gcc_assert (deps_init_id_data.where == DEPS_IN_NOWHERE);
2431
2432 setup_id_for_insn (deps_init_id_data.id, insn,
2433 deps_init_id_data.force_unique_p);
2434 deps_init_id_data.where = DEPS_IN_INSN;
2435 }
2436
2437 /* Start initializing lhs data. */
2438 static void
2439 deps_init_id_start_lhs (rtx lhs)
2440 {
2441 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2442 gcc_assert (IDATA_LHS (deps_init_id_data.id) == NULL);
2443
2444 if (IDATA_TYPE (deps_init_id_data.id) == SET)
2445 {
2446 IDATA_LHS (deps_init_id_data.id) = lhs;
2447 deps_init_id_data.where = DEPS_IN_LHS;
2448 }
2449 }
2450
2451 /* Finish initializing lhs data. */
2452 static void
2453 deps_init_id_finish_lhs (void)
2454 {
2455 deps_init_id_data.where = DEPS_IN_INSN;
2456 }
2457
2458 /* Note a set of REGNO. */
2459 static void
2460 deps_init_id_note_reg_set (int regno)
2461 {
2462 haifa_note_reg_set (regno);
2463
2464 if (deps_init_id_data.where == DEPS_IN_RHS)
2465 deps_init_id_data.force_use_p = true;
2466
2467 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2468 SET_REGNO_REG_SET (IDATA_REG_SETS (deps_init_id_data.id), regno);
2469
2470 #ifdef STACK_REGS
2471 /* Make instructions that set stack registers to be ineligible for
2472 renaming to avoid issues with find_used_regs. */
2473 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2474 deps_init_id_data.force_use_p = true;
2475 #endif
2476 }
2477
2478 /* Note a clobber of REGNO. */
2479 static void
2480 deps_init_id_note_reg_clobber (int regno)
2481 {
2482 haifa_note_reg_clobber (regno);
2483
2484 if (deps_init_id_data.where == DEPS_IN_RHS)
2485 deps_init_id_data.force_use_p = true;
2486
2487 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2488 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (deps_init_id_data.id), regno);
2489 }
2490
2491 /* Note a use of REGNO. */
2492 static void
2493 deps_init_id_note_reg_use (int regno)
2494 {
2495 haifa_note_reg_use (regno);
2496
2497 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2498 SET_REGNO_REG_SET (IDATA_REG_USES (deps_init_id_data.id), regno);
2499 }
2500
2501 /* Start initializing rhs data. */
2502 static void
2503 deps_init_id_start_rhs (rtx rhs)
2504 {
2505 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2506
2507 /* And there was no sel_deps_reset_to_insn (). */
2508 if (IDATA_LHS (deps_init_id_data.id) != NULL)
2509 {
2510 IDATA_RHS (deps_init_id_data.id) = rhs;
2511 deps_init_id_data.where = DEPS_IN_RHS;
2512 }
2513 }
2514
2515 /* Finish initializing rhs data. */
2516 static void
2517 deps_init_id_finish_rhs (void)
2518 {
2519 gcc_assert (deps_init_id_data.where == DEPS_IN_RHS
2520 || deps_init_id_data.where == DEPS_IN_INSN);
2521 deps_init_id_data.where = DEPS_IN_INSN;
2522 }
2523
2524 /* Finish initializing insn data. */
2525 static void
2526 deps_init_id_finish_insn (void)
2527 {
2528 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2529
2530 if (IDATA_TYPE (deps_init_id_data.id) == SET)
2531 {
2532 rtx lhs = IDATA_LHS (deps_init_id_data.id);
2533 rtx rhs = IDATA_RHS (deps_init_id_data.id);
2534
2535 if (lhs == NULL || rhs == NULL || !lhs_and_rhs_separable_p (lhs, rhs)
2536 || deps_init_id_data.force_use_p)
2537 {
2538           /* This should be a USE, as we don't want to schedule its RHS
2539              separately.  However, we still want to have the lhs and rhs
2540              recorded for the purposes of substitution.  That's why we don't
2541              simply call downgrade_to_use () here.  */
2542 gcc_assert (IDATA_TYPE (deps_init_id_data.id) == SET);
2543 gcc_assert (!lhs == !rhs);
2544
2545 IDATA_TYPE (deps_init_id_data.id) = USE;
2546 }
2547 }
2548
2549 deps_init_id_data.where = DEPS_IN_NOWHERE;
2550 }
2551
2552 /* This is dependence info used for initializing insn's data. */
2553 static struct sched_deps_info_def deps_init_id_sched_deps_info;
2554
2555 /* This initializes most of the static part of the above structure. */
2556 static const struct sched_deps_info_def const_deps_init_id_sched_deps_info =
2557 {
2558 NULL,
2559
2560 deps_init_id_start_insn,
2561 deps_init_id_finish_insn,
2562 deps_init_id_start_lhs,
2563 deps_init_id_finish_lhs,
2564 deps_init_id_start_rhs,
2565 deps_init_id_finish_rhs,
2566 deps_init_id_note_reg_set,
2567 deps_init_id_note_reg_clobber,
2568 deps_init_id_note_reg_use,
2569 NULL, /* note_mem_dep */
2570 NULL, /* note_dep */
2571
2572 0, /* use_cselib */
2573 0, /* use_deps_list */
2574 0 /* generate_spec_deps */
2575 };
2576
2577 /* Initialize INSN's lhs and rhs in ID. When FORCE_UNIQUE_P is true,
2578 we don't actually need information about lhs and rhs. */
2579 static void
2580 setup_id_lhs_rhs (idata_t id, insn_t insn, bool force_unique_p)
2581 {
2582 rtx pat = PATTERN (insn);
2583
2584 if (NONJUMP_INSN_P (insn)
2585 && GET_CODE (pat) == SET
2586 && !force_unique_p)
2587 {
2588 IDATA_RHS (id) = SET_SRC (pat);
2589 IDATA_LHS (id) = SET_DEST (pat);
2590 }
2591 else
2592 IDATA_LHS (id) = IDATA_RHS (id) = NULL;
2593 }
2594
2595 /* Possibly downgrade INSN to USE. */
2596 static void
2597 maybe_downgrade_id_to_use (idata_t id, insn_t insn)
2598 {
2599 bool must_be_use = false;
2600 unsigned uid = INSN_UID (insn);
2601 df_ref *rec;
2602 rtx lhs = IDATA_LHS (id);
2603 rtx rhs = IDATA_RHS (id);
2604
2605 /* We downgrade only SETs. */
2606 if (IDATA_TYPE (id) != SET)
2607 return;
2608
2609 if (!lhs || !lhs_and_rhs_separable_p (lhs, rhs))
2610 {
2611 IDATA_TYPE (id) = USE;
2612 return;
2613 }
2614
2615 for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++)
2616 {
2617 df_ref def = *rec;
2618
2619 if (DF_REF_INSN (def)
2620 && DF_REF_FLAGS_IS_SET (def, DF_REF_PRE_POST_MODIFY)
2621 && loc_mentioned_in_p (DF_REF_LOC (def), IDATA_RHS (id)))
2622 {
2623 must_be_use = true;
2624 break;
2625 }
2626
2627 #ifdef STACK_REGS
2628 /* Make instructions that set stack registers to be ineligible for
2629 renaming to avoid issues with find_used_regs. */
2630 if (IN_RANGE (DF_REF_REGNO (def), FIRST_STACK_REG, LAST_STACK_REG))
2631 {
2632 must_be_use = true;
2633 break;
2634 }
2635 #endif
2636 }
2637
2638 if (must_be_use)
2639 IDATA_TYPE (id) = USE;
2640 }
2641
2642 /* Setup register sets describing INSN in ID. */
2643 static void
2644 setup_id_reg_sets (idata_t id, insn_t insn)
2645 {
2646 unsigned uid = INSN_UID (insn);
2647 df_ref *rec;
2648 regset tmp = get_clear_regset_from_pool ();
2649
2650 for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++)
2651 {
2652 df_ref def = *rec;
2653 unsigned int regno = DF_REF_REGNO (def);
2654
2655 /* Post modifies are treated like clobbers by sched-deps.c. */
2656 if (DF_REF_FLAGS_IS_SET (def, (DF_REF_MUST_CLOBBER
2657 | DF_REF_PRE_POST_MODIFY)))
2658 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (id), regno);
2659 else if (! DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
2660 {
2661 SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno);
2662
2663 #ifdef STACK_REGS
2664 /* For stack registers, treat writes to them as writes
2665 to the first one to be consistent with sched-deps.c. */
2666 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2667 SET_REGNO_REG_SET (IDATA_REG_SETS (id), FIRST_STACK_REG);
2668 #endif
2669 }
2670 /* Mark special refs that generate read/write def pair. */
2671 if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL)
2672 || regno == STACK_POINTER_REGNUM)
2673 bitmap_set_bit (tmp, regno);
2674 }
2675
2676 for (rec = DF_INSN_UID_USES (uid); *rec; rec++)
2677 {
2678 df_ref use = *rec;
2679 unsigned int regno = DF_REF_REGNO (use);
2680
2681 /* When these refs are met for the first time, skip them, as
2682 these uses are just counterparts of some defs. */
2683 if (bitmap_bit_p (tmp, regno))
2684 bitmap_clear_bit (tmp, regno);
2685 else if (! DF_REF_FLAGS_IS_SET (use, DF_REF_CALL_STACK_USAGE))
2686 {
2687 SET_REGNO_REG_SET (IDATA_REG_USES (id), regno);
2688
2689 #ifdef STACK_REGS
2690 /* For stack registers, treat reads from them as reads from
2691 the first one to be consistent with sched-deps.c. */
2692 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2693 SET_REGNO_REG_SET (IDATA_REG_USES (id), FIRST_STACK_REG);
2694 #endif
2695 }
2696 }
2697
2698 return_regset_to_pool (tmp);
2699 }
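
/* As an example of the TMP bookkeeping above (a hedged note): for a
   conditional set such as "r10 = cond ? x : r10", DF records both a
   DF_REF_CONDITIONAL def and a use of r10.  The def marks bit 10 in
   TMP, so the first matching use is skipped instead of being added to
   IDATA_REG_USES, keeping the sets consistent with sched-deps.c.  */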
2700
2701 /* Initialize instruction data for INSN in ID using DF's data. */
2702 static void
2703 init_id_from_df (idata_t id, insn_t insn, bool force_unique_p)
2704 {
2705 gcc_assert (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL);
2706
2707 setup_id_for_insn (id, insn, force_unique_p);
2708 setup_id_lhs_rhs (id, insn, force_unique_p);
2709
2710 if (INSN_NOP_P (insn))
2711 return;
2712
2713 maybe_downgrade_id_to_use (id, insn);
2714 setup_id_reg_sets (id, insn);
2715 }
2716
2717 /* Initialize instruction data for INSN in ID. */
2718 static void
2719 deps_init_id (idata_t id, insn_t insn, bool force_unique_p)
2720 {
2721 struct deps_desc _dc, *dc = &_dc;
2722
2723 deps_init_id_data.where = DEPS_IN_NOWHERE;
2724 deps_init_id_data.id = id;
2725 deps_init_id_data.force_unique_p = force_unique_p;
2726 deps_init_id_data.force_use_p = false;
2727
2728 init_deps (dc, false);
2729
2730 memcpy (&deps_init_id_sched_deps_info,
2731 &const_deps_init_id_sched_deps_info,
2732 sizeof (deps_init_id_sched_deps_info));
2733
2734 if (spec_info != NULL)
2735 deps_init_id_sched_deps_info.generate_spec_deps = 1;
2736
2737 sched_deps_info = &deps_init_id_sched_deps_info;
2738
2739 deps_analyze_insn (dc, insn);
2740
2741 free_deps (dc);
2742
2743 deps_init_id_data.id = NULL;
2744 }
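
/* The hooks above implement a small state machine over the WHERE field,
   which the asserts make explicit.  For a separable insn it proceeds
   (an illustrative summary):

     DEPS_IN_NOWHERE --start_insn--> DEPS_IN_INSN
     DEPS_IN_INSN --start_lhs--> DEPS_IN_LHS --finish_lhs--> DEPS_IN_INSN
     DEPS_IN_INSN --start_rhs--> DEPS_IN_RHS --finish_rhs--> DEPS_IN_INSN
     DEPS_IN_INSN --finish_insn--> DEPS_IN_NOWHERE

   The note_reg_* hooks fire in between and consult WHERE; e.g. a reg
   set noted while in DEPS_IN_RHS forces the whole insn to be treated
   as a USE (see deps_init_id_note_reg_set above).  */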
2745
2746 \f
2747 struct sched_scan_info_def
2748 {
2749   /* This hook notifies the scheduler frontend to extend its internal per
2750      basic block data structures.  This hook should be called once before
2751      a series of calls to bb_init ().  */
2752 void (*extend_bb) (void);
2753
2754   /* This hook makes the scheduler frontend initialize its internal data
2755      structures for the passed basic block.  */
2756 void (*init_bb) (basic_block);
2757
2758   /* This hook notifies the scheduler frontend to extend its internal per
2759      insn data structures.  This hook should be called once before a series
2760      of calls to insn_init ().  */
2761 void (*extend_insn) (void);
2762
2763   /* This hook makes the scheduler frontend initialize its internal data
2764      structures for the passed insn.  */
2765 void (*init_insn) (rtx);
2766 };
2767
2768 /* A driver function that walks the basic blocks BBS and invokes the
2769    SSI hooks to initialize per-bb and per-insn data.  */
2770 static void
2771 sched_scan (const struct sched_scan_info_def *ssi, bb_vec_t bbs)
2772 {
2773 unsigned i;
2774 basic_block bb;
2775
2776 if (ssi->extend_bb)
2777 ssi->extend_bb ();
2778
2779 if (ssi->init_bb)
2780 FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
2781 ssi->init_bb (bb);
2782
2783 if (ssi->extend_insn)
2784 ssi->extend_insn ();
2785
2786 if (ssi->init_insn)
2787 FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
2788 {
2789 rtx insn;
2790
2791 FOR_BB_INSNS (bb, insn)
2792 ssi->init_insn (insn);
2793 }
2794 }
2795
2796 /* Implement hooks for collecting fundamental insn properties, such as
2797    whether the insn is an ASM or is within a SCHED_GROUP.  */
2798
2799 /* True when the "one-time init" data for INSN has already been initialized.  */
2800 static bool
2801 first_time_insn_init (insn_t insn)
2802 {
2803 return INSN_LIVE (insn) == NULL;
2804 }
2805
2806 /* Hash an entry in a transformed_insns hashtable. */
2807 static hashval_t
2808 hash_transformed_insns (const void *p)
2809 {
2810 return VINSN_HASH_RTX (((const struct transformed_insns *) p)->vinsn_old);
2811 }
2812
2813 /* Compare the entries in a transformed_insns hashtable. */
2814 static int
2815 eq_transformed_insns (const void *p, const void *q)
2816 {
2817 rtx i1 = VINSN_INSN_RTX (((const struct transformed_insns *) p)->vinsn_old);
2818 rtx i2 = VINSN_INSN_RTX (((const struct transformed_insns *) q)->vinsn_old);
2819
2820 if (INSN_UID (i1) == INSN_UID (i2))
2821 return 1;
2822 return rtx_equal_p (PATTERN (i1), PATTERN (i2));
2823 }
2824
2825 /* Free an entry in a transformed_insns hashtable. */
2826 static void
2827 free_transformed_insns (void *p)
2828 {
2829 struct transformed_insns *pti = (struct transformed_insns *) p;
2830
2831 vinsn_detach (pti->vinsn_old);
2832 vinsn_detach (pti->vinsn_new);
2833 free (pti);
2834 }
2835
2836 /* Initialize the s_i_d data for INSN that should be set up just once,
2837    when we first see the insn.  */
2838 static void
2839 init_first_time_insn_data (insn_t insn)
2840 {
2841 /* This should not be set if this is the first time we init data for
2842 insn. */
2843 gcc_assert (first_time_insn_init (insn));
2844
2845 /* These are needed for nops too. */
2846 INSN_LIVE (insn) = get_regset_from_pool ();
2847 INSN_LIVE_VALID_P (insn) = false;
2848
2849 if (!INSN_NOP_P (insn))
2850 {
2851 INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL);
2852 INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL);
2853 INSN_TRANSFORMED_INSNS (insn)
2854 = htab_create (16, hash_transformed_insns,
2855 eq_transformed_insns, free_transformed_insns);
2856 init_deps (&INSN_DEPS_CONTEXT (insn), true);
2857 }
2858 }
2859
2860 /* Free almost all of the above data for an INSN that has already been
2861    scheduled.  Used for extra-large basic blocks.  */
2862 void
2863 free_data_for_scheduled_insn (insn_t insn)
2864 {
2865 gcc_assert (! first_time_insn_init (insn));
2866
2867 if (! INSN_ANALYZED_DEPS (insn))
2868 return;
2869
2870 BITMAP_FREE (INSN_ANALYZED_DEPS (insn));
2871 BITMAP_FREE (INSN_FOUND_DEPS (insn));
2872 htab_delete (INSN_TRANSFORMED_INSNS (insn));
2873
2874 /* This is allocated only for bookkeeping insns. */
2875 if (INSN_ORIGINATORS (insn))
2876 BITMAP_FREE (INSN_ORIGINATORS (insn));
2877 free_deps (&INSN_DEPS_CONTEXT (insn));
2878
2879 INSN_ANALYZED_DEPS (insn) = NULL;
2880
2881   /* Clear the readonly flag so that we ICE when something tries to
2882      recalculate the deps context (as we believe that should not happen).  */
2883 (&INSN_DEPS_CONTEXT (insn))->readonly = 0;
2884 }
2885
2886 /* Free the same data as above for INSN. */
2887 static void
2888 free_first_time_insn_data (insn_t insn)
2889 {
2890 gcc_assert (! first_time_insn_init (insn));
2891
2892 free_data_for_scheduled_insn (insn);
2893 return_regset_to_pool (INSN_LIVE (insn));
2894 INSN_LIVE (insn) = NULL;
2895 INSN_LIVE_VALID_P (insn) = false;
2896 }
2897
2898 /* Initialize region-scope data structures for basic blocks. */
2899 static void
2900 init_global_and_expr_for_bb (basic_block bb)
2901 {
2902 if (sel_bb_empty_p (bb))
2903 return;
2904
2905 invalidate_av_set (bb);
2906 }
2907
2908 /* Data for global dependency analysis (to initialize CANT_MOVE and
2909 SCHED_GROUP_P). */
2910 static struct
2911 {
2912 /* Previous insn. */
2913 insn_t prev_insn;
2914 } init_global_data;
2915
2916 /* Determine if INSN is in a sched_group, is an asm, or should not be
2917    cloned.  After that, initialize its expr.  */
2918 static void
2919 init_global_and_expr_for_insn (insn_t insn)
2920 {
2921 if (LABEL_P (insn))
2922 return;
2923
2924 if (NOTE_INSN_BASIC_BLOCK_P (insn))
2925 {
2926 init_global_data.prev_insn = NULL_RTX;
2927 return;
2928 }
2929
2930 gcc_assert (INSN_P (insn));
2931
2932 if (SCHED_GROUP_P (insn))
2933 /* Setup a sched_group. */
2934 {
2935 insn_t prev_insn = init_global_data.prev_insn;
2936
2937 if (prev_insn)
2938 INSN_SCHED_NEXT (prev_insn) = insn;
2939
2940 init_global_data.prev_insn = insn;
2941 }
2942 else
2943 init_global_data.prev_insn = NULL_RTX;
2944
2945 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
2946 || asm_noperands (PATTERN (insn)) >= 0)
2947 /* Mark INSN as an asm. */
2948 INSN_ASM_P (insn) = true;
2949
2950 {
2951 bool force_unique_p;
2952 ds_t spec_done_ds;
2953
2954     /* Certain instructions cannot be cloned, and frame-related insns and
2955        the insn adjacent to NOTE_INSN_EPILOGUE_BEG cannot be moved out of
2956        their block.  */
2957 if (prologue_epilogue_contains (insn))
2958 {
2959 if (RTX_FRAME_RELATED_P (insn))
2960 CANT_MOVE (insn) = 1;
2961 else
2962 {
2963 rtx note;
2964 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2965 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE
2966 && ((enum insn_note) INTVAL (XEXP (note, 0))
2967 == NOTE_INSN_EPILOGUE_BEG))
2968 {
2969 CANT_MOVE (insn) = 1;
2970 break;
2971 }
2972 }
2973 force_unique_p = true;
2974 }
2975 else
2976 if (CANT_MOVE (insn)
2977 || INSN_ASM_P (insn)
2978 || SCHED_GROUP_P (insn)
2979 || CALL_P (insn)
2980 /* Exception handling insns are always unique. */
2981 || (cfun->can_throw_non_call_exceptions && can_throw_internal (insn))
2982          /* TRAP_IF, though it has an INSN code, is control_flow_insn_p ().  */
2983 || control_flow_insn_p (insn)
2984 || volatile_insn_p (PATTERN (insn))
2985 || (targetm.cannot_copy_insn_p
2986 && targetm.cannot_copy_insn_p (insn)))
2987 force_unique_p = true;
2988 else
2989 force_unique_p = false;
2990
2991 if (targetm.sched.get_insn_spec_ds)
2992 {
2993 spec_done_ds = targetm.sched.get_insn_spec_ds (insn);
2994 spec_done_ds = ds_get_max_dep_weak (spec_done_ds);
2995 }
2996 else
2997 spec_done_ds = 0;
2998
2999 /* Initialize INSN's expr. */
3000 init_expr (INSN_EXPR (insn), vinsn_create (insn, force_unique_p), 0,
3001 REG_BR_PROB_BASE, INSN_PRIORITY (insn), 0, BLOCK_NUM (insn),
3002 spec_done_ds, 0, 0, NULL, true, false, false, false,
3003 CANT_MOVE (insn));
3004 }
3005
3006 init_first_time_insn_data (insn);
3007 }
3008
3009 /* Scan the region and initialize instruction data for basic blocks BBS. */
3010 void
3011 sel_init_global_and_expr (bb_vec_t bbs)
3012 {
3013   /* ??? It would be nice to implement a push/pop scheme for sched_infos.  */
3014 const struct sched_scan_info_def ssi =
3015 {
3016 NULL, /* extend_bb */
3017 init_global_and_expr_for_bb, /* init_bb */
3018 extend_insn_data, /* extend_insn */
3019 init_global_and_expr_for_insn /* init_insn */
3020 };
3021
3022 sched_scan (&ssi, bbs);
3023 }
3024
3025 /* Finalize region-scope data structures for basic blocks. */
3026 static void
3027 finish_global_and_expr_for_bb (basic_block bb)
3028 {
3029 av_set_clear (&BB_AV_SET (bb));
3030 BB_AV_LEVEL (bb) = 0;
3031 }
3032
3033 /* Finalize INSN's data. */
3034 static void
3035 finish_global_and_expr_insn (insn_t insn)
3036 {
3037 if (LABEL_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn))
3038 return;
3039
3040 gcc_assert (INSN_P (insn));
3041
3042 if (INSN_LUID (insn) > 0)
3043 {
3044 free_first_time_insn_data (insn);
3045 INSN_WS_LEVEL (insn) = 0;
3046 CANT_MOVE (insn) = 0;
3047
3048       /* We can no longer assert this, as vinsns of this insn could be
3049          easily live in other insns' caches.  This should be changed to
3050          a counter-like approach among all vinsns.  */
3051 gcc_assert (true || VINSN_COUNT (INSN_VINSN (insn)) == 1);
3052 clear_expr (INSN_EXPR (insn));
3053 }
3054 }
3055
3056 /* Finalize per instruction data for the whole region. */
3057 void
3058 sel_finish_global_and_expr (void)
3059 {
3060 {
3061 bb_vec_t bbs;
3062 int i;
3063
3064 bbs = VEC_alloc (basic_block, heap, current_nr_blocks);
3065
3066 for (i = 0; i < current_nr_blocks; i++)
3067 VEC_quick_push (basic_block, bbs, BASIC_BLOCK (BB_TO_BLOCK (i)));
3068
3069 /* Clear AV_SETs and INSN_EXPRs. */
3070 {
3071 const struct sched_scan_info_def ssi =
3072 {
3073 NULL, /* extend_bb */
3074 finish_global_and_expr_for_bb, /* init_bb */
3075 NULL, /* extend_insn */
3076 finish_global_and_expr_insn /* init_insn */
3077 };
3078
3079 sched_scan (&ssi, bbs);
3080 }
3081
3082 VEC_free (basic_block, heap, bbs);
3083 }
3084
3085 finish_insns ();
3086 }
3087 \f
3088
3089 /* In the hooks below, we merely calculate whether or not a dependence
3090    exists, and in what part of the insn.  However, we will need more data
3091    when we start caching dependence requests.  */
3092
3093 /* Container to hold information for dependency analysis. */
3094 static struct
3095 {
3096 deps_t dc;
3097
3098 /* A variable to track which part of rtx we are scanning in
3099 sched-deps.c: sched_analyze_insn (). */
3100 deps_where_t where;
3101
3102 /* Current producer. */
3103 insn_t pro;
3104
3105 /* Current consumer. */
3106 vinsn_t con;
3107
3108   /* If SEL_DEPS_HAS_DEP_P[DEPS_IN_X] is true, then X has a dependence.
3109      X is from { INSN, LHS, RHS }.  */
3110 ds_t has_dep_p[DEPS_IN_NOWHERE];
3111 } has_dependence_data;
3112
3113 /* Start analyzing dependencies of INSN. */
3114 static void
3115 has_dependence_start_insn (insn_t insn ATTRIBUTE_UNUSED)
3116 {
3117 gcc_assert (has_dependence_data.where == DEPS_IN_NOWHERE);
3118
3119 has_dependence_data.where = DEPS_IN_INSN;
3120 }
3121
3122 /* Finish analyzing dependencies of an insn. */
3123 static void
3124 has_dependence_finish_insn (void)
3125 {
3126 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3127
3128 has_dependence_data.where = DEPS_IN_NOWHERE;
3129 }
3130
3131 /* Start analyzing dependencies of LHS. */
3132 static void
3133 has_dependence_start_lhs (rtx lhs ATTRIBUTE_UNUSED)
3134 {
3135 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3136
3137 if (VINSN_LHS (has_dependence_data.con) != NULL)
3138 has_dependence_data.where = DEPS_IN_LHS;
3139 }
3140
3141 /* Finish analyzing dependencies of an lhs. */
3142 static void
3143 has_dependence_finish_lhs (void)
3144 {
3145 has_dependence_data.where = DEPS_IN_INSN;
3146 }
3147
3148 /* Start analyzing dependencies of RHS. */
3149 static void
3150 has_dependence_start_rhs (rtx rhs ATTRIBUTE_UNUSED)
3151 {
3152 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3153
3154 if (VINSN_RHS (has_dependence_data.con) != NULL)
3155 has_dependence_data.where = DEPS_IN_RHS;
3156 }
3157
3158 /* Finish analyzing dependencies of an rhs.  */
3159 static void
3160 has_dependence_finish_rhs (void)
3161 {
3162 gcc_assert (has_dependence_data.where == DEPS_IN_RHS
3163 || has_dependence_data.where == DEPS_IN_INSN);
3164
3165 has_dependence_data.where = DEPS_IN_INSN;
3166 }
3167
3168 /* Note a set of REGNO. */
3169 static void
3170 has_dependence_note_reg_set (int regno)
3171 {
3172 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3173
3174 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3175 VINSN_INSN_RTX
3176 (has_dependence_data.con)))
3177 {
3178 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3179
3180 if (reg_last->sets != NULL
3181 || reg_last->clobbers != NULL)
3182 *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3183
3184 if (reg_last->uses)
3185 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3186 }
3187 }
3188
3189 /* Note a clobber of REGNO. */
3190 static void
3191 has_dependence_note_reg_clobber (int regno)
3192 {
3193 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3194
3195 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3196 VINSN_INSN_RTX
3197 (has_dependence_data.con)))
3198 {
3199 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3200
3201 if (reg_last->sets)
3202 *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3203
3204 if (reg_last->uses)
3205 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3206 }
3207 }
3208
3209 /* Note a use of REGNO. */
3210 static void
3211 has_dependence_note_reg_use (int regno)
3212 {
3213 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3214
3215 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3216 VINSN_INSN_RTX
3217 (has_dependence_data.con)))
3218 {
3219 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3220
3221 if (reg_last->sets)
3222 *dsp = (*dsp & ~SPECULATIVE) | DEP_TRUE;
3223
3224 if (reg_last->clobbers)
3225 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3226
3227 /* Handle BE_IN_SPEC. */
3228 if (reg_last->uses)
3229 {
3230 ds_t pro_spec_checked_ds;
3231
3232 pro_spec_checked_ds = INSN_SPEC_CHECKED_DS (has_dependence_data.pro);
3233 pro_spec_checked_ds = ds_get_max_dep_weak (pro_spec_checked_ds);
3234
3235 if (pro_spec_checked_ds != 0
3236 && bitmap_bit_p (INSN_REG_SETS (has_dependence_data.pro), regno))
3237 /* Merge BE_IN_SPEC bits into *DSP. */
3238 *dsp = ds_full_merge (*dsp, pro_spec_checked_ds,
3239 NULL_RTX, NULL_RTX);
3240 }
3241 }
3242 }
3243
3244 /* Note a memory dependence. */
3245 static void
3246 has_dependence_note_mem_dep (rtx mem ATTRIBUTE_UNUSED,
3247 rtx pending_mem ATTRIBUTE_UNUSED,
3248 insn_t pending_insn ATTRIBUTE_UNUSED,
3249 ds_t ds ATTRIBUTE_UNUSED)
3250 {
3251 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3252 VINSN_INSN_RTX (has_dependence_data.con)))
3253 {
3254 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3255
3256 *dsp = ds_full_merge (ds, *dsp, pending_mem, mem);
3257 }
3258 }
3259
3260 /* Note a dependence. */
3261 static void
3262 has_dependence_note_dep (insn_t pro ATTRIBUTE_UNUSED,
3263 ds_t ds ATTRIBUTE_UNUSED)
3264 {
3265 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3266 VINSN_INSN_RTX (has_dependence_data.con)))
3267 {
3268 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3269
3270 *dsp = ds_full_merge (ds, *dsp, NULL_RTX, NULL_RTX);
3271 }
3272 }
3273
3274 /* Mark the insn as having a hard dependence that prevents speculation. */
3275 void
3276 sel_mark_hard_insn (rtx insn)
3277 {
3278 int i;
3279
3280 /* Only work when we're in has_dependence_p mode.
3281 ??? This is a hack, this should actually be a hook. */
3282 if (!has_dependence_data.dc || !has_dependence_data.pro)
3283 return;
3284
3285 gcc_assert (insn == VINSN_INSN_RTX (has_dependence_data.con));
3286 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3287
3288 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3289 has_dependence_data.has_dep_p[i] &= ~SPECULATIVE;
3290 }
3291
3292 /* This structure holds the hooks for the dependency analysis used when
3293 actually processing dependencies in the scheduler. */
3294 static struct sched_deps_info_def has_dependence_sched_deps_info;
3295
3296 /* This initializes most of the fields of the above structure. */
3297 static const struct sched_deps_info_def const_has_dependence_sched_deps_info =
3298 {
3299 NULL,
3300
3301 has_dependence_start_insn,
3302 has_dependence_finish_insn,
3303 has_dependence_start_lhs,
3304 has_dependence_finish_lhs,
3305 has_dependence_start_rhs,
3306 has_dependence_finish_rhs,
3307 has_dependence_note_reg_set,
3308 has_dependence_note_reg_clobber,
3309 has_dependence_note_reg_use,
3310 has_dependence_note_mem_dep,
3311 has_dependence_note_dep,
3312
3313 0, /* use_cselib */
3314 0, /* use_deps_list */
3315 0 /* generate_spec_deps */
3316 };
3317
3318 /* Initialize has_dependence_sched_deps_info with extra spec field. */
3319 static void
3320 setup_has_dependence_sched_deps_info (void)
3321 {
3322 memcpy (&has_dependence_sched_deps_info,
3323 &const_has_dependence_sched_deps_info,
3324 sizeof (has_dependence_sched_deps_info));
3325
3326 if (spec_info != NULL)
3327 has_dependence_sched_deps_info.generate_spec_deps = 1;
3328
3329 sched_deps_info = &has_dependence_sched_deps_info;
3330 }
3331
3332 /* Remove all dependences found and recorded in has_dependence_data array. */
3333 void
3334 sel_clear_has_dependence (void)
3335 {
3336 int i;
3337
3338 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3339 has_dependence_data.has_dep_p[i] = 0;
3340 }
3341
3342 /* Return nonzero if EXPR is dependent upon PRED.  Store a pointer to
3343    the per-part dependence status array in HAS_DEP_PP.  */
3344 ds_t
3345 has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
3346 {
3347 int i;
3348 ds_t ds;
3349 struct deps_desc *dc;
3350
3351 if (INSN_SIMPLEJUMP_P (pred))
3352 /* Unconditional jump is just a transfer of control flow.
3353 Ignore it. */
3354 return false;
3355
3356 dc = &INSN_DEPS_CONTEXT (pred);
3357
3358 /* We init this field lazily. */
3359 if (dc->reg_last == NULL)
3360 init_deps_reg_last (dc);
3361
3362 if (!dc->readonly)
3363 {
3364 has_dependence_data.pro = NULL;
3365 /* Initialize empty dep context with information about PRED. */
3366 advance_deps_context (dc, pred);
3367 dc->readonly = 1;
3368 }
3369
3370 has_dependence_data.where = DEPS_IN_NOWHERE;
3371 has_dependence_data.pro = pred;
3372 has_dependence_data.con = EXPR_VINSN (expr);
3373 has_dependence_data.dc = dc;
3374
3375 sel_clear_has_dependence ();
3376
3377 /* Now catch all dependencies that would be generated between PRED and
3378 INSN. */
3379 setup_has_dependence_sched_deps_info ();
3380 deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3381 has_dependence_data.dc = NULL;
3382
3383 /* When a barrier was found, set DEPS_IN_INSN bits. */
3384 if (dc->last_reg_pending_barrier == TRUE_BARRIER)
3385 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_TRUE;
3386 else if (dc->last_reg_pending_barrier == MOVE_BARRIER)
3387 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3388
3389   /* Do not allow stores to memory to move through checks.  Currently
3390      we don't move this to sched-deps.c as the check doesn't have
3391      obvious places to which this dependence can be attached.
3392      FIXME: this should go to a hook.  */
3393 if (EXPR_LHS (expr)
3394 && MEM_P (EXPR_LHS (expr))
3395 && sel_insn_is_speculation_check (pred))
3396 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3397
3398 *has_dep_pp = has_dependence_data.has_dep_p;
3399 ds = 0;
3400 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3401 ds = ds_full_merge (ds, has_dependence_data.has_dep_p[i],
3402 NULL_RTX, NULL_RTX);
3403
3404 return ds;
3405 }
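
/* A hedged usage sketch for has_dependence_p (illustrative only; the
   wrapper name expr_blocked_by_p is hypothetical).  A zero result means
   PRED generates no dependence with EXPR; otherwise HAS_DEP_P can be
   indexed by DEPS_IN_INSN / DEPS_IN_LHS / DEPS_IN_RHS to see which part
   of the consumer is involved.  */
#if 0
static bool
expr_blocked_by_p (expr_t expr, insn_t pred)
{
  ds_t *has_dep_p;
  ds_t full_ds = has_dependence_p (expr, pred, &has_dep_p);

  if (full_ds == 0)
    return false;

  /* A dependence recorded only against the RHS might still be worked
     around by transformations such as substitution; one recorded
     against the whole insn cannot.  */
  return has_dep_p[DEPS_IN_INSN] != 0;
}
#endif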
3406 \f
3407
3408 /* Dependence hooks implementation that checks dependence latency constraints
3409 on the insns being scheduled. The entry point for these routines is
3410 tick_check_p predicate. */
3411
3412 static struct
3413 {
3414 /* An expr we are currently checking. */
3415 expr_t expr;
3416
3417 /* A minimal cycle for its scheduling. */
3418 int cycle;
3419
3420 /* Whether we have seen a true dependence while checking. */
3421 bool seen_true_dep_p;
3422 } tick_check_data;
3423
3424 /* Update the minimal scheduling cycle of the expr being checked, given
3425    that it depends on PRO_INSN with status DS and weight DW.  */
3426 static void
3427 tick_check_dep_with_dw (insn_t pro_insn, ds_t ds, dw_t dw)
3428 {
3429 expr_t con_expr = tick_check_data.expr;
3430 insn_t con_insn = EXPR_INSN_RTX (con_expr);
3431
3432 if (con_insn != pro_insn)
3433 {
3434 enum reg_note dt;
3435 int tick;
3436
3437 if (/* PROducer was removed from above due to pipelining. */
3438 !INSN_IN_STREAM_P (pro_insn)
3439 /* Or PROducer was originally on the next iteration regarding the
3440 CONsumer. */
3441 || (INSN_SCHED_TIMES (pro_insn)
3442 - EXPR_SCHED_TIMES (con_expr)) > 1)
3443 /* Don't count this dependence. */
3444 return;
3445
3446 dt = ds_to_dt (ds);
3447 if (dt == REG_DEP_TRUE)
3448 tick_check_data.seen_true_dep_p = true;
3449
3450 gcc_assert (INSN_SCHED_CYCLE (pro_insn) > 0);
3451
3452 {
3453 dep_def _dep, *dep = &_dep;
3454
3455 init_dep (dep, pro_insn, con_insn, dt);
3456
3457 tick = INSN_SCHED_CYCLE (pro_insn) + dep_cost_1 (dep, dw);
3458 }
3459
3460 /* When there are several kinds of dependencies between pro and con,
3461 only REG_DEP_TRUE should be taken into account. */
3462 if (tick > tick_check_data.cycle
3463 && (dt == REG_DEP_TRUE || !tick_check_data.seen_true_dep_p))
3464 tick_check_data.cycle = tick;
3465 }
3466 }
3467
3468 /* An implementation of note_dep hook. */
3469 static void
3470 tick_check_note_dep (insn_t pro, ds_t ds)
3471 {
3472 tick_check_dep_with_dw (pro, ds, 0);
3473 }
3474
3475 /* An implementation of note_mem_dep hook. */
3476 static void
3477 tick_check_note_mem_dep (rtx mem1, rtx mem2, insn_t pro, ds_t ds)
3478 {
3479 dw_t dw;
3480
3481 dw = (ds_to_dt (ds) == REG_DEP_TRUE
3482 ? estimate_dep_weak (mem1, mem2)
3483 : 0);
3484
3485 tick_check_dep_with_dw (pro, ds, dw);
3486 }
3487
3488 /* This structure contains hooks for dependence analysis used when determining
3489 whether an insn is ready for scheduling. */
3490 static struct sched_deps_info_def tick_check_sched_deps_info =
3491 {
3492 NULL,
3493
3494 NULL,
3495 NULL,
3496 NULL,
3497 NULL,
3498 NULL,
3499 NULL,
3500 haifa_note_reg_set,
3501 haifa_note_reg_clobber,
3502 haifa_note_reg_use,
3503 tick_check_note_mem_dep,
3504 tick_check_note_dep,
3505
3506 0, 0, 0
3507 };
3508
3509 /* Estimate the number of cycles from the current cycle of FENCE until
3510    EXPR can be scheduled.  Return 0 if all data from producers in DC is ready.  */
3511 int
3512 tick_check_p (expr_t expr, deps_t dc, fence_t fence)
3513 {
3514 int cycles_left;
3515 /* Initialize variables. */
3516 tick_check_data.expr = expr;
3517 tick_check_data.cycle = 0;
3518 tick_check_data.seen_true_dep_p = false;
3519 sched_deps_info = &tick_check_sched_deps_info;
3520
3521 gcc_assert (!dc->readonly);
3522 dc->readonly = 1;
3523 deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3524 dc->readonly = 0;
3525
3526 cycles_left = tick_check_data.cycle - FENCE_CYCLE (fence);
3527
3528 return cycles_left >= 0 ? cycles_left : 0;
3529 }
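
/* A short usage sketch for tick_check_p (illustrative only, with EXPR,
   DC and FENCE assumed to be in scope in some scheduling loop):

     int stall = tick_check_p (expr, dc, fence);
     if (stall == 0)
       ... EXPR may issue on FENCE_CYCLE (fence) ...
     else
       ... EXPR must wait at least STALL more cycles ...  */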
3530 \f
3531
3532 /* Functions to work with insns. */
3533
3534 /* Returns true if LHS of INSN is the same as DEST of an insn
3535 being moved. */
3536 bool
3537 lhs_of_insn_equals_to_dest_p (insn_t insn, rtx dest)
3538 {
3539 rtx lhs = INSN_LHS (insn);
3540
3541 if (lhs == NULL || dest == NULL)
3542 return false;
3543
3544 return rtx_equal_p (lhs, dest);
3545 }
3546
3547 /* Return s_i_d entry of INSN. Callable from debugger. */
3548 sel_insn_data_def
3549 insn_sid (insn_t insn)
3550 {
3551 return *SID (insn);
3552 }
3553
3554 /* True when INSN is a speculative check. We can tell this by looking
3555 at the data structures of the selective scheduler, not by examining
3556 the pattern. */
3557 bool
3558 sel_insn_is_speculation_check (rtx insn)
3559 {
3560 return s_i_d && !! INSN_SPEC_CHECKED_DS (insn);
3561 }
3562
3563 /* Extracts machine mode MODE and destination location DST_LOC
3564 for given INSN. */
3565 void
3566 get_dest_and_mode (rtx insn, rtx *dst_loc, enum machine_mode *mode)
3567 {
3568 rtx pat = PATTERN (insn);
3569
3570 gcc_assert (dst_loc);
3571 gcc_assert (GET_CODE (pat) == SET);
3572
3573 *dst_loc = SET_DEST (pat);
3574
3575 gcc_assert (*dst_loc);
3576 gcc_assert (MEM_P (*dst_loc) || REG_P (*dst_loc));
3577
3578 if (mode)
3579 *mode = GET_MODE (*dst_loc);
3580 }
3581
3582 /* Returns true when moving through JUMP will result in bookkeeping
3583 creation. */
3584 bool
3585 bookkeeping_can_be_created_if_moved_through_p (insn_t jump)
3586 {
3587 insn_t succ;
3588 succ_iterator si;
3589
3590 FOR_EACH_SUCC (succ, si, jump)
3591 if (sel_num_cfg_preds_gt_1 (succ))
3592 return true;
3593
3594 return false;
3595 }
3596
3597 /* Return 'true' if INSN is the only one in its basic block. */
3598 static bool
3599 insn_is_the_only_one_in_bb_p (insn_t insn)
3600 {
3601 return sel_bb_head_p (insn) && sel_bb_end_p (insn);
3602 }
3603
3604 #ifdef ENABLE_CHECKING
3605 /* Check that the region we're scheduling still has at most one
3606 backedge. */
3607 static void
3608 verify_backedges (void)
3609 {
3610 if (pipelining_p)
3611 {
3612 int i, n = 0;
3613 edge e;
3614 edge_iterator ei;
3615
3616 for (i = 0; i < current_nr_blocks; i++)
3617 FOR_EACH_EDGE (e, ei, BASIC_BLOCK (BB_TO_BLOCK (i))->succs)
3618 if (in_current_region_p (e->dest)
3619 && BLOCK_TO_BB (e->dest->index) < i)
3620 n++;
3621
3622 gcc_assert (n <= 1);
3623 }
3624 }
3625 #endif
3626 \f
3627
3628 /* Functions to work with control flow. */
3629
3630 /* Recompute BLOCK_TO_BB and BB_FOR_BLOCK for the current region so that
3631    blocks are sorted in topological order (the order might have been
3632    invalidated by redirecting an edge).  */
3633 static void
3634 sel_recompute_toporder (void)
3635 {
3636 int i, n, rgn;
3637 int *postorder, n_blocks;
3638
3639 postorder = XALLOCAVEC (int, n_basic_blocks);
3640 n_blocks = post_order_compute (postorder, false, false);
3641
3642 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
3643 for (n = 0, i = n_blocks - 1; i >= 0; i--)
3644 if (CONTAINING_RGN (postorder[i]) == rgn)
3645 {
3646 BLOCK_TO_BB (postorder[i]) = n;
3647 BB_TO_BLOCK (n) = postorder[i];
3648 n++;
3649 }
3650
3651 /* Assert that we updated info for all blocks. We may miss some blocks if
3652 this function is called when redirecting an edge made a block
3653 unreachable, but that block is not deleted yet. */
3654 gcc_assert (n == RGN_NR_BLOCKS (rgn));
3655 }
3656
3657 /* Tidy the possibly empty block BB. */
3658 static bool
3659 maybe_tidy_empty_bb (basic_block bb)
3660 {
3661 basic_block succ_bb, pred_bb;
3662 VEC (basic_block, heap) *dom_bbs;
3663 edge e;
3664 edge_iterator ei;
3665 bool rescan_p;
3666
3667   /* Keep an empty bb only if it immediately precedes EXIT and has an
3668      incoming non-fallthrough edge, or if it has no predecessors or
3669      successors.  Otherwise remove it.  */
3670 if (!sel_bb_empty_p (bb)
3671 || (single_succ_p (bb)
3672 && single_succ (bb) == EXIT_BLOCK_PTR
3673 && (!single_pred_p (bb)
3674 || !(single_pred_edge (bb)->flags & EDGE_FALLTHRU)))
3675 || EDGE_COUNT (bb->preds) == 0
3676 || EDGE_COUNT (bb->succs) == 0)
3677 return false;
3678
3679 /* Do not attempt to redirect complex edges. */
3680 FOR_EACH_EDGE (e, ei, bb->preds)
3681 if (e->flags & EDGE_COMPLEX)
3682 return false;
3683
3684 free_data_sets (bb);
3685
3686   /* Do not delete BB if it has more than one successor.
3687      That can occur when we are moving a jump.  */
3688 if (!single_succ_p (bb))
3689 {
3690 gcc_assert (can_merge_blocks_p (bb->prev_bb, bb));
3691 sel_merge_blocks (bb->prev_bb, bb);
3692 return true;
3693 }
3694
3695 succ_bb = single_succ (bb);
3696 rescan_p = true;
3697 pred_bb = NULL;
3698 dom_bbs = NULL;
3699
3700 /* Redirect all non-fallthru edges to the next bb. */
3701 while (rescan_p)
3702 {
3703 rescan_p = false;
3704
3705 FOR_EACH_EDGE (e, ei, bb->preds)
3706 {
3707 pred_bb = e->src;
3708
3709 if (!(e->flags & EDGE_FALLTHRU))
3710 {
3711               /* We cannot invalidate the computed topological order by moving
3712                  the edge destination block (E->SUCC) along a fallthru edge.
3713
3714                  We update dominators here only when redirecting leaves
3715                  an unreachable block; otherwise
3716                  sel_redirect_edge_and_branch will take care of it.  */
3717 if (e->dest != bb
3718 && single_pred_p (e->dest))
3719 VEC_safe_push (basic_block, heap, dom_bbs, e->dest);
3720 sel_redirect_edge_and_branch (e, succ_bb);
3721 rescan_p = true;
3722 break;
3723 }
3724 /* If the edge is fallthru, but PRED_BB ends in a conditional jump
3725 to BB (so there is no non-fallthru edge from PRED_BB to BB), we
3726 still have to adjust it. */
3727 else if (single_succ_p (pred_bb) && any_condjump_p (BB_END (pred_bb)))
3728 {
3729 /* If possible, try to remove the unneeded conditional jump. */
3730 if (INSN_SCHED_TIMES (BB_END (pred_bb)) == 0
3731 && !IN_CURRENT_FENCE_P (BB_END (pred_bb)))
3732 {
3733 if (!sel_remove_insn (BB_END (pred_bb), false, false))
3734 tidy_fallthru_edge (e);
3735 }
3736 else
3737 sel_redirect_edge_and_branch (e, succ_bb);
3738 rescan_p = true;
3739 break;
3740 }
3741 }
3742 }
3743
3744 if (can_merge_blocks_p (bb->prev_bb, bb))
3745 sel_merge_blocks (bb->prev_bb, bb);
3746 else
3747 {
3748 /* This is a block without fallthru predecessor. Just delete it. */
3749 gcc_assert (pred_bb != NULL);
3750
3751 if (in_current_region_p (pred_bb))
3752 move_bb_info (pred_bb, bb);
3753 remove_empty_bb (bb, true);
3754 }
3755
3756 if (!VEC_empty (basic_block, dom_bbs))
3757 {
3758 VEC_safe_push (basic_block, heap, dom_bbs, succ_bb);
3759 iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);
3760 VEC_free (basic_block, heap, dom_bbs);
3761 }
3762
3763 return true;
3764 }
3765
3766 /* Tidy the control flow after we have removed the original insn from
3767    XBB.  Return true if we have removed some blocks.  When FULL_TIDYING
3768    is true, also try to optimize control flow on non-empty blocks.  */
3769 bool
3770 tidy_control_flow (basic_block xbb, bool full_tidying)
3771 {
3772 bool changed = true;
3773 insn_t first, last;
3774
3775 /* First check whether XBB is empty. */
3776 changed = maybe_tidy_empty_bb (xbb);
3777 if (changed || !full_tidying)
3778 return changed;
3779
3780   /* Check if an unnecessary jump is left after removing the insn.  */
3781 if (bb_has_removable_jump_to_p (xbb, xbb->next_bb)
3782 && INSN_SCHED_TIMES (BB_END (xbb)) == 0
3783 && !IN_CURRENT_FENCE_P (BB_END (xbb)))
3784 {
3785 if (sel_remove_insn (BB_END (xbb), false, false))
3786 return true;
3787 tidy_fallthru_edge (EDGE_SUCC (xbb, 0));
3788 }
3789
3790 first = sel_bb_head (xbb);
3791 last = sel_bb_end (xbb);
3792 if (MAY_HAVE_DEBUG_INSNS)
3793 {
3794 if (first != last && DEBUG_INSN_P (first))
3795 do
3796 first = NEXT_INSN (first);
3797 while (first != last && (DEBUG_INSN_P (first) || NOTE_P (first)));
3798
3799 if (first != last && DEBUG_INSN_P (last))
3800 do
3801 last = PREV_INSN (last);
3802 while (first != last && (DEBUG_INSN_P (last) || NOTE_P (last)));
3803 }
3804   /* Check if there is an unnecessary jump in the previous basic block,
3805      leading to the next basic block, left after removing INSN from the
3806      stream.  If so, remove that jump and redirect the edge to the current
3807      basic block (where INSN was before deletion).  This way, when the NOP
3808      is deleted several instructions later together with its basic block,
3809      we will not get a jump to the next instruction, which can be
3810      harmful.  */
3811 if (first == last
3812 && !sel_bb_empty_p (xbb)
3813 && INSN_NOP_P (last)
3814 /* Flow goes fallthru from current block to the next. */
3815 && EDGE_COUNT (xbb->succs) == 1
3816 && (EDGE_SUCC (xbb, 0)->flags & EDGE_FALLTHRU)
3817 /* When successor is an EXIT block, it may not be the next block. */
3818 && single_succ (xbb) != EXIT_BLOCK_PTR
3819       /* And the unconditional jump in the previous basic block leads to
3820          the next basic block of XBB and can be safely removed.  */
3821 && in_current_region_p (xbb->prev_bb)
3822 && bb_has_removable_jump_to_p (xbb->prev_bb, xbb->next_bb)
3823 && INSN_SCHED_TIMES (BB_END (xbb->prev_bb)) == 0
3824 /* Also this jump is not at the scheduling boundary. */
3825 && !IN_CURRENT_FENCE_P (BB_END (xbb->prev_bb)))
3826 {
3827 bool recompute_toporder_p;
3828 /* Clear data structures of jump - jump itself will be removed
3829 by sel_redirect_edge_and_branch. */
3830 clear_expr (INSN_EXPR (BB_END (xbb->prev_bb)));
3831 recompute_toporder_p
3832 = sel_redirect_edge_and_branch (EDGE_SUCC (xbb->prev_bb, 0), xbb);
3833
3834 gcc_assert (EDGE_SUCC (xbb->prev_bb, 0)->flags & EDGE_FALLTHRU);
3835
3836       /* It can turn out that, after removing the unused jump, the basic
3837          block that contained it becomes empty too.  In such a case
3838          remove it as well.  */
3839 if (sel_bb_empty_p (xbb->prev_bb))
3840 changed = maybe_tidy_empty_bb (xbb->prev_bb);
3841 if (recompute_toporder_p)
3842 sel_recompute_toporder ();
3843 }
3844
3845 #ifdef ENABLE_CHECKING
3846 verify_backedges ();
3847 verify_dominators (CDI_DOMINATORS);
3848 #endif
3849
3850 return changed;
3851 }
3852
3853 /* Purge meaningless empty blocks in the middle of a region. */
3854 void
3855 purge_empty_blocks (void)
3856 {
3857 int i;
3858
3859 /* Do not attempt to delete the first basic block in the region. */
3860 for (i = 1; i < current_nr_blocks; )
3861 {
3862 basic_block b = BASIC_BLOCK (BB_TO_BLOCK (i));
3863
3864 if (maybe_tidy_empty_bb (b))
3865 continue;
3866
3867 i++;
3868 }
3869 }
3870
3871 /* Rip off INSN from the insn stream.  When ONLY_DISCONNECT is true,
3872    do not delete INSN's data, because it will be re-emitted later.
3873    Return true if we have removed some blocks afterwards.  */
3874 bool
3875 sel_remove_insn (insn_t insn, bool only_disconnect, bool full_tidying)
3876 {
3877 basic_block bb = BLOCK_FOR_INSN (insn);
3878
3879 gcc_assert (INSN_IN_STREAM_P (insn));
3880
3881 if (DEBUG_INSN_P (insn) && BB_AV_SET_VALID_P (bb))
3882 {
3883 expr_t expr;
3884 av_set_iterator i;
3885
3886 /* When we remove a debug insn that is head of a BB, it remains
3887 in the AV_SET of the block, but it shouldn't. */
3888 FOR_EACH_EXPR_1 (expr, i, &BB_AV_SET (bb))
3889 if (EXPR_INSN_RTX (expr) == insn)
3890 {
3891 av_set_iter_remove (&i);
3892 break;
3893 }
3894 }
3895
3896 if (only_disconnect)
3897 {
3898 insn_t prev = PREV_INSN (insn);
3899 insn_t next = NEXT_INSN (insn);
3900 basic_block bb = BLOCK_FOR_INSN (insn);
3901
3902 NEXT_INSN (prev) = next;
3903 PREV_INSN (next) = prev;
3904
3905 if (BB_HEAD (bb) == insn)
3906 {
3907 gcc_assert (BLOCK_FOR_INSN (prev) == bb);
3908 BB_HEAD (bb) = prev;
3909 }
3910 if (BB_END (bb) == insn)
3911 BB_END (bb) = prev;
3912 }
3913 else
3914 {
3915 remove_insn (insn);
3916 clear_expr (INSN_EXPR (insn));
3917 }
3918
3919   /* It is necessary to null these fields before calling add_insn ().  */
3920 PREV_INSN (insn) = NULL_RTX;
3921 NEXT_INSN (insn) = NULL_RTX;
3922
3923 return tidy_control_flow (bb, full_tidying);
3924 }
3925
3926 /* Estimate the number of insns in BB. */
3927 static int
3928 sel_estimate_number_of_insns (basic_block bb)
3929 {
3930 int res = 0;
3931 insn_t insn = NEXT_INSN (BB_HEAD (bb)), next_tail = NEXT_INSN (BB_END (bb));
3932
3933 for (; insn != next_tail; insn = NEXT_INSN (insn))
3934 if (NONDEBUG_INSN_P (insn))
3935 res++;
3936
3937 return res;
3938 }
3939
3940 /* We don't need separate luids for notes or labels. */
3941 static int
3942 sel_luid_for_non_insn (rtx x)
3943 {
3944 gcc_assert (NOTE_P (x) || LABEL_P (x));
3945
3946 return -1;
3947 }
3948
3949 /* Find the proper seqno for inserting at INSN by successors.
3950 Return -1 if no successors with positive seqno exist. */
3951 static int
3952 get_seqno_by_succs (rtx insn)
3953 {
3954 basic_block bb = BLOCK_FOR_INSN (insn);
3955 rtx tmp = insn, end = BB_END (bb);
3956 int seqno;
3957 insn_t succ = NULL;
3958 succ_iterator si;
3959
3960 while (tmp != end)
3961 {
3962 tmp = NEXT_INSN (tmp);
3963 if (INSN_P (tmp))
3964 return INSN_SEQNO (tmp);
3965 }
3966
3967 seqno = INT_MAX;
3968
3969 FOR_EACH_SUCC_1 (succ, si, end, SUCCS_NORMAL)
3970 if (INSN_SEQNO (succ) > 0)
3971 seqno = MIN (seqno, INSN_SEQNO (succ));
3972
3973 if (seqno == INT_MAX)
3974 return -1;
3975
3976 return seqno;
3977 }
3978
3979 /* Compute seqno for INSN by its preds or succs. */
3980 static int
3981 get_seqno_for_a_jump (insn_t insn)
3982 {
3983 int seqno;
3984
3985 gcc_assert (INSN_SIMPLEJUMP_P (insn));
3986
3987 if (!sel_bb_head_p (insn))
3988 seqno = INSN_SEQNO (PREV_INSN (insn));
3989 else
3990 {
3991 basic_block bb = BLOCK_FOR_INSN (insn);
3992
3993 if (single_pred_p (bb)
3994 && !in_current_region_p (single_pred (bb)))
3995 {
3996 /* We can have preds outside a region when splitting edges
3997 for pipelining of an outer loop. Use succ instead.
3998 There should be only one of them. */
3999 insn_t succ = NULL;
4000 succ_iterator si;
4001 bool first = true;
4002
4003 gcc_assert (flag_sel_sched_pipelining_outer_loops
4004 && current_loop_nest);
4005 FOR_EACH_SUCC_1 (succ, si, insn,
4006 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
4007 {
4008 gcc_assert (first);
4009 first = false;
4010 }
4011
4012 gcc_assert (succ != NULL);
4013 seqno = INSN_SEQNO (succ);
4014 }
4015 else
4016 {
4017 insn_t *preds;
4018 int n;
4019
4020 cfg_preds (BLOCK_FOR_INSN (insn), &preds, &n);
4021
4022 gcc_assert (n > 0);
4023 /* For one predecessor, use simple method. */
4024 if (n == 1)
4025 seqno = INSN_SEQNO (preds[0]);
4026 else
4027 seqno = get_seqno_by_preds (insn);
4028
4029 free (preds);
4030 }
4031 }
4032
4033 /* We were unable to find a good seqno among preds. */
4034 if (seqno < 0)
4035 seqno = get_seqno_by_succs (insn);
4036
4037 gcc_assert (seqno >= 0);
4038
4039 return seqno;
4040 }
4041
4042 /* Find the proper seqno for inserting at INSN. Returns -1 if no predecessors
4043 with positive seqno exist. */
4044 int
4045 get_seqno_by_preds (rtx insn)
4046 {
4047 basic_block bb = BLOCK_FOR_INSN (insn);
4048 rtx tmp = insn, head = BB_HEAD (bb);
4049 insn_t *preds;
4050 int n, i, seqno;
4051
4052 while (tmp != head)
4053 {
4054 tmp = PREV_INSN (tmp);
4055 if (INSN_P (tmp))
4056 return INSN_SEQNO (tmp);
4057 }
4058
4059 cfg_preds (bb, &preds, &n);
4060 for (i = 0, seqno = -1; i < n; i++)
4061 seqno = MAX (seqno, INSN_SEQNO (preds[i]));
4062
4063 return seqno;
4064 }
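
/* A worked example for the lookup above: if INSN is preceded in its block
   by a real insn, that insn's seqno is returned directly; otherwise, with
   in-region predecessors ending in insns carrying seqnos 3 and 7, the
   result is MAX (3, 7) == 7.  */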
4065
4066 \f
4067
4068 /* Extend pass-scope data structures for basic blocks. */
4069 void
4070 sel_extend_global_bb_info (void)
4071 {
4072 VEC_safe_grow_cleared (sel_global_bb_info_def, heap, sel_global_bb_info,
4073 last_basic_block);
4074 }
4075
4076 /* Extend region-scope data structures for basic blocks. */
4077 static void
4078 extend_region_bb_info (void)
4079 {
4080 VEC_safe_grow_cleared (sel_region_bb_info_def, heap, sel_region_bb_info,
4081 last_basic_block);
4082 }
4083
4084 /* Extend all data structures to fit for all basic blocks. */
4085 static void
4086 extend_bb_info (void)
4087 {
4088 sel_extend_global_bb_info ();
4089 extend_region_bb_info ();
4090 }
4091
4092 /* Finalize pass-scope data structures for basic blocks. */
4093 void
4094 sel_finish_global_bb_info (void)
4095 {
4096 VEC_free (sel_global_bb_info_def, heap, sel_global_bb_info);
4097 }
4098
4099 /* Finalize region-scope data structures for basic blocks. */
4100 static void
4101 finish_region_bb_info (void)
4102 {
4103 VEC_free (sel_region_bb_info_def, heap, sel_region_bb_info);
4104 }
4105 \f
4106
4107 /* Data for each insn in current region. */
4108 VEC (sel_insn_data_def, heap) *s_i_d = NULL;
4109
4110 /* Extend data structures for insns from current region. */
4111 static void
4112 extend_insn_data (void)
4113 {
4114 int reserve;
4115
4116 sched_extend_target ();
4117 sched_deps_init (false);
4118
4119 /* Extend data structures for insns from current region. */
4120 reserve = (sched_max_luid + 1
4121 - VEC_length (sel_insn_data_def, s_i_d));
4122 if (reserve > 0
4123 && ! VEC_space (sel_insn_data_def, s_i_d, reserve))
4124 {
4125 int size;
4126
4127 if (sched_max_luid / 2 > 1024)
4128 size = sched_max_luid + 1024;
4129 else
4130 size = 3 * sched_max_luid / 2;
4131
4132
4133 VEC_safe_grow_cleared (sel_insn_data_def, heap, s_i_d, size);
4134 }
4135 }
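
/* Growth policy of the above, worked through (values illustrative): with
   sched_max_luid == 1000 the vector grows to 3 * 1000 / 2 == 1500 entries,
   while with sched_max_luid == 3000 (so sched_max_luid / 2 > 1024) it
   grows to 3000 + 1024 == 4024, i.e. growth switches from multiplicative
   to additive for large regions.  */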
4136
4137 /* Finalize data structures for insns from current region. */
4138 static void
4139 finish_insns (void)
4140 {
4141 unsigned i;
4142
4143 /* Clear here all dependence contexts that may have been left over
4144 from insns that were removed during scheduling. */
4145 for (i = 0; i < VEC_length (sel_insn_data_def, s_i_d); i++)
4146 {
4147 sel_insn_data_def *sid_entry = VEC_index (sel_insn_data_def, s_i_d, i);
4148
4149 if (sid_entry->live)
4150 return_regset_to_pool (sid_entry->live);
4151 if (sid_entry->analyzed_deps)
4152 {
4153 BITMAP_FREE (sid_entry->analyzed_deps);
4154 BITMAP_FREE (sid_entry->found_deps);
4155 htab_delete (sid_entry->transformed_insns);
4156 free_deps (&sid_entry->deps_context);
4157 }
4158 if (EXPR_VINSN (&sid_entry->expr))
4159 {
4160 clear_expr (&sid_entry->expr);
4161
4162 /* Also, clear the CANT_MOVE bit here, because we really don't want
4163 it to be passed on to the next region. */
4164 CANT_MOVE_BY_LUID (i) = 0;
4165 }
4166 }
4167
4168 VEC_free (sel_insn_data_def, heap, s_i_d);
4169 }
4170
4171 /* A proxy to pass initialization data to init_insn (). */
4172 static sel_insn_data_def _insn_init_ssid;
4173 static sel_insn_data_t insn_init_ssid = &_insn_init_ssid;
4174
4175 /* If true, create a new vinsn. Otherwise use the one from EXPR. */
4176 static bool insn_init_create_new_vinsn_p;
4177
4178 /* Set all necessary data for initialization of the new insn[s]. */
4179 static expr_t
4180 set_insn_init (expr_t expr, vinsn_t vi, int seqno)
4181 {
4182 expr_t x = &insn_init_ssid->expr;
4183
4184 copy_expr_onside (x, expr);
4185 if (vi != NULL)
4186 {
4187 insn_init_create_new_vinsn_p = false;
4188 change_vinsn_in_expr (x, vi);
4189 }
4190 else
4191 insn_init_create_new_vinsn_p = true;
4192
4193 insn_init_ssid->seqno = seqno;
4194 return x;
4195 }
4196
4197 /* Init data for INSN. */
4198 static void
4199 init_insn_data (insn_t insn)
4200 {
4201 expr_t expr;
4202 sel_insn_data_t ssid = insn_init_ssid;
4203
4204 /* The fields mentioned below are special and hence are not being
4205 propagated to the new insns. */
4206 gcc_assert (!ssid->asm_p && ssid->sched_next == NULL
4207 && !ssid->after_stall_p && ssid->sched_cycle == 0);
4208 gcc_assert (INSN_P (insn) && INSN_LUID (insn) > 0);
4209
4210 expr = INSN_EXPR (insn);
4211 copy_expr (expr, &ssid->expr);
4212 prepare_insn_expr (insn, ssid->seqno);
4213
4214 if (insn_init_create_new_vinsn_p)
4215 change_vinsn_in_expr (expr, vinsn_create (insn, init_insn_force_unique_p));
4216
4217 if (first_time_insn_init (insn))
4218 init_first_time_insn_data (insn);
4219 }
4220
4221 /* This is used to initialize spurious jumps generated by
4222 sel_redirect_edge (). */
4223 static void
4224 init_simplejump_data (insn_t insn)
4225 {
4226 init_expr (INSN_EXPR (insn), vinsn_create (insn, false), 0,
4227 REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0, NULL, true, false, false,
4228 false, true);
4229 INSN_SEQNO (insn) = get_seqno_for_a_jump (insn);
4230 init_first_time_insn_data (insn);
4231 }
4232
4233 /* Perform deferred initialization of insns. This is used to process
4234 a new jump that may be created by redirect_edge. */
4235 void
4236 sel_init_new_insn (insn_t insn, int flags)
4237 {
4238 /* We create data structures for bb when the first insn is emitted in it. */
4239 if (INSN_P (insn)
4240 && INSN_IN_STREAM_P (insn)
4241 && insn_is_the_only_one_in_bb_p (insn))
4242 {
4243 extend_bb_info ();
4244 create_initial_data_sets (BLOCK_FOR_INSN (insn));
4245 }
4246
4247 if (flags & INSN_INIT_TODO_LUID)
4248 {
4249 sched_extend_luids ();
4250 sched_init_insn_luid (insn);
4251 }
4252
4253 if (flags & INSN_INIT_TODO_SSID)
4254 {
4255 extend_insn_data ();
4256 init_insn_data (insn);
4257 clear_expr (&insn_init_ssid->expr);
4258 }
4259
4260 if (flags & INSN_INIT_TODO_SIMPLEJUMP)
4261 {
4262 extend_insn_data ();
4263 init_simplejump_data (insn);
4264 }
4265
4266 gcc_assert (CONTAINING_RGN (BLOCK_NUM (insn))
4267 == CONTAINING_RGN (BB_TO_BLOCK (0)));
4268 }
4269 \f
4270
4271 /* Functions to init/finish work with lv sets. */
4272
4273 /* Init BB_LV_SET of BB from DF_LR_IN set of BB. */
4274 static void
4275 init_lv_set (basic_block bb)
4276 {
4277 gcc_assert (!BB_LV_SET_VALID_P (bb));
4278
4279 BB_LV_SET (bb) = get_regset_from_pool ();
4280 COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb));
4281 BB_LV_SET_VALID_P (bb) = true;
4282 }
4283
4284 /* Copy liveness information to BB from FROM_BB. */
4285 static void
4286 copy_lv_set_from (basic_block bb, basic_block from_bb)
4287 {
4288 gcc_assert (!BB_LV_SET_VALID_P (bb));
4289
4290 COPY_REG_SET (BB_LV_SET (bb), BB_LV_SET (from_bb));
4291 BB_LV_SET_VALID_P (bb) = true;
4292 }
4293
4294 /* Initialize lv set of all bb headers. */
4295 void
4296 init_lv_sets (void)
4297 {
4298 basic_block bb;
4299
4300 /* Initialize LV sets. */
4301 FOR_EACH_BB (bb)
4302 init_lv_set (bb);
4303
4304 /* Don't forget EXIT_BLOCK. */
4305 init_lv_set (EXIT_BLOCK_PTR);
4306 }
4307
4308 /* Release the lv set of BB. */
4309 static void
4310 free_lv_set (basic_block bb)
4311 {
4312 gcc_assert (BB_LV_SET (bb) != NULL);
4313
4314 return_regset_to_pool (BB_LV_SET (bb));
4315 BB_LV_SET (bb) = NULL;
4316 BB_LV_SET_VALID_P (bb) = false;
4317 }
4318
4319 /* Finalize lv sets of all bb headers. */
4320 void
4321 free_lv_sets (void)
4322 {
4323 basic_block bb;
4324
4325 /* Don't forget EXIT_BLOCK. */
4326 free_lv_set (EXIT_BLOCK_PTR);
4327
4328 /* Free LV sets. */
4329 FOR_EACH_BB (bb)
4330 if (BB_LV_SET (bb))
4331 free_lv_set (bb);
4332 }
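
/* A usage sketch for the two functions above (the enclosing region
   init/finish code is assumed, not shown here):

     init_lv_sets ();
     ...schedule the region...
     free_lv_sets ();

   Every BB_LV_SET comes from the regset pool and goes back to it.  */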
4333
4334 /* Mark AV_SET for BB as invalid, so this set will be updated the next time
4335 compute_av() processes BB. This function is called when creating new basic
4336 blocks, as well as for blocks (either new or existing) where new jumps are
4337 created when the control flow is being updated. */
4338 static void
4339 invalidate_av_set (basic_block bb)
4340 {
4341 BB_AV_LEVEL (bb) = -1;
4342 }
4343
4344 /* Create initial data sets for BB (they will be invalid). */
4345 static void
4346 create_initial_data_sets (basic_block bb)
4347 {
4348 if (BB_LV_SET (bb))
4349 BB_LV_SET_VALID_P (bb) = false;
4350 else
4351 BB_LV_SET (bb) = get_regset_from_pool ();
4352 invalidate_av_set (bb);
4353 }
4354
4355 /* Free av set of BB. */
4356 static void
4357 free_av_set (basic_block bb)
4358 {
4359 av_set_clear (&BB_AV_SET (bb));
4360 BB_AV_LEVEL (bb) = 0;
4361 }
4362
4363 /* Free data sets of BB. */
4364 void
4365 free_data_sets (basic_block bb)
4366 {
4367 free_lv_set (bb);
4368 free_av_set (bb);
4369 }
4370
4371 /* Exchange lv sets of TO and FROM. */
4372 static void
4373 exchange_lv_sets (basic_block to, basic_block from)
4374 {
4375 {
4376 regset to_lv_set = BB_LV_SET (to);
4377
4378 BB_LV_SET (to) = BB_LV_SET (from);
4379 BB_LV_SET (from) = to_lv_set;
4380 }
4381
4382 {
4383 bool to_lv_set_valid_p = BB_LV_SET_VALID_P (to);
4384
4385 BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);
4386 BB_LV_SET_VALID_P (from) = to_lv_set_valid_p;
4387 }
4388 }
4389
4390
4391 /* Exchange av sets of TO and FROM. */
4392 static void
4393 exchange_av_sets (basic_block to, basic_block from)
4394 {
4395 {
4396 av_set_t to_av_set = BB_AV_SET (to);
4397
4398 BB_AV_SET (to) = BB_AV_SET (from);
4399 BB_AV_SET (from) = to_av_set;
4400 }
4401
4402 {
4403 int to_av_level = BB_AV_LEVEL (to);
4404
4405 BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
4406 BB_AV_LEVEL (from) = to_av_level;
4407 }
4408 }
4409
4410 /* Exchange data sets of TO and FROM. */
4411 void
4412 exchange_data_sets (basic_block to, basic_block from)
4413 {
4414 exchange_lv_sets (to, from);
4415 exchange_av_sets (to, from);
4416 }
4417
4418 /* Copy data sets of FROM to TO. */
4419 void
4420 copy_data_sets (basic_block to, basic_block from)
4421 {
4422 gcc_assert (!BB_LV_SET_VALID_P (to) && !BB_AV_SET_VALID_P (to));
4423 gcc_assert (BB_AV_SET (to) == NULL);
4424
4425 BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
4426 BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);
4427
4428 if (BB_AV_SET_VALID_P (from))
4429 {
4430 BB_AV_SET (to) = av_set_copy (BB_AV_SET (from));
4431 }
4432 if (BB_LV_SET_VALID_P (from))
4433 {
4434 gcc_assert (BB_LV_SET (to) != NULL);
4435 COPY_REG_SET (BB_LV_SET (to), BB_LV_SET (from));
4436 }
4437 }
4438
4439 /* Return an av set for INSN, if any. */
4440 av_set_t
4441 get_av_set (insn_t insn)
4442 {
4443 av_set_t av_set;
4444
4445 gcc_assert (AV_SET_VALID_P (insn));
4446
4447 if (sel_bb_head_p (insn))
4448 av_set = BB_AV_SET (BLOCK_FOR_INSN (insn));
4449 else
4450 av_set = NULL;
4451
4452 return av_set;
4453 }
4454
4455 /* Implementation of AV_LEVEL () macro. Return AV_LEVEL () of INSN. */
4456 int
4457 get_av_level (insn_t insn)
4458 {
4459 int av_level;
4460
4461 gcc_assert (INSN_P (insn));
4462
4463 if (sel_bb_head_p (insn))
4464 av_level = BB_AV_LEVEL (BLOCK_FOR_INSN (insn));
4465 else
4466 av_level = INSN_WS_LEVEL (insn);
4467
4468 return av_level;
4469 }
4470
4471 \f
4472
4473 /* Variables to work with control-flow graph. */
4474
4475 /* The basic blocks that have already been processed by sched_data_update (),
4476 but haven't been passed to sel_add_bb () yet. */
4477 static VEC (basic_block, heap) *last_added_blocks = NULL;
4478
4479 /* A pool for allocating successor infos. */
4480 static struct
4481 {
4482 /* A stack for saving succs_info structures. */
4483 struct succs_info *stack;
4484
4485 /* Its size. */
4486 int size;
4487
4488 /* Top of the stack. */
4489 int top;
4490
4491 /* Maximal value of the top. */
4492 int max_top;
4493 } succs_info_pool;
4494
4495 /* Functions to work with control-flow graph. */
4496
4497 /* Return the head insn of BB, or NULL if the block is empty. */
4498 insn_t
4499 sel_bb_head (basic_block bb)
4500 {
4501 insn_t head;
4502
4503 if (bb == EXIT_BLOCK_PTR)
4504 {
4505 gcc_assert (exit_insn != NULL_RTX);
4506 head = exit_insn;
4507 }
4508 else
4509 {
4510 insn_t note;
4511
4512 note = bb_note (bb);
4513 head = next_nonnote_insn (note);
4514
4515 if (head && (BARRIER_P (head) || BLOCK_FOR_INSN (head) != bb))
4516 head = NULL_RTX;
4517 }
4518
4519 return head;
4520 }
4521
4522 /* Return true if INSN is a basic block header. */
4523 bool
4524 sel_bb_head_p (insn_t insn)
4525 {
4526 return sel_bb_head (BLOCK_FOR_INSN (insn)) == insn;
4527 }
4528
4529 /* Return last insn of BB. */
4530 insn_t
4531 sel_bb_end (basic_block bb)
4532 {
4533 if (sel_bb_empty_p (bb))
4534 return NULL_RTX;
4535
4536 gcc_assert (bb != EXIT_BLOCK_PTR);
4537
4538 return BB_END (bb);
4539 }
4540
4541 /* Return true if INSN is the last insn in its basic block. */
4542 bool
4543 sel_bb_end_p (insn_t insn)
4544 {
4545 return insn == sel_bb_end (BLOCK_FOR_INSN (insn));
4546 }
4547
4548 /* Return true if BB consists of a single NOTE_INSN_BASIC_BLOCK. */
4549 bool
4550 sel_bb_empty_p (basic_block bb)
4551 {
4552 return sel_bb_head (bb) == NULL;
4553 }
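
/* Taken together: for a block containing only its NOTE_INSN_BASIC_BLOCK,
   sel_bb_head and sel_bb_end both return NULL, and sel_bb_empty_p
   returns true.  */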
4554
4555 /* True when BB belongs to the current scheduling region. */
4556 bool
4557 in_current_region_p (basic_block bb)
4558 {
4559 if (bb->index < NUM_FIXED_BLOCKS)
4560 return false;
4561
4562 return CONTAINING_RGN (bb->index) == CONTAINING_RGN (BB_TO_BLOCK (0));
4563 }
4564
4565 /* Return the block which is a fallthru bb of a conditional jump JUMP. */
4566 basic_block
4567 fallthru_bb_of_jump (rtx jump)
4568 {
4569 if (!JUMP_P (jump))
4570 return NULL;
4571
4572 if (!any_condjump_p (jump))
4573 return NULL;
4574
4575 /* A basic block that ends with a conditional jump may still have one
4576 successor (and be followed by a barrier); we are not interested in those. */
4577 if (single_succ_p (BLOCK_FOR_INSN (jump)))
4578 return NULL;
4579
4580 return FALLTHRU_EDGE (BLOCK_FOR_INSN (jump))->dest;
4581 }
4582
4583 /* Remove all notes from BB. */
4584 static void
4585 init_bb (basic_block bb)
4586 {
4587 remove_notes (bb_note (bb), BB_END (bb));
4588 BB_NOTE_LIST (bb) = note_list;
4589 }
4590
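/* Initialize per-bb data structures for the blocks in BBS, saving their
   notes away via init_bb. */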
4591 void
4592 sel_init_bbs (bb_vec_t bbs)
4593 {
4594 const struct sched_scan_info_def ssi =
4595 {
4596 extend_bb_info, /* extend_bb */
4597 init_bb, /* init_bb */
4598 NULL, /* extend_insn */
4599 NULL /* init_insn */
4600 };
4601
4602 sched_scan (&ssi, bbs);
4603 }
4604
4605 /* Restore notes for the whole region. */
4606 static void
4607 sel_restore_notes (void)
4608 {
4609 int bb;
4610 insn_t insn;
4611
4612 for (bb = 0; bb < current_nr_blocks; bb++)
4613 {
4614 basic_block first, last;
4615
4616 first = EBB_FIRST_BB (bb);
4617 last = EBB_LAST_BB (bb)->next_bb;
4618
4619 do
4620 {
4621 note_list = BB_NOTE_LIST (first);
4622 restore_other_notes (NULL, first);
4623 BB_NOTE_LIST (first) = NULL_RTX;
4624
4625 FOR_BB_INSNS (first, insn)
4626 if (NONDEBUG_INSN_P (insn))
4627 reemit_notes (insn);
4628
4629 first = first->next_bb;
4630 }
4631 while (first != last);
4632 }
4633 }
4634
4635 /* Free per-bb data structures. */
4636 void
4637 sel_finish_bbs (void)
4638 {
4639 sel_restore_notes ();
4640
4641 /* Remove current loop preheader from this loop. */
4642 if (current_loop_nest)
4643 sel_remove_loop_preheader ();
4644
4645 finish_region_bb_info ();
4646 }
4647
4648 /* Return true if INSN has a single successor of type FLAGS. */
4649 bool
4650 sel_insn_has_single_succ_p (insn_t insn, int flags)
4651 {
4652 insn_t succ;
4653 succ_iterator si;
4654 bool first_p = true;
4655
4656 FOR_EACH_SUCC_1 (succ, si, insn, flags)
4657 {
4658 if (first_p)
4659 first_p = false;
4660 else
4661 return false;
4662 }
4663
4664 return true;
4665 }
4666
4667 /* Allocate successor's info. */
4668 static struct succs_info *
4669 alloc_succs_info (void)
4670 {
4671 if (succs_info_pool.top == succs_info_pool.max_top)
4672 {
4673 int i;
4674
4675 if (++succs_info_pool.max_top >= succs_info_pool.size)
4676 gcc_unreachable ();
4677
4678 i = ++succs_info_pool.top;
4679 succs_info_pool.stack[i].succs_ok = VEC_alloc (rtx, heap, 10);
4680 succs_info_pool.stack[i].succs_other = VEC_alloc (rtx, heap, 10);
4681 succs_info_pool.stack[i].probs_ok = VEC_alloc (int, heap, 10);
4682 }
4683 else
4684 succs_info_pool.top++;
4685
4686 return &succs_info_pool.stack[succs_info_pool.top];
4687 }
4688
4689 /* Free successor's info. */
4690 void
4691 free_succs_info (struct succs_info * sinfo)
4692 {
4693 gcc_assert (succs_info_pool.top >= 0
4694 && &succs_info_pool.stack[succs_info_pool.top] == sinfo);
4695 succs_info_pool.top--;
4696
4697 /* Clear stale info. */
4698 VEC_block_remove (rtx, sinfo->succs_ok,
4699 0, VEC_length (rtx, sinfo->succs_ok));
4700 VEC_block_remove (rtx, sinfo->succs_other,
4701 0, VEC_length (rtx, sinfo->succs_other));
4702 VEC_block_remove (int, sinfo->probs_ok,
4703 0, VEC_length (int, sinfo->probs_ok));
4704 sinfo->all_prob = 0;
4705 sinfo->succs_ok_n = 0;
4706 sinfo->all_succs_n = 0;
4707 }
4708
4709 /* Compute successor info for INSN. FLAGS are the flags passed
4710 to the FOR_EACH_SUCC_1 iterator. */
4711 struct succs_info *
4712 compute_succs_info (insn_t insn, short flags)
4713 {
4714 succ_iterator si;
4715 insn_t succ;
4716 struct succs_info *sinfo = alloc_succs_info ();
4717
4718 /* Traverse *all* successors and decide what to do with each. */
4719 FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
4720 {
4721 /* FIXME: this doesn't work for skipping to loop exits, as we don't
4722 perform code motion through inner loops. */
4723 short current_flags = si.current_flags & ~SUCCS_SKIP_TO_LOOP_EXITS;
4724
4725 if (current_flags & flags)
4726 {
4727 VEC_safe_push (rtx, heap, sinfo->succs_ok, succ);
4728 VEC_safe_push (int, heap, sinfo->probs_ok,
4729 /* FIXME: Improve calculation when skipping
4730 inner loop to exits. */
4731 (si.bb_end
4732 ? si.e1->probability
4733 : REG_BR_PROB_BASE));
4734 sinfo->succs_ok_n++;
4735 }
4736 else
4737 VEC_safe_push (rtx, heap, sinfo->succs_other, succ);
4738
4739 /* Compute all_prob. */
4740 if (!si.bb_end)
4741 sinfo->all_prob = REG_BR_PROB_BASE;
4742 else
4743 sinfo->all_prob += si.e1->probability;
4744
4745 sinfo->all_succs_n++;
4746 }
4747
4748 return sinfo;
4749 }
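
/* Usage sketch (the caller shown is hypothetical): successor infos come
   from a pool and must be returned to it:

     struct succs_info *sinfo = compute_succs_info (insn, SUCCS_NORMAL);
     ...examine sinfo->succs_ok, sinfo->probs_ok, sinfo->all_prob...
     free_succs_info (sinfo);
*/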
4750
4751 /* Return the predecessors of BB in PREDS and their number in N.
4752 Empty blocks are skipped. SIZE is used to allocate PREDS. */
4753 static void
4754 cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size)
4755 {
4756 edge e;
4757 edge_iterator ei;
4758
4759 gcc_assert (BLOCK_TO_BB (bb->index) != 0);
4760
4761 FOR_EACH_EDGE (e, ei, bb->preds)
4762 {
4763 basic_block pred_bb = e->src;
4764 insn_t bb_end = BB_END (pred_bb);
4765
4766 if (!in_current_region_p (pred_bb))
4767 {
4768 gcc_assert (flag_sel_sched_pipelining_outer_loops
4769 && current_loop_nest);
4770 continue;
4771 }
4772
4773 if (sel_bb_empty_p (pred_bb))
4774 cfg_preds_1 (pred_bb, preds, n, size);
4775 else
4776 {
4777 if (*n == *size)
4778 *preds = XRESIZEVEC (insn_t, *preds,
4779 (*size = 2 * *size + 1));
4780 (*preds)[(*n)++] = bb_end;
4781 }
4782 }
4783
4784 gcc_assert (*n != 0
4785 || (flag_sel_sched_pipelining_outer_loops
4786 && current_loop_nest));
4787 }
4788
4789 /* Find all predecessors of BB and record them in PREDS and their number
4790 in N. Empty blocks are skipped, and only normal (forward in-region)
4791 edges are processed. */
4792 static void
4793 cfg_preds (basic_block bb, insn_t **preds, int *n)
4794 {
4795 int size = 0;
4796
4797 *preds = NULL;
4798 *n = 0;
4799 cfg_preds_1 (bb, preds, n, &size);
4800 }
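
/* The array is allocated with XRESIZEVEC, so a caller is expected to
   free it, e.g. (illustrative):

     insn_t *preds;
     int n;

     cfg_preds (bb, &preds, &n);
     ...
     free (preds);
*/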
4801
4802 /* Returns true if we are moving INSN through a join point. */
4803 bool
4804 sel_num_cfg_preds_gt_1 (insn_t insn)
4805 {
4806 basic_block bb;
4807
4808 if (!sel_bb_head_p (insn) || INSN_BB (insn) == 0)
4809 return false;
4810
4811 bb = BLOCK_FOR_INSN (insn);
4812
4813 while (1)
4814 {
4815 if (EDGE_COUNT (bb->preds) > 1)
4816 return true;
4817
4818 gcc_assert (EDGE_PRED (bb, 0)->dest == bb);
4819 bb = EDGE_PRED (bb, 0)->src;
4820
4821 if (!sel_bb_empty_p (bb))
4822 break;
4823 }
4824
4825 return false;
4826 }
4827
4828 /* Returns true when BB should be the end of an ebb. Adapted from the
4829 code in sched-ebb.c. */
4830 bool
4831 bb_ends_ebb_p (basic_block bb)
4832 {
4833 basic_block next_bb = bb_next_bb (bb);
4834 edge e;
4835
4836 if (next_bb == EXIT_BLOCK_PTR
4837 || bitmap_bit_p (forced_ebb_heads, next_bb->index)
4838 || (LABEL_P (BB_HEAD (next_bb))
4839 /* NB: LABEL_NUSES () is not maintained outside of jump.c.
4840 Work around that. */
4841 && !single_pred_p (next_bb)))
4842 return true;
4843
4844 if (!in_current_region_p (next_bb))
4845 return true;
4846
4847 e = find_fallthru_edge (bb->succs);
4848 if (e)
4849 {
4850 gcc_assert (e->dest == next_bb);
4851
4852 return false;
4853 }
4854
4855 return true;
4856 }
4857
4858 /* Returns true when INSN and SUCC are in the same EBB, given that SUCC is a
4859 successor of INSN. */
4860 bool
4861 in_same_ebb_p (insn_t insn, insn_t succ)
4862 {
4863 basic_block ptr = BLOCK_FOR_INSN (insn);
4864
4865 for (;;)
4866 {
4867 if (ptr == BLOCK_FOR_INSN (succ))
4868 return true;
4869
4870 if (bb_ends_ebb_p (ptr))
4871 return false;
4872
4873 ptr = bb_next_bb (ptr);
4874 }
4875
4876 gcc_unreachable ();
4877 return false;
4878 }
4879
4880 /* Recomputes the reverse topological order for the function and
4881 saves it in REV_TOP_ORDER_INDEX. REV_TOP_ORDER_INDEX_LEN is also
4882 modified appropriately. */
4883 static void
4884 recompute_rev_top_order (void)
4885 {
4886 int *postorder;
4887 int n_blocks, i;
4888
4889 if (!rev_top_order_index || rev_top_order_index_len < last_basic_block)
4890 {
4891 rev_top_order_index_len = last_basic_block;
4892 rev_top_order_index = XRESIZEVEC (int, rev_top_order_index,
4893 rev_top_order_index_len);
4894 }
4895
4896 postorder = XNEWVEC (int, n_basic_blocks);
4897
4898 n_blocks = post_order_compute (postorder, true, false);
4899 gcc_assert (n_basic_blocks == n_blocks);
4900
4901 /* Build reverse function: for each basic block with BB->INDEX == K
4902 rev_top_order_index[K] is its reverse topological sort number. */
4903 for (i = 0; i < n_blocks; i++)
4904 {
4905 gcc_assert (postorder[i] < rev_top_order_index_len);
4906 rev_top_order_index[postorder[i]] = i;
4907 }
4908
4909 free (postorder);
4910 }
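
/* For example (indices illustrative): if post_order_compute fills
   postorder = { 5, 3, 8 }, the loop above sets rev_top_order_index[5] = 0,
   rev_top_order_index[3] = 1 and rev_top_order_index[8] = 2.  */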
4911
4912 /* Clear all flags from insns in BB that could spoil its rescheduling. */
4913 void
4914 clear_outdated_rtx_info (basic_block bb)
4915 {
4916 rtx insn;
4917
4918 FOR_BB_INSNS (bb, insn)
4919 if (INSN_P (insn))
4920 {
4921 SCHED_GROUP_P (insn) = 0;
4922 INSN_AFTER_STALL_P (insn) = 0;
4923 INSN_SCHED_TIMES (insn) = 0;
4924 EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) = 0;
4925
4926 /* We cannot reuse the transformation caches: previously we could
4927 ignore the LHS dependence due to enabled renaming and transform
4928 the expression, but currently we are unable to do this. */
4929 htab_empty (INSN_TRANSFORMED_INSNS (insn));
4930 }
4931 }
4932
4933 /* Add BB_NOTE to the pool of available basic block notes. */
4934 static void
4935 return_bb_to_pool (basic_block bb)
4936 {
4937 rtx note = bb_note (bb);
4938
4939 gcc_assert (NOTE_BASIC_BLOCK (note) == bb
4940 && bb->aux == NULL);
4941
4942 /* It turns out that the current cfg infrastructure does not support
4943 reuse of basic blocks. Don't bother for now. */
4944 /*VEC_safe_push (rtx, heap, bb_note_pool, note);*/
4945 }
4946
4947 /* Get a bb_note from pool or return NULL_RTX if pool is empty. */
4948 static rtx
4949 get_bb_note_from_pool (void)
4950 {
4951 if (VEC_empty (rtx, bb_note_pool))
4952 return NULL_RTX;
4953 else
4954 {
4955 rtx note = VEC_pop (rtx, bb_note_pool);
4956
4957 PREV_INSN (note) = NULL_RTX;
4958 NEXT_INSN (note) = NULL_RTX;
4959
4960 return note;
4961 }
4962 }
4963
4964 /* Free bb_note_pool. */
4965 void
4966 free_bb_note_pool (void)
4967 {
4968 VEC_free (rtx, heap, bb_note_pool);
4969 }
4970
4971 /* Setup scheduler pool and successor structure. */
4972 void
4973 alloc_sched_pools (void)
4974 {
4975 int succs_size;
4976
4977 succs_size = MAX_WS + 1;
4978 succs_info_pool.stack = XCNEWVEC (struct succs_info, succs_size);
4979 succs_info_pool.size = succs_size;
4980 succs_info_pool.top = -1;
4981 succs_info_pool.max_top = -1;
4982
4983 sched_lists_pool = create_alloc_pool ("sel-sched-lists",
4984 sizeof (struct _list_node), 500);
4985 }
4986
4987 /* Free the pools. */
4988 void
4989 free_sched_pools (void)
4990 {
4991 int i;
4992
4993 free_alloc_pool (sched_lists_pool);
4994 gcc_assert (succs_info_pool.top == -1);
4995 for (i = 0; i < succs_info_pool.max_top; i++)
4996 {
4997 VEC_free (rtx, heap, succs_info_pool.stack[i].succs_ok);
4998 VEC_free (rtx, heap, succs_info_pool.stack[i].succs_other);
4999 VEC_free (int, heap, succs_info_pool.stack[i].probs_ok);
5000 }
5001 free (succs_info_pool.stack);
5002 }
5003 \f
5004
5005 /* Returns a position in RGN where BB can be inserted while retaining
5006 the topological order. */
5007 static int
5008 find_place_to_insert_bb (basic_block bb, int rgn)
5009 {
5010 bool has_preds_outside_rgn = false;
5011 edge e;
5012 edge_iterator ei;
5013
5014 /* Find whether we have preds outside the region. */
5015 FOR_EACH_EDGE (e, ei, bb->preds)
5016 if (!in_current_region_p (e->src))
5017 {
5018 has_preds_outside_rgn = true;
5019 break;
5020 }
5021
5022 /* Recompute the top order -- needed when we have > 1 pred
5023 or when we have preds outside the region. */
5024 if (flag_sel_sched_pipelining_outer_loops
5025 && (has_preds_outside_rgn || EDGE_COUNT (bb->preds) > 1))
5026 {
5027 int i, bbi = bb->index, cur_bbi;
5028
5029 recompute_rev_top_order ();
5030 for (i = RGN_NR_BLOCKS (rgn) - 1; i >= 0; i--)
5031 {
5032 cur_bbi = BB_TO_BLOCK (i);
5033 if (rev_top_order_index[bbi]
5034 < rev_top_order_index[cur_bbi])
5035 break;
5036 }
5037
5038 /* We skipped the right block, so we increase i. The caller will
5039 increase the result by one later, so we decrease i to compensate. */
5040 return (i + 1) - 1;
5041 }
5042 else if (has_preds_outside_rgn)
5043 {
5044 /* This is the case when we generate an extra empty block
5045 to serve as region head during pipelining. */
5046 e = EDGE_SUCC (bb, 0);
5047 gcc_assert (EDGE_COUNT (bb->succs) == 1
5048 && in_current_region_p (EDGE_SUCC (bb, 0)->dest)
5049 && (BLOCK_TO_BB (e->dest->index) == 0));
5050 return -1;
5051 }
5052
5053 /* We don't have preds outside the region. We should have
5054 a single pred, because the multiple-preds case comes from
5055 the pipelining of outer loops, and that is handled above.
5056 Just take the bbi of this single pred. */
5057 if (EDGE_COUNT (bb->succs) > 0)
5058 {
5059 int pred_bbi;
5060
5061 gcc_assert (EDGE_COUNT (bb->preds) == 1);
5062
5063 pred_bbi = EDGE_PRED (bb, 0)->src->index;
5064 return BLOCK_TO_BB (pred_bbi);
5065 }
5066 else
5067 /* BB has no successors. It is safe to put it in the end. */
5068 return current_nr_blocks - 1;
5069 }
5070
5071 /* Deletes an empty basic block freeing its data. */
5072 static void
5073 delete_and_free_basic_block (basic_block bb)
5074 {
5075 gcc_assert (sel_bb_empty_p (bb));
5076
5077 if (BB_LV_SET (bb))
5078 free_lv_set (bb);
5079
5080 bitmap_clear_bit (blocks_to_reschedule, bb->index);
5081
5082 /* Can't assert av_set properties because we use sel_aremove_bb
5083 when removing loop preheader from the region. At the point of
5084 removing the preheader we already have deallocated sel_region_bb_info. */
5085 gcc_assert (BB_LV_SET (bb) == NULL
5086 && !BB_LV_SET_VALID_P (bb)
5087 && BB_AV_LEVEL (bb) == 0
5088 && BB_AV_SET (bb) == NULL);
5089
5090 delete_basic_block (bb);
5091 }
5092
5093 /* Add BB to the current region and update the region data. */
5094 static void
5095 add_block_to_current_region (basic_block bb)
5096 {
5097 int i, pos, bbi = -2, rgn;
5098
5099 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
5100 bbi = find_place_to_insert_bb (bb, rgn);
5101 bbi += 1;
5102 pos = RGN_BLOCKS (rgn) + bbi;
5103
5104 gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
5105 && ebb_head[bbi] == pos);
5106
5107 /* Make a place for the new block. */
5108 extend_regions ();
5109
5110 for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
5111 BLOCK_TO_BB (rgn_bb_table[i])++;
5112
5113 memmove (rgn_bb_table + pos + 1,
5114 rgn_bb_table + pos,
5115 (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
5116
5117 /* Initialize data for BB. */
5118 rgn_bb_table[pos] = bb->index;
5119 BLOCK_TO_BB (bb->index) = bbi;
5120 CONTAINING_RGN (bb->index) = rgn;
5121
5122 RGN_NR_BLOCKS (rgn)++;
5123
5124 for (i = rgn + 1; i <= nr_regions; i++)
5125 RGN_BLOCKS (i)++;
5126 }
5127
5128 /* Remove BB from the current region and update the region data. */
5129 static void
5130 remove_bb_from_region (basic_block bb)
5131 {
5132 int i, pos, bbi = -2, rgn;
5133
5134 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
5135 bbi = BLOCK_TO_BB (bb->index);
5136 pos = RGN_BLOCKS (rgn) + bbi;
5137
5138 gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
5139 && ebb_head[bbi] == pos);
5140
5141 for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
5142 BLOCK_TO_BB (rgn_bb_table[i])--;
5143
5144 memmove (rgn_bb_table + pos,
5145 rgn_bb_table + pos + 1,
5146 (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
5147
5148 RGN_NR_BLOCKS (rgn)--;
5149 for (i = rgn + 1; i <= nr_regions; i++)
5150 RGN_BLOCKS (i)--;
5151 }
5152
5153 /* Add BB to the current region and update all data. If BB is NULL, add all
5154 blocks from last_added_blocks vector. */
5155 static void
5156 sel_add_bb (basic_block bb)
5157 {
5158 /* Extend luids so that new notes will receive zero luids. */
5159 sched_extend_luids ();
5160 sched_init_bbs ();
5161 sel_init_bbs (last_added_blocks);
5162
5163 /* When bb is passed explicitly, the vector should contain
5164 a single element equal to bb; otherwise, the vector
5165 should not be NULL. */
5166 gcc_assert (last_added_blocks != NULL);
5167
5168 if (bb != NULL)
5169 {
5170 gcc_assert (VEC_length (basic_block, last_added_blocks) == 1
5171 && VEC_index (basic_block,
5172 last_added_blocks, 0) == bb);
5173 add_block_to_current_region (bb);
5174
5175 /* We associate creating/deleting data sets with the first insn
5176 appearing / disappearing in the bb. */
5177 if (!sel_bb_empty_p (bb) && BB_LV_SET (bb) == NULL)
5178 create_initial_data_sets (bb);
5179
5180 VEC_free (basic_block, heap, last_added_blocks);
5181 }
5182 else
5183 /* BB is NULL - process LAST_ADDED_BLOCKS instead. */
5184 {
5185 int i;
5186 basic_block temp_bb = NULL;
5187
5188 for (i = 0;
5189 VEC_iterate (basic_block, last_added_blocks, i, bb); i++)
5190 {
5191 add_block_to_current_region (bb);
5192 temp_bb = bb;
5193 }
5194
5195 /* We need to fetch at least one bb so we know the region
5196 to update. */
5197 gcc_assert (temp_bb != NULL);
5198 bb = temp_bb;
5199
5200 VEC_free (basic_block, heap, last_added_blocks);
5201 }
5202
5203 rgn_setup_region (CONTAINING_RGN (bb->index));
5204 }
5205
5206 /* Remove BB from the current region and update all data.
5207 If REMOVE_FROM_CFG_P is true, also remove the block from the CFG. */
5208 static void
5209 sel_remove_bb (basic_block bb, bool remove_from_cfg_p)
5210 {
5211 unsigned idx = bb->index;
5212
5213 gcc_assert (bb != NULL && BB_NOTE_LIST (bb) == NULL_RTX);
5214
5215 remove_bb_from_region (bb);
5216 return_bb_to_pool (bb);
5217 bitmap_clear_bit (blocks_to_reschedule, idx);
5218
5219 if (remove_from_cfg_p)
5220 {
5221 basic_block succ = single_succ (bb);
5222 delete_and_free_basic_block (bb);
5223 set_immediate_dominator (CDI_DOMINATORS, succ,
5224 recompute_dominator (CDI_DOMINATORS, succ));
5225 }
5226
5227 rgn_setup_region (CONTAINING_RGN (idx));
5228 }
5229
5230 /* Concatenate info of EMPTY_BB to info of MERGE_BB. */
5231 static void
5232 move_bb_info (basic_block merge_bb, basic_block empty_bb)
5233 {
5234 gcc_assert (in_current_region_p (merge_bb));
5235
5236 concat_note_lists (BB_NOTE_LIST (empty_bb),
5237 &BB_NOTE_LIST (merge_bb));
5238 BB_NOTE_LIST (empty_bb) = NULL_RTX;
5239
5240 }
5241
5242 /* Remove EMPTY_BB. If REMOVE_FROM_CFG_P is false, remove EMPTY_BB from
5243 region, but keep it in CFG. */
5244 static void
5245 remove_empty_bb (basic_block empty_bb, bool remove_from_cfg_p)
5246 {
5247 /* The block should contain just a note or a label.
5248 We try to check whether it is unused below. */
5249 gcc_assert (BB_HEAD (empty_bb) == BB_END (empty_bb)
5250 || LABEL_P (BB_HEAD (empty_bb)));
5251
5252 /* If basic block has predecessors or successors, redirect them. */
5253 if (remove_from_cfg_p
5254 && (EDGE_COUNT (empty_bb->preds) > 0
5255 || EDGE_COUNT (empty_bb->succs) > 0))
5256 {
5257 basic_block pred;
5258 basic_block succ;
5259
5260 /* We need to init PRED and SUCC before redirecting edges. */
5261 if (EDGE_COUNT (empty_bb->preds) > 0)
5262 {
5263 edge e;
5264
5265 gcc_assert (EDGE_COUNT (empty_bb->preds) == 1);
5266
5267 e = EDGE_PRED (empty_bb, 0);
5268 gcc_assert (e->src == empty_bb->prev_bb
5269 && (e->flags & EDGE_FALLTHRU));
5270
5271 pred = empty_bb->prev_bb;
5272 }
5273 else
5274 pred = NULL;
5275
5276 if (EDGE_COUNT (empty_bb->succs) > 0)
5277 {
5278 /* We do not check fallthruness here as above, because
5279 after removing a jump the edge may actually not be fallthru. */
5280 gcc_assert (EDGE_COUNT (empty_bb->succs) == 1);
5281 succ = EDGE_SUCC (empty_bb, 0)->dest;
5282 }
5283 else
5284 succ = NULL;
5285
5286 if (EDGE_COUNT (empty_bb->preds) > 0 && succ != NULL)
5287 {
5288 edge e = EDGE_PRED (empty_bb, 0);
5289
5290 if (e->flags & EDGE_FALLTHRU)
5291 redirect_edge_succ_nodup (e, succ);
5292 else
5293 sel_redirect_edge_and_branch (EDGE_PRED (empty_bb, 0), succ);
5294 }
5295
5296 if (EDGE_COUNT (empty_bb->succs) > 0 && pred != NULL)
5297 {
5298 edge e = EDGE_SUCC (empty_bb, 0);
5299
5300 if (find_edge (pred, e->dest) == NULL)
5301 redirect_edge_pred (e, pred);
5302 }
5303 }
5304
5305 /* Finish removing. */
5306 sel_remove_bb (empty_bb, remove_from_cfg_p);
5307 }
5308
5309 /* An implementation of create_basic_block hook, which additionally updates
5310 per-bb data structures. */
5311 static basic_block
5312 sel_create_basic_block (void *headp, void *endp, basic_block after)
5313 {
5314 basic_block new_bb;
5315 insn_t new_bb_note;
5316
5317 gcc_assert (flag_sel_sched_pipelining_outer_loops
5318 || last_added_blocks == NULL);
5319
5320 new_bb_note = get_bb_note_from_pool ();
5321
5322 if (new_bb_note == NULL_RTX)
5323 new_bb = orig_cfg_hooks.create_basic_block (headp, endp, after);
5324 else
5325 {
5326 new_bb = create_basic_block_structure ((rtx) headp, (rtx) endp,
5327 new_bb_note, after);
5328 new_bb->aux = NULL;
5329 }
5330
5331 VEC_safe_push (basic_block, heap, last_added_blocks, new_bb);
5332
5333 return new_bb;
5334 }
5335
5336 /* Implement sched_init_only_bb (). */
5337 static void
5338 sel_init_only_bb (basic_block bb, basic_block after)
5339 {
5340 gcc_assert (after == NULL);
5341
5342 extend_regions ();
5343 rgn_make_new_region_out_of_new_block (bb);
5344 }
5345
5346 /* Update the latch when we've split or merged it, moving it from
5347 block FROM to TO. This should be checked for all outer loops, too. */
5348 static void
5349 change_loops_latches (basic_block from, basic_block to)
5350 {
5351 gcc_assert (from != to);
5352
5353 if (current_loop_nest)
5354 {
5355 struct loop *loop;
5356
5357 for (loop = current_loop_nest; loop; loop = loop_outer (loop))
5358 if (considered_for_pipelining_p (loop) && loop->latch == from)
5359 {
5360 gcc_assert (loop == current_loop_nest);
5361 loop->latch = to;
5362 gcc_assert (loop_latch_edge (loop));
5363 }
5364 }
5365 }
5366
5367 /* Splits BB into two basic blocks, adding it to the region and extending
5368 per-bb data structures. Returns the newly created bb. */
5369 static basic_block
5370 sel_split_block (basic_block bb, rtx after)
5371 {
5372 basic_block new_bb;
5373 insn_t insn;
5374
5375 new_bb = sched_split_block_1 (bb, after);
5376 sel_add_bb (new_bb);
5377
5378 /* This should be called after sel_add_bb, because this uses
5379 CONTAINING_RGN for the new block, which is not yet initialized.
5380 FIXME: this function may be a no-op now. */
5381 change_loops_latches (bb, new_bb);
5382
5383 /* Update ORIG_BB_INDEX for insns moved into the new block. */
5384 FOR_BB_INSNS (new_bb, insn)
5385 if (INSN_P (insn))
5386 EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index;
5387
5388 if (sel_bb_empty_p (bb))
5389 {
5390 gcc_assert (!sel_bb_empty_p (new_bb));
5391
5392 /* NEW_BB has data sets that need to be updated and BB holds
5393 data sets that should be removed. Exchange these data sets
5394 so that we won't lose BB's valid data sets. */
5395 exchange_data_sets (new_bb, bb);
5396 free_data_sets (bb);
5397 }
5398
5399 if (!sel_bb_empty_p (new_bb)
5400 && bitmap_bit_p (blocks_to_reschedule, bb->index))
5401 bitmap_set_bit (blocks_to_reschedule, new_bb->index);
5402
5403 return new_bb;
5404 }
5405
5406 /* If BB ends with a jump insn whose ID is bigger than PREV_MAX_UID, return it.
5407 Otherwise returns NULL. */
5408 static rtx
5409 check_for_new_jump (basic_block bb, int prev_max_uid)
5410 {
5411 rtx end;
5412
5413 end = sel_bb_end (bb);
5414 if (end && INSN_UID (end) >= prev_max_uid)
5415 return end;
5416 return NULL;
5417 }
5418
5419 /* Look for a new jump either in the FROM block or in the newly created
5420 JUMP_BB block. New means having a UID at least equal to PREV_MAX_UID. */
5421 static rtx
5422 find_new_jump (basic_block from, basic_block jump_bb, int prev_max_uid)
5423 {
5424 rtx jump;
5425
5426 /* Return immediately if no new insns were emitted. */
5427 if (get_max_uid () == prev_max_uid)
5428 return NULL;
5429
5430 /* Now check both blocks for new jumps. There will only ever be one. */
5431 if ((jump = check_for_new_jump (from, prev_max_uid)))
5432 return jump;
5433
5434 if (jump_bb != NULL
5435 && (jump = check_for_new_jump (jump_bb, prev_max_uid)))
5436 return jump;
5437 return NULL;
5438 }
5439
5440 /* Splits E and adds the newly created basic block to the current region.
5441 Returns this basic block. */
5442 basic_block
5443 sel_split_edge (edge e)
5444 {
5445 basic_block new_bb, src, other_bb = NULL;
5446 int prev_max_uid;
5447 rtx jump;
5448
5449 src = e->src;
5450 prev_max_uid = get_max_uid ();
5451 new_bb = split_edge (e);
5452
5453 if (flag_sel_sched_pipelining_outer_loops
5454 && current_loop_nest)
5455 {
5456 int i;
5457 basic_block bb;
5458
5459 /* Some of the basic blocks might not have been added to the loop.
5460 Add them here, until this is fixed in force_fallthru. */
5461 for (i = 0;
5462 VEC_iterate (basic_block, last_added_blocks, i, bb); i++)
5463 if (!bb->loop_father)
5464 {
5465 add_bb_to_loop (bb, e->dest->loop_father);
5466
5467 gcc_assert (!other_bb && (new_bb->index != bb->index));
5468 other_bb = bb;
5469 }
5470 }
5471
5472 /* Add all last_added_blocks to the region. */
5473 sel_add_bb (NULL);
5474
5475 jump = find_new_jump (src, new_bb, prev_max_uid);
5476 if (jump)
5477 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5478
5479 /* Put the correct lv set on this block. */
5480 if (other_bb && !sel_bb_empty_p (other_bb))
5481 compute_live (sel_bb_head (other_bb));
5482
5483 return new_bb;
5484 }
5485
5486 /* Implement sched_create_empty_bb (). */
5487 static basic_block
5488 sel_create_empty_bb (basic_block after)
5489 {
5490 basic_block new_bb;
5491
5492 new_bb = sched_create_empty_bb_1 (after);
5493
5494 /* We'll explicitly initialize NEW_BB via sel_init_only_bb () a bit
5495 later. */
5496 gcc_assert (VEC_length (basic_block, last_added_blocks) == 1
5497 && VEC_index (basic_block, last_added_blocks, 0) == new_bb);
5498
5499 VEC_free (basic_block, heap, last_added_blocks);
5500 return new_bb;
5501 }
5502
5503 /* Implement sched_create_recovery_block. ORIG_INSN is where the
5504 block will be split to insert a check. */
5505 basic_block
5506 sel_create_recovery_block (insn_t orig_insn)
5507 {
5508 basic_block first_bb, second_bb, recovery_block;
5509 basic_block before_recovery = NULL;
5510 rtx jump;
5511
5512 first_bb = BLOCK_FOR_INSN (orig_insn);
5513 if (sel_bb_end_p (orig_insn))
5514 {
5515 /* Avoid introducing an empty block while splitting. */
5516 gcc_assert (single_succ_p (first_bb));
5517 second_bb = single_succ (first_bb);
5518 }
5519 else
5520 second_bb = sched_split_block (first_bb, orig_insn);
5521
5522 recovery_block = sched_create_recovery_block (&before_recovery);
5523 if (before_recovery)
5524 copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR);
5525
5526 gcc_assert (sel_bb_empty_p (recovery_block));
5527 sched_create_recovery_edges (first_bb, recovery_block, second_bb);
5528 if (current_loops != NULL)
5529 add_bb_to_loop (recovery_block, first_bb->loop_father);
5530
5531 sel_add_bb (recovery_block);
5532
5533 jump = BB_END (recovery_block);
5534 gcc_assert (sel_bb_head (recovery_block) == jump);
5535 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5536
5537 return recovery_block;
5538 }
5539
5540 /* Merge basic block B into basic block A. */
5541 static void
5542 sel_merge_blocks (basic_block a, basic_block b)
5543 {
5544 gcc_assert (sel_bb_empty_p (b)
5545 && EDGE_COUNT (b->preds) == 1
5546 && EDGE_PRED (b, 0)->src == b->prev_bb);
5547
5548 move_bb_info (b->prev_bb, b);
5549 remove_empty_bb (b, false);
5550 merge_blocks (a, b);
5551 change_loops_latches (b, a);
5552 }
5553
5554 /* A wrapper for redirect_edge_and_branch_force, which also initializes
5555 data structures for the possibly created bb and insns. The newly
5556 created bb, if any, is added to the region via sel_add_bb. */
5557 void
5558 sel_redirect_edge_and_branch_force (edge e, basic_block to)
5559 {
5560 basic_block jump_bb, src, orig_dest = e->dest;
5561 int prev_max_uid;
5562 rtx jump;
5563
5564 /* This function is now used only for bookkeeping code creation, where
5565 we'll never get the single pred of orig_dest block and thus will not
5566 hit unreachable blocks when updating dominator info. */
5567 gcc_assert (!sel_bb_empty_p (e->src)
5568 && !single_pred_p (orig_dest));
5569 src = e->src;
5570 prev_max_uid = get_max_uid ();
5571 jump_bb = redirect_edge_and_branch_force (e, to);
5572
5573 if (jump_bb != NULL)
5574 sel_add_bb (jump_bb);
5575
5576 /* So far this function cannot have spoiled the loop structure,
5577 thus we don't need to update anything. But check it to be sure. */
5578 if (current_loop_nest
5579 && pipelining_p)
5580 gcc_assert (loop_latch_edge (current_loop_nest));
5581
5582 jump = find_new_jump (src, jump_bb, prev_max_uid);
5583 if (jump)
5584 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5585 set_immediate_dominator (CDI_DOMINATORS, to,
5586 recompute_dominator (CDI_DOMINATORS, to));
5587 set_immediate_dominator (CDI_DOMINATORS, orig_dest,
5588 recompute_dominator (CDI_DOMINATORS, orig_dest));
5589 }
5590
5591 /* A wrapper for redirect_edge_and_branch. Return TRUE if blocks connected by
5592 redirected edge are in reverse topological order. */
5593 bool
5594 sel_redirect_edge_and_branch (edge e, basic_block to)
5595 {
5596 bool latch_edge_p;
5597 basic_block src, orig_dest = e->dest;
5598 int prev_max_uid;
5599 rtx jump;
5600 edge redirected;
5601 bool recompute_toporder_p = false;
5602 bool maybe_unreachable = single_pred_p (orig_dest);
5603
5604 latch_edge_p = (pipelining_p
5605 && current_loop_nest
5606 && e == loop_latch_edge (current_loop_nest));
5607
5608 src = e->src;
5609 prev_max_uid = get_max_uid ();
5610
5611 redirected = redirect_edge_and_branch (e, to);
5612
5613 gcc_assert (redirected && last_added_blocks == NULL);
5614
5615 /* When we've redirected a latch edge, update the header. */
5616 if (latch_edge_p)
5617 {
5618 current_loop_nest->header = to;
5619 gcc_assert (loop_latch_edge (current_loop_nest));
5620 }
5621
5622 /* In rare situations, the topological relation between the blocks connected
5623 by the redirected edge can change (see PR42245 for an example). Update
5624 block_to_bb/bb_to_block. */
5625 if (CONTAINING_RGN (e->src->index) == CONTAINING_RGN (to->index)
5626 && BLOCK_TO_BB (e->src->index) > BLOCK_TO_BB (to->index))
5627 recompute_toporder_p = true;
5628
5629 jump = find_new_jump (src, NULL, prev_max_uid);
5630 if (jump)
5631 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5632
5633 /* Only update dominator info when we don't have unreachable blocks.
5634 Otherwise we'll update in maybe_tidy_empty_bb. */
5635 if (!maybe_unreachable)
5636 {
5637 set_immediate_dominator (CDI_DOMINATORS, to,
5638 recompute_dominator (CDI_DOMINATORS, to));
5639 set_immediate_dominator (CDI_DOMINATORS, orig_dest,
5640 recompute_dominator (CDI_DOMINATORS, orig_dest));
5641 }
5642 return recompute_toporder_p;
5643 }
5644
5645 /* This variable holds the cfg hooks used by the selective scheduler. */
5646 static struct cfg_hooks sel_cfg_hooks;
5647
5648 /* Register sel-sched cfg hooks. */
5649 void
5650 sel_register_cfg_hooks (void)
5651 {
5652 sched_split_block = sel_split_block;
5653
5654 orig_cfg_hooks = get_cfg_hooks ();
5655 sel_cfg_hooks = orig_cfg_hooks;
5656
5657 sel_cfg_hooks.create_basic_block = sel_create_basic_block;
5658
5659 set_cfg_hooks (sel_cfg_hooks);
5660
5661 sched_init_only_bb = sel_init_only_bb;
5662 sched_split_block = sel_split_block;
5663 sched_create_empty_bb = sel_create_empty_bb;
5664 }
5665
5666 /* Unregister sel-sched cfg hooks. */
5667 void
5668 sel_unregister_cfg_hooks (void)
5669 {
5670 sched_create_empty_bb = NULL;
5671 sched_split_block = NULL;
5672 sched_init_only_bb = NULL;
5673
5674 set_cfg_hooks (orig_cfg_hooks);
5675 }
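
/* The two functions above are meant to bracket any CFG manipulation done
   by the scheduler, e.g. (sketch, enclosing code assumed):

     sel_register_cfg_hooks ();
     ...split blocks / redirect edges...
     sel_unregister_cfg_hooks ();
*/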
5676 \f
5677
5678 /* Emit an insn rtx based on PATTERN. If a jump insn is wanted,
5679 LABEL is where this jump should be directed. */
5680 rtx
5681 create_insn_rtx_from_pattern (rtx pattern, rtx label)
5682 {
5683 rtx insn_rtx;
5684
5685 gcc_assert (!INSN_P (pattern));
5686
5687 start_sequence ();
5688
5689 if (label == NULL_RTX)
5690 insn_rtx = emit_insn (pattern);
5691 else if (DEBUG_INSN_P (label))
5692 insn_rtx = emit_debug_insn (pattern);
5693 else
5694 {
5695 insn_rtx = emit_jump_insn (pattern);
5696 JUMP_LABEL (insn_rtx) = label;
5697 ++LABEL_NUSES (label);
5698 }
5699
5700 end_sequence ();
5701
5702 sched_extend_luids ();
5703 sched_extend_target ();
5704 sched_deps_init (false);
5705
5706 /* Initialize INSN_CODE now. */
5707 recog_memoized (insn_rtx);
5708 return insn_rtx;
5709 }
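
/* For instance, an unconditional jump to LABEL might be materialized as
   (a hypothetical snippet; gen_jump produces the jump pattern):

     rtx jump_rtx = create_insn_rtx_from_pattern (gen_jump (label), label);

   while passing label == NULL_RTX emits an ordinary insn instead.  */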
5710
5711 /* Create a new vinsn for INSN_RTX. FORCE_UNIQUE_P is true when the vinsn
5712 must not be clonable. */
5713 vinsn_t
5714 create_vinsn_from_insn_rtx (rtx insn_rtx, bool force_unique_p)
5715 {
5716 gcc_assert (INSN_P (insn_rtx) && !INSN_IN_STREAM_P (insn_rtx));
5717
5718 /* If VINSN_TYPE is not USE, retain its uniqueness. */
5719 return vinsn_create (insn_rtx, force_unique_p);
5720 }
5721
5722 /* Create a copy of INSN_RTX. */
5723 rtx
5724 create_copy_of_insn_rtx (rtx insn_rtx)
5725 {
5726 rtx res, link;
5727
5728 if (DEBUG_INSN_P (insn_rtx))
5729 return create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5730 insn_rtx);
5731
5732 gcc_assert (NONJUMP_INSN_P (insn_rtx));
5733
5734 res = create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5735 NULL_RTX);
5736
5737 /* Copy all REG_NOTES except REG_EQUAL/REG_EQUIV and REG_LABEL_OPERAND
5738 since mark_jump_label will make them. REG_LABEL_TARGETs are created
5739 there too, but are supposed to be sticky, so we copy them. */
5740 for (link = REG_NOTES (insn_rtx); link; link = XEXP (link, 1))
5741 if (REG_NOTE_KIND (link) != REG_LABEL_OPERAND
5742 && REG_NOTE_KIND (link) != REG_EQUAL
5743 && REG_NOTE_KIND (link) != REG_EQUIV)
5744 {
5745 if (GET_CODE (link) == EXPR_LIST)
5746 add_reg_note (res, REG_NOTE_KIND (link),
5747 copy_insn_1 (XEXP (link, 0)));
5748 else
5749 add_reg_note (res, REG_NOTE_KIND (link), XEXP (link, 0));
5750 }
5751
5752 return res;
5753 }
5754
5755 /* Change vinsn field of EXPR to hold NEW_VINSN. */
5756 void
5757 change_vinsn_in_expr (expr_t expr, vinsn_t new_vinsn)
5758 {
5759 vinsn_detach (EXPR_VINSN (expr));
5760
5761 EXPR_VINSN (expr) = new_vinsn;
5762 vinsn_attach (new_vinsn);
5763 }
5764
5765 /* Helpers for global init. */
5766 /* This structure is used to call the existing bundling mechanism
5767 and calculate insn priorities. */
5768 static struct haifa_sched_info sched_sel_haifa_sched_info =
5769 {
5770 NULL, /* init_ready_list */
5771 NULL, /* can_schedule_ready_p */
5772 NULL, /* schedule_more_p */
5773 NULL, /* new_ready */
5774 NULL, /* rgn_rank */
5775 sel_print_insn, /* rgn_print_insn */
5776 contributes_to_priority,
5777 NULL, /* insn_finishes_block_p */
5778
5779 NULL, NULL,
5780 NULL, NULL,
5781 0, 0,
5782
5783 NULL, /* add_remove_insn */
5784 NULL, /* begin_schedule_ready */
5785 NULL, /* begin_move_insn */
5786 NULL, /* advance_target_bb */
5787
5788 NULL,
5789 NULL,
5790
5791 SEL_SCHED | NEW_BBS
5792 };
5793
5794 /* Setup special insns used in the scheduler. */
5795 void
5796 setup_nop_and_exit_insns (void)
5797 {
5798 gcc_assert (nop_pattern == NULL_RTX
5799 && exit_insn == NULL_RTX);
5800
5801 nop_pattern = constm1_rtx;
5802
5803 start_sequence ();
5804 emit_insn (nop_pattern);
5805 exit_insn = get_insns ();
5806 end_sequence ();
5807 set_block_for_insn (exit_insn, EXIT_BLOCK_PTR);
5808 }
5809
5810 /* Free special insns used in the scheduler. */
5811 void
5812 free_nop_and_exit_insns (void)
5813 {
5814 exit_insn = NULL_RTX;
5815 nop_pattern = NULL_RTX;
5816 }
5817
5818 /* Setup a special vinsn used in new insns initialization. */
5819 void
5820 setup_nop_vinsn (void)
5821 {
5822 nop_vinsn = vinsn_create (exit_insn, false);
5823 vinsn_attach (nop_vinsn);
5824 }
5825
5826 /* Free a special vinsn used in new insns initialization. */
5827 void
5828 free_nop_vinsn (void)
5829 {
5830 gcc_assert (VINSN_COUNT (nop_vinsn) == 1);
5831 vinsn_detach (nop_vinsn);
5832 nop_vinsn = NULL;
5833 }
5834
5835 /* Call a set_sched_flags hook. */
5836 void
5837 sel_set_sched_flags (void)
5838 {
5839 /* ??? This means that set_sched_flags was called, and we decided to
5840 support speculation. However, set_sched_flags also modifies flags
5841 on current_sched_info, doing this only at global init. And we
5842 sometimes change c_s_i later. So put the correct flags back. */
5843 if (spec_info && targetm.sched.set_sched_flags)
5844 targetm.sched.set_sched_flags (spec_info);
5845 }
5846
5847 /* Setup pointers to global sched info structures. */
5848 void
5849 sel_setup_sched_infos (void)
5850 {
5851 rgn_setup_common_sched_info ();
5852
5853 memcpy (&sel_common_sched_info, common_sched_info,
5854 sizeof (sel_common_sched_info));
5855
5856 sel_common_sched_info.fix_recovery_cfg = NULL;
5857 sel_common_sched_info.add_block = NULL;
5858 sel_common_sched_info.estimate_number_of_insns
5859 = sel_estimate_number_of_insns;
5860 sel_common_sched_info.luid_for_non_insn = sel_luid_for_non_insn;
5861 sel_common_sched_info.sched_pass_id = SCHED_SEL_PASS;
5862
5863 common_sched_info = &sel_common_sched_info;
5864
5865 current_sched_info = &sched_sel_haifa_sched_info;
5866 current_sched_info->sched_max_insns_priority =
5867 get_rgn_sched_max_insns_priority ();
5868
5869 sel_set_sched_flags ();
5870 }
5871 \f
5872
5873 /* Adds basic block BB to region RGN at the position *BB_ORD_INDEX;
5874 *BB_ORD_INDEX is increased afterwards. */
5875 static void
5876 sel_add_block_to_region (basic_block bb, int *bb_ord_index, int rgn)
5877 {
5878 RGN_NR_BLOCKS (rgn) += 1;
5879 RGN_DONT_CALC_DEPS (rgn) = 0;
5880 RGN_HAS_REAL_EBB (rgn) = 0;
5881 CONTAINING_RGN (bb->index) = rgn;
5882 BLOCK_TO_BB (bb->index) = *bb_ord_index;
5883 rgn_bb_table[RGN_BLOCKS (rgn) + *bb_ord_index] = bb->index;
5884 (*bb_ord_index)++;
5885
5886 /* FIXME: it is true only when not scheduling ebbs. */
5887 RGN_BLOCKS (rgn + 1) = RGN_BLOCKS (rgn) + RGN_NR_BLOCKS (rgn);
5888 }
5889
5890 /* Functions to support pipelining of outer loops. */
5891
5892 /* Creates a new empty region and returns its number. */
5893 static int
5894 sel_create_new_region (void)
5895 {
5896 int new_rgn_number = nr_regions;
5897
5898 RGN_NR_BLOCKS (new_rgn_number) = 0;
5899
5900 /* FIXME: This will work only when EBBs are not created. */
5901 if (new_rgn_number != 0)
5902 RGN_BLOCKS (new_rgn_number) = RGN_BLOCKS (new_rgn_number - 1) +
5903 RGN_NR_BLOCKS (new_rgn_number - 1);
5904 else
5905 RGN_BLOCKS (new_rgn_number) = 0;
5906
5907 /* Set the blocks of the next region so the other functions may
5908 calculate the number of blocks in the region. */
5909 RGN_BLOCKS (new_rgn_number + 1) = RGN_BLOCKS (new_rgn_number) +
5910 RGN_NR_BLOCKS (new_rgn_number);
5911
5912 nr_regions++;
5913
5914 return new_rgn_number;
5915 }
5916
5917 /* If X has a smaller topological sort number than Y, returns -1;
5918 if greater, returns 1. */
5919 static int
5920 bb_top_order_comparator (const void *x, const void *y)
5921 {
5922 basic_block bb1 = *(const basic_block *) x;
5923 basic_block bb2 = *(const basic_block *) y;
5924
5925 gcc_assert (bb1 == bb2
5926 || rev_top_order_index[bb1->index]
5927 != rev_top_order_index[bb2->index]);
5928
5929 /* It's a reverse topological order in REV_TOP_ORDER_INDEX, so
5930 bbs with a greater number should go earlier. */
5931 if (rev_top_order_index[bb1->index] > rev_top_order_index[bb2->index])
5932 return -1;
5933 else
5934 return 1;
5935 }
5936
5937 /* Create a region for LOOP and return its number. If we don't want
5938 to pipeline LOOP, return -1. */
5939 static int
5940 make_region_from_loop (struct loop *loop)
5941 {
5942 unsigned int i;
5943 int new_rgn_number = -1;
5944 struct loop *inner;
5945
5946 /* Basic block index, to be assigned to BLOCK_TO_BB. */
5947 int bb_ord_index = 0;
5948 basic_block *loop_blocks;
5949 basic_block preheader_block;
5950
5951 if (loop->num_nodes
5952 > (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS))
5953 return -1;
5954
5955 /* Don't pipeline loops whose latch belongs to some of its inner loops. */
5956 for (inner = loop->inner; inner; inner = inner->inner)
5957 if (flow_bb_inside_loop_p (inner, loop->latch))
5958 return -1;
5959
5960 loop->ninsns = num_loop_insns (loop);
5961 if ((int) loop->ninsns > PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_INSNS))
5962 return -1;
5963
5964 loop_blocks = get_loop_body_in_custom_order (loop, bb_top_order_comparator);
5965
5966 for (i = 0; i < loop->num_nodes; i++)
5967 if (loop_blocks[i]->flags & BB_IRREDUCIBLE_LOOP)
5968 {
5969 free (loop_blocks);
5970 return -1;
5971 }
5972
5973 preheader_block = loop_preheader_edge (loop)->src;
5974 gcc_assert (preheader_block);
5975 gcc_assert (loop_blocks[0] == loop->header);
5976
5977 new_rgn_number = sel_create_new_region ();
5978
5979 sel_add_block_to_region (preheader_block, &bb_ord_index, new_rgn_number);
5980 SET_BIT (bbs_in_loop_rgns, preheader_block->index);
5981
5982 for (i = 0; i < loop->num_nodes; i++)
5983 {
5984       /* Add only those blocks that haven't been scheduled in the inner loop.
5985          The exception is basic blocks with bookkeeping code - they should
5986          be added to the region (they actually don't belong to the loop
5987          body, but to the region containing that loop body).  */
5988
5989 gcc_assert (new_rgn_number >= 0);
5990
5991 if (! TEST_BIT (bbs_in_loop_rgns, loop_blocks[i]->index))
5992 {
5993 sel_add_block_to_region (loop_blocks[i], &bb_ord_index,
5994 new_rgn_number);
5995 SET_BIT (bbs_in_loop_rgns, loop_blocks[i]->index);
5996 }
5997 }
5998
5999 free (loop_blocks);
6000 MARK_LOOP_FOR_PIPELINING (loop);
6001
6002 return new_rgn_number;
6003 }
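
/* To summarize the checks above: make_region_from_loop gives up (returns
   -1) when the loop has too many blocks or insns
   (PARAM_MAX_PIPELINE_REGION_BLOCKS, PARAM_MAX_PIPELINE_REGION_INSNS),
   when its latch lies within an inner loop, or when any of its blocks is
   part of an irreducible region.  Otherwise the preheader plus the
   not-yet-placed loop blocks become the new region.  */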
6004
6005 /* Create a new region from preheader blocks LOOP_BLOCKS. */
6006 void
6007 make_region_from_loop_preheader (VEC(basic_block, heap) **loop_blocks)
6008 {
6009 unsigned int i;
6010 int new_rgn_number = -1;
6011 basic_block bb;
6012
6013 /* Basic block index, to be assigned to BLOCK_TO_BB. */
6014 int bb_ord_index = 0;
6015
6016 new_rgn_number = sel_create_new_region ();
6017
6018 FOR_EACH_VEC_ELT (basic_block, *loop_blocks, i, bb)
6019 {
6020 gcc_assert (new_rgn_number >= 0);
6021
6022 sel_add_block_to_region (bb, &bb_ord_index, new_rgn_number);
6023 }
6024
6025 VEC_free (basic_block, heap, *loop_blocks);
6026 gcc_assert (*loop_blocks == NULL);
6027 }
6028
6029
6030 /* Create region(s) from loop nest LOOP, such that inner loops will be
6031 pipelined before outer loops. Returns true when a region for LOOP
6032 is created. */
6033 static bool
6034 make_regions_from_loop_nest (struct loop *loop)
6035 {
6036 struct loop *cur_loop;
6037 int rgn_number;
6038
6039 /* Traverse all inner nodes of the loop. */
6040 for (cur_loop = loop->inner; cur_loop; cur_loop = cur_loop->next)
6041 if (! TEST_BIT (bbs_in_loop_rgns, cur_loop->header->index))
6042 return false;
6043
6044 /* At this moment all regular inner loops should have been pipelined.
6045 Try to create a region from this loop. */
6046 rgn_number = make_region_from_loop (loop);
6047
6048 if (rgn_number < 0)
6049 return false;
6050
6051 VEC_safe_push (loop_p, heap, loop_nests, loop);
6052 return true;
6053 }
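
/* Note that the early-exit check above relies on inner loops having been
   processed first: if an immediate inner loop has not been made into a
   region yet, its header is not in BBS_IN_LOOP_RGNS and we give up on the
   outer loop for now.  */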
6054
6055 /* Initialize the data structures needed for pipelining.  */
6056 void
6057 sel_init_pipelining (void)
6058 {
6059 /* Collect loop information to be used in outer loops pipelining. */
6060 loop_optimizer_init (LOOPS_HAVE_PREHEADERS
6061 | LOOPS_HAVE_FALLTHRU_PREHEADERS
6062 | LOOPS_HAVE_RECORDED_EXITS
6063 | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);
6064 current_loop_nest = NULL;
6065
6066 bbs_in_loop_rgns = sbitmap_alloc (last_basic_block);
6067 sbitmap_zero (bbs_in_loop_rgns);
6068
6069 recompute_rev_top_order ();
6070 }
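
/* A note on the flags above: LOOPS_HAVE_PREHEADERS (together with
   LOOPS_HAVE_FALLTHRU_PREHEADERS) guarantees that loop_preheader_edge,
   used by make_region_from_loop, finds a preheader block to put first
   into each loop region.  */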
6071
6072 /* Returns a struct loop for region RGN. */
6073 loop_p
6074 get_loop_nest_for_rgn (unsigned int rgn)
6075 {
6076 /* Regions created with extend_rgns don't have corresponding loop nests,
6077 because they don't represent loops. */
6078 if (rgn < VEC_length (loop_p, loop_nests))
6079 return VEC_index (loop_p, loop_nests, rgn);
6080 else
6081 return NULL;
6082 }
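
/* This indexing works because make_regions_from_loop_nest pushes loops
   onto LOOP_NESTS in the same order in which their region numbers are
   allocated, while all non-loop regions are created afterwards by
   make_regions_from_the_rest.  */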
6083
6084 /* Return true if LOOP was included in a pipelining region.  */
6085 bool
6086 considered_for_pipelining_p (struct loop *loop)
6087 {
6088 if (loop_depth (loop) == 0)
6089 return false;
6090
6091 /* Now, the loop could be too large or irreducible. Check whether its
6092 region is in LOOP_NESTS.
6093    We determine the region number of LOOP as the region number of its
6094    latch.  We can't use the header here, because it could be a
6095    just-removed preheader, which would give us the wrong region number;
6096    the header could also lie within an inner loop.  */
6097 if (LOOP_MARKED_FOR_PIPELINING_P (loop))
6098 {
6099 int rgn = CONTAINING_RGN (loop->latch->index);
6100
6101 gcc_assert ((unsigned) rgn < VEC_length (loop_p, loop_nests));
6102 return true;
6103 }
6104
6105 return false;
6106 }
6107
6108 /* Makes regions from the rest of the blocks, after loops are chosen
6109 for pipelining. */
6110 static void
6111 make_regions_from_the_rest (void)
6112 {
6113 int cur_rgn_blocks;
6114 int *loop_hdr;
6115 int i;
6116
6117 basic_block bb;
6118 edge e;
6119 edge_iterator ei;
6120 int *degree;
6121
6122 /* Index in rgn_bb_table where to start allocating new regions. */
6123 cur_rgn_blocks = nr_regions ? RGN_BLOCKS (nr_regions) : 0;
6124
6125   /* Make regions from all the remaining basic blocks - those that don't
6126      belong to any loop or that belong to irreducible loops.  Prepare the
6127      data structures for extend_rgns.  */
6128
6129 /* LOOP_HDR[I] == -1 if I-th bb doesn't belong to any loop,
6130 LOOP_HDR[I] == LOOP_HDR[J] iff basic blocks I and J reside within the same
6131 loop. */
6132 loop_hdr = XNEWVEC (int, last_basic_block);
6133 degree = XCNEWVEC (int, last_basic_block);
6134
6135
6136   /* For each basic block that belongs to some loop, assign the number
6137      of the innermost loop it belongs to.  */
6138 for (i = 0; i < last_basic_block; i++)
6139 loop_hdr[i] = -1;
6140
6141 FOR_EACH_BB (bb)
6142 {
6143       if (bb->loop_father && bb->loop_father->num != 0
6144 && !(bb->flags & BB_IRREDUCIBLE_LOOP))
6145 loop_hdr[bb->index] = bb->loop_father->num;
6146 }
6147
6148   /* For each basic block, the degree is calculated as the number of
6149      incoming edges that come from basic blocks not yet placed in a region.
6150      Blocks that already reside in loop regions are marked with -1.  */
6151 FOR_EACH_BB (bb)
6152 {
6153 degree[bb->index] = 0;
6154
6155 if (!TEST_BIT (bbs_in_loop_rgns, bb->index))
6156 {
6157 FOR_EACH_EDGE (e, ei, bb->preds)
6158 if (!TEST_BIT (bbs_in_loop_rgns, e->src->index))
6159 degree[bb->index]++;
6160 }
6161 else
6162 degree[bb->index] = -1;
6163 }
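
  /* E.g., a block with two predecessors, one of which already resides in
     a loop region, starts with degree 1 here; the -1 marks make both
     extend_rgns and the singleton-region loop below skip blocks that are
     already placed.  */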
6164
6165 extend_rgns (degree, &cur_rgn_blocks, bbs_in_loop_rgns, loop_hdr);
6166
6167 /* Any block that did not end up in a region is placed into a region
6168 by itself. */
6169 FOR_EACH_BB (bb)
6170 if (degree[bb->index] >= 0)
6171 {
6172 rgn_bb_table[cur_rgn_blocks] = bb->index;
6173 RGN_NR_BLOCKS (nr_regions) = 1;
6174 RGN_BLOCKS (nr_regions) = cur_rgn_blocks++;
6175 RGN_DONT_CALC_DEPS (nr_regions) = 0;
6176 RGN_HAS_REAL_EBB (nr_regions) = 0;
6177 CONTAINING_RGN (bb->index) = nr_regions++;
6178 BLOCK_TO_BB (bb->index) = 0;
6179 }
6180
6181 free (degree);
6182 free (loop_hdr);
6183 }
6184
6185 /* Free data structures used in pipelining of loops. */
6186 void
sel_finish_pipelining (void)
6187 {
6188 loop_iterator li;
6189 struct loop *loop;
6190
6191 /* Release aux fields so we don't free them later by mistake. */
6192 FOR_EACH_LOOP (li, loop, 0)
6193 loop->aux = NULL;
6194
6195 loop_optimizer_finalize ();
6196
6197 VEC_free (loop_p, heap, loop_nests);
6198
6199 free (rev_top_order_index);
6200 rev_top_order_index = NULL;
6201 }
6202
6203 /* This function replaces find_rgns when
6204    FLAG_SEL_SCHED_PIPELINING_OUTER_LOOPS is set.  */
6205 void
6206 sel_find_rgns (void)
6207 {
6208 sel_init_pipelining ();
6209 extend_regions ();
6210
6211 if (current_loops)
6212 {
6213 loop_p loop;
6214 loop_iterator li;
6215
6216 FOR_EACH_LOOP (li, loop, (flag_sel_sched_pipelining_outer_loops
6217 ? LI_FROM_INNERMOST
6218 : LI_ONLY_INNERMOST))
6219 make_regions_from_loop_nest (loop);
6220 }
6221
6222   /* Make regions from all the remaining basic blocks and schedule them.
6223      These are the blocks that don't belong to any loop or that belong
6224      to irreducible loops.  */
6225 make_regions_from_the_rest ();
6226
6227 /* We don't need bbs_in_loop_rgns anymore. */
6228 sbitmap_free (bbs_in_loop_rgns);
6229 bbs_in_loop_rgns = NULL;
6230 }
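
/* The iteration order above matters: LI_FROM_INNERMOST visits inner loops
   before their enclosing loops, which is what allows
   make_regions_from_loop_nest to create regions for inner loops first
   when pipelining of outer loops is enabled; with LI_ONLY_INNERMOST only
   the innermost loops get loop regions.  */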
6231
6232 /* Add the preheader blocks from the previous loop to the current region,
6233    taking them from LOOP_PREHEADER_BLOCKS (current_loop_nest), and record
6234    them in *BBS.  Only used with -fsel-sched-pipelining-outer-loops.  */
6235 void
6236 sel_add_loop_preheaders (bb_vec_t *bbs)
6237 {
6238 int i;
6239 basic_block bb;
6240 VEC(basic_block, heap) *preheader_blocks
6241 = LOOP_PREHEADER_BLOCKS (current_loop_nest);
6242
6243 for (i = 0;
6244 VEC_iterate (basic_block, preheader_blocks, i, bb);
6245 i++)
6246 {
6247 VEC_safe_push (basic_block, heap, *bbs, bb);
6248 VEC_safe_push (basic_block, heap, last_added_blocks, bb);
6249 sel_add_bb (bb);
6250 }
6251
6252 VEC_free (basic_block, heap, preheader_blocks);
6253 }
6254
6255 /* While pipelining outer loops, returns TRUE if BB is a loop preheader.
6256 Please note that the function should also work when pipelining_p is
6257 false, because it is used when deciding whether we should or should
6258 not reschedule pipelined code. */
6259 bool
6260 sel_is_loop_preheader_p (basic_block bb)
6261 {
6262 if (current_loop_nest)
6263 {
6264 struct loop *outer;
6265
6266 if (preheader_removed)
6267 return false;
6268
6269 /* Preheader is the first block in the region. */
6270 if (BLOCK_TO_BB (bb->index) == 0)
6271 return true;
6272
6273 /* We used to find a preheader with the topological information.
6274 Check that the above code is equivalent to what we did before. */
6275
6276 if (in_current_region_p (current_loop_nest->header))
6277 gcc_assert (!(BLOCK_TO_BB (bb->index)
6278 < BLOCK_TO_BB (current_loop_nest->header->index)));
6279
6280       /* The latch block of an outer loop that is considered for
6281          pipelining must never end up here; verify this.  */
6282 for (outer = loop_outer (current_loop_nest);
6283 outer;
6284 outer = loop_outer (outer))
6285 if (considered_for_pipelining_p (outer) && outer->latch == bb)
6286 gcc_unreachable ();
6287 }
6288
6289 return false;
6290 }
6291
6292 /* Check whether JUMP_BB ends with a jump insn that leads only to DEST_BB and
6293 can be removed, making the corresponding edge fallthrough (assuming that
6294 all basic blocks between JUMP_BB and DEST_BB are empty). */
6295 static bool
6296 bb_has_removable_jump_to_p (basic_block jump_bb, basic_block dest_bb)
6297 {
6298 if (!onlyjump_p (BB_END (jump_bb))
6299 || tablejump_p (BB_END (jump_bb), NULL, NULL))
6300 return false;
6301
6302   /* Reject if there are several outgoing edges, an abnormal or crossing
6303      edge, or the destination of the jump is not DEST_BB.  */
6304 if (EDGE_COUNT (jump_bb->succs) != 1
6305 || EDGE_SUCC (jump_bb, 0)->flags & (EDGE_ABNORMAL | EDGE_CROSSING)
6306 || EDGE_SUCC (jump_bb, 0)->dest != dest_bb)
6307 return false;
6308
6309   /* None of the above applies, so the jump is removable.  */
6310 return true;
6311 }
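
/* E.g., a block ending in a simple unconditional jump whose single normal
   edge leads to DEST_BB qualifies; a tablejump, or a block with an
   abnormal or crossing outgoing edge, does not.  */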
6312
6313 /* Removes the loop preheader from the current region and saves it in
6314    PREHEADER_BLOCKS of the father loop, so the blocks can be added later
6315    to the region that represents the outer loop.  */
6316 static void
6317 sel_remove_loop_preheader (void)
6318 {
6319 int i, old_len;
6320 int cur_rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
6321 basic_block bb;
6322 bool all_empty_p = true;
6323 VEC(basic_block, heap) *preheader_blocks
6324 = LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest));
6325
6326 gcc_assert (current_loop_nest);
6327 old_len = VEC_length (basic_block, preheader_blocks);
6328
6329 /* Add blocks that aren't within the current loop to PREHEADER_BLOCKS. */
6330 for (i = 0; i < RGN_NR_BLOCKS (cur_rgn); i++)
6331 {
6332 bb = BASIC_BLOCK (BB_TO_BLOCK (i));
6333
6334       /* If the basic block belongs to the region, but doesn't belong to
6335          the corresponding loop, then it should be a preheader.  */
6336 if (sel_is_loop_preheader_p (bb))
6337 {
6338 VEC_safe_push (basic_block, heap, preheader_blocks, bb);
6339 if (BB_END (bb) != bb_note (bb))
6340 all_empty_p = false;
6341 }
6342 }
6343
6344 /* Remove these blocks only after iterating over the whole region. */
6345 for (i = VEC_length (basic_block, preheader_blocks) - 1;
6346 i >= old_len;
6347 i--)
6348 {
6349 bb = VEC_index (basic_block, preheader_blocks, i);
6350 sel_remove_bb (bb, false);
6351 }
6352
6353 if (!considered_for_pipelining_p (loop_outer (current_loop_nest)))
6354 {
6355 if (!all_empty_p)
6356 /* Immediately create new region from preheader. */
6357 make_region_from_loop_preheader (&preheader_blocks);
6358 else
6359 {
6360           /* If all preheader blocks are empty, don't create a new empty
6361              region; instead, remove them completely.  */
6362 FOR_EACH_VEC_ELT (basic_block, preheader_blocks, i, bb)
6363 {
6364 edge e;
6365 edge_iterator ei;
6366 basic_block prev_bb = bb->prev_bb, next_bb = bb->next_bb;
6367
6368 /* Redirect all incoming edges to next basic block. */
6369 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
6370 {
6371 if (! (e->flags & EDGE_FALLTHRU))
6372 redirect_edge_and_branch (e, bb->next_bb);
6373 else
6374 redirect_edge_succ (e, bb->next_bb);
6375 }
6376 gcc_assert (BB_NOTE_LIST (bb) == NULL);
6377 delete_and_free_basic_block (bb);
6378
6379               /* Check whether, after deleting the preheader, there is an
6380                  unconditional jump in PREV_BB that leads to the next basic
6381                  block NEXT_BB.  If so, delete this jump and clear the data
6382                  sets of its basic block if it becomes empty.  */
6383 if (next_bb->prev_bb == prev_bb
6384 && prev_bb != ENTRY_BLOCK_PTR
6385 && bb_has_removable_jump_to_p (prev_bb, next_bb))
6386 {
6387 redirect_edge_and_branch (EDGE_SUCC (prev_bb, 0), next_bb);
6388 if (BB_END (prev_bb) == bb_note (prev_bb))
6389 free_data_sets (prev_bb);
6390 }
6391
6392 set_immediate_dominator (CDI_DOMINATORS, next_bb,
6393 recompute_dominator (CDI_DOMINATORS,
6394 next_bb));
6395 }
6396 }
6397 VEC_free (basic_block, heap, preheader_blocks);
6398 }
6399 else
6400 /* Store preheader within the father's loop structure. */
6401 SET_LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest),
6402 preheader_blocks);
6403 }
6404 #endif