1 /* Instruction scheduling pass. Selective scheduler and pipeliner.
2 Copyright (C) 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "toplev.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "hard-reg-set.h"
28 #include "regs.h"
29 #include "function.h"
30 #include "flags.h"
31 #include "insn-config.h"
32 #include "insn-attr.h"
33 #include "except.h"
34 #include "toplev.h"
35 #include "recog.h"
36 #include "params.h"
37 #include "target.h"
38 #include "timevar.h"
39 #include "tree-pass.h"
40 #include "sched-int.h"
41 #include "ggc.h"
42 #include "tree.h"
43 #include "vec.h"
44 #include "langhooks.h"
45 #include "rtlhooks-def.h"
46 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
47
48 #ifdef INSN_SCHEDULING
49 #include "sel-sched-ir.h"
50 /* We don't have to use it except for sel_print_insn. */
51 #include "sel-sched-dump.h"
52
53 /* A vector holding bb info for whole scheduling pass. */
54 VEC(sel_global_bb_info_def, heap) *sel_global_bb_info = NULL;
55
56 /* A vector holding bb info for the current region. */
57 VEC(sel_region_bb_info_def, heap) *sel_region_bb_info = NULL;
58
59 /* A pool for allocating all lists. */
60 alloc_pool sched_lists_pool;
61
62 /* This contains information about successors for compute_av_set. */
63 struct succs_info current_succs;
64
65 /* Data structure to describe interaction with the generic scheduler utils. */
66 static struct common_sched_info_def sel_common_sched_info;
67
68 /* The loop nest being pipelined. */
69 struct loop *current_loop_nest;
70
71 /* LOOP_NESTS is a vector containing the corresponding loop nest for
72 each region. */
73 static VEC(loop_p, heap) *loop_nests = NULL;
74
75 /* Saves blocks already in loop regions, indexed by bb->index. */
76 static sbitmap bbs_in_loop_rgns = NULL;
77
78 /* CFG hooks that are saved before changing create_basic_block hook. */
79 static struct cfg_hooks orig_cfg_hooks;
80 \f
81
82 /* Array containing reverse topological index of function basic blocks,
83 indexed by BB->INDEX. */
84 static int *rev_top_order_index = NULL;
85
86 /* Length of the above array. */
87 static int rev_top_order_index_len = -1;
88
89 /* A regset pool structure. */
90 static struct
91 {
92 /* The stack to which regsets are returned. */
93 regset *v;
94
95 /* Its pointer. */
96 int n;
97
98 /* Its size. */
99 int s;
100
101 /* In VV we save all generated regsets so that, when destructing the
102 pool, we can compare it with V and check that every regset was returned
103 to the pool. */
104 regset *vv;
105
106 /* The pointer of VV stack. */
107 int nn;
108
109 /* Its size. */
110 int ss;
111
112 /* The difference between allocated and returned regsets. */
113 int diff;
114 } regset_pool = { NULL, 0, 0, NULL, 0, 0, 0 };
115
116 /* This represents the nop pool. */
117 static struct
118 {
119 /* The vector which holds previously emitted nops. */
120 insn_t *v;
121
122 /* Its pointer. */
123 int n;
124
125 /* Its size. */
126 int s;
127 } nop_pool = { NULL, 0, 0 };
128
129 /* The pool for basic block notes. */
130 static rtx_vec_t bb_note_pool;
131
132 /* A NOP pattern used to emit placeholder insns. */
133 rtx nop_pattern = NULL_RTX;
134 /* A special instruction that resides in EXIT_BLOCK.
135 EXIT_INSN is the successor of the insns that lead to EXIT_BLOCK. */
136 rtx exit_insn = NULL_RTX;
137
138 /* TRUE if, while scheduling the current region (which is a loop), its
139 preheader was removed. */
140 bool preheader_removed = false;
141 \f
142
143 /* Forward static declarations. */
144 static void fence_clear (fence_t);
145
146 static void deps_init_id (idata_t, insn_t, bool);
147 static void init_id_from_df (idata_t, insn_t, bool);
148 static expr_t set_insn_init (expr_t, vinsn_t, int);
149
150 static void cfg_preds (basic_block, insn_t **, int *);
151 static void prepare_insn_expr (insn_t, int);
152 static void free_history_vect (VEC (expr_history_def, heap) **);
153
154 static void move_bb_info (basic_block, basic_block);
155 static void remove_empty_bb (basic_block, bool);
156 static void sel_remove_loop_preheader (void);
157
158 static bool insn_is_the_only_one_in_bb_p (insn_t);
159 static void create_initial_data_sets (basic_block);
160
161 static void free_av_set (basic_block);
162 static void invalidate_av_set (basic_block);
163 static void extend_insn_data (void);
164 static void sel_init_new_insn (insn_t, int);
165 static void finish_insns (void);
166 \f
167 /* Various list functions. */
168
169 /* Copy an instruction list L. */
170 ilist_t
171 ilist_copy (ilist_t l)
172 {
173 ilist_t head = NULL, *tailp = &head;
174
175 while (l)
176 {
177 ilist_add (tailp, ILIST_INSN (l));
178 tailp = &ILIST_NEXT (*tailp);
179 l = ILIST_NEXT (l);
180 }
181
182 return head;
183 }
184
185 /* Invert an instruction list L. */
186 ilist_t
187 ilist_invert (ilist_t l)
188 {
189 ilist_t res = NULL;
190
191 while (l)
192 {
193 ilist_add (&res, ILIST_INSN (l));
194 l = ILIST_NEXT (l);
195 }
196
197 return res;
198 }
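
/* Illustrative usage sketch only (guarded out, not part of the pass): the
   typical lifetime of the instruction lists built with the helpers above.
   LIST is assumed to be supplied by the caller.  */
#if 0
static void
ilist_usage_sketch (ilist_t list)
{
  ilist_t copy = ilist_copy (list);    /* Same order as LIST.  */
  ilist_t rev = ilist_invert (list);   /* Reversed order.  */

  /* ... walk COPY/REV via ILIST_INSN and ILIST_NEXT ... */

  ilist_clear (&copy);
  ilist_clear (&rev);
}
#endif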
199
200 /* Add a new boundary to the LP list with parameters TO, PTR, and DC. */
201 void
202 blist_add (blist_t *lp, insn_t to, ilist_t ptr, deps_t dc)
203 {
204 bnd_t bnd;
205
206 _list_add (lp);
207 bnd = BLIST_BND (*lp);
208
209 BND_TO (bnd) = to;
210 BND_PTR (bnd) = ptr;
211 BND_AV (bnd) = NULL;
212 BND_AV1 (bnd) = NULL;
213 BND_DC (bnd) = dc;
214 }
215
216 /* Remove the list note pointed to by LP. */
217 void
218 blist_remove (blist_t *lp)
219 {
220 bnd_t b = BLIST_BND (*lp);
221
222 av_set_clear (&BND_AV (b));
223 av_set_clear (&BND_AV1 (b));
224 ilist_clear (&BND_PTR (b));
225
226 _list_remove (lp);
227 }
228
229 /* Init a fence tail L. */
230 void
231 flist_tail_init (flist_tail_t l)
232 {
233 FLIST_TAIL_HEAD (l) = NULL;
234 FLIST_TAIL_TAILP (l) = &FLIST_TAIL_HEAD (l);
235 }
236
237 /* Try to find fence corresponding to INSN in L. */
238 fence_t
239 flist_lookup (flist_t l, insn_t insn)
240 {
241 while (l)
242 {
243 if (FENCE_INSN (FLIST_FENCE (l)) == insn)
244 return FLIST_FENCE (l);
245
246 l = FLIST_NEXT (l);
247 }
248
249 return NULL;
250 }
251
252 /* Init the fields of F before running fill_insns. */
253 static void
254 init_fence_for_scheduling (fence_t f)
255 {
256 FENCE_BNDS (f) = NULL;
257 FENCE_PROCESSED_P (f) = false;
258 FENCE_SCHEDULED_P (f) = false;
259 }
260
261 /* Add new fence consisting of INSN and STATE to the list pointed to by LP. */
262 static void
263 flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc,
264 insn_t last_scheduled_insn, VEC(rtx,gc) *executing_insns,
265 int *ready_ticks, int ready_ticks_size, insn_t sched_next,
266 int cycle, int cycle_issued_insns, int issue_more,
267 bool starts_cycle_p, bool after_stall_p)
268 {
269 fence_t f;
270
271 _list_add (lp);
272 f = FLIST_FENCE (*lp);
273
274 FENCE_INSN (f) = insn;
275
276 gcc_assert (state != NULL);
277 FENCE_STATE (f) = state;
278
279 FENCE_CYCLE (f) = cycle;
280 FENCE_ISSUED_INSNS (f) = cycle_issued_insns;
281 FENCE_STARTS_CYCLE_P (f) = starts_cycle_p;
282 FENCE_AFTER_STALL_P (f) = after_stall_p;
283
284 gcc_assert (dc != NULL);
285 FENCE_DC (f) = dc;
286
287 gcc_assert (tc != NULL || targetm.sched.alloc_sched_context == NULL);
288 FENCE_TC (f) = tc;
289
290 FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
291 FENCE_ISSUE_MORE (f) = issue_more;
292 FENCE_EXECUTING_INSNS (f) = executing_insns;
293 FENCE_READY_TICKS (f) = ready_ticks;
294 FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
295 FENCE_SCHED_NEXT (f) = sched_next;
296
297 init_fence_for_scheduling (f);
298 }
299
300 /* Remove the head node of the list pointed to by LP. */
301 static void
302 flist_remove (flist_t *lp)
303 {
304 if (FENCE_INSN (FLIST_FENCE (*lp)))
305 fence_clear (FLIST_FENCE (*lp));
306 _list_remove (lp);
307 }
308
309 /* Clear the fence list pointed to by LP. */
310 void
311 flist_clear (flist_t *lp)
312 {
313 while (*lp)
314 flist_remove (lp);
315 }
316
317 /* Add ORIGINAL_INSN to the def list DL, honoring CROSSES_CALL. */
318 void
319 def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call)
320 {
321 def_t d;
322
323 _list_add (dl);
324 d = DEF_LIST_DEF (*dl);
325
326 d->orig_insn = original_insn;
327 d->crosses_call = crosses_call;
328 }
329 \f
330
331 /* Functions to work with target contexts. */
332
333 /* Bulk target context. It is convenient for debugging purposes to ensure
334 that there are no uninitialized (null) target contexts. */
335 static tc_t bulk_tc = (tc_t) 1;
336
337 /* Wrappers around the target hooks. In the future we can provide some
338 default implementations for them. */
339
340 /* Allocate storage for a target context. */
341 static tc_t
342 alloc_target_context (void)
343 {
344 return (targetm.sched.alloc_sched_context
345 ? targetm.sched.alloc_sched_context () : bulk_tc);
346 }
347
348 /* Init target context TC.
349 If CLEAN_P is true, then initialize TC as if at the very beginning of
350 the scheduler. Otherwise, copy the current backend context to TC. */
351 static void
352 init_target_context (tc_t tc, bool clean_p)
353 {
354 if (targetm.sched.init_sched_context)
355 targetm.sched.init_sched_context (tc, clean_p);
356 }
357
358 /* Allocate and initialize a target context. The meaning of CLEAN_P is the
359 same as in init_target_context (). */
360 tc_t
361 create_target_context (bool clean_p)
362 {
363 tc_t tc = alloc_target_context ();
364
365 init_target_context (tc, clean_p);
366 return tc;
367 }
368
369 /* Copy TC to the current backend context. */
370 void
371 set_target_context (tc_t tc)
372 {
373 if (targetm.sched.set_sched_context)
374 targetm.sched.set_sched_context (tc);
375 }
376
377 /* TC is about to be destroyed. Free any internal data. */
378 static void
379 clear_target_context (tc_t tc)
380 {
381 if (targetm.sched.clear_sched_context)
382 targetm.sched.clear_sched_context (tc);
383 }
384
385 /* Clear and free TC. */
386 static void
387 delete_target_context (tc_t tc)
388 {
389 clear_target_context (tc);
390
391 if (targetm.sched.free_sched_context)
392 targetm.sched.free_sched_context (tc);
393 }
394
395 /* Make a copy of FROM in TO.
396 NB: Maybe this should be a hook. */
397 static void
398 copy_target_context (tc_t to, tc_t from)
399 {
400 tc_t tmp = create_target_context (false);
401
402 set_target_context (from);
403 init_target_context (to, false);
404
405 set_target_context (tmp);
406 delete_target_context (tmp);
407 }
408
409 /* Create a copy of TC. */
410 static tc_t
411 create_copy_of_target_context (tc_t tc)
412 {
413 tc_t copy = alloc_target_context ();
414
415 copy_target_context (copy, tc);
416
417 return copy;
418 }
419
420 /* Clear TC and initialize it according to CLEAN_P. The meaning of CLEAN_P
421 is the same as in init_target_context (). */
422 void
423 reset_target_context (tc_t tc, bool clean_p)
424 {
425 clear_target_context (tc);
426 init_target_context (tc, clean_p);
427 }
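
/* Illustrative lifetime sketch for target contexts (guarded out, not part
   of the pass).  It only uses the wrappers defined above.  */
#if 0
static void
target_context_sketch (void)
{
  tc_t tc = create_target_context (true);          /* Clean context.  */
  tc_t tc2 = create_copy_of_target_context (tc);   /* Snapshot of TC.  */

  set_target_context (tc);            /* Load TC into the backend.  */
  reset_target_context (tc2, false);  /* Re-init TC2 from the current one.  */

  delete_target_context (tc);
  delete_target_context (tc2);
}
#endif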
428 \f
429 /* Functions to work with dependence contexts.
430 Dc (aka deps context, aka deps_t, aka struct deps_desc *) is short for
431 dependence context. It accumulates information about processed insns to
432 decide whether the current insn is dependent on the processed ones. */
433
434 /* Make a copy of FROM in TO. */
435 static void
436 copy_deps_context (deps_t to, deps_t from)
437 {
438 init_deps (to, false);
439 deps_join (to, from);
440 }
441
442 /* Allocate storage for a dep context. */
443 static deps_t
444 alloc_deps_context (void)
445 {
446 return XNEW (struct deps_desc);
447 }
448
449 /* Allocate and initialize dep context. */
450 static deps_t
451 create_deps_context (void)
452 {
453 deps_t dc = alloc_deps_context ();
454
455 init_deps (dc, false);
456 return dc;
457 }
458
459 /* Create a copy of FROM. */
460 static deps_t
461 create_copy_of_deps_context (deps_t from)
462 {
463 deps_t to = alloc_deps_context ();
464
465 copy_deps_context (to, from);
466 return to;
467 }
468
469 /* Clean up internal data of DC. */
470 static void
471 clear_deps_context (deps_t dc)
472 {
473 free_deps (dc);
474 }
475
476 /* Clear and free DC. */
477 static void
478 delete_deps_context (deps_t dc)
479 {
480 clear_deps_context (dc);
481 free (dc);
482 }
483
484 /* Clear and init DC. */
485 static void
486 reset_deps_context (deps_t dc)
487 {
488 clear_deps_context (dc);
489 init_deps (dc, false);
490 }
491
492 /* This structure describes the dependence analysis hooks for advancing
493 dependence context. */
494 static struct sched_deps_info_def advance_deps_context_sched_deps_info =
495 {
496 NULL,
497
498 NULL, /* start_insn */
499 NULL, /* finish_insn */
500 NULL, /* start_lhs */
501 NULL, /* finish_lhs */
502 NULL, /* start_rhs */
503 NULL, /* finish_rhs */
504 haifa_note_reg_set,
505 haifa_note_reg_clobber,
506 haifa_note_reg_use,
507 NULL, /* note_mem_dep */
508 NULL, /* note_dep */
509
510 0, 0, 0
511 };
512
513 /* Process INSN and record its effect in DC. */
514 void
515 advance_deps_context (deps_t dc, insn_t insn)
516 {
517 sched_deps_info = &advance_deps_context_sched_deps_info;
518 deps_analyze_insn (dc, insn);
519 }
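
/* Illustrative sketch (guarded out, not part of the pass): accumulate the
   effect of a caller-supplied NULL-terminated array of insns into a fresh
   dependence context and dispose of it.  */
#if 0
static void
deps_context_sketch (insn_t *insns)
{
  deps_t dc = create_deps_context ();
  int i;

  for (i = 0; insns[i] != NULL; i++)
    advance_deps_context (dc, insns[i]);

  delete_deps_context (dc);
}
#endif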
520 \f
521
522 /* Functions to work with DFA states. */
523
524 /* Allocate storage for a DFA state. */
525 static state_t
526 state_alloc (void)
527 {
528 return xmalloc (dfa_state_size);
529 }
530
531 /* Allocate and initialize DFA state. */
532 static state_t
533 state_create (void)
534 {
535 state_t state = state_alloc ();
536
537 state_reset (state);
538 advance_state (state);
539 return state;
540 }
541
542 /* Free DFA state. */
543 static void
544 state_free (state_t state)
545 {
546 free (state);
547 }
548
549 /* Make a copy of FROM in TO. */
550 static void
551 state_copy (state_t to, state_t from)
552 {
553 memcpy (to, from, dfa_state_size);
554 }
555
556 /* Create a copy of FROM. */
557 static state_t
558 state_create_copy (state_t from)
559 {
560 state_t to = state_alloc ();
561
562 state_copy (to, from);
563 return to;
564 }
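
/* Illustrative sketch (guarded out, not part of the pass): DFA states are
   opaque blobs of dfa_state_size bytes, so creating, copying and freeing
   them is cheap.  */
#if 0
static void
dfa_state_sketch (void)
{
  state_t s = state_create ();         /* Reset and advanced by one cycle.  */
  state_t s2 = state_create_copy (s);  /* Independent copy of S.  */

  state_free (s);
  state_free (s2);
}
#endif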
565 \f
566
567 /* Functions to work with fences. */
568
569 /* Clear the fence. */
570 static void
571 fence_clear (fence_t f)
572 {
573 state_t s = FENCE_STATE (f);
574 deps_t dc = FENCE_DC (f);
575 void *tc = FENCE_TC (f);
576
577 ilist_clear (&FENCE_BNDS (f));
578
579 gcc_assert ((s != NULL && dc != NULL && tc != NULL)
580 || (s == NULL && dc == NULL && tc == NULL));
581
582 if (s != NULL)
583 free (s);
584
585 if (dc != NULL)
586 delete_deps_context (dc);
587
588 if (tc != NULL)
589 delete_target_context (tc);
590 VEC_free (rtx, gc, FENCE_EXECUTING_INSNS (f));
591 free (FENCE_READY_TICKS (f));
592 FENCE_READY_TICKS (f) = NULL;
593 }
594
595 /* Init a list of fences with successors of OLD_FENCE. */
596 void
597 init_fences (insn_t old_fence)
598 {
599 insn_t succ;
600 succ_iterator si;
601 bool first = true;
602 int ready_ticks_size = get_max_uid () + 1;
603
604 FOR_EACH_SUCC_1 (succ, si, old_fence,
605 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
606 {
607
608 if (first)
609 first = false;
610 else
611 gcc_assert (flag_sel_sched_pipelining_outer_loops);
612
613 flist_add (&fences, succ,
614 state_create (),
615 create_deps_context () /* dc */,
616 create_target_context (true) /* tc */,
617 NULL_RTX /* last_scheduled_insn */,
618 NULL, /* executing_insns */
619 XCNEWVEC (int, ready_ticks_size), /* ready_ticks */
620 ready_ticks_size,
621 NULL_RTX /* sched_next */,
622 1 /* cycle */, 0 /* cycle_issued_insns */,
623 issue_rate, /* issue_more */
624 1 /* starts_cycle_p */, 0 /* after_stall_p */);
625 }
626 }
627
628 /* Merge two fences (filling the fields of fence F with the resulting values)
629 by the following rules: 1) state, target context and last scheduled insn
630 are propagated from the fallthrough edge if it is available;
631 2) deps context and cycle are propagated from the more probable edge;
632 3) all other fields are set to the corresponding constant values.
633
634 INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS,
635 READY_TICKS, READY_TICKS_SIZE, SCHED_NEXT, CYCLE, ISSUE_MORE
636 and AFTER_STALL_P are the corresponding fields of the second fence. */
637 static void
638 merge_fences (fence_t f, insn_t insn,
639 state_t state, deps_t dc, void *tc,
640 rtx last_scheduled_insn, VEC(rtx, gc) *executing_insns,
641 int *ready_ticks, int ready_ticks_size,
642 rtx sched_next, int cycle, int issue_more, bool after_stall_p)
643 {
644 insn_t last_scheduled_insn_old = FENCE_LAST_SCHEDULED_INSN (f);
645
646 gcc_assert (sel_bb_head_p (FENCE_INSN (f))
647 && !sched_next && !FENCE_SCHED_NEXT (f));
648
649 /* Check if we can decide along which paths the fences came.
650 If we can't (or don't want to), reset all. */
651 if (last_scheduled_insn == NULL
652 || last_scheduled_insn_old == NULL
653 /* This is the case when INSN is reachable on several paths from
654 one insn (this can happen when pipelining of outer loops is on and
655 there are two edges: one going around the inner loop and the other
656 going right through it; in such a case just reset everything). */
657 || last_scheduled_insn == last_scheduled_insn_old)
658 {
659 state_reset (FENCE_STATE (f));
660 state_free (state);
661
662 reset_deps_context (FENCE_DC (f));
663 delete_deps_context (dc);
664
665 reset_target_context (FENCE_TC (f), true);
666 delete_target_context (tc);
667
668 if (cycle > FENCE_CYCLE (f))
669 FENCE_CYCLE (f) = cycle;
670
671 FENCE_LAST_SCHEDULED_INSN (f) = NULL;
672 FENCE_ISSUE_MORE (f) = issue_rate;
673 VEC_free (rtx, gc, executing_insns);
674 free (ready_ticks);
675 if (FENCE_EXECUTING_INSNS (f))
676 VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
677 VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
678 if (FENCE_READY_TICKS (f))
679 memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
680 }
681 else
682 {
683 edge edge_old = NULL, edge_new = NULL;
684 edge candidate;
685 succ_iterator si;
686 insn_t succ;
687
688 /* Find fallthrough edge. */
689 gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb);
690 candidate = find_fallthru_edge (BLOCK_FOR_INSN (insn)->prev_bb);
691
692 if (!candidate
693 || (candidate->src != BLOCK_FOR_INSN (last_scheduled_insn)
694 && candidate->src != BLOCK_FOR_INSN (last_scheduled_insn_old)))
695 {
696 /* No fallthrough edge leading to basic block of INSN. */
697 state_reset (FENCE_STATE (f));
698 state_free (state);
699
700 reset_target_context (FENCE_TC (f), true);
701 delete_target_context (tc);
702
703 FENCE_LAST_SCHEDULED_INSN (f) = NULL;
704 FENCE_ISSUE_MORE (f) = issue_rate;
705 }
706 else
707 if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn))
708 {
709 /* It would be weird if the same insn were the successor of several
710 fallthrough edges. */
711 gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
712 != BLOCK_FOR_INSN (last_scheduled_insn_old));
713
714 state_free (FENCE_STATE (f));
715 FENCE_STATE (f) = state;
716
717 delete_target_context (FENCE_TC (f));
718 FENCE_TC (f) = tc;
719
720 FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
721 FENCE_ISSUE_MORE (f) = issue_more;
722 }
723 else
724 {
725 /* Leave STATE, TC and LAST_SCHEDULED_INSN fields untouched. */
726 state_free (state);
727 delete_target_context (tc);
728
729 gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
730 != BLOCK_FOR_INSN (last_scheduled_insn));
731 }
732
733 /* Find edge of first predecessor (last_scheduled_insn_old->insn). */
734 FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn_old,
735 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
736 {
737 if (succ == insn)
738 {
739 /* No same successor allowed from several edges. */
740 gcc_assert (!edge_old);
741 edge_old = si.e1;
742 }
743 }
744 /* Find edge of second predecessor (last_scheduled_insn->insn). */
745 FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn,
746 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
747 {
748 if (succ == insn)
749 {
750 /* No same successor allowed from several edges. */
751 gcc_assert (!edge_new);
752 edge_new = si.e1;
753 }
754 }
755
756 /* Check if we can choose the most probable predecessor. */
757 if (edge_old == NULL || edge_new == NULL)
758 {
759 reset_deps_context (FENCE_DC (f));
760 delete_deps_context (dc);
761 VEC_free (rtx, gc, executing_insns);
762 free (ready_ticks);
763
764 FENCE_CYCLE (f) = MAX (FENCE_CYCLE (f), cycle);
765 if (FENCE_EXECUTING_INSNS (f))
766 VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
767 VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
768 if (FENCE_READY_TICKS (f))
769 memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
770 }
771 else
772 if (edge_new->probability > edge_old->probability)
773 {
774 delete_deps_context (FENCE_DC (f));
775 FENCE_DC (f) = dc;
776 VEC_free (rtx, gc, FENCE_EXECUTING_INSNS (f));
777 FENCE_EXECUTING_INSNS (f) = executing_insns;
778 free (FENCE_READY_TICKS (f));
779 FENCE_READY_TICKS (f) = ready_ticks;
780 FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
781 FENCE_CYCLE (f) = cycle;
782 }
783 else
784 {
785 /* Leave DC and CYCLE untouched. */
786 delete_deps_context (dc);
787 VEC_free (rtx, gc, executing_insns);
788 free (ready_ticks);
789 }
790 }
791
792 /* Fill remaining invariant fields. */
793 if (after_stall_p)
794 FENCE_AFTER_STALL_P (f) = 1;
795
796 FENCE_ISSUED_INSNS (f) = 0;
797 FENCE_STARTS_CYCLE_P (f) = 1;
798 FENCE_SCHED_NEXT (f) = NULL;
799 }
800
801 /* Add a new fence to NEW_FENCES list, initializing it from all
802 other parameters. */
803 static void
804 add_to_fences (flist_tail_t new_fences, insn_t insn,
805 state_t state, deps_t dc, void *tc, rtx last_scheduled_insn,
806 VEC(rtx, gc) *executing_insns, int *ready_ticks,
807 int ready_ticks_size, rtx sched_next, int cycle,
808 int cycle_issued_insns, int issue_rate,
809 bool starts_cycle_p, bool after_stall_p)
810 {
811 fence_t f = flist_lookup (FLIST_TAIL_HEAD (new_fences), insn);
812
813 if (! f)
814 {
815 flist_add (FLIST_TAIL_TAILP (new_fences), insn, state, dc, tc,
816 last_scheduled_insn, executing_insns, ready_ticks,
817 ready_ticks_size, sched_next, cycle, cycle_issued_insns,
818 issue_rate, starts_cycle_p, after_stall_p);
819
820 FLIST_TAIL_TAILP (new_fences)
821 = &FLIST_NEXT (*FLIST_TAIL_TAILP (new_fences));
822 }
823 else
824 {
825 merge_fences (f, insn, state, dc, tc, last_scheduled_insn,
826 executing_insns, ready_ticks, ready_ticks_size,
827 sched_next, cycle, issue_rate, after_stall_p);
828 }
829 }
830
831 /* Move the first fence in the OLD_FENCES list to NEW_FENCES. */
832 void
833 move_fence_to_fences (flist_t old_fences, flist_tail_t new_fences)
834 {
835 fence_t f, old;
836 flist_t *tailp = FLIST_TAIL_TAILP (new_fences);
837
838 old = FLIST_FENCE (old_fences);
839 f = flist_lookup (FLIST_TAIL_HEAD (new_fences),
840 FENCE_INSN (FLIST_FENCE (old_fences)));
841 if (f)
842 {
843 merge_fences (f, old->insn, old->state, old->dc, old->tc,
844 old->last_scheduled_insn, old->executing_insns,
845 old->ready_ticks, old->ready_ticks_size,
846 old->sched_next, old->cycle, old->issue_more,
847 old->after_stall_p);
848 }
849 else
850 {
851 _list_add (tailp);
852 FLIST_TAIL_TAILP (new_fences) = &FLIST_NEXT (*tailp);
853 *FLIST_FENCE (*tailp) = *old;
854 init_fence_for_scheduling (FLIST_FENCE (*tailp));
855 }
856 FENCE_INSN (old) = NULL;
857 }
858
859 /* Add a new fence to NEW_FENCES list and initialize most of its data
860 as a clean one. */
861 void
862 add_clean_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
863 {
864 int ready_ticks_size = get_max_uid () + 1;
865
866 add_to_fences (new_fences,
867 succ, state_create (), create_deps_context (),
868 create_target_context (true),
869 NULL_RTX, NULL,
870 XCNEWVEC (int, ready_ticks_size), ready_ticks_size,
871 NULL_RTX, FENCE_CYCLE (fence) + 1,
872 0, issue_rate, 1, FENCE_AFTER_STALL_P (fence));
873 }
874
875 /* Add a new fence to NEW_FENCES list and initialize all of its data
876 from FENCE and SUCC. */
877 void
878 add_dirty_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
879 {
880 int * new_ready_ticks
881 = XNEWVEC (int, FENCE_READY_TICKS_SIZE (fence));
882
883 memcpy (new_ready_ticks, FENCE_READY_TICKS (fence),
884 FENCE_READY_TICKS_SIZE (fence) * sizeof (int));
885 add_to_fences (new_fences,
886 succ, state_create_copy (FENCE_STATE (fence)),
887 create_copy_of_deps_context (FENCE_DC (fence)),
888 create_copy_of_target_context (FENCE_TC (fence)),
889 FENCE_LAST_SCHEDULED_INSN (fence),
890 VEC_copy (rtx, gc, FENCE_EXECUTING_INSNS (fence)),
891 new_ready_ticks,
892 FENCE_READY_TICKS_SIZE (fence),
893 FENCE_SCHED_NEXT (fence),
894 FENCE_CYCLE (fence),
895 FENCE_ISSUED_INSNS (fence),
896 FENCE_ISSUE_MORE (fence),
897 FENCE_STARTS_CYCLE_P (fence),
898 FENCE_AFTER_STALL_P (fence));
899 }
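
/* Illustrative sketch (guarded out, not part of the pass): propagating a
   fence to a successor insn SUCC.  The actual choice between a clean and a
   dirty fence is made by the scheduler proper; KEEP_STATE_P is only a
   placeholder for that decision.  */
#if 0
static void
fence_propagation_sketch (flist_tail_t new_fences, insn_t succ, fence_t fence,
                          bool keep_state_p)
{
  if (keep_state_p)
    /* Carry the DFA state, deps context and target context over to SUCC.  */
    add_dirty_fence_to_fences (new_fences, succ, fence);
  else
    /* Restart with fresh state at SUCC.  */
    add_clean_fence_to_fences (new_fences, succ, fence);
}
#endif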
900 \f
901
902 /* Functions to work with regset and nop pools. */
903
904 /* Return a new regset from the pool. It might have some of the bits set
905 from previous usage. */
906 regset
907 get_regset_from_pool (void)
908 {
909 regset rs;
910
911 if (regset_pool.n != 0)
912 rs = regset_pool.v[--regset_pool.n];
913 else
914 /* We need to create the regset. */
915 {
916 rs = ALLOC_REG_SET (&reg_obstack);
917
918 if (regset_pool.nn == regset_pool.ss)
919 regset_pool.vv = XRESIZEVEC (regset, regset_pool.vv,
920 (regset_pool.ss = 2 * regset_pool.ss + 1));
921 regset_pool.vv[regset_pool.nn++] = rs;
922 }
923
924 regset_pool.diff++;
925
926 return rs;
927 }
928
929 /* Same as above, but returns an empty regset. */
930 regset
931 get_clear_regset_from_pool (void)
932 {
933 regset rs = get_regset_from_pool ();
934
935 CLEAR_REG_SET (rs);
936 return rs;
937 }
938
939 /* Return regset RS to the pool for future use. */
940 void
941 return_regset_to_pool (regset rs)
942 {
943 regset_pool.diff--;
944
945 if (regset_pool.n == regset_pool.s)
946 regset_pool.v = XRESIZEVEC (regset, regset_pool.v,
947 (regset_pool.s = 2 * regset_pool.s + 1));
948 regset_pool.v[regset_pool.n++] = rs;
949 }
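
/* Illustrative sketch (guarded out, not part of the pass): every regset
   taken from the pool must be returned to it, otherwise free_regset_pool
   will assert on the outstanding difference tracked in regset_pool.diff.  */
#if 0
static void
regset_pool_sketch (void)
{
  regset live = get_clear_regset_from_pool ();  /* Guaranteed empty.  */
  regset tmp = get_regset_from_pool ();         /* May contain stale bits.  */

  CLEAR_REG_SET (tmp);
  /* ... use LIVE and TMP ... */

  return_regset_to_pool (live);
  return_regset_to_pool (tmp);
}
#endif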
950
951 #ifdef ENABLE_CHECKING
952 /* This is used as a qsort callback for sorting regset pool stacks.
953 X and XX are addresses of two regsets. They are never equal. */
954 static int
955 cmp_v_in_regset_pool (const void *x, const void *xx)
956 {
957 return *((const regset *) x) - *((const regset *) xx);
958 }
959 #endif
960
961 /* Free the regset pool possibly checking for memory leaks. */
962 void
963 free_regset_pool (void)
964 {
965 #ifdef ENABLE_CHECKING
966 {
967 regset *v = regset_pool.v;
968 int i = 0;
969 int n = regset_pool.n;
970
971 regset *vv = regset_pool.vv;
972 int ii = 0;
973 int nn = regset_pool.nn;
974
975 int diff = 0;
976
977 gcc_assert (n <= nn);
978
979 /* Sort both vectors so it will be possible to compare them. */
980 qsort (v, n, sizeof (*v), cmp_v_in_regset_pool);
981 qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool);
982
983 while (ii < nn)
984 {
985 if (v[i] == vv[ii])
986 i++;
987 else
988 /* VV[II] was lost. */
989 diff++;
990
991 ii++;
992 }
993
994 gcc_assert (diff == regset_pool.diff);
995 }
996 #endif
997
998 /* If this is not true, we have a memory leak. */
999 gcc_assert (regset_pool.diff == 0);
1000
1001 while (regset_pool.n)
1002 {
1003 --regset_pool.n;
1004 FREE_REG_SET (regset_pool.v[regset_pool.n]);
1005 }
1006
1007 free (regset_pool.v);
1008 regset_pool.v = NULL;
1009 regset_pool.s = 0;
1010
1011 free (regset_pool.vv);
1012 regset_pool.vv = NULL;
1013 regset_pool.nn = 0;
1014 regset_pool.ss = 0;
1015
1016 regset_pool.diff = 0;
1017 }
1018 \f
1019
1020 /* Functions to work with nop pools. NOP insns are used as temporary
1021 placeholders for the insns being scheduled, to allow correct update of
1022 the data sets. When the update is finished, the NOPs are deleted. */
1023
1024 /* A vinsn that is used to represent a nop. This vinsn is shared among all
1025 nops sel-sched generates. */
1026 static vinsn_t nop_vinsn = NULL;
1027
1028 /* Emit a nop before INSN, taking it from the pool. */
1029 insn_t
1030 get_nop_from_pool (insn_t insn)
1031 {
1032 insn_t nop;
1033 bool old_p = nop_pool.n != 0;
1034 int flags;
1035
1036 if (old_p)
1037 nop = nop_pool.v[--nop_pool.n];
1038 else
1039 nop = nop_pattern;
1040
1041 nop = emit_insn_before (nop, insn);
1042
1043 if (old_p)
1044 flags = INSN_INIT_TODO_SSID;
1045 else
1046 flags = INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID;
1047
1048 set_insn_init (INSN_EXPR (insn), nop_vinsn, INSN_SEQNO (insn));
1049 sel_init_new_insn (nop, flags);
1050
1051 return nop;
1052 }
1053
1054 /* Remove NOP from the instruction stream and return it to the pool. */
1055 void
1056 return_nop_to_pool (insn_t nop, bool full_tidying)
1057 {
1058 gcc_assert (INSN_IN_STREAM_P (nop));
1059 sel_remove_insn (nop, false, full_tidying);
1060
1061 if (nop_pool.n == nop_pool.s)
1062 nop_pool.v = XRESIZEVEC (rtx, nop_pool.v,
1063 (nop_pool.s = 2 * nop_pool.s + 1));
1064 nop_pool.v[nop_pool.n++] = nop;
1065 }
1066
1067 /* Free the nop pool. */
1068 void
1069 free_nop_pool (void)
1070 {
1071 nop_pool.n = 0;
1072 nop_pool.s = 0;
1073 free (nop_pool.v);
1074 nop_pool.v = NULL;
1075 }
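
/* Illustrative sketch (guarded out, not part of the pass): a NOP taken from
   the pool acts as a placeholder in the insn stream while data sets are
   updated, and is returned once the update is done.  INSN is assumed to be
   an insn of the current region.  */
#if 0
static void
nop_pool_sketch (insn_t insn)
{
  insn_t nop = get_nop_from_pool (insn);   /* Emitted right before INSN.  */

  /* ... update data sets while NOP holds the place ... */

  return_nop_to_pool (nop, true);          /* Remove from stream and tidy.  */
}
#endif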
1076 \f
1077
1078 /* Skip unspecs to support ia64 speculation. Called from rtx_equal_p_cb.
1079 The callback is given two rtxes XX and YY and writes the new rtxes
1080 to NX and NY in case one of them needs to be skipped. */
1081 static int
1082 skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx* ny)
1083 {
1084 const_rtx x = *xx;
1085 const_rtx y = *yy;
1086
1087 if (GET_CODE (x) == UNSPEC
1088 && (targetm.sched.skip_rtx_p == NULL
1089 || targetm.sched.skip_rtx_p (x)))
1090 {
1091 *nx = XVECEXP (x, 0, 0);
1092 *ny = CONST_CAST_RTX (y);
1093 return 1;
1094 }
1095
1096 if (GET_CODE (y) == UNSPEC
1097 && (targetm.sched.skip_rtx_p == NULL
1098 || targetm.sched.skip_rtx_p (y)))
1099 {
1100 *nx = CONST_CAST_RTX (x);
1101 *ny = XVECEXP (y, 0, 0);
1102 return 1;
1103 }
1104
1105 return 0;
1106 }
1107
1108 /* Callback, called from hash_rtx_cb. Helps to hash UNSPEC rtx X in a correct
1109 way to support ia64 speculation. When changes are needed, a new rtx NX and
1110 a new mode NMODE are written, and the callback returns true. */
1111 static int
1112 hash_with_unspec_callback (const_rtx x, enum machine_mode mode ATTRIBUTE_UNUSED,
1113 rtx *nx, enum machine_mode* nmode)
1114 {
1115 if (GET_CODE (x) == UNSPEC
1116 && targetm.sched.skip_rtx_p
1117 && targetm.sched.skip_rtx_p (x))
1118 {
1119 *nx = XVECEXP (x, 0 ,0);
1120 *nmode = VOIDmode;
1121 return 1;
1122 }
1123
1124 return 0;
1125 }
1126
1127 /* Return true if LHS and RHS can be scheduled separately. */
1128 static bool
1129 lhs_and_rhs_separable_p (rtx lhs, rtx rhs)
1130 {
1131 if (lhs == NULL || rhs == NULL)
1132 return false;
1133
1134 /* Do not schedule CONST, CONST_INT, CONST_DOUBLE etc. as rhs: there is no
1135 point in using a reg if a const can be used. Moreover, scheduling a const
1136 as rhs may lead to mode mismatches, because consts don't have modes but
1137 could be merged from branches where the same const is used in different modes. */
1138 if (CONSTANT_P (rhs))
1139 return false;
1140
1141 /* ??? Do not rename predicate registers to avoid ICEs in bundling. */
1142 if (COMPARISON_P (rhs))
1143 return false;
1144
1145 /* Do not allow single REG to be an rhs. */
1146 if (REG_P (rhs))
1147 return false;
1148
1149 /* See comment at find_used_regs_1 (*1) for explanation of this
1150 restriction. */
1151 /* FIXME: remove this later. */
1152 if (MEM_P (lhs))
1153 return false;
1154
1155 /* This will filter all tricky things like ZERO_EXTRACT etc.
1156 For now we don't handle them. */
1157 if (!REG_P (lhs) && !MEM_P (lhs))
1158 return false;
1159
1160 return true;
1161 }
1162
1163 /* Initialize vinsn VI for INSN. Only for use from vinsn_create (). When
1164 FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable. This is
1165 used e.g. for insns from recovery blocks. */
1166 static void
1167 vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p)
1168 {
1169 hash_rtx_callback_function hrcf;
1170 int insn_class;
1171
1172 VINSN_INSN_RTX (vi) = insn;
1173 VINSN_COUNT (vi) = 0;
1174 vi->cost = -1;
1175
1176 if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL)
1177 init_id_from_df (VINSN_ID (vi), insn, force_unique_p);
1178 else
1179 deps_init_id (VINSN_ID (vi), insn, force_unique_p);
1180
1181 /* Hash vinsn depending on whether it is separable or not. */
1182 hrcf = targetm.sched.skip_rtx_p ? hash_with_unspec_callback : NULL;
1183 if (VINSN_SEPARABLE_P (vi))
1184 {
1185 rtx rhs = VINSN_RHS (vi);
1186
1187 VINSN_HASH (vi) = hash_rtx_cb (rhs, GET_MODE (rhs),
1188 NULL, NULL, false, hrcf);
1189 VINSN_HASH_RTX (vi) = hash_rtx_cb (VINSN_PATTERN (vi),
1190 VOIDmode, NULL, NULL,
1191 false, hrcf);
1192 }
1193 else
1194 {
1195 VINSN_HASH (vi) = hash_rtx_cb (VINSN_PATTERN (vi), VOIDmode,
1196 NULL, NULL, false, hrcf);
1197 VINSN_HASH_RTX (vi) = VINSN_HASH (vi);
1198 }
1199
1200 insn_class = haifa_classify_insn (insn);
1201 if (insn_class >= 2
1202 && (!targetm.sched.get_insn_spec_ds
1203 || ((targetm.sched.get_insn_spec_ds (insn) & BEGIN_CONTROL)
1204 == 0)))
1205 VINSN_MAY_TRAP_P (vi) = true;
1206 else
1207 VINSN_MAY_TRAP_P (vi) = false;
1208 }
1209
1210 /* Indicate that VI has become part of an rtx object. */
1211 void
1212 vinsn_attach (vinsn_t vi)
1213 {
1214 /* Assert that VI is not pending for deletion. */
1215 gcc_assert (VINSN_INSN_RTX (vi));
1216
1217 VINSN_COUNT (vi)++;
1218 }
1219
1220 /* Create and init VI from INSN. Use FORCE_UNIQUE_P for determining the
1221 correct VINSN_TYPE (VI). */
1222 static vinsn_t
1223 vinsn_create (insn_t insn, bool force_unique_p)
1224 {
1225 vinsn_t vi = XCNEW (struct vinsn_def);
1226
1227 vinsn_init (vi, insn, force_unique_p);
1228 return vi;
1229 }
1230
1231 /* Return a copy of VI. When REATTACH_P is true, detach VI and attach
1232 the copy. */
1233 vinsn_t
1234 vinsn_copy (vinsn_t vi, bool reattach_p)
1235 {
1236 rtx copy;
1237 bool unique = VINSN_UNIQUE_P (vi);
1238 vinsn_t new_vi;
1239
1240 copy = create_copy_of_insn_rtx (VINSN_INSN_RTX (vi));
1241 new_vi = create_vinsn_from_insn_rtx (copy, unique);
1242 if (reattach_p)
1243 {
1244 vinsn_detach (vi);
1245 vinsn_attach (new_vi);
1246 }
1247
1248 return new_vi;
1249 }
1250
1251 /* Delete the VI vinsn and free its data. */
1252 static void
1253 vinsn_delete (vinsn_t vi)
1254 {
1255 gcc_assert (VINSN_COUNT (vi) == 0);
1256
1257 return_regset_to_pool (VINSN_REG_SETS (vi));
1258 return_regset_to_pool (VINSN_REG_USES (vi));
1259 return_regset_to_pool (VINSN_REG_CLOBBERS (vi));
1260
1261 free (vi);
1262 }
1263
1264 /* Indicate that VI is no longer a part of some rtx object.
1265 Remove VI if it is no longer needed. */
1266 void
1267 vinsn_detach (vinsn_t vi)
1268 {
1269 gcc_assert (VINSN_COUNT (vi) > 0);
1270
1271 if (--VINSN_COUNT (vi) == 0)
1272 vinsn_delete (vi);
1273 }
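
/* Illustrative sketch (guarded out, not part of the pass): vinsns are
   reference counted, so every vinsn_attach must eventually be matched by a
   vinsn_detach; the last detach frees the vinsn and returns its register
   sets to the regset pool.  */
#if 0
static void
vinsn_refcount_sketch (vinsn_t vi)
{
  vinsn_attach (vi);   /* VI is now also owned by this code.  */

  /* ... store VI in some data structure and use it ... */

  vinsn_detach (vi);   /* Freed here if this was the last reference.  */
}
#endif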
1274
1275 /* Returns TRUE if VI is a branch. */
1276 bool
1277 vinsn_cond_branch_p (vinsn_t vi)
1278 {
1279 insn_t insn;
1280
1281 if (!VINSN_UNIQUE_P (vi))
1282 return false;
1283
1284 insn = VINSN_INSN_RTX (vi);
1285 if (BB_END (BLOCK_FOR_INSN (insn)) != insn)
1286 return false;
1287
1288 return control_flow_insn_p (insn);
1289 }
1290
1291 /* Return latency of INSN. */
1292 static int
1293 sel_insn_rtx_cost (rtx insn)
1294 {
1295 int cost;
1296
1297 /* A USE insn, or something else we don't need to
1298 understand. We can't pass these directly to
1299 result_ready_cost or insn_default_latency because it will
1300 trigger a fatal error for unrecognizable insns. */
1301 if (recog_memoized (insn) < 0)
1302 cost = 0;
1303 else
1304 {
1305 cost = insn_default_latency (insn);
1306
1307 if (cost < 0)
1308 cost = 0;
1309 }
1310
1311 return cost;
1312 }
1313
1314 /* Return the cost of the VI.
1315 !!! FIXME: Unify with haifa-sched.c: insn_cost (). */
1316 int
1317 sel_vinsn_cost (vinsn_t vi)
1318 {
1319 int cost = vi->cost;
1320
1321 if (cost < 0)
1322 {
1323 cost = sel_insn_rtx_cost (VINSN_INSN_RTX (vi));
1324 vi->cost = cost;
1325 }
1326
1327 return cost;
1328 }
1329 \f
1330
1331 /* Functions for insn emitting. */
1332
1333 /* Emit new insn after AFTER based on PATTERN and initialize its data from
1334 EXPR and SEQNO. */
1335 insn_t
1336 sel_gen_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, insn_t after)
1337 {
1338 insn_t new_insn;
1339
1340 gcc_assert (EXPR_TARGET_AVAILABLE (expr) == true);
1341
1342 new_insn = emit_insn_after (pattern, after);
1343 set_insn_init (expr, NULL, seqno);
1344 sel_init_new_insn (new_insn, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID);
1345
1346 return new_insn;
1347 }
1348
1349 /* Force newly generated vinsns to be unique. */
1350 static bool init_insn_force_unique_p = false;
1351
1352 /* Emit new speculation recovery insn after AFTER based on PATTERN and
1353 initialize its data from EXPR and SEQNO. */
1354 insn_t
1355 sel_gen_recovery_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno,
1356 insn_t after)
1357 {
1358 insn_t insn;
1359
1360 gcc_assert (!init_insn_force_unique_p);
1361
1362 init_insn_force_unique_p = true;
1363 insn = sel_gen_insn_from_rtx_after (pattern, expr, seqno, after);
1364 CANT_MOVE (insn) = 1;
1365 init_insn_force_unique_p = false;
1366
1367 return insn;
1368 }
1369
1370 /* Emit new insn after AFTER based on EXPR and SEQNO. If VINSN is not NULL,
1371 take it as a new vinsn instead of EXPR's vinsn.
1372 We simplify insns later, after scheduling the region, in
1373 simplify_changed_insns. */
1374 insn_t
1375 sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
1376 insn_t after)
1377 {
1378 expr_t emit_expr;
1379 insn_t insn;
1380 int flags;
1381
1382 emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr),
1383 seqno);
1384 insn = EXPR_INSN_RTX (emit_expr);
1385 add_insn_after (insn, after, BLOCK_FOR_INSN (insn));
1386
1387 flags = INSN_INIT_TODO_SSID;
1388 if (INSN_LUID (insn) == 0)
1389 flags |= INSN_INIT_TODO_LUID;
1390 sel_init_new_insn (insn, flags);
1391
1392 return insn;
1393 }
1394
1395 /* Move the insn from EXPR to just after AFTER. */
1396 insn_t
1397 sel_move_insn (expr_t expr, int seqno, insn_t after)
1398 {
1399 insn_t insn = EXPR_INSN_RTX (expr);
1400 basic_block bb = BLOCK_FOR_INSN (after);
1401 insn_t next = NEXT_INSN (after);
1402
1403 /* Assert that in move_op we disconnected this insn properly. */
1404 gcc_assert (EXPR_VINSN (INSN_EXPR (insn)) != NULL);
1405 PREV_INSN (insn) = after;
1406 NEXT_INSN (insn) = next;
1407
1408 NEXT_INSN (after) = insn;
1409 PREV_INSN (next) = insn;
1410
1411 /* Update links from insn to bb and vice versa. */
1412 df_insn_change_bb (insn, bb);
1413 if (BB_END (bb) == after)
1414 BB_END (bb) = insn;
1415
1416 prepare_insn_expr (insn, seqno);
1417 return insn;
1418 }
1419
1420 \f
1421 /* Functions to work with right-hand sides. */
1422
1423 /* Search for an element determined by UID/NEW_VINSN in the sorted vector
1424 VECT and return true when found. Use NEW_VINSN for comparison only when
1425 COMPARE_VINSNS is true. Write to INDP the index on which
1426 the search has stopped, such that inserting the new element at INDP will
1427 retain VECT's sort order. */
1428 static bool
1429 find_in_history_vect_1 (VEC(expr_history_def, heap) *vect,
1430 unsigned uid, vinsn_t new_vinsn,
1431 bool compare_vinsns, int *indp)
1432 {
1433 expr_history_def *arr;
1434 int i, j, len = VEC_length (expr_history_def, vect);
1435
1436 if (len == 0)
1437 {
1438 *indp = 0;
1439 return false;
1440 }
1441
1442 arr = VEC_address (expr_history_def, vect);
1443 i = 0, j = len - 1;
1444
1445 while (i <= j)
1446 {
1447 unsigned auid = arr[i].uid;
1448 vinsn_t avinsn = arr[i].new_expr_vinsn;
1449
1450 if (auid == uid
1451 /* When undoing transformation on a bookkeeping copy, the new vinsn
1452 may not be exactly equal to the one that is saved in the vector.
1453 This is because the insn whose copy we're checking was possibly
1454 substituted itself. */
1455 && (! compare_vinsns
1456 || vinsn_equal_p (avinsn, new_vinsn)))
1457 {
1458 *indp = i;
1459 return true;
1460 }
1461 else if (auid > uid)
1462 break;
1463 i++;
1464 }
1465
1466 *indp = i;
1467 return false;
1468 }
1469
1470 /* Search for the UID of INSN and NEW_VINSN in a sorted vector VECT. Return
1471 the position found or -1 if no such value is in the vector.
1472 Also search for the UIDs of INSN's originators, if ORIGINATORS_P is true. */
1473 int
1474 find_in_history_vect (VEC(expr_history_def, heap) *vect, rtx insn,
1475 vinsn_t new_vinsn, bool originators_p)
1476 {
1477 int ind;
1478
1479 if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn,
1480 false, &ind))
1481 return ind;
1482
1483 if (INSN_ORIGINATORS (insn) && originators_p)
1484 {
1485 unsigned uid;
1486 bitmap_iterator bi;
1487
1488 EXECUTE_IF_SET_IN_BITMAP (INSN_ORIGINATORS (insn), 0, uid, bi)
1489 if (find_in_history_vect_1 (vect, uid, new_vinsn, false, &ind))
1490 return ind;
1491 }
1492
1493 return -1;
1494 }
1495
1496 /* Insert new element in a sorted history vector pointed to by PVECT,
1497 if it is not there already. The element is searched using
1498 UID/NEW_EXPR_VINSN pair. TYPE, OLD_EXPR_VINSN and SPEC_DS save
1499 the history of a transformation. */
1500 void
1501 insert_in_history_vect (VEC (expr_history_def, heap) **pvect,
1502 unsigned uid, enum local_trans_type type,
1503 vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn,
1504 ds_t spec_ds)
1505 {
1506 VEC(expr_history_def, heap) *vect = *pvect;
1507 expr_history_def temp;
1508 bool res;
1509 int ind;
1510
1511 res = find_in_history_vect_1 (vect, uid, new_expr_vinsn, true, &ind);
1512
1513 if (res)
1514 {
1515 expr_history_def *phist = VEC_index (expr_history_def, vect, ind);
1516
1517 /* It is possible that speculation types of expressions that were
1518 propagated through different paths will be different here. In this
1519 case, merge the status to get the correct check later. */
1520 if (phist->spec_ds != spec_ds)
1521 phist->spec_ds = ds_max_merge (phist->spec_ds, spec_ds);
1522 return;
1523 }
1524
1525 temp.uid = uid;
1526 temp.old_expr_vinsn = old_expr_vinsn;
1527 temp.new_expr_vinsn = new_expr_vinsn;
1528 temp.spec_ds = spec_ds;
1529 temp.type = type;
1530
1531 vinsn_attach (old_expr_vinsn);
1532 vinsn_attach (new_expr_vinsn);
1533 VEC_safe_insert (expr_history_def, heap, vect, ind, &temp);
1534 *pvect = vect;
1535 }
1536
1537 /* Free the history vector pointed to by PVECT. */
1538 static void
1539 free_history_vect (VEC (expr_history_def, heap) **pvect)
1540 {
1541 unsigned i;
1542 expr_history_def *phist;
1543
1544 if (! *pvect)
1545 return;
1546
1547 for (i = 0;
1548 VEC_iterate (expr_history_def, *pvect, i, phist);
1549 i++)
1550 {
1551 vinsn_detach (phist->old_expr_vinsn);
1552 vinsn_detach (phist->new_expr_vinsn);
1553 }
1554
1555 VEC_free (expr_history_def, heap, *pvect);
1556 *pvect = NULL;
1557 }
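
/* Illustrative sketch (guarded out, not part of the pass): record a
   speculation-type transformation in EXPR's history and look it up again.
   EXPR, SPLIT and the two vinsns are assumed to be supplied by the caller.  */
#if 0
static void
history_vect_sketch (expr_t expr, insn_t split, vinsn_t old_vi, vinsn_t new_vi)
{
  insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr), INSN_UID (split),
                          TRANS_SPECULATION, old_vi, new_vi, 0);

  if (find_in_history_vect (EXPR_HISTORY_OF_CHANGES (expr), split,
                            new_vi, false) >= 0)
    {
      /* The transformation was found in the (sorted) history vector.  */
    }
}
#endif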
1558
1559
1560 /* Compare two vinsns as rhses if possible and as vinsns otherwise. */
1561 bool
1562 vinsn_equal_p (vinsn_t x, vinsn_t y)
1563 {
1564 rtx_equal_p_callback_function repcf;
1565
1566 if (x == y)
1567 return true;
1568
1569 if (VINSN_TYPE (x) != VINSN_TYPE (y))
1570 return false;
1571
1572 if (VINSN_HASH (x) != VINSN_HASH (y))
1573 return false;
1574
1575 repcf = targetm.sched.skip_rtx_p ? skip_unspecs_callback : NULL;
1576 if (VINSN_SEPARABLE_P (x))
1577 {
1578 /* Compare RHSes of VINSNs. */
1579 gcc_assert (VINSN_RHS (x));
1580 gcc_assert (VINSN_RHS (y));
1581
1582 return rtx_equal_p_cb (VINSN_RHS (x), VINSN_RHS (y), repcf);
1583 }
1584
1585 return rtx_equal_p_cb (VINSN_PATTERN (x), VINSN_PATTERN (y), repcf);
1586 }
1587 \f
1588
1589 /* Functions for working with expressions. */
1590
1591 /* Initialize EXPR. */
1592 static void
1593 init_expr (expr_t expr, vinsn_t vi, int spec, int use, int priority,
1594 int sched_times, int orig_bb_index, ds_t spec_done_ds,
1595 ds_t spec_to_check_ds, int orig_sched_cycle,
1596 VEC(expr_history_def, heap) *history, bool target_available,
1597 bool was_substituted, bool was_renamed, bool needs_spec_check_p,
1598 bool cant_move)
1599 {
1600 vinsn_attach (vi);
1601
1602 EXPR_VINSN (expr) = vi;
1603 EXPR_SPEC (expr) = spec;
1604 EXPR_USEFULNESS (expr) = use;
1605 EXPR_PRIORITY (expr) = priority;
1606 EXPR_PRIORITY_ADJ (expr) = 0;
1607 EXPR_SCHED_TIMES (expr) = sched_times;
1608 EXPR_ORIG_BB_INDEX (expr) = orig_bb_index;
1609 EXPR_ORIG_SCHED_CYCLE (expr) = orig_sched_cycle;
1610 EXPR_SPEC_DONE_DS (expr) = spec_done_ds;
1611 EXPR_SPEC_TO_CHECK_DS (expr) = spec_to_check_ds;
1612
1613 if (history)
1614 EXPR_HISTORY_OF_CHANGES (expr) = history;
1615 else
1616 EXPR_HISTORY_OF_CHANGES (expr) = NULL;
1617
1618 EXPR_TARGET_AVAILABLE (expr) = target_available;
1619 EXPR_WAS_SUBSTITUTED (expr) = was_substituted;
1620 EXPR_WAS_RENAMED (expr) = was_renamed;
1621 EXPR_NEEDS_SPEC_CHECK_P (expr) = needs_spec_check_p;
1622 EXPR_CANT_MOVE (expr) = cant_move;
1623 }
1624
1625 /* Make a copy of the expr FROM into the expr TO. */
1626 void
1627 copy_expr (expr_t to, expr_t from)
1628 {
1629 VEC(expr_history_def, heap) *temp = NULL;
1630
1631 if (EXPR_HISTORY_OF_CHANGES (from))
1632 {
1633 unsigned i;
1634 expr_history_def *phist;
1635
1636 temp = VEC_copy (expr_history_def, heap, EXPR_HISTORY_OF_CHANGES (from));
1637 for (i = 0;
1638 VEC_iterate (expr_history_def, temp, i, phist);
1639 i++)
1640 {
1641 vinsn_attach (phist->old_expr_vinsn);
1642 vinsn_attach (phist->new_expr_vinsn);
1643 }
1644 }
1645
1646 init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from),
1647 EXPR_USEFULNESS (from), EXPR_PRIORITY (from),
1648 EXPR_SCHED_TIMES (from), EXPR_ORIG_BB_INDEX (from),
1649 EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from),
1650 EXPR_ORIG_SCHED_CYCLE (from), temp,
1651 EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
1652 EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
1653 EXPR_CANT_MOVE (from));
1654 }
1655
1656 /* Same, but the final expr will never be in av sets, so don't copy
1657 "uninteresting" data such as the bitmap cache. */
1658 void
1659 copy_expr_onside (expr_t to, expr_t from)
1660 {
1661 init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), EXPR_USEFULNESS (from),
1662 EXPR_PRIORITY (from), EXPR_SCHED_TIMES (from), 0,
1663 EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), 0, NULL,
1664 EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
1665 EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
1666 EXPR_CANT_MOVE (from));
1667 }
1668
1669 /* Prepare the expr of INSN for scheduling. Used when moving insn and when
1670 initializing new insns. */
1671 static void
1672 prepare_insn_expr (insn_t insn, int seqno)
1673 {
1674 expr_t expr = INSN_EXPR (insn);
1675 ds_t ds;
1676
1677 INSN_SEQNO (insn) = seqno;
1678 EXPR_ORIG_BB_INDEX (expr) = BLOCK_NUM (insn);
1679 EXPR_SPEC (expr) = 0;
1680 EXPR_ORIG_SCHED_CYCLE (expr) = 0;
1681 EXPR_WAS_SUBSTITUTED (expr) = 0;
1682 EXPR_WAS_RENAMED (expr) = 0;
1683 EXPR_TARGET_AVAILABLE (expr) = 1;
1684 INSN_LIVE_VALID_P (insn) = false;
1685
1686 /* ??? If this expression is speculative, make its dependence
1687 as weak as possible. We can filter this expression later
1688 in process_spec_exprs, because we do not distinguish
1689 between the status we got during compute_av_set and the
1690 existing status. To be fixed. */
1691 ds = EXPR_SPEC_DONE_DS (expr);
1692 if (ds)
1693 EXPR_SPEC_DONE_DS (expr) = ds_get_max_dep_weak (ds);
1694
1695 free_history_vect (&EXPR_HISTORY_OF_CHANGES (expr));
1696 }
1697
1698 /* Update target_available bits when merging exprs TO and FROM. SPLIT_POINT
1699 is non-null when expressions are merged from different successors at
1700 a split point. */
1701 static void
1702 update_target_availability (expr_t to, expr_t from, insn_t split_point)
1703 {
1704 if (EXPR_TARGET_AVAILABLE (to) < 0
1705 || EXPR_TARGET_AVAILABLE (from) < 0)
1706 EXPR_TARGET_AVAILABLE (to) = -1;
1707 else
1708 {
1709 /* We try to detect the case when one of the expressions
1710 can only be reached through another one. In this case,
1711 we can do better. */
1712 if (split_point == NULL)
1713 {
1714 int toind, fromind;
1715
1716 toind = EXPR_ORIG_BB_INDEX (to);
1717 fromind = EXPR_ORIG_BB_INDEX (from);
1718
1719 if (toind && toind == fromind)
1720 /* Do nothing -- everything is done in
1721 merge_with_other_exprs. */
1722 ;
1723 else
1724 EXPR_TARGET_AVAILABLE (to) = -1;
1725 }
1726 else
1727 EXPR_TARGET_AVAILABLE (to) &= EXPR_TARGET_AVAILABLE (from);
1728 }
1729 }
1730
1731 /* Update speculation bits when merging exprs TO and FROM. SPLIT_POINT
1732 is non-null when expressions are merged from different successors at
1733 a split point. */
1734 static void
1735 update_speculative_bits (expr_t to, expr_t from, insn_t split_point)
1736 {
1737 ds_t old_to_ds, old_from_ds;
1738
1739 old_to_ds = EXPR_SPEC_DONE_DS (to);
1740 old_from_ds = EXPR_SPEC_DONE_DS (from);
1741
1742 EXPR_SPEC_DONE_DS (to) = ds_max_merge (old_to_ds, old_from_ds);
1743 EXPR_SPEC_TO_CHECK_DS (to) |= EXPR_SPEC_TO_CHECK_DS (from);
1744 EXPR_NEEDS_SPEC_CHECK_P (to) |= EXPR_NEEDS_SPEC_CHECK_P (from);
1745
1746 /* When merging e.g. control & data speculative exprs, or a control
1747 speculative with a control&data speculative one, we really have
1748 to change vinsn too. Also, when speculative status is changed,
1749 we also need to record this as a transformation in expr's history. */
1750 if ((old_to_ds & SPECULATIVE) || (old_from_ds & SPECULATIVE))
1751 {
1752 old_to_ds = ds_get_speculation_types (old_to_ds);
1753 old_from_ds = ds_get_speculation_types (old_from_ds);
1754
1755 if (old_to_ds != old_from_ds)
1756 {
1757 ds_t record_ds;
1758
1759 /* When both expressions are speculative, we need to change
1760 the vinsn first. */
1761 if ((old_to_ds & SPECULATIVE) && (old_from_ds & SPECULATIVE))
1762 {
1763 int res;
1764
1765 res = speculate_expr (to, EXPR_SPEC_DONE_DS (to));
1766 gcc_assert (res >= 0);
1767 }
1768
1769 if (split_point != NULL)
1770 {
1771 /* Record the change with proper status. */
1772 record_ds = EXPR_SPEC_DONE_DS (to) & SPECULATIVE;
1773 record_ds &= ~(old_to_ds & SPECULATIVE);
1774 record_ds &= ~(old_from_ds & SPECULATIVE);
1775
1776 insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
1777 INSN_UID (split_point), TRANS_SPECULATION,
1778 EXPR_VINSN (from), EXPR_VINSN (to),
1779 record_ds);
1780 }
1781 }
1782 }
1783 }
1784
1785
1786 /* Merge bits of FROM expr to TO expr. When SPLIT_POINT is not NULL,
1787 this is done along different paths. */
1788 void
1789 merge_expr_data (expr_t to, expr_t from, insn_t split_point)
1790 {
1791 int i;
1792 expr_history_def *phist;
1793
1794 /* For now, we just set the spec of the resulting expr to be the minimum of
1795 the specs of the merged exprs. */
1796 if (EXPR_SPEC (to) > EXPR_SPEC (from))
1797 EXPR_SPEC (to) = EXPR_SPEC (from);
1798
1799 if (split_point)
1800 EXPR_USEFULNESS (to) += EXPR_USEFULNESS (from);
1801 else
1802 EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to),
1803 EXPR_USEFULNESS (from));
1804
1805 if (EXPR_PRIORITY (to) < EXPR_PRIORITY (from))
1806 EXPR_PRIORITY (to) = EXPR_PRIORITY (from);
1807
1808 if (EXPR_SCHED_TIMES (to) > EXPR_SCHED_TIMES (from))
1809 EXPR_SCHED_TIMES (to) = EXPR_SCHED_TIMES (from);
1810
1811 if (EXPR_ORIG_BB_INDEX (to) != EXPR_ORIG_BB_INDEX (from))
1812 EXPR_ORIG_BB_INDEX (to) = 0;
1813
1814 EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to),
1815 EXPR_ORIG_SCHED_CYCLE (from));
1816
1817 /* We keep this vector sorted. */
1818 for (i = 0;
1819 VEC_iterate (expr_history_def, EXPR_HISTORY_OF_CHANGES (from),
1820 i, phist);
1821 i++)
1822 insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
1823 phist->uid, phist->type,
1824 phist->old_expr_vinsn, phist->new_expr_vinsn,
1825 phist->spec_ds);
1826
1827 EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from);
1828 EXPR_WAS_RENAMED (to) |= EXPR_WAS_RENAMED (from);
1829 EXPR_CANT_MOVE (to) |= EXPR_CANT_MOVE (from);
1830
1831 update_target_availability (to, from, split_point);
1832 update_speculative_bits (to, from, split_point);
1833 }
1834
1835 /* Merge bits of FROM expr to TO expr. Vinsns in the exprs should be equal
1836 in terms of vinsn_equal_p. SPLIT_POINT is non-null when expressions
1837 are merged from different successors at a split point. */
1838 void
1839 merge_expr (expr_t to, expr_t from, insn_t split_point)
1840 {
1841 vinsn_t to_vi = EXPR_VINSN (to);
1842 vinsn_t from_vi = EXPR_VINSN (from);
1843
1844 gcc_assert (vinsn_equal_p (to_vi, from_vi));
1845
1846 /* Make sure that the speculative pattern is propagated into exprs that
1847 have a non-speculative one. This will provide us with consistent
1848 speculative bits and speculative patterns inside the expr. */
1849 if (EXPR_SPEC_DONE_DS (to) == 0
1850 && EXPR_SPEC_DONE_DS (from) != 0)
1851 change_vinsn_in_expr (to, EXPR_VINSN (from));
1852
1853 merge_expr_data (to, from, split_point);
1854 gcc_assert (EXPR_USEFULNESS (to) <= REG_BR_PROB_BASE);
1855 }
1856
1857 /* Clear the information of this EXPR. */
1858 void
1859 clear_expr (expr_t expr)
1860 {
1861
1862 vinsn_detach (EXPR_VINSN (expr));
1863 EXPR_VINSN (expr) = NULL;
1864
1865 free_history_vect (&EXPR_HISTORY_OF_CHANGES (expr));
1866 }
1867
1868 /* For a given LV_SET, mark EXPR as having an unavailable target register. */
1869 static void
1870 set_unavailable_target_for_expr (expr_t expr, regset lv_set)
1871 {
1872 if (EXPR_SEPARABLE_P (expr))
1873 {
1874 if (REG_P (EXPR_LHS (expr))
1875 && bitmap_bit_p (lv_set, REGNO (EXPR_LHS (expr))))
1876 {
1877 /* If it's an insn like r1 = use (r1, ...), and it exists in
1878 different forms in each of the av_sets being merged, we can't say
1879 whether the original destination register is available or not.
1880 However, this still works if the destination register is not used
1881 in the original expression: if the branch at which LV_SET we're
1882 looking here is not actually the 'other branch' in the sense that the
1883 same expression is available through it (but it can't be determined
1884 at the computation stage because of transformations on one of the
1885 branches), it still won't affect the availability.
1886 Liveness of a register somewhere on a code motion path means
1887 it's either read somewhere on a code motion path, live on
1888 the 'other' branch, live at the point immediately following
1889 the original operation, or is read by the original operation.
1890 The latter case is filtered out in the condition below.
1891 It still doesn't cover the case when the register is defined and used
1892 somewhere within the code motion path, and in this case we could
1893 miss a unifying code motion along both branches using a renamed
1894 register, but it won't affect code correctness since upon
1895 an actual code motion, bookkeeping code would be generated. */
1896 if (bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)),
1897 REGNO (EXPR_LHS (expr))))
1898 EXPR_TARGET_AVAILABLE (expr) = -1;
1899 else
1900 EXPR_TARGET_AVAILABLE (expr) = false;
1901 }
1902 }
1903 else
1904 {
1905 unsigned regno;
1906 reg_set_iterator rsi;
1907
1908 EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)),
1909 0, regno, rsi)
1910 if (bitmap_bit_p (lv_set, regno))
1911 {
1912 EXPR_TARGET_AVAILABLE (expr) = false;
1913 break;
1914 }
1915
1916 EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (EXPR_VINSN (expr)),
1917 0, regno, rsi)
1918 if (bitmap_bit_p (lv_set, regno))
1919 {
1920 EXPR_TARGET_AVAILABLE (expr) = false;
1921 break;
1922 }
1923 }
1924 }
1925
1926 /* Try to make EXPR speculative. Return 1 when EXPR's pattern
1927 or dependence status has changed, 2 when also the target register
1928 became unavailable, 0 if nothing had to be changed. */
1929 int
1930 speculate_expr (expr_t expr, ds_t ds)
1931 {
1932 int res;
1933 rtx orig_insn_rtx;
1934 rtx spec_pat;
1935 ds_t target_ds, current_ds;
1936
1937 /* Obtain the status we need to put on EXPR. */
1938 target_ds = (ds & SPECULATIVE);
1939 current_ds = EXPR_SPEC_DONE_DS (expr);
1940 ds = ds_full_merge (current_ds, target_ds, NULL_RTX, NULL_RTX);
1941
1942 orig_insn_rtx = EXPR_INSN_RTX (expr);
1943
1944 res = sched_speculate_insn (orig_insn_rtx, ds, &spec_pat);
1945
1946 switch (res)
1947 {
1948 case 0:
1949 EXPR_SPEC_DONE_DS (expr) = ds;
1950 return current_ds != ds ? 1 : 0;
1951
1952 case 1:
1953 {
1954 rtx spec_insn_rtx = create_insn_rtx_from_pattern (spec_pat, NULL_RTX);
1955 vinsn_t spec_vinsn = create_vinsn_from_insn_rtx (spec_insn_rtx, false);
1956
1957 change_vinsn_in_expr (expr, spec_vinsn);
1958 EXPR_SPEC_DONE_DS (expr) = ds;
1959 EXPR_NEEDS_SPEC_CHECK_P (expr) = true;
1960
1961 /* Do not allow clobbering the address register of speculative
1962 insns. */
1963 if (bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)),
1964 expr_dest_regno (expr)))
1965 {
1966 EXPR_TARGET_AVAILABLE (expr) = false;
1967 return 2;
1968 }
1969
1970 return 1;
1971 }
1972
1973 case -1:
1974 return -1;
1975
1976 default:
1977 gcc_unreachable ();
1978 return -1;
1979 }
1980 }
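/* For example, for a hypothetical speculative load "r1 = [r2]" the vinsn is
   replaced with the target's speculative load pattern and 1 is returned;
   for "r1 = [r1]" the address register coincides with the destination, so
   in addition the target register is marked unavailable and 2 is returned.
   This is only a sketch; the actual patterns are target-specific (e.g. ld.s
   on ia64).  */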
1981
1982 /* Return the destination register of EXPR, if any. */
1983 rtx
1984 expr_dest_reg (expr_t expr)
1985 {
1986 rtx dest = VINSN_LHS (EXPR_VINSN (expr));
1987
1988 if (dest != NULL_RTX && REG_P (dest))
1989 return dest;
1990
1991 return NULL_RTX;
1992 }
1993
1994 /* Return the REGNO of EXPR's destination. */
1995 unsigned
1996 expr_dest_regno (expr_t expr)
1997 {
1998 rtx dest = expr_dest_reg (expr);
1999
2000 gcc_assert (dest != NULL_RTX);
2001 return REGNO (dest);
2002 }
2003
2004 /* For a given LV_SET, mark all expressions that are in JOIN_SET but not
2005 present in AV_SET as having an unavailable target register. */
2006 void
2007 mark_unavailable_targets (av_set_t join_set, av_set_t av_set, regset lv_set)
2008 {
2009 expr_t expr;
2010 av_set_iterator avi;
2011
2012 FOR_EACH_EXPR (expr, avi, join_set)
2013 if (av_set_lookup (av_set, EXPR_VINSN (expr)) == NULL)
2014 set_unavailable_target_for_expr (expr, lv_set);
2015 }
2016 \f
2017
2018 /* Av set functions. */
2019
2020 /* Add a new element to av set SETP.
2021 Return the element added. */
2022 static av_set_t
2023 av_set_add_element (av_set_t *setp)
2024 {
2025 /* Insert at the beginning of the list. */
2026 _list_add (setp);
2027 return *setp;
2028 }
2029
2030 /* Add EXPR to SETP. */
2031 void
2032 av_set_add (av_set_t *setp, expr_t expr)
2033 {
2034 av_set_t elem;
2035
2036 gcc_assert (!INSN_NOP_P (EXPR_INSN_RTX (expr)));
2037 elem = av_set_add_element (setp);
2038 copy_expr (_AV_SET_EXPR (elem), expr);
2039 }
2040
2041 /* Same, but do not copy EXPR. */
2042 static void
2043 av_set_add_nocopy (av_set_t *setp, expr_t expr)
2044 {
2045 av_set_t elem;
2046
2047 elem = av_set_add_element (setp);
2048 *_AV_SET_EXPR (elem) = *expr;
2049 }
2050
2051 /* Remove expr pointed to by IP from the av_set. */
2052 void
2053 av_set_iter_remove (av_set_iterator *ip)
2054 {
2055 clear_expr (_AV_SET_EXPR (*ip->lp));
2056 _list_iter_remove (ip);
2057 }
2058
2059 /* Search for an expr in SET such that it's equivalent to SOUGHT_VINSN in the
2060 sense of the vinsn_equal_p function. Return NULL if no such expr
2061 was found in SET. */
2062 expr_t
2063 av_set_lookup (av_set_t set, vinsn_t sought_vinsn)
2064 {
2065 expr_t expr;
2066 av_set_iterator i;
2067
2068 FOR_EACH_EXPR (expr, i, set)
2069 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2070 return expr;
2071 return NULL;
2072 }
2073
2074 /* Same, but also remove the EXPR found. */
2075 static expr_t
2076 av_set_lookup_and_remove (av_set_t *setp, vinsn_t sought_vinsn)
2077 {
2078 expr_t expr;
2079 av_set_iterator i;
2080
2081 FOR_EACH_EXPR_1 (expr, i, setp)
2082 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2083 {
2084 _list_iter_remove_nofree (&i);
2085 return expr;
2086 }
2087 return NULL;
2088 }
2089
2090 /* Search for an expr in SET such that its vinsn is equivalent to EXPR's vinsn
2091 in the sense of the vinsn_equal_p function, but which is not EXPR itself.
2092 Return NULL if no such expr was found in SET. */
2093 static expr_t
2094 av_set_lookup_other_equiv_expr (av_set_t set, expr_t expr)
2095 {
2096 expr_t cur_expr;
2097 av_set_iterator i;
2098
2099 FOR_EACH_EXPR (cur_expr, i, set)
2100 {
2101 if (cur_expr == expr)
2102 continue;
2103 if (vinsn_equal_p (EXPR_VINSN (cur_expr), EXPR_VINSN (expr)))
2104 return cur_expr;
2105 }
2106
2107 return NULL;
2108 }
2109
2110 /* If an expression equivalent to EXPR is already in AVP, merge them and remove the one pointed to by IP; return the expression that remains. */
2111 expr_t
2112 merge_with_other_exprs (av_set_t *avp, av_set_iterator *ip, expr_t expr)
2113 {
2114 expr_t expr2;
2115
2116 expr2 = av_set_lookup_other_equiv_expr (*avp, expr);
2117 if (expr2 != NULL)
2118 {
2119 /* Reset target availability on merge, since taking it only from one
2120 of the exprs could be incorrect, as they may correspond to different code. */
2121 EXPR_TARGET_AVAILABLE (expr2) = -1;
2122 EXPR_USEFULNESS (expr2) = 0;
2123
2124 merge_expr (expr2, expr, NULL);
2125
2126 /* Fix usefulness as it should be now REG_BR_PROB_BASE. */
2127 EXPR_USEFULNESS (expr2) = REG_BR_PROB_BASE;
2128
2129 av_set_iter_remove (ip);
2130 return expr2;
2131 }
2132
2133 return expr;
2134 }
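/* For example, if AVP already contains an expr for the same vinsn as EXPR
   (found along another path), EXPR is merged into that element, the element
   pointed to by IP is removed, and the caller continues with the merged
   expr, whose usefulness is reset to REG_BR_PROB_BASE.  */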
2135
2136 /* Return true if there is an expr that correlates to VI in SET. */
2137 bool
2138 av_set_is_in_p (av_set_t set, vinsn_t vi)
2139 {
2140 return av_set_lookup (set, vi) != NULL;
2141 }
2142
2143 /* Return a copy of SET. */
2144 av_set_t
2145 av_set_copy (av_set_t set)
2146 {
2147 expr_t expr;
2148 av_set_iterator i;
2149 av_set_t res = NULL;
2150
2151 FOR_EACH_EXPR (expr, i, set)
2152 av_set_add (&res, expr);
2153
2154 return res;
2155 }
2156
2157 /* Join two av sets that do not have common elements by attaching the second
2158 set (pointed to by FROMP) to the end of the first set (TO_TAILP must point
2159 to the _AV_SET_NEXT of the first set's last element). */
2160 static void
2161 join_distinct_sets (av_set_t *to_tailp, av_set_t *fromp)
2162 {
2163 gcc_assert (*to_tailp == NULL);
2164 *to_tailp = *fromp;
2165 *fromp = NULL;
2166 }
2167
2168 /* Make the set pointed to by TOP the union of the sets pointed to by TOP
2169 and FROMP. Clear the av_set pointed to by FROMP afterwards. */
2170 void
2171 av_set_union_and_clear (av_set_t *top, av_set_t *fromp, insn_t insn)
2172 {
2173 expr_t expr1;
2174 av_set_iterator i;
2175
2176 /* Delete from TOP all exprs that are also present in FROMP. */
2177 FOR_EACH_EXPR_1 (expr1, i, top)
2178 {
2179 expr_t expr2 = av_set_lookup (*fromp, EXPR_VINSN (expr1));
2180
2181 if (expr2)
2182 {
2183 merge_expr (expr2, expr1, insn);
2184 av_set_iter_remove (&i);
2185 }
2186 }
2187
2188 join_distinct_sets (i.lp, fromp);
2189 }
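/* E.g. if *TOP = {a, b} and *FROMP = {b', c}, where b and b' share a vinsn,
   then after the call *TOP = {a, b', c} with b merged into b', and *FROMP
   becomes NULL (a, b and c are placeholder exprs used only for
   illustration).  */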
2190
2191 /* Same as above, but also update availability of target register in
2192 TOP judging by TO_LV_SET and FROM_LV_SET. */
2193 void
2194 av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set,
2195 regset from_lv_set, insn_t insn)
2196 {
2197 expr_t expr1;
2198 av_set_iterator i;
2199 av_set_t *to_tailp, in_both_set = NULL;
2200
2201 /* Delete from TOP all exprs that are also present in FROMP. */
2202 FOR_EACH_EXPR_1 (expr1, i, top)
2203 {
2204 expr_t expr2 = av_set_lookup_and_remove (fromp, EXPR_VINSN (expr1));
2205
2206 if (expr2)
2207 {
2208 /* It may be that the expressions have different destination
2209 registers, in which case we need to check liveness here. */
2210 if (EXPR_SEPARABLE_P (expr1))
2211 {
2212 int regno1 = (REG_P (EXPR_LHS (expr1))
2213 ? (int) expr_dest_regno (expr1) : -1);
2214 int regno2 = (REG_P (EXPR_LHS (expr2))
2215 ? (int) expr_dest_regno (expr2) : -1);
2216
2217 /* ??? We don't have a way to check restrictions for
2218 *other* register on the current path, we did it only
2219 for the current target register. Give up. */
2220 if (regno1 != regno2)
2221 EXPR_TARGET_AVAILABLE (expr2) = -1;
2222 }
2223 else if (EXPR_INSN_RTX (expr1) != EXPR_INSN_RTX (expr2))
2224 EXPR_TARGET_AVAILABLE (expr2) = -1;
2225
2226 merge_expr (expr2, expr1, insn);
2227 av_set_add_nocopy (&in_both_set, expr2);
2228 av_set_iter_remove (&i);
2229 }
2230 else
2231 /* EXPR1 is present in TOP, but not in FROMP. Check it on
2232 FROM_LV_SET. */
2233 set_unavailable_target_for_expr (expr1, from_lv_set);
2234 }
2235 to_tailp = i.lp;
2236
2237 /* These expressions are not present in TOP. Check liveness
2238 restrictions on TO_LV_SET. */
2239 FOR_EACH_EXPR (expr1, i, *fromp)
2240 set_unavailable_target_for_expr (expr1, to_lv_set);
2241
2242 join_distinct_sets (i.lp, &in_both_set);
2243 join_distinct_sets (to_tailp, fromp);
2244 }
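/* Compared to av_set_union_and_clear, exprs found in both sets are first
   collected into IN_BOTH_SET, while the exprs unique to either set are
   checked against the LV_SET of the opposite branch, so that an expr
   available through only one branch cannot claim its target register when
   that register is live on the other branch.  */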
2245
2246 /* Clear av_set pointed to by SETP. */
2247 void
2248 av_set_clear (av_set_t *setp)
2249 {
2250 expr_t expr;
2251 av_set_iterator i;
2252
2253 FOR_EACH_EXPR_1 (expr, i, setp)
2254 av_set_iter_remove (&i);
2255
2256 gcc_assert (*setp == NULL);
2257 }
2258
2259 /* Leave only one non-speculative element in the set pointed to by SETP. */
2260 void
2261 av_set_leave_one_nonspec (av_set_t *setp)
2262 {
2263 expr_t expr;
2264 av_set_iterator i;
2265 bool has_one_nonspec = false;
2266
2267 /* Keep all speculative exprs, and leave one non-speculative
2268 (the first one). */
2269 FOR_EACH_EXPR_1 (expr, i, setp)
2270 {
2271 if (!EXPR_SPEC_DONE_DS (expr))
2272 {
2273 if (has_one_nonspec)
2274 av_set_iter_remove (&i);
2275 else
2276 has_one_nonspec = true;
2277 }
2278 }
2279 }
2280
2281 /* Return the N'th element of the SET. */
2282 expr_t
2283 av_set_element (av_set_t set, int n)
2284 {
2285 expr_t expr;
2286 av_set_iterator i;
2287
2288 FOR_EACH_EXPR (expr, i, set)
2289 if (n-- == 0)
2290 return expr;
2291
2292 gcc_unreachable ();
2293 return NULL;
2294 }
2295
2296 /* Deletes all expressions from AVP that are conditional branches (IFs). */
2297 void
2298 av_set_substract_cond_branches (av_set_t *avp)
2299 {
2300 av_set_iterator i;
2301 expr_t expr;
2302
2303 FOR_EACH_EXPR_1 (expr, i, avp)
2304 if (vinsn_cond_branch_p (EXPR_VINSN (expr)))
2305 av_set_iter_remove (&i);
2306 }
2307
2308 /* Multiply the usefulness attribute of each member of the av-set AV by
2309 PROB / ALL_PROB. */
2310 void
2311 av_set_split_usefulness (av_set_t av, int prob, int all_prob)
2312 {
2313 av_set_iterator i;
2314 expr_t expr;
2315
2316 FOR_EACH_EXPR (expr, i, av)
2317 EXPR_USEFULNESS (expr) = (all_prob
2318 ? (EXPR_USEFULNESS (expr) * prob) / all_prob
2319 : 0);
2320 }
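/* A small worked example: with REG_BR_PROB_BASE == 10000, an expr whose
   usefulness is 10000 that is propagated along an edge with PROB == 4000
   out of ALL_PROB == 10000 ends up with usefulness 4000; when ALL_PROB is
   zero, the usefulness is simply zeroed.  */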
2321
2322 /* Leave in AVP only those expressions that are also present in AV,
2323 i.e. intersect *AVP with AV. */
2324 void
2325 av_set_intersect (av_set_t *avp, av_set_t av)
2326 {
2327 av_set_iterator i;
2328 expr_t expr;
2329
2330 FOR_EACH_EXPR_1 (expr, i, avp)
2331 if (av_set_lookup (av, EXPR_VINSN (expr)) == NULL)
2332 av_set_iter_remove (&i);
2333 }
2334
2335 \f
2336
2337 /* Dependence hooks to initialize insn data. */
2338
2339 /* This is used in hooks callable from dependence analysis when initializing
2340 an instruction's data. */
2341 static struct
2342 {
2343 /* Where the dependence was found (lhs/rhs). */
2344 deps_where_t where;
2345
2346 /* The actual data object to initialize. */
2347 idata_t id;
2348
2349 /* True when the insn should not be made clonable. */
2350 bool force_unique_p;
2351
2352 /* True when insn should be treated as of type USE, i.e. never renamed. */
2353 bool force_use_p;
2354 } deps_init_id_data;
2355
2356
2357 /* Setup ID for INSN. FORCE_UNIQUE_P is true when INSN should not be
2358 clonable. */
2359 static void
2360 setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p)
2361 {
2362 int type;
2363
2364 /* Determine whether INSN could be cloned and return the appropriate vinsn type.
2365 Clonable insns which can be separated into lhs and rhs have type SET;
2366 other clonable insns have type USE. */
2367 type = GET_CODE (insn);
2368
2369 /* Only regular insns can be cloned. */
2370 if (type == INSN && !force_unique_p)
2371 type = SET;
2372 else if (type == JUMP_INSN && simplejump_p (insn))
2373 type = PC;
2374 else if (type == DEBUG_INSN)
2375 type = !force_unique_p ? USE : INSN;
2376
2377 IDATA_TYPE (id) = type;
2378 IDATA_REG_SETS (id) = get_clear_regset_from_pool ();
2379 IDATA_REG_USES (id) = get_clear_regset_from_pool ();
2380 IDATA_REG_CLOBBERS (id) = get_clear_regset_from_pool ();
2381 }
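/* To summarize the mapping above: an ordinary INSN that may be cloned gets
   type SET, a simplejump gets type PC, a DEBUG_INSN gets USE (or INSN when
   forced unique), and everything else keeps its rtx code (JUMP_INSN,
   CALL_INSN, ...) and is treated as unique.  */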
2382
2383 /* Start initializing insn data. */
2384 static void
2385 deps_init_id_start_insn (insn_t insn)
2386 {
2387 gcc_assert (deps_init_id_data.where == DEPS_IN_NOWHERE);
2388
2389 setup_id_for_insn (deps_init_id_data.id, insn,
2390 deps_init_id_data.force_unique_p);
2391 deps_init_id_data.where = DEPS_IN_INSN;
2392 }
2393
2394 /* Start initializing lhs data. */
2395 static void
2396 deps_init_id_start_lhs (rtx lhs)
2397 {
2398 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2399 gcc_assert (IDATA_LHS (deps_init_id_data.id) == NULL);
2400
2401 if (IDATA_TYPE (deps_init_id_data.id) == SET)
2402 {
2403 IDATA_LHS (deps_init_id_data.id) = lhs;
2404 deps_init_id_data.where = DEPS_IN_LHS;
2405 }
2406 }
2407
2408 /* Finish initializing lhs data. */
2409 static void
2410 deps_init_id_finish_lhs (void)
2411 {
2412 deps_init_id_data.where = DEPS_IN_INSN;
2413 }
2414
2415 /* Note a set of REGNO. */
2416 static void
2417 deps_init_id_note_reg_set (int regno)
2418 {
2419 haifa_note_reg_set (regno);
2420
2421 if (deps_init_id_data.where == DEPS_IN_RHS)
2422 deps_init_id_data.force_use_p = true;
2423
2424 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2425 SET_REGNO_REG_SET (IDATA_REG_SETS (deps_init_id_data.id), regno);
2426
2427 #ifdef STACK_REGS
2428 /* Make instructions that set stack registers to be ineligible for
2429 renaming to avoid issues with find_used_regs. */
2430 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2431 deps_init_id_data.force_use_p = true;
2432 #endif
2433 }
2434
2435 /* Note a clobber of REGNO. */
2436 static void
2437 deps_init_id_note_reg_clobber (int regno)
2438 {
2439 haifa_note_reg_clobber (regno);
2440
2441 if (deps_init_id_data.where == DEPS_IN_RHS)
2442 deps_init_id_data.force_use_p = true;
2443
2444 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2445 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (deps_init_id_data.id), regno);
2446 }
2447
2448 /* Note a use of REGNO. */
2449 static void
2450 deps_init_id_note_reg_use (int regno)
2451 {
2452 haifa_note_reg_use (regno);
2453
2454 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2455 SET_REGNO_REG_SET (IDATA_REG_USES (deps_init_id_data.id), regno);
2456 }
2457
2458 /* Start initializing rhs data. */
2459 static void
2460 deps_init_id_start_rhs (rtx rhs)
2461 {
2462 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2463
2464 /* And there was no sel_deps_reset_to_insn (). */
2465 if (IDATA_LHS (deps_init_id_data.id) != NULL)
2466 {
2467 IDATA_RHS (deps_init_id_data.id) = rhs;
2468 deps_init_id_data.where = DEPS_IN_RHS;
2469 }
2470 }
2471
2472 /* Finish initializing rhs data. */
2473 static void
2474 deps_init_id_finish_rhs (void)
2475 {
2476 gcc_assert (deps_init_id_data.where == DEPS_IN_RHS
2477 || deps_init_id_data.where == DEPS_IN_INSN);
2478 deps_init_id_data.where = DEPS_IN_INSN;
2479 }
2480
2481 /* Finish initializing insn data. */
2482 static void
2483 deps_init_id_finish_insn (void)
2484 {
2485 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2486
2487 if (IDATA_TYPE (deps_init_id_data.id) == SET)
2488 {
2489 rtx lhs = IDATA_LHS (deps_init_id_data.id);
2490 rtx rhs = IDATA_RHS (deps_init_id_data.id);
2491
2492 if (lhs == NULL || rhs == NULL || !lhs_and_rhs_separable_p (lhs, rhs)
2493 || deps_init_id_data.force_use_p)
2494 {
2495 /* This should be a USE, as we don't want to schedule its RHS
2496 separately. However, we still want to have the lhs and rhs recorded
2497 for the purposes of substitution. That's why we don't
2498 simply call downgrade_to_use () here. */
2499 gcc_assert (IDATA_TYPE (deps_init_id_data.id) == SET);
2500 gcc_assert (!lhs == !rhs);
2501
2502 IDATA_TYPE (deps_init_id_data.id) = USE;
2503 }
2504 }
2505
2506 deps_init_id_data.where = DEPS_IN_NOWHERE;
2507 }
2508
2509 /* This is dependence info used for initializing insn's data. */
2510 static struct sched_deps_info_def deps_init_id_sched_deps_info;
2511
2512 /* This initializes most of the static part of the above structure. */
2513 static const struct sched_deps_info_def const_deps_init_id_sched_deps_info =
2514 {
2515 NULL,
2516
2517 deps_init_id_start_insn,
2518 deps_init_id_finish_insn,
2519 deps_init_id_start_lhs,
2520 deps_init_id_finish_lhs,
2521 deps_init_id_start_rhs,
2522 deps_init_id_finish_rhs,
2523 deps_init_id_note_reg_set,
2524 deps_init_id_note_reg_clobber,
2525 deps_init_id_note_reg_use,
2526 NULL, /* note_mem_dep */
2527 NULL, /* note_dep */
2528
2529 0, /* use_cselib */
2530 0, /* use_deps_list */
2531 0 /* generate_spec_deps */
2532 };
2533
2534 /* Initialize INSN's lhs and rhs in ID. When FORCE_UNIQUE_P is true,
2535 we don't actually need information about lhs and rhs. */
2536 static void
2537 setup_id_lhs_rhs (idata_t id, insn_t insn, bool force_unique_p)
2538 {
2539 rtx pat = PATTERN (insn);
2540
2541 if (NONJUMP_INSN_P (insn)
2542 && GET_CODE (pat) == SET
2543 && !force_unique_p)
2544 {
2545 IDATA_RHS (id) = SET_SRC (pat);
2546 IDATA_LHS (id) = SET_DEST (pat);
2547 }
2548 else
2549 IDATA_LHS (id) = IDATA_RHS (id) = NULL;
2550 }
2551
2552 /* Possibly downgrade INSN to USE. */
2553 static void
2554 maybe_downgrade_id_to_use (idata_t id, insn_t insn)
2555 {
2556 bool must_be_use = false;
2557 unsigned uid = INSN_UID (insn);
2558 df_ref *rec;
2559 rtx lhs = IDATA_LHS (id);
2560 rtx rhs = IDATA_RHS (id);
2561
2562 /* We downgrade only SETs. */
2563 if (IDATA_TYPE (id) != SET)
2564 return;
2565
2566 if (!lhs || !lhs_and_rhs_separable_p (lhs, rhs))
2567 {
2568 IDATA_TYPE (id) = USE;
2569 return;
2570 }
2571
2572 for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++)
2573 {
2574 df_ref def = *rec;
2575
2576 if (DF_REF_INSN (def)
2577 && DF_REF_FLAGS_IS_SET (def, DF_REF_PRE_POST_MODIFY)
2578 && loc_mentioned_in_p (DF_REF_LOC (def), IDATA_RHS (id)))
2579 {
2580 must_be_use = true;
2581 break;
2582 }
2583
2584 #ifdef STACK_REGS
2585 /* Make instructions that set stack registers to be ineligible for
2586 renaming to avoid issues with find_used_regs. */
2587 if (IN_RANGE (DF_REF_REGNO (def), FIRST_STACK_REG, LAST_STACK_REG))
2588 {
2589 must_be_use = true;
2590 break;
2591 }
2592 #endif
2593 }
2594
2595 if (must_be_use)
2596 IDATA_TYPE (id) = USE;
2597 }
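/* For instance, a hypothetical insn "r1 = [r2++]" has a pre/post-modify def
   of r2 whose location is mentioned in the RHS, so the insn is downgraded
   to USE and will not be considered for renaming; the same happens for
   insns setting stack registers when STACK_REGS is defined.  */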
2598
2599 /* Setup register sets describing INSN in ID. */
2600 static void
2601 setup_id_reg_sets (idata_t id, insn_t insn)
2602 {
2603 unsigned uid = INSN_UID (insn);
2604 df_ref *rec;
2605 regset tmp = get_clear_regset_from_pool ();
2606
2607 for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++)
2608 {
2609 df_ref def = *rec;
2610 unsigned int regno = DF_REF_REGNO (def);
2611
2612 /* Post modifies are treated like clobbers by sched-deps.c. */
2613 if (DF_REF_FLAGS_IS_SET (def, (DF_REF_MUST_CLOBBER
2614 | DF_REF_PRE_POST_MODIFY)))
2615 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (id), regno);
2616 else if (! DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
2617 {
2618 SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno);
2619
2620 #ifdef STACK_REGS
2621 /* For stack registers, treat writes to them as writes
2622 to the first one to be consistent with sched-deps.c. */
2623 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2624 SET_REGNO_REG_SET (IDATA_REG_SETS (id), FIRST_STACK_REG);
2625 #endif
2626 }
2627 /* Mark special refs that generate read/write def pair. */
2628 if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL)
2629 || regno == STACK_POINTER_REGNUM)
2630 bitmap_set_bit (tmp, regno);
2631 }
2632
2633 for (rec = DF_INSN_UID_USES (uid); *rec; rec++)
2634 {
2635 df_ref use = *rec;
2636 unsigned int regno = DF_REF_REGNO (use);
2637
2638 /* When these refs are met for the first time, skip them, as
2639 these uses are just counterparts of some defs. */
2640 if (bitmap_bit_p (tmp, regno))
2641 bitmap_clear_bit (tmp, regno);
2642 else if (! DF_REF_FLAGS_IS_SET (use, DF_REF_CALL_STACK_USAGE))
2643 {
2644 SET_REGNO_REG_SET (IDATA_REG_USES (id), regno);
2645
2646 #ifdef STACK_REGS
2647 /* For stack registers, treat reads from them as reads from
2648 the first one to be consistent with sched-deps.c. */
2649 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2650 SET_REGNO_REG_SET (IDATA_REG_USES (id), FIRST_STACK_REG);
2651 #endif
2652 }
2653 }
2654
2655 return_regset_to_pool (tmp);
2656 }
2657
2658 /* Initialize instruction data for INSN in ID using DF's data. */
2659 static void
2660 init_id_from_df (idata_t id, insn_t insn, bool force_unique_p)
2661 {
2662 gcc_assert (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL);
2663
2664 setup_id_for_insn (id, insn, force_unique_p);
2665 setup_id_lhs_rhs (id, insn, force_unique_p);
2666
2667 if (INSN_NOP_P (insn))
2668 return;
2669
2670 maybe_downgrade_id_to_use (id, insn);
2671 setup_id_reg_sets (id, insn);
2672 }
2673
2674 /* Initialize instruction data for INSN in ID. */
2675 static void
2676 deps_init_id (idata_t id, insn_t insn, bool force_unique_p)
2677 {
2678 struct deps_desc _dc, *dc = &_dc;
2679
2680 deps_init_id_data.where = DEPS_IN_NOWHERE;
2681 deps_init_id_data.id = id;
2682 deps_init_id_data.force_unique_p = force_unique_p;
2683 deps_init_id_data.force_use_p = false;
2684
2685 init_deps (dc, false);
2686
2687 memcpy (&deps_init_id_sched_deps_info,
2688 &const_deps_init_id_sched_deps_info,
2689 sizeof (deps_init_id_sched_deps_info));
2690
2691 if (spec_info != NULL)
2692 deps_init_id_sched_deps_info.generate_spec_deps = 1;
2693
2694 sched_deps_info = &deps_init_id_sched_deps_info;
2695
2696 deps_analyze_insn (dc, insn);
2697
2698 free_deps (dc);
2699
2700 deps_init_id_data.id = NULL;
2701 }
2702
2703 \f
2704
2705 /* Implement hooks for collecting fundamental insn properties, such as whether
2706 the insn is an ASM or is within a SCHED_GROUP. */
2707
2708 /* True when the "one-time init" data for INSN has not been initialized yet, i.e. this is the first time we see INSN. */
2709 static bool
2710 first_time_insn_init (insn_t insn)
2711 {
2712 return INSN_LIVE (insn) == NULL;
2713 }
2714
2715 /* Hash an entry in a transformed_insns hashtable. */
2716 static hashval_t
2717 hash_transformed_insns (const void *p)
2718 {
2719 return VINSN_HASH_RTX (((const struct transformed_insns *) p)->vinsn_old);
2720 }
2721
2722 /* Compare the entries in a transformed_insns hashtable. */
2723 static int
2724 eq_transformed_insns (const void *p, const void *q)
2725 {
2726 rtx i1 = VINSN_INSN_RTX (((const struct transformed_insns *) p)->vinsn_old);
2727 rtx i2 = VINSN_INSN_RTX (((const struct transformed_insns *) q)->vinsn_old);
2728
2729 if (INSN_UID (i1) == INSN_UID (i2))
2730 return 1;
2731 return rtx_equal_p (PATTERN (i1), PATTERN (i2));
2732 }
2733
2734 /* Free an entry in a transformed_insns hashtable. */
2735 static void
2736 free_transformed_insns (void *p)
2737 {
2738 struct transformed_insns *pti = (struct transformed_insns *) p;
2739
2740 vinsn_detach (pti->vinsn_old);
2741 vinsn_detach (pti->vinsn_new);
2742 free (pti);
2743 }
2744
2745 /* Init the s_i_d data for INSN which should be inited just once, when
2746 we first see the insn. */
2747 static void
2748 init_first_time_insn_data (insn_t insn)
2749 {
2750 /* INSN_LIVE should not be set yet if this is the first time we init
2751 data for the insn. */
2752 gcc_assert (first_time_insn_init (insn));
2753
2754 /* These are needed for nops too. */
2755 INSN_LIVE (insn) = get_regset_from_pool ();
2756 INSN_LIVE_VALID_P (insn) = false;
2757
2758 if (!INSN_NOP_P (insn))
2759 {
2760 INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL);
2761 INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL);
2762 INSN_TRANSFORMED_INSNS (insn)
2763 = htab_create (16, hash_transformed_insns,
2764 eq_transformed_insns, free_transformed_insns);
2765 init_deps (&INSN_DEPS_CONTEXT (insn), true);
2766 }
2767 }
2768
2769 /* Free almost all above data for INSN that is scheduled already.
2770 Used for extra-large basic blocks. */
2771 void
2772 free_data_for_scheduled_insn (insn_t insn)
2773 {
2774 gcc_assert (! first_time_insn_init (insn));
2775
2776 if (! INSN_ANALYZED_DEPS (insn))
2777 return;
2778
2779 BITMAP_FREE (INSN_ANALYZED_DEPS (insn));
2780 BITMAP_FREE (INSN_FOUND_DEPS (insn));
2781 htab_delete (INSN_TRANSFORMED_INSNS (insn));
2782
2783 /* This is allocated only for bookkeeping insns. */
2784 if (INSN_ORIGINATORS (insn))
2785 BITMAP_FREE (INSN_ORIGINATORS (insn));
2786 free_deps (&INSN_DEPS_CONTEXT (insn));
2787
2788 INSN_ANALYZED_DEPS (insn) = NULL;
2789
2790 /* Clear the readonly flag so we would ICE when trying to recalculate
2791 the deps context (as we believe that it should not happen). */
2792 (&INSN_DEPS_CONTEXT (insn))->readonly = 0;
2793 }
2794
2795 /* Free the same data as above for INSN. */
2796 static void
2797 free_first_time_insn_data (insn_t insn)
2798 {
2799 gcc_assert (! first_time_insn_init (insn));
2800
2801 free_data_for_scheduled_insn (insn);
2802 return_regset_to_pool (INSN_LIVE (insn));
2803 INSN_LIVE (insn) = NULL;
2804 INSN_LIVE_VALID_P (insn) = false;
2805 }
2806
2807 /* Initialize region-scope data structures for basic blocks. */
2808 static void
2809 init_global_and_expr_for_bb (basic_block bb)
2810 {
2811 if (sel_bb_empty_p (bb))
2812 return;
2813
2814 invalidate_av_set (bb);
2815 }
2816
2817 /* Data for global dependency analysis (to initialize CANT_MOVE and
2818 SCHED_GROUP_P). */
2819 static struct
2820 {
2821 /* Previous insn. */
2822 insn_t prev_insn;
2823 } init_global_data;
2824
2825 /* Determine if INSN is part of a sched_group, is an asm, or should not be
2826 cloned. After that, initialize its expr. */
2827 static void
2828 init_global_and_expr_for_insn (insn_t insn)
2829 {
2830 if (LABEL_P (insn))
2831 return;
2832
2833 if (NOTE_INSN_BASIC_BLOCK_P (insn))
2834 {
2835 init_global_data.prev_insn = NULL_RTX;
2836 return;
2837 }
2838
2839 gcc_assert (INSN_P (insn));
2840
2841 if (SCHED_GROUP_P (insn))
2842 /* Setup a sched_group. */
2843 {
2844 insn_t prev_insn = init_global_data.prev_insn;
2845
2846 if (prev_insn)
2847 INSN_SCHED_NEXT (prev_insn) = insn;
2848
2849 init_global_data.prev_insn = insn;
2850 }
2851 else
2852 init_global_data.prev_insn = NULL_RTX;
2853
2854 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
2855 || asm_noperands (PATTERN (insn)) >= 0)
2856 /* Mark INSN as an asm. */
2857 INSN_ASM_P (insn) = true;
2858
2859 {
2860 bool force_unique_p;
2861 ds_t spec_done_ds;
2862
2863 /* Certain instructions cannot be cloned. */
2864 if (CANT_MOVE (insn)
2865 || INSN_ASM_P (insn)
2866 || SCHED_GROUP_P (insn)
2867 || prologue_epilogue_contains (insn)
2868 /* Exception handling insns are always unique. */
2869 || (cfun->can_throw_non_call_exceptions && can_throw_internal (insn))
2870 /* TRAP_IF, though it has an INSN code, is control_flow_insn_p (). */
2871 || control_flow_insn_p (insn))
2872 force_unique_p = true;
2873 else
2874 force_unique_p = false;
2875
2876 if (targetm.sched.get_insn_spec_ds)
2877 {
2878 spec_done_ds = targetm.sched.get_insn_spec_ds (insn);
2879 spec_done_ds = ds_get_max_dep_weak (spec_done_ds);
2880 }
2881 else
2882 spec_done_ds = 0;
2883
2884 /* Initialize INSN's expr. */
2885 init_expr (INSN_EXPR (insn), vinsn_create (insn, force_unique_p), 0,
2886 REG_BR_PROB_BASE, INSN_PRIORITY (insn), 0, BLOCK_NUM (insn),
2887 spec_done_ds, 0, 0, NULL, true, false, false, false,
2888 CANT_MOVE (insn));
2889 }
2890
2891 init_first_time_insn_data (insn);
2892 }
2893
2894 /* Scan the region and initialize instruction data for basic blocks BBS. */
2895 void
2896 sel_init_global_and_expr (bb_vec_t bbs)
2897 {
2898 /* ??? It would be nice to implement push / pop scheme for sched_infos. */
2899 const struct sched_scan_info_def ssi =
2900 {
2901 NULL, /* extend_bb */
2902 init_global_and_expr_for_bb, /* init_bb */
2903 extend_insn_data, /* extend_insn */
2904 init_global_and_expr_for_insn /* init_insn */
2905 };
2906
2907 sched_scan (&ssi, bbs, NULL, NULL, NULL);
2908 }
2909
2910 /* Finalize region-scope data structures for basic blocks. */
2911 static void
2912 finish_global_and_expr_for_bb (basic_block bb)
2913 {
2914 av_set_clear (&BB_AV_SET (bb));
2915 BB_AV_LEVEL (bb) = 0;
2916 }
2917
2918 /* Finalize INSN's data. */
2919 static void
2920 finish_global_and_expr_insn (insn_t insn)
2921 {
2922 if (LABEL_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn))
2923 return;
2924
2925 gcc_assert (INSN_P (insn));
2926
2927 if (INSN_LUID (insn) > 0)
2928 {
2929 free_first_time_insn_data (insn);
2930 INSN_WS_LEVEL (insn) = 0;
2931 CANT_MOVE (insn) = 0;
2932
2933 /* We can no longer assert this, as vinsns of this insn could easily be
2934 live in other insns' caches. This should be changed to
2935 a counter-like approach among all vinsns. */
2936 gcc_assert (true || VINSN_COUNT (INSN_VINSN (insn)) == 1);
2937 clear_expr (INSN_EXPR (insn));
2938 }
2939 }
2940
2941 /* Finalize per instruction data for the whole region. */
2942 void
2943 sel_finish_global_and_expr (void)
2944 {
2945 {
2946 bb_vec_t bbs;
2947 int i;
2948
2949 bbs = VEC_alloc (basic_block, heap, current_nr_blocks);
2950
2951 for (i = 0; i < current_nr_blocks; i++)
2952 VEC_quick_push (basic_block, bbs, BASIC_BLOCK (BB_TO_BLOCK (i)));
2953
2954 /* Clear AV_SETs and INSN_EXPRs. */
2955 {
2956 const struct sched_scan_info_def ssi =
2957 {
2958 NULL, /* extend_bb */
2959 finish_global_and_expr_for_bb, /* init_bb */
2960 NULL, /* extend_insn */
2961 finish_global_and_expr_insn /* init_insn */
2962 };
2963
2964 sched_scan (&ssi, bbs, NULL, NULL, NULL);
2965 }
2966
2967 VEC_free (basic_block, heap, bbs);
2968 }
2969
2970 finish_insns ();
2971 }
2972 \f
2973
2974 /* In the hooks below, we merely calculate whether or not a dependence
2975 exists, and in what part of the insn. However, we will need more data
2976 when we start caching dependence requests. */
2977
2978 /* Container to hold information for dependency analysis. */
2979 static struct
2980 {
2981 deps_t dc;
2982
2983 /* A variable to track which part of rtx we are scanning in
2984 sched-deps.c: sched_analyze_insn (). */
2985 deps_where_t where;
2986
2987 /* Current producer. */
2988 insn_t pro;
2989
2990 /* Current consumer. */
2991 vinsn_t con;
2992
2993 /* If SEL_DEPS_HAS_DEP_P[DEPS_IN_X] is true, then X has a dependence,
2994 where X is one of INSN, LHS or RHS. */
2995 ds_t has_dep_p[DEPS_IN_NOWHERE];
2996 } has_dependence_data;
2997
2998 /* Start analyzing dependencies of INSN. */
2999 static void
3000 has_dependence_start_insn (insn_t insn ATTRIBUTE_UNUSED)
3001 {
3002 gcc_assert (has_dependence_data.where == DEPS_IN_NOWHERE);
3003
3004 has_dependence_data.where = DEPS_IN_INSN;
3005 }
3006
3007 /* Finish analyzing dependencies of an insn. */
3008 static void
3009 has_dependence_finish_insn (void)
3010 {
3011 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3012
3013 has_dependence_data.where = DEPS_IN_NOWHERE;
3014 }
3015
3016 /* Start analyzing dependencies of LHS. */
3017 static void
3018 has_dependence_start_lhs (rtx lhs ATTRIBUTE_UNUSED)
3019 {
3020 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3021
3022 if (VINSN_LHS (has_dependence_data.con) != NULL)
3023 has_dependence_data.where = DEPS_IN_LHS;
3024 }
3025
3026 /* Finish analyzing dependencies of an lhs. */
3027 static void
3028 has_dependence_finish_lhs (void)
3029 {
3030 has_dependence_data.where = DEPS_IN_INSN;
3031 }
3032
3033 /* Start analyzing dependencies of RHS. */
3034 static void
3035 has_dependence_start_rhs (rtx rhs ATTRIBUTE_UNUSED)
3036 {
3037 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3038
3039 if (VINSN_RHS (has_dependence_data.con) != NULL)
3040 has_dependence_data.where = DEPS_IN_RHS;
3041 }
3042
3043 /* Finish analyzing dependencies of an rhs. */
3044 static void
3045 has_dependence_finish_rhs (void)
3046 {
3047 gcc_assert (has_dependence_data.where == DEPS_IN_RHS
3048 || has_dependence_data.where == DEPS_IN_INSN);
3049
3050 has_dependence_data.where = DEPS_IN_INSN;
3051 }
3052
3053 /* Note a set of REGNO. */
3054 static void
3055 has_dependence_note_reg_set (int regno)
3056 {
3057 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3058
3059 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3060 VINSN_INSN_RTX
3061 (has_dependence_data.con)))
3062 {
3063 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3064
3065 if (reg_last->sets != NULL
3066 || reg_last->clobbers != NULL)
3067 *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3068
3069 if (reg_last->uses)
3070 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3071 }
3072 }
3073
3074 /* Note a clobber of REGNO. */
3075 static void
3076 has_dependence_note_reg_clobber (int regno)
3077 {
3078 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3079
3080 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3081 VINSN_INSN_RTX
3082 (has_dependence_data.con)))
3083 {
3084 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3085
3086 if (reg_last->sets)
3087 *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3088
3089 if (reg_last->uses)
3090 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3091 }
3092 }
3093
3094 /* Note a use of REGNO. */
3095 static void
3096 has_dependence_note_reg_use (int regno)
3097 {
3098 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3099
3100 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3101 VINSN_INSN_RTX
3102 (has_dependence_data.con)))
3103 {
3104 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3105
3106 if (reg_last->sets)
3107 *dsp = (*dsp & ~SPECULATIVE) | DEP_TRUE;
3108
3109 if (reg_last->clobbers)
3110 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3111
3112 /* Handle BE_IN_SPEC. */
3113 if (reg_last->uses)
3114 {
3115 ds_t pro_spec_checked_ds;
3116
3117 pro_spec_checked_ds = INSN_SPEC_CHECKED_DS (has_dependence_data.pro);
3118 pro_spec_checked_ds = ds_get_max_dep_weak (pro_spec_checked_ds);
3119
3120 if (pro_spec_checked_ds != 0)
3121 /* Merge BE_IN_SPEC bits into *DSP. */
3122 *dsp = ds_full_merge (*dsp, pro_spec_checked_ds,
3123 NULL_RTX, NULL_RTX);
3124 }
3125 }
3126 }
3127
3128 /* Note a memory dependence. */
3129 static void
3130 has_dependence_note_mem_dep (rtx mem ATTRIBUTE_UNUSED,
3131 rtx pending_mem ATTRIBUTE_UNUSED,
3132 insn_t pending_insn ATTRIBUTE_UNUSED,
3133 ds_t ds ATTRIBUTE_UNUSED)
3134 {
3135 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3136 VINSN_INSN_RTX (has_dependence_data.con)))
3137 {
3138 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3139
3140 *dsp = ds_full_merge (ds, *dsp, pending_mem, mem);
3141 }
3142 }
3143
3144 /* Note a dependence. */
3145 static void
3146 has_dependence_note_dep (insn_t pro ATTRIBUTE_UNUSED,
3147 ds_t ds ATTRIBUTE_UNUSED)
3148 {
3149 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3150 VINSN_INSN_RTX (has_dependence_data.con)))
3151 {
3152 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3153
3154 *dsp = ds_full_merge (ds, *dsp, NULL_RTX, NULL_RTX);
3155 }
3156 }
3157
3158 /* Mark the insn as having a hard dependence that prevents speculation. */
3159 void
3160 sel_mark_hard_insn (rtx insn)
3161 {
3162 int i;
3163
3164 /* Only work when we're in has_dependence_p mode.
3165 ??? This is a hack, this should actually be a hook. */
3166 if (!has_dependence_data.dc || !has_dependence_data.pro)
3167 return;
3168
3169 gcc_assert (insn == VINSN_INSN_RTX (has_dependence_data.con));
3170 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3171
3172 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3173 has_dependence_data.has_dep_p[i] &= ~SPECULATIVE;
3174 }
3175
3176 /* This structure holds the hooks for the dependency analysis used when
3177 actually processing dependencies in the scheduler. */
3178 static struct sched_deps_info_def has_dependence_sched_deps_info;
3179
3180 /* This initializes most of the fields of the above structure. */
3181 static const struct sched_deps_info_def const_has_dependence_sched_deps_info =
3182 {
3183 NULL,
3184
3185 has_dependence_start_insn,
3186 has_dependence_finish_insn,
3187 has_dependence_start_lhs,
3188 has_dependence_finish_lhs,
3189 has_dependence_start_rhs,
3190 has_dependence_finish_rhs,
3191 has_dependence_note_reg_set,
3192 has_dependence_note_reg_clobber,
3193 has_dependence_note_reg_use,
3194 has_dependence_note_mem_dep,
3195 has_dependence_note_dep,
3196
3197 0, /* use_cselib */
3198 0, /* use_deps_list */
3199 0 /* generate_spec_deps */
3200 };
3201
3202 /* Initialize has_dependence_sched_deps_info with extra spec field. */
3203 static void
3204 setup_has_dependence_sched_deps_info (void)
3205 {
3206 memcpy (&has_dependence_sched_deps_info,
3207 &const_has_dependence_sched_deps_info,
3208 sizeof (has_dependence_sched_deps_info));
3209
3210 if (spec_info != NULL)
3211 has_dependence_sched_deps_info.generate_spec_deps = 1;
3212
3213 sched_deps_info = &has_dependence_sched_deps_info;
3214 }
3215
3216 /* Remove all dependences found and recorded in has_dependence_data array. */
3217 void
3218 sel_clear_has_dependence (void)
3219 {
3220 int i;
3221
3222 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3223 has_dependence_data.has_dep_p[i] = 0;
3224 }
3225
3226 /* Return nonzero if EXPR is dependent upon PRED. Return the pointer
3227 to the dependence information array in HAS_DEP_PP. */
3228 ds_t
3229 has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
3230 {
3231 int i;
3232 ds_t ds;
3233 struct deps_desc *dc;
3234
3235 if (INSN_SIMPLEJUMP_P (pred))
3236 /* Unconditional jump is just a transfer of control flow.
3237 Ignore it. */
3238 return false;
3239
3240 dc = &INSN_DEPS_CONTEXT (pred);
3241
3242 /* We init this field lazily. */
3243 if (dc->reg_last == NULL)
3244 init_deps_reg_last (dc);
3245
3246 if (!dc->readonly)
3247 {
3248 has_dependence_data.pro = NULL;
3249 /* Initialize empty dep context with information about PRED. */
3250 advance_deps_context (dc, pred);
3251 dc->readonly = 1;
3252 }
3253
3254 has_dependence_data.where = DEPS_IN_NOWHERE;
3255 has_dependence_data.pro = pred;
3256 has_dependence_data.con = EXPR_VINSN (expr);
3257 has_dependence_data.dc = dc;
3258
3259 sel_clear_has_dependence ();
3260
3261 /* Now catch all dependencies that would be generated between PRED and
3262 INSN. */
3263 setup_has_dependence_sched_deps_info ();
3264 deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3265 has_dependence_data.dc = NULL;
3266
3267 /* When a barrier was found, set DEPS_IN_INSN bits. */
3268 if (dc->last_reg_pending_barrier == TRUE_BARRIER)
3269 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_TRUE;
3270 else if (dc->last_reg_pending_barrier == MOVE_BARRIER)
3271 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3272
3273 /* Do not allow stores to memory to move through checks. Currently
3274 we don't move this to sched-deps.c as the check doesn't have
3275 obvious places to which this dependence can be attached.
3276 FIXME: this should go to a hook. */
3277 if (EXPR_LHS (expr)
3278 && MEM_P (EXPR_LHS (expr))
3279 && sel_insn_is_speculation_check (pred))
3280 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3281
3282 *has_dep_pp = has_dependence_data.has_dep_p;
3283 ds = 0;
3284 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3285 ds = ds_full_merge (ds, has_dependence_data.has_dep_p[i],
3286 NULL_RTX, NULL_RTX);
3287
3288 return ds;
3289 }
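/* A rough usage sketch (hypothetical caller code):

     ds_t *has_dep_p;
     ds_t full_ds = has_dependence_p (expr, pred, &has_dep_p);
     if (full_ds == 0)
       ...                          -- EXPR can be moved up through PRED
     else if (!has_dep_p[DEPS_IN_INSN])
       ...                          -- only the lhs/rhs parts conflict

   The per-part array lets the caller distinguish dependencies hitting the
   whole insn from those hitting only its lhs or rhs, which is what
   transformations such as renaming and substitution key off.  */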
3290 \f
3291
3292 /* Dependence hooks implementation that checks dependence latency constraints
3293 on the insns being scheduled. The entry point for these routines is
3294 tick_check_p predicate. */
3295
3296 static struct
3297 {
3298 /* An expr we are currently checking. */
3299 expr_t expr;
3300
3301 /* A minimal cycle for its scheduling. */
3302 int cycle;
3303
3304 /* Whether we have seen a true dependence while checking. */
3305 bool seen_true_dep_p;
3306 } tick_check_data;
3307
3308 /* Update the minimal scheduling cycle of the expr being checked
3309 (tick_check_data.expr), given that it depends on PRO_INSN with status DS and weight DW. */
3310 static void
3311 tick_check_dep_with_dw (insn_t pro_insn, ds_t ds, dw_t dw)
3312 {
3313 expr_t con_expr = tick_check_data.expr;
3314 insn_t con_insn = EXPR_INSN_RTX (con_expr);
3315
3316 if (con_insn != pro_insn)
3317 {
3318 enum reg_note dt;
3319 int tick;
3320
3321 if (/* PROducer was removed from above due to pipelining. */
3322 !INSN_IN_STREAM_P (pro_insn)
3323 /* Or PROducer was originally on the next iteration regarding the
3324 CONsumer. */
3325 || (INSN_SCHED_TIMES (pro_insn)
3326 - EXPR_SCHED_TIMES (con_expr)) > 1)
3327 /* Don't count this dependence. */
3328 return;
3329
3330 dt = ds_to_dt (ds);
3331 if (dt == REG_DEP_TRUE)
3332 tick_check_data.seen_true_dep_p = true;
3333
3334 gcc_assert (INSN_SCHED_CYCLE (pro_insn) > 0);
3335
3336 {
3337 dep_def _dep, *dep = &_dep;
3338
3339 init_dep (dep, pro_insn, con_insn, dt);
3340
3341 tick = INSN_SCHED_CYCLE (pro_insn) + dep_cost_1 (dep, dw);
3342 }
3343
3344 /* When there are several kinds of dependencies between pro and con,
3345 only REG_DEP_TRUE should be taken into account. */
3346 if (tick > tick_check_data.cycle
3347 && (dt == REG_DEP_TRUE || !tick_check_data.seen_true_dep_p))
3348 tick_check_data.cycle = tick;
3349 }
3350 }
3351
3352 /* An implementation of note_dep hook. */
3353 static void
3354 tick_check_note_dep (insn_t pro, ds_t ds)
3355 {
3356 tick_check_dep_with_dw (pro, ds, 0);
3357 }
3358
3359 /* An implementation of note_mem_dep hook. */
3360 static void
3361 tick_check_note_mem_dep (rtx mem1, rtx mem2, insn_t pro, ds_t ds)
3362 {
3363 dw_t dw;
3364
3365 dw = (ds_to_dt (ds) == REG_DEP_TRUE
3366 ? estimate_dep_weak (mem1, mem2)
3367 : 0);
3368
3369 tick_check_dep_with_dw (pro, ds, dw);
3370 }
3371
3372 /* This structure contains hooks for dependence analysis used when determining
3373 whether an insn is ready for scheduling. */
3374 static struct sched_deps_info_def tick_check_sched_deps_info =
3375 {
3376 NULL,
3377
3378 NULL,
3379 NULL,
3380 NULL,
3381 NULL,
3382 NULL,
3383 NULL,
3384 haifa_note_reg_set,
3385 haifa_note_reg_clobber,
3386 haifa_note_reg_use,
3387 tick_check_note_mem_dep,
3388 tick_check_note_dep,
3389
3390 0, 0, 0
3391 };
3392
3393 /* Estimate number of cycles from the current cycle of FENCE until EXPR can be
3394 scheduled. Return 0 if all data from producers in DC is ready. */
3395 int
3396 tick_check_p (expr_t expr, deps_t dc, fence_t fence)
3397 {
3398 int cycles_left;
3399 /* Initialize variables. */
3400 tick_check_data.expr = expr;
3401 tick_check_data.cycle = 0;
3402 tick_check_data.seen_true_dep_p = false;
3403 sched_deps_info = &tick_check_sched_deps_info;
3404
3405 gcc_assert (!dc->readonly);
3406 dc->readonly = 1;
3407 deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3408 dc->readonly = 0;
3409
3410 cycles_left = tick_check_data.cycle - FENCE_CYCLE (fence);
3411
3412 return cycles_left >= 0 ? cycles_left : 0;
3413 }
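/* A worked example: if the only producer of EXPR's data was scheduled at
   cycle 3 and the true dependence costs 2 cycles, the minimal cycle
   computed above is 5; with the fence currently at cycle 4 the function
   returns 1, i.e. EXPR becomes ready after one more stall cycle.  */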
3414 \f
3415
3416 /* Functions to work with insns. */
3417
3418 /* Returns true if LHS of INSN is the same as DEST of an insn
3419 being moved. */
3420 bool
3421 lhs_of_insn_equals_to_dest_p (insn_t insn, rtx dest)
3422 {
3423 rtx lhs = INSN_LHS (insn);
3424
3425 if (lhs == NULL || dest == NULL)
3426 return false;
3427
3428 return rtx_equal_p (lhs, dest);
3429 }
3430
3431 /* Return s_i_d entry of INSN. Callable from debugger. */
3432 sel_insn_data_def
3433 insn_sid (insn_t insn)
3434 {
3435 return *SID (insn);
3436 }
3437
3438 /* True when INSN is a speculative check. We can tell this by looking
3439 at the data structures of the selective scheduler, not by examining
3440 the pattern. */
3441 bool
3442 sel_insn_is_speculation_check (rtx insn)
3443 {
3444 return s_i_d && !! INSN_SPEC_CHECKED_DS (insn);
3445 }
3446
3447 /* Extract the machine mode MODE and destination location DST_LOC
3448 for the given INSN. */
3449 void
3450 get_dest_and_mode (rtx insn, rtx *dst_loc, enum machine_mode *mode)
3451 {
3452 rtx pat = PATTERN (insn);
3453
3454 gcc_assert (dst_loc);
3455 gcc_assert (GET_CODE (pat) == SET);
3456
3457 *dst_loc = SET_DEST (pat);
3458
3459 gcc_assert (*dst_loc);
3460 gcc_assert (MEM_P (*dst_loc) || REG_P (*dst_loc));
3461
3462 if (mode)
3463 *mode = GET_MODE (*dst_loc);
3464 }
3465
3466 /* Returns true when moving through JUMP will result in bookkeeping
3467 creation. */
3468 bool
3469 bookkeeping_can_be_created_if_moved_through_p (insn_t jump)
3470 {
3471 insn_t succ;
3472 succ_iterator si;
3473
3474 FOR_EACH_SUCC (succ, si, jump)
3475 if (sel_num_cfg_preds_gt_1 (succ))
3476 return true;
3477
3478 return false;
3479 }
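/* E.g. when JUMP ends a block with two successors and one of them also has
   another predecessor, moving an expr up through JUMP requires inserting a
   bookkeeping copy on that other incoming path, hence the check for
   successors with more than one predecessor.  */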
3480
3481 /* Return 'true' if INSN is the only one in its basic block. */
3482 static bool
3483 insn_is_the_only_one_in_bb_p (insn_t insn)
3484 {
3485 return sel_bb_head_p (insn) && sel_bb_end_p (insn);
3486 }
3487
3488 #ifdef ENABLE_CHECKING
3489 /* Check that the region we're scheduling still has at most one
3490 backedge. */
3491 static void
3492 verify_backedges (void)
3493 {
3494 if (pipelining_p)
3495 {
3496 int i, n = 0;
3497 edge e;
3498 edge_iterator ei;
3499
3500 for (i = 0; i < current_nr_blocks; i++)
3501 FOR_EACH_EDGE (e, ei, BASIC_BLOCK (BB_TO_BLOCK (i))->succs)
3502 if (in_current_region_p (e->dest)
3503 && BLOCK_TO_BB (e->dest->index) < i)
3504 n++;
3505
3506 gcc_assert (n <= 1);
3507 }
3508 }
3509 #endif
3510 \f
3511
3512 /* Functions to work with control flow. */
3513
3514 /* Recompute BLOCK_TO_BB and BB_TO_BLOCK for the current region so that blocks
3515 are sorted in topological order (the ordering might have been invalidated by
3516 redirecting an edge). */
3517 static void
3518 sel_recompute_toporder (void)
3519 {
3520 int i, n, rgn;
3521 int *postorder, n_blocks;
3522
3523 postorder = XALLOCAVEC (int, n_basic_blocks);
3524 n_blocks = post_order_compute (postorder, false, false);
3525
3526 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
3527 for (n = 0, i = n_blocks - 1; i >= 0; i--)
3528 if (CONTAINING_RGN (postorder[i]) == rgn)
3529 {
3530 BLOCK_TO_BB (postorder[i]) = n;
3531 BB_TO_BLOCK (n) = postorder[i];
3532 n++;
3533 }
3534
3535 /* Assert that we updated info for all blocks. We may miss some blocks if
3536 this function is called when redirecting an edge has made a block
3537 unreachable, but that block has not been deleted yet. */
3538 gcc_assert (n == RGN_NR_BLOCKS (rgn));
3539 }
3540
3541 /* Tidy the possibly empty block BB. */
3542 static bool
3543 maybe_tidy_empty_bb (basic_block bb, bool recompute_toporder_p)
3544 {
3545 basic_block succ_bb, pred_bb;
3546 edge e;
3547 edge_iterator ei;
3548 bool rescan_p;
3549
3550 /* Keep an empty bb only if this block immediately precedes EXIT and
3551 has an incoming non-fallthrough edge, or if it has no predecessors or
3552 successors. Otherwise remove it. */
3553 if (!sel_bb_empty_p (bb)
3554 || (single_succ_p (bb)
3555 && single_succ (bb) == EXIT_BLOCK_PTR
3556 && (!single_pred_p (bb)
3557 || !(single_pred_edge (bb)->flags & EDGE_FALLTHRU)))
3558 || EDGE_COUNT (bb->preds) == 0
3559 || EDGE_COUNT (bb->succs) == 0)
3560 return false;
3561
3562 /* Do not attempt to redirect complex edges. */
3563 FOR_EACH_EDGE (e, ei, bb->preds)
3564 if (e->flags & EDGE_COMPLEX)
3565 return false;
3566
3567 free_data_sets (bb);
3568
3569 /* Do not delete BB if it has more than one successor.
3570 That can occur when we are moving a jump. */
3571 if (!single_succ_p (bb))
3572 {
3573 gcc_assert (can_merge_blocks_p (bb->prev_bb, bb));
3574 sel_merge_blocks (bb->prev_bb, bb);
3575 return true;
3576 }
3577
3578 succ_bb = single_succ (bb);
3579 rescan_p = true;
3580 pred_bb = NULL;
3581
3582 /* Redirect all non-fallthru edges to the next bb. */
3583 while (rescan_p)
3584 {
3585 rescan_p = false;
3586
3587 FOR_EACH_EDGE (e, ei, bb->preds)
3588 {
3589 pred_bb = e->src;
3590
3591 if (!(e->flags & EDGE_FALLTHRU))
3592 {
3593 recompute_toporder_p |= sel_redirect_edge_and_branch (e, succ_bb);
3594 rescan_p = true;
3595 break;
3596 }
3597 }
3598 }
3599
3600 /* If possible, merge BB with its predecessor. */
3601 if (can_merge_blocks_p (bb->prev_bb, bb))
3602 sel_merge_blocks (bb->prev_bb, bb);
3603 else
3604 /* Otherwise this is a block without a fallthru predecessor.
3605 Just delete it. */
3606 {
3607 gcc_assert (pred_bb != NULL);
3608
3609 if (in_current_region_p (pred_bb))
3610 move_bb_info (pred_bb, bb);
3611 remove_empty_bb (bb, true);
3612 }
3613
3614 if (recompute_toporder_p)
3615 sel_recompute_toporder ();
3616
3617 #ifdef ENABLE_CHECKING
3618 verify_backedges ();
3619 #endif
3620
3621 return true;
3622 }
3623
3624 /* Tidy the control flow after we have removed the original insn from
3625 XBB. Return true if we have removed some blocks. When FULL_TIDYING
3626 is true, also try to optimize control flow on non-empty blocks. */
3627 bool
3628 tidy_control_flow (basic_block xbb, bool full_tidying)
3629 {
3630 bool changed = true;
3631 insn_t first, last;
3632
3633 /* First check whether XBB is empty. */
3634 changed = maybe_tidy_empty_bb (xbb, false);
3635 if (changed || !full_tidying)
3636 return changed;
3637
3638 /* Check if there is an unnecessary jump left after the insn. */
3639 if (jump_leads_only_to_bb_p (BB_END (xbb), xbb->next_bb)
3640 && INSN_SCHED_TIMES (BB_END (xbb)) == 0
3641 && !IN_CURRENT_FENCE_P (BB_END (xbb)))
3642 {
3643 if (sel_remove_insn (BB_END (xbb), false, false))
3644 return true;
3645 tidy_fallthru_edge (EDGE_SUCC (xbb, 0));
3646 }
3647
3648 first = sel_bb_head (xbb);
3649 last = sel_bb_end (xbb);
3650 if (MAY_HAVE_DEBUG_INSNS)
3651 {
3652 if (first != last && DEBUG_INSN_P (first))
3653 do
3654 first = NEXT_INSN (first);
3655 while (first != last && (DEBUG_INSN_P (first) || NOTE_P (first)));
3656
3657 if (first != last && DEBUG_INSN_P (last))
3658 do
3659 last = PREV_INSN (last);
3660 while (first != last && (DEBUG_INSN_P (last) || NOTE_P (last)));
3661 }
3662 /* Check if an unnecessary jump leading to the next basic block is left
3663 in the previous basic block after removing INSN from the stream.
3664 If so, remove that jump and redirect its edge to the current
3665 basic block (where INSN was before deletion). This way,
3666 when the NOP is deleted several instructions later together with its
3667 basic block, we will not get a jump to the next instruction, which
3668 can be harmful. */
3669 if (first == last
3670 && !sel_bb_empty_p (xbb)
3671 && INSN_NOP_P (last)
3672 /* Flow goes fallthru from current block to the next. */
3673 && EDGE_COUNT (xbb->succs) == 1
3674 && (EDGE_SUCC (xbb, 0)->flags & EDGE_FALLTHRU)
3675 /* When successor is an EXIT block, it may not be the next block. */
3676 && single_succ (xbb) != EXIT_BLOCK_PTR
3677 /* And unconditional jump in previous basic block leads to
3678 next basic block of XBB and this jump can be safely removed. */
3679 && in_current_region_p (xbb->prev_bb)
3680 && jump_leads_only_to_bb_p (BB_END (xbb->prev_bb), xbb->next_bb)
3681 && INSN_SCHED_TIMES (BB_END (xbb->prev_bb)) == 0
3682 /* Also this jump is not at the scheduling boundary. */
3683 && !IN_CURRENT_FENCE_P (BB_END (xbb->prev_bb)))
3684 {
3685 bool recompute_toporder_p;
3686 /* Clear data structures of jump - jump itself will be removed
3687 by sel_redirect_edge_and_branch. */
3688 clear_expr (INSN_EXPR (BB_END (xbb->prev_bb)));
3689 recompute_toporder_p
3690 = sel_redirect_edge_and_branch (EDGE_SUCC (xbb->prev_bb, 0), xbb);
3691
3692 gcc_assert (EDGE_SUCC (xbb->prev_bb, 0)->flags & EDGE_FALLTHRU);
3693
3694 /* It can turn out that after removing the unused jump, the basic block
3695 that contained that jump becomes empty too. In such a case
3696 remove it as well. */
3697 if (sel_bb_empty_p (xbb->prev_bb))
3698 changed = maybe_tidy_empty_bb (xbb->prev_bb, recompute_toporder_p);
3699 else if (recompute_toporder_p)
3700 sel_recompute_toporder ();
3701 }
3702
3703 return changed;
3704 }
3705
3706 /* Purge meaningless empty blocks in the middle of a region. */
3707 void
3708 purge_empty_blocks (void)
3709 {
3710 /* Do not attempt to delete preheader. */
3711 int i = sel_is_loop_preheader_p (BASIC_BLOCK (BB_TO_BLOCK (0))) ? 1 : 0;
3712
3713 while (i < current_nr_blocks)
3714 {
3715 basic_block b = BASIC_BLOCK (BB_TO_BLOCK (i));
3716
3717 if (maybe_tidy_empty_bb (b, false))
3718 continue;
3719
3720 i++;
3721 }
3722 }
3723
3724 /* Rip INSN out of the insn stream. When ONLY_DISCONNECT is true,
3725 do not delete the insn's data, because it will be re-emitted later.
3726 Return true if we have removed some blocks afterwards. */
3727 bool
3728 sel_remove_insn (insn_t insn, bool only_disconnect, bool full_tidying)
3729 {
3730 basic_block bb = BLOCK_FOR_INSN (insn);
3731
3732 gcc_assert (INSN_IN_STREAM_P (insn));
3733
3734 if (DEBUG_INSN_P (insn) && BB_AV_SET_VALID_P (bb))
3735 {
3736 expr_t expr;
3737 av_set_iterator i;
3738
3739 /* When we remove a debug insn that is head of a BB, it remains
3740 in the AV_SET of the block, but it shouldn't. */
3741 FOR_EACH_EXPR_1 (expr, i, &BB_AV_SET (bb))
3742 if (EXPR_INSN_RTX (expr) == insn)
3743 {
3744 av_set_iter_remove (&i);
3745 break;
3746 }
3747 }
3748
3749 if (only_disconnect)
3750 {
3751 insn_t prev = PREV_INSN (insn);
3752 insn_t next = NEXT_INSN (insn);
3753 basic_block bb = BLOCK_FOR_INSN (insn);
3754
3755 NEXT_INSN (prev) = next;
3756 PREV_INSN (next) = prev;
3757
3758 if (BB_HEAD (bb) == insn)
3759 {
3760 gcc_assert (BLOCK_FOR_INSN (prev) == bb);
3761 BB_HEAD (bb) = prev;
3762 }
3763 if (BB_END (bb) == insn)
3764 BB_END (bb) = prev;
3765 }
3766 else
3767 {
3768 remove_insn (insn);
3769 clear_expr (INSN_EXPR (insn));
3770 }
3771
3772 /* It is necessary to null these fields before calling add_insn (). */
3773 PREV_INSN (insn) = NULL_RTX;
3774 NEXT_INSN (insn) = NULL_RTX;
3775
3776 return tidy_control_flow (bb, full_tidying);
3777 }
3778
3779 /* Estimate the number of insns in BB. */
3780 static int
3781 sel_estimate_number_of_insns (basic_block bb)
3782 {
3783 int res = 0;
3784 insn_t insn = NEXT_INSN (BB_HEAD (bb)), next_tail = NEXT_INSN (BB_END (bb));
3785
3786 for (; insn != next_tail; insn = NEXT_INSN (insn))
3787 if (NONDEBUG_INSN_P (insn))
3788 res++;
3789
3790 return res;
3791 }
3792
3793 /* We don't need separate luids for notes or labels. */
3794 static int
3795 sel_luid_for_non_insn (rtx x)
3796 {
3797 gcc_assert (NOTE_P (x) || LABEL_P (x));
3798
3799 return -1;
3800 }
3801
3802 /* Return seqno of the only predecessor of INSN. */
3803 static int
3804 get_seqno_of_a_pred (insn_t insn)
3805 {
3806 int seqno;
3807
3808 gcc_assert (INSN_SIMPLEJUMP_P (insn));
3809
3810 if (!sel_bb_head_p (insn))
3811 seqno = INSN_SEQNO (PREV_INSN (insn));
3812 else
3813 {
3814 basic_block bb = BLOCK_FOR_INSN (insn);
3815
3816 if (single_pred_p (bb)
3817 && !in_current_region_p (single_pred (bb)))
3818 {
3819 /* We can have preds outside a region when splitting edges
3820 for pipelining of an outer loop. Use succ instead.
3821 There should be only one of them. */
3822 insn_t succ = NULL;
3823 succ_iterator si;
3824 bool first = true;
3825
3826 gcc_assert (flag_sel_sched_pipelining_outer_loops
3827 && current_loop_nest);
3828 FOR_EACH_SUCC_1 (succ, si, insn,
3829 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
3830 {
3831 gcc_assert (first);
3832 first = false;
3833 }
3834
3835 gcc_assert (succ != NULL);
3836 seqno = INSN_SEQNO (succ);
3837 }
3838 else
3839 {
3840 insn_t *preds;
3841 int n;
3842
3843 cfg_preds (BLOCK_FOR_INSN (insn), &preds, &n);
3844 gcc_assert (n == 1);
3845
3846 seqno = INSN_SEQNO (preds[0]);
3847
3848 free (preds);
3849 }
3850 }
3851
3852 return seqno;
3853 }
3854
3855 /* Find the proper seqno for inserting at INSN. Returns -1 if no predecessors
3856 with positive seqno exist. */
3857 int
3858 get_seqno_by_preds (rtx insn)
3859 {
3860 basic_block bb = BLOCK_FOR_INSN (insn);
3861 rtx tmp = insn, head = BB_HEAD (bb);
3862 insn_t *preds;
3863 int n, i, seqno;
3864
3865 while (tmp != head)
3866 if (INSN_P (tmp))
3867 return INSN_SEQNO (tmp);
3868 else
3869 tmp = PREV_INSN (tmp);
3870
3871 cfg_preds (bb, &preds, &n);
3872 for (i = 0, seqno = -1; i < n; i++)
3873 seqno = MAX (seqno, INSN_SEQNO (preds[i]));
3874
3875 return seqno;
3876 }
3877
3878 \f
3879
3880 /* Extend pass-scope data structures for basic blocks. */
3881 void
3882 sel_extend_global_bb_info (void)
3883 {
3884 VEC_safe_grow_cleared (sel_global_bb_info_def, heap, sel_global_bb_info,
3885 last_basic_block);
3886 }
3887
3888 /* Extend region-scope data structures for basic blocks. */
3889 static void
3890 extend_region_bb_info (void)
3891 {
3892 VEC_safe_grow_cleared (sel_region_bb_info_def, heap, sel_region_bb_info,
3893 last_basic_block);
3894 }
3895
3896 /* Extend all data structures to fit for all basic blocks. */
3897 static void
3898 extend_bb_info (void)
3899 {
3900 sel_extend_global_bb_info ();
3901 extend_region_bb_info ();
3902 }
3903
3904 /* Finalize pass-scope data structures for basic blocks. */
3905 void
3906 sel_finish_global_bb_info (void)
3907 {
3908 VEC_free (sel_global_bb_info_def, heap, sel_global_bb_info);
3909 }
3910
3911 /* Finalize region-scope data structures for basic blocks. */
3912 static void
3913 finish_region_bb_info (void)
3914 {
3915 VEC_free (sel_region_bb_info_def, heap, sel_region_bb_info);
3916 }
3917 \f
3918
3919 /* Data for each insn in current region. */
3920 VEC (sel_insn_data_def, heap) *s_i_d = NULL;
3921
3922 /* A vector for the insns we've emitted. */
3923 static insn_vec_t new_insns = NULL;
3924
3925 /* Extend data structures for insns from current region. */
3926 static void
3927 extend_insn_data (void)
3928 {
3929 int reserve;
3930
3931 sched_extend_target ();
3932 sched_deps_init (false);
3933
3934 /* Extend data structures for insns from current region. */
3935 reserve = (sched_max_luid + 1
3936 - VEC_length (sel_insn_data_def, s_i_d));
3937 if (reserve > 0
3938 && ! VEC_space (sel_insn_data_def, s_i_d, reserve))
3939 {
3940 int size;
3941
3942 if (sched_max_luid / 2 > 1024)
3943 size = sched_max_luid + 1024;
3944 else
3945 size = 3 * sched_max_luid / 2;
3946
3947
3948 VEC_safe_grow_cleared (sel_insn_data_def, heap, s_i_d, size);
3949 }
3950 }
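/* The growth policy above over-allocates to amortize reallocations: e.g.
   for sched_max_luid == 1000 the vector is grown to 1500 entries, while
   for sched_max_luid == 10000 it is grown to 11024 entries.  */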
3951
3952 /* Finalize data structures for insns from current region. */
3953 static void
3954 finish_insns (void)
3955 {
3956 unsigned i;
3957
3958 /* Clear here all dependence contexts that may have been left over from insns
3959 that were removed during scheduling. */
3960 for (i = 0; i < VEC_length (sel_insn_data_def, s_i_d); i++)
3961 {
3962 sel_insn_data_def *sid_entry = VEC_index (sel_insn_data_def, s_i_d, i);
3963
3964 if (sid_entry->live)
3965 return_regset_to_pool (sid_entry->live);
3966 if (sid_entry->analyzed_deps)
3967 {
3968 BITMAP_FREE (sid_entry->analyzed_deps);
3969 BITMAP_FREE (sid_entry->found_deps);
3970 htab_delete (sid_entry->transformed_insns);
3971 free_deps (&sid_entry->deps_context);
3972 }
3973 if (EXPR_VINSN (&sid_entry->expr))
3974 {
3975 clear_expr (&sid_entry->expr);
3976
3977 /* Also, clear CANT_MOVE bit here, because we really don't want it
3978 to be passed to the next region. */
3979 CANT_MOVE_BY_LUID (i) = 0;
3980 }
3981 }
3982
3983 VEC_free (sel_insn_data_def, heap, s_i_d);
3984 }
3985
3986 /* A proxy to pass initialization data to init_insn (). */
3987 static sel_insn_data_def _insn_init_ssid;
3988 static sel_insn_data_t insn_init_ssid = &_insn_init_ssid;
3989
3990 /* If true create a new vinsn. Otherwise use the one from EXPR. */
3991 static bool insn_init_create_new_vinsn_p;
3992
3993 /* Set all necessary data for initialization of the new insn[s]. */
3994 static expr_t
3995 set_insn_init (expr_t expr, vinsn_t vi, int seqno)
3996 {
3997 expr_t x = &insn_init_ssid->expr;
3998
3999 copy_expr_onside (x, expr);
4000 if (vi != NULL)
4001 {
4002 insn_init_create_new_vinsn_p = false;
4003 change_vinsn_in_expr (x, vi);
4004 }
4005 else
4006 insn_init_create_new_vinsn_p = true;
4007
4008 insn_init_ssid->seqno = seqno;
4009 return x;
4010 }
4011
4012 /* Init data for INSN. */
4013 static void
4014 init_insn_data (insn_t insn)
4015 {
4016 expr_t expr;
4017 sel_insn_data_t ssid = insn_init_ssid;
4018
4019 /* The fields mentioned below are special and hence are not being
4020 propagated to the new insns. */
4021 gcc_assert (!ssid->asm_p && ssid->sched_next == NULL
4022 && !ssid->after_stall_p && ssid->sched_cycle == 0);
4023 gcc_assert (INSN_P (insn) && INSN_LUID (insn) > 0);
4024
4025 expr = INSN_EXPR (insn);
4026 copy_expr (expr, &ssid->expr);
4027 prepare_insn_expr (insn, ssid->seqno);
4028
4029 if (insn_init_create_new_vinsn_p)
4030 change_vinsn_in_expr (expr, vinsn_create (insn, init_insn_force_unique_p));
4031
4032 if (first_time_insn_init (insn))
4033 init_first_time_insn_data (insn);
4034 }
4035
4036 /* This is used to initialize spurious jumps generated by
4037 sel_redirect_edge_and_branch () and related edge-redirection routines. */
4038 static void
4039 init_simplejump_data (insn_t insn)
4040 {
4041 init_expr (INSN_EXPR (insn), vinsn_create (insn, false), 0,
4042 REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0, NULL, true, false, false,
4043 false, true);
4044 INSN_SEQNO (insn) = get_seqno_of_a_pred (insn);
4045 init_first_time_insn_data (insn);
4046 }
4047
4048 /* Perform deferred initialization of insns. This is used to process
4049 a new jump that may be created by redirect_edge. */
4050 void
4051 sel_init_new_insn (insn_t insn, int flags)
4052 {
4053 /* We create data structures for bb when the first insn is emitted in it. */
4054 if (INSN_P (insn)
4055 && INSN_IN_STREAM_P (insn)
4056 && insn_is_the_only_one_in_bb_p (insn))
4057 {
4058 extend_bb_info ();
4059 create_initial_data_sets (BLOCK_FOR_INSN (insn));
4060 }
4061
4062 if (flags & INSN_INIT_TODO_LUID)
4063 sched_init_luids (NULL, NULL, NULL, insn);
4064
4065 if (flags & INSN_INIT_TODO_SSID)
4066 {
4067 extend_insn_data ();
4068 init_insn_data (insn);
4069 clear_expr (&insn_init_ssid->expr);
4070 }
4071
4072 if (flags & INSN_INIT_TODO_SIMPLEJUMP)
4073 {
4074 extend_insn_data ();
4075 init_simplejump_data (insn);
4076 }
4077
4078 gcc_assert (CONTAINING_RGN (BLOCK_NUM (insn))
4079 == CONTAINING_RGN (BB_TO_BLOCK (0)));
4080 }
4081 \f
4082
4083 /* Functions to init/finish work with lv sets. */
4084
4085 /* Init BB_LV_SET of BB from DF_LR_IN set of BB. */
4086 static void
4087 init_lv_set (basic_block bb)
4088 {
4089 gcc_assert (!BB_LV_SET_VALID_P (bb));
4090
4091 BB_LV_SET (bb) = get_regset_from_pool ();
4092 COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb));
4093 BB_LV_SET_VALID_P (bb) = true;
4094 }
4095
4096 /* Copy liveness information to BB from FROM_BB. */
4097 static void
4098 copy_lv_set_from (basic_block bb, basic_block from_bb)
4099 {
4100 gcc_assert (!BB_LV_SET_VALID_P (bb));
4101
4102 COPY_REG_SET (BB_LV_SET (bb), BB_LV_SET (from_bb));
4103 BB_LV_SET_VALID_P (bb) = true;
4104 }
4105
4106 /* Initialize lv set of all bb headers. */
4107 void
4108 init_lv_sets (void)
4109 {
4110 basic_block bb;
4111
4112 /* Initialize LV sets. */
4113 FOR_EACH_BB (bb)
4114 init_lv_set (bb);
4115
4116 /* Don't forget EXIT_BLOCK. */
4117 init_lv_set (EXIT_BLOCK_PTR);
4118 }
4119
4120 /* Release the lv set of BB. */
4121 static void
4122 free_lv_set (basic_block bb)
4123 {
4124 gcc_assert (BB_LV_SET (bb) != NULL);
4125
4126 return_regset_to_pool (BB_LV_SET (bb));
4127 BB_LV_SET (bb) = NULL;
4128 BB_LV_SET_VALID_P (bb) = false;
4129 }
4130
4131 /* Finalize lv sets of all bb headers. */
4132 void
4133 free_lv_sets (void)
4134 {
4135 basic_block bb;
4136
4137 /* Don't forget EXIT_BLOCK. */
4138 free_lv_set (EXIT_BLOCK_PTR);
4139
4140 /* Free LV sets. */
4141 FOR_EACH_BB (bb)
4142 if (BB_LV_SET (bb))
4143 free_lv_set (bb);
4144 }
4145
4146 /* Initialize an invalid AV_SET for BB.
4147 This set will be updated the next time compute_av_set () processes BB. */
4148 static void
4149 invalidate_av_set (basic_block bb)
4150 {
4151 gcc_assert (BB_AV_LEVEL (bb) <= 0
4152 && BB_AV_SET (bb) == NULL);
4153
4154 BB_AV_LEVEL (bb) = -1;
4155 }
4156
4157 /* Create initial data sets for BB (they will be invalid). */
4158 static void
4159 create_initial_data_sets (basic_block bb)
4160 {
4161 if (BB_LV_SET (bb))
4162 BB_LV_SET_VALID_P (bb) = false;
4163 else
4164 BB_LV_SET (bb) = get_regset_from_pool ();
4165 invalidate_av_set (bb);
4166 }
4167
4168 /* Free av set of BB. */
4169 static void
4170 free_av_set (basic_block bb)
4171 {
4172 av_set_clear (&BB_AV_SET (bb));
4173 BB_AV_LEVEL (bb) = 0;
4174 }
4175
4176 /* Free data sets of BB. */
4177 void
4178 free_data_sets (basic_block bb)
4179 {
4180 free_lv_set (bb);
4181 free_av_set (bb);
4182 }
4183
4184 /* Exchange lv sets of TO and FROM. */
4185 static void
4186 exchange_lv_sets (basic_block to, basic_block from)
4187 {
4188 {
4189 regset to_lv_set = BB_LV_SET (to);
4190
4191 BB_LV_SET (to) = BB_LV_SET (from);
4192 BB_LV_SET (from) = to_lv_set;
4193 }
4194
4195 {
4196 bool to_lv_set_valid_p = BB_LV_SET_VALID_P (to);
4197
4198 BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);
4199 BB_LV_SET_VALID_P (from) = to_lv_set_valid_p;
4200 }
4201 }
4202
4203
4204 /* Exchange av sets of TO and FROM. */
4205 static void
4206 exchange_av_sets (basic_block to, basic_block from)
4207 {
4208 {
4209 av_set_t to_av_set = BB_AV_SET (to);
4210
4211 BB_AV_SET (to) = BB_AV_SET (from);
4212 BB_AV_SET (from) = to_av_set;
4213 }
4214
4215 {
4216 int to_av_level = BB_AV_LEVEL (to);
4217
4218 BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
4219 BB_AV_LEVEL (from) = to_av_level;
4220 }
4221 }
4222
4223 /* Exchange data sets of TO and FROM. */
4224 void
4225 exchange_data_sets (basic_block to, basic_block from)
4226 {
4227 exchange_lv_sets (to, from);
4228 exchange_av_sets (to, from);
4229 }
4230
4231 /* Copy data sets of FROM to TO. */
4232 void
4233 copy_data_sets (basic_block to, basic_block from)
4234 {
4235 gcc_assert (!BB_LV_SET_VALID_P (to) && !BB_AV_SET_VALID_P (to));
4236 gcc_assert (BB_AV_SET (to) == NULL);
4237
4238 BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
4239 BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);
4240
4241 if (BB_AV_SET_VALID_P (from))
4242 {
4243 BB_AV_SET (to) = av_set_copy (BB_AV_SET (from));
4244 }
4245 if (BB_LV_SET_VALID_P (from))
4246 {
4247 gcc_assert (BB_LV_SET (to) != NULL);
4248 COPY_REG_SET (BB_LV_SET (to), BB_LV_SET (from));
4249 }
4250 }
4251
4252 /* Return an av set for INSN, if any. */
4253 av_set_t
4254 get_av_set (insn_t insn)
4255 {
4256 av_set_t av_set;
4257
4258 gcc_assert (AV_SET_VALID_P (insn));
4259
4260 if (sel_bb_head_p (insn))
4261 av_set = BB_AV_SET (BLOCK_FOR_INSN (insn));
4262 else
4263 av_set = NULL;
4264
4265 return av_set;
4266 }
4267
4268 /* Implementation of AV_LEVEL () macro. Return AV_LEVEL () of INSN. */
4269 int
4270 get_av_level (insn_t insn)
4271 {
4272 int av_level;
4273
4274 gcc_assert (INSN_P (insn));
4275
4276 if (sel_bb_head_p (insn))
4277 av_level = BB_AV_LEVEL (BLOCK_FOR_INSN (insn));
4278 else
4279 av_level = INSN_WS_LEVEL (insn);
4280
4281 return av_level;
4282 }
4283
4284 \f
4285
4286 /* Variables to work with control-flow graph. */
4287
4288 /* The basic blocks that have already been processed by sched_data_update (),
4289 but haven't been passed to sel_add_bb () yet. */
4290 static VEC (basic_block, heap) *last_added_blocks = NULL;
4291
4292 /* A pool for allocating successor infos. */
4293 static struct
4294 {
4295 /* A stack for saving succs_info structures. */
4296 struct succs_info *stack;
4297
4298 /* Its size. */
4299 int size;
4300
4301 /* Top of the stack. */
4302 int top;
4303
4304 /* Maximal value of the top. */
4305 int max_top;
4306 } succs_info_pool;
4307
4308 /* Functions to work with control-flow graph. */
4309
4310 /* Return the head (the first real insn) of BB, or NULL if BB is empty. */
4311 insn_t
4312 sel_bb_head (basic_block bb)
4313 {
4314 insn_t head;
4315
4316 if (bb == EXIT_BLOCK_PTR)
4317 {
4318 gcc_assert (exit_insn != NULL_RTX);
4319 head = exit_insn;
4320 }
4321 else
4322 {
4323 insn_t note;
4324
4325 note = bb_note (bb);
4326 head = next_nonnote_insn (note);
4327
4328 if (head && BLOCK_FOR_INSN (head) != bb)
4329 head = NULL_RTX;
4330 }
4331
4332 return head;
4333 }
4334
4335 /* Return true if INSN is a basic block header. */
4336 bool
4337 sel_bb_head_p (insn_t insn)
4338 {
4339 return sel_bb_head (BLOCK_FOR_INSN (insn)) == insn;
4340 }
4341
4342 /* Return last insn of BB. */
4343 insn_t
4344 sel_bb_end (basic_block bb)
4345 {
4346 if (sel_bb_empty_p (bb))
4347 return NULL_RTX;
4348
4349 gcc_assert (bb != EXIT_BLOCK_PTR);
4350
4351 return BB_END (bb);
4352 }
4353
4354 /* Return true if INSN is the last insn in its basic block. */
4355 bool
4356 sel_bb_end_p (insn_t insn)
4357 {
4358 return insn == sel_bb_end (BLOCK_FOR_INSN (insn));
4359 }
4360
4361 /* Return true if BB consists of a single NOTE_INSN_BASIC_BLOCK. */
4362 bool
4363 sel_bb_empty_p (basic_block bb)
4364 {
4365 return sel_bb_head (bb) == NULL;
4366 }
4367
4368 /* True when BB belongs to the current scheduling region. */
4369 bool
4370 in_current_region_p (basic_block bb)
4371 {
4372 if (bb->index < NUM_FIXED_BLOCKS)
4373 return false;
4374
4375 return CONTAINING_RGN (bb->index) == CONTAINING_RGN (BB_TO_BLOCK (0));
4376 }
4377
4378 /* Return the block which is a fallthru bb of a conditional jump JUMP. */
4379 basic_block
4380 fallthru_bb_of_jump (rtx jump)
4381 {
4382 if (!JUMP_P (jump))
4383 return NULL;
4384
4385 if (any_uncondjump_p (jump))
4386 return single_succ (BLOCK_FOR_INSN (jump));
4387
4388 if (!any_condjump_p (jump))
4389 return NULL;
4390
4391 /* A basic block that ends with a conditional jump may still have one successor
4392 (and be followed by a barrier); we are not interested in such blocks. */
4393 if (single_succ_p (BLOCK_FOR_INSN (jump)))
4394 return NULL;
4395
4396 return FALLTHRU_EDGE (BLOCK_FOR_INSN (jump))->dest;
4397 }
4398
4399 /* Remove all notes from BB. */
4400 static void
4401 init_bb (basic_block bb)
4402 {
4403 remove_notes (bb_note (bb), BB_END (bb));
4404 BB_NOTE_LIST (bb) = note_list;
4405 }
4406
4407 void
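/* Initialize per-bb data structures for the blocks in BBS and for BB, if it
   is non-NULL: extend the bb info and save/remove the notes of each block.  */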
4408 sel_init_bbs (bb_vec_t bbs, basic_block bb)
4409 {
4410 const struct sched_scan_info_def ssi =
4411 {
4412 extend_bb_info, /* extend_bb */
4413 init_bb, /* init_bb */
4414 NULL, /* extend_insn */
4415 NULL /* init_insn */
4416 };
4417
4418 sched_scan (&ssi, bbs, bb, new_insns, NULL);
4419 }
4420
4421 /* Restore notes for the whole region. */
4422 static void
4423 sel_restore_notes (void)
4424 {
4425 int bb;
4426 insn_t insn;
4427
4428 for (bb = 0; bb < current_nr_blocks; bb++)
4429 {
4430 basic_block first, last;
4431
4432 first = EBB_FIRST_BB (bb);
4433 last = EBB_LAST_BB (bb)->next_bb;
4434
4435 do
4436 {
4437 note_list = BB_NOTE_LIST (first);
4438 restore_other_notes (NULL, first);
4439 BB_NOTE_LIST (first) = NULL_RTX;
4440
4441 FOR_BB_INSNS (first, insn)
4442 if (NONDEBUG_INSN_P (insn))
4443 reemit_notes (insn);
4444
4445 first = first->next_bb;
4446 }
4447 while (first != last);
4448 }
4449 }
4450
4451 /* Free per-bb data structures. */
4452 void
4453 sel_finish_bbs (void)
4454 {
4455 sel_restore_notes ();
4456
4457 /* Remove current loop preheader from this loop. */
4458 if (current_loop_nest)
4459 sel_remove_loop_preheader ();
4460
4461 finish_region_bb_info ();
4462 }
4463
4464 /* Return true if INSN has at most one successor of type FLAGS. */
4465 bool
4466 sel_insn_has_single_succ_p (insn_t insn, int flags)
4467 {
4468 insn_t succ;
4469 succ_iterator si;
4470 bool first_p = true;
4471
4472 FOR_EACH_SUCC_1 (succ, si, insn, flags)
4473 {
4474 if (first_p)
4475 first_p = false;
4476 else
4477 return false;
4478 }
4479
4480 return true;
4481 }
4482
4483 /* Allocate successor's info. */
4484 static struct succs_info *
4485 alloc_succs_info (void)
4486 {
4487 if (succs_info_pool.top == succs_info_pool.max_top)
4488 {
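/* This stack depth is used for the first time -- allocate its vectors.  */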
4489 int i;
4490
4491 if (++succs_info_pool.max_top >= succs_info_pool.size)
4492 gcc_unreachable ();
4493
4494 i = ++succs_info_pool.top;
4495 succs_info_pool.stack[i].succs_ok = VEC_alloc (rtx, heap, 10);
4496 succs_info_pool.stack[i].succs_other = VEC_alloc (rtx, heap, 10);
4497 succs_info_pool.stack[i].probs_ok = VEC_alloc (int, heap, 10);
4498 }
4499 else
4500 succs_info_pool.top++;
4501
4502 return &succs_info_pool.stack[succs_info_pool.top];
4503 }
4504
4505 /* Free successor's info. */
4506 void
4507 free_succs_info (struct succs_info * sinfo)
4508 {
4509 gcc_assert (succs_info_pool.top >= 0
4510 && &succs_info_pool.stack[succs_info_pool.top] == sinfo);
4511 succs_info_pool.top--;
4512
4513 /* Clear stale info. */
4514 VEC_block_remove (rtx, sinfo->succs_ok,
4515 0, VEC_length (rtx, sinfo->succs_ok));
4516 VEC_block_remove (rtx, sinfo->succs_other,
4517 0, VEC_length (rtx, sinfo->succs_other));
4518 VEC_block_remove (int, sinfo->probs_ok,
4519 0, VEC_length (int, sinfo->probs_ok));
4520 sinfo->all_prob = 0;
4521 sinfo->succs_ok_n = 0;
4522 sinfo->all_succs_n = 0;
4523 }
4524
4525 /* Compute successor info for INSN. FLAGS are the flags passed
4526 to the FOR_EACH_SUCC_1 iterator. */
4527 struct succs_info *
4528 compute_succs_info (insn_t insn, short flags)
4529 {
4530 succ_iterator si;
4531 insn_t succ;
4532 struct succs_info *sinfo = alloc_succs_info ();
4533
4534 /* Traverse *all* successors and decide what to do with each. */
4535 FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
4536 {
4537 /* FIXME: this doesn't work for skipping to loop exits, as we don't
4538 perform code motion through inner loops. */
4539 short current_flags = si.current_flags & ~SUCCS_SKIP_TO_LOOP_EXITS;
4540
4541 if (current_flags & flags)
4542 {
4543 VEC_safe_push (rtx, heap, sinfo->succs_ok, succ);
4544 VEC_safe_push (int, heap, sinfo->probs_ok,
4545 /* FIXME: Improve calculation when skipping
4546 inner loop to exits. */
4547 (si.bb_end
4548 ? si.e1->probability
4549 : REG_BR_PROB_BASE));
4550 sinfo->succs_ok_n++;
4551 }
4552 else
4553 VEC_safe_push (rtx, heap, sinfo->succs_other, succ);
4554
4555 /* Compute all_prob. */
4556 if (!si.bb_end)
4557 sinfo->all_prob = REG_BR_PROB_BASE;
4558 else
4559 sinfo->all_prob += si.e1->probability;
4560
4561 sinfo->all_succs_n++;
4562 }
4563
4564 return sinfo;
4565 }
4566
4567 /* Return the predecessors of BB in PREDS and their number in N.
4568 Empty blocks are skipped. SIZE is used to allocate PREDS. */
4569 static void
4570 cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size)
4571 {
4572 edge e;
4573 edge_iterator ei;
4574
4575 gcc_assert (BLOCK_TO_BB (bb->index) != 0);
4576
4577 FOR_EACH_EDGE (e, ei, bb->preds)
4578 {
4579 basic_block pred_bb = e->src;
4580 insn_t bb_end = BB_END (pred_bb);
4581
4582 /* ??? This code is not supposed to walk out of a region. */
4583 gcc_assert (in_current_region_p (pred_bb));
4584
4585 if (sel_bb_empty_p (pred_bb))
4586 cfg_preds_1 (pred_bb, preds, n, size);
4587 else
4588 {
4589 if (*n == *size)
4590 *preds = XRESIZEVEC (insn_t, *preds,
4591 (*size = 2 * *size + 1));
4592 (*preds)[(*n)++] = bb_end;
4593 }
4594 }
4595
4596 gcc_assert (*n != 0);
4597 }
4598
4599 /* Find all predecessors of BB and record them in PREDS and their number
4600 in N. Empty blocks are skipped, and only normal (forward in-region)
4601 edges are processed. */
4602 static void
4603 cfg_preds (basic_block bb, insn_t **preds, int *n)
4604 {
4605 int size = 0;
4606
4607 *preds = NULL;
4608 *n = 0;
4609 cfg_preds_1 (bb, preds, n, &size);
4610 }
4611
4612 /* Returns true if we are moving INSN through a join point. */
4613 bool
4614 sel_num_cfg_preds_gt_1 (insn_t insn)
4615 {
4616 basic_block bb;
4617
4618 if (!sel_bb_head_p (insn) || INSN_BB (insn) == 0)
4619 return false;
4620
4621 bb = BLOCK_FOR_INSN (insn);
4622
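/* Walk up the chain of empty predecessor blocks; stop at a join point
   (more than one pred) or at a non-empty block.  */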
4623 while (1)
4624 {
4625 if (EDGE_COUNT (bb->preds) > 1)
4626 return true;
4627
4628 gcc_assert (EDGE_PRED (bb, 0)->dest == bb);
4629 bb = EDGE_PRED (bb, 0)->src;
4630
4631 if (!sel_bb_empty_p (bb))
4632 break;
4633 }
4634
4635 return false;
4636 }
4637
4638 /* Returns true when BB should be the end of an ebb. Adapted from the
4639 code in sched-ebb.c. */
4640 bool
4641 bb_ends_ebb_p (basic_block bb)
4642 {
4643 basic_block next_bb = bb_next_bb (bb);
4644 edge e;
4645 edge_iterator ei;
4646
4647 if (next_bb == EXIT_BLOCK_PTR
4648 || bitmap_bit_p (forced_ebb_heads, next_bb->index)
4649 || (LABEL_P (BB_HEAD (next_bb))
4650 /* NB: LABEL_NUSES () is not maintained outside of jump.c.
4651 Work around that. */
4652 && !single_pred_p (next_bb)))
4653 return true;
4654
4655 if (!in_current_region_p (next_bb))
4656 return true;
4657
4658 FOR_EACH_EDGE (e, ei, bb->succs)
4659 if ((e->flags & EDGE_FALLTHRU) != 0)
4660 {
4661 gcc_assert (e->dest == next_bb);
4662
4663 return false;
4664 }
4665
4666 return true;
4667 }
4668
4669 /* Returns true when INSN and SUCC are in the same EBB, given that SUCC is a
4670 successor of INSN. */
4671 bool
4672 in_same_ebb_p (insn_t insn, insn_t succ)
4673 {
4674 basic_block ptr = BLOCK_FOR_INSN (insn);
4675
4676 for(;;)
4677 {
4678 if (ptr == BLOCK_FOR_INSN (succ))
4679 return true;
4680
4681 if (bb_ends_ebb_p (ptr))
4682 return false;
4683
4684 ptr = bb_next_bb (ptr);
4685 }
4686
4687 gcc_unreachable ();
4688 return false;
4689 }
4690
4691 /* Recomputes the reverse topological order for the function and
4692 saves it in REV_TOP_ORDER_INDEX. REV_TOP_ORDER_INDEX_LEN is also
4693 modified appropriately. */
4694 static void
4695 recompute_rev_top_order (void)
4696 {
4697 int *postorder;
4698 int n_blocks, i;
4699
4700 if (!rev_top_order_index || rev_top_order_index_len < last_basic_block)
4701 {
4702 rev_top_order_index_len = last_basic_block;
4703 rev_top_order_index = XRESIZEVEC (int, rev_top_order_index,
4704 rev_top_order_index_len);
4705 }
4706
4707 postorder = XNEWVEC (int, n_basic_blocks);
4708
4709 n_blocks = post_order_compute (postorder, true, false);
4710 gcc_assert (n_basic_blocks == n_blocks);
4711
4712 /* Build the reverse function: for each basic block with BB->INDEX == K,
4713 rev_top_order_index[K] is its reverse topological sort number. */
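/* For example, with POSTORDER == {2, 4, 3}, block 3 gets index 2, the
   largest, and is therefore ordered first by bb_top_order_comparator.  */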
4714 for (i = 0; i < n_blocks; i++)
4715 {
4716 gcc_assert (postorder[i] < rev_top_order_index_len);
4717 rev_top_order_index[postorder[i]] = i;
4718 }
4719
4720 free (postorder);
4721 }
4722
4723 /* Clear all flags from insns in BB that could spoil its rescheduling. */
4724 void
4725 clear_outdated_rtx_info (basic_block bb)
4726 {
4727 rtx insn;
4728
4729 FOR_BB_INSNS (bb, insn)
4730 if (INSN_P (insn))
4731 {
4732 SCHED_GROUP_P (insn) = 0;
4733 INSN_AFTER_STALL_P (insn) = 0;
4734 INSN_SCHED_TIMES (insn) = 0;
4735 EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) = 0;
4736
4737 /* We cannot use the changed caches, as previously we could ignore
4738 the LHS dependence due to enabled renaming and transform
4739 the expression, and currently we'll be unable to do this. */
4740 htab_empty (INSN_TRANSFORMED_INSNS (insn));
4741 }
4742 }
4743
4744 /* Add BB_NOTE to the pool of available basic block notes. */
4745 static void
4746 return_bb_to_pool (basic_block bb)
4747 {
4748 rtx note = bb_note (bb);
4749
4750 gcc_assert (NOTE_BASIC_BLOCK (note) == bb
4751 && bb->aux == NULL);
4752
4753 /* It turns out that the current cfg infrastructure does not support
4754 reuse of basic blocks. Don't bother for now. */
4755 /*VEC_safe_push (rtx, heap, bb_note_pool, note);*/
4756 }
4757
4758 /* Get a bb_note from pool or return NULL_RTX if pool is empty. */
4759 static rtx
4760 get_bb_note_from_pool (void)
4761 {
4762 if (VEC_empty (rtx, bb_note_pool))
4763 return NULL_RTX;
4764 else
4765 {
4766 rtx note = VEC_pop (rtx, bb_note_pool);
4767
4768 PREV_INSN (note) = NULL_RTX;
4769 NEXT_INSN (note) = NULL_RTX;
4770
4771 return note;
4772 }
4773 }
4774
4775 /* Free bb_note_pool. */
4776 void
4777 free_bb_note_pool (void)
4778 {
4779 VEC_free (rtx, heap, bb_note_pool);
4780 }
4781
4782 /* Setup scheduler pool and successor structure. */
4783 void
4784 alloc_sched_pools (void)
4785 {
4786 int succs_size;
4787
4788 succs_size = MAX_WS + 1;
4789 succs_info_pool.stack = XCNEWVEC (struct succs_info, succs_size);
4790 succs_info_pool.size = succs_size;
4791 succs_info_pool.top = -1;
4792 succs_info_pool.max_top = -1;
4793
4794 sched_lists_pool = create_alloc_pool ("sel-sched-lists",
4795 sizeof (struct _list_node), 500);
4796 }
4797
4798 /* Free the pools. */
4799 void
4800 free_sched_pools (void)
4801 {
4802 int i;
4803
4804 free_alloc_pool (sched_lists_pool);
4805 gcc_assert (succs_info_pool.top == -1);
4806 for (i = 0; i <= succs_info_pool.max_top; i++)
4807 {
4808 VEC_free (rtx, heap, succs_info_pool.stack[i].succs_ok);
4809 VEC_free (rtx, heap, succs_info_pool.stack[i].succs_other);
4810 VEC_free (int, heap, succs_info_pool.stack[i].probs_ok);
4811 }
4812 free (succs_info_pool.stack);
4813 }
4814 \f
4815
4816 /* Returns a position in RGN where BB can be inserted retaining
4817 topological order. */
4818 static int
4819 find_place_to_insert_bb (basic_block bb, int rgn)
4820 {
4821 bool has_preds_outside_rgn = false;
4822 edge e;
4823 edge_iterator ei;
4824
4825 /* Find whether we have preds outside the region. */
4826 FOR_EACH_EDGE (e, ei, bb->preds)
4827 if (!in_current_region_p (e->src))
4828 {
4829 has_preds_outside_rgn = true;
4830 break;
4831 }
4832
4833 /* Recompute the topological order -- needed when we have > 1 pred
4834 or when we have preds outside the region. */
4835 if (flag_sel_sched_pipelining_outer_loops
4836 && (has_preds_outside_rgn || EDGE_COUNT (bb->preds) > 1))
4837 {
4838 int i, bbi = bb->index, cur_bbi;
4839
4840 recompute_rev_top_order ();
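/* Scan the region blocks from the end; stop at the last block that must
   precede BB in the topological order -- BB will go right after it.  */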
4841 for (i = RGN_NR_BLOCKS (rgn) - 1; i >= 0; i--)
4842 {
4843 cur_bbi = BB_TO_BLOCK (i);
4844 if (rev_top_order_index[bbi]
4845 < rev_top_order_index[cur_bbi])
4846 break;
4847 }
4848
4849 /* We skipped the right block, so we increase i. The caller will add
4850 one more later, so decrease i back to compensate. */
4851 return (i + 1) - 1;
4852 }
4853 else if (has_preds_outside_rgn)
4854 {
4855 /* This is the case when we generate an extra empty block
4856 to serve as region head during pipelining. */
4857 e = EDGE_SUCC (bb, 0);
4858 gcc_assert (EDGE_COUNT (bb->succs) == 1
4859 && in_current_region_p (EDGE_SUCC (bb, 0)->dest)
4860 && (BLOCK_TO_BB (e->dest->index) == 0));
4861 return -1;
4862 }
4863
4864 /* We don't have preds outside the region. We should have
4865 only one pred, because the multiple preds case comes from
4866 the pipelining of outer loops, and that is handled above.
4867 Just take the bbi of this single pred. */
4868 if (EDGE_COUNT (bb->succs) > 0)
4869 {
4870 int pred_bbi;
4871
4872 gcc_assert (EDGE_COUNT (bb->preds) == 1);
4873
4874 pred_bbi = EDGE_PRED (bb, 0)->src->index;
4875 return BLOCK_TO_BB (pred_bbi);
4876 }
4877 else
4878 /* BB has no successors. It is safe to put it in the end. */
4879 return current_nr_blocks - 1;
4880 }
4881
4882 /* Deletes an empty basic block freeing its data. */
4883 static void
4884 delete_and_free_basic_block (basic_block bb)
4885 {
4886 gcc_assert (sel_bb_empty_p (bb));
4887
4888 if (BB_LV_SET (bb))
4889 free_lv_set (bb);
4890
4891 bitmap_clear_bit (blocks_to_reschedule, bb->index);
4892
4893 /* Can't assert av_set properties because we use sel_remove_bb
4894 when removing a loop preheader from the region. At the point of
4895 removing the preheader we have already deallocated sel_region_bb_info. */
4896 gcc_assert (BB_LV_SET (bb) == NULL
4897 && !BB_LV_SET_VALID_P (bb)
4898 && BB_AV_LEVEL (bb) == 0
4899 && BB_AV_SET (bb) == NULL);
4900
4901 delete_basic_block (bb);
4902 }
4903
4904 /* Add BB to the current region and update the region data. */
4905 static void
4906 add_block_to_current_region (basic_block bb)
4907 {
4908 int i, pos, bbi = -2, rgn;
4909
4910 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
4911 bbi = find_place_to_insert_bb (bb, rgn);
4912 bbi += 1;
4913 pos = RGN_BLOCKS (rgn) + bbi;
4914
4915 gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
4916 && ebb_head[bbi] == pos);
4917
4918 /* Make a place for the new block. */
4919 extend_regions ();
4920
4921 for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
4922 BLOCK_TO_BB (rgn_bb_table[i])++;
4923
4924 memmove (rgn_bb_table + pos + 1,
4925 rgn_bb_table + pos,
4926 (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
4927
4928 /* Initialize data for BB. */
4929 rgn_bb_table[pos] = bb->index;
4930 BLOCK_TO_BB (bb->index) = bbi;
4931 CONTAINING_RGN (bb->index) = rgn;
4932
4933 RGN_NR_BLOCKS (rgn)++;
4934
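/* All regions after RGN now start one block later in rgn_bb_table.  */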
4935 for (i = rgn + 1; i <= nr_regions; i++)
4936 RGN_BLOCKS (i)++;
4937 }
4938
4939 /* Remove BB from the current region and update the region data. */
4940 static void
4941 remove_bb_from_region (basic_block bb)
4942 {
4943 int i, pos, bbi = -2, rgn;
4944
4945 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
4946 bbi = BLOCK_TO_BB (bb->index);
4947 pos = RGN_BLOCKS (rgn) + bbi;
4948
4949 gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
4950 && ebb_head[bbi] == pos);
4951
4952 for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
4953 BLOCK_TO_BB (rgn_bb_table[i])--;
4954
4955 memmove (rgn_bb_table + pos,
4956 rgn_bb_table + pos + 1,
4957 (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
4958
4959 RGN_NR_BLOCKS (rgn)--;
4960 for (i = rgn + 1; i <= nr_regions; i++)
4961 RGN_BLOCKS (i)--;
4962 }
4963
4964 /* Add BB to the current region and update all data. If BB is NULL, add all
4965 blocks from last_added_blocks vector. */
4966 static void
4967 sel_add_bb (basic_block bb)
4968 {
4969 /* Extend luids so that new notes will receive zero luids. */
4970 sched_init_luids (NULL, NULL, NULL, NULL);
4971 sched_init_bbs ();
4972 sel_init_bbs (last_added_blocks, NULL);
4973
4974 /* When bb is passed explicitly, the vector should contain
4975 a single element that is equal to bb; otherwise, the vector
4976 should not be NULL. */
4977 gcc_assert (last_added_blocks != NULL);
4978
4979 if (bb != NULL)
4980 {
4981 gcc_assert (VEC_length (basic_block, last_added_blocks) == 1
4982 && VEC_index (basic_block,
4983 last_added_blocks, 0) == bb);
4984 add_block_to_current_region (bb);
4985
4986 /* We associate creating/deleting data sets with the first insn
4987 appearing / disappearing in the bb. */
4988 if (!sel_bb_empty_p (bb) && BB_LV_SET (bb) == NULL)
4989 create_initial_data_sets (bb);
4990
4991 VEC_free (basic_block, heap, last_added_blocks);
4992 }
4993 else
4994 /* BB is NULL - process LAST_ADDED_BLOCKS instead. */
4995 {
4996 int i;
4997 basic_block temp_bb = NULL;
4998
4999 for (i = 0;
5000 VEC_iterate (basic_block, last_added_blocks, i, bb); i++)
5001 {
5002 add_block_to_current_region (bb);
5003 temp_bb = bb;
5004 }
5005
5006 /* We need to fetch at least one bb so we know the region
5007 to update. */
5008 gcc_assert (temp_bb != NULL);
5009 bb = temp_bb;
5010
5011 VEC_free (basic_block, heap, last_added_blocks);
5012 }
5013
5014 rgn_setup_region (CONTAINING_RGN (bb->index));
5015 }
5016
5017 /* Remove BB from the current region and update all data.
5018 If REMOVE_FROM_CFG_P is true, also remove the block from the cfg. */
5019 static void
5020 sel_remove_bb (basic_block bb, bool remove_from_cfg_p)
5021 {
5022 gcc_assert (bb != NULL && BB_NOTE_LIST (bb) == NULL_RTX);
5023
5024 remove_bb_from_region (bb);
5025 return_bb_to_pool (bb);
5026 bitmap_clear_bit (blocks_to_reschedule, bb->index);
5027
5028 if (remove_from_cfg_p)
5029 delete_and_free_basic_block (bb);
5030
5031 rgn_setup_region (CONTAINING_RGN (bb->index));
5032 }
5033
5034 /* Concatenate info of EMPTY_BB to info of MERGE_BB. */
5035 static void
5036 move_bb_info (basic_block merge_bb, basic_block empty_bb)
5037 {
5038 gcc_assert (in_current_region_p (merge_bb));
5039
5040 concat_note_lists (BB_NOTE_LIST (empty_bb),
5041 &BB_NOTE_LIST (merge_bb));
5042 BB_NOTE_LIST (empty_bb) = NULL_RTX;
5043
5044 }
5045
5046 /* Remove an empty basic block EMPTY_BB. When MERGE_UP_P is true, we put
5047 EMPTY_BB's note lists into its predecessor instead of putting them
5048 into the successor. When REMOVE_FROM_CFG_P is true, also remove
5049 the empty block. */
5050 void
5051 sel_remove_empty_bb (basic_block empty_bb, bool merge_up_p,
5052 bool remove_from_cfg_p)
5053 {
5054 basic_block merge_bb;
5055
5056 gcc_assert (sel_bb_empty_p (empty_bb));
5057
5058 if (merge_up_p)
5059 {
5060 merge_bb = empty_bb->prev_bb;
5061 gcc_assert (EDGE_COUNT (empty_bb->preds) == 1
5062 && EDGE_PRED (empty_bb, 0)->src == merge_bb);
5063 }
5064 else
5065 {
5066 edge e;
5067 edge_iterator ei;
5068
5069 merge_bb = bb_next_bb (empty_bb);
5070
5071 /* Redirect incoming edges (except the fallthrough one) of EMPTY_BB to its
5072 successor block. */
5073 for (ei = ei_start (empty_bb->preds);
5074 (e = ei_safe_edge (ei)); )
5075 {
5076 if (! (e->flags & EDGE_FALLTHRU))
5077 sel_redirect_edge_and_branch (e, merge_bb);
5078 else
5079 ei_next (&ei);
5080 }
5081
5082 gcc_assert (EDGE_COUNT (empty_bb->succs) == 1
5083 && EDGE_SUCC (empty_bb, 0)->dest == merge_bb);
5084 }
5085
5086 move_bb_info (merge_bb, empty_bb);
5087 remove_empty_bb (empty_bb, remove_from_cfg_p);
5088 }
5089
5090 /* Remove EMPTY_BB. If REMOVE_FROM_CFG_P is false, remove EMPTY_BB from
5091 region, but keep it in CFG. */
5092 static void
5093 remove_empty_bb (basic_block empty_bb, bool remove_from_cfg_p)
5094 {
5095 /* The block should contain just a note or a label.
5096 We try to check whether it is unused below. */
5097 gcc_assert (BB_HEAD (empty_bb) == BB_END (empty_bb)
5098 || LABEL_P (BB_HEAD (empty_bb)));
5099
5100 /* If basic block has predecessors or successors, redirect them. */
5101 if (remove_from_cfg_p
5102 && (EDGE_COUNT (empty_bb->preds) > 0
5103 || EDGE_COUNT (empty_bb->succs) > 0))
5104 {
5105 basic_block pred;
5106 basic_block succ;
5107
5108 /* We need to init PRED and SUCC before redirecting edges. */
5109 if (EDGE_COUNT (empty_bb->preds) > 0)
5110 {
5111 edge e;
5112
5113 gcc_assert (EDGE_COUNT (empty_bb->preds) == 1);
5114
5115 e = EDGE_PRED (empty_bb, 0);
5116 gcc_assert (e->src == empty_bb->prev_bb
5117 && (e->flags & EDGE_FALLTHRU));
5118
5119 pred = empty_bb->prev_bb;
5120 }
5121 else
5122 pred = NULL;
5123
5124 if (EDGE_COUNT (empty_bb->succs) > 0)
5125 {
5126 /* We do not check for fallthruness here as above, because
5127 after removing a jump the edge may no longer be a fallthru. */
5128 gcc_assert (EDGE_COUNT (empty_bb->succs) == 1);
5129 succ = EDGE_SUCC (empty_bb, 0)->dest;
5130 }
5131 else
5132 succ = NULL;
5133
5134 if (EDGE_COUNT (empty_bb->preds) > 0 && succ != NULL)
5135 {
5136 edge e = EDGE_PRED (empty_bb, 0);
5137
5138 if (e->flags & EDGE_FALLTHRU)
5139 redirect_edge_succ_nodup (e, succ);
5140 else
5141 sel_redirect_edge_and_branch (EDGE_PRED (empty_bb, 0), succ);
5142 }
5143
5144 if (EDGE_COUNT (empty_bb->succs) > 0 && pred != NULL)
5145 {
5146 edge e = EDGE_SUCC (empty_bb, 0);
5147
5148 if (find_edge (pred, e->dest) == NULL)
5149 redirect_edge_pred (e, pred);
5150 }
5151 }
5152
5153 /* Finish removing. */
5154 sel_remove_bb (empty_bb, remove_from_cfg_p);
5155 }
5156
5157 /* An implementation of create_basic_block hook, which additionally updates
5158 per-bb data structures. */
5159 static basic_block
5160 sel_create_basic_block (void *headp, void *endp, basic_block after)
5161 {
5162 basic_block new_bb;
5163 insn_t new_bb_note;
5164
5165 gcc_assert (flag_sel_sched_pipelining_outer_loops
5166 || last_added_blocks == NULL);
5167
5168 new_bb_note = get_bb_note_from_pool ();
5169
5170 if (new_bb_note == NULL_RTX)
5171 new_bb = orig_cfg_hooks.create_basic_block (headp, endp, after);
5172 else
5173 {
5174 new_bb = create_basic_block_structure ((rtx) headp, (rtx) endp,
5175 new_bb_note, after);
5176 new_bb->aux = NULL;
5177 }
5178
5179 VEC_safe_push (basic_block, heap, last_added_blocks, new_bb);
5180
5181 return new_bb;
5182 }
5183
5184 /* Implement sched_init_only_bb (). */
5185 static void
5186 sel_init_only_bb (basic_block bb, basic_block after)
5187 {
5188 gcc_assert (after == NULL);
5189
5190 extend_regions ();
5191 rgn_make_new_region_out_of_new_block (bb);
5192 }
5193
5194 /* Update the latch when we've split or merged it from block FROM to block TO.
5195 This should be checked for all outer loops, too. */
5196 static void
5197 change_loops_latches (basic_block from, basic_block to)
5198 {
5199 gcc_assert (from != to);
5200
5201 if (current_loop_nest)
5202 {
5203 struct loop *loop;
5204
5205 for (loop = current_loop_nest; loop; loop = loop_outer (loop))
5206 if (considered_for_pipelining_p (loop) && loop->latch == from)
5207 {
5208 gcc_assert (loop == current_loop_nest);
5209 loop->latch = to;
5210 gcc_assert (loop_latch_edge (loop));
5211 }
5212 }
5213 }
5214
5215 /* Splits BB on two basic blocks, adding it to the region and extending
5216 per-bb data structures. Returns the newly created bb. */
5217 static basic_block
5218 sel_split_block (basic_block bb, rtx after)
5219 {
5220 basic_block new_bb;
5221 insn_t insn;
5222
5223 new_bb = sched_split_block_1 (bb, after);
5224 sel_add_bb (new_bb);
5225
5226 /* This should be called after sel_add_bb, because this uses
5227 CONTAINING_RGN for the new block, which is not yet initialized.
5228 FIXME: this function may be a no-op now. */
5229 change_loops_latches (bb, new_bb);
5230
5231 /* Update ORIG_BB_INDEX for insns moved into the new block. */
5232 FOR_BB_INSNS (new_bb, insn)
5233 if (INSN_P (insn))
5234 EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index;
5235
5236 if (sel_bb_empty_p (bb))
5237 {
5238 gcc_assert (!sel_bb_empty_p (new_bb));
5239
5240 /* NEW_BB has data sets that need to be updated and BB holds
5241 data sets that should be removed. Exchange these data sets
5242 so that we won't lose BB's valid data sets. */
5243 exchange_data_sets (new_bb, bb);
5244 free_data_sets (bb);
5245 }
5246
5247 if (!sel_bb_empty_p (new_bb)
5248 && bitmap_bit_p (blocks_to_reschedule, bb->index))
5249 bitmap_set_bit (blocks_to_reschedule, new_bb->index);
5250
5251 return new_bb;
5252 }
5253
5254 /* If BB ends with a jump insn whose UID is at least PREV_MAX_UID, return it.
5255 Otherwise return NULL. */
5256 static rtx
5257 check_for_new_jump (basic_block bb, int prev_max_uid)
5258 {
5259 rtx end;
5260
5261 end = sel_bb_end (bb);
5262 if (end && INSN_UID (end) >= prev_max_uid)
5263 return end;
5264 return NULL;
5265 }
5266
5267 /* Look for a new jump either in FROM_BB block or in newly created JUMP_BB block.
5268 New means having UID at least equal to PREV_MAX_UID. */
5269 static rtx
5270 find_new_jump (basic_block from, basic_block jump_bb, int prev_max_uid)
5271 {
5272 rtx jump;
5273
5274 /* Return immediately if no new insns were emitted. */
5275 if (get_max_uid () == prev_max_uid)
5276 return NULL;
5277
5278 /* Now check both blocks for new jumps. There will be at most one. */
5279 if ((jump = check_for_new_jump (from, prev_max_uid)))
5280 return jump;
5281
5282 if (jump_bb != NULL
5283 && (jump = check_for_new_jump (jump_bb, prev_max_uid)))
5284 return jump;
5285 return NULL;
5286 }
5287
5288 /* Splits E and adds the newly created basic block to the current region.
5289 Returns this basic block. */
5290 basic_block
5291 sel_split_edge (edge e)
5292 {
5293 basic_block new_bb, src, other_bb = NULL;
5294 int prev_max_uid;
5295 rtx jump;
5296
5297 src = e->src;
5298 prev_max_uid = get_max_uid ();
5299 new_bb = split_edge (e);
5300
5301 if (flag_sel_sched_pipelining_outer_loops
5302 && current_loop_nest)
5303 {
5304 int i;
5305 basic_block bb;
5306
5307 /* Some of the basic blocks might not have been added to the loop.
5308 Add them here, until this is fixed in force_fallthru. */
5309 for (i = 0;
5310 VEC_iterate (basic_block, last_added_blocks, i, bb); i++)
5311 if (!bb->loop_father)
5312 {
5313 add_bb_to_loop (bb, e->dest->loop_father);
5314
5315 gcc_assert (!other_bb && (new_bb->index != bb->index));
5316 other_bb = bb;
5317 }
5318 }
5319
5320 /* Add all last_added_blocks to the region. */
5321 sel_add_bb (NULL);
5322
5323 jump = find_new_jump (src, new_bb, prev_max_uid);
5324 if (jump)
5325 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5326
5327 /* Put the correct lv set on this block. */
5328 if (other_bb && !sel_bb_empty_p (other_bb))
5329 compute_live (sel_bb_head (other_bb));
5330
5331 return new_bb;
5332 }
5333
5334 /* Implement sched_create_empty_bb (). */
5335 static basic_block
5336 sel_create_empty_bb (basic_block after)
5337 {
5338 basic_block new_bb;
5339
5340 new_bb = sched_create_empty_bb_1 (after);
5341
5342 /* We'll explicitly initialize NEW_BB via sel_init_only_bb () a bit
5343 later. */
5344 gcc_assert (VEC_length (basic_block, last_added_blocks) == 1
5345 && VEC_index (basic_block, last_added_blocks, 0) == new_bb);
5346
5347 VEC_free (basic_block, heap, last_added_blocks);
5348 return new_bb;
5349 }
5350
5351 /* Implement sched_create_recovery_block. ORIG_INSN is where the block
5352 will be split to insert a check. */
5353 basic_block
5354 sel_create_recovery_block (insn_t orig_insn)
5355 {
5356 basic_block first_bb, second_bb, recovery_block;
5357 basic_block before_recovery = NULL;
5358 rtx jump;
5359
5360 first_bb = BLOCK_FOR_INSN (orig_insn);
5361 if (sel_bb_end_p (orig_insn))
5362 {
5363 /* Avoid introducing an empty block while splitting. */
5364 gcc_assert (single_succ_p (first_bb));
5365 second_bb = single_succ (first_bb);
5366 }
5367 else
5368 second_bb = sched_split_block (first_bb, orig_insn);
5369
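/* If sched_create_recovery_block creates an additional block, it returns it
   in BEFORE_RECOVERY; give that block the live set of the exit block.  */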
5370 recovery_block = sched_create_recovery_block (&before_recovery);
5371 if (before_recovery)
5372 copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR);
5373
5374 gcc_assert (sel_bb_empty_p (recovery_block));
5375 sched_create_recovery_edges (first_bb, recovery_block, second_bb);
5376 if (current_loops != NULL)
5377 add_bb_to_loop (recovery_block, first_bb->loop_father);
5378
5379 sel_add_bb (recovery_block);
5380
5381 jump = BB_END (recovery_block);
5382 gcc_assert (sel_bb_head (recovery_block) == jump);
5383 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5384
5385 return recovery_block;
5386 }
5387
5388 /* Merge basic block B into basic block A. */
5389 void
5390 sel_merge_blocks (basic_block a, basic_block b)
5391 {
5392 sel_remove_empty_bb (b, true, false);
5393 merge_blocks (a, b);
5394
5395 change_loops_latches (b, a);
5396 }
5397
5398 /* A wrapper for redirect_edge_and_branch_force, which also initializes
5399 data structures for a possibly created bb and insns. The new bb, if any,
5400 is added to the current region; nothing is returned. */
5401 void
5402 sel_redirect_edge_and_branch_force (edge e, basic_block to)
5403 {
5404 basic_block jump_bb, src;
5405 int prev_max_uid;
5406 rtx jump;
5407
5408 gcc_assert (!sel_bb_empty_p (e->src));
5409
5410 src = e->src;
5411 prev_max_uid = get_max_uid ();
5412 jump_bb = redirect_edge_and_branch_force (e, to);
5413
5414 if (jump_bb != NULL)
5415 sel_add_bb (jump_bb);
5416
5417 /* So far this function cannot spoil the loop structure,
5418 thus we don't bother updating anything. But check it to be sure. */
5419 if (current_loop_nest
5420 && pipelining_p)
5421 gcc_assert (loop_latch_edge (current_loop_nest));
5422
5423 jump = find_new_jump (src, jump_bb, prev_max_uid);
5424 if (jump)
5425 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5426 }
5427
5428 /* A wrapper for redirect_edge_and_branch. Return TRUE if blocks connected by
5429 redirected edge are in reverse topological order. */
5430 bool
5431 sel_redirect_edge_and_branch (edge e, basic_block to)
5432 {
5433 bool latch_edge_p;
5434 basic_block src;
5435 int prev_max_uid;
5436 rtx jump;
5437 edge redirected;
5438 bool recompute_toporder_p = false;
5439
5440 latch_edge_p = (pipelining_p
5441 && current_loop_nest
5442 && e == loop_latch_edge (current_loop_nest));
5443
5444 src = e->src;
5445 prev_max_uid = get_max_uid ();
5446
5447 redirected = redirect_edge_and_branch (e, to);
5448
5449 gcc_assert (redirected && last_added_blocks == NULL);
5450
5451 /* When we've redirected a latch edge, update the header. */
5452 if (latch_edge_p)
5453 {
5454 current_loop_nest->header = to;
5455 gcc_assert (loop_latch_edge (current_loop_nest));
5456 }
5457
5458 /* In rare situations, the topological relation between the blocks connected
5459 by the redirected edge can change (see PR42245 for an example). Update
5460 block_to_bb/bb_to_block. */
5461 if (CONTAINING_RGN (e->src->index) == CONTAINING_RGN (to->index)
5462 && BLOCK_TO_BB (e->src->index) > BLOCK_TO_BB (to->index))
5463 recompute_toporder_p = true;
5464
5465 jump = find_new_jump (src, NULL, prev_max_uid);
5466 if (jump)
5467 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5468
5469 return recompute_toporder_p;
5470 }
5471
5472 /* This variable holds the cfg hooks used by the selective scheduler. */
5473 static struct cfg_hooks sel_cfg_hooks;
5474
5475 /* Register sel-sched cfg hooks. */
5476 void
5477 sel_register_cfg_hooks (void)
5478 {
5479 sched_split_block = sel_split_block;
5480
5481 orig_cfg_hooks = get_cfg_hooks ();
5482 sel_cfg_hooks = orig_cfg_hooks;
5483
5484 sel_cfg_hooks.create_basic_block = sel_create_basic_block;
5485
5486 set_cfg_hooks (sel_cfg_hooks);
5487
5488 sched_init_only_bb = sel_init_only_bb;
5489 sched_split_block = sel_split_block;
5490 sched_create_empty_bb = sel_create_empty_bb;
5491 }
5492
5493 /* Unregister sel-sched cfg hooks. */
5494 void
5495 sel_unregister_cfg_hooks (void)
5496 {
5497 sched_create_empty_bb = NULL;
5498 sched_split_block = NULL;
5499 sched_init_only_bb = NULL;
5500
5501 set_cfg_hooks (orig_cfg_hooks);
5502 }
5503 \f
5504
5505 /* Emit an insn rtx based on PATTERN. If a jump insn is wanted, LABEL is where
5506 this jump should be directed; if LABEL is a debug insn, emit a debug insn. */
5507 rtx
5508 create_insn_rtx_from_pattern (rtx pattern, rtx label)
5509 {
5510 rtx insn_rtx;
5511
5512 gcc_assert (!INSN_P (pattern));
5513
5514 start_sequence ();
5515
5516 if (label == NULL_RTX)
5517 insn_rtx = emit_insn (pattern);
5518 else if (DEBUG_INSN_P (label))
5519 insn_rtx = emit_debug_insn (pattern);
5520 else
5521 {
5522 insn_rtx = emit_jump_insn (pattern);
5523 JUMP_LABEL (insn_rtx) = label;
5524 ++LABEL_NUSES (label);
5525 }
5526
5527 end_sequence ();
5528
5529 sched_init_luids (NULL, NULL, NULL, NULL);
5530 sched_extend_target ();
5531 sched_deps_init (false);
5532
5533 /* Initialize INSN_CODE now. */
5534 recog_memoized (insn_rtx);
5535 return insn_rtx;
5536 }
5537
5538 /* Create a new vinsn for INSN_RTX. FORCE_UNIQUE_P is true when the vinsn
5539 must not be clonable. */
5540 vinsn_t
5541 create_vinsn_from_insn_rtx (rtx insn_rtx, bool force_unique_p)
5542 {
5543 gcc_assert (INSN_P (insn_rtx) && !INSN_IN_STREAM_P (insn_rtx));
5544
5545 /* If VINSN_TYPE is not USE, retain its uniqueness. */
5546 return vinsn_create (insn_rtx, force_unique_p);
5547 }
5548
5549 /* Create a copy of INSN_RTX. */
5550 rtx
5551 create_copy_of_insn_rtx (rtx insn_rtx)
5552 {
5553 rtx res;
5554
5555 if (DEBUG_INSN_P (insn_rtx))
5556 return create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5557 insn_rtx);
5558
5559 gcc_assert (NONJUMP_INSN_P (insn_rtx));
5560
5561 res = create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5562 NULL_RTX);
5563 return res;
5564 }
5565
5566 /* Change vinsn field of EXPR to hold NEW_VINSN. */
5567 void
5568 change_vinsn_in_expr (expr_t expr, vinsn_t new_vinsn)
5569 {
5570 vinsn_detach (EXPR_VINSN (expr));
5571
5572 EXPR_VINSN (expr) = new_vinsn;
5573 vinsn_attach (new_vinsn);
5574 }
5575
5576 /* Helpers for global init. */
5577 /* This structure is used to be able to call the existing bundling mechanism
5578 and to calculate insn priorities. */
5579 static struct haifa_sched_info sched_sel_haifa_sched_info =
5580 {
5581 NULL, /* init_ready_list */
5582 NULL, /* can_schedule_ready_p */
5583 NULL, /* schedule_more_p */
5584 NULL, /* new_ready */
5585 NULL, /* rgn_rank */
5586 sel_print_insn, /* rgn_print_insn */
5587 contributes_to_priority,
5588 NULL, /* insn_finishes_block_p */
5589
5590 NULL, NULL,
5591 NULL, NULL,
5592 0, 0,
5593
5594 NULL, /* add_remove_insn */
5595 NULL, /* begin_schedule_ready */
5596 NULL, /* advance_target_bb */
5597 SEL_SCHED | NEW_BBS
5598 };
5599
5600 /* Setup special insns used in the scheduler. */
5601 void
5602 setup_nop_and_exit_insns (void)
5603 {
5604 gcc_assert (nop_pattern == NULL_RTX
5605 && exit_insn == NULL_RTX);
5606
5607 nop_pattern = gen_nop ();
5608
5609 start_sequence ();
5610 emit_insn (nop_pattern);
5611 exit_insn = get_insns ();
5612 end_sequence ();
5613 set_block_for_insn (exit_insn, EXIT_BLOCK_PTR);
5614 }
5615
5616 /* Free special insns used in the scheduler. */
5617 void
5618 free_nop_and_exit_insns (void)
5619 {
5620 exit_insn = NULL_RTX;
5621 nop_pattern = NULL_RTX;
5622 }
5623
5624 /* Setup a special vinsn used in new insns initialization. */
5625 void
5626 setup_nop_vinsn (void)
5627 {
5628 nop_vinsn = vinsn_create (exit_insn, false);
5629 vinsn_attach (nop_vinsn);
5630 }
5631
5632 /* Free a special vinsn used in new insns initialization. */
5633 void
5634 free_nop_vinsn (void)
5635 {
5636 gcc_assert (VINSN_COUNT (nop_vinsn) == 1);
5637 vinsn_detach (nop_vinsn);
5638 nop_vinsn = NULL;
5639 }
5640
5641 /* Call a set_sched_flags hook. */
5642 void
5643 sel_set_sched_flags (void)
5644 {
5645 /* ??? This means that set_sched_flags was called, and we decided to
5646 support speculation. However, set_sched_flags also modifies flags
5647 on current_sched_info, doing this only at global init. And we
5648 sometimes change c_s_i later. So put the correct flags again. */
5649 if (spec_info && targetm.sched.set_sched_flags)
5650 targetm.sched.set_sched_flags (spec_info);
5651 }
5652
5653 /* Setup pointers to global sched info structures. */
5654 void
5655 sel_setup_sched_infos (void)
5656 {
5657 rgn_setup_common_sched_info ();
5658
5659 memcpy (&sel_common_sched_info, common_sched_info,
5660 sizeof (sel_common_sched_info));
5661
5662 sel_common_sched_info.fix_recovery_cfg = NULL;
5663 sel_common_sched_info.add_block = NULL;
5664 sel_common_sched_info.estimate_number_of_insns
5665 = sel_estimate_number_of_insns;
5666 sel_common_sched_info.luid_for_non_insn = sel_luid_for_non_insn;
5667 sel_common_sched_info.sched_pass_id = SCHED_SEL_PASS;
5668
5669 common_sched_info = &sel_common_sched_info;
5670
5671 current_sched_info = &sched_sel_haifa_sched_info;
5672 current_sched_info->sched_max_insns_priority =
5673 get_rgn_sched_max_insns_priority ();
5674
5675 sel_set_sched_flags ();
5676 }
5677 \f
5678
5679 /* Adds basic block BB to region RGN at the position *BB_ORD_INDEX,
5680 *BB_ORD_INDEX after that is increased. */
5681 static void
5682 sel_add_block_to_region (basic_block bb, int *bb_ord_index, int rgn)
5683 {
5684 RGN_NR_BLOCKS (rgn) += 1;
5685 RGN_DONT_CALC_DEPS (rgn) = 0;
5686 RGN_HAS_REAL_EBB (rgn) = 0;
5687 CONTAINING_RGN (bb->index) = rgn;
5688 BLOCK_TO_BB (bb->index) = *bb_ord_index;
5689 rgn_bb_table[RGN_BLOCKS (rgn) + *bb_ord_index] = bb->index;
5690 (*bb_ord_index)++;
5691
5692 /* FIXME: it is true only when not scheduling ebbs. */
5693 RGN_BLOCKS (rgn + 1) = RGN_BLOCKS (rgn) + RGN_NR_BLOCKS (rgn);
5694 }
5695
5696 /* Functions to support pipelining of outer loops. */
5697
5698 /* Creates a new empty region and returns its number. */
5699 static int
5700 sel_create_new_region (void)
5701 {
5702 int new_rgn_number = nr_regions;
5703
5704 RGN_NR_BLOCKS (new_rgn_number) = 0;
5705
5706 /* FIXME: This will work only when EBBs are not created. */
5707 if (new_rgn_number != 0)
5708 RGN_BLOCKS (new_rgn_number) = RGN_BLOCKS (new_rgn_number - 1) +
5709 RGN_NR_BLOCKS (new_rgn_number - 1);
5710 else
5711 RGN_BLOCKS (new_rgn_number) = 0;
5712
5713 /* Set the blocks of the next region so the other functions may
5714 calculate the number of blocks in the region. */
5715 RGN_BLOCKS (new_rgn_number + 1) = RGN_BLOCKS (new_rgn_number) +
5716 RGN_NR_BLOCKS (new_rgn_number);
5717
5718 nr_regions++;
5719
5720 return new_rgn_number;
5721 }
5722
5723 /* If X has a smaller topological sort number than Y, returns -1;
5724 if greater, returns 1. */
5725 static int
5726 bb_top_order_comparator (const void *x, const void *y)
5727 {
5728 basic_block bb1 = *(const basic_block *) x;
5729 basic_block bb2 = *(const basic_block *) y;
5730
5731 gcc_assert (bb1 == bb2
5732 || rev_top_order_index[bb1->index]
5733 != rev_top_order_index[bb2->index]);
5734
5735 /* It's a reverse topological order in REV_TOP_ORDER_INDEX, so
5736 bbs with greater number should go earlier. */
5737 if (rev_top_order_index[bb1->index] > rev_top_order_index[bb2->index])
5738 return -1;
5739 else
5740 return 1;
5741 }
5742
5743 /* Create a region for LOOP and return its number. If we don't want
5744 to pipeline LOOP, return -1. */
5745 static int
5746 make_region_from_loop (struct loop *loop)
5747 {
5748 unsigned int i;
5749 int new_rgn_number = -1;
5750 struct loop *inner;
5751
5752 /* Basic block index, to be assigned to BLOCK_TO_BB. */
5753 int bb_ord_index = 0;
5754 basic_block *loop_blocks;
5755 basic_block preheader_block;
5756
5757 if (loop->num_nodes
5758 > (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS))
5759 return -1;
5760
5761 /* Don't pipeline loops whose latch belongs to some of its inner loops. */
5762 for (inner = loop->inner; inner; inner = inner->inner)
5763 if (flow_bb_inside_loop_p (inner, loop->latch))
5764 return -1;
5765
5766 loop->ninsns = num_loop_insns (loop);
5767 if ((int) loop->ninsns > PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_INSNS))
5768 return -1;
5769
5770 loop_blocks = get_loop_body_in_custom_order (loop, bb_top_order_comparator);
5771
5772 for (i = 0; i < loop->num_nodes; i++)
5773 if (loop_blocks[i]->flags & BB_IRREDUCIBLE_LOOP)
5774 {
5775 free (loop_blocks);
5776 return -1;
5777 }
5778
5779 preheader_block = loop_preheader_edge (loop)->src;
5780 gcc_assert (preheader_block);
5781 gcc_assert (loop_blocks[0] == loop->header);
5782
5783 new_rgn_number = sel_create_new_region ();
5784
5785 sel_add_block_to_region (preheader_block, &bb_ord_index, new_rgn_number);
5786 SET_BIT (bbs_in_loop_rgns, preheader_block->index);
5787
5788 for (i = 0; i < loop->num_nodes; i++)
5789 {
5790 /* Add only those blocks that haven't been scheduled in the inner loop.
5791 The exception is the basic blocks with bookkeeping code - they should
5792 be added to the region (and they actually don't belong to the loop
5793 body, but to the region containing that loop body). */
5794
5795 gcc_assert (new_rgn_number >= 0);
5796
5797 if (! TEST_BIT (bbs_in_loop_rgns, loop_blocks[i]->index))
5798 {
5799 sel_add_block_to_region (loop_blocks[i], &bb_ord_index,
5800 new_rgn_number);
5801 SET_BIT (bbs_in_loop_rgns, loop_blocks[i]->index);
5802 }
5803 }
5804
5805 free (loop_blocks);
5806 MARK_LOOP_FOR_PIPELINING (loop);
5807
5808 return new_rgn_number;
5809 }
5810
5811 /* Create a new region from preheader blocks LOOP_BLOCKS. */
5812 void
5813 make_region_from_loop_preheader (VEC(basic_block, heap) **loop_blocks)
5814 {
5815 unsigned int i;
5816 int new_rgn_number = -1;
5817 basic_block bb;
5818
5819 /* Basic block index, to be assigned to BLOCK_TO_BB. */
5820 int bb_ord_index = 0;
5821
5822 new_rgn_number = sel_create_new_region ();
5823
5824 for (i = 0; VEC_iterate (basic_block, *loop_blocks, i, bb); i++)
5825 {
5826 gcc_assert (new_rgn_number >= 0);
5827
5828 sel_add_block_to_region (bb, &bb_ord_index, new_rgn_number);
5829 }
5830
5831 VEC_free (basic_block, heap, *loop_blocks);
5832 gcc_assert (*loop_blocks == NULL);
5833 }
5834
5835
5836 /* Create region(s) from loop nest LOOP, such that inner loops will be
5837 pipelined before outer loops. Returns true when a region for LOOP
5838 is created. */
5839 static bool
5840 make_regions_from_loop_nest (struct loop *loop)
5841 {
5842 struct loop *cur_loop;
5843 int rgn_number;
5844
5845 /* Traverse all inner nodes of the loop. */
5846 for (cur_loop = loop->inner; cur_loop; cur_loop = cur_loop->next)
5847 if (! TEST_BIT (bbs_in_loop_rgns, cur_loop->header->index))
5848 return false;
5849
5850 /* At this moment all regular inner loops should have been pipelined.
5851 Try to create a region from this loop. */
5852 rgn_number = make_region_from_loop (loop);
5853
5854 if (rgn_number < 0)
5855 return false;
5856
5857 VEC_safe_push (loop_p, heap, loop_nests, loop);
5858 return true;
5859 }
5860
5861 /* Initialize data structures needed for pipelining. */
5862 void
5863 sel_init_pipelining (void)
5864 {
5865 /* Collect loop information to be used in outer loops pipelining. */
5866 loop_optimizer_init (LOOPS_HAVE_PREHEADERS
5867 | LOOPS_HAVE_FALLTHRU_PREHEADERS
5868 | LOOPS_HAVE_RECORDED_EXITS
5869 | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);
5870 current_loop_nest = NULL;
5871
5872 bbs_in_loop_rgns = sbitmap_alloc (last_basic_block);
5873 sbitmap_zero (bbs_in_loop_rgns);
5874
5875 recompute_rev_top_order ();
5876 }
5877
5878 /* Returns a struct loop for region RGN. */
5879 loop_p
5880 get_loop_nest_for_rgn (unsigned int rgn)
5881 {
5882 /* Regions created with extend_rgns don't have corresponding loop nests,
5883 because they don't represent loops. */
5884 if (rgn < VEC_length (loop_p, loop_nests))
5885 return VEC_index (loop_p, loop_nests, rgn);
5886 else
5887 return NULL;
5888 }
5889
5890 /* True when LOOP was included into pipelining regions. */
5891 bool
5892 considered_for_pipelining_p (struct loop *loop)
5893 {
5894 if (loop_depth (loop) == 0)
5895 return false;
5896
5897 /* Now, the loop could be too large or irreducible. Check whether its
5898 region is in LOOP_NESTS.
5899 We determine the region number of LOOP as the region number of its
5900 latch. We can't use the header here, because it could be a just-removed
5901 preheader, which would give us the wrong region number.
5902 Latch can't be used because it could be in the inner loop too. */
5903 if (LOOP_MARKED_FOR_PIPELINING_P (loop))
5904 {
5905 int rgn = CONTAINING_RGN (loop->latch->index);
5906
5907 gcc_assert ((unsigned) rgn < VEC_length (loop_p, loop_nests));
5908 return true;
5909 }
5910
5911 return false;
5912 }
5913
5914 /* Makes regions from the rest of the blocks, after loops are chosen
5915 for pipelining. */
5916 static void
5917 make_regions_from_the_rest (void)
5918 {
5919 int cur_rgn_blocks;
5920 int *loop_hdr;
5921 int i;
5922
5923 basic_block bb;
5924 edge e;
5925 edge_iterator ei;
5926 int *degree;
5927
5928 /* Index in rgn_bb_table where to start allocating new regions. */
5929 cur_rgn_blocks = nr_regions ? RGN_BLOCKS (nr_regions) : 0;
5930
5931 /* Make regions from all the remaining basic blocks - those that don't belong to
5932 any loop or belong to irreducible loops. Prepare the data structures
5933 for extend_rgns. */
5934
5935 /* LOOP_HDR[I] == -1 if I-th bb doesn't belong to any loop,
5936 LOOP_HDR[I] == LOOP_HDR[J] iff basic blocks I and J reside within the same
5937 loop. */
5938 loop_hdr = XNEWVEC (int, last_basic_block);
5939 degree = XCNEWVEC (int, last_basic_block);
5940
5941
5942 /* For each basic block that belongs to some loop assign the number
5943 of innermost loop it belongs to. */
5944 for (i = 0; i < last_basic_block; i++)
5945 loop_hdr[i] = -1;
5946
5947 FOR_EACH_BB (bb)
5948 {
5949 if (bb->loop_father && bb->loop_father->num != 0
5950 && !(bb->flags & BB_IRREDUCIBLE_LOOP))
5951 loop_hdr[bb->index] = bb->loop_father->num;
5952 }
5953
5954 /* For each basic block, the degree is calculated as the number of incoming
5955 edges that come from bbs that are not yet scheduled.
5956 Basic blocks that are already scheduled have a degree of zero. */
5957 FOR_EACH_BB (bb)
5958 {
5959 degree[bb->index] = 0;
5960
5961 if (!TEST_BIT (bbs_in_loop_rgns, bb->index))
5962 {
5963 FOR_EACH_EDGE (e, ei, bb->preds)
5964 if (!TEST_BIT (bbs_in_loop_rgns, e->src->index))
5965 degree[bb->index]++;
5966 }
5967 else
5968 degree[bb->index] = -1;
5969 }
5970
5971 extend_rgns (degree, &cur_rgn_blocks, bbs_in_loop_rgns, loop_hdr);
5972
5973 /* Any block that did not end up in a region is placed into a region
5974 by itself. */
5975 FOR_EACH_BB (bb)
5976 if (degree[bb->index] >= 0)
5977 {
5978 rgn_bb_table[cur_rgn_blocks] = bb->index;
5979 RGN_NR_BLOCKS (nr_regions) = 1;
5980 RGN_BLOCKS (nr_regions) = cur_rgn_blocks++;
5981 RGN_DONT_CALC_DEPS (nr_regions) = 0;
5982 RGN_HAS_REAL_EBB (nr_regions) = 0;
5983 CONTAINING_RGN (bb->index) = nr_regions++;
5984 BLOCK_TO_BB (bb->index) = 0;
5985 }
5986
5987 free (degree);
5988 free (loop_hdr);
5989 }
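
/* Illustrative sketch only: the LOOP_HDR array built above encodes "same
   innermost loop" as equality of loop numbers, with -1 meaning that a block
   belongs to no (reducible) loop.  A hypothetical predicate over such an
   array would look like this.  */
#if 0
static bool
same_innermost_loop_p (int *loop_hdr, basic_block b1, basic_block b2)
{
  return (loop_hdr[b1->index] != -1
          && loop_hdr[b1->index] == loop_hdr[b2->index]);
}
#endif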
5990
5991 /* Free data structures used in pipelining of loops. */
5992 void sel_finish_pipelining (void)
5993 {
5994 loop_iterator li;
5995 struct loop *loop;
5996
5997 /* Release aux fields so we don't free them later by mistake. */
5998 FOR_EACH_LOOP (li, loop, 0)
5999 loop->aux = NULL;
6000
6001 loop_optimizer_finalize ();
6002
6003 VEC_free (loop_p, heap, loop_nests);
6004
6005 free (rev_top_order_index);
6006 rev_top_order_index = NULL;
6007 }
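
/* Illustrative sketch only: sel_find_rgns (below) and sel_finish_pipelining
   are the setup/teardown pair for the pipelining data; a driver would be
   expected to bracket region scheduling roughly like this (hypothetical
   code, the real hooks live elsewhere).  */
#if 0
static void
example_pipelining_driver (void)
{
  sel_find_rgns ();             /* Build loop regions and the rest.  */
  /* ... schedule the regions ...  */
  sel_finish_pipelining ();     /* Free loop_nests and finalize loops.  */
}
#endif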
6008
6009 /* This function replaces find_rgns when
6010    FLAG_SEL_SCHED_PIPELINING_OUTER_LOOPS is set.  */
6011 void
6012 sel_find_rgns (void)
6013 {
6014 sel_init_pipelining ();
6015 extend_regions ();
6016
6017 if (current_loops)
6018 {
6019 loop_p loop;
6020 loop_iterator li;
6021
6022 FOR_EACH_LOOP (li, loop, (flag_sel_sched_pipelining_outer_loops
6023 ? LI_FROM_INNERMOST
6024 : LI_ONLY_INNERMOST))
6025 make_regions_from_loop_nest (loop);
6026 }
6027
6028   /* Make regions from all the remaining basic blocks and schedule them.
6029      These blocks include blocks that don't belong to any loop or belong
6030      to irreducible loops.  */
6031 make_regions_from_the_rest ();
6032
6033 /* We don't need bbs_in_loop_rgns anymore. */
6034 sbitmap_free (bbs_in_loop_rgns);
6035 bbs_in_loop_rgns = NULL;
6036 }
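
/* Illustrative sketch only: after sel_find_rgns, the first
   VEC_length (loop_p, loop_nests) regions correspond to loops, while regions
   created by make_regions_from_the_rest have no loop nest.  A hypothetical
   debug walk over the result could look like this.  */
#if 0
static void
walk_region_loop_nests (void)
{
  int rgn;

  for (rgn = 0; rgn < nr_regions; rgn++)
    {
      loop_p nest = get_loop_nest_for_rgn (rgn);

      /* NEST is NULL for regions that do not represent loops.  */
      (void) nest;
    }
}
#endif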
6037
6038 /* Adds the preheader blocks from the previous loop to the current region,
6039    taking them from LOOP_PREHEADER_BLOCKS (current_loop_nest).
6040 This function is only used with -fsel-sched-pipelining-outer-loops. */
6041 void
6042 sel_add_loop_preheaders (void)
6043 {
6044 int i;
6045 basic_block bb;
6046 VEC(basic_block, heap) *preheader_blocks
6047 = LOOP_PREHEADER_BLOCKS (current_loop_nest);
6048
6049 for (i = 0;
6050 VEC_iterate (basic_block, preheader_blocks, i, bb);
6051 i++)
6052 {
6053 VEC_safe_push (basic_block, heap, last_added_blocks, bb);
6054 sel_add_bb (bb);
6055 }
6056
6057 VEC_free (basic_block, heap, preheader_blocks);
6058 }
6059
6060 /* While pipelining outer loops, returns TRUE if BB is a loop preheader.
6061 Please note that the function should also work when pipelining_p is
6062 false, because it is used when deciding whether we should or should
6063 not reschedule pipelined code. */
6064 bool
6065 sel_is_loop_preheader_p (basic_block bb)
6066 {
6067 if (current_loop_nest)
6068 {
6069 struct loop *outer;
6070
6071 if (preheader_removed)
6072 return false;
6073
6074 /* Preheader is the first block in the region. */
6075 if (BLOCK_TO_BB (bb->index) == 0)
6076 return true;
6077
6078 /* We used to find a preheader with the topological information.
6079 Check that the above code is equivalent to what we did before. */
6080
6081 if (in_current_region_p (current_loop_nest->header))
6082 gcc_assert (!(BLOCK_TO_BB (bb->index)
6083 < BLOCK_TO_BB (current_loop_nest->header->index)));
6084
6085       /* The latch block of an outer loop chosen for pipelining
6086          must not end up here.  */
6087 for (outer = loop_outer (current_loop_nest);
6088 outer;
6089 outer = loop_outer (outer))
6090 if (considered_for_pipelining_p (outer) && outer->latch == bb)
6091 gcc_unreachable ();
6092 }
6093
6094 return false;
6095 }
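
/* Illustrative sketch only: by the criterion above, a region's preheader,
   if it still has one, is its first block (BLOCK_TO_BB == 0).  Hypothetical
   helper, not used by the pass.  */
#if 0
static basic_block
current_region_preheader (void)
{
  basic_block first = BASIC_BLOCK (BB_TO_BLOCK (0));

  return sel_is_loop_preheader_p (first) ? first : NULL;
}
#endif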
6096
6097 /* Checks whether JUMP leads to basic block DEST_BB and no other blocks. */
6098 bool
6099 jump_leads_only_to_bb_p (insn_t jump, basic_block dest_bb)
6100 {
6101 basic_block jump_bb = BLOCK_FOR_INSN (jump);
6102
6103   /* Return false if this is not a jump, if it has side effects, or if
6104      it can lead to several basic blocks.  */
6105 if (!onlyjump_p (jump)
6106 || !any_uncondjump_p (jump))
6107 return false;
6108
6109   /* Return false if there are several outgoing edges, the edge is
6110      abnormal, or the destination of the jump is not DEST_BB.  */
6111 if (EDGE_COUNT (jump_bb->succs) != 1
6112 || EDGE_SUCC (jump_bb, 0)->flags & EDGE_ABNORMAL
6113 || EDGE_SUCC (jump_bb, 0)->dest != dest_bb)
6114 return false;
6115
6116   /* None of the above applies, so JUMP leads only to DEST_BB.  */
6117 return true;
6118 }
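
/* Illustrative usage sketch only (it mirrors the call in
   sel_remove_loop_preheader below): before simplifying the jump that ends
   PREV_BB, check that it can only reach NEXT_BB.  Hypothetical helper.  */
#if 0
static bool
prev_bb_jump_removable_p (basic_block prev_bb, basic_block next_bb)
{
  return (prev_bb != ENTRY_BLOCK_PTR
          && jump_leads_only_to_bb_p (BB_END (prev_bb), next_bb));
}
#endif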
6119
6120 /* Removes the loop preheader blocks from the current region and saves
6121    them in PREHEADER_BLOCKS of the father loop, so they will be added
6122    later to the region that represents the outer loop.  */
6123 static void
6124 sel_remove_loop_preheader (void)
6125 {
6126 int i, old_len;
6127 int cur_rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
6128 basic_block bb;
6129 bool all_empty_p = true;
6130 VEC(basic_block, heap) *preheader_blocks
6131 = LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest));
6132
6133 gcc_assert (current_loop_nest);
6134 old_len = VEC_length (basic_block, preheader_blocks);
6135
6136 /* Add blocks that aren't within the current loop to PREHEADER_BLOCKS. */
6137 for (i = 0; i < RGN_NR_BLOCKS (cur_rgn); i++)
6138 {
6139 bb = BASIC_BLOCK (BB_TO_BLOCK (i));
6140
6141       /* If the basic block belongs to the region, but doesn't belong to
6142          the corresponding loop, then it should be a preheader.  */
6143 if (sel_is_loop_preheader_p (bb))
6144 {
6145 VEC_safe_push (basic_block, heap, preheader_blocks, bb);
6146 if (BB_END (bb) != bb_note (bb))
6147 all_empty_p = false;
6148 }
6149 }
6150
6151 /* Remove these blocks only after iterating over the whole region. */
6152 for (i = VEC_length (basic_block, preheader_blocks) - 1;
6153 i >= old_len;
6154 i--)
6155 {
6156 bb = VEC_index (basic_block, preheader_blocks, i);
6157 sel_remove_bb (bb, false);
6158 }
6159
6160 if (!considered_for_pipelining_p (loop_outer (current_loop_nest)))
6161 {
6162 if (!all_empty_p)
6163 /* Immediately create new region from preheader. */
6164 make_region_from_loop_preheader (&preheader_blocks);
6165 else
6166 {
6167           /* If all preheader blocks are empty, don't create a new empty
6168              region.  Instead, remove them completely.  */
6169 for (i = 0; VEC_iterate (basic_block, preheader_blocks, i, bb); i++)
6170 {
6171 edge e;
6172 edge_iterator ei;
6173 basic_block prev_bb = bb->prev_bb, next_bb = bb->next_bb;
6174
6175 /* Redirect all incoming edges to next basic block. */
6176 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
6177 {
6178 if (! (e->flags & EDGE_FALLTHRU))
6179 redirect_edge_and_branch (e, bb->next_bb);
6180 else
6181 redirect_edge_succ (e, bb->next_bb);
6182 }
6183 gcc_assert (BB_NOTE_LIST (bb) == NULL);
6184 delete_and_free_basic_block (bb);
6185
6186               /* Check if, after deleting the preheader, there is an
6187                  unconditional jump in PREV_BB that leads to the next basic
6188                  block NEXT_BB.  If so, remove this jump and free the data
6189                  sets of its basic block if it becomes empty.  */
6190 if (next_bb->prev_bb == prev_bb
6191 && prev_bb != ENTRY_BLOCK_PTR
6192 && jump_leads_only_to_bb_p (BB_END (prev_bb), next_bb))
6193 {
6194 redirect_edge_and_branch (EDGE_SUCC (prev_bb, 0), next_bb);
6195 if (BB_END (prev_bb) == bb_note (prev_bb))
6196 free_data_sets (prev_bb);
6197 }
6198 }
6199 }
6200 VEC_free (basic_block, heap, preheader_blocks);
6201 }
6202 else
6203 /* Store preheader within the father's loop structure. */
6204 SET_LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest),
6205 preheader_blocks);
6206 }
6207 #endif