/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
\f
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "cfglayout.h"
#include "params.h"
#include "sched-int.h"
#include "target.h"
#include "output.h"

\f
#ifdef INSN_SCHEDULING

/* The number of insns to be scheduled in total.  */
static int rgn_n_insns;

/* The number of insns scheduled so far.  */
static int sched_rgn_n_insns;

/* Set of blocks that already have their dependencies calculated.  */
static bitmap_head dont_calc_deps;

/* Last basic block in current ebb.  */
static basic_block last_bb;

/* Implementations of the sched_info functions for ebb scheduling.  */
static void init_ready_list (void);
static void begin_schedule_ready (rtx);
static int schedule_more_p (void);
static const char *ebb_print_insn (const_rtx, int);
static int rank (rtx, rtx);
static int ebb_contributes_to_priority (rtx, rtx);
static basic_block earliest_block_with_similiar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx, rtx);
static void debug_ebb_dependencies (rtx, rtx);

static void ebb_add_remove_insn (rtx, int);
static void ebb_add_block (basic_block, basic_block);
static basic_block advance_target_bb (basic_block, rtx);
static void ebb_fix_recovery_cfg (int, int, int);

/* Allocate memory and store the state of the frontend.  Return the allocated
   memory.  */
static void *
save_ebb_state (void)
{
  int *p = XNEW (int);
  *p = sched_rgn_n_insns;
  return p;
}

/* Restore the state of the frontend from P_, then free it.  */
static void
restore_ebb_state (void *p_)
{
  int *p = (int *)p_;
  sched_rgn_n_insns = *p;
  free (p_);
}

/* Return nonzero if there are more insns that should be scheduled.  */

static int
schedule_more_p (void)
{
  return sched_rgn_n_insns < rgn_n_insns;
}

/* Print dependency information about ebb between HEAD and TAIL.  */
static void
debug_ebb_dependencies (rtx head, rtx tail)
{
  fprintf (sched_dump,
	   ";; --------------- forward dependences: ------------ \n");

  fprintf (sched_dump, "\n;; --- EBB Dependences --- from bb%d to bb%d \n",
	   BLOCK_NUM (head), BLOCK_NUM (tail));

  debug_dependencies (head, tail);
}

/* Add all insns that are initially ready to the ready list READY.  Called
   once before scheduling a set of insns.  */

static void
init_ready_list (void)
{
  int n = 0;
  rtx prev_head = current_sched_info->prev_head;
  rtx next_tail = current_sched_info->next_tail;
  rtx insn;

  sched_rgn_n_insns = 0;

  /* Print debugging information.  */
  if (sched_verbose >= 5)
    debug_ebb_dependencies (NEXT_INSN (prev_head), PREV_INSN (next_tail));

  /* Initialize ready list with all 'ready' insns in target block.
     Count number of insns in the target block being scheduled.  */
  for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
    {
      try_ready (insn);
      n++;
    }

  gcc_assert (n == rgn_n_insns);
}

/* INSN is being scheduled.  Update the count of scheduled insns.  */
static void
begin_schedule_ready (rtx insn ATTRIBUTE_UNUSED)
{
  sched_rgn_n_insns++;
}

/* INSN is being moved to its place in the schedule, after LAST.  */
static void
begin_move_insn (rtx insn, rtx last)
{
  if (BLOCK_FOR_INSN (insn) == last_bb
      /* INSN is a jump in the last block, ...  */
      && control_flow_insn_p (insn)
      /* that is going to be moved over some instructions.  */
      && last != PREV_INSN (insn))
    {
      edge e;
      basic_block bb;

      /* An obscure special case: a partially dead instruction is
         scheduled after the last control flow instruction.  In that
         case we can create a new basic block.  It is always exactly
         one basic block, last in the sequence.  */

      e = find_fallthru_edge (last_bb->succs);

      gcc_checking_assert (!e || !(e->flags & EDGE_COMPLEX));

      gcc_checking_assert (BLOCK_FOR_INSN (insn) == last_bb
                           && !IS_SPECULATION_CHECK_P (insn)
                           && BB_HEAD (last_bb) != insn
                           && BB_END (last_bb) == insn);

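      /* Check the insn stream right after the jump: a fallthru
         successor implies a following note or label, while no
         successor implies a following barrier.  */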
      {
        rtx x;

        x = NEXT_INSN (insn);
        if (e)
          gcc_checking_assert (NOTE_P (x) || LABEL_P (x));
        else
          gcc_checking_assert (BARRIER_P (x));
      }

      if (e)
        {
          bb = split_edge (e);
          gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
        }
      else
        {
          /* Create an empty unreachable block after the INSN.  */
          rtx next = NEXT_INSN (insn);
          if (next && BARRIER_P (next))
            next = NEXT_INSN (next);
          bb = create_basic_block (next, NULL_RTX, last_bb);
        }

      /* split_edge () creates BB before E->DEST.  Keep in mind that
         this operation extends the scheduling region to the end of BB.
         Hence, we need to shift NEXT_TAIL so that haifa-sched.c does
         not run past the end of the scheduling region.  */
      current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
      gcc_assert (current_sched_info->next_tail);

      /* Append new basic block to the end of the ebb.  */
      sched_init_only_bb (bb, last_bb);
      gcc_assert (last_bb == bb);
    }
}

/* Return a string that contains the insn uid and optionally anything else
   necessary to identify this insn in an output.  It's valid to use a
   static buffer for this.  The ALIGNED parameter should cause the string
   to be formatted so that multiple output lines will line up nicely.  */

static const char *
ebb_print_insn (const_rtx insn, int aligned ATTRIBUTE_UNUSED)
{
  static char tmp[80];

  /* '+' before insn means it is a new cycle start.  */
  if (GET_MODE (insn) == TImode)
    sprintf (tmp, "+ %4d", INSN_UID (insn));
  else
    sprintf (tmp, "  %4d", INSN_UID (insn));

  return tmp;
}

/* Compare priority of two insns.  Return a positive number if the second
   insn is to be preferred for scheduling, and a negative one if the first
   is to be preferred.  Zero if they are equally good.  */

static int
rank (rtx insn1, rtx insn2)
{
  basic_block bb1 = BLOCK_FOR_INSN (insn1);
  basic_block bb2 = BLOCK_FOR_INSN (insn2);

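  /* Prefer the insn that lies in the more frequently executed block.  */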
  if (bb1->count > bb2->count
      || bb1->frequency > bb2->frequency)
    return -1;
  if (bb1->count < bb2->count
      || bb1->frequency < bb2->frequency)
    return 1;
  return 0;
}

/* NEXT is an instruction that depends on INSN (a backward dependence);
   return nonzero if we should include this dependence in priority
   calculations.  */

static int
ebb_contributes_to_priority (rtx next ATTRIBUTE_UNUSED,
                             rtx insn ATTRIBUTE_UNUSED)
{
  return 1;
}

/* INSN is a JUMP_INSN.  Store the set of registers that
   must be considered as used by this jump in USED.  */

void
ebb_compute_jump_reg_dependencies (rtx insn, regset used)
{
  basic_block b = BLOCK_FOR_INSN (insn);
  edge e;
  edge_iterator ei;

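  /* Any register live on entry to a successor reached by a non-fallthru
     edge may be read once the jump transfers control there, so treat it
     as used by the jump itself.  */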
  FOR_EACH_EDGE (e, ei, b->succs)
    if ((e->flags & EDGE_FALLTHRU) == 0)
      bitmap_ior_into (used, df_get_live_in (e->dest));
}

/* Used in schedule_insns to initialize current_sched_info for scheduling
   regions (or single basic blocks).  */

static struct common_sched_info_def ebb_common_sched_info;

static struct sched_deps_info_def ebb_sched_deps_info =
  {
    ebb_compute_jump_reg_dependencies,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL,
    1, 0, 0
  };

static struct haifa_sched_info ebb_sched_info =
{
  init_ready_list,
  NULL,
  schedule_more_p,
  NULL,
  rank,
  ebb_print_insn,
  ebb_contributes_to_priority,
  NULL, /* insn_finishes_block_p */

  NULL, NULL,
  NULL, NULL,
  1, 0,

  ebb_add_remove_insn,
  begin_schedule_ready,
  begin_move_insn,
  advance_target_bb,

  save_ebb_state,
  restore_ebb_state,

  SCHED_EBB
  /* We can create new blocks in begin_schedule_ready ().  */
  | NEW_BBS
};
\f
/* Returns the earliest block in EBB currently being processed where a
   "similar load" 'insn2' is found, and hence LOAD_INSN can move
   speculatively into the found block.  All the following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and load2 have a def-use dependence upon
   the same insn 'insn1'.

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   load2 anyhow.

   The function uses the list (given by LAST_BLOCK) of already processed
   blocks in EBB.  The list is formed in `add_deps_for_risky_insns'.  */

static basic_block
earliest_block_with_similiar_load (basic_block last_block, rtx load_insn)
{
  sd_iterator_def back_sd_it;
  dep_t back_dep;
  basic_block bb, earliest_block = NULL;

  FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
    {
      rtx insn1 = DEP_PRO (back_dep);

      if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
        /* Found a DEF-USE dependence (insn1, load_insn).  */
        {
          sd_iterator_def fore_sd_it;
          dep_t fore_dep;

          FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
            {
              rtx insn2 = DEP_CON (fore_dep);
              basic_block insn2_block = BLOCK_FOR_INSN (insn2);

              if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
                {
                  if (earliest_block != NULL
                      && earliest_block->index < insn2_block->index)
                    continue;

                  /* Found a DEF-USE dependence (insn1, insn2).  */
                  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
                    /* insn2 not guaranteed to be a 1 base reg load.  */
                    continue;

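                  /* Walk the chain of already-processed blocks (linked
                     through bb->aux) to see whether insn2's block has
                     been handled yet.  */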
                  for (bb = last_block; bb; bb = (basic_block) bb->aux)
                    if (insn2_block == bb)
                      break;

                  if (!bb)
                    /* insn2 is the similar load.  */
                    earliest_block = insn2_block;
                }
            }
        }
    }

  return earliest_block;
}

/* The following function adds dependencies between jumps and risky
   insns in the given ebb.  */

static void
add_deps_for_risky_insns (rtx head, rtx tail)
{
  rtx insn, prev;
  int classification;
  rtx last_jump = NULL_RTX;
  rtx next_tail = NEXT_INSN (tail);
  basic_block last_block = NULL, bb;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      add_delay_dependencies (insn);
      if (control_flow_insn_p (insn))
        {
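          /* Chain the blocks that end in control flow insns through
             bb->aux; earliest_block_with_similiar_load walks this
             chain.  */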
          bb = BLOCK_FOR_INSN (insn);
          bb->aux = last_block;
          last_block = bb;
          /* Ensure blocks stay in the same order.  */
          if (last_jump)
            add_dependence (insn, last_jump, REG_DEP_ANTI);
          last_jump = insn;
        }
      else if (INSN_P (insn) && last_jump != NULL_RTX)
        {
          classification = haifa_classify_insn (insn);
          prev = last_jump;

          switch (classification)
            {
            case PFREE_CANDIDATE:
              if (flag_schedule_speculative_load)
                {
                  bb = earliest_block_with_similiar_load (last_block, insn);
                  if (bb)
                    {
                      bb = (basic_block) bb->aux;
                      if (!bb)
                        break;
                      prev = BB_END (bb);
                    }
                }
              /* Fall through.  */
            case TRAP_RISKY:
            case IRISKY:
            case PRISKY_CANDIDATE:
              /* ??? We could implement better checking of PRISKY_CANDIDATEs,
                 analogous to sched-rgn.c.  */
              /* We cannot change the mode of the backward
                 dependency because REG_DEP_ANTI has the lowest
                 rank.  */
              if (! sched_insns_conditions_mutex_p (insn, prev))
                {
                  if ((current_sched_info->flags & DO_SPECULATION)
                      && (spec_info->mask & BEGIN_CONTROL))
                    {
                      dep_def _dep, *dep = &_dep;

                      init_dep (dep, prev, insn, REG_DEP_ANTI);

                      if (current_sched_info->flags & USE_DEPS_LIST)
                        {
                          DEP_STATUS (dep) = set_dep_weak (DEP_ANTI,
                                                           BEGIN_CONTROL,
                                                           MAX_DEP_WEAK);
                        }
                      sd_add_or_update_dep (dep, false);
                    }
                  else
                    add_dependence (insn, prev, REG_DEP_CONTROL);
                }

              break;

            default:
              break;
            }
        }
    }
  /* Maintain the invariant that bb->aux is clear after use.  */
  while (last_block)
    {
      bb = (basic_block) last_block->aux;
      last_block->aux = NULL;
      last_block = bb;
    }
}

/* Schedule a single extended basic block, defined by the boundaries
   HEAD and TAIL.

   We change our expectations about scheduler behaviour depending on
   whether MODULO_SCHEDULING is true.  If it is, we expect that the
   caller has already called set_modulo_params and created delay pairs
   as appropriate.  If the modulo schedule failed, we return
   NULL_RTX.  */

basic_block
schedule_ebb (rtx head, rtx tail, bool modulo_scheduling)
{
  basic_block first_bb, target_bb;
  struct deps_desc tmp_deps;
  bool success;

  /* Narrow HEAD and TAIL past any boundary notes, debug insns and
     labels; the rest of the code is easily confused by a stray note
     or two.  */
  while (head != tail)
    {
      if (NOTE_P (head) || DEBUG_INSN_P (head))
        head = NEXT_INSN (head);
      else if (NOTE_P (tail) || DEBUG_INSN_P (tail))
        tail = PREV_INSN (tail);
      else if (LABEL_P (head))
        head = NEXT_INSN (head);
      else
        break;
    }

  first_bb = BLOCK_FOR_INSN (head);
  last_bb = BLOCK_FOR_INSN (tail);

  if (no_real_insns_p (head, tail))
    return BLOCK_FOR_INSN (tail);

  gcc_assert (INSN_P (head) && INSN_P (tail));

  if (!bitmap_bit_p (&dont_calc_deps, first_bb->index))
    {
      init_deps_global ();

      /* Compute dependencies.  */
      init_deps (&tmp_deps, false);
      sched_analyze (&tmp_deps, head, tail);
      free_deps (&tmp_deps);

      add_deps_for_risky_insns (head, tail);

      if (targetm.sched.dependencies_evaluation_hook)
        targetm.sched.dependencies_evaluation_hook (head, tail);

      finish_deps_global ();
    }
  else
    /* Only recovery blocks can have their dependencies already calculated,
       and they are always single-block ebbs.  */
    gcc_assert (first_bb == last_bb);

  /* Set priorities.  */
  current_sched_info->sched_max_insns_priority = 0;
  rgn_n_insns = set_priorities (head, tail);
  current_sched_info->sched_max_insns_priority++;

  current_sched_info->prev_head = PREV_INSN (head);
  current_sched_info->next_tail = NEXT_INSN (tail);

  remove_notes (head, tail);

  unlink_bb_notes (first_bb, last_bb);

  target_bb = first_bb;

  /* Make ready list big enough to hold all the instructions from the ebb.  */
  sched_extend_ready_list (rgn_n_insns);
  success = schedule_block (&target_bb);
  gcc_assert (success || modulo_scheduling);

  /* Free ready list.  */
  sched_finish_ready_list ();

  /* We might pack all instructions into fewer blocks,
     so we may have made some of them empty.  Can't assert (b == last_bb).  */

  /* Sanity check: verify that all region insns were scheduled.  */
  gcc_assert (modulo_scheduling || sched_rgn_n_insns == rgn_n_insns);

  /* Free dependencies.  */
  sched_free_deps (current_sched_info->head, current_sched_info->tail, true);

  gcc_assert (haifa_recovery_bb_ever_added_p
              || deps_pools_are_empty_p ());

  if (EDGE_COUNT (last_bb->preds) == 0)
    /* LAST_BB is unreachable.  */
    {
      gcc_assert (first_bb != last_bb
                  && EDGE_COUNT (last_bb->succs) == 0);
      last_bb = last_bb->prev_bb;
      delete_basic_block (last_bb->next_bb);
    }

  return success ? last_bb : NULL;
}

/* Perform initializations before running schedule_ebbs or a single
   schedule_ebb.  */
void
schedule_ebbs_init (void)
{
  /* Setup infos.  */
  {
    memcpy (&ebb_common_sched_info, &haifa_common_sched_info,
            sizeof (ebb_common_sched_info));

    ebb_common_sched_info.fix_recovery_cfg = ebb_fix_recovery_cfg;
    ebb_common_sched_info.add_block = ebb_add_block;
    ebb_common_sched_info.sched_pass_id = SCHED_EBB_PASS;

    common_sched_info = &ebb_common_sched_info;
    sched_deps_info = &ebb_sched_deps_info;
    current_sched_info = &ebb_sched_info;
  }

  haifa_sched_init ();

  compute_bb_for_insn ();

  /* Initialize DONT_CALC_DEPS.  */
  bitmap_initialize (&dont_calc_deps, 0);
  bitmap_clear (&dont_calc_deps);
}

/* Perform cleanups after scheduling using schedule_ebbs or schedule_ebb.  */
void
schedule_ebbs_finish (void)
{
  bitmap_clear (&dont_calc_deps);

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes ();

  haifa_sched_finish ();
}

/* The main entry point in this file.  */

void
schedule_ebbs (void)
{
  basic_block bb;
  int probability_cutoff;
  rtx tail;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks == NUM_FIXED_BLOCKS)
    return;

  if (profile_info && flag_branch_probabilities)
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
  else
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
  probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

  schedule_ebbs_init ();

  /* Schedule every ebb in the function.  */
  FOR_EACH_BB (bb)
    {
      rtx head = BB_HEAD (bb);

      if (bb->flags & BB_DISABLE_SCHEDULE)
        continue;

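      /* Grow the ebb forward along the fallthru chain: stop at the
         exit block, at a label (a possible join point), when there is
         no fallthru edge, when the edge is colder than the cutoff, or
         when the successor is excluded from scheduling.  */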
      for (;;)
        {
          edge e;
          tail = BB_END (bb);
          if (bb->next_bb == EXIT_BLOCK_PTR
              || LABEL_P (BB_HEAD (bb->next_bb)))
            break;
          e = find_fallthru_edge (bb->succs);
          if (! e)
            break;
          if (e->probability <= probability_cutoff)
            break;
          if (e->dest->flags & BB_DISABLE_SCHEDULE)
            break;
          bb = bb->next_bb;
        }

      bb = schedule_ebb (head, tail, false);
    }
  schedule_ebbs_finish ();
}

/* INSN has been added to/removed from current ebb.  */
static void
ebb_add_remove_insn (rtx insn ATTRIBUTE_UNUSED, int remove_p)
{
  if (!remove_p)
    rgn_n_insns++;
  else
    rgn_n_insns--;
}

/* BB was added to ebb after AFTER.  */
static void
ebb_add_block (basic_block bb, basic_block after)
{
  /* Recovery blocks are always bounded by barriers, so they always
     form a single-block ebb, and we can use bb->index to identify
     such ebbs.  */
  if (after == EXIT_BLOCK_PTR)
    bitmap_set_bit (&dont_calc_deps, bb->index);
  else if (after == last_bb)
    last_bb = bb;
}

/* Return next block in ebb chain.  For parameter meaning please refer to
   sched-int.h: struct sched_info: advance_target_bb.  */
static basic_block
advance_target_bb (basic_block bb, rtx insn)
{
  if (insn)
    {
      if (BLOCK_FOR_INSN (insn) != bb
          && control_flow_insn_p (insn)
          /* We handle interblock movement of the speculation check
             or over a speculation check in
             haifa-sched.c: move_block_after_check ().  */
          && !IS_SPECULATION_BRANCHY_CHECK_P (insn)
          && !IS_SPECULATION_BRANCHY_CHECK_P (BB_END (bb)))
        {
          /* Assert that we don't move jumps across blocks.  */
          gcc_assert (!control_flow_insn_p (BB_END (bb))
                      && NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (bb->next_bb)));
          return bb;
        }
      else
        return 0;
    }
  else
    /* Return the next nonempty block.  */
    {
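      /* A block whose bb note is also its last insn contains no real
         insns, so keep advancing past such blocks.  */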
      do
        {
          gcc_assert (bb != last_bb);

          bb = bb->next_bb;
        }
      while (bb_note (bb) == BB_END (bb));

      return bb;
    }
}

/* Fix internal data after interblock movement of a jump instruction.
   For parameter meaning please refer to
   sched-int.h: struct sched_info: fix_recovery_cfg.  */
static void
ebb_fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi,
                      int jump_bb_nexti)
{
  gcc_assert (last_bb->index != bbi);

  if (jump_bb_nexti == last_bb->index)
    last_bb = BASIC_BLOCK (jump_bbi);
}

#endif /* INSN_SCHEDULING */