/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
\f
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "cfglayout.h"
#include "params.h"
#include "sched-int.h"
#include "target.h"
#include "output.h"

\f
#ifdef INSN_SCHEDULING

/* The number of insns to be scheduled in total.  */
static int rgn_n_insns;

/* The number of insns scheduled so far.  */
static int sched_rgn_n_insns;

/* Set of blocks that already have their dependencies calculated.  */
static bitmap_head dont_calc_deps;

/* Last basic block in current ebb.  */
static basic_block last_bb;

/* Implementations of the sched_info functions for ebb scheduling.  */
static void init_ready_list (void);
static void begin_schedule_ready (rtx, rtx);
static int schedule_more_p (void);
static const char *ebb_print_insn (const_rtx, int);
static int rank (rtx, rtx);
static int ebb_contributes_to_priority (rtx, rtx);
static basic_block earliest_block_with_similar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx, rtx);
static basic_block schedule_ebb (rtx, rtx);
static void debug_ebb_dependencies (rtx, rtx);

static void ebb_add_remove_insn (rtx, int);
static void ebb_add_block (basic_block, basic_block);
static basic_block advance_target_bb (basic_block, rtx);
static void ebb_fix_recovery_cfg (int, int, int);

/* Return nonzero if there are more insns that should be scheduled.  */

static int
schedule_more_p (void)
{
  return sched_rgn_n_insns < rgn_n_insns;
}

/* Print dependency information about ebb between HEAD and TAIL.  */
static void
debug_ebb_dependencies (rtx head, rtx tail)
{
  fprintf (sched_dump,
           ";; --------------- forward dependences: ------------ \n");

  fprintf (sched_dump, "\n;; --- EBB Dependences --- from bb%d to bb%d \n",
           BLOCK_NUM (head), BLOCK_NUM (tail));

  debug_dependencies (head, tail);
}

/* Add all insns that are initially ready to the ready list READY.  Called
   once before scheduling a set of insns.  */

static void
init_ready_list (void)
{
  int n = 0;
  rtx prev_head = current_sched_info->prev_head;
  rtx next_tail = current_sched_info->next_tail;
  rtx insn;

  sched_rgn_n_insns = 0;

  /* Print debugging information.  */
  if (sched_verbose >= 5)
    debug_ebb_dependencies (NEXT_INSN (prev_head), PREV_INSN (next_tail));

  /* Initialize ready list with all 'ready' insns in target block.
     Count number of insns in the target block being scheduled.  */
  for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
    {
      try_ready (insn);
      n++;
    }

  gcc_assert (n == rgn_n_insns);
}

/* INSN is being scheduled after LAST.  Update counters.  */
static void
begin_schedule_ready (rtx insn, rtx last)
{
  sched_rgn_n_insns++;

  if (BLOCK_FOR_INSN (insn) == last_bb
      /* INSN is a jump in the last block, ...  */
      && control_flow_insn_p (insn)
      /* that is going to be moved over some instructions.  */
      && last != PREV_INSN (insn))
    {
      edge e;
      edge_iterator ei;
      basic_block bb;

      /* An obscure special case: a partially dead instruction has been
         scheduled after the last control flow instruction.  In this case
         we can create a new basic block; it is always exactly one basic
         block, the last in the sequence.  */

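      /* Look for a fall-through edge out of LAST_BB: if there is one, the
         new block is created by splitting it; otherwise an unreachable
         block is created directly after INSN.  */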
      FOR_EACH_EDGE (e, ei, last_bb->succs)
        if (e->flags & EDGE_FALLTHRU)
          break;

#ifdef ENABLE_CHECKING
      gcc_assert (!e || !(e->flags & EDGE_COMPLEX));

      gcc_assert (BLOCK_FOR_INSN (insn) == last_bb
                  && !IS_SPECULATION_CHECK_P (insn)
                  && BB_HEAD (last_bb) != insn
                  && BB_END (last_bb) == insn);

      {
        rtx x;

        x = NEXT_INSN (insn);
        if (e)
          gcc_assert (NOTE_P (x) || LABEL_P (x));
        else
          gcc_assert (BARRIER_P (x));
      }
#endif

      if (e)
        {
          bb = split_edge (e);
          gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
        }
      else
        /* Create an empty unreachable block after the INSN.  */
        bb = create_basic_block (NEXT_INSN (insn), NULL_RTX, last_bb);

      /* split_edge () creates BB before E->DEST.  Keep in mind that
         this operation extends the scheduling region till the end of BB.
         Hence, we need to shift NEXT_TAIL so that haifa-sched.c won't go
         out of the scheduling region.  */
      current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
      gcc_assert (current_sched_info->next_tail);

      /* Append new basic block to the end of the ebb.  */
      sched_init_only_bb (bb, last_bb);
      gcc_assert (last_bb == bb);
    }
}

/* Return a string that contains the insn uid and optionally anything else
   necessary to identify this insn in an output.  It's valid to use a
   static buffer for this.  The ALIGNED parameter should cause the string
   to be formatted so that multiple output lines will line up nicely.  */

static const char *
ebb_print_insn (const_rtx insn, int aligned ATTRIBUTE_UNUSED)
{
  static char tmp[80];

  /* '+' before insn means it is a new cycle start.  */
  if (GET_MODE (insn) == TImode)
    sprintf (tmp, "+ %4d", INSN_UID (insn));
  else
    sprintf (tmp, "  %4d", INSN_UID (insn));

  return tmp;
}

/* Compare priority of two insns.  Return a positive number if the second
   insn is to be preferred for scheduling, and a negative one if the first
   is to be preferred.  Zero if they are equally good.  */

static int
rank (rtx insn1, rtx insn2)
{
  basic_block bb1 = BLOCK_FOR_INSN (insn1);
  basic_block bb2 = BLOCK_FOR_INSN (insn2);

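  /* Prefer the insn from the more frequently executed block.  */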
  if (bb1->count > bb2->count
      || bb1->frequency > bb2->frequency)
    return -1;
  if (bb1->count < bb2->count
      || bb1->frequency < bb2->frequency)
    return 1;
  return 0;
}

/* NEXT is an instruction that depends on INSN (a backward dependence);
   return nonzero if we should include this dependence in priority
   calculations.  */

static int
ebb_contributes_to_priority (rtx next ATTRIBUTE_UNUSED,
                             rtx insn ATTRIBUTE_UNUSED)
{
  return 1;
}

/* INSN is a JUMP_INSN, COND_SET is the set of registers that are
   conditionally set before INSN.  Store the set of registers that
   must be considered as used by this jump in USED and that of
   registers that must be considered as set in SET.  */

void
ebb_compute_jump_reg_dependencies (rtx insn, regset cond_set, regset used,
                                   regset set)
{
  basic_block b = BLOCK_FOR_INSN (insn);
  edge e;
  edge_iterator ei;

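  /* Registers live on entry to a successor block are used by the jump;
     the fall-through successor is handled specially below.  */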
  FOR_EACH_EDGE (e, ei, b->succs)
    if (e->flags & EDGE_FALLTHRU)
      /* The jump may be a by-product of a branch that has been merged
         in the main codepath after being conditionalized.  Therefore
         it may guard the fallthrough block from using a value that
         conditionally overwrites the one on the main codepath.  So we
         consider that it restores the value of the main codepath.  */
      bitmap_and (set, df_get_live_in (e->dest), cond_set);
    else
      bitmap_ior_into (used, df_get_live_in (e->dest));
}

/* Used in schedule_insns to initialize current_sched_info for scheduling
   regions (or single basic blocks).  */

static struct common_sched_info_def ebb_common_sched_info;

static struct sched_deps_info_def ebb_sched_deps_info =
  {
    ebb_compute_jump_reg_dependencies,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL,
    1, 0, 0
  };

static struct haifa_sched_info ebb_sched_info =
{
  init_ready_list,
  NULL,
  schedule_more_p,
  NULL,
  rank,
  ebb_print_insn,
  ebb_contributes_to_priority,
  NULL, /* insn_finishes_block_p */

  NULL, NULL,
  NULL, NULL,
  1, 0,

  ebb_add_remove_insn,
  begin_schedule_ready,
  advance_target_bb,
  SCHED_EBB
  /* We can create new blocks in begin_schedule_ready ().  */
  | NEW_BBS
};
\f
/* Returns the earliest block in EBB currently being processed where a
   "similar load" 'insn2' is found, and hence LOAD_INSN can move
   speculatively into the found block.  All the following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and load2 have a def-use dependence upon
   the same insn 'insn1'.

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   load2 anyhow.

   The function uses the list (given by LAST_BLOCK) of already processed
   blocks in EBB.  The list is formed in `add_deps_for_risky_insns'.  */

static basic_block
earliest_block_with_similar_load (basic_block last_block, rtx load_insn)
{
  sd_iterator_def back_sd_it;
  dep_t back_dep;
  basic_block bb, earliest_block = NULL;

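  /* Look at each insn INSN1 that LOAD_INSN truly depends on, and at each
     other load INSN2 that truly depends on the same INSN1.  */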
  FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
    {
      rtx insn1 = DEP_PRO (back_dep);

      if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
        /* Found a DEF-USE dependence (insn1, load_insn).  */
        {
          sd_iterator_def fore_sd_it;
          dep_t fore_dep;

          FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
            {
              rtx insn2 = DEP_CON (fore_dep);
              basic_block insn2_block = BLOCK_FOR_INSN (insn2);

              if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
                {
                  if (earliest_block != NULL
                      && earliest_block->index < insn2_block->index)
                    continue;

                  /* Found a DEF-USE dependence (insn1, insn2).  */
                  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
                    /* insn2 not guaranteed to be a 1 base reg load.  */
                    continue;

                  for (bb = last_block; bb; bb = (basic_block) bb->aux)
                    if (insn2_block == bb)
                      break;

                  if (!bb)
                    /* insn2 is the similar load.  */
                    earliest_block = insn2_block;
                }
            }
        }
    }

  return earliest_block;
}

/* The following function adds dependencies between jumps and risky
   insns in given ebb.  */

static void
add_deps_for_risky_insns (rtx head, rtx tail)
{
  rtx insn, prev;
  int classification;
  rtx last_jump = NULL_RTX;
  rtx next_tail = NEXT_INSN (tail);
  basic_block last_block = NULL, bb;

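  /* Scan the ebb, remembering the most recent jump and chaining the
     blocks that contain jumps through their aux fields; a risky insn
     found after a jump gets an anti dependence on a preceding jump.  */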
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (control_flow_insn_p (insn))
      {
        bb = BLOCK_FOR_INSN (insn);
        bb->aux = last_block;
        last_block = bb;
        last_jump = insn;
      }
    else if (INSN_P (insn) && last_jump != NULL_RTX)
      {
        classification = haifa_classify_insn (insn);
        prev = last_jump;
        switch (classification)
          {
          case PFREE_CANDIDATE:
            if (flag_schedule_speculative_load)
              {
                bb = earliest_block_with_similar_load (last_block, insn);
                if (bb)
                  {
                    bb = (basic_block) bb->aux;
                    if (!bb)
                      break;
                    prev = BB_END (bb);
                  }
              }
            /* Fall through.  */
          case TRAP_RISKY:
          case IRISKY:
          case PRISKY_CANDIDATE:
            /* ??? We could implement better checking of PRISKY_CANDIDATEs,
               analogous to sched-rgn.c.  */
            /* We cannot change the mode of the backward
               dependency because REG_DEP_ANTI has the lowest
               rank.  */
            if (! sched_insns_conditions_mutex_p (insn, prev))
              {
                dep_def _dep, *dep = &_dep;

                init_dep (dep, prev, insn, REG_DEP_ANTI);

                if (!(current_sched_info->flags & USE_DEPS_LIST))
                  {
                    enum DEPS_ADJUST_RESULT res;

                    res = sd_add_or_update_dep (dep, false);

                    /* We can't change an existing dependency with
                       DEP_ANTI.  */
                    gcc_assert (res != DEP_CHANGED);
                  }
                else
                  {
                    if ((current_sched_info->flags & DO_SPECULATION)
                        && (spec_info->mask & BEGIN_CONTROL))
                      DEP_STATUS (dep) = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
                                                       MAX_DEP_WEAK);

                    sd_add_or_update_dep (dep, false);

                    /* DEP_STATUS could have been changed.
                       No assertion here.  */
                  }
              }

            break;

          default:
            break;
          }
      }
  /* Maintain the invariant that bb->aux is clear after use.  */
  while (last_block)
    {
      bb = (basic_block) last_block->aux;
      last_block->aux = NULL;
      last_block = bb;
    }
}

/* Schedule a single extended basic block, defined by the boundaries HEAD
   and TAIL.  */

static basic_block
schedule_ebb (rtx head, rtx tail)
{
  basic_block first_bb, target_bb;
  struct deps tmp_deps;

  first_bb = BLOCK_FOR_INSN (head);
  last_bb = BLOCK_FOR_INSN (tail);

  if (no_real_insns_p (head, tail))
    return BLOCK_FOR_INSN (tail);

  gcc_assert (INSN_P (head) && INSN_P (tail));

  if (!bitmap_bit_p (&dont_calc_deps, first_bb->index))
    {
      init_deps_global ();

      /* Compute dependencies.  */
      init_deps (&tmp_deps, false);
      sched_analyze (&tmp_deps, head, tail);
      free_deps (&tmp_deps);

      add_deps_for_risky_insns (head, tail);

      if (targetm.sched.dependencies_evaluation_hook)
        targetm.sched.dependencies_evaluation_hook (head, tail);

      finish_deps_global ();
    }
  else
    /* Only recovery blocks can have their dependencies already calculated,
       and they are always single-block ebbs.  */
    gcc_assert (first_bb == last_bb);

  /* Set priorities.  */
  current_sched_info->sched_max_insns_priority = 0;
  rgn_n_insns = set_priorities (head, tail);
  current_sched_info->sched_max_insns_priority++;

  current_sched_info->prev_head = PREV_INSN (head);
  current_sched_info->next_tail = NEXT_INSN (tail);

  remove_notes (head, tail);

  unlink_bb_notes (first_bb, last_bb);

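  /* Start scheduling at the first block of the ebb; the advance_target_bb
     hook moves TARGET_BB forward as insns are scheduled across blocks.  */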
  target_bb = first_bb;

  /* Make ready list big enough to hold all the instructions from the ebb.  */
  sched_extend_ready_list (rgn_n_insns);
  schedule_block (&target_bb);
  /* Free ready list.  */
  sched_finish_ready_list ();

  /* We might pack all instructions into fewer blocks,
     so we may have made some of them empty.  Can't assert (b == last_bb).  */

  /* Sanity check: verify that all region insns were scheduled.  */
  gcc_assert (sched_rgn_n_insns == rgn_n_insns);

  /* Free dependencies.  */
  sched_free_deps (current_sched_info->head, current_sched_info->tail, true);

  gcc_assert (haifa_recovery_bb_ever_added_p
              || deps_pools_are_empty_p ());

  if (EDGE_COUNT (last_bb->preds) == 0)
    /* LAST_BB is unreachable.  */
    {
      gcc_assert (first_bb != last_bb
                  && EDGE_COUNT (last_bb->succs) == 0);
      last_bb = last_bb->prev_bb;
      delete_basic_block (last_bb->next_bb);
    }

  return last_bb;
}

/* The one entry point in this file.  */

void
schedule_ebbs (void)
{
  basic_block bb;
  int probability_cutoff;
  rtx tail;

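  /* The tracer parameters are percentages; scale the chosen cutoff to
     REG_BR_PROB_BASE units so it can be compared with edge probabilities.  */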
  if (profile_info && flag_branch_probabilities)
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
  else
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
  probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks == NUM_FIXED_BLOCKS)
    return;

  /* Setup infos.  */
  {
    memcpy (&ebb_common_sched_info, &haifa_common_sched_info,
            sizeof (ebb_common_sched_info));

    ebb_common_sched_info.fix_recovery_cfg = ebb_fix_recovery_cfg;
    ebb_common_sched_info.add_block = ebb_add_block;
    ebb_common_sched_info.sched_pass_id = SCHED_EBB_PASS;

    common_sched_info = &ebb_common_sched_info;
    sched_deps_info = &ebb_sched_deps_info;
    current_sched_info = &ebb_sched_info;
  }

  haifa_sched_init ();

  compute_bb_for_insn ();

  /* Initialize DONT_CALC_DEPS and ebb-{start, end} markers.  */
  bitmap_initialize (&dont_calc_deps, 0);
  bitmap_clear (&dont_calc_deps);

  /* Schedule every ebb in the function.  */
  FOR_EACH_BB (bb)
    {
      rtx head = BB_HEAD (bb);

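      /* Grow the ebb across fall-through edges: stop at the exit block,
         at a label (a join point), or when the fall-through edge becomes
         too improbable.  */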
      for (;;)
        {
          edge e;
          edge_iterator ei;
          tail = BB_END (bb);
          if (bb->next_bb == EXIT_BLOCK_PTR
              || LABEL_P (BB_HEAD (bb->next_bb)))
            break;
          FOR_EACH_EDGE (e, ei, bb->succs)
            if ((e->flags & EDGE_FALLTHRU) != 0)
              break;
          if (! e)
            break;
          if (e->probability <= probability_cutoff)
            break;
          bb = bb->next_bb;
        }

      /* Blah.  We should fix the rest of the code not to get confused by
         a note or two.  */
      while (head != tail)
        {
          if (NOTE_P (head) || BOUNDARY_DEBUG_INSN_P (head))
            head = NEXT_INSN (head);
          else if (NOTE_P (tail) || BOUNDARY_DEBUG_INSN_P (tail))
            tail = PREV_INSN (tail);
          else if (LABEL_P (head))
            head = NEXT_INSN (head);
          else
            break;
        }

      bb = schedule_ebb (head, tail);
    }
  bitmap_clear (&dont_calc_deps);

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes ();

  haifa_sched_finish ();
}

/* INSN has been added to/removed from current ebb.  */
static void
ebb_add_remove_insn (rtx insn ATTRIBUTE_UNUSED, int remove_p)
{
  if (!remove_p)
    rgn_n_insns++;
  else
    rgn_n_insns--;
}

/* BB was added to ebb after AFTER.  */
static void
ebb_add_block (basic_block bb, basic_block after)
{
  /* Recovery blocks are always bounded by BARRIERs, so they always form
     single-block ebbs, and we can therefore use rec->index to identify
     such ebbs.  */
  if (after == EXIT_BLOCK_PTR)
    bitmap_set_bit (&dont_calc_deps, bb->index);
  else if (after == last_bb)
    last_bb = bb;
}

/* Return next block in ebb chain.  For parameter meaning please refer to
   sched-int.h: struct sched_info: advance_target_bb.  */
static basic_block
advance_target_bb (basic_block bb, rtx insn)
{
  if (insn)
    {
      if (BLOCK_FOR_INSN (insn) != bb
          && control_flow_insn_p (insn)
          /* We handle interblock movement of the speculation check
             or over a speculation check in
             haifa-sched.c: move_block_after_check ().  */
          && !IS_SPECULATION_BRANCHY_CHECK_P (insn)
          && !IS_SPECULATION_BRANCHY_CHECK_P (BB_END (bb)))
        {
          /* Assert that we don't move jumps across blocks.  */
          gcc_assert (!control_flow_insn_p (BB_END (bb))
                      && NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (bb->next_bb)));
          return bb;
        }
      else
        return 0;
    }
  else
    /* Return the next nonempty block.  */
    {
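      /* A block whose only insn is its basic-block note is empty;
         keep advancing past such blocks.  */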
      do
        {
          gcc_assert (bb != last_bb);

          bb = bb->next_bb;
        }
      while (bb_note (bb) == BB_END (bb));

      return bb;
    }
}

/* Fix internal data after interblock movement of jump instruction.
   For parameter meaning please refer to
   sched-int.h: struct sched_info: fix_recovery_cfg.  */
static void
ebb_fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi,
                      int jump_bb_nexti)
{
  gcc_assert (last_bb->index != bbi);

  if (jump_bb_nexti == last_bb->index)
    last_bb = BASIC_BLOCK (jump_bbi);
}

#endif /* INSN_SCHEDULING */