1 /* Instruction scheduling pass.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
4 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
5 and currently maintained by, Jim Wilson (wilson@cygnus.com)
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 2, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING. If not, write to the Free
21 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
22 02111-1307, USA. */
23
24 /* This pass implements list scheduling within basic blocks. It is
25 run twice: (1) after flow analysis, but before register allocation,
26 and (2) after register allocation.
27
28 The first run performs interblock scheduling, moving insns between
29 different blocks in the same "region", and the second runs only
30 basic block scheduling.
31
32 Interblock motions performed are useful motions and speculative
33 motions, including speculative loads. Motions requiring code
34 duplication are not supported. The identification of motion type
35 and the check for validity of speculative motions requires
36 construction and analysis of the function's control flow graph.
37
38 The main entry point for this pass is schedule_insns(), called for
39 each function. The work of the scheduler is organized in three
40 levels: (1) function level: insns are subject to splitting,
41 control-flow-graph is constructed, regions are computed (after
42 reload, each region is of one block), (2) region level: control
43 flow graph attributes required for interblock scheduling are
44 computed (dominators, reachability, etc.), data dependences and
45 priorities are computed, and (3) block level: insns in the block
46 are actually scheduled. */
47 \f
48 #include "config.h"
49 #include "system.h"
50 #include "coretypes.h"
51 #include "tm.h"
52 #include "toplev.h"
53 #include "rtl.h"
54 #include "tm_p.h"
55 #include "hard-reg-set.h"
56 #include "basic-block.h"
57 #include "regs.h"
58 #include "function.h"
59 #include "flags.h"
60 #include "insn-config.h"
61 #include "insn-attr.h"
62 #include "except.h"
63 #include "toplev.h"
64 #include "recog.h"
65 #include "cfglayout.h"
66 #include "params.h"
67 #include "sched-int.h"
68 #include "target.h"
69
70 /* Define when we want to count REG_DEAD notes before and after scheduling
71 for sanity checking. We can't do that when conditional execution is used,
72 as REG_DEAD notes exist only for unconditional deaths. */
73
74 #if !defined (HAVE_conditional_execution) && defined (ENABLE_CHECKING)
75 #define CHECK_DEAD_NOTES 1
76 #else
77 #define CHECK_DEAD_NOTES 0
78 #endif
79
80
81 #ifdef INSN_SCHEDULING
82 /* Some accessor macros for h_i_d members only used within this file. */
83 #define INSN_REF_COUNT(INSN) (h_i_d[INSN_UID (INSN)].ref_count)
84 #define FED_BY_SPEC_LOAD(insn) (h_i_d[INSN_UID (insn)].fed_by_spec_load)
85 #define IS_LOAD_INSN(insn) (h_i_d[INSN_UID (insn)].is_load_insn)
86
87 /* nr_inter/spec counts interblock/speculative motion for the function. */
88 static int nr_inter, nr_spec;
89
90 /* Control flow graph edges are kept in circular lists. */
91 typedef struct
92 {
93 int from_block;
94 int to_block;
95 int next_in;
96 int next_out;
97 }
98 haifa_edge;
99 static haifa_edge *edge_table;
100
101 #define NEXT_IN(edge) (edge_table[edge].next_in)
102 #define NEXT_OUT(edge) (edge_table[edge].next_out)
103 #define FROM_BLOCK(edge) (edge_table[edge].from_block)
104 #define TO_BLOCK(edge) (edge_table[edge].to_block)
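/* For example, if block B has outgoing edges numbered 5 and 8 (in the
   order they were added by new_edge), then OUT_EDGES (B) == 5,
   NEXT_OUT (5) == 8 and NEXT_OUT (8) == 5, closing the ring; a block
   with a single edge has that edge pointing back to itself.  Walks
   over these rings therefore terminate when the first edge is seen
   again.  */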
105
106 /* Number of edges in the control flow graph. (In fact, larger than
107 that by 1, since edge 0 is unused.) */
108 static int nr_edges;
109
110 /* Circular list of incoming/outgoing edges of a block. */
111 static int *in_edges;
112 static int *out_edges;
113
114 #define IN_EDGES(block) (in_edges[block])
115 #define OUT_EDGES(block) (out_edges[block])
116
117 static int is_cfg_nonregular (void);
118 static int build_control_flow (struct edge_list *);
119 static void new_edge (int, int);
120
121 /* A region is the main entity for interblock scheduling: insns
122 are allowed to move between blocks in the same region, along
123 control flow graph edges, in the 'up' direction. */
124 typedef struct
125 {
126 int rgn_nr_blocks; /* Number of blocks in region. */
127 int rgn_blocks; /* Blocks in the region (actually the index of the first block in rgn_bb_table). */
128 }
129 region;
130
131 /* Number of regions in the procedure. */
132 static int nr_regions;
133
134 /* Table of region descriptions. */
135 static region *rgn_table;
136
137 /* Array of lists of regions' blocks. */
138 static int *rgn_bb_table;
139
140 /* Topological order of blocks in the region (if b2 is reachable from
141 b1, block_to_bb[b2] > block_to_bb[b1]). Note: A basic block is
142 always referred to by either block or b, while its topological
143 order name (in the region) is referred to by bb. */
144 static int *block_to_bb;
145
146 /* The number of the region containing a block. */
147 static int *containing_rgn;
148
149 #define RGN_NR_BLOCKS(rgn) (rgn_table[rgn].rgn_nr_blocks)
150 #define RGN_BLOCKS(rgn) (rgn_table[rgn].rgn_blocks)
151 #define BLOCK_TO_BB(block) (block_to_bb[block])
152 #define CONTAINING_RGN(block) (containing_rgn[block])
153
154 void debug_regions (void);
155 static void find_single_block_region (void);
156 static void find_rgns (struct edge_list *);
157 static bool too_large (int, int *, int *);
158
159 extern void debug_live (int, int);
160
161 /* Blocks of the current region being scheduled. */
162 static int current_nr_blocks;
163 static int current_blocks;
164
165 /* The mapping from bb to block. */
166 #define BB_TO_BLOCK(bb) (rgn_bb_table[current_blocks + (bb)])
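/* BB_TO_BLOCK and BLOCK_TO_BB are inverses within the current region:
   BLOCK_TO_BB (BB_TO_BLOCK (bb)) == bb, and bb == 0 always names the
   region entry block (debug_regions checks this invariant).  */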
167
168 typedef struct
169 {
170 int *first_member; /* Pointer to the list start in bitlst_table. */
171 int nr_members; /* The number of members of the bit list. */
172 }
173 bitlst;
174
175 static int bitlst_table_last;
176 static int *bitlst_table;
177
178 static void extract_bitlst (sbitmap, bitlst *);
179
180 /* Target info declarations.
181
182 The block currently being scheduled is referred to as the "target" block,
183 while other blocks in the region from which insns can be moved to the
184 target are called "source" blocks. The candidate structure holds info
185 about such sources: are they valid? Speculative? Etc. */
186 typedef bitlst bblst;
187 typedef struct
188 {
189 char is_valid;
190 char is_speculative;
191 int src_prob;
192 bblst split_bbs;
193 bblst update_bbs;
194 }
195 candidate;
196
197 static candidate *candidate_table;
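/* Only blocks that come after the target in the region's topological
   order and are dominated by it can become valid sources; see
   compute_trg_info, which also applies the probability and
   speculation checks described below.  */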
198
199 /* A speculative motion requires checking live information on the path
200 from 'source' to 'target'. The split blocks are those to be checked.
201 After a speculative motion, live information should be modified in
202 the 'update' blocks.
203
204 Lists of split and update blocks for each candidate of the current
205 target are in array bblst_table. */
206 static int *bblst_table, bblst_size, bblst_last;
207
208 #define IS_VALID(src) ( candidate_table[src].is_valid )
209 #define IS_SPECULATIVE(src) ( candidate_table[src].is_speculative )
210 #define SRC_PROB(src) ( candidate_table[src].src_prob )
211
212 /* The bb being currently scheduled. */
213 static int target_bb;
214
215 /* List of edges. */
216 typedef bitlst edgelst;
217
218 /* Target info functions. */
219 static void split_edges (int, int, edgelst *);
220 static void compute_trg_info (int);
221 void debug_candidate (int);
222 void debug_candidates (int);
223
224 /* Dominators array: dom[i] contains the sbitmap of dominators of
225 bb i in the region. */
226 static sbitmap *dom;
227
228 /* bb 0 is the only region entry. */
229 #define IS_RGN_ENTRY(bb) (!bb)
230
231 /* Is bb_src dominated by bb_trg? */
232 #define IS_DOMINATED(bb_src, bb_trg) \
233 ( TEST_BIT (dom[bb_src], bb_trg) )
234
235 /* Probability: Prob[i] is a float in [0, 1] which is the probability
236 of bb i relative to the region entry. */
237 static float *prob;
238
239 /* The probability of bb_src, relative to bb_trg. Note that while the
240 'prob[bb]' is a float in [0, 1], this macro returns an integer
241 in [0, 100]. */
242 #define GET_SRC_PROB(bb_src, bb_trg) ((int) (100.0 * (prob[bb_src] / \
243 prob[bb_trg])))
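/* For example, prob[bb_src] == 0.3 and prob[bb_trg] == 0.6 give
   GET_SRC_PROB (bb_src, bb_trg) == 50.  */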
244
245 /* Bit-set of edges, where bit i stands for edge i. */
246 typedef sbitmap edgeset;
247
248 /* Number of edges in the region. */
249 static int rgn_nr_edges;
250
251 /* Array of size rgn_nr_edges. */
252 static int *rgn_edges;
253
254
255 /* Mapping from each edge in the graph to its number in the rgn. */
256 static int *edge_to_bit;
257 #define EDGE_TO_BIT(edge) (edge_to_bit[edge])
258
259 /* The split edges of a source bb are different for each target
260 bb. In order to compute this efficiently, the 'potential-split edges'
261 are computed for each bb prior to scheduling a region. These are actually
262 the split edges of each bb relative to the region entry.
263
264 pot_split[bb] is the set of potential split edges of bb. */
265 static edgeset *pot_split;
266
267 /* For every bb, a set of its ancestor edges. */
268 static edgeset *ancestor_edges;
269
270 static void compute_dom_prob_ps (int);
271
272 #define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))
273 #define IS_SPECULATIVE_INSN(INSN) (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN))))
274 #define INSN_BB(INSN) (BLOCK_TO_BB (BLOCK_NUM (INSN)))
275
276 /* Parameters affecting the decision of rank_for_schedule().
277 ??? Nope. But MIN_PROBABILITY is used in compute_trg_info. */
278 #define MIN_PROBABILITY 40
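/* A candidate source block whose GET_SRC_PROB relative to the target
   falls below this threshold is marked invalid in compute_trg_info.  */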
279
280 /* Speculative scheduling functions. */
281 static int check_live_1 (int, rtx);
282 static void update_live_1 (int, rtx);
283 static int check_live (rtx, int);
284 static void update_live (rtx, int);
285 static void set_spec_fed (rtx);
286 static int is_pfree (rtx, int, int);
287 static int find_conditional_protection (rtx, int);
288 static int is_conditionally_protected (rtx, int, int);
289 static int is_prisky (rtx, int, int);
290 static int is_exception_free (rtx, int, int);
291
292 static bool sets_likely_spilled (rtx);
293 static void sets_likely_spilled_1 (rtx, rtx, void *);
294 static void add_branch_dependences (rtx, rtx);
295 static void compute_block_backward_dependences (int);
296 void debug_dependencies (void);
297
298 static void init_regions (void);
299 static void schedule_region (int);
300 static rtx concat_INSN_LIST (rtx, rtx);
301 static void concat_insn_mem_list (rtx, rtx, rtx *, rtx *);
302 static void propagate_deps (int, struct deps *);
303 static void free_pending_lists (void);
304
305 /* Functions for construction of the control flow graph. */
306
307 /* Return 1 if control flow graph should not be constructed, 0 otherwise.
308
309 We decide not to build the control flow graph if there is possibly more
310 than one entry to the function, if computed branches exist, or if we
311 have nonlocal gotos. */
312
313 static int
314 is_cfg_nonregular (void)
315 {
316 basic_block b;
317 rtx insn;
318 RTX_CODE code;
319
320 /* If we have a label that could be the target of a nonlocal goto, then
321 the cfg is not well structured. */
322 if (nonlocal_goto_handler_labels)
323 return 1;
324
325 /* If we have any forced labels, then the cfg is not well structured. */
326 if (forced_labels)
327 return 1;
328
329 /* If this function has a computed jump, then we consider the cfg
330 not well structured. */
331 if (current_function_has_computed_jump)
332 return 1;
333
334 /* If we have exception handlers, then we consider the cfg not well
335 structured. ?!? We should be able to handle this now that flow.c
336 computes an accurate cfg for EH. */
337 if (current_function_has_exception_handlers ())
338 return 1;
339
340 /* If we have non-jumping insns which refer to labels, then we consider
341 the cfg not well structured. */
342 /* Check for labels referred to otherwise than by jumps. */
343 FOR_EACH_BB (b)
344 for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn))
345 {
346 code = GET_CODE (insn);
347 if (INSN_P (insn) && code != JUMP_INSN)
348 {
349 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
350
351 if (note
352 && ! (JUMP_P (NEXT_INSN (insn))
353 && find_reg_note (NEXT_INSN (insn), REG_LABEL,
354 XEXP (note, 0))))
355 return 1;
356 }
357
358 if (insn == BB_END (b))
359 break;
360 }
361
362 /* All the tests passed. Consider the cfg well structured. */
363 return 0;
364 }
365
366 /* Build the control flow graph and set nr_edges.
367
368 Instead of trying to build a cfg ourselves, we rely on flow to
369 do it for us. Stamp out useless code (and bug) duplication.
370
371 Return nonzero if an irregularity in the cfg is found which would
372 prevent cross block scheduling. */
373
374 static int
375 build_control_flow (struct edge_list *edge_list)
376 {
377 int i, unreachable, num_edges;
378 basic_block b;
379
380 /* This already accounts for entry/exit edges. */
381 num_edges = NUM_EDGES (edge_list);
382
383 /* Unreachable loops with more than one basic block are detected
384 during the DFS traversal in find_rgns.
385
386 Unreachable loops with a single block are detected here. This
387 test is redundant with the one in find_rgns, but it's much
388 cheaper to go ahead and catch the trivial case here. */
389 unreachable = 0;
390 FOR_EACH_BB (b)
391 {
392 if (b->pred == NULL
393 || (b->pred->src == b
394 && b->pred->pred_next == NULL))
395 unreachable = 1;
396 }
397
398 /* ??? We can kill these soon. */
399 in_edges = xcalloc (last_basic_block, sizeof (int));
400 out_edges = xcalloc (last_basic_block, sizeof (int));
401 edge_table = xcalloc (num_edges, sizeof (haifa_edge));
402
403 nr_edges = 0;
404 for (i = 0; i < num_edges; i++)
405 {
406 edge e = INDEX_EDGE (edge_list, i);
407
408 if (e->dest != EXIT_BLOCK_PTR
409 && e->src != ENTRY_BLOCK_PTR)
410 new_edge (e->src->index, e->dest->index);
411 }
412
413 /* Increment by 1, since edge 0 is unused. */
414 nr_edges++;
415
416 return unreachable;
417 }
418
419 /* Record an edge in the control flow graph from SOURCE to TARGET.
420
421 In theory, this is redundant with the s_succs computed above, but
422 we have not converted all of haifa to use information from the
423 integer lists. */
424
425 static void
426 new_edge (int source, int target)
427 {
428 int e, next_edge;
429 int curr_edge, fst_edge;
430
431 /* Check for duplicates. */
432 fst_edge = curr_edge = OUT_EDGES (source);
433 while (curr_edge)
434 {
435 if (FROM_BLOCK (curr_edge) == source
436 && TO_BLOCK (curr_edge) == target)
437 {
438 return;
439 }
440
441 curr_edge = NEXT_OUT (curr_edge);
442
443 if (fst_edge == curr_edge)
444 break;
445 }
446
447 e = ++nr_edges;
448
449 FROM_BLOCK (e) = source;
450 TO_BLOCK (e) = target;
451
452 if (OUT_EDGES (source))
453 {
454 next_edge = NEXT_OUT (OUT_EDGES (source));
455 NEXT_OUT (OUT_EDGES (source)) = e;
456 NEXT_OUT (e) = next_edge;
457 }
458 else
459 {
460 OUT_EDGES (source) = e;
461 NEXT_OUT (e) = e;
462 }
463
464 if (IN_EDGES (target))
465 {
466 next_edge = NEXT_IN (IN_EDGES (target));
467 NEXT_IN (IN_EDGES (target)) = e;
468 NEXT_IN (e) = next_edge;
469 }
470 else
471 {
472 IN_EDGES (target) = e;
473 NEXT_IN (e) = e;
474 }
475 }
476
477 /* Translate a bit-set SET to a list BL of the bit-set members. */
478
479 static void
480 extract_bitlst (sbitmap set, bitlst *bl)
481 {
482 int i;
483
484 /* bitlst table space is reused in each call to extract_bitlst. */
485 bitlst_table_last = 0;
486
487 bl->first_member = &bitlst_table[bitlst_table_last];
488 bl->nr_members = 0;
489
490 /* Iterate over the set bits in the bitmap. */
491 EXECUTE_IF_SET_IN_SBITMAP (set, 0, i,
492 {
493 bitlst_table[bitlst_table_last++] = i;
494 (bl->nr_members)++;
495 });
496
497 }
498
499 /* Functions for the construction of regions. */
500
501 /* Print the regions, for debugging purposes. Callable from debugger. */
502
503 void
504 debug_regions (void)
505 {
506 int rgn, bb;
507
508 fprintf (sched_dump, "\n;; ------------ REGIONS ----------\n\n");
509 for (rgn = 0; rgn < nr_regions; rgn++)
510 {
511 fprintf (sched_dump, ";;\trgn %d nr_blocks %d:\n", rgn,
512 rgn_table[rgn].rgn_nr_blocks);
513 fprintf (sched_dump, ";;\tbb/block: ");
514
515 for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
516 {
517 current_blocks = RGN_BLOCKS (rgn);
518
519 if (bb != BLOCK_TO_BB (BB_TO_BLOCK (bb)))
520 abort ();
521
522 fprintf (sched_dump, " %d/%d ", bb, BB_TO_BLOCK (bb));
523 }
524
525 fprintf (sched_dump, "\n\n");
526 }
527 }
528
529 /* Build a single block region for each basic block in the function.
530 This allows for using the same code for interblock and basic block
531 scheduling. */
532
533 static void
534 find_single_block_region (void)
535 {
536 basic_block bb;
537
538 nr_regions = 0;
539
540 FOR_EACH_BB (bb)
541 {
542 rgn_bb_table[nr_regions] = bb->index;
543 RGN_NR_BLOCKS (nr_regions) = 1;
544 RGN_BLOCKS (nr_regions) = nr_regions;
545 CONTAINING_RGN (bb->index) = nr_regions;
546 BLOCK_TO_BB (bb->index) = 0;
547 nr_regions++;
548 }
549 }
550
551 /* Update number of blocks and the estimate for number of insns
552 in the region. Return true if the region is "too large" for interblock
553 scheduling (compile time considerations). */
554
555 static bool
556 too_large (int block, int *num_bbs, int *num_insns)
557 {
558 (*num_bbs)++;
559 (*num_insns) += (INSN_LUID (BB_END (BASIC_BLOCK (block)))
560 - INSN_LUID (BB_HEAD (BASIC_BLOCK (block))));
561
562 return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS))
563 || (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS)));
564 }
565
566 /* UPDATE_LOOP_RELATIONS (blk, hdr): Check if the loop headed by max_hdr[blk]
567 is still an inner loop. Put in max_hdr[blk] the header of the innermost
568 loop containing blk. */
569 #define UPDATE_LOOP_RELATIONS(blk, hdr) \
570 { \
571 if (max_hdr[blk] == -1) \
572 max_hdr[blk] = hdr; \
573 else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr]) \
574 RESET_BIT (inner, hdr); \
575 else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr]) \
576 { \
577 RESET_BIT (inner,max_hdr[blk]); \
578 max_hdr[blk] = hdr; \
579 } \
580 }
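/* For example, if BLK belongs to two loops whose headers H1 and H2
   satisfy dfs_nr[H1] < dfs_nr[H2], the macro ends up with H2 (the
   header with the larger DFS number) in max_hdr[BLK] and with H1's
   bit cleared in INNER, so only the innermost enclosing header
   remains marked as an inner loop header.  */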
581
582 /* Find regions for interblock scheduling.
583
584 A region for scheduling can be:
585
586 * A loop-free procedure, or
587
588 * A reducible inner loop, or
589
590 * A basic block not contained in any other region.
591
592 ?!? In theory we could build other regions based on extended basic
593 blocks or reverse extended basic blocks. Is it worth the trouble?
594
595 Loop blocks that form a region are put into the region's block list
596 in topological order.
597
598 This procedure stores its results into the following global (ick) variables
599
600 * nr_regions
601 * rgn_table
602 * rgn_bb_table
603 * block_to_bb
604 * containing_rgn
605
606 We use dominator relationships to avoid making regions out of non-reducible
607 loops.
608
609 This procedure needs to be converted to work on pred/succ lists instead
610 of edge tables. That would simplify it somewhat. */
611
612 static void
613 find_rgns (struct edge_list *edge_list)
614 {
615 int *max_hdr, *dfs_nr, *stack, *degree;
616 char no_loops = 1;
617 int node, child, loop_head, i, head, tail;
618 int count = 0, sp, idx = 0;
619 int current_edge = out_edges[ENTRY_BLOCK_PTR->succ->dest->index];
620 int num_bbs, num_insns, unreachable;
621 int too_large_failure;
622 basic_block bb;
623
624 /* Note if an edge has been passed. */
625 sbitmap passed;
626
627 /* Note if a block is a natural loop header. */
628 sbitmap header;
629
630 /* Note if a block is a natural inner loop header. */
631 sbitmap inner;
632
633 /* Note if a block is in the block queue. */
634 sbitmap in_queue;
635
636 /* Note if a block is on the DFS stack. */
637 sbitmap in_stack;
638
639 int num_edges = NUM_EDGES (edge_list);
640
641 /* Perform a DFS traversal of the cfg. Identify loop headers, inner loops
642 and a mapping from block to its loop header (if the block is contained
643 in a loop, else -1).
644
645 Store results in HEADER, INNER, and MAX_HDR respectively, these will
646 be used as inputs to the second traversal.
647
648 STACK, SP and DFS_NR are only used during the first traversal. */
649
650 /* Allocate and initialize variables for the first traversal. */
651 max_hdr = xmalloc (last_basic_block * sizeof (int));
652 dfs_nr = xcalloc (last_basic_block, sizeof (int));
653 stack = xmalloc (nr_edges * sizeof (int));
654
655 inner = sbitmap_alloc (last_basic_block);
656 sbitmap_ones (inner);
657
658 header = sbitmap_alloc (last_basic_block);
659 sbitmap_zero (header);
660
661 passed = sbitmap_alloc (nr_edges);
662 sbitmap_zero (passed);
663
664 in_queue = sbitmap_alloc (last_basic_block);
665 sbitmap_zero (in_queue);
666
667 in_stack = sbitmap_alloc (last_basic_block);
668 sbitmap_zero (in_stack);
669
670 for (i = 0; i < last_basic_block; i++)
671 max_hdr[i] = -1;
672
673 /* DFS traversal to find inner loops in the cfg. */
674
675 sp = -1;
676 while (1)
677 {
678 if (current_edge == 0 || TEST_BIT (passed, current_edge))
679 {
680 /* We have reached a leaf node or a node that was already
681 processed. Pop edges off the stack until we find
682 an edge that has not yet been processed. */
683 while (sp >= 0
684 && (current_edge == 0 || TEST_BIT (passed, current_edge)))
685 {
686 /* Pop entry off the stack. */
687 current_edge = stack[sp--];
688 node = FROM_BLOCK (current_edge);
689 child = TO_BLOCK (current_edge);
690 RESET_BIT (in_stack, child);
691 if (max_hdr[child] >= 0 && TEST_BIT (in_stack, max_hdr[child]))
692 UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
693 current_edge = NEXT_OUT (current_edge);
694 }
695
696 /* See if we have finished the DFS tree traversal. */
697 if (sp < 0 && TEST_BIT (passed, current_edge))
698 break;
699
700 /* Nope, continue the traversal with the popped node. */
701 continue;
702 }
703
704 /* Process a node. */
705 node = FROM_BLOCK (current_edge);
706 child = TO_BLOCK (current_edge);
707 SET_BIT (in_stack, node);
708 dfs_nr[node] = ++count;
709
710 /* If the successor is in the stack, then we've found a loop.
711 Mark the loop; if it is not a natural loop, then it will
712 be rejected during the second traversal. */
713 if (TEST_BIT (in_stack, child))
714 {
715 no_loops = 0;
716 SET_BIT (header, child);
717 UPDATE_LOOP_RELATIONS (node, child);
718 SET_BIT (passed, current_edge);
719 current_edge = NEXT_OUT (current_edge);
720 continue;
721 }
722
723 /* If the child was already visited, then there is no need to visit
724 it again. Just update the loop relationships and restart
725 with a new edge. */
726 if (dfs_nr[child])
727 {
728 if (max_hdr[child] >= 0 && TEST_BIT (in_stack, max_hdr[child]))
729 UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
730 SET_BIT (passed, current_edge);
731 current_edge = NEXT_OUT (current_edge);
732 continue;
733 }
734
735 /* Push an entry on the stack and continue DFS traversal. */
736 stack[++sp] = current_edge;
737 SET_BIT (passed, current_edge);
738 current_edge = OUT_EDGES (child);
739
740 /* This is temporary until haifa is converted to use rth's new
741 cfg routines which have true entry/exit blocks and the
742 appropriate edges from/to those blocks.
743
744 Generally we update dfs_nr for a node when we process its
745 out edge. However, if the node has no out edge then we will
746 not set dfs_nr for that node. This can confuse the scheduler
747 into thinking that we have unreachable blocks, which in turn
748 disables cross block scheduling.
749
750 So, if we have a node with no out edges, go ahead and mark it
751 as reachable now. */
752 if (current_edge == 0)
753 dfs_nr[child] = ++count;
754 }
755
756 /* Another check for unreachable blocks. The earlier test in
757 is_cfg_nonregular only finds unreachable blocks that do not
758 form a loop.
759
760 The DFS traversal will mark every block that is reachable from
761 the entry node by placing a nonzero value in dfs_nr. Thus if
762 dfs_nr is zero for any block, then it must be unreachable. */
763 unreachable = 0;
764 FOR_EACH_BB (bb)
765 if (dfs_nr[bb->index] == 0)
766 {
767 unreachable = 1;
768 break;
769 }
770
771 /* Gross. To avoid wasting memory, the second pass uses the dfs_nr array
772 to hold degree counts. */
773 degree = dfs_nr;
774
775 FOR_EACH_BB (bb)
776 degree[bb->index] = 0;
777 for (i = 0; i < num_edges; i++)
778 {
779 edge e = INDEX_EDGE (edge_list, i);
780
781 if (e->dest != EXIT_BLOCK_PTR)
782 degree[e->dest->index]++;
783 }
784
785 /* Do not perform region scheduling if there are any unreachable
786 blocks. */
787 if (!unreachable)
788 {
789 int *queue;
790
791 if (no_loops)
792 SET_BIT (header, 0);
793
794 /* Second traversal: find reducible inner loops and topologically sort
795 the blocks of each region. */
796
797 queue = xmalloc (n_basic_blocks * sizeof (int));
798
799 /* Find blocks which are inner loop headers. We still have non-reducible
800 loops to consider at this point. */
801 FOR_EACH_BB (bb)
802 {
803 if (TEST_BIT (header, bb->index) && TEST_BIT (inner, bb->index))
804 {
805 edge e;
806 basic_block jbb;
807
808 /* Now check that the loop is reducible. We do this separately
809 from finding inner loops so that we do not find a reducible
810 loop which contains an inner non-reducible loop.
811
812 A simple way to find reducible/natural loops is to verify
813 that each block in the loop is dominated by the loop
814 header.
815
816 If there exists a block that is not dominated by the loop
817 header, then the block is reachable from outside the loop
818 and thus the loop is not a natural loop. */
819 FOR_EACH_BB (jbb)
820 {
821 /* First identify blocks in the loop, except for the loop
822 entry block. */
823 if (bb->index == max_hdr[jbb->index] && bb != jbb)
824 {
825 /* Now verify that the block is dominated by the loop
826 header. */
827 if (!dominated_by_p (CDI_DOMINATORS, jbb, bb))
828 break;
829 }
830 }
831
832 /* If we exited the loop early, then BB is the header of
833 a non-reducible loop and we should quit processing it
834 now. */
835 if (jbb != EXIT_BLOCK_PTR)
836 continue;
837
838 /* BB is a header of an inner loop, or block 0 in a subroutine
839 with no loops at all. */
840 head = tail = -1;
841 too_large_failure = 0;
842 loop_head = max_hdr[bb->index];
843
844 /* Decrease degree of all BB's successors for topological
845 ordering. */
846 for (e = bb->succ; e; e = e->succ_next)
847 if (e->dest != EXIT_BLOCK_PTR)
848 --degree[e->dest->index];
849
850 /* Estimate # insns, and count # blocks in the region. */
851 num_bbs = 1;
852 num_insns = (INSN_LUID (BB_END (bb))
853 - INSN_LUID (BB_HEAD (bb)));
854
855 /* Find all loop latches (blocks with back edges to the loop
856 header), or all the leaf blocks if the cfg has no loops.
857
858 Place those blocks into the queue. */
859 if (no_loops)
860 {
861 FOR_EACH_BB (jbb)
862 /* Leaf nodes have only a single successor which must
863 be EXIT_BLOCK. */
864 if (jbb->succ
865 && jbb->succ->dest == EXIT_BLOCK_PTR
866 && jbb->succ->succ_next == NULL)
867 {
868 queue[++tail] = jbb->index;
869 SET_BIT (in_queue, jbb->index);
870
871 if (too_large (jbb->index, &num_bbs, &num_insns))
872 {
873 too_large_failure = 1;
874 break;
875 }
876 }
877 }
878 else
879 {
880 edge e;
881
882 for (e = bb->pred; e; e = e->pred_next)
883 {
884 if (e->src == ENTRY_BLOCK_PTR)
885 continue;
886
887 node = e->src->index;
888
889 if (max_hdr[node] == loop_head && node != bb->index)
890 {
891 /* This is a loop latch. */
892 queue[++tail] = node;
893 SET_BIT (in_queue, node);
894
895 if (too_large (node, &num_bbs, &num_insns))
896 {
897 too_large_failure = 1;
898 break;
899 }
900 }
901 }
902 }
903
904 /* Now add all the blocks in the loop to the queue.
905
906 We know the loop is a natural loop; however the algorithm
907 above will not always mark certain blocks as being in the
908 loop. Consider:
909 node children
910 a b,c
911 b c
912 c a,d
913 d b
914
915 The algorithm in the DFS traversal may not mark B & D as part
916 of the loop (i.e. they will not have max_hdr set to A).
917
918 We know they cannot be loop latches (else they would have
919 had max_hdr set since they'd have a backedge to a dominator
920 block). So we don't need them on the initial queue.
921
922 We know they are part of the loop because they are dominated
923 by the loop header and can be reached by a backwards walk of
924 the edges starting with nodes on the initial queue.
925
926 It is safe and desirable to include those nodes in the
927 loop/scheduling region. To do so we would need to decrease
928 the degree of a node if it is the target of a backedge
929 within the loop itself as the node is placed in the queue.
930
931 We do not do this because I'm not sure that the actual
932 scheduling code will properly handle this case. ?!? */
933
934 while (head < tail && !too_large_failure)
935 {
936 edge e;
937 child = queue[++head];
938
939 for (e = BASIC_BLOCK (child)->pred; e; e = e->pred_next)
940 {
941 node = e->src->index;
942
943 /* See discussion above about nodes not marked as in
944 this loop during the initial DFS traversal. */
945 if (e->src == ENTRY_BLOCK_PTR
946 || max_hdr[node] != loop_head)
947 {
948 tail = -1;
949 break;
950 }
951 else if (!TEST_BIT (in_queue, node) && node != bb->index)
952 {
953 queue[++tail] = node;
954 SET_BIT (in_queue, node);
955
956 if (too_large (node, &num_bbs, &num_insns))
957 {
958 too_large_failure = 1;
959 break;
960 }
961 }
962 }
963 }
964
965 if (tail >= 0 && !too_large_failure)
966 {
967 /* Place the loop header into list of region blocks. */
968 degree[bb->index] = -1;
969 rgn_bb_table[idx] = bb->index;
970 RGN_NR_BLOCKS (nr_regions) = num_bbs;
971 RGN_BLOCKS (nr_regions) = idx++;
972 CONTAINING_RGN (bb->index) = nr_regions;
973 BLOCK_TO_BB (bb->index) = count = 0;
974
975 /* Remove blocks from queue[] when their in degree
976 becomes zero. Repeat until no blocks are left on the
977 list. This produces a topological list of blocks in
978 the region. */
979 while (tail >= 0)
980 {
981 if (head < 0)
982 head = tail;
983 child = queue[head];
984 if (degree[child] == 0)
985 {
986 edge e;
987
988 degree[child] = -1;
989 rgn_bb_table[idx++] = child;
990 BLOCK_TO_BB (child) = ++count;
991 CONTAINING_RGN (child) = nr_regions;
992 queue[head] = queue[tail--];
993
994 for (e = BASIC_BLOCK (child)->succ;
995 e;
996 e = e->succ_next)
997 if (e->dest != EXIT_BLOCK_PTR)
998 --degree[e->dest->index];
999 }
1000 else
1001 --head;
1002 }
1003 ++nr_regions;
1004 }
1005 }
1006 }
1007 free (queue);
1008 }
1009
1010 /* Any block that did not end up in a region is placed into a region
1011 by itself. */
1012 FOR_EACH_BB (bb)
1013 if (degree[bb->index] >= 0)
1014 {
1015 rgn_bb_table[idx] = bb->index;
1016 RGN_NR_BLOCKS (nr_regions) = 1;
1017 RGN_BLOCKS (nr_regions) = idx++;
1018 CONTAINING_RGN (bb->index) = nr_regions++;
1019 BLOCK_TO_BB (bb->index) = 0;
1020 }
1021
1022 free (max_hdr);
1023 free (dfs_nr);
1024 free (stack);
1025 sbitmap_free (passed);
1026 sbitmap_free (header);
1027 sbitmap_free (inner);
1028 sbitmap_free (in_queue);
1029 sbitmap_free (in_stack);
1030 }
1031
1032 /* Functions for region scheduling information. */
1033
1034 /* Compute dominators, probability, and potential-split-edges of bb.
1035 Assume that these values were already computed for bb's predecessors. */
1036
1037 static void
1038 compute_dom_prob_ps (int bb)
1039 {
1040 int nxt_in_edge, fst_in_edge, pred;
1041 int fst_out_edge, nxt_out_edge, nr_out_edges, nr_rgn_out_edges;
1042
1043 prob[bb] = 0.0;
1044 if (IS_RGN_ENTRY (bb))
1045 {
1046 SET_BIT (dom[bb], 0);
1047 prob[bb] = 1.0;
1048 return;
1049 }
1050
1051 fst_in_edge = nxt_in_edge = IN_EDGES (BB_TO_BLOCK (bb));
1052
1053 /* Initialize dom[bb] to '111..1'. */
1054 sbitmap_ones (dom[bb]);
1055
1056 do
1057 {
1058 pred = FROM_BLOCK (nxt_in_edge);
1059 sbitmap_a_and_b (dom[bb], dom[bb], dom[BLOCK_TO_BB (pred)]);
1060 sbitmap_a_or_b (ancestor_edges[bb], ancestor_edges[bb], ancestor_edges[BLOCK_TO_BB (pred)]);
1061
1062 SET_BIT (ancestor_edges[bb], EDGE_TO_BIT (nxt_in_edge));
1063
1064 nr_out_edges = 1;
1065 nr_rgn_out_edges = 0;
1066 fst_out_edge = OUT_EDGES (pred);
1067 nxt_out_edge = NEXT_OUT (fst_out_edge);
1068
1069 sbitmap_a_or_b (pot_split[bb], pot_split[bb], pot_split[BLOCK_TO_BB (pred)]);
1070
1071 SET_BIT (pot_split[bb], EDGE_TO_BIT (fst_out_edge));
1072
1073 /* The successor doesn't belong in the region? */
1074 if (CONTAINING_RGN (TO_BLOCK (fst_out_edge)) !=
1075 CONTAINING_RGN (BB_TO_BLOCK (bb)))
1076 ++nr_rgn_out_edges;
1077
1078 while (fst_out_edge != nxt_out_edge)
1079 {
1080 ++nr_out_edges;
1081 /* The successor doesn't belong in the region? */
1082 if (CONTAINING_RGN (TO_BLOCK (nxt_out_edge)) !=
1083 CONTAINING_RGN (BB_TO_BLOCK (bb)))
1084 ++nr_rgn_out_edges;
1085 SET_BIT (pot_split[bb], EDGE_TO_BIT (nxt_out_edge));
1086 nxt_out_edge = NEXT_OUT (nxt_out_edge);
1087
1088 }
1089
1090 /* Now nr_rgn_out_edges is the number of region-exit edges from
1091 pred, and nr_out_edges will be the number of pred out edges
1092 not leaving the region. */
1093 nr_out_edges -= nr_rgn_out_edges;
1094 if (nr_rgn_out_edges > 0)
1095 prob[bb] += 0.9 * prob[BLOCK_TO_BB (pred)] / nr_out_edges;
1096 else
1097 prob[bb] += prob[BLOCK_TO_BB (pred)] / nr_out_edges;
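      /* That is, PRED's probability is divided evenly among its out
         edges that stay within the region, damped by a factor of 0.9
         when PRED also has edges leaving the region.  */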
1098 nxt_in_edge = NEXT_IN (nxt_in_edge);
1099 }
1100 while (fst_in_edge != nxt_in_edge);
1101
1102 SET_BIT (dom[bb], bb);
1103 sbitmap_difference (pot_split[bb], pot_split[bb], ancestor_edges[bb]);
1104
1105 if (sched_verbose >= 2)
1106 fprintf (sched_dump, ";; bb_prob(%d, %d) = %3d\n", bb, BB_TO_BLOCK (bb),
1107 (int) (100.0 * prob[bb]));
1108 }
1109
1110 /* Functions for target info. */
1111
1112 /* Compute in BL the list of split-edges of bb_src relative to bb_trg.
1113 Note that bb_trg dominates bb_src. */
1114
1115 static void
1116 split_edges (int bb_src, int bb_trg, edgelst *bl)
1117 {
1118 sbitmap src = sbitmap_alloc (pot_split[bb_src]->n_bits);
1119 sbitmap_copy (src, pot_split[bb_src]);
1120
1121 sbitmap_difference (src, src, pot_split[bb_trg]);
1122 extract_bitlst (src, bl);
1123 sbitmap_free (src);
1124 }
1125
1126 /* Find the valid candidate-source-blocks for the target block TRG, compute
1127 their probability, and check if they are speculative or not.
1128 For speculative sources, compute their update-blocks and split-blocks. */
1129
1130 static void
1131 compute_trg_info (int trg)
1132 {
1133 candidate *sp;
1134 edgelst el;
1135 int check_block, update_idx;
1136 int i, j, k, fst_edge, nxt_edge;
1137
1138 /* Define some of the fields for the target bb as well. */
1139 sp = candidate_table + trg;
1140 sp->is_valid = 1;
1141 sp->is_speculative = 0;
1142 sp->src_prob = 100;
1143
1144 for (i = trg + 1; i < current_nr_blocks; i++)
1145 {
1146 sp = candidate_table + i;
1147
1148 sp->is_valid = IS_DOMINATED (i, trg);
1149 if (sp->is_valid)
1150 {
1151 sp->src_prob = GET_SRC_PROB (i, trg);
1152 sp->is_valid = (sp->src_prob >= MIN_PROBABILITY);
1153 }
1154
1155 if (sp->is_valid)
1156 {
1157 split_edges (i, trg, &el);
1158 sp->is_speculative = (el.nr_members) ? 1 : 0;
1159 if (sp->is_speculative && !flag_schedule_speculative)
1160 sp->is_valid = 0;
1161 }
1162
1163 if (sp->is_valid)
1164 {
1165 char *update_blocks;
1166
1167 /* Compute split blocks and store them in bblst_table.
1168 The TO block of every split edge is a split block. */
1169 sp->split_bbs.first_member = &bblst_table[bblst_last];
1170 sp->split_bbs.nr_members = el.nr_members;
1171 for (j = 0; j < el.nr_members; bblst_last++, j++)
1172 bblst_table[bblst_last] =
1173 TO_BLOCK (rgn_edges[el.first_member[j]]);
1174 sp->update_bbs.first_member = &bblst_table[bblst_last];
1175
1176 /* Compute update blocks and store them in bblst_table.
1177 For every split edge, look at the FROM block, and check
1178 all out edges. For each out edge that is not a split edge,
1179 add the TO block to the update block list. This list can end
1180 up with a lot of duplicates. We need to weed them out to avoid
1181 overrunning the end of the bblst_table. */
1182 update_blocks = alloca (last_basic_block);
1183 memset (update_blocks, 0, last_basic_block);
1184
1185 update_idx = 0;
1186 for (j = 0; j < el.nr_members; j++)
1187 {
1188 check_block = FROM_BLOCK (rgn_edges[el.first_member[j]]);
1189 fst_edge = nxt_edge = OUT_EDGES (check_block);
1190 do
1191 {
1192 if (! update_blocks[TO_BLOCK (nxt_edge)])
1193 {
1194 for (k = 0; k < el.nr_members; k++)
1195 if (EDGE_TO_BIT (nxt_edge) == el.first_member[k])
1196 break;
1197
1198 if (k >= el.nr_members)
1199 {
1200 bblst_table[bblst_last++] = TO_BLOCK (nxt_edge);
1201 update_blocks[TO_BLOCK (nxt_edge)] = 1;
1202 update_idx++;
1203 }
1204 }
1205
1206 nxt_edge = NEXT_OUT (nxt_edge);
1207 }
1208 while (fst_edge != nxt_edge);
1209 }
1210 sp->update_bbs.nr_members = update_idx;
1211
1212 /* Make sure we didn't overrun the end of bblst_table. */
1213 if (bblst_last > bblst_size)
1214 abort ();
1215 }
1216 else
1217 {
1218 sp->split_bbs.nr_members = sp->update_bbs.nr_members = 0;
1219
1220 sp->is_speculative = 0;
1221 sp->src_prob = 0;
1222 }
1223 }
1224 }
1225
1226 /* Print candidates info, for debugging purposes. Callable from debugger. */
1227
1228 void
1229 debug_candidate (int i)
1230 {
1231 if (!candidate_table[i].is_valid)
1232 return;
1233
1234 if (candidate_table[i].is_speculative)
1235 {
1236 int j;
1237 fprintf (sched_dump, "src b %d bb %d speculative \n", BB_TO_BLOCK (i), i);
1238
1239 fprintf (sched_dump, "split path: ");
1240 for (j = 0; j < candidate_table[i].split_bbs.nr_members; j++)
1241 {
1242 int b = candidate_table[i].split_bbs.first_member[j];
1243
1244 fprintf (sched_dump, " %d ", b);
1245 }
1246 fprintf (sched_dump, "\n");
1247
1248 fprintf (sched_dump, "update path: ");
1249 for (j = 0; j < candidate_table[i].update_bbs.nr_members; j++)
1250 {
1251 int b = candidate_table[i].update_bbs.first_member[j];
1252
1253 fprintf (sched_dump, " %d ", b);
1254 }
1255 fprintf (sched_dump, "\n");
1256 }
1257 else
1258 {
1259 fprintf (sched_dump, " src %d equivalent\n", BB_TO_BLOCK (i));
1260 }
1261 }
1262
1263 /* Print candidates info, for debugging purposes. Callable from debugger. */
1264
1265 void
1266 debug_candidates (int trg)
1267 {
1268 int i;
1269
1270 fprintf (sched_dump, "----------- candidate table: target: b=%d bb=%d ---\n",
1271 BB_TO_BLOCK (trg), trg);
1272 for (i = trg + 1; i < current_nr_blocks; i++)
1273 debug_candidate (i);
1274 }
1275
1276 /* Functions for speculative scheduling. */
1277
1278 /* Return 0 if x is a set of a register live at the beginning of one
1279 of the split-blocks of src, otherwise return 1. */
1280
1281 static int
1282 check_live_1 (int src, rtx x)
1283 {
1284 int i;
1285 int regno;
1286 rtx reg = SET_DEST (x);
1287
1288 if (reg == 0)
1289 return 1;
1290
1291 while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
1292 || GET_CODE (reg) == SIGN_EXTRACT
1293 || GET_CODE (reg) == STRICT_LOW_PART)
1294 reg = XEXP (reg, 0);
1295
1296 if (GET_CODE (reg) == PARALLEL)
1297 {
1298 int i;
1299
1300 for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
1301 if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
1302 if (check_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0)))
1303 return 1;
1304
1305 return 0;
1306 }
1307
1308 if (!REG_P (reg))
1309 return 1;
1310
1311 regno = REGNO (reg);
1312
1313 if (regno < FIRST_PSEUDO_REGISTER && global_regs[regno])
1314 {
1315 /* Global registers are assumed live. */
1316 return 0;
1317 }
1318 else
1319 {
1320 if (regno < FIRST_PSEUDO_REGISTER)
1321 {
1322 /* Check for hard registers. */
1323 int j = hard_regno_nregs[regno][GET_MODE (reg)];
1324 while (--j >= 0)
1325 {
1326 for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
1327 {
1328 int b = candidate_table[src].split_bbs.first_member[i];
1329
1330 if (REGNO_REG_SET_P (BASIC_BLOCK (b)->global_live_at_start,
1331 regno + j))
1332 {
1333 return 0;
1334 }
1335 }
1336 }
1337 }
1338 else
1339 {
1340 /* Check for pseudo registers. */
1341 for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
1342 {
1343 int b = candidate_table[src].split_bbs.first_member[i];
1344
1345 if (REGNO_REG_SET_P (BASIC_BLOCK (b)->global_live_at_start, regno))
1346 {
1347 return 0;
1348 }
1349 }
1350 }
1351 }
1352
1353 return 1;
1354 }
1355
1356 /* If x is a set of a register R, mark that R is live at the beginning
1357 of every update-block of src. */
1358
1359 static void
1360 update_live_1 (int src, rtx x)
1361 {
1362 int i;
1363 int regno;
1364 rtx reg = SET_DEST (x);
1365
1366 if (reg == 0)
1367 return;
1368
1369 while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
1370 || GET_CODE (reg) == SIGN_EXTRACT
1371 || GET_CODE (reg) == STRICT_LOW_PART)
1372 reg = XEXP (reg, 0);
1373
1374 if (GET_CODE (reg) == PARALLEL)
1375 {
1376 int i;
1377
1378 for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
1379 if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
1380 update_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0));
1381
1382 return;
1383 }
1384
1385 if (!REG_P (reg))
1386 return;
1387
1388 /* Global registers are always live, so the code below does not apply
1389 to them. */
1390
1391 regno = REGNO (reg);
1392
1393 if (regno >= FIRST_PSEUDO_REGISTER || !global_regs[regno])
1394 {
1395 if (regno < FIRST_PSEUDO_REGISTER)
1396 {
1397 int j = hard_regno_nregs[regno][GET_MODE (reg)];
1398 while (--j >= 0)
1399 {
1400 for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
1401 {
1402 int b = candidate_table[src].update_bbs.first_member[i];
1403
1404 SET_REGNO_REG_SET (BASIC_BLOCK (b)->global_live_at_start,
1405 regno + j);
1406 }
1407 }
1408 }
1409 else
1410 {
1411 for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
1412 {
1413 int b = candidate_table[src].update_bbs.first_member[i];
1414
1415 SET_REGNO_REG_SET (BASIC_BLOCK (b)->global_live_at_start, regno);
1416 }
1417 }
1418 }
1419 }
1420
1421 /* Return 1 if insn can be speculatively moved from block src to trg,
1422 otherwise return 0. Called before first insertion of insn to
1423 ready-list or before the scheduling. */
1424
1425 static int
1426 check_live (rtx insn, int src)
1427 {
1428 /* Find the registers set by instruction. */
1429 if (GET_CODE (PATTERN (insn)) == SET
1430 || GET_CODE (PATTERN (insn)) == CLOBBER)
1431 return check_live_1 (src, PATTERN (insn));
1432 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
1433 {
1434 int j;
1435 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
1436 if ((GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
1437 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
1438 && !check_live_1 (src, XVECEXP (PATTERN (insn), 0, j)))
1439 return 0;
1440
1441 return 1;
1442 }
1443
1444 return 1;
1445 }
1446
1447 /* Update the live registers info after insn was moved speculatively from
1448 block src to trg. */
1449
1450 static void
1451 update_live (rtx insn, int src)
1452 {
1453 /* Find the registers set by instruction. */
1454 if (GET_CODE (PATTERN (insn)) == SET
1455 || GET_CODE (PATTERN (insn)) == CLOBBER)
1456 update_live_1 (src, PATTERN (insn));
1457 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
1458 {
1459 int j;
1460 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
1461 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
1462 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
1463 update_live_1 (src, XVECEXP (PATTERN (insn), 0, j));
1464 }
1465 }
1466
1467 /* Nonzero if block bb_to is equal to, or reachable from, block bb_from. */
1468 #define IS_REACHABLE(bb_from, bb_to) \
1469 (bb_from == bb_to \
1470 || IS_RGN_ENTRY (bb_from) \
1471 || (TEST_BIT (ancestor_edges[bb_to], \
1472 EDGE_TO_BIT (IN_EDGES (BB_TO_BLOCK (bb_from))))))
1473
1474 /* Turns on the fed_by_spec_load flag for insns fed by load_insn. */
1475
1476 static void
1477 set_spec_fed (rtx load_insn)
1478 {
1479 rtx link;
1480
1481 for (link = INSN_DEPEND (load_insn); link; link = XEXP (link, 1))
1482 if (GET_MODE (link) == VOIDmode)
1483 FED_BY_SPEC_LOAD (XEXP (link, 0)) = 1;
1484 } /* set_spec_fed */
1485
1486 /* On the path from the insn to load_insn_bb, find a conditional
1487 branch that depends on insn and guards the speculative load. */
1488
1489 static int
1490 find_conditional_protection (rtx insn, int load_insn_bb)
1491 {
1492 rtx link;
1493
1494 /* Iterate through DEF-USE forward dependences. */
1495 for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
1496 {
1497 rtx next = XEXP (link, 0);
1498 if ((CONTAINING_RGN (BLOCK_NUM (next)) ==
1499 CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb)))
1500 && IS_REACHABLE (INSN_BB (next), load_insn_bb)
1501 && load_insn_bb != INSN_BB (next)
1502 && GET_MODE (link) == VOIDmode
1503 && (JUMP_P (next)
1504 || find_conditional_protection (next, load_insn_bb)))
1505 return 1;
1506 }
1507 return 0;
1508 } /* find_conditional_protection */
1509
1510 /* Returns 1 if an insn1 that participates in the computation
1511 of load_insn's address also feeds a conditional branch that
1512 guards load_insn. This is true if we find the two DEF-USE
1513 chains:
1514 insn1 -> ... -> conditional-branch
1515 insn1 -> ... -> load_insn,
1516 and if a flow path exists:
1517 insn1 -> ... -> conditional-branch -> ... -> load_insn,
1518 and if insn1 is on the path
1519 region-entry -> ... -> bb_trg -> ... load_insn.
1520
1521 Locate insn1 by climbing on LOG_LINKS from load_insn.
1522 Locate the branch by following INSN_DEPEND from insn1. */
1523
1524 static int
1525 is_conditionally_protected (rtx load_insn, int bb_src, int bb_trg)
1526 {
1527 rtx link;
1528
1529 for (link = LOG_LINKS (load_insn); link; link = XEXP (link, 1))
1530 {
1531 rtx insn1 = XEXP (link, 0);
1532
1533 /* Must be a DEF-USE dependence upon non-branch. */
1534 if (GET_MODE (link) != VOIDmode
1535 || JUMP_P (insn1))
1536 continue;
1537
1538 /* Must exist a path: region-entry -> ... -> bb_trg -> ... load_insn. */
1539 if (INSN_BB (insn1) == bb_src
1540 || (CONTAINING_RGN (BLOCK_NUM (insn1))
1541 != CONTAINING_RGN (BB_TO_BLOCK (bb_src)))
1542 || (!IS_REACHABLE (bb_trg, INSN_BB (insn1))
1543 && !IS_REACHABLE (INSN_BB (insn1), bb_trg)))
1544 continue;
1545
1546 /* Now search for the conditional-branch. */
1547 if (find_conditional_protection (insn1, bb_src))
1548 return 1;
1549
1550 /* Recursive step: search another insn1, "above" current insn1. */
1551 return is_conditionally_protected (insn1, bb_src, bb_trg);
1552 }
1553
1554 /* The chain does not exist. */
1555 return 0;
1556 } /* is_conditionally_protected */
1557
1558 /* Returns 1 if a clue for "similar load" 'insn2' is found, and hence
1559 load_insn can move speculatively from bb_src to bb_trg. All the
1560 following must hold:
1561
1562 (1) both loads have 1 base register (PFREE_CANDIDATEs).
1563 (2) load_insn and insn2 have a def-use dependence upon
1564 the same insn 'insn1'.
1565 (3) either insn2 is in bb_trg, or:
1566 - there's only one split-block, and
1567 - insn2 is in that single split-block.
1568
1569 From all these we can conclude that the two loads access memory
1570 addresses that differ at most by a constant, and hence if moving
1571 load_insn would cause an exception, it would have been caused by
1572 insn2 anyhow. */
1573
1574 static int
1575 is_pfree (rtx load_insn, int bb_src, int bb_trg)
1576 {
1577 rtx back_link;
1578 candidate *candp = candidate_table + bb_src;
1579
1580 if (candp->split_bbs.nr_members != 1)
1581 /* Must have exactly one escape block. */
1582 return 0;
1583
1584 for (back_link = LOG_LINKS (load_insn);
1585 back_link; back_link = XEXP (back_link, 1))
1586 {
1587 rtx insn1 = XEXP (back_link, 0);
1588
1589 if (GET_MODE (back_link) == VOIDmode)
1590 {
1591 /* Found a DEF-USE dependence (insn1, load_insn). */
1592 rtx fore_link;
1593
1594 for (fore_link = INSN_DEPEND (insn1);
1595 fore_link; fore_link = XEXP (fore_link, 1))
1596 {
1597 rtx insn2 = XEXP (fore_link, 0);
1598 if (GET_MODE (fore_link) == VOIDmode)
1599 {
1600 /* Found a DEF-USE dependence (insn1, insn2). */
1601 if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
1602 /* insn2 not guaranteed to be a 1 base reg load. */
1603 continue;
1604
1605 if (INSN_BB (insn2) == bb_trg)
1606 /* insn2 is the similar load, in the target block. */
1607 return 1;
1608
1609 if (*(candp->split_bbs.first_member) == BLOCK_NUM (insn2))
1610 /* insn2 is a similar load, in a split-block. */
1611 return 1;
1612 }
1613 }
1614 }
1615 }
1616
1617 /* Couldn't find a similar load. */
1618 return 0;
1619 } /* is_pfree */
1620
1621 /* Return 1 if load_insn is prisky (i.e. if load_insn is fed by
1622 a load moved speculatively, or if load_insn is protected by
1623 a compare on load_insn's address). */
1624
1625 static int
1626 is_prisky (rtx load_insn, int bb_src, int bb_trg)
1627 {
1628 if (FED_BY_SPEC_LOAD (load_insn))
1629 return 1;
1630
1631 if (LOG_LINKS (load_insn) == NULL)
1632 /* Dependence may 'hide' out of the region. */
1633 return 1;
1634
1635 if (is_conditionally_protected (load_insn, bb_src, bb_trg))
1636 return 1;
1637
1638 return 0;
1639 }
1640
1641 /* Insn is a candidate to be moved speculatively from bb_src to bb_trg.
1642 Return 1 if insn is exception-free (and the motion is valid)
1643 and 0 otherwise. */
1644
1645 static int
1646 is_exception_free (rtx insn, int bb_src, int bb_trg)
1647 {
1648 int insn_class = haifa_classify_insn (insn);
1649
1650 /* Handle non-load insns. */
1651 switch (insn_class)
1652 {
1653 case TRAP_FREE:
1654 return 1;
1655 case TRAP_RISKY:
1656 return 0;
1657 default:;
1658 }
1659
1660 /* Handle loads. */
1661 if (!flag_schedule_speculative_load)
1662 return 0;
1663 IS_LOAD_INSN (insn) = 1;
1664 switch (insn_class)
1665 {
1666 case IFREE:
1667 return (1);
1668 case IRISKY:
1669 return 0;
1670 case PFREE_CANDIDATE:
1671 if (is_pfree (insn, bb_src, bb_trg))
1672 return 1;
1673 /* Don't 'break' here: PFREE-candidate is also PRISKY-candidate. */
1674 case PRISKY_CANDIDATE:
1675 if (!flag_schedule_speculative_load_dangerous
1676 || is_prisky (insn, bb_src, bb_trg))
1677 return 0;
1678 break;
1679 default:;
1680 }
1681
1682 return flag_schedule_speculative_load_dangerous;
1683 }
1684 \f
1685 /* The number of insns from the current block scheduled so far. */
1686 static int sched_target_n_insns;
1687 /* The number of insns from the current block to be scheduled in total. */
1688 static int target_n_insns;
1689 /* The number of insns from the entire region scheduled so far. */
1690 static int sched_n_insns;
1691 /* Nonzero if the last scheduled insn was a jump. */
1692 static int last_was_jump;
1693
1694 /* Implementations of the sched_info functions for region scheduling. */
1695 static void init_ready_list (struct ready_list *);
1696 static int can_schedule_ready_p (rtx);
1697 static int new_ready (rtx);
1698 static int schedule_more_p (void);
1699 static const char *rgn_print_insn (rtx, int);
1700 static int rgn_rank (rtx, rtx);
1701 static int contributes_to_priority (rtx, rtx);
1702 static void compute_jump_reg_dependencies (rtx, regset, regset, regset);
1703
1704 /* Return nonzero if there are more insns that should be scheduled. */
1705
1706 static int
1707 schedule_more_p (void)
1708 {
1709 return ! last_was_jump && sched_target_n_insns < target_n_insns;
1710 }
1711
1712 /* Add all insns that are initially ready to the ready list READY. Called
1713 once before scheduling a set of insns. */
1714
1715 static void
1716 init_ready_list (struct ready_list *ready)
1717 {
1718 rtx prev_head = current_sched_info->prev_head;
1719 rtx next_tail = current_sched_info->next_tail;
1720 int bb_src;
1721 rtx insn;
1722
1723 target_n_insns = 0;
1724 sched_target_n_insns = 0;
1725 sched_n_insns = 0;
1726 last_was_jump = 0;
1727
1728 /* Print debugging information. */
1729 if (sched_verbose >= 5)
1730 debug_dependencies ();
1731
1732 /* Prepare current target block info. */
1733 if (current_nr_blocks > 1)
1734 {
1735 candidate_table = xmalloc (current_nr_blocks * sizeof (candidate));
1736
1737 bblst_last = 0;
1738 /* bblst_table holds split blocks and update blocks for each block after
1739 the current one in the region. Split blocks and update blocks are
1740 the TO blocks of region edges, so there can be at most rgn_nr_edges
1741 of them. */
1742 bblst_size = (current_nr_blocks - target_bb) * rgn_nr_edges;
1743 bblst_table = xmalloc (bblst_size * sizeof (int));
1744
1745 bitlst_table_last = 0;
1746 bitlst_table = xmalloc (rgn_nr_edges * sizeof (int));
1747
1748 compute_trg_info (target_bb);
1749 }
1750
1751 /* Initialize ready list with all 'ready' insns in target block.
1752 Count number of insns in the target block being scheduled. */
1753 for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
1754 {
1755 if (INSN_DEP_COUNT (insn) == 0)
1756 {
1757 ready_add (ready, insn);
1758
1759 if (targetm.sched.adjust_priority)
1760 INSN_PRIORITY (insn) =
1761 targetm.sched.adjust_priority (insn, INSN_PRIORITY (insn));
1762 }
1763 target_n_insns++;
1764 }
1765
1766 /* Add to ready list all 'ready' insns in valid source blocks.
1767 For speculative insns, check liveness, exception freedom, and
1768 issue delay. */
1769 for (bb_src = target_bb + 1; bb_src < current_nr_blocks; bb_src++)
1770 if (IS_VALID (bb_src))
1771 {
1772 rtx src_head;
1773 rtx src_next_tail;
1774 rtx tail, head;
1775
1776 get_block_head_tail (BB_TO_BLOCK (bb_src), &head, &tail);
1777 src_next_tail = NEXT_INSN (tail);
1778 src_head = head;
1779
1780 for (insn = src_head; insn != src_next_tail; insn = NEXT_INSN (insn))
1781 {
1782 if (! INSN_P (insn))
1783 continue;
1784
1785 if (!CANT_MOVE (insn)
1786 && (!IS_SPECULATIVE_INSN (insn)
1787 || ((recog_memoized (insn) < 0
1788 || min_insn_conflict_delay (curr_state,
1789 insn, insn) <= 3)
1790 && check_live (insn, bb_src)
1791 && is_exception_free (insn, bb_src, target_bb))))
1792 if (INSN_DEP_COUNT (insn) == 0)
1793 {
1794 ready_add (ready, insn);
1795
1796 if (targetm.sched.adjust_priority)
1797 INSN_PRIORITY (insn) =
1798 targetm.sched.adjust_priority (insn, INSN_PRIORITY (insn));
1799 }
1800 }
1801 }
1802 }
1803
1804 /* Called after taking INSN from the ready list. Returns nonzero if this
1805 insn can be scheduled, zero if we should silently discard it. */
1806
1807 static int
1808 can_schedule_ready_p (rtx insn)
1809 {
1810 if (JUMP_P (insn))
1811 last_was_jump = 1;
1812
1813 /* An interblock motion? */
1814 if (INSN_BB (insn) != target_bb)
1815 {
1816 basic_block b1;
1817
1818 if (IS_SPECULATIVE_INSN (insn))
1819 {
1820 if (!check_live (insn, INSN_BB (insn)))
1821 return 0;
1822 update_live (insn, INSN_BB (insn));
1823
1824 /* For speculative load, mark insns fed by it. */
1825 if (IS_LOAD_INSN (insn) || FED_BY_SPEC_LOAD (insn))
1826 set_spec_fed (insn);
1827
1828 nr_spec++;
1829 }
1830 nr_inter++;
1831
1832 /* Update source block boundaries. */
1833 b1 = BLOCK_FOR_INSN (insn);
1834 if (insn == BB_HEAD (b1) && insn == BB_END (b1))
1835 {
1836 /* We moved all the insns in the basic block.
1837 Emit a note after the last insn and update the
1838 begin/end boundaries to point to the note. */
1839 rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
1840 BB_HEAD (b1) = note;
1841 BB_END (b1) = note;
1842 }
1843 else if (insn == BB_END (b1))
1844 {
1845 /* We took insns from the end of the basic block,
1846 so update the end of block boundary so that it
1847 points to the first insn we did not move. */
1848 BB_END (b1) = PREV_INSN (insn);
1849 }
1850 else if (insn == BB_HEAD (b1))
1851 {
1852 /* We took insns from the start of the basic block,
1853 so update the start of block boundary so that
1854 it points to the first insn we did not move. */
1855 BB_HEAD (b1) = NEXT_INSN (insn);
1856 }
1857 }
1858 else
1859 {
1860 /* In block motion. */
1861 sched_target_n_insns++;
1862 }
1863 sched_n_insns++;
1864
1865 return 1;
1866 }
1867
1868 /* Called after INSN has all its dependencies resolved. Return nonzero
1869 if it should be moved to the ready list or the queue, or zero if we
1870 should silently discard it. */
1871 static int
1872 new_ready (rtx next)
1873 {
1874 /* For speculative insns, before inserting to ready/queue,
1875 check liveness, exception freedom, and issue delay. */
1876 if (INSN_BB (next) != target_bb
1877 && (!IS_VALID (INSN_BB (next))
1878 || CANT_MOVE (next)
1879 || (IS_SPECULATIVE_INSN (next)
1880 && ((recog_memoized (next) >= 0
1881 && min_insn_conflict_delay (curr_state, next, next) > 3)
1882 || !check_live (next, INSN_BB (next))
1883 || !is_exception_free (next, INSN_BB (next), target_bb)))))
1884 return 0;
1885 return 1;
1886 }
1887
1888 /* Return a string that contains the insn uid and optionally anything else
1889 necessary to identify this insn in an output. It's valid to use a
1890 static buffer for this. The ALIGNED parameter should cause the string
1891 to be formatted so that multiple output lines will line up nicely. */
1892
1893 static const char *
1894 rgn_print_insn (rtx insn, int aligned)
1895 {
1896 static char tmp[80];
1897
1898 if (aligned)
1899 sprintf (tmp, "b%3d: i%4d", INSN_BB (insn), INSN_UID (insn));
1900 else
1901 {
1902 if (current_nr_blocks > 1 && INSN_BB (insn) != target_bb)
1903 sprintf (tmp, "%d/b%d", INSN_UID (insn), INSN_BB (insn));
1904 else
1905 sprintf (tmp, "%d", INSN_UID (insn));
1906 }
1907 return tmp;
1908 }
1909
1910 /* Compare priority of two insns. Return a positive number if the second
1911 insn is to be preferred for scheduling, and a negative one if the first
1912 is to be preferred. Zero if they are equally good. */
1913
1914 static int
1915 rgn_rank (rtx insn1, rtx insn2)
1916 {
1917 /* Some comparisons make sense in interblock scheduling only. */
1918 if (INSN_BB (insn1) != INSN_BB (insn2))
1919 {
1920 int spec_val, prob_val;
1921
1922 /* Prefer an inblock motion over an interblock motion. */
1923 if ((INSN_BB (insn2) == target_bb) && (INSN_BB (insn1) != target_bb))
1924 return 1;
1925 if ((INSN_BB (insn1) == target_bb) && (INSN_BB (insn2) != target_bb))
1926 return -1;
1927
1928 /* Prefer a useful motion over a speculative one. */
1929 spec_val = IS_SPECULATIVE_INSN (insn1) - IS_SPECULATIVE_INSN (insn2);
1930 if (spec_val)
1931 return spec_val;
1932
1933 /* Prefer a more probable (speculative) insn. */
1934 prob_val = INSN_PROBABILITY (insn2) - INSN_PROBABILITY (insn1);
1935 if (prob_val)
1936 return prob_val;
1937 }
1938 return 0;
1939 }
1940
1941 /* NEXT is an instruction that depends on INSN (a backward dependence);
1942 return nonzero if we should include this dependence in priority
1943 calculations. */
1944
1945 static int
1946 contributes_to_priority (rtx next, rtx insn)
1947 {
1948 return BLOCK_NUM (next) == BLOCK_NUM (insn);
1949 }
1950
1951 /* INSN is a JUMP_INSN, COND_EXEC is the set of registers that are
1952 conditionally set before INSN. Store the set of registers that
1953 must be considered as used by this jump in USED and that of
1954 registers that must be considered as set in SET. */
1955
1956 static void
1957 compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED,
1958 regset cond_exec ATTRIBUTE_UNUSED,
1959 regset used ATTRIBUTE_UNUSED,
1960 regset set ATTRIBUTE_UNUSED)
1961 {
1962 /* Nothing to do here, since we postprocess jumps in
1963 add_branch_dependences. */
1964 }
1965
1966 /* Used in schedule_insns to initialize current_sched_info for scheduling
1967 regions (or single basic blocks). */
1968
1969 static struct sched_info region_sched_info =
1970 {
1971 init_ready_list,
1972 can_schedule_ready_p,
1973 schedule_more_p,
1974 new_ready,
1975 rgn_rank,
1976 rgn_print_insn,
1977 contributes_to_priority,
1978 compute_jump_reg_dependencies,
1979
1980 NULL, NULL,
1981 NULL, NULL,
1982 0, 0, 0
1983 };
1984
1985 /* Determine if PAT sets a CLASS_LIKELY_SPILLED_P register. */
1986
1987 static bool
1988 sets_likely_spilled (rtx pat)
1989 {
1990 bool ret = false;
1991 note_stores (pat, sets_likely_spilled_1, &ret);
1992 return ret;
1993 }
1994
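/* note_stores callback for sets_likely_spilled.  X is the location stored
   to by PAT; record in *DATA (a bool) whether X is a hard register whose
   class satisfies CLASS_LIKELY_SPILLED_P and the store is a SET rather
   than a CLOBBER.  */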
1995 static void
1996 sets_likely_spilled_1 (rtx x, rtx pat, void *data)
1997 {
1998 bool *ret = (bool *) data;
1999
2000 if (GET_CODE (pat) == SET
2001 && REG_P (x)
2002 && REGNO (x) < FIRST_PSEUDO_REGISTER
2003 && CLASS_LIKELY_SPILLED_P (REGNO_REG_CLASS (REGNO (x))))
2004 *ret = true;
2005 }
2006
2007 /* Add dependences so that branches are scheduled to run last in their
2008 block. */
2009
2010 static void
2011 add_branch_dependences (rtx head, rtx tail)
2012 {
2013 rtx insn, last;
2014
2015 /* For all branches, calls, uses, clobbers, cc0 setters, and instructions
2016 that can throw exceptions, force them to remain in order at the end of
2017 the block by adding dependencies and giving the last a high priority.
2018 There may be notes present, and prev_head may also be a note.
2019
2020 Branches must obviously remain at the end. Calls should remain at the
2021 end since moving them results in worse register allocation. Uses remain
2022 at the end to ensure proper register allocation.
2023
2024 cc0 setters remain at the end because they can't be moved away from
2025 their cc0 user.
2026
2027 Insns setting CLASS_LIKELY_SPILLED_P registers (usually return values)
2028 are not moved before reload because we can wind up with register
2029 allocation failures. */
2030
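/* Walk backward from TAIL, collecting the insns that must stay at the end
   of the block; chain each one to the previously collected insn (LAST)
   with an anti dependence and mark it CANT_MOVE so the scheduler keeps
   this tail sequence in order.  */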
2031 insn = tail;
2032 last = 0;
2033 while (CALL_P (insn)
2034 || JUMP_P (insn)
2035 || (NONJUMP_INSN_P (insn)
2036 && (GET_CODE (PATTERN (insn)) == USE
2037 || GET_CODE (PATTERN (insn)) == CLOBBER
2038 || can_throw_internal (insn)
2039 #ifdef HAVE_cc0
2040 || sets_cc0_p (PATTERN (insn))
2041 #endif
2042 || (!reload_completed
2043 && sets_likely_spilled (PATTERN (insn)))))
2044 || NOTE_P (insn))
2045 {
2046 if (!NOTE_P (insn))
2047 {
2048 if (last != 0 && !find_insn_list (insn, LOG_LINKS (last)))
2049 {
2050 add_dependence (last, insn, REG_DEP_ANTI);
2051 INSN_REF_COUNT (insn)++;
2052 }
2053
2054 CANT_MOVE (insn) = 1;
2055
2056 last = insn;
2057 }
2058
2059 /* Don't overrun the bounds of the basic block. */
2060 if (insn == head)
2061 break;
2062
2063 insn = PREV_INSN (insn);
2064 }
2065
2066 /* Make sure these insns are scheduled last in their block. */
2067 insn = last;
2068 if (insn != 0)
2069 while (insn != head)
2070 {
2071 insn = prev_nonnote_insn (insn);
2072
2073 if (INSN_REF_COUNT (insn) != 0)
2074 continue;
2075
2076 add_dependence (last, insn, REG_DEP_ANTI);
2077 INSN_REF_COUNT (insn) = 1;
2078 }
2079 }
2080
2081 /* Data structures for the computation of data dependences in a region. We
2082 keep one `deps' structure for every basic block. Before analyzing the
2083 data dependences for a bb, its variables are initialized as a function of
2084 the variables of its predecessors. When the analysis for a bb completes,
2085 we save the contents to the corresponding bb_deps[bb] variable. */
2086
2087 static struct deps *bb_deps;
2088
2089 /* Duplicate the INSN_LIST elements of COPY and prepend them to OLD. */
2090
2091 static rtx
2092 concat_INSN_LIST (rtx copy, rtx old)
2093 {
2094 rtx new = old;
2095 for (; copy ; copy = XEXP (copy, 1))
2096 new = alloc_INSN_LIST (XEXP (copy, 0), new);
2097 return new;
2098 }
2099
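/* Prepend copies of the parallel lists COPY_INSNS (an INSN_LIST) and
   COPY_MEMS (an EXPR_LIST of the same length) to *OLD_INSNS_P and
   *OLD_MEMS_P respectively.  */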
2100 static void
2101 concat_insn_mem_list (rtx copy_insns, rtx copy_mems, rtx *old_insns_p,
2102 rtx *old_mems_p)
2103 {
2104 rtx new_insns = *old_insns_p;
2105 rtx new_mems = *old_mems_p;
2106
2107 while (copy_insns)
2108 {
2109 new_insns = alloc_INSN_LIST (XEXP (copy_insns, 0), new_insns);
2110 new_mems = alloc_EXPR_LIST (VOIDmode, XEXP (copy_mems, 0), new_mems);
2111 copy_insns = XEXP (copy_insns, 1);
2112 copy_mems = XEXP (copy_mems, 1);
2113 }
2114
2115 *old_insns_p = new_insns;
2116 *old_mems_p = new_mems;
2117 }
2118
2119 /* After computing the dependencies for block BB, propagate the dependencies
2120 found in PRED_DEPS to the successors of the block. */
2121 static void
2122 propagate_deps (int bb, struct deps *pred_deps)
2123 {
2124 int b = BB_TO_BLOCK (bb);
2125 int e, first_edge;
2126
2127 /* bb's structures are inherited by its successors. */
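/* Note (editorial, inferred from the loop below): OUT_EDGES (b) yields the
   first entry of a circular list of b's outgoing edges, and NEXT_OUT
   advances through that list until we are back at FIRST_EDGE.  */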
2128 first_edge = e = OUT_EDGES (b);
2129 if (e > 0)
2130 do
2131 {
2132 int b_succ = TO_BLOCK (e);
2133 int bb_succ = BLOCK_TO_BB (b_succ);
2134 struct deps *succ_deps = bb_deps + bb_succ;
2135 int reg;
2136
2137 /* Only bbs "below" bb, in the same region, are interesting. */
2138 if (CONTAINING_RGN (b) != CONTAINING_RGN (b_succ)
2139 || bb_succ <= bb)
2140 {
2141 e = NEXT_OUT (e);
2142 continue;
2143 }
2144
2145 /* The reg_last lists are inherited by bb_succ. */
2146 EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg,
2147 {
2148 struct deps_reg *pred_rl = &pred_deps->reg_last[reg];
2149 struct deps_reg *succ_rl = &succ_deps->reg_last[reg];
2150
2151 succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses);
2152 succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets);
2153 succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers,
2154 succ_rl->clobbers);
2155 succ_rl->uses_length += pred_rl->uses_length;
2156 succ_rl->clobbers_length += pred_rl->clobbers_length;
2157 });
2158 IOR_REG_SET (&succ_deps->reg_last_in_use, &pred_deps->reg_last_in_use);
2159
2160 /* Mem read/write lists are inherited by bb_succ. */
2161 concat_insn_mem_list (pred_deps->pending_read_insns,
2162 pred_deps->pending_read_mems,
2163 &succ_deps->pending_read_insns,
2164 &succ_deps->pending_read_mems);
2165 concat_insn_mem_list (pred_deps->pending_write_insns,
2166 pred_deps->pending_write_mems,
2167 &succ_deps->pending_write_insns,
2168 &succ_deps->pending_write_mems);
2169
2170 succ_deps->last_pending_memory_flush
2171 = concat_INSN_LIST (pred_deps->last_pending_memory_flush,
2172 succ_deps->last_pending_memory_flush);
2173
2174 succ_deps->pending_lists_length += pred_deps->pending_lists_length;
2175 succ_deps->pending_flush_length += pred_deps->pending_flush_length;
2176
2177 /* last_function_call is inherited by bb_succ. */
2178 succ_deps->last_function_call
2179 = concat_INSN_LIST (pred_deps->last_function_call,
2180 succ_deps->last_function_call);
2181
2182 /* sched_before_next_call is inherited by bb_succ. */
2183 succ_deps->sched_before_next_call
2184 = concat_INSN_LIST (pred_deps->sched_before_next_call,
2185 succ_deps->sched_before_next_call);
2186
2187 e = NEXT_OUT (e);
2188 }
2189 while (e != first_edge);
2190
2191 /* These lists should point to the right place, for correct
2192 freeing later. */
2193 bb_deps[bb].pending_read_insns = pred_deps->pending_read_insns;
2194 bb_deps[bb].pending_read_mems = pred_deps->pending_read_mems;
2195 bb_deps[bb].pending_write_insns = pred_deps->pending_write_insns;
2196 bb_deps[bb].pending_write_mems = pred_deps->pending_write_mems;
2197
2198 /* Can't allow these to be freed twice. */
2199 pred_deps->pending_read_insns = 0;
2200 pred_deps->pending_read_mems = 0;
2201 pred_deps->pending_write_insns = 0;
2202 pred_deps->pending_write_mems = 0;
2203 }
2204
2205 /* Compute backward dependences inside bb. In a multiple blocks region:
2206 (1) a bb is analyzed after its predecessors, and (2) the lists in
2207 effect at the end of bb (after analyzing for bb) are inherited by
2208 bb's successors.
2209
2210 Specifically for reg-reg data dependences, the block insns are
2211 scanned by sched_analyze () top-to-bottom. Two lists are
2212 maintained by sched_analyze (): reg_last[].sets for register DEFs,
2213 and reg_last[].uses for register USEs.
2214
2215 When analysis is completed for bb, we update for its successors:
2216 ; - DEFS[succ] = Union (DEFS [succ], DEFS [bb])
2217 ; - USES[succ] = Union (USES [succ], USES [bb])
2218
2219 The mechanism for computing mem-mem data dependence is very
2220 similar, and the result is interblock dependences in the region. */
2221
2222 static void
2223 compute_block_backward_dependences (int bb)
2224 {
2225 rtx head, tail;
2226 struct deps tmp_deps;
2227
2228 tmp_deps = bb_deps[bb];
2229
2230 /* Do the analysis for this block. */
2231 get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
2232 sched_analyze (&tmp_deps, head, tail);
2233 add_branch_dependences (head, tail);
2234
2235 if (current_nr_blocks > 1)
2236 propagate_deps (bb, &tmp_deps);
2237
2238 /* Free up the INSN_LISTs. */
2239 free_deps (&tmp_deps);
2240 }
2241
2242 /* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
2243 them to the unused_*_list variables, so that they can be reused. */
2244
2245 static void
2246 free_pending_lists (void)
2247 {
2248 int bb;
2249
2250 for (bb = 0; bb < current_nr_blocks; bb++)
2251 {
2252 free_INSN_LIST_list (&bb_deps[bb].pending_read_insns);
2253 free_INSN_LIST_list (&bb_deps[bb].pending_write_insns);
2254 free_EXPR_LIST_list (&bb_deps[bb].pending_read_mems);
2255 free_EXPR_LIST_list (&bb_deps[bb].pending_write_mems);
2256 }
2257 }
2258 \f
2259 /* Print dependences for debugging, callable from debugger. */
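/* A typical use (editorial note) is to call it by hand from gdb, e.g.
       (gdb) call debug_dependencies ()
   once forward dependences have been computed for the current region;
   the output is written to the scheduler dump file (sched_dump).  */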
2260
2261 void
2262 debug_dependencies (void)
2263 {
2264 int bb;
2265
2266 fprintf (sched_dump, ";; --------------- forward dependences: ------------ \n");
2267 for (bb = 0; bb < current_nr_blocks; bb++)
2268 {
2269 rtx head, tail;
2270 rtx next_tail;
2271 rtx insn;
2272
2273 get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
2274 next_tail = NEXT_INSN (tail);
2275 fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n",
2276 BB_TO_BLOCK (bb), bb);
2277
2278 fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
2279 "insn", "code", "bb", "dep", "prio", "cost",
2280 "reservation");
2281 fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
2282 "----", "----", "--", "---", "----", "----",
2283 "-----------");
2284
2285 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2286 {
2287 rtx link;
2288
2289 if (! INSN_P (insn))
2290 {
2291 int n;
2292 fprintf (sched_dump, ";; %6d ", INSN_UID (insn));
2293 if (NOTE_P (insn))
2294 {
2295 n = NOTE_LINE_NUMBER (insn);
2296 if (n < 0)
2297 fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
2298 else
2299 {
2300 expanded_location xloc;
2301 NOTE_EXPANDED_LOCATION (xloc, insn);
2302 fprintf (sched_dump, "line %d, file %s\n",
2303 xloc.line, xloc.file);
2304 }
2305 }
2306 else
2307 fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
2308 continue;
2309 }
2310
2311 fprintf (sched_dump,
2312 ";; %s%5d%6d%6d%6d%6d%6d ",
2313 (SCHED_GROUP_P (insn) ? "+" : " "),
2314 INSN_UID (insn),
2315 INSN_CODE (insn),
2316 INSN_BB (insn),
2317 INSN_DEP_COUNT (insn),
2318 INSN_PRIORITY (insn),
2319 insn_cost (insn, 0, 0));
2320
2321 if (recog_memoized (insn) < 0)
2322 fprintf (sched_dump, "nothing");
2323 else
2324 print_reservation (sched_dump, insn);
2325
2326 fprintf (sched_dump, "\t: ");
2327 for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
2328 fprintf (sched_dump, "%d ", INSN_UID (XEXP (link, 0)));
2329 fprintf (sched_dump, "\n");
2330 }
2331 }
2332 fprintf (sched_dump, "\n");
2333 }
2334 \f
2335 /* Schedule a region. A region is either an inner loop, a loop-free
2336 subroutine, or a single basic block. Each bb in the region is
2337 scheduled after its flow predecessors. */
2338
2339 static void
2340 schedule_region (int rgn)
2341 {
2342 int bb;
2343 int rgn_n_insns = 0;
2344 int sched_rgn_n_insns = 0;
2345
2346 /* Set variables for the current region. */
2347 current_nr_blocks = RGN_NR_BLOCKS (rgn);
2348 current_blocks = RGN_BLOCKS (rgn);
2349
2350 init_deps_global ();
2351
2352 /* Initializations for region data dependence analysis. */
2353 bb_deps = xmalloc (sizeof (struct deps) * current_nr_blocks);
2354 for (bb = 0; bb < current_nr_blocks; bb++)
2355 init_deps (bb_deps + bb);
2356
2357 /* Compute LOG_LINKS (backward dependences). */
2358 for (bb = 0; bb < current_nr_blocks; bb++)
2359 compute_block_backward_dependences (bb);
2360
2361 /* Compute INSN_DEPEND (forward dependences). */
2362 for (bb = current_nr_blocks - 1; bb >= 0; bb--)
2363 {
2364 rtx head, tail;
2365 get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
2366
2367 compute_forward_dependences (head, tail);
2368
2369 if (targetm.sched.dependencies_evaluation_hook)
2370 targetm.sched.dependencies_evaluation_hook (head, tail);
2371
2372 }
2373
2374 /* Set priorities. */
2375 for (bb = 0; bb < current_nr_blocks; bb++)
2376 {
2377 rtx head, tail;
2378 get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
2379
2380 rgn_n_insns += set_priorities (head, tail);
2381 }
2382
2383 /* Compute interblock info: probabilities, split-edges, dominators, etc. */
2384 if (current_nr_blocks > 1)
2385 {
2386 int i;
2387
2388 prob = xmalloc ((current_nr_blocks) * sizeof (float));
2389
2390 dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks);
2391 sbitmap_vector_zero (dom, current_nr_blocks);
2392 /* Map each edge in this region to a bit position; these bits index the split-edge and ancestor-edge bitmaps allocated below. */
2393 rgn_nr_edges = 0;
2394 edge_to_bit = xmalloc (nr_edges * sizeof (int));
2395 for (i = 1; i < nr_edges; i++)
2396 if (CONTAINING_RGN (FROM_BLOCK (i)) == rgn)
2397 EDGE_TO_BIT (i) = rgn_nr_edges++;
2398 rgn_edges = xmalloc (rgn_nr_edges * sizeof (int));
2399
2400 rgn_nr_edges = 0;
2401 for (i = 1; i < nr_edges; i++)
2402 if (CONTAINING_RGN (FROM_BLOCK (i)) == (rgn))
2403 rgn_edges[rgn_nr_edges++] = i;
2404
2405 /* Split edges. */
2406 pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
2407 sbitmap_vector_zero (pot_split, current_nr_blocks);
2408 ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
2409 sbitmap_vector_zero (ancestor_edges, current_nr_blocks);
2410
2411 /* Compute probabilities, dominators, split_edges. */
2412 for (bb = 0; bb < current_nr_blocks; bb++)
2413 compute_dom_prob_ps (bb);
2414 }
2415
2416 /* Now we can schedule all blocks. */
2417 for (bb = 0; bb < current_nr_blocks; bb++)
2418 {
2419 rtx head, tail;
2420 int b = BB_TO_BLOCK (bb);
2421
2422 get_block_head_tail (b, &head, &tail);
2423
2424 if (no_real_insns_p (head, tail))
2425 continue;
2426
2427 current_sched_info->prev_head = PREV_INSN (head);
2428 current_sched_info->next_tail = NEXT_INSN (tail);
2429
2430 if (write_symbols != NO_DEBUG)
2431 {
2432 save_line_notes (b, head, tail);
2433 rm_line_notes (head, tail);
2434 }
2435
2436 /* rm_other_notes only removes notes which are _inside_ the
2437 block---that is, it won't remove notes before the first real insn
2438 or after the last real insn of the block. So if the first insn
2439 has a REG_SAVE_NOTE which would otherwise be emitted before the
2440 insn, it is redundant with the note before the start of the
2441 block, and so we have to take it out. */
2442 if (INSN_P (head))
2443 {
2444 rtx note;
2445
2446 for (note = REG_NOTES (head); note; note = XEXP (note, 1))
2447 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
2448 {
2449 remove_note (head, note);
2450 note = XEXP (note, 1);
2451 remove_note (head, note);
2452 }
2453 }
2454
2455 /* Remove remaining note insns from the block, save them in
2456 note_list. These notes are restored at the end of
2457 schedule_block (). */
2458 rm_other_notes (head, tail);
2459
2460 target_bb = bb;
2461
2462 current_sched_info->queue_must_finish_empty
2463 = current_nr_blocks > 1 && !flag_schedule_interblock;
2464
2465 schedule_block (b, rgn_n_insns);
2466 sched_rgn_n_insns += sched_n_insns;
2467
2468 /* Update target block boundaries. */
2469 if (head == BB_HEAD (BASIC_BLOCK (b)))
2470 BB_HEAD (BASIC_BLOCK (b)) = current_sched_info->head;
2471 if (tail == BB_END (BASIC_BLOCK (b)))
2472 BB_END (BASIC_BLOCK (b)) = current_sched_info->tail;
2473
2474 /* Clean up. */
2475 if (current_nr_blocks > 1)
2476 {
2477 free (candidate_table);
2478 free (bblst_table);
2479 free (bitlst_table);
2480 }
2481 }
2482
2483 /* Sanity check: verify that all region insns were scheduled. */
2484 if (sched_rgn_n_insns != rgn_n_insns)
2485 abort ();
2486
2487 /* Restore line notes. */
2488 if (write_symbols != NO_DEBUG)
2489 {
2490 for (bb = 0; bb < current_nr_blocks; bb++)
2491 {
2492 rtx head, tail;
2493 get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
2494 restore_line_notes (head, tail);
2495 }
2496 }
2497
2498 /* Done with this region. */
2499 free_pending_lists ();
2500
2501 finish_deps_global ();
2502
2503 free (bb_deps);
2504
2505 if (current_nr_blocks > 1)
2506 {
2507 free (prob);
2508 sbitmap_vector_free (dom);
2509 sbitmap_vector_free (pot_split);
2510 sbitmap_vector_free (ancestor_edges);
2511 free (edge_to_bit);
2512 free (rgn_edges);
2513 }
2514 }
2515
2516 /* Indexed by region, holds the number of death notes found in that region.
2517 Used for consistency checks. */
2518 static int *deaths_in_region;
2519
2520 /* Initialize data structures for region scheduling. */
2521
2522 static void
2523 init_regions (void)
2524 {
2525 sbitmap blocks;
2526 int rgn;
2527
2528 nr_regions = 0;
2529 rgn_table = xmalloc ((n_basic_blocks) * sizeof (region));
2530 rgn_bb_table = xmalloc ((n_basic_blocks) * sizeof (int));
2531 block_to_bb = xmalloc ((last_basic_block) * sizeof (int));
2532 containing_rgn = xmalloc ((last_basic_block) * sizeof (int));
2533
2534 /* Compute regions for scheduling. */
2535 if (reload_completed
2536 || n_basic_blocks == 1
2537 || !flag_schedule_interblock)
2538 {
2539 find_single_block_region ();
2540 }
2541 else
2542 {
2543 /* Verify that a 'good' control flow graph can be built. */
2544 if (is_cfg_nonregular ())
2545 {
2546 find_single_block_region ();
2547 }
2548 else
2549 {
2550 struct edge_list *edge_list;
2551
2552 /* The scheduler runs after estimate_probabilities; therefore, we
2553 can't blindly call back into find_basic_blocks since doing so
2554 could invalidate the branch probability info. We could,
2555 however, call cleanup_cfg. */
2556 edge_list = create_edge_list ();
2557
2558 /* Compute the dominators and post dominators. */
2559 calculate_dominance_info (CDI_DOMINATORS);
2560
2561 /* build_control_flow will return nonzero if it detects unreachable
2562 blocks or any other irregularity with the cfg which prevents
2563 cross block scheduling. */
2564 if (build_control_flow (edge_list) != 0)
2565 find_single_block_region ();
2566 else
2567 find_rgns (edge_list);
2568
2569 if (sched_verbose >= 3)
2570 debug_regions ();
2571
2572 /* We are done with flow's edge list. */
2573 free_edge_list (edge_list);
2574
2575 /* For now. This will move as more and more of haifa is converted
2576 to using the cfg code in flow.c. */
2577 free_dominance_info (CDI_DOMINATORS);
2578 }
2579 }
2580
2581
2582 if (CHECK_DEAD_NOTES)
2583 {
2584 blocks = sbitmap_alloc (last_basic_block);
2585 deaths_in_region = xmalloc (sizeof (int) * nr_regions);
2586 /* Remove all death notes from the subroutine. */
2587 for (rgn = 0; rgn < nr_regions; rgn++)
2588 {
2589 int b;
2590
2591 sbitmap_zero (blocks);
2592 for (b = RGN_NR_BLOCKS (rgn) - 1; b >= 0; --b)
2593 SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn) + b]);
2594
2595 deaths_in_region[rgn] = count_or_remove_death_notes (blocks, 1);
2596 }
2597 sbitmap_free (blocks);
2598 }
2599 else
2600 count_or_remove_death_notes (NULL, 1);
2601 }
2602
2603 /* The one entry point in this file. DUMP_FILE is the dump file for
2604 this pass. */
2605
2606 void
2607 schedule_insns (FILE *dump_file)
2608 {
2609 sbitmap large_region_blocks, blocks;
2610 int rgn;
2611 int any_large_regions;
2612 basic_block bb;
2613
2614 /* Taking care of this degenerate case makes the rest of
2615 this code simpler. */
2616 if (n_basic_blocks == 0)
2617 return;
2618
2619 nr_inter = 0;
2620 nr_spec = 0;
2621
2622 sched_init (dump_file);
2623
2624 init_regions ();
2625
2626 current_sched_info = &region_sched_info;
2627
2628 /* Schedule every region in the subroutine. */
2629 for (rgn = 0; rgn < nr_regions; rgn++)
2630 schedule_region (rgn);
2631
2632 /* Update life analysis for the subroutine. Do single block regions
2633 first so that we can verify that live_at_start didn't change. Then
2634 do all other blocks. */
2635 /* ??? There is an outside possibility that update_life_info, or more
2636 to the point propagate_block, could get called with nonzero flags
2637 more than once for one basic block. This would be kinda bad if it
2638 were to happen, since REG_INFO would be accumulated twice for the
2639 block, and we'd have twice the REG_DEAD notes.
2640
2641 I'm fairly certain that this _shouldn't_ happen, since I don't think
2642 that live_at_start should change at region heads. Not sure what the
2643 best way to test for this kind of thing... */
2644
2645 allocate_reg_life_data ();
2646 compute_bb_for_insn ();
2647
2648 any_large_regions = 0;
2649 large_region_blocks = sbitmap_alloc (last_basic_block);
2650 sbitmap_zero (large_region_blocks);
2651 FOR_EACH_BB (bb)
2652 SET_BIT (large_region_blocks, bb->index);
2653
2654 blocks = sbitmap_alloc (last_basic_block);
2655 sbitmap_zero (blocks);
2656
2657 /* Update life information. For regions consisting of multiple blocks
2658 we've possibly done interblock scheduling that affects global liveness.
2659 For regions consisting of single blocks we need to do only local
2660 liveness. */
2661 for (rgn = 0; rgn < nr_regions; rgn++)
2662 if (RGN_NR_BLOCKS (rgn) > 1)
2663 any_large_regions = 1;
2664 else
2665 {
2666 SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
2667 RESET_BIT (large_region_blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
2668 }
2669
2670 /* Don't update reg info after reload, since that affects
2671 regs_ever_live, which should not change after reload. */
2672 update_life_info (blocks, UPDATE_LIFE_LOCAL,
2673 (reload_completed ? PROP_DEATH_NOTES
2674 : PROP_DEATH_NOTES | PROP_REG_INFO));
2675 if (any_large_regions)
2676 {
2677 update_life_info (large_region_blocks, UPDATE_LIFE_GLOBAL,
2678 PROP_DEATH_NOTES | PROP_REG_INFO);
2679 }
2680
2681 if (CHECK_DEAD_NOTES)
2682 {
2683 /* Verify the counts of death notes in the single basic block
2684 regions. */
2685 for (rgn = 0; rgn < nr_regions; rgn++)
2686 if (RGN_NR_BLOCKS (rgn) == 1)
2687 {
2688 sbitmap_zero (blocks);
2689 SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
2690
2691 if (deaths_in_region[rgn]
2692 != count_or_remove_death_notes (blocks, 0))
2693 abort ();
2694 }
2695 free (deaths_in_region);
2696 }
2697
2698 /* Reposition the prologue and epilogue notes in case we moved the
2699 prologue/epilogue insns. */
2700 if (reload_completed)
2701 reposition_prologue_and_epilogue_notes (get_insns ());
2702
2703 /* Delete redundant line notes. */
2704 if (write_symbols != NO_DEBUG)
2705 rm_redundant_line_notes ();
2706
2707 if (sched_verbose)
2708 {
2709 if (reload_completed == 0 && flag_schedule_interblock)
2710 {
2711 fprintf (sched_dump,
2712 "\n;; Procedure interblock/speculative motions == %d/%d \n",
2713 nr_inter, nr_spec);
2714 }
2715 else
2716 {
2717 if (nr_inter > 0)
2718 abort ();
2719 }
2720 fprintf (sched_dump, "\n\n");
2721 }
2722
2723 /* Clean up. */
2724 free (rgn_table);
2725 free (rgn_bb_table);
2726 free (block_to_bb);
2727 free (containing_rgn);
2728
2729 sched_finish ();
2730
2731 if (edge_table)
2732 {
2733 free (edge_table);
2734 edge_table = NULL;
2735 }
2736
2737 if (in_edges)
2738 {
2739 free (in_edges);
2740 in_edges = NULL;
2741 }
2742 if (out_edges)
2743 {
2744 free (out_edges);
2745 out_edges = NULL;
2746 }
2747
2748 sbitmap_free (blocks);
2749 sbitmap_free (large_region_blocks);
2750 }
2751 #endif