lcm.c: Include df.h.
[gcc.git] / gcc / lcm.c
1 /* Generic partial redundancy elimination with lazy code motion support.
2 Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING. If not, write to the Free
18 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
19 02111-1307, USA. */
20
21 /* These routines are meant to be used by various optimization
22 passes which can be modeled as lazy code motion problems.
23 Including, but not limited to:
24
25 * Traditional partial redundancy elimination.
26
27 * Placement of caller/caller register save/restores.
28
29 * Load/store motion.
30
31 * Copy motion.
32
33 * Conversion of flat register files to a stacked register
34 model.
35
36 * Dead load/store elimination.
37
38 These routines accept as input:
39
40 * Basic block information (number of blocks, lists of
41 predecessors and successors). Note the granularity
42 does not need to be basic block, they could be statements
43 or functions.
44
45 * Bitmaps of local properties (computed, transparent and
46 anticipatable expressions).
47
48 The output of these routines is bitmap of redundant computations
49 and a bitmap of optimal placement points. */
50
51
52 #include "config.h"
53 #include "system.h"
54 #include "rtl.h"
55 #include "regs.h"
56 #include "hard-reg-set.h"
57 #include "flags.h"
58 #include "real.h"
59 #include "insn-config.h"
60 #include "recog.h"
61 #include "basic-block.h"
62 #include "tm_p.h"
63 #include "df.h"
64
65 /* We want target macros for the mode switching code to be able to refer
66 to instruction attribute values. */
67 #include "insn-attr.h"
68
69 /* Edge based LCM routines. */
70 static void compute_antinout_edge PARAMS ((sbitmap *, sbitmap *,
71 sbitmap *, sbitmap *));
72 static void compute_earliest PARAMS ((struct edge_list *, int,
73 sbitmap *, sbitmap *,
74 sbitmap *, sbitmap *,
75 sbitmap *));
76 static void compute_laterin PARAMS ((struct edge_list *, sbitmap *,
77 sbitmap *, sbitmap *,
78 sbitmap *));
79 static void compute_insert_delete PARAMS ((struct edge_list *edge_list,
80 sbitmap *, sbitmap *,
81 sbitmap *, sbitmap *,
82 sbitmap *));
83
84 /* Edge based LCM routines on a reverse flowgraph. */
85 static void compute_farthest PARAMS ((struct edge_list *, int,
86 sbitmap *, sbitmap *,
87 sbitmap*, sbitmap *,
88 sbitmap *));
89 static void compute_nearerout PARAMS ((struct edge_list *, sbitmap *,
90 sbitmap *, sbitmap *,
91 sbitmap *));
92 static void compute_rev_insert_delete PARAMS ((struct edge_list *edge_list,
93 sbitmap *, sbitmap *,
94 sbitmap *, sbitmap *,
95 sbitmap *));
96
97 static void available_transfer_function PARAMS ((int, int *, sbitmap, sbitmap,
98 sbitmap, sbitmap, void *));
99 \f
100 /* Edge based lcm routines. */
101 /* Compute expression anticipatability at entrance and exit of each block.
102 This is done based on the flow graph, and not on the pred-succ lists.
103 Other than that, its pretty much identical to compute_antinout. */
104
105 static void
106 compute_antinout_edge (antloc, transp, antin, antout)
107 sbitmap *antloc;
108 sbitmap *transp;
109 sbitmap *antin;
110 sbitmap *antout;
111 {
112 int bb;
113 edge e;
114 basic_block *worklist, *qin, *qout, *qend;
115 unsigned int qlen;
116 /* Allocate a worklist array/queue. Entries are only added to the
117 list if they were not already on the list. So the size is
118 bounded by the number of basic blocks. */
119 qin = qout = worklist
120 = (basic_block *) xmalloc (sizeof (basic_block) * n_basic_blocks);
121
122 /* We want a maximal solution, so make an optimistic initialization of
123 ANTIN. */
124 sbitmap_vector_ones (antin, n_basic_blocks);
125
126 /* Put every block on the worklist; this is necessary because of the
127 optimistic initialization of ANTIN above. */
128 for (bb = n_basic_blocks - 1; bb >= 0; bb--)
129 {
130 *qin++ = BASIC_BLOCK (bb);
131 BASIC_BLOCK (bb)->aux = BASIC_BLOCK (bb);
132 }
133
134 qin = worklist;
135 qend = &worklist[n_basic_blocks];
136 qlen = n_basic_blocks;
137
138 /* Mark blocks which are predecessors of the exit block so that we
139 can easily identify them below. */
140 for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next)
141 e->src->aux = EXIT_BLOCK_PTR;
142
143 /* Iterate until the worklist is empty. */
144 while (qlen)
145 {
146 /* Take the first entry off the worklist. */
147 basic_block b = *qout++;
148 bb = b->index;
149 qlen--;
150 if (qout >= qend)
151 qout = worklist;
152
153 if (b->aux == EXIT_BLOCK_PTR)
154 /* Do not clear the aux field for blocks which are predecessors of
155 the EXIT block. That way we never add then to the worklist
156 again. */
157 sbitmap_zero (antout[bb]);
158 else
159 {
160 /* Clear the aux field of this block so that it can be added to
161 the worklist again if necessary. */
162 b->aux = NULL;
163 sbitmap_intersection_of_succs (antout[bb], antin, bb);
164 }
165
166 if (sbitmap_a_or_b_and_c (antin[bb], antloc[bb], transp[bb], antout[bb]))
167 /* If the in state of this block changed, then we need
168 to add the predecessors of this block to the worklist
169 if they are not already on the worklist. */
170 for (e = b->pred; e; e = e->pred_next)
171 if (!e->src->aux && e->src != ENTRY_BLOCK_PTR)
172 {
173 *qin++ = e->src;
174 e->src->aux = e;
175 qlen++;
176 if (qin >= qend)
177 qin = worklist;
178 }
179 }
180
181 clear_aux_for_edges ();
182 clear_aux_for_blocks ();
183 free (worklist);
184 }
185
186 /* Compute the earliest vector for edge based lcm. */
187
188 static void
189 compute_earliest (edge_list, n_exprs, antin, antout, avout, kill, earliest)
190 struct edge_list *edge_list;
191 int n_exprs;
192 sbitmap *antin, *antout, *avout, *kill, *earliest;
193 {
194 sbitmap difference, temp_bitmap;
195 int x, num_edges;
196 basic_block pred, succ;
197
198 num_edges = NUM_EDGES (edge_list);
199
200 difference = sbitmap_alloc (n_exprs);
201 temp_bitmap = sbitmap_alloc (n_exprs);
202
203 for (x = 0; x < num_edges; x++)
204 {
205 pred = INDEX_EDGE_PRED_BB (edge_list, x);
206 succ = INDEX_EDGE_SUCC_BB (edge_list, x);
207 if (pred == ENTRY_BLOCK_PTR)
208 sbitmap_copy (earliest[x], antin[succ->index]);
209 else
210 {
211 /* We refer to the EXIT_BLOCK index, instead of testing for
212 EXIT_BLOCK_PTR, so that EXIT_BLOCK_PTR's index can be
213 changed so as to pretend it's a regular block, so that
214 its antin can be taken into account. */
215 if (succ->index == EXIT_BLOCK)
216 sbitmap_zero (earliest[x]);
217 else
218 {
219 sbitmap_difference (difference, antin[succ->index],
220 avout[pred->index]);
221 sbitmap_not (temp_bitmap, antout[pred->index]);
222 sbitmap_a_and_b_or_c (earliest[x], difference,
223 kill[pred->index], temp_bitmap);
224 }
225 }
226 }
227
228 free (temp_bitmap);
229 free (difference);
230 }
231
232 /* later(p,s) is dependent on the calculation of laterin(p).
233 laterin(p) is dependent on the calculation of later(p2,p).
234
235 laterin(ENTRY) is defined as all 0's
236 later(ENTRY, succs(ENTRY)) are defined using laterin(ENTRY)
237 laterin(succs(ENTRY)) is defined by later(ENTRY, succs(ENTRY)).
238
239 If we progress in this manner, starting with all basic blocks
240 in the work list, anytime we change later(bb), we need to add
241 succs(bb) to the worklist if they are not already on the worklist.
242
243 Boundary conditions:
244
245 We prime the worklist all the normal basic blocks. The ENTRY block can
246 never be added to the worklist since it is never the successor of any
247 block. We explicitly prevent the EXIT block from being added to the
248 worklist.
249
250 We optimistically initialize LATER. That is the only time this routine
251 will compute LATER for an edge out of the entry block since the entry
252 block is never on the worklist. Thus, LATERIN is neither used nor
253 computed for the ENTRY block.
254
255 Since the EXIT block is never added to the worklist, we will neither
256 use nor compute LATERIN for the exit block. Edges which reach the
257 EXIT block are handled in the normal fashion inside the loop. However,
258 the insertion/deletion computation needs LATERIN(EXIT), so we have
259 to compute it. */
260
261 static void
262 compute_laterin (edge_list, earliest, antloc, later, laterin)
263 struct edge_list *edge_list;
264 sbitmap *earliest, *antloc, *later, *laterin;
265 {
266 int bb, num_edges, i;
267 edge e;
268 basic_block *worklist, *qin, *qout, *qend;
269 unsigned int qlen;
270
271 num_edges = NUM_EDGES (edge_list);
272
273 /* Allocate a worklist array/queue. Entries are only added to the
274 list if they were not already on the list. So the size is
275 bounded by the number of basic blocks. */
276 qin = qout = worklist
277 = (basic_block *) xmalloc (sizeof (basic_block) * (n_basic_blocks + 1));
278
279 /* Initialize a mapping from each edge to its index. */
280 for (i = 0; i < num_edges; i++)
281 INDEX_EDGE (edge_list, i)->aux = (void *) (size_t) i;
282
283 /* We want a maximal solution, so initially consider LATER true for
284 all edges. This allows propagation through a loop since the incoming
285 loop edge will have LATER set, so if all the other incoming edges
286 to the loop are set, then LATERIN will be set for the head of the
287 loop.
288
289 If the optimistic setting of LATER on that edge was incorrect (for
290 example the expression is ANTLOC in a block within the loop) then
291 this algorithm will detect it when we process the block at the head
292 of the optimistic edge. That will requeue the affected blocks. */
293 sbitmap_vector_ones (later, num_edges);
294
295 /* Note that even though we want an optimistic setting of LATER, we
296 do not want to be overly optimistic. Consider an outgoing edge from
297 the entry block. That edge should always have a LATER value the
298 same as EARLIEST for that edge. */
299 for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next)
300 sbitmap_copy (later[(size_t) e->aux], earliest[(size_t) e->aux]);
301
302 /* Add all the blocks to the worklist. This prevents an early exit from
303 the loop given our optimistic initialization of LATER above. */
304 for (bb = 0; bb < n_basic_blocks; bb++)
305 {
306 basic_block b = BASIC_BLOCK (bb);
307 *qin++ = b;
308 b->aux = b;
309 }
310 qin = worklist;
311 /* Note that we do not use the last allocated element for our queue,
312 as EXIT_BLOCK is never inserted into it. In fact the above allocation
313 of n_basic_blocks + 1 elements is not encessary. */
314 qend = &worklist[n_basic_blocks];
315 qlen = n_basic_blocks;
316
317 /* Iterate until the worklist is empty. */
318 while (qlen)
319 {
320 /* Take the first entry off the worklist. */
321 basic_block b = *qout++;
322 b->aux = NULL;
323 qlen--;
324 if (qout >= qend)
325 qout = worklist;
326
327 /* Compute the intersection of LATERIN for each incoming edge to B. */
328 bb = b->index;
329 sbitmap_ones (laterin[bb]);
330 for (e = b->pred; e != NULL; e = e->pred_next)
331 sbitmap_a_and_b (laterin[bb], laterin[bb], later[(size_t)e->aux]);
332
333 /* Calculate LATER for all outgoing edges. */
334 for (e = b->succ; e != NULL; e = e->succ_next)
335 if (sbitmap_union_of_diff (later[(size_t) e->aux],
336 earliest[(size_t) e->aux],
337 laterin[e->src->index],
338 antloc[e->src->index])
339 /* If LATER for an outgoing edge was changed, then we need
340 to add the target of the outgoing edge to the worklist. */
341 && e->dest != EXIT_BLOCK_PTR && e->dest->aux == 0)
342 {
343 *qin++ = e->dest;
344 e->dest->aux = e;
345 qlen++;
346 if (qin >= qend)
347 qin = worklist;
348 }
349 }
350
351 /* Computation of insertion and deletion points requires computing LATERIN
352 for the EXIT block. We allocated an extra entry in the LATERIN array
353 for just this purpose. */
354 sbitmap_ones (laterin[n_basic_blocks]);
355 for (e = EXIT_BLOCK_PTR->pred; e != NULL; e = e->pred_next)
356 sbitmap_a_and_b (laterin[n_basic_blocks],
357 laterin[n_basic_blocks],
358 later[(size_t) e->aux]);
359
360 clear_aux_for_edges ();
361 free (worklist);
362 }
363
364 /* Compute the insertion and deletion points for edge based LCM. */
365
366 static void
367 compute_insert_delete (edge_list, antloc, later, laterin,
368 insert, delete)
369 struct edge_list *edge_list;
370 sbitmap *antloc, *later, *laterin, *insert, *delete;
371 {
372 int x;
373
374 for (x = 0; x < n_basic_blocks; x++)
375 sbitmap_difference (delete[x], antloc[x], laterin[x]);
376
377 for (x = 0; x < NUM_EDGES (edge_list); x++)
378 {
379 basic_block b = INDEX_EDGE_SUCC_BB (edge_list, x);
380
381 if (b == EXIT_BLOCK_PTR)
382 sbitmap_difference (insert[x], later[x], laterin[n_basic_blocks]);
383 else
384 sbitmap_difference (insert[x], later[x], laterin[b->index]);
385 }
386 }
387
388 /* Given local properties TRANSP, ANTLOC, AVOUT, KILL return the insert and
389 delete vectors for edge based LCM. Returns an edgelist which is used to
390 map the insert vector to what edge an expression should be inserted on. */
391
392 struct edge_list *
393 pre_edge_lcm (file, n_exprs, transp, avloc, antloc, kill, insert, delete)
394 FILE *file ATTRIBUTE_UNUSED;
395 int n_exprs;
396 sbitmap *transp;
397 sbitmap *avloc;
398 sbitmap *antloc;
399 sbitmap *kill;
400 sbitmap **insert;
401 sbitmap **delete;
402 {
403 sbitmap *antin, *antout, *earliest;
404 sbitmap *avin, *avout;
405 sbitmap *later, *laterin;
406 struct edge_list *edge_list;
407 int num_edges;
408
409 edge_list = create_edge_list ();
410 num_edges = NUM_EDGES (edge_list);
411
412 #ifdef LCM_DEBUG_INFO
413 if (file)
414 {
415 fprintf (file, "Edge List:\n");
416 verify_edge_list (file, edge_list);
417 print_edge_list (file, edge_list);
418 dump_sbitmap_vector (file, "transp", "", transp, n_basic_blocks);
419 dump_sbitmap_vector (file, "antloc", "", antloc, n_basic_blocks);
420 dump_sbitmap_vector (file, "avloc", "", avloc, n_basic_blocks);
421 dump_sbitmap_vector (file, "kill", "", kill, n_basic_blocks);
422 }
423 #endif
424
425 /* Compute global availability. */
426 avin = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
427 avout = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
428 compute_available (avloc, kill, avout, avin);
429 sbitmap_vector_free (avin);
430
431 /* Compute global anticipatability. */
432 antin = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
433 antout = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
434 compute_antinout_edge (antloc, transp, antin, antout);
435
436 #ifdef LCM_DEBUG_INFO
437 if (file)
438 {
439 dump_sbitmap_vector (file, "antin", "", antin, n_basic_blocks);
440 dump_sbitmap_vector (file, "antout", "", antout, n_basic_blocks);
441 }
442 #endif
443
444 /* Compute earliestness. */
445 earliest = sbitmap_vector_alloc (num_edges, n_exprs);
446 compute_earliest (edge_list, n_exprs, antin, antout, avout, kill, earliest);
447
448 #ifdef LCM_DEBUG_INFO
449 if (file)
450 dump_sbitmap_vector (file, "earliest", "", earliest, num_edges);
451 #endif
452
453 sbitmap_vector_free (antout);
454 sbitmap_vector_free (antin);
455 sbitmap_vector_free (avout);
456
457 later = sbitmap_vector_alloc (num_edges, n_exprs);
458
459 /* Allocate an extra element for the exit block in the laterin vector. */
460 laterin = sbitmap_vector_alloc (n_basic_blocks + 1, n_exprs);
461 compute_laterin (edge_list, earliest, antloc, later, laterin);
462
463 #ifdef LCM_DEBUG_INFO
464 if (file)
465 {
466 dump_sbitmap_vector (file, "laterin", "", laterin, n_basic_blocks + 1);
467 dump_sbitmap_vector (file, "later", "", later, num_edges);
468 }
469 #endif
470
471 sbitmap_vector_free (earliest);
472
473 *insert = sbitmap_vector_alloc (num_edges, n_exprs);
474 *delete = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
475 compute_insert_delete (edge_list, antloc, later, laterin, *insert, *delete);
476
477 sbitmap_vector_free (laterin);
478 sbitmap_vector_free (later);
479
480 #ifdef LCM_DEBUG_INFO
481 if (file)
482 {
483 dump_sbitmap_vector (file, "pre_insert_map", "", *insert, num_edges);
484 dump_sbitmap_vector (file, "pre_delete_map", "", *delete,
485 n_basic_blocks);
486 }
487 #endif
488
489 return edge_list;
490 }
491 /* Availability transfer function */
492 static void
493 available_transfer_function (bb, changed, in, out, gen, kill, data)
494 int bb ATTRIBUTE_UNUSED;
495 int *changed;
496 sbitmap in,out,gen,kill;
497 void *data ATTRIBUTE_UNUSED;
498 {
499 *changed = sbitmap_union_of_diff (out, gen, in, kill);
500 }
501 /* Compute the AVIN and AVOUT vectors from the AVLOC and KILL
502 vectors. */
503 void
504 compute_available (avloc, kill, avout, avin)
505 sbitmap *avloc;
506 sbitmap *kill;
507 sbitmap *avout;
508 sbitmap *avin;
509 {
510 int *dfs_order;
511 int *rc_order;
512 bitmap blocks;
513 int *inverse_rc_map;
514 int i;
515 dfs_order = xmalloc (sizeof (int) * n_basic_blocks);
516 rc_order = xmalloc (sizeof (int) * n_basic_blocks);
517 inverse_rc_map = xmalloc (sizeof (int) * n_basic_blocks);
518 flow_depth_first_order_compute (dfs_order, rc_order);
519 blocks = BITMAP_XMALLOC ();
520 for (i = 0; i < n_basic_blocks; i ++)
521 {
522 inverse_rc_map[rc_order[i]] = i;
523 bitmap_set_bit (blocks, i);
524 }
525 sbitmap_vector_ones (avout, n_basic_blocks);
526 iterative_dataflow_sbitmap (avin, avout, avloc, kill, blocks,
527 FORWARD, INTERSECTION,
528 available_transfer_function, inverse_rc_map, 0);
529 BITMAP_XFREE (blocks);
530 free (dfs_order);
531 free (rc_order);
532 free (inverse_rc_map);
533 }
534
535 /* Compute the farthest vector for edge based lcm. */
536
537 static void
538 compute_farthest (edge_list, n_exprs, st_avout, st_avin, st_antin,
539 kill, farthest)
540 struct edge_list *edge_list;
541 int n_exprs;
542 sbitmap *st_avout, *st_avin, *st_antin, *kill, *farthest;
543 {
544 sbitmap difference, temp_bitmap;
545 int x, num_edges;
546 basic_block pred, succ;
547
548 num_edges = NUM_EDGES (edge_list);
549
550 difference = sbitmap_alloc (n_exprs);
551 temp_bitmap = sbitmap_alloc (n_exprs);
552
553 for (x = 0; x < num_edges; x++)
554 {
555 pred = INDEX_EDGE_PRED_BB (edge_list, x);
556 succ = INDEX_EDGE_SUCC_BB (edge_list, x);
557 if (succ == EXIT_BLOCK_PTR)
558 sbitmap_copy (farthest[x], st_avout[pred->index]);
559 else
560 {
561 if (pred == ENTRY_BLOCK_PTR)
562 sbitmap_zero (farthest[x]);
563 else
564 {
565 sbitmap_difference (difference, st_avout[pred->index],
566 st_antin[succ->index]);
567 sbitmap_not (temp_bitmap, st_avin[succ->index]);
568 sbitmap_a_and_b_or_c (farthest[x], difference,
569 kill[succ->index], temp_bitmap);
570 }
571 }
572 }
573
574 free (temp_bitmap);
575 free (difference);
576 }
577
578 /* Compute nearer and nearerout vectors for edge based lcm.
579
580 This is the mirror of compute_laterin, additional comments on the
581 implementation can be found before compute_laterin. */
582
583 static void
584 compute_nearerout (edge_list, farthest, st_avloc, nearer, nearerout)
585 struct edge_list *edge_list;
586 sbitmap *farthest, *st_avloc, *nearer, *nearerout;
587 {
588 int bb, num_edges, i;
589 edge e;
590 basic_block *worklist, *tos;
591
592 num_edges = NUM_EDGES (edge_list);
593
594 /* Allocate a worklist array/queue. Entries are only added to the
595 list if they were not already on the list. So the size is
596 bounded by the number of basic blocks. */
597 tos = worklist
598 = (basic_block *) xmalloc (sizeof (basic_block) * (n_basic_blocks + 1));
599
600 /* Initialize NEARER for each edge and build a mapping from an edge to
601 its index. */
602 for (i = 0; i < num_edges; i++)
603 INDEX_EDGE (edge_list, i)->aux = (void *) (size_t) i;
604
605 /* We want a maximal solution. */
606 sbitmap_vector_ones (nearer, num_edges);
607
608 /* Note that even though we want an optimistic setting of NEARER, we
609 do not want to be overly optimistic. Consider an incoming edge to
610 the exit block. That edge should always have a NEARER value the
611 same as FARTHEST for that edge. */
612 for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next)
613 sbitmap_copy (nearer[(size_t)e->aux], farthest[(size_t)e->aux]);
614
615 /* Add all the blocks to the worklist. This prevents an early exit
616 from the loop given our optimistic initialization of NEARER. */
617 for (bb = 0; bb < n_basic_blocks; bb++)
618 {
619 basic_block b = BASIC_BLOCK (bb);
620 *tos++ = b;
621 b->aux = b;
622 }
623
624 /* Iterate until the worklist is empty. */
625 while (tos != worklist)
626 {
627 /* Take the first entry off the worklist. */
628 basic_block b = *--tos;
629 b->aux = NULL;
630
631 /* Compute the intersection of NEARER for each outgoing edge from B. */
632 bb = b->index;
633 sbitmap_ones (nearerout[bb]);
634 for (e = b->succ; e != NULL; e = e->succ_next)
635 sbitmap_a_and_b (nearerout[bb], nearerout[bb],
636 nearer[(size_t) e->aux]);
637
638 /* Calculate NEARER for all incoming edges. */
639 for (e = b->pred; e != NULL; e = e->pred_next)
640 if (sbitmap_union_of_diff (nearer[(size_t) e->aux],
641 farthest[(size_t) e->aux],
642 nearerout[e->dest->index],
643 st_avloc[e->dest->index])
644 /* If NEARER for an incoming edge was changed, then we need
645 to add the source of the incoming edge to the worklist. */
646 && e->src != ENTRY_BLOCK_PTR && e->src->aux == 0)
647 {
648 *tos++ = e->src;
649 e->src->aux = e;
650 }
651 }
652
653 /* Computation of insertion and deletion points requires computing NEAREROUT
654 for the ENTRY block. We allocated an extra entry in the NEAREROUT array
655 for just this purpose. */
656 sbitmap_ones (nearerout[n_basic_blocks]);
657 for (e = ENTRY_BLOCK_PTR->succ; e != NULL; e = e->succ_next)
658 sbitmap_a_and_b (nearerout[n_basic_blocks],
659 nearerout[n_basic_blocks],
660 nearer[(size_t) e->aux]);
661
662 clear_aux_for_edges ();
663 free (tos);
664 }
665
666 /* Compute the insertion and deletion points for edge based LCM. */
667
668 static void
669 compute_rev_insert_delete (edge_list, st_avloc, nearer, nearerout,
670 insert, delete)
671 struct edge_list *edge_list;
672 sbitmap *st_avloc, *nearer, *nearerout, *insert, *delete;
673 {
674 int x;
675
676 for (x = 0; x < n_basic_blocks; x++)
677 sbitmap_difference (delete[x], st_avloc[x], nearerout[x]);
678
679 for (x = 0; x < NUM_EDGES (edge_list); x++)
680 {
681 basic_block b = INDEX_EDGE_PRED_BB (edge_list, x);
682 if (b == ENTRY_BLOCK_PTR)
683 sbitmap_difference (insert[x], nearer[x], nearerout[n_basic_blocks]);
684 else
685 sbitmap_difference (insert[x], nearer[x], nearerout[b->index]);
686 }
687 }
688
689 /* Given local properties TRANSP, ST_AVLOC, ST_ANTLOC, KILL return the
690 insert and delete vectors for edge based reverse LCM. Returns an
691 edgelist which is used to map the insert vector to what edge
692 an expression should be inserted on. */
693
694 struct edge_list *
695 pre_edge_rev_lcm (file, n_exprs, transp, st_avloc, st_antloc, kill,
696 insert, delete)
697 FILE *file ATTRIBUTE_UNUSED;
698 int n_exprs;
699 sbitmap *transp;
700 sbitmap *st_avloc;
701 sbitmap *st_antloc;
702 sbitmap *kill;
703 sbitmap **insert;
704 sbitmap **delete;
705 {
706 sbitmap *st_antin, *st_antout;
707 sbitmap *st_avout, *st_avin, *farthest;
708 sbitmap *nearer, *nearerout;
709 struct edge_list *edge_list;
710 int num_edges;
711
712 edge_list = create_edge_list ();
713 num_edges = NUM_EDGES (edge_list);
714
715 st_antin = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, n_exprs);
716 st_antout = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, n_exprs);
717 sbitmap_vector_zero (st_antin, n_basic_blocks);
718 sbitmap_vector_zero (st_antout, n_basic_blocks);
719 compute_antinout_edge (st_antloc, transp, st_antin, st_antout);
720
721 /* Compute global anticipatability. */
722 st_avout = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
723 st_avin = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
724 compute_available (st_avloc, kill, st_avout, st_avin);
725
726 #ifdef LCM_DEBUG_INFO
727 if (file)
728 {
729 fprintf (file, "Edge List:\n");
730 verify_edge_list (file, edge_list);
731 print_edge_list (file, edge_list);
732 dump_sbitmap_vector (file, "transp", "", transp, n_basic_blocks);
733 dump_sbitmap_vector (file, "st_avloc", "", st_avloc, n_basic_blocks);
734 dump_sbitmap_vector (file, "st_antloc", "", st_antloc, n_basic_blocks);
735 dump_sbitmap_vector (file, "st_antin", "", st_antin, n_basic_blocks);
736 dump_sbitmap_vector (file, "st_antout", "", st_antout, n_basic_blocks);
737 dump_sbitmap_vector (file, "st_kill", "", kill, n_basic_blocks);
738 }
739 #endif
740
741 #ifdef LCM_DEBUG_INFO
742 if (file)
743 {
744 dump_sbitmap_vector (file, "st_avout", "", st_avout, n_basic_blocks);
745 dump_sbitmap_vector (file, "st_avin", "", st_avin, n_basic_blocks);
746 }
747 #endif
748
749 /* Compute farthestness. */
750 farthest = sbitmap_vector_alloc (num_edges, n_exprs);
751 compute_farthest (edge_list, n_exprs, st_avout, st_avin, st_antin,
752 kill, farthest);
753
754 #ifdef LCM_DEBUG_INFO
755 if (file)
756 dump_sbitmap_vector (file, "farthest", "", farthest, num_edges);
757 #endif
758
759 sbitmap_vector_free (st_antin);
760 sbitmap_vector_free (st_antout);
761
762 sbitmap_vector_free (st_avin);
763 sbitmap_vector_free (st_avout);
764
765 nearer = sbitmap_vector_alloc (num_edges, n_exprs);
766
767 /* Allocate an extra element for the entry block. */
768 nearerout = sbitmap_vector_alloc (n_basic_blocks + 1, n_exprs);
769 compute_nearerout (edge_list, farthest, st_avloc, nearer, nearerout);
770
771 #ifdef LCM_DEBUG_INFO
772 if (file)
773 {
774 dump_sbitmap_vector (file, "nearerout", "", nearerout,
775 n_basic_blocks + 1);
776 dump_sbitmap_vector (file, "nearer", "", nearer, num_edges);
777 }
778 #endif
779
780 sbitmap_vector_free (farthest);
781
782 *insert = sbitmap_vector_alloc (num_edges, n_exprs);
783 *delete = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
784 compute_rev_insert_delete (edge_list, st_avloc, nearer, nearerout,
785 *insert, *delete);
786
787 sbitmap_vector_free (nearerout);
788 sbitmap_vector_free (nearer);
789
790 #ifdef LCM_DEBUG_INFO
791 if (file)
792 {
793 dump_sbitmap_vector (file, "pre_insert_map", "", *insert, num_edges);
794 dump_sbitmap_vector (file, "pre_delete_map", "", *delete,
795 n_basic_blocks);
796 }
797 #endif
798 return edge_list;
799 }
800
801 /* Mode switching:
802
803 The algorithm for setting the modes consists of scanning the insn list
804 and finding all the insns which require a specific mode. Each insn gets
805 a unique struct seginfo element. These structures are inserted into a list
806 for each basic block. For each entity, there is an array of bb_info over
807 the flow graph basic blocks (local var 'bb_info'), and contains a list
808 of all insns within that basic block, in the order they are encountered.
809
810 For each entity, any basic block WITHOUT any insns requiring a specific
811 mode are given a single entry, without a mode. (Each basic block
812 in the flow graph must have at least one entry in the segment table.)
813
814 The LCM algorithm is then run over the flow graph to determine where to
815 place the sets to the highest-priority value in respect of first the first
816 insn in any one block. Any adjustments required to the transparancy
817 vectors are made, then the next iteration starts for the next-lower
818 priority mode, till for each entity all modes are exhasted.
819
820 More details are located in the code for optimize_mode_switching(). */
821
822 /* This structure contains the information for each insn which requires
823 either single or double mode to be set.
824 MODE is the mode this insn must be executed in.
825 INSN_PTR is the insn to be executed (may be the note that marks the
826 beginning of a basic block).
827 BBNUM is the flow graph basic block this insn occurs in.
828 NEXT is the next insn in the same basic block. */
829 struct seginfo
830 {
831 int mode;
832 rtx insn_ptr;
833 int bbnum;
834 struct seginfo *next;
835 HARD_REG_SET regs_live;
836 };
837
838 struct lcm_bb_info
839 {
840 struct seginfo *seginfo;
841 int computing;
842 };
843
844 /* These bitmaps are used for the LCM algorithm. */
845
846 #ifdef OPTIMIZE_MODE_SWITCHING
847 static sbitmap *antic;
848 static sbitmap *transp;
849 static sbitmap *comp;
850 static sbitmap *delete;
851 static sbitmap *insert;
852
853 static struct seginfo * new_seginfo PARAMS ((int, rtx, int, HARD_REG_SET));
854 static void add_seginfo PARAMS ((struct lcm_bb_info *, struct seginfo *));
855 static void reg_dies PARAMS ((rtx, HARD_REG_SET));
856 static void reg_becomes_live PARAMS ((rtx, rtx, void *));
857 static void make_preds_opaque PARAMS ((basic_block, int));
858 #endif
859 \f
860 #ifdef OPTIMIZE_MODE_SWITCHING
861
862 /* This function will allocate a new BBINFO structure, initialized
863 with the MODE, INSN, and basic block BB parameters. */
864
865 static struct seginfo *
866 new_seginfo (mode, insn, bb, regs_live)
867 int mode;
868 rtx insn;
869 int bb;
870 HARD_REG_SET regs_live;
871 {
872 struct seginfo *ptr;
873 ptr = xmalloc (sizeof (struct seginfo));
874 ptr->mode = mode;
875 ptr->insn_ptr = insn;
876 ptr->bbnum = bb;
877 ptr->next = NULL;
878 COPY_HARD_REG_SET (ptr->regs_live, regs_live);
879 return ptr;
880 }
881
882 /* Add a seginfo element to the end of a list.
883 HEAD is a pointer to the list beginning.
884 INFO is the structure to be linked in. */
885
886 static void
887 add_seginfo (head, info)
888 struct lcm_bb_info *head;
889 struct seginfo *info;
890 {
891 struct seginfo *ptr;
892
893 if (head->seginfo == NULL)
894 head->seginfo = info;
895 else
896 {
897 ptr = head->seginfo;
898 while (ptr->next != NULL)
899 ptr = ptr->next;
900 ptr->next = info;
901 }
902 }
903
904 /* Make all predecessors of basic block B opaque, recursively, till we hit
905 some that are already non-transparent, or an edge where aux is set; that
906 denotes that a mode set is to be done on that edge.
907 J is the bit number in the bitmaps that corresponds to the entity that
908 we are currently handling mode-switching for. */
909
910 static void
911 make_preds_opaque (b, j)
912 basic_block b;
913 int j;
914 {
915 edge e;
916
917 for (e = b->pred; e; e = e->pred_next)
918 {
919 basic_block pb = e->src;
920
921 if (e->aux || ! TEST_BIT (transp[pb->index], j))
922 continue;
923
924 RESET_BIT (transp[pb->index], j);
925 make_preds_opaque (pb, j);
926 }
927 }
928
929 /* Record in LIVE that register REG died. */
930
931 static void
932 reg_dies (reg, live)
933 rtx reg;
934 HARD_REG_SET live;
935 {
936 int regno, nregs;
937
938 if (GET_CODE (reg) != REG)
939 return;
940
941 regno = REGNO (reg);
942 if (regno < FIRST_PSEUDO_REGISTER)
943 for (nregs = HARD_REGNO_NREGS (regno, GET_MODE (reg)) - 1; nregs >= 0;
944 nregs--)
945 CLEAR_HARD_REG_BIT (live, regno + nregs);
946 }
947
948 /* Record in LIVE that register REG became live.
949 This is called via note_stores. */
950
951 static void
952 reg_becomes_live (reg, setter, live)
953 rtx reg;
954 rtx setter ATTRIBUTE_UNUSED;
955 void *live;
956 {
957 int regno, nregs;
958
959 if (GET_CODE (reg) == SUBREG)
960 reg = SUBREG_REG (reg);
961
962 if (GET_CODE (reg) != REG)
963 return;
964
965 regno = REGNO (reg);
966 if (regno < FIRST_PSEUDO_REGISTER)
967 for (nregs = HARD_REGNO_NREGS (regno, GET_MODE (reg)) - 1; nregs >= 0;
968 nregs--)
969 SET_HARD_REG_BIT (* (HARD_REG_SET *) live, regno + nregs);
970 }
971
972 /* Find all insns that need a particular mode setting, and insert the
973 necessary mode switches. Return true if we did work. */
974
975 int
976 optimize_mode_switching (file)
977 FILE *file;
978 {
979 rtx insn;
980 int bb, e;
981 int need_commit = 0;
982 sbitmap *kill;
983 struct edge_list *edge_list;
984 static const int num_modes[] = NUM_MODES_FOR_MODE_SWITCHING;
985 #define N_ENTITIES (sizeof num_modes / sizeof (int))
986 int entity_map[N_ENTITIES];
987 struct lcm_bb_info *bb_info[N_ENTITIES];
988 int i, j;
989 int n_entities;
990 int max_num_modes = 0;
991 bool emited = false;
992
993 #ifdef NORMAL_MODE
994 /* Increment n_basic_blocks before allocating bb_info. */
995 n_basic_blocks++;
996 #endif
997
998 for (e = N_ENTITIES - 1, n_entities = 0; e >= 0; e--)
999 if (OPTIMIZE_MODE_SWITCHING (e))
1000 {
1001 /* Create the list of segments within each basic block. */
1002 bb_info[n_entities]
1003 = (struct lcm_bb_info *) xcalloc (n_basic_blocks, sizeof **bb_info);
1004 entity_map[n_entities++] = e;
1005 if (num_modes[e] > max_num_modes)
1006 max_num_modes = num_modes[e];
1007 }
1008
1009 #ifdef NORMAL_MODE
1010 /* Decrement it back in case we return below. */
1011 n_basic_blocks--;
1012 #endif
1013
1014 if (! n_entities)
1015 return 0;
1016
1017 #ifdef NORMAL_MODE
1018 /* We're going to pretend the EXIT_BLOCK is a regular basic block,
1019 so that switching back to normal mode when entering the
1020 EXIT_BLOCK isn't optimized away. We do this by incrementing the
1021 basic block count, growing the VARRAY of basic_block_info and
1022 appending the EXIT_BLOCK_PTR to it. */
1023 n_basic_blocks++;
1024 if (VARRAY_SIZE (basic_block_info) < n_basic_blocks)
1025 VARRAY_GROW (basic_block_info, n_basic_blocks);
1026 BASIC_BLOCK (n_basic_blocks - 1) = EXIT_BLOCK_PTR;
1027 EXIT_BLOCK_PTR->index = n_basic_blocks - 1;
1028 #endif
1029
1030 /* Create the bitmap vectors. */
1031
1032 antic = sbitmap_vector_alloc (n_basic_blocks, n_entities);
1033 transp = sbitmap_vector_alloc (n_basic_blocks, n_entities);
1034 comp = sbitmap_vector_alloc (n_basic_blocks, n_entities);
1035
1036 sbitmap_vector_ones (transp, n_basic_blocks);
1037
1038 for (j = n_entities - 1; j >= 0; j--)
1039 {
1040 int e = entity_map[j];
1041 int no_mode = num_modes[e];
1042 struct lcm_bb_info *info = bb_info[j];
1043
1044 /* Determine what the first use (if any) need for a mode of entity E is.
1045 This will be the mode that is anticipatable for this block.
1046 Also compute the initial transparency settings. */
1047 for (bb = 0 ; bb < n_basic_blocks; bb++)
1048 {
1049 struct seginfo *ptr;
1050 int last_mode = no_mode;
1051 HARD_REG_SET live_now;
1052
1053 REG_SET_TO_HARD_REG_SET (live_now,
1054 BASIC_BLOCK (bb)->global_live_at_start);
1055 for (insn = BLOCK_HEAD (bb);
1056 insn != NULL && insn != NEXT_INSN (BLOCK_END (bb));
1057 insn = NEXT_INSN (insn))
1058 {
1059 if (INSN_P (insn))
1060 {
1061 int mode = MODE_NEEDED (e, insn);
1062 rtx link;
1063
1064 if (mode != no_mode && mode != last_mode)
1065 {
1066 last_mode = mode;
1067 ptr = new_seginfo (mode, insn, bb, live_now);
1068 add_seginfo (info + bb, ptr);
1069 RESET_BIT (transp[bb], j);
1070 }
1071
1072 /* Update LIVE_NOW. */
1073 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1074 if (REG_NOTE_KIND (link) == REG_DEAD)
1075 reg_dies (XEXP (link, 0), live_now);
1076
1077 note_stores (PATTERN (insn), reg_becomes_live, &live_now);
1078 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1079 if (REG_NOTE_KIND (link) == REG_UNUSED)
1080 reg_dies (XEXP (link, 0), live_now);
1081 }
1082 }
1083
1084 info[bb].computing = last_mode;
1085 /* Check for blocks without ANY mode requirements. */
1086 if (last_mode == no_mode)
1087 {
1088 ptr = new_seginfo (no_mode, insn, bb, live_now);
1089 add_seginfo (info + bb, ptr);
1090 }
1091 }
1092 #ifdef NORMAL_MODE
1093 {
1094 int mode = NORMAL_MODE (e);
1095
1096 if (mode != no_mode)
1097 {
1098 edge eg;
1099
1100 for (eg = ENTRY_BLOCK_PTR->succ; eg; eg = eg->succ_next)
1101 {
1102 bb = eg->dest->index;
1103
1104 /* By always making this nontransparent, we save
1105 an extra check in make_preds_opaque. We also
1106 need this to avoid confusing pre_edge_lcm when
1107 antic is cleared but transp and comp are set. */
1108 RESET_BIT (transp[bb], j);
1109
1110 /* If the block already has MODE, pretend it
1111 has none (because we don't need to set it),
1112 but retain whatever mode it computes. */
1113 if (info[bb].seginfo->mode == mode)
1114 info[bb].seginfo->mode = no_mode;
1115
1116 /* Insert a fake computing definition of MODE into entry
1117 blocks which compute no mode. This represents the mode on
1118 entry. */
1119 else if (info[bb].computing == no_mode)
1120 {
1121 info[bb].computing = mode;
1122 info[bb].seginfo->mode = no_mode;
1123 }
1124 }
1125
1126 bb = n_basic_blocks - 1;
1127 info[bb].seginfo->mode = mode;
1128 }
1129 }
1130 #endif /* NORMAL_MODE */
1131 }
1132
1133 kill = sbitmap_vector_alloc (n_basic_blocks, n_entities);
1134 for (i = 0; i < max_num_modes; i++)
1135 {
1136 int current_mode[N_ENTITIES];
1137
1138 /* Set the anticipatable and computing arrays. */
1139 sbitmap_vector_zero (antic, n_basic_blocks);
1140 sbitmap_vector_zero (comp, n_basic_blocks);
1141 for (j = n_entities - 1; j >= 0; j--)
1142 {
1143 int m = current_mode[j] = MODE_PRIORITY_TO_MODE (entity_map[j], i);
1144 struct lcm_bb_info *info = bb_info[j];
1145
1146 for (bb = 0 ; bb < n_basic_blocks; bb++)
1147 {
1148 if (info[bb].seginfo->mode == m)
1149 SET_BIT (antic[bb], j);
1150
1151 if (info[bb].computing == m)
1152 SET_BIT (comp[bb], j);
1153 }
1154 }
1155
1156 /* Calculate the optimal locations for the
1157 placement mode switches to modes with priority I. */
1158
1159 for (bb = n_basic_blocks - 1; bb >= 0; bb--)
1160 sbitmap_not (kill[bb], transp[bb]);
1161 edge_list = pre_edge_lcm (file, 1, transp, comp, antic,
1162 kill, &insert, &delete);
1163
1164 for (j = n_entities - 1; j >= 0; j--)
1165 {
1166 /* Insert all mode sets that have been inserted by lcm. */
1167 int no_mode = num_modes[entity_map[j]];
1168
1169 /* Wherever we have moved a mode setting upwards in the flow graph,
1170 the blocks between the new setting site and the now redundant
1171 computation ceases to be transparent for any lower-priority
1172 mode of the same entity. First set the aux field of each
1173 insertion site edge non-transparent, then propagate the new
1174 non-transparency from the redundant computation upwards till
1175 we hit an insertion site or an already non-transparent block. */
1176 for (e = NUM_EDGES (edge_list) - 1; e >= 0; e--)
1177 {
1178 edge eg = INDEX_EDGE (edge_list, e);
1179 int mode;
1180 basic_block src_bb;
1181 HARD_REG_SET live_at_edge;
1182 rtx mode_set;
1183
1184 eg->aux = 0;
1185
1186 if (! TEST_BIT (insert[e], j))
1187 continue;
1188
1189 eg->aux = (void *)1;
1190
1191 mode = current_mode[j];
1192 src_bb = eg->src;
1193
1194 REG_SET_TO_HARD_REG_SET (live_at_edge,
1195 src_bb->global_live_at_end);
1196
1197 start_sequence ();
1198 EMIT_MODE_SET (entity_map[j], mode, live_at_edge);
1199 mode_set = gen_sequence ();
1200 end_sequence ();
1201
1202 /* Do not bother to insert empty sequence. */
1203 if (GET_CODE (mode_set) == SEQUENCE
1204 && !XVECLEN (mode_set, 0))
1205 continue;
1206
1207 /* If this is an abnormal edge, we'll insert at the end
1208 of the previous block. */
1209 if (eg->flags & EDGE_ABNORMAL)
1210 {
1211 emited = true;
1212 if (GET_CODE (src_bb->end) == JUMP_INSN)
1213 emit_insn_before (mode_set, src_bb->end);
1214 /* It doesn't make sense to switch to normal mode
1215 after a CALL_INSN, so we're going to abort if we
1216 find one. The cases in which a CALL_INSN may
1217 have an abnormal edge are sibcalls and EH edges.
1218 In the case of sibcalls, the dest basic-block is
1219 the EXIT_BLOCK, that runs in normal mode; it is
1220 assumed that a sibcall insn requires normal mode
1221 itself, so no mode switch would be required after
1222 the call (it wouldn't make sense, anyway). In
1223 the case of EH edges, EH entry points also start
1224 in normal mode, so a similar reasoning applies. */
1225 else if (GET_CODE (src_bb->end) == INSN)
1226 emit_insn_after (mode_set, src_bb->end);
1227 else
1228 abort ();
1229 bb_info[j][src_bb->index].computing = mode;
1230 RESET_BIT (transp[src_bb->index], j);
1231 }
1232 else
1233 {
1234 need_commit = 1;
1235 insert_insn_on_edge (mode_set, eg);
1236 }
1237 }
1238
1239 for (bb = n_basic_blocks - 1; bb >= 0; bb--)
1240 if (TEST_BIT (delete[bb], j))
1241 {
1242 make_preds_opaque (BASIC_BLOCK (bb), j);
1243 /* Cancel the 'deleted' mode set. */
1244 bb_info[j][bb].seginfo->mode = no_mode;
1245 }
1246 }
1247
1248 clear_aux_for_edges ();
1249 free_edge_list (edge_list);
1250 }
1251
1252 #ifdef NORMAL_MODE
1253 /* Restore the special status of EXIT_BLOCK. */
1254 n_basic_blocks--;
1255 VARRAY_POP (basic_block_info);
1256 EXIT_BLOCK_PTR->index = EXIT_BLOCK;
1257 #endif
1258
1259 /* Now output the remaining mode sets in all the segments. */
1260 for (j = n_entities - 1; j >= 0; j--)
1261 {
1262 int no_mode = num_modes[entity_map[j]];
1263
1264 #ifdef NORMAL_MODE
1265 if (bb_info[j][n_basic_blocks].seginfo->mode != no_mode)
1266 {
1267 edge eg;
1268 struct seginfo *ptr = bb_info[j][n_basic_blocks].seginfo;
1269
1270 for (eg = EXIT_BLOCK_PTR->pred; eg; eg = eg->pred_next)
1271 {
1272 rtx mode_set;
1273
1274 if (bb_info[j][eg->src->index].computing == ptr->mode)
1275 continue;
1276
1277 start_sequence ();
1278 EMIT_MODE_SET (entity_map[j], ptr->mode, ptr->regs_live);
1279 mode_set = gen_sequence ();
1280 end_sequence ();
1281
1282 /* Do not bother to insert empty sequence. */
1283 if (GET_CODE (mode_set) == SEQUENCE
1284 && !XVECLEN (mode_set, 0))
1285 continue;
1286
1287 /* If this is an abnormal edge, we'll insert at the end of the
1288 previous block. */
1289 if (eg->flags & EDGE_ABNORMAL)
1290 {
1291 emited = true;
1292 if (GET_CODE (eg->src->end) == JUMP_INSN)
1293 emit_insn_before (mode_set, eg->src->end);
1294 else if (GET_CODE (eg->src->end) == INSN)
1295 emit_insn_after (mode_set, eg->src->end);
1296 else
1297 abort ();
1298 }
1299 else
1300 {
1301 need_commit = 1;
1302 insert_insn_on_edge (mode_set, eg);
1303 }
1304 }
1305
1306 }
1307 #endif
1308
1309 for (bb = n_basic_blocks - 1; bb >= 0; bb--)
1310 {
1311 struct seginfo *ptr, *next;
1312 for (ptr = bb_info[j][bb].seginfo; ptr; ptr = next)
1313 {
1314 next = ptr->next;
1315 if (ptr->mode != no_mode)
1316 {
1317 rtx mode_set;
1318
1319 start_sequence ();
1320 EMIT_MODE_SET (entity_map[j], ptr->mode, ptr->regs_live);
1321 mode_set = gen_sequence ();
1322 end_sequence ();
1323
1324 /* Do not bother to insert empty sequence. */
1325 if (GET_CODE (mode_set) == SEQUENCE
1326 && !XVECLEN (mode_set, 0))
1327 continue;
1328
1329 emited = true;
1330 if (GET_CODE (ptr->insn_ptr) == NOTE
1331 && (NOTE_LINE_NUMBER (ptr->insn_ptr)
1332 == NOTE_INSN_BASIC_BLOCK))
1333 emit_insn_after (mode_set, ptr->insn_ptr);
1334 else
1335 emit_insn_before (mode_set, ptr->insn_ptr);
1336 }
1337
1338 free (ptr);
1339 }
1340 }
1341
1342 free (bb_info[j]);
1343 }
1344
1345 /* Finished. Free up all the things we've allocated. */
1346
1347 sbitmap_vector_free (kill);
1348 sbitmap_vector_free (antic);
1349 sbitmap_vector_free (transp);
1350 sbitmap_vector_free (comp);
1351 sbitmap_vector_free (delete);
1352 sbitmap_vector_free (insert);
1353
1354 if (need_commit)
1355 commit_edge_insertions ();
1356
1357 if (!need_commit && !emited)
1358 return 0;
1359
1360 /* Ideally we'd figure out what blocks were affected and start from
1361 there, but this is enormously complicated by commit_edge_insertions,
1362 which would screw up any indices we'd collected, and also need to
1363 be involved in the update. Bail and recompute global life info for
1364 everything. */
1365
1366 allocate_reg_life_data ();
1367 update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES,
1368 (PROP_DEATH_NOTES | PROP_KILL_DEAD_CODE
1369 | PROP_SCAN_DEAD_CODE | PROP_REG_INFO));
1370
1371 return 1;
1372 }
1373 #endif /* OPTIMIZE_MODE_SWITCHING */