/* Dataflow support routines.
   Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
   Contributed by Michael P. Hayes (m.hayes@elec.canterbury.ac.nz,
                                    mhayes@redhat.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.


OVERVIEW:

This file provides some dataflow routines for computing reaching defs,
upward exposed uses, live variables, def-use chains, and use-def
chains.  The global dataflow is performed using simple iterative
methods with a worklist and could be sped up by ordering the blocks
with a depth first search order.

A `struct ref' data structure (ref) is allocated for every register
reference (def or use) and this records the insn and bb the ref is
found within.  The refs are linked together in chains of uses and defs
for each insn and for each register.  Each ref also has a chain field
that links all the use refs for a def or all the def refs for a use.
This is used to create use-def or def-use chains.


USAGE:

Here's an example of using the dataflow routines.

      struct df *df;

      df = df_init ();

      df_analyse (df, 0, DF_ALL);

      df_dump (df, DF_ALL, stderr);

      df_finish (df);


df_init simply creates a poor man's object (df) that needs to be
passed to all the dataflow routines.  df_finish destroys this
object and frees up any allocated memory.

df_analyse performs the following:

1. Records defs and uses by scanning the insns in each basic block
   or by scanning the insns queued by df_insn_modify.
2. Links defs and uses into insn-def and insn-use chains.
3. Links defs and uses into reg-def and reg-use chains.
4. Assigns LUIDs to each insn (for modified blocks).
5. Calculates local reaching definitions.
6. Calculates global reaching definitions.
7. Creates use-def chains.
8. Calculates local reaching uses (upwards exposed uses).
9. Calculates global reaching uses.
10. Creates def-use chains.
11. Calculates local live registers.
12. Calculates global live registers.
13. Calculates register lifetimes and determines local registers.
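
Once df_analyse has gathered this information, the chains can be
walked directly.  For example (an illustrative sketch, not a required
calling sequence), the uses reached by a def are found via the def's
chain field:

      struct df_link *link;

      for (link = DF_REF_CHAIN (def); link; link = link->next)
	{
	  struct ref *use = link->ref;
	  ...
	}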


PHILOSOPHY:

Note that the dataflow information is not updated for every newly
deleted or created insn.  If the dataflow information requires
updating then all the changed, new, or deleted insns need to be
marked with df_insn_modify (or df_insns_modify) either directly or
indirectly (say through calling df_insn_delete).  df_insn_modify
marks all the modified insns to get processed the next time df_analyse
is called.

Beware that tinkering with insns may invalidate the dataflow information.
The philosophy behind these routines is that once the dataflow
information has been gathered, the user should store what they require
before they tinker with any insn.  Once a reg is replaced, for example,
then the reg-def/reg-use chains will point to the wrong place.  Once a
whole lot of changes have been made, df_analyse can be called again
to update the dataflow information.  Currently, this is not very smart
with regard to propagating changes to the dataflow so it should not
be called very often.
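
A typical update sequence therefore looks like this (an illustrative
sketch; see df.h for the precise signatures, and note that
emit_insn_before is the standard RTL emission routine, not part of
this file):

      df_insn_delete (df, bb, old_insn);
      new_insn = emit_insn_before (pattern, next_insn);
      df_insn_modify (df, bb, new_insn);
      ...
      df_analyse (df, 0, DF_ALL);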


DATA STRUCTURES:

The basic object is a REF (reference) and this may either be a DEF
(definition) or a USE of a register.

These are linked into a variety of lists; namely reg-def, reg-use,
insn-def, insn-use, def-use, and use-def lists.  For example,
the reg-def lists contain all the refs that define a given register
while the insn-use lists contain all the refs used by an insn.

Note that the reg-def and reg-use chains are generally short (except
for the hard registers) and thus it is much faster to search these
chains rather than searching the def or use bitmaps.

If the insns are in SSA form then the reg-def and use-def lists
should only contain the single defining ref.
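
For example (an illustrative sketch), the defs of register REGNO can
be found by walking the reg-def chain rather than searching the def
bitmaps:

      struct df_link *link;

      for (link = df->regs[regno].defs; link; link = link->next)
	{
	  struct ref *def = link->ref;
	  ...
	}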

TODO:

1) Incremental dataflow analysis.

Note that if a loop invariant insn is hoisted (or sunk), we do not
need to change the def-use or use-def chains.  All we have to do is to
change the bb field for all the associated defs and uses and to
renumber the LUIDs for the original and new basic blocks of the insn.

When shadowing loop mems we create new uses and defs for new pseudos
so we do not affect the existing dataflow information.

My current strategy is to queue up all modified, created, or deleted
insns so when df_analyse is called we can easily determine all the new
or deleted refs.  Currently the global dataflow information is
recomputed from scratch but this could be propagated more efficiently.

2) Improved global data flow computation using depth first search.

3) Reduced memory requirements.

We could operate a pool of ref structures.  When a ref is deleted it
gets returned to the pool (say by linking on to a chain of free refs).
This will require a pair of bitmaps for defs and uses so that we can
tell which ones have been changed.  Alternatively, we could
periodically squeeze the def and use tables and associated bitmaps and
renumber the def and use ids.

4) Ordering of reg-def and reg-use lists.

Should the first entry in the def list be the first def (within a BB)?
Similarly, should the first entry in the use list be the last use
(within a BB)?

5) Working with a sub-CFG.

Often the whole CFG does not need to be analysed; for example, when
optimising a loop only certain registers are of interest.  Perhaps
there should be a bitmap argument to df_analyse to specify which
registers should be analysed?  */

#define HANDLE_SUBREG

#include "config.h"
#include "system.h"
#include "rtl.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "regs.h"
#include "obstack.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "bitmap.h"
#include "df.h"


#define FOR_ALL_BBS(BB, CODE)					\
do {								\
  int node_;							\
  for (node_ = 0; node_ < n_basic_blocks; node_++)		\
    {(BB) = BASIC_BLOCK (node_); CODE;};} while (0)

#define FOR_EACH_BB_IN_BITMAP(BITMAP, MIN, BB, CODE)		\
do {								\
  unsigned int node_;						\
  EXECUTE_IF_SET_IN_BITMAP (BITMAP, MIN, node_,			\
    {(BB) = BASIC_BLOCK (node_); CODE;});} while (0)

#define FOR_EACH_BB_IN_BITMAP_REV(BITMAP, MIN, BB, CODE)	\
do {								\
  unsigned int node_;						\
  EXECUTE_IF_SET_IN_BITMAP_REV (BITMAP, node_,			\
    {(BB) = BASIC_BLOCK (node_); CODE;});} while (0)

#define FOR_EACH_BB_IN_SBITMAP(BITMAP, MIN, BB, CODE)		\
do {								\
  unsigned int node_;						\
  EXECUTE_IF_SET_IN_SBITMAP (BITMAP, MIN, node_,		\
    {(BB) = BASIC_BLOCK (node_); CODE;});} while (0)

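/* Example (illustrative): the iterator macros above are used like
   this, as in df_refs_record further below.

      basic_block bb;

      FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
	{
	  df_bb_refs_record (df, bb);
	});  */
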
#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free

static struct obstack df_ref_obstack;
static struct df *ddf;

static void df_reg_table_realloc PARAMS((struct df *, int));
#if 0
static void df_def_table_realloc PARAMS((struct df *, int));
#endif
static void df_insn_table_realloc PARAMS((struct df *, int));
static void df_bitmaps_alloc PARAMS((struct df *, int));
static void df_bitmaps_free PARAMS((struct df *, int));
static void df_free PARAMS((struct df *));
static void df_alloc PARAMS((struct df *, int));

static rtx df_reg_clobber_gen PARAMS((unsigned int));
static rtx df_reg_use_gen PARAMS((unsigned int));

static inline struct df_link *df_link_create PARAMS((struct ref *,
						     struct df_link *));
static struct df_link *df_ref_unlink PARAMS((struct df_link **, struct ref *));
static void df_def_unlink PARAMS((struct df *, struct ref *));
static void df_use_unlink PARAMS((struct df *, struct ref *));
static void df_insn_refs_unlink PARAMS ((struct df *, basic_block, rtx));
#if 0
static void df_bb_refs_unlink PARAMS ((struct df *, basic_block));
static void df_refs_unlink PARAMS ((struct df *, bitmap));
#endif

static struct ref *df_ref_create PARAMS((struct df *,
					 rtx, rtx *, basic_block, rtx,
					 enum df_ref_type));
static void df_ref_record_1 PARAMS((struct df *, rtx, rtx *,
				    basic_block, rtx, enum df_ref_type));
static void df_ref_record PARAMS((struct df *, rtx, rtx *,
				  basic_block bb, rtx, enum df_ref_type));
static void df_def_record_1 PARAMS((struct df *, rtx, basic_block, rtx));
static void df_defs_record PARAMS((struct df *, rtx, basic_block, rtx));
static void df_uses_record PARAMS((struct df *, rtx *,
				   enum df_ref_type, basic_block, rtx));
static void df_insn_refs_record PARAMS((struct df *, basic_block, rtx));
static void df_bb_refs_record PARAMS((struct df *, basic_block));
static void df_refs_record PARAMS((struct df *, bitmap));

static int df_visit_next_rc PARAMS ((struct df *, sbitmap));
static int df_visit_next_rts PARAMS ((struct df *, sbitmap));
static void df_bb_reg_def_chain_create PARAMS((struct df *, basic_block));
static void df_reg_def_chain_create PARAMS((struct df *, bitmap));
static void df_bb_reg_use_chain_create PARAMS((struct df *, basic_block));
static void df_reg_use_chain_create PARAMS((struct df *, bitmap));
static void df_bb_du_chain_create PARAMS((struct df *, basic_block, bitmap));
static void df_du_chain_create PARAMS((struct df *, bitmap));
static void df_bb_ud_chain_create PARAMS((struct df *, basic_block));
static void df_ud_chain_create PARAMS((struct df *, bitmap));
static void df_rd_global_compute PARAMS((struct df *, bitmap));
static void df_ru_global_compute PARAMS((struct df *, bitmap));
static void df_lr_global_compute PARAMS((struct df *, bitmap));
static void df_bb_rd_local_compute PARAMS((struct df *, basic_block));
static void df_rd_local_compute PARAMS((struct df *, bitmap));
static void df_bb_ru_local_compute PARAMS((struct df *, basic_block));
static void df_ru_local_compute PARAMS((struct df *, bitmap));
static void df_bb_lr_local_compute PARAMS((struct df *, basic_block));
static void df_lr_local_compute PARAMS((struct df *, bitmap));
static void df_bb_reg_info_compute PARAMS((struct df *, basic_block, bitmap));
static void df_reg_info_compute PARAMS((struct df *, bitmap));

static int df_bb_luids_set PARAMS((struct df *df, basic_block));
static int df_luids_set PARAMS((struct df *df, bitmap));

static int df_modified_p PARAMS ((struct df *, bitmap));
static int df_refs_queue PARAMS ((struct df *));
static int df_refs_process PARAMS ((struct df *));
static int df_bb_refs_update PARAMS ((struct df *, basic_block));
static int df_refs_update PARAMS ((struct df *));
static void df_analyse_1 PARAMS((struct df *, bitmap, int, int));

static void df_insns_modify PARAMS((struct df *, basic_block,
				    rtx, rtx));
static int df_rtx_mem_replace PARAMS ((rtx *, void *));
static int df_rtx_reg_replace PARAMS ((rtx *, void *));
void df_refs_reg_replace PARAMS ((struct df *, bitmap,
				  struct df_link *, rtx, rtx));

static int df_def_dominates_all_uses_p PARAMS((struct df *, struct ref *def));
static int df_def_dominates_uses_p PARAMS((struct df *,
					   struct ref *def, bitmap));
static struct ref *df_bb_regno_last_use_find PARAMS((struct df *, basic_block,
						     unsigned int));
static struct ref *df_bb_regno_first_def_find PARAMS((struct df *, basic_block,
						      unsigned int));
static struct ref *df_bb_insn_regno_last_use_find PARAMS((struct df *,
							  basic_block,
							  rtx, unsigned int));
static struct ref *df_bb_insn_regno_first_def_find PARAMS((struct df *,
							   basic_block,
							   rtx, unsigned int));

static void df_chain_dump PARAMS((struct df_link *, FILE *file));
static void df_chain_dump_regno PARAMS((struct df_link *, FILE *file));
static void df_regno_debug PARAMS ((struct df *, unsigned int, FILE *));
static void df_ref_debug PARAMS ((struct df *, struct ref *, FILE *));

\f
/* Local memory allocation/deallocation routines.  */


/* Increase the insn info table by SIZE more elements.  */
static void
df_insn_table_realloc (df, size)
     struct df *df;
     int size;
{
  /* Make table 25 percent larger by default.  */
  if (! size)
    size = df->insn_size / 4;

  size += df->insn_size;

  df->insns = (struct insn_info *)
    xrealloc (df->insns, size * sizeof (struct insn_info));

  memset (df->insns + df->insn_size, 0,
	  (size - df->insn_size) * sizeof (struct insn_info));

  df->insn_size = size;

  if (! df->insns_modified)
    {
      df->insns_modified = BITMAP_XMALLOC ();
      bitmap_zero (df->insns_modified);
    }
}


/* Increase the reg info table by SIZE more elements.  */
static void
df_reg_table_realloc (df, size)
     struct df *df;
     int size;
{
  /* Make table 25 percent larger by default.  */
  if (! size)
    size = df->reg_size / 4;

  size += df->reg_size;

  df->regs = (struct reg_info *)
    xrealloc (df->regs, size * sizeof (struct reg_info));

  /* Zero the new entries.  */
  memset (df->regs + df->reg_size, 0,
	  (size - df->reg_size) * sizeof (struct reg_info));

  df->reg_size = size;
}


#if 0
/* Not currently used.  */
static void
df_def_table_realloc (df, size)
     struct df *df;
     int size;
{
  int i;
  struct ref *refs;

  /* Make table 25 percent larger by default.  */
  if (! size)
    size = df->def_size / 4;

  df->def_size += size;
  df->defs = xrealloc (df->defs,
		       df->def_size * sizeof (*df->defs));

  /* Allocate a new block of memory and link into list of blocks
     that will need to be freed later.  */

  refs = xmalloc (size * sizeof (*refs));

  /* Link all the new refs together, overloading the chain field.  */
  for (i = 0; i < size - 1; i++)
    refs[i].chain = (struct df_link *)(refs + i + 1);
  refs[size - 1].chain = 0;
}
#endif



/* Allocate bitmaps for each basic block.  */
static void
df_bitmaps_alloc (df, flags)
     struct df *df;
     int flags;
{
  unsigned int i;
  int dflags = 0;

  /* Free the bitmaps if they need resizing.  */
  if ((flags & DF_LR) && df->n_regs < (unsigned int)max_reg_num ())
    dflags |= DF_LR | DF_RU;
  if ((flags & DF_RU) && df->n_uses < df->use_id)
    dflags |= DF_RU;
  if ((flags & DF_RD) && df->n_defs < df->def_id)
    dflags |= DF_RD;

  if (dflags)
    df_bitmaps_free (df, dflags);

  df->n_defs = df->def_id;
  df->n_uses = df->use_id;

  for (i = 0; i < df->n_bbs; i++)
    {
      basic_block bb = BASIC_BLOCK (i);
      struct bb_info *bb_info = DF_BB_INFO (df, bb);

      if (flags & DF_RD && ! bb_info->rd_in)
	{
	  /* Allocate bitmaps for reaching definitions.  */
	  bb_info->rd_kill = BITMAP_XMALLOC ();
	  bitmap_zero (bb_info->rd_kill);
	  bb_info->rd_gen = BITMAP_XMALLOC ();
	  bitmap_zero (bb_info->rd_gen);
	  bb_info->rd_in = BITMAP_XMALLOC ();
	  bb_info->rd_out = BITMAP_XMALLOC ();
	  bb_info->rd_valid = 0;
	}

      if (flags & DF_RU && ! bb_info->ru_in)
	{
	  /* Allocate bitmaps for upward exposed uses.  */
	  bb_info->ru_kill = BITMAP_XMALLOC ();
	  bitmap_zero (bb_info->ru_kill);
	  /* Note the lack of symmetry.  */
	  bb_info->ru_gen = BITMAP_XMALLOC ();
	  bitmap_zero (bb_info->ru_gen);
	  bb_info->ru_in = BITMAP_XMALLOC ();
	  bb_info->ru_out = BITMAP_XMALLOC ();
	  bb_info->ru_valid = 0;
	}

      if (flags & DF_LR && ! bb_info->lr_in)
	{
	  /* Allocate bitmaps for live variables.  */
	  bb_info->lr_def = BITMAP_XMALLOC ();
	  bitmap_zero (bb_info->lr_def);
	  bb_info->lr_use = BITMAP_XMALLOC ();
	  bitmap_zero (bb_info->lr_use);
	  bb_info->lr_in = BITMAP_XMALLOC ();
	  bb_info->lr_out = BITMAP_XMALLOC ();
	  bb_info->lr_valid = 0;
	}
    }
}

/* Free bitmaps for each basic block.  */
static void
df_bitmaps_free (df, flags)
     struct df *df;
     int flags;
{
  unsigned int i;

  for (i = 0; i < df->n_bbs; i++)
    {
      basic_block bb = BASIC_BLOCK (i);
      struct bb_info *bb_info = DF_BB_INFO (df, bb);

      if (!bb_info)
	continue;

      if ((flags & DF_RD) && bb_info->rd_in)
	{
	  /* Free bitmaps for reaching definitions.  */
	  BITMAP_XFREE (bb_info->rd_kill);
	  bb_info->rd_kill = NULL;
	  BITMAP_XFREE (bb_info->rd_gen);
	  bb_info->rd_gen = NULL;
	  BITMAP_XFREE (bb_info->rd_in);
	  bb_info->rd_in = NULL;
	  BITMAP_XFREE (bb_info->rd_out);
	  bb_info->rd_out = NULL;
	}

      if ((flags & DF_RU) && bb_info->ru_in)
	{
	  /* Free bitmaps for upward exposed uses.  */
	  BITMAP_XFREE (bb_info->ru_kill);
	  bb_info->ru_kill = NULL;
	  BITMAP_XFREE (bb_info->ru_gen);
	  bb_info->ru_gen = NULL;
	  BITMAP_XFREE (bb_info->ru_in);
	  bb_info->ru_in = NULL;
	  BITMAP_XFREE (bb_info->ru_out);
	  bb_info->ru_out = NULL;
	}

      if ((flags & DF_LR) && bb_info->lr_in)
	{
	  /* Free bitmaps for live variables.  */
	  BITMAP_XFREE (bb_info->lr_def);
	  bb_info->lr_def = NULL;
	  BITMAP_XFREE (bb_info->lr_use);
	  bb_info->lr_use = NULL;
	  BITMAP_XFREE (bb_info->lr_in);
	  bb_info->lr_in = NULL;
	  BITMAP_XFREE (bb_info->lr_out);
	  bb_info->lr_out = NULL;
	}
    }
  df->flags &= ~(flags & (DF_RD | DF_RU | DF_LR));
}


/* Allocate and initialise dataflow memory.  */
static void
df_alloc (df, n_regs)
     struct df *df;
     int n_regs;
{
  int n_insns;
  int i;

  gcc_obstack_init (&df_ref_obstack);

  /* Perhaps we should use LUIDs to save memory for the insn_refs
     table.  This is only a small saving; a few pointers.  */
  n_insns = get_max_uid () + 1;

  df->def_id = 0;
  df->n_defs = 0;
  /* Approximate number of defs by number of insns.  */
  df->def_size = n_insns;
  df->defs = xmalloc (df->def_size * sizeof (*df->defs));

  df->use_id = 0;
  df->n_uses = 0;
  /* Approximate number of uses by twice number of insns.  */
  df->use_size = n_insns * 2;
  df->uses = xmalloc (df->use_size * sizeof (*df->uses));

  df->n_regs = n_regs;
  df->n_bbs = n_basic_blocks;

  /* Allocate temporary working array used during local dataflow analysis.  */
  df->reg_def_last = xmalloc (df->n_regs * sizeof (struct ref *));

  df_insn_table_realloc (df, n_insns);

  df_reg_table_realloc (df, df->n_regs);

  df->bbs_modified = BITMAP_XMALLOC ();
  bitmap_zero (df->bbs_modified);

  df->flags = 0;

  df->bbs = xcalloc (df->n_bbs, sizeof (struct bb_info));

  df->all_blocks = BITMAP_XMALLOC ();
  for (i = 0; i < n_basic_blocks; i++)
    bitmap_set_bit (df->all_blocks, i);
}


/* Free all the dataflow info.  */
static void
df_free (df)
     struct df *df;
{
  df_bitmaps_free (df, DF_ALL);

  if (df->bbs)
    free (df->bbs);
  df->bbs = 0;

  if (df->insns)
    free (df->insns);
  df->insns = 0;
  df->insn_size = 0;

  if (df->defs)
    free (df->defs);
  df->defs = 0;
  df->def_size = 0;
  df->def_id = 0;

  if (df->uses)
    free (df->uses);
  df->uses = 0;
  df->use_size = 0;
  df->use_id = 0;

  if (df->regs)
    free (df->regs);
  df->regs = 0;
  df->reg_size = 0;

  if (df->bbs_modified)
    BITMAP_XFREE (df->bbs_modified);
  df->bbs_modified = 0;

  if (df->insns_modified)
    BITMAP_XFREE (df->insns_modified);
  df->insns_modified = 0;

  BITMAP_XFREE (df->all_blocks);
  df->all_blocks = 0;

  obstack_free (&df_ref_obstack, NULL);
}
\f
/* Local miscellaneous routines.  */

/* Return a USE for register REGNO.  */
static rtx
df_reg_use_gen (regno)
     unsigned int regno;
{
  rtx reg;
  rtx use;

  reg = regno >= FIRST_PSEUDO_REGISTER
    ? regno_reg_rtx[regno] : gen_rtx_REG (reg_raw_mode[regno], regno);

  use = gen_rtx_USE (GET_MODE (reg), reg);
  return use;
}


/* Return a CLOBBER for register REGNO.  */
static rtx
df_reg_clobber_gen (regno)
     unsigned int regno;
{
  rtx reg;
  rtx clobber;

  reg = regno >= FIRST_PSEUDO_REGISTER
    ? regno_reg_rtx[regno] : gen_rtx_REG (reg_raw_mode[regno], regno);

  clobber = gen_rtx_CLOBBER (GET_MODE (reg), reg);
  return clobber;
}
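
/* Example (illustrative): these generators fabricate a USE or CLOBBER
   rtx whose operand can then be fed back through the scanning
   routines, as df_insn_refs_record does below for call insns.

      rtx x = df_reg_use_gen (STACK_POINTER_REGNUM);
      df_uses_record (df, &SET_DEST (x), DF_REF_REG_USE, bb, insn);  */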
\f
/* Local chain manipulation routines.  */

/* Create a link in a def-use or use-def chain.  */
static inline struct df_link *
df_link_create (ref, next)
     struct ref *ref;
     struct df_link *next;
{
  struct df_link *link;

  link = (struct df_link *) obstack_alloc (&df_ref_obstack,
					   sizeof (*link));
  link->next = next;
  link->ref = ref;
  return link;
}
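
/* Example (illustrative): a new use is prepended to a def's chain
   with df_link_create, as done when building def-use chains below.

      DF_REF_CHAIN (def) = df_link_create (use, DF_REF_CHAIN (def));  */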


/* Remove REF from the chain pointed to by PHEAD.  */
static struct df_link *
df_ref_unlink (phead, ref)
     struct df_link **phead;
     struct ref *ref;
{
  struct df_link *link = *phead;

  if (link)
    {
      if (! link->next)
	{
	  /* Only a single ref.  It must be the one we want.
	     If not, the def-use and use-def chains are likely to
	     be inconsistent.  */
	  if (link->ref != ref)
	    abort ();
	  /* Now have an empty chain.  */
	  *phead = NULL;
	}
      else
	{
	  /* Multiple refs.  One of them must be us.  */
	  if (link->ref == ref)
	    *phead = link->next;
	  else
	    {
	      /* Follow chain.  */
	      for (; link->next; link = link->next)
		{
		  if (link->next->ref == ref)
		    {
		      /* Unlink from list.  */
		      link->next = link->next->next;
		      return link->next;
		    }
		}
	    }
	}
    }
  return link;
}


/* Unlink REF from all def-use/use-def chains, etc.  */
int
df_ref_remove (df, ref)
     struct df *df;
     struct ref *ref;
{
  if (DF_REF_REG_DEF_P (ref))
    {
      df_def_unlink (df, ref);
      df_ref_unlink (&df->insns[DF_REF_INSN_UID (ref)].defs, ref);
    }
  else
    {
      df_use_unlink (df, ref);
      df_ref_unlink (&df->insns[DF_REF_INSN_UID (ref)].uses, ref);
    }
  return 1;
}


/* Unlink DEF from use-def and reg-def chains.  */
static void
df_def_unlink (df, def)
     struct df *df;
     struct ref *def;
{
  struct df_link *du_link;
  unsigned int dregno = DF_REF_REGNO (def);

  /* Follow def-use chain to find all the uses of this def.  */
  for (du_link = DF_REF_CHAIN (def); du_link; du_link = du_link->next)
    {
      struct ref *use = du_link->ref;

      /* Unlink this def from the use-def chain.  */
      df_ref_unlink (&DF_REF_CHAIN (use), def);
    }
  DF_REF_CHAIN (def) = 0;

  /* Unlink def from reg-def chain.  */
  df_ref_unlink (&df->regs[dregno].defs, def);

  df->defs[DF_REF_ID (def)] = 0;
}


/* Unlink use from def-use and reg-use chains.  */
static void
df_use_unlink (df, use)
     struct df *df;
     struct ref *use;
{
  struct df_link *ud_link;
  unsigned int uregno = DF_REF_REGNO (use);

  /* Follow use-def chain to find all the defs of this use.  */
  for (ud_link = DF_REF_CHAIN (use); ud_link; ud_link = ud_link->next)
    {
      struct ref *def = ud_link->ref;

      /* Unlink this use from the def-use chain.  */
      df_ref_unlink (&DF_REF_CHAIN (def), use);
    }
  DF_REF_CHAIN (use) = 0;

  /* Unlink use from reg-use chain.  */
  df_ref_unlink (&df->regs[uregno].uses, use);

  df->uses[DF_REF_ID (use)] = 0;
}
\f
/* Local routines for recording refs.  */


/* Create a new ref of type DF_REF_TYPE for register REG at address
   LOC within INSN of BB.  */
static struct ref *
df_ref_create (df, reg, loc, bb, insn, ref_type)
     struct df *df;
     rtx reg;
     rtx *loc;
     basic_block bb;
     rtx insn;
     enum df_ref_type ref_type;
{
  struct ref *this_ref;
  unsigned int uid;

  this_ref = (struct ref *) obstack_alloc (&df_ref_obstack,
					   sizeof (*this_ref));
  DF_REF_REG (this_ref) = reg;
  DF_REF_LOC (this_ref) = loc;
  DF_REF_BB (this_ref) = bb;
  DF_REF_INSN (this_ref) = insn;
  DF_REF_CHAIN (this_ref) = 0;
  DF_REF_TYPE (this_ref) = ref_type;
  uid = INSN_UID (insn);

  if (ref_type == DF_REF_REG_DEF)
    {
      if (df->def_id >= df->def_size)
	{
	  /* Make table 25 percent larger.  */
	  df->def_size += (df->def_size / 4);
	  df->defs = xrealloc (df->defs,
			       df->def_size * sizeof (*df->defs));
	}
      DF_REF_ID (this_ref) = df->def_id;
      df->defs[df->def_id++] = this_ref;
    }
  else
    {
      if (df->use_id >= df->use_size)
	{
	  /* Make table 25 percent larger.  */
	  df->use_size += (df->use_size / 4);
	  df->uses = xrealloc (df->uses,
			       df->use_size * sizeof (*df->uses));
	}
      DF_REF_ID (this_ref) = df->use_id;
      df->uses[df->use_id++] = this_ref;
    }
  return this_ref;
}


/* Create a new reference of type DF_REF_TYPE for a single register REG,
   used inside the LOC rtx of INSN.  */
static void
df_ref_record_1 (df, reg, loc, bb, insn, ref_type)
     struct df *df;
     rtx reg;
     rtx *loc;
     basic_block bb;
     rtx insn;
     enum df_ref_type ref_type;
{
  df_ref_create (df, reg, loc, bb, insn, ref_type);
}


/* Create new references of type DF_REF_TYPE for each part of register REG
   at address LOC within INSN of BB.  */
static void
df_ref_record (df, reg, loc, bb, insn, ref_type)
     struct df *df;
     rtx reg;
     rtx *loc;
     basic_block bb;
     rtx insn;
     enum df_ref_type ref_type;
{
  unsigned int regno;

  if (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG)
    abort ();

  /* For the reg allocator we are interested in some SUBREG rtx's, but not
     all.  Notably only those representing a word extraction from a multi-word
     reg.  As written in the documentation, those should have the form
     (subreg:SI (reg:M A) N), with size (SImode) > size (Mmode).
     XXX Is that true?  We could also use the global word_mode variable.  */
  if (GET_CODE (reg) == SUBREG
      && (GET_MODE_SIZE (GET_MODE (reg)) < GET_MODE_SIZE (word_mode)
	  || GET_MODE_SIZE (GET_MODE (reg))
	       >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (reg)))))
    {
      loc = &SUBREG_REG (reg);
      reg = *loc;
    }

  regno = REGNO (GET_CODE (reg) == SUBREG ? SUBREG_REG (reg) : reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      int i;
      int endregno;

      if (! (df->flags & DF_HARD_REGS))
	return;

      /* GET_MODE (reg) is correct here.  We don't want to go into a SUBREG
	 for the mode, because we only want to add references to regs, which
	 are really referenced.  E.g. a (subreg:SI (reg:DI 0) 0) does _not_
	 reference the whole reg 0 in DI mode (which would also include
	 reg 1, at least, if 0 and 1 are SImode registers).  */
      endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (reg));

      for (i = regno; i < endregno; i++)
	df_ref_record_1 (df, gen_rtx_REG (reg_raw_mode[i], i),
			 loc, bb, insn, ref_type);
    }
  else
    {
      df_ref_record_1 (df, reg, loc, bb, insn, ref_type);
    }
}


/* Process all the registers defined in the rtx, X.  */
static void
df_def_record_1 (df, x, bb, insn)
     struct df *df;
     rtx x;
     basic_block bb;
     rtx insn;
{
  rtx *loc = &SET_DEST (x);
  rtx dst = *loc;

  /* Some targets place small structures in registers for
     return values of functions.  */
  if (GET_CODE (dst) == PARALLEL && GET_MODE (dst) == BLKmode)
    {
      int i;

      for (i = XVECLEN (dst, 0) - 1; i >= 0; i--)
	df_def_record_1 (df, XVECEXP (dst, 0, i), bb, insn);
      return;
    }

  /* Maybe we should flag the use of STRICT_LOW_PART somehow.  It might
     be handy for the reg allocator.  */
#ifdef HANDLE_SUBREG
  while (GET_CODE (dst) == STRICT_LOW_PART
	 || GET_CODE (dst) == ZERO_EXTRACT
	 || GET_CODE (dst) == SIGN_EXTRACT)
    {
      loc = &XEXP (dst, 0);
      dst = *loc;
    }
  /* For the reg allocator we are interested in exact register references.
     That is, we want to know whether only a part of a register is
     used or defined.  */
  /*
  if (GET_CODE (dst) == SUBREG)
    {
      loc = &XEXP (dst, 0);
      dst = *loc;
    } */
#else

  while (GET_CODE (dst) == SUBREG
	 || GET_CODE (dst) == ZERO_EXTRACT
	 || GET_CODE (dst) == SIGN_EXTRACT
	 || GET_CODE (dst) == STRICT_LOW_PART)
    {
      loc = &XEXP (dst, 0);
      dst = *loc;
    }
#endif

  if (GET_CODE (dst) == REG
      || (GET_CODE (dst) == SUBREG && GET_CODE (SUBREG_REG (dst)) == REG))
    df_ref_record (df, dst, loc, bb, insn, DF_REF_REG_DEF);
}


/* Process all the registers defined in the pattern rtx, X.  */
static void
df_defs_record (df, x, bb, insn)
     struct df *df;
     rtx x;
     basic_block bb;
     rtx insn;
{
  RTX_CODE code = GET_CODE (x);

  if (code == SET || code == CLOBBER)
    {
      /* Mark the single def within the pattern.  */
      df_def_record_1 (df, x, bb, insn);
    }
  else if (code == PARALLEL)
    {
      int i;

      /* Mark the multiple defs within the pattern.  */
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	{
	  code = GET_CODE (XVECEXP (x, 0, i));
	  if (code == SET || code == CLOBBER)
	    df_def_record_1 (df, XVECEXP (x, 0, i), bb, insn);
	}
    }
}


/* Process all the registers used in the rtx at address LOC.  */
static void
df_uses_record (df, loc, ref_type, bb, insn)
     struct df *df;
     rtx *loc;
     enum df_ref_type ref_type;
     basic_block bb;
     rtx insn;
{
  RTX_CODE code;
  rtx x;

 retry:
  x = *loc;
  code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_INT:
    case CONST:
    case CONST_DOUBLE:
    case PC:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return;

    case CLOBBER:
      /* If we are clobbering a MEM, mark any registers inside the address
	 as being used.  */
      if (GET_CODE (XEXP (x, 0)) == MEM)
	df_uses_record (df, &XEXP (XEXP (x, 0), 0),
			DF_REF_REG_MEM_STORE, bb, insn);

      /* If we're clobbering a REG then we have a def so ignore.  */
      return;

    case MEM:
      df_uses_record (df, &XEXP (x, 0), DF_REF_REG_MEM_LOAD, bb, insn);
      return;

    case SUBREG:
      /* While we're here, optimize this case.  */
#if defined(HANDLE_SUBREG)

      /* In case the SUBREG is not of a register, don't optimize.  */
      if (GET_CODE (SUBREG_REG (x)) != REG)
	{
	  loc = &SUBREG_REG (x);
	  df_uses_record (df, loc, ref_type, bb, insn);
	  return;
	}
#else
      loc = &SUBREG_REG (x);
      x = *loc;
      if (GET_CODE (x) != REG)
	{
	  df_uses_record (df, loc, ref_type, bb, insn);
	  return;
	}
#endif

      /* ... Fall through ...  */

    case REG:
      /* We see a register (or subreg) in a context other than
	 being set.  */
      df_ref_record (df, x, loc, bb, insn, ref_type);
      return;

    case SET:
      {
	rtx dst = SET_DEST (x);
	int use_dst = 0;

	/* If storing into MEM, don't show it as being used.  But do
	   show the address as being used.  */
	if (GET_CODE (dst) == MEM)
	  {
	    df_uses_record (df, &XEXP (dst, 0),
			    DF_REF_REG_MEM_STORE,
			    bb, insn);
	    df_uses_record (df, &SET_SRC (x), DF_REF_REG_USE, bb, insn);
	    return;
	  }

#if 1 && defined(HANDLE_SUBREG)
	/* Look for sets that perform a read-modify-write.  */
	while (GET_CODE (dst) == STRICT_LOW_PART
	       || GET_CODE (dst) == ZERO_EXTRACT
	       || GET_CODE (dst) == SIGN_EXTRACT)
	  {
	    if (GET_CODE (dst) == STRICT_LOW_PART)
	      {
		dst = XEXP (dst, 0);
		if (GET_CODE (dst) != SUBREG)
		  abort ();
		/* A strict_low_part uses the whole reg, not only
		   the subreg.  */
		df_uses_record (df, &SUBREG_REG (dst), DF_REF_REG_USE, bb, insn);
	      }
	    else
	      {
		df_uses_record (df, &XEXP (dst, 0), DF_REF_REG_USE, bb, insn);
		dst = XEXP (dst, 0);
	      }
	  }
	if (GET_CODE (dst) == SUBREG)
	  {
	    /* Paradoxical or too small subregs are read-modify-write.  */
	    if (GET_MODE_SIZE (GET_MODE (dst)) < GET_MODE_SIZE (word_mode)
		|| GET_MODE_SIZE (GET_MODE (dst))
		     >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (dst))))
	      use_dst = 1;
	  }
	/* The original code also considered some SUBREG rtx's to be
	   read-modify-write (those with
	   REG_SIZE (SUBREG_REG (dst)) > REG_SIZE (dst)),
	   e.g. a (subreg:QI (reg:SI A) 0).  I can't see why: the only
	   reason for a read cycle of reg A would be to somehow preserve
	   the bits outside of the subreg:QI, but that would require a
	   strict_low_part anyway, and we have handled that already.  */
#else
	while (GET_CODE (dst) == STRICT_LOW_PART
	       || GET_CODE (dst) == ZERO_EXTRACT
	       || GET_CODE (dst) == SIGN_EXTRACT
	       || GET_CODE (dst) == SUBREG)
	  {
	    /* A SUBREG of a smaller size does not use the old value.  */
	    if (GET_CODE (dst) != SUBREG
		|| (REG_SIZE (SUBREG_REG (dst)) > REG_SIZE (dst)))
	      use_dst = 1;
	    dst = XEXP (dst, 0);
	  }
#endif

	if ((GET_CODE (dst) == PARALLEL && GET_MODE (dst) == BLKmode)
	    || GET_CODE (dst) == REG || GET_CODE (dst) == SUBREG)
	  {
#if 1 || !defined(HANDLE_SUBREG)
	    if (use_dst)
	      df_uses_record (df, &SET_DEST (x), DF_REF_REG_USE, bb, insn);
#endif
	    df_uses_record (df, &SET_SRC (x), DF_REF_REG_USE, bb, insn);
	    return;
	  }
      }
      break;

    case RETURN:
      break;

    case ASM_OPERANDS:
    case UNSPEC_VOLATILE:
    case TRAP_IF:
    case ASM_INPUT:
      {
	/* Traditional and volatile asm instructions must be considered to use
	   and clobber all hard registers, all pseudo-registers and all of
	   memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

	   Consider for instance a volatile asm that changes the fpu rounding
	   mode.  An insn should not be moved across this even if it only uses
	   pseudo-regs because it might give an incorrectly rounded result.

	   For now, just mark any regs we can find in ASM_OPERANDS as
	   used.  */

	/* For all ASM_OPERANDS, we must traverse the vector of input operands.
	   We can not just fall through here since then we would be confused
	   by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
	   a traditional asm, unlike its normal usage.  */
	if (code == ASM_OPERANDS)
	  {
	    int j;

	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
	      df_uses_record (df, &ASM_OPERANDS_INPUT (x, j),
			      DF_REF_REG_USE, bb, insn);
	    return;
	  }
	break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* Catch the def of the register being modified.  */
      df_ref_record (df, XEXP (x, 0), &XEXP (x, 0), bb, insn, DF_REF_REG_DEF);

      /* ... Fall through to handle uses ...  */

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */
  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  {
	    /* Tail recursive case: save a function call level.  */
	    if (i == 0)
	      {
		loc = &XEXP (x, 0);
		goto retry;
	      }
	    df_uses_record (df, &XEXP (x, i), ref_type, bb, insn);
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      df_uses_record (df, &XVECEXP (x, i, j), ref_type,
			      bb, insn);
	  }
      }
  }
}


/* Record all the refs within INSN of basic block BB.  */
static void
df_insn_refs_record (df, bb, insn)
     struct df *df;
     basic_block bb;
     rtx insn;
{
  int i;

  if (INSN_P (insn))
    {
      /* Record register defs.  */
      df_defs_record (df, PATTERN (insn), bb, insn);

      if (GET_CODE (insn) == CALL_INSN)
	{
	  rtx note;
	  rtx x;

	  /* Record the registers used to pass arguments.  */
	  for (note = CALL_INSN_FUNCTION_USAGE (insn); note;
	       note = XEXP (note, 1))
	    {
	      if (GET_CODE (XEXP (note, 0)) == USE)
		df_uses_record (df, &SET_DEST (XEXP (note, 0)), DF_REF_REG_USE,
				bb, insn);
	    }

	  /* The stack ptr is used (honorarily) by a CALL insn.  */
	  x = df_reg_use_gen (STACK_POINTER_REGNUM);
	  df_uses_record (df, &SET_DEST (x), DF_REF_REG_USE, bb, insn);

	  if (df->flags & DF_HARD_REGS)
	    {
	      /* Calls may also reference any of the global registers,
		 so they are recorded as used.  */
	      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
		if (global_regs[i])
		  {
		    x = df_reg_use_gen (i);
		    df_uses_record (df, &SET_DEST (x),
				    DF_REF_REG_USE, bb, insn);
		  }
	    }
	}

      /* Record the register uses.  */
      df_uses_record (df, &PATTERN (insn),
		      DF_REF_REG_USE, bb, insn);


      if (GET_CODE (insn) == CALL_INSN)
	{
	  rtx note;

	  if (df->flags & DF_HARD_REGS)
	    {
	      /* Kill all registers invalidated by a call.  */
	      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
		if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
		  {
		    rtx reg_clob = df_reg_clobber_gen (i);
		    df_defs_record (df, reg_clob, bb, insn);
		  }
	    }

	  /* There may be extra registers to be clobbered.  */
	  for (note = CALL_INSN_FUNCTION_USAGE (insn);
	       note;
	       note = XEXP (note, 1))
	    if (GET_CODE (XEXP (note, 0)) == CLOBBER)
	      df_defs_record (df, XEXP (note, 0), bb, insn);
	}
    }
}


/* Record all the refs within the basic block BB.  */
static void
df_bb_refs_record (df, bb)
     struct df *df;
     basic_block bb;
{
  rtx insn;

  /* Scan the block an insn at a time from beginning to end.  */
  for (insn = bb->head; ; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  /* Record defs within INSN.  */
	  df_insn_refs_record (df, bb, insn);
	}
      if (insn == bb->end)
	break;
    }
}


/* Record all the refs in the basic blocks specified by BLOCKS.  */
static void
df_refs_record (df, blocks)
     struct df *df;
     bitmap blocks;
{
  basic_block bb;

  FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
    {
      df_bb_refs_record (df, bb);
    });
}
\f
/* Dataflow analysis routines.  */


/* Create reg-def chains for basic block BB.  These are a list of
   definitions for each register.  */
static void
df_bb_reg_def_chain_create (df, bb)
     struct df *df;
     basic_block bb;
{
  rtx insn;

  /* Perhaps the defs should be sorted using a depth first search
     of the CFG (or possibly a breadth first search).  We currently
     scan the basic blocks in reverse order so that the first defs
     appear at the start of the chain.  */

  for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
       insn = PREV_INSN (insn))
    {
      struct df_link *link;
      unsigned int uid = INSN_UID (insn);

      if (! INSN_P (insn))
	continue;

      for (link = df->insns[uid].defs; link; link = link->next)
	{
	  struct ref *def = link->ref;
	  unsigned int dregno = DF_REF_REGNO (def);

	  df->regs[dregno].defs
	    = df_link_create (def, df->regs[dregno].defs);
	}
    }
}


/* Create reg-def chains for each basic block within BLOCKS.  These
   are a list of definitions for each register.  */
static void
df_reg_def_chain_create (df, blocks)
     struct df *df;
     bitmap blocks;
{
  basic_block bb;

  FOR_EACH_BB_IN_BITMAP/*_REV*/ (blocks, 0, bb,
    {
      df_bb_reg_def_chain_create (df, bb);
    });
}


/* Create reg-use chains for basic block BB.  These are a list of uses
   for each register.  */
static void
df_bb_reg_use_chain_create (df, bb)
     struct df *df;
     basic_block bb;
{
  rtx insn;

  /* Scan in forward order so that the last uses appear at the
     start of the chain.  */

  for (insn = bb->head; insn && insn != NEXT_INSN (bb->end);
       insn = NEXT_INSN (insn))
    {
      struct df_link *link;
      unsigned int uid = INSN_UID (insn);

      if (! INSN_P (insn))
	continue;

      for (link = df->insns[uid].uses; link; link = link->next)
	{
	  struct ref *use = link->ref;
	  unsigned int uregno = DF_REF_REGNO (use);

	  df->regs[uregno].uses
	    = df_link_create (use, df->regs[uregno].uses);
	}
    }
}


/* Create reg-use chains for each basic block within BLOCKS.  These
   are a list of uses for each register.  */
static void
df_reg_use_chain_create (df, blocks)
     struct df *df;
     bitmap blocks;
{
  basic_block bb;

  FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
    {
      df_bb_reg_use_chain_create (df, bb);
    });
}


/* Create def-use chains from reaching use bitmaps for basic block BB.  */
static void
df_bb_du_chain_create (df, bb, ru)
     struct df *df;
     basic_block bb;
     bitmap ru;
{
  struct bb_info *bb_info = DF_BB_INFO (df, bb);
  rtx insn;

  bitmap_copy (ru, bb_info->ru_out);

  /* For each def in BB create a linked list (chain) of uses
     reached from the def.  */
  for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
       insn = PREV_INSN (insn))
    {
      struct df_link *def_link;
      struct df_link *use_link;
      unsigned int uid = INSN_UID (insn);

      if (! INSN_P (insn))
	continue;

      /* For each def in insn...  */
      for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next)
	{
	  struct ref *def = def_link->ref;
	  unsigned int dregno = DF_REF_REGNO (def);

	  DF_REF_CHAIN (def) = 0;

	  /* While the reg-use chains are not essential, it
	     is _much_ faster to search these short lists rather
	     than all the reaching uses, especially for large functions.  */
	  for (use_link = df->regs[dregno].uses; use_link;
	       use_link = use_link->next)
	    {
	      struct ref *use = use_link->ref;

	      if (bitmap_bit_p (ru, DF_REF_ID (use)))
		{
		  DF_REF_CHAIN (def)
		    = df_link_create (use, DF_REF_CHAIN (def));

		  bitmap_clear_bit (ru, DF_REF_ID (use));
		}
	    }
	}

      /* For each use in insn...  */
      for (use_link = df->insns[uid].uses; use_link; use_link = use_link->next)
	{
	  struct ref *use = use_link->ref;
	  bitmap_set_bit (ru, DF_REF_ID (use));
	}
    }
}


/* Create def-use chains from reaching use bitmaps for basic blocks
   in BLOCKS.  */
static void
df_du_chain_create (df, blocks)
     struct df *df;
     bitmap blocks;
{
  bitmap ru;
  basic_block bb;

  ru = BITMAP_XMALLOC ();

  FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
    {
      df_bb_du_chain_create (df, bb, ru);
    });

  BITMAP_XFREE (ru);
}


/* Create use-def chains from reaching def bitmaps for basic block BB.  */
static void
df_bb_ud_chain_create (df, bb)
     struct df *df;
     basic_block bb;
{
  struct bb_info *bb_info = DF_BB_INFO (df, bb);
  struct ref **reg_def_last = df->reg_def_last;
  rtx insn;

  memset (reg_def_last, 0, df->n_regs * sizeof (struct ref *));

  /* For each use in BB create a linked list (chain) of defs
     that reach the use.  */
  for (insn = bb->head; insn && insn != NEXT_INSN (bb->end);
       insn = NEXT_INSN (insn))
    {
      unsigned int uid = INSN_UID (insn);
      struct df_link *use_link;
      struct df_link *def_link;

      if (! INSN_P (insn))
	continue;

      /* For each use in insn...  */
      for (use_link = df->insns[uid].uses; use_link; use_link = use_link->next)
	{
	  struct ref *use = use_link->ref;
	  unsigned int regno = DF_REF_REGNO (use);

	  DF_REF_CHAIN (use) = 0;

	  /* Has regno been defined in this BB yet?  If so, use
	     the last def as the single entry for the use-def
	     chain for this use.  Otherwise, we need to add all
	     the defs using this regno that reach the start of
	     this BB.  */
	  if (reg_def_last[regno])
	    {
	      DF_REF_CHAIN (use)
		= df_link_create (reg_def_last[regno], 0);
	    }
	  else
	    {
	      /* While the reg-def chains are not essential, it is
		 _much_ faster to search these short lists rather than
		 all the reaching defs, especially for large
		 functions.  */
	      for (def_link = df->regs[regno].defs; def_link;
		   def_link = def_link->next)
		{
		  struct ref *def = def_link->ref;

		  if (bitmap_bit_p (bb_info->rd_in, DF_REF_ID (def)))
		    {
		      DF_REF_CHAIN (use)
			= df_link_create (def, DF_REF_CHAIN (use));
		    }
		}
	    }
	}


      /* For each def in insn...record the last def of each reg.  */
      for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next)
	{
	  struct ref *def = def_link->ref;
	  int dregno = DF_REF_REGNO (def);

	  reg_def_last[dregno] = def;
	}
    }
}


/* Create use-def chains from reaching def bitmaps for basic blocks
   within BLOCKS.  */
static void
df_ud_chain_create (df, blocks)
     struct df *df;
     bitmap blocks;
{
  basic_block bb;

  FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
    {
      df_bb_ud_chain_create (df, bb);
    });
}
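
/* Example (illustrative): after df_ud_chain_create, a use with a
   single reaching def can be identified directly from its chain.

      struct df_link *defs = DF_REF_CHAIN (use);

      if (defs && ! defs->next)
	def = defs->ref;  */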
\f

/* Use reverse completion order, and the worklist, to figure out what block
   to look at next.  */

static int
df_visit_next_rc (df, blocks)
     struct df *df;
     sbitmap blocks;
{
  int i;

  for (i = 0; i < n_basic_blocks; i++)
    if (TEST_BIT (blocks, df->rc_order[i]))
      return df->rc_order[i];
  return sbitmap_first_set_bit (blocks);
}

/* Use reverse topsort order, and the worklist, to figure out what block
   to look at next.  */

static int
df_visit_next_rts (df, blocks)
     struct df *df;
     sbitmap blocks;
{
  int i;

  for (i = 0; i < n_basic_blocks; i++)
    if (TEST_BIT (blocks, df->rts_order[i]))
      return df->rts_order[i];
  return sbitmap_first_set_bit (blocks);
}


/* Calculate reaching defs for each basic block in BLOCKS, i.e., the
   defs that are live at the start of a basic block.  */
static void
df_rd_global_compute (df, blocks)
     struct df *df;
     bitmap blocks;
{
  int i;
  basic_block bb;
  sbitmap worklist;

  worklist = sbitmap_alloc (n_basic_blocks);
  sbitmap_zero (worklist);

  /* Copy the blocklist to the worklist.  */
  EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
    {
      SET_BIT (worklist, i);
    });

  /* We assume that only the basic blocks in WORKLIST have been
     modified.  */
  FOR_EACH_BB_IN_SBITMAP (worklist, 0, bb,
    {
      struct bb_info *bb_info = DF_BB_INFO (df, bb);

      bitmap_copy (bb_info->rd_out, bb_info->rd_gen);
    });

  while ((i = df_visit_next_rc (df, worklist)) >= 0)
    {
      struct bb_info *bb_info;
      edge e;
      int changed;

      /* Remove this block from the worklist.  */
      RESET_BIT (worklist, i);

      bb = BASIC_BLOCK (i);
      bb_info = DF_BB_INFO (df, bb);

      /* Calculate union of predecessor outs.  */
      bitmap_zero (bb_info->rd_in);
      for (e = bb->pred; e != 0; e = e->pred_next)
	{
	  struct bb_info *pred_refs = DF_BB_INFO (df, e->src);

	  if (e->src == ENTRY_BLOCK_PTR)
	    continue;

	  bitmap_a_or_b (bb_info->rd_in, bb_info->rd_in,
			 pred_refs->rd_out);
	}

      /* RD_OUT is the set of defs that are live at the end of the
	 BB.  These are the defs that are either generated within the
	 BB (RD_GEN) or are live at the start (RD_IN) and are not
	 killed within the BB (RD_KILL).  */
      changed = bitmap_union_of_diff (bb_info->rd_out, bb_info->rd_gen,
				      bb_info->rd_in, bb_info->rd_kill);

      if (changed)
	{
	  /* Add each of this block's successors to the worklist.  */
	  for (e = bb->succ; e != 0; e = e->succ_next)
	    {
	      if (e->dest == EXIT_BLOCK_PTR)
		continue;

	      SET_BIT (worklist, e->dest->index);
	    }
	}
    }
  sbitmap_free (worklist);
}
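
/* For instance (an illustrative example), if defs D1 in block B1 and
   D2 in block B2 define the same register and block B3 has B1 and B2
   as predecessors, the iteration above computes

      rd_in (B3)  = rd_out (B1) | rd_out (B2)
      rd_out (B3) = rd_gen (B3) | (rd_in (B3) & ~rd_kill (B3))

   so both D1 and D2 reach the start of B3 unless B3 redefines the
   register and thereby kills them.  */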


/* Calculate reaching uses for each basic block within BLOCKS, i.e.,
   the uses that are live at the start of a basic block.  */
static void
df_ru_global_compute (df, blocks)
     struct df *df;
     bitmap blocks;
{
  int i;
  basic_block bb;
  sbitmap worklist;

  worklist = sbitmap_alloc (n_basic_blocks);
  sbitmap_zero (worklist);

  EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
    {
      SET_BIT (worklist, i);
    });

  /* We assume that only the basic blocks in WORKLIST have been
     modified.  */
  FOR_EACH_BB_IN_SBITMAP (worklist, 0, bb,
    {
      struct bb_info *bb_info = DF_BB_INFO (df, bb);

      bitmap_copy (bb_info->ru_in, bb_info->ru_gen);
    });


  while ((i = df_visit_next_rts (df, worklist)) >= 0)
    {
      struct bb_info *bb_info;
      edge e;
      int changed;

      /* Remove this block from the worklist.  */
      RESET_BIT (worklist, i);

      bb = BASIC_BLOCK (i);
      bb_info = DF_BB_INFO (df, bb);

      /* Calculate union of successor ins.  */
      bitmap_zero (bb_info->ru_out);
      for (e = bb->succ; e != 0; e = e->succ_next)
	{
	  struct bb_info *succ_refs = DF_BB_INFO (df, e->dest);

	  if (e->dest == EXIT_BLOCK_PTR)
	    continue;

	  bitmap_a_or_b (bb_info->ru_out, bb_info->ru_out,
			 succ_refs->ru_in);
	}

      /* RU_IN is the set of uses that are live at the start of the
	 BB.  These are the uses that are either generated within the
	 BB (RU_GEN) or are live at the end (RU_OUT) and are not uses
	 killed by defs within the BB (RU_KILL).  */
      changed = bitmap_union_of_diff (bb_info->ru_in, bb_info->ru_gen,
				      bb_info->ru_out, bb_info->ru_kill);

      if (changed)
	{
	  /* Add each of this block's predecessors to the worklist.  */
	  for (e = bb->pred; e != 0; e = e->pred_next)
	    {
	      if (e->src == ENTRY_BLOCK_PTR)
		continue;

	      SET_BIT (worklist, e->src->index);
	    }
	}
    }

  sbitmap_free (worklist);
}


/* Calculate live registers for each basic block within BLOCKS.  */
static void
df_lr_global_compute (df, blocks)
     struct df *df;
     bitmap blocks;
{
  int i;
  basic_block bb;
  bitmap worklist;

  worklist = BITMAP_XMALLOC ();
  bitmap_copy (worklist, blocks);

  /* We assume that only the basic blocks in WORKLIST have been
     modified.  */
  FOR_EACH_BB_IN_BITMAP (worklist, 0, bb,
    {
      struct bb_info *bb_info = DF_BB_INFO (df, bb);

      bitmap_copy (bb_info->lr_in, bb_info->lr_use);
    });

  while ((i = bitmap_last_set_bit (worklist)) >= 0)
    {
      struct bb_info *bb_info;
      edge e;
      int changed;

      /* Remove this block from the worklist.  */
      bitmap_clear_bit (worklist, i);

      bb = BASIC_BLOCK (i);
      bb_info = DF_BB_INFO (df, bb);

      /* Calculate union of successor ins.  */
      bitmap_zero (bb_info->lr_out);
      for (e = bb->succ; e != 0; e = e->succ_next)
	{
	  struct bb_info *succ_refs = DF_BB_INFO (df, e->dest);

	  if (e->dest == EXIT_BLOCK_PTR)
	    continue;

	  bitmap_a_or_b (bb_info->lr_out, bb_info->lr_out,
			 succ_refs->lr_in);
	}

      /* LR_IN is the set of uses that are live at the start of the
	 BB.  These are the uses that are either generated by uses
	 (LR_USE) within the BB or are live at the end (LR_OUT)
	 and are not killed by defs within the BB (LR_DEF).  */
      changed = bitmap_union_of_diff (bb_info->lr_in, bb_info->lr_use,
				      bb_info->lr_out, bb_info->lr_def);

      if (changed)
	{
	  /* Add each of this block's predecessors to the worklist.  */
	  for (e = bb->pred; e != 0; e = e->pred_next)
	    {
	      if (e->src == ENTRY_BLOCK_PTR)
		continue;

	      bitmap_set_bit (worklist, e->src->index);
	    }
	}
    }
  BITMAP_XFREE (worklist);
}


/* Compute local reaching def info for basic block BB.  */
static void
df_bb_rd_local_compute (df, bb)
     struct df *df;
     basic_block bb;
{
  struct bb_info *bb_info = DF_BB_INFO (df, bb);
  rtx insn;

  for (insn = bb->head; insn && insn != NEXT_INSN (bb->end);
       insn = NEXT_INSN (insn))
    {
      unsigned int uid = INSN_UID (insn);
      struct df_link *def_link;

      if (! INSN_P (insn))
	continue;

      for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next)
	{
	  struct ref *def = def_link->ref;
	  unsigned int regno = DF_REF_REGNO (def);
	  struct df_link *def2_link;

	  for (def2_link = df->regs[regno].defs; def2_link;
	       def2_link = def2_link->next)
	    {
	      struct ref *def2 = def2_link->ref;

	      /* Add all defs of this reg to the set of kills.  This
		 is greedy since many of these defs will not actually
		 be killed by this BB but it keeps things a lot
		 simpler.  */
	      bitmap_set_bit (bb_info->rd_kill, DF_REF_ID (def2));

	      /* Zap from the set of gens for this BB.  */
	      bitmap_clear_bit (bb_info->rd_gen, DF_REF_ID (def2));
	    }

	  bitmap_set_bit (bb_info->rd_gen, DF_REF_ID (def));
	}
    }

  bb_info->rd_valid = 1;
}


/* Compute local reaching def info for each basic block within BLOCKS.  */
static void
df_rd_local_compute (df, blocks)
     struct df *df;
     bitmap blocks;
{
  basic_block bb;

  FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
    {
      df_bb_rd_local_compute (df, bb);
    });
}


/* Compute local reaching use (upward exposed use) info for basic
   block BB.  */
static void
df_bb_ru_local_compute (df, bb)
     struct df *df;
     basic_block bb;
{
  /* This is much more tricky than computing reaching defs.  With
     reaching defs, defs get killed by other defs.  With upwards
     exposed uses, uses get killed by defs with the same regno.  */

  struct bb_info *bb_info = DF_BB_INFO (df, bb);
  rtx insn;

  for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
       insn = PREV_INSN (insn))
    {
      unsigned int uid = INSN_UID (insn);
      struct df_link *def_link;
      struct df_link *use_link;

      if (! INSN_P (insn))
	continue;

      for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next)
	{
	  struct ref *def = def_link->ref;
	  unsigned int dregno = DF_REF_REGNO (def);

	  for (use_link = df->regs[dregno].uses; use_link;
	       use_link = use_link->next)
	    {
	      struct ref *use = use_link->ref;

	      /* Add all uses of this reg to the set of kills.  This
		 is greedy since many of these uses will not actually
		 be killed by this BB but it keeps things a lot
		 simpler.  */
	      bitmap_set_bit (bb_info->ru_kill, DF_REF_ID (use));

	      /* Zap from the set of gens for this BB.  */
	      bitmap_clear_bit (bb_info->ru_gen, DF_REF_ID (use));
	    }
	}

      for (use_link = df->insns[uid].uses; use_link; use_link = use_link->next)
	{
	  struct ref *use = use_link->ref;
	  /* Add use to set of gens in this BB.  */
	  bitmap_set_bit (bb_info->ru_gen, DF_REF_ID (use));
	}
    }
  bb_info->ru_valid = 1;
}
1977
1978
1979 /* Compute local reaching use (upward exposed use) info for each basic
1980 block within BLOCKS. */
1981 static void
1982 df_ru_local_compute (df, blocks)
1983 struct df *df;
1984 bitmap blocks;
1985 {
1986 basic_block bb;
1987
1988 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
1989 {
1990 df_bb_ru_local_compute (df, bb);
1991 });
1992 }
1993
1994
1995 /* Compute local live variable info for basic block BB. */
1996 static void
1997 df_bb_lr_local_compute (df, bb)
1998 struct df *df;
1999 basic_block bb;
2000 {
2001 struct bb_info *bb_info = DF_BB_INFO (df, bb);
2002 rtx insn;
2003
2004 for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
2005 insn = PREV_INSN (insn))
2006 {
2007 unsigned int uid = INSN_UID (insn);
2008 struct df_link *link;
2009
2010 if (! INSN_P (insn))
2011 continue;
2012
2013 for (link = df->insns[uid].defs; link; link = link->next)
2014 {
2015 struct ref *def = link->ref;
2016 unsigned int dregno = DF_REF_REGNO (def);
2017
2018 /* Add def to set of defs in this BB. */
2019 bitmap_set_bit (bb_info->lr_def, dregno);
2020
2021 bitmap_clear_bit (bb_info->lr_use, dregno);
2022 }
2023
2024 for (link = df->insns[uid].uses; link; link = link->next)
2025 {
2026 struct ref *use = link->ref;
2027 /* Add use to set of uses in this BB. */
2028 bitmap_set_bit (bb_info->lr_use, DF_REF_REGNO (use));
2029 }
2030 }
2031 bb_info->lr_valid = 1;
2032 }
2033
2034
2035 /* Compute local live variable info for each basic block within BLOCKS. */
2036 static void
2037 df_lr_local_compute (df, blocks)
2038 struct df *df;
2039 bitmap blocks;
2040 {
2041 basic_block bb;
2042
2043 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
2044 {
2045 df_bb_lr_local_compute (df, bb);
2046 });
2047 }
2048
2049
2050 /* Compute register info: lifetime, bb, and number of defs and uses
2051 for basic block BB. */
2052 static void
2053 df_bb_reg_info_compute (df, bb, live)
2054 struct df *df;
2055 basic_block bb;
2056 bitmap live;
2057 {
2058 struct reg_info *reg_info = df->regs;
2059 struct bb_info *bb_info = DF_BB_INFO (df, bb);
2060 rtx insn;
2061
2062 bitmap_copy (live, bb_info->lr_out);
2063
2064 for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
2065 insn = PREV_INSN (insn))
2066 {
2067 unsigned int uid = INSN_UID (insn);
2068 unsigned int regno;
2069 struct df_link *link;
2070
2071 if (! INSN_P (insn))
2072 continue;
2073
2074 for (link = df->insns[uid].defs; link; link = link->next)
2075 {
2076 struct ref *def = link->ref;
2077 unsigned int dregno = DF_REF_REGNO (def);
2078
2079 /* Kill this register. */
2080 bitmap_clear_bit (live, dregno);
2081 reg_info[dregno].n_defs++;
2082 }
2083
2084 for (link = df->insns[uid].uses; link; link = link->next)
2085 {
2086 struct ref *use = link->ref;
2087 unsigned int uregno = DF_REF_REGNO (use);
2088
2089 /* This register is now live. */
2090 bitmap_set_bit (live, uregno);
2091 reg_info[uregno].n_uses++;
2092 }
2093
2094 /* Increment lifetimes of all live registers. */
2095 EXECUTE_IF_SET_IN_BITMAP (live, 0, regno,
2096 {
2097 reg_info[regno].lifetime++;
2098 });
2099 }
2100 }
2101
2102
2103 /* Compute register info: lifetime, bb, and number of defs and uses. */
2104 static void
2105 df_reg_info_compute (df, blocks)
2106 struct df *df;
2107 bitmap blocks;
2108 {
2109 basic_block bb;
2110 bitmap live;
2111
2112 live = BITMAP_XMALLOC ();
2113
2114 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
2115 {
2116 df_bb_reg_info_compute (df, bb, live);
2117 });
2118
2119 BITMAP_XFREE (live);
2120 }
2121
2122
2123 /* Assign LUIDs for BB. */
2124 static int
2125 df_bb_luids_set (df, bb)
2126 struct df *df;
2127 basic_block bb;
2128 {
2129 rtx insn;
2130 int luid = 0;
2131
2132 /* The LUIDs are monotonically increasing for each basic block. */
2133
2134 for (insn = bb->head; ; insn = NEXT_INSN (insn))
2135 {
      if (INSN_P (insn))
	DF_INSN_LUID (df, insn) = luid++;
      else
	DF_INSN_LUID (df, insn) = luid;
2139
2140 if (insn == bb->end)
2141 break;
2142 }
2143 return luid;
2144 }
2145
2146
2147 /* Assign LUIDs for each basic block within BLOCKS. */
2148 static int
2149 df_luids_set (df, blocks)
2150 struct df *df;
2151 bitmap blocks;
2152 {
2153 basic_block bb;
2154 int total = 0;
2155
2156 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
2157 {
2158 total += df_bb_luids_set (df, bb);
2159 });
2160 return total;
2161 }
2162
2163
2164 /* Perform dataflow analysis using existing DF structure for blocks
2165 within BLOCKS. If BLOCKS is zero, use all basic blocks in the CFG. */
2166 static void
2167 df_analyse_1 (df, blocks, flags, update)
2168 struct df *df;
2169 bitmap blocks;
2170 int flags;
2171 int update;
2172 {
2173 int aflags;
2174 int dflags;
2175
2176 dflags = 0;
2177 aflags = flags;
2178 if (flags & DF_UD_CHAIN)
2179 aflags |= DF_RD | DF_RD_CHAIN;
2180
2181 if (flags & DF_DU_CHAIN)
2182 aflags |= DF_RU;
2183
2184 if (flags & DF_RU)
2185 aflags |= DF_RU_CHAIN;
2186
2187 if (flags & DF_REG_INFO)
2188 aflags |= DF_LR;
2189
2190 if (! blocks)
2191 blocks = df->all_blocks;
2192
2193 df->flags = flags;
2194 if (update)
2195 {
2196 df_refs_update (df);
2197 /* More fine grained incremental dataflow analysis would be
2198 nice. For now recompute the whole shebang for the
2199 modified blocks. */
2200 #if 0
2201 df_refs_unlink (df, blocks);
2202 #endif
2203 /* All the def-use, use-def chains can be potentially
2204 modified by changes in one block. The size of the
2205 bitmaps can also change. */
2206 }
2207 else
2208 {
2209 /* Scan the function for all register defs and uses. */
2210 df_refs_queue (df);
2211 df_refs_record (df, blocks);
2212
2213 /* Link all the new defs and uses to the insns. */
2214 df_refs_process (df);
2215 }
2216
  /* Allocate the bitmaps now that the total number of defs and uses
     is known.  If the number of defs or uses has changed, then these
     bitmaps need to be reallocated.  */
2220 df_bitmaps_alloc (df, aflags);
2221
2222 /* Set the LUIDs for each specified basic block. */
2223 df_luids_set (df, blocks);
2224
2225 /* Recreate reg-def and reg-use chains from scratch so that first
2226 def is at the head of the reg-def chain and the last use is at
2227 the head of the reg-use chain. This is only important for
2228 regs local to a basic block as it speeds up searching. */
2229 if (aflags & DF_RD_CHAIN)
2230 {
2231 df_reg_def_chain_create (df, blocks);
2232 }
2233
2234 if (aflags & DF_RU_CHAIN)
2235 {
2236 df_reg_use_chain_create (df, blocks);
2237 }
2238
  df->dfs_order = xmalloc (sizeof (int) * n_basic_blocks);
  df->rc_order = xmalloc (sizeof (int) * n_basic_blocks);
  df->rts_order = xmalloc (sizeof (int) * n_basic_blocks);
2242
2243 flow_depth_first_order_compute (df->dfs_order, df->rc_order);
2244 flow_reverse_top_sort_order_compute (df->rts_order);
2245 if (aflags & DF_RD)
2246 {
2247 /* Compute the sets of gens and kills for the defs of each bb. */
2248 df_rd_local_compute (df, df->flags & DF_RD ? blocks : df->all_blocks);
2249
2250 /* Compute the global reaching definitions. */
2251 df_rd_global_compute (df, df->all_blocks);
2252 }
2253
2254 if (aflags & DF_UD_CHAIN)
2255 {
2256 /* Create use-def chains. */
2257 df_ud_chain_create (df, df->all_blocks);
2258
2259 if (! (flags & DF_RD))
2260 dflags |= DF_RD;
2261 }
2262
2263 if (aflags & DF_RU)
2264 {
2265 /* Compute the sets of gens and kills for the upwards exposed
2266 uses in each bb. */
2267 df_ru_local_compute (df, df->flags & DF_RU ? blocks : df->all_blocks);
2268
2269 /* Compute the global reaching uses. */
2270 df_ru_global_compute (df, df->all_blocks);
2271 }
2272
2273 if (aflags & DF_DU_CHAIN)
2274 {
2275 /* Create def-use chains. */
2276 df_du_chain_create (df, df->all_blocks);
2277
2278 if (! (flags & DF_RU))
2279 dflags |= DF_RU;
2280 }
2281
2282 /* Free up bitmaps that are no longer required. */
2283 if (dflags)
2284 df_bitmaps_free (df, dflags);
2285
2286 if (aflags & DF_LR)
2287 {
2288 /* Compute the sets of defs and uses of live variables. */
2289 df_lr_local_compute (df, df->flags & DF_LR ? blocks : df->all_blocks);
2290
2291 /* Compute the global live variables. */
2292 df_lr_global_compute (df, df->all_blocks);
2293 }
2294
2295 if (aflags & DF_REG_INFO)
2296 {
2297 df_reg_info_compute (df, df->all_blocks);
2298 }
2299 free (df->dfs_order);
2300 free (df->rc_order);
2301 free (df->rts_order);
2302 }
2303
2304
2305 /* Initialise dataflow analysis. */
2306 struct df *
2307 df_init ()
2308 {
2309 struct df *df;
2310
2311 df = xcalloc (1, sizeof (struct df));
2312
2313 /* Squirrel away a global for debugging. */
2314 ddf = df;
2315
2316 return df;
2317 }
2318
2319
2320 /* Start queuing refs. */
2321 static int
2322 df_refs_queue (df)
2323 struct df *df;
2324 {
2325 df->def_id_save = df->def_id;
2326 df->use_id_save = df->use_id;
2327 /* ???? Perhaps we should save current obstack state so that we can
2328 unwind it. */
2329 return 0;
2330 }
2331
2332
2333 /* Process queued refs. */
2334 static int
2335 df_refs_process (df)
2336 struct df *df;
2337 {
2338 unsigned int i;
2339
2340 /* Build new insn-def chains. */
2341 for (i = df->def_id_save; i != df->def_id; i++)
2342 {
2343 struct ref *def = df->defs[i];
2344 unsigned int uid = DF_REF_INSN_UID (def);
2345
2346 /* Add def to head of def list for INSN. */
2347 df->insns[uid].defs
2348 = df_link_create (def, df->insns[uid].defs);
2349 }
2350
2351 /* Build new insn-use chains. */
2352 for (i = df->use_id_save; i != df->use_id; i++)
2353 {
2354 struct ref *use = df->uses[i];
2355 unsigned int uid = DF_REF_INSN_UID (use);
2356
2357 /* Add use to head of use list for INSN. */
2358 df->insns[uid].uses
2359 = df_link_create (use, df->insns[uid].uses);
2360 }
2361 return 0;
2362 }
2363
2364
2365 /* Update refs for basic block BB. */
2366 static int
2367 df_bb_refs_update (df, bb)
2368 struct df *df;
2369 basic_block bb;
2370 {
2371 rtx insn;
2372 int count = 0;
2373
2374 /* While we have to scan the chain of insns for this BB, we don't
2375 need to allocate and queue a long chain of BB/INSN pairs. Using
2376 a bitmap for insns_modified saves memory and avoids queuing
2377 duplicates. */
2378
2379 for (insn = bb->head; ; insn = NEXT_INSN (insn))
2380 {
2381 unsigned int uid;
2382
2383 uid = INSN_UID (insn);
2384
2385 if (bitmap_bit_p (df->insns_modified, uid))
2386 {
2387 /* Delete any allocated refs of this insn. MPH, FIXME. */
2388 df_insn_refs_unlink (df, bb, insn);
2389
2390 /* Scan the insn for refs. */
2391 df_insn_refs_record (df, bb, insn);

	  bitmap_clear_bit (df->insns_modified, uid);
2395 count++;
2396 }
2397 if (insn == bb->end)
2398 break;
2399 }
2400 return count;
2401 }
2402
2403
2404 /* Process all the modified/deleted insns that were queued. */
2405 static int
2406 df_refs_update (df)
2407 struct df *df;
2408 {
2409 basic_block bb;
2410 int count = 0;
2411
2412 if ((unsigned int)max_reg_num () >= df->reg_size)
2413 df_reg_table_realloc (df, 0);
2414
2415 df_refs_queue (df);
2416
2417 FOR_EACH_BB_IN_BITMAP (df->bbs_modified, 0, bb,
2418 {
2419 count += df_bb_refs_update (df, bb);
2420 });
2421
2422 df_refs_process (df);
2423 return count;
2424 }
2425
2426
2427 /* Return non-zero if any of the requested blocks in the bitmap
2428 BLOCKS have been modified. */
2429 static int
2430 df_modified_p (df, blocks)
2431 struct df *df;
2432 bitmap blocks;
2433 {
2434 unsigned int j;
2435 int update = 0;
2436
2437 for (j = 0; j < df->n_bbs; j++)
2438 if (bitmap_bit_p (df->bbs_modified, j)
2439 && (! blocks || (blocks == (bitmap) -1) || bitmap_bit_p (blocks, j)))
2440 {
2441 update = 1;
2442 break;
2443 }
2444
2445 return update;
2446 }
2447
2448
2449 /* Analyse dataflow info for the basic blocks specified by the bitmap
2450 BLOCKS, or for the whole CFG if BLOCKS is zero, or just for the
2451 modified blocks if BLOCKS is -1. */
2452 int
2453 df_analyse (df, blocks, flags)
2454 struct df *df;
2455 bitmap blocks;
2456 int flags;
2457 {
2458 int update;
2459
  /* We could handle additional basic blocks being created by
     rescanning everything from scratch; for now we just abort.  */
2462 if (df->n_bbs && df->n_bbs != (unsigned int)n_basic_blocks)
2463 abort ();
2464
2465 update = df_modified_p (df, blocks);
2466 if (update || (flags != df->flags))
2467 {
2468 if (! blocks)
2469 {
2470 if (df->n_bbs)
2471 {
2472 /* Recompute everything from scratch. */
2473 df_free (df);
2474 }
2475 /* Allocate and initialise data structures. */
2476 df_alloc (df, max_reg_num ());
2477 df_analyse_1 (df, 0, flags, 0);
2478 update = 1;
2479 }
2480 else
2481 {
2482 if (blocks == (bitmap) -1)
2483 blocks = df->bbs_modified;
2484
2485 if (! df->n_bbs)
2486 abort ();
2487
2488 df_analyse_1 (df, blocks, flags, 1);
2489 bitmap_zero (df->bbs_modified);
2490 }
2491 }
2492 return update;
2493 }
2494
2495
2496 /* Free all the dataflow info and the DF structure. */
2497 void
2498 df_finish (df)
2499 struct df *df;
2500 {
2501 df_free (df);
2502 free (df);
2503 }
2504
2505
2506 /* Unlink INSN from its reference information. */
2507 static void
2508 df_insn_refs_unlink (df, bb, insn)
2509 struct df *df;
2510 basic_block bb ATTRIBUTE_UNUSED;
2511 rtx insn;
2512 {
2513 struct df_link *link;
2514 unsigned int uid;
2515
2516 uid = INSN_UID (insn);
2517
2518 /* Unlink all refs defined by this insn. */
2519 for (link = df->insns[uid].defs; link; link = link->next)
2520 df_def_unlink (df, link->ref);
2521
2522 /* Unlink all refs used by this insn. */
2523 for (link = df->insns[uid].uses; link; link = link->next)
2524 df_use_unlink (df, link->ref);
2525
2526 df->insns[uid].defs = 0;
2527 df->insns[uid].uses = 0;
2528 }
2529
2530
2531 #if 0
2532 /* Unlink all the insns within BB from their reference information. */
2533 static void
2534 df_bb_refs_unlink (df, bb)
2535 struct df *df;
2536 basic_block bb;
2537 {
2538 rtx insn;
2539
2540 /* Scan the block an insn at a time from beginning to end. */
2541 for (insn = bb->head; ; insn = NEXT_INSN (insn))
2542 {
2543 if (INSN_P (insn))
2544 {
2545 /* Unlink refs for INSN. */
2546 df_insn_refs_unlink (df, bb, insn);
2547 }
2548 if (insn == bb->end)
2549 break;
2550 }
2551 }
2552
2553
2554 /* Unlink all the refs in the basic blocks specified by BLOCKS.
2555 Not currently used. */
2556 static void
2557 df_refs_unlink (df, blocks)
2558 struct df *df;
2559 bitmap blocks;
2560 {
2561 basic_block bb;
2562
2563 if (blocks)
2564 {
2565 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
2566 {
2567 df_bb_refs_unlink (df, bb);
2568 });
2569 }
2570 else
2571 {
2572 FOR_ALL_BBS (bb,
2573 {
2574 df_bb_refs_unlink (df, bb);
2575 });
2576 }
2577 }
2578 #endif
2579 \f
2580 /* Functions to modify insns. */
2581
2582
2583 /* Delete INSN and all its reference information. */
2584 rtx
2585 df_insn_delete (df, bb, insn)
2586 struct df *df;
     basic_block bb;
2588 rtx insn;
2589 {
2590 /* If the insn is a jump, we should perhaps call delete_insn to
2591 handle the JUMP_LABEL? */
2592
2593 /* We should not be deleting the NOTE_INSN_BASIC_BLOCK or label. */
2594 if (insn == bb->head)
2595 abort ();
2596
2597 /* Delete the insn. */
2598 delete_insn (insn);
2599
2600 df_insn_modify (df, bb, insn);
2601
2602 return NEXT_INSN (insn);
2603 }
2604
2605
2606 /* Mark that INSN within BB may have changed (created/modified/deleted).
2607 This may be called multiple times for the same insn. There is no
2608 harm calling this function if the insn wasn't changed; it will just
2609 slow down the rescanning of refs. */
2610 void
2611 df_insn_modify (df, bb, insn)
2612 struct df *df;
2613 basic_block bb;
2614 rtx insn;
2615 {
2616 unsigned int uid;
2617
2618 uid = INSN_UID (insn);
2619
2620 if (uid >= df->insn_size)
2621 df_insn_table_realloc (df, 0);
2622
2623 bitmap_set_bit (df->bbs_modified, bb->index);
2624 bitmap_set_bit (df->insns_modified, uid);
2625
2626 #if 0
2627 /* For incremental updating on the fly, perhaps we could make a copy
2628 of all the refs of the original insn and turn them into
2629 anti-refs. When df_refs_update finds these anti-refs, it annihilates
2630 the original refs. If validate_change fails then these anti-refs
2631 will just get ignored. */
2633 #endif
2634 }
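

/* For example, a pass that rewrites an insn in place can keep the
   dataflow info consistent by marking the insn modified and then
   re-analysing the modified blocks (a sketch):

	df_insn_modify (df, bb, insn);
	...
	df_analyse (df, (bitmap) -1, DF_ALL);  */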
2635
2636
2637 typedef struct replace_args
2638 {
2639 rtx match;
2640 rtx replacement;
2641 rtx insn;
2642 int modified;
2643 } replace_args;
2644
2645
2646 /* Replace mem pointed to by PX with its associated pseudo register.
2647 DATA is actually a pointer to a structure describing the
2648 instruction currently being scanned and the MEM we are currently
2649 replacing. */
2650 static int
2651 df_rtx_mem_replace (px, data)
2652 rtx *px;
2653 void *data;
2654 {
2655 replace_args *args = (replace_args *) data;
2656 rtx mem = *px;
2657
2658 if (mem == NULL_RTX)
2659 return 0;
2660
2661 switch (GET_CODE (mem))
2662 {
2663 case MEM:
2664 break;
2665
2666 case CONST_DOUBLE:
2667 /* We're not interested in the MEM associated with a
2668 CONST_DOUBLE, so there's no need to traverse into one. */
2669 return -1;
2670
2671 default:
2672 /* This is not a MEM. */
2673 return 0;
2674 }
2675
2676 if (!rtx_equal_p (args->match, mem))
2677 /* This is not the MEM we are currently replacing. */
2678 return 0;
2679
2680 /* Actually replace the MEM. */
2681 validate_change (args->insn, px, args->replacement, 1);
2682 args->modified++;
2683
2684 return 0;
2685 }
2686
2687
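/* Replace all occurrences of MEM within INSN in BB with pseudo
   register REG.  The replacements are queued as a group with
   validate_change, and INSN is marked modified if anything matched.
   Returns non-zero if a replacement was queued.  */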
2688 int
2689 df_insn_mem_replace (df, bb, insn, mem, reg)
2690 struct df *df;
2691 basic_block bb;
2692 rtx insn;
2693 rtx mem;
2694 rtx reg;
2695 {
2696 replace_args args;
2697
2698 args.insn = insn;
2699 args.match = mem;
2700 args.replacement = reg;
2701 args.modified = 0;
2702
  /* Search and replace all matching mems within insn.  */
2704 for_each_rtx (&insn, df_rtx_mem_replace, &args);
2705
2706 if (args.modified)
2707 df_insn_modify (df, bb, insn);
2708
2709 /* ???? FIXME. We may have a new def or one or more new uses of REG
2710 in INSN. REG should be a new pseudo so it won't affect the
2711 dataflow information that we currently have. We should add
2712 the new uses and defs to INSN and then recreate the chains
2713 when df_analyse is called. */
2714 return args.modified;
2715 }
2716
2717
2718 /* Replace one register with another. Called through for_each_rtx; PX
2719 points to the rtx being scanned. DATA is actually a pointer to a
2720 structure of arguments. */
2721 static int
2722 df_rtx_reg_replace (px, data)
2723 rtx *px;
2724 void *data;
2725 {
2726 rtx x = *px;
2727 replace_args *args = (replace_args *) data;
2728
2729 if (x == NULL_RTX)
2730 return 0;
2731
2732 if (x == args->match)
2733 {
2734 validate_change (args->insn, px, args->replacement, 1);
2735 args->modified++;
2736 }
2737
2738 return 0;
2739 }
2740
2741
2742 /* Replace the reg within every ref on CHAIN that is within the set
2743 BLOCKS of basic blocks with NEWREG. Also update the regs within
2744 REG_NOTES. */
2745 void
2746 df_refs_reg_replace (df, blocks, chain, oldreg, newreg)
2747 struct df *df;
2748 bitmap blocks;
2749 struct df_link *chain;
2750 rtx oldreg;
2751 rtx newreg;
2752 {
2753 struct df_link *link;
2754 replace_args args;
2755
2756 if (! blocks)
2757 blocks = df->all_blocks;
2758
2759 args.match = oldreg;
2760 args.replacement = newreg;
2761 args.modified = 0;
2762
2763 for (link = chain; link; link = link->next)
2764 {
2765 struct ref *ref = link->ref;
2766 rtx insn = DF_REF_INSN (ref);
2767
2768 if (! INSN_P (insn))
2769 continue;
2770
2771 if (bitmap_bit_p (blocks, DF_REF_BBNO (ref)))
2772 {
2773 df_ref_reg_replace (df, ref, oldreg, newreg);
2774
2775 /* Replace occurrences of the reg within the REG_NOTES. */
2776 if ((! link->next || DF_REF_INSN (ref)
2777 != DF_REF_INSN (link->next->ref))
2778 && REG_NOTES (insn))
2779 {
2780 args.insn = insn;
2781 for_each_rtx (&REG_NOTES (insn), df_rtx_reg_replace, &args);
2782 }
2783 }
2784 else
2785 {
2786 /* Temporary check to ensure that we have a grip on which
2787 regs should be replaced. */
2788 abort ();
2789 }
2790 }
2791 }
2792
2793
2794 /* Replace all occurrences of register OLDREG with register NEWREG in
2795 blocks defined by bitmap BLOCKS. This also replaces occurrences of
2796 OLDREG in the REG_NOTES but only for insns containing OLDREG. This
2797 routine expects the reg-use and reg-def chains to be valid. */
2798 int
2799 df_reg_replace (df, blocks, oldreg, newreg)
2800 struct df *df;
2801 bitmap blocks;
2802 rtx oldreg;
2803 rtx newreg;
2804 {
2805 unsigned int oldregno = REGNO (oldreg);
2806
2807 df_refs_reg_replace (df, blocks, df->regs[oldregno].defs, oldreg, newreg);
2808 df_refs_reg_replace (df, blocks, df->regs[oldregno].uses, oldreg, newreg);
2809 return 1;
2810 }
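

/* For example, renaming pseudo OLDREG to NEWREG throughout the
   function might look like this (a sketch; it assumes the reg-def
   and reg-use chains are valid, and commits the replacements queued
   by validate_change):

	df_reg_replace (df, 0, oldreg, newreg);
	if (! apply_change_group ())
	  abort ();
	df_analyse (df, (bitmap) -1, DF_ALL);  */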
2811
2812
2813 /* Try replacing the reg within REF with NEWREG. Do not modify
2814 def-use/use-def chains. */
2815 int
2816 df_ref_reg_replace (df, ref, oldreg, newreg)
2817 struct df *df;
2818 struct ref *ref;
2819 rtx oldreg;
2820 rtx newreg;
2821 {
  /* Check whether the insn has been deleted (converted into a NOTE).
     If so, ignore it.  */
2824 if (! INSN_P (DF_REF_INSN (ref)))
2825 return 0;
2826
2827 if (oldreg && oldreg != DF_REF_REG (ref))
2828 abort ();
2829
2830 if (! validate_change (DF_REF_INSN (ref), DF_REF_LOC (ref), newreg, 1))
2831 return 0;
2832
2833 df_insn_modify (df, DF_REF_BB (ref), DF_REF_INSN (ref));
2834 return 1;
2835 }
2836
2837
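/* Remove the last use of REGNO within USE_INSN and move the first
   def of REGNO within DEF_INSN to USE_INSN.  Returns the def that
   was moved, or zero if the def or use could not be found.  */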
struct ref *
2839 df_bb_def_use_swap (df, bb, def_insn, use_insn, regno)
2840 struct df * df;
2841 basic_block bb;
2842 rtx def_insn;
2843 rtx use_insn;
2844 unsigned int regno;
2845 {
2846 struct ref *def;
2847 struct ref *use;
2848 int def_uid;
2849 int use_uid;
2850 struct df_link *link;
2851
2852 def = df_bb_insn_regno_first_def_find (df, bb, def_insn, regno);
2853 if (! def)
2854 return 0;
2855
2856 use = df_bb_insn_regno_last_use_find (df, bb, use_insn, regno);
2857 if (! use)
2858 return 0;
2859
2860 /* The USE no longer exists. */
2861 use_uid = INSN_UID (use_insn);
2862 df_use_unlink (df, use);
2863 df_ref_unlink (&df->insns[use_uid].uses, use);
2864
2865 /* The DEF requires shifting so remove it from DEF_INSN
2866 and add it to USE_INSN by reusing LINK. */
2867 def_uid = INSN_UID (def_insn);
2868 link = df_ref_unlink (&df->insns[def_uid].defs, def);
2869 link->ref = def;
2870 link->next = df->insns[use_uid].defs;
2871 df->insns[use_uid].defs = link;
2872
2873 #if 0
2874 link = df_ref_unlink (&df->regs[regno].defs, def);
2875 link->ref = def;
2876 link->next = df->regs[regno].defs;
  df->regs[regno].defs = link;
2878 #endif
2879
2880 DF_REF_INSN (def) = use_insn;
2881 return def;
2882 }
2883
2884
/* Mark the insns from FIRST_INSN to LAST_INSN inclusive as modified.
   All new insns must be processed by this routine.  */
2887 static void
2888 df_insns_modify (df, bb, first_insn, last_insn)
2889 struct df *df;
2890 basic_block bb;
2891 rtx first_insn;
2892 rtx last_insn;
2893 {
2894 rtx insn;
2895
2896 for (insn = first_insn; ; insn = NEXT_INSN (insn))
2897 {
2898 unsigned int uid;
2899
2900 /* A non-const call should not have slipped through the net. If
2901 it does, we need to create a new basic block. Ouch. The
2902 same applies for a label. */
2903 if ((GET_CODE (insn) == CALL_INSN
2904 && ! CONST_OR_PURE_CALL_P (insn))
2905 || GET_CODE (insn) == CODE_LABEL)
2906 abort ();
2907
2908 uid = INSN_UID (insn);
2909
2910 if (uid >= df->insn_size)
2911 df_insn_table_realloc (df, 0);
2912
2913 df_insn_modify (df, bb, insn);
2914
2915 if (insn == last_insn)
2916 break;
2917 }
2918 }
2919
2920
2921 /* Emit PATTERN before INSN within BB. */
2922 rtx
2923 df_pattern_emit_before (df, pattern, bb, insn)
     struct df *df;
2925 rtx pattern;
2926 basic_block bb;
2927 rtx insn;
2928 {
2929 rtx ret_insn;
2930 rtx prev_insn = PREV_INSN (insn);
2931
2932 /* We should not be inserting before the start of the block. */
2933 if (insn == bb->head)
2934 abort ();
2935 ret_insn = emit_insn_before (pattern, insn);
2936 if (ret_insn == insn)
2937 return ret_insn;
2938
2939 df_insns_modify (df, bb, NEXT_INSN (prev_insn), ret_insn);
2940 return ret_insn;
2941 }
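

/* For example, hoisting a computation of SRC into a fresh pseudo
   NEW_REG just before INSN might look like this (a sketch):

	df_pattern_emit_before (df, gen_rtx_SET (VOIDmode, new_reg, src),
				bb, insn);  */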
2942
2943
2944 /* Emit PATTERN after INSN within BB. */
2945 rtx
2946 df_pattern_emit_after (df, pattern, bb, insn)
2947 struct df *df;
2948 rtx pattern;
2949 basic_block bb;
2950 rtx insn;
2951 {
2952 rtx ret_insn;
2953
2954 ret_insn = emit_insn_after (pattern, insn);
2955 if (ret_insn == insn)
2956 return ret_insn;
2957
2958 df_insns_modify (df, bb, NEXT_INSN (insn), ret_insn);
2959 return ret_insn;
2960 }
2961
2962
2963 /* Emit jump PATTERN after INSN within BB. */
2964 rtx
2965 df_jump_pattern_emit_after (df, pattern, bb, insn)
2966 struct df *df;
2967 rtx pattern;
2968 basic_block bb;
2969 rtx insn;
2970 {
2971 rtx ret_insn;
2972
2973 ret_insn = emit_jump_insn_after (pattern, insn);
2974 if (ret_insn == insn)
2975 return ret_insn;
2976
2977 df_insns_modify (df, bb, NEXT_INSN (insn), ret_insn);
2978 return ret_insn;
2979 }
2980
2981
2982 /* Move INSN within BB before BEFORE_INSN within BEFORE_BB.
2983
2984 This function should only be used to move loop invariant insns
2985 out of a loop where it has been proven that the def-use info
2986 will still be valid. */
2987 rtx
2988 df_insn_move_before (df, bb, insn, before_bb, before_insn)
2989 struct df *df;
2990 basic_block bb;
2991 rtx insn;
2992 basic_block before_bb;
2993 rtx before_insn;
2994 {
2995 struct df_link *link;
2996 unsigned int uid;
2997
2998 if (! bb)
2999 return df_pattern_emit_before (df, insn, before_bb, before_insn);
3000
3001 uid = INSN_UID (insn);
3002
3003 /* Change bb for all df defined and used by this insn. */
3004 for (link = df->insns[uid].defs; link; link = link->next)
3005 DF_REF_BB (link->ref) = before_bb;
3006 for (link = df->insns[uid].uses; link; link = link->next)
3007 DF_REF_BB (link->ref) = before_bb;
3008
3009 /* The lifetimes of the registers used in this insn will be reduced
3010 while the lifetimes of the registers defined in this insn
3011 are likely to be increased. */
3012
3013 /* ???? Perhaps all the insns moved should be stored on a list
3014 which df_analyse removes when it recalculates data flow. */
3015
3016 return emit_insn_before (insn, before_insn);
3017 }
3018 \f
3019 /* Functions to query dataflow information. */
3020
3021
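/* Return non-zero if INSN contains a def of REGNO.  */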
3022 int
3023 df_insn_regno_def_p (df, bb, insn, regno)
3024 struct df *df;
3025 basic_block bb ATTRIBUTE_UNUSED;
3026 rtx insn;
3027 unsigned int regno;
3028 {
3029 unsigned int uid;
3030 struct df_link *link;
3031
3032 uid = INSN_UID (insn);
3033
3034 for (link = df->insns[uid].defs; link; link = link->next)
3035 {
3036 struct ref *def = link->ref;
3037
3038 if (DF_REF_REGNO (def) == regno)
3039 return 1;
3040 }
3041
3042 return 0;
3043 }
3044
3045
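/* Return non-zero if DEF is the only def that reaches each of its
   uses, i.e., DEF dominates all its uses.  */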
3046 static int
3047 df_def_dominates_all_uses_p (df, def)
3048 struct df *df ATTRIBUTE_UNUSED;
3049 struct ref *def;
3050 {
3051 struct df_link *du_link;
3052
3053 /* Follow def-use chain to find all the uses of this def. */
3054 for (du_link = DF_REF_CHAIN (def); du_link; du_link = du_link->next)
3055 {
3056 struct ref *use = du_link->ref;
3057 struct df_link *ud_link;
3058
3059 /* Follow use-def chain to check all the defs for this use. */
3060 for (ud_link = DF_REF_CHAIN (use); ud_link; ud_link = ud_link->next)
3061 if (ud_link->ref != def)
3062 return 0;
3063 }
3064 return 1;
3065 }
3066
3067
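/* Return non-zero if every def within INSN dominates all the
   corresponding uses.  */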
3068 int
3069 df_insn_dominates_all_uses_p (df, bb, insn)
3070 struct df *df;
3071 basic_block bb ATTRIBUTE_UNUSED;
3072 rtx insn;
3073 {
3074 unsigned int uid;
3075 struct df_link *link;
3076
3077 uid = INSN_UID (insn);
3078
3079 for (link = df->insns[uid].defs; link; link = link->next)
3080 {
3081 struct ref *def = link->ref;
3082
3083 if (! df_def_dominates_all_uses_p (df, def))
3084 return 0;
3085 }
3086
3087 return 1;
3088 }
3089
3090
/* Return non-zero if DEF dominates all its uses within the blocks in
   the bitmap BLOCKS.  */
3093 static int
3094 df_def_dominates_uses_p (df, def, blocks)
3095 struct df *df ATTRIBUTE_UNUSED;
3096 struct ref *def;
3097 bitmap blocks;
3098 {
3099 struct df_link *du_link;
3100
3101 /* Follow def-use chain to find all the uses of this def. */
3102 for (du_link = DF_REF_CHAIN (def); du_link; du_link = du_link->next)
3103 {
3104 struct ref *use = du_link->ref;
3105 struct df_link *ud_link;
3106
3107 /* Only worry about the uses within BLOCKS. For example,
3108 consider a register defined within a loop that is live at the
3109 loop exits. */
3110 if (bitmap_bit_p (blocks, DF_REF_BBNO (use)))
3111 {
3112 /* Follow use-def chain to check all the defs for this use. */
3113 for (ud_link = DF_REF_CHAIN (use); ud_link; ud_link = ud_link->next)
3114 if (ud_link->ref != def)
3115 return 0;
3116 }
3117 }
3118 return 1;
3119 }
3120
3121
/* Return non-zero if all the defs of INSN within BB dominate all
   the corresponding uses within BLOCKS.  */
3124 int
3125 df_insn_dominates_uses_p (df, bb, insn, blocks)
3126 struct df *df;
3127 basic_block bb ATTRIBUTE_UNUSED;
3128 rtx insn;
3129 bitmap blocks;
3130 {
3131 unsigned int uid;
3132 struct df_link *link;
3133
3134 uid = INSN_UID (insn);
3135
3136 for (link = df->insns[uid].defs; link; link = link->next)
3137 {
3138 struct ref *def = link->ref;
3139
3140 /* Only consider the defs within BLOCKS. */
3141 if (bitmap_bit_p (blocks, DF_REF_BBNO (def))
3142 && ! df_def_dominates_uses_p (df, def, blocks))
3143 return 0;
3144 }
3145 return 1;
3146 }
3147
3148
/* Return the basic block that REG is referenced in, or NULL if it
   is referenced in multiple basic blocks.  */
3151 basic_block
3152 df_regno_bb (df, regno)
3153 struct df *df;
3154 unsigned int regno;
3155 {
3156 struct df_link *defs = df->regs[regno].defs;
3157 struct df_link *uses = df->regs[regno].uses;
3158 struct ref *def = defs ? defs->ref : 0;
3159 struct ref *use = uses ? uses->ref : 0;
3160 basic_block bb_def = def ? DF_REF_BB (def) : 0;
3161 basic_block bb_use = use ? DF_REF_BB (use) : 0;
3162
3163 /* Compare blocks of first def and last use. ???? FIXME. What if
3164 the reg-def and reg-use lists are not correctly ordered. */
3165 return bb_def == bb_use ? bb_def : 0;
3166 }
3167
3168
/* Return non-zero if REG is used in multiple basic blocks.  */
3170 int
3171 df_reg_global_p (df, reg)
3172 struct df *df;
3173 rtx reg;
3174 {
  return df_regno_bb (df, REGNO (reg)) == 0;
3176 }
3177
3178
3179 /* Return total lifetime (in insns) of REG. */
3180 int
3181 df_reg_lifetime (df, reg)
3182 struct df *df;
3183 rtx reg;
3184 {
3185 return df->regs[REGNO (reg)].lifetime;
3186 }
3187
3188
3189 /* Return non-zero if REG live at start of BB. */
3190 int
3191 df_bb_reg_live_start_p (df, bb, reg)
3192 struct df *df ATTRIBUTE_UNUSED;
3193 basic_block bb;
3194 rtx reg;
3195 {
3196 struct bb_info *bb_info = DF_BB_INFO (df, bb);
3197
3198 #ifdef ENABLE_CHECKING
3199 if (! bb_info->lr_in)
3200 abort ();
3201 #endif
3202
3203 return bitmap_bit_p (bb_info->lr_in, REGNO (reg));
3204 }
3205
3206
3207 /* Return non-zero if REG live at end of BB. */
3208 int
3209 df_bb_reg_live_end_p (df, bb, reg)
3210 struct df *df ATTRIBUTE_UNUSED;
3211 basic_block bb;
3212 rtx reg;
3213 {
3214 struct bb_info *bb_info = DF_BB_INFO (df, bb);
3215
3216 #ifdef ENABLE_CHECKING
3217 if (! bb_info->lr_in)
3218 abort ();
3219 #endif
3220
3221 return bitmap_bit_p (bb_info->lr_out, REGNO (reg));
3222 }
3223
3224
/* Return -1 if the life of REG1 ends before the life of REG2 starts,
   1 if the life of REG1 starts after the life of REG2 ends, or 0 if
   the lives overlap.  */
3227 int
3228 df_bb_regs_lives_compare (df, bb, reg1, reg2)
3229 struct df *df;
3230 basic_block bb;
3231 rtx reg1;
3232 rtx reg2;
3233 {
3234 unsigned int regno1 = REGNO (reg1);
3235 unsigned int regno2 = REGNO (reg2);
3236 struct ref *def1;
3237 struct ref *use1;
3238 struct ref *def2;
3239 struct ref *use2;
3240
3241
3242 /* The regs must be local to BB. */
3243 if (df_regno_bb (df, regno1) != bb
3244 || df_regno_bb (df, regno2) != bb)
3245 abort ();
3246
3247 def2 = df_bb_regno_first_def_find (df, bb, regno2);
3248 use1 = df_bb_regno_last_use_find (df, bb, regno1);
3249
3250 if (DF_INSN_LUID (df, DF_REF_INSN (def2))
3251 > DF_INSN_LUID (df, DF_REF_INSN (use1)))
3252 return -1;
3253
3254 def1 = df_bb_regno_first_def_find (df, bb, regno1);
3255 use2 = df_bb_regno_last_use_find (df, bb, regno2);
3256
3257 if (DF_INSN_LUID (df, DF_REF_INSN (def1))
3258 > DF_INSN_LUID (df, DF_REF_INSN (use2)))
3259 return 1;
3260
3261 return 0;
3262 }
3263
3264
3265 /* Return last use of REGNO within BB. */
3266 static struct ref *
3267 df_bb_regno_last_use_find (df, bb, regno)
3268 struct df * df;
3269 basic_block bb ATTRIBUTE_UNUSED;
3270 unsigned int regno;
3271 {
3272 struct df_link *link;
3273
3274 /* This assumes that the reg-use list is ordered such that for any
3275 BB, the last use is found first. However, since the BBs are not
3276 ordered, the first use in the chain is not necessarily the last
3277 use in the function. */
3278 for (link = df->regs[regno].uses; link; link = link->next)
3279 {
3280 struct ref *use = link->ref;
3281
3282 if (DF_REF_BB (use) == bb)
3283 return use;
3284 }
3285 return 0;
3286 }
3287
3288
3289 /* Return first def of REGNO within BB. */
3290 static struct ref *
3291 df_bb_regno_first_def_find (df, bb, regno)
3292 struct df * df;
3293 basic_block bb ATTRIBUTE_UNUSED;
3294 unsigned int regno;
3295 {
3296 struct df_link *link;
3297
3298 /* This assumes that the reg-def list is ordered such that for any
3299 BB, the first def is found first. However, since the BBs are not
3300 ordered, the first def in the chain is not necessarily the first
3301 def in the function. */
3302 for (link = df->regs[regno].defs; link; link = link->next)
3303 {
3304 struct ref *def = link->ref;
3305
3306 if (DF_REF_BB (def) == bb)
3307 return def;
3308 }
3309 return 0;
3310 }
3311
3312
/* Return last use of REGNO inside INSN within BB.  */
3314 static struct ref *
3315 df_bb_insn_regno_last_use_find (df, bb, insn, regno)
3316 struct df * df;
3317 basic_block bb ATTRIBUTE_UNUSED;
3318 rtx insn;
3319 unsigned int regno;
3320 {
3321 unsigned int uid;
3322 struct df_link *link;
3323
3324 uid = INSN_UID (insn);
3325
3326 for (link = df->insns[uid].uses; link; link = link->next)
3327 {
3328 struct ref *use = link->ref;
3329
3330 if (DF_REF_REGNO (use) == regno)
3331 return use;
3332 }
3333
3334 return 0;
3335 }
3336
3337
3338 /* Return first def of REGNO inside INSN within BB. */
3339 static struct ref *
3340 df_bb_insn_regno_first_def_find (df, bb, insn, regno)
3341 struct df * df;
3342 basic_block bb ATTRIBUTE_UNUSED;
3343 rtx insn;
3344 unsigned int regno;
3345 {
3346 unsigned int uid;
3347 struct df_link *link;
3348
3349 uid = INSN_UID (insn);
3350
3351 for (link = df->insns[uid].defs; link; link = link->next)
3352 {
3353 struct ref *def = link->ref;
3354
3355 if (DF_REF_REGNO (def) == regno)
3356 return def;
3357 }
3358
3359 return 0;
3360 }
3361
3362
3363 /* Return insn using REG if the BB contains only a single
3364 use and def of REG. */
3365 rtx
3366 df_bb_single_def_use_insn_find (df, bb, insn, reg)
3367 struct df * df;
3368 basic_block bb;
3369 rtx insn;
3370 rtx reg;
3371 {
3372 struct ref *def;
3373 struct ref *use;
3374 struct df_link *du_link;
3375
3376 def = df_bb_insn_regno_first_def_find (df, bb, insn, REGNO (reg));
3377
3378 if (! def)
3379 abort ();
3380
3381 du_link = DF_REF_CHAIN (def);
3382
3383 if (! du_link)
3384 return NULL_RTX;
3385
3386 use = du_link->ref;
3387
3388 /* Check if def is dead. */
3389 if (! use)
3390 return NULL_RTX;
3391
3392 /* Check for multiple uses. */
3393 if (du_link->next)
3394 return NULL_RTX;
3395
3396 return DF_REF_INSN (use);
3397 }
3398 \f
3399 /* Functions for debugging/dumping dataflow information. */
3400
3401
/* Dump a def-use or use-def chain starting at LINK to FILE.  */
3403 static void
3404 df_chain_dump (link, file)
3405 struct df_link *link;
3406 FILE *file;
3407 {
3408 fprintf (file, "{ ");
3409 for (; link; link = link->next)
3410 {
3411 fprintf (file, "%c%d ",
3412 DF_REF_REG_DEF_P (link->ref) ? 'd' : 'u',
3413 DF_REF_ID (link->ref));
3414 }
3415 fprintf (file, "}");
3416 }
3417
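
/* Like df_chain_dump, but also print the regno of each ref.  */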
3418 static void
3419 df_chain_dump_regno (link, file)
3420 struct df_link *link;
3421 FILE *file;
3422 {
3423 fprintf (file, "{ ");
3424 for (; link; link = link->next)
3425 {
3426 fprintf (file, "%c%d(%d) ",
3427 DF_REF_REG_DEF_P (link->ref) ? 'd' : 'u',
3428 DF_REF_ID (link->ref),
3429 DF_REF_REGNO (link->ref));
3430 }
3431 fprintf (file, "}");
3432 }
3433
3434 /* Dump dataflow info. */
3435 void
3436 df_dump (df, flags, file)
3437 struct df *df;
3438 int flags;
3439 FILE *file;
3440 {
3441 unsigned int i;
3442 unsigned int j;
3443
3444 if (! df || ! file)
3445 return;
3446
3447 fprintf (file, "\nDataflow summary:\n");
3448 fprintf (file, "n_regs = %d, n_defs = %d, n_uses = %d, n_bbs = %d\n",
3449 df->n_regs, df->n_defs, df->n_uses, df->n_bbs);
3450
3451 if (flags & DF_RD)
3452 {
3453 fprintf (file, "Reaching defs:\n");
3454 for (i = 0; i < df->n_bbs; i++)
3455 {
3456 basic_block bb = BASIC_BLOCK (i);
3457 struct bb_info *bb_info = DF_BB_INFO (df, bb);
3458
3459 if (! bb_info->rd_in)
3460 continue;
3461
3462 fprintf (file, "bb %d in \t", i);
3463 dump_bitmap (file, bb_info->rd_in);
3464 fprintf (file, "bb %d gen \t", i);
3465 dump_bitmap (file, bb_info->rd_gen);
3466 fprintf (file, "bb %d kill\t", i);
3467 dump_bitmap (file, bb_info->rd_kill);
3468 fprintf (file, "bb %d out \t", i);
3469 dump_bitmap (file, bb_info->rd_out);
3470 }
3471 }
3472
3473 if (flags & DF_UD_CHAIN)
3474 {
3475 fprintf (file, "Use-def chains:\n");
3476 for (j = 0; j < df->n_defs; j++)
3477 {
3478 if (df->defs[j])
3479 {
3480 fprintf (file, "d%d bb %d luid %d insn %d reg %d ",
3481 j, DF_REF_BBNO (df->defs[j]),
3482 DF_INSN_LUID (df, DF_REF_INSN (df->defs[j])),
3483 DF_REF_INSN_UID (df->defs[j]),
3484 DF_REF_REGNO (df->defs[j]));
3485 df_chain_dump (DF_REF_CHAIN (df->defs[j]), file);
3486 fprintf (file, "\n");
3487 }
3488 }
3489 }
3490
3491 if (flags & DF_RU)
3492 {
3493 fprintf (file, "Reaching uses:\n");
3494 for (i = 0; i < df->n_bbs; i++)
3495 {
3496 basic_block bb = BASIC_BLOCK (i);
3497 struct bb_info *bb_info = DF_BB_INFO (df, bb);
3498
3499 if (! bb_info->ru_in)
3500 continue;
3501
3502 fprintf (file, "bb %d in \t", i);
3503 dump_bitmap (file, bb_info->ru_in);
3504 fprintf (file, "bb %d gen \t", i);
3505 dump_bitmap (file, bb_info->ru_gen);
3506 fprintf (file, "bb %d kill\t", i);
3507 dump_bitmap (file, bb_info->ru_kill);
3508 fprintf (file, "bb %d out \t", i);
3509 dump_bitmap (file, bb_info->ru_out);
3510 }
3511 }
3512
3513 if (flags & DF_DU_CHAIN)
3514 {
3515 fprintf (file, "Def-use chains:\n");
3516 for (j = 0; j < df->n_uses; j++)
3517 {
3518 if (df->uses[j])
3519 {
3520 fprintf (file, "u%d bb %d luid %d insn %d reg %d ",
3521 j, DF_REF_BBNO (df->uses[j]),
3522 DF_INSN_LUID (df, DF_REF_INSN (df->uses[j])),
3523 DF_REF_INSN_UID (df->uses[j]),
3524 DF_REF_REGNO (df->uses[j]));
3525 df_chain_dump (DF_REF_CHAIN (df->uses[j]), file);
3526 fprintf (file, "\n");
3527 }
3528 }
3529 }
3530
3531 if (flags & DF_LR)
3532 {
3533 fprintf (file, "Live regs:\n");
3534 for (i = 0; i < df->n_bbs; i++)
3535 {
3536 basic_block bb = BASIC_BLOCK (i);
3537 struct bb_info *bb_info = DF_BB_INFO (df, bb);
3538
3539 if (! bb_info->lr_in)
3540 continue;
3541
3542 fprintf (file, "bb %d in \t", i);
3543 dump_bitmap (file, bb_info->lr_in);
3544 fprintf (file, "bb %d use \t", i);
3545 dump_bitmap (file, bb_info->lr_use);
3546 fprintf (file, "bb %d def \t", i);
3547 dump_bitmap (file, bb_info->lr_def);
3548 fprintf (file, "bb %d out \t", i);
3549 dump_bitmap (file, bb_info->lr_out);
3550 }
3551 }
3552
3553 if (flags & (DF_REG_INFO | DF_RD_CHAIN | DF_RU_CHAIN))
3554 {
3555 struct reg_info *reg_info = df->regs;
3556
3557 fprintf (file, "Register info:\n");
3558 for (j = 0; j < df->n_regs; j++)
3559 {
3560 if (((flags & DF_REG_INFO)
3561 && (reg_info[j].n_uses || reg_info[j].n_defs))
3562 || ((flags & DF_RD_CHAIN) && reg_info[j].defs)
3563 || ((flags & DF_RU_CHAIN) && reg_info[j].uses))
3564 {
3565 fprintf (file, "reg %d", j);
3566 if ((flags & DF_RD_CHAIN) && (flags & DF_RU_CHAIN))
3567 {
3568 basic_block bb = df_regno_bb (df, j);
3569
3570 if (bb)
3571 fprintf (file, " bb %d", bb->index);
3572 else
3573 fprintf (file, " bb ?");
3574 }
3575 if (flags & DF_REG_INFO)
3576 {
3577 fprintf (file, " life %d", reg_info[j].lifetime);
3578 }
3579
3580 if ((flags & DF_REG_INFO) || (flags & DF_RD_CHAIN))
3581 {
3582 fprintf (file, " defs ");
3583 if (flags & DF_REG_INFO)
3584 fprintf (file, "%d ", reg_info[j].n_defs);
3585 if (flags & DF_RD_CHAIN)
3586 df_chain_dump (reg_info[j].defs, file);
3587 }
3588
3589 if ((flags & DF_REG_INFO) || (flags & DF_RU_CHAIN))
3590 {
3591 fprintf (file, " uses ");
3592 if (flags & DF_REG_INFO)
3593 fprintf (file, "%d ", reg_info[j].n_uses);
3594 if (flags & DF_RU_CHAIN)
3595 df_chain_dump (reg_info[j].uses, file);
3596 }
3597
3598 fprintf (file, "\n");
3599 }
3600 }
3601 }
3602 fprintf (file, "\n");
3603 }
3604
3605
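/* Dump the def and use chains recorded for INSN to FILE.  */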
3606 void
3607 df_insn_debug (df, insn, file)
3608 struct df *df;
3609 rtx insn;
3610 FILE *file;
3611 {
3612 unsigned int uid;
3613 int bbi;
3614
3615 uid = INSN_UID (insn);
3616 if (uid >= df->insn_size)
3617 return;
3618
3619 if (df->insns[uid].defs)
3620 bbi = DF_REF_BBNO (df->insns[uid].defs->ref);
3621 else if (df->insns[uid].uses)
3622 bbi = DF_REF_BBNO (df->insns[uid].uses->ref);
3623 else
3624 bbi = -1;
3625
3626 fprintf (file, "insn %d bb %d luid %d defs ",
3627 uid, bbi, DF_INSN_LUID (df, insn));
3628 df_chain_dump (df->insns[uid].defs, file);
3629 fprintf (file, " uses ");
3630 df_chain_dump (df->insns[uid].uses, file);
3631 fprintf (file, "\n");
3632 }
3633
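
/* Like df_insn_debug, but also print the regno of each ref.  */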
3634 void
3635 df_insn_debug_regno (df, insn, file)
3636 struct df *df;
3637 rtx insn;
3638 FILE *file;
3639 {
3640 unsigned int uid;
3641 int bbi;
3642
3643 uid = INSN_UID (insn);
3644 if (uid >= df->insn_size)
3645 return;
3646
3647 if (df->insns[uid].defs)
3648 bbi = DF_REF_BBNO (df->insns[uid].defs->ref);
3649 else if (df->insns[uid].uses)
3650 bbi = DF_REF_BBNO (df->insns[uid].uses->ref);
3651 else
3652 bbi = -1;
3653
3654 fprintf (file, "insn %d bb %d luid %d defs ",
3655 uid, bbi, DF_INSN_LUID (df, insn));
3656 df_chain_dump_regno (df->insns[uid].defs, file);
3657 fprintf (file, " uses ");
3658 df_chain_dump_regno (df->insns[uid].uses, file);
3659 fprintf (file, "\n");
3660 }
3661
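/* Dump the def and use chains recorded for REGNO to FILE.  */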
3662 static void
3663 df_regno_debug (df, regno, file)
3664 struct df *df;
3665 unsigned int regno;
3666 FILE *file;
3667 {
3668 if (regno >= df->reg_size)
3669 return;
3670
3671 fprintf (file, "reg %d life %d defs ",
3672 regno, df->regs[regno].lifetime);
3673 df_chain_dump (df->regs[regno].defs, file);
3674 fprintf (file, " uses ");
3675 df_chain_dump (df->regs[regno].uses, file);
3676 fprintf (file, "\n");
3677 }
3678
3679
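/* Dump information about REF and its chain to FILE.  */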
3680 static void
3681 df_ref_debug (df, ref, file)
3682 struct df *df;
3683 struct ref *ref;
3684 FILE *file;
3685 {
3686 fprintf (file, "%c%d ",
3687 DF_REF_REG_DEF_P (ref) ? 'd' : 'u',
3688 DF_REF_ID (ref));
3689 fprintf (file, "reg %d bb %d luid %d insn %d chain ",
3690 DF_REF_REGNO (ref),
3691 DF_REF_BBNO (ref),
3692 DF_INSN_LUID (df, DF_REF_INSN (ref)),
3693 INSN_UID (DF_REF_INSN (ref)));
3694 df_chain_dump (DF_REF_CHAIN (ref), file);
3695 fprintf (file, "\n");
3696 }
3697
3698
3699 void
3700 debug_df_insn (insn)
3701 rtx insn;
3702 {
3703 df_insn_debug (ddf, insn, stderr);
3704 debug_rtx (insn);
3705 }
3706
3707
3708 void
3709 debug_df_reg (reg)
3710 rtx reg;
3711 {
3712 df_regno_debug (ddf, REGNO (reg), stderr);
3713 }
3714
3715
3716 void
3717 debug_df_regno (regno)
3718 unsigned int regno;
3719 {
3720 df_regno_debug (ddf, regno, stderr);
3721 }
3722
3723
3724 void
3725 debug_df_ref (ref)
3726 struct ref *ref;
3727 {
3728 df_ref_debug (ddf, ref, stderr);
3729 }
3730
3731
3732 void
3733 debug_df_defno (defno)
3734 unsigned int defno;
3735 {
3736 df_ref_debug (ddf, ddf->defs[defno], stderr);
3737 }
3738
3739
void
debug_df_useno (useno)
     unsigned int useno;
{
  df_ref_debug (ddf, ddf->uses[useno], stderr);
}
3746
3747
3748 void
3749 debug_df_chain (link)
3750 struct df_link *link;
3751 {
3752 df_chain_dump (link, stderr);
3753 fputc ('\n', stderr);
3754 }