1 /* FIXME: We need to go back and add the warning messages about code
2 moved across setjmp. */
5 /* Scanning of rtl for dataflow analysis.
6 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
7 Free Software Foundation, Inc.
8 Originally contributed by Michael P. Hayes
9 (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
10 Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
11 and Kenneth Zadeck (zadeck@naturalbridge.com).
13 This file is part of GCC.
15 GCC is free software; you can redistribute it and/or modify it under
16 the terms of the GNU General Public License as published by the Free
17 Software Foundation; either version 2, or (at your option) any later
20 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
21 WARRANTY; without even the implied warranty of MERCHANTABILITY or
22 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
25 You should have received a copy of the GNU General Public License
26 along with GCC; see the file COPYING. If not, write to the Free
27 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
33 #include "coretypes.h"
37 #include "insn-config.h"
42 #include "alloc-pool.h"
44 #include "hard-reg-set.h"
45 #include "basic-block.h"
52 #define HAVE_epilogue 0
55 #define HAVE_prologue 0
57 #ifndef HAVE_sibcall_epilogue
58 #define HAVE_sibcall_epilogue 0
62 #define EPILOGUE_USES(REGNO) 0
65 /* Indicates where we are in the compilation. */
68 /* The bitmap_obstack is used to hold some static variables that
69 should not be reset after each function is compiled. */
71 static bitmap_obstack persistent_obstack
;
73 /* The set of hard registers in eliminables[i].from. */
75 static HARD_REG_SET elim_reg_set
;
77 /* This is a bitmap copy of regs_invalidated_by_call so that we can
78 easily add it into bitmaps, etc. */
80 bitmap df_invalidated_by_call
= NULL
;
82 /* Initialize ur_in and ur_out as if all hard registers were partially
85 bitmap df_all_hard_regs
= NULL
;
87 static void df_ref_record (struct dataflow
*, rtx
, rtx
*,
88 basic_block
, rtx
, enum df_ref_type
,
89 enum df_ref_flags
, bool record_live
);
90 static void df_def_record_1 (struct dataflow
*, rtx
, basic_block
, rtx
,
91 enum df_ref_flags
, bool record_live
);
92 static void df_defs_record (struct dataflow
*, rtx
, basic_block
, rtx
);
93 static void df_uses_record (struct dataflow
*, rtx
*, enum df_ref_type
,
94 basic_block
, rtx
, enum df_ref_flags
);
96 static void df_insn_refs_record (struct dataflow
*, basic_block
, rtx
);
97 static void df_bb_refs_record (struct dataflow
*, basic_block
);
98 static void df_refs_record (struct dataflow
*, bitmap
);
99 static struct df_ref
*df_ref_create_structure (struct dataflow
*, rtx
, rtx
*,
100 basic_block
, rtx
, enum df_ref_type
,
102 static void df_record_exit_block_uses (struct dataflow
*);
103 static void df_grow_reg_info (struct dataflow
*, struct df_ref_info
*);
104 static void df_grow_ref_info (struct df_ref_info
*, unsigned int);
105 static void df_grow_insn_info (struct df
*);
108 /*----------------------------------------------------------------------------
109 SCANNING DATAFLOW PROBLEM
111 There are several ways in which scanning looks just like the other
112 dataflow problems. It shares the all the mechanisms for local info
113 as well as basic block info. Where it differs is when and how often
114 it gets run. It also has no need for the iterative solver.
115 ----------------------------------------------------------------------------*/
117 /* Problem data for the scanning dataflow function. */
118 struct df_scan_problem_data
121 alloc_pool insn_pool
;
125 typedef struct df_scan_bb_info
*df_scan_bb_info_t
;
128 df_scan_free_internal (struct dataflow
*dflow
)
130 struct df
*df
= dflow
->df
;
131 struct df_scan_problem_data
*problem_data
=
132 (struct df_scan_problem_data
*) dflow
->problem_data
;
134 free (df
->def_info
.regs
);
135 free (df
->def_info
.refs
);
136 memset (&df
->def_info
, 0, (sizeof (struct df_ref_info
)));
138 free (df
->use_info
.regs
);
139 free (df
->use_info
.refs
);
140 memset (&df
->use_info
, 0, (sizeof (struct df_ref_info
)));
146 free (dflow
->block_info
);
147 dflow
->block_info
= NULL
;
148 dflow
->block_info_size
= 0;
150 BITMAP_FREE (df
->hardware_regs_used
);
151 BITMAP_FREE (df
->exit_block_uses
);
153 free_alloc_pool (dflow
->block_pool
);
154 free_alloc_pool (problem_data
->ref_pool
);
155 free_alloc_pool (problem_data
->insn_pool
);
156 free_alloc_pool (problem_data
->reg_pool
);
160 /* Get basic block info. */
162 struct df_scan_bb_info
*
163 df_scan_get_bb_info (struct dataflow
*dflow
, unsigned int index
)
165 gcc_assert (index
< dflow
->block_info_size
);
166 return (struct df_scan_bb_info
*) dflow
->block_info
[index
];
170 /* Set basic block info. */
173 df_scan_set_bb_info (struct dataflow
*dflow
, unsigned int index
,
174 struct df_scan_bb_info
*bb_info
)
176 gcc_assert (index
< dflow
->block_info_size
);
177 dflow
->block_info
[index
] = (void *) bb_info
;
181 /* Free basic block info. */
184 df_scan_free_bb_info (struct dataflow
*dflow
, basic_block bb
, void *vbb_info
)
186 struct df_scan_bb_info
*bb_info
= (struct df_scan_bb_info
*) vbb_info
;
189 df_bb_refs_delete (dflow
, bb
->index
);
190 pool_free (dflow
->block_pool
, bb_info
);
195 /* Allocate the problem data for the scanning problem. This should be
196 called when the problem is created or when the entire function is to
200 df_scan_alloc (struct dataflow
*dflow
, bitmap blocks_to_rescan
)
202 struct df
*df
= dflow
->df
;
203 struct df_scan_problem_data
*problem_data
;
204 unsigned int insn_num
= get_max_uid () + 1;
205 unsigned int block_size
= 50;
206 unsigned int bb_index
;
209 /* Given the number of pools, this is really faster than tearing
211 if (dflow
->problem_data
)
212 df_scan_free_internal (dflow
);
215 = create_alloc_pool ("df_scan_block pool",
216 sizeof (struct df_scan_bb_info
),
219 problem_data
= xmalloc (sizeof (struct df_scan_problem_data
));
220 dflow
->problem_data
= problem_data
;
222 problem_data
->ref_pool
223 = create_alloc_pool ("df_scan_ref pool",
224 sizeof (struct df_ref
), block_size
);
225 problem_data
->insn_pool
226 = create_alloc_pool ("df_scan_insn pool",
227 sizeof (struct df_insn_info
), block_size
);
228 problem_data
->reg_pool
229 = create_alloc_pool ("df_scan_reg pool",
230 sizeof (struct df_reg_info
), block_size
);
232 insn_num
+= insn_num
/ 4;
233 df_grow_reg_info (dflow
, &df
->def_info
);
234 df_grow_ref_info (&df
->def_info
, insn_num
);
236 df_grow_reg_info (dflow
, &df
->use_info
);
237 df_grow_ref_info (&df
->use_info
, insn_num
*2);
239 df_grow_insn_info (df
);
240 df_grow_bb_info (dflow
);
242 EXECUTE_IF_SET_IN_BITMAP (blocks_to_rescan
, 0, bb_index
, bi
)
244 struct df_scan_bb_info
*bb_info
= df_scan_get_bb_info (dflow
, bb_index
);
247 bb_info
= (struct df_scan_bb_info
*) pool_alloc (dflow
->block_pool
);
248 df_scan_set_bb_info (dflow
, bb_index
, bb_info
);
250 bb_info
->artificial_defs
= NULL
;
251 bb_info
->artificial_uses
= NULL
;
254 df
->hardware_regs_used
= BITMAP_ALLOC (NULL
);
255 df
->exit_block_uses
= BITMAP_ALLOC (NULL
);
259 /* Free all of the data associated with the scan problem. */
262 df_scan_free (struct dataflow
*dflow
)
264 struct df
*df
= dflow
->df
;
266 if (dflow
->problem_data
)
268 df_scan_free_internal (dflow
);
269 free (dflow
->problem_data
);
272 if (df
->blocks_to_scan
)
273 BITMAP_FREE (df
->blocks_to_scan
);
275 if (df
->blocks_to_analyze
)
276 BITMAP_FREE (df
->blocks_to_analyze
);
282 df_scan_dump (struct dataflow
*dflow ATTRIBUTE_UNUSED
, FILE *file ATTRIBUTE_UNUSED
)
284 struct df
*df
= dflow
->df
;
287 fprintf (file
, " all hard regs \t");
288 dump_bitmap (file
, df_all_hard_regs
);
289 fprintf (file
, " invalidated by call \t");
290 dump_bitmap (file
, df_invalidated_by_call
);
291 fprintf (file
, " hardware regs used \t");
292 dump_bitmap (file
, df
->hardware_regs_used
);
293 fprintf (file
, " exit block uses \t");
294 dump_bitmap (file
, df
->exit_block_uses
);
295 fprintf (file
, " regs ever live \t");
296 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
297 if (regs_ever_live
[i
])
298 fprintf (file
, "%d ", i
);
299 fprintf (file
, "\n");
302 static struct df_problem problem_SCAN
=
304 DF_SCAN
, /* Problem id. */
305 DF_NONE
, /* Direction. */
306 df_scan_alloc
, /* Allocate the problem specific data. */
307 df_scan_free_bb_info
, /* Free basic block info. */
308 NULL
, /* Local compute function. */
309 NULL
, /* Init the solution specific data. */
310 NULL
, /* Iterative solver. */
311 NULL
, /* Confluence operator 0. */
312 NULL
, /* Confluence operator n. */
313 NULL
, /* Transfer function. */
314 NULL
, /* Finalize function. */
315 df_scan_free
, /* Free all of the problem information. */
316 df_scan_dump
, /* Debugging. */
317 NULL
/* Dependent problem. */
321 /* Create a new DATAFLOW instance and add it to an existing instance
322 of DF. The returned structure is what is used to get at the
326 df_scan_add_problem (struct df
*df
)
328 return df_add_problem (df
, &problem_SCAN
);
331 /*----------------------------------------------------------------------------
332 Storage Allocation Utilities
333 ----------------------------------------------------------------------------*/
336 /* First, grow the reg_info information. If the current size is less than
337 the number of pseudos, grow to 25% more than the number of
340 Second, assure that all of the slots up to max_reg_num have been
341 filled with reg_info structures. */
344 df_grow_reg_info (struct dataflow
*dflow
, struct df_ref_info
*ref_info
)
346 unsigned int max_reg
= max_reg_num ();
347 unsigned int new_size
= max_reg
;
348 struct df_scan_problem_data
*problem_data
=
349 (struct df_scan_problem_data
*) dflow
->problem_data
;
352 if (ref_info
->regs_size
< new_size
)
354 new_size
+= new_size
/ 4;
355 ref_info
->regs
= xrealloc (ref_info
->regs
,
356 new_size
*sizeof (struct df_reg_info
*));
357 ref_info
->regs_size
= new_size
;
360 for (i
= ref_info
->regs_inited
; i
< max_reg
; i
++)
362 struct df_reg_info
*reg_info
= pool_alloc (problem_data
->reg_pool
);
363 memset (reg_info
, 0, sizeof (struct df_reg_info
));
364 ref_info
->regs
[i
] = reg_info
;
367 ref_info
->regs_inited
= max_reg
;
371 /* Grow the ref information. */
374 df_grow_ref_info (struct df_ref_info
*ref_info
, unsigned int new_size
)
376 if (ref_info
->refs_size
< new_size
)
378 ref_info
->refs
= xrealloc (ref_info
->refs
,
379 new_size
*sizeof (struct df_ref
*));
380 memset (ref_info
->refs
+ ref_info
->refs_size
, 0,
381 (new_size
- ref_info
->refs_size
) *sizeof (struct df_ref
*));
382 ref_info
->refs_size
= new_size
;
387 /* Grow the ref information. If the current size is less than the
388 number of instructions, grow to 25% more than the number of
392 df_grow_insn_info (struct df
*df
)
394 unsigned int new_size
= get_max_uid () + 1;
395 if (df
->insns_size
< new_size
)
397 new_size
+= new_size
/ 4;
398 df
->insns
= xrealloc (df
->insns
,
399 new_size
*sizeof (struct df_insn_info
*));
400 memset (df
->insns
+ df
->insns_size
, 0,
401 (new_size
- df
->insns_size
) *sizeof (struct df_insn_info
*));
402 df
->insns_size
= new_size
;
409 /*----------------------------------------------------------------------------
410 PUBLIC INTERFACES FOR SMALL GRAIN CHANGES TO SCANNING.
411 ----------------------------------------------------------------------------*/
413 /* Rescan some BLOCKS or all the blocks defined by the last call to
414 df_set_blocks if BLOCKS is NULL); */
417 df_rescan_blocks (struct df
*df
, bitmap blocks
)
419 bitmap local_blocks_to_scan
= BITMAP_ALLOC (NULL
);
421 struct dataflow
*dflow
= df
->problems_by_index
[DF_SCAN
];
424 df
->def_info
.refs_organized
= false;
425 df
->use_info
.refs_organized
= false;
429 /* Need to assure that there are space in all of the tables. */
430 unsigned int insn_num
= get_max_uid () + 1;
431 insn_num
+= insn_num
/ 4;
433 df_grow_reg_info (dflow
, &df
->def_info
);
434 df_grow_ref_info (&df
->def_info
, insn_num
);
436 df_grow_reg_info (dflow
, &df
->use_info
);
437 df_grow_ref_info (&df
->use_info
, insn_num
*2);
439 df_grow_insn_info (df
);
440 df_grow_bb_info (dflow
);
442 bitmap_copy (local_blocks_to_scan
, blocks
);
443 df
->def_info
.add_refs_inline
= true;
444 df
->use_info
.add_refs_inline
= true;
446 df_refs_delete (dflow
, local_blocks_to_scan
);
448 /* This may be a mistake, but if an explicit blocks is passed in
449 and the set of blocks to analyze has been explicitly set, add
450 the extra blocks to blocks_to_analyze. The alternative is to
451 put an assert here. We do not want this to just go by
452 silently or else we may get storage leaks. */
453 if (df
->blocks_to_analyze
)
454 bitmap_ior_into (df
->blocks_to_analyze
, blocks
);
458 /* If we are going to do everything, just reallocate everything.
459 Most stuff is allocated in pools so this is faster than
461 if (df
->blocks_to_analyze
)
462 bitmap_copy (local_blocks_to_scan
, df
->blocks_to_analyze
);
466 bitmap_set_bit (local_blocks_to_scan
, bb
->index
);
468 df_scan_alloc (dflow
, local_blocks_to_scan
);
470 df
->def_info
.add_refs_inline
= false;
471 df
->use_info
.add_refs_inline
= false;
474 df_refs_record (dflow
, local_blocks_to_scan
);
476 bitmap_print (stderr
, local_blocks_to_scan
, "scanning: ", "\n");
479 if (!df
->blocks_to_scan
)
480 df
->blocks_to_scan
= BITMAP_ALLOC (NULL
);
482 bitmap_ior_into (df
->blocks_to_scan
, local_blocks_to_scan
);
483 BITMAP_FREE (local_blocks_to_scan
);
486 /* Create a new ref of type DF_REF_TYPE for register REG at address
487 LOC within INSN of BB. */
490 df_ref_create (struct df
*df
, rtx reg
, rtx
*loc
, rtx insn
,
492 enum df_ref_type ref_type
,
493 enum df_ref_flags ref_flags
)
495 struct dataflow
*dflow
= df
->problems_by_index
[DF_SCAN
];
496 struct df_scan_bb_info
*bb_info
;
498 df_grow_reg_info (dflow
, &df
->use_info
);
499 df_grow_reg_info (dflow
, &df
->def_info
);
500 df_grow_bb_info (dflow
);
502 /* Make sure there is the bb_info for this block. */
503 bb_info
= df_scan_get_bb_info (dflow
, bb
->index
);
506 bb_info
= (struct df_scan_bb_info
*) pool_alloc (dflow
->block_pool
);
507 df_scan_set_bb_info (dflow
, bb
->index
, bb_info
);
508 bb_info
->artificial_defs
= NULL
;
509 bb_info
->artificial_uses
= NULL
;
512 if (ref_type
== DF_REF_REG_DEF
)
513 df
->def_info
.add_refs_inline
= true;
515 df
->use_info
.add_refs_inline
= true;
517 return df_ref_create_structure (dflow
, reg
, loc
, bb
, insn
, ref_type
, ref_flags
);
522 /*----------------------------------------------------------------------------
523 UTILITIES TO CREATE AND DESTROY REFS AND CHAINS.
524 ----------------------------------------------------------------------------*/
527 /* Get the artificial defs for a basic block. */
530 df_get_artificial_defs (struct df
*df
, unsigned int bb_index
)
532 struct dataflow
*dflow
= df
->problems_by_index
[DF_SCAN
];
533 return df_scan_get_bb_info (dflow
, bb_index
)->artificial_defs
;
537 /* Get the artificial uses for a basic block. */
540 df_get_artificial_uses (struct df
*df
, unsigned int bb_index
)
542 struct dataflow
*dflow
= df
->problems_by_index
[DF_SCAN
];
543 return df_scan_get_bb_info (dflow
, bb_index
)->artificial_uses
;
547 /* Link REF at the front of reg_use or reg_def chain for REGNO. */
550 df_reg_chain_create (struct df_reg_info
*reg_info
,
553 struct df_ref
*head
= reg_info
->reg_chain
;
554 reg_info
->reg_chain
= ref
;
556 DF_REF_NEXT_REG (ref
) = head
;
558 /* We cannot actually link to the head of the chain. */
559 DF_REF_PREV_REG (ref
) = NULL
;
562 DF_REF_PREV_REG (head
) = ref
;
566 /* Remove REF from the CHAIN. Return the head of the chain. This
567 will be CHAIN unless the REF was at the beginning of the chain. */
569 static struct df_ref
*
570 df_ref_unlink (struct df_ref
*chain
, struct df_ref
*ref
)
572 struct df_ref
*orig_chain
= chain
;
573 struct df_ref
*prev
= NULL
;
580 prev
->next_ref
= ref
->next_ref
;
581 ref
->next_ref
= NULL
;
586 chain
= ref
->next_ref
;
587 ref
->next_ref
= NULL
;
593 chain
= chain
->next_ref
;
596 /* Someone passed in a ref that was not in the chain. */
602 /* Unlink and delete REF at the reg_use or reg_def chain. Also delete
603 the def-use or use-def chain if it exists. Returns the next ref in
604 uses or defs chain. */
607 df_reg_chain_unlink (struct dataflow
*dflow
, struct df_ref
*ref
)
609 struct df
*df
= dflow
->df
;
610 struct df_ref
*next
= DF_REF_NEXT_REG (ref
);
611 struct df_ref
*prev
= DF_REF_PREV_REG (ref
);
612 struct df_scan_problem_data
*problem_data
=
613 (struct df_scan_problem_data
*) dflow
->problem_data
;
614 struct df_reg_info
*reg_info
;
615 struct df_ref
*next_ref
= ref
->next_ref
;
616 unsigned int id
= DF_REF_ID (ref
);
618 if (DF_REF_TYPE (ref
) == DF_REF_REG_DEF
)
620 reg_info
= DF_REG_DEF_GET (df
, DF_REF_REGNO (ref
));
621 df
->def_info
.bitmap_size
--;
622 if (df
->def_info
.refs
&& (id
< df
->def_info
.refs_size
))
623 DF_DEFS_SET (df
, id
, NULL
);
627 reg_info
= DF_REG_USE_GET (df
, DF_REF_REGNO (ref
));
628 df
->use_info
.bitmap_size
--;
629 if (df
->use_info
.refs
&& (id
< df
->use_info
.refs_size
))
630 DF_USES_SET (df
, id
, NULL
);
633 /* Delete any def-use or use-def chains that start here. */
634 if (DF_REF_CHAIN (ref
))
635 df_chain_unlink (df
->problems_by_index
[DF_CHAIN
], ref
, NULL
);
639 /* Unlink from the reg chain. If there is no prev, this is the
640 first of the list. If not, just join the next and prev. */
643 DF_REF_NEXT_REG (prev
) = next
;
645 DF_REF_PREV_REG (next
) = prev
;
649 reg_info
->reg_chain
= next
;
651 DF_REF_PREV_REG (next
) = NULL
;
654 pool_free (problem_data
->ref_pool
, ref
);
659 /* Unlink REF from all def-use/use-def chains, etc. */
662 df_ref_remove (struct df
*df
, struct df_ref
*ref
)
664 struct dataflow
*dflow
= df
->problems_by_index
[DF_SCAN
];
665 if (DF_REF_REG_DEF_P (ref
))
667 if (DF_REF_FLAGS (ref
) & DF_REF_ARTIFICIAL
)
669 struct df_scan_bb_info
*bb_info
670 = df_scan_get_bb_info (dflow
, DF_REF_BB (ref
)->index
);
671 bb_info
->artificial_defs
672 = df_ref_unlink (bb_info
->artificial_defs
, ref
);
675 DF_INSN_UID_DEFS (df
, DF_REF_INSN_UID (ref
)) =
676 df_ref_unlink (DF_INSN_UID_DEFS (df
, DF_REF_INSN_UID (ref
)), ref
);
678 if (df
->def_info
.add_refs_inline
)
679 DF_DEFS_SET (df
, DF_REF_ID (ref
), NULL
);
683 if (DF_REF_FLAGS (ref
) & DF_REF_ARTIFICIAL
)
685 struct df_scan_bb_info
*bb_info
686 = df_scan_get_bb_info (dflow
, DF_REF_BB (ref
)->index
);
687 bb_info
->artificial_uses
688 = df_ref_unlink (bb_info
->artificial_uses
, ref
);
691 DF_INSN_UID_USES (df
, DF_REF_INSN_UID (ref
)) =
692 df_ref_unlink (DF_INSN_UID_USES (df
, DF_REF_INSN_UID (ref
)), ref
);
694 if (df
->use_info
.add_refs_inline
)
695 DF_USES_SET (df
, DF_REF_ID (ref
), NULL
);
698 df_reg_chain_unlink (dflow
, ref
);
702 /* Create the insn record for INSN. If there was one there, zero it out. */
704 static struct df_insn_info
*
705 df_insn_create_insn_record (struct dataflow
*dflow
, rtx insn
)
707 struct df
*df
= dflow
->df
;
708 struct df_scan_problem_data
*problem_data
=
709 (struct df_scan_problem_data
*) dflow
->problem_data
;
711 struct df_insn_info
*insn_rec
= DF_INSN_GET (df
, insn
);
714 insn_rec
= pool_alloc (problem_data
->insn_pool
);
715 DF_INSN_SET (df
, insn
, insn_rec
);
717 memset (insn_rec
, 0, sizeof (struct df_insn_info
));
723 /* Delete all of the refs information from INSN. */
726 df_insn_refs_delete (struct dataflow
*dflow
, rtx insn
)
728 struct df
*df
= dflow
->df
;
729 unsigned int uid
= INSN_UID (insn
);
730 struct df_insn_info
*insn_info
= DF_INSN_UID_GET (df
, uid
);
732 struct df_scan_problem_data
*problem_data
=
733 (struct df_scan_problem_data
*) dflow
->problem_data
;
737 ref
= insn_info
->defs
;
739 ref
= df_reg_chain_unlink (dflow
, ref
);
741 ref
= insn_info
->uses
;
743 ref
= df_reg_chain_unlink (dflow
, ref
);
745 pool_free (problem_data
->insn_pool
, insn_info
);
746 DF_INSN_SET (df
, insn
, NULL
);
751 /* Delete all of the refs information from basic_block with BB_INDEX. */
754 df_bb_refs_delete (struct dataflow
*dflow
, int bb_index
)
759 struct df_scan_bb_info
*bb_info
760 = df_scan_get_bb_info (dflow
, bb_index
);
762 basic_block bb
= BASIC_BLOCK (bb_index
);
763 FOR_BB_INSNS (bb
, insn
)
767 /* Record defs within INSN. */
768 df_insn_refs_delete (dflow
, insn
);
772 /* Get rid of any artifical uses. */
775 def
= bb_info
->artificial_defs
;
777 def
= df_reg_chain_unlink (dflow
, def
);
778 bb_info
->artificial_defs
= NULL
;
779 use
= bb_info
->artificial_uses
;
781 use
= df_reg_chain_unlink (dflow
, use
);
782 bb_info
->artificial_uses
= NULL
;
787 /* Delete all of the refs information from BLOCKS. */
790 df_refs_delete (struct dataflow
*dflow
, bitmap blocks
)
793 unsigned int bb_index
;
795 EXECUTE_IF_SET_IN_BITMAP (blocks
, 0, bb_index
, bi
)
797 df_bb_refs_delete (dflow
, bb_index
);
802 /* Take build ref table for either the uses or defs from the reg-use
803 or reg-def chains. */
806 df_reorganize_refs (struct df_ref_info
*ref_info
)
808 unsigned int m
= ref_info
->regs_inited
;
810 unsigned int offset
= 0;
811 unsigned int size
= 0;
813 if (ref_info
->refs_organized
)
816 if (ref_info
->refs_size
< ref_info
->bitmap_size
)
818 int new_size
= ref_info
->bitmap_size
+ ref_info
->bitmap_size
/ 4;
819 df_grow_ref_info (ref_info
, new_size
);
822 for (regno
= 0; regno
< m
; regno
++)
824 struct df_reg_info
*reg_info
= ref_info
->regs
[regno
];
828 struct df_ref
*ref
= reg_info
->reg_chain
;
829 reg_info
->begin
= offset
;
832 ref_info
->refs
[offset
] = ref
;
833 DF_REF_ID (ref
) = offset
++;
834 ref
= DF_REF_NEXT_REG (ref
);
838 reg_info
->n_refs
= count
;
842 /* The bitmap size is not decremented when refs are deleted. So
843 reset it now that we have squished out all of the empty
845 ref_info
->bitmap_size
= size
;
846 ref_info
->refs_organized
= true;
847 ref_info
->add_refs_inline
= true;
851 /* Local miscellaneous routines. */
853 /* Local routines for recording refs. */
855 /* Set where we are in the compilation. */
858 df_set_state (int state
)
865 /*----------------------------------------------------------------------------
866 Hard core instruction scanning code. No external interfaces here,
867 just a lot of routines that look inside insns.
868 ----------------------------------------------------------------------------*/
870 /* Create a ref and add it to the reg-def or reg-use chains. */
872 static struct df_ref
*
873 df_ref_create_structure (struct dataflow
*dflow
, rtx reg
, rtx
*loc
,
874 basic_block bb
, rtx insn
,
875 enum df_ref_type ref_type
,
876 enum df_ref_flags ref_flags
)
878 struct df_ref
*this_ref
;
879 struct df
*df
= dflow
->df
;
880 int regno
= REGNO (GET_CODE (reg
) == SUBREG
? SUBREG_REG (reg
) : reg
);
881 struct df_scan_problem_data
*problem_data
=
882 (struct df_scan_problem_data
*) dflow
->problem_data
;
884 this_ref
= pool_alloc (problem_data
->ref_pool
);
885 DF_REF_REG (this_ref
) = reg
;
886 DF_REF_REGNO (this_ref
) = regno
;
887 DF_REF_LOC (this_ref
) = loc
;
888 DF_REF_INSN (this_ref
) = insn
;
889 DF_REF_CHAIN (this_ref
) = NULL
;
890 DF_REF_TYPE (this_ref
) = ref_type
;
891 DF_REF_FLAGS (this_ref
) = ref_flags
;
892 DF_REF_DATA (this_ref
) = NULL
;
893 DF_REF_BB (this_ref
) = bb
;
895 /* Link the ref into the reg_def and reg_use chains and keep a count
897 if (ref_type
== DF_REF_REG_DEF
)
899 struct df_reg_info
*reg_info
= DF_REG_DEF_GET (df
, regno
);
902 /* Add the ref to the reg_def chain. */
903 df_reg_chain_create (reg_info
, this_ref
);
904 DF_REF_ID (this_ref
) = df
->def_info
.bitmap_size
;
905 if (df
->def_info
.add_refs_inline
)
907 if (DF_DEFS_SIZE (df
) >= df
->def_info
.refs_size
)
909 int new_size
= df
->def_info
.bitmap_size
910 + df
->def_info
.bitmap_size
/ 4;
911 df_grow_ref_info (&df
->def_info
, new_size
);
913 /* Add the ref to the big array of defs. */
914 DF_DEFS_SET (df
, df
->def_info
.bitmap_size
, this_ref
);
915 df
->def_info
.refs_organized
= false;
918 df
->def_info
.bitmap_size
++;
920 if (DF_REF_FLAGS (this_ref
) & DF_REF_ARTIFICIAL
)
922 struct df_scan_bb_info
*bb_info
923 = df_scan_get_bb_info (dflow
, bb
->index
);
924 this_ref
->next_ref
= bb_info
->artificial_defs
;
925 bb_info
->artificial_defs
= this_ref
;
929 this_ref
->next_ref
= DF_INSN_GET (df
, insn
)->defs
;
930 DF_INSN_GET (df
, insn
)->defs
= this_ref
;
935 struct df_reg_info
*reg_info
= DF_REG_USE_GET (df
, regno
);
938 /* Add the ref to the reg_use chain. */
939 df_reg_chain_create (reg_info
, this_ref
);
940 DF_REF_ID (this_ref
) = df
->use_info
.bitmap_size
;
941 if (df
->use_info
.add_refs_inline
)
943 if (DF_USES_SIZE (df
) >= df
->use_info
.refs_size
)
945 int new_size
= df
->use_info
.bitmap_size
946 + df
->use_info
.bitmap_size
/ 4;
947 df_grow_ref_info (&df
->use_info
, new_size
);
949 /* Add the ref to the big array of defs. */
950 DF_USES_SET (df
, df
->use_info
.bitmap_size
, this_ref
);
951 df
->use_info
.refs_organized
= false;
954 df
->use_info
.bitmap_size
++;
955 if (DF_REF_FLAGS (this_ref
) & DF_REF_ARTIFICIAL
)
957 struct df_scan_bb_info
*bb_info
958 = df_scan_get_bb_info (dflow
, bb
->index
);
959 this_ref
->next_ref
= bb_info
->artificial_uses
;
960 bb_info
->artificial_uses
= this_ref
;
964 this_ref
->next_ref
= DF_INSN_GET (df
, insn
)->uses
;
965 DF_INSN_GET (df
, insn
)->uses
= this_ref
;
972 /* Create new references of type DF_REF_TYPE for each part of register REG
973 at address LOC within INSN of BB. */
976 df_ref_record (struct dataflow
*dflow
, rtx reg
, rtx
*loc
,
977 basic_block bb
, rtx insn
,
978 enum df_ref_type ref_type
,
979 enum df_ref_flags ref_flags
,
983 struct df
*df
= dflow
->df
;
985 gcc_assert (REG_P (reg
) || GET_CODE (reg
) == SUBREG
);
987 /* For the reg allocator we are interested in some SUBREG rtx's, but not
988 all. Notably only those representing a word extraction from a multi-word
989 reg. As written in the docu those should have the form
990 (subreg:SI (reg:M A) N), with size(SImode) > size(Mmode).
991 XXX Is that true? We could also use the global word_mode variable. */
992 if ((df
->flags
& DF_SUBREGS
) == 0
993 && GET_CODE (reg
) == SUBREG
994 && (GET_MODE_SIZE (GET_MODE (reg
)) < GET_MODE_SIZE (word_mode
)
995 || GET_MODE_SIZE (GET_MODE (reg
))
996 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (reg
)))))
998 loc
= &SUBREG_REG (reg
);
1000 ref_flags
|= DF_REF_STRIPPED
;
1003 regno
= REGNO (GET_CODE (reg
) == SUBREG
? SUBREG_REG (reg
) : reg
);
1004 if (regno
< FIRST_PSEUDO_REGISTER
)
1009 if (! (df
->flags
& DF_HARD_REGS
))
1012 /* GET_MODE (reg) is correct here. We do not want to go into a SUBREG
1013 for the mode, because we only want to add references to regs, which
1014 are really referenced. E.g., a (subreg:SI (reg:DI 0) 0) does _not_
1015 reference the whole reg 0 in DI mode (which would also include
1016 reg 1, at least, if 0 and 1 are SImode registers). */
1017 endregno
= hard_regno_nregs
[regno
][GET_MODE (reg
)];
1018 if (GET_CODE (reg
) == SUBREG
)
1019 regno
+= subreg_regno_offset (regno
, GET_MODE (SUBREG_REG (reg
)),
1020 SUBREG_BYTE (reg
), GET_MODE (reg
));
1023 for (i
= regno
; i
< endregno
; i
++)
1025 /* Calls are handled at call site because regs_ever_live
1026 doesn't include clobbered regs, only used ones. */
1027 if (ref_type
== DF_REF_REG_DEF
&& record_live
)
1028 regs_ever_live
[i
] = 1;
1029 else if ((ref_type
== DF_REF_REG_USE
1030 || ref_type
== DF_REF_REG_MEM_STORE
1031 || ref_type
== DF_REF_REG_MEM_LOAD
)
1032 && ((ref_flags
& DF_REF_ARTIFICIAL
) == 0))
1034 /* Set regs_ever_live on uses of non-eliminable frame
1035 pointers and arg pointers. */
1036 if (! (TEST_HARD_REG_BIT (elim_reg_set
, regno
)
1037 && (regno
== FRAME_POINTER_REGNUM
1038 || regno
== ARG_POINTER_REGNUM
)))
1039 regs_ever_live
[i
] = 1;
1042 df_ref_create_structure (dflow
, regno_reg_rtx
[i
], loc
,
1043 bb
, insn
, ref_type
, ref_flags
);
1048 df_ref_create_structure (dflow
, reg
, loc
,
1049 bb
, insn
, ref_type
, ref_flags
);
1054 /* A set to a non-paradoxical SUBREG for which the number of word_mode units
1055 covered by the outer mode is smaller than that covered by the inner mode,
1056 is a read-modify-write operation.
1057 This function returns true iff the SUBREG X is such a SUBREG. */
1060 df_read_modify_subreg_p (rtx x
)
1062 unsigned int isize
, osize
;
1063 if (GET_CODE (x
) != SUBREG
)
1065 isize
= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
)));
1066 osize
= GET_MODE_SIZE (GET_MODE (x
));
1067 return (isize
> osize
&& isize
> UNITS_PER_WORD
);
1071 /* Process all the registers defined in the rtx, X.
1072 Autoincrement/decrement definitions will be picked up by
1076 df_def_record_1 (struct dataflow
*dflow
, rtx x
,
1077 basic_block bb
, rtx insn
,
1078 enum df_ref_flags flags
, bool record_live
)
1083 /* We may recursively call ourselves on EXPR_LIST when dealing with PARALLEL
1085 if (GET_CODE (x
) == EXPR_LIST
|| GET_CODE (x
) == CLOBBER
)
1088 loc
= &SET_DEST (x
);
1091 /* Some targets place small structures in registers for
1092 return values of functions. */
1093 if (GET_CODE (dst
) == PARALLEL
&& GET_MODE (dst
) == BLKmode
)
1097 for (i
= XVECLEN (dst
, 0) - 1; i
>= 0; i
--)
1099 rtx temp
= XVECEXP (dst
, 0, i
);
1100 if (GET_CODE (temp
) == EXPR_LIST
|| GET_CODE (temp
) == CLOBBER
1101 || GET_CODE (temp
) == SET
)
1102 df_def_record_1 (dflow
, temp
, bb
, insn
,
1103 GET_CODE (temp
) == CLOBBER
? flags
| DF_REF_CLOBBER
: flags
,
1109 /* Maybe, we should flag the use of STRICT_LOW_PART somehow. It might
1110 be handy for the reg allocator. */
1111 while (GET_CODE (dst
) == STRICT_LOW_PART
1112 || GET_CODE (dst
) == ZERO_EXTRACT
1113 || df_read_modify_subreg_p (dst
))
1116 /* Strict low part always contains SUBREG, but we do not want to make
1117 it appear outside, as whole register is always considered. */
1118 if (GET_CODE (dst
) == STRICT_LOW_PART
)
1120 loc
= &XEXP (dst
, 0);
1124 loc
= &XEXP (dst
, 0);
1126 flags
|= DF_REF_READ_WRITE
;
1130 || (GET_CODE (dst
) == SUBREG
&& REG_P (SUBREG_REG (dst
))))
1131 df_ref_record (dflow
, dst
, loc
, bb
, insn
,
1132 DF_REF_REG_DEF
, flags
, record_live
);
1136 /* Process all the registers defined in the pattern rtx, X. */
1139 df_defs_record (struct dataflow
*dflow
, rtx x
, basic_block bb
, rtx insn
)
1141 RTX_CODE code
= GET_CODE (x
);
1143 if (code
== SET
|| code
== CLOBBER
)
1145 /* Mark the single def within the pattern. */
1146 df_def_record_1 (dflow
, x
, bb
, insn
,
1147 code
== CLOBBER
? DF_REF_CLOBBER
: 0, true);
1149 else if (code
== COND_EXEC
)
1151 df_defs_record (dflow
, COND_EXEC_CODE (x
), bb
, insn
);
1153 else if (code
== PARALLEL
)
1157 /* Mark the multiple defs within the pattern. */
1158 for (i
= XVECLEN (x
, 0) - 1; i
>= 0; i
--)
1159 df_defs_record (dflow
, XVECEXP (x
, 0, i
), bb
, insn
);
1164 /* Process all the registers used in the rtx at address LOC. */
/* NOTE(review): garbled extraction -- numeric prefixes are original line
   numbers; the enclosing switch statement, its case labels (CLOBBER, MEM,
   SUBREG, REG, SET, ASM_OPERANDS, PRE/POST-modify, etc.), the local
   declarations of `x', `i', `j', and several `break'/`goto retry' lines
   were lost.  The section comments below annotate only what is visible;
   the code is left byte-identical.  */
1167 df_uses_record (struct dataflow
*dflow
, rtx
*loc
, enum df_ref_type ref_type
,
1168 basic_block bb
, rtx insn
, enum df_ref_flags flags
)
1176 code
= GET_CODE (x
);
/* --- CLOBBER handling (presumably the CLOBBER case; case label lost) --- */
1192 /* If we are clobbering a MEM, mark any registers inside the address
1194 if (MEM_P (XEXP (x
, 0)))
1195 df_uses_record (dflow
, &XEXP (XEXP (x
, 0), 0),
1196 DF_REF_REG_MEM_STORE
, bb
, insn
, flags
);
1198 /* If we're clobbering a REG then we have a def so ignore. */
/* --- MEM: scan the address as a REG_MEM_LOAD use; keep only the
   IN_NOTE flag when recursing into the address.  --- */
1202 df_uses_record (dflow
, &XEXP (x
, 0), DF_REF_REG_MEM_LOAD
, bb
, insn
,
1203 flags
& DF_REF_IN_NOTE
);
/* --- SUBREG: strip a SUBREG of a non-REG and rescan at the inner rtx. --- */
1207 /* While we're here, optimize this case. */
1209 /* In case the SUBREG is not of a REG, do not optimize. */
1210 if (!REG_P (SUBREG_REG (x
)))
1212 loc
= &SUBREG_REG (x
);
1213 df_uses_record (dflow
, loc
, ref_type
, bb
, insn
, flags
);
1216 /* ... Fall through ... */
/* --- REG (after fall-through): record the reference itself. --- */
1219 df_ref_record (dflow
, x
, loc
, bb
, insn
, ref_type
, flags
, true);
/* --- SET: record uses in SET_SRC, then inspect SET_DEST, whose
   inner parts may also be uses (read-modify subregs, memory
   addresses, strict_low_part, zero/sign_extract operands).  --- */
1224 rtx dst
= SET_DEST (x
);
1225 gcc_assert (!(flags
& DF_REF_IN_NOTE
));
1226 df_uses_record (dflow
, &SET_SRC (x
), DF_REF_REG_USE
, bb
, insn
, 0);
1228 switch (GET_CODE (dst
))
/* Read-modify SUBREG destinations also read the inner register.  */
1231 if (df_read_modify_subreg_p (dst
))
1233 df_uses_record (dflow
, &SUBREG_REG (dst
),
1235 insn
, DF_REF_READ_WRITE
);
/* MEM destination: the address registers are uses of kind MEM_STORE.  */
1246 df_uses_record (dflow
, &XEXP (dst
, 0),
1247 DF_REF_REG_MEM_STORE
,
1250 case STRICT_LOW_PART
:
1252 rtx
*temp
= &XEXP (dst
, 0);
1253 /* A strict_low_part uses the whole REG and not just the
1255 dst
= XEXP (dst
, 0);
1256 df_uses_record (dflow
,
1257 (GET_CODE (dst
) == SUBREG
)
1258 ? &SUBREG_REG (dst
) : temp
,
1260 insn
, DF_REF_READ_WRITE
);
/* ZERO_EXTRACT / SIGN_EXTRACT destination (case labels lost): the
   extracted object plus both position/width operands are uses.  */
1265 df_uses_record (dflow
, &XEXP (dst
, 0),
1266 DF_REF_REG_USE
, bb
, insn
,
1268 df_uses_record (dflow
, &XEXP (dst
, 1),
1269 DF_REF_REG_USE
, bb
, insn
, 0);
1270 df_uses_record (dflow
, &XEXP (dst
, 2),
1271 DF_REF_REG_USE
, bb
, insn
, 0);
1272 dst
= XEXP (dst
, 0);
/* --- asm / trap / volatile-unspec handling --- */
1284 case UNSPEC_VOLATILE
:
1288 /* Traditional and volatile asm instructions must be
1289 considered to use and clobber all hard registers, all
1290 pseudo-registers and all of memory. So must TRAP_IF and
1291 UNSPEC_VOLATILE operations.
1293 Consider for instance a volatile asm that changes the fpu
1294 rounding mode. An insn should not be moved across this
1295 even if it only uses pseudo-regs because it might give an
1296 incorrectly rounded result.
1298 However, flow.c's liveness computation did *not* do this,
1299 giving the reasoning as " ?!? Unfortunately, marking all
1300 hard registers as live causes massive problems for the
1301 register allocator and marking all pseudos as live creates
1302 mountains of uninitialized variable warnings."
1304 In order to maintain the status quo with regard to liveness
1305 and uses, we do what flow.c did and just mark any regs we
1306 can find in ASM_OPERANDS as used. Later on, when liveness
1307 is computed, asm insns are scanned and regs_asm_clobbered
1310 For all ASM_OPERANDS, we must traverse the vector of input
1311 operands. We can not just fall through here since then we
1312 would be confused by the ASM_INPUT rtx inside ASM_OPERANDS,
1313 which do not indicate traditional asms unlike their normal
1315 if (code
== ASM_OPERANDS
)
1319 for (j
= 0; j
< ASM_OPERANDS_INPUT_LENGTH (x
); j
++)
1320 df_uses_record (dflow
, &ASM_OPERANDS_INPUT (x
, j
),
1321 DF_REF_REG_USE
, bb
, insn
, 0);
/* --- auto-inc/dec (PRE/POST modify; case labels lost): the modified
   register is both a def (READ_WRITE) and, after fall-through, a use.  --- */
1333 /* Catch the def of the register being modified. */
1334 df_ref_record (dflow
, XEXP (x
, 0), &XEXP (x
, 0), bb
, insn
,
1335 DF_REF_REG_DEF
, DF_REF_READ_WRITE
, true);
1337 /* ... Fall through to handle uses ... */
/* --- default: generic recursive walk over the operand format string.  --- */
1343 /* Recursively scan the operands of this expression. */
1345 const char *fmt
= GET_RTX_FORMAT (code
);
1348 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
1352 /* Tail recursive case: save a function call level. */
/* NOTE(review): the fmt[i] == 'e' test and the tail-recursive goto that
   this comment refers to appear to have been lost by the extractor.  */
1358 df_uses_record (dflow
, &XEXP (x
, i
), ref_type
, bb
, insn
, flags
);
1360 else if (fmt
[i
] == 'E')
/* Vector operand: recurse on every element.  */
1363 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
1364 df_uses_record (dflow
, &XVECEXP (x
, i
, j
), ref_type
,
1371 /* Return true if *LOC contains an asm. */
/* NOTE(review): callback for for_each_rtx; the return-type line, braces
   and the return statements were lost by the extractor.  Presumably it
   returns nonzero when *LOC is an ASM_OPERANDS -- confirm against the
   original df-scan.c.  */
1374 df_insn_contains_asm_1 (rtx
*loc
, void *data ATTRIBUTE_UNUSED
)
1378 if (GET_CODE (*loc
) == ASM_OPERANDS
)
1384 /* Return true if INSN contains an ASM. */
/* NOTE(review): thin wrapper that walks INSN's rtl with for_each_rtx
   using df_insn_contains_asm_1 as the predicate.  The return-type line
   and braces were lost in extraction; code left byte-identical.  */
1387 df_insn_contains_asm (rtx insn
)
1389 return for_each_rtx (&insn
, df_insn_contains_asm_1
, NULL
);
1394 /* Record all the refs for DF within INSN of basic block BB. */
/* NOTE(review): garbled extraction -- numeric prefixes are original line
   numbers, "®" is mojibake for "&reg" (e.g. "®no_reg_rtx" was
   "&regno_reg_rtx"), and local declarations, braces, case labels and the
   CALL_P guard around the call-handling section were lost.  Code left
   byte-identical; comments annotate visible logic only.  */
1397 df_insn_refs_record (struct dataflow
*dflow
, basic_block bb
, rtx insn
)
1400 struct df
*df
= dflow
->df
;
/* Remember whether this insn contains an asm, for later liveness passes.  */
1406 if (df_insn_contains_asm (insn
))
1407 DF_INSN_CONTAINS_ASM (df
, insn
) = true;
1409 /* Record register defs. */
1410 df_defs_record (dflow
, PATTERN (insn
), bb
, insn
);
/* Optionally scan REG_NOTES for equivalence notes and record their
   registers as uses flagged DF_REF_IN_NOTE.  */
1412 if (df
->flags
& DF_EQUIV_NOTES
)
1413 for (note
= REG_NOTES (insn
); note
;
1414 note
= XEXP (note
, 1))
1416 switch (REG_NOTE_KIND (note
))
1420 df_uses_record (dflow
, &XEXP (note
, 0), DF_REF_REG_USE
,
1421 bb
, insn
, DF_REF_IN_NOTE
);
/* --- Call-insn handling (presumably guarded by CALL_P; guard lost) --- */
1431 /* Record the registers used to pass arguments, and explicitly
1432 noted as clobbered. */
1433 for (note
= CALL_INSN_FUNCTION_USAGE (insn
); note
;
1434 note
= XEXP (note
, 1))
1436 if (GET_CODE (XEXP (note
, 0)) == USE
)
1437 df_uses_record (dflow
, &XEXP (XEXP (note
, 0), 0),
1440 else if (GET_CODE (XEXP (note
, 0)) == CLOBBER
)
1442 df_defs_record (dflow
, XEXP (note
, 0), bb
, insn
);
1443 if (REG_P (XEXP (XEXP (note
, 0), 0)))
1445 rtx reg
= XEXP (XEXP (note
, 0), 0);
/* For hard-register clobbers, mark every component register live.  */
1450 regno_last
= regno_first
= REGNO (reg
);
1451 if (regno_first
< FIRST_PSEUDO_REGISTER
)
1453 += hard_regno_nregs
[regno_first
][GET_MODE (reg
)] - 1;
1454 for (i
= regno_first
; i
<= regno_last
; i
++)
1455 regs_ever_live
[i
] = 1;
1460 /* The stack ptr is used (honorarily) by a CALL insn. */
1461 df_uses_record (dflow
, ®no_reg_rtx
[STACK_POINTER_REGNUM
],
1462 DF_REF_REG_USE
, bb
, insn
,
1465 if (df
->flags
& DF_HARD_REGS
)
1469 /* Calls may also reference any of the global registers,
1470 so they are recorded as used. */
1471 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
1473 df_uses_record (dflow
, ®no_reg_rtx
[i
],
1474 DF_REF_REG_USE
, bb
, insn
,
/* Record clobber-defs for every register invalidated by a call.  */
1476 EXECUTE_IF_SET_IN_BITMAP (df_invalidated_by_call
, 0, ui
, bi
)
1477 df_ref_record (dflow
, regno_reg_rtx
[ui
], ®no_reg_rtx
[ui
], bb
, insn
,
1478 DF_REF_REG_DEF
, DF_REF_CLOBBER
, false);
1482 /* Record the register uses. */
1483 df_uses_record (dflow
, &PATTERN (insn
),
1484 DF_REF_REG_USE
, bb
, insn
, 0);
/* Presumably: return true iff basic block BB has at least one incoming
   EH edge.  NOTE(review): the return-type line, braces, and the return
   statements were lost by the extractor; code left byte-identical.  */
1490 df_has_eh_preds (basic_block bb
)
1495 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
1497 if (e
->flags
& EDGE_EH
)
1503 /* Record all the refs within the basic block BB. */
/* NOTE(review): garbled extraction -- numeric prefixes are original line
   numbers, "®" is mojibake for "&reg", and local declarations (insn,
   luid, i), braces, the `if (INSN_P ...)' guards, and several #endif
   lines were lost.  Code left byte-identical; comments annotate the
   visible logic only.  */
1506 df_bb_refs_record (struct dataflow
*dflow
, basic_block bb
)
1508 struct df
*df
= dflow
->df
;
1511 struct df_scan_bb_info
*bb_info
= df_scan_get_bb_info (dflow
, bb
->index
);
1513 /* Need to make sure that there is a record in the basic block info. */
/* Lazily allocate and register a per-block record from the block pool.  */
1516 bb_info
= (struct df_scan_bb_info
*) pool_alloc (dflow
->block_pool
);
1517 df_scan_set_bb_info (dflow
, bb
->index
, bb_info
);
1518 bb_info
->artificial_defs
= NULL
;
1519 bb_info
->artificial_uses
= NULL
;
1522 /* Scan the block an insn at a time from beginning to end. */
1523 FOR_BB_INSNS (bb
, insn
)
1525 df_insn_create_insn_record (dflow
, insn
)
;
1528 /* Record defs within INSN. */
/* Assign each insn a logical uid (luid) in block order, then scan it.  */
1529 DF_INSN_LUID (df
, insn
) = luid
++;
1530 df_insn_refs_record (dflow
, bb
, insn
);
1532 DF_INSN_LUID (df
, insn
) = luid
;
/* --- Artificial defs for EH return data registers at block top --- */
1535 #ifdef EH_RETURN_DATA_REGNO
1536 if ((df
->flags
& DF_HARD_REGS
)
1537 && df_has_eh_preds (bb
))
1540 /* Mark the registers that will contain data for the handler. */
1541 if (current_function_calls_eh_return
)
1544 unsigned regno
= EH_RETURN_DATA_REGNO (i
);
1545 if (regno
== INVALID_REGNUM
)
1547 df_ref_record (dflow
, regno_reg_rtx
[i
], ®no_reg_rtx
[i
], bb
, NULL
,
1548 DF_REF_REG_DEF
, DF_REF_ARTIFICIAL
| DF_REF_AT_TOP
, false);
1554 /* This code is putting in a artificial ref for the use at the TOP
1555 of the block that receives the exception. It is too cumbersome
1556 to actually put the ref on the edge. We could either model this
1557 at the top of the receiver block or the bottom of the sender
1560 The bottom of the sender block is problematic because not all
1561 out-edges of the a block are eh-edges. However, it is true that
1562 all edges into a block are either eh-edges or none of them are
1563 eh-edges. Thus, we can model this at the top of the eh-receiver
1564 for all of the edges at once. */
1565 if ((df
->flags
& DF_HARD_REGS
)
1566 && df_has_eh_preds (bb
))
1569 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
1571 df_uses_record (dflow
, ®no_reg_rtx
[i
],
1572 DF_REF_REG_USE
, EXIT_BLOCK_PTR
, NULL
,
1573 DF_REF_ARTIFICIAL
| DF_REF_AT_TOP
);
/* --- Artificial uses forced live in every real block --- */
1577 if ((df
->flags
& DF_HARD_REGS
)
1578 && bb
->index
>= NUM_FIXED_BLOCKS
)
1580 /* Before reload, there are a few registers that must be forced
1581 live everywhere -- which might not already be the case for
1582 blocks within infinite loops. */
1583 if (! reload_completed
)
1586 /* Any reference to any pseudo before reload is a potential
1587 reference of the frame pointer. */
1588 df_uses_record (dflow
, ®no_reg_rtx
[FRAME_POINTER_REGNUM
],
1589 DF_REF_REG_USE
, bb
, NULL
, DF_REF_ARTIFICIAL
);
1591 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1592 /* Pseudos with argument area equivalences may require
1593 reloading via the argument pointer. */
1594 if (fixed_regs
[ARG_POINTER_REGNUM
])
1595 df_uses_record (dflow
, ®no_reg_rtx
[ARG_POINTER_REGNUM
],
1596 DF_REF_REG_USE
, bb
, NULL
,
1600 /* Any constant, or pseudo with constant equivalences, may
1601 require reloading from memory using the pic register. */
1602 if ((unsigned) PIC_OFFSET_TABLE_REGNUM
!= INVALID_REGNUM
1603 && fixed_regs
[PIC_OFFSET_TABLE_REGNUM
])
1604 df_uses_record (dflow
, ®no_reg_rtx
[PIC_OFFSET_TABLE_REGNUM
],
1605 DF_REF_REG_USE
, bb
, NULL
,
1608 /* The all-important stack pointer must always be live. */
1609 df_uses_record (dflow
, ®no_reg_rtx
[STACK_POINTER_REGNUM
],
1610 DF_REF_REG_USE
, bb
, NULL
, DF_REF_ARTIFICIAL
);
1615 /* Record all the refs in the basic blocks specified by BLOCKS. */
/* NOTE(review): the return-type line, braces, and the bitmap_iterator
   declaration (`bi') were lost by the extractor; code left byte-identical.
   Iterates the BLOCKS bitmap, scanning each block's refs, then handles
   the exit block specially.  */
1618 df_refs_record (struct dataflow
*dflow
, bitmap blocks
)
1620 unsigned int bb_index
;
1623 EXECUTE_IF_SET_IN_BITMAP (blocks
, 0, bb_index
, bi
)
1625 basic_block bb
= BASIC_BLOCK (bb_index
);
1626 df_bb_refs_record (dflow
, bb
);
/* The exit block's uses are recorded separately when requested.  */
1629 if (bitmap_bit_p (blocks
, EXIT_BLOCK
))
1630 df_record_exit_block_uses (dflow
);
1634 /*----------------------------------------------------------------------------
1635 Specialized hard register scanning functions.
1636 ----------------------------------------------------------------------------*/
1638 /* Mark a register in SET. Hard registers in large modes get all
1639 of their component registers set as well. */
/* NOTE(review): garbled extraction -- the return-type line, braces, and
   presumably a loop header (e.g. `while (--n > 0)') before the final
   bitmap_set_bit were lost, so the multi-register marking described by
   the comment above is only partially visible.  Code left byte-identical.  */
1642 df_mark_reg (rtx reg
, void *vset
)
1644 bitmap set
= (bitmap
) vset
;
1645 int regno
= REGNO (reg
);
/* BLKmode has no well-defined register count; reject it outright.  */
1647 gcc_assert (GET_MODE (reg
) != BLKmode
);
1649 bitmap_set_bit (set
, regno
);
/* Hard registers: also mark the component registers of wide modes.  */
1650 if (regno
< FIRST_PSEUDO_REGISTER
)
1652 int n
= hard_regno_nregs
[regno
][GET_MODE (reg
)];
1654 bitmap_set_bit (set
, regno
+ n
);
1658 /* Record the set of hard registers that are used in the exit block. */
/* NOTE(review): garbled extraction -- numeric prefixes are original line
   numbers, "®" is mojibake for "&reg", and local declarations (i, bi),
   braces, an early `return', and several #endif lines were lost.  Code
   left byte-identical; comments annotate the visible logic only.  */
1661 df_record_exit_block_uses (struct dataflow
*dflow
)
1665 struct df
*df
= dflow
->df
;
/* Start from an empty exit-block use set.  */
1667 bitmap_clear (df
->exit_block_uses
);
/* Without hard-register tracking there is nothing to record
   (presumably an early return followed; the statement was lost).  */
1669 if (! (df
->flags
& DF_HARD_REGS
))
1672 /* If exiting needs the right stack value, consider the stack
1673 pointer live at the end of the function. */
1674 if ((HAVE_epilogue
&& epilogue_completed
)
1675 || ! EXIT_IGNORE_STACK
1676 || (! FRAME_POINTER_REQUIRED
1677 && ! current_function_calls_alloca
1678 && flag_omit_frame_pointer
)
1679 || current_function_sp_is_unchanging
)
1681 bitmap_set_bit (df
->exit_block_uses
, STACK_POINTER_REGNUM
);
1684 /* Mark the frame pointer if needed at the end of the function.
1685 If we end up eliminating it, it will be removed from the live
1686 list of each basic block by reload. */
1688 if (! reload_completed
|| frame_pointer_needed
)
1690 bitmap_set_bit (df
->exit_block_uses
, FRAME_POINTER_REGNUM
);
1691 #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
1692 /* If they are different, also mark the hard frame pointer as live. */
1693 if (! LOCAL_REGNO (HARD_FRAME_POINTER_REGNUM
))
1694 bitmap_set_bit (df
->exit_block_uses
, HARD_FRAME_POINTER_REGNUM
);
1698 #ifndef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
1699 /* Many architectures have a GP register even without flag_pic.
1700 Assume the pic register is not in use, or will be handled by
1701 other means, if it is not fixed. */
1702 if ((unsigned) PIC_OFFSET_TABLE_REGNUM
!= INVALID_REGNUM
1703 && fixed_regs
[PIC_OFFSET_TABLE_REGNUM
])
1704 bitmap_set_bit (df
->exit_block_uses
, PIC_OFFSET_TABLE_REGNUM
);
1707 /* Mark all global registers, and all registers used by the
1708 epilogue as being live at the end of the function since they
1709 may be referenced by our caller. */
1710 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
1711 if (global_regs
[i
] || EPILOGUE_USES (i
))
1712 bitmap_set_bit (df
->exit_block_uses
, i
);
1714 if (HAVE_epilogue
&& epilogue_completed
)
1716 /* Mark all call-saved registers that we actually used. */
1717 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
1718 if (regs_ever_live
[i
] && ! LOCAL_REGNO (i
)
1719 && ! TEST_HARD_REG_BIT (regs_invalidated_by_call
, i
))
1720 bitmap_set_bit (df
->exit_block_uses
, i
);
1723 #ifdef EH_RETURN_DATA_REGNO
1724 /* Mark the registers that will contain data for the handler. */
1725 if (reload_completed
&& current_function_calls_eh_return
)
1728 unsigned regno
= EH_RETURN_DATA_REGNO (i
);
1729 if (regno
== INVALID_REGNUM
)
1731 bitmap_set_bit (df
->exit_block_uses
, regno
);
1735 #ifdef EH_RETURN_STACKADJ_RTX
1736 if ((! HAVE_epilogue
|| ! epilogue_completed
)
1737 && current_function_calls_eh_return
)
1739 rtx tmp
= EH_RETURN_STACKADJ_RTX
;
1740 if (tmp
&& REG_P (tmp
))
1741 df_mark_reg (tmp
, df
->exit_block_uses
);
1745 #ifdef EH_RETURN_HANDLER_RTX
1746 if ((! HAVE_epilogue
|| ! epilogue_completed
)
1747 && current_function_calls_eh_return
)
1749 rtx tmp
= EH_RETURN_HANDLER_RTX
;
1750 if (tmp
&& REG_P (tmp
))
1751 df_mark_reg (tmp
, df
->exit_block_uses
);
1755 /* Mark function return value. */
1756 diddle_return_value (df_mark_reg
, (void*) df
->exit_block_uses
);
/* Finally, turn every bit in exit_block_uses into an artificial use
   reference attached to the exit block.  */
1758 if (df
->flags
& DF_HARD_REGS
)
1759 EXECUTE_IF_SET_IN_BITMAP (df
->exit_block_uses
, 0, i
, bi
)
1760 df_uses_record (dflow
, ®no_reg_rtx
[i
],
1761 DF_REF_REG_USE
, EXIT_BLOCK_PTR
, NULL
,
/* One-shot guard: set once the platform-specific structures below have
   been initialized, so repeated initialization calls become no-ops.  */
1765 static bool initialized
= false;
1767 /* Initialize some platform specific structures. */
1770 df_hard_reg_init (void)
1773 #ifdef ELIMINABLE_REGS
1774 static const struct {const int from
, to
; } eliminables
[] = ELIMINABLE_REGS
;
1776 /* After reload, some ports add certain bits to regs_ever_live so
1777 this cannot be reset. */
1779 if (!reload_completed
)
1780 memset (regs_ever_live
, 0, sizeof (regs_ever_live
));
1785 bitmap_obstack_initialize (&persistent_obstack
);
1787 /* Record which registers will be eliminated. We use this in
1789 CLEAR_HARD_REG_SET (elim_reg_set
);
1791 #ifdef ELIMINABLE_REGS
1792 for (i
= 0; i
< (int) ARRAY_SIZE (eliminables
); i
++)
1793 SET_HARD_REG_BIT (elim_reg_set
, eliminables
[i
].from
);
1795 SET_HARD_REG_BIT (elim_reg_set
, FRAME_POINTER_REGNUM
);
1798 df_invalidated_by_call
= BITMAP_ALLOC (&persistent_obstack
);
1800 /* Inconveniently, this is only readily available in hard reg set
1802 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; ++i
)
1803 if (TEST_HARD_REG_BIT (regs_invalidated_by_call
, i
))
1804 bitmap_set_bit (df_invalidated_by_call
, i
);
1806 df_all_hard_regs
= BITMAP_ALLOC (&persistent_obstack
);
1808 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
1809 bitmap_set_bit (df_all_hard_regs
, i
);