gcc/df-core.c
1 /* Allocation for dataflow support routines.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008 Free Software Foundation, Inc.
4 Originally contributed by Michael P. Hayes
5 (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
6 Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
7 and Kenneth Zadeck (zadeck@naturalbridge.com).
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
24
25 /*
26 OVERVIEW:
27
28 The files in this collection (df*.c,df.h) provide a general framework
29 for solving dataflow problems. The global dataflow is performed using
30 a good implementation of iterative dataflow analysis.
31
32 The file df-problems.c provides problem instances for the most common
33 dataflow problems: reaching defs, upward exposed uses, live variables,
34 uninitialized variables, def-use chains, and use-def chains. However,
35 the interface allows other dataflow problems to be defined as well.
36
37 Dataflow analysis is available in most of the rtl backend (the parts
38 between pass_df_initialize and pass_df_finish). It is quite likely
39 that these boundaries will be expanded in the future. The only
40 requirement is that there be a correct control flow graph.
41
42 There are three variations of the live variable problem that are
43 available whenever dataflow is available. The LR problem finds the
44 areas that can reach a use of a variable; the UR problem finds the
45 areas that can be reached from a definition of a variable. The LIVE
46 problem finds the intersection of these two areas.
47
48 There are several optional problems. These can be enabled when they
49 are needed and disabled when they are not needed.
50
51 Dataflow problems are generally solved in three layers. The bottom
52 layer is called scanning where a data structure is built for each rtl
53 insn that describes the set of defs and uses of that insn. Scanning
54 is generally kept up to date, i.e. as an insn changes, the scanned
55 version of that insn changes also.  There are various mechanisms for
56 making this happen, and these are described in the INCREMENTAL SCANNING
57 section.
58
59 In the middle layer, basic blocks are scanned to produce transfer
60 functions which describe the effects of that block on the global
61 dataflow solution.  The transfer functions are only rebuilt if
62 some instruction within the block has changed.
63
64 The top layer is the dataflow solution itself. The dataflow solution
65 is computed by using an efficient iterative solver and the transfer
66 functions. The dataflow solution must be recomputed whenever the
67 control flow changes or one of the transfer functions changes.
68
69
70 USAGE:
71
72 Here is an example of using the dataflow routines.
73
74 df_[chain,live,note,rd]_add_problem (flags);
75
76 df_set_blocks (blocks);
77
78 df_analyze ();
79
80 df_dump (stderr);
81
82 df_finish_pass (false);
83
84 DF_[chain,live,note,rd]_ADD_PROBLEM adds a problem, defined by an
85 instance of struct df_problem, to the set of problems solved in this
86 instance of df. All calls to add a problem for a given instance of df
87 must occur before the first call to DF_ANALYZE.
88
89 Problems can be dependent on other problems. For instance, solving
90 def-use or use-def chains is dependent on solving reaching
91 definitions. As long as these dependencies are listed in the problem
92 definition, the order of adding the problems is not material.
93 Otherwise, the problems will be solved in the order of calls to
94 df_add_problem. Note that it is not necessary to have a problem. In
95 that case, df will just be used to do the scanning.
96
97
98
99 DF_SET_BLOCKS is an optional call used to define a region of the
100 function on which the analysis will be performed. The normal case is
101 to analyze the entire function and no call to df_set_blocks is made.
102 DF_SET_BLOCKS only affects which blocks are considered when computing
103 the transfer functions and final solution. The insn level information
104 is always kept up to date.
105
106 When a subset is given, the analysis behaves as if the function only
107 contains those blocks and any edges that occur directly between the
108 blocks in the set. Care should be taken to call df_set_blocks right
109 before the call to analyze in order to eliminate the possibility that
110 optimizations that reorder blocks invalidate the bitvector.
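
   For instance, a sketch of restricting the analysis to a single block
   BB (the choice of block is hypothetical):

     bitmap blocks = BITMAP_ALLOC (&df_bitmap_obstack);
     bitmap_set_bit (blocks, bb->index);
     df_set_blocks (blocks);
     df_analyze ();
     BITMAP_FREE (blocks);

   df_set_blocks copies the bitmap, so it can be freed afterwards.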
111
112 DF_ANALYZE causes all of the defined problems to be (re)solved. When
113 DF_ANALYZE completes, the IN and OUT sets for each basic block
114 contain the computed information.  The DF_*_BB_INFO macros can be used
115 to access these bitvectors.  All deferred rescans are done before
116 the transfer functions are recomputed.
117
118 DF_DUMP can then be called to dump the information produced to some
119 file. This calls DF_DUMP_START, to print the information that is not
120 basic block specific, and then calls DF_DUMP_TOP and DF_DUMP_BOTTOM
121 for each block to print the basic block specific information.  These parts
122 can all be called separately as part of a larger dump function.
123
124
125 DF_FINISH_PASS causes df_remove_problem to be called on all of the
126 optional problems. It also causes any insns whose scanning has been
127 deferred to be rescanned, and it clears all of the changeable flags.
128 Setting the pass manager TODO_df_finish flag causes this function to
129 be run. However, the pass manager will call df_finish_pass AFTER the
130 pass dumping has been done, so if you want to see the results of the
131 optional problems in the pass dumps, use the TODO flag rather than
132 calling the function yourself.
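
   For example, a pass would normally request this by including
   TODO_df_finish in the todo_flags_finish field of its pass structure
   (a sketch of just that field):

     TODO_df_finish,                     /* todo_flags_finish */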
133
134 INCREMENTAL SCANNING
135
136 There are four ways of doing the incremental scanning:
137
138 1) Immediate rescanning - Calls to df_insn_rescan, df_notes_rescan,
139 df_bb_delete, df_insn_change_bb have been added to most of
140 the low level service functions that maintain the cfg and change
141 rtl.  Calling any of these routines may cause some number of insns
142 to be rescanned.
143
144 For most modern rtl passes, this is certainly the easiest way to
145 manage rescanning the insns. This technique also has the advantage
146 that the scanning information is always correct and can be relied
147 upon even after changes have been made to the instructions. This
148 technique is contraindicated in several cases:
149
150 a) If def-use chains OR use-def chains (but not both) are built,
151 using this is SIMPLY WRONG. The problem is that when a ref is
152 deleted that is the target of an edge, there is not enough
153 information to efficiently find the source of the edge and
154 delete the edge. This leaves a dangling reference that may
155 cause problems.
156
157 b) If def-use chains AND use-def chains are built, this may
158 produce unexpected results. The problem is that the incremental
159 scanning of an insn does not know how to repair the chains that
160 point into an insn when the insn changes. So the incremental
161 scanning just deletes the chains that enter and exit the insn
162 being changed. The dangling reference issue in (a) is not a
163 problem here, but if the pass is depending on the chains being
164 maintained after insns have been modified, this technique will
165 not do the correct thing.
166
167 c) If the pass modifies insns several times, this incremental
168 updating may be expensive.
169
170 d) If the pass modifies all of the insns, as does register
171 allocation, it is simply better to rescan the entire function.
172
173 e) If the pass uses either non-standard or ancient techniques to
174 modify insns, automatic detection of the insns that need to be
175 rescanned may be impractical. Cse and regrename fall into this
176 category.
177
178 2) Deferred rescanning - Calls to df_insn_rescan, df_notes_rescan, and
179 df_insn_delete do not immediately change the insn but instead make
180 a note that the insn needs to be rescanned. The next call to
181 df_analyze, df_finish_pass, or df_process_deferred_rescans will
182 cause all of the pending rescans to be processed.
183
184 This is the technique of choice if either 1a, 1b, or 1c are issues
185 in the pass. In the case of 1a or 1b, a call to df_remove_problem
186 (df_chain) should be made before the next call to df_analyze or
187 df_process_deferred_rescans.
188
189 To enable this mode, call df_set_flags (DF_DEFER_INSN_RESCAN).
190 (This mode can be cleared by calling df_clear_flags
191 (DF_DEFER_INSN_RESCAN) but this does not cause the deferred insns to
192 be rescanned.)
193
194 3) Total rescanning - In this mode the rescanning is disabled.
195 However, the df information associated with a deleted insn is deleted
196 at the time the insn is deleted. At the end of the pass, a call
197 must be made to df_insn_rescan_all. This method is used by the
198 register allocator since it generally changes each insn multiple
199 times (once for each ref) and does not need to make use of the
200 updated scanning information.
201
202 It is also currently used by two older passes (cse, and regrename)
203 which change insns in hard to track ways. It is hoped that this
204 will be fixed soon since it is expensive to rescan all of the
205 insns when only a small number of them have really changed.
206
207 4) Do it yourself - In this mechanism, the pass updates the insns
208 itself using the low level df primitives. Currently no pass does
209 this, but it has the advantage that it is quite efficient given
210 that the pass generally has exact knowledge of what it is changing.
211
212 DATA STRUCTURES
213
214 Scanning allocates a `struct df_ref' data structure (ref) for every
215 register reference (def or use), and this records the insn and bb
216 the ref is found within.  The refs are linked together in
217 chains of uses and defs for each insn and for each register. Each ref
218 also has a chain field that links all the use refs for a def or all
219 the def refs for a use. This is used to create use-def or def-use
220 chains.
221
222 Different optimizations have different needs. Ultimately, only
223 register allocation and schedulers should be using the bitmaps
224 produced for the live register and uninitialized register problems.
225 The rest of the backend should be upgraded to use and maintain
226 the linked information such as def-use or use-def chains.
227
228
229 PHILOSOPHY:
230
231 While incremental bitmaps are not worthwhile to maintain, incremental
232 chains may be perfectly reasonable. The fastest way to build chains
233 from scratch or after significant modifications is to build reaching
234 definitions (RD) and build the chains from this.
235
236 However, general algorithms for maintaining use-def or def-use chains
237 are not practical.  The amount of work to recompute any
238 chain after an arbitrary change is large. However, with a modest
239 amount of work it is generally possible to have the application that
240 uses the chains keep them up to date. The high level knowledge of
241 what is really happening is essential to crafting efficient
242 incremental algorithms.
243
244 As for the bit vector problems, there is no interface to give a set of
245 blocks over which to resolve the iteration.  In general, restarting a
246 dataflow iteration is difficult and expensive. Again, the best way to
247 keep the dataflow information up to date (if this is really what is
248 needed) is to formulate a problem specific solution.
249
250 There are fine grained calls for creating and deleting references from
251 instructions in df-scan.c. However, these are not currently connected
252 to the engine that resolves the dataflow equations.
253
254
255 DATA STRUCTURES:
256
257 The basic object is a DF_REF (reference) and this may either be a
258 DEF (definition) or a USE of a register.
259
260 These are linked into a variety of lists; namely reg-def, reg-use,
261 insn-def, insn-use, def-use, and use-def lists. For example, the
262 reg-def lists contain all the locations that define a given register
263 while the insn-use lists contain all the locations that use a
264 register.
265
266 Note that the reg-def and reg-use chains are generally short for
267 pseudos and long for the hard registers.
268
269 ACCESSING INSNS:
270
271 1) The df insn information is kept in an array of DF_INSN_INFO objects.
272 The array is indexed by insn uid, and every DF_REF points to the
273 DF_INSN_INFO object of the insn that contains the reference.
274
275 2) Each insn has three sets of refs, which are linked into one of three
276 lists: The insn's defs list (accessed by the DF_INSN_INFO_DEFS,
277 DF_INSN_DEFS, or DF_INSN_UID_DEFS macros), the insn's uses list
278 (accessed by the DF_INSN_INFO_USES, DF_INSN_USES, or
279 DF_INSN_UID_USES macros) or the insn's eq_uses list (accessed by the
280 DF_INSN_INFO_EQ_USES, DF_INSN_EQ_USES or DF_INSN_UID_EQ_USES macros).
281 The latter list is the list of references in REG_EQUAL or REG_EQUIV
282 notes. These macros produce a ref (or NULL), the rest of the list
283 can be obtained by traversal of the NEXT_REF field (accessed by the
284 DF_REF_NEXT_REF macro.) There is no significance to the ordering of
285 the uses or refs in an instruction.
286
287 3) Each insn has a logical uid field (LUID) which is stored in the
288 DF_INSN_INFO object for the insn. The LUID field is accessed by
289 the DF_INSN_INFO_LUID, DF_INSN_LUID, and DF_INSN_UID_LUID macros.
290 When properly set, the LUID is an integer that numbers each insn in
291 the basic block, in order from the start of the block.
292 The numbers are only correct after a call to df_analyze. They will
293 rot after insns are added, deleted, or moved around.
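
   For example (a sketch), after df_analyze has been called, a test for
   whether insn A appears before insn B in the same basic block can be
   written as:

     if (DF_INSN_LUID (a) < DF_INSN_LUID (b))
       ... A comes before B within the block ...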
294
295 ACCESSING REFS:
296
297 There are 4 ways to obtain access to refs:
298
299 1) References are divided into two categories, REAL and ARTIFICIAL.
300
301 REAL refs are associated with instructions.
302
303 ARTIFICIAL refs are associated with basic blocks. The heads of
304 these lists can be accessed by calling df_get_artificial_defs or
305 df_get_artificial_uses for the particular basic block.
306
307 Artificial defs and uses occur both at the beginning and ends of blocks.
308
309 For blocks that are at the destination of eh edges, the
310 artificial uses and defs occur at the beginning.  The defs relate
311 to the registers specified in EH_RETURN_DATA_REGNO and the uses
312 relate to the registers specified in EH_USES.  Logically these
313 defs and uses should really occur along the eh edge, but there is
314 no convenient way to do this.  Artificial refs that occur at the
315 beginning of the block have the DF_REF_AT_TOP flag set.
316
317 Artificial uses occur at the end of all blocks. These arise from
318 the hard registers that are always live, such as the stack
319 register and are put there to keep the code from forgetting about
320 them.
321
322 Artificial defs occur at the end of the entry block. These arise
323 from registers that are live at entry to the function.
324
325 2) There are three types of refs: defs, uses and eq_uses. (Eq_uses are
326 uses that appear inside a REG_EQUAL or REG_EQUIV note.)
327
328 All of the eq_uses, uses and defs associated with each pseudo or
329 hard register may be linked in a bidirectional chain. These are
330 called reg-use or reg-def chains.  If the changeable flag
331 DF_EQ_NOTES is set when the chains are built, the eq_uses will be
332 treated like uses. If it is not set they are ignored.
333
334 The first use, eq_use or def for a register can be obtained using
335 the DF_REG_USE_CHAIN, DF_REG_EQ_USE_CHAIN or DF_REG_DEF_CHAIN
336 macros. Subsequent uses for the same regno can be obtained by
337 following the next_reg field of the ref. The number of elements in
338 each of the chains can be found by using the DF_REG_USE_COUNT,
339 DF_REG_EQ_USE_COUNT or DF_REG_DEF_COUNT macros.
340
341 In previous versions of this code, these chains were ordered. It
342 has not been practical to continue this practice.
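
   As a sketch (REGNO and the use made of each def are up to the
   caller), walking every def of a given register looks like:

     struct df_ref *def;
     for (def = DF_REG_DEF_CHAIN (regno); def; def = DF_REF_NEXT_REG (def))
       ... examine DF_REF_INSN (def), DF_REF_BB (def), ...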
343
344 3) If def-use or use-def chains are built, these can be traversed to
345 get to other refs. If the flag DF_EQ_NOTES has been set, the chains
346 include the eq_uses. Otherwise these are ignored when building the
347 chains.
348
349 4) An array of all of the uses (and an array of all of the defs) can
350 be built. These arrays are indexed by the value in the id
351 structure. These arrays are only lazily kept up to date, and that
352 process can be expensive. To have these arrays built, call
353 df_reorganize_defs or df_reorganize_uses. If the flag DF_EQ_NOTES
354 has been set the array will contain the eq_uses. Otherwise these
355 are ignored when building the array and assigning the ids. Note
356 that the values in the id field of a ref may change across calls to
357 df_analyze or df_reorganize_defs or df_reorganize_uses.
358
359 If the only use of this array is to find all of the refs, it is
360 better to traverse all of the registers and then traverse all of
361 reg-use or reg-def chains.
362
363 NOTES:
364
365 Embedded addressing side-effects, such as POST_INC or PRE_INC, generate
366 both a use and a def. These are both marked read/write to show that they
367 are dependent. For example, (set (reg 40) (mem (post_inc (reg 42))))
368 will generate a use of reg 42 followed by a def of reg 42 (both marked
369 read/write). Similarly, (set (reg 40) (mem (pre_dec (reg 41))))
370 generates a use of reg 41 then a def of reg 41 (both marked read/write),
371 even though reg 41 is decremented before it is used for the memory
372 address in this second example.
373
374 A set to a REG inside a ZERO_EXTRACT, or a set to a non-paradoxical SUBREG
375 for which the number of word_mode units covered by the outer mode is
376 smaller than that covered by the inner mode, invokes a read-modify-write
377 operation. We generate both a use and a def and again mark them
378 read/write.
379
380 Paradoxical subreg writes do not leave a trace of the old content, so they
381 are write-only operations.
382 */
383
384
385 #include "config.h"
386 #include "system.h"
387 #include "coretypes.h"
388 #include "tm.h"
389 #include "rtl.h"
390 #include "tm_p.h"
391 #include "insn-config.h"
392 #include "recog.h"
393 #include "function.h"
394 #include "regs.h"
395 #include "output.h"
396 #include "alloc-pool.h"
397 #include "flags.h"
398 #include "hard-reg-set.h"
399 #include "basic-block.h"
400 #include "sbitmap.h"
401 #include "bitmap.h"
402 #include "timevar.h"
403 #include "df.h"
404 #include "tree-pass.h"
405 #include "params.h"
406
407 static void *df_get_bb_info (struct dataflow *, unsigned int);
408 static void df_set_bb_info (struct dataflow *, unsigned int, void *);
409 #ifdef DF_DEBUG_CFG
410 static void df_set_clean_cfg (void);
411 #endif
412
413 /* An obstack for bitmaps not related to specific dataflow problems.
414 This obstack should e.g. be used for bitmaps with a short life time
415 such as temporary bitmaps. */
416
417 bitmap_obstack df_bitmap_obstack;
418
419
420 /*----------------------------------------------------------------------------
421 Functions to create, destroy and manipulate an instance of df.
422 ----------------------------------------------------------------------------*/
423
424 struct df *df;
425
426 /* Add PROBLEM (and any dependent problems) to the DF instance. */
427
428 void
429 df_add_problem (struct df_problem *problem)
430 {
431 struct dataflow *dflow;
432 int i;
433
434 /* First try to add the dependent problem. */
435 if (problem->dependent_problem)
436 df_add_problem (problem->dependent_problem);
437
438 /* Check to see if this problem has already been defined. If it
439 has, just return; if not, add it to the end of the
440 vector. */
441 dflow = df->problems_by_index[problem->id];
442 if (dflow)
443 return;
444
445 /* Make a new one and add it to the end. */
446 dflow = XCNEW (struct dataflow);
447 dflow->problem = problem;
448 dflow->computed = false;
449 dflow->solutions_dirty = true;
450 df->problems_by_index[dflow->problem->id] = dflow;
451
452 /* Keep the defined problems ordered by index. This solves the
453 problem that RI will use the information from UREC if UREC has
454 been defined, or from LIVE if LIVE is defined and otherwise LR.
455 However, for this to work, the computation of RI must be pushed
456 after whichever of those problems is defined, but we do not
457 require any of those except for LR to have actually been
458 defined. */
459 df->num_problems_defined++;
460 for (i = df->num_problems_defined - 2; i >= 0; i--)
461 {
462 if (problem->id < df->problems_in_order[i]->problem->id)
463 df->problems_in_order[i+1] = df->problems_in_order[i];
464 else
465 {
466 df->problems_in_order[i+1] = dflow;
467 return;
468 }
469 }
470 df->problems_in_order[0] = dflow;
471 }
472
473
474 /* Set the CHANGEABLE_FLAGS in the df instance.  The old flags are
475 returned. If a flag is not allowed to be changed this will fail if
476 checking is enabled. */
477 enum df_changeable_flags
478 df_set_flags (enum df_changeable_flags changeable_flags)
479 {
480 enum df_changeable_flags old_flags = df->changeable_flags;
481 df->changeable_flags |= changeable_flags;
482 return old_flags;
483 }
484
485
486 /* Clear the CHANGEABLE_FLAGS in the df instance.  The old flags are
487 returned. If a flag is not allowed to be changed this will fail if
488 checking is enabled. */
489 enum df_changeable_flags
490 df_clear_flags (enum df_changeable_flags changeable_flags)
491 {
492 enum df_changeable_flags old_flags = df->changeable_flags;
493 df->changeable_flags &= ~changeable_flags;
494 return old_flags;
495 }
496
497
498 /* Set the blocks that are to be considered for analysis. If this is
499 not called or is called with null, the entire function is
500 analyzed. */
501
502 void
503 df_set_blocks (bitmap blocks)
504 {
505 if (blocks)
506 {
507 if (dump_file)
508 bitmap_print (dump_file, blocks, "setting blocks to analyze ", "\n");
509 if (df->blocks_to_analyze)
510 {
511 /* This block is called to change the focus from one subset
512 to another. */
513 int p;
514 bitmap diff = BITMAP_ALLOC (&df_bitmap_obstack);
515 bitmap_and_compl (diff, df->blocks_to_analyze, blocks);
516 for (p = 0; p < df->num_problems_defined; p++)
517 {
518 struct dataflow *dflow = df->problems_in_order[p];
519 if (dflow->optional_p && dflow->problem->reset_fun)
520 dflow->problem->reset_fun (df->blocks_to_analyze);
521 else if (dflow->problem->free_blocks_on_set_blocks)
522 {
523 bitmap_iterator bi;
524 unsigned int bb_index;
525
526 EXECUTE_IF_SET_IN_BITMAP (diff, 0, bb_index, bi)
527 {
528 basic_block bb = BASIC_BLOCK (bb_index);
529 if (bb)
530 {
531 void *bb_info = df_get_bb_info (dflow, bb_index);
532 if (bb_info)
533 {
534 dflow->problem->free_bb_fun (bb, bb_info);
535 df_set_bb_info (dflow, bb_index, NULL);
536 }
537 }
538 }
539 }
540 }
541
542 BITMAP_FREE (diff);
543 }
544 else
545 {
546 /* This block of code is executed to change the focus from
547 the entire function to a subset. */
548 bitmap blocks_to_reset = NULL;
549 int p;
550 for (p = 0; p < df->num_problems_defined; p++)
551 {
552 struct dataflow *dflow = df->problems_in_order[p];
553 if (dflow->optional_p && dflow->problem->reset_fun)
554 {
555 if (!blocks_to_reset)
556 {
557 basic_block bb;
558 blocks_to_reset =
559 BITMAP_ALLOC (&df_bitmap_obstack);
560 FOR_ALL_BB(bb)
561 {
562 bitmap_set_bit (blocks_to_reset, bb->index);
563 }
564 }
565 dflow->problem->reset_fun (blocks_to_reset);
566 }
567 }
568 if (blocks_to_reset)
569 BITMAP_FREE (blocks_to_reset);
570
571 df->blocks_to_analyze = BITMAP_ALLOC (&df_bitmap_obstack);
572 }
573 bitmap_copy (df->blocks_to_analyze, blocks);
574 df->analyze_subset = true;
575 }
576 else
577 {
578 /* This block is executed to reset the focus to the entire
579 function. */
580 if (dump_file)
581 fprintf (dump_file, "clearing blocks_to_analyze\n");
582 if (df->blocks_to_analyze)
583 {
584 BITMAP_FREE (df->blocks_to_analyze);
585 df->blocks_to_analyze = NULL;
586 }
587 df->analyze_subset = false;
588 }
589
590 /* Setting the blocks causes the refs to be unorganized since only
591 the refs in the blocks are seen. */
592 df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE);
593 df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE);
594 df_mark_solutions_dirty ();
595 }
596
597
598 /* Delete a DFLOW problem (and any problems that depend on this
599 problem). */
600
601 void
602 df_remove_problem (struct dataflow *dflow)
603 {
604 struct df_problem *problem;
605 int i;
606
607 if (!dflow)
608 return;
609
610 problem = dflow->problem;
611 gcc_assert (problem->remove_problem_fun);
612
613 /* Delete any problems that depended on this problem first. */
614 for (i = 0; i < df->num_problems_defined; i++)
615 if (df->problems_in_order[i]->problem->dependent_problem == problem)
616 df_remove_problem (df->problems_in_order[i]);
617
618 /* Now remove this problem. */
619 for (i = 0; i < df->num_problems_defined; i++)
620 if (df->problems_in_order[i] == dflow)
621 {
622 int j;
623 for (j = i + 1; j < df->num_problems_defined; j++)
624 df->problems_in_order[j-1] = df->problems_in_order[j];
625 df->problems_in_order[j] = NULL;
626 df->num_problems_defined--;
627 break;
628 }
629
630 (problem->remove_problem_fun) ();
631 df->problems_by_index[problem->id] = NULL;
632 }
633
634
635 /* Remove all of the problems that are not permanent. Scanning, LR
636 and (at -O2 or higher) LIVE are permanent, the rest are removable.
637 Also clear all of the changeable_flags. */
638
639 void
640 df_finish_pass (bool verify ATTRIBUTE_UNUSED)
641 {
642 int i;
643 int removed = 0;
644
645 #ifdef ENABLE_DF_CHECKING
646 enum df_changeable_flags saved_flags;
647 #endif
648
649 if (!df)
650 return;
651
652 df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE);
653 df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE);
654
655 #ifdef ENABLE_DF_CHECKING
656 saved_flags = df->changeable_flags;
657 #endif
658
659 for (i = 0; i < df->num_problems_defined; i++)
660 {
661 struct dataflow *dflow = df->problems_in_order[i];
662 struct df_problem *problem = dflow->problem;
663
664 if (dflow->optional_p)
665 {
666 gcc_assert (problem->remove_problem_fun);
667 (problem->remove_problem_fun) ();
668 df->problems_in_order[i] = NULL;
669 df->problems_by_index[problem->id] = NULL;
670 removed++;
671 }
672 }
673 df->num_problems_defined -= removed;
674
675 /* Clear all of the flags. */
676 df->changeable_flags = 0;
677 df_process_deferred_rescans ();
678
679 /* Set the focus back to the whole function. */
680 if (df->blocks_to_analyze)
681 {
682 BITMAP_FREE (df->blocks_to_analyze);
683 df->blocks_to_analyze = NULL;
684 df_mark_solutions_dirty ();
685 df->analyze_subset = false;
686 }
687
688 #ifdef ENABLE_DF_CHECKING
689 /* Verification will fail if DF_NO_INSN_RESCAN is set. */
690 if (!(saved_flags & DF_NO_INSN_RESCAN))
691 {
692 df_lr_verify_transfer_functions ();
693 if (df_live)
694 df_live_verify_transfer_functions ();
695 }
696
697 #ifdef DF_DEBUG_CFG
698 df_set_clean_cfg ();
699 #endif
700 #endif
701
702 #ifdef ENABLE_CHECKING
703 if (verify)
704 df->changeable_flags |= DF_VERIFY_SCHEDULED;
705 #endif
706 }
707
708
709 /* Set up the dataflow instance for the entire back end. */
710
711 static unsigned int
712 rest_of_handle_df_initialize (void)
713 {
714 gcc_assert (!df);
715 df = XCNEW (struct df);
716 df->changeable_flags = 0;
717
718 bitmap_obstack_initialize (&df_bitmap_obstack);
719
720 /* Set this to a conservative value. Stack_ptr_mod will compute it
721 correctly later. */
722 current_function_sp_is_unchanging = 0;
723
724 df_scan_add_problem ();
725 df_scan_alloc (NULL);
726
727 /* These problems are permanent. */
728 df_lr_add_problem ();
729 if (optimize > 1)
730 df_live_add_problem ();
731
732 df->postorder = XNEWVEC (int, last_basic_block);
733 df->postorder_inverted = XNEWVEC (int, last_basic_block);
734 df->n_blocks = post_order_compute (df->postorder, true, true);
735 df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted);
736 gcc_assert (df->n_blocks == df->n_blocks_inverted);
737
738 df->hard_regs_live_count = XNEWVEC (unsigned int, FIRST_PSEUDO_REGISTER);
739 memset (df->hard_regs_live_count, 0,
740 sizeof (unsigned int) * FIRST_PSEUDO_REGISTER);
741
742 df_hard_reg_init ();
743 /* After reload, some ports add certain bits to regs_ever_live so
744 this cannot be reset. */
745 df_compute_regs_ever_live (true);
746 df_scan_blocks ();
747 df_compute_regs_ever_live (false);
748 return 0;
749 }
750
751
752 static bool
753 gate_opt (void)
754 {
755 return optimize > 0;
756 }
757
758
759 struct rtl_opt_pass pass_df_initialize_opt =
760 {
761 {
762 RTL_PASS,
763 "dfinit", /* name */
764 gate_opt, /* gate */
765 rest_of_handle_df_initialize, /* execute */
766 NULL, /* sub */
767 NULL, /* next */
768 0, /* static_pass_number */
769 0, /* tv_id */
770 0, /* properties_required */
771 0, /* properties_provided */
772 0, /* properties_destroyed */
773 0, /* todo_flags_start */
774 0 /* todo_flags_finish */
775 }
776 };
777
778
779 static bool
780 gate_no_opt (void)
781 {
782 return optimize == 0;
783 }
784
785
786 struct rtl_opt_pass pass_df_initialize_no_opt =
787 {
788 {
789 RTL_PASS,
790 "dfinit", /* name */
791 gate_no_opt, /* gate */
792 rest_of_handle_df_initialize, /* execute */
793 NULL, /* sub */
794 NULL, /* next */
795 0, /* static_pass_number */
796 0, /* tv_id */
797 0, /* properties_required */
798 0, /* properties_provided */
799 0, /* properties_destroyed */
800 0, /* todo_flags_start */
801 0 /* todo_flags_finish */
802 }
803 };
804
805
806 /* Free all the dataflow info and the DF structure. This should be
807 called from the df_finish macro which also NULLs the parm. */
808
809 static unsigned int
810 rest_of_handle_df_finish (void)
811 {
812 int i;
813
814 gcc_assert (df);
815
816 for (i = 0; i < df->num_problems_defined; i++)
817 {
818 struct dataflow *dflow = df->problems_in_order[i];
819 dflow->problem->free_fun ();
820 }
821
822 if (df->postorder)
823 free (df->postorder);
824 if (df->postorder_inverted)
825 free (df->postorder_inverted);
826 free (df->hard_regs_live_count);
827 free (df);
828 df = NULL;
829
830 bitmap_obstack_release (&df_bitmap_obstack);
831 return 0;
832 }
833
834
835 struct rtl_opt_pass pass_df_finish =
836 {
837 {
838 RTL_PASS,
839 "dfinish", /* name */
840 NULL, /* gate */
841 rest_of_handle_df_finish, /* execute */
842 NULL, /* sub */
843 NULL, /* next */
844 0, /* static_pass_number */
845 0, /* tv_id */
846 0, /* properties_required */
847 0, /* properties_provided */
848 0, /* properties_destroyed */
849 0, /* todo_flags_start */
850 0 /* todo_flags_finish */
851 }
852 };
853
854
855
856
857 \f
858 /*----------------------------------------------------------------------------
859 The general data flow analysis engine.
860 ----------------------------------------------------------------------------*/
861
862
863 /* Helper function for df_worklist_dataflow.
864 Propagate the dataflow forward.
865 Given a BB_INDEX, do the dataflow propagation
866 and set the bits in PENDING for its successors
867 if the out set of the dataflow has changed. */
868
869 static void
870 df_worklist_propagate_forward (struct dataflow *dataflow,
871 unsigned bb_index,
872 unsigned *bbindex_to_postorder,
873 bitmap pending,
874 sbitmap considered)
875 {
876 edge e;
877 edge_iterator ei;
878 basic_block bb = BASIC_BLOCK (bb_index);
879
880 /* Calculate <conf_op> of incoming edges. */
881 if (EDGE_COUNT (bb->preds) > 0)
882 FOR_EACH_EDGE (e, ei, bb->preds)
883 {
884 if (TEST_BIT (considered, e->src->index))
885 dataflow->problem->con_fun_n (e);
886 }
887 else if (dataflow->problem->con_fun_0)
888 dataflow->problem->con_fun_0 (bb);
889
890 if (dataflow->problem->trans_fun (bb_index))
891 {
892 /* The out set of this block has changed.
893 Propagate to the outgoing blocks. */
894 FOR_EACH_EDGE (e, ei, bb->succs)
895 {
896 unsigned ob_index = e->dest->index;
897
898 if (TEST_BIT (considered, ob_index))
899 bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
900 }
901 }
902 }
903
904
905 /* Helper function for df_worklist_dataflow.
906 Propagate the dataflow backward. */
907
908 static void
909 df_worklist_propagate_backward (struct dataflow *dataflow,
910 unsigned bb_index,
911 unsigned *bbindex_to_postorder,
912 bitmap pending,
913 sbitmap considered)
914 {
915 edge e;
916 edge_iterator ei;
917 basic_block bb = BASIC_BLOCK (bb_index);
918
919 /* Calculate <conf_op> of outgoing edges. */
920 if (EDGE_COUNT (bb->succs) > 0)
921 FOR_EACH_EDGE (e, ei, bb->succs)
922 {
923 if (TEST_BIT (considered, e->dest->index))
924 dataflow->problem->con_fun_n (e);
925 }
926 else if (dataflow->problem->con_fun_0)
927 dataflow->problem->con_fun_0 (bb);
928
929 if (dataflow->problem->trans_fun (bb_index))
930 {
931 /* The in set of this block has changed.
932 Propagate to the predecessor blocks. */
933 FOR_EACH_EDGE (e, ei, bb->preds)
934 {
935 unsigned ob_index = e->src->index;
936
937 if (TEST_BIT (considered, ob_index))
938 bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
939 }
940 }
941 }
942
943
944
945 /* Single-queue ("over-eager") worklist solver.  This will free PENDING. */
946 static void
947 df_worklist_dataflow_overeager (struct dataflow *dataflow,
948 bitmap pending,
949 sbitmap considered,
950 int *blocks_in_postorder,
951 unsigned *bbindex_to_postorder)
952 {
953 enum df_flow_dir dir = dataflow->problem->dir;
954 int count = 0;
955
956 while (!bitmap_empty_p (pending))
957 {
958 unsigned bb_index;
959 int index;
960 count++;
961
962 index = bitmap_first_set_bit (pending);
963 bitmap_clear_bit (pending, index);
964
965 bb_index = blocks_in_postorder[index];
966
967 if (dir == DF_FORWARD)
968 df_worklist_propagate_forward (dataflow, bb_index,
969 bbindex_to_postorder,
970 pending, considered);
971 else
972 df_worklist_propagate_backward (dataflow, bb_index,
973 bbindex_to_postorder,
974 pending, considered);
975 }
976
977 BITMAP_FREE (pending);
978
979 /* Dump statistics. */
980 if (dump_file)
981 fprintf (dump_file, "df_worklist_dataflow_overeager:"
982 "n_basic_blocks %d n_edges %d"
983 " count %d (%5.2g)\n",
984 n_basic_blocks, n_edges,
985 count, count / (float)n_basic_blocks);
986 }
987
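/* Double-queue worklist solver.  WORKLIST holds the blocks being
   processed in the current iteration and PENDING collects the blocks
   for the next one; the two are swapped when the current queue runs
   dry.  Like the over-eager variant, this frees PENDING (and the
   internal worklist) before returning.  */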
988 static void
989 df_worklist_dataflow_doublequeue (struct dataflow *dataflow,
990 bitmap pending,
991 sbitmap considered,
992 int *blocks_in_postorder,
993 unsigned *bbindex_to_postorder)
994 {
995 enum df_flow_dir dir = dataflow->problem->dir;
996 int dcount = 0;
997 bitmap worklist = BITMAP_ALLOC (&df_bitmap_obstack);
998
999 /* Double-queueing. Worklist is for the current iteration,
1000 and pending is for the next. */
1001 while (!bitmap_empty_p (pending))
1002 {
1003 /* Swap pending and worklist. */
1004 bitmap temp = worklist;
1005 worklist = pending;
1006 pending = temp;
1007
1008 do
1009 {
1010 int index;
1011 unsigned bb_index;
1012 dcount++;
1013
1014 index = bitmap_first_set_bit (worklist);
1015 bitmap_clear_bit (worklist, index);
1016
1017 bb_index = blocks_in_postorder[index];
1018
1019 if (dir == DF_FORWARD)
1020 df_worklist_propagate_forward (dataflow, bb_index,
1021 bbindex_to_postorder,
1022 pending, considered);
1023 else
1024 df_worklist_propagate_backward (dataflow, bb_index,
1025 bbindex_to_postorder,
1026 pending, considered);
1027 }
1028 while (!bitmap_empty_p (worklist));
1029 }
1030
1031 BITMAP_FREE (worklist);
1032 BITMAP_FREE (pending);
1033
1034 /* Dump statistics. */
1035 if (dump_file)
1036 fprintf (dump_file, "df_worklist_dataflow_doublequeue:"
1037 "n_basic_blocks %d n_edges %d"
1038 " count %d (%5.2g)\n",
1039 n_basic_blocks, n_edges,
1040 dcount, dcount / (float)n_basic_blocks);
1041 }
1042
1043 /* Worklist-based dataflow solver.  It uses a bitmap as the worklist,
1044 with the n-th bit representing the n-th block in reverse postorder.
1045 This is the so-called over-eager algorithm, which propagates
1046 changes on demand.  This algorithm may visit blocks more often than
1047 the iterative method if there are deeply nested loops.
1048 The worklist algorithm works better than the iterative algorithm
1049 for CFGs with no nested loops.
1050 In practice, measurement shows that the worklist algorithm beats
1051 the iterative algorithm by some margin overall.
1052 Note that this is slightly different from the traditional textbook
1053 worklist solver, in that the worklist is effectively sorted by the
1054 reverse postorder.  For CFGs with no nested loops, this is optimal.
1055
1056 While the over-eager algorithm works well for typical inputs, it
1057 can degenerate into excessive iteration on CFGs with deep loop nests
1058 and unstructured loops.  To cap the excessive iteration in such cases,
1059 we switch to double-queueing when the original algorithm seems to
1060 be degenerating in that way.
1061 */
1062
1063 void
1064 df_worklist_dataflow (struct dataflow *dataflow,
1065 bitmap blocks_to_consider,
1066 int *blocks_in_postorder,
1067 int n_blocks)
1068 {
1069 bitmap pending = BITMAP_ALLOC (&df_bitmap_obstack);
1070 sbitmap considered = sbitmap_alloc (last_basic_block);
1071 bitmap_iterator bi;
1072 unsigned int *bbindex_to_postorder;
1073 int i;
1074 unsigned int index;
1075 enum df_flow_dir dir = dataflow->problem->dir;
1076
1077 gcc_assert (dir != DF_NONE);
1078
1079 /* BBINDEX_TO_POSTORDER maps the bb->index to the reverse postorder. */
1080 bbindex_to_postorder =
1081 (unsigned int *)xmalloc (last_basic_block * sizeof (unsigned int));
1082
1083 /* Initialize the array to an out-of-bound value. */
1084 for (i = 0; i < last_basic_block; i++)
1085 bbindex_to_postorder[i] = last_basic_block;
1086
1087 /* Initialize the considered map. */
1088 sbitmap_zero (considered);
1089 EXECUTE_IF_SET_IN_BITMAP (blocks_to_consider, 0, index, bi)
1090 {
1091 SET_BIT (considered, index);
1092 }
1093
1094 /* Initialize the mapping of block index to postorder. */
1095 for (i = 0; i < n_blocks; i++)
1096 {
1097 bbindex_to_postorder[blocks_in_postorder[i]] = i;
1098 /* Add all blocks to the worklist. */
1099 bitmap_set_bit (pending, i);
1100 }
1101
1102 /* Initialize the problem. */
1103 if (dataflow->problem->init_fun)
1104 dataflow->problem->init_fun (blocks_to_consider);
1105
1106 /* Solve it. Determine the solving algorithm
1107 based on a simple heuristic. */
1108 if (n_edges > PARAM_VALUE (PARAM_DF_DOUBLE_QUEUE_THRESHOLD_FACTOR)
1109 * n_basic_blocks)
1110 {
1111 /* High average connectivity, meaning dense graph
1112 with more likely deep nested loops
1113 or unstructured loops. */
1114 df_worklist_dataflow_doublequeue (dataflow, pending, considered,
1115 blocks_in_postorder,
1116 bbindex_to_postorder);
1117 }
1118 else
1119 {
1120 /* Most inputs fall into this case
1121 with relatively flat or structured CFG. */
1122 df_worklist_dataflow_overeager (dataflow, pending, considered,
1123 blocks_in_postorder,
1124 bbindex_to_postorder);
1125 }
1126
1127 sbitmap_free (considered);
1128 free (bbindex_to_postorder);
1129 }
1130
1131
1132 /* Remove the entries not in BLOCKS from the LIST of length LEN, preserving
1133 the order of the remaining entries. Returns the length of the resulting
1134 list. */
1135
1136 static unsigned
1137 df_prune_to_subcfg (int list[], unsigned len, bitmap blocks)
1138 {
1139 unsigned act, last;
1140
1141 for (act = 0, last = 0; act < len; act++)
1142 if (bitmap_bit_p (blocks, list[act]))
1143 list[last++] = list[act];
1144
1145 return last;
1146 }
1147
1148
1149 /* Execute dataflow analysis on a single dataflow problem.
1150
1151 BLOCKS_TO_CONSIDER are the blocks whose solution can either be
1152 examined or will be computed. For calls from DF_ANALYZE, this is
1153 the set of blocks that has been passed to DF_SET_BLOCKS.
1154 */
1155
1156 void
1157 df_analyze_problem (struct dataflow *dflow,
1158 bitmap blocks_to_consider,
1159 int *postorder, int n_blocks)
1160 {
1161 timevar_push (dflow->problem->tv_id);
1162
1163 #ifdef ENABLE_DF_CHECKING
1164 if (dflow->problem->verify_start_fun)
1165 dflow->problem->verify_start_fun ();
1166 #endif
1167
1168 /* (Re)Allocate the datastructures necessary to solve the problem. */
1169 if (dflow->problem->alloc_fun)
1170 dflow->problem->alloc_fun (blocks_to_consider);
1171
1172 /* Set up the problem and compute the local information. */
1173 if (dflow->problem->local_compute_fun)
1174 dflow->problem->local_compute_fun (blocks_to_consider);
1175
1176 /* Solve the equations. */
1177 if (dflow->problem->dataflow_fun)
1178 dflow->problem->dataflow_fun (dflow, blocks_to_consider,
1179 postorder, n_blocks);
1180
1181 /* Massage the solution. */
1182 if (dflow->problem->finalize_fun)
1183 dflow->problem->finalize_fun (blocks_to_consider);
1184
1185 #ifdef ENABLE_DF_CHECKING
1186 if (dflow->problem->verify_end_fun)
1187 dflow->problem->verify_end_fun ();
1188 #endif
1189
1190 timevar_pop (dflow->problem->tv_id);
1191
1192 dflow->computed = true;
1193 }
1194
1195
1196 /* Analyze dataflow info for the basic blocks specified by the bitmap
1197 given to df_set_blocks, or for the whole CFG if no subset has been set. */
1198
1199 void
1200 df_analyze (void)
1201 {
1202 bitmap current_all_blocks = BITMAP_ALLOC (&df_bitmap_obstack);
1203 bool everything;
1204 int i;
1205
1206 if (df->postorder)
1207 free (df->postorder);
1208 if (df->postorder_inverted)
1209 free (df->postorder_inverted);
1210 df->postorder = XNEWVEC (int, last_basic_block);
1211 df->postorder_inverted = XNEWVEC (int, last_basic_block);
1212 df->n_blocks = post_order_compute (df->postorder, true, true);
1213 df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted);
1214
1215 /* These should be the same. */
1216 gcc_assert (df->n_blocks == df->n_blocks_inverted);
1217
1218 /* We need to do this before the call to df_verify because this is
1219 not kept incrementally up to date. */
1220 df_compute_regs_ever_live (false);
1221 df_process_deferred_rescans ();
1222
1223 if (dump_file)
1224 fprintf (dump_file, "df_analyze called\n");
1225
1226 #ifndef ENABLE_DF_CHECKING
1227 if (df->changeable_flags & DF_VERIFY_SCHEDULED)
1228 #endif
1229 df_verify ();
1230
1231 for (i = 0; i < df->n_blocks; i++)
1232 bitmap_set_bit (current_all_blocks, df->postorder[i]);
1233
1234 #ifdef ENABLE_CHECKING
1235 /* Verify that POSTORDER_INVERTED only contains blocks reachable from
1236 the ENTRY block. */
1237 for (i = 0; i < df->n_blocks_inverted; i++)
1238 gcc_assert (bitmap_bit_p (current_all_blocks, df->postorder_inverted[i]));
1239 #endif
1240
1241 /* Make sure that we have pruned any unreachable blocks from these
1242 sets. */
1243 if (df->analyze_subset)
1244 {
1245 everything = false;
1246 bitmap_and_into (df->blocks_to_analyze, current_all_blocks);
1247 df->n_blocks = df_prune_to_subcfg (df->postorder,
1248 df->n_blocks, df->blocks_to_analyze);
1249 df->n_blocks_inverted = df_prune_to_subcfg (df->postorder_inverted,
1250 df->n_blocks_inverted,
1251 df->blocks_to_analyze);
1252 BITMAP_FREE (current_all_blocks);
1253 }
1254 else
1255 {
1256 everything = true;
1257 df->blocks_to_analyze = current_all_blocks;
1258 current_all_blocks = NULL;
1259 }
1260
1261 /* Skip over the DF_SCAN problem. */
1262 for (i = 1; i < df->num_problems_defined; i++)
1263 {
1264 struct dataflow *dflow = df->problems_in_order[i];
1265 if (dflow->solutions_dirty)
1266 {
1267 if (dflow->problem->dir == DF_FORWARD)
1268 df_analyze_problem (dflow,
1269 df->blocks_to_analyze,
1270 df->postorder_inverted,
1271 df->n_blocks_inverted);
1272 else
1273 df_analyze_problem (dflow,
1274 df->blocks_to_analyze,
1275 df->postorder,
1276 df->n_blocks);
1277 }
1278 }
1279
1280 if (everything)
1281 {
1282 BITMAP_FREE (df->blocks_to_analyze);
1283 df->blocks_to_analyze = NULL;
1284 }
1285
1286 #ifdef DF_DEBUG_CFG
1287 df_set_clean_cfg ();
1288 #endif
1289 }
1290
1291
1292 /* Return the number of basic blocks from the last call to df_analyze. */
1293
1294 int
1295 df_get_n_blocks (enum df_flow_dir dir)
1296 {
1297 gcc_assert (dir != DF_NONE);
1298
1299 if (dir == DF_FORWARD)
1300 {
1301 gcc_assert (df->postorder_inverted);
1302 return df->n_blocks_inverted;
1303 }
1304
1305 gcc_assert (df->postorder);
1306 return df->n_blocks;
1307 }
1308
1309
1310 /* Return a pointer to the array of basic blocks in the reverse postorder.
1311 Depending on the direction of the dataflow problem,
1312 it returns either the usual reverse postorder array
1313 or the reverse postorder of inverted traversal. */
1314 int *
1315 df_get_postorder (enum df_flow_dir dir)
1316 {
1317 gcc_assert (dir != DF_NONE);
1318
1319 if (dir == DF_FORWARD)
1320 {
1321 gcc_assert (df->postorder_inverted);
1322 return df->postorder_inverted;
1323 }
1324 gcc_assert (df->postorder);
1325 return df->postorder;
1326 }
1327
1328 static struct df_problem user_problem;
1329 static struct dataflow user_dflow;
1330
1331 /* Interface for calling iterative dataflow with user defined
1332 confluence and transfer functions. All that is necessary is to
1333 supply DIR, a direction, INIT_FUN, an initialization function (or NULL),
1334 CONF_FUN_0, a confluence function for blocks with no logical preds (or
1335 NULL), CONF_FUN_N, the normal confluence function, TRANS_FUN, the basic
1336 block transfer function, BLOCKS, the set of blocks to examine, POSTORDER,
1337 the blocks in postorder, and N_BLOCKS, the number of blocks in POSTORDER. */
1338
1339 void
1340 df_simple_dataflow (enum df_flow_dir dir,
1341 df_init_function init_fun,
1342 df_confluence_function_0 con_fun_0,
1343 df_confluence_function_n con_fun_n,
1344 df_transfer_function trans_fun,
1345 bitmap blocks, int * postorder, int n_blocks)
1346 {
1347 memset (&user_problem, 0, sizeof (struct df_problem));
1348 user_problem.dir = dir;
1349 user_problem.init_fun = init_fun;
1350 user_problem.con_fun_0 = con_fun_0;
1351 user_problem.con_fun_n = con_fun_n;
1352 user_problem.trans_fun = trans_fun;
1353 user_dflow.problem = &user_problem;
1354 df_worklist_dataflow (&user_dflow, blocks, postorder, n_blocks);
1355 }
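
/* As a usage sketch (the confluence and transfer functions here are
   hypothetical, supplied by the client pass), a backward problem over
   the whole function could be solved with:

     df_simple_dataflow (DF_BACKWARD, NULL, my_con_fun_0, my_con_fun_n,
                         my_trans_fun, all_blocks,
                         df_get_postorder (DF_BACKWARD),
                         df_get_n_blocks (DF_BACKWARD));

   where ALL_BLOCKS is a bitmap with a bit set for every basic block
   index to be considered.  */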
1356
1357
1358 \f
1359 /*----------------------------------------------------------------------------
1360 Functions to support limited incremental change.
1361 ----------------------------------------------------------------------------*/
1362
1363
1364 /* Get basic block info. */
1365
1366 static void *
1367 df_get_bb_info (struct dataflow *dflow, unsigned int index)
1368 {
1369 if (dflow->block_info == NULL)
1370 return NULL;
1371 if (index >= dflow->block_info_size)
1372 return NULL;
1373 return (struct df_scan_bb_info *) dflow->block_info[index];
1374 }
1375
1376
1377 /* Set basic block info. */
1378
1379 static void
1380 df_set_bb_info (struct dataflow *dflow, unsigned int index,
1381 void *bb_info)
1382 {
1383 gcc_assert (dflow->block_info);
1384 dflow->block_info[index] = bb_info;
1385 }
1386
1387
1388 /* Mark the solutions as being out of date. */
1389
1390 void
1391 df_mark_solutions_dirty (void)
1392 {
1393 if (df)
1394 {
1395 int p;
1396 for (p = 1; p < df->num_problems_defined; p++)
1397 df->problems_in_order[p]->solutions_dirty = true;
1398 }
1399 }
1400
1401
1402 /* Return true if BB needs its transfer functions recomputed. */
1403
1404 bool
1405 df_get_bb_dirty (basic_block bb)
1406 {
1407 if (df && df_live)
1408 return bitmap_bit_p (df_live->out_of_date_transfer_functions, bb->index);
1409 else
1410 return false;
1411 }
1412
1413
1414 /* Mark BB as needing its transfer functions to be recomputed, i.e.
1415 as being out of date. */
1416
1417 void
1418 df_set_bb_dirty (basic_block bb)
1419 {
1420 if (df)
1421 {
1422 int p;
1423 for (p = 1; p < df->num_problems_defined; p++)
1424 {
1425 struct dataflow *dflow = df->problems_in_order[p];
1426 if (dflow->out_of_date_transfer_functions)
1427 bitmap_set_bit (dflow->out_of_date_transfer_functions, bb->index);
1428 }
1429 df_mark_solutions_dirty ();
1430 }
1431 }
1432
1433
1434 /* Clear the dirty bits. This is called from places that delete
1435 blocks. */
1436 static void
1437 df_clear_bb_dirty (basic_block bb)
1438 {
1439 int p;
1440 for (p = 1; p < df->num_problems_defined; p++)
1441 {
1442 struct dataflow *dflow = df->problems_in_order[p];
1443 if (dflow->out_of_date_transfer_functions)
1444 bitmap_clear_bit (dflow->out_of_date_transfer_functions, bb->index);
1445 }
1446 }
1447 /* Called from rtl_compact_blocks to reorganize the problems' basic
1448 block info. */
1449
1450 void
1451 df_compact_blocks (void)
1452 {
1453 int i, p;
1454 basic_block bb;
1455 void **problem_temps;
1456 int size = last_basic_block * sizeof (void *);
1457 bitmap tmp = BITMAP_ALLOC (&df_bitmap_obstack);
1458 problem_temps = XNEWVAR (void *, size);
1459
1460 for (p = 0; p < df->num_problems_defined; p++)
1461 {
1462 struct dataflow *dflow = df->problems_in_order[p];
1463
1464 /* Need to reorganize the out_of_date_transfer_functions for the
1465 dflow problem. */
1466 if (dflow->out_of_date_transfer_functions)
1467 {
1468 bitmap_copy (tmp, dflow->out_of_date_transfer_functions);
1469 bitmap_clear (dflow->out_of_date_transfer_functions);
1470 if (bitmap_bit_p (tmp, ENTRY_BLOCK))
1471 bitmap_set_bit (dflow->out_of_date_transfer_functions, ENTRY_BLOCK);
1472 if (bitmap_bit_p (tmp, EXIT_BLOCK))
1473 bitmap_set_bit (dflow->out_of_date_transfer_functions, EXIT_BLOCK);
1474
1475 i = NUM_FIXED_BLOCKS;
1476 FOR_EACH_BB (bb)
1477 {
1478 if (bitmap_bit_p (tmp, bb->index))
1479 bitmap_set_bit (dflow->out_of_date_transfer_functions, i);
1480 i++;
1481 }
1482 }
1483
1484 /* Now shuffle the block info for the problem. */
1485 if (dflow->problem->free_bb_fun)
1486 {
1487 df_grow_bb_info (dflow);
1488 memcpy (problem_temps, dflow->block_info, size);
1489
1490 /* Copy the bb info from the problem tmps to the proper
1491 place in the block_info vector. Null out the copied
1492 item. The entry and exit blocks never move. */
1493 i = NUM_FIXED_BLOCKS;
1494 FOR_EACH_BB (bb)
1495 {
1496 df_set_bb_info (dflow, i, problem_temps[bb->index]);
1497 problem_temps[bb->index] = NULL;
1498 i++;
1499 }
1500 memset (dflow->block_info + i, 0,
1501 (last_basic_block - i) *sizeof (void *));
1502
1503 /* Free any block infos that were not copied (and NULLed).
1504 These are from orphaned blocks. */
1505 for (i = NUM_FIXED_BLOCKS; i < last_basic_block; i++)
1506 {
1507 basic_block bb = BASIC_BLOCK (i);
1508 if (problem_temps[i] && bb)
1509 dflow->problem->free_bb_fun
1510 (bb, problem_temps[i]);
1511 }
1512 }
1513 }
1514
1515 /* Shuffle the bits in the basic_block indexed arrays. */
1516
1517 if (df->blocks_to_analyze)
1518 {
1519 if (bitmap_bit_p (tmp, ENTRY_BLOCK))
1520 bitmap_set_bit (df->blocks_to_analyze, ENTRY_BLOCK);
1521 if (bitmap_bit_p (tmp, EXIT_BLOCK))
1522 bitmap_set_bit (df->blocks_to_analyze, EXIT_BLOCK);
1523 bitmap_copy (tmp, df->blocks_to_analyze);
1524 bitmap_clear (df->blocks_to_analyze);
1525 i = NUM_FIXED_BLOCKS;
1526 FOR_EACH_BB (bb)
1527 {
1528 if (bitmap_bit_p (tmp, bb->index))
1529 bitmap_set_bit (df->blocks_to_analyze, i);
1530 i++;
1531 }
1532 }
1533
1534 BITMAP_FREE (tmp);
1535
1536 free (problem_temps);
1537
1538 i = NUM_FIXED_BLOCKS;
1539 FOR_EACH_BB (bb)
1540 {
1541 SET_BASIC_BLOCK (i, bb);
1542 bb->index = i;
1543 i++;
1544 }
1545
1546 gcc_assert (i == n_basic_blocks);
1547
1548 for (; i < last_basic_block; i++)
1549 SET_BASIC_BLOCK (i, NULL);
1550
1551 #ifdef DF_DEBUG_CFG
1552 if (!df_lr->solutions_dirty)
1553 df_set_clean_cfg ();
1554 #endif
1555 }
1556
1557
1558 /* Shove NEW_BLOCK in at OLD_INDEX. Called from ifcvt to hack a
1559 block. There is no excuse for people to do this kind of thing. */
1560
1561 void
1562 df_bb_replace (int old_index, basic_block new_block)
1563 {
1564 int new_block_index = new_block->index;
1565 int p;
1566
1567 if (dump_file)
1568 fprintf (dump_file, "shoving block %d into %d\n", new_block_index, old_index);
1569
1570 gcc_assert (df);
1571 gcc_assert (BASIC_BLOCK (old_index) == NULL);
1572
1573 for (p = 0; p < df->num_problems_defined; p++)
1574 {
1575 struct dataflow *dflow = df->problems_in_order[p];
1576 if (dflow->block_info)
1577 {
1578 df_grow_bb_info (dflow);
1579 gcc_assert (df_get_bb_info (dflow, old_index) == NULL);
1580 df_set_bb_info (dflow, old_index,
1581 df_get_bb_info (dflow, new_block_index));
1582 }
1583 }
1584
1585 df_clear_bb_dirty (new_block);
1586 SET_BASIC_BLOCK (old_index, new_block);
1587 new_block->index = old_index;
1588 df_set_bb_dirty (BASIC_BLOCK (old_index));
1589 SET_BASIC_BLOCK (new_block_index, NULL);
1590 }
1591
1592
1593 /* Free all of the per basic block dataflow from all of the problems.
1594 This is typically called before a basic block is deleted and the
1595 problem will be reanalyzed. */
1596
1597 void
1598 df_bb_delete (int bb_index)
1599 {
1600 basic_block bb = BASIC_BLOCK (bb_index);
1601 int i;
1602
1603 if (!df)
1604 return;
1605
1606 for (i = 0; i < df->num_problems_defined; i++)
1607 {
1608 struct dataflow *dflow = df->problems_in_order[i];
1609 if (dflow->problem->free_bb_fun)
1610 {
1611 void *bb_info = df_get_bb_info (dflow, bb_index);
1612 if (bb_info)
1613 {
1614 dflow->problem->free_bb_fun (bb, bb_info);
1615 df_set_bb_info (dflow, bb_index, NULL);
1616 }
1617 }
1618 }
1619 df_clear_bb_dirty (bb);
1620 df_mark_solutions_dirty ();
1621 }
1622
1623
1624 /* Verify that there is a place for everything and everything is in
1625 its place. This is too expensive to run after every pass in the
1626 mainline. However this is an excellent debugging tool if the
1627 dataflow information is not being updated properly. You can just
1628 sprinkle calls in until you find the place that is changing an
1629 underlying structure without calling the proper updating
1630 routine. */
1631
1632 void
1633 df_verify (void)
1634 {
1635 df_scan_verify ();
1636 #ifdef ENABLE_DF_CHECKING
1637 df_lr_verify_transfer_functions ();
1638 if (df_live)
1639 df_live_verify_transfer_functions ();
1640 #endif
1641 }
1642
1643 #ifdef DF_DEBUG_CFG
1644
1645 /* Compute an array of ints that describes the cfg. This can be used
1646 to discover places where the cfg is modified but the appropriate
1647 calls have not been made to keep df informed.  The internals of
1648 this are unexciting, the key is that two instances of this can be
1649 compared to see if any changes have been made to the cfg. */
1650
1651 static int *
1652 df_compute_cfg_image (void)
1653 {
1654 basic_block bb;
1655 int size = 2 + (2 * n_basic_blocks);
1656 int i;
1657 int * map;
1658
1659 FOR_ALL_BB (bb)
1660 {
1661 size += EDGE_COUNT (bb->succs);
1662 }
1663
1664 map = XNEWVEC (int, size);
1665 map[0] = size;
1666 i = 1;
1667 FOR_ALL_BB (bb)
1668 {
1669 edge_iterator ei;
1670 edge e;
1671
1672 map[i++] = bb->index;
1673 FOR_EACH_EDGE (e, ei, bb->succs)
1674 map[i++] = e->dest->index;
1675 map[i++] = -1;
1676 }
1677 map[i] = -1;
1678 return map;
1679 }
1680
1681 static int *saved_cfg = NULL;
1682
1683
1684 /* This function compares the saved version of the cfg with the
1685 current cfg and aborts if the two are not identical.  The function
1686 silently returns if the cfg has been marked as dirty or the two are
1687 the same. */
1688
1689 void
1690 df_check_cfg_clean (void)
1691 {
1692 int *new_map;
1693
1694 if (!df)
1695 return;
1696
1697 if (df_lr->solutions_dirty)
1698 return;
1699
1700 if (saved_cfg == NULL)
1701 return;
1702
1703 new_map = df_compute_cfg_image ();
1704 gcc_assert (memcmp (saved_cfg, new_map, saved_cfg[0] * sizeof (int)) == 0);
1705 free (new_map);
1706 }
1707
1708
1709 /* This function builds a cfg fingerprint and squirrels it away in
1710 saved_cfg. */
1711
1712 static void
1713 df_set_clean_cfg (void)
1714 {
1715 if (saved_cfg)
1716 free (saved_cfg);
1717 saved_cfg = df_compute_cfg_image ();
1718 }
1719
1720 #endif /* DF_DEBUG_CFG */
1721 /*----------------------------------------------------------------------------
1722 PUBLIC INTERFACES TO QUERY INFORMATION.
1723 ----------------------------------------------------------------------------*/
1724
1725
1726 /* Return first def of REGNO within BB. */
1727
1728 struct df_ref *
1729 df_bb_regno_first_def_find (basic_block bb, unsigned int regno)
1730 {
1731 rtx insn;
1732 struct df_ref **def_rec;
1733 unsigned int uid;
1734
1735 FOR_BB_INSNS (bb, insn)
1736 {
1737 if (!INSN_P (insn))
1738 continue;
1739
1740 uid = INSN_UID (insn);
1741 for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
1742 {
1743 struct df_ref *def = *def_rec;
1744 if (DF_REF_REGNO (def) == regno)
1745 return def;
1746 }
1747 }
1748 return NULL;
1749 }
1750
1751
1752 /* Return last def of REGNO within BB. */
1753
1754 struct df_ref *
1755 df_bb_regno_last_def_find (basic_block bb, unsigned int regno)
1756 {
1757 rtx insn;
1758 struct df_ref **def_rec;
1759 unsigned int uid;
1760
1761 FOR_BB_INSNS_REVERSE (bb, insn)
1762 {
1763 if (!INSN_P (insn))
1764 continue;
1765
1766 uid = INSN_UID (insn);
1767 for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
1768 {
1769 struct df_ref *def = *def_rec;
1770 if (DF_REF_REGNO (def) == regno)
1771 return def;
1772 }
1773 }
1774
1775 return NULL;
1776 }
1777
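/* For illustration only (a sketch, not part of the df API): both queries
   above return NULL when REGNO has no def in BB, so a pass can test
   whether a block writes a register at all with something like

     bool writes_reg_p = df_bb_regno_first_def_find (bb, regno) != NULL;

   and use the last-def variant when it wants the textually last def in
   the block.  */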
1778 /* Find the reference corresponding to the definition of REG in INSN,
1779 or NULL if INSN does not define REG. */
1780
1781 struct df_ref *
1782 df_find_def (rtx insn, rtx reg)
1783 {
1784 unsigned int uid;
1785 struct df_ref **def_rec;
1786
1787 if (GET_CODE (reg) == SUBREG)
1788 reg = SUBREG_REG (reg);
1789 gcc_assert (REG_P (reg));
1790
1791 uid = INSN_UID (insn);
1792 for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
1793 {
1794 struct df_ref *def = *def_rec;
1795 if (rtx_equal_p (DF_REF_REAL_REG (def), reg))
1796 return def;
1797 }
1798
1799 return NULL;
1800 }
1801
1802
1803 /* Return true if REG is defined in INSN, false otherwise. */
1804
1805 bool
1806 df_reg_defined (rtx insn, rtx reg)
1807 {
1808 return df_find_def (insn, reg) != NULL;
1809 }
1810
1811
1812 /* Find the reference corresponding to the use of REG in INSN,
1813 or NULL if INSN does not use REG. */
1814
1815 struct df_ref *
1816 df_find_use (rtx insn, rtx reg)
1817 {
1818 unsigned int uid;
1819 struct df_ref **use_rec;
1820
1821 if (GET_CODE (reg) == SUBREG)
1822 reg = SUBREG_REG (reg);
1823 gcc_assert (REG_P (reg));
1824
1825 uid = INSN_UID (insn);
1826 for (use_rec = DF_INSN_UID_USES (uid); *use_rec; use_rec++)
1827 {
1828 struct df_ref *use = *use_rec;
1829 if (rtx_equal_p (DF_REF_REAL_REG (use), reg))
1830 return use;
1831 }
1832 if (df->changeable_flags & DF_EQ_NOTES)
1833 for (use_rec = DF_INSN_UID_EQ_USES (uid); *use_rec; use_rec++)
1834 {
1835 struct df_ref *use = *use_rec;
1836 if (rtx_equal_p (DF_REF_REAL_REG (use), reg))
1837 return use;
1838 }
1839 return NULL;
1840 }
1841
1842
1843 /* Return true if REG is used in INSN, false otherwise. */
1844
1845 bool
1846 df_reg_used (rtx insn, rtx reg)
1847 {
1848 return df_find_use (insn, reg) != NULL;
1849 }
1850
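/* For illustration only (a sketch, not part of the df API): a pass that
   wants to know whether INSN both reads and writes REG (a
   read-modify-write) can combine the two predicates above:

     if (df_reg_used (insn, reg) && df_reg_defined (insn, reg))
       ...

   Note that when DF_EQ_NOTES is set in the changeable flags, df_find_use
   (and hence df_reg_used) also finds uses that appear only in REG_EQUAL
   and REG_EQUIV notes.  */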
1851 \f
1852 /*----------------------------------------------------------------------------
1853 Debugging and printing functions.
1854 ----------------------------------------------------------------------------*/
1855
1856
1857 /* Write the register set R to FILE, annotating hard registers with
1858 their names. This is part of making a debugging dump. */
1859
1860 void
1861 df_print_regset (FILE *file, bitmap r)
1862 {
1863 unsigned int i;
1864 bitmap_iterator bi;
1865
1866 if (r == NULL)
1867 fputs (" (nil)", file);
1868 else
1869 {
1870 EXECUTE_IF_SET_IN_BITMAP (r, 0, i, bi)
1871 {
1872 fprintf (file, " %d", i);
1873 if (i < FIRST_PSEUDO_REGISTER)
1874 fprintf (file, " [%s]", reg_names[i]);
1875 }
1876 }
1877 fprintf (file, "\n");
1878 }
1879
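/* For illustration only (not part of the df API): the routine above
   prints each set register number, and for hard registers also the
   target name, so a regset containing hard register 1 and pseudo 73
   comes out roughly as

      1 [<name of reg 1>] 73

   where the bracketed name is taken from the target's reg_names table.  */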
1880
1881 /* Write the register set R to FILE, where R is a bitmap in the form
1882 used by df_byte_lr, in which a register may occupy one bit per byte.
1883 This is part of making a debugging dump. */
1884
1885 void
1886 df_print_byte_regset (FILE *file, bitmap r)
1887 {
1888 unsigned int max_reg = max_reg_num ();
1889 bitmap_iterator bi;
1890
1891 if (r == NULL)
1892 fputs (" (nil)", file);
1893 else
1894 {
1895 unsigned int i;
1896 for (i = 0; i < max_reg; i++)
1897 {
1898 unsigned int first = df_byte_lr_get_regno_start (i);
1899 unsigned int len = df_byte_lr_get_regno_len (i);
1900
1901 if (len > 1)
1902 {
1903 bool found = false;
1904 unsigned int j;
1905
1906 EXECUTE_IF_SET_IN_BITMAP (r, first, j, bi)
1907 {
1908 found = j < first + len;
1909 break;
1910 }
1911 if (found)
1912 {
1913 const char * sep = "";
1914 fprintf (file, " %d", i);
1915 if (i < FIRST_PSEUDO_REGISTER)
1916 fprintf (file, " [%s]", reg_names[i]);
1917 fprintf (file, "(");
1918 EXECUTE_IF_SET_IN_BITMAP (r, first, j, bi)
1919 {
1920 if (j > first + len - 1)
1921 break;
1922 fprintf (file, "%s%d", sep, j-first);
1923 sep = ", ";
1924 }
1925 fprintf (file, ")");
1926 }
1927 }
1928 else
1929 {
1930 if (bitmap_bit_p (r, first))
1931 {
1932 fprintf (file, " %d", i);
1933 if (i < FIRST_PSEUDO_REGISTER)
1934 fprintf (file, " [%s]", reg_names[i]);
1935 }
1936 }
1937
1938 }
1939 }
1940 fprintf (file, "\n");
1941 }
1942
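/* For illustration only (not part of the df API): in the byte-lr form a
   multiword register whose first two bytes are live is printed by the
   routine above roughly as

      66(0, 1)

   i.e. the register number followed by the live byte offsets, while
   registers tracked with a single bit print as a plain number, just as
   in df_print_regset.  */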
1943
1944 /* Dump dataflow info. */
1945
1946 void
1947 df_dump (FILE *file)
1948 {
1949 basic_block bb;
1950 df_dump_start (file);
1951
1952 FOR_ALL_BB (bb)
1953 {
1954 df_print_bb_index (bb, file);
1955 df_dump_top (bb, file);
1956 df_dump_bottom (bb, file);
1957 }
1958
1959 fprintf (file, "\n");
1960 }
1961
1962
1963 /* Dump dataflow info for df->blocks_to_analyze. */
1964
1965 void
1966 df_dump_region (FILE *file)
1967 {
1968 if (df->blocks_to_analyze)
1969 {
1970 bitmap_iterator bi;
1971 unsigned int bb_index;
1972
1973 fprintf (file, "\n\nstarting region dump\n");
1974 df_dump_start (file);
1975
1976 EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
1977 {
1978 basic_block bb = BASIC_BLOCK (bb_index);
1979
1980 df_print_bb_index (bb, file);
1981 df_dump_top (bb, file);
1982 df_dump_bottom (bb, file);
1983 }
1984 fprintf (file, "\n");
1985 }
1986 else
1987 df_dump (file);
1988 }
1989
1990
1991 /* Dump the introductory information for each problem defined. */
1992
1993 void
1994 df_dump_start (FILE *file)
1995 {
1996 int i;
1997
1998 if (!df || !file)
1999 return;
2000
2001 fprintf (file, "\n\n%s\n", current_function_name ());
2002 fprintf (file, "\nDataflow summary:\n");
2003 if (df->blocks_to_analyze)
2004 fprintf (file, "def_info->table_size = %d, use_info->table_size = %d\n",
2005 DF_DEFS_TABLE_SIZE (), DF_USES_TABLE_SIZE ());
2006
2007 for (i = 0; i < df->num_problems_defined; i++)
2008 {
2009 struct dataflow *dflow = df->problems_in_order[i];
2010 if (dflow->computed)
2011 {
2012 df_dump_problem_function fun = dflow->problem->dump_start_fun;
2013 if (fun)
2014 fun (file);
2015 }
2016 }
2017 }
2018
2019
2020 /* Dump the top of the block information for BB. */
2021
2022 void
2023 df_dump_top (basic_block bb, FILE *file)
2024 {
2025 int i;
2026
2027 if (!df || !file)
2028 return;
2029
2030 for (i = 0; i < df->num_problems_defined; i++)
2031 {
2032 struct dataflow *dflow = df->problems_in_order[i];
2033 if (dflow->computed)
2034 {
2035 df_dump_bb_problem_function bbfun = dflow->problem->dump_top_fun;
2036 if (bbfun)
2037 bbfun (bb, file);
2038 }
2039 }
2040 }
2041
2042
2043 /* Dump the bottom of the block information for BB. */
2044
2045 void
2046 df_dump_bottom (basic_block bb, FILE *file)
2047 {
2048 int i;
2049
2050 if (!df || !file)
2051 return;
2052
2053 for (i = 0; i < df->num_problems_defined; i++)
2054 {
2055 struct dataflow *dflow = df->problems_in_order[i];
2056 if (dflow->computed)
2057 {
2058 df_dump_bb_problem_function bbfun = dflow->problem->dump_bottom_fun;
2059 if (bbfun)
2060 bbfun (bb, file);
2061 }
2062 }
2063 }
2064
2065
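/* Dump the null-terminated array of refs REF_REC to FILE. Each ref is
   printed as a 'd' (def), 'u' (use) or 'e' (use appearing only in a
   note) followed by its ref id and register number; if FOLLOW_CHAIN,
   each ref's def-use or use-def chain is dumped as well. */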
2066 void
2067 df_refs_chain_dump (struct df_ref **ref_rec, bool follow_chain, FILE *file)
2068 {
2069 fprintf (file, "{ ");
2070 while (*ref_rec)
2071 {
2072 struct df_ref *ref = *ref_rec;
2073 fprintf (file, "%c%d(%d)",
2074 DF_REF_REG_DEF_P (ref) ? 'd' : (DF_REF_FLAGS (ref) & DF_REF_IN_NOTE) ? 'e' : 'u',
2075 DF_REF_ID (ref),
2076 DF_REF_REGNO (ref));
2077 if (follow_chain)
2078 df_chain_dump (DF_REF_CHAIN (ref), file);
2079 ref_rec++;
2080 }
2081 fprintf (file, "}");
2082 }
2083
2084
2085 /* Dump either a reg-def or reg-use chain (refs linked through next_reg). */
2086
2087 void
2088 df_regs_chain_dump (struct df_ref *ref, FILE *file)
2089 {
2090 fprintf (file, "{ ");
2091 while (ref)
2092 {
2093 fprintf (file, "%c%d(%d) ",
2094 DF_REF_REG_DEF_P (ref) ? 'd' : 'u',
2095 DF_REF_ID (ref),
2096 DF_REF_REGNO (ref));
2097 ref = ref->next_reg;
2098 }
2099 fprintf (file, "}");
2100 }
2101
2102
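/* Dump the null-terminated array of multiword hardreg records MWS to
   FILE, one line per record showing whether it is a def or a use and
   the range of hard registers it covers. */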
2103 static void
2104 df_mws_dump (struct df_mw_hardreg **mws, FILE *file)
2105 {
2106 while (*mws)
2107 {
2108 fprintf (file, "mw %c r[%d..%d]\n",
2109 ((*mws)->type == DF_REF_REG_DEF) ? 'd' : 'u',
2110 (*mws)->start_regno, (*mws)->end_regno);
2111 mws++;
2112 }
2113 }
2114
2115
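/* Dump the df information attached to the insn with uid UID to FILE:
   its luid and its def, use, eq use and multiword hardreg records,
   following the ref chains if FOLLOW_CHAIN. */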
2116 static void
2117 df_insn_uid_debug (unsigned int uid,
2118 bool follow_chain, FILE *file)
2119 {
2120 fprintf (file, "insn %d luid %d",
2121 uid, DF_INSN_UID_LUID (uid));
2122
2123 if (DF_INSN_UID_DEFS (uid))
2124 {
2125 fprintf (file, " defs ");
2126 df_refs_chain_dump (DF_INSN_UID_DEFS (uid), follow_chain, file);
2127 }
2128
2129 if (DF_INSN_UID_USES (uid))
2130 {
2131 fprintf (file, " uses ");
2132 df_refs_chain_dump (DF_INSN_UID_USES (uid), follow_chain, file);
2133 }
2134
2135 if (DF_INSN_UID_EQ_USES (uid))
2136 {
2137 fprintf (file, " eq uses ");
2138 df_refs_chain_dump (DF_INSN_UID_EQ_USES (uid), follow_chain, file);
2139 }
2140
2141 if (DF_INSN_UID_MWS (uid))
2142 {
2143 fprintf (file, " mws ");
2144 df_mws_dump (DF_INSN_UID_MWS (uid), file);
2145 }
2146 fprintf (file, "\n");
2147 }
2148
2149
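/* Dump the df information attached to INSN to FILE, following the ref
   chains if FOLLOW_CHAIN. */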
2150 void
2151 df_insn_debug (rtx insn, bool follow_chain, FILE *file)
2152 {
2153 df_insn_uid_debug (INSN_UID (insn), follow_chain, file);
2154 }
2155
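/* Dump the basic block, luid and the def, use and eq use refs of INSN
   to FILE, without following the ref chains. */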
2156 void
2157 df_insn_debug_regno (rtx insn, FILE *file)
2158 {
2159 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
2160
2161 fprintf (file, "insn %d bb %d luid %d defs ",
2162 INSN_UID (insn), BLOCK_FOR_INSN (insn)->index,
2163 DF_INSN_INFO_LUID (insn_info));
2164 df_refs_chain_dump (DF_INSN_INFO_DEFS (insn_info), false, file);
2165
2166 fprintf (file, " uses ");
2167 df_refs_chain_dump (DF_INSN_INFO_USES (insn_info), false, file);
2168
2169 fprintf (file, " eq_uses ");
2170 df_refs_chain_dump (DF_INSN_INFO_EQ_USES (insn_info), false, file);
2171 fprintf (file, "\n");
2172 }
2173
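/* Dump the def, use and eq use chains of register REGNO to FILE. */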
2174 void
2175 df_regno_debug (unsigned int regno, FILE *file)
2176 {
2177 fprintf (file, "reg %d defs ", regno);
2178 df_regs_chain_dump (DF_REG_DEF_CHAIN (regno), file);
2179 fprintf (file, " uses ");
2180 df_regs_chain_dump (DF_REG_USE_CHAIN (regno), file);
2181 fprintf (file, " eq_uses ");
2182 df_regs_chain_dump (DF_REG_EQ_USE_CHAIN (regno), file);
2183 fprintf (file, "\n");
2184 }
2185
2186
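/* Dump everything that is known about the single ref REF to FILE: its
   kind and id, register, basic block, insn, flags, type, location and
   chain. */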
2187 void
2188 df_ref_debug (struct df_ref *ref, FILE *file)
2189 {
2190 fprintf (file, "%c%d ",
2191 DF_REF_REG_DEF_P (ref) ? 'd' : 'u',
2192 DF_REF_ID (ref));
2193 fprintf (file, "reg %d bb %d insn %d flag 0x%x type 0x%x ",
2194 DF_REF_REGNO (ref),
2195 DF_REF_BBNO (ref),
2196 DF_REF_INSN_INFO (ref) ? INSN_UID (DF_REF_INSN (ref)) : -1,
2197 DF_REF_FLAGS (ref),
2198 DF_REF_TYPE (ref));
2199 if (DF_REF_LOC (ref))
2200 fprintf (file, "loc %p(%p) chain ", (void *)DF_REF_LOC (ref), (void *)*DF_REF_LOC (ref));
2201 else
2202 fprintf (file, "chain ");
2203 df_chain_dump (DF_REF_CHAIN (ref), file);
2204 fprintf (file, "\n");
2205 }
2206 \f
2207 /* Functions for debugging from GDB. */
2208
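/* Illustrative usage note (not from the original sources): these entry
   points exist so the information can be printed by hand from a
   debugger, e.g.

     (gdb) call debug_df_insn (insn)
     (gdb) call debug_df_regno (123)

   each one writes to stderr via the df_*_debug routines above.  */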
2209 void
2210 debug_df_insn (rtx insn)
2211 {
2212 df_insn_debug (insn, true, stderr);
2213 debug_rtx (insn);
2214 }
2215
2216
2217 void
2218 debug_df_reg (rtx reg)
2219 {
2220 df_regno_debug (REGNO (reg), stderr);
2221 }
2222
2223
2224 void
2225 debug_df_regno (unsigned int regno)
2226 {
2227 df_regno_debug (regno, stderr);
2228 }
2229
2230
2231 void
2232 debug_df_ref (struct df_ref *ref)
2233 {
2234 df_ref_debug (ref, stderr);
2235 }
2236
2237
2238 void
2239 debug_df_defno (unsigned int defno)
2240 {
2241 df_ref_debug (DF_DEFS_GET (defno), stderr);
2242 }
2243
2244
2245 void
2246 debug_df_useno (unsigned int useno)
2247 {
2248 df_ref_debug (DF_USES_GET (useno), stderr);
2249 }
2250
2251
2252 void
2253 debug_df_chain (struct df_link *link)
2254 {
2255 df_chain_dump (link, stderr);
2256 fputc ('\n', stderr);
2257 }