gcc/df-core.c
1 /* Allocation for dataflow support routines.
2 Copyright (C) 1999-2014 Free Software Foundation, Inc.
3 Originally contributed by Michael P. Hayes
4 (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
5 Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
6 and Kenneth Zadeck (zadeck@naturalbridge.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 /*
25 OVERVIEW:
26
27 The files in this collection (df*.c, df.h) provide a general framework
28 for solving dataflow problems.  The global dataflow solution is computed
29 using an efficient iterative solver.
30
31 The file df-problems.c provides problem instances for the most common
32 dataflow problems: reaching defs, upward exposed uses, live variables,
33 uninitialized variables, def-use chains, and use-def chains. However,
34 the interface allows other dataflow problems to be defined as well.
35
36 Dataflow analysis is available in most of the rtl backend (the parts
37 between pass_df_initialize and pass_df_finish). It is quite likely
38 that these boundaries will be expanded in the future. The only
39 requirement is that there be a correct control flow graph.
40
41 There are three variations of the live variable problem that are
42 available whenever dataflow is available. The LR problem finds the
43 areas that can reach a use of a variable, while the UR problem finds the
44 areas that can be reached from a definition of a variable. The LIVE
45 problem finds the intersection of these two areas.
46
47 There are several optional problems. These can be enabled when they
48 are needed and disabled when they are not needed.
49
50 Dataflow problems are generally solved in three layers. The bottom
51 layer is called scanning, where a data structure is built for each rtl
52 insn that describes the set of defs and uses of that insn. Scanning
53 is generally kept up to date, i.e. as an insn changes, the scanned
54 version of that insn changes also.  The various mechanisms for
55 making this happen are described in the INCREMENTAL SCANNING
56 section.
57
58 In the middle layer, basic blocks are scanned to produce transfer
59 functions which describe the effects of that block on the global
60 dataflow solution.  The transfer functions are only rebuilt if
61 some instruction within the block has changed.
62
63 The top layer is the dataflow solution itself. The dataflow solution
64 is computed by using an efficient iterative solver and the transfer
65 functions. The dataflow solution must be recomputed whenever the
66 control flow graph changes or if one of the transfer functions changes.
67
68
69 USAGE:
70
71 Here is an example of using the dataflow routines.
72
73 df_[chain,live,note,rd]_add_problem (flags);
74
75 df_set_blocks (blocks);
76
77 df_analyze ();
78
79 df_dump (stderr);
80
81 df_finish_pass (false);
82
83 DF_[chain,live,note,rd]_ADD_PROBLEM adds a problem, defined by an
84 instance of struct df_problem, to the set of problems solved in this
85 instance of df. All calls to add a problem for a given instance of df
86 must occur before the first call to DF_ANALYZE.
87
88 Problems can be dependent on other problems. For instance, solving
89 def-use or use-def chains is dependent on solving reaching
90 definitions. As long as these dependencies are listed in the problem
91 definition, the order of adding the problems is not material.
92 Otherwise, the problems will be solved in the order of calls to
93 df_add_problem.  Note that it is not necessary to add any problem.  In
94 that case, df will just be used to do the scanning.
95
96
97
98 DF_SET_BLOCKS is an optional call used to define a region of the
99 function on which the analysis will be performed. The normal case is
100 to analyze the entire function and no call to df_set_blocks is made.
101 DF_SET_BLOCKS only affects the blocks that are considered when computing
102 the transfer functions and final solution. The insn level information
103 is always kept up to date.
104
105 When a subset is given, the analysis behaves as if the function only
106 contains those blocks and any edges that occur directly between the
107 blocks in the set. Care should be taken to call df_set_blocks right
108 before the call to analyze in order to eliminate the possibility that
109 optimizations that reorder blocks invalidate the bitvector.
110
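For example, the subset can be built and installed as follows (a sketch
only; which blocks are interesting, and the BB variable below, are up to
the pass):

  basic_block bb;
  bitmap blocks = BITMAP_ALLOC (&df_bitmap_obstack);
  FOR_EACH_BB_FN (bb, cfun)
    if (<this block is interesting to the pass>)
      bitmap_set_bit (blocks, bb->index);
  df_set_blocks (blocks);
  BITMAP_FREE (blocks);
  df_analyze ();

DF_SET_BLOCKS copies the bitmap, so the caller's copy can be freed
immediately, as df_analyze_loop below does.
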
111 DF_ANALYZE causes all of the defined problems to be (re)solved. When
112 DF_ANALYZE completes, the IN and OUT sets for each basic block
113 contain the computed information.  The DF_*_BB_INFO macros can be used
114 to access these bitvectors.  All deferred rescans are done before
115 the transfer functions are recomputed.
116
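For example, with the LR problem defined, membership of a register REGNO
in the live-out set of a block BB can be tested with

  bitmap_bit_p (DF_LR_OUT (bb), regno)

and DF_LR_IN, DF_LIVE_IN and DF_LIVE_OUT are accessed analogously.
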
117 DF_DUMP can then be called to dump the information produced to some
118 file. This calls DF_DUMP_START, to print the information that is not
119 basic block specific, and then calls DF_DUMP_TOP and DF_DUMP_BOTTOM
120 for each block to print the basic block specific information.  These parts
121 can all be called separately as part of a larger dump function.
122
123
124 DF_FINISH_PASS causes df_remove_problem to be called on all of the
125 optional problems. It also causes any insns whose scanning has been
126 deferred to be rescanned, and clears all of the changeable flags.
127 Setting the pass manager TODO_df_finish flag causes this function to
128 be run. However, the pass manager will call df_finish_pass AFTER the
129 pass dumping has been done, so if you want to see the results of the
130 optional problems in the pass dumps, use the TODO flag rather than
131 calling the function yourself.
132
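As a slightly fuller sketch (the particular problems, flags and
transformation step are illustrative only, not a prescribed recipe):

  df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
  df_set_flags (DF_DEFER_INSN_RESCAN);
  df_analyze ();

  ... walk the chains and/or DF_LR_IN (bb) / DF_LR_OUT (bb),
      transform insns ...

  if (dump_file)
    df_dump (dump_file);
  df_finish_pass (false);
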
133 INCREMENTAL SCANNING
134
135 There are four ways of doing the incremental scanning:
136
137 1) Immediate rescanning - Calls to df_insn_rescan, df_notes_rescan,
138 df_bb_delete, df_insn_change_bb have been added to most of
139 the low level service functions that maintain the cfg and change
140 rtl.  Calling any of these routines may cause some number of insns
141 to be rescanned.
142
143 For most modern rtl passes, this is certainly the easiest way to
144 manage rescanning the insns. This technique also has the advantage
145 that the scanning information is always correct and can be relied
146 upon even after changes have been made to the instructions. This
147 technique is contraindicated in several cases:
148
149 a) If def-use chains OR use-def chains (but not both) are built,
150 using this is SIMPLY WRONG. The problem is that when a ref is
151 deleted that is the target of an edge, there is not enough
152 information to efficiently find the source of the edge and
153 delete the edge. This leaves a dangling reference that may
154 cause problems.
155
156 b) If def-use chains AND use-def chains are built, this may
157 produce unexpected results. The problem is that the incremental
158 scanning of an insn does not know how to repair the chains that
159 point into an insn when the insn changes. So the incremental
160 scanning just deletes the chains that enter and exit the insn
161 being changed. The dangling reference issue in (a) is not a
162 problem here, but if the pass depends on the chains being
163 maintained after insns have been modified, this technique will
164 not do the correct thing.
165
166 c) If the pass modifies insns several times, this incremental
167 updating may be expensive.
168
169 d) If the pass modifies all of the insns, as does register
170 allocation, it is simply better to rescan the entire function.
171
172 2) Deferred rescanning - Calls to df_insn_rescan, df_notes_rescan, and
173 df_insn_delete do not immediately change the insn but instead make
174 a note that the insn needs to be rescanned. The next call to
175 df_analyze, df_finish_pass, or df_process_deferred_rescans will
176 cause all of the pending rescans to be processed.
177
178 This is the technique of choice if either 1a, 1b, or 1c are issues
179 in the pass. In the case of 1a or 1b, a call to df_finish_pass
180 (either manually or via TODO_df_finish) should be made before the
181 next call to df_analyze or df_process_deferred_rescans.
182
183 This mode is also used by a few passes that still rely on note_uses,
184 note_stores and for_each_rtx instead of using the DF data. This
185 can be said to fall under case 1c.
186
187 To enable this mode, call df_set_flags (DF_DEFER_INSN_RESCAN); a
188 sketch appears at the end of this list.  (This mode can be cleared by
189 calling df_clear_flags (DF_DEFER_INSN_RESCAN), but this does not cause
190 the deferred insns to be rescanned.)
191
192 3) Total rescanning - In this mode the rescanning is disabled.
193 Only when insns are deleted is the df information associated with
194 them also deleted.  At the end of the pass, a call must be made to
195 df_insn_rescan_all. This method is used by the register allocator
196 since it generally changes each insn multiple times (once for each ref)
197 and does not need to make use of the updated scanning information.
198
199 4) Do it yourself - In this mechanism, the pass updates the insns
200 itself using the low level df primitives. Currently no pass does
201 this, but it has the advantage that it is quite efficient given
202 that the pass generally has exact knowledge of what it is changing.
203
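A sketch of the deferred rescanning discipline described in (2); the
insns touched are whatever the pass happens to modify:

  df_set_flags (DF_DEFER_INSN_RESCAN);

  ... add, delete or modify insns; the rescans are merely queued ...

  df_process_deferred_rescans ();

(df_analyze and df_finish_pass would also process the queue.)  A pass
using total rescanning (3) instead finishes with a single call to
df_insn_rescan_all ().
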
204 DATA STRUCTURES
205
206 Scanning produces a `struct df_ref' data structure (ref) that is
207 allocated for every register reference (def or use) and records the
208 insn and bb the ref is found within.  The refs are linked together in
209 chains of uses and defs for each insn and for each register. Each ref
210 also has a chain field that links all the use refs for a def or all
211 the def refs for a use. This is used to create use-def or def-use
212 chains.
213
214 Different optimizations have different needs. Ultimately, only
215 register allocation and schedulers should be using the bitmaps
216 produced for the live register and uninitialized register problems.
217 The rest of the backend should be upgraded to using and maintaining
218 the linked information such as def-use or use-def chains.
219
220
221 PHILOSOPHY:
222
223 While incremental bitmaps are not worthwhile to maintain, incremental
224 chains may be perfectly reasonable. The fastest way to build chains
225 from scratch or after significant modifications is to build reaching
226 definitions (RD) and build the chains from this.
227
228 However, general algorithms for maintaining use-def or def-use chains
229 are not practical.  The amount of work to recompute any chain
230 after an arbitrary change is large.  However, with a modest
231 amount of work it is generally possible to have the application that
232 uses the chains keep them up to date. The high level knowledge of
233 what is really happening is essential to crafting efficient
234 incremental algorithms.
235
236 As for the bit vector problems, there is no interface to give a set of
237 blocks over which to restart the iteration.  In general, restarting a
238 dataflow iteration is difficult and expensive.  Again, the best way to
239 keep the dataflow information up to date (if this is really what is
240 needed) is to formulate a problem specific solution.
241
242 There are fine grained calls for creating and deleting references from
243 instructions in df-scan.c. However, these are not currently connected
244 to the engine that resolves the dataflow equations.
245
246
247 DATA STRUCTURES:
248
249 The basic object is a DF_REF (reference) and this may either be a
250 DEF (definition) or a USE of a register.
251
252 These are linked into a variety of lists; namely reg-def, reg-use,
253 insn-def, insn-use, def-use, and use-def lists. For example, the
254 reg-def lists contain all the locations that define a given register
255 while the reg-use lists contain all the locations that use a
256 register.
257
258 Note that the reg-def and reg-use chains are generally short for
259 pseudos and long for the hard registers.
260
261 ACCESSING INSNS:
262
263 1) The df insn information is kept in an array of DF_INSN_INFO objects.
264 The array is indexed by insn uid, and every DF_REF points to the
265 DF_INSN_INFO object of the insn that contains the reference.
266
267 2) Each insn has three sets of refs, which are linked into one of three
268 lists: The insn's defs list (accessed by the DF_INSN_INFO_DEFS,
269 DF_INSN_DEFS, or DF_INSN_UID_DEFS macros), the insn's uses list
270 (accessed by the DF_INSN_INFO_USES, DF_INSN_USES, or
271 DF_INSN_UID_USES macros) or the insn's eq_uses list (accessed by the
272 DF_INSN_INFO_EQ_USES, DF_INSN_EQ_USES or DF_INSN_UID_EQ_USES macros).
273 The latter list is the list of references in REG_EQUAL or REG_EQUIV
274 notes.  These macros produce a ref (or NULL); the rest of the list
275 can be obtained by traversal of the NEXT_REF field (accessed by the
276 DF_REF_NEXT_REF macro.)  There is no significance to the ordering of
277 the uses or refs in an instruction; a traversal sketch appears below.
278
279 3) Each insn has a logical uid field (LUID) which is stored in the
280 DF_INSN_INFO object for the insn. The LUID field is accessed by
281 the DF_INSN_INFO_LUID, DF_INSN_LUID, and DF_INSN_UID_LUID macros.
282 When properly set, the LUID is an integer that numbers each insn in
283 the basic block, in order from the start of the block.
284 The numbers are only correct after a call to df_analyze. They will
285 rot after insns are added, deleted, or moved around.
286
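A sketch of walking the refs of a single insn, using only the macros
described in (2) above; INSN and REGNO are supplied by the pass:

  df_ref ref;
  for (ref = DF_INSN_DEFS (insn); ref; ref = DF_REF_NEXT_REF (ref))
    if (DF_REF_REGNO (ref) == regno)
      ... this insn defines REGNO ...
  for (ref = DF_INSN_USES (insn); ref; ref = DF_REF_NEXT_REF (ref))
    ... DF_REF_REGNO (ref) is one of the registers used by INSN ...
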
287 ACCESSING REFS:
288
289 There are 4 ways to obtain access to refs:
290
291 1) References are divided into two categories, REAL and ARTIFICIAL.
292
293 REAL refs are associated with instructions.
294
295 ARTIFICIAL refs are associated with basic blocks. The heads of
296 these lists can be accessed by calling df_get_artificial_defs or
297 df_get_artificial_uses for the particular basic block.
298
299 Artificial defs and uses occur both at the beginning and ends of blocks.
300
301 For blocks that are at the destination of eh edges, the
302 artificial uses and defs occur at the beginning.  The defs relate
303 to the registers specified in EH_RETURN_DATA_REGNO and the uses
304 relate to the registers specified in EH_USES.  Logically these
305 defs and uses should really occur along the eh edge, but there is
306 no convenient way to do this.  Artificial refs that occur at the
307 beginning of the block have the DF_REF_AT_TOP flag set.
308
309 Artificial uses occur at the end of all blocks. These arise from
310 the hard registers that are always live, such as the stack
311 register, and are put there to keep the code from forgetting about
312 them.
313
314 Artificial defs occur at the end of the entry block. These arise
315 from registers that are live at entry to the function.
316
317 2) There are three types of refs: defs, uses and eq_uses. (Eq_uses are
318 uses that appear inside a REG_EQUAL or REG_EQUIV note.)
319
320 All of the eq_uses, uses and defs associated with each pseudo or
321 hard register may be linked in a bidirectional chain. These are
322 called reg-use or reg-def chains (see the sketch after this list).
323 If the changeable flag DF_EQ_NOTES is set when the chains are built,
324 the eq_uses will be treated like uses.  If it is not set, they are ignored.
325
326 The first use, eq_use or def for a register can be obtained using
327 the DF_REG_USE_CHAIN, DF_REG_EQ_USE_CHAIN or DF_REG_DEF_CHAIN
328 macros. Subsequent uses for the same regno can be obtained by
329 following the next_reg field of the ref. The number of elements in
330 each of the chains can be found by using the DF_REG_USE_COUNT,
331 DF_REG_EQ_USE_COUNT or DF_REG_DEF_COUNT macros.
332
333 In previous versions of this code, these chains were ordered. It
334 has not been practical to continue this practice.
335
336 3) If def-use or use-def chains are built, these can be traversed to
337 get to other refs. If the flag DF_EQ_NOTES has been set, the chains
338 include the eq_uses. Otherwise these are ignored when building the
339 chains.
340
341 4) An array of all of the uses (and an array of all of the defs) can
342 be built.  These arrays are indexed by the value of the ref's id
343 field.  These arrays are only lazily kept up to date, and that
344 process can be expensive. To have these arrays built, call
345 df_reorganize_defs or df_reorganize_uses. If the flag DF_EQ_NOTES
346 has been set the array will contain the eq_uses. Otherwise these
347 are ignored when building the array and assigning the ids. Note
348 that the values in the id field of a ref may change across calls to
349 df_analyze or df_reorganize_defs or df_reorganize_uses.
350
351 If the only use of this array is to find all of the refs, it is
352 better to traverse all of the registers and then traverse all of
353 reg-use or reg-def chains.
354
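A sketch combining (2) and (3): walk every def of REGNO through its
reg-def chain and, from each def, follow the def-use chain.  REGNO is
chosen by the pass, and the chain problem must have been added (with
DF_DU_CHAIN) for DF_REF_CHAIN to be populated:

  df_ref def;
  struct df_link *link;
  for (def = DF_REG_DEF_CHAIN (regno); def; def = DF_REF_NEXT_REG (def))
    for (link = DF_REF_CHAIN (def); link; link = link->next)
      ... link->ref is a use reached by this def; DF_REF_INSN and
          DF_REF_REGNO give its details ...
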
355 NOTES:
356
357 Embedded addressing side-effects, such as POST_INC or PRE_INC, generate
358 both a use and a def. These are both marked read/write to show that they
359 are dependent. For example, (set (reg 40) (mem (post_inc (reg 42))))
360 will generate a use of reg 42 followed by a def of reg 42 (both marked
361 read/write). Similarly, (set (reg 40) (mem (pre_dec (reg 41))))
362 generates a use of reg 41 then a def of reg 41 (both marked read/write),
363 even though reg 41 is decremented before it is used for the memory
364 address in this second example.
365
366 A set to a REG inside a ZERO_EXTRACT, or a set to a non-paradoxical SUBREG
367 for which the number of word_mode units covered by the outer mode is
368 smaller than that covered by the inner mode, invokes a read-modify-write
369 operation. We generate both a use and a def and again mark them
370 read/write.
371
372 Paradoxical subreg writes do not leave a trace of the old content, so they
373 are write-only operations.
374 */
375
376
377 #include "config.h"
378 #include "system.h"
379 #include "coretypes.h"
380 #include "tm.h"
381 #include "rtl.h"
382 #include "tm_p.h"
383 #include "insn-config.h"
384 #include "recog.h"
385 #include "function.h"
386 #include "regs.h"
387 #include "alloc-pool.h"
388 #include "flags.h"
389 #include "hard-reg-set.h"
390 #include "basic-block.h"
391 #include "sbitmap.h"
392 #include "bitmap.h"
393 #include "df.h"
394 #include "tree-pass.h"
395 #include "params.h"
396 #include "cfgloop.h"
397
398 static void *df_get_bb_info (struct dataflow *, unsigned int);
399 static void df_set_bb_info (struct dataflow *, unsigned int, void *);
400 static void df_clear_bb_info (struct dataflow *, unsigned int);
401 #ifdef DF_DEBUG_CFG
402 static void df_set_clean_cfg (void);
403 #endif
404
405 /* The obstack on which regsets are allocated. */
406 struct bitmap_obstack reg_obstack;
407
408 /* An obstack for bitmaps not related to specific dataflow problems.
409 This obstack should e.g. be used for bitmaps with a short lifetime
410 such as temporary bitmaps. */
411
412 bitmap_obstack df_bitmap_obstack;
413
414
415 /*----------------------------------------------------------------------------
416 Functions to create, destroy and manipulate an instance of df.
417 ----------------------------------------------------------------------------*/
418
419 struct df_d *df;
420
421 /* Add PROBLEM (and any dependent problems) to the DF instance. */
422
423 void
424 df_add_problem (struct df_problem *problem)
425 {
426 struct dataflow *dflow;
427 int i;
428
429 /* First try to add the dependent problem. */
430 if (problem->dependent_problem)
431 df_add_problem (problem->dependent_problem);
432
433 /* Check to see if this problem has already been defined. If it
434 has, just return; if not, add it to the end of the
435 vector. */
436 dflow = df->problems_by_index[problem->id];
437 if (dflow)
438 return;
439
440 /* Make a new one and add it to the end. */
441 dflow = XCNEW (struct dataflow);
442 dflow->problem = problem;
443 dflow->computed = false;
444 dflow->solutions_dirty = true;
445 df->problems_by_index[dflow->problem->id] = dflow;
446
447 /* Keep the defined problems ordered by index. This solves the
448 problem that RI will use the information from UREC if UREC has
449 been defined, or from LIVE if LIVE is defined and otherwise LR.
450 However, for this to work, the computation of RI must be pushed
451 after whichever of those problems is defined, but we do not
452 require any of those except for LR to have actually been
453 defined. */
454 df->num_problems_defined++;
455 for (i = df->num_problems_defined - 2; i >= 0; i--)
456 {
457 if (problem->id < df->problems_in_order[i]->problem->id)
458 df->problems_in_order[i+1] = df->problems_in_order[i];
459 else
460 {
461 df->problems_in_order[i+1] = dflow;
462 return;
463 }
464 }
465 df->problems_in_order[0] = dflow;
466 }
467
468
469 /* Set the CHANGEABLE_FLAGS bits in the df instance.  The old flags are
470 returned. If a flag is not allowed to be changed this will fail if
471 checking is enabled. */
472 int
473 df_set_flags (int changeable_flags)
474 {
475 int old_flags = df->changeable_flags;
476 df->changeable_flags |= changeable_flags;
477 return old_flags;
478 }
479
480
481 /* Clear the CHANGEABLE_FLAGS bits in the df instance.  The old flags are
482 returned. If a flag is not allowed to be changed this will fail if
483 checking is enabled. */
484 int
485 df_clear_flags (int changeable_flags)
486 {
487 int old_flags = df->changeable_flags;
488 df->changeable_flags &= ~changeable_flags;
489 return old_flags;
490 }
491
492
493 /* Set the blocks that are to be considered for analysis. If this is
494 not called or is called with null, the entire function is
495 analyzed. */
496
497 void
498 df_set_blocks (bitmap blocks)
499 {
500 if (blocks)
501 {
502 if (dump_file)
503 bitmap_print (dump_file, blocks, "setting blocks to analyze ", "\n");
504 if (df->blocks_to_analyze)
505 {
506 /* This block is called to change the focus from one subset
507 to another. */
508 int p;
509 bitmap_head diff;
510 bitmap_initialize (&diff, &df_bitmap_obstack);
511 bitmap_and_compl (&diff, df->blocks_to_analyze, blocks);
512 for (p = 0; p < df->num_problems_defined; p++)
513 {
514 struct dataflow *dflow = df->problems_in_order[p];
515 if (dflow->optional_p && dflow->problem->reset_fun)
516 dflow->problem->reset_fun (df->blocks_to_analyze);
517 else if (dflow->problem->free_blocks_on_set_blocks)
518 {
519 bitmap_iterator bi;
520 unsigned int bb_index;
521
522 EXECUTE_IF_SET_IN_BITMAP (&diff, 0, bb_index, bi)
523 {
524 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
525 if (bb)
526 {
527 void *bb_info = df_get_bb_info (dflow, bb_index);
528 dflow->problem->free_bb_fun (bb, bb_info);
529 df_clear_bb_info (dflow, bb_index);
530 }
531 }
532 }
533 }
534
535 bitmap_clear (&diff);
536 }
537 else
538 {
539 /* This block of code is executed to change the focus from
540 the entire function to a subset. */
541 bitmap_head blocks_to_reset;
542 bool initialized = false;
543 int p;
544 for (p = 0; p < df->num_problems_defined; p++)
545 {
546 struct dataflow *dflow = df->problems_in_order[p];
547 if (dflow->optional_p && dflow->problem->reset_fun)
548 {
549 if (!initialized)
550 {
551 basic_block bb;
552 bitmap_initialize (&blocks_to_reset, &df_bitmap_obstack);
553 FOR_ALL_BB_FN (bb, cfun)
554 {
555 bitmap_set_bit (&blocks_to_reset, bb->index);
556 }
557 }
558 dflow->problem->reset_fun (&blocks_to_reset);
559 }
560 }
561 if (initialized)
562 bitmap_clear (&blocks_to_reset);
563
564 df->blocks_to_analyze = BITMAP_ALLOC (&df_bitmap_obstack);
565 }
566 bitmap_copy (df->blocks_to_analyze, blocks);
567 df->analyze_subset = true;
568 }
569 else
570 {
571 /* This block is executed to reset the focus to the entire
572 function. */
573 if (dump_file)
574 fprintf (dump_file, "clearing blocks_to_analyze\n");
575 if (df->blocks_to_analyze)
576 {
577 BITMAP_FREE (df->blocks_to_analyze);
578 df->blocks_to_analyze = NULL;
579 }
580 df->analyze_subset = false;
581 }
582
583 /* Setting the blocks causes the refs to be unorganized since only
584 the refs in the blocks are seen. */
585 df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE);
586 df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE);
587 df_mark_solutions_dirty ();
588 }
589
590
591 /* Delete a DFLOW problem (and any problems that depend on this
592 problem). */
593
594 void
595 df_remove_problem (struct dataflow *dflow)
596 {
597 struct df_problem *problem;
598 int i;
599
600 if (!dflow)
601 return;
602
603 problem = dflow->problem;
604 gcc_assert (problem->remove_problem_fun);
605
606 /* Delete any problems that depended on this problem first. */
607 for (i = 0; i < df->num_problems_defined; i++)
608 if (df->problems_in_order[i]->problem->dependent_problem == problem)
609 df_remove_problem (df->problems_in_order[i]);
610
611 /* Now remove this problem. */
612 for (i = 0; i < df->num_problems_defined; i++)
613 if (df->problems_in_order[i] == dflow)
614 {
615 int j;
616 for (j = i + 1; j < df->num_problems_defined; j++)
617 df->problems_in_order[j-1] = df->problems_in_order[j];
618 df->problems_in_order[j-1] = NULL;
619 df->num_problems_defined--;
620 break;
621 }
622
623 (problem->remove_problem_fun) ();
624 df->problems_by_index[problem->id] = NULL;
625 }
626
627
628 /* Remove all of the problems that are not permanent. Scanning, LR
629 and (at -O2 or higher) LIVE are permanent; the rest are removable.
630 Also clear all of the changeable_flags. */
631
632 void
633 df_finish_pass (bool verify ATTRIBUTE_UNUSED)
634 {
635 int i;
636 int removed = 0;
637
638 #ifdef ENABLE_DF_CHECKING
639 int saved_flags;
640 #endif
641
642 if (!df)
643 return;
644
645 df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE);
646 df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE);
647
648 #ifdef ENABLE_DF_CHECKING
649 saved_flags = df->changeable_flags;
650 #endif
651
652 for (i = 0; i < df->num_problems_defined; i++)
653 {
654 struct dataflow *dflow = df->problems_in_order[i];
655 struct df_problem *problem = dflow->problem;
656
657 if (dflow->optional_p)
658 {
659 gcc_assert (problem->remove_problem_fun);
660 (problem->remove_problem_fun) ();
661 df->problems_in_order[i] = NULL;
662 df->problems_by_index[problem->id] = NULL;
663 removed++;
664 }
665 }
666 df->num_problems_defined -= removed;
667
668 /* Clear all of the flags. */
669 df->changeable_flags = 0;
670 df_process_deferred_rescans ();
671
672 /* Set the focus back to the whole function. */
673 if (df->blocks_to_analyze)
674 {
675 BITMAP_FREE (df->blocks_to_analyze);
676 df->blocks_to_analyze = NULL;
677 df_mark_solutions_dirty ();
678 df->analyze_subset = false;
679 }
680
681 #ifdef ENABLE_DF_CHECKING
682 /* Verification will fail if DF_NO_INSN_RESCAN is set.  */
683 if (!(saved_flags & DF_NO_INSN_RESCAN))
684 {
685 df_lr_verify_transfer_functions ();
686 if (df_live)
687 df_live_verify_transfer_functions ();
688 }
689
690 #ifdef DF_DEBUG_CFG
691 df_set_clean_cfg ();
692 #endif
693 #endif
694
695 #ifdef ENABLE_CHECKING
696 if (verify)
697 df->changeable_flags |= DF_VERIFY_SCHEDULED;
698 #endif
699 }
700
701
702 /* Set up the dataflow instance for the entire back end. */
703
704 static unsigned int
705 rest_of_handle_df_initialize (void)
706 {
707 gcc_assert (!df);
708 df = XCNEW (struct df_d);
709 df->changeable_flags = 0;
710
711 bitmap_obstack_initialize (&df_bitmap_obstack);
712
713 /* Set this to a conservative value. Stack_ptr_mod will compute it
714 correctly later. */
715 crtl->sp_is_unchanging = 0;
716
717 df_scan_add_problem ();
718 df_scan_alloc (NULL);
719
720 /* These three problems are permanent. */
721 df_lr_add_problem ();
722 if (optimize > 1)
723 df_live_add_problem ();
724
725 df->postorder = XNEWVEC (int, last_basic_block_for_fn (cfun));
726 df->postorder_inverted = XNEWVEC (int, last_basic_block_for_fn (cfun));
727 df->n_blocks = post_order_compute (df->postorder, true, true);
728 df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted);
729 gcc_assert (df->n_blocks == df->n_blocks_inverted);
730
731 df->hard_regs_live_count = XCNEWVEC (unsigned int, FIRST_PSEUDO_REGISTER);
732
733 df_hard_reg_init ();
734 /* After reload, some ports add certain bits to regs_ever_live so
735 this cannot be reset. */
736 df_compute_regs_ever_live (true);
737 df_scan_blocks ();
738 df_compute_regs_ever_live (false);
739 return 0;
740 }
741
742
743 namespace {
744
745 const pass_data pass_data_df_initialize_opt =
746 {
747 RTL_PASS, /* type */
748 "dfinit", /* name */
749 OPTGROUP_NONE, /* optinfo_flags */
750 true, /* has_execute */
751 TV_DF_SCAN, /* tv_id */
752 0, /* properties_required */
753 0, /* properties_provided */
754 0, /* properties_destroyed */
755 0, /* todo_flags_start */
756 0, /* todo_flags_finish */
757 };
758
759 class pass_df_initialize_opt : public rtl_opt_pass
760 {
761 public:
762 pass_df_initialize_opt (gcc::context *ctxt)
763 : rtl_opt_pass (pass_data_df_initialize_opt, ctxt)
764 {}
765
766 /* opt_pass methods: */
767 virtual bool gate (function *) { return optimize > 0; }
768 virtual unsigned int execute (function *)
769 {
770 return rest_of_handle_df_initialize ();
771 }
772
773 }; // class pass_df_initialize_opt
774
775 } // anon namespace
776
777 rtl_opt_pass *
778 make_pass_df_initialize_opt (gcc::context *ctxt)
779 {
780 return new pass_df_initialize_opt (ctxt);
781 }
782
783
784 namespace {
785
786 const pass_data pass_data_df_initialize_no_opt =
787 {
788 RTL_PASS, /* type */
789 "no-opt dfinit", /* name */
790 OPTGROUP_NONE, /* optinfo_flags */
791 true, /* has_execute */
792 TV_DF_SCAN, /* tv_id */
793 0, /* properties_required */
794 0, /* properties_provided */
795 0, /* properties_destroyed */
796 0, /* todo_flags_start */
797 0, /* todo_flags_finish */
798 };
799
800 class pass_df_initialize_no_opt : public rtl_opt_pass
801 {
802 public:
803 pass_df_initialize_no_opt (gcc::context *ctxt)
804 : rtl_opt_pass (pass_data_df_initialize_no_opt, ctxt)
805 {}
806
807 /* opt_pass methods: */
808 virtual bool gate (function *) { return optimize == 0; }
809 virtual unsigned int execute (function *)
810 {
811 return rest_of_handle_df_initialize ();
812 }
813
814 }; // class pass_df_initialize_no_opt
815
816 } // anon namespace
817
818 rtl_opt_pass *
819 make_pass_df_initialize_no_opt (gcc::context *ctxt)
820 {
821 return new pass_df_initialize_no_opt (ctxt);
822 }
823
824
825 /* Free all the dataflow info and the DF structure. This should be
826 called from the df_finish macro which also NULLs the parm. */
827
828 static unsigned int
829 rest_of_handle_df_finish (void)
830 {
831 int i;
832
833 gcc_assert (df);
834
835 for (i = 0; i < df->num_problems_defined; i++)
836 {
837 struct dataflow *dflow = df->problems_in_order[i];
838 dflow->problem->free_fun ();
839 }
840
841 free (df->postorder);
842 free (df->postorder_inverted);
843 free (df->hard_regs_live_count);
844 free (df);
845 df = NULL;
846
847 bitmap_obstack_release (&df_bitmap_obstack);
848 return 0;
849 }
850
851
852 namespace {
853
854 const pass_data pass_data_df_finish =
855 {
856 RTL_PASS, /* type */
857 "dfinish", /* name */
858 OPTGROUP_NONE, /* optinfo_flags */
859 true, /* has_execute */
860 TV_NONE, /* tv_id */
861 0, /* properties_required */
862 0, /* properties_provided */
863 0, /* properties_destroyed */
864 0, /* todo_flags_start */
865 0, /* todo_flags_finish */
866 };
867
868 class pass_df_finish : public rtl_opt_pass
869 {
870 public:
871 pass_df_finish (gcc::context *ctxt)
872 : rtl_opt_pass (pass_data_df_finish, ctxt)
873 {}
874
875 /* opt_pass methods: */
876 virtual unsigned int execute (function *)
877 {
878 return rest_of_handle_df_finish ();
879 }
880
881 }; // class pass_df_finish
882
883 } // anon namespace
884
885 rtl_opt_pass *
886 make_pass_df_finish (gcc::context *ctxt)
887 {
888 return new pass_df_finish (ctxt);
889 }
890
891
892
893
894 \f
895 /*----------------------------------------------------------------------------
896 The general data flow analysis engine.
897 ----------------------------------------------------------------------------*/
898
899 /* Return the time when BB was last visited.  */
900 #define BB_LAST_CHANGE_AGE(bb) ((ptrdiff_t)(bb)->aux)
901
902 /* Helper function for df_worklist_dataflow.
903 Propagate the dataflow forward.
904 Given a BB_INDEX, do the dataflow propagation
905 and set bits on for successors in PENDING
906 if the out set of the dataflow has changed.
907
908 AGE specifies the time when BB was last visited.
909 An AGE of 0 means we are visiting for the first time and need to
910 compute the transfer function to initialize the data structures.
911 Otherwise we re-do the transfer function only if something changed
912 while computing the confluence functions.
913 We need to compute the confluence functions only for edges whose
914 source changed since the last visit of the BB.
915
916 Return true if BB info has changed. This is always the case
917 in the first visit. */
918
919 static bool
920 df_worklist_propagate_forward (struct dataflow *dataflow,
921 unsigned bb_index,
922 unsigned *bbindex_to_postorder,
923 bitmap pending,
924 sbitmap considered,
925 ptrdiff_t age)
926 {
927 edge e;
928 edge_iterator ei;
929 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
930 bool changed = !age;
931
932 /* Calculate <conf_op> of incoming edges. */
933 if (EDGE_COUNT (bb->preds) > 0)
934 FOR_EACH_EDGE (e, ei, bb->preds)
935 {
936 if (age <= BB_LAST_CHANGE_AGE (e->src)
937 && bitmap_bit_p (considered, e->src->index))
938 changed |= dataflow->problem->con_fun_n (e);
939 }
940 else if (dataflow->problem->con_fun_0)
941 dataflow->problem->con_fun_0 (bb);
942
943 if (changed
944 && dataflow->problem->trans_fun (bb_index))
945 {
946 /* The out set of this block has changed.
947 Propagate to the outgoing blocks. */
948 FOR_EACH_EDGE (e, ei, bb->succs)
949 {
950 unsigned ob_index = e->dest->index;
951
952 if (bitmap_bit_p (considered, ob_index))
953 bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
954 }
955 return true;
956 }
957 return false;
958 }
959
960
961 /* Helper function for df_worklist_dataflow.
962 Propagate the dataflow backward. */
963
964 static bool
965 df_worklist_propagate_backward (struct dataflow *dataflow,
966 unsigned bb_index,
967 unsigned *bbindex_to_postorder,
968 bitmap pending,
969 sbitmap considered,
970 ptrdiff_t age)
971 {
972 edge e;
973 edge_iterator ei;
974 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
975 bool changed = !age;
976
977 /* Calculate <conf_op> of outgoing edges.  */
978 if (EDGE_COUNT (bb->succs) > 0)
979 FOR_EACH_EDGE (e, ei, bb->succs)
980 {
981 if (age <= BB_LAST_CHANGE_AGE (e->dest)
982 && bitmap_bit_p (considered, e->dest->index))
983 changed |= dataflow->problem->con_fun_n (e);
984 }
985 else if (dataflow->problem->con_fun_0)
986 dataflow->problem->con_fun_0 (bb);
987
988 if (changed
989 && dataflow->problem->trans_fun (bb_index))
990 {
991 /* The in set of this block has changed.
992 Propagate to the predecessor blocks.  */
993 FOR_EACH_EDGE (e, ei, bb->preds)
994 {
995 unsigned ob_index = e->src->index;
996
997 if (bitmap_bit_p (considered, ob_index))
998 bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
999 }
1000 return true;
1001 }
1002 return false;
1003 }
1004
1005 /* Main dataflow solver loop.
1006
1007 DATAFLOW is the problem we are solving, PENDING is the worklist of basic
1008 blocks we need to visit.
1009 BLOCKS_IN_POSTORDER is an array of size N_BLOCKS giving the BBs in postorder
1010 and BBINDEX_TO_POSTORDER is an array mapping BB->index back to its postorder
1011 position.  PENDING will be freed.
1012
1013 The worklists are bitmaps indexed by postorder positions.
1014
1015 The function implements the standard algorithm for dataflow solving with two
1016 worklists (we are processing WORKLIST and storing new BBs to visit in
1017 PENDING).
1018
1019 As an optimization we maintain the age at which each BB last changed (stored
1020 in bb->aux) and the age at which it was last visited (stored in
1021 last_visit_age).  This avoids the need to re-do the confluence function for
1022 edges whose source did not change since the destination was last visited.  */
1023
1024 static void
1025 df_worklist_dataflow_doublequeue (struct dataflow *dataflow,
1026 bitmap pending,
1027 sbitmap considered,
1028 int *blocks_in_postorder,
1029 unsigned *bbindex_to_postorder,
1030 int n_blocks)
1031 {
1032 enum df_flow_dir dir = dataflow->problem->dir;
1033 int dcount = 0;
1034 bitmap worklist = BITMAP_ALLOC (&df_bitmap_obstack);
1035 int age = 0;
1036 bool changed;
1037 vec<int> last_visit_age = vNULL;
1038 int prev_age;
1039 basic_block bb;
1040 int i;
1041
1042 last_visit_age.safe_grow_cleared (n_blocks);
1043
1044 /* Double-queueing. Worklist is for the current iteration,
1045 and pending is for the next. */
1046 while (!bitmap_empty_p (pending))
1047 {
1048 bitmap_iterator bi;
1049 unsigned int index;
1050
1051 /* Swap pending and worklist. */
1052 bitmap temp = worklist;
1053 worklist = pending;
1054 pending = temp;
1055
1056 EXECUTE_IF_SET_IN_BITMAP (worklist, 0, index, bi)
1057 {
1058 unsigned bb_index;
1059 dcount++;
1060
1061 bitmap_clear_bit (pending, index);
1062 bb_index = blocks_in_postorder[index];
1063 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
1064 prev_age = last_visit_age[index];
1065 if (dir == DF_FORWARD)
1066 changed = df_worklist_propagate_forward (dataflow, bb_index,
1067 bbindex_to_postorder,
1068 pending, considered,
1069 prev_age);
1070 else
1071 changed = df_worklist_propagate_backward (dataflow, bb_index,
1072 bbindex_to_postorder,
1073 pending, considered,
1074 prev_age);
1075 last_visit_age[index] = ++age;
1076 if (changed)
1077 bb->aux = (void *)(ptrdiff_t)age;
1078 }
1079 bitmap_clear (worklist);
1080 }
1081 for (i = 0; i < n_blocks; i++)
1082 BASIC_BLOCK_FOR_FN (cfun, blocks_in_postorder[i])->aux = NULL;
1083
1084 BITMAP_FREE (worklist);
1085 BITMAP_FREE (pending);
1086 last_visit_age.release ();
1087
1088 /* Dump statistics. */
1089 if (dump_file)
1090 fprintf (dump_file, "df_worklist_dataflow_doublequeue:"
1091 "n_basic_blocks %d n_edges %d"
1092 " count %d (%5.2g)\n",
1093 n_basic_blocks_for_fn (cfun), n_edges_for_fn (cfun),
1094 dcount, dcount / (float)n_basic_blocks_for_fn (cfun));
1095 }
1096
1097 /* Worklist-based dataflow solver.  It uses a bitmap as a worklist,
1098 with the n-th bit representing the n-th block in reverse postorder.
1099 The solver is a double-queue algorithm similar to the "double stack" solver
1100 from Cooper, Harvey and Kennedy, "Iterative data-flow analysis, Revisited".
1101 The only significant difference is that the worklist in this implementation
1102 is always sorted in RPO of the CFG visiting direction. */
1103
1104 void
1105 df_worklist_dataflow (struct dataflow *dataflow,
1106 bitmap blocks_to_consider,
1107 int *blocks_in_postorder,
1108 int n_blocks)
1109 {
1110 bitmap pending = BITMAP_ALLOC (&df_bitmap_obstack);
1111 sbitmap considered = sbitmap_alloc (last_basic_block_for_fn (cfun));
1112 bitmap_iterator bi;
1113 unsigned int *bbindex_to_postorder;
1114 int i;
1115 unsigned int index;
1116 enum df_flow_dir dir = dataflow->problem->dir;
1117
1118 gcc_assert (dir != DF_NONE);
1119
1120 /* BBINDEX_TO_POSTORDER maps the bb->index to the reverse postorder. */
1121 bbindex_to_postorder = XNEWVEC (unsigned int,
1122 last_basic_block_for_fn (cfun));
1123
1124 /* Initialize the array to an out-of-bound value. */
1125 for (i = 0; i < last_basic_block_for_fn (cfun); i++)
1126 bbindex_to_postorder[i] = last_basic_block_for_fn (cfun);
1127
1128 /* Initialize the considered map. */
1129 bitmap_clear (considered);
1130 EXECUTE_IF_SET_IN_BITMAP (blocks_to_consider, 0, index, bi)
1131 {
1132 bitmap_set_bit (considered, index);
1133 }
1134
1135 /* Initialize the mapping of block index to postorder. */
1136 for (i = 0; i < n_blocks; i++)
1137 {
1138 bbindex_to_postorder[blocks_in_postorder[i]] = i;
1139 /* Add all blocks to the worklist. */
1140 bitmap_set_bit (pending, i);
1141 }
1142
1143 /* Initialize the problem. */
1144 if (dataflow->problem->init_fun)
1145 dataflow->problem->init_fun (blocks_to_consider);
1146
1147 /* Solve it. */
1148 df_worklist_dataflow_doublequeue (dataflow, pending, considered,
1149 blocks_in_postorder,
1150 bbindex_to_postorder,
1151 n_blocks);
1152 sbitmap_free (considered);
1153 free (bbindex_to_postorder);
1154 }
1155
1156
1157 /* Remove the entries not in BLOCKS from the LIST of length LEN, preserving
1158 the order of the remaining entries. Returns the length of the resulting
1159 list. */
1160
1161 static unsigned
1162 df_prune_to_subcfg (int list[], unsigned len, bitmap blocks)
1163 {
1164 unsigned act, last;
1165
1166 for (act = 0, last = 0; act < len; act++)
1167 if (bitmap_bit_p (blocks, list[act]))
1168 list[last++] = list[act];
1169
1170 return last;
1171 }
1172
1173
1174 /* Execute dataflow analysis on a single dataflow problem.
1175
1176 BLOCKS_TO_CONSIDER are the blocks whose solution can either be
1177 examined or will be computed. For calls from DF_ANALYZE, this is
1178 the set of blocks that has been passed to DF_SET_BLOCKS.
1179 */
1180
1181 void
1182 df_analyze_problem (struct dataflow *dflow,
1183 bitmap blocks_to_consider,
1184 int *postorder, int n_blocks)
1185 {
1186 timevar_push (dflow->problem->tv_id);
1187
1188 /* (Re)Allocate the datastructures necessary to solve the problem. */
1189 if (dflow->problem->alloc_fun)
1190 dflow->problem->alloc_fun (blocks_to_consider);
1191
1192 #ifdef ENABLE_DF_CHECKING
1193 if (dflow->problem->verify_start_fun)
1194 dflow->problem->verify_start_fun ();
1195 #endif
1196
1197 /* Set up the problem and compute the local information. */
1198 if (dflow->problem->local_compute_fun)
1199 dflow->problem->local_compute_fun (blocks_to_consider);
1200
1201 /* Solve the equations. */
1202 if (dflow->problem->dataflow_fun)
1203 dflow->problem->dataflow_fun (dflow, blocks_to_consider,
1204 postorder, n_blocks);
1205
1206 /* Massage the solution. */
1207 if (dflow->problem->finalize_fun)
1208 dflow->problem->finalize_fun (blocks_to_consider);
1209
1210 #ifdef ENABLE_DF_CHECKING
1211 if (dflow->problem->verify_end_fun)
1212 dflow->problem->verify_end_fun ();
1213 #endif
1214
1215 timevar_pop (dflow->problem->tv_id);
1216
1217 dflow->computed = true;
1218 }
1219
1220
1221 /* Analyze dataflow info. */
1222
1223 static void
1224 df_analyze_1 (void)
1225 {
1226 int i;
1227
1228 /* These should be the same. */
1229 gcc_assert (df->n_blocks == df->n_blocks_inverted);
1230
1231 /* We need to do this before the df_verify_all because this is
1232 not kept incrementally up to date. */
1233 df_compute_regs_ever_live (false);
1234 df_process_deferred_rescans ();
1235
1236 if (dump_file)
1237 fprintf (dump_file, "df_analyze called\n");
1238
1239 #ifndef ENABLE_DF_CHECKING
1240 if (df->changeable_flags & DF_VERIFY_SCHEDULED)
1241 #endif
1242 df_verify ();
1243
1244 /* Skip over the DF_SCAN problem. */
1245 for (i = 1; i < df->num_problems_defined; i++)
1246 {
1247 struct dataflow *dflow = df->problems_in_order[i];
1248 if (dflow->solutions_dirty)
1249 {
1250 if (dflow->problem->dir == DF_FORWARD)
1251 df_analyze_problem (dflow,
1252 df->blocks_to_analyze,
1253 df->postorder_inverted,
1254 df->n_blocks_inverted);
1255 else
1256 df_analyze_problem (dflow,
1257 df->blocks_to_analyze,
1258 df->postorder,
1259 df->n_blocks);
1260 }
1261 }
1262
1263 if (!df->analyze_subset)
1264 {
1265 BITMAP_FREE (df->blocks_to_analyze);
1266 df->blocks_to_analyze = NULL;
1267 }
1268
1269 #ifdef DF_DEBUG_CFG
1270 df_set_clean_cfg ();
1271 #endif
1272 }
1273
1274 /* Analyze dataflow info. */
1275
1276 void
1277 df_analyze (void)
1278 {
1279 bitmap current_all_blocks = BITMAP_ALLOC (&df_bitmap_obstack);
1280 int i;
1281
1282 free (df->postorder);
1283 free (df->postorder_inverted);
1284 df->postorder = XNEWVEC (int, last_basic_block_for_fn (cfun));
1285 df->postorder_inverted = XNEWVEC (int, last_basic_block_for_fn (cfun));
1286 df->n_blocks = post_order_compute (df->postorder, true, true);
1287 df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted);
1288
1289 for (i = 0; i < df->n_blocks; i++)
1290 bitmap_set_bit (current_all_blocks, df->postorder[i]);
1291
1292 #ifdef ENABLE_CHECKING
1293 /* Verify that POSTORDER_INVERTED only contains blocks reachable from
1294 the ENTRY block. */
1295 for (i = 0; i < df->n_blocks_inverted; i++)
1296 gcc_assert (bitmap_bit_p (current_all_blocks, df->postorder_inverted[i]));
1297 #endif
1298
1299 /* Make sure that we have pruned any unreachable blocks from these
1300 sets. */
1301 if (df->analyze_subset)
1302 {
1303 bitmap_and_into (df->blocks_to_analyze, current_all_blocks);
1304 df->n_blocks = df_prune_to_subcfg (df->postorder,
1305 df->n_blocks, df->blocks_to_analyze);
1306 df->n_blocks_inverted = df_prune_to_subcfg (df->postorder_inverted,
1307 df->n_blocks_inverted,
1308 df->blocks_to_analyze);
1309 BITMAP_FREE (current_all_blocks);
1310 }
1311 else
1312 {
1313 df->blocks_to_analyze = current_all_blocks;
1314 current_all_blocks = NULL;
1315 }
1316
1317 df_analyze_1 ();
1318 }
1319
1320 /* Compute the reverse top sort order of the sub-CFG specified by LOOP.
1321 Returns the number of blocks which is always loop->num_nodes. */
1322
1323 static int
1324 loop_post_order_compute (int *post_order, struct loop *loop)
1325 {
1326 edge_iterator *stack;
1327 int sp;
1328 int post_order_num = 0;
1329 bitmap visited;
1330
1331 /* Allocate stack for back-tracking up CFG. */
1332 stack = XNEWVEC (edge_iterator, loop->num_nodes + 1);
1333 sp = 0;
1334
1335 /* Allocate bitmap to track nodes that have been visited. */
1336 visited = BITMAP_ALLOC (NULL);
1337
1338 /* Push the first edge on to the stack. */
1339 stack[sp++] = ei_start (loop_preheader_edge (loop)->src->succs);
1340
1341 while (sp)
1342 {
1343 edge_iterator ei;
1344 basic_block src;
1345 basic_block dest;
1346
1347 /* Look at the edge on the top of the stack. */
1348 ei = stack[sp - 1];
1349 src = ei_edge (ei)->src;
1350 dest = ei_edge (ei)->dest;
1351
1352 /* Check if the edge destination has been visited yet and mark it
1353 if not so. */
1354 if (flow_bb_inside_loop_p (loop, dest)
1355 && bitmap_set_bit (visited, dest->index))
1356 {
1357 if (EDGE_COUNT (dest->succs) > 0)
1358 /* Since the DEST node has been visited for the first
1359 time, check its successors. */
1360 stack[sp++] = ei_start (dest->succs);
1361 else
1362 post_order[post_order_num++] = dest->index;
1363 }
1364 else
1365 {
1366 if (ei_one_before_end_p (ei)
1367 && src != loop_preheader_edge (loop)->src)
1368 post_order[post_order_num++] = src->index;
1369
1370 if (!ei_one_before_end_p (ei))
1371 ei_next (&stack[sp - 1]);
1372 else
1373 sp--;
1374 }
1375 }
1376
1377 free (stack);
1378 BITMAP_FREE (visited);
1379
1380 return post_order_num;
1381 }
1382
1383 /* Compute the reverse top sort order of the inverted sub-CFG specified
1384 by LOOP. Returns the number of blocks which is always loop->num_nodes. */
1385
1386 static int
1387 loop_inverted_post_order_compute (int *post_order, struct loop *loop)
1388 {
1389 basic_block bb;
1390 edge_iterator *stack;
1391 int sp;
1392 int post_order_num = 0;
1393 bitmap visited;
1394
1395 /* Allocate stack for back-tracking up CFG. */
1396 stack = XNEWVEC (edge_iterator, loop->num_nodes + 1);
1397 sp = 0;
1398
1399 /* Allocate bitmap to track nodes that have been visited. */
1400 visited = BITMAP_ALLOC (NULL);
1401
1402 /* Put all latches into the initial work list. In theory we'd want
1403 to start from loop exits but then we'd have the special case of
1404 endless loops. It doesn't really matter for DF iteration order and
1405 handling latches last is probably even better. */
1406 stack[sp++] = ei_start (loop->header->preds);
1407 bitmap_set_bit (visited, loop->header->index);
1408
1409 /* The inverted traversal loop. */
1410 while (sp)
1411 {
1412 edge_iterator ei;
1413 basic_block pred;
1414
1415 /* Look at the edge on the top of the stack. */
1416 ei = stack[sp - 1];
1417 bb = ei_edge (ei)->dest;
1418 pred = ei_edge (ei)->src;
1419
1420 /* Check if the predecessor has been visited yet and mark it
1421 if not so. */
1422 if (flow_bb_inside_loop_p (loop, pred)
1423 && bitmap_set_bit (visited, pred->index))
1424 {
1425 if (EDGE_COUNT (pred->preds) > 0)
1426 /* Since the predecessor node has been visited for the first
1427 time, check its predecessors. */
1428 stack[sp++] = ei_start (pred->preds);
1429 else
1430 post_order[post_order_num++] = pred->index;
1431 }
1432 else
1433 {
1434 if (flow_bb_inside_loop_p (loop, bb)
1435 && ei_one_before_end_p (ei))
1436 post_order[post_order_num++] = bb->index;
1437
1438 if (!ei_one_before_end_p (ei))
1439 ei_next (&stack[sp - 1]);
1440 else
1441 sp--;
1442 }
1443 }
1444
1445 free (stack);
1446 BITMAP_FREE (visited);
1447 return post_order_num;
1448 }
1449
1450
1451 /* Analyze dataflow info for the basic blocks contained in LOOP. */
1452
1453 void
1454 df_analyze_loop (struct loop *loop)
1455 {
1456 free (df->postorder);
1457 free (df->postorder_inverted);
1458
1459 df->postorder = XNEWVEC (int, loop->num_nodes);
1460 df->postorder_inverted = XNEWVEC (int, loop->num_nodes);
1461 df->n_blocks = loop_post_order_compute (df->postorder, loop);
1462 df->n_blocks_inverted
1463 = loop_inverted_post_order_compute (df->postorder_inverted, loop);
1464 gcc_assert ((unsigned) df->n_blocks == loop->num_nodes);
1465 gcc_assert ((unsigned) df->n_blocks_inverted == loop->num_nodes);
1466
1467 bitmap blocks = BITMAP_ALLOC (&df_bitmap_obstack);
1468 for (int i = 0; i < df->n_blocks; ++i)
1469 bitmap_set_bit (blocks, df->postorder[i]);
1470 df_set_blocks (blocks);
1471 BITMAP_FREE (blocks);
1472
1473 df_analyze_1 ();
1474 }
1475
1476
1477 /* Return the number of basic blocks from the last call to df_analyze. */
1478
1479 int
1480 df_get_n_blocks (enum df_flow_dir dir)
1481 {
1482 gcc_assert (dir != DF_NONE);
1483
1484 if (dir == DF_FORWARD)
1485 {
1486 gcc_assert (df->postorder_inverted);
1487 return df->n_blocks_inverted;
1488 }
1489
1490 gcc_assert (df->postorder);
1491 return df->n_blocks;
1492 }
1493
1494
1495 /* Return a pointer to the array of basic blocks in the reverse postorder.
1496 Depending on the direction of the dataflow problem,
1497 it returns either the usual reverse postorder array
1498 or the reverse postorder of inverted traversal. */
1499 int *
1500 df_get_postorder (enum df_flow_dir dir)
1501 {
1502 gcc_assert (dir != DF_NONE);
1503
1504 if (dir == DF_FORWARD)
1505 {
1506 gcc_assert (df->postorder_inverted);
1507 return df->postorder_inverted;
1508 }
1509 gcc_assert (df->postorder);
1510 return df->postorder;
1511 }
1512
1513 static struct df_problem user_problem;
1514 static struct dataflow user_dflow;
1515
1516 /* Interface for calling iterative dataflow with user defined
1517 confluence and transfer functions. All that is necessary is to
1518 supply DIR, a direction, INIT_FUN, an initialization function (or NULL),
1519 CONF_FUN_0, a confluence function for blocks with no logical preds (or
1520 NULL), CONF_FUN_N, the normal confluence function, TRANS_FUN, the basic
1521 block transfer function, BLOCKS, the set of blocks to examine, POSTORDER,
1522 the blocks in postorder, and N_BLOCKS, the number of blocks in POSTORDER.  */
1523
1524 void
1525 df_simple_dataflow (enum df_flow_dir dir,
1526 df_init_function init_fun,
1527 df_confluence_function_0 con_fun_0,
1528 df_confluence_function_n con_fun_n,
1529 df_transfer_function trans_fun,
1530 bitmap blocks, int * postorder, int n_blocks)
1531 {
1532 memset (&user_problem, 0, sizeof (struct df_problem));
1533 user_problem.dir = dir;
1534 user_problem.init_fun = init_fun;
1535 user_problem.con_fun_0 = con_fun_0;
1536 user_problem.con_fun_n = con_fun_n;
1537 user_problem.trans_fun = trans_fun;
1538 user_dflow.problem = &user_problem;
1539 df_worklist_dataflow (&user_dflow, blocks, postorder, n_blocks);
1540 }
1541
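/* Example of use (a sketch only; the my_* callbacks are hypothetical and
   must match the df_*_function typedefs in df.h, and df_analyze must have
   been run so that df_get_postorder and df_get_n_blocks are valid).
   MY_INIT (re)sets the per-block solution for BLOCKS, MY_CONF_0 applies the
   boundary condition (it may be NULL), MY_CONF_N merges the solution across
   one edge and returns true if anything changed, and MY_TRANS recomputes one
   block and returns true if its solution changed:

     static void my_init (bitmap blocks);
     static void my_conf_0 (basic_block bb);
     static bool my_conf_n (edge e);
     static bool my_trans (int bb_index);

     df_simple_dataflow (DF_FORWARD, my_init, my_conf_0, my_conf_n, my_trans,
                         blocks, df_get_postorder (DF_FORWARD),
                         df_get_n_blocks (DF_FORWARD));

   where BLOCKS is a bitmap of the basic block indices to solve over.  */
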
1542
1543 \f
1544 /*----------------------------------------------------------------------------
1545 Functions to support limited incremental change.
1546 ----------------------------------------------------------------------------*/
1547
1548
1549 /* Get basic block info. */
1550
1551 static void *
1552 df_get_bb_info (struct dataflow *dflow, unsigned int index)
1553 {
1554 if (dflow->block_info == NULL)
1555 return NULL;
1556 if (index >= dflow->block_info_size)
1557 return NULL;
1558 return (void *)((char *)dflow->block_info
1559 + index * dflow->problem->block_info_elt_size);
1560 }
1561
1562
1563 /* Set basic block info. */
1564
1565 static void
1566 df_set_bb_info (struct dataflow *dflow, unsigned int index,
1567 void *bb_info)
1568 {
1569 gcc_assert (dflow->block_info);
1570 memcpy ((char *)dflow->block_info
1571 + index * dflow->problem->block_info_elt_size,
1572 bb_info, dflow->problem->block_info_elt_size);
1573 }
1574
1575
1576 /* Clear basic block info. */
1577
1578 static void
1579 df_clear_bb_info (struct dataflow *dflow, unsigned int index)
1580 {
1581 gcc_assert (dflow->block_info);
1582 gcc_assert (dflow->block_info_size > index);
1583 memset ((char *)dflow->block_info
1584 + index * dflow->problem->block_info_elt_size,
1585 0, dflow->problem->block_info_elt_size);
1586 }
1587
1588
1589 /* Mark the solutions as being out of date. */
1590
1591 void
1592 df_mark_solutions_dirty (void)
1593 {
1594 if (df)
1595 {
1596 int p;
1597 for (p = 1; p < df->num_problems_defined; p++)
1598 df->problems_in_order[p]->solutions_dirty = true;
1599 }
1600 }
1601
1602
1603 /* Return true if BB needs its transfer functions recomputed.  */
1604
1605 bool
1606 df_get_bb_dirty (basic_block bb)
1607 {
1608 return bitmap_bit_p ((df_live
1609 ? df_live : df_lr)->out_of_date_transfer_functions,
1610 bb->index);
1611 }
1612
1613
1614 /* Mark BB as having its transfer functions out of
1615 date.  */
1616
1617 void
1618 df_set_bb_dirty (basic_block bb)
1619 {
1620 bb->flags |= BB_MODIFIED;
1621 if (df)
1622 {
1623 int p;
1624 for (p = 1; p < df->num_problems_defined; p++)
1625 {
1626 struct dataflow *dflow = df->problems_in_order[p];
1627 if (dflow->out_of_date_transfer_functions)
1628 bitmap_set_bit (dflow->out_of_date_transfer_functions, bb->index);
1629 }
1630 df_mark_solutions_dirty ();
1631 }
1632 }
1633
1634
1635 /* Grow the bb_info array. */
1636
1637 void
1638 df_grow_bb_info (struct dataflow *dflow)
1639 {
1640 unsigned int new_size = last_basic_block_for_fn (cfun) + 1;
1641 if (dflow->block_info_size < new_size)
1642 {
1643 new_size += new_size / 4;
1644 dflow->block_info
1645 = (void *)XRESIZEVEC (char, (char *)dflow->block_info,
1646 new_size
1647 * dflow->problem->block_info_elt_size);
1648 memset ((char *)dflow->block_info
1649 + dflow->block_info_size
1650 * dflow->problem->block_info_elt_size,
1651 0,
1652 (new_size - dflow->block_info_size)
1653 * dflow->problem->block_info_elt_size);
1654 dflow->block_info_size = new_size;
1655 }
1656 }
1657
1658
1659 /* Clear the dirty bits. This is called from places that delete
1660 blocks. */
1661 static void
1662 df_clear_bb_dirty (basic_block bb)
1663 {
1664 int p;
1665 for (p = 1; p < df->num_problems_defined; p++)
1666 {
1667 struct dataflow *dflow = df->problems_in_order[p];
1668 if (dflow->out_of_date_transfer_functions)
1669 bitmap_clear_bit (dflow->out_of_date_transfer_functions, bb->index);
1670 }
1671 }
1672
1673 /* Called from rtl_compact_blocks to reorganize the problems' basic
1674 block info. */
1675
1676 void
1677 df_compact_blocks (void)
1678 {
1679 int i, p;
1680 basic_block bb;
1681 void *problem_temps;
1682 bitmap_head tmp;
1683
1684 bitmap_initialize (&tmp, &df_bitmap_obstack);
1685 for (p = 0; p < df->num_problems_defined; p++)
1686 {
1687 struct dataflow *dflow = df->problems_in_order[p];
1688
1689 /* Need to reorganize the out_of_date_transfer_functions for the
1690 dflow problem. */
1691 if (dflow->out_of_date_transfer_functions)
1692 {
1693 bitmap_copy (&tmp, dflow->out_of_date_transfer_functions);
1694 bitmap_clear (dflow->out_of_date_transfer_functions);
1695 if (bitmap_bit_p (&tmp, ENTRY_BLOCK))
1696 bitmap_set_bit (dflow->out_of_date_transfer_functions, ENTRY_BLOCK);
1697 if (bitmap_bit_p (&tmp, EXIT_BLOCK))
1698 bitmap_set_bit (dflow->out_of_date_transfer_functions, EXIT_BLOCK);
1699
1700 i = NUM_FIXED_BLOCKS;
1701 FOR_EACH_BB_FN (bb, cfun)
1702 {
1703 if (bitmap_bit_p (&tmp, bb->index))
1704 bitmap_set_bit (dflow->out_of_date_transfer_functions, i);
1705 i++;
1706 }
1707 }
1708
1709 /* Now shuffle the block info for the problem. */
1710 if (dflow->problem->free_bb_fun)
1711 {
1712 int size = (last_basic_block_for_fn (cfun)
1713 * dflow->problem->block_info_elt_size);
1714 problem_temps = XNEWVAR (char, size);
1715 df_grow_bb_info (dflow);
1716 memcpy (problem_temps, dflow->block_info, size);
1717
1718 /* Copy the bb info from the problem tmps to the proper
1719    place in the block_info vector, then zero the trailing
1720    entries.  The entry and exit blocks never move.  */
1721 i = NUM_FIXED_BLOCKS;
1722 FOR_EACH_BB_FN (bb, cfun)
1723 {
1724 df_set_bb_info (dflow, i,
1725 (char *)problem_temps
1726 + bb->index * dflow->problem->block_info_elt_size);
1727 i++;
1728 }
1729 memset ((char *)dflow->block_info
1730 + i * dflow->problem->block_info_elt_size, 0,
1731 (last_basic_block_for_fn (cfun) - i)
1732 * dflow->problem->block_info_elt_size);
1733 free (problem_temps);
1734 }
1735 }
1736
1737 /* Shuffle the bits in the basic_block indexed arrays. */
1738
1739 if (df->blocks_to_analyze)
1740 {
1741 if (bitmap_bit_p (&tmp, ENTRY_BLOCK))
1742 bitmap_set_bit (df->blocks_to_analyze, ENTRY_BLOCK);
1743 if (bitmap_bit_p (&tmp, EXIT_BLOCK))
1744 bitmap_set_bit (df->blocks_to_analyze, EXIT_BLOCK);
1745 bitmap_copy (&tmp, df->blocks_to_analyze);
1746 bitmap_clear (df->blocks_to_analyze);
1747 i = NUM_FIXED_BLOCKS;
1748 FOR_EACH_BB_FN (bb, cfun)
1749 {
1750 if (bitmap_bit_p (&tmp, bb->index))
1751 bitmap_set_bit (df->blocks_to_analyze, i);
1752 i++;
1753 }
1754 }
1755
1756 bitmap_clear (&tmp);
1757
1758 i = NUM_FIXED_BLOCKS;
1759 FOR_EACH_BB_FN (bb, cfun)
1760 {
1761 SET_BASIC_BLOCK_FOR_FN (cfun, i, bb);
1762 bb->index = i;
1763 i++;
1764 }
1765
1766 gcc_assert (i == n_basic_blocks_for_fn (cfun));
1767
1768 for (; i < last_basic_block_for_fn (cfun); i++)
1769 SET_BASIC_BLOCK_FOR_FN (cfun, i, NULL);
1770
1771 #ifdef DF_DEBUG_CFG
1772 if (!df_lr->solutions_dirty)
1773 df_set_clean_cfg ();
1774 #endif
1775 }
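
/* For example (illustrative): if the surviving ordinary blocks
   currently have indices 2, 5 and 7, after compaction they occupy
   indices 2, 3 and 4 (the entry and exit blocks keep indices 0 and 1),
   and each problem's block_info entry, out_of_date_transfer_functions
   bit and blocks_to_analyze bit move to the new index along with the
   block.  */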
1776
1777
1778 /* Shove NEW_BLOCK in at OLD_INDEX. Called from ifcvt to hack a
1779 block. There is no excuse for people to do this kind of thing. */
1780
1781 void
1782 df_bb_replace (int old_index, basic_block new_block)
1783 {
1784 int new_block_index = new_block->index;
1785 int p;
1786
1787 if (dump_file)
1788 fprintf (dump_file, "shoving block %d into %d\n", new_block_index, old_index);
1789
1790 gcc_assert (df);
1791 gcc_assert (BASIC_BLOCK_FOR_FN (cfun, old_index) == NULL);
1792
1793 for (p = 0; p < df->num_problems_defined; p++)
1794 {
1795 struct dataflow *dflow = df->problems_in_order[p];
1796 if (dflow->block_info)
1797 {
1798 df_grow_bb_info (dflow);
1799 df_set_bb_info (dflow, old_index,
1800 df_get_bb_info (dflow, new_block_index));
1801 }
1802 }
1803
1804 df_clear_bb_dirty (new_block);
1805 SET_BASIC_BLOCK_FOR_FN (cfun, old_index, new_block);
1806 new_block->index = old_index;
1807 df_set_bb_dirty (BASIC_BLOCK_FOR_FN (cfun, old_index));
1808 SET_BASIC_BLOCK_FOR_FN (cfun, new_block_index, NULL);
1809 }
1810
1811
1812 /* Free all of the per-basic-block dataflow info from all of the
1813    problems.  This is typically called before a basic block is deleted;
1814    the problems will be reanalyzed afterwards.  */
1815
1816 void
1817 df_bb_delete (int bb_index)
1818 {
1819 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
1820 int i;
1821
1822 if (!df)
1823 return;
1824
1825 for (i = 0; i < df->num_problems_defined; i++)
1826 {
1827 struct dataflow *dflow = df->problems_in_order[i];
1828 if (dflow->problem->free_bb_fun)
1829 {
1830 void *bb_info = df_get_bb_info (dflow, bb_index);
1831 if (bb_info)
1832 {
1833 dflow->problem->free_bb_fun (bb, bb_info);
1834 df_clear_bb_info (dflow, bb_index);
1835 }
1836 }
1837 }
1838 df_clear_bb_dirty (bb);
1839 df_mark_solutions_dirty ();
1840 }
1841
1842
1843 /* Verify that there is a place for everything and everything is in
1844 its place. This is too expensive to run after every pass in the
1845 mainline. However this is an excellent debugging tool if the
1846 dataflow information is not being updated properly. You can just
1847 sprinkle calls in until you find the place that is changing an
1848 underlying structure without calling the proper updating
1849 routine. */
1850
1851 void
1852 df_verify (void)
1853 {
1854 df_scan_verify ();
1855 #ifdef ENABLE_DF_CHECKING
1856 df_lr_verify_transfer_functions ();
1857 if (df_live)
1858 df_live_verify_transfer_functions ();
1859 #endif
1860 }
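
/* For example (illustrative only), when hunting for the transformation
   that corrupts the dataflow information one might temporarily add

       ... transformation step A ...
       df_verify ();
       ... transformation step B ...
       df_verify ();

   to the suspect pass and bisect until the first failing check
   identifies the offending step.  */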
1861
1862 #ifdef DF_DEBUG_CFG
1863
1864 /* Compute an array of ints that describes the cfg.  This can be used
1865    to discover places where the cfg is modified but the appropriate
1866    calls have not been made to keep df informed.  The internals of
1867    this are unexciting; the key is that two instances of this can be
1868    compared to see if any changes have been made to the cfg.  */
1869
1870 static int *
1871 df_compute_cfg_image (void)
1872 {
1873 basic_block bb;
1874 int size = 2 + (2 * n_basic_blocks_for_fn (cfun));
1875 int i;
1876 int * map;
1877
1878 FOR_ALL_BB_FN (bb, cfun)
1879 {
1880 size += EDGE_COUNT (bb->succs);
1881 }
1882
1883 map = XNEWVEC (int, size);
1884 map[0] = size;
1885 i = 1;
1886 FOR_ALL_BB_FN (bb, cfun)
1887 {
1888 edge_iterator ei;
1889 edge e;
1890
1891 map[i++] = bb->index;
1892 FOR_EACH_EDGE (e, ei, bb->succs)
1893 map[i++] = e->dest->index;
1894 map[i++] = -1;
1895 }
1896 map[i] = -1;
1897 return map;
1898 }
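
/* For example (illustrative): for a function whose only ordinary block
   has index 2, with edges ENTRY->2 and 2->EXIT, the image is

       { 10, 0, 2, -1, 2, 1, -1, 1, -1, -1 }

   i.e. the total size, then for each block in chain order its index
   followed by the indices of its successors and a -1 terminator, and a
   final -1.  */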
1899
1900 static int *saved_cfg = NULL;
1901
1902
1903 /* This function compares the saved version of the cfg with the
1904    current cfg and aborts if the two differ.  The function silently
1905    returns if the cfg has been marked as dirty, there is no saved
1906    image, or the two are the same.  */
1907
1908 void
1909 df_check_cfg_clean (void)
1910 {
1911 int *new_map;
1912
1913 if (!df)
1914 return;
1915
1916 if (df_lr->solutions_dirty)
1917 return;
1918
1919 if (saved_cfg == NULL)
1920 return;
1921
1922 new_map = df_compute_cfg_image ();
1923 gcc_assert (memcmp (saved_cfg, new_map, saved_cfg[0] * sizeof (int)) == 0);
1924 free (new_map);
1925 }
1926
1927
1928 /* This function builds a cfg fingerprint and squirrels it away in
1929 saved_cfg. */
1930
1931 static void
1932 df_set_clean_cfg (void)
1933 {
1934 free (saved_cfg);
1935 saved_cfg = df_compute_cfg_image ();
1936 }
1937
1938 #endif /* DF_DEBUG_CFG */
1939 /*----------------------------------------------------------------------------
1940 PUBLIC INTERFACES TO QUERY INFORMATION.
1941 ----------------------------------------------------------------------------*/
1942
1943
1944 /* Return first def of REGNO within BB. */
1945
1946 df_ref
1947 df_bb_regno_first_def_find (basic_block bb, unsigned int regno)
1948 {
1949 rtx insn;
1950 df_ref def;
1951
1952 FOR_BB_INSNS (bb, insn)
1953 {
1954 if (!INSN_P (insn))
1955 continue;
1956
1957 FOR_EACH_INSN_DEF (def, insn)
1958 if (DF_REF_REGNO (def) == regno)
1959 return def;
1960 }
1961 return NULL;
1962 }
1963
1964
1965 /* Return last def of REGNO within BB. */
1966
1967 df_ref
1968 df_bb_regno_last_def_find (basic_block bb, unsigned int regno)
1969 {
1970 rtx insn;
1971 df_ref def;
1972
1973 FOR_BB_INSNS_REVERSE (bb, insn)
1974 {
1975 if (!INSN_P (insn))
1976 continue;
1977
1978 FOR_EACH_INSN_DEF (def, insn)
1979 if (DF_REF_REGNO (def) == regno)
1980 return def;
1981 }
1982
1983 return NULL;
1984 }
1985
1986 /* Find the reference corresponding to the definition of REG in INSN,
1987    or return NULL if there is none.  */
1988
1989 df_ref
1990 df_find_def (rtx insn, rtx reg)
1991 {
1992 df_ref def;
1993
1994 if (GET_CODE (reg) == SUBREG)
1995 reg = SUBREG_REG (reg);
1996 gcc_assert (REG_P (reg));
1997
1998 FOR_EACH_INSN_DEF (def, insn)
1999 if (DF_REF_REGNO (def) == REGNO (reg))
2000 return def;
2001
2002 return NULL;
2003 }
2004
2005
2006 /* Return true if REG is defined in INSN, false otherwise.  */
2007
2008 bool
2009 df_reg_defined (rtx insn, rtx reg)
2010 {
2011 return df_find_def (insn, reg) != NULL;
2012 }
2013
2014
2015 /* Find the reference corresponding to the use of REG in INSN, or
2016    return NULL if there is none.  */
2017
2018 df_ref
2019 df_find_use (rtx insn, rtx reg)
2020 {
2021 df_ref use;
2022
2023 if (GET_CODE (reg) == SUBREG)
2024 reg = SUBREG_REG (reg);
2025 gcc_assert (REG_P (reg));
2026
2027 df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
2028 FOR_EACH_INSN_INFO_USE (use, insn_info)
2029 if (DF_REF_REGNO (use) == REGNO (reg))
2030 return use;
2031 if (df->changeable_flags & DF_EQ_NOTES)
2032 FOR_EACH_INSN_INFO_EQ_USE (use, insn_info)
2033 if (DF_REF_REGNO (use) == REGNO (reg))
2034 return use;
2035 return NULL;
2036 }
2037
2038
2039 /* Return true if REG is referenced in INSN, false otherwise.  */
2040
2041 bool
2042 df_reg_used (rtx insn, rtx reg)
2043 {
2044 return df_find_use (insn, reg) != NULL;
2045 }
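
/* Illustrative sketch: these predicates compose naturally; for
   instance

       df_reg_defined (insn, reg) && !df_reg_used (insn, reg)

   is true exactly when INSN writes REG without also reading it, as far
   as the scanned refs show (hypothetical usage; INSN must have valid
   scanned insn info).  */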
2046
2047 \f
2048 /*----------------------------------------------------------------------------
2049 Debugging and printing functions.
2050 ----------------------------------------------------------------------------*/
2051
2052 /* Write the registers in the set R into OUTF.  This is part of
2053    making a debugging dump.  */
2054
2055 void
2056 dump_regset (regset r, FILE *outf)
2057 {
2058 unsigned i;
2059 reg_set_iterator rsi;
2060
2061 if (r == NULL)
2062 {
2063 fputs (" (nil)", outf);
2064 return;
2065 }
2066
2067 EXECUTE_IF_SET_IN_REG_SET (r, 0, i, rsi)
2068 {
2069 fprintf (outf, " %d", i);
2070 if (i < FIRST_PSEUDO_REGISTER)
2071 fprintf (outf, " [%s]",
2072 reg_names[i]);
2073 }
2074 }
2075
2076 /* Print a human-readable representation of R on the standard error
2077 stream. This function is designed to be used from within the
2078 debugger. */
2079 extern void debug_regset (regset);
2080 DEBUG_FUNCTION void
2081 debug_regset (regset r)
2082 {
2083 dump_regset (r, stderr);
2084 putc ('\n', stderr);
2085 }
2086
2087 /* Write the registers in the bitmap R into FILE.  This is part of
2088    making a debugging dump.  */
2089
2090 void
2091 df_print_regset (FILE *file, bitmap r)
2092 {
2093 unsigned int i;
2094 bitmap_iterator bi;
2095
2096 if (r == NULL)
2097 fputs (" (nil)", file);
2098 else
2099 {
2100 EXECUTE_IF_SET_IN_BITMAP (r, 0, i, bi)
2101 {
2102 fprintf (file, " %d", i);
2103 if (i < FIRST_PSEUDO_REGISTER)
2104 fprintf (file, " [%s]", reg_names[i]);
2105 }
2106 }
2107 fprintf (file, "\n");
2108 }
2109
2110
2111 /* Write the registers in the bitmap R into FILE.  The bitmap is in
2112    the two-bits-per-pseudo form used by the word-level problems (see
2113    df_word_lr).  This is part of making a debugging dump.  */
2114
2115 void
2116 df_print_word_regset (FILE *file, bitmap r)
2117 {
2118 unsigned int max_reg = max_reg_num ();
2119
2120 if (r == NULL)
2121 fputs (" (nil)", file);
2122 else
2123 {
2124 unsigned int i;
2125 for (i = FIRST_PSEUDO_REGISTER; i < max_reg; i++)
2126 {
2127 bool found = (bitmap_bit_p (r, 2 * i)
2128 || bitmap_bit_p (r, 2 * i + 1));
2129 if (found)
2130 {
2131 int word;
2132 const char * sep = "";
2133 fprintf (file, " %d", i);
2134 fprintf (file, "(");
2135 for (word = 0; word < 2; word++)
2136 if (bitmap_bit_p (r, 2 * i + word))
2137 {
2138 fprintf (file, "%s%d", sep, word);
2139 sep = ", ";
2140 }
2141 fprintf (file, ")");
2142 }
2143 }
2144 }
2145 fprintf (file, "\n");
2146 }
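
/* For example, output such as " 73(0, 1) 74(1)" from the function
   above means that both words of pseudo 73 but only word 1 of
   pseudo 74 are present in the set.  */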
2147
2148
2149 /* Dump dataflow info. */
2150
2151 void
2152 df_dump (FILE *file)
2153 {
2154 basic_block bb;
2155 df_dump_start (file);
2156
2157 FOR_ALL_BB_FN (bb, cfun)
2158 {
2159 df_print_bb_index (bb, file);
2160 df_dump_top (bb, file);
2161 df_dump_bottom (bb, file);
2162 }
2163
2164 fprintf (file, "\n");
2165 }
2166
2167
2168 /* Dump dataflow info for df->blocks_to_analyze. */
2169
2170 void
2171 df_dump_region (FILE *file)
2172 {
2173 if (df->blocks_to_analyze)
2174 {
2175 bitmap_iterator bi;
2176 unsigned int bb_index;
2177
2178 fprintf (file, "\n\nstarting region dump\n");
2179 df_dump_start (file);
2180
2181 EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
2182 {
2183 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
2184 dump_bb (file, bb, 0, TDF_DETAILS);
2185 }
2186 fprintf (file, "\n");
2187 }
2188 else
2189 df_dump (file);
2190 }
2191
2192
2193 /* Dump the introductory information for each problem defined. */
2194
2195 void
2196 df_dump_start (FILE *file)
2197 {
2198 int i;
2199
2200 if (!df || !file)
2201 return;
2202
2203 fprintf (file, "\n\n%s\n", current_function_name ());
2204 fprintf (file, "\nDataflow summary:\n");
2205 if (df->blocks_to_analyze)
2206 fprintf (file, "def_info->table_size = %d, use_info->table_size = %d\n",
2207 DF_DEFS_TABLE_SIZE (), DF_USES_TABLE_SIZE ());
2208
2209 for (i = 0; i < df->num_problems_defined; i++)
2210 {
2211 struct dataflow *dflow = df->problems_in_order[i];
2212 if (dflow->computed)
2213 {
2214 df_dump_problem_function fun = dflow->problem->dump_start_fun;
2215 if (fun)
2216 fun (file);
2217 }
2218 }
2219 }
2220
2221
2222 /* Dump the top or bottom of the block information for BB. */
2223 static void
2224 df_dump_bb_problem_data (basic_block bb, FILE *file, bool top)
2225 {
2226 int i;
2227
2228 if (!df || !file)
2229 return;
2230
2231 for (i = 0; i < df->num_problems_defined; i++)
2232 {
2233 struct dataflow *dflow = df->problems_in_order[i];
2234 if (dflow->computed)
2235 {
2236 df_dump_bb_problem_function bbfun;
2237
2238 if (top)
2239 bbfun = dflow->problem->dump_top_fun;
2240 else
2241 bbfun = dflow->problem->dump_bottom_fun;
2242
2243 if (bbfun)
2244 bbfun (bb, file);
2245 }
2246 }
2247 }
2248
2249 /* Dump the top of the block information for BB. */
2250
2251 void
2252 df_dump_top (basic_block bb, FILE *file)
2253 {
2254 df_dump_bb_problem_data (bb, file, /*top=*/true);
2255 }
2256
2257 /* Dump the bottom of the block information for BB. */
2258
2259 void
2260 df_dump_bottom (basic_block bb, FILE *file)
2261 {
2262 df_dump_bb_problem_data (bb, file, /*top=*/false);
2263 }
2264
2265
2266 /* Dump information about INSN just before or after dumping INSN itself. */
2267 static void
2268 df_dump_insn_problem_data (const_rtx insn, FILE *file, bool top)
2269 {
2270 int i;
2271
2272 if (!df || !file)
2273 return;
2274
2275 for (i = 0; i < df->num_problems_defined; i++)
2276 {
2277 struct dataflow *dflow = df->problems_in_order[i];
2278 if (dflow->computed)
2279 {
2280 df_dump_insn_problem_function insnfun;
2281
2282 if (top)
2283 insnfun = dflow->problem->dump_insn_top_fun;
2284 else
2285 insnfun = dflow->problem->dump_insn_bottom_fun;
2286
2287 if (insnfun)
2288 insnfun (insn, file);
2289 }
2290 }
2291 }
2292
2293 /* Dump information about INSN before dumping INSN itself. */
2294
2295 void
2296 df_dump_insn_top (const_rtx insn, FILE *file)
2297 {
2298 df_dump_insn_problem_data (insn, file, /*top=*/true);
2299 }
2300
2301 /* Dump information about INSN after dumping INSN itself. */
2302
2303 void
2304 df_dump_insn_bottom (const_rtx insn, FILE *file)
2305 {
2306 df_dump_insn_problem_data (insn, file, /*top=*/false);
2307 }
2308
2309
2310 static void
2311 df_ref_dump (df_ref ref, FILE *file)
2312 {
2313 fprintf (file, "%c%d(%d)",
2314 DF_REF_REG_DEF_P (ref)
2315 ? 'd'
2316 : (DF_REF_FLAGS (ref) & DF_REF_IN_NOTE) ? 'e' : 'u',
2317 DF_REF_ID (ref),
2318 DF_REF_REGNO (ref));
2319 }
2320
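/* Dump the list of refs starting at REF, optionally following each
   ref's def-use / use-def chain.  In df_ref_dump's output above, 'd',
   'u' and 'e' mark a def, a use and an equiv-note use respectively,
   followed by the ref id and, in parentheses, the register number;
   e.g. "d12(5)" is def ref 12 of register 5.  */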
2321 void
2322 df_refs_chain_dump (df_ref ref, bool follow_chain, FILE *file)
2323 {
2324 fprintf (file, "{ ");
2325 for (; ref; ref = DF_REF_NEXT_LOC (ref))
2326 {
2327 df_ref_dump (ref, file);
2328 if (follow_chain)
2329 df_chain_dump (DF_REF_CHAIN (ref), file);
2330 }
2331 fprintf (file, "}");
2332 }
2333
2334
2335 /* Dump either a reg-def or reg-use chain.  */
2336
2337 void
2338 df_regs_chain_dump (df_ref ref, FILE *file)
2339 {
2340 fprintf (file, "{ ");
2341 while (ref)
2342 {
2343 df_ref_dump (ref, file);
2344 ref = DF_REF_NEXT_REG (ref);
2345 }
2346 fprintf (file, "}");
2347 }
2348
2349
2350 static void
2351 df_mws_dump (struct df_mw_hardreg *mws, FILE *file)
2352 {
2353 for (; mws; mws = DF_MWS_NEXT (mws))
2354 fprintf (file, "mw %c r[%d..%d]\n",
2355 DF_MWS_REG_DEF_P (mws) ? 'd' : 'u',
2356 mws->start_regno, mws->end_regno);
2357 }
2358
2359
2360 static void
2361 df_insn_uid_debug (unsigned int uid,
2362 bool follow_chain, FILE *file)
2363 {
2364 fprintf (file, "insn %d luid %d",
2365 uid, DF_INSN_UID_LUID (uid));
2366
2367 if (DF_INSN_UID_DEFS (uid))
2368 {
2369 fprintf (file, " defs ");
2370 df_refs_chain_dump (DF_INSN_UID_DEFS (uid), follow_chain, file);
2371 }
2372
2373 if (DF_INSN_UID_USES (uid))
2374 {
2375 fprintf (file, " uses ");
2376 df_refs_chain_dump (DF_INSN_UID_USES (uid), follow_chain, file);
2377 }
2378
2379 if (DF_INSN_UID_EQ_USES (uid))
2380 {
2381 fprintf (file, " eq uses ");
2382 df_refs_chain_dump (DF_INSN_UID_EQ_USES (uid), follow_chain, file);
2383 }
2384
2385 if (DF_INSN_UID_MWS (uid))
2386 {
2387 fprintf (file, " mws ");
2388 df_mws_dump (DF_INSN_UID_MWS (uid), file);
2389 }
2390 fprintf (file, "\n");
2391 }
2392
2393
2394 DEBUG_FUNCTION void
2395 df_insn_debug (rtx insn, bool follow_chain, FILE *file)
2396 {
2397 df_insn_uid_debug (INSN_UID (insn), follow_chain, file);
2398 }
2399
2400 DEBUG_FUNCTION void
2401 df_insn_debug_regno (rtx insn, FILE *file)
2402 {
2403 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
2404
2405 fprintf (file, "insn %d bb %d luid %d defs ",
2406 INSN_UID (insn), BLOCK_FOR_INSN (insn)->index,
2407 DF_INSN_INFO_LUID (insn_info));
2408 df_refs_chain_dump (DF_INSN_INFO_DEFS (insn_info), false, file);
2409
2410 fprintf (file, " uses ");
2411 df_refs_chain_dump (DF_INSN_INFO_USES (insn_info), false, file);
2412
2413 fprintf (file, " eq_uses ");
2414 df_refs_chain_dump (DF_INSN_INFO_EQ_USES (insn_info), false, file);
2415 fprintf (file, "\n");
2416 }
2417
2418 DEBUG_FUNCTION void
2419 df_regno_debug (unsigned int regno, FILE *file)
2420 {
2421 fprintf (file, "reg %d defs ", regno);
2422 df_regs_chain_dump (DF_REG_DEF_CHAIN (regno), file);
2423 fprintf (file, " uses ");
2424 df_regs_chain_dump (DF_REG_USE_CHAIN (regno), file);
2425 fprintf (file, " eq_uses ");
2426 df_regs_chain_dump (DF_REG_EQ_USE_CHAIN (regno), file);
2427 fprintf (file, "\n");
2428 }
2429
2430
2431 DEBUG_FUNCTION void
2432 df_ref_debug (df_ref ref, FILE *file)
2433 {
2434 fprintf (file, "%c%d ",
2435 DF_REF_REG_DEF_P (ref) ? 'd' : 'u',
2436 DF_REF_ID (ref));
2437 fprintf (file, "reg %d bb %d insn %d flag %#x type %#x ",
2438 DF_REF_REGNO (ref),
2439 DF_REF_BBNO (ref),
2440 DF_REF_IS_ARTIFICIAL (ref) ? -1 : DF_REF_INSN_UID (ref),
2441 DF_REF_FLAGS (ref),
2442 DF_REF_TYPE (ref));
2443 if (DF_REF_LOC (ref))
2444 {
2445 if (flag_dump_noaddr)
2446 fprintf (file, "loc #(#) chain ");
2447 else
2448 fprintf (file, "loc %p(%p) chain ", (void *)DF_REF_LOC (ref),
2449 (void *)*DF_REF_LOC (ref));
2450 }
2451 else
2452 fprintf (file, "chain ");
2453 df_chain_dump (DF_REF_CHAIN (ref), file);
2454 fprintf (file, "\n");
2455 }
2456 \f
2457 /* Functions for debugging from GDB. */
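
/* For example, from a gdb session one might evaluate

       (gdb) call debug_df_insn (insn)

   to print the scanned defs, uses and equiv-note uses of INSN followed
   by its RTL, or "call debug_df_regno (regno)" to walk the def, use
   and eq-use chains of a particular register.  */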
2458
2459 DEBUG_FUNCTION void
2460 debug_df_insn (rtx insn)
2461 {
2462 df_insn_debug (insn, true, stderr);
2463 debug_rtx (insn);
2464 }
2465
2466
2467 DEBUG_FUNCTION void
2468 debug_df_reg (rtx reg)
2469 {
2470 df_regno_debug (REGNO (reg), stderr);
2471 }
2472
2473
2474 DEBUG_FUNCTION void
2475 debug_df_regno (unsigned int regno)
2476 {
2477 df_regno_debug (regno, stderr);
2478 }
2479
2480
2481 DEBUG_FUNCTION void
2482 debug_df_ref (df_ref ref)
2483 {
2484 df_ref_debug (ref, stderr);
2485 }
2486
2487
2488 DEBUG_FUNCTION void
2489 debug_df_defno (unsigned int defno)
2490 {
2491 df_ref_debug (DF_DEFS_GET (defno), stderr);
2492 }
2493
2494
2495 DEBUG_FUNCTION void
2496 debug_df_useno (unsigned int defno)
2497 {
2498 df_ref_debug (DF_USES_GET (defno), stderr);
2499 }
2500
2501
2502 DEBUG_FUNCTION void
2503 debug_df_chain (struct df_link *link)
2504 {
2505 df_chain_dump (link, stderr);
2506 fputc ('\n', stderr);
2507 }