[gcc.git] / gcc / df-core.c
1 /* Allocation for dataflow support routines.
2 Copyright (C) 1999-2014 Free Software Foundation, Inc.
3 Originally contributed by Michael P. Hayes
4 (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
5 Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
6 and Kenneth Zadeck (zadeck@naturalbridge.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 /*
25 OVERVIEW:
26
27 The files in this collection (df*.c,df.h) provide a general framework
28 for solving dataflow problems. The global dataflow is performed using
29 a good implementation of iterative dataflow analysis.
30
31 The file df-problems.c provides problem instances for the most common
32 dataflow problems: reaching defs, upward exposed uses, live variables,
33 uninitialized variables, def-use chains, and use-def chains. However,
34 the interface allows other dataflow problems to be defined as well.
35
36 Dataflow analysis is available in most of the rtl backend (the parts
37 between pass_df_initialize and pass_df_finish). It is quite likely
38 that these boundaries will be expanded in the future. The only
39 requirement is that there be a correct control flow graph.
40
41 There are three variations of the live variable problem that are
42 available whenever dataflow is available. The LR problem finds the
43 areas that can reach a use of a variable, and the UR problem finds the
44 areas that can be reached from a definition of a variable. The LIVE
45 problem finds the intersection of these two areas.
46
47 There are several optional problems. These can be enabled when they
48 are needed and disabled when they are not needed.
49
50 Dataflow problems are generally solved in three layers. The bottom
51 layer is called scanning where a data structure is built for each rtl
52 insn that describes the set of defs and uses of that insn. Scanning
53 is generally kept up to date, i.e. as an insn changes, the scanned
54 version of that insn changes also. There are various mechanisms for
55 making this happen, which are described in the INCREMENTAL SCANNING
56 section.
57
58 In the middle layer, basic blocks are scanned to produce transfer
59 functions which describe the effects of that block on the global
60 dataflow solution. The transfer functions are only rebuilt if some
61 instruction within the block has changed.
62
63 The top layer is the dataflow solution itself. The dataflow solution
64 is computed by using an efficient iterative solver and the transfer
65 functions. The dataflow solution must be recomputed whenever the
66 control flow changes or one of the transfer functions changes.
67
68
69 USAGE:
70
71 Here is an example of using the dataflow routines.
72
73 df_[chain,live,note,rd]_add_problem (flags);
74
75 df_set_blocks (blocks);
76
77 df_analyze ();
78
79 df_dump (stderr);
80
81 df_finish_pass (false);
82
83 DF_[chain,live,note,rd]_ADD_PROBLEM adds a problem, defined by an
84 instance of struct df_problem, to the set of problems solved in this
85 instance of df. All calls to add a problem for a given instance of df
86 must occur before the first call to DF_ANALYZE.
87
88 Problems can be dependent on other problems. For instance, solving
89 def-use or use-def chains is dependent on solving reaching
90 definitions. As long as these dependencies are listed in the problem
91 definition, the order of adding the problems is not material.
92 Otherwise, the problems will be solved in the order of calls to
93 df_add_problem. Note that it is not necessary to have a problem. In
94 that case, df will just be used to do the scanning.
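
   As a concrete illustration, a pass that wants use-def chains might be
   structured roughly like this (a sketch only; df_chain_add_problem, the
   DF_UD_CHAIN flag and DF_REF_CHAIN are declared in df.h):

     df_chain_add_problem (DF_UD_CHAIN);
     df_analyze ();
     ... walk the chains via DF_REF_CHAIN on the refs of interest ...
     df_finish_pass (false);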
95
96
97
98 DF_SET_BLOCKS is an optional call used to define a region of the
99 function on which the analysis will be performed. The normal case is
100 to analyze the entire function and no call to df_set_blocks is made.
101 DF_SET_BLOCKS only affects which blocks are considered when computing
102 the transfer functions and final solution. The insn level information
103 is always kept up to date.
104
105 When a subset is given, the analysis behaves as if the function only
106 contains those blocks and any edges that occur directly between the
107 blocks in the set. Care should be taken to call df_set_blocks right
108 before the call to analyze in order to eliminate the possibility that
109 optimizations that reorder blocks invalidate the bitvector.
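
   For example (a sketch modeled on df_analyze_loop later in this file,
   where BB ranges over the blocks of interest):

     bitmap blocks = BITMAP_ALLOC (&df_bitmap_obstack);
     bitmap_set_bit (blocks, bb->index);   ... once per block of interest ...
     df_set_blocks (blocks);
     df_analyze ();
     BITMAP_FREE (blocks);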
110
111 DF_ANALYZE causes all of the defined problems to be (re)solved. When
112 DF_ANALYZE completes, the IN and OUT sets for each basic block
113 contain the computed information. The DF_*_BB_INFO macros can be used
114 to access these bitvectors. All deferred rescannings are done before
115 the transfer functions are recomputed.
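
   For instance, once the LR problem has been solved, liveness on entry
   to a block BB can be queried with something like the following (a
   sketch; DF_LR_IN and DF_LR_OUT are defined in df.h):

     if (bitmap_bit_p (DF_LR_IN (bb), regno))
       ... REGNO is live on entry to BB ...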
116
117 DF_DUMP can then be called to dump the information produced to some
118 file. This calls DF_DUMP_START, to print the information that is not
119 basic block specific, and then calls DF_DUMP_TOP and DF_DUMP_BOTTOM
120 for each block to print the basic block specific information. These parts
121 can all be called separately as part of a larger dump function.
122
123
124 DF_FINISH_PASS causes df_remove_problem to be called on all of the
125 optional problems. It also causes any insns whose scanning has been
126 deferred to be rescanned, and it clears all of the changeable flags.
127 Setting the pass manager TODO_df_finish flag causes this function to
128 be run. However, the pass manager will call df_finish_pass AFTER the
129 pass dumping has been done, so if you want to see the results of the
130 optional problems in the pass dumps, use the TODO flag rather than
131 calling the function yourself.
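
   For example, a pass requests this by putting TODO_df_finish in the
   todo_flags_finish field of its pass_data descriptor (sketch; compare
   the descriptors near the end of this file, which leave the field 0):

     ...
     0,               ... todo_flags_start
     TODO_df_finish,  ... todo_flags_finish
     };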
132
133 INCREMENTAL SCANNING
134
135 There are four ways of doing the incremental scanning:
136
137 1) Immediate rescanning - Calls to df_insn_rescan, df_notes_rescan,
138 df_bb_delete, df_insn_change_bb have been added to most of
139 the low level service functions that maintain the cfg and change
140 rtl. Calling any of these routines may cause some number of insns
141 to be rescanned.
142
143 For most modern rtl passes, this is certainly the easiest way to
144 manage rescanning the insns. This technique also has the advantage
145 that the scanning information is always correct and can be relied
146 upon even after changes have been made to the instructions. This
147 technique is contraindicated in several cases:
148
149 a) If def-use chains OR use-def chains (but not both) are built,
150 using this is SIMPLY WRONG. The problem is that when a ref is
151 deleted that is the target of an edge, there is not enough
152 information to efficiently find the source of the edge and
153 delete the edge. This leaves a dangling reference that may
154 cause problems.
155
156 b) If def-use chains AND use-def chains are built, this may
157 produce unexpected results. The problem is that the incremental
158 scanning of an insn does not know how to repair the chains that
159 point into an insn when the insn changes. So the incremental
160 scanning just deletes the chains that enter and exit the insn
161 being changed. The dangling reference issue in (a) is not a
162 problem here, but if the pass is depending on the chains being
163 maintained after insns have been modified, this technique will
164 not do the correct thing.
165
166 c) If the pass modifies insns several times, this incremental
167 updating may be expensive.
168
169 d) If the pass modifies all of the insns, as does register
170 allocation, it is simply better to rescan the entire function.
171
172 2) Deferred rescanning - Calls to df_insn_rescan, df_notes_rescan, and
173 df_insn_delete do not immediately change the insn but instead make
174 a note that the insn needs to be rescanned. The next call to
175 df_analyze, df_finish_pass, or df_process_deferred_rescans will
176 cause all of the pending rescans to be processed.
177
178 This is the technique of choice if either 1a, 1b, or 1c are issues
179 in the pass. In the case of 1a or 1b, a call to df_finish_pass
180 (either manually or via TODO_df_finish) should be made before the
181 next call to df_analyze or df_process_deferred_rescans.
182
183 This mode is also used by a few passes that still rely on note_uses,
184 note_stores and for_each_rtx instead of using the DF data. This
185 can be said to fall under case 1c.
186
187 To enable this mode, call df_set_flags (DF_DEFER_INSN_RESCAN).
188 (This mode can be cleared by calling df_clear_flags
189 (DF_DEFER_INSN_RESCAN), but this does not cause the deferred insns to
190 be rescanned.) A short usage sketch appears after this list.
191
192 3) Total rescanning - In this mode the rescanning is disabled.
193 Only when insns are deleted is the df information associated with
194 them also deleted. At the end of the pass, a call must be made to
195 df_insn_rescan_all. This method is used by the register allocator
196 since it generally changes each insn multiple times (once for each ref)
197 and does not need to make use of the updated scanning information.
198
199 4) Do it yourself - In this mechanism, the pass updates the insns
200 itself using the low level df primitives. Currently no pass does
201 this, but it has the advantage that it is quite efficient given
202 that the pass generally has exact knowledge of what it is changing.
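
   As an example of 2) above, a deferred-rescanning pass has roughly this
   shape (a sketch only):

     df_set_flags (DF_DEFER_INSN_RESCAN);
     ... modify or add insns; the rescans are queued, not performed ...
     df_analyze ();   ... or df_finish_pass / df_process_deferred_rescans ...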
203
204 DATA STRUCTURES
205
206 Scanning allocates a `struct df_ref' data structure (ref) for every
207 register reference (def or use), and this records the insn
208 and bb the ref is found within. The refs are linked together in
209 chains of uses and defs for each insn and for each register. Each ref
210 also has a chain field that links all the use refs for a def or all
211 the def refs for a use. This is used to create use-def or def-use
212 chains.
213
214 Different optimizations have different needs. Ultimately, only
215 register allocation and schedulers should be using the bitmaps
216 produced for the live register and uninitialized register problems.
217 The rest of the backend should be upgraded to using and maintaining
218 the linked information such as def-use or use-def chains.
219
220
221 PHILOSOPHY:
222
223 While incremental bitmaps are not worthwhile to maintain, incremental
224 chains may be perfectly reasonable. The fastest way to build chains
225 from scratch or after significant modifications is to build reaching
226 definitions (RD) and build the chains from this.
227
228 However, general algorithms for maintaining use-def or def-use chains
229 are not practical. The amount of work to recompute any
230 chain after an arbitrary change is large. However, with a modest
231 amount of work it is generally possible to have the application that
232 uses the chains keep them up to date. The high level knowledge of
233 what is really happening is essential to crafting efficient
234 incremental algorithms.
235
236 As for the bit vector problems, there is no interface to give a set of
237 blocks over which to resolve the iteration. In general, restarting a
238 dataflow iteration is difficult and expensive. Again, the best way to
239 keep the dataflow information up to date (if this is really what is
240 needed) is to formulate a problem-specific solution.
241
242 There are fine grained calls for creating and deleting references from
243 instructions in df-scan.c. However, these are not currently connected
244 to the engine that resolves the dataflow equations.
245
246
247 DATA STRUCTURES:
248
249 The basic object is a DF_REF (reference) and this may either be a
250 DEF (definition) or a USE of a register.
251
252 These are linked into a variety of lists; namely reg-def, reg-use,
253 insn-def, insn-use, def-use, and use-def lists. For example, the
254 reg-def lists contain all the locations that define a given register
255 while the insn-use lists contain all the locations that use a
256 register.
257
258 Note that the reg-def and reg-use chains are generally short for
259 pseudos and long for the hard registers.
260
261 ACCESSING INSNS:
262
263 1) The df insn information is kept in an array of DF_INSN_INFO objects.
264 The array is indexed by insn uid, and every DF_REF points to the
265 DF_INSN_INFO object of the insn that contains the reference.
266
267 2) Each insn has three sets of refs, which are linked into one of three
268 lists: The insn's defs list (accessed by the DF_INSN_INFO_DEFS,
269 DF_INSN_DEFS, or DF_INSN_UID_DEFS macros), the insn's uses list
270 (accessed by the DF_INSN_INFO_USES, DF_INSN_USES, or
271 DF_INSN_UID_USES macros) or the insn's eq_uses list (accessed by the
272 DF_INSN_INFO_EQ_USES, DF_INSN_EQ_USES or DF_INSN_UID_EQ_USES macros).
273 The latter list is the list of references in REG_EQUAL or REG_EQUIV
274 notes. These macros produce a ref (or NULL); the rest of the list
275 can be obtained by traversal of the NEXT_REF field (accessed by the
276 DF_REF_NEXT_REF macro.) There is no significance to the ordering of
277 the uses or refs in an instruction (see the sketch after this list).
278
279 3) Each insn has a logical uid field (LUID) which is stored in the
280 DF_INSN_INFO object for the insn. The LUID field is accessed by
281 the DF_INSN_INFO_LUID, DF_INSN_LUID, and DF_INSN_UID_LUID macros.
282 When properly set, the LUID is an integer that numbers each insn in
283 the basic block, in order from the start of the block.
284 The numbers are only correct after a call to df_analyze. They will
285 rot after insns are added, deleted or moved around.
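
   As a sketch of the traversal described in 2) above (INSN and REGNO
   stand for the insn and register number of interest; the exact macro
   spellings are in df.h):

     for (df_ref use = DF_INSN_USES (insn); use; use = DF_REF_NEXT_REF (use))
       if (DF_REF_REGNO (use) == regno)
         ... INSN uses REGNO ...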
286
287 ACCESSING REFS:
288
289 There are 4 ways to obtain access to refs:
290
291 1) References are divided into two categories, REAL and ARTIFICIAL.
292
293 REAL refs are associated with instructions.
294
295 ARTIFICIAL refs are associated with basic blocks. The heads of
296 these lists can be accessed by calling df_get_artificial_defs or
297 df_get_artificial_uses for the particular basic block.
298
299 Artificial defs and uses occur both at the beginning and ends of blocks.
300
301 For blocks that are at the destination of eh edges, the
302 artificial uses and defs occur at the beginning. The defs relate
303 to the registers specified in EH_RETURN_DATA_REGNO and the uses
304 relate to the registers specified in EH_USES. Logically these
305 defs and uses should really occur along the eh edge, but there is
306 no convenient way to do this. Artificial refs that occur at the
307 beginning of the block have the DF_REF_AT_TOP flag set.
308
309 Artificial uses occur at the end of all blocks. These arise from
310 the hard registers that are always live, such as the stack
311 register and are put there to keep the code from forgetting about
312 them.
313
314 Artificial defs occur at the end of the entry block. These arise
315 from registers that are live at entry to the function.
316
317 2) There are three types of refs: defs, uses and eq_uses. (Eq_uses are
318 uses that appear inside a REG_EQUAL or REG_EQUIV note.)
319
320 All of the eq_uses, uses and defs associated with each pseudo or
321 hard register may be linked in a bidirectional chain. These are
322 called reg-use or reg-def chains. If the changeable flag
323 DF_EQ_NOTES is set when the chains are built, the eq_uses will be
324 treated like uses. If it is not set they are ignored.
325
326 The first use, eq_use or def for a register can be obtained using
327 the DF_REG_USE_CHAIN, DF_REG_EQ_USE_CHAIN or DF_REG_DEF_CHAIN
328 macros. Subsequent uses for the same regno can be obtained by
329 following the next_reg field of the ref. The number of elements in
330 each of the chains can be found by using the DF_REG_USE_COUNT,
331 DF_REG_EQ_USE_COUNT or DF_REG_DEF_COUNT macros.
332
333 In previous versions of this code, these chains were ordered. It
334 has not been practical to continue this practice. (See the sketch after this list.)
335
336 3) If def-use or use-def chains are built, these can be traversed to
337 get to other refs. If the flag DF_EQ_NOTES has been set, the chains
338 include the eq_uses. Otherwise these are ignored when building the
339 chains.
340
341 4) An array of all of the uses (and an array of all of the defs) can
342 be built. These arrays are indexed by the value in the id
343 structure. These arrays are only lazily kept up to date, and that
344 process can be expensive. To have these arrays built, call
345 df_reorganize_defs or df_reorganize_uses. If the flag DF_EQ_NOTES
346 has been set the array will contain the eq_uses. Otherwise these
347 are ignored when building the array and assigning the ids. Note
348 that the values in the id field of a ref may change across calls to
349 df_analyze or df_reorganize_defs or df_reorganize_uses.
350
351 If the only use of this array is to find all of the refs, it is
352 better to traverse all of the registers and then traverse all of
353 the reg-use or reg-def chains.
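
   For example, all of the defs of a given register number REGNO can be
   visited with a loop of this shape (a sketch; DF_REG_DEF_CHAIN and
   DF_REF_NEXT_REG are the accessors described in 2) above):

     for (df_ref def = DF_REG_DEF_CHAIN (regno); def; def = DF_REF_NEXT_REG (def))
       ... DF_REF_INSN (def) is an insn that defines REGNO ...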
354
355 NOTES:
356
357 Embedded addressing side-effects, such as POST_INC or PRE_INC, generate
358 both a use and a def. These are both marked read/write to show that they
359 are dependent. For example, (set (reg 40) (mem (post_inc (reg 42))))
360 will generate a use of reg 42 followed by a def of reg 42 (both marked
361 read/write). Similarly, (set (reg 40) (mem (pre_dec (reg 41))))
362 generates a use of reg 41 then a def of reg 41 (both marked read/write),
363 even though reg 41 is decremented before it is used for the memory
364 address in this second example.
365
366 A set to a REG inside a ZERO_EXTRACT, or a set to a non-paradoxical SUBREG
367 for which the number of word_mode units covered by the outer mode is
368 smaller than that covered by the inner mode, invokes a read-modify-write
369 operation. We generate both a use and a def and again mark them
370 read/write.
371
372 Paradoxical subreg writes do not leave a trace of the old content, so they
373 are write-only operations.
374 */
375
376
377 #include "config.h"
378 #include "system.h"
379 #include "coretypes.h"
380 #include "tm.h"
381 #include "rtl.h"
382 #include "tm_p.h"
383 #include "insn-config.h"
384 #include "recog.h"
385 #include "hashtab.h"
386 #include "hash-set.h"
387 #include "vec.h"
388 #include "machmode.h"
389 #include "hard-reg-set.h"
390 #include "input.h"
391 #include "function.h"
392 #include "regs.h"
393 #include "alloc-pool.h"
394 #include "flags.h"
395 #include "basic-block.h"
396 #include "sbitmap.h"
397 #include "bitmap.h"
398 #include "df.h"
399 #include "tree-pass.h"
400 #include "params.h"
401 #include "cfgloop.h"
402
403 static void *df_get_bb_info (struct dataflow *, unsigned int);
404 static void df_set_bb_info (struct dataflow *, unsigned int, void *);
405 static void df_clear_bb_info (struct dataflow *, unsigned int);
406 #ifdef DF_DEBUG_CFG
407 static void df_set_clean_cfg (void);
408 #endif
409
410 /* The obstack on which regsets are allocated. */
411 struct bitmap_obstack reg_obstack;
412
413 /* An obstack for bitmaps not related to specific dataflow problems.
414 This obstack should e.g. be used for bitmaps with a short life time
415 such as temporary bitmaps. */
416
417 bitmap_obstack df_bitmap_obstack;
418
419
420 /*----------------------------------------------------------------------------
421 Functions to create, destroy and manipulate an instance of df.
422 ----------------------------------------------------------------------------*/
423
424 struct df_d *df;
425
426 /* Add PROBLEM (and any dependent problems) to the DF instance. */
427
428 void
429 df_add_problem (struct df_problem *problem)
430 {
431 struct dataflow *dflow;
432 int i;
433
434 /* First try to add the dependent problem. */
435 if (problem->dependent_problem)
436 df_add_problem (problem->dependent_problem);
437
438 /* Check to see if this problem has already been defined. If it
439 has, just return; if not, add it to the end of the
440 vector. */
441 dflow = df->problems_by_index[problem->id];
442 if (dflow)
443 return;
444
445 /* Make a new one and add it to the end. */
446 dflow = XCNEW (struct dataflow);
447 dflow->problem = problem;
448 dflow->computed = false;
449 dflow->solutions_dirty = true;
450 df->problems_by_index[dflow->problem->id] = dflow;
451
452 /* Keep the defined problems ordered by index. This solves the
453 problem that RI will use the information from UREC if UREC has
454 been defined, or from LIVE if LIVE is defined and otherwise LR.
455 However for this to work, the computation of RI must be pushed
456 after whichever of those problems is defined, but we do not
457 require any of those except for LR to have actually been
458 defined. */
459 df->num_problems_defined++;
460 for (i = df->num_problems_defined - 2; i >= 0; i--)
461 {
462 if (problem->id < df->problems_in_order[i]->problem->id)
463 df->problems_in_order[i+1] = df->problems_in_order[i];
464 else
465 {
466 df->problems_in_order[i+1] = dflow;
467 return;
468 }
469 }
470 df->problems_in_order[0] = dflow;
471 }
472
473
474 /* Set the CHANGEABLE_FLAGS bits in the DF instance. The old flags are
475 returned. If a flag is not allowed to be changed this will fail if
476 checking is enabled. */
477 int
478 df_set_flags (int changeable_flags)
479 {
480 int old_flags = df->changeable_flags;
481 df->changeable_flags |= changeable_flags;
482 return old_flags;
483 }
484
485
486 /* Clear the CHANGEABLE_FLAGS bits in the DF instance. The old flags are
487 returned. If a flag is not allowed to be changed this will fail if
488 checking is enabled. */
489 int
490 df_clear_flags (int changeable_flags)
491 {
492 int old_flags = df->changeable_flags;
493 df->changeable_flags &= ~changeable_flags;
494 return old_flags;
495 }
496
497
498 /* Set the blocks that are to be considered for analysis. If this is
499 not called or is called with null, the entire function is
500 analyzed. */
501
502 void
503 df_set_blocks (bitmap blocks)
504 {
505 if (blocks)
506 {
507 if (dump_file)
508 bitmap_print (dump_file, blocks, "setting blocks to analyze ", "\n");
509 if (df->blocks_to_analyze)
510 {
511 /* This block is called to change the focus from one subset
512 to another. */
513 int p;
514 bitmap_head diff;
515 bitmap_initialize (&diff, &df_bitmap_obstack);
516 bitmap_and_compl (&diff, df->blocks_to_analyze, blocks);
517 for (p = 0; p < df->num_problems_defined; p++)
518 {
519 struct dataflow *dflow = df->problems_in_order[p];
520 if (dflow->optional_p && dflow->problem->reset_fun)
521 dflow->problem->reset_fun (df->blocks_to_analyze);
522 else if (dflow->problem->free_blocks_on_set_blocks)
523 {
524 bitmap_iterator bi;
525 unsigned int bb_index;
526
527 EXECUTE_IF_SET_IN_BITMAP (&diff, 0, bb_index, bi)
528 {
529 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
530 if (bb)
531 {
532 void *bb_info = df_get_bb_info (dflow, bb_index);
533 dflow->problem->free_bb_fun (bb, bb_info);
534 df_clear_bb_info (dflow, bb_index);
535 }
536 }
537 }
538 }
539
540 bitmap_clear (&diff);
541 }
542 else
543 {
544 /* This block of code is executed to change the focus from
545 the entire function to a subset. */
546 bitmap_head blocks_to_reset;
547 bool initialized = false;
548 int p;
549 for (p = 0; p < df->num_problems_defined; p++)
550 {
551 struct dataflow *dflow = df->problems_in_order[p];
552 if (dflow->optional_p && dflow->problem->reset_fun)
553 {
554 if (!initialized)
555 {
556 basic_block bb;
557 bitmap_initialize (&blocks_to_reset, &df_bitmap_obstack);
558 FOR_ALL_BB_FN (bb, cfun)
559 {
560 bitmap_set_bit (&blocks_to_reset, bb->index);
561 }
562 }
563 dflow->problem->reset_fun (&blocks_to_reset);
564 }
565 }
566 if (initialized)
567 bitmap_clear (&blocks_to_reset);
568
569 df->blocks_to_analyze = BITMAP_ALLOC (&df_bitmap_obstack);
570 }
571 bitmap_copy (df->blocks_to_analyze, blocks);
572 df->analyze_subset = true;
573 }
574 else
575 {
576 /* This block is executed to reset the focus to the entire
577 function. */
578 if (dump_file)
579 fprintf (dump_file, "clearing blocks_to_analyze\n");
580 if (df->blocks_to_analyze)
581 {
582 BITMAP_FREE (df->blocks_to_analyze);
583 df->blocks_to_analyze = NULL;
584 }
585 df->analyze_subset = false;
586 }
587
588 /* Setting the blocks causes the refs to be unorganized since only
589 the refs in the blocks are seen. */
590 df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE);
591 df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE);
592 df_mark_solutions_dirty ();
593 }
594
595
596 /* Delete a DFLOW problem (and any problems that depend on this
597 problem). */
598
599 void
600 df_remove_problem (struct dataflow *dflow)
601 {
602 struct df_problem *problem;
603 int i;
604
605 if (!dflow)
606 return;
607
608 problem = dflow->problem;
609 gcc_assert (problem->remove_problem_fun);
610
611 /* Delete any problems that depended on this problem first. */
612 for (i = 0; i < df->num_problems_defined; i++)
613 if (df->problems_in_order[i]->problem->dependent_problem == problem)
614 df_remove_problem (df->problems_in_order[i]);
615
616 /* Now remove this problem. */
617 for (i = 0; i < df->num_problems_defined; i++)
618 if (df->problems_in_order[i] == dflow)
619 {
620 int j;
621 for (j = i + 1; j < df->num_problems_defined; j++)
622 df->problems_in_order[j-1] = df->problems_in_order[j];
623 df->problems_in_order[j-1] = NULL;
624 df->num_problems_defined--;
625 break;
626 }
627
628 (problem->remove_problem_fun) ();
629 df->problems_by_index[problem->id] = NULL;
630 }
631
632
633 /* Remove all of the problems that are not permanent. Scanning, LR
634 and (at -O2 or higher) LIVE are permanent, the rest are removable.
635 Also clear all of the changeable_flags. */
636
637 void
638 df_finish_pass (bool verify ATTRIBUTE_UNUSED)
639 {
640 int i;
641 int removed = 0;
642
643 #ifdef ENABLE_DF_CHECKING
644 int saved_flags;
645 #endif
646
647 if (!df)
648 return;
649
650 df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE);
651 df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE);
652
653 #ifdef ENABLE_DF_CHECKING
654 saved_flags = df->changeable_flags;
655 #endif
656
657 for (i = 0; i < df->num_problems_defined; i++)
658 {
659 struct dataflow *dflow = df->problems_in_order[i];
660 struct df_problem *problem = dflow->problem;
661
662 if (dflow->optional_p)
663 {
664 gcc_assert (problem->remove_problem_fun);
665 (problem->remove_problem_fun) ();
666 df->problems_in_order[i] = NULL;
667 df->problems_by_index[problem->id] = NULL;
668 removed++;
669 }
670 }
671 df->num_problems_defined -= removed;
672
673 /* Clear all of the flags. */
674 df->changeable_flags = 0;
675 df_process_deferred_rescans ();
676
677 /* Set the focus back to the whole function. */
678 if (df->blocks_to_analyze)
679 {
680 BITMAP_FREE (df->blocks_to_analyze);
681 df->blocks_to_analyze = NULL;
682 df_mark_solutions_dirty ();
683 df->analyze_subset = false;
684 }
685
686 #ifdef ENABLE_DF_CHECKING
687 /* Verification will fail in DF_NO_INSN_RESCAN. */
688 if (!(saved_flags & DF_NO_INSN_RESCAN))
689 {
690 df_lr_verify_transfer_functions ();
691 if (df_live)
692 df_live_verify_transfer_functions ();
693 }
694
695 #ifdef DF_DEBUG_CFG
696 df_set_clean_cfg ();
697 #endif
698 #endif
699
700 #ifdef ENABLE_CHECKING
701 if (verify)
702 df->changeable_flags |= DF_VERIFY_SCHEDULED;
703 #endif
704 }
705
706
707 /* Set up the dataflow instance for the entire back end. */
708
709 static unsigned int
710 rest_of_handle_df_initialize (void)
711 {
712 gcc_assert (!df);
713 df = XCNEW (struct df_d);
714 df->changeable_flags = 0;
715
716 bitmap_obstack_initialize (&df_bitmap_obstack);
717
718 /* Set this to a conservative value. Stack_ptr_mod will compute it
719 correctly later. */
720 crtl->sp_is_unchanging = 0;
721
722 df_scan_add_problem ();
723 df_scan_alloc (NULL);
724
725 /* These three problems are permanent. */
726 df_lr_add_problem ();
727 if (optimize > 1)
728 df_live_add_problem ();
729
730 df->postorder = XNEWVEC (int, last_basic_block_for_fn (cfun));
731 df->postorder_inverted = XNEWVEC (int, last_basic_block_for_fn (cfun));
732 df->n_blocks = post_order_compute (df->postorder, true, true);
733 df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted);
734 gcc_assert (df->n_blocks == df->n_blocks_inverted);
735
736 df->hard_regs_live_count = XCNEWVEC (unsigned int, FIRST_PSEUDO_REGISTER);
737
738 df_hard_reg_init ();
739 /* After reload, some ports add certain bits to regs_ever_live so
740 this cannot be reset. */
741 df_compute_regs_ever_live (true);
742 df_scan_blocks ();
743 df_compute_regs_ever_live (false);
744 return 0;
745 }
746
747
748 namespace {
749
750 const pass_data pass_data_df_initialize_opt =
751 {
752 RTL_PASS, /* type */
753 "dfinit", /* name */
754 OPTGROUP_NONE, /* optinfo_flags */
755 TV_DF_SCAN, /* tv_id */
756 0, /* properties_required */
757 0, /* properties_provided */
758 0, /* properties_destroyed */
759 0, /* todo_flags_start */
760 0, /* todo_flags_finish */
761 };
762
763 class pass_df_initialize_opt : public rtl_opt_pass
764 {
765 public:
766 pass_df_initialize_opt (gcc::context *ctxt)
767 : rtl_opt_pass (pass_data_df_initialize_opt, ctxt)
768 {}
769
770 /* opt_pass methods: */
771 virtual bool gate (function *) { return optimize > 0; }
772 virtual unsigned int execute (function *)
773 {
774 return rest_of_handle_df_initialize ();
775 }
776
777 }; // class pass_df_initialize_opt
778
779 } // anon namespace
780
781 rtl_opt_pass *
782 make_pass_df_initialize_opt (gcc::context *ctxt)
783 {
784 return new pass_df_initialize_opt (ctxt);
785 }
786
787
788 namespace {
789
790 const pass_data pass_data_df_initialize_no_opt =
791 {
792 RTL_PASS, /* type */
793 "no-opt dfinit", /* name */
794 OPTGROUP_NONE, /* optinfo_flags */
795 TV_DF_SCAN, /* tv_id */
796 0, /* properties_required */
797 0, /* properties_provided */
798 0, /* properties_destroyed */
799 0, /* todo_flags_start */
800 0, /* todo_flags_finish */
801 };
802
803 class pass_df_initialize_no_opt : public rtl_opt_pass
804 {
805 public:
806 pass_df_initialize_no_opt (gcc::context *ctxt)
807 : rtl_opt_pass (pass_data_df_initialize_no_opt, ctxt)
808 {}
809
810 /* opt_pass methods: */
811 virtual bool gate (function *) { return optimize == 0; }
812 virtual unsigned int execute (function *)
813 {
814 return rest_of_handle_df_initialize ();
815 }
816
817 }; // class pass_df_initialize_no_opt
818
819 } // anon namespace
820
821 rtl_opt_pass *
822 make_pass_df_initialize_no_opt (gcc::context *ctxt)
823 {
824 return new pass_df_initialize_no_opt (ctxt);
825 }
826
827
828 /* Free all the dataflow info and the DF structure. This should be
829 called from the df_finish macro which also NULLs the parm. */
830
831 static unsigned int
832 rest_of_handle_df_finish (void)
833 {
834 int i;
835
836 gcc_assert (df);
837
838 for (i = 0; i < df->num_problems_defined; i++)
839 {
840 struct dataflow *dflow = df->problems_in_order[i];
841 dflow->problem->free_fun ();
842 }
843
844 free (df->postorder);
845 free (df->postorder_inverted);
846 free (df->hard_regs_live_count);
847 free (df);
848 df = NULL;
849
850 bitmap_obstack_release (&df_bitmap_obstack);
851 return 0;
852 }
853
854
855 namespace {
856
857 const pass_data pass_data_df_finish =
858 {
859 RTL_PASS, /* type */
860 "dfinish", /* name */
861 OPTGROUP_NONE, /* optinfo_flags */
862 TV_NONE, /* tv_id */
863 0, /* properties_required */
864 0, /* properties_provided */
865 0, /* properties_destroyed */
866 0, /* todo_flags_start */
867 0, /* todo_flags_finish */
868 };
869
870 class pass_df_finish : public rtl_opt_pass
871 {
872 public:
873 pass_df_finish (gcc::context *ctxt)
874 : rtl_opt_pass (pass_data_df_finish, ctxt)
875 {}
876
877 /* opt_pass methods: */
878 virtual unsigned int execute (function *)
879 {
880 return rest_of_handle_df_finish ();
881 }
882
883 }; // class pass_df_finish
884
885 } // anon namespace
886
887 rtl_opt_pass *
888 make_pass_df_finish (gcc::context *ctxt)
889 {
890 return new pass_df_finish (ctxt);
891 }
892
893
894
895
896 \f
897 /*----------------------------------------------------------------------------
898 The general data flow analysis engine.
899 ----------------------------------------------------------------------------*/
900
901 /* Return the time when BB was last visited. */
902 #define BB_LAST_CHANGE_AGE(bb) ((ptrdiff_t)(bb)->aux)
903
904 /* Helper function for df_worklist_dataflow.
905 Propagate the dataflow forward.
906 Given a BB_INDEX, do the dataflow propagation
907 and set bits on for successors in PENDING
908 if the out set of the dataflow has changed.
909
910 AGE specifies the time when BB was last visited.
911 An AGE of 0 means we are visiting for the first time and need to
912 compute the transfer function to initialize the data structures.
913 Otherwise we re-do the transfer function only if something changed
914 while computing the confluence functions.
915 We need to compute the confluence function only for edges whose source
916 changed since the last visit of BB.
917
918 Return true if BB info has changed. This is always the case
919 in the first visit. */
920
921 static bool
922 df_worklist_propagate_forward (struct dataflow *dataflow,
923 unsigned bb_index,
924 unsigned *bbindex_to_postorder,
925 bitmap pending,
926 sbitmap considered,
927 ptrdiff_t age)
928 {
929 edge e;
930 edge_iterator ei;
931 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
932 bool changed = !age;
933
934 /* Calculate <conf_op> of incoming edges. */
935 if (EDGE_COUNT (bb->preds) > 0)
936 FOR_EACH_EDGE (e, ei, bb->preds)
937 {
938 if (age <= BB_LAST_CHANGE_AGE (e->src)
939 && bitmap_bit_p (considered, e->src->index))
940 changed |= dataflow->problem->con_fun_n (e);
941 }
942 else if (dataflow->problem->con_fun_0)
943 dataflow->problem->con_fun_0 (bb);
944
945 if (changed
946 && dataflow->problem->trans_fun (bb_index))
947 {
948 /* The out set of this block has changed.
949 Propagate to the outgoing blocks. */
950 FOR_EACH_EDGE (e, ei, bb->succs)
951 {
952 unsigned ob_index = e->dest->index;
953
954 if (bitmap_bit_p (considered, ob_index))
955 bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
956 }
957 return true;
958 }
959 return false;
960 }
961
962
963 /* Helper function for df_worklist_dataflow.
964 Propagate the dataflow backward. */
965
966 static bool
967 df_worklist_propagate_backward (struct dataflow *dataflow,
968 unsigned bb_index,
969 unsigned *bbindex_to_postorder,
970 bitmap pending,
971 sbitmap considered,
972 ptrdiff_t age)
973 {
974 edge e;
975 edge_iterator ei;
976 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
977 bool changed = !age;
978
979 /* Calculate <conf_op> of outgoing edges. */
980 if (EDGE_COUNT (bb->succs) > 0)
981 FOR_EACH_EDGE (e, ei, bb->succs)
982 {
983 if (age <= BB_LAST_CHANGE_AGE (e->dest)
984 && bitmap_bit_p (considered, e->dest->index))
985 changed |= dataflow->problem->con_fun_n (e);
986 }
987 else if (dataflow->problem->con_fun_0)
988 dataflow->problem->con_fun_0 (bb);
989
990 if (changed
991 && dataflow->problem->trans_fun (bb_index))
992 {
993 /* The in set of this block has changed.
994 Propagate to the predecessor blocks. */
995 FOR_EACH_EDGE (e, ei, bb->preds)
996 {
997 unsigned ob_index = e->src->index;
998
999 if (bitmap_bit_p (considered, ob_index))
1000 bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
1001 }
1002 return true;
1003 }
1004 return false;
1005 }
1006
1007 /* Main dataflow solver loop.
1008
1009 DATAFLOW is the problem we are solving, and PENDING is the worklist of basic
1010 blocks we need to visit.
1011 BLOCKS_IN_POSTORDER is an array of size N_BLOCKS giving the blocks in postorder,
1012 and BBINDEX_TO_POSTORDER is an array mapping BB->index back to its postorder position.
1013 PENDING will be freed.
1014
1015 The worklists are bitmaps indexed by postorder positions.
1016
1017 The function implements the standard algorithm for dataflow solving with two
1018 worklists (we are processing WORKLIST and storing new BBs to visit in
1019 PENDING).
1020
1021 As an optimization we maintain the age when each BB was last changed (stored
1022 in bb->aux) and when it was last visited (stored in last_visit_age). This
1023 avoids the need to re-do the confluence function for edges whose source
1024 did not change since the destination was last visited.
1025
1026 static void
1027 df_worklist_dataflow_doublequeue (struct dataflow *dataflow,
1028 bitmap pending,
1029 sbitmap considered,
1030 int *blocks_in_postorder,
1031 unsigned *bbindex_to_postorder,
1032 int n_blocks)
1033 {
1034 enum df_flow_dir dir = dataflow->problem->dir;
1035 int dcount = 0;
1036 bitmap worklist = BITMAP_ALLOC (&df_bitmap_obstack);
1037 int age = 0;
1038 bool changed;
1039 vec<int> last_visit_age = vNULL;
1040 int prev_age;
1041 basic_block bb;
1042 int i;
1043
1044 last_visit_age.safe_grow_cleared (n_blocks);
1045
1046 /* Double-queueing. Worklist is for the current iteration,
1047 and pending is for the next. */
1048 while (!bitmap_empty_p (pending))
1049 {
1050 bitmap_iterator bi;
1051 unsigned int index;
1052
1053 /* Swap pending and worklist. */
1054 bitmap temp = worklist;
1055 worklist = pending;
1056 pending = temp;
1057
1058 EXECUTE_IF_SET_IN_BITMAP (worklist, 0, index, bi)
1059 {
1060 unsigned bb_index;
1061 dcount++;
1062
1063 bitmap_clear_bit (pending, index);
1064 bb_index = blocks_in_postorder[index];
1065 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
1066 prev_age = last_visit_age[index];
1067 if (dir == DF_FORWARD)
1068 changed = df_worklist_propagate_forward (dataflow, bb_index,
1069 bbindex_to_postorder,
1070 pending, considered,
1071 prev_age);
1072 else
1073 changed = df_worklist_propagate_backward (dataflow, bb_index,
1074 bbindex_to_postorder,
1075 pending, considered,
1076 prev_age);
1077 last_visit_age[index] = ++age;
1078 if (changed)
1079 bb->aux = (void *)(ptrdiff_t)age;
1080 }
1081 bitmap_clear (worklist);
1082 }
1083 for (i = 0; i < n_blocks; i++)
1084 BASIC_BLOCK_FOR_FN (cfun, blocks_in_postorder[i])->aux = NULL;
1085
1086 BITMAP_FREE (worklist);
1087 BITMAP_FREE (pending);
1088 last_visit_age.release ();
1089
1090 /* Dump statistics. */
1091 if (dump_file)
1092 fprintf (dump_file, "df_worklist_dataflow_doublequeue:"
1093 "n_basic_blocks %d n_edges %d"
1094 " count %d (%5.2g)\n",
1095 n_basic_blocks_for_fn (cfun), n_edges_for_fn (cfun),
1096 dcount, dcount / (float)n_basic_blocks_for_fn (cfun));
1097 }
1098
1099 /* Worklist-based dataflow solver. It uses sbitmap as a worklist,
1100 with "n"-th bit representing the n-th block in the reverse-postorder order.
1101 The solver is a double-queue algorithm similar to the "double stack" solver
1102 from Cooper, Harvey and Kennedy, "Iterative data-flow analysis, Revisited".
1103 The only significant difference is that the worklist in this implementation
1104 is always sorted in RPO of the CFG visiting direction. */
1105
1106 void
1107 df_worklist_dataflow (struct dataflow *dataflow,
1108 bitmap blocks_to_consider,
1109 int *blocks_in_postorder,
1110 int n_blocks)
1111 {
1112 bitmap pending = BITMAP_ALLOC (&df_bitmap_obstack);
1113 sbitmap considered = sbitmap_alloc (last_basic_block_for_fn (cfun));
1114 bitmap_iterator bi;
1115 unsigned int *bbindex_to_postorder;
1116 int i;
1117 unsigned int index;
1118 enum df_flow_dir dir = dataflow->problem->dir;
1119
1120 gcc_assert (dir != DF_NONE);
1121
1122 /* BBINDEX_TO_POSTORDER maps the bb->index to the reverse postorder. */
1123 bbindex_to_postorder = XNEWVEC (unsigned int,
1124 last_basic_block_for_fn (cfun));
1125
1126 /* Initialize the array to an out-of-bound value. */
1127 for (i = 0; i < last_basic_block_for_fn (cfun); i++)
1128 bbindex_to_postorder[i] = last_basic_block_for_fn (cfun);
1129
1130 /* Initialize the considered map. */
1131 bitmap_clear (considered);
1132 EXECUTE_IF_SET_IN_BITMAP (blocks_to_consider, 0, index, bi)
1133 {
1134 bitmap_set_bit (considered, index);
1135 }
1136
1137 /* Initialize the mapping of block index to postorder. */
1138 for (i = 0; i < n_blocks; i++)
1139 {
1140 bbindex_to_postorder[blocks_in_postorder[i]] = i;
1141 /* Add all blocks to the worklist. */
1142 bitmap_set_bit (pending, i);
1143 }
1144
1145 /* Initialize the problem. */
1146 if (dataflow->problem->init_fun)
1147 dataflow->problem->init_fun (blocks_to_consider);
1148
1149 /* Solve it. */
1150 df_worklist_dataflow_doublequeue (dataflow, pending, considered,
1151 blocks_in_postorder,
1152 bbindex_to_postorder,
1153 n_blocks);
1154 sbitmap_free (considered);
1155 free (bbindex_to_postorder);
1156 }
1157
1158
1159 /* Remove the entries not in BLOCKS from the LIST of length LEN, preserving
1160 the order of the remaining entries. Returns the length of the resulting
1161 list. */
1162
1163 static unsigned
1164 df_prune_to_subcfg (int list[], unsigned len, bitmap blocks)
1165 {
1166 unsigned act, last;
1167
1168 for (act = 0, last = 0; act < len; act++)
1169 if (bitmap_bit_p (blocks, list[act]))
1170 list[last++] = list[act];
1171
1172 return last;
1173 }
1174
1175
1176 /* Execute dataflow analysis on a single dataflow problem.
1177
1178 BLOCKS_TO_CONSIDER are the blocks whose solution can either be
1179 examined or will be computed. For calls from DF_ANALYZE, this is
1180 the set of blocks that has been passed to DF_SET_BLOCKS.
1181 */
1182
1183 void
1184 df_analyze_problem (struct dataflow *dflow,
1185 bitmap blocks_to_consider,
1186 int *postorder, int n_blocks)
1187 {
1188 timevar_push (dflow->problem->tv_id);
1189
1190 /* (Re)Allocate the data structures necessary to solve the problem. */
1191 if (dflow->problem->alloc_fun)
1192 dflow->problem->alloc_fun (blocks_to_consider);
1193
1194 #ifdef ENABLE_DF_CHECKING
1195 if (dflow->problem->verify_start_fun)
1196 dflow->problem->verify_start_fun ();
1197 #endif
1198
1199 /* Set up the problem and compute the local information. */
1200 if (dflow->problem->local_compute_fun)
1201 dflow->problem->local_compute_fun (blocks_to_consider);
1202
1203 /* Solve the equations. */
1204 if (dflow->problem->dataflow_fun)
1205 dflow->problem->dataflow_fun (dflow, blocks_to_consider,
1206 postorder, n_blocks);
1207
1208 /* Massage the solution. */
1209 if (dflow->problem->finalize_fun)
1210 dflow->problem->finalize_fun (blocks_to_consider);
1211
1212 #ifdef ENABLE_DF_CHECKING
1213 if (dflow->problem->verify_end_fun)
1214 dflow->problem->verify_end_fun ();
1215 #endif
1216
1217 timevar_pop (dflow->problem->tv_id);
1218
1219 dflow->computed = true;
1220 }
1221
1222
1223 /* Analyze dataflow info. */
1224
1225 static void
1226 df_analyze_1 (void)
1227 {
1228 int i;
1229
1230 /* These should be the same. */
1231 gcc_assert (df->n_blocks == df->n_blocks_inverted);
1232
1233 /* We need to do this before the df_verify_all because this is
1234 not kept incrementally up to date. */
1235 df_compute_regs_ever_live (false);
1236 df_process_deferred_rescans ();
1237
1238 if (dump_file)
1239 fprintf (dump_file, "df_analyze called\n");
1240
1241 #ifndef ENABLE_DF_CHECKING
1242 if (df->changeable_flags & DF_VERIFY_SCHEDULED)
1243 #endif
1244 df_verify ();
1245
1246 /* Skip over the DF_SCAN problem. */
1247 for (i = 1; i < df->num_problems_defined; i++)
1248 {
1249 struct dataflow *dflow = df->problems_in_order[i];
1250 if (dflow->solutions_dirty)
1251 {
1252 if (dflow->problem->dir == DF_FORWARD)
1253 df_analyze_problem (dflow,
1254 df->blocks_to_analyze,
1255 df->postorder_inverted,
1256 df->n_blocks_inverted);
1257 else
1258 df_analyze_problem (dflow,
1259 df->blocks_to_analyze,
1260 df->postorder,
1261 df->n_blocks);
1262 }
1263 }
1264
1265 if (!df->analyze_subset)
1266 {
1267 BITMAP_FREE (df->blocks_to_analyze);
1268 df->blocks_to_analyze = NULL;
1269 }
1270
1271 #ifdef DF_DEBUG_CFG
1272 df_set_clean_cfg ();
1273 #endif
1274 }
1275
1276 /* Analyze dataflow info. */
1277
1278 void
1279 df_analyze (void)
1280 {
1281 bitmap current_all_blocks = BITMAP_ALLOC (&df_bitmap_obstack);
1282 int i;
1283
1284 free (df->postorder);
1285 free (df->postorder_inverted);
1286 df->postorder = XNEWVEC (int, last_basic_block_for_fn (cfun));
1287 df->postorder_inverted = XNEWVEC (int, last_basic_block_for_fn (cfun));
1288 df->n_blocks = post_order_compute (df->postorder, true, true);
1289 df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted);
1290
1291 for (i = 0; i < df->n_blocks; i++)
1292 bitmap_set_bit (current_all_blocks, df->postorder[i]);
1293
1294 #ifdef ENABLE_CHECKING
1295 /* Verify that POSTORDER_INVERTED only contains blocks reachable from
1296 the ENTRY block. */
1297 for (i = 0; i < df->n_blocks_inverted; i++)
1298 gcc_assert (bitmap_bit_p (current_all_blocks, df->postorder_inverted[i]));
1299 #endif
1300
1301 /* Make sure that we have pruned any unreachable blocks from these
1302 sets. */
1303 if (df->analyze_subset)
1304 {
1305 bitmap_and_into (df->blocks_to_analyze, current_all_blocks);
1306 df->n_blocks = df_prune_to_subcfg (df->postorder,
1307 df->n_blocks, df->blocks_to_analyze);
1308 df->n_blocks_inverted = df_prune_to_subcfg (df->postorder_inverted,
1309 df->n_blocks_inverted,
1310 df->blocks_to_analyze);
1311 BITMAP_FREE (current_all_blocks);
1312 }
1313 else
1314 {
1315 df->blocks_to_analyze = current_all_blocks;
1316 current_all_blocks = NULL;
1317 }
1318
1319 df_analyze_1 ();
1320 }
1321
1322 /* Compute the reverse top sort order of the sub-CFG specified by LOOP.
1323 Returns the number of blocks which is always loop->num_nodes. */
1324
1325 static int
1326 loop_post_order_compute (int *post_order, struct loop *loop)
1327 {
1328 edge_iterator *stack;
1329 int sp;
1330 int post_order_num = 0;
1331 bitmap visited;
1332
1333 /* Allocate stack for back-tracking up CFG. */
1334 stack = XNEWVEC (edge_iterator, loop->num_nodes + 1);
1335 sp = 0;
1336
1337 /* Allocate bitmap to track nodes that have been visited. */
1338 visited = BITMAP_ALLOC (NULL);
1339
1340 /* Push the first edge on to the stack. */
1341 stack[sp++] = ei_start (loop_preheader_edge (loop)->src->succs);
1342
1343 while (sp)
1344 {
1345 edge_iterator ei;
1346 basic_block src;
1347 basic_block dest;
1348
1349 /* Look at the edge on the top of the stack. */
1350 ei = stack[sp - 1];
1351 src = ei_edge (ei)->src;
1352 dest = ei_edge (ei)->dest;
1353
1354 /* Check if the edge destination has been visited yet, and mark it
1355 if it has not been. */
1356 if (flow_bb_inside_loop_p (loop, dest)
1357 && bitmap_set_bit (visited, dest->index))
1358 {
1359 if (EDGE_COUNT (dest->succs) > 0)
1360 /* Since the DEST node has been visited for the first
1361 time, check its successors. */
1362 stack[sp++] = ei_start (dest->succs);
1363 else
1364 post_order[post_order_num++] = dest->index;
1365 }
1366 else
1367 {
1368 if (ei_one_before_end_p (ei)
1369 && src != loop_preheader_edge (loop)->src)
1370 post_order[post_order_num++] = src->index;
1371
1372 if (!ei_one_before_end_p (ei))
1373 ei_next (&stack[sp - 1]);
1374 else
1375 sp--;
1376 }
1377 }
1378
1379 free (stack);
1380 BITMAP_FREE (visited);
1381
1382 return post_order_num;
1383 }
1384
1385 /* Compute the reverse top sort order of the inverted sub-CFG specified
1386 by LOOP. Returns the number of blocks which is always loop->num_nodes. */
1387
1388 static int
1389 loop_inverted_post_order_compute (int *post_order, struct loop *loop)
1390 {
1391 basic_block bb;
1392 edge_iterator *stack;
1393 int sp;
1394 int post_order_num = 0;
1395 bitmap visited;
1396
1397 /* Allocate stack for back-tracking up CFG. */
1398 stack = XNEWVEC (edge_iterator, loop->num_nodes + 1);
1399 sp = 0;
1400
1401 /* Allocate bitmap to track nodes that have been visited. */
1402 visited = BITMAP_ALLOC (NULL);
1403
1404 /* Put all latches into the initial work list. In theory we'd want
1405 to start from loop exits but then we'd have the special case of
1406 endless loops. It doesn't really matter for DF iteration order and
1407 handling latches last is probably even better. */
1408 stack[sp++] = ei_start (loop->header->preds);
1409 bitmap_set_bit (visited, loop->header->index);
1410
1411 /* The inverted traversal loop. */
1412 while (sp)
1413 {
1414 edge_iterator ei;
1415 basic_block pred;
1416
1417 /* Look at the edge on the top of the stack. */
1418 ei = stack[sp - 1];
1419 bb = ei_edge (ei)->dest;
1420 pred = ei_edge (ei)->src;
1421
1422 /* Check if the predecessor has been visited yet, and mark it
1423 if it has not been. */
1424 if (flow_bb_inside_loop_p (loop, pred)
1425 && bitmap_set_bit (visited, pred->index))
1426 {
1427 if (EDGE_COUNT (pred->preds) > 0)
1428 /* Since the predecessor node has been visited for the first
1429 time, check its predecessors. */
1430 stack[sp++] = ei_start (pred->preds);
1431 else
1432 post_order[post_order_num++] = pred->index;
1433 }
1434 else
1435 {
1436 if (flow_bb_inside_loop_p (loop, bb)
1437 && ei_one_before_end_p (ei))
1438 post_order[post_order_num++] = bb->index;
1439
1440 if (!ei_one_before_end_p (ei))
1441 ei_next (&stack[sp - 1]);
1442 else
1443 sp--;
1444 }
1445 }
1446
1447 free (stack);
1448 BITMAP_FREE (visited);
1449 return post_order_num;
1450 }
1451
1452
1453 /* Analyze dataflow info for the basic blocks contained in LOOP. */
1454
1455 void
1456 df_analyze_loop (struct loop *loop)
1457 {
1458 free (df->postorder);
1459 free (df->postorder_inverted);
1460
1461 df->postorder = XNEWVEC (int, loop->num_nodes);
1462 df->postorder_inverted = XNEWVEC (int, loop->num_nodes);
1463 df->n_blocks = loop_post_order_compute (df->postorder, loop);
1464 df->n_blocks_inverted
1465 = loop_inverted_post_order_compute (df->postorder_inverted, loop);
1466 gcc_assert ((unsigned) df->n_blocks == loop->num_nodes);
1467 gcc_assert ((unsigned) df->n_blocks_inverted == loop->num_nodes);
1468
1469 bitmap blocks = BITMAP_ALLOC (&df_bitmap_obstack);
1470 for (int i = 0; i < df->n_blocks; ++i)
1471 bitmap_set_bit (blocks, df->postorder[i]);
1472 df_set_blocks (blocks);
1473 BITMAP_FREE (blocks);
1474
1475 df_analyze_1 ();
1476 }
1477
1478
1479 /* Return the number of basic blocks from the last call to df_analyze. */
1480
1481 int
1482 df_get_n_blocks (enum df_flow_dir dir)
1483 {
1484 gcc_assert (dir != DF_NONE);
1485
1486 if (dir == DF_FORWARD)
1487 {
1488 gcc_assert (df->postorder_inverted);
1489 return df->n_blocks_inverted;
1490 }
1491
1492 gcc_assert (df->postorder);
1493 return df->n_blocks;
1494 }
1495
1496
1497 /* Return a pointer to the array of basic blocks in the reverse postorder.
1498 Depending on the direction of the dataflow problem,
1499 it returns either the usual reverse postorder array
1500 or the reverse postorder of inverted traversal. */
1501 int *
1502 df_get_postorder (enum df_flow_dir dir)
1503 {
1504 gcc_assert (dir != DF_NONE);
1505
1506 if (dir == DF_FORWARD)
1507 {
1508 gcc_assert (df->postorder_inverted);
1509 return df->postorder_inverted;
1510 }
1511 gcc_assert (df->postorder);
1512 return df->postorder;
1513 }
1514
1515 static struct df_problem user_problem;
1516 static struct dataflow user_dflow;
1517
1518 /* Interface for calling iterative dataflow with user defined
1519 confluence and transfer functions. Supply DIR, a direction; INIT_FUN,
1520 an initialization function (or NULL); CONF_FUN_0, a confluence
1521 function for blocks with no logical preds (or NULL); CONF_FUN_N, the
1522 normal confluence function; TRANS_FUN, the basic block transfer
1523 function; BLOCKS, the set of blocks to examine; POSTORDER, the blocks
1524 in postorder; and N_BLOCKS, the number of blocks in POSTORDER. */
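/* A minimal sketch of a caller (illustrative only; my_confluence and
   my_transfer stand for pass-local functions with the
   df_confluence_function_n and df_transfer_function signatures taken
   by this function, and BLOCKS is the bitmap of blocks to consider):

     df_simple_dataflow (DF_BACKWARD, NULL, NULL,
                         my_confluence, my_transfer, blocks,
                         df_get_postorder (DF_BACKWARD),
                         df_get_n_blocks (DF_BACKWARD));  */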
1525
1526 void
1527 df_simple_dataflow (enum df_flow_dir dir,
1528 df_init_function init_fun,
1529 df_confluence_function_0 con_fun_0,
1530 df_confluence_function_n con_fun_n,
1531 df_transfer_function trans_fun,
1532 bitmap blocks, int * postorder, int n_blocks)
1533 {
1534 memset (&user_problem, 0, sizeof (struct df_problem));
1535 user_problem.dir = dir;
1536 user_problem.init_fun = init_fun;
1537 user_problem.con_fun_0 = con_fun_0;
1538 user_problem.con_fun_n = con_fun_n;
1539 user_problem.trans_fun = trans_fun;
1540 user_dflow.problem = &user_problem;
1541 df_worklist_dataflow (&user_dflow, blocks, postorder, n_blocks);
1542 }
1543
1544
1545 \f
1546 /*----------------------------------------------------------------------------
1547 Functions to support limited incremental change.
1548 ----------------------------------------------------------------------------*/
1549
1550
1551 /* Get basic block info. */
1552
1553 static void *
1554 df_get_bb_info (struct dataflow *dflow, unsigned int index)
1555 {
1556 if (dflow->block_info == NULL)
1557 return NULL;
1558 if (index >= dflow->block_info_size)
1559 return NULL;
1560 return (void *)((char *)dflow->block_info
1561 + index * dflow->problem->block_info_elt_size);
1562 }
1563
1564
1565 /* Set basic block info. */
1566
1567 static void
1568 df_set_bb_info (struct dataflow *dflow, unsigned int index,
1569 void *bb_info)
1570 {
1571 gcc_assert (dflow->block_info);
1572 memcpy ((char *)dflow->block_info
1573 + index * dflow->problem->block_info_elt_size,
1574 bb_info, dflow->problem->block_info_elt_size);
1575 }
1576
1577
1578 /* Clear basic block info. */
1579
1580 static void
1581 df_clear_bb_info (struct dataflow *dflow, unsigned int index)
1582 {
1583 gcc_assert (dflow->block_info);
1584 gcc_assert (dflow->block_info_size > index);
1585 memset ((char *)dflow->block_info
1586 + index * dflow->problem->block_info_elt_size,
1587 0, dflow->problem->block_info_elt_size);
1588 }
1589
1590
1591 /* Mark the solutions as being out of date. */
1592
1593 void
1594 df_mark_solutions_dirty (void)
1595 {
1596 if (df)
1597 {
1598 int p;
1599 for (p = 1; p < df->num_problems_defined; p++)
1600 df->problems_in_order[p]->solutions_dirty = true;
1601 }
1602 }
1603
1604
1605 /* Return true if BB needs its transfer functions recomputed. */
1606
1607 bool
1608 df_get_bb_dirty (basic_block bb)
1609 {
1610 return bitmap_bit_p ((df_live
1611 ? df_live : df_lr)->out_of_date_transfer_functions,
1612 bb->index);
1613 }
1614
1615
1616 /* Mark BB's transfer functions as being out of
1617 date. */
1618
1619 void
1620 df_set_bb_dirty (basic_block bb)
1621 {
1622 bb->flags |= BB_MODIFIED;
1623 if (df)
1624 {
1625 int p;
1626 for (p = 1; p < df->num_problems_defined; p++)
1627 {
1628 struct dataflow *dflow = df->problems_in_order[p];
1629 if (dflow->out_of_date_transfer_functions)
1630 bitmap_set_bit (dflow->out_of_date_transfer_functions, bb->index);
1631 }
1632 df_mark_solutions_dirty ();
1633 }
1634 }
1635
1636
1637 /* Grow the bb_info array. */
1638
1639 void
1640 df_grow_bb_info (struct dataflow *dflow)
1641 {
1642 unsigned int new_size = last_basic_block_for_fn (cfun) + 1;
1643 if (dflow->block_info_size < new_size)
1644 {
1645 new_size += new_size / 4;
1646 dflow->block_info
1647 = (void *)XRESIZEVEC (char, (char *)dflow->block_info,
1648 new_size
1649 * dflow->problem->block_info_elt_size);
1650 memset ((char *)dflow->block_info
1651 + dflow->block_info_size
1652 * dflow->problem->block_info_elt_size,
1653 0,
1654 (new_size - dflow->block_info_size)
1655 * dflow->problem->block_info_elt_size);
1656 dflow->block_info_size = new_size;
1657 }
1658 }
1659
1660
1661 /* Clear the dirty bits. This is called from places that delete
1662 blocks. */
1663 static void
1664 df_clear_bb_dirty (basic_block bb)
1665 {
1666 int p;
1667 for (p = 1; p < df->num_problems_defined; p++)
1668 {
1669 struct dataflow *dflow = df->problems_in_order[p];
1670 if (dflow->out_of_date_transfer_functions)
1671 bitmap_clear_bit (dflow->out_of_date_transfer_functions, bb->index);
1672 }
1673 }
1674
1675 /* Called from rtl_compact_blocks to reorganize the problems' basic
1676 block info. */
1677
1678 void
1679 df_compact_blocks (void)
1680 {
1681 int i, p;
1682 basic_block bb;
1683 void *problem_temps;
1684 bitmap_head tmp;
1685
1686 bitmap_initialize (&tmp, &df_bitmap_obstack);
1687 for (p = 0; p < df->num_problems_defined; p++)
1688 {
1689 struct dataflow *dflow = df->problems_in_order[p];
1690
1691 /* Need to reorganize the out_of_date_transfer_functions for the
1692 dflow problem. */
1693 if (dflow->out_of_date_transfer_functions)
1694 {
1695 bitmap_copy (&tmp, dflow->out_of_date_transfer_functions);
1696 bitmap_clear (dflow->out_of_date_transfer_functions);
1697 if (bitmap_bit_p (&tmp, ENTRY_BLOCK))
1698 bitmap_set_bit (dflow->out_of_date_transfer_functions, ENTRY_BLOCK);
1699 if (bitmap_bit_p (&tmp, EXIT_BLOCK))
1700 bitmap_set_bit (dflow->out_of_date_transfer_functions, EXIT_BLOCK);
1701
1702 i = NUM_FIXED_BLOCKS;
1703 FOR_EACH_BB_FN (bb, cfun)
1704 {
1705 if (bitmap_bit_p (&tmp, bb->index))
1706 bitmap_set_bit (dflow->out_of_date_transfer_functions, i);
1707 i++;
1708 }
1709 }
1710
1711 /* Now shuffle the block info for the problem. */
1712 if (dflow->problem->free_bb_fun)
1713 {
1714 int size = (last_basic_block_for_fn (cfun)
1715 * dflow->problem->block_info_elt_size);
1716 problem_temps = XNEWVAR (char, size);
1717 df_grow_bb_info (dflow);
1718 memcpy (problem_temps, dflow->block_info, size);
1719
1720 /* Copy the bb info from the problem tmps to the proper
1721 place in the block_info vector. Null out the copied
1722 item. The entry and exit blocks never move. */
1723 i = NUM_FIXED_BLOCKS;
1724 FOR_EACH_BB_FN (bb, cfun)
1725 {
1726 df_set_bb_info (dflow, i,
1727 (char *)problem_temps
1728 + bb->index * dflow->problem->block_info_elt_size);
1729 i++;
1730 }
1731 memset ((char *)dflow->block_info
1732 + i * dflow->problem->block_info_elt_size, 0,
1733 (last_basic_block_for_fn (cfun) - i)
1734 * dflow->problem->block_info_elt_size);
1735 free (problem_temps);
1736 }
1737 }
1738
1739 /* Shuffle the bits in the basic_block indexed arrays. */
1740
1741 if (df->blocks_to_analyze)
1742 {
1743 if (bitmap_bit_p (&tmp, ENTRY_BLOCK))
1744 bitmap_set_bit (df->blocks_to_analyze, ENTRY_BLOCK);
1745 if (bitmap_bit_p (&tmp, EXIT_BLOCK))
1746 bitmap_set_bit (df->blocks_to_analyze, EXIT_BLOCK);
1747 bitmap_copy (&tmp, df->blocks_to_analyze);
1748 bitmap_clear (df->blocks_to_analyze);
1749 i = NUM_FIXED_BLOCKS;
1750 FOR_EACH_BB_FN (bb, cfun)
1751 {
1752 if (bitmap_bit_p (&tmp, bb->index))
1753 bitmap_set_bit (df->blocks_to_analyze, i);
1754 i++;
1755 }
1756 }
1757
1758 bitmap_clear (&tmp);
1759
1760 i = NUM_FIXED_BLOCKS;
1761 FOR_EACH_BB_FN (bb, cfun)
1762 {
1763 SET_BASIC_BLOCK_FOR_FN (cfun, i, bb);
1764 bb->index = i;
1765 i++;
1766 }
1767
1768 gcc_assert (i == n_basic_blocks_for_fn (cfun));
1769
1770 for (; i < last_basic_block_for_fn (cfun); i++)
1771 SET_BASIC_BLOCK_FOR_FN (cfun, i, NULL);
1772
1773 #ifdef DF_DEBUG_CFG
1774 if (!df_lr->solutions_dirty)
1775 df_set_clean_cfg ();
1776 #endif
1777 }
1778
1779
1780 /* Shove NEW_BLOCK in at OLD_INDEX. Called from ifcvt to hack a
1781 block. There is no excuse for people to do this kind of thing. */
1782
1783 void
1784 df_bb_replace (int old_index, basic_block new_block)
1785 {
1786 int new_block_index = new_block->index;
1787 int p;
1788
1789 if (dump_file)
1790 fprintf (dump_file, "shoving block %d into %d\n", new_block_index, old_index);
1791
1792 gcc_assert (df);
1793 gcc_assert (BASIC_BLOCK_FOR_FN (cfun, old_index) == NULL);
1794
1795 for (p = 0; p < df->num_problems_defined; p++)
1796 {
1797 struct dataflow *dflow = df->problems_in_order[p];
1798 if (dflow->block_info)
1799 {
1800 df_grow_bb_info (dflow);
1801 df_set_bb_info (dflow, old_index,
1802 df_get_bb_info (dflow, new_block_index));
1803 }
1804 }
1805
1806 df_clear_bb_dirty (new_block);
1807 SET_BASIC_BLOCK_FOR_FN (cfun, old_index, new_block);
1808 new_block->index = old_index;
1809 df_set_bb_dirty (BASIC_BLOCK_FOR_FN (cfun, old_index));
1810 SET_BASIC_BLOCK_FOR_FN (cfun, new_block_index, NULL);
1811 }
1812
1813
1814 /* Free all of the per basic block dataflow from all of the problems.
1815 This is typically called before a basic block is deleted and the
1816 problems will be reanalyzed. */
1817
1818 void
1819 df_bb_delete (int bb_index)
1820 {
1821 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
1822 int i;
1823
1824 if (!df)
1825 return;
1826
1827 for (i = 0; i < df->num_problems_defined; i++)
1828 {
1829 struct dataflow *dflow = df->problems_in_order[i];
1830 if (dflow->problem->free_bb_fun)
1831 {
1832 void *bb_info = df_get_bb_info (dflow, bb_index);
1833 if (bb_info)
1834 {
1835 dflow->problem->free_bb_fun (bb, bb_info);
1836 df_clear_bb_info (dflow, bb_index);
1837 }
1838 }
1839 }
1840 df_clear_bb_dirty (bb);
1841 df_mark_solutions_dirty ();
1842 }
1843
1844
1845 /* Verify that there is a place for everything and everything is in
1846 its place. This is too expensive to run after every pass in the
1847 mainline. However this is an excellent debugging tool if the
1848 dataflow information is not being updated properly. You can just
1849 sprinkle calls in until you find the place that is changing an
1850 underlying structure without calling the proper updating
1851 routine. */
1852
1853 void
1854 df_verify (void)
1855 {
1856 df_scan_verify ();
1857 #ifdef ENABLE_DF_CHECKING
1858 df_lr_verify_transfer_functions ();
1859 if (df_live)
1860 df_live_verify_transfer_functions ();
1861 #endif
1862 }
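
/* A minimal sketch of the "sprinkling" described above: bracket a
   suspect transformation with temporary calls such as

       df_verify ();
       ... the transformation under suspicion ...
       df_verify ();

   so that the second call aborts as soon as the recorded df
   information stops matching the underlying rtl.  */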
1863
1864 #ifdef DF_DEBUG_CFG
1865
1866 /* Compute an array of ints that describes the cfg. This can be used
1867 to discover places where the cfg is modified but the appropriate
1868 calls have not been made to keep df informed. The internals of
1869 this are unexciting; the key is that two instances of this can be
1870 compared to see if any changes have been made to the cfg. */
1871
1872 static int *
1873 df_compute_cfg_image (void)
1874 {
1875 basic_block bb;
1876 int size = 2 + (2 * n_basic_blocks_for_fn (cfun));
1877 int i;
1878 int * map;
1879
1880 FOR_ALL_BB_FN (bb, cfun)
1881 {
1882 size += EDGE_COUNT (bb->succs);
1883 }
1884
1885 map = XNEWVEC (int, size);
1886 map[0] = size;
1887 i = 1;
1888 FOR_ALL_BB_FN (bb, cfun)
1889 {
1890 edge_iterator ei;
1891 edge e;
1892
1893 map[i++] = bb->index;
1894 FOR_EACH_EDGE (e, ei, bb->succs)
1895 map[i++] = e->dest->index;
1896 map[i++] = -1;
1897 }
1898 map[i] = -1;
1899 return map;
1900 }
1901
1902 static int *saved_cfg = NULL;
1903
1904
1905 /* This function compares the saved version of the cfg with the
1906 current cfg and aborts if the two differ. The function silently
1907 returns if the cfg has been marked as dirty, if no image has been
1908 saved, or if the two are the same. */
1909
1910 void
1911 df_check_cfg_clean (void)
1912 {
1913 int *new_map;
1914
1915 if (!df)
1916 return;
1917
1918 if (df_lr->solutions_dirty)
1919 return;
1920
1921 if (saved_cfg == NULL)
1922 return;
1923
1924 new_map = df_compute_cfg_image ();
1925 gcc_assert (memcmp (saved_cfg, new_map, saved_cfg[0] * sizeof (int)) == 0);
1926 free (new_map);
1927 }
1928
1929
1930 /* This function builds a cfg fingerprint and squirrels it away in
1931 saved_cfg. */
1932
1933 static void
1934 df_set_clean_cfg (void)
1935 {
1936 free (saved_cfg);
1937 saved_cfg = df_compute_cfg_image ();
1938 }
1939
1940 #endif /* DF_DEBUG_CFG */
1941 /*----------------------------------------------------------------------------
1942 PUBLIC INTERFACES TO QUERY INFORMATION.
1943 ----------------------------------------------------------------------------*/
1944
1945
1946 /* Return first def of REGNO within BB. */
1947
1948 df_ref
1949 df_bb_regno_first_def_find (basic_block bb, unsigned int regno)
1950 {
1951 rtx_insn *insn;
1952 df_ref def;
1953
1954 FOR_BB_INSNS (bb, insn)
1955 {
1956 if (!INSN_P (insn))
1957 continue;
1958
1959 FOR_EACH_INSN_DEF (def, insn)
1960 if (DF_REF_REGNO (def) == regno)
1961 return def;
1962 }
1963 return NULL;
1964 }
1965
1966
1967 /* Return last def of REGNO within BB. */
1968
1969 df_ref
1970 df_bb_regno_last_def_find (basic_block bb, unsigned int regno)
1971 {
1972 rtx_insn *insn;
1973 df_ref def;
1974
1975 FOR_BB_INSNS_REVERSE (bb, insn)
1976 {
1977 if (!INSN_P (insn))
1978 continue;
1979
1980 FOR_EACH_INSN_DEF (def, insn)
1981 if (DF_REF_REGNO (def) == regno)
1982 return def;
1983 }
1984
1985 return NULL;
1986 }
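
/* Illustrative only: the two queries above can be combined, e.g. to
   test whether REGNO is set more than once within BB (assuming the df
   scanning information for the block is up to date):

       df_ref first = df_bb_regno_first_def_find (bb, regno);
       df_ref last = df_bb_regno_last_def_find (bb, regno);
       bool multiple_defs = first && last && first != last;
*/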
1987
1988 /* Find the reference corresponding to the definition of REG in INSN,
1989 or NULL if there is none. */
1990
1991 df_ref
1992 df_find_def (rtx_insn *insn, rtx reg)
1993 {
1994 df_ref def;
1995
1996 if (GET_CODE (reg) == SUBREG)
1997 reg = SUBREG_REG (reg);
1998 gcc_assert (REG_P (reg));
1999
2000 FOR_EACH_INSN_DEF (def, insn)
2001 if (DF_REF_REGNO (def) == REGNO (reg))
2002 return def;
2003
2004 return NULL;
2005 }
2006
2007
2008 /* Return true if REG is defined in INSN, false otherwise. */
2009
2010 bool
2011 df_reg_defined (rtx_insn *insn, rtx reg)
2012 {
2013 return df_find_def (insn, reg) != NULL;
2014 }
2015
2016
2017 /* Find the reference corresponding to the use of REG in INSN, or
2018 NULL if there is none. */
2019
2020 df_ref
2021 df_find_use (rtx_insn *insn, rtx reg)
2022 {
2023 df_ref use;
2024
2025 if (GET_CODE (reg) == SUBREG)
2026 reg = SUBREG_REG (reg);
2027 gcc_assert (REG_P (reg));
2028
2029 df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
2030 FOR_EACH_INSN_INFO_USE (use, insn_info)
2031 if (DF_REF_REGNO (use) == REGNO (reg))
2032 return use;
2033 if (df->changeable_flags & DF_EQ_NOTES)
2034 FOR_EACH_INSN_INFO_EQ_USE (use, insn_info)
2035 if (DF_REF_REGNO (use) == REGNO (reg))
2036 return use;
2037 return NULL;
2038 }
2039
2040
2041 /* Return true if REG is referenced in INSN, false otherwise. */
2042
2043 bool
2044 df_reg_used (rtx_insn *insn, rtx reg)
2045 {
2046 return df_find_use (insn, reg) != NULL;
2047 }
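
/* Illustrative only: df_reg_defined and df_reg_used together let a
   pass classify how INSN touches REG, e.g.

       if (df_reg_used (insn, reg) && !df_reg_defined (insn, reg))
         mark_reg_read_only_in (insn, reg);

   where mark_reg_read_only_in is a hypothetical per-pass helper; both
   predicates assume INSN has been scanned (see df_insn_rescan).  */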
2048
2049 \f
2050 /*----------------------------------------------------------------------------
2051 Debugging and printing functions.
2052 ----------------------------------------------------------------------------*/
2053
2054 /* Write the registers in R into OUTF. This is part of making a
2055 debugging dump. */
2056
2057 void
2058 dump_regset (regset r, FILE *outf)
2059 {
2060 unsigned i;
2061 reg_set_iterator rsi;
2062
2063 if (r == NULL)
2064 {
2065 fputs (" (nil)", outf);
2066 return;
2067 }
2068
2069 EXECUTE_IF_SET_IN_REG_SET (r, 0, i, rsi)
2070 {
2071 fprintf (outf, " %d", i);
2072 if (i < FIRST_PSEUDO_REGISTER)
2073 fprintf (outf, " [%s]",
2074 reg_names[i]);
2075 }
2076 }
2077
2078 /* Print a human-readable representation of R on the standard error
2079 stream. This function is designed to be used from within the
2080 debugger. */
2081 extern void debug_regset (regset);
2082 DEBUG_FUNCTION void
2083 debug_regset (regset r)
2084 {
2085 dump_regset (r, stderr);
2086 putc ('\n', stderr);
2087 }
2088
2089 /* Write the registers in bitmap R into FILE. This is part of making
2090 a debugging dump. */
2091
2092 void
2093 df_print_regset (FILE *file, bitmap r)
2094 {
2095 unsigned int i;
2096 bitmap_iterator bi;
2097
2098 if (r == NULL)
2099 fputs (" (nil)", file);
2100 else
2101 {
2102 EXECUTE_IF_SET_IN_BITMAP (r, 0, i, bi)
2103 {
2104 fprintf (file, " %d", i);
2105 if (i < FIRST_PSEUDO_REGISTER)
2106 fprintf (file, " [%s]", reg_names[i]);
2107 }
2108 }
2109 fprintf (file, "\n");
2110 }
2111
2112
2113 /* Write the registers in bitmap R into FILE. The bitmap is in the
2114 two-bits-per-register form used by df_word_lr. This is part of
2115 making a debugging dump. */
2116
2117 void
2118 df_print_word_regset (FILE *file, bitmap r)
2119 {
2120 unsigned int max_reg = max_reg_num ();
2121
2122 if (r == NULL)
2123 fputs (" (nil)", file);
2124 else
2125 {
2126 unsigned int i;
2127 for (i = FIRST_PSEUDO_REGISTER; i < max_reg; i++)
2128 {
2129 bool found = (bitmap_bit_p (r, 2 * i)
2130 || bitmap_bit_p (r, 2 * i + 1));
2131 if (found)
2132 {
2133 int word;
2134 const char * sep = "";
2135 fprintf (file, " %d", i);
2136 fprintf (file, "(");
2137 for (word = 0; word < 2; word++)
2138 if (bitmap_bit_p (r, 2 * i + word))
2139 {
2140 fprintf (file, "%s%d", sep, word);
2141 sep = ", ";
2142 }
2143 fprintf (file, ")");
2144 }
2145 }
2146 }
2147 fprintf (file, "\n");
2148 }
2149
2150
2151 /* Dump dataflow info. */
2152
2153 void
2154 df_dump (FILE *file)
2155 {
2156 basic_block bb;
2157 df_dump_start (file);
2158
2159 FOR_ALL_BB_FN (bb, cfun)
2160 {
2161 df_print_bb_index (bb, file);
2162 df_dump_top (bb, file);
2163 df_dump_bottom (bb, file);
2164 }
2165
2166 fprintf (file, "\n");
2167 }
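
/* Typical use from a pass (illustrative only; any stdio stream will
   do), guarded by the usual dump machinery:

       if (dump_file && (dump_flags & TDF_DETAILS))
         df_dump (dump_file);
*/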
2168
2169
2170 /* Dump dataflow info for df->blocks_to_analyze. */
2171
2172 void
2173 df_dump_region (FILE *file)
2174 {
2175 if (df->blocks_to_analyze)
2176 {
2177 bitmap_iterator bi;
2178 unsigned int bb_index;
2179
2180 fprintf (file, "\n\nstarting region dump\n");
2181 df_dump_start (file);
2182
2183 EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
2184 {
2185 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
2186 dump_bb (file, bb, 0, TDF_DETAILS);
2187 }
2188 fprintf (file, "\n");
2189 }
2190 else
2191 df_dump (file);
2192 }
2193
2194
2195 /* Dump the introductory information for each problem defined. */
2196
2197 void
2198 df_dump_start (FILE *file)
2199 {
2200 int i;
2201
2202 if (!df || !file)
2203 return;
2204
2205 fprintf (file, "\n\n%s\n", current_function_name ());
2206 fprintf (file, "\nDataflow summary:\n");
2207 if (df->blocks_to_analyze)
2208 fprintf (file, "def_info->table_size = %d, use_info->table_size = %d\n",
2209 DF_DEFS_TABLE_SIZE (), DF_USES_TABLE_SIZE ());
2210
2211 for (i = 0; i < df->num_problems_defined; i++)
2212 {
2213 struct dataflow *dflow = df->problems_in_order[i];
2214 if (dflow->computed)
2215 {
2216 df_dump_problem_function fun = dflow->problem->dump_start_fun;
2217 if (fun)
2218 fun (file);
2219 }
2220 }
2221 }
2222
2223
2224 /* Dump the top or bottom of the block information for BB. */
2225 static void
2226 df_dump_bb_problem_data (basic_block bb, FILE *file, bool top)
2227 {
2228 int i;
2229
2230 if (!df || !file)
2231 return;
2232
2233 for (i = 0; i < df->num_problems_defined; i++)
2234 {
2235 struct dataflow *dflow = df->problems_in_order[i];
2236 if (dflow->computed)
2237 {
2238 df_dump_bb_problem_function bbfun;
2239
2240 if (top)
2241 bbfun = dflow->problem->dump_top_fun;
2242 else
2243 bbfun = dflow->problem->dump_bottom_fun;
2244
2245 if (bbfun)
2246 bbfun (bb, file);
2247 }
2248 }
2249 }
2250
2251 /* Dump the top of the block information for BB. */
2252
2253 void
2254 df_dump_top (basic_block bb, FILE *file)
2255 {
2256 df_dump_bb_problem_data (bb, file, /*top=*/true);
2257 }
2258
2259 /* Dump the bottom of the block information for BB. */
2260
2261 void
2262 df_dump_bottom (basic_block bb, FILE *file)
2263 {
2264 df_dump_bb_problem_data (bb, file, /*top=*/false);
2265 }
2266
2267
2268 /* Dump information about INSN just before or after dumping INSN itself. */
2269 static void
2270 df_dump_insn_problem_data (const rtx_insn *insn, FILE *file, bool top)
2271 {
2272 int i;
2273
2274 if (!df || !file)
2275 return;
2276
2277 for (i = 0; i < df->num_problems_defined; i++)
2278 {
2279 struct dataflow *dflow = df->problems_in_order[i];
2280 if (dflow->computed)
2281 {
2282 df_dump_insn_problem_function insnfun;
2283
2284 if (top)
2285 insnfun = dflow->problem->dump_insn_top_fun;
2286 else
2287 insnfun = dflow->problem->dump_insn_bottom_fun;
2288
2289 if (insnfun)
2290 insnfun (insn, file);
2291 }
2292 }
2293 }
2294
2295 /* Dump information about INSN before dumping INSN itself. */
2296
2297 void
2298 df_dump_insn_top (const rtx_insn *insn, FILE *file)
2299 {
2300 df_dump_insn_problem_data (insn, file, /*top=*/true);
2301 }
2302
2303 /* Dump information about INSN after dumping INSN itself. */
2304
2305 void
2306 df_dump_insn_bottom (const rtx_insn *insn, FILE *file)
2307 {
2308 df_dump_insn_problem_data (insn, file, /*top=*/false);
2309 }
2310
2311
2312 static void
2313 df_ref_dump (df_ref ref, FILE *file)
2314 {
2315 fprintf (file, "%c%d(%d)",
2316 DF_REF_REG_DEF_P (ref)
2317 ? 'd'
2318 : (DF_REF_FLAGS (ref) & DF_REF_IN_NOTE) ? 'e' : 'u',
2319 DF_REF_ID (ref),
2320 DF_REF_REGNO (ref));
2321 }
2322
2323 void
2324 df_refs_chain_dump (df_ref ref, bool follow_chain, FILE *file)
2325 {
2326 fprintf (file, "{ ");
2327 for (; ref; ref = DF_REF_NEXT_LOC (ref))
2328 {
2329 df_ref_dump (ref, file);
2330 if (follow_chain)
2331 df_chain_dump (DF_REF_CHAIN (ref), file);
2332 }
2333 fprintf (file, "}");
2334 }
2335
2336
2337 /* Dump either a reg-def or reg-use chain. */
2338
2339 void
2340 df_regs_chain_dump (df_ref ref, FILE *file)
2341 {
2342 fprintf (file, "{ ");
2343 while (ref)
2344 {
2345 df_ref_dump (ref, file);
2346 ref = DF_REF_NEXT_REG (ref);
2347 }
2348 fprintf (file, "}");
2349 }
2350
2351
2352 static void
2353 df_mws_dump (struct df_mw_hardreg *mws, FILE *file)
2354 {
2355 for (; mws; mws = DF_MWS_NEXT (mws))
2356 fprintf (file, "mw %c r[%d..%d]\n",
2357 DF_MWS_REG_DEF_P (mws) ? 'd' : 'u',
2358 mws->start_regno, mws->end_regno);
2359 }
2360
2361
2362 static void
2363 df_insn_uid_debug (unsigned int uid,
2364 bool follow_chain, FILE *file)
2365 {
2366 fprintf (file, "insn %d luid %d",
2367 uid, DF_INSN_UID_LUID (uid));
2368
2369 if (DF_INSN_UID_DEFS (uid))
2370 {
2371 fprintf (file, " defs ");
2372 df_refs_chain_dump (DF_INSN_UID_DEFS (uid), follow_chain, file);
2373 }
2374
2375 if (DF_INSN_UID_USES (uid))
2376 {
2377 fprintf (file, " uses ");
2378 df_refs_chain_dump (DF_INSN_UID_USES (uid), follow_chain, file);
2379 }
2380
2381 if (DF_INSN_UID_EQ_USES (uid))
2382 {
2383 fprintf (file, " eq uses ");
2384 df_refs_chain_dump (DF_INSN_UID_EQ_USES (uid), follow_chain, file);
2385 }
2386
2387 if (DF_INSN_UID_MWS (uid))
2388 {
2389 fprintf (file, " mws ");
2390 df_mws_dump (DF_INSN_UID_MWS (uid), file);
2391 }
2392 fprintf (file, "\n");
2393 }
2394
2395
2396 DEBUG_FUNCTION void
2397 df_insn_debug (rtx_insn *insn, bool follow_chain, FILE *file)
2398 {
2399 df_insn_uid_debug (INSN_UID (insn), follow_chain, file);
2400 }
2401
2402 DEBUG_FUNCTION void
2403 df_insn_debug_regno (rtx_insn *insn, FILE *file)
2404 {
2405 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
2406
2407 fprintf (file, "insn %d bb %d luid %d defs ",
2408 INSN_UID (insn), BLOCK_FOR_INSN (insn)->index,
2409 DF_INSN_INFO_LUID (insn_info));
2410 df_refs_chain_dump (DF_INSN_INFO_DEFS (insn_info), false, file);
2411
2412 fprintf (file, " uses ");
2413 df_refs_chain_dump (DF_INSN_INFO_USES (insn_info), false, file);
2414
2415 fprintf (file, " eq_uses ");
2416 df_refs_chain_dump (DF_INSN_INFO_EQ_USES (insn_info), false, file);
2417 fprintf (file, "\n");
2418 }
2419
2420 DEBUG_FUNCTION void
2421 df_regno_debug (unsigned int regno, FILE *file)
2422 {
2423 fprintf (file, "reg %d defs ", regno);
2424 df_regs_chain_dump (DF_REG_DEF_CHAIN (regno), file);
2425 fprintf (file, " uses ");
2426 df_regs_chain_dump (DF_REG_USE_CHAIN (regno), file);
2427 fprintf (file, " eq_uses ");
2428 df_regs_chain_dump (DF_REG_EQ_USE_CHAIN (regno), file);
2429 fprintf (file, "\n");
2430 }
2431
2432
2433 DEBUG_FUNCTION void
2434 df_ref_debug (df_ref ref, FILE *file)
2435 {
2436 fprintf (file, "%c%d ",
2437 DF_REF_REG_DEF_P (ref) ? 'd' : 'u',
2438 DF_REF_ID (ref));
2439 fprintf (file, "reg %d bb %d insn %d flag %#x type %#x ",
2440 DF_REF_REGNO (ref),
2441 DF_REF_BBNO (ref),
2442 DF_REF_IS_ARTIFICIAL (ref) ? -1 : DF_REF_INSN_UID (ref),
2443 DF_REF_FLAGS (ref),
2444 DF_REF_TYPE (ref));
2445 if (DF_REF_LOC (ref))
2446 {
2447 if (flag_dump_noaddr)
2448 fprintf (file, "loc #(#) chain ");
2449 else
2450 fprintf (file, "loc %p(%p) chain ", (void *)DF_REF_LOC (ref),
2451 (void *)*DF_REF_LOC (ref));
2452 }
2453 else
2454 fprintf (file, "chain ");
2455 df_chain_dump (DF_REF_CHAIN (ref), file);
2456 fprintf (file, "\n");
2457 }
2458 \f
2459 /* Functions for debugging from GDB. */
2460
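/* For example (illustrative), from inside gdb while debugging cc1:

       (gdb) call debug_df_insn (insn)
       (gdb) call debug_df_regno (42)

   Each of these routines writes to stderr.  */
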
2461 DEBUG_FUNCTION void
2462 debug_df_insn (rtx_insn *insn)
2463 {
2464 df_insn_debug (insn, true, stderr);
2465 debug_rtx (insn);
2466 }
2467
2468
2469 DEBUG_FUNCTION void
2470 debug_df_reg (rtx reg)
2471 {
2472 df_regno_debug (REGNO (reg), stderr);
2473 }
2474
2475
2476 DEBUG_FUNCTION void
2477 debug_df_regno (unsigned int regno)
2478 {
2479 df_regno_debug (regno, stderr);
2480 }
2481
2482
2483 DEBUG_FUNCTION void
2484 debug_df_ref (df_ref ref)
2485 {
2486 df_ref_debug (ref, stderr);
2487 }
2488
2489
2490 DEBUG_FUNCTION void
2491 debug_df_defno (unsigned int defno)
2492 {
2493 df_ref_debug (DF_DEFS_GET (defno), stderr);
2494 }
2495
2496
2497 DEBUG_FUNCTION void
2498 debug_df_useno (unsigned int defno)
2499 {
2500 df_ref_debug (DF_USES_GET (defno), stderr);
2501 }
2502
2503
2504 DEBUG_FUNCTION void
2505 debug_df_chain (struct df_link *link)
2506 {
2507 df_chain_dump (link, stderr);
2508 fputc ('\n', stderr);
2509 }