gcc/df-core.c
1 /* Allocation for dataflow support routines.
2 Copyright (C) 1999-2014 Free Software Foundation, Inc.
3 Originally contributed by Michael P. Hayes
4 (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
5 Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
6 and Kenneth Zadeck (zadeck@naturalbridge.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 /*
25 OVERVIEW:
26
27 The files in this collection (df*.c,df.h) provide a general framework
 28    for solving dataflow problems.  The global dataflow solution is computed
 29    using an efficient implementation of iterative dataflow analysis.
30
 31    The file df-problems.c provides problem instances for the most common
32 dataflow problems: reaching defs, upward exposed uses, live variables,
33 uninitialized variables, def-use chains, and use-def chains. However,
34 the interface allows other dataflow problems to be defined as well.
35
36 Dataflow analysis is available in most of the rtl backend (the parts
37 between pass_df_initialize and pass_df_finish). It is quite likely
38 that these boundaries will be expanded in the future. The only
39 requirement is that there be a correct control flow graph.
40
41 There are three variations of the live variable problem that are
 42    available whenever dataflow is available.  The LR problem finds the
 43    areas that can reach a use of a variable; the UR problem finds the
 44    areas that can be reached from a definition of a variable.  The LIVE
 45    problem finds the intersection of these two areas.
46
47 There are several optional problems. These can be enabled when they
48 are needed and disabled when they are not needed.
49
50 Dataflow problems are generally solved in three layers. The bottom
51 layer is called scanning where a data structure is built for each rtl
52 insn that describes the set of defs and uses of that insn. Scanning
 53    is generally kept up to date, i.e. as an insn changes, the scanned
 54    version of that insn changes also.  There are various mechanisms for
 55    making this happen; they are described in the INCREMENTAL SCANNING
 56    section.
57
58 In the middle layer, basic blocks are scanned to produce transfer
59 functions which describe the effects of that block on the global
 60    dataflow solution.  The transfer functions are only rebuilt if
 61    some instruction within the block has changed.
62
63 The top layer is the dataflow solution itself. The dataflow solution
64 is computed by using an efficient iterative solver and the transfer
 65    functions.  The dataflow solution must be recomputed whenever the
 66    control flow changes or one of the transfer functions changes.
67
68
69 USAGE:
70
71 Here is an example of using the dataflow routines.
72
73 df_[chain,live,note,rd]_add_problem (flags);
74
75 df_set_blocks (blocks);
76
77 df_analyze ();
78
79 df_dump (stderr);
80
81 df_finish_pass (false);
82
 83    DF_[chain,live,note,rd]_ADD_PROBLEM adds a problem, defined by an
 84    instance of struct df_problem, to the set of problems solved in this
 85    instance of df.  All calls to add a problem for a given instance of df
 86    must occur before the first call to DF_ANALYZE.
87
88 Problems can be dependent on other problems. For instance, solving
89 def-use or use-def chains is dependent on solving reaching
90 definitions. As long as these dependencies are listed in the problem
91 definition, the order of adding the problems is not material.
92 Otherwise, the problems will be solved in the order of calls to
 93    df_add_problem.  Note that it is not necessary to add any problems; in
 94    that case, df will just be used to do the scanning.
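
   For instance, a pass that wants def-use and use-def chains maintained
   might start with something like this (a sketch; the exact set of
   problems and flags depends on the pass):

      df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
      df_analyze ();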
95
96
97
98 DF_SET_BLOCKS is an optional call used to define a region of the
99 function on which the analysis will be performed. The normal case is
100 to analyze the entire function and no call to df_set_blocks is made.
 101   DF_SET_BLOCKS only affects which blocks are considered when computing
102 the transfer functions and final solution. The insn level information
103 is always kept up to date.
104
105 When a subset is given, the analysis behaves as if the function only
106 contains those blocks and any edges that occur directly between the
107 blocks in the set. Care should be taken to call df_set_blocks right
108 before the call to analyze in order to eliminate the possibility that
109 optimizations that reorder blocks invalidate the bitvector.
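
   For example, to restrict the analysis to the blocks of a single loop
   (a sketch; this is roughly what df_analyze_loop below does, with BBS
   standing for an array holding the loop's basic blocks):

      bitmap blocks = BITMAP_ALLOC (&df_bitmap_obstack);
      for (i = 0; i < loop->num_nodes; i++)
	bitmap_set_bit (blocks, bbs[i]->index);
      df_set_blocks (blocks);
      df_analyze ();
      BITMAP_FREE (blocks);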
110
111 DF_ANALYZE causes all of the defined problems to be (re)solved. When
 112   DF_ANALYZE completes, the IN and OUT sets for each basic block
 113   contain the computed information.  The DF_*_BB_INFO macros can be used
 114   to access these bitvectors.  All deferred rescannings are done before
115 the transfer functions are recomputed.
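
   For example (a sketch, assuming the permanent LR problem), the live-in
   registers of a block BB can then be inspected with:

      bitmap in = df_get_live_in (bb);	/* or &DF_LR_BB_INFO (bb)->in  */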
116
 117   DF_DUMP can then be called to dump the information produced to some
 118   file.  This calls DF_DUMP_START, to print the information that is not
 119   basic block specific, and then calls DF_DUMP_TOP and DF_DUMP_BOTTOM
 120   for each block to print the basic block specific information.  These parts
121 can all be called separately as part of a larger dump function.
122
123
124 DF_FINISH_PASS causes df_remove_problem to be called on all of the
125 optional problems. It also causes any insns whose scanning has been
 126   deferred to be rescanned, and clears all of the changeable flags.
127 Setting the pass manager TODO_df_finish flag causes this function to
128 be run. However, the pass manager will call df_finish_pass AFTER the
129 pass dumping has been done, so if you want to see the results of the
130 optional problems in the pass dumps, use the TODO flag rather than
131 calling the function yourself.
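
   For example, an RTL pass would normally just put TODO_df_finish in
   the todo_flags_finish field of its pass_data (a sketch):

      ...
      0, /* todo_flags_start */
      TODO_df_finish, /* todo_flags_finish */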
132
133 INCREMENTAL SCANNING
134
135 There are four ways of doing the incremental scanning:
136
137 1) Immediate rescanning - Calls to df_insn_rescan, df_notes_rescan,
138 df_bb_delete, df_insn_change_bb have been added to most of
139 the low level service functions that maintain the cfg and change
 140      rtl.  Calling any of these routines may cause some number of insns
141 to be rescanned.
142
143 For most modern rtl passes, this is certainly the easiest way to
144 manage rescanning the insns. This technique also has the advantage
145 that the scanning information is always correct and can be relied
146 upon even after changes have been made to the instructions. This
 147      technique is contraindicated in several cases:
148
149 a) If def-use chains OR use-def chains (but not both) are built,
150 using this is SIMPLY WRONG. The problem is that when a ref is
151 deleted that is the target of an edge, there is not enough
152 information to efficiently find the source of the edge and
153 delete the edge. This leaves a dangling reference that may
154 cause problems.
155
156 b) If def-use chains AND use-def chains are built, this may
157 produce unexpected results. The problem is that the incremental
158 scanning of an insn does not know how to repair the chains that
159 point into an insn when the insn changes. So the incremental
160 scanning just deletes the chains that enter and exit the insn
161 being changed. The dangling reference issue in (a) is not a
 162      problem here, but if the pass depends on the chains being
163 maintained after insns have been modified, this technique will
164 not do the correct thing.
165
166 c) If the pass modifies insns several times, this incremental
167 updating may be expensive.
168
169 d) If the pass modifies all of the insns, as does register
170 allocation, it is simply better to rescan the entire function.
171
172 2) Deferred rescanning - Calls to df_insn_rescan, df_notes_rescan, and
173 df_insn_delete do not immediately change the insn but instead make
174 a note that the insn needs to be rescanned. The next call to
175 df_analyze, df_finish_pass, or df_process_deferred_rescans will
176 cause all of the pending rescans to be processed.
177
 178      This is the technique of choice if any of 1a, 1b, or 1c is an issue
179 in the pass. In the case of 1a or 1b, a call to df_finish_pass
180 (either manually or via TODO_df_finish) should be made before the
181 next call to df_analyze or df_process_deferred_rescans.
182
183 This mode is also used by a few passes that still rely on note_uses,
184 note_stores and for_each_rtx instead of using the DF data. This
185 can be said to fall under case 1c.
186
187 To enable this mode, call df_set_flags (DF_DEFER_INSN_RESCAN).
 188      (This mode can be cleared by calling df_clear_flags
 189      (DF_DEFER_INSN_RESCAN), but this does not cause the deferred insns to
 190      be rescanned.)  An example of this mode appears at the end of this list.
191
192 3) Total rescanning - In this mode the rescanning is disabled.
 193      Only when insns are deleted is the df information associated with
 194      them also deleted.  At the end of the pass, a call must be made to
195 df_insn_rescan_all. This method is used by the register allocator
196 since it generally changes each insn multiple times (once for each ref)
197 and does not need to make use of the updated scanning information.
198
199 4) Do it yourself - In this mechanism, the pass updates the insns
200 itself using the low level df primitives. Currently no pass does
201 this, but it has the advantage that it is quite efficient given
202 that the pass generally has exact knowledge of what it is changing.
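
   As an example of mode 2 (deferred rescanning), a pass might do
   something like this (a sketch):

      df_set_flags (DF_DEFER_INSN_RESCAN);
      ... modify insns, calling df_insn_rescan (insn) on each changed insn ...
      df_analyze ();	/* or df_process_deferred_rescans ()  */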
203
204 DATA STRUCTURES
205
 206   Scanning produces a `struct df_ref' data structure (ref) for every
 207   register reference (def or use); the ref records the insn and bb it
 208   is found within.  The refs are linked together in
209 chains of uses and defs for each insn and for each register. Each ref
210 also has a chain field that links all the use refs for a def or all
211 the def refs for a use. This is used to create use-def or def-use
212 chains.
213
214 Different optimizations have different needs. Ultimately, only
215 register allocation and schedulers should be using the bitmaps
216 produced for the live register and uninitialized register problems.
 217   The rest of the backend should be upgraded to use and maintain
 218   the linked information such as def-use or use-def chains.
219
220
221 PHILOSOPHY:
222
223 While incremental bitmaps are not worthwhile to maintain, incremental
224 chains may be perfectly reasonable. The fastest way to build chains
225 from scratch or after significant modifications is to build reaching
226 definitions (RD) and build the chains from this.
227
228 However, general algorithms for maintaining use-def or def-use chains
 229   are not practical.  The amount of work to recompute any chain after
 230   an arbitrary change is large.  However, with a modest
231 amount of work it is generally possible to have the application that
232 uses the chains keep them up to date. The high level knowledge of
233 what is really happening is essential to crafting efficient
234 incremental algorithms.
235
 236   As for the bit vector problems, there is no interface to give a set of
 237   blocks over which to resolve the iteration.  In general, restarting a
 238   dataflow iteration is difficult and expensive.  Again, the best way to
 239   keep the dataflow information up to date (if this is really what is
 240   needed) is to formulate a problem specific solution.
241
242 There are fine grained calls for creating and deleting references from
243 instructions in df-scan.c. However, these are not currently connected
244 to the engine that resolves the dataflow equations.
245
246
247 DATA STRUCTURES:
248
249 The basic object is a DF_REF (reference) and this may either be a
250 DEF (definition) or a USE of a register.
251
252 These are linked into a variety of lists; namely reg-def, reg-use,
253 insn-def, insn-use, def-use, and use-def lists. For example, the
 254   reg-def lists contain all the locations that define a given register
 255   while the insn-use lists contain all the uses within a given
 256   insn.
257
258 Note that the reg-def and reg-use chains are generally short for
259 pseudos and long for the hard registers.
260
261 ACCESSING INSNS:
262
263 1) The df insn information is kept in an array of DF_INSN_INFO objects.
264 The array is indexed by insn uid, and every DF_REF points to the
265 DF_INSN_INFO object of the insn that contains the reference.
266
267 2) Each insn has three sets of refs, which are linked into one of three
268 lists: The insn's defs list (accessed by the DF_INSN_INFO_DEFS,
269 DF_INSN_DEFS, or DF_INSN_UID_DEFS macros), the insn's uses list
270 (accessed by the DF_INSN_INFO_USES, DF_INSN_USES, or
271 DF_INSN_UID_USES macros) or the insn's eq_uses list (accessed by the
272 DF_INSN_INFO_EQ_USES, DF_INSN_EQ_USES or DF_INSN_UID_EQ_USES macros).
 273      The latter is the list of references in REG_EQUAL or REG_EQUIV
 274      notes.  These macros produce a ref (or NULL); the rest of the list
 275      can be obtained by traversal of the NEXT_REF field (accessed by the
 276      DF_REF_NEXT_REF macro); see the example at the end of this section.
 277      There is no significance to the ordering of the uses or refs in an instruction.
278
279 3) Each insn has a logical uid field (LUID) which is stored in the
280 DF_INSN_INFO object for the insn. The LUID field is accessed by
281 the DF_INSN_INFO_LUID, DF_INSN_LUID, and DF_INSN_UID_LUID macros.
282 When properly set, the LUID is an integer that numbers each insn in
283 the basic block, in order from the start of the block.
 284      The numbers are only correct after a call to df_analyze.  They will
 285      rot after insns are added, deleted or moved around.
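
   For example, following the description in (2), all the uses recorded
   for an insn INSN could be visited with something like (a sketch):

      df_ref use;
      for (use = DF_INSN_USES (insn); use; use = DF_REF_NEXT_REF (use))
	... look at DF_REF_REGNO (use), DF_REF_REG (use), etc. ...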
286
287 ACCESSING REFS:
288
289 There are 4 ways to obtain access to refs:
290
291 1) References are divided into two categories, REAL and ARTIFICIAL.
292
293 REAL refs are associated with instructions.
294
295 ARTIFICIAL refs are associated with basic blocks. The heads of
296 these lists can be accessed by calling df_get_artificial_defs or
297 df_get_artificial_uses for the particular basic block.
298
299 Artificial defs and uses occur both at the beginning and ends of blocks.
300
 301      For blocks that are at the destination of eh edges, the
 302      artificial uses and defs occur at the beginning.  The defs relate
 303      to the registers specified in EH_RETURN_DATA_REGNO and the uses
 304      relate to the registers specified in EH_USES.  Logically these
 305      defs and uses should really occur along the eh edge, but there is
 306      no convenient way to do this.  Artificial refs that occur at the
 307      beginning of the block have the DF_REF_AT_TOP flag set.
308
309 Artificial uses occur at the end of all blocks. These arise from
 310      the hard registers that are always live, such as the stack
 311      register, and are put there to keep the code from forgetting about
312 them.
313
314 Artificial defs occur at the end of the entry block. These arise
315 from registers that are live at entry to the function.
316
317 2) There are three types of refs: defs, uses and eq_uses. (Eq_uses are
318 uses that appear inside a REG_EQUAL or REG_EQUIV note.)
319
320 All of the eq_uses, uses and defs associated with each pseudo or
321 hard register may be linked in a bidirectional chain. These are
 322      called reg-use or reg-def chains.  If the changeable flag
323 DF_EQ_NOTES is set when the chains are built, the eq_uses will be
324 treated like uses. If it is not set they are ignored.
325
326 The first use, eq_use or def for a register can be obtained using
327 the DF_REG_USE_CHAIN, DF_REG_EQ_USE_CHAIN or DF_REG_DEF_CHAIN
 328      macros.  Subsequent uses for the same regno can be obtained by following the
 329      next_reg field of the ref (see the example below).  The number of elements in
330 each of the chains can be found by using the DF_REG_USE_COUNT,
331 DF_REG_EQ_USE_COUNT or DF_REG_DEF_COUNT macros.
332
333 In previous versions of this code, these chains were ordered. It
334 has not been practical to continue this practice.
335
336 3) If def-use or use-def chains are built, these can be traversed to
337 get to other refs. If the flag DF_EQ_NOTES has been set, the chains
338 include the eq_uses. Otherwise these are ignored when building the
339 chains.
340
341 4) An array of all of the uses (and an array of all of the defs) can
 342      be built.  These arrays are indexed by the value in the id
 343      field of the ref.  These arrays are only lazily kept up to date, and that
344 process can be expensive. To have these arrays built, call
345 df_reorganize_defs or df_reorganize_uses. If the flag DF_EQ_NOTES
346 has been set the array will contain the eq_uses. Otherwise these
347 are ignored when building the array and assigning the ids. Note
348 that the values in the id field of a ref may change across calls to
349 df_analyze or df_reorganize_defs or df_reorganize_uses.
350
351 If the only use of this array is to find all of the refs, it is
 352      better to traverse all of the registers and then traverse all of
 353      the reg-use or reg-def chains.
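
   For example, following (2), all of the uses of register REGNO could
   be walked along the reg-use chain with something like (a sketch;
   DF_REF_NEXT_REG follows the next_reg field mentioned above):

      df_ref use;
      for (use = DF_REG_USE_CHAIN (regno); use; use = DF_REF_NEXT_REG (use))
	...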
354
355 NOTES:
356
357 Embedded addressing side-effects, such as POST_INC or PRE_INC, generate
358 both a use and a def. These are both marked read/write to show that they
359 are dependent. For example, (set (reg 40) (mem (post_inc (reg 42))))
360 will generate a use of reg 42 followed by a def of reg 42 (both marked
361 read/write). Similarly, (set (reg 40) (mem (pre_dec (reg 41))))
362 generates a use of reg 41 then a def of reg 41 (both marked read/write),
363 even though reg 41 is decremented before it is used for the memory
364 address in this second example.
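
   Such refs can be recognized by testing their flags, e.g. (a sketch):

      if (DF_REF_FLAGS (ref) & DF_REF_READ_WRITE)
	...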
365
366 A set to a REG inside a ZERO_EXTRACT, or a set to a non-paradoxical SUBREG
367 for which the number of word_mode units covered by the outer mode is
368 smaller than that covered by the inner mode, invokes a read-modify-write
369 operation. We generate both a use and a def and again mark them
370 read/write.
371
372 Paradoxical subreg writes do not leave a trace of the old content, so they
373 are write-only operations.
374 */
375
376
377 #include "config.h"
378 #include "system.h"
379 #include "coretypes.h"
380 #include "tm.h"
381 #include "rtl.h"
382 #include "tm_p.h"
383 #include "insn-config.h"
384 #include "recog.h"
385 #include "function.h"
386 #include "regs.h"
387 #include "alloc-pool.h"
388 #include "flags.h"
389 #include "hard-reg-set.h"
390 #include "basic-block.h"
391 #include "sbitmap.h"
392 #include "bitmap.h"
393 #include "df.h"
394 #include "tree-pass.h"
395 #include "params.h"
396 #include "cfgloop.h"
397
398 static void *df_get_bb_info (struct dataflow *, unsigned int);
399 static void df_set_bb_info (struct dataflow *, unsigned int, void *);
400 static void df_clear_bb_info (struct dataflow *, unsigned int);
401 #ifdef DF_DEBUG_CFG
402 static void df_set_clean_cfg (void);
403 #endif
404
405 /* The obstack on which regsets are allocated. */
406 struct bitmap_obstack reg_obstack;
407
 408 /* An obstack for bitmaps not related to specific dataflow problems.
409 This obstack should e.g. be used for bitmaps with a short life time
410 such as temporary bitmaps. */
411
412 bitmap_obstack df_bitmap_obstack;
413
414
415 /*----------------------------------------------------------------------------
416 Functions to create, destroy and manipulate an instance of df.
417 ----------------------------------------------------------------------------*/
418
419 struct df_d *df;
420
421 /* Add PROBLEM (and any dependent problems) to the DF instance. */
422
423 void
424 df_add_problem (struct df_problem *problem)
425 {
426 struct dataflow *dflow;
427 int i;
428
429 /* First try to add the dependent problem. */
430 if (problem->dependent_problem)
431 df_add_problem (problem->dependent_problem);
432
 433   /* Check to see if this problem has already been defined.  If it
 434      has, just return; if not, add it to the end of the
 435      vector.  */
436 dflow = df->problems_by_index[problem->id];
437 if (dflow)
438 return;
439
440 /* Make a new one and add it to the end. */
441 dflow = XCNEW (struct dataflow);
442 dflow->problem = problem;
443 dflow->computed = false;
444 dflow->solutions_dirty = true;
445 df->problems_by_index[dflow->problem->id] = dflow;
446
447 /* Keep the defined problems ordered by index. This solves the
448 problem that RI will use the information from UREC if UREC has
449 been defined, or from LIVE if LIVE is defined and otherwise LR.
450 However for this to work, the computation of RI must be pushed
451 after which ever of those problems is defined, but we do not
452 require any of those except for LR to have actually been
453 defined. */
454 df->num_problems_defined++;
455 for (i = df->num_problems_defined - 2; i >= 0; i--)
456 {
457 if (problem->id < df->problems_in_order[i]->problem->id)
458 df->problems_in_order[i+1] = df->problems_in_order[i];
459 else
460 {
461 df->problems_in_order[i+1] = dflow;
462 return;
463 }
464 }
465 df->problems_in_order[0] = dflow;
466 }
467
468
 469 /* Set the CHANGEABLE_FLAGS in the df instance.  The old flags are
 470    returned.  If a flag is not allowed to be changed this will fail if
 471    checking is enabled.  */
472 int
473 df_set_flags (int changeable_flags)
474 {
475 int old_flags = df->changeable_flags;
476 df->changeable_flags |= changeable_flags;
477 return old_flags;
478 }
479
480
 481 /* Clear the CHANGEABLE_FLAGS in the df instance.  The old flags are
482 returned. If a flag is not allowed to be changed this will fail if
483 checking is enabled. */
484 int
485 df_clear_flags (int changeable_flags)
486 {
487 int old_flags = df->changeable_flags;
488 df->changeable_flags &= ~changeable_flags;
489 return old_flags;
490 }
491
492
493 /* Set the blocks that are to be considered for analysis. If this is
 494    not called or is called with null, the entire function is
495 analyzed. */
496
497 void
498 df_set_blocks (bitmap blocks)
499 {
500 if (blocks)
501 {
502 if (dump_file)
503 bitmap_print (dump_file, blocks, "setting blocks to analyze ", "\n");
504 if (df->blocks_to_analyze)
505 {
506 /* This block is called to change the focus from one subset
507 to another. */
508 int p;
509 bitmap_head diff;
510 bitmap_initialize (&diff, &df_bitmap_obstack);
511 bitmap_and_compl (&diff, df->blocks_to_analyze, blocks);
512 for (p = 0; p < df->num_problems_defined; p++)
513 {
514 struct dataflow *dflow = df->problems_in_order[p];
515 if (dflow->optional_p && dflow->problem->reset_fun)
516 dflow->problem->reset_fun (df->blocks_to_analyze);
517 else if (dflow->problem->free_blocks_on_set_blocks)
518 {
519 bitmap_iterator bi;
520 unsigned int bb_index;
521
522 EXECUTE_IF_SET_IN_BITMAP (&diff, 0, bb_index, bi)
523 {
524 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
525 if (bb)
526 {
527 void *bb_info = df_get_bb_info (dflow, bb_index);
528 dflow->problem->free_bb_fun (bb, bb_info);
529 df_clear_bb_info (dflow, bb_index);
530 }
531 }
532 }
533 }
534
535 bitmap_clear (&diff);
536 }
537 else
538 {
539 /* This block of code is executed to change the focus from
540 the entire function to a subset. */
541 bitmap_head blocks_to_reset;
542 bool initialized = false;
543 int p;
544 for (p = 0; p < df->num_problems_defined; p++)
545 {
546 struct dataflow *dflow = df->problems_in_order[p];
547 if (dflow->optional_p && dflow->problem->reset_fun)
548 {
549 if (!initialized)
550 {
551 basic_block bb;
552 bitmap_initialize (&blocks_to_reset, &df_bitmap_obstack);
553 FOR_ALL_BB_FN (bb, cfun)
554 {
555 bitmap_set_bit (&blocks_to_reset, bb->index);
 556 			}
 		      initialized = true;
 557 		    }
558 dflow->problem->reset_fun (&blocks_to_reset);
559 }
560 }
561 if (initialized)
562 bitmap_clear (&blocks_to_reset);
563
564 df->blocks_to_analyze = BITMAP_ALLOC (&df_bitmap_obstack);
565 }
566 bitmap_copy (df->blocks_to_analyze, blocks);
567 df->analyze_subset = true;
568 }
569 else
570 {
571 /* This block is executed to reset the focus to the entire
572 function. */
573 if (dump_file)
574 fprintf (dump_file, "clearing blocks_to_analyze\n");
575 if (df->blocks_to_analyze)
576 {
577 BITMAP_FREE (df->blocks_to_analyze);
578 df->blocks_to_analyze = NULL;
579 }
580 df->analyze_subset = false;
581 }
582
583 /* Setting the blocks causes the refs to be unorganized since only
584 the refs in the blocks are seen. */
585 df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE);
586 df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE);
587 df_mark_solutions_dirty ();
588 }
589
590
591 /* Delete a DFLOW problem (and any problems that depend on this
592 problem). */
593
594 void
595 df_remove_problem (struct dataflow *dflow)
596 {
597 struct df_problem *problem;
598 int i;
599
600 if (!dflow)
601 return;
602
603 problem = dflow->problem;
604 gcc_assert (problem->remove_problem_fun);
605
606 /* Delete any problems that depended on this problem first. */
607 for (i = 0; i < df->num_problems_defined; i++)
608 if (df->problems_in_order[i]->problem->dependent_problem == problem)
609 df_remove_problem (df->problems_in_order[i]);
610
611 /* Now remove this problem. */
612 for (i = 0; i < df->num_problems_defined; i++)
613 if (df->problems_in_order[i] == dflow)
614 {
615 int j;
616 for (j = i + 1; j < df->num_problems_defined; j++)
617 df->problems_in_order[j-1] = df->problems_in_order[j];
618 df->problems_in_order[j-1] = NULL;
619 df->num_problems_defined--;
620 break;
621 }
622
623 (problem->remove_problem_fun) ();
624 df->problems_by_index[problem->id] = NULL;
625 }
626
627
628 /* Remove all of the problems that are not permanent. Scanning, LR
629 and (at -O2 or higher) LIVE are permanent, the rest are removable.
630 Also clear all of the changeable_flags. */
631
632 void
633 df_finish_pass (bool verify ATTRIBUTE_UNUSED)
634 {
635 int i;
636 int removed = 0;
637
638 #ifdef ENABLE_DF_CHECKING
639 int saved_flags;
640 #endif
641
642 if (!df)
643 return;
644
645 df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE);
646 df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE);
647
648 #ifdef ENABLE_DF_CHECKING
649 saved_flags = df->changeable_flags;
650 #endif
651
652 for (i = 0; i < df->num_problems_defined; i++)
653 {
654 struct dataflow *dflow = df->problems_in_order[i];
655 struct df_problem *problem = dflow->problem;
656
657 if (dflow->optional_p)
658 {
659 gcc_assert (problem->remove_problem_fun);
660 (problem->remove_problem_fun) ();
661 df->problems_in_order[i] = NULL;
662 df->problems_by_index[problem->id] = NULL;
663 removed++;
664 }
665 }
666 df->num_problems_defined -= removed;
667
668 /* Clear all of the flags. */
669 df->changeable_flags = 0;
670 df_process_deferred_rescans ();
671
672 /* Set the focus back to the whole function. */
673 if (df->blocks_to_analyze)
674 {
675 BITMAP_FREE (df->blocks_to_analyze);
676 df->blocks_to_analyze = NULL;
677 df_mark_solutions_dirty ();
678 df->analyze_subset = false;
679 }
680
681 #ifdef ENABLE_DF_CHECKING
682 /* Verification will fail in DF_NO_INSN_RESCAN. */
683 if (!(saved_flags & DF_NO_INSN_RESCAN))
684 {
685 df_lr_verify_transfer_functions ();
686 if (df_live)
687 df_live_verify_transfer_functions ();
688 }
689
690 #ifdef DF_DEBUG_CFG
691 df_set_clean_cfg ();
692 #endif
693 #endif
694
695 #ifdef ENABLE_CHECKING
696 if (verify)
697 df->changeable_flags |= DF_VERIFY_SCHEDULED;
698 #endif
699 }
700
701
702 /* Set up the dataflow instance for the entire back end. */
703
704 static unsigned int
705 rest_of_handle_df_initialize (void)
706 {
707 gcc_assert (!df);
708 df = XCNEW (struct df_d);
709 df->changeable_flags = 0;
710
711 bitmap_obstack_initialize (&df_bitmap_obstack);
712
713 /* Set this to a conservative value. Stack_ptr_mod will compute it
714 correctly later. */
715 crtl->sp_is_unchanging = 0;
716
717 df_scan_add_problem ();
718 df_scan_alloc (NULL);
719
720 /* These three problems are permanent. */
721 df_lr_add_problem ();
722 if (optimize > 1)
723 df_live_add_problem ();
724
725 df->postorder = XNEWVEC (int, last_basic_block_for_fn (cfun));
726 df->postorder_inverted = XNEWVEC (int, last_basic_block_for_fn (cfun));
727 df->n_blocks = post_order_compute (df->postorder, true, true);
728 df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted);
729 gcc_assert (df->n_blocks == df->n_blocks_inverted);
730
731 df->hard_regs_live_count = XCNEWVEC (unsigned int, FIRST_PSEUDO_REGISTER);
732
733 df_hard_reg_init ();
734 /* After reload, some ports add certain bits to regs_ever_live so
735 this cannot be reset. */
736 df_compute_regs_ever_live (true);
737 df_scan_blocks ();
738 df_compute_regs_ever_live (false);
739 return 0;
740 }
741
742
743 static bool
744 gate_opt (void)
745 {
746 return optimize > 0;
747 }
748
749
750 namespace {
751
752 const pass_data pass_data_df_initialize_opt =
753 {
754 RTL_PASS, /* type */
755 "dfinit", /* name */
756 OPTGROUP_NONE, /* optinfo_flags */
757 true, /* has_gate */
758 true, /* has_execute */
759 TV_DF_SCAN, /* tv_id */
760 0, /* properties_required */
761 0, /* properties_provided */
762 0, /* properties_destroyed */
763 0, /* todo_flags_start */
764 0, /* todo_flags_finish */
765 };
766
767 class pass_df_initialize_opt : public rtl_opt_pass
768 {
769 public:
770 pass_df_initialize_opt (gcc::context *ctxt)
771 : rtl_opt_pass (pass_data_df_initialize_opt, ctxt)
772 {}
773
774 /* opt_pass methods: */
775 bool gate () { return gate_opt (); }
776 unsigned int execute () { return rest_of_handle_df_initialize (); }
777
778 }; // class pass_df_initialize_opt
779
780 } // anon namespace
781
782 rtl_opt_pass *
783 make_pass_df_initialize_opt (gcc::context *ctxt)
784 {
785 return new pass_df_initialize_opt (ctxt);
786 }
787
788
789 static bool
790 gate_no_opt (void)
791 {
792 return optimize == 0;
793 }
794
795
796 namespace {
797
798 const pass_data pass_data_df_initialize_no_opt =
799 {
800 RTL_PASS, /* type */
801 "no-opt dfinit", /* name */
802 OPTGROUP_NONE, /* optinfo_flags */
803 true, /* has_gate */
804 true, /* has_execute */
805 TV_DF_SCAN, /* tv_id */
806 0, /* properties_required */
807 0, /* properties_provided */
808 0, /* properties_destroyed */
809 0, /* todo_flags_start */
810 0, /* todo_flags_finish */
811 };
812
813 class pass_df_initialize_no_opt : public rtl_opt_pass
814 {
815 public:
816 pass_df_initialize_no_opt (gcc::context *ctxt)
817 : rtl_opt_pass (pass_data_df_initialize_no_opt, ctxt)
818 {}
819
820 /* opt_pass methods: */
821 bool gate () { return gate_no_opt (); }
822 unsigned int execute () { return rest_of_handle_df_initialize (); }
823
824 }; // class pass_df_initialize_no_opt
825
826 } // anon namespace
827
828 rtl_opt_pass *
829 make_pass_df_initialize_no_opt (gcc::context *ctxt)
830 {
831 return new pass_df_initialize_no_opt (ctxt);
832 }
833
834
835 /* Free all the dataflow info and the DF structure. This should be
836 called from the df_finish macro which also NULLs the parm. */
837
838 static unsigned int
839 rest_of_handle_df_finish (void)
840 {
841 int i;
842
843 gcc_assert (df);
844
845 for (i = 0; i < df->num_problems_defined; i++)
846 {
847 struct dataflow *dflow = df->problems_in_order[i];
848 dflow->problem->free_fun ();
849 }
850
851 free (df->postorder);
852 free (df->postorder_inverted);
853 free (df->hard_regs_live_count);
854 free (df);
855 df = NULL;
856
857 bitmap_obstack_release (&df_bitmap_obstack);
858 return 0;
859 }
860
861
862 namespace {
863
864 const pass_data pass_data_df_finish =
865 {
866 RTL_PASS, /* type */
867 "dfinish", /* name */
868 OPTGROUP_NONE, /* optinfo_flags */
869 false, /* has_gate */
870 true, /* has_execute */
871 TV_NONE, /* tv_id */
872 0, /* properties_required */
873 0, /* properties_provided */
874 0, /* properties_destroyed */
875 0, /* todo_flags_start */
876 0, /* todo_flags_finish */
877 };
878
879 class pass_df_finish : public rtl_opt_pass
880 {
881 public:
882 pass_df_finish (gcc::context *ctxt)
883 : rtl_opt_pass (pass_data_df_finish, ctxt)
884 {}
885
886 /* opt_pass methods: */
887 unsigned int execute () { return rest_of_handle_df_finish (); }
888
889 }; // class pass_df_finish
890
891 } // anon namespace
892
893 rtl_opt_pass *
894 make_pass_df_finish (gcc::context *ctxt)
895 {
896 return new pass_df_finish (ctxt);
897 }
898
899
900
901
902 \f
903 /*----------------------------------------------------------------------------
904 The general data flow analysis engine.
905 ----------------------------------------------------------------------------*/
906
 907 /* Return the time when BB was last visited.  */
908 #define BB_LAST_CHANGE_AGE(bb) ((ptrdiff_t)(bb)->aux)
909
910 /* Helper function for df_worklist_dataflow.
911 Propagate the dataflow forward.
912 Given a BB_INDEX, do the dataflow propagation
913 and set bits on for successors in PENDING
914 if the out set of the dataflow has changed.
915
 916    AGE specifies the time when BB was last visited.
 917    An AGE of 0 means we are visiting for the first time and need to
 918    compute the transfer function to initialize the data structures.
 919    Otherwise we re-do the transfer function only if something changed
 920    while computing the confluence functions.
 921    We only need to compute the confluence function for edges whose
 922    sources changed since the last visit of BB.
923
924 Return true if BB info has changed. This is always the case
925 in the first visit. */
926
927 static bool
928 df_worklist_propagate_forward (struct dataflow *dataflow,
929 unsigned bb_index,
930 unsigned *bbindex_to_postorder,
931 bitmap pending,
932 sbitmap considered,
933 ptrdiff_t age)
934 {
935 edge e;
936 edge_iterator ei;
937 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
938 bool changed = !age;
939
940 /* Calculate <conf_op> of incoming edges. */
941 if (EDGE_COUNT (bb->preds) > 0)
942 FOR_EACH_EDGE (e, ei, bb->preds)
943 {
944 if (age <= BB_LAST_CHANGE_AGE (e->src)
945 && bitmap_bit_p (considered, e->src->index))
946 changed |= dataflow->problem->con_fun_n (e);
947 }
948 else if (dataflow->problem->con_fun_0)
949 dataflow->problem->con_fun_0 (bb);
950
951 if (changed
952 && dataflow->problem->trans_fun (bb_index))
953 {
954 /* The out set of this block has changed.
955 Propagate to the outgoing blocks. */
956 FOR_EACH_EDGE (e, ei, bb->succs)
957 {
958 unsigned ob_index = e->dest->index;
959
960 if (bitmap_bit_p (considered, ob_index))
961 bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
962 }
963 return true;
964 }
965 return false;
966 }
967
968
969 /* Helper function for df_worklist_dataflow.
970 Propagate the dataflow backward. */
971
972 static bool
973 df_worklist_propagate_backward (struct dataflow *dataflow,
974 unsigned bb_index,
975 unsigned *bbindex_to_postorder,
976 bitmap pending,
977 sbitmap considered,
978 ptrdiff_t age)
979 {
980 edge e;
981 edge_iterator ei;
982 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
983 bool changed = !age;
984
 985   /* Calculate <conf_op> of outgoing edges.  */
986 if (EDGE_COUNT (bb->succs) > 0)
987 FOR_EACH_EDGE (e, ei, bb->succs)
988 {
989 if (age <= BB_LAST_CHANGE_AGE (e->dest)
990 && bitmap_bit_p (considered, e->dest->index))
991 changed |= dataflow->problem->con_fun_n (e);
992 }
993 else if (dataflow->problem->con_fun_0)
994 dataflow->problem->con_fun_0 (bb);
995
996 if (changed
997 && dataflow->problem->trans_fun (bb_index))
998 {
 999       /* The in set of this block has changed.
 1000         Propagate to the predecessor blocks.  */
1001 FOR_EACH_EDGE (e, ei, bb->preds)
1002 {
1003 unsigned ob_index = e->src->index;
1004
1005 if (bitmap_bit_p (considered, ob_index))
1006 bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
1007 }
1008 return true;
1009 }
1010 return false;
1011 }
1012
1013 /* Main dataflow solver loop.
1014
 1015   DATAFLOW is the problem we are solving, PENDING is the worklist of basic
 1016   blocks we need to visit.
 1017   BLOCKS_IN_POSTORDER is an array of size N_BLOCKS giving the BBs in postorder
 1018   and BBINDEX_TO_POSTORDER is an array mapping a BB->index back to its postorder position.
1019 PENDING will be freed.
1020
1021 The worklists are bitmaps indexed by postorder positions.
1022
 1023   The function implements the standard algorithm for dataflow solving with two
1024 worklists (we are processing WORKLIST and storing new BBs to visit in
1025 PENDING).
1026
 1027   As an optimization we maintain the age at which each BB last changed (stored
 1028   in bb->aux) and the age at which it was last visited (stored in last_visit_age).
 1029   This avoids the need to re-do the confluence function for edges whose source
1030 did not change since destination was visited last time. */
1031
1032 static void
1033 df_worklist_dataflow_doublequeue (struct dataflow *dataflow,
1034 bitmap pending,
1035 sbitmap considered,
1036 int *blocks_in_postorder,
1037 unsigned *bbindex_to_postorder,
1038 int n_blocks)
1039 {
1040 enum df_flow_dir dir = dataflow->problem->dir;
1041 int dcount = 0;
1042 bitmap worklist = BITMAP_ALLOC (&df_bitmap_obstack);
1043 int age = 0;
1044 bool changed;
1045 vec<int> last_visit_age = vNULL;
1046 int prev_age;
1047 basic_block bb;
1048 int i;
1049
1050 last_visit_age.safe_grow_cleared (n_blocks);
1051
1052 /* Double-queueing. Worklist is for the current iteration,
1053 and pending is for the next. */
1054 while (!bitmap_empty_p (pending))
1055 {
1056 bitmap_iterator bi;
1057 unsigned int index;
1058
1059 /* Swap pending and worklist. */
1060 bitmap temp = worklist;
1061 worklist = pending;
1062 pending = temp;
1063
1064 EXECUTE_IF_SET_IN_BITMAP (worklist, 0, index, bi)
1065 {
1066 unsigned bb_index;
1067 dcount++;
1068
1069 bitmap_clear_bit (pending, index);
1070 bb_index = blocks_in_postorder[index];
1071 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
1072 prev_age = last_visit_age[index];
1073 if (dir == DF_FORWARD)
1074 changed = df_worklist_propagate_forward (dataflow, bb_index,
1075 bbindex_to_postorder,
1076 pending, considered,
1077 prev_age);
1078 else
1079 changed = df_worklist_propagate_backward (dataflow, bb_index,
1080 bbindex_to_postorder,
1081 pending, considered,
1082 prev_age);
1083 last_visit_age[index] = ++age;
1084 if (changed)
1085 bb->aux = (void *)(ptrdiff_t)age;
1086 }
1087 bitmap_clear (worklist);
1088 }
1089 for (i = 0; i < n_blocks; i++)
1090 BASIC_BLOCK_FOR_FN (cfun, blocks_in_postorder[i])->aux = NULL;
1091
1092 BITMAP_FREE (worklist);
1093 BITMAP_FREE (pending);
1094 last_visit_age.release ();
1095
1096 /* Dump statistics. */
1097 if (dump_file)
1098 fprintf (dump_file, "df_worklist_dataflow_doublequeue:"
1099 "n_basic_blocks %d n_edges %d"
1100 " count %d (%5.2g)\n",
1101 n_basic_blocks_for_fn (cfun), n_edges_for_fn (cfun),
1102 dcount, dcount / (float)n_basic_blocks_for_fn (cfun));
1103 }
1104
 1105 /* Worklist-based dataflow solver.  It uses a bitmap as a worklist,
1106 with "n"-th bit representing the n-th block in the reverse-postorder order.
1107 The solver is a double-queue algorithm similar to the "double stack" solver
1108 from Cooper, Harvey and Kennedy, "Iterative data-flow analysis, Revisited".
1109 The only significant difference is that the worklist in this implementation
1110 is always sorted in RPO of the CFG visiting direction. */
1111
1112 void
1113 df_worklist_dataflow (struct dataflow *dataflow,
1114 bitmap blocks_to_consider,
1115 int *blocks_in_postorder,
1116 int n_blocks)
1117 {
1118 bitmap pending = BITMAP_ALLOC (&df_bitmap_obstack);
1119 sbitmap considered = sbitmap_alloc (last_basic_block_for_fn (cfun));
1120 bitmap_iterator bi;
1121 unsigned int *bbindex_to_postorder;
1122 int i;
1123 unsigned int index;
1124 enum df_flow_dir dir = dataflow->problem->dir;
1125
1126 gcc_assert (dir != DF_NONE);
1127
1128 /* BBINDEX_TO_POSTORDER maps the bb->index to the reverse postorder. */
1129 bbindex_to_postorder = XNEWVEC (unsigned int,
1130 last_basic_block_for_fn (cfun));
1131
1132 /* Initialize the array to an out-of-bound value. */
1133 for (i = 0; i < last_basic_block_for_fn (cfun); i++)
1134 bbindex_to_postorder[i] = last_basic_block_for_fn (cfun);
1135
1136 /* Initialize the considered map. */
1137 bitmap_clear (considered);
1138 EXECUTE_IF_SET_IN_BITMAP (blocks_to_consider, 0, index, bi)
1139 {
1140 bitmap_set_bit (considered, index);
1141 }
1142
1143 /* Initialize the mapping of block index to postorder. */
1144 for (i = 0; i < n_blocks; i++)
1145 {
1146 bbindex_to_postorder[blocks_in_postorder[i]] = i;
1147 /* Add all blocks to the worklist. */
1148 bitmap_set_bit (pending, i);
1149 }
1150
1151 /* Initialize the problem. */
1152 if (dataflow->problem->init_fun)
1153 dataflow->problem->init_fun (blocks_to_consider);
1154
1155 /* Solve it. */
1156 df_worklist_dataflow_doublequeue (dataflow, pending, considered,
1157 blocks_in_postorder,
1158 bbindex_to_postorder,
1159 n_blocks);
1160 sbitmap_free (considered);
1161 free (bbindex_to_postorder);
1162 }
1163
1164
1165 /* Remove the entries not in BLOCKS from the LIST of length LEN, preserving
1166 the order of the remaining entries. Returns the length of the resulting
1167 list. */
1168
1169 static unsigned
1170 df_prune_to_subcfg (int list[], unsigned len, bitmap blocks)
1171 {
1172 unsigned act, last;
1173
1174 for (act = 0, last = 0; act < len; act++)
1175 if (bitmap_bit_p (blocks, list[act]))
1176 list[last++] = list[act];
1177
1178 return last;
1179 }
1180
1181
1182 /* Execute dataflow analysis on a single dataflow problem.
1183
1184 BLOCKS_TO_CONSIDER are the blocks whose solution can either be
1185 examined or will be computed. For calls from DF_ANALYZE, this is
1186 the set of blocks that has been passed to DF_SET_BLOCKS.
1187 */
1188
1189 void
1190 df_analyze_problem (struct dataflow *dflow,
1191 bitmap blocks_to_consider,
1192 int *postorder, int n_blocks)
1193 {
1194 timevar_push (dflow->problem->tv_id);
1195
1196 /* (Re)Allocate the datastructures necessary to solve the problem. */
1197 if (dflow->problem->alloc_fun)
1198 dflow->problem->alloc_fun (blocks_to_consider);
1199
1200 #ifdef ENABLE_DF_CHECKING
1201 if (dflow->problem->verify_start_fun)
1202 dflow->problem->verify_start_fun ();
1203 #endif
1204
1205 /* Set up the problem and compute the local information. */
1206 if (dflow->problem->local_compute_fun)
1207 dflow->problem->local_compute_fun (blocks_to_consider);
1208
1209 /* Solve the equations. */
1210 if (dflow->problem->dataflow_fun)
1211 dflow->problem->dataflow_fun (dflow, blocks_to_consider,
1212 postorder, n_blocks);
1213
1214 /* Massage the solution. */
1215 if (dflow->problem->finalize_fun)
1216 dflow->problem->finalize_fun (blocks_to_consider);
1217
1218 #ifdef ENABLE_DF_CHECKING
1219 if (dflow->problem->verify_end_fun)
1220 dflow->problem->verify_end_fun ();
1221 #endif
1222
1223 timevar_pop (dflow->problem->tv_id);
1224
1225 dflow->computed = true;
1226 }
1227
1228
1229 /* Analyze dataflow info. */
1230
1231 static void
1232 df_analyze_1 (void)
1233 {
1234 int i;
1235
1236 /* These should be the same. */
1237 gcc_assert (df->n_blocks == df->n_blocks_inverted);
1238
1239 /* We need to do this before the df_verify_all because this is
1240 not kept incrementally up to date. */
1241 df_compute_regs_ever_live (false);
1242 df_process_deferred_rescans ();
1243
1244 if (dump_file)
1245 fprintf (dump_file, "df_analyze called\n");
1246
1247 #ifndef ENABLE_DF_CHECKING
1248 if (df->changeable_flags & DF_VERIFY_SCHEDULED)
1249 #endif
1250 df_verify ();
1251
1252 /* Skip over the DF_SCAN problem. */
1253 for (i = 1; i < df->num_problems_defined; i++)
1254 {
1255 struct dataflow *dflow = df->problems_in_order[i];
1256 if (dflow->solutions_dirty)
1257 {
1258 if (dflow->problem->dir == DF_FORWARD)
1259 df_analyze_problem (dflow,
1260 df->blocks_to_analyze,
1261 df->postorder_inverted,
1262 df->n_blocks_inverted);
1263 else
1264 df_analyze_problem (dflow,
1265 df->blocks_to_analyze,
1266 df->postorder,
1267 df->n_blocks);
1268 }
1269 }
1270
1271 if (!df->analyze_subset)
1272 {
1273 BITMAP_FREE (df->blocks_to_analyze);
1274 df->blocks_to_analyze = NULL;
1275 }
1276
1277 #ifdef DF_DEBUG_CFG
1278 df_set_clean_cfg ();
1279 #endif
1280 }
1281
1282 /* Analyze dataflow info. */
1283
1284 void
1285 df_analyze (void)
1286 {
1287 bitmap current_all_blocks = BITMAP_ALLOC (&df_bitmap_obstack);
1288 int i;
1289
1290 free (df->postorder);
1291 free (df->postorder_inverted);
1292 df->postorder = XNEWVEC (int, last_basic_block_for_fn (cfun));
1293 df->postorder_inverted = XNEWVEC (int, last_basic_block_for_fn (cfun));
1294 df->n_blocks = post_order_compute (df->postorder, true, true);
1295 df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted);
1296
1297 for (i = 0; i < df->n_blocks; i++)
1298 bitmap_set_bit (current_all_blocks, df->postorder[i]);
1299
1300 #ifdef ENABLE_CHECKING
1301 /* Verify that POSTORDER_INVERTED only contains blocks reachable from
1302 the ENTRY block. */
1303 for (i = 0; i < df->n_blocks_inverted; i++)
1304 gcc_assert (bitmap_bit_p (current_all_blocks, df->postorder_inverted[i]));
1305 #endif
1306
1307 /* Make sure that we have pruned any unreachable blocks from these
1308 sets. */
1309 if (df->analyze_subset)
1310 {
1311 bitmap_and_into (df->blocks_to_analyze, current_all_blocks);
1312 df->n_blocks = df_prune_to_subcfg (df->postorder,
1313 df->n_blocks, df->blocks_to_analyze);
1314 df->n_blocks_inverted = df_prune_to_subcfg (df->postorder_inverted,
1315 df->n_blocks_inverted,
1316 df->blocks_to_analyze);
1317 BITMAP_FREE (current_all_blocks);
1318 }
1319 else
1320 {
1321 df->blocks_to_analyze = current_all_blocks;
1322 current_all_blocks = NULL;
1323 }
1324
1325 df_analyze_1 ();
1326 }
1327
1328 /* Compute the reverse top sort order of the sub-CFG specified by LOOP.
1329 Returns the number of blocks which is always loop->num_nodes. */
1330
1331 static int
1332 loop_post_order_compute (int *post_order, struct loop *loop)
1333 {
1334 edge_iterator *stack;
1335 int sp;
1336 int post_order_num = 0;
1337 bitmap visited;
1338
1339 /* Allocate stack for back-tracking up CFG. */
1340 stack = XNEWVEC (edge_iterator, loop->num_nodes + 1);
1341 sp = 0;
1342
1343 /* Allocate bitmap to track nodes that have been visited. */
1344 visited = BITMAP_ALLOC (NULL);
1345
1346 /* Push the first edge on to the stack. */
1347 stack[sp++] = ei_start (loop_preheader_edge (loop)->src->succs);
1348
1349 while (sp)
1350 {
1351 edge_iterator ei;
1352 basic_block src;
1353 basic_block dest;
1354
1355 /* Look at the edge on the top of the stack. */
1356 ei = stack[sp - 1];
1357 src = ei_edge (ei)->src;
1358 dest = ei_edge (ei)->dest;
1359
1360 /* Check if the edge destination has been visited yet and mark it
1361 if not so. */
1362 if (flow_bb_inside_loop_p (loop, dest)
1363 && bitmap_set_bit (visited, dest->index))
1364 {
1365 if (EDGE_COUNT (dest->succs) > 0)
1366 /* Since the DEST node has been visited for the first
1367 time, check its successors. */
1368 stack[sp++] = ei_start (dest->succs);
1369 else
1370 post_order[post_order_num++] = dest->index;
1371 }
1372 else
1373 {
1374 if (ei_one_before_end_p (ei)
1375 && src != loop_preheader_edge (loop)->src)
1376 post_order[post_order_num++] = src->index;
1377
1378 if (!ei_one_before_end_p (ei))
1379 ei_next (&stack[sp - 1]);
1380 else
1381 sp--;
1382 }
1383 }
1384
1385 free (stack);
1386 BITMAP_FREE (visited);
1387
1388 return post_order_num;
1389 }
1390
1391 /* Compute the reverse top sort order of the inverted sub-CFG specified
1392 by LOOP. Returns the number of blocks which is always loop->num_nodes. */
1393
1394 static int
1395 loop_inverted_post_order_compute (int *post_order, struct loop *loop)
1396 {
1397 basic_block bb;
1398 edge_iterator *stack;
1399 int sp;
1400 int post_order_num = 0;
1401 bitmap visited;
1402
1403 /* Allocate stack for back-tracking up CFG. */
1404 stack = XNEWVEC (edge_iterator, loop->num_nodes + 1);
1405 sp = 0;
1406
1407 /* Allocate bitmap to track nodes that have been visited. */
1408 visited = BITMAP_ALLOC (NULL);
1409
1410 /* Put all latches into the initial work list. In theory we'd want
1411 to start from loop exits but then we'd have the special case of
1412 endless loops. It doesn't really matter for DF iteration order and
1413 handling latches last is probably even better. */
1414 stack[sp++] = ei_start (loop->header->preds);
1415 bitmap_set_bit (visited, loop->header->index);
1416
1417 /* The inverted traversal loop. */
1418 while (sp)
1419 {
1420 edge_iterator ei;
1421 basic_block pred;
1422
1423 /* Look at the edge on the top of the stack. */
1424 ei = stack[sp - 1];
1425 bb = ei_edge (ei)->dest;
1426 pred = ei_edge (ei)->src;
1427
1428 /* Check if the predecessor has been visited yet and mark it
1429 if not so. */
1430 if (flow_bb_inside_loop_p (loop, pred)
1431 && bitmap_set_bit (visited, pred->index))
1432 {
1433 if (EDGE_COUNT (pred->preds) > 0)
1434 /* Since the predecessor node has been visited for the first
1435 time, check its predecessors. */
1436 stack[sp++] = ei_start (pred->preds);
1437 else
1438 post_order[post_order_num++] = pred->index;
1439 }
1440 else
1441 {
1442 if (flow_bb_inside_loop_p (loop, bb)
1443 && ei_one_before_end_p (ei))
1444 post_order[post_order_num++] = bb->index;
1445
1446 if (!ei_one_before_end_p (ei))
1447 ei_next (&stack[sp - 1]);
1448 else
1449 sp--;
1450 }
1451 }
1452
1453 free (stack);
1454 BITMAP_FREE (visited);
1455 return post_order_num;
1456 }
1457
1458
1459 /* Analyze dataflow info for the basic blocks contained in LOOP. */
1460
1461 void
1462 df_analyze_loop (struct loop *loop)
1463 {
1464 free (df->postorder);
1465 free (df->postorder_inverted);
1466
1467 df->postorder = XNEWVEC (int, loop->num_nodes);
1468 df->postorder_inverted = XNEWVEC (int, loop->num_nodes);
1469 df->n_blocks = loop_post_order_compute (df->postorder, loop);
1470 df->n_blocks_inverted
1471 = loop_inverted_post_order_compute (df->postorder_inverted, loop);
1472 gcc_assert ((unsigned) df->n_blocks == loop->num_nodes);
1473 gcc_assert ((unsigned) df->n_blocks_inverted == loop->num_nodes);
1474
1475 bitmap blocks = BITMAP_ALLOC (&df_bitmap_obstack);
1476 for (int i = 0; i < df->n_blocks; ++i)
1477 bitmap_set_bit (blocks, df->postorder[i]);
1478 df_set_blocks (blocks);
1479 BITMAP_FREE (blocks);
1480
1481 df_analyze_1 ();
1482 }
1483
1484
1485 /* Return the number of basic blocks from the last call to df_analyze. */
1486
1487 int
1488 df_get_n_blocks (enum df_flow_dir dir)
1489 {
1490 gcc_assert (dir != DF_NONE);
1491
1492 if (dir == DF_FORWARD)
1493 {
1494 gcc_assert (df->postorder_inverted);
1495 return df->n_blocks_inverted;
1496 }
1497
1498 gcc_assert (df->postorder);
1499 return df->n_blocks;
1500 }
1501
1502
1503 /* Return a pointer to the array of basic blocks in the reverse postorder.
1504 Depending on the direction of the dataflow problem,
1505 it returns either the usual reverse postorder array
1506 or the reverse postorder of inverted traversal. */
1507 int *
1508 df_get_postorder (enum df_flow_dir dir)
1509 {
1510 gcc_assert (dir != DF_NONE);
1511
1512 if (dir == DF_FORWARD)
1513 {
1514 gcc_assert (df->postorder_inverted);
1515 return df->postorder_inverted;
1516 }
1517 gcc_assert (df->postorder);
1518 return df->postorder;
1519 }
1520
1521 static struct df_problem user_problem;
1522 static struct dataflow user_dflow;
1523
 1524 /* Interface for calling iterative dataflow with user defined
 1525    confluence and transfer functions.  All that is necessary is to
 1526    supply DIR, a direction, INIT_FUN, an initialization function (or NULL),
 1527    CONF_FUN_0, a confluence function for blocks with no logical preds
 1528    (or NULL), CONF_FUN_N, the normal confluence function, TRANS_FUN,
 1529    the basic block transfer function, BLOCKS, the set of blocks to examine,
 1530    POSTORDER, the blocks in postorder, and N_BLOCKS, the number of blocks in POSTORDER.  */
1531
1532 void
1533 df_simple_dataflow (enum df_flow_dir dir,
1534 df_init_function init_fun,
1535 df_confluence_function_0 con_fun_0,
1536 df_confluence_function_n con_fun_n,
1537 df_transfer_function trans_fun,
1538 bitmap blocks, int * postorder, int n_blocks)
1539 {
1540 memset (&user_problem, 0, sizeof (struct df_problem));
1541 user_problem.dir = dir;
1542 user_problem.init_fun = init_fun;
1543 user_problem.con_fun_0 = con_fun_0;
1544 user_problem.con_fun_n = con_fun_n;
1545 user_problem.trans_fun = trans_fun;
1546 user_dflow.problem = &user_problem;
1547 df_worklist_dataflow (&user_dflow, blocks, postorder, n_blocks);
1548 }
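
/* For example, a pass could solve its own forward problem over the blocks
   recorded by the last call to df_analyze with something like this sketch,
   where the my_* callbacks and the my_blocks bitmap are supplied by the
   pass itself:

     df_simple_dataflow (DF_FORWARD, my_init_fun, my_con_fun_0,
			 my_con_fun_n, my_trans_fun, my_blocks,
			 df_get_postorder (DF_FORWARD),
			 df_get_n_blocks (DF_FORWARD));  */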
1549
1550
1551 \f
1552 /*----------------------------------------------------------------------------
1553 Functions to support limited incremental change.
1554 ----------------------------------------------------------------------------*/
1555
1556
1557 /* Get basic block info. */
1558
1559 static void *
1560 df_get_bb_info (struct dataflow *dflow, unsigned int index)
1561 {
1562 if (dflow->block_info == NULL)
1563 return NULL;
1564 if (index >= dflow->block_info_size)
1565 return NULL;
1566 return (void *)((char *)dflow->block_info
1567 + index * dflow->problem->block_info_elt_size);
1568 }
1569
1570
1571 /* Set basic block info. */
1572
1573 static void
1574 df_set_bb_info (struct dataflow *dflow, unsigned int index,
1575 void *bb_info)
1576 {
1577 gcc_assert (dflow->block_info);
1578 memcpy ((char *)dflow->block_info
1579 + index * dflow->problem->block_info_elt_size,
1580 bb_info, dflow->problem->block_info_elt_size);
1581 }
1582
1583
1584 /* Clear basic block info. */
1585
1586 static void
1587 df_clear_bb_info (struct dataflow *dflow, unsigned int index)
1588 {
1589 gcc_assert (dflow->block_info);
1590 gcc_assert (dflow->block_info_size > index);
1591 memset ((char *)dflow->block_info
1592 + index * dflow->problem->block_info_elt_size,
1593 0, dflow->problem->block_info_elt_size);
1594 }
1595
1596
1597 /* Mark the solutions as being out of date. */
1598
1599 void
1600 df_mark_solutions_dirty (void)
1601 {
1602 if (df)
1603 {
1604 int p;
1605 for (p = 1; p < df->num_problems_defined; p++)
1606 df->problems_in_order[p]->solutions_dirty = true;
1607 }
1608 }
1609
1610
 1611 /* Return true if BB needs its transfer functions recomputed.  */
1612
1613 bool
1614 df_get_bb_dirty (basic_block bb)
1615 {
1616 return bitmap_bit_p ((df_live
1617 ? df_live : df_lr)->out_of_date_transfer_functions,
1618 bb->index);
1619 }
1620
1621
 1622 /* Mark BB's transfer functions as being out of
 1623    date.  */
1624
1625 void
1626 df_set_bb_dirty (basic_block bb)
1627 {
1628 bb->flags |= BB_MODIFIED;
1629 if (df)
1630 {
1631 int p;
1632 for (p = 1; p < df->num_problems_defined; p++)
1633 {
1634 struct dataflow *dflow = df->problems_in_order[p];
1635 if (dflow->out_of_date_transfer_functions)
1636 bitmap_set_bit (dflow->out_of_date_transfer_functions, bb->index);
1637 }
1638 df_mark_solutions_dirty ();
1639 }
1640 }
1641
1642
1643 /* Grow the bb_info array. */
1644
1645 void
1646 df_grow_bb_info (struct dataflow *dflow)
1647 {
1648 unsigned int new_size = last_basic_block_for_fn (cfun) + 1;
1649 if (dflow->block_info_size < new_size)
1650 {
1651 new_size += new_size / 4;
1652 dflow->block_info
1653 = (void *)XRESIZEVEC (char, (char *)dflow->block_info,
1654 new_size
1655 * dflow->problem->block_info_elt_size);
1656 memset ((char *)dflow->block_info
1657 + dflow->block_info_size
1658 * dflow->problem->block_info_elt_size,
1659 0,
1660 (new_size - dflow->block_info_size)
1661 * dflow->problem->block_info_elt_size);
1662 dflow->block_info_size = new_size;
1663 }
1664 }
1665
1666
1667 /* Clear the dirty bits. This is called from places that delete
1668 blocks. */
1669 static void
1670 df_clear_bb_dirty (basic_block bb)
1671 {
1672 int p;
1673 for (p = 1; p < df->num_problems_defined; p++)
1674 {
1675 struct dataflow *dflow = df->problems_in_order[p];
1676 if (dflow->out_of_date_transfer_functions)
1677 bitmap_clear_bit (dflow->out_of_date_transfer_functions, bb->index);
1678 }
1679 }
1680
 1681 /* Called from compact_blocks to reorganize each problem's basic
 1682    block info.  */
1683
1684 void
1685 df_compact_blocks (void)
1686 {
1687 int i, p;
1688 basic_block bb;
1689 void *problem_temps;
1690 bitmap_head tmp;
1691
1692 bitmap_initialize (&tmp, &df_bitmap_obstack);
1693 for (p = 0; p < df->num_problems_defined; p++)
1694 {
1695 struct dataflow *dflow = df->problems_in_order[p];
1696
1697 /* Need to reorganize the out_of_date_transfer_functions for the
1698 dflow problem. */
1699 if (dflow->out_of_date_transfer_functions)
1700 {
1701 bitmap_copy (&tmp, dflow->out_of_date_transfer_functions);
1702 bitmap_clear (dflow->out_of_date_transfer_functions);
1703 if (bitmap_bit_p (&tmp, ENTRY_BLOCK))
1704 bitmap_set_bit (dflow->out_of_date_transfer_functions, ENTRY_BLOCK);
1705 if (bitmap_bit_p (&tmp, EXIT_BLOCK))
1706 bitmap_set_bit (dflow->out_of_date_transfer_functions, EXIT_BLOCK);
1707
1708 i = NUM_FIXED_BLOCKS;
1709 FOR_EACH_BB_FN (bb, cfun)
1710 {
1711 if (bitmap_bit_p (&tmp, bb->index))
1712 bitmap_set_bit (dflow->out_of_date_transfer_functions, i);
1713 i++;
1714 }
1715 }
1716
1717 /* Now shuffle the block info for the problem. */
1718 if (dflow->problem->free_bb_fun)
1719 {
1720 int size = (last_basic_block_for_fn (cfun)
1721 * dflow->problem->block_info_elt_size);
1722 problem_temps = XNEWVAR (char, size);
1723 df_grow_bb_info (dflow);
1724 memcpy (problem_temps, dflow->block_info, size);
1725
1726 /* Copy the bb info from the problem tmps to the proper
1727 place in the block_info vector. Null out the copied
1728 item. The entry and exit blocks never move. */
1729 i = NUM_FIXED_BLOCKS;
1730 FOR_EACH_BB_FN (bb, cfun)
1731 {
1732 df_set_bb_info (dflow, i,
1733 (char *)problem_temps
1734 + bb->index * dflow->problem->block_info_elt_size);
1735 i++;
1736 }
1737 memset ((char *)dflow->block_info
1738 + i * dflow->problem->block_info_elt_size, 0,
1739 (last_basic_block_for_fn (cfun) - i)
1740 * dflow->problem->block_info_elt_size);
1741 free (problem_temps);
1742 }
1743 }
1744
1745 /* Shuffle the bits in the basic_block indexed arrays. */
1746
1747 if (df->blocks_to_analyze)
1748 {
1749 if (bitmap_bit_p (&tmp, ENTRY_BLOCK))
1750 bitmap_set_bit (df->blocks_to_analyze, ENTRY_BLOCK);
1751 if (bitmap_bit_p (&tmp, EXIT_BLOCK))
1752 bitmap_set_bit (df->blocks_to_analyze, EXIT_BLOCK);
1753 bitmap_copy (&tmp, df->blocks_to_analyze);
1754 bitmap_clear (df->blocks_to_analyze);
1755 i = NUM_FIXED_BLOCKS;
1756 FOR_EACH_BB_FN (bb, cfun)
1757 {
1758 if (bitmap_bit_p (&tmp, bb->index))
1759 bitmap_set_bit (df->blocks_to_analyze, i);
1760 i++;
1761 }
1762 }
1763
1764 bitmap_clear (&tmp);
1765
1766 i = NUM_FIXED_BLOCKS;
1767 FOR_EACH_BB_FN (bb, cfun)
1768 {
1769 SET_BASIC_BLOCK_FOR_FN (cfun, i, bb);
1770 bb->index = i;
1771 i++;
1772 }
1773
1774 gcc_assert (i == n_basic_blocks_for_fn (cfun));
1775
1776 for (; i < last_basic_block_for_fn (cfun); i++)
1777 SET_BASIC_BLOCK_FOR_FN (cfun, i, NULL);
1778
1779 #ifdef DF_DEBUG_CFG
1780 if (!df_lr->solutions_dirty)
1781 df_set_clean_cfg ();
1782 #endif
1783 }
1784
1785
1786 /* Shove NEW_BLOCK in at OLD_INDEX. Called from ifcvt to hack a
1787 block. There is no excuse for people to do this kind of thing. */
1788
1789 void
1790 df_bb_replace (int old_index, basic_block new_block)
1791 {
1792 int new_block_index = new_block->index;
1793 int p;
1794
1795 if (dump_file)
1796 fprintf (dump_file, "shoving block %d into %d\n", new_block_index, old_index);
1797
1798 gcc_assert (df);
1799 gcc_assert (BASIC_BLOCK_FOR_FN (cfun, old_index) == NULL);
1800
1801 for (p = 0; p < df->num_problems_defined; p++)
1802 {
1803 struct dataflow *dflow = df->problems_in_order[p];
1804 if (dflow->block_info)
1805 {
1806 df_grow_bb_info (dflow);
1807 df_set_bb_info (dflow, old_index,
1808 df_get_bb_info (dflow, new_block_index));
1809 }
1810 }
1811
1812 df_clear_bb_dirty (new_block);
1813 SET_BASIC_BLOCK_FOR_FN (cfun, old_index, new_block);
1814 new_block->index = old_index;
1815 df_set_bb_dirty (BASIC_BLOCK_FOR_FN (cfun, old_index));
1816 SET_BASIC_BLOCK_FOR_FN (cfun, new_block_index, NULL);
1817 }
1818
1819
1820 /* Free all of the per basic block dataflow from all of the problems.
1821 This is typically called before a basic block is deleted and the
1822 problems will be reanalyzed. */
1823
1824 void
1825 df_bb_delete (int bb_index)
1826 {
1827 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
1828 int i;
1829
1830 if (!df)
1831 return;
1832
1833 for (i = 0; i < df->num_problems_defined; i++)
1834 {
1835 struct dataflow *dflow = df->problems_in_order[i];
1836 if (dflow->problem->free_bb_fun)
1837 {
1838 void *bb_info = df_get_bb_info (dflow, bb_index);
1839 if (bb_info)
1840 {
1841 dflow->problem->free_bb_fun (bb, bb_info);
1842 df_clear_bb_info (dflow, bb_index);
1843 }
1844 }
1845 }
1846 df_clear_bb_dirty (bb);
1847 df_mark_solutions_dirty ();
1848 }
1849
1850
1851 /* Verify that there is a place for everything and everything is in
1852 its place. This is too expensive to run after every pass in the
1853 mainline. However, it is an excellent debugging tool if the
1854 dataflow information is not being updated properly. You can just
1855 sprinkle calls in until you find the place that is changing an
1856 underlying structure without calling the proper updating
1857 routine. */
1858
1859 void
1860 df_verify (void)
1861 {
1862 df_scan_verify ();
1863 #ifdef ENABLE_DF_CHECKING
1864 df_lr_verify_transfer_functions ();
1865 if (df_live)
1866 df_live_verify_transfer_functions ();
1867 #endif
1868 }
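
/* Editor's sketch, not part of the original source: the "sprinkling"
   mentioned above usually looks like this inside a suspect pass.  The
   surrounding function is hypothetical; df_verify is expensive, so
   such calls are for debugging only and are removed afterwards.  */

static void
example_checked_transform (void)
{
  basic_block bb;
  FOR_EACH_BB_FN (bb, cfun)
    {
      /* ... transform BB here ...  */
      df_verify ();	/* Abort as soon as the df info goes stale.  */
    }
}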
1869
1870 #ifdef DF_DEBUG_CFG
1871
1872 /* Compute an array of ints that describes the cfg. This can be used
1873 to discover places where the cfg is modified but the appropriate
1874 calls have not been made to keep df informed. The internals of
1875 this are unexciting: the key is that two instances of this can be
1876 compared to see if any changes have been made to the cfg. */
1877
1878 static int *
1879 df_compute_cfg_image (void)
1880 {
1881 basic_block bb;
1882 int size = 2 + (2 * n_basic_blocks_for_fn (cfun));
1883 int i;
1884 int * map;
1885
1886 FOR_ALL_BB_FN (bb, cfun)
1887 {
1888 size += EDGE_COUNT (bb->succs);
1889 }
1890
1891 map = XNEWVEC (int, size);
1892 map[0] = size;
1893 i = 1;
1894 FOR_ALL_BB_FN (bb, cfun)
1895 {
1896 edge_iterator ei;
1897 edge e;
1898
1899 map[i++] = bb->index;
1900 FOR_EACH_EDGE (e, ei, bb->succs)
1901 map[i++] = e->dest->index;
1902 map[i++] = -1;
1903 }
1904 map[i] = -1;
1905 return map;
1906 }
1907
1908 static int *saved_cfg = NULL;
1909
1910
1911 /* This function compares the saved version of the cfg with the
1912 current cfg and aborts if the two differ. The function silently
1913 returns if the cfg has been marked as dirty, if no fingerprint has
1914 been saved, or if the two are the same. */
1915
1916 void
1917 df_check_cfg_clean (void)
1918 {
1919 int *new_map;
1920
1921 if (!df)
1922 return;
1923
1924 if (df_lr->solutions_dirty)
1925 return;
1926
1927 if (saved_cfg == NULL)
1928 return;
1929
1930 new_map = df_compute_cfg_image ();
1931 gcc_assert (memcmp (saved_cfg, new_map, saved_cfg[0] * sizeof (int)) == 0);
1932 free (new_map);
1933 }
1934
1935
1936 /* This function builds a cfg fingerprint and squirrels it away in
1937 saved_cfg. */
1938
1939 static void
1940 df_set_clean_cfg (void)
1941 {
1942 free (saved_cfg);
1943 saved_cfg = df_compute_cfg_image ();
1944 }
1945
1946 #endif /* DF_DEBUG_CFG */
1947 /*----------------------------------------------------------------------------
1948 PUBLIC INTERFACES TO QUERY INFORMATION.
1949 ----------------------------------------------------------------------------*/
1950
1951
1952 /* Return first def of REGNO within BB. */
1953
1954 df_ref
1955 df_bb_regno_first_def_find (basic_block bb, unsigned int regno)
1956 {
1957 rtx insn;
1958 df_ref *def_rec;
1959 unsigned int uid;
1960
1961 FOR_BB_INSNS (bb, insn)
1962 {
1963 if (!INSN_P (insn))
1964 continue;
1965
1966 uid = INSN_UID (insn);
1967 for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
1968 {
1969 df_ref def = *def_rec;
1970 if (DF_REF_REGNO (def) == regno)
1971 return def;
1972 }
1973 }
1974 return NULL;
1975 }
1976
1977
1978 /* Return last def of REGNO within BB. */
1979
1980 df_ref
1981 df_bb_regno_last_def_find (basic_block bb, unsigned int regno)
1982 {
1983 rtx insn;
1984 df_ref *def_rec;
1985 unsigned int uid;
1986
1987 FOR_BB_INSNS_REVERSE (bb, insn)
1988 {
1989 if (!INSN_P (insn))
1990 continue;
1991
1992 uid = INSN_UID (insn);
1993 for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
1994 {
1995 df_ref def = *def_rec;
1996 if (DF_REF_REGNO (def) == regno)
1997 return def;
1998 }
1999 }
2000
2001 return NULL;
2002 }
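
/* Editor's sketch, not part of the original source: a typical use of
   the block-local queries is to step from a register number back to
   the insn that last sets it in a block.  The helper name is
   hypothetical.  */

static rtx
example_last_setter_in_bb (basic_block bb, unsigned int regno)
{
  df_ref def = df_bb_regno_last_def_find (bb, regno);
  return def ? DF_REF_INSN (def) : NULL_RTX;
}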
2003
2004 /* Find the reference corresponding to the definition of REG in
2005 INSN, or return NULL if there is none. */
2006
2007 df_ref
2008 df_find_def (rtx insn, rtx reg)
2009 {
2010 unsigned int uid;
2011 df_ref *def_rec;
2012
2013 if (GET_CODE (reg) == SUBREG)
2014 reg = SUBREG_REG (reg);
2015 gcc_assert (REG_P (reg));
2016
2017 uid = INSN_UID (insn);
2018 for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
2019 {
2020 df_ref def = *def_rec;
2021 if (DF_REF_REGNO (def) == REGNO (reg))
2022 return def;
2023 }
2024
2025 return NULL;
2026 }
2027
2028
2029 /* Return true if REG is defined in INSN, false otherwise. */
2030
2031 bool
2032 df_reg_defined (rtx insn, rtx reg)
2033 {
2034 return df_find_def (insn, reg) != NULL;
2035 }
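
/* Editor's sketch, not part of the original source: once the def
   reference has been found, the def-use chain hanging off it can be
   inspected, provided the chain problem has been added.  The helper
   name is hypothetical.  */

static void
example_dump_uses_of_def (rtx insn, rtx reg, FILE *file)
{
  df_ref def = df_find_def (insn, reg);
  if (def)
    {
      fprintf (file, "uses reached by def of reg %d: ", DF_REF_REGNO (def));
      df_chain_dump (DF_REF_CHAIN (def), file);
      fprintf (file, "\n");
    }
}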
2036
2037
2038 /* Find the reference corresponding to the use of REG in INSN, or
2039 return NULL if there is none. */
2040
2041 df_ref
2042 df_find_use (rtx insn, rtx reg)
2043 {
2044 unsigned int uid;
2045 df_ref *use_rec;
2046
2047 if (GET_CODE (reg) == SUBREG)
2048 reg = SUBREG_REG (reg);
2049 gcc_assert (REG_P (reg));
2050
2051 uid = INSN_UID (insn);
2052 for (use_rec = DF_INSN_UID_USES (uid); *use_rec; use_rec++)
2053 {
2054 df_ref use = *use_rec;
2055 if (DF_REF_REGNO (use) == REGNO (reg))
2056 return use;
2057 }
2058 if (df->changeable_flags & DF_EQ_NOTES)
2059 for (use_rec = DF_INSN_UID_EQ_USES (uid); *use_rec; use_rec++)
2060 {
2061 df_ref use = *use_rec;
2062 if (DF_REF_REGNO (use) == REGNO (reg))
2063 return use;
2064 }
2065 return NULL;
2066 }
2067
2068
2069 /* Return true if REG is referenced in INSN, false otherwise. */
2070
2071 bool
2072 df_reg_used (rtx insn, rtx reg)
2073 {
2074 return df_find_use (insn, reg) != NULL;
2075 }
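
/* Editor's sketch, not part of the original source: df_reg_used and
   df_reg_defined answer yes/no questions; combining the underlying
   lookups gives simple operand classifications.  The predicate below
   is hypothetical.  */

static bool
example_reg_read_but_not_written (rtx insn, rtx reg)
{
  return df_find_use (insn, reg) != NULL
	 && df_find_def (insn, reg) == NULL;
}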
2076
2077 \f
2078 /*----------------------------------------------------------------------------
2079 Debugging and printing functions.
2080 ----------------------------------------------------------------------------*/
2081
2082 /* Write the registers in regset R into OUTF. This is part of
2083 making a debugging dump. */
2084
2085 void
2086 dump_regset (regset r, FILE *outf)
2087 {
2088 unsigned i;
2089 reg_set_iterator rsi;
2090
2091 if (r == NULL)
2092 {
2093 fputs (" (nil)", outf);
2094 return;
2095 }
2096
2097 EXECUTE_IF_SET_IN_REG_SET (r, 0, i, rsi)
2098 {
2099 fprintf (outf, " %d", i);
2100 if (i < FIRST_PSEUDO_REGISTER)
2101 fprintf (outf, " [%s]",
2102 reg_names[i]);
2103 }
2104 }
2105
2106 /* Print a human-readable representation of R on the standard error
2107 stream. This function is designed to be used from within the
2108 debugger. */
2109 extern void debug_regset (regset);
2110 DEBUG_FUNCTION void
2111 debug_regset (regset r)
2112 {
2113 dump_regset (r, stderr);
2114 putc ('\n', stderr);
2115 }
2116
2117 /* Write the registers in bitmap R into FILE. This is part of
2118 making a debugging dump. */
2119
2120 void
2121 df_print_regset (FILE *file, bitmap r)
2122 {
2123 unsigned int i;
2124 bitmap_iterator bi;
2125
2126 if (r == NULL)
2127 fputs (" (nil)", file);
2128 else
2129 {
2130 EXECUTE_IF_SET_IN_BITMAP (r, 0, i, bi)
2131 {
2132 fprintf (file, " %d", i);
2133 if (i < FIRST_PSEUDO_REGISTER)
2134 fprintf (file, " [%s]", reg_names[i]);
2135 }
2136 }
2137 fprintf (file, "\n");
2138 }
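
/* Editor's sketch, not part of the original source: df_print_regset is
   normally handed one of the per-block solution sets, e.g. the LR
   live-in set via DF_LR_IN.  The helper name is hypothetical.  */

static void
example_print_live_in (basic_block bb, FILE *file)
{
  fprintf (file, ";; live in to bb %d:", bb->index);
  df_print_regset (file, DF_LR_IN (bb));
}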
2139
2140
2141 /* Write the registers in bitmap R into FILE. The bitmap is in the
2142 two-bits-per-pseudo form used by the word-level LR problem
2143 (df_word_lr). This is part of making a debugging dump. */
2144
2145 void
2146 df_print_word_regset (FILE *file, bitmap r)
2147 {
2148 unsigned int max_reg = max_reg_num ();
2149
2150 if (r == NULL)
2151 fputs (" (nil)", file);
2152 else
2153 {
2154 unsigned int i;
2155 for (i = FIRST_PSEUDO_REGISTER; i < max_reg; i++)
2156 {
2157 bool found = (bitmap_bit_p (r, 2 * i)
2158 || bitmap_bit_p (r, 2 * i + 1));
2159 if (found)
2160 {
2161 int word;
2162 const char * sep = "";
2163 fprintf (file, " %d", i);
2164 fprintf (file, "(");
2165 for (word = 0; word < 2; word++)
2166 if (bitmap_bit_p (r, 2 * i + word))
2167 {
2168 fprintf (file, "%s%d", sep, word);
2169 sep = ", ";
2170 }
2171 fprintf (file, ")");
2172 }
2173 }
2174 }
2175 fprintf (file, "\n");
2176 }
2177
2178
2179 /* Dump dataflow info. */
2180
2181 void
2182 df_dump (FILE *file)
2183 {
2184 basic_block bb;
2185 df_dump_start (file);
2186
2187 FOR_ALL_BB_FN (bb, cfun)
2188 {
2189 df_print_bb_index (bb, file);
2190 df_dump_top (bb, file);
2191 df_dump_bottom (bb, file);
2192 }
2193
2194 fprintf (file, "\n");
2195 }
2196
2197
2198 /* Dump dataflow info for df->blocks_to_analyze. */
2199
2200 void
2201 df_dump_region (FILE *file)
2202 {
2203 if (df->blocks_to_analyze)
2204 {
2205 bitmap_iterator bi;
2206 unsigned int bb_index;
2207
2208 fprintf (file, "\n\nstarting region dump\n");
2209 df_dump_start (file);
2210
2211 EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
2212 {
2213 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
2214 dump_bb (file, bb, 0, TDF_DETAILS);
2215 }
2216 fprintf (file, "\n");
2217 }
2218 else
2219 df_dump (file);
2220 }
2221
2222
2223 /* Dump the introductory information for each problem defined. */
2224
2225 void
2226 df_dump_start (FILE *file)
2227 {
2228 int i;
2229
2230 if (!df || !file)
2231 return;
2232
2233 fprintf (file, "\n\n%s\n", current_function_name ());
2234 fprintf (file, "\nDataflow summary:\n");
2235 if (df->blocks_to_analyze)
2236 fprintf (file, "def_info->table_size = %d, use_info->table_size = %d\n",
2237 DF_DEFS_TABLE_SIZE (), DF_USES_TABLE_SIZE ());
2238
2239 for (i = 0; i < df->num_problems_defined; i++)
2240 {
2241 struct dataflow *dflow = df->problems_in_order[i];
2242 if (dflow->computed)
2243 {
2244 df_dump_problem_function fun = dflow->problem->dump_start_fun;
2245 if (fun)
2246 fun (file);
2247 }
2248 }
2249 }
2250
2251
2252 /* Dump the top or bottom of the block information for BB. */
2253 static void
2254 df_dump_bb_problem_data (basic_block bb, FILE *file, bool top)
2255 {
2256 int i;
2257
2258 if (!df || !file)
2259 return;
2260
2261 for (i = 0; i < df->num_problems_defined; i++)
2262 {
2263 struct dataflow *dflow = df->problems_in_order[i];
2264 if (dflow->computed)
2265 {
2266 df_dump_bb_problem_function bbfun;
2267
2268 if (top)
2269 bbfun = dflow->problem->dump_top_fun;
2270 else
2271 bbfun = dflow->problem->dump_bottom_fun;
2272
2273 if (bbfun)
2274 bbfun (bb, file);
2275 }
2276 }
2277 }
2278
2279 /* Dump the top of the block information for BB. */
2280
2281 void
2282 df_dump_top (basic_block bb, FILE *file)
2283 {
2284 df_dump_bb_problem_data (bb, file, /*top=*/true);
2285 }
2286
2287 /* Dump the bottom of the block information for BB. */
2288
2289 void
2290 df_dump_bottom (basic_block bb, FILE *file)
2291 {
2292 df_dump_bb_problem_data (bb, file, /*top=*/false);
2293 }
2294
2295
2296 /* Dump information about INSN just before or after dumping INSN itself. */
2297 static void
2298 df_dump_insn_problem_data (const_rtx insn, FILE *file, bool top)
2299 {
2300 int i;
2301
2302 if (!df || !file)
2303 return;
2304
2305 for (i = 0; i < df->num_problems_defined; i++)
2306 {
2307 struct dataflow *dflow = df->problems_in_order[i];
2308 if (dflow->computed)
2309 {
2310 df_dump_insn_problem_function insnfun;
2311
2312 if (top)
2313 insnfun = dflow->problem->dump_insn_top_fun;
2314 else
2315 insnfun = dflow->problem->dump_insn_bottom_fun;
2316
2317 if (insnfun)
2318 insnfun (insn, file);
2319 }
2320 }
2321 }
2322
2323 /* Dump information about INSN before dumping INSN itself. */
2324
2325 void
2326 df_dump_insn_top (const_rtx insn, FILE *file)
2327 {
2328 df_dump_insn_problem_data (insn, file, /*top=*/true);
2329 }
2330
2331 /* Dump information about INSN after dumping INSN itself. */
2332
2333 void
2334 df_dump_insn_bottom (const_rtx insn, FILE *file)
2335 {
2336 df_dump_insn_problem_data (insn, file, /*top=*/false);
2337 }
2338
2339
2340 static void
2341 df_ref_dump (df_ref ref, FILE *file)
2342 {
2343 fprintf (file, "%c%d(%d)",
2344 DF_REF_REG_DEF_P (ref)
2345 ? 'd'
2346 : (DF_REF_FLAGS (ref) & DF_REF_IN_NOTE) ? 'e' : 'u',
2347 DF_REF_ID (ref),
2348 DF_REF_REGNO (ref));
2349 }
2350
2351 void
2352 df_refs_chain_dump (df_ref *ref_rec, bool follow_chain, FILE *file)
2353 {
2354 fprintf (file, "{ ");
2355 while (*ref_rec)
2356 {
2357 df_ref ref = *ref_rec;
2358 df_ref_dump (ref, file);
2359 if (follow_chain)
2360 df_chain_dump (DF_REF_CHAIN (ref), file);
2361 ref_rec++;
2362 }
2363 fprintf (file, "}");
2364 }
2365
2366
2367 /* Dump either a reg-def or reg-use chain. */
2368
2369 void
2370 df_regs_chain_dump (df_ref ref, FILE *file)
2371 {
2372 fprintf (file, "{ ");
2373 while (ref)
2374 {
2375 df_ref_dump (ref, file);
2376 ref = DF_REF_NEXT_REG (ref);
2377 }
2378 fprintf (file, "}");
2379 }
2380
2381
2382 static void
2383 df_mws_dump (struct df_mw_hardreg **mws, FILE *file)
2384 {
2385 while (*mws)
2386 {
2387 fprintf (file, "mw %c r[%d..%d]\n",
2388 (DF_MWS_REG_DEF_P (*mws)) ? 'd' : 'u',
2389 (*mws)->start_regno, (*mws)->end_regno);
2390 mws++;
2391 }
2392 }
2393
2394
2395 static void
2396 df_insn_uid_debug (unsigned int uid,
2397 bool follow_chain, FILE *file)
2398 {
2399 fprintf (file, "insn %d luid %d",
2400 uid, DF_INSN_UID_LUID (uid));
2401
2402 if (DF_INSN_UID_DEFS (uid))
2403 {
2404 fprintf (file, " defs ");
2405 df_refs_chain_dump (DF_INSN_UID_DEFS (uid), follow_chain, file);
2406 }
2407
2408 if (DF_INSN_UID_USES (uid))
2409 {
2410 fprintf (file, " uses ");
2411 df_refs_chain_dump (DF_INSN_UID_USES (uid), follow_chain, file);
2412 }
2413
2414 if (DF_INSN_UID_EQ_USES (uid))
2415 {
2416 fprintf (file, " eq uses ");
2417 df_refs_chain_dump (DF_INSN_UID_EQ_USES (uid), follow_chain, file);
2418 }
2419
2420 if (DF_INSN_UID_MWS (uid))
2421 {
2422 fprintf (file, " mws ");
2423 df_mws_dump (DF_INSN_UID_MWS (uid), file);
2424 }
2425 fprintf (file, "\n");
2426 }
2427
2428
2429 DEBUG_FUNCTION void
2430 df_insn_debug (rtx insn, bool follow_chain, FILE *file)
2431 {
2432 df_insn_uid_debug (INSN_UID (insn), follow_chain, file);
2433 }
2434
2435 DEBUG_FUNCTION void
2436 df_insn_debug_regno (rtx insn, FILE *file)
2437 {
2438 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
2439
2440 fprintf (file, "insn %d bb %d luid %d defs ",
2441 INSN_UID (insn), BLOCK_FOR_INSN (insn)->index,
2442 DF_INSN_INFO_LUID (insn_info));
2443 df_refs_chain_dump (DF_INSN_INFO_DEFS (insn_info), false, file);
2444
2445 fprintf (file, " uses ");
2446 df_refs_chain_dump (DF_INSN_INFO_USES (insn_info), false, file);
2447
2448 fprintf (file, " eq_uses ");
2449 df_refs_chain_dump (DF_INSN_INFO_EQ_USES (insn_info), false, file);
2450 fprintf (file, "\n");
2451 }
2452
2453 DEBUG_FUNCTION void
2454 df_regno_debug (unsigned int regno, FILE *file)
2455 {
2456 fprintf (file, "reg %d defs ", regno);
2457 df_regs_chain_dump (DF_REG_DEF_CHAIN (regno), file);
2458 fprintf (file, " uses ");
2459 df_regs_chain_dump (DF_REG_USE_CHAIN (regno), file);
2460 fprintf (file, " eq_uses ");
2461 df_regs_chain_dump (DF_REG_EQ_USE_CHAIN (regno), file);
2462 fprintf (file, "\n");
2463 }
2464
2465
2466 DEBUG_FUNCTION void
2467 df_ref_debug (df_ref ref, FILE *file)
2468 {
2469 fprintf (file, "%c%d ",
2470 DF_REF_REG_DEF_P (ref) ? 'd' : 'u',
2471 DF_REF_ID (ref));
2472 fprintf (file, "reg %d bb %d insn %d flag %#x type %#x ",
2473 DF_REF_REGNO (ref),
2474 DF_REF_BBNO (ref),
2475 DF_REF_IS_ARTIFICIAL (ref) ? -1 : DF_REF_INSN_UID (ref),
2476 DF_REF_FLAGS (ref),
2477 DF_REF_TYPE (ref));
2478 if (DF_REF_LOC (ref))
2479 {
2480 if (flag_dump_noaddr)
2481 fprintf (file, "loc #(#) chain ");
2482 else
2483 fprintf (file, "loc %p(%p) chain ", (void *)DF_REF_LOC (ref),
2484 (void *)*DF_REF_LOC (ref));
2485 }
2486 else
2487 fprintf (file, "chain ");
2488 df_chain_dump (DF_REF_CHAIN (ref), file);
2489 fprintf (file, "\n");
2490 }
2491 \f
2492 /* Functions for debugging from GDB. */
2493
2494 DEBUG_FUNCTION void
2495 debug_df_insn (rtx insn)
2496 {
2497 df_insn_debug (insn, true, stderr);
2498 debug_rtx (insn);
2499 }
2500
2501
2502 DEBUG_FUNCTION void
2503 debug_df_reg (rtx reg)
2504 {
2505 df_regno_debug (REGNO (reg), stderr);
2506 }
2507
2508
2509 DEBUG_FUNCTION void
2510 debug_df_regno (unsigned int regno)
2511 {
2512 df_regno_debug (regno, stderr);
2513 }
2514
2515
2516 DEBUG_FUNCTION void
2517 debug_df_ref (df_ref ref)
2518 {
2519 df_ref_debug (ref, stderr);
2520 }
2521
2522
2523 DEBUG_FUNCTION void
2524 debug_df_defno (unsigned int defno)
2525 {
2526 df_ref_debug (DF_DEFS_GET (defno), stderr);
2527 }
2528
2529
2530 DEBUG_FUNCTION void
2531 debug_df_useno (unsigned int defno)
2532 {
2533 df_ref_debug (DF_USES_GET (defno), stderr);
2534 }
2535
2536
2537 DEBUG_FUNCTION void
2538 debug_df_chain (struct df_link *link)
2539 {
2540 df_chain_dump (link, stderr);
2541 fputc ('\n', stderr);
2542 }
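
/* Editor's note, not part of the original source: the DEBUG_FUNCTION
   entry points above are meant to be invoked by hand from the
   debugger while GCC is stopped inside a pass, for example:

     (gdb) call debug_df_insn (insn)
     (gdb) call debug_df_regno (42)
     (gdb) call debug_df_chain (link)

   The argument names are whatever happens to be live in the current
   frame; the register number 42 is only an illustration.  */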