1 /* Allocation for dataflow support routines.
2 Copyright (C) 1999-2015 Free Software Foundation, Inc.
3 Originally contributed by Michael P. Hayes
4 (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
5 Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
6 and Kenneth Zadeck (zadeck@naturalbridge.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 /*
25 OVERVIEW:
26
27 The files in this collection (df*.c, df.h) provide a general framework
28 for solving dataflow problems.  The global dataflow solution is computed
29 using an efficient implementation of iterative dataflow analysis.
30
31 The file df-problems.c provides problem instances for the most common
32 dataflow problems: reaching defs, upward exposed uses, live variables,
33 uninitialized variables, def-use chains, and use-def chains. However,
34 the interface allows other dataflow problems to be defined as well.
35
36 Dataflow analysis is available in most of the rtl backend (the parts
37 between pass_df_initialize and pass_df_finish). It is quite likely
38 that these boundaries will be expanded in the future. The only
39 requirement is that there be a correct control flow graph.
40
41 There are three variations of the live variable problem that are
42 available whenever dataflow is available. The LR problem finds the
43 areas that can reach a use of a variable; the UR problem finds the
44 areas that can be reached from a definition of a variable. The LIVE
45 problem finds the intersection of these two areas.
46
47 There are several optional problems. These can be enabled when they
48 are needed and disabled when they are not needed.
49
50 Dataflow problems are generally solved in three layers. The bottom
51 layer is called scanning where a data structure is built for each rtl
52 insn that describes the set of defs and uses of that insn. Scanning
53 is generally kept up to date, i.e. as the insns changes, the scanned
54 version of that insn changes also. There are various mechanisms for
55 making this happen and are described in the INCREMENTAL SCANNING
56 section.
57
58 In the middle layer, basic blocks are scanned to produce transfer
59 functions which describe the effects of that block on the global
60 dataflow solution.  The transfer functions are only rebuilt if
61 some instruction within the block has changed.
62
63 The top layer is the dataflow solution itself. The dataflow solution
64 is computed by using an efficient iterative solver and the transfer
65 functions. The dataflow solution must be recomputed whenever the
66 control flow changes or one of the transfer functions changes.
67
68
69 USAGE:
70
71 Here is an example of using the dataflow routines.
72
73 df_[chain,live,note,rd]_add_problem (flags);
74
75 df_set_blocks (blocks);
76
77 df_analyze ();
78
79 df_dump (stderr);
80
81 df_finish_pass (false);
82
83 DF_[chain,live,note,rd]_ADD_PROBLEM adds a problem, defined by an
84 instance of struct df_problem, to the set of problems solved in this
85 instance of df. All calls to add a problem for a given instance of df
86 must occur before the first call to DF_ANALYZE.
87
88 Problems can be dependent on other problems. For instance, solving
89 def-use or use-def chains is dependent on solving reaching
90 definitions. As long as these dependencies are listed in the problem
91 definition, the order of adding the problems is not material.
92 Otherwise, the problems will be solved in the order of calls to
93 df_add_problem.  Note that it is not necessary to add any problems; in
94 that case, df will just be used to do the scanning.
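
As a minimal sketch (assuming the DF_DU_CHAIN flag from df.h; the
choice of problem here is purely illustrative):

   df_chain_add_problem (DF_DU_CHAIN);

Because the chain problem lists reaching definitions (RD) as its
dependent problem, this single call is enough to also schedule RD
to be solved by the next call to df_analyze.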
95
96
97
98 DF_SET_BLOCKS is an optional call used to define a region of the
99 function on which the analysis will be performed. The normal case is
100 to analyze the entire function and no call to df_set_blocks is made.
101 DF_SET_BLOCKS only affects the blocks that are considered when computing
102 the transfer functions and final solution. The insn level information
103 is always kept up to date.
104
105 When a subset is given, the analysis behaves as if the function only
106 contains those blocks and any edges that occur directly between the
107 blocks in the set. Care should be taken to call df_set_blocks right
108 before the call to analyze in order to eliminate the possibility that
109 optimizations that reorder blocks invalidate the bitvector.
110
111 DF_ANALYZE causes all of the defined problems to be (re)solved. When
112 DF_ANALYZE completes, the IN and OUT sets for each basic block
113 contain the computed information.  The DF_*_BB_INFO macros can be used
114 to access these bitvectors.  All deferred rescannings are done before
115 the transfer functions are recomputed.
116
117 DF_DUMP can then be called to dump the information produced to some
118 file. This calls DF_DUMP_START, to print the information that is not
119 basic block specific, and then calls DF_DUMP_TOP and DF_DUMP_BOTTOM
120 for each block to print the basic block specific information.  These parts
121 can all be called separately as part of a larger dump function.
122
123
124 DF_FINISH_PASS causes df_remove_problem to be called on all of the
125 optional problems. It also causes any insns whose scanning has been
126 deferred to be rescanned, and it clears all of the changeable flags.
127 Setting the pass manager TODO_df_finish flag causes this function to
128 be run. However, the pass manager will call df_finish_pass AFTER the
129 pass dumping has been done, so if you want to see the results of the
130 optional problems in the pass dumps, use the TODO flag rather than
131 calling the function yourself.
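
A minimal sketch of the TODO flag route (the pass_data below is
hypothetical; the field order matches the pass_data initializers
later in this file):

   const pass_data pass_data_example =
     { RTL_PASS, "example", OPTGROUP_NONE, TV_NONE, 0, 0, 0, 0,
       TODO_df_finish };

With this, the pass manager removes the optional problems after the
pass dump, so their results are still visible in the dump file.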
132
133 INCREMENTAL SCANNING
134
135 There are four ways of doing the incremental scanning:
136
137 1) Immediate rescanning - Calls to df_insn_rescan, df_notes_rescan,
138 df_bb_delete, and df_insn_change_bb have been added to most of
139 the low level service functions that maintain the cfg and change
140 rtl.  Calling any of these routines may cause some number of insns
141 to be rescanned.
142
143 For most modern rtl passes, this is certainly the easiest way to
144 manage rescanning the insns. This technique also has the advantage
145 that the scanning information is always correct and can be relied
146 upon even after changes have been made to the instructions. This
147 technique is contraindicated in several cases:
148
149 a) If def-use chains OR use-def chains (but not both) are built,
150 using this is SIMPLY WRONG. The problem is that when a ref is
151 deleted that is the target of an edge, there is not enough
152 information to efficiently find the source of the edge and
153 delete the edge. This leaves a dangling reference that may
154 cause problems.
155
156 b) If def-use chains AND use-def chains are built, this may
157 produce unexpected results. The problem is that the incremental
158 scanning of an insn does not know how to repair the chains that
159 point into an insn when the insn changes. So the incremental
160 scanning just deletes the chains that enter and exit the insn
161 being changed. The dangling reference issue in (a) is not a
162 problem here, but if the pass depends on the chains being
163 maintained after insns have been modified, this technique will
164 not do the correct thing.
165
166 c) If the pass modifies insns several times, this incremental
167 updating may be expensive.
168
169 d) If the pass modifies all of the insns, as does register
170 allocation, it is simply better to rescan the entire function.
171
172 2) Deferred rescanning - Calls to df_insn_rescan, df_notes_rescan, and
173 df_insn_delete do not immediately change the insn but instead make
174 a note that the insn needs to be rescanned. The next call to
175 df_analyze, df_finish_pass, or df_process_deferred_rescans will
176 cause all of the pending rescans to be processed.
177
178 This is the technique of choice if either 1a, 1b, or 1c are issues
179 in the pass. In the case of 1a or 1b, a call to df_finish_pass
180 (either manually or via TODO_df_finish) should be made before the
181 next call to df_analyze or df_process_deferred_rescans.
182
183 This mode is also used by a few passes that still rely on note_uses,
184 note_stores and rtx iterators instead of using the DF data. This
185 can be said to fall under case 1c.
186
187 To enable this mode, call df_set_flags (DF_DEFER_INSN_RESCAN);
188 a short sketch follows item 4 below.  (This mode can be cleared by
189 calling df_clear_flags (DF_DEFER_INSN_RESCAN), but this does not
190 cause the deferred insns to be rescanned.)
191
192 3) Total rescanning - In this mode the rescanning is disabled.
193 Only when insns are deleted is the df information associated with
194 them also deleted.  At the end of the pass, a call must be made to
195 df_insn_rescan_all. This method is used by the register allocator
196 since it generally changes each insn multiple times (once for each ref)
197 and does not need to make use of the updated scanning information.
198
199 4) Do it yourself - In this mechanism, the pass updates the insns
200 itself using the low level df primitives. Currently no pass does
201 this, but it has the advantage that it is quite efficient given
202 that the pass generally has exact knowledge of what it is changing.
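
A minimal sketch of the deferred mode described in item 2 (INSN and
the transformation itself are hypothetical placeholders):

   df_set_flags (DF_DEFER_INSN_RESCAN);
   ... transform the rtl ...
   df_insn_rescan (insn);
   ... more transformations and rescan requests ...
   df_process_deferred_rescans ();

All of the queued insns are rescanned by the final call (or by the
next df_analyze or df_finish_pass, as noted above).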
203
204 DATA STRUCTURES
205
206 Scanning produces a `struct df_ref' data structure (ref) that is allocated
207 for every register reference (def or use) and records the insn
208 and bb within which the ref is found.  The refs are linked together in
209 chains of uses and defs for each insn and for each register. Each ref
210 also has a chain field that links all the use refs for a def or all
211 the def refs for a use. This is used to create use-def or def-use
212 chains.
213
214 Different optimizations have different needs. Ultimately, only
215 register allocation and schedulers should be using the bitmaps
216 produced for the live register and uninitialized register problems.
217 The rest of the backend should be upgraded to using and maintaining
218 the linked information such as def-use or use-def chains.
219
220
221 PHILOSOPHY:
222
223 While incremental bitmaps are not worthwhile to maintain, incremental
224 chains may be perfectly reasonable. The fastest way to build chains
225 from scratch or after significant modifications is to build reaching
226 definitions (RD) and build the chains from this.
227
228 However, general algorithms for maintaining use-def or def-use chains
229 are not practical.  The amount of work to recompute any
230 chain after an arbitrary change is large. However, with a modest
231 amount of work it is generally possible to have the application that
232 uses the chains keep them up to date. The high level knowledge of
233 what is really happening is essential to crafting efficient
234 incremental algorithms.
235
236 As for the bit vector problems, there is no interface to give a set of
237 blocks over which to resolve the iteration.  In general, restarting a
238 dataflow iteration is difficult and expensive. Again, the best way to
239 keep the dataflow information up to date (if this is really what is
240 needed) is to formulate a problem specific solution.
241
242 There are fine grained calls for creating and deleting references from
243 instructions in df-scan.c. However, these are not currently connected
244 to the engine that resolves the dataflow equations.
245
246
247 DATA STRUCTURES:
248
249 The basic object is a DF_REF (reference) and this may either be a
250 DEF (definition) or a USE of a register.
251
252 These are linked into a variety of lists; namely reg-def, reg-use,
253 insn-def, insn-use, def-use, and use-def lists. For example, the
254 reg-def lists contain all the locations that define a given register
255 while the insn-use lists contain all the locations that use a
256 register.
257
258 Note that the reg-def and reg-use chains are generally short for
259 pseudos and long for the hard registers.
260
261 ACCESSING INSNS:
262
263 1) The df insn information is kept in an array of DF_INSN_INFO objects.
264 The array is indexed by insn uid, and every DF_REF points to the
265 DF_INSN_INFO object of the insn that contains the reference.
266
267 2) Each insn has three sets of refs, which are linked into one of three
268 lists: The insn's defs list (accessed by the DF_INSN_INFO_DEFS,
269 DF_INSN_DEFS, or DF_INSN_UID_DEFS macros), the insn's uses list
270 (accessed by the DF_INSN_INFO_USES, DF_INSN_USES, or
271 DF_INSN_UID_USES macros) or the insn's eq_uses list (accessed by the
272 DF_INSN_INFO_EQ_USES, DF_INSN_EQ_USES or DF_INSN_UID_EQ_USES macros).
273 The latter list is the list of references in REG_EQUAL or REG_EQUIV
274 notes. These macros produce a ref (or NULL), the rest of the list
275 can be obtained by traversal of the NEXT_REF field (accessed by the
276 DF_REF_NEXT_REF macro); a short example follows this list.  There is
277 no significance to the ordering of the uses or refs in an instruction.
278
279 3) Each insn has a logical uid field (LUID) which is stored in the
280 DF_INSN_INFO object for the insn. The LUID field is accessed by
281 the DF_INSN_INFO_LUID, DF_INSN_LUID, and DF_INSN_UID_LUID macros.
282 When properly set, the LUID is an integer that numbers each insn in
283 the basic block, in order from the start of the block.
284 The numbers are only correct after a call to df_analyze. They will
285 rot after insns are added, deleted, or moved around.
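
A short sketch of walking one insn's refs with the macros from item 2
(INSN and note_defined_regno are hypothetical; only the macro names
come from the description above):

   df_ref ref;
   for (ref = DF_INSN_DEFS (insn); ref; ref = DF_REF_NEXT_REF (ref))
     note_defined_regno (DF_REF_REGNO (ref));

The same loop shape works for DF_INSN_USES and DF_INSN_EQ_USES.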
286
287 ACCESSING REFS:
288
289 There are 4 ways to obtain access to refs:
290
291 1) References are divided into two categories, REAL and ARTIFICIAL.
292
293 REAL refs are associated with instructions.
294
295 ARTIFICIAL refs are associated with basic blocks. The heads of
296 these lists can be accessed by calling df_get_artificial_defs or
297 df_get_artificial_uses for the particular basic block.
298
299 Artificial defs and uses occur both at the beginning and ends of blocks.
300
301 For blocks that are at the destination of eh edges, the
302 artificial uses and defs occur at the beginning. The defs relate
303 to the registers specified in EH_RETURN_DATA_REGNO and the uses
304 relate to the registers specified in EH_USES.  Logically these
305 defs and uses should really occur along the eh edge, but there is
306 no convenient way to do this.  Artificial refs that occur at the
307 beginning of the block have the DF_REF_AT_TOP flag set.
308
309 Artificial uses occur at the end of all blocks. These arise from
310 the hard registers that are always live, such as the stack
311 register and are put there to keep the code from forgetting about
312 them.
313
314 Artificial defs occur at the end of the entry block. These arise
315 from registers that are live at entry to the function.
316
317 2) There are three types of refs: defs, uses and eq_uses. (Eq_uses are
318 uses that appear inside a REG_EQUAL or REG_EQUIV note.)
319
320 All of the eq_uses, uses and defs associated with each pseudo or
321 hard register may be linked in a bidirectional chain. These are
322 called reg-use or reg-def chains.  If the changeable flag
323 DF_EQ_NOTES is set when the chains are built, the eq_uses will be
324 treated like uses. If it is not set they are ignored.
325
326 The first use, eq_use or def for a register can be obtained using
327 the DF_REG_USE_CHAIN, DF_REG_EQ_USE_CHAIN or DF_REG_DEF_CHAIN
328 macros. Subsequent uses for the same regno can be obtained by
329 following the next_reg field of the ref. The number of elements in
330 each of the chains can be found by using the DF_REG_USE_COUNT,
331 DF_REG_EQ_USE_COUNT or DF_REG_DEF_COUNT macros.
332
333 In previous versions of this code, these chains were ordered. It
334 has not been practical to continue this practice.  (An example of walking a reg-def chain follows this list.)
335
336 3) If def-use or use-def chains are built, these can be traversed to
337 get to other refs. If the flag DF_EQ_NOTES has been set, the chains
338 include the eq_uses. Otherwise these are ignored when building the
339 chains.
340
341 4) An array of all of the uses (and an array of all of the defs) can
342 be built. These arrays are indexed by the value in the id
343 structure. These arrays are only lazily kept up to date, and that
344 process can be expensive. To have these arrays built, call
345 df_reorganize_defs or df_reorganize_uses. If the flag DF_EQ_NOTES
346 has been set the array will contain the eq_uses. Otherwise these
347 are ignored when building the array and assigning the ids. Note
348 that the values in the id field of a ref may change across calls to
349 df_analyze or df_reorganize_defs or df_reorganize_uses.
350
351 If the only use of this array is to find all of the refs, it is
352 better to traverse all of the registers and then traverse all of
353 reg-use or reg-def chains.
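
A short sketch of walking a reg-def chain as described in item 2
(regno is a hypothetical register number; DF_REF_NEXT_REG is assumed
to be the accessor for the next_reg field mentioned there):

   df_ref def;
   unsigned int n = 0;
   for (def = DF_REG_DEF_CHAIN (regno); def; def = DF_REF_NEXT_REG (def))
     n++;
   gcc_assert (n == DF_REG_DEF_COUNT (regno));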
354
355 NOTES:
356
357 Embedded addressing side-effects, such as POST_INC or PRE_INC, generate
358 both a use and a def. These are both marked read/write to show that they
359 are dependent. For example, (set (reg 40) (mem (post_inc (reg 42))))
360 will generate a use of reg 42 followed by a def of reg 42 (both marked
361 read/write). Similarly, (set (reg 40) (mem (pre_dec (reg 41))))
362 generates a use of reg 41 then a def of reg 41 (both marked read/write),
363 even though reg 41 is decremented before it is used for the memory
364 address in this second example.
365
366 A set to a REG inside a ZERO_EXTRACT, or a set to a non-paradoxical SUBREG
367 for which the number of word_mode units covered by the outer mode is
368 smaller than that covered by the inner mode, invokes a read-modify-write
369 operation. We generate both a use and a def and again mark them
370 read/write.
371
372 Paradoxical subreg writes do not leave a trace of the old content, so they
373 are write-only operations.
374 */
375
376
377 #include "config.h"
378 #include "system.h"
379 #include "coretypes.h"
380 #include "tm.h"
381 #include "rtl.h"
382 #include "tm_p.h"
383 #include "insn-config.h"
384 #include "recog.h"
385 #include "hashtab.h"
386 #include "hash-set.h"
387 #include "vec.h"
388 #include "machmode.h"
389 #include "hard-reg-set.h"
390 #include "input.h"
391 #include "function.h"
392 #include "regs.h"
393 #include "alloc-pool.h"
394 #include "flags.h"
395 #include "predict.h"
396 #include "dominance.h"
397 #include "cfg.h"
398 #include "cfganal.h"
399 #include "basic-block.h"
400 #include "sbitmap.h"
401 #include "bitmap.h"
402 #include "df.h"
403 #include "tree-pass.h"
404 #include "params.h"
405 #include "cfgloop.h"
406
407 static void *df_get_bb_info (struct dataflow *, unsigned int);
408 static void df_set_bb_info (struct dataflow *, unsigned int, void *);
409 static void df_clear_bb_info (struct dataflow *, unsigned int);
410 #ifdef DF_DEBUG_CFG
411 static void df_set_clean_cfg (void);
412 #endif
413
414 /* The obstack on which regsets are allocated. */
415 struct bitmap_obstack reg_obstack;
416
417 /* An obstack for bitmaps not related to specific dataflow problems.
418 This obstack should e.g. be used for bitmaps with a short lifetime
419 such as temporary bitmaps. */
420
421 bitmap_obstack df_bitmap_obstack;
422
423
424 /*----------------------------------------------------------------------------
425 Functions to create, destroy and manipulate an instance of df.
426 ----------------------------------------------------------------------------*/
427
428 struct df_d *df;
429
430 /* Add PROBLEM (and any dependent problems) to the DF instance. */
431
432 void
433 df_add_problem (struct df_problem *problem)
434 {
435 struct dataflow *dflow;
436 int i;
437
438 /* First try to add the dependent problem. */
439 if (problem->dependent_problem)
440 df_add_problem (problem->dependent_problem);
441
442 /* Check to see if this problem has already been defined. If it
443 has, just return; if not, add it to the end of the
444 vector. */
445 dflow = df->problems_by_index[problem->id];
446 if (dflow)
447 return;
448
449 /* Make a new one and add it to the end. */
450 dflow = XCNEW (struct dataflow);
451 dflow->problem = problem;
452 dflow->computed = false;
453 dflow->solutions_dirty = true;
454 df->problems_by_index[dflow->problem->id] = dflow;
455
456 /* Keep the defined problems ordered by index. This solves the
457 problem that RI will use the information from UREC if UREC has
458 been defined, or from LIVE if LIVE is defined and otherwise LR.
459 However for this to work, the computation of RI must be pushed
460 after whichever of those problems is defined, but we do not
461 require any of those except for LR to have actually been
462 defined. */
463 df->num_problems_defined++;
464 for (i = df->num_problems_defined - 2; i >= 0; i--)
465 {
466 if (problem->id < df->problems_in_order[i]->problem->id)
467 df->problems_in_order[i+1] = df->problems_in_order[i];
468 else
469 {
470 df->problems_in_order[i+1] = dflow;
471 return;
472 }
473 }
474 df->problems_in_order[0] = dflow;
475 }
476
477
478 /* Set the CHANGEABLE_FLAGS bits in the global DF instance.  The old
479 flags are returned.  If a flag is not allowed to be changed this
480 will fail if checking is enabled. */
481 int
482 df_set_flags (int changeable_flags)
483 {
484 int old_flags = df->changeable_flags;
485 df->changeable_flags |= changeable_flags;
486 return old_flags;
487 }
488
489
490 /* Clear the CHANGEABLE_FLAGS bits in the global DF instance.  The
491 old flags are returned.  If a flag is not allowed to be changed
492 this will fail if checking is enabled. */
493 int
494 df_clear_flags (int changeable_flags)
495 {
496 int old_flags = df->changeable_flags;
497 df->changeable_flags &= ~changeable_flags;
498 return old_flags;
499 }
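
/* A minimal usage sketch, not taken from any particular pass: because
   both functions return the previous flags, a pass can enable a flag
   temporarily and restore the caller's setting afterwards.

       int saved = df_set_flags (DF_DEFER_INSN_RESCAN);
       ... do the transformation ...
       if (!(saved & DF_DEFER_INSN_RESCAN))
         df_clear_flags (DF_DEFER_INSN_RESCAN);

   The elided lines stand for whatever work the pass performs.  */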
500
501
502 /* Set the blocks that are to be considered for analysis. If this is
503 not called or is called with null, the entire function is
504 analyzed. */
505
506 void
507 df_set_blocks (bitmap blocks)
508 {
509 if (blocks)
510 {
511 if (dump_file)
512 bitmap_print (dump_file, blocks, "setting blocks to analyze ", "\n");
513 if (df->blocks_to_analyze)
514 {
515 /* This block of code is executed to change the focus from one
516 subset to another. */
517 int p;
518 bitmap_head diff;
519 bitmap_initialize (&diff, &df_bitmap_obstack);
520 bitmap_and_compl (&diff, df->blocks_to_analyze, blocks);
521 for (p = 0; p < df->num_problems_defined; p++)
522 {
523 struct dataflow *dflow = df->problems_in_order[p];
524 if (dflow->optional_p && dflow->problem->reset_fun)
525 dflow->problem->reset_fun (df->blocks_to_analyze);
526 else if (dflow->problem->free_blocks_on_set_blocks)
527 {
528 bitmap_iterator bi;
529 unsigned int bb_index;
530
531 EXECUTE_IF_SET_IN_BITMAP (&diff, 0, bb_index, bi)
532 {
533 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
534 if (bb)
535 {
536 void *bb_info = df_get_bb_info (dflow, bb_index);
537 dflow->problem->free_bb_fun (bb, bb_info);
538 df_clear_bb_info (dflow, bb_index);
539 }
540 }
541 }
542 }
543
544 bitmap_clear (&diff);
545 }
546 else
547 {
548 /* This block of code is executed to change the focus from
549 the entire function to a subset. */
550 bitmap_head blocks_to_reset;
551 bool initialized = false;
552 int p;
553 for (p = 0; p < df->num_problems_defined; p++)
554 {
555 struct dataflow *dflow = df->problems_in_order[p];
556 if (dflow->optional_p && dflow->problem->reset_fun)
557 {
558 if (!initialized)
559 {
560 basic_block bb;
561 bitmap_initialize (&blocks_to_reset, &df_bitmap_obstack);
562 FOR_ALL_BB_FN (bb, cfun)
563 {
564 bitmap_set_bit (&blocks_to_reset, bb->index);
565 }
initialized = true;
566 }
567 dflow->problem->reset_fun (&blocks_to_reset);
568 }
569 }
570 if (initialized)
571 bitmap_clear (&blocks_to_reset);
572
573 df->blocks_to_analyze = BITMAP_ALLOC (&df_bitmap_obstack);
574 }
575 bitmap_copy (df->blocks_to_analyze, blocks);
576 df->analyze_subset = true;
577 }
578 else
579 {
580 /* This block is executed to reset the focus to the entire
581 function. */
582 if (dump_file)
583 fprintf (dump_file, "clearing blocks_to_analyze\n");
584 if (df->blocks_to_analyze)
585 {
586 BITMAP_FREE (df->blocks_to_analyze);
587 df->blocks_to_analyze = NULL;
588 }
589 df->analyze_subset = false;
590 }
591
592 /* Setting the blocks causes the refs to be unorganized since only
593 the refs in the blocks are seen. */
594 df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE);
595 df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE);
596 df_mark_solutions_dirty ();
597 }
598
599
600 /* Delete a DFLOW problem (and any problems that depend on this
601 problem). */
602
603 void
604 df_remove_problem (struct dataflow *dflow)
605 {
606 struct df_problem *problem;
607 int i;
608
609 if (!dflow)
610 return;
611
612 problem = dflow->problem;
613 gcc_assert (problem->remove_problem_fun);
614
615 /* Delete any problems that depended on this problem first. */
616 for (i = 0; i < df->num_problems_defined; i++)
617 if (df->problems_in_order[i]->problem->dependent_problem == problem)
618 df_remove_problem (df->problems_in_order[i]);
619
620 /* Now remove this problem. */
621 for (i = 0; i < df->num_problems_defined; i++)
622 if (df->problems_in_order[i] == dflow)
623 {
624 int j;
625 for (j = i + 1; j < df->num_problems_defined; j++)
626 df->problems_in_order[j-1] = df->problems_in_order[j];
627 df->problems_in_order[j-1] = NULL;
628 df->num_problems_defined--;
629 break;
630 }
631
632 (problem->remove_problem_fun) ();
633 df->problems_by_index[problem->id] = NULL;
634 }
635
636
637 /* Remove all of the problems that are not permanent. Scanning, LR
638 and (at -O2 or higher) LIVE are permanent, the rest are removable.
639 Also clear all of the changeable_flags. */
640
641 void
642 df_finish_pass (bool verify ATTRIBUTE_UNUSED)
643 {
644 int i;
645
646 #ifdef ENABLE_DF_CHECKING
647 int saved_flags;
648 #endif
649
650 if (!df)
651 return;
652
653 df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE);
654 df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE);
655
656 #ifdef ENABLE_DF_CHECKING
657 saved_flags = df->changeable_flags;
658 #endif
659
660 /* We iterate over problems by index as each problem removed will
661 cause problems_in_order to be reordered. */
662 for (i = 0; i < DF_LAST_PROBLEM_PLUS1; i++)
663 {
664 struct dataflow *dflow = df->problems_by_index[i];
665
666 if (dflow && dflow->optional_p)
667 df_remove_problem (dflow);
668 }
669
670 /* Clear all of the flags. */
671 df->changeable_flags = 0;
672 df_process_deferred_rescans ();
673
674 /* Set the focus back to the whole function. */
675 if (df->blocks_to_analyze)
676 {
677 BITMAP_FREE (df->blocks_to_analyze);
678 df->blocks_to_analyze = NULL;
679 df_mark_solutions_dirty ();
680 df->analyze_subset = false;
681 }
682
683 #ifdef ENABLE_DF_CHECKING
684 /* Verification will fail in DF_NO_INSN_RESCAN. */
685 if (!(saved_flags & DF_NO_INSN_RESCAN))
686 {
687 df_lr_verify_transfer_functions ();
688 if (df_live)
689 df_live_verify_transfer_functions ();
690 }
691
692 #ifdef DF_DEBUG_CFG
693 df_set_clean_cfg ();
694 #endif
695 #endif
696
697 #ifdef ENABLE_CHECKING
698 if (verify)
699 df->changeable_flags |= DF_VERIFY_SCHEDULED;
700 #endif
701 }
702
703
704 /* Set up the dataflow instance for the entire back end. */
705
706 static unsigned int
707 rest_of_handle_df_initialize (void)
708 {
709 gcc_assert (!df);
710 df = XCNEW (struct df_d);
711 df->changeable_flags = 0;
712
713 bitmap_obstack_initialize (&df_bitmap_obstack);
714
715 /* Set this to a conservative value. Stack_ptr_mod will compute it
716 correctly later. */
717 crtl->sp_is_unchanging = 0;
718
719 df_scan_add_problem ();
720 df_scan_alloc (NULL);
721
722 /* These three problems are permanent. */
723 df_lr_add_problem ();
724 if (optimize > 1)
725 df_live_add_problem ();
726
727 df->postorder = XNEWVEC (int, last_basic_block_for_fn (cfun));
728 df->postorder_inverted = XNEWVEC (int, last_basic_block_for_fn (cfun));
729 df->n_blocks = post_order_compute (df->postorder, true, true);
730 df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted);
731 gcc_assert (df->n_blocks == df->n_blocks_inverted);
732
733 df->hard_regs_live_count = XCNEWVEC (unsigned int, FIRST_PSEUDO_REGISTER);
734
735 df_hard_reg_init ();
736 /* After reload, some ports add certain bits to regs_ever_live so
737 this cannot be reset. */
738 df_compute_regs_ever_live (true);
739 df_scan_blocks ();
740 df_compute_regs_ever_live (false);
741 return 0;
742 }
743
744
745 namespace {
746
747 const pass_data pass_data_df_initialize_opt =
748 {
749 RTL_PASS, /* type */
750 "dfinit", /* name */
751 OPTGROUP_NONE, /* optinfo_flags */
752 TV_DF_SCAN, /* tv_id */
753 0, /* properties_required */
754 0, /* properties_provided */
755 0, /* properties_destroyed */
756 0, /* todo_flags_start */
757 0, /* todo_flags_finish */
758 };
759
760 class pass_df_initialize_opt : public rtl_opt_pass
761 {
762 public:
763 pass_df_initialize_opt (gcc::context *ctxt)
764 : rtl_opt_pass (pass_data_df_initialize_opt, ctxt)
765 {}
766
767 /* opt_pass methods: */
768 virtual bool gate (function *) { return optimize > 0; }
769 virtual unsigned int execute (function *)
770 {
771 return rest_of_handle_df_initialize ();
772 }
773
774 }; // class pass_df_initialize_opt
775
776 } // anon namespace
777
778 rtl_opt_pass *
779 make_pass_df_initialize_opt (gcc::context *ctxt)
780 {
781 return new pass_df_initialize_opt (ctxt);
782 }
783
784
785 namespace {
786
787 const pass_data pass_data_df_initialize_no_opt =
788 {
789 RTL_PASS, /* type */
790 "no-opt dfinit", /* name */
791 OPTGROUP_NONE, /* optinfo_flags */
792 TV_DF_SCAN, /* tv_id */
793 0, /* properties_required */
794 0, /* properties_provided */
795 0, /* properties_destroyed */
796 0, /* todo_flags_start */
797 0, /* todo_flags_finish */
798 };
799
800 class pass_df_initialize_no_opt : public rtl_opt_pass
801 {
802 public:
803 pass_df_initialize_no_opt (gcc::context *ctxt)
804 : rtl_opt_pass (pass_data_df_initialize_no_opt, ctxt)
805 {}
806
807 /* opt_pass methods: */
808 virtual bool gate (function *) { return optimize == 0; }
809 virtual unsigned int execute (function *)
810 {
811 return rest_of_handle_df_initialize ();
812 }
813
814 }; // class pass_df_initialize_no_opt
815
816 } // anon namespace
817
818 rtl_opt_pass *
819 make_pass_df_initialize_no_opt (gcc::context *ctxt)
820 {
821 return new pass_df_initialize_no_opt (ctxt);
822 }
823
824
825 /* Free all the dataflow info and the DF structure. This should be
826 called from the df_finish macro which also NULLs the parm. */
827
828 static unsigned int
829 rest_of_handle_df_finish (void)
830 {
831 int i;
832
833 gcc_assert (df);
834
835 for (i = 0; i < df->num_problems_defined; i++)
836 {
837 struct dataflow *dflow = df->problems_in_order[i];
838 dflow->problem->free_fun ();
839 }
840
841 free (df->postorder);
842 free (df->postorder_inverted);
843 free (df->hard_regs_live_count);
844 free (df);
845 df = NULL;
846
847 bitmap_obstack_release (&df_bitmap_obstack);
848 return 0;
849 }
850
851
852 namespace {
853
854 const pass_data pass_data_df_finish =
855 {
856 RTL_PASS, /* type */
857 "dfinish", /* name */
858 OPTGROUP_NONE, /* optinfo_flags */
859 TV_NONE, /* tv_id */
860 0, /* properties_required */
861 0, /* properties_provided */
862 0, /* properties_destroyed */
863 0, /* todo_flags_start */
864 0, /* todo_flags_finish */
865 };
866
867 class pass_df_finish : public rtl_opt_pass
868 {
869 public:
870 pass_df_finish (gcc::context *ctxt)
871 : rtl_opt_pass (pass_data_df_finish, ctxt)
872 {}
873
874 /* opt_pass methods: */
875 virtual unsigned int execute (function *)
876 {
877 return rest_of_handle_df_finish ();
878 }
879
880 }; // class pass_df_finish
881
882 } // anon namespace
883
884 rtl_opt_pass *
885 make_pass_df_finish (gcc::context *ctxt)
886 {
887 return new pass_df_finish (ctxt);
888 }
889
890
891
892
893 \f
894 /*----------------------------------------------------------------------------
895 The general data flow analysis engine.
896 ----------------------------------------------------------------------------*/
897
898 /* Return the time at which BB was last visited. */
899 #define BB_LAST_CHANGE_AGE(bb) ((ptrdiff_t)(bb)->aux)
900
901 /* Helper function for df_worklist_dataflow.
902 Propagate the dataflow forward.
903 Given a BB_INDEX, do the dataflow propagation
904 and set bits on for successors in PENDING
905 if the out set of the dataflow has changed.
906
907 AGE specifies the time when BB was last visited.
908 An AGE of 0 means we are visiting it for the first time and need to
909 compute the transfer function to initialize the data structures.
910 Otherwise we re-do the transfer function only if something changed
911 while computing the confluence functions.
912 We need to compute the confluence functions only for basic blocks
913 that are younger than the last visit of BB.
914
915 Return true if BB info has changed.  This is always the case
916 on the first visit. */
917
918 static bool
919 df_worklist_propagate_forward (struct dataflow *dataflow,
920 unsigned bb_index,
921 unsigned *bbindex_to_postorder,
922 bitmap pending,
923 sbitmap considered,
924 ptrdiff_t age)
925 {
926 edge e;
927 edge_iterator ei;
928 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
929 bool changed = !age;
930
931 /* Calculate <conf_op> of incoming edges. */
932 if (EDGE_COUNT (bb->preds) > 0)
933 FOR_EACH_EDGE (e, ei, bb->preds)
934 {
935 if (age <= BB_LAST_CHANGE_AGE (e->src)
936 && bitmap_bit_p (considered, e->src->index))
937 changed |= dataflow->problem->con_fun_n (e);
938 }
939 else if (dataflow->problem->con_fun_0)
940 dataflow->problem->con_fun_0 (bb);
941
942 if (changed
943 && dataflow->problem->trans_fun (bb_index))
944 {
945 /* The out set of this block has changed.
946 Propagate to the outgoing blocks. */
947 FOR_EACH_EDGE (e, ei, bb->succs)
948 {
949 unsigned ob_index = e->dest->index;
950
951 if (bitmap_bit_p (considered, ob_index))
952 bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
953 }
954 return true;
955 }
956 return false;
957 }
958
959
960 /* Helper function for df_worklist_dataflow.
961 Propagate the dataflow backward. */
962
963 static bool
964 df_worklist_propagate_backward (struct dataflow *dataflow,
965 unsigned bb_index,
966 unsigned *bbindex_to_postorder,
967 bitmap pending,
968 sbitmap considered,
969 ptrdiff_t age)
970 {
971 edge e;
972 edge_iterator ei;
973 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
974 bool changed = !age;
975
976 /* Calculate <conf_op> of outgoing edges. */
977 if (EDGE_COUNT (bb->succs) > 0)
978 FOR_EACH_EDGE (e, ei, bb->succs)
979 {
980 if (age <= BB_LAST_CHANGE_AGE (e->dest)
981 && bitmap_bit_p (considered, e->dest->index))
982 changed |= dataflow->problem->con_fun_n (e);
983 }
984 else if (dataflow->problem->con_fun_0)
985 dataflow->problem->con_fun_0 (bb);
986
987 if (changed
988 && dataflow->problem->trans_fun (bb_index))
989 {
990 /* The in set of this block has changed.
991 Propagate to the predecessor blocks. */
992 FOR_EACH_EDGE (e, ei, bb->preds)
993 {
994 unsigned ob_index = e->src->index;
995
996 if (bitmap_bit_p (considered, ob_index))
997 bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
998 }
999 return true;
1000 }
1001 return false;
1002 }
1003
1004 /* Main dataflow solver loop.
1005
1006 DATAFLOW is the problem we are solving, PENDING is the worklist of basic
1007 blocks we need to visit.
1008 BLOCKS_IN_POSTORDER is an array of size N_BLOCKS specifying the postorder of the BBs
1009 and BBINDEX_TO_POSTORDER is an array mapping BB->index back to its postorder position.
1010 PENDING will be freed.
1011
1012 The worklists are bitmaps indexed by postorder positions.
1013
1014 The function implements the standard algorithm for dataflow solving with two
1015 worklists (we are processing WORKLIST and storing new BBs to visit in
1016 PENDING).
1017
1018 As an optimization we maintain the age at which each BB was last changed
1019 (stored in bb->aux) and the age at which it was last visited (stored in
1020 last_visit_age).  This avoids the need to re-do the confluence function for
1021 edges whose source did not change since the destination was last visited. */
1022
1023 static void
1024 df_worklist_dataflow_doublequeue (struct dataflow *dataflow,
1025 bitmap pending,
1026 sbitmap considered,
1027 int *blocks_in_postorder,
1028 unsigned *bbindex_to_postorder,
1029 int n_blocks)
1030 {
1031 enum df_flow_dir dir = dataflow->problem->dir;
1032 int dcount = 0;
1033 bitmap worklist = BITMAP_ALLOC (&df_bitmap_obstack);
1034 int age = 0;
1035 bool changed;
1036 vec<int> last_visit_age = vNULL;
1037 int prev_age;
1038 basic_block bb;
1039 int i;
1040
1041 last_visit_age.safe_grow_cleared (n_blocks);
1042
1043 /* Double-queueing. Worklist is for the current iteration,
1044 and pending is for the next. */
1045 while (!bitmap_empty_p (pending))
1046 {
1047 bitmap_iterator bi;
1048 unsigned int index;
1049
1050 /* Swap pending and worklist. */
1051 bitmap temp = worklist;
1052 worklist = pending;
1053 pending = temp;
1054
1055 EXECUTE_IF_SET_IN_BITMAP (worklist, 0, index, bi)
1056 {
1057 unsigned bb_index;
1058 dcount++;
1059
1060 bitmap_clear_bit (pending, index);
1061 bb_index = blocks_in_postorder[index];
1062 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
1063 prev_age = last_visit_age[index];
1064 if (dir == DF_FORWARD)
1065 changed = df_worklist_propagate_forward (dataflow, bb_index,
1066 bbindex_to_postorder,
1067 pending, considered,
1068 prev_age);
1069 else
1070 changed = df_worklist_propagate_backward (dataflow, bb_index,
1071 bbindex_to_postorder,
1072 pending, considered,
1073 prev_age);
1074 last_visit_age[index] = ++age;
1075 if (changed)
1076 bb->aux = (void *)(ptrdiff_t)age;
1077 }
1078 bitmap_clear (worklist);
1079 }
1080 for (i = 0; i < n_blocks; i++)
1081 BASIC_BLOCK_FOR_FN (cfun, blocks_in_postorder[i])->aux = NULL;
1082
1083 BITMAP_FREE (worklist);
1084 BITMAP_FREE (pending);
1085 last_visit_age.release ();
1086
1087 /* Dump statistics. */
1088 if (dump_file)
1089 fprintf (dump_file, "df_worklist_dataflow_doublequeue:"
1090 "n_basic_blocks %d n_edges %d"
1091 " count %d (%5.2g)\n",
1092 n_basic_blocks_for_fn (cfun), n_edges_for_fn (cfun),
1093 dcount, dcount / (float)n_basic_blocks_for_fn (cfun));
1094 }
1095
1096 /* Worklist-based dataflow solver.  It uses a bitmap as a worklist,
1097 with "n"-th bit representing the n-th block in the reverse-postorder order.
1098 The solver is a double-queue algorithm similar to the "double stack" solver
1099 from Cooper, Harvey and Kennedy, "Iterative data-flow analysis, Revisited".
1100 The only significant difference is that the worklist in this implementation
1101 is always sorted in RPO of the CFG visiting direction. */
1102
1103 void
1104 df_worklist_dataflow (struct dataflow *dataflow,
1105 bitmap blocks_to_consider,
1106 int *blocks_in_postorder,
1107 int n_blocks)
1108 {
1109 bitmap pending = BITMAP_ALLOC (&df_bitmap_obstack);
1110 sbitmap considered = sbitmap_alloc (last_basic_block_for_fn (cfun));
1111 bitmap_iterator bi;
1112 unsigned int *bbindex_to_postorder;
1113 int i;
1114 unsigned int index;
1115 enum df_flow_dir dir = dataflow->problem->dir;
1116
1117 gcc_assert (dir != DF_NONE);
1118
1119 /* BBINDEX_TO_POSTORDER maps the bb->index to the reverse postorder. */
1120 bbindex_to_postorder = XNEWVEC (unsigned int,
1121 last_basic_block_for_fn (cfun));
1122
1123 /* Initialize the array to an out-of-bound value. */
1124 for (i = 0; i < last_basic_block_for_fn (cfun); i++)
1125 bbindex_to_postorder[i] = last_basic_block_for_fn (cfun);
1126
1127 /* Initialize the considered map. */
1128 bitmap_clear (considered);
1129 EXECUTE_IF_SET_IN_BITMAP (blocks_to_consider, 0, index, bi)
1130 {
1131 bitmap_set_bit (considered, index);
1132 }
1133
1134 /* Initialize the mapping of block index to postorder. */
1135 for (i = 0; i < n_blocks; i++)
1136 {
1137 bbindex_to_postorder[blocks_in_postorder[i]] = i;
1138 /* Add all blocks to the worklist. */
1139 bitmap_set_bit (pending, i);
1140 }
1141
1142 /* Initialize the problem. */
1143 if (dataflow->problem->init_fun)
1144 dataflow->problem->init_fun (blocks_to_consider);
1145
1146 /* Solve it. */
1147 df_worklist_dataflow_doublequeue (dataflow, pending, considered,
1148 blocks_in_postorder,
1149 bbindex_to_postorder,
1150 n_blocks);
1151 sbitmap_free (considered);
1152 free (bbindex_to_postorder);
1153 }
1154
1155
1156 /* Remove the entries not in BLOCKS from the LIST of length LEN, preserving
1157 the order of the remaining entries. Returns the length of the resulting
1158 list. */
1159
1160 static unsigned
1161 df_prune_to_subcfg (int list[], unsigned len, bitmap blocks)
1162 {
1163 unsigned act, last;
1164
1165 for (act = 0, last = 0; act < len; act++)
1166 if (bitmap_bit_p (blocks, list[act]))
1167 list[last++] = list[act];
1168
1169 return last;
1170 }
1171
1172
1173 /* Execute dataflow analysis on a single dataflow problem.
1174
1175 BLOCKS_TO_CONSIDER are the blocks whose solution can either be
1176 examined or will be computed. For calls from DF_ANALYZE, this is
1177 the set of blocks that has been passed to DF_SET_BLOCKS.
1178 */
1179
1180 void
1181 df_analyze_problem (struct dataflow *dflow,
1182 bitmap blocks_to_consider,
1183 int *postorder, int n_blocks)
1184 {
1185 timevar_push (dflow->problem->tv_id);
1186
1187 /* (Re)Allocate the datastructures necessary to solve the problem. */
1188 if (dflow->problem->alloc_fun)
1189 dflow->problem->alloc_fun (blocks_to_consider);
1190
1191 #ifdef ENABLE_DF_CHECKING
1192 if (dflow->problem->verify_start_fun)
1193 dflow->problem->verify_start_fun ();
1194 #endif
1195
1196 /* Set up the problem and compute the local information. */
1197 if (dflow->problem->local_compute_fun)
1198 dflow->problem->local_compute_fun (blocks_to_consider);
1199
1200 /* Solve the equations. */
1201 if (dflow->problem->dataflow_fun)
1202 dflow->problem->dataflow_fun (dflow, blocks_to_consider,
1203 postorder, n_blocks);
1204
1205 /* Massage the solution. */
1206 if (dflow->problem->finalize_fun)
1207 dflow->problem->finalize_fun (blocks_to_consider);
1208
1209 #ifdef ENABLE_DF_CHECKING
1210 if (dflow->problem->verify_end_fun)
1211 dflow->problem->verify_end_fun ();
1212 #endif
1213
1214 timevar_pop (dflow->problem->tv_id);
1215
1216 dflow->computed = true;
1217 }
1218
1219
1220 /* Analyze dataflow info. */
1221
1222 static void
1223 df_analyze_1 (void)
1224 {
1225 int i;
1226
1227 /* These should be the same. */
1228 gcc_assert (df->n_blocks == df->n_blocks_inverted);
1229
1230 /* We need to do this before the df_verify_all because this is
1231 not kept incrementally up to date. */
1232 df_compute_regs_ever_live (false);
1233 df_process_deferred_rescans ();
1234
1235 if (dump_file)
1236 fprintf (dump_file, "df_analyze called\n");
1237
1238 #ifndef ENABLE_DF_CHECKING
1239 if (df->changeable_flags & DF_VERIFY_SCHEDULED)
1240 #endif
1241 df_verify ();
1242
1243 /* Skip over the DF_SCAN problem. */
1244 for (i = 1; i < df->num_problems_defined; i++)
1245 {
1246 struct dataflow *dflow = df->problems_in_order[i];
1247 if (dflow->solutions_dirty)
1248 {
1249 if (dflow->problem->dir == DF_FORWARD)
1250 df_analyze_problem (dflow,
1251 df->blocks_to_analyze,
1252 df->postorder_inverted,
1253 df->n_blocks_inverted);
1254 else
1255 df_analyze_problem (dflow,
1256 df->blocks_to_analyze,
1257 df->postorder,
1258 df->n_blocks);
1259 }
1260 }
1261
1262 if (!df->analyze_subset)
1263 {
1264 BITMAP_FREE (df->blocks_to_analyze);
1265 df->blocks_to_analyze = NULL;
1266 }
1267
1268 #ifdef DF_DEBUG_CFG
1269 df_set_clean_cfg ();
1270 #endif
1271 }
1272
1273 /* Analyze dataflow info. */
1274
1275 void
1276 df_analyze (void)
1277 {
1278 bitmap current_all_blocks = BITMAP_ALLOC (&df_bitmap_obstack);
1279 int i;
1280
1281 free (df->postorder);
1282 free (df->postorder_inverted);
1283 df->postorder = XNEWVEC (int, last_basic_block_for_fn (cfun));
1284 df->postorder_inverted = XNEWVEC (int, last_basic_block_for_fn (cfun));
1285 df->n_blocks = post_order_compute (df->postorder, true, true);
1286 df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted);
1287
1288 for (i = 0; i < df->n_blocks; i++)
1289 bitmap_set_bit (current_all_blocks, df->postorder[i]);
1290
1291 #ifdef ENABLE_CHECKING
1292 /* Verify that POSTORDER_INVERTED only contains blocks reachable from
1293 the ENTRY block. */
1294 for (i = 0; i < df->n_blocks_inverted; i++)
1295 gcc_assert (bitmap_bit_p (current_all_blocks, df->postorder_inverted[i]));
1296 #endif
1297
1298 /* Make sure that we have pruned any unreachable blocks from these
1299 sets. */
1300 if (df->analyze_subset)
1301 {
1302 bitmap_and_into (df->blocks_to_analyze, current_all_blocks);
1303 df->n_blocks = df_prune_to_subcfg (df->postorder,
1304 df->n_blocks, df->blocks_to_analyze);
1305 df->n_blocks_inverted = df_prune_to_subcfg (df->postorder_inverted,
1306 df->n_blocks_inverted,
1307 df->blocks_to_analyze);
1308 BITMAP_FREE (current_all_blocks);
1309 }
1310 else
1311 {
1312 df->blocks_to_analyze = current_all_blocks;
1313 current_all_blocks = NULL;
1314 }
1315
1316 df_analyze_1 ();
1317 }
1318
1319 /* Compute the reverse top sort order of the sub-CFG specified by LOOP.
1320 Returns the number of blocks which is always loop->num_nodes. */
1321
1322 static int
1323 loop_post_order_compute (int *post_order, struct loop *loop)
1324 {
1325 edge_iterator *stack;
1326 int sp;
1327 int post_order_num = 0;
1328 bitmap visited;
1329
1330 /* Allocate stack for back-tracking up CFG. */
1331 stack = XNEWVEC (edge_iterator, loop->num_nodes + 1);
1332 sp = 0;
1333
1334 /* Allocate bitmap to track nodes that have been visited. */
1335 visited = BITMAP_ALLOC (NULL);
1336
1337 /* Push the first edge on to the stack. */
1338 stack[sp++] = ei_start (loop_preheader_edge (loop)->src->succs);
1339
1340 while (sp)
1341 {
1342 edge_iterator ei;
1343 basic_block src;
1344 basic_block dest;
1345
1346 /* Look at the edge on the top of the stack. */
1347 ei = stack[sp - 1];
1348 src = ei_edge (ei)->src;
1349 dest = ei_edge (ei)->dest;
1350
1351 /* Check if the edge destination has been visited yet and mark it
1352 if not so. */
1353 if (flow_bb_inside_loop_p (loop, dest)
1354 && bitmap_set_bit (visited, dest->index))
1355 {
1356 if (EDGE_COUNT (dest->succs) > 0)
1357 /* Since the DEST node has been visited for the first
1358 time, check its successors. */
1359 stack[sp++] = ei_start (dest->succs);
1360 else
1361 post_order[post_order_num++] = dest->index;
1362 }
1363 else
1364 {
1365 if (ei_one_before_end_p (ei)
1366 && src != loop_preheader_edge (loop)->src)
1367 post_order[post_order_num++] = src->index;
1368
1369 if (!ei_one_before_end_p (ei))
1370 ei_next (&stack[sp - 1]);
1371 else
1372 sp--;
1373 }
1374 }
1375
1376 free (stack);
1377 BITMAP_FREE (visited);
1378
1379 return post_order_num;
1380 }
1381
1382 /* Compute the reverse top sort order of the inverted sub-CFG specified
1383 by LOOP. Returns the number of blocks which is always loop->num_nodes. */
1384
1385 static int
1386 loop_inverted_post_order_compute (int *post_order, struct loop *loop)
1387 {
1388 basic_block bb;
1389 edge_iterator *stack;
1390 int sp;
1391 int post_order_num = 0;
1392 bitmap visited;
1393
1394 /* Allocate stack for back-tracking up CFG. */
1395 stack = XNEWVEC (edge_iterator, loop->num_nodes + 1);
1396 sp = 0;
1397
1398 /* Allocate bitmap to track nodes that have been visited. */
1399 visited = BITMAP_ALLOC (NULL);
1400
1401 /* Put all latches into the initial work list. In theory we'd want
1402 to start from loop exits but then we'd have the special case of
1403 endless loops. It doesn't really matter for DF iteration order and
1404 handling latches last is probably even better. */
1405 stack[sp++] = ei_start (loop->header->preds);
1406 bitmap_set_bit (visited, loop->header->index);
1407
1408 /* The inverted traversal loop. */
1409 while (sp)
1410 {
1411 edge_iterator ei;
1412 basic_block pred;
1413
1414 /* Look at the edge on the top of the stack. */
1415 ei = stack[sp - 1];
1416 bb = ei_edge (ei)->dest;
1417 pred = ei_edge (ei)->src;
1418
1419 /* Check if the predecessor has been visited yet and mark it
1420 if not so. */
1421 if (flow_bb_inside_loop_p (loop, pred)
1422 && bitmap_set_bit (visited, pred->index))
1423 {
1424 if (EDGE_COUNT (pred->preds) > 0)
1425 /* Since the predecessor node has been visited for the first
1426 time, check its predecessors. */
1427 stack[sp++] = ei_start (pred->preds);
1428 else
1429 post_order[post_order_num++] = pred->index;
1430 }
1431 else
1432 {
1433 if (flow_bb_inside_loop_p (loop, bb)
1434 && ei_one_before_end_p (ei))
1435 post_order[post_order_num++] = bb->index;
1436
1437 if (!ei_one_before_end_p (ei))
1438 ei_next (&stack[sp - 1]);
1439 else
1440 sp--;
1441 }
1442 }
1443
1444 free (stack);
1445 BITMAP_FREE (visited);
1446 return post_order_num;
1447 }
1448
1449
1450 /* Analyze dataflow info for the basic blocks contained in LOOP. */
1451
1452 void
1453 df_analyze_loop (struct loop *loop)
1454 {
1455 free (df->postorder);
1456 free (df->postorder_inverted);
1457
1458 df->postorder = XNEWVEC (int, loop->num_nodes);
1459 df->postorder_inverted = XNEWVEC (int, loop->num_nodes);
1460 df->n_blocks = loop_post_order_compute (df->postorder, loop);
1461 df->n_blocks_inverted
1462 = loop_inverted_post_order_compute (df->postorder_inverted, loop);
1463 gcc_assert ((unsigned) df->n_blocks == loop->num_nodes);
1464 gcc_assert ((unsigned) df->n_blocks_inverted == loop->num_nodes);
1465
1466 bitmap blocks = BITMAP_ALLOC (&df_bitmap_obstack);
1467 for (int i = 0; i < df->n_blocks; ++i)
1468 bitmap_set_bit (blocks, df->postorder[i]);
1469 df_set_blocks (blocks);
1470 BITMAP_FREE (blocks);
1471
1472 df_analyze_1 ();
1473 }
1474
1475
1476 /* Return the number of basic blocks from the last call to df_analyze. */
1477
1478 int
1479 df_get_n_blocks (enum df_flow_dir dir)
1480 {
1481 gcc_assert (dir != DF_NONE);
1482
1483 if (dir == DF_FORWARD)
1484 {
1485 gcc_assert (df->postorder_inverted);
1486 return df->n_blocks_inverted;
1487 }
1488
1489 gcc_assert (df->postorder);
1490 return df->n_blocks;
1491 }
1492
1493
1494 /* Return a pointer to the array of basic blocks in the reverse postorder.
1495 Depending on the direction of the dataflow problem,
1496 it returns either the usual reverse postorder array
1497 or the reverse postorder of inverted traversal. */
1498 int *
1499 df_get_postorder (enum df_flow_dir dir)
1500 {
1501 gcc_assert (dir != DF_NONE);
1502
1503 if (dir == DF_FORWARD)
1504 {
1505 gcc_assert (df->postorder_inverted);
1506 return df->postorder_inverted;
1507 }
1508 gcc_assert (df->postorder);
1509 return df->postorder;
1510 }
1511
1512 static struct df_problem user_problem;
1513 static struct dataflow user_dflow;
1514
1515 /* Interface for calling iterative dataflow with user defined
1516 confluence and transfer functions. All that is necessary is to
1517 supply DIR, a direction, INIT_FUN, an initialization function (or NULL),
1518 CONF_FUN_0, a confluence function for blocks with no logical preds (or
1519 NULL), CONF_FUN_N, the normal confluence function, TRANS_FUN, the basic
1520 block transfer function, BLOCKS, the set of blocks to examine, POSTORDER,
1521 the blocks in postorder, and N_BLOCKS, the number of blocks in POSTORDER. */
1522
1523 void
1524 df_simple_dataflow (enum df_flow_dir dir,
1525 df_init_function init_fun,
1526 df_confluence_function_0 con_fun_0,
1527 df_confluence_function_n con_fun_n,
1528 df_transfer_function trans_fun,
1529 bitmap blocks, int * postorder, int n_blocks)
1530 {
1531 memset (&user_problem, 0, sizeof (struct df_problem));
1532 user_problem.dir = dir;
1533 user_problem.init_fun = init_fun;
1534 user_problem.con_fun_0 = con_fun_0;
1535 user_problem.con_fun_n = con_fun_n;
1536 user_problem.trans_fun = trans_fun;
1537 user_dflow.problem = &user_problem;
1538 df_worklist_dataflow (&user_dflow, blocks, postorder, n_blocks);
1539 }
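
/* A minimal sketch of calling df_simple_dataflow (the my_* callbacks
   and the BLOCKS bitmap are hypothetical; the callback signatures are
   the df_*_function typedefs from df.h, shown here as they are used by
   the propagate helpers above):

       static bool my_confluence_n (edge e);
       static bool my_transfer (int bb_index);

       df_simple_dataflow (DF_FORWARD, NULL, NULL, my_confluence_n,
                           my_transfer, blocks,
                           df_get_postorder (DF_FORWARD),
                           df_get_n_blocks (DF_FORWARD));

   Passing df_get_postorder/df_get_n_blocks reuses the ordering from the
   last df_analyze; a caller may also supply its own postorder array.  */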
1540
1541
1542 \f
1543 /*----------------------------------------------------------------------------
1544 Functions to support limited incremental change.
1545 ----------------------------------------------------------------------------*/
1546
1547
1548 /* Get basic block info. */
1549
1550 static void *
1551 df_get_bb_info (struct dataflow *dflow, unsigned int index)
1552 {
1553 if (dflow->block_info == NULL)
1554 return NULL;
1555 if (index >= dflow->block_info_size)
1556 return NULL;
1557 return (void *)((char *)dflow->block_info
1558 + index * dflow->problem->block_info_elt_size);
1559 }
1560
1561
1562 /* Set basic block info. */
1563
1564 static void
1565 df_set_bb_info (struct dataflow *dflow, unsigned int index,
1566 void *bb_info)
1567 {
1568 gcc_assert (dflow->block_info);
1569 memcpy ((char *)dflow->block_info
1570 + index * dflow->problem->block_info_elt_size,
1571 bb_info, dflow->problem->block_info_elt_size);
1572 }
1573
1574
1575 /* Clear basic block info. */
1576
1577 static void
1578 df_clear_bb_info (struct dataflow *dflow, unsigned int index)
1579 {
1580 gcc_assert (dflow->block_info);
1581 gcc_assert (dflow->block_info_size > index);
1582 memset ((char *)dflow->block_info
1583 + index * dflow->problem->block_info_elt_size,
1584 0, dflow->problem->block_info_elt_size);
1585 }
1586
1587
1588 /* Mark the solutions as being out of date. */
1589
1590 void
1591 df_mark_solutions_dirty (void)
1592 {
1593 if (df)
1594 {
1595 int p;
1596 for (p = 1; p < df->num_problems_defined; p++)
1597 df->problems_in_order[p]->solutions_dirty = true;
1598 }
1599 }
1600
1601
1602 /* Return true if BB needs its transfer functions recomputed. */
1603
1604 bool
1605 df_get_bb_dirty (basic_block bb)
1606 {
1607 return bitmap_bit_p ((df_live
1608 ? df_live : df_lr)->out_of_date_transfer_functions,
1609 bb->index);
1610 }
1611
1612
1613 /* Mark BB as needing its transfer functions to be recomputed. */
1615
1616 void
1617 df_set_bb_dirty (basic_block bb)
1618 {
1619 bb->flags |= BB_MODIFIED;
1620 if (df)
1621 {
1622 int p;
1623 for (p = 1; p < df->num_problems_defined; p++)
1624 {
1625 struct dataflow *dflow = df->problems_in_order[p];
1626 if (dflow->out_of_date_transfer_functions)
1627 bitmap_set_bit (dflow->out_of_date_transfer_functions, bb->index);
1628 }
1629 df_mark_solutions_dirty ();
1630 }
1631 }
1632
1633
1634 /* Grow the bb_info array. */
1635
1636 void
1637 df_grow_bb_info (struct dataflow *dflow)
1638 {
1639 unsigned int new_size = last_basic_block_for_fn (cfun) + 1;
1640 if (dflow->block_info_size < new_size)
1641 {
1642 new_size += new_size / 4;
1643 dflow->block_info
1644 = (void *)XRESIZEVEC (char, (char *)dflow->block_info,
1645 new_size
1646 * dflow->problem->block_info_elt_size);
1647 memset ((char *)dflow->block_info
1648 + dflow->block_info_size
1649 * dflow->problem->block_info_elt_size,
1650 0,
1651 (new_size - dflow->block_info_size)
1652 * dflow->problem->block_info_elt_size);
1653 dflow->block_info_size = new_size;
1654 }
1655 }
1656
1657
1658 /* Clear the dirty bits. This is called from places that delete
1659 blocks. */
1660 static void
1661 df_clear_bb_dirty (basic_block bb)
1662 {
1663 int p;
1664 for (p = 1; p < df->num_problems_defined; p++)
1665 {
1666 struct dataflow *dflow = df->problems_in_order[p];
1667 if (dflow->out_of_date_transfer_functions)
1668 bitmap_clear_bit (dflow->out_of_date_transfer_functions, bb->index);
1669 }
1670 }
1671
1672 /* Called from rtl_compact_blocks to reorganize the problems' basic
1673 block info. */
1674
1675 void
1676 df_compact_blocks (void)
1677 {
1678 int i, p;
1679 basic_block bb;
1680 void *problem_temps;
1681 bitmap_head tmp;
1682
1683 bitmap_initialize (&tmp, &df_bitmap_obstack);
1684 for (p = 0; p < df->num_problems_defined; p++)
1685 {
1686 struct dataflow *dflow = df->problems_in_order[p];
1687
1688 /* Need to reorganize the out_of_date_transfer_functions for the
1689 dflow problem. */
1690 if (dflow->out_of_date_transfer_functions)
1691 {
1692 bitmap_copy (&tmp, dflow->out_of_date_transfer_functions);
1693 bitmap_clear (dflow->out_of_date_transfer_functions);
1694 if (bitmap_bit_p (&tmp, ENTRY_BLOCK))
1695 bitmap_set_bit (dflow->out_of_date_transfer_functions, ENTRY_BLOCK);
1696 if (bitmap_bit_p (&tmp, EXIT_BLOCK))
1697 bitmap_set_bit (dflow->out_of_date_transfer_functions, EXIT_BLOCK);
1698
1699 i = NUM_FIXED_BLOCKS;
1700 FOR_EACH_BB_FN (bb, cfun)
1701 {
1702 if (bitmap_bit_p (&tmp, bb->index))
1703 bitmap_set_bit (dflow->out_of_date_transfer_functions, i);
1704 i++;
1705 }
1706 }
1707
1708 /* Now shuffle the block info for the problem. */
1709 if (dflow->problem->free_bb_fun)
1710 {
1711 int size = (last_basic_block_for_fn (cfun)
1712 * dflow->problem->block_info_elt_size);
1713 problem_temps = XNEWVAR (char, size);
1714 df_grow_bb_info (dflow);
1715 memcpy (problem_temps, dflow->block_info, size);
1716
1717 /* Copy the bb info from the problem tmps to the proper
1718 place in the block_info vector. Zero out the trailing entries
1719 that are no longer used. The entry and exit blocks never move. */
1720 i = NUM_FIXED_BLOCKS;
1721 FOR_EACH_BB_FN (bb, cfun)
1722 {
1723 df_set_bb_info (dflow, i,
1724 (char *)problem_temps
1725 + bb->index * dflow->problem->block_info_elt_size);
1726 i++;
1727 }
1728 memset ((char *)dflow->block_info
1729 + i * dflow->problem->block_info_elt_size, 0,
1730 (last_basic_block_for_fn (cfun) - i)
1731 * dflow->problem->block_info_elt_size);
1732 free (problem_temps);
1733 }
1734 }
1735
1736 /* Shuffle the bits in the basic_block indexed arrays. */
1737
1738 if (df->blocks_to_analyze)
1739 {
1740 if (bitmap_bit_p (&tmp, ENTRY_BLOCK))
1741 bitmap_set_bit (df->blocks_to_analyze, ENTRY_BLOCK);
1742 if (bitmap_bit_p (&tmp, EXIT_BLOCK))
1743 bitmap_set_bit (df->blocks_to_analyze, EXIT_BLOCK);
1744 bitmap_copy (&tmp, df->blocks_to_analyze);
1745 bitmap_clear (df->blocks_to_analyze);
1746 i = NUM_FIXED_BLOCKS;
1747 FOR_EACH_BB_FN (bb, cfun)
1748 {
1749 if (bitmap_bit_p (&tmp, bb->index))
1750 bitmap_set_bit (df->blocks_to_analyze, i);
1751 i++;
1752 }
1753 }
1754
1755 bitmap_clear (&tmp);
1756
1757 i = NUM_FIXED_BLOCKS;
1758 FOR_EACH_BB_FN (bb, cfun)
1759 {
1760 SET_BASIC_BLOCK_FOR_FN (cfun, i, bb);
1761 bb->index = i;
1762 i++;
1763 }
1764
1765 gcc_assert (i == n_basic_blocks_for_fn (cfun));
1766
1767 for (; i < last_basic_block_for_fn (cfun); i++)
1768 SET_BASIC_BLOCK_FOR_FN (cfun, i, NULL);
1769
1770 #ifdef DF_DEBUG_CFG
1771 if (!df_lr->solutions_dirty)
1772 df_set_clean_cfg ();
1773 #endif
1774 }
1775
1776
1777 /* Shove NEW_BLOCK in at OLD_INDEX. Called from ifcvt to hack a
1778 block. There is no excuse for people to do this kind of thing. */
1779
1780 void
1781 df_bb_replace (int old_index, basic_block new_block)
1782 {
1783 int new_block_index = new_block->index;
1784 int p;
1785
1786 if (dump_file)
1787 fprintf (dump_file, "shoving block %d into %d\n", new_block_index, old_index);
1788
1789 gcc_assert (df);
1790 gcc_assert (BASIC_BLOCK_FOR_FN (cfun, old_index) == NULL);
1791
1792 for (p = 0; p < df->num_problems_defined; p++)
1793 {
1794 struct dataflow *dflow = df->problems_in_order[p];
1795 if (dflow->block_info)
1796 {
1797 df_grow_bb_info (dflow);
1798 df_set_bb_info (dflow, old_index,
1799 df_get_bb_info (dflow, new_block_index));
1800 }
1801 }
1802
1803 df_clear_bb_dirty (new_block);
1804 SET_BASIC_BLOCK_FOR_FN (cfun, old_index, new_block);
1805 new_block->index = old_index;
1806 df_set_bb_dirty (BASIC_BLOCK_FOR_FN (cfun, old_index));
1807 SET_BASIC_BLOCK_FOR_FN (cfun, new_block_index, NULL);
1808 }
1809
1810
1811 /* Free all of the per-basic-block dataflow info from all of the
1812 problems. This is typically called before a basic block is deleted
1813 and the problems will be reanalyzed. */
1814
1815 void
1816 df_bb_delete (int bb_index)
1817 {
1818 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
1819 int i;
1820
1821 if (!df)
1822 return;
1823
1824 for (i = 0; i < df->num_problems_defined; i++)
1825 {
1826 struct dataflow *dflow = df->problems_in_order[i];
1827 if (dflow->problem->free_bb_fun)
1828 {
1829 void *bb_info = df_get_bb_info (dflow, bb_index);
1830 if (bb_info)
1831 {
1832 dflow->problem->free_bb_fun (bb, bb_info);
1833 df_clear_bb_info (dflow, bb_index);
1834 }
1835 }
1836 }
1837 df_clear_bb_dirty (bb);
1838 df_mark_solutions_dirty ();
1839 }
1840
1841
1842 /* Verify that there is a place for everything and everything is in
1843 its place. This is too expensive to run after every pass in the
1844 mainline. However this is an excellent debugging tool if the
1845 dataflow information is not being updated properly. You can just
1846 sprinkle calls in until you find the place that is changing an
1847 underlying structure without calling the proper updating
1848 routine. */
1849
1850 void
1851 df_verify (void)
1852 {
1853 df_scan_verify ();
1854 #ifdef ENABLE_DF_CHECKING
1855 df_lr_verify_transfer_functions ();
1856 if (df_live)
1857 df_live_verify_transfer_functions ();
1858 #endif
1859 }
1860
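/* A purely illustrative sketch of the "sprinkling" described above.
   example_transform is a hypothetical pass body; df_verify is the real
   entry point.  If the second call aborts, the code between the two
   calls is what changed an underlying structure without calling the
   proper updating routine:

       static void
       example_transform (void)
       {
	 df_verify ();
	 ... one suspect transformation ...
	 df_verify ();
       }
*/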
1861 #ifdef DF_DEBUG_CFG
1862
1863 /* Compute an array of ints that describes the cfg. This can be used
1864 to discover places where the cfg is modified without the appropriate
1865 calls being made to keep df informed. The internals of this are
1866 unexciting; the key is that two instances of the image can be
1867 compared to see if any changes have been made to the cfg. */
1868
1869 static int *
1870 df_compute_cfg_image (void)
1871 {
1872 basic_block bb;
1873 int size = 2 + (2 * n_basic_blocks_for_fn (cfun));
1874 int i;
1875 int * map;
1876
1877 FOR_ALL_BB_FN (bb, cfun)
1878 {
1879 size += EDGE_COUNT (bb->succs);
1880 }
1881
1882 map = XNEWVEC (int, size);
1883 map[0] = size;
1884 i = 1;
1885 FOR_ALL_BB_FN (bb, cfun)
1886 {
1887 edge_iterator ei;
1888 edge e;
1889
1890 map[i++] = bb->index;
1891 FOR_EACH_EDGE (e, ei, bb->succs)
1892 map[i++] = e->dest->index;
1893 map[i++] = -1;
1894 }
1895 map[i] = -1;
1896 return map;
1897 }
1898
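/* As an illustration of the image layout: for a function whose only
   normal block is block 2, with ENTRY_BLOCK 0 falling through to block
   2 and block 2 falling through to EXIT_BLOCK 1, the image built above
   would be the ten ints

       { 10,  0, 2, -1,  2, 1, -1,  1, -1,  -1 }

   that is, the total size, then for each block its own index followed
   by its successor indices and a -1 terminator, and a final -1.  */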
1899 static int *saved_cfg = NULL;
1900
1901
1902 /* This function compares the saved version of the cfg with the
1903 current cfg and aborts if the two differ. The function silently
1904 returns if the cfg has been marked as dirty, no image has been
1905 saved, or the two images are the same. */
1906
1907 void
1908 df_check_cfg_clean (void)
1909 {
1910 int *new_map;
1911
1912 if (!df)
1913 return;
1914
1915 if (df_lr->solutions_dirty)
1916 return;
1917
1918 if (saved_cfg == NULL)
1919 return;
1920
1921 new_map = df_compute_cfg_image ();
1922 gcc_assert (memcmp (saved_cfg, new_map, saved_cfg[0] * sizeof (int)) == 0);
1923 free (new_map);
1924 }
1925
1926
1927 /* This function builds a cfg fingerprint and squirrels it away in
1928 saved_cfg. */
1929
1930 static void
1931 df_set_clean_cfg (void)
1932 {
1933 free (saved_cfg);
1934 saved_cfg = df_compute_cfg_image ();
1935 }
1936
1937 #endif /* DF_DEBUG_CFG */
1938 /*----------------------------------------------------------------------------
1939 PUBLIC INTERFACES TO QUERY INFORMATION.
1940 ----------------------------------------------------------------------------*/
1941
1942
1943 /* Return first def of REGNO within BB. */
1944
1945 df_ref
1946 df_bb_regno_first_def_find (basic_block bb, unsigned int regno)
1947 {
1948 rtx_insn *insn;
1949 df_ref def;
1950
1951 FOR_BB_INSNS (bb, insn)
1952 {
1953 if (!INSN_P (insn))
1954 continue;
1955
1956 FOR_EACH_INSN_DEF (def, insn)
1957 if (DF_REF_REGNO (def) == regno)
1958 return def;
1959 }
1960 return NULL;
1961 }
1962
1963
1964 /* Return last def of REGNO within BB. */
1965
1966 df_ref
1967 df_bb_regno_last_def_find (basic_block bb, unsigned int regno)
1968 {
1969 rtx_insn *insn;
1970 df_ref def;
1971
1972 FOR_BB_INSNS_REVERSE (bb, insn)
1973 {
1974 if (!INSN_P (insn))
1975 continue;
1976
1977 FOR_EACH_INSN_DEF (def, insn)
1978 if (DF_REF_REGNO (def) == regno)
1979 return def;
1980 }
1981
1982 return NULL;
1983 }
1984
1985 /* Finds the reference corresponding to the definition of REG in
1986 INSN, or NULL if there is none. */
1987
1988 df_ref
1989 df_find_def (rtx_insn *insn, rtx reg)
1990 {
1991 df_ref def;
1992
1993 if (GET_CODE (reg) == SUBREG)
1994 reg = SUBREG_REG (reg);
1995 gcc_assert (REG_P (reg));
1996
1997 FOR_EACH_INSN_DEF (def, insn)
1998 if (DF_REF_REGNO (def) == REGNO (reg))
1999 return def;
2000
2001 return NULL;
2002 }
2003
2004
2005 /* Return true if REG is defined in INSN, false otherwise. */
2006
2007 bool
2008 df_reg_defined (rtx_insn *insn, rtx reg)
2009 {
2010 return df_find_def (insn, reg) != NULL;
2011 }
2012
2013
2014 /* Finds the reference corresponding to the use of REG in INSN,
2015 or NULL if there is none. */
2016
2017 df_ref
2018 df_find_use (rtx_insn *insn, rtx reg)
2019 {
2020 df_ref use;
2021
2022 if (GET_CODE (reg) == SUBREG)
2023 reg = SUBREG_REG (reg);
2024 gcc_assert (REG_P (reg));
2025
2026 df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
2027 FOR_EACH_INSN_INFO_USE (use, insn_info)
2028 if (DF_REF_REGNO (use) == REGNO (reg))
2029 return use;
2030 if (df->changeable_flags & DF_EQ_NOTES)
2031 FOR_EACH_INSN_INFO_EQ_USE (use, insn_info)
2032 if (DF_REF_REGNO (use) == REGNO (reg))
2033 return use;
2034 return NULL;
2035 }
2036
2037
2038 /* Return true if REG is referenced in INSN, false otherwise. */
2039
2040 bool
2041 df_reg_used (rtx_insn *insn, rtx reg)
2042 {
2043 return df_find_use (insn, reg) != NULL;
2044 }
2045
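/* A minimal sketch of how the query routines above are typically used
   from a pass; the surrounding logic is hypothetical, the df_* calls
   and DF_REF_* accessors are real:

       df_ref def = df_find_def (insn, reg);
       if (def && !(DF_REF_FLAGS (def) & DF_REF_PARTIAL))
	 ... INSN overwrites all of REG ...

       df_ref first = df_bb_regno_first_def_find (bb, REGNO (reg));
       if (first)
	 ... DF_REF_INSN (first) is the first insn in BB defining REG ...
*/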
2046 \f
2047 /*----------------------------------------------------------------------------
2048 Debugging and printing functions.
2049 ----------------------------------------------------------------------------*/
2050
2051 /* Write the registers in the register set R into OUTF.
2052 This is part of making a debugging dump. */
2053
2054 void
2055 dump_regset (regset r, FILE *outf)
2056 {
2057 unsigned i;
2058 reg_set_iterator rsi;
2059
2060 if (r == NULL)
2061 {
2062 fputs (" (nil)", outf);
2063 return;
2064 }
2065
2066 EXECUTE_IF_SET_IN_REG_SET (r, 0, i, rsi)
2067 {
2068 fprintf (outf, " %d", i);
2069 if (i < FIRST_PSEUDO_REGISTER)
2070 fprintf (outf, " [%s]",
2071 reg_names[i]);
2072 }
2073 }
2074
2075 /* Print a human-readable representation of R on the standard error
2076 stream. This function is designed to be used from within the
2077 debugger. */
2078 extern void debug_regset (regset);
2079 DEBUG_FUNCTION void
2080 debug_regset (regset r)
2081 {
2082 dump_regset (r, stderr);
2083 putc ('\n', stderr);
2084 }
2085
2086 /* Write the registers in bitmap R into FILE.
2087 This is part of making a debugging dump. */
2088
2089 void
2090 df_print_regset (FILE *file, bitmap r)
2091 {
2092 unsigned int i;
2093 bitmap_iterator bi;
2094
2095 if (r == NULL)
2096 fputs (" (nil)", file);
2097 else
2098 {
2099 EXECUTE_IF_SET_IN_BITMAP (r, 0, i, bi)
2100 {
2101 fprintf (file, " %d", i);
2102 if (i < FIRST_PSEUDO_REGISTER)
2103 fprintf (file, " [%s]", reg_names[i]);
2104 }
2105 }
2106 fprintf (file, "\n");
2107 }
2108
2109
2110 /* Write the pseudo registers in bitmap R into FILE. The bitmap is
2111 in the two-bits-per-pseudo form used by df_word_lr. This is part
2112 of making a debugging dump. */
2113
2114 void
2115 df_print_word_regset (FILE *file, bitmap r)
2116 {
2117 unsigned int max_reg = max_reg_num ();
2118
2119 if (r == NULL)
2120 fputs (" (nil)", file);
2121 else
2122 {
2123 unsigned int i;
2124 for (i = FIRST_PSEUDO_REGISTER; i < max_reg; i++)
2125 {
2126 bool found = (bitmap_bit_p (r, 2 * i)
2127 || bitmap_bit_p (r, 2 * i + 1));
2128 if (found)
2129 {
2130 int word;
2131 const char * sep = "";
2132 fprintf (file, " %d", i);
2133 fprintf (file, "(");
2134 for (word = 0; word < 2; word++)
2135 if (bitmap_bit_p (r, 2 * i + word))
2136 {
2137 fprintf (file, "%s%d", sep, word);
2138 sep = ", ";
2139 }
2140 fprintf (file, ")");
2141 }
2142 }
2143 }
2144 fprintf (file, "\n");
2145 }
2146
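/* For instance, if pseudo 70 is marked in both words and pseudo 71
   only in its low word, the routine above prints " 70(0, 1) 71(0)",
   since bit 2*REGNO of R stands for the low word of REGNO and bit
   2*REGNO + 1 for the high word.  */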
2147
2148 /* Dump dataflow info. */
2149
2150 void
2151 df_dump (FILE *file)
2152 {
2153 basic_block bb;
2154 df_dump_start (file);
2155
2156 FOR_ALL_BB_FN (bb, cfun)
2157 {
2158 df_print_bb_index (bb, file);
2159 df_dump_top (bb, file);
2160 df_dump_bottom (bb, file);
2161 }
2162
2163 fprintf (file, "\n");
2164 }
2165
2166
2167 /* Dump dataflow info for df->blocks_to_analyze. */
2168
2169 void
2170 df_dump_region (FILE *file)
2171 {
2172 if (df->blocks_to_analyze)
2173 {
2174 bitmap_iterator bi;
2175 unsigned int bb_index;
2176
2177 fprintf (file, "\n\nstarting region dump\n");
2178 df_dump_start (file);
2179
2180 EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
2181 {
2182 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
2183 dump_bb (file, bb, 0, TDF_DETAILS);
2184 }
2185 fprintf (file, "\n");
2186 }
2187 else
2188 df_dump (file);
2189 }
2190
2191
2192 /* Dump the introductory information for each problem defined. */
2193
2194 void
2195 df_dump_start (FILE *file)
2196 {
2197 int i;
2198
2199 if (!df || !file)
2200 return;
2201
2202 fprintf (file, "\n\n%s\n", current_function_name ());
2203 fprintf (file, "\nDataflow summary:\n");
2204 if (df->blocks_to_analyze)
2205 fprintf (file, "def_info->table_size = %d, use_info->table_size = %d\n",
2206 DF_DEFS_TABLE_SIZE (), DF_USES_TABLE_SIZE ());
2207
2208 for (i = 0; i < df->num_problems_defined; i++)
2209 {
2210 struct dataflow *dflow = df->problems_in_order[i];
2211 if (dflow->computed)
2212 {
2213 df_dump_problem_function fun = dflow->problem->dump_start_fun;
2214 if (fun)
2215 fun (file);
2216 }
2217 }
2218 }
2219
2220
2221 /* Dump the top or bottom of the block information for BB. */
2222 static void
2223 df_dump_bb_problem_data (basic_block bb, FILE *file, bool top)
2224 {
2225 int i;
2226
2227 if (!df || !file)
2228 return;
2229
2230 for (i = 0; i < df->num_problems_defined; i++)
2231 {
2232 struct dataflow *dflow = df->problems_in_order[i];
2233 if (dflow->computed)
2234 {
2235 df_dump_bb_problem_function bbfun;
2236
2237 if (top)
2238 bbfun = dflow->problem->dump_top_fun;
2239 else
2240 bbfun = dflow->problem->dump_bottom_fun;
2241
2242 if (bbfun)
2243 bbfun (bb, file);
2244 }
2245 }
2246 }
2247
2248 /* Dump the top of the block information for BB. */
2249
2250 void
2251 df_dump_top (basic_block bb, FILE *file)
2252 {
2253 df_dump_bb_problem_data (bb, file, /*top=*/true);
2254 }
2255
2256 /* Dump the bottom of the block information for BB. */
2257
2258 void
2259 df_dump_bottom (basic_block bb, FILE *file)
2260 {
2261 df_dump_bb_problem_data (bb, file, /*top=*/false);
2262 }
2263
2264
2265 /* Dump information about INSN just before or after dumping INSN itself. */
2266 static void
2267 df_dump_insn_problem_data (const rtx_insn *insn, FILE *file, bool top)
2268 {
2269 int i;
2270
2271 if (!df || !file)
2272 return;
2273
2274 for (i = 0; i < df->num_problems_defined; i++)
2275 {
2276 struct dataflow *dflow = df->problems_in_order[i];
2277 if (dflow->computed)
2278 {
2279 df_dump_insn_problem_function insnfun;
2280
2281 if (top)
2282 insnfun = dflow->problem->dump_insn_top_fun;
2283 else
2284 insnfun = dflow->problem->dump_insn_bottom_fun;
2285
2286 if (insnfun)
2287 insnfun (insn, file);
2288 }
2289 }
2290 }
2291
2292 /* Dump information about INSN before dumping INSN itself. */
2293
2294 void
2295 df_dump_insn_top (const rtx_insn *insn, FILE *file)
2296 {
2297 df_dump_insn_problem_data (insn, file, /*top=*/true);
2298 }
2299
2300 /* Dump information about INSN after dumping INSN itself. */
2301
2302 void
2303 df_dump_insn_bottom (const rtx_insn *insn, FILE *file)
2304 {
2305 df_dump_insn_problem_data (insn, file, /*top=*/false);
2306 }
2307
2308
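/* Write REF to FILE in the compact form used by the chain dumpers:
   'd' for a def, 'e' for an equiv-note use, 'u' for any other use,
   followed by the ref id and, in parentheses, the register number.  */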
2309 static void
2310 df_ref_dump (df_ref ref, FILE *file)
2311 {
2312 fprintf (file, "%c%d(%d)",
2313 DF_REF_REG_DEF_P (ref)
2314 ? 'd'
2315 : (DF_REF_FLAGS (ref) & DF_REF_IN_NOTE) ? 'e' : 'u',
2316 DF_REF_ID (ref),
2317 DF_REF_REGNO (ref));
2318 }
2319
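/* Dump the list of refs starting at REF to FILE, following the
   DF_REF_CHAIN of each ref if FOLLOW_CHAIN.  */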
2320 void
2321 df_refs_chain_dump (df_ref ref, bool follow_chain, FILE *file)
2322 {
2323 fprintf (file, "{ ");
2324 for (; ref; ref = DF_REF_NEXT_LOC (ref))
2325 {
2326 df_ref_dump (ref, file);
2327 if (follow_chain)
2328 df_chain_dump (DF_REF_CHAIN (ref), file);
2329 }
2330 fprintf (file, "}");
2331 }
2332
2333
2334 /* Dump either a reg-def or reg-use chain. */
2335
2336 void
2337 df_regs_chain_dump (df_ref ref, FILE *file)
2338 {
2339 fprintf (file, "{ ");
2340 while (ref)
2341 {
2342 df_ref_dump (ref, file);
2343 ref = DF_REF_NEXT_REG (ref);
2344 }
2345 fprintf (file, "}");
2346 }
2347
2348
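/* Dump the chain of multiword hardregs starting at MWS to FILE.  */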
2349 static void
2350 df_mws_dump (struct df_mw_hardreg *mws, FILE *file)
2351 {
2352 for (; mws; mws = DF_MWS_NEXT (mws))
2353 fprintf (file, "mw %c r[%d..%d]\n",
2354 DF_MWS_REG_DEF_P (mws) ? 'd' : 'u',
2355 mws->start_regno, mws->end_regno);
2356 }
2357
2358
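/* Write the defs, uses, eq uses and multiword hardregs recorded for
   the insn with uid UID to FILE, following the df chains if
   FOLLOW_CHAIN.  */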
2359 static void
2360 df_insn_uid_debug (unsigned int uid,
2361 bool follow_chain, FILE *file)
2362 {
2363 fprintf (file, "insn %d luid %d",
2364 uid, DF_INSN_UID_LUID (uid));
2365
2366 if (DF_INSN_UID_DEFS (uid))
2367 {
2368 fprintf (file, " defs ");
2369 df_refs_chain_dump (DF_INSN_UID_DEFS (uid), follow_chain, file);
2370 }
2371
2372 if (DF_INSN_UID_USES (uid))
2373 {
2374 fprintf (file, " uses ");
2375 df_refs_chain_dump (DF_INSN_UID_USES (uid), follow_chain, file);
2376 }
2377
2378 if (DF_INSN_UID_EQ_USES (uid))
2379 {
2380 fprintf (file, " eq uses ");
2381 df_refs_chain_dump (DF_INSN_UID_EQ_USES (uid), follow_chain, file);
2382 }
2383
2384 if (DF_INSN_UID_MWS (uid))
2385 {
2386 fprintf (file, " mws ");
2387 df_mws_dump (DF_INSN_UID_MWS (uid), file);
2388 }
2389 fprintf (file, "\n");
2390 }
2391
2392
2393 DEBUG_FUNCTION void
2394 df_insn_debug (rtx_insn *insn, bool follow_chain, FILE *file)
2395 {
2396 df_insn_uid_debug (INSN_UID (insn), follow_chain, file);
2397 }
2398
2399 DEBUG_FUNCTION void
2400 df_insn_debug_regno (rtx_insn *insn, FILE *file)
2401 {
2402 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
2403
2404 fprintf (file, "insn %d bb %d luid %d defs ",
2405 INSN_UID (insn), BLOCK_FOR_INSN (insn)->index,
2406 DF_INSN_INFO_LUID (insn_info));
2407 df_refs_chain_dump (DF_INSN_INFO_DEFS (insn_info), false, file);
2408
2409 fprintf (file, " uses ");
2410 df_refs_chain_dump (DF_INSN_INFO_USES (insn_info), false, file);
2411
2412 fprintf (file, " eq_uses ");
2413 df_refs_chain_dump (DF_INSN_INFO_EQ_USES (insn_info), false, file);
2414 fprintf (file, "\n");
2415 }
2416
2417 DEBUG_FUNCTION void
2418 df_regno_debug (unsigned int regno, FILE *file)
2419 {
2420 fprintf (file, "reg %d defs ", regno);
2421 df_regs_chain_dump (DF_REG_DEF_CHAIN (regno), file);
2422 fprintf (file, " uses ");
2423 df_regs_chain_dump (DF_REG_USE_CHAIN (regno), file);
2424 fprintf (file, " eq_uses ");
2425 df_regs_chain_dump (DF_REG_EQ_USE_CHAIN (regno), file);
2426 fprintf (file, "\n");
2427 }
2428
2429
2430 DEBUG_FUNCTION void
2431 df_ref_debug (df_ref ref, FILE *file)
2432 {
2433 fprintf (file, "%c%d ",
2434 DF_REF_REG_DEF_P (ref) ? 'd' : 'u',
2435 DF_REF_ID (ref));
2436 fprintf (file, "reg %d bb %d insn %d flag %#x type %#x ",
2437 DF_REF_REGNO (ref),
2438 DF_REF_BBNO (ref),
2439 DF_REF_IS_ARTIFICIAL (ref) ? -1 : DF_REF_INSN_UID (ref),
2440 DF_REF_FLAGS (ref),
2441 DF_REF_TYPE (ref));
2442 if (DF_REF_LOC (ref))
2443 {
2444 if (flag_dump_noaddr)
2445 fprintf (file, "loc #(#) chain ");
2446 else
2447 fprintf (file, "loc %p(%p) chain ", (void *)DF_REF_LOC (ref),
2448 (void *)*DF_REF_LOC (ref));
2449 }
2450 else
2451 fprintf (file, "chain ");
2452 df_chain_dump (DF_REF_CHAIN (ref), file);
2453 fprintf (file, "\n");
2454 }
2455 \f
2456 /* Functions for debugging from GDB. */
2457
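/* These are meant to be called by hand from the debugger; a typical
   gdb session against cc1 is assumed here, and the results go to
   stderr:

       (gdb) call debug_df_insn (insn)
       (gdb) call debug_df_regno (70)
*/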
2458 DEBUG_FUNCTION void
2459 debug_df_insn (rtx_insn *insn)
2460 {
2461 df_insn_debug (insn, true, stderr);
2462 debug_rtx (insn);
2463 }
2464
2465
2466 DEBUG_FUNCTION void
2467 debug_df_reg (rtx reg)
2468 {
2469 df_regno_debug (REGNO (reg), stderr);
2470 }
2471
2472
2473 DEBUG_FUNCTION void
2474 debug_df_regno (unsigned int regno)
2475 {
2476 df_regno_debug (regno, stderr);
2477 }
2478
2479
2480 DEBUG_FUNCTION void
2481 debug_df_ref (df_ref ref)
2482 {
2483 df_ref_debug (ref, stderr);
2484 }
2485
2486
2487 DEBUG_FUNCTION void
2488 debug_df_defno (unsigned int defno)
2489 {
2490 df_ref_debug (DF_DEFS_GET (defno), stderr);
2491 }
2492
2493
2494 DEBUG_FUNCTION void
2495 debug_df_useno (unsigned int defno)
2496 {
2497 df_ref_debug (DF_USES_GET (defno), stderr);
2498 }
2499
2500
2501 DEBUG_FUNCTION void
2502 debug_df_chain (struct df_link *link)
2503 {
2504 df_chain_dump (link, stderr);
2505 fputc ('\n', stderr);
2506 }