1 /* Allocation for dataflow support routines.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
3 Free Software Foundation, Inc.
4 Originally contributed by Michael P. Hayes
5 (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
6 Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
7 and Kenneth Zadeck (zadeck@naturalbridge.com).
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
24
25 /*
26 OVERVIEW:
27
28 The files in this collection (df*.c,df.h) provide a general framework
    29 for solving dataflow problems.  The global dataflow analysis is performed
    30 using an efficient implementation of iterative dataflow analysis.
31
    32 The file df-problems.c provides problem instances for the most common
33 dataflow problems: reaching defs, upward exposed uses, live variables,
34 uninitialized variables, def-use chains, and use-def chains. However,
35 the interface allows other dataflow problems to be defined as well.
36
37 Dataflow analysis is available in most of the rtl backend (the parts
38 between pass_df_initialize and pass_df_finish). It is quite likely
39 that these boundaries will be expanded in the future. The only
40 requirement is that there be a correct control flow graph.
41
42 There are three variations of the live variable problem that are
43 available whenever dataflow is available. The LR problem finds the
    44 areas that can reach a use of a variable, the UR problem finds the
    45 areas that can be reached from a definition of a variable.  The LIVE
46 problem finds the intersection of these two areas.
47
48 There are several optional problems. These can be enabled when they
49 are needed and disabled when they are not needed.
50
51 Dataflow problems are generally solved in three layers. The bottom
52 layer is called scanning where a data structure is built for each rtl
53 insn that describes the set of defs and uses of that insn. Scanning
    54 is generally kept up to date, i.e. as an insn changes, the scanned
    55 version of that insn changes also.  There are various mechanisms for
    56 making this happen, and they are described in the INCREMENTAL SCANNING
57 section.
58
59 In the middle layer, basic blocks are scanned to produce transfer
    60 functions which describe the effects of that block on the global
    61 dataflow solution.  The transfer functions are only rebuilt if
    62 some instruction within the block has changed.
63
64 The top layer is the dataflow solution itself. The dataflow solution
65 is computed by using an efficient iterative solver and the transfer
    66 functions.  The dataflow solution must be recomputed whenever the
    67 control flow graph changes or one of the transfer functions changes.
68
69
70 USAGE:
71
72 Here is an example of using the dataflow routines.
73
74 df_[chain,live,note,rd]_add_problem (flags);
75
76 df_set_blocks (blocks);
77
78 df_analyze ();
79
80 df_dump (stderr);
81
82 df_finish_pass (false);
83
84 DF_[chain,live,note,rd]_ADD_PROBLEM adds a problem, defined by an
    85 instance of struct df_problem, to the set of problems solved in this
86 instance of df. All calls to add a problem for a given instance of df
87 must occur before the first call to DF_ANALYZE.
88
89 Problems can be dependent on other problems. For instance, solving
90 def-use or use-def chains is dependent on solving reaching
91 definitions. As long as these dependencies are listed in the problem
92 definition, the order of adding the problems is not material.
93 Otherwise, the problems will be solved in the order of calls to
94 df_add_problem. Note that it is not necessary to have a problem. In
95 that case, df will just be used to do the scanning.
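
   For example, a pass that wants use-def chains for the whole function
   might do something like the following (a sketch only; the exact
   problem and flags depend on what the pass needs):

      df_chain_add_problem (DF_UD_CHAIN);
      df_analyze ();
      ... walk DF_REF_CHAIN (use) for the uses of interest ...
      df_finish_pass (false);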
96
97
98
99 DF_SET_BLOCKS is an optional call used to define a region of the
100 function on which the analysis will be performed. The normal case is
101 to analyze the entire function and no call to df_set_blocks is made.
   102 DF_SET_BLOCKS only affects which blocks are considered when computing
103 the transfer functions and final solution. The insn level information
104 is always kept up to date.
105
106 When a subset is given, the analysis behaves as if the function only
107 contains those blocks and any edges that occur directly between the
108 blocks in the set. Care should be taken to call df_set_blocks right
109 before the call to analyze in order to eliminate the possibility that
110 optimizations that reorder blocks invalidate the bitvector.
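
   For instance, a region-based pass might focus the solution on a
   subset of blocks roughly as follows (a sketch; region_blocks stands
   for a bitmap of block indices built by the pass):

      df_set_blocks (region_blocks);
      df_analyze ();
      ... examine DF_LR_BB_INFO (bb)->in and ->out for the region's blocks ...
      df_set_blocks (NULL);

   where the final call restores the whole-function focus.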
111
112 DF_ANALYZE causes all of the defined problems to be (re)solved. When
   113 DF_ANALYZE completes, the IN and OUT sets for each basic block
   114 contain the computed information.  The DF_*_BB_INFO macros can be used
   115 to access these bitvectors.  All deferred rescans are done before
116 the transfer functions are recomputed.
117
   118 DF_DUMP can then be called to dump the information produced to some
119 file. This calls DF_DUMP_START, to print the information that is not
120 basic block specific, and then calls DF_DUMP_TOP and DF_DUMP_BOTTOM
   121 for each block to print the basic block specific information.  These parts
122 can all be called separately as part of a larger dump function.
123
124
125 DF_FINISH_PASS causes df_remove_problem to be called on all of the
126 optional problems. It also causes any insns whose scanning has been
   127 deferred to be rescanned, and it clears all of the changeable flags.
128 Setting the pass manager TODO_df_finish flag causes this function to
129 be run. However, the pass manager will call df_finish_pass AFTER the
130 pass dumping has been done, so if you want to see the results of the
131 optional problems in the pass dumps, use the TODO flag rather than
132 calling the function yourself.
133
134 INCREMENTAL SCANNING
135
136 There are four ways of doing the incremental scanning:
137
138 1) Immediate rescanning - Calls to df_insn_rescan, df_notes_rescan,
139 df_bb_delete, df_insn_change_bb have been added to most of
140 the low level service functions that maintain the cfg and change
   141 rtl.  Calling any of these routines may cause some number of insns
142 to be rescanned.
143
144 For most modern rtl passes, this is certainly the easiest way to
145 manage rescanning the insns. This technique also has the advantage
146 that the scanning information is always correct and can be relied
147 upon even after changes have been made to the instructions. This
   148 technique is contraindicated in several cases:
149
150 a) If def-use chains OR use-def chains (but not both) are built,
151 using this is SIMPLY WRONG. The problem is that when a ref is
152 deleted that is the target of an edge, there is not enough
153 information to efficiently find the source of the edge and
154 delete the edge. This leaves a dangling reference that may
155 cause problems.
156
157 b) If def-use chains AND use-def chains are built, this may
158 produce unexpected results. The problem is that the incremental
159 scanning of an insn does not know how to repair the chains that
160 point into an insn when the insn changes. So the incremental
161 scanning just deletes the chains that enter and exit the insn
162 being changed. The dangling reference issue in (a) is not a
163 problem here, but if the pass is depending on the chains being
164 maintained after insns have been modified, this technique will
165 not do the correct thing.
166
167 c) If the pass modifies insns several times, this incremental
168 updating may be expensive.
169
170 d) If the pass modifies all of the insns, as does register
171 allocation, it is simply better to rescan the entire function.
172
173 e) If the pass uses either non-standard or ancient techniques to
174 modify insns, automatic detection of the insns that need to be
175 rescanned may be impractical. Cse and regrename fall into this
176 category.
177
178 2) Deferred rescanning - Calls to df_insn_rescan, df_notes_rescan, and
179 df_insn_delete do not immediately change the insn but instead make
180 a note that the insn needs to be rescanned. The next call to
181 df_analyze, df_finish_pass, or df_process_deferred_rescans will
182 cause all of the pending rescans to be processed.
183
   184 This is the technique of choice if any of 1a, 1b, or 1c is an issue
185 in the pass. In the case of 1a or 1b, a call to df_remove_problem
186 (df_chain) should be made before the next call to df_analyze or
187 df_process_deferred_rescans.
188
   189 To enable this mode, call df_set_flags (DF_DEFER_INSN_RESCAN).
   190 (This mode can be cleared by calling df_clear_flags
   191 (DF_DEFER_INSN_RESCAN), but this does not cause the deferred insns
   192 to be rescanned.)  A short usage sketch appears after this list.
193
194 3) Total rescanning - In this mode the rescanning is disabled.
   195 However, the df information associated with a deleted insn is deleted
   196 at the time the insn is deleted.  At the end of the pass, a call
197 must be made to df_insn_rescan_all. This method is used by the
198 register allocator since it generally changes each insn multiple
199 times (once for each ref) and does not need to make use of the
200 updated scanning information.
201
202 It is also currently used by two older passes (cse, and regrename)
   203 which change insns in hard-to-track ways.  It is hoped that this
   204 will be fixed soon, since it is expensive to rescan all of the
205 insns when only a small number of them have really changed.
206
207 4) Do it yourself - In this mechanism, the pass updates the insns
208 itself using the low level df primitives. Currently no pass does
209 this, but it has the advantage that it is quite efficient given
210 that the pass generally has exact knowledge of what it is changing.
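
   As a sketch of the deferred mode described in (2) above, a pass
   might do:

      df_set_flags (DF_DEFER_INSN_RESCAN);
      ... modify or delete insns, calling df_insn_rescan as usual ...
      df_analyze ();
      df_finish_pass (false);

   The deferred rescans are processed by the call to df_analyze before
   the transfer functions are recomputed.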
211
212 DATA STRUCTURES
213
   214 Scanning produces a `struct df_ref' data structure (ref) that is allocated
   215 for every register reference (def or use); it records the insn
   216 and bb the ref is found within.  The refs are linked together in
217 chains of uses and defs for each insn and for each register. Each ref
218 also has a chain field that links all the use refs for a def or all
219 the def refs for a use. This is used to create use-def or def-use
220 chains.
221
222 Different optimizations have different needs. Ultimately, only
223 register allocation and schedulers should be using the bitmaps
224 produced for the live register and uninitialized register problems.
   225 The rest of the backend should be upgraded to use and maintain
   226 the linked information such as def-use or use-def chains.
227
228
229 PHILOSOPHY:
230
231 While incremental bitmaps are not worthwhile to maintain, incremental
232 chains may be perfectly reasonable. The fastest way to build chains
233 from scratch or after significant modifications is to build reaching
234 definitions (RD) and build the chains from this.
235
236 However, general algorithms for maintaining use-def or def-use chains
   237 are not practical.  The amount of work to recompute any chain
   238 after an arbitrary change is large.  However, with a modest
239 amount of work it is generally possible to have the application that
240 uses the chains keep them up to date. The high level knowledge of
241 what is really happening is essential to crafting efficient
242 incremental algorithms.
243
244 As for the bit vector problems, there is no interface to give a set of
   245 blocks over which to restart the iteration.  In general, restarting a
   246 dataflow iteration is difficult and expensive.  Again, the best way to
   247 keep the dataflow information up to date (if this is really what is
   248 needed) is to formulate a problem-specific solution.
249
250 There are fine grained calls for creating and deleting references from
251 instructions in df-scan.c. However, these are not currently connected
252 to the engine that resolves the dataflow equations.
253
254
255 DATA STRUCTURES:
256
257 The basic object is a DF_REF (reference) and this may either be a
258 DEF (definition) or a USE of a register.
259
260 These are linked into a variety of lists; namely reg-def, reg-use,
   261 insn-def, insn-use, def-use, and use-def lists.  For example, the
   262 reg-def lists contain all the locations that define a given register
   263 while the insn-use lists contain all the uses that occur within a
   264 given insn.
265
266 Note that the reg-def and reg-use chains are generally short for
267 pseudos and long for the hard registers.
268
269 ACCESSING INSNS:
270
271 1) The df insn information is kept in the insns array. This array is
272 indexed by insn uid.
273
   274 2) Each insn has three sets of refs, linked into one of three
275 lists: the insn's defs list (accessed by the DF_INSN_DEFS or
276 DF_INSN_UID_DEFS macros), the insn's uses list (accessed by the
277 DF_INSN_USES or DF_INSN_UID_USES macros) or the insn's eq_uses list
278 (accessed by the DF_INSN_EQ_USES or DF_INSN_UID_EQ_USES macros).
   279 The latter list is the list of references in REG_EQUAL or
   280 REG_EQUIV notes.  These macros produce a ref (or NULL); the rest of
   281 the list can be obtained by traversing the NEXT_REF field
   282 (accessed by the DF_REF_NEXT_REF macro).  There is no significance
   283 to the ordering of the uses or refs in an instruction.  (A traversal sketch appears after this list.)
284
285 3) Each insn has a logical uid field (LUID). When properly set, this
286 is an integer that numbers each insn in the basic block, in order from
   287 the start of the block.  The numbers are only correct after a call to
   288 df_analyze.  They will rot after insns are added, deleted, or moved
289 around.
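
   For instance, all the defs of an insn can be visited with the same
   idiom that the query routines later in this file use (a sketch):

      struct df_ref **def_rec;
      for (def_rec = DF_INSN_UID_DEFS (INSN_UID (insn)); *def_rec; def_rec++)
        {
          struct df_ref *def = *def_rec;
          ... look at DF_REF_REGNO (def), DF_REF_REG (def), etc. ...
        }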
290
291 ACCESSING REFS:
292
293 There are 4 ways to obtain access to refs:
294
295 1) References are divided into two categories, REAL and ARTIFICIAL.
296
297 REAL refs are associated with instructions.
298
299 ARTIFICIAL refs are associated with basic blocks. The heads of
300 these lists can be accessed by calling df_get_artificial_defs or
301 df_get_artificial_uses for the particular basic block.
302
   303 Artificial defs and uses occur both at the beginnings and ends of blocks.
304
   305 For blocks that are at the destination of eh edges, the
   306 artificial uses and defs occur at the beginning.  The defs relate
   307 to the registers specified in EH_RETURN_DATA_REGNO and the uses
   308 relate to the registers specified in EH_USES.  Logically these
   309 defs and uses should really occur along the eh edge, but there is
   310 no convenient way to do this.  Artificial refs that occur at the
   311 beginning of the block have the DF_REF_AT_TOP flag set.
312
313 Artificial uses occur at the end of all blocks. These arise from
314 the hard registers that are always live, such as the stack
   315 register, and are put there to keep the code from forgetting about
316 them.
317
318 Artificial defs occur at the end of the entry block. These arise
319 from registers that are live at entry to the function.
320
321 2) There are three types of refs: defs, uses and eq_uses. (Eq_uses are
322 uses that appear inside a REG_EQUAL or REG_EQUIV note.)
323
324 All of the eq_uses, uses and defs associated with each pseudo or
325 hard register may be linked in a bidirectional chain. These are
   326 called reg-use or reg-def chains.  If the changeable flag
327 DF_EQ_NOTES is set when the chains are built, the eq_uses will be
328 treated like uses. If it is not set they are ignored.
329
330 The first use, eq_use or def for a register can be obtained using
331 the DF_REG_USE_CHAIN, DF_REG_EQ_USE_CHAIN or DF_REG_DEF_CHAIN
332 macros. Subsequent uses for the same regno can be obtained by
333 following the next_reg field of the ref. The number of elements in
334 each of the chains can be found by using the DF_REG_USE_COUNT,
   335 DF_REG_EQ_USE_COUNT or DF_REG_DEF_COUNT macros.  (A traversal sketch appears after this list.)
336
337 In previous versions of this code, these chains were ordered. It
338 has not been practical to continue this practice.
339
340 3) If def-use or use-def chains are built, these can be traversed to
341 get to other refs. If the flag DF_EQ_NOTES has been set, the chains
342 include the eq_uses. Otherwise these are ignored when building the
343 chains.
344
   345 4) An array of all of the uses (and an array of all of the defs) can
   346 be built.  These arrays are indexed by the value in the id field
   347 of the ref.  These arrays are only lazily kept up to date, and that
349 process can be expensive. To have these arrays built, call
350 df_reorganize_defs or df_reorganize_uses. If the flag DF_EQ_NOTES
351 has been set the array will contain the eq_uses. Otherwise these
352 are ignored when building the array and assigning the ids. Note
353 that the values in the id field of a ref may change across calls to
354 df_analyze or df_reorganize_defs or df_reorganize_uses.
355
356 If the only use of this array is to find all of the refs, it is
   357 better to traverse all of the registers and then traverse all of the
358 reg-use or reg-def chains.
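
   For instance, all defs of a given regno can be visited by following
   the next_reg links, just as df_regs_chain_dump below does (a sketch):

      struct df_ref *def;
      for (def = DF_REG_DEF_CHAIN (regno); def; def = def->next_reg)
        ... process def ...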
359
360 NOTES:
361
362 Embedded addressing side-effects, such as POST_INC or PRE_INC, generate
363 both a use and a def. These are both marked read/write to show that they
364 are dependent. For example, (set (reg 40) (mem (post_inc (reg 42))))
365 will generate a use of reg 42 followed by a def of reg 42 (both marked
366 read/write). Similarly, (set (reg 40) (mem (pre_dec (reg 41))))
367 generates a use of reg 41 then a def of reg 41 (both marked read/write),
368 even though reg 41 is decremented before it is used for the memory
369 address in this second example.
370
371 A set to a REG inside a ZERO_EXTRACT, or a set to a non-paradoxical SUBREG
372 for which the number of word_mode units covered by the outer mode is
   373 smaller than that covered by the inner mode, invokes a read-modify-write
374 operation. We generate both a use and a def and again mark them
375 read/write.
376
377 Paradoxical subreg writes do not leave a trace of the old content, so they
378 are write-only operations.
379 */
380
381
382 #include "config.h"
383 #include "system.h"
384 #include "coretypes.h"
385 #include "tm.h"
386 #include "rtl.h"
387 #include "tm_p.h"
388 #include "insn-config.h"
389 #include "recog.h"
390 #include "function.h"
391 #include "regs.h"
392 #include "output.h"
393 #include "alloc-pool.h"
394 #include "flags.h"
395 #include "hard-reg-set.h"
396 #include "basic-block.h"
397 #include "sbitmap.h"
398 #include "bitmap.h"
399 #include "timevar.h"
400 #include "df.h"
401 #include "tree-pass.h"
402
403 static void *df_get_bb_info (struct dataflow *, unsigned int);
404 static void df_set_bb_info (struct dataflow *, unsigned int, void *);
405 #ifdef DF_DEBUG_CFG
406 static void df_set_clean_cfg (void);
407 #endif
408
   409 /* An obstack for bitmaps not related to specific dataflow problems.
410 This obstack should e.g. be used for bitmaps with a short life time
411 such as temporary bitmaps. */
412
413 bitmap_obstack df_bitmap_obstack;
414
415
416 /*----------------------------------------------------------------------------
417 Functions to create, destroy and manipulate an instance of df.
418 ----------------------------------------------------------------------------*/
419
420 struct df *df;
421
422 /* Add PROBLEM (and any dependent problems) to the DF instance. */
423
424 void
425 df_add_problem (struct df_problem *problem)
426 {
427 struct dataflow *dflow;
428 int i;
429
430 /* First try to add the dependent problem. */
431 if (problem->dependent_problem)
432 df_add_problem (problem->dependent_problem);
433
434 /* Check to see if this problem has already been defined. If it
   435 has, just return; if not, add it to the end of the
436 vector. */
437 dflow = df->problems_by_index[problem->id];
438 if (dflow)
439 return;
440
441 /* Make a new one and add it to the end. */
442 dflow = XCNEW (struct dataflow);
443 dflow->problem = problem;
444 dflow->computed = false;
445 dflow->solutions_dirty = true;
446 df->problems_by_index[dflow->problem->id] = dflow;
447
448 /* Keep the defined problems ordered by index. This solves the
449 problem that RI will use the information from UREC if UREC has
450 been defined, or from LIVE if LIVE is defined and otherwise LR.
451 However for this to work, the computation of RI must be pushed
   452 after whichever of those problems is defined, but we do not
453 require any of those except for LR to have actually been
454 defined. */
455 df->num_problems_defined++;
456 for (i = df->num_problems_defined - 2; i >= 0; i--)
457 {
458 if (problem->id < df->problems_in_order[i]->problem->id)
459 df->problems_in_order[i+1] = df->problems_in_order[i];
460 else
461 {
462 df->problems_in_order[i+1] = dflow;
463 return;
464 }
465 }
466 df->problems_in_order[0] = dflow;
467 }
468
469
   470 /* Set the CHANGEABLE_FLAGS in the DF instance.  The old flags are
471 returned. If a flag is not allowed to be changed this will fail if
472 checking is enabled. */
473 enum df_changeable_flags
474 df_set_flags (enum df_changeable_flags changeable_flags)
475 {
476 enum df_changeable_flags old_flags = df->changeable_flags;
477 df->changeable_flags |= changeable_flags;
478 return old_flags;
479 }
480
481
   482 /* Clear the CHANGEABLE_FLAGS in the DF instance.  The old flags are
483 returned. If a flag is not allowed to be changed this will fail if
484 checking is enabled. */
485 enum df_changeable_flags
486 df_clear_flags (enum df_changeable_flags changeable_flags)
487 {
488 enum df_changeable_flags old_flags = df->changeable_flags;
489 df->changeable_flags &= ~changeable_flags;
490 return old_flags;
491 }
492
493
494 /* Set the blocks that are to be considered for analysis. If this is
   495 not called or is called with null, the entire function is
496 analyzed. */
497
498 void
499 df_set_blocks (bitmap blocks)
500 {
501 if (blocks)
502 {
503 if (dump_file)
504 bitmap_print (dump_file, blocks, "setting blocks to analyze ", "\n");
505 if (df->blocks_to_analyze)
506 {
507 /* This block is called to change the focus from one subset
508 to another. */
509 int p;
510 bitmap diff = BITMAP_ALLOC (&df_bitmap_obstack);
511 bitmap_and_compl (diff, df->blocks_to_analyze, blocks);
512 for (p = 0; p < df->num_problems_defined; p++)
513 {
514 struct dataflow *dflow = df->problems_in_order[p];
515 if (dflow->optional_p && dflow->problem->reset_fun)
516 dflow->problem->reset_fun (df->blocks_to_analyze);
517 else if (dflow->problem->free_blocks_on_set_blocks)
518 {
519 bitmap_iterator bi;
520 unsigned int bb_index;
521
522 EXECUTE_IF_SET_IN_BITMAP (diff, 0, bb_index, bi)
523 {
524 basic_block bb = BASIC_BLOCK (bb_index);
525 if (bb)
526 {
527 void *bb_info = df_get_bb_info (dflow, bb_index);
528 if (bb_info)
529 {
530 dflow->problem->free_bb_fun (bb, bb_info);
531 df_set_bb_info (dflow, bb_index, NULL);
532 }
533 }
534 }
535 }
536 }
537
538 BITMAP_FREE (diff);
539 }
540 else
541 {
542 /* This block of code is executed to change the focus from
543 the entire function to a subset. */
544 bitmap blocks_to_reset = NULL;
545 int p;
546 for (p = 0; p < df->num_problems_defined; p++)
547 {
548 struct dataflow *dflow = df->problems_in_order[p];
549 if (dflow->optional_p && dflow->problem->reset_fun)
550 {
551 if (!blocks_to_reset)
552 {
553 basic_block bb;
554 blocks_to_reset =
555 BITMAP_ALLOC (&df_bitmap_obstack);
556 FOR_ALL_BB(bb)
557 {
558 bitmap_set_bit (blocks_to_reset, bb->index);
559 }
560 }
561 dflow->problem->reset_fun (blocks_to_reset);
562 }
563 }
564 if (blocks_to_reset)
565 BITMAP_FREE (blocks_to_reset);
566
567 df->blocks_to_analyze = BITMAP_ALLOC (&df_bitmap_obstack);
568 }
569 bitmap_copy (df->blocks_to_analyze, blocks);
570 df->analyze_subset = true;
571 }
572 else
573 {
574 /* This block is executed to reset the focus to the entire
575 function. */
576 if (dump_file)
577 fprintf (dump_file, "clearing blocks_to_analyze\n");
578 if (df->blocks_to_analyze)
579 {
580 BITMAP_FREE (df->blocks_to_analyze);
581 df->blocks_to_analyze = NULL;
582 }
583 df->analyze_subset = false;
584 }
585
586 /* Setting the blocks causes the refs to be unorganized since only
587 the refs in the blocks are seen. */
588 df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE);
589 df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE);
590 df_mark_solutions_dirty ();
591 }
592
593
594 /* Delete a DFLOW problem (and any problems that depend on this
595 problem). */
596
597 void
598 df_remove_problem (struct dataflow *dflow)
599 {
600 struct df_problem *problem;
601 int i;
602
603 if (!dflow)
604 return;
605
606 problem = dflow->problem;
607 gcc_assert (problem->remove_problem_fun);
608
609 /* Delete any problems that depended on this problem first. */
610 for (i = 0; i < df->num_problems_defined; i++)
611 if (df->problems_in_order[i]->problem->dependent_problem == problem)
612 df_remove_problem (df->problems_in_order[i]);
613
614 /* Now remove this problem. */
615 for (i = 0; i < df->num_problems_defined; i++)
616 if (df->problems_in_order[i] == dflow)
617 {
618 int j;
619 for (j = i + 1; j < df->num_problems_defined; j++)
620 df->problems_in_order[j-1] = df->problems_in_order[j];
621 df->problems_in_order[j] = NULL;
622 df->num_problems_defined--;
623 break;
624 }
625
626 (problem->remove_problem_fun) ();
627 df->problems_by_index[problem->id] = NULL;
628 }
629
630
631 /* Remove all of the problems that are not permanent. Scanning, LR
632 and (at -O2 or higher) LIVE are permanent, the rest are removable.
633 Also clear all of the changeable_flags. */
634
635 void
636 df_finish_pass (bool verify ATTRIBUTE_UNUSED)
637 {
638 int i;
639 int removed = 0;
640
641 #ifdef ENABLE_DF_CHECKING
642 enum df_changeable_flags saved_flags;
643 #endif
644
645 if (!df)
646 return;
647
648 df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE);
649 df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE);
650
651 #ifdef ENABLE_DF_CHECKING
652 saved_flags = df->changeable_flags;
653 #endif
654
655 for (i = 0; i < df->num_problems_defined; i++)
656 {
657 struct dataflow *dflow = df->problems_in_order[i];
658 struct df_problem *problem = dflow->problem;
659
660 if (dflow->optional_p)
661 {
662 gcc_assert (problem->remove_problem_fun);
663 (problem->remove_problem_fun) ();
664 df->problems_in_order[i] = NULL;
665 df->problems_by_index[problem->id] = NULL;
666 removed++;
667 }
668 }
669 df->num_problems_defined -= removed;
670
671 /* Clear all of the flags. */
672 df->changeable_flags = 0;
673 df_process_deferred_rescans ();
674
675 /* Set the focus back to the whole function. */
676 if (df->blocks_to_analyze)
677 {
678 BITMAP_FREE (df->blocks_to_analyze);
679 df->blocks_to_analyze = NULL;
680 df_mark_solutions_dirty ();
681 df->analyze_subset = false;
682 }
683
684 #ifdef ENABLE_DF_CHECKING
685 /* Verification will fail in DF_NO_INSN_RESCAN. */
686 if (!(saved_flags & DF_NO_INSN_RESCAN))
687 {
688 df_lr_verify_transfer_functions ();
689 if (df_live)
690 df_live_verify_transfer_functions ();
691 }
692
693 #ifdef DF_DEBUG_CFG
694 df_set_clean_cfg ();
695 #endif
696 #endif
697
698 #ifdef ENABLE_CHECKING
699 if (verify)
700 df->changeable_flags |= DF_VERIFY_SCHEDULED;
701 #endif
702 }
703
704
705 /* Set up the dataflow instance for the entire back end. */
706
707 static unsigned int
708 rest_of_handle_df_initialize (void)
709 {
710 gcc_assert (!df);
711 df = XCNEW (struct df);
712 df->changeable_flags = 0;
713
714 bitmap_obstack_initialize (&df_bitmap_obstack);
715
716 /* Set this to a conservative value. Stack_ptr_mod will compute it
717 correctly later. */
718 current_function_sp_is_unchanging = 0;
719
720 df_scan_add_problem ();
721 df_scan_alloc (NULL);
722
723 /* These three problems are permanent. */
724 df_lr_add_problem ();
725 if (optimize > 1)
726 df_live_add_problem ();
727
728 df->postorder = XNEWVEC (int, last_basic_block);
729 df->postorder_inverted = XNEWVEC (int, last_basic_block);
730 df->n_blocks = post_order_compute (df->postorder, true, true);
731 df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted);
732 gcc_assert (df->n_blocks == df->n_blocks_inverted);
733
734 df->hard_regs_live_count = XNEWVEC (unsigned int, FIRST_PSEUDO_REGISTER);
735 memset (df->hard_regs_live_count, 0,
736 sizeof (unsigned int) * FIRST_PSEUDO_REGISTER);
737
738 df_hard_reg_init ();
739 /* After reload, some ports add certain bits to regs_ever_live so
740 this cannot be reset. */
741 df_compute_regs_ever_live (true);
742 df_scan_blocks ();
743 df_compute_regs_ever_live (false);
744 return 0;
745 }
746
747
748 static bool
749 gate_opt (void)
750 {
751 return optimize > 0;
752 }
753
754
755 struct tree_opt_pass pass_df_initialize_opt =
756 {
757 "dfinit", /* name */
758 gate_opt, /* gate */
759 rest_of_handle_df_initialize, /* execute */
760 NULL, /* sub */
761 NULL, /* next */
762 0, /* static_pass_number */
763 0, /* tv_id */
764 0, /* properties_required */
765 0, /* properties_provided */
766 0, /* properties_destroyed */
767 0, /* todo_flags_start */
768 0, /* todo_flags_finish */
769 'z' /* letter */
770 };
771
772
773 static bool
774 gate_no_opt (void)
775 {
776 return optimize == 0;
777 }
778
779
780 struct tree_opt_pass pass_df_initialize_no_opt =
781 {
782 "dfinit", /* name */
783 gate_no_opt, /* gate */
784 rest_of_handle_df_initialize, /* execute */
785 NULL, /* sub */
786 NULL, /* next */
787 0, /* static_pass_number */
788 0, /* tv_id */
789 0, /* properties_required */
790 0, /* properties_provided */
791 0, /* properties_destroyed */
792 0, /* todo_flags_start */
793 0, /* todo_flags_finish */
794 'z' /* letter */
795 };
796
797
   798 /* Free all the dataflow info and the DF structure.  This is run
   799 as the execute function of the df_finish pass. */
800
801 static unsigned int
802 rest_of_handle_df_finish (void)
803 {
804 int i;
805
806 gcc_assert (df);
807
808 for (i = 0; i < df->num_problems_defined; i++)
809 {
810 struct dataflow *dflow = df->problems_in_order[i];
811 dflow->problem->free_fun ();
812 }
813
814 if (df->postorder)
815 free (df->postorder);
816 if (df->postorder_inverted)
817 free (df->postorder_inverted);
818 free (df->hard_regs_live_count);
819 free (df);
820 df = NULL;
821
822 bitmap_obstack_release (&df_bitmap_obstack);
823 return 0;
824 }
825
826
827 struct tree_opt_pass pass_df_finish =
828 {
829 "dfinish", /* name */
830 NULL, /* gate */
831 rest_of_handle_df_finish, /* execute */
832 NULL, /* sub */
833 NULL, /* next */
834 0, /* static_pass_number */
835 0, /* tv_id */
836 0, /* properties_required */
837 0, /* properties_provided */
838 0, /* properties_destroyed */
839 0, /* todo_flags_start */
840 0, /* todo_flags_finish */
841 'z' /* letter */
842 };
843
844
845
846
847 \f
848 /*----------------------------------------------------------------------------
849 The general data flow analysis engine.
850 ----------------------------------------------------------------------------*/
851
852
853 /* Helper function for df_worklist_dataflow.
854 Propagate the dataflow forward.
855 Given a BB_INDEX, do the dataflow propagation
856 and set bits on for successors in PENDING
857 if the out set of the dataflow has changed. */
858
859 static void
860 df_worklist_propagate_forward (struct dataflow *dataflow,
861 unsigned bb_index,
862 unsigned *bbindex_to_postorder,
863 bitmap pending,
864 sbitmap considered)
865 {
866 edge e;
867 edge_iterator ei;
868 basic_block bb = BASIC_BLOCK (bb_index);
869
870 /* Calculate <conf_op> of incoming edges. */
871 if (EDGE_COUNT (bb->preds) > 0)
872 FOR_EACH_EDGE (e, ei, bb->preds)
873 {
874 if (TEST_BIT (considered, e->src->index))
875 dataflow->problem->con_fun_n (e);
876 }
877 else if (dataflow->problem->con_fun_0)
878 dataflow->problem->con_fun_0 (bb);
879
880 if (dataflow->problem->trans_fun (bb_index))
881 {
882 /* The out set of this block has changed.
883 Propagate to the outgoing blocks. */
884 FOR_EACH_EDGE (e, ei, bb->succs)
885 {
886 unsigned ob_index = e->dest->index;
887
888 if (TEST_BIT (considered, ob_index))
889 bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
890 }
891 }
892 }
893
894
895 /* Helper function for df_worklist_dataflow.
896 Propagate the dataflow backward. */
897
898 static void
899 df_worklist_propagate_backward (struct dataflow *dataflow,
900 unsigned bb_index,
901 unsigned *bbindex_to_postorder,
902 bitmap pending,
903 sbitmap considered)
904 {
905 edge e;
906 edge_iterator ei;
907 basic_block bb = BASIC_BLOCK (bb_index);
908
   909 /* Calculate <conf_op> of outgoing edges. */
910 if (EDGE_COUNT (bb->succs) > 0)
911 FOR_EACH_EDGE (e, ei, bb->succs)
912 {
913 if (TEST_BIT (considered, e->dest->index))
914 dataflow->problem->con_fun_n (e);
915 }
916 else if (dataflow->problem->con_fun_0)
917 dataflow->problem->con_fun_0 (bb);
918
919 if (dataflow->problem->trans_fun (bb_index))
920 {
   921 /* The in set of this block has changed.
   922 Propagate to the predecessor blocks. */
923 FOR_EACH_EDGE (e, ei, bb->preds)
924 {
925 unsigned ob_index = e->src->index;
926
927 if (TEST_BIT (considered, ob_index))
928 bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
929 }
930 }
931 }
932
933
   934 /* Worklist-based dataflow solver.  It uses a bitmap as the worklist,
   935 with the n-th bit representing the n-th block in reverse postorder.
   936 This is a so-called over-eager algorithm that propagates
   937 changes on demand.  It may visit blocks more often than the
   938 iterative method if there are deeply nested loops.
   939 The worklist algorithm works better than the iterative algorithm
   940 for CFGs with no nested loops.
   941 In practice, measurement shows that the worklist algorithm beats the
   942 iterative algorithm by some margin overall.
   943 Note that this is slightly different from the traditional textbook worklist solver,
   944 in that the worklist is effectively sorted by reverse postorder.
   945 For CFGs with no nested loops, this is optimal. */
946
947 void
948 df_worklist_dataflow (struct dataflow *dataflow,
949 bitmap blocks_to_consider,
950 int *blocks_in_postorder,
951 int n_blocks)
952 {
953 bitmap pending = BITMAP_ALLOC (&df_bitmap_obstack);
954 sbitmap considered = sbitmap_alloc (last_basic_block);
955 bitmap_iterator bi;
956 unsigned int *bbindex_to_postorder;
957 int i;
958 unsigned int index;
959 enum df_flow_dir dir = dataflow->problem->dir;
960
961 gcc_assert (dir != DF_NONE);
962
963 /* BBINDEX_TO_POSTORDER maps the bb->index to the reverse postorder. */
964 bbindex_to_postorder =
965 (unsigned int *)xmalloc (last_basic_block * sizeof (unsigned int));
966
967 /* Initialize the array to an out-of-bound value. */
968 for (i = 0; i < last_basic_block; i++)
969 bbindex_to_postorder[i] = last_basic_block;
970
971 /* Initialize the considered map. */
972 sbitmap_zero (considered);
973 EXECUTE_IF_SET_IN_BITMAP (blocks_to_consider, 0, index, bi)
974 {
975 SET_BIT (considered, index);
976 }
977
978 /* Initialize the mapping of block index to postorder. */
979 for (i = 0; i < n_blocks; i++)
980 {
981 bbindex_to_postorder[blocks_in_postorder[i]] = i;
982 /* Add all blocks to the worklist. */
983 bitmap_set_bit (pending, i);
984 }
985
986 if (dataflow->problem->init_fun)
987 dataflow->problem->init_fun (blocks_to_consider);
988
989 while (!bitmap_empty_p (pending))
990 {
991 unsigned bb_index;
992
993 index = bitmap_first_set_bit (pending);
994 bitmap_clear_bit (pending, index);
995
996 bb_index = blocks_in_postorder[index];
997
998 if (dir == DF_FORWARD)
999 df_worklist_propagate_forward (dataflow, bb_index,
1000 bbindex_to_postorder,
1001 pending, considered);
1002 else
1003 df_worklist_propagate_backward (dataflow, bb_index,
1004 bbindex_to_postorder,
1005 pending, considered);
1006 }
1007
1008 BITMAP_FREE (pending);
1009 sbitmap_free (considered);
1010 free (bbindex_to_postorder);
1011 }
1012
1013
1014 /* Remove the entries not in BLOCKS from the LIST of length LEN, preserving
1015 the order of the remaining entries. Returns the length of the resulting
1016 list. */
1017
1018 static unsigned
1019 df_prune_to_subcfg (int list[], unsigned len, bitmap blocks)
1020 {
1021 unsigned act, last;
1022
1023 for (act = 0, last = 0; act < len; act++)
1024 if (bitmap_bit_p (blocks, list[act]))
1025 list[last++] = list[act];
1026
1027 return last;
1028 }
1029
1030
1031 /* Execute dataflow analysis on a single dataflow problem.
1032
1033 BLOCKS_TO_CONSIDER are the blocks whose solution can either be
1034 examined or will be computed. For calls from DF_ANALYZE, this is
1035 the set of blocks that has been passed to DF_SET_BLOCKS.
1036 */
1037
1038 void
1039 df_analyze_problem (struct dataflow *dflow,
1040 bitmap blocks_to_consider,
1041 int *postorder, int n_blocks)
1042 {
1043 timevar_push (dflow->problem->tv_id);
1044
1045 #ifdef ENABLE_DF_CHECKING
1046 if (dflow->problem->verify_start_fun)
1047 dflow->problem->verify_start_fun ();
1048 #endif
1049
1050 /* (Re)Allocate the datastructures necessary to solve the problem. */
1051 if (dflow->problem->alloc_fun)
1052 dflow->problem->alloc_fun (blocks_to_consider);
1053
1054 /* Set up the problem and compute the local information. */
1055 if (dflow->problem->local_compute_fun)
1056 dflow->problem->local_compute_fun (blocks_to_consider);
1057
1058 /* Solve the equations. */
1059 if (dflow->problem->dataflow_fun)
1060 dflow->problem->dataflow_fun (dflow, blocks_to_consider,
1061 postorder, n_blocks);
1062
1063 /* Massage the solution. */
1064 if (dflow->problem->finalize_fun)
1065 dflow->problem->finalize_fun (blocks_to_consider);
1066
1067 #ifdef ENABLE_DF_CHECKING
1068 if (dflow->problem->verify_end_fun)
1069 dflow->problem->verify_end_fun ();
1070 #endif
1071
1072 timevar_pop (dflow->problem->tv_id);
1073
1074 dflow->computed = true;
1075 }
1076
1077
  1078 /* Analyze dataflow info for the basic blocks specified by the earlier call
  1079 to df_set_blocks, or for the whole CFG if df_set_blocks was not called. */
1080
1081 void
1082 df_analyze (void)
1083 {
1084 bitmap current_all_blocks = BITMAP_ALLOC (&df_bitmap_obstack);
1085 bool everything;
1086 int i;
1087
1088 if (df->postorder)
1089 free (df->postorder);
1090 if (df->postorder_inverted)
1091 free (df->postorder_inverted);
1092 df->postorder = XNEWVEC (int, last_basic_block);
1093 df->postorder_inverted = XNEWVEC (int, last_basic_block);
1094 df->n_blocks = post_order_compute (df->postorder, true, true);
1095 df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted);
1096
1097 /* These should be the same. */
1098 gcc_assert (df->n_blocks == df->n_blocks_inverted);
1099
  1100 /* We need to do this before the call to df_verify because this is
1101 not kept incrementally up to date. */
1102 df_compute_regs_ever_live (false);
1103 df_process_deferred_rescans ();
1104
1105 if (dump_file)
1106 fprintf (dump_file, "df_analyze called\n");
1107
1108 #ifndef ENABLE_DF_CHECKING
1109 if (df->changeable_flags & DF_VERIFY_SCHEDULED)
1110 #endif
1111 df_verify ();
1112
1113 for (i = 0; i < df->n_blocks; i++)
1114 bitmap_set_bit (current_all_blocks, df->postorder[i]);
1115
1116 #ifdef ENABLE_CHECKING
1117 /* Verify that POSTORDER_INVERTED only contains blocks reachable from
1118 the ENTRY block. */
1119 for (i = 0; i < df->n_blocks_inverted; i++)
1120 gcc_assert (bitmap_bit_p (current_all_blocks, df->postorder_inverted[i]));
1121 #endif
1122
1123 /* Make sure that we have pruned any unreachable blocks from these
1124 sets. */
1125 if (df->analyze_subset)
1126 {
1127 everything = false;
1128 bitmap_and_into (df->blocks_to_analyze, current_all_blocks);
1129 df->n_blocks = df_prune_to_subcfg (df->postorder,
1130 df->n_blocks, df->blocks_to_analyze);
1131 df->n_blocks_inverted = df_prune_to_subcfg (df->postorder_inverted,
1132 df->n_blocks_inverted,
1133 df->blocks_to_analyze);
1134 BITMAP_FREE (current_all_blocks);
1135 }
1136 else
1137 {
1138 everything = true;
1139 df->blocks_to_analyze = current_all_blocks;
1140 current_all_blocks = NULL;
1141 }
1142
1143 /* Skip over the DF_SCAN problem. */
1144 for (i = 1; i < df->num_problems_defined; i++)
1145 {
1146 struct dataflow *dflow = df->problems_in_order[i];
1147 if (dflow->solutions_dirty)
1148 {
1149 if (dflow->problem->dir == DF_FORWARD)
1150 df_analyze_problem (dflow,
1151 df->blocks_to_analyze,
1152 df->postorder_inverted,
1153 df->n_blocks_inverted);
1154 else
1155 df_analyze_problem (dflow,
1156 df->blocks_to_analyze,
1157 df->postorder,
1158 df->n_blocks);
1159 }
1160 }
1161
1162 if (everything)
1163 {
1164 BITMAP_FREE (df->blocks_to_analyze);
1165 df->blocks_to_analyze = NULL;
1166 }
1167
1168 #ifdef DF_DEBUG_CFG
1169 df_set_clean_cfg ();
1170 #endif
1171 }
1172
1173
1174 /* Return the number of basic blocks from the last call to df_analyze. */
1175
1176 int
1177 df_get_n_blocks (enum df_flow_dir dir)
1178 {
1179 gcc_assert (dir != DF_NONE);
1180
1181 if (dir == DF_FORWARD)
1182 {
1183 gcc_assert (df->postorder_inverted);
1184 return df->n_blocks_inverted;
1185 }
1186
1187 gcc_assert (df->postorder);
1188 return df->n_blocks;
1189 }
1190
1191
1192 /* Return a pointer to the array of basic blocks in the reverse postorder.
1193 Depending on the direction of the dataflow problem,
1194 it returns either the usual reverse postorder array
1195 or the reverse postorder of inverted traversal. */
1196 int *
1197 df_get_postorder (enum df_flow_dir dir)
1198 {
1199 gcc_assert (dir != DF_NONE);
1200
1201 if (dir == DF_FORWARD)
1202 {
1203 gcc_assert (df->postorder_inverted);
1204 return df->postorder_inverted;
1205 }
1206 gcc_assert (df->postorder);
1207 return df->postorder;
1208 }
1209
1210 static struct df_problem user_problem;
1211 static struct dataflow user_dflow;
1212
1213 /* Interface for calling iterative dataflow with user defined
  1214 confluence and transfer functions.  All that is necessary is to supply
  1215 DIR, a direction, INIT_FUN, an initialization function (or NULL),
  1216 CONF_FUN_0, a confluence function for blocks with no logical preds (or
  1217 NULL), CONF_FUN_N, the normal confluence function, TRANS_FUN, the
  1218 basic block transfer function, BLOCKS, the set of blocks to examine,
  1219 POSTORDER, the blocks in postorder, and N_BLOCKS, the number of blocks in POSTORDER. */
1220
1221 void
1222 df_simple_dataflow (enum df_flow_dir dir,
1223 df_init_function init_fun,
1224 df_confluence_function_0 con_fun_0,
1225 df_confluence_function_n con_fun_n,
1226 df_transfer_function trans_fun,
1227 bitmap blocks, int * postorder, int n_blocks)
1228 {
1229 memset (&user_problem, 0, sizeof (struct df_problem));
1230 user_problem.dir = dir;
1231 user_problem.init_fun = init_fun;
1232 user_problem.con_fun_0 = con_fun_0;
1233 user_problem.con_fun_n = con_fun_n;
1234 user_problem.trans_fun = trans_fun;
1235 user_dflow.problem = &user_problem;
1236 df_worklist_dataflow (&user_dflow, blocks, postorder, n_blocks);
1237 }
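
/* A minimal sketch of calling the above.  The names my_con_fun_0,
   my_con_fun_n, my_trans_fun and all_blocks are hypothetical and stand
   for functions and a block set supplied by the caller, with the
   signatures declared in df.h:

     df_simple_dataflow (DF_BACKWARD, NULL, my_con_fun_0, my_con_fun_n,
                         my_trans_fun, all_blocks,
                         df_get_postorder (DF_BACKWARD),
                         df_get_n_blocks (DF_BACKWARD));

   DF_BACKWARD, df_get_postorder and df_get_n_blocks are defined above;
   the solution is read back from whatever storage the caller's
   transfer function updates.  */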
1238
1239
1240 \f
1241 /*----------------------------------------------------------------------------
1242 Functions to support limited incremental change.
1243 ----------------------------------------------------------------------------*/
1244
1245
1246 /* Get basic block info. */
1247
1248 static void *
1249 df_get_bb_info (struct dataflow *dflow, unsigned int index)
1250 {
1251 if (dflow->block_info == NULL)
1252 return NULL;
1253 if (index >= dflow->block_info_size)
1254 return NULL;
1255 return (struct df_scan_bb_info *) dflow->block_info[index];
1256 }
1257
1258
1259 /* Set basic block info. */
1260
1261 static void
1262 df_set_bb_info (struct dataflow *dflow, unsigned int index,
1263 void *bb_info)
1264 {
1265 gcc_assert (dflow->block_info);
1266 dflow->block_info[index] = bb_info;
1267 }
1268
1269
1270 /* Mark the solutions as being out of date. */
1271
1272 void
1273 df_mark_solutions_dirty (void)
1274 {
1275 if (df)
1276 {
1277 int p;
1278 for (p = 1; p < df->num_problems_defined; p++)
1279 df->problems_in_order[p]->solutions_dirty = true;
1280 }
1281 }
1282
1283
  1284 /* Return true if BB needs its transfer functions recomputed. */
1285
1286 bool
1287 df_get_bb_dirty (basic_block bb)
1288 {
1289 if (df && df_live)
1290 return bitmap_bit_p (df_live->out_of_date_transfer_functions, bb->index);
1291 else
1292 return false;
1293 }
1294
1295
  1296 /* Mark BB's transfer functions as being out of
  1297 date. */
1298
1299 void
1300 df_set_bb_dirty (basic_block bb)
1301 {
1302 if (df)
1303 {
1304 int p;
1305 for (p = 1; p < df->num_problems_defined; p++)
1306 {
1307 struct dataflow *dflow = df->problems_in_order[p];
1308 if (dflow->out_of_date_transfer_functions)
1309 bitmap_set_bit (dflow->out_of_date_transfer_functions, bb->index);
1310 }
1311 df_mark_solutions_dirty ();
1312 }
1313 }
1314
1315
1316 /* Clear the dirty bits. This is called from places that delete
1317 blocks. */
1318 static void
1319 df_clear_bb_dirty (basic_block bb)
1320 {
1321 int p;
1322 for (p = 1; p < df->num_problems_defined; p++)
1323 {
1324 struct dataflow *dflow = df->problems_in_order[p];
1325 if (dflow->out_of_date_transfer_functions)
1326 bitmap_clear_bit (dflow->out_of_date_transfer_functions, bb->index);
1327 }
1328 }
  1329 /* Called from rtl_compact_blocks to reorganize the problems' basic
1330 block info. */
1331
1332 void
1333 df_compact_blocks (void)
1334 {
1335 int i, p;
1336 basic_block bb;
1337 void **problem_temps;
1338 int size = last_basic_block * sizeof (void *);
1339 bitmap tmp = BITMAP_ALLOC (&df_bitmap_obstack);
1340 problem_temps = xmalloc (size);
1341
1342 for (p = 0; p < df->num_problems_defined; p++)
1343 {
1344 struct dataflow *dflow = df->problems_in_order[p];
1345
1346 /* Need to reorganize the out_of_date_transfer_functions for the
1347 dflow problem. */
1348 if (dflow->out_of_date_transfer_functions)
1349 {
1350 bitmap_copy (tmp, dflow->out_of_date_transfer_functions);
1351 bitmap_clear (dflow->out_of_date_transfer_functions);
1352 if (bitmap_bit_p (tmp, ENTRY_BLOCK))
1353 bitmap_set_bit (dflow->out_of_date_transfer_functions, ENTRY_BLOCK);
1354 if (bitmap_bit_p (tmp, EXIT_BLOCK))
1355 bitmap_set_bit (dflow->out_of_date_transfer_functions, EXIT_BLOCK);
1356
1357 i = NUM_FIXED_BLOCKS;
1358 FOR_EACH_BB (bb)
1359 {
1360 if (bitmap_bit_p (tmp, bb->index))
1361 bitmap_set_bit (dflow->out_of_date_transfer_functions, i);
1362 i++;
1363 }
1364 }
1365
1366 /* Now shuffle the block info for the problem. */
1367 if (dflow->problem->free_bb_fun)
1368 {
1369 df_grow_bb_info (dflow);
1370 memcpy (problem_temps, dflow->block_info, size);
1371
1372 /* Copy the bb info from the problem tmps to the proper
1373 place in the block_info vector. Null out the copied
1374 item. The entry and exit blocks never move. */
1375 i = NUM_FIXED_BLOCKS;
1376 FOR_EACH_BB (bb)
1377 {
1378 df_set_bb_info (dflow, i, problem_temps[bb->index]);
1379 problem_temps[bb->index] = NULL;
1380 i++;
1381 }
1382 memset (dflow->block_info + i, 0,
1383 (last_basic_block - i) *sizeof (void *));
1384
1385 /* Free any block infos that were not copied (and NULLed).
1386 These are from orphaned blocks. */
1387 for (i = NUM_FIXED_BLOCKS; i < last_basic_block; i++)
1388 {
1389 basic_block bb = BASIC_BLOCK (i);
1390 if (problem_temps[i] && bb)
1391 dflow->problem->free_bb_fun
1392 (bb, problem_temps[i]);
1393 }
1394 }
1395 }
1396
1397 /* Shuffle the bits in the basic_block indexed arrays. */
1398
1399 if (df->blocks_to_analyze)
1400 {
1401 if (bitmap_bit_p (tmp, ENTRY_BLOCK))
1402 bitmap_set_bit (df->blocks_to_analyze, ENTRY_BLOCK);
1403 if (bitmap_bit_p (tmp, EXIT_BLOCK))
1404 bitmap_set_bit (df->blocks_to_analyze, EXIT_BLOCK);
1405 bitmap_copy (tmp, df->blocks_to_analyze);
1406 bitmap_clear (df->blocks_to_analyze);
1407 i = NUM_FIXED_BLOCKS;
1408 FOR_EACH_BB (bb)
1409 {
1410 if (bitmap_bit_p (tmp, bb->index))
1411 bitmap_set_bit (df->blocks_to_analyze, i);
1412 i++;
1413 }
1414 }
1415
1416 BITMAP_FREE (tmp);
1417
1418 free (problem_temps);
1419
1420 i = NUM_FIXED_BLOCKS;
1421 FOR_EACH_BB (bb)
1422 {
1423 SET_BASIC_BLOCK (i, bb);
1424 bb->index = i;
1425 i++;
1426 }
1427
1428 gcc_assert (i == n_basic_blocks);
1429
1430 for (; i < last_basic_block; i++)
1431 SET_BASIC_BLOCK (i, NULL);
1432
1433 #ifdef DF_DEBUG_CFG
1434 if (!df_lr->solutions_dirty)
1435 df_set_clean_cfg ();
1436 #endif
1437 }
1438
1439
1440 /* Shove NEW_BLOCK in at OLD_INDEX. Called from ifcvt to hack a
1441 block. There is no excuse for people to do this kind of thing. */
1442
1443 void
1444 df_bb_replace (int old_index, basic_block new_block)
1445 {
1446 int new_block_index = new_block->index;
1447 int p;
1448
1449 if (dump_file)
1450 fprintf (dump_file, "shoving block %d into %d\n", new_block_index, old_index);
1451
1452 gcc_assert (df);
1453 gcc_assert (BASIC_BLOCK (old_index) == NULL);
1454
1455 for (p = 0; p < df->num_problems_defined; p++)
1456 {
1457 struct dataflow *dflow = df->problems_in_order[p];
1458 if (dflow->block_info)
1459 {
1460 df_grow_bb_info (dflow);
1461 gcc_assert (df_get_bb_info (dflow, old_index) == NULL);
1462 df_set_bb_info (dflow, old_index,
1463 df_get_bb_info (dflow, new_block_index));
1464 }
1465 }
1466
1467 df_clear_bb_dirty (new_block);
1468 SET_BASIC_BLOCK (old_index, new_block);
1469 new_block->index = old_index;
1470 df_set_bb_dirty (BASIC_BLOCK (old_index));
1471 SET_BASIC_BLOCK (new_block_index, NULL);
1472 }
1473
1474
1475 /* Free all of the per basic block dataflow from all of the problems.
1476 This is typically called before a basic block is deleted and the
1477 problem will be reanalyzed. */
1478
1479 void
1480 df_bb_delete (int bb_index)
1481 {
1482 basic_block bb = BASIC_BLOCK (bb_index);
1483 int i;
1484
1485 if (!df)
1486 return;
1487
1488 for (i = 0; i < df->num_problems_defined; i++)
1489 {
1490 struct dataflow *dflow = df->problems_in_order[i];
1491 if (dflow->problem->free_bb_fun)
1492 {
1493 void *bb_info = df_get_bb_info (dflow, bb_index);
1494 if (bb_info)
1495 {
1496 dflow->problem->free_bb_fun (bb, bb_info);
1497 df_set_bb_info (dflow, bb_index, NULL);
1498 }
1499 }
1500 }
1501 df_clear_bb_dirty (bb);
1502 df_mark_solutions_dirty ();
1503 }
1504
1505
1506 /* Verify that there is a place for everything and everything is in
1507 its place. This is too expensive to run after every pass in the
1508 mainline. However this is an excellent debugging tool if the
1509 dataflow information is not being updated properly. You can just
1510 sprinkle calls in until you find the place that is changing an
1511 underlying structure without calling the proper updating
1512 routine. */
1513
1514 void
1515 df_verify (void)
1516 {
1517 df_scan_verify ();
1518 #ifdef ENABLE_DF_CHECKING
1519 df_lr_verify_transfer_functions ();
1520 if (df_live)
1521 df_live_verify_transfer_functions ();
1522 #endif
1523 }
1524
1525 #ifdef DF_DEBUG_CFG
1526
  1527 /* Compute an array of ints that describes the cfg.  This can be used
  1528 to discover places where the cfg is modified but the appropriate
  1529 calls have not been made to keep df informed.  The internals of
  1530 this are unexciting; the key is that two instances of this can be
  1531 compared to see if any changes have been made to the cfg. */
1532
1533 static int *
1534 df_compute_cfg_image (void)
1535 {
1536 basic_block bb;
1537 int size = 2 + (2 * n_basic_blocks);
1538 int i;
1539 int * map;
1540
1541 FOR_ALL_BB (bb)
1542 {
1543 size += EDGE_COUNT (bb->succs);
1544 }
1545
1546 map = XNEWVEC (int, size);
1547 map[0] = size;
1548 i = 1;
1549 FOR_ALL_BB (bb)
1550 {
1551 edge_iterator ei;
1552 edge e;
1553
1554 map[i++] = bb->index;
1555 FOR_EACH_EDGE (e, ei, bb->succs)
1556 map[i++] = e->dest->index;
1557 map[i++] = -1;
1558 }
1559 map[i] = -1;
1560 return map;
1561 }
1562
1563 static int *saved_cfg = NULL;
1564
1565
1566 /* This function compares the saved version of the cfg with the
  1567 current cfg and aborts if the two differ.  The function
1568 silently returns if the cfg has been marked as dirty or the two are
1569 the same. */
1570
1571 void
1572 df_check_cfg_clean (void)
1573 {
1574 int *new_map;
1575
1576 if (!df)
1577 return;
1578
1579 if (df_lr->solutions_dirty)
1580 return;
1581
1582 if (saved_cfg == NULL)
1583 return;
1584
1585 new_map = df_compute_cfg_image ();
1586 gcc_assert (memcmp (saved_cfg, new_map, saved_cfg[0] * sizeof (int)) == 0);
1587 free (new_map);
1588 }
1589
1590
1591 /* This function builds a cfg fingerprint and squirrels it away in
1592 saved_cfg. */
1593
1594 static void
1595 df_set_clean_cfg (void)
1596 {
1597 if (saved_cfg)
1598 free (saved_cfg);
1599 saved_cfg = df_compute_cfg_image ();
1600 }
1601
1602 #endif /* DF_DEBUG_CFG */
1603 /*----------------------------------------------------------------------------
1604 PUBLIC INTERFACES TO QUERY INFORMATION.
1605 ----------------------------------------------------------------------------*/
1606
1607
1608 /* Return first def of REGNO within BB. */
1609
1610 struct df_ref *
1611 df_bb_regno_first_def_find (basic_block bb, unsigned int regno)
1612 {
1613 rtx insn;
1614 struct df_ref **def_rec;
1615 unsigned int uid;
1616
1617 FOR_BB_INSNS (bb, insn)
1618 {
1619 if (!INSN_P (insn))
1620 continue;
1621
1622 uid = INSN_UID (insn);
1623 for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
1624 {
1625 struct df_ref *def = *def_rec;
1626 if (DF_REF_REGNO (def) == regno)
1627 return def;
1628 }
1629 }
1630 return NULL;
1631 }
1632
1633
1634 /* Return last def of REGNO within BB. */
1635
1636 struct df_ref *
1637 df_bb_regno_last_def_find (basic_block bb, unsigned int regno)
1638 {
1639 rtx insn;
1640 struct df_ref **def_rec;
1641 unsigned int uid;
1642
1643 FOR_BB_INSNS_REVERSE (bb, insn)
1644 {
1645 if (!INSN_P (insn))
1646 continue;
1647
1648 uid = INSN_UID (insn);
1649 for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
1650 {
1651 struct df_ref *def = *def_rec;
1652 if (DF_REF_REGNO (def) == regno)
1653 return def;
1654 }
1655 }
1656
1657 return NULL;
1658 }
1659
  1660 /* Find the reference corresponding to the definition of REG in INSN,
  1661 or return NULL if there is none. */
1662
1663 struct df_ref *
1664 df_find_def (rtx insn, rtx reg)
1665 {
1666 unsigned int uid;
1667 struct df_ref **def_rec;
1668
1669 if (GET_CODE (reg) == SUBREG)
1670 reg = SUBREG_REG (reg);
1671 gcc_assert (REG_P (reg));
1672
1673 uid = INSN_UID (insn);
1674 for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
1675 {
1676 struct df_ref *def = *def_rec;
1677 if (rtx_equal_p (DF_REF_REAL_REG (def), reg))
1678 return def;
1679 }
1680
1681 return NULL;
1682 }
1683
1684
  1685 /* Return true if REG is defined in INSN, false otherwise. */
1686
1687 bool
1688 df_reg_defined (rtx insn, rtx reg)
1689 {
1690 return df_find_def (insn, reg) != NULL;
1691 }
1692
1693
  1694 /* Find the reference corresponding to the use of REG in INSN,
  1695 or return NULL if there is none. */
1696
1697 struct df_ref *
1698 df_find_use (rtx insn, rtx reg)
1699 {
1700 unsigned int uid;
1701 struct df_ref **use_rec;
1702
1703 if (GET_CODE (reg) == SUBREG)
1704 reg = SUBREG_REG (reg);
1705 gcc_assert (REG_P (reg));
1706
1707 uid = INSN_UID (insn);
1708 for (use_rec = DF_INSN_UID_USES (uid); *use_rec; use_rec++)
1709 {
1710 struct df_ref *use = *use_rec;
1711 if (rtx_equal_p (DF_REF_REAL_REG (use), reg))
1712 return use;
1713 }
1714 if (df->changeable_flags & DF_EQ_NOTES)
1715 for (use_rec = DF_INSN_UID_EQ_USES (uid); *use_rec; use_rec++)
1716 {
1717 struct df_ref *use = *use_rec;
1718 if (rtx_equal_p (DF_REF_REAL_REG (use), reg))
1719 return use;
1720 }
1721 return NULL;
1722 }
1723
1724
  1725 /* Return true if REG is referenced in INSN, false otherwise. */
1726
1727 bool
1728 df_reg_used (rtx insn, rtx reg)
1729 {
1730 return df_find_use (insn, reg) != NULL;
1731 }
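
/* For example, a transformation that must not touch instructions which
   write REG could guard itself with something like the following
   (a sketch only):

     if (df_reg_used (insn, reg) && !df_reg_defined (insn, reg))
       ... INSN reads REG but does not set it ...  */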
1732
1733 \f
1734 /*----------------------------------------------------------------------------
1735 Debugging and printing functions.
1736 ----------------------------------------------------------------------------*/
1737
1738
  1739 /* Write the register set R into FILE.
  1740 This is part of making a debugging dump. */
1741
1742 void
1743 df_print_regset (FILE *file, bitmap r)
1744 {
1745 unsigned int i;
1746 bitmap_iterator bi;
1747
1748 if (r == NULL)
1749 fputs (" (nil)", file);
1750 else
1751 {
1752 EXECUTE_IF_SET_IN_BITMAP (r, 0, i, bi)
1753 {
1754 fprintf (file, " %d", i);
1755 if (i < FIRST_PSEUDO_REGISTER)
1756 fprintf (file, " [%s]", reg_names[i]);
1757 }
1758 }
1759 fprintf (file, "\n");
1760 }
1761
1762
1763 /* Dump dataflow info. */
1764
1765 void
1766 df_dump (FILE *file)
1767 {
1768 basic_block bb;
1769 df_dump_start (file);
1770
1771 FOR_ALL_BB (bb)
1772 {
1773 df_print_bb_index (bb, file);
1774 df_dump_top (bb, file);
1775 df_dump_bottom (bb, file);
1776 }
1777
1778 fprintf (file, "\n");
1779 }
1780
1781
1782 /* Dump dataflow info for df->blocks_to_analyze. */
1783
1784 void
1785 df_dump_region (FILE *file)
1786 {
1787 if (df->blocks_to_analyze)
1788 {
1789 bitmap_iterator bi;
1790 unsigned int bb_index;
1791
1792 fprintf (file, "\n\nstarting region dump\n");
1793 df_dump_start (file);
1794
1795 EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
1796 {
1797 basic_block bb = BASIC_BLOCK (bb_index);
1798
1799 df_print_bb_index (bb, file);
1800 df_dump_top (bb, file);
1801 df_dump_bottom (bb, file);
1802 }
1803 fprintf (file, "\n");
1804 }
1805 else
1806 df_dump (file);
1807 }
1808
1809
1810 /* Dump the introductory information for each problem defined. */
1811
1812 void
1813 df_dump_start (FILE *file)
1814 {
1815 int i;
1816
1817 if (!df || !file)
1818 return;
1819
1820 fprintf (file, "\n\n%s\n", current_function_name ());
1821 fprintf (file, "\nDataflow summary:\n");
1822 if (df->blocks_to_analyze)
1823 fprintf (file, "def_info->table_size = %d, use_info->table_size = %d\n",
1824 DF_DEFS_TABLE_SIZE (), DF_USES_TABLE_SIZE ());
1825
1826 for (i = 0; i < df->num_problems_defined; i++)
1827 {
1828 struct dataflow *dflow = df->problems_in_order[i];
1829 if (dflow->computed)
1830 {
1831 df_dump_problem_function fun = dflow->problem->dump_start_fun;
1832 if (fun)
1833 fun(file);
1834 }
1835 }
1836 }
1837
1838
1839 /* Dump the top of the block information for BB. */
1840
1841 void
1842 df_dump_top (basic_block bb, FILE *file)
1843 {
1844 int i;
1845
1846 if (!df || !file)
1847 return;
1848
1849 for (i = 0; i < df->num_problems_defined; i++)
1850 {
1851 struct dataflow *dflow = df->problems_in_order[i];
1852 if (dflow->computed)
1853 {
1854 df_dump_bb_problem_function bbfun = dflow->problem->dump_top_fun;
1855 if (bbfun)
1856 bbfun (bb, file);
1857 }
1858 }
1859 }
1860
1861
1862 /* Dump the bottom of the block information for BB. */
1863
1864 void
1865 df_dump_bottom (basic_block bb, FILE *file)
1866 {
1867 int i;
1868
1869 if (!df || !file)
1870 return;
1871
1872 for (i = 0; i < df->num_problems_defined; i++)
1873 {
1874 struct dataflow *dflow = df->problems_in_order[i];
1875 if (dflow->computed)
1876 {
1877 df_dump_bb_problem_function bbfun = dflow->problem->dump_bottom_fun;
1878 if (bbfun)
1879 bbfun (bb, file);
1880 }
1881 }
1882 }
1883
1884
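/* Dump the refs in the null-terminated array REF_REC to FILE.  If
   FOLLOW_CHAIN, also dump the def-use/use-def chain of each ref.  */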
1885 void
1886 df_refs_chain_dump (struct df_ref **ref_rec, bool follow_chain, FILE *file)
1887 {
1888 fprintf (file, "{ ");
1889 while (*ref_rec)
1890 {
1891 struct df_ref *ref = *ref_rec;
1892 fprintf (file, "%c%d(%d)",
1893 DF_REF_REG_DEF_P (ref) ? 'd' : (DF_REF_FLAGS (ref) & DF_REF_IN_NOTE) ? 'e' : 'u',
1894 DF_REF_ID (ref),
1895 DF_REF_REGNO (ref));
1896 if (follow_chain)
1897 df_chain_dump (DF_REF_CHAIN (ref), file);
1898 ref_rec++;
1899 }
1900 fprintf (file, "}");
1901 }
1902
1903
  1904 /* Dump either a reg-def or reg-use chain. */
1905
1906 void
1907 df_regs_chain_dump (struct df_ref *ref, FILE *file)
1908 {
1909 fprintf (file, "{ ");
1910 while (ref)
1911 {
1912 fprintf (file, "%c%d(%d) ",
1913 DF_REF_REG_DEF_P (ref) ? 'd' : 'u',
1914 DF_REF_ID (ref),
1915 DF_REF_REGNO (ref));
1916 ref = ref->next_reg;
1917 }
1918 fprintf (file, "}");
1919 }
1920
1921
1922 static void
1923 df_mws_dump (struct df_mw_hardreg **mws, FILE *file)
1924 {
1925 while (*mws)
1926 {
1927 fprintf (file, "mw %c r[%d..%d]\n",
1928 ((*mws)->type == DF_REF_REG_DEF) ? 'd' : 'u',
1929 (*mws)->start_regno, (*mws)->end_regno);
1930 mws++;
1931 }
1932 }
1933
1934
1935 static void
1936 df_insn_uid_debug (unsigned int uid,
1937 bool follow_chain, FILE *file)
1938 {
1939 fprintf (file, "insn %d luid %d",
1940 uid, DF_INSN_UID_LUID (uid));
1941
1942 if (DF_INSN_UID_DEFS (uid))
1943 {
1944 fprintf (file, " defs ");
1945 df_refs_chain_dump (DF_INSN_UID_DEFS (uid), follow_chain, file);
1946 }
1947
1948 if (DF_INSN_UID_USES (uid))
1949 {
1950 fprintf (file, " uses ");
1951 df_refs_chain_dump (DF_INSN_UID_USES (uid), follow_chain, file);
1952 }
1953
1954 if (DF_INSN_UID_EQ_USES (uid))
1955 {
1956 fprintf (file, " eq uses ");
1957 df_refs_chain_dump (DF_INSN_UID_EQ_USES (uid), follow_chain, file);
1958 }
1959
1960 if (DF_INSN_UID_MWS (uid))
1961 {
1962 fprintf (file, " mws ");
1963 df_mws_dump (DF_INSN_UID_MWS (uid), file);
1964 }
1965 fprintf (file, "\n");
1966 }
1967
1968
1969 void
1970 df_insn_debug (rtx insn, bool follow_chain, FILE *file)
1971 {
1972 df_insn_uid_debug (INSN_UID (insn), follow_chain, file);
1973 }
1974
1975 void
1976 df_insn_debug_regno (rtx insn, FILE *file)
1977 {
1978 unsigned int uid = INSN_UID(insn);
1979
1980 fprintf (file, "insn %d bb %d luid %d defs ",
1981 uid, BLOCK_FOR_INSN (insn)->index, DF_INSN_LUID (insn));
1982 df_refs_chain_dump (DF_INSN_UID_DEFS (uid), false, file);
1983
1984 fprintf (file, " uses ");
1985 df_refs_chain_dump (DF_INSN_UID_USES (uid), false, file);
1986
1987 fprintf (file, " eq_uses ");
1988 df_refs_chain_dump (DF_INSN_UID_EQ_USES (uid), false, file);
1989 fprintf (file, "\n");
1990 }
1991
1992 void
1993 df_regno_debug (unsigned int regno, FILE *file)
1994 {
1995 fprintf (file, "reg %d defs ", regno);
1996 df_regs_chain_dump (DF_REG_DEF_CHAIN (regno), file);
1997 fprintf (file, " uses ");
1998 df_regs_chain_dump (DF_REG_USE_CHAIN (regno), file);
1999 fprintf (file, " eq_uses ");
2000 df_regs_chain_dump (DF_REG_EQ_USE_CHAIN (regno), file);
2001 fprintf (file, "\n");
2002 }
2003
2004
2005 void
2006 df_ref_debug (struct df_ref *ref, FILE *file)
2007 {
2008 fprintf (file, "%c%d ",
2009 DF_REF_REG_DEF_P (ref) ? 'd' : 'u',
2010 DF_REF_ID (ref));
2011 fprintf (file, "reg %d bb %d insn %d flag 0x%x type 0x%x ",
2012 DF_REF_REGNO (ref),
2013 DF_REF_BBNO (ref),
2014 DF_REF_INSN (ref) ? INSN_UID (DF_REF_INSN (ref)) : -1,
2015 DF_REF_FLAGS (ref),
2016 DF_REF_TYPE (ref));
2017 if (DF_REF_LOC (ref))
2018 fprintf (file, "loc %p(%p) chain ", (void *)DF_REF_LOC (ref), (void *)*DF_REF_LOC (ref));
2019 else
2020 fprintf (file, "chain ");
2021 df_chain_dump (DF_REF_CHAIN (ref), file);
2022 fprintf (file, "\n");
2023 }
2024 \f
2025 /* Functions for debugging from GDB. */
2026
2027 void
2028 debug_df_insn (rtx insn)
2029 {
2030 df_insn_debug (insn, true, stderr);
2031 debug_rtx (insn);
2032 }
2033
2034
2035 void
2036 debug_df_reg (rtx reg)
2037 {
2038 df_regno_debug (REGNO (reg), stderr);
2039 }
2040
2041
2042 void
2043 debug_df_regno (unsigned int regno)
2044 {
2045 df_regno_debug (regno, stderr);
2046 }
2047
2048
2049 void
2050 debug_df_ref (struct df_ref *ref)
2051 {
2052 df_ref_debug (ref, stderr);
2053 }
2054
2055
2056 void
2057 debug_df_defno (unsigned int defno)
2058 {
2059 df_ref_debug (DF_DEFS_GET (defno), stderr);
2060 }
2061
2062
2063 void
2064 debug_df_useno (unsigned int defno)
2065 {
2066 df_ref_debug (DF_USES_GET (defno), stderr);
2067 }
2068
2069
2070 void
2071 debug_df_chain (struct df_link *link)
2072 {
2073 df_chain_dump (link, stderr);
2074 fputc ('\n', stderr);
2075 }