[gcc.git] / gcc / gcse.c
1 /* Global common subexpression elimination/Partial redundancy elimination
2 and global constant/copy propagation for GNU compiler.
3 Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
21 02111-1307, USA. */
22
23 /* TODO
24 - reordering of memory allocation and freeing to be more space efficient
25 - do rough calc of how many regs are needed in each block, and a rough
26 calc of how many regs are available in each class and use that to
27 throttle back the code in cases where RTX_COST is minimal.
28 - a store to the same address as a load does not kill the load if the
29 source of the store is also the destination of the load. Handling this
30 allows more load motion, particularly out of loops.
31 - ability to realloc sbitmap vectors would allow one initial computation
32 of reg_set_in_block with only subsequent additions, rather than
33 recomputing it for each pass
34
35 */
36
37 /* References searched while implementing this.
38
39 Compilers Principles, Techniques and Tools
40 Aho, Sethi, Ullman
41 Addison-Wesley, 1988
42
43 Global Optimization by Suppression of Partial Redundancies
44 E. Morel, C. Renvoise
45 Communications of the ACM, Vol. 22, Num. 2, Feb. 1979
46
47 A Portable Machine-Independent Global Optimizer - Design and Measurements
48 Frederick Chow
49 Stanford Ph.D. thesis, Dec. 1983
50
51 A Fast Algorithm for Code Movement Optimization
52 D.M. Dhamdhere
53 SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988
54
55 A Solution to a Problem with Morel and Renvoise's
56 Global Optimization by Suppression of Partial Redundancies
57 K-H Drechsler, M.P. Stadel
58 ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988
59
60 Practical Adaptation of the Global Optimization
61 Algorithm of Morel and Renvoise
62 D.M. Dhamdhere
63 ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991
64
65 Efficiently Computing Static Single Assignment Form and the Control
66 Dependence Graph
67 R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
68 ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991
69
70 Lazy Code Motion
71 J. Knoop, O. Ruthing, B. Steffen
72 ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
73
74 What's In a Region? Or Computing Control Dependence Regions in Near-Linear
75 Time for Reducible Control Flow
76 Thomas Ball
77 ACM Letters on Programming Languages and Systems,
78 Vol. 2, Num. 1-4, Mar-Dec 1993
79
80 An Efficient Representation for Sparse Sets
81 Preston Briggs, Linda Torczon
82 ACM Letters on Programming Languages and Systems,
83 Vol. 2, Num. 1-4, Mar-Dec 1993
84
85 A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
86 K-H Drechsler, M.P. Stadel
87 ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993
88
89 Partial Dead Code Elimination
90 J. Knoop, O. Ruthing, B. Steffen
91 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
92
93 Effective Partial Redundancy Elimination
94 P. Briggs, K.D. Cooper
95 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
96
97 The Program Structure Tree: Computing Control Regions in Linear Time
98 R. Johnson, D. Pearson, K. Pingali
99 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
100
101 Optimal Code Motion: Theory and Practice
102 J. Knoop, O. Ruthing, B. Steffen
103 ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994
104
105 The power of assignment motion
106 J. Knoop, O. Ruthing, B. Steffen
107 ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
108
109 Global code motion / global value numbering
110 C. Click
111 ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
112
113 Value Driven Redundancy Elimination
114 L.T. Simpson
115 Rice University Ph.D. thesis, Apr. 1996
116
117 Value Numbering
118 L.T. Simpson
119 Massively Scalar Compiler Project, Rice University, Sep. 1996
120
121 High Performance Compilers for Parallel Computing
122 Michael Wolfe
123 Addison-Wesley, 1996
124
125 Advanced Compiler Design and Implementation
126 Steven Muchnick
127 Morgan Kaufmann, 1997
128
129 Building an Optimizing Compiler
130 Robert Morgan
131 Digital Press, 1998
132
133 People wishing to speed up the code here should read:
134 Elimination Algorithms for Data Flow Analysis
135 B.G. Ryder, M.C. Paull
136 ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986
137
138 How to Analyze Large Programs Efficiently and Informatively
139 D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
140 ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
141
142 People wishing to do something different can find various possibilities
143 in the above papers and elsewhere.
144 */
145
146 #include "config.h"
147 #include "system.h"
148 #include "coretypes.h"
149 #include "tm.h"
150 #include "toplev.h"
151
152 #include "rtl.h"
153 #include "tm_p.h"
154 #include "regs.h"
155 #include "hard-reg-set.h"
156 #include "flags.h"
157 #include "real.h"
158 #include "insn-config.h"
159 #include "recog.h"
160 #include "basic-block.h"
161 #include "output.h"
162 #include "function.h"
163 #include "expr.h"
164 #include "except.h"
165 #include "ggc.h"
166 #include "params.h"
167 #include "cselib.h"
168
169 #include "obstack.h"
170
171 /* Propagate flow information through back edges and thus enable PRE's
172 moving loop invariant calculations out of loops.
173
174 Originally this tended to create worse overall code, but several
175 improvements during the development of PRE seem to have made following
176 back edges generally a win.
177
178 Note much of the loop invariant code motion done here would normally
179 be done by loop.c, which has more heuristics for when to move invariants
180 out of loops. At some point we might need to move some of those
181 heuristics into gcse.c. */
182
183 /* We support GCSE via Partial Redundancy Elimination. PRE optimizations
184 are a superset of those done by GCSE.
185
186 We perform the following steps:
187
188 1) Compute basic block information.
189
190 2) Compute table of places where registers are set.
191
192 3) Perform copy/constant propagation.
193
194 4) Perform global cse.
195
196 5) Perform another pass of copy/constant propagation.
197
198 Two passes of copy/constant propagation are done because the first one
199 enables more GCSE and the second one helps to clean up the copies that
200 GCSE creates. This is needed more for PRE than for Classic because Classic
201 GCSE will try to use an existing register containing the common
202 subexpression rather than create a new one. This is harder to do for PRE
203 because of the code motion (which Classic GCSE doesn't do).
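
   As an illustration only (not the exact transformations this file
   performs), copy propagation can expose a redundancy that GCSE then
   removes:

       a = c;                    a = c;
       x = c + d;      ==>       x = c + d;
       y = a + d;                y = x;

   cprop rewrites `a + d' as `c + d', which GCSE then recognizes as a
   reuse of the value already computed into x.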
204
205 Expressions we are interested in GCSE-ing are of the form
206 (set (pseudo-reg) (expression)).
207 Function want_to_gcse_p says what these are.
208
209 PRE handles moving invariant expressions out of loops (by treating them as
210 partially redundant).
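
   For example (purely illustrative):

       while (p)                  t = a + b;
         x = a + b;      ==>      while (p)
                                    x = t;

   the in-loop computation of a + b is redundant along the back edge but
   not along the entry edge, hence partially redundant; inserting it on
   the entry edge makes it fully redundant, after which it is deleted.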
211
212 Eventually it would be nice to replace cse.c/gcse.c with SSA (static single
213 assignment) based GVN (global value numbering). L. T. Simpson's paper
214 (Rice University) on value numbering is a useful reference for this.
215
216 **********************
217
218 We used to support multiple passes but there are diminishing returns in
219 doing so. The first pass usually makes 90% of the changes that are doable.
220 A second pass can make a few more changes made possible by the first pass.
221 Experiments show any further passes don't make enough changes to justify
222 the expense.
223
224 A study of spec92 using an unlimited number of passes:
225 [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
226 [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
227 [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1
228
229 It was found that doing copy propagation between each pass enables further
230 substitutions.
231
232 PRE is quite expensive in complicated functions because the DFA can take
233 a while to converge. Hence we only perform one pass. The parameter
234 max-gcse-passes can be modified if one wants to experiment.
235
236 **********************
237
238 The steps for PRE are:
239
240 1) Build the hash table of expressions we wish to GCSE (expr_hash_table).
241
242 2) Perform the data flow analysis for PRE.
243
244 3) Delete the redundant instructions
245
246 4) Insert the required copies [if any] that make the partially
247 redundant instructions fully redundant.
248
249 5) For other reaching expressions, insert an instruction to copy the value
250 to a newly created pseudo that will reach the redundant instruction.
251
252 The deletion is done first so that when we do insertions we
253 know which pseudo reg to use.
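
   A minimal sketch of steps 3-5 (pseudocode, not the RTL actually emitted):

       if (cond)                      if (cond)
         x = a + b;                     { x = a + b; t = x; }   <- step 5 copy
       else                           else
         ;                              t = a + b;              <- step 4 insertion
       y = a + b;                     y = t;                    <- step 3 deletion

   The expression a + b reaches y along the then-arm only, so it is
   partially redundant there; after the insertion and copies it is fully
   redundant and is replaced by a use of the new pseudo t.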
254
255 Various papers have argued that PRE DFA is expensive (O(n^2)) and others
256 argue it is not. The number of iterations for the algorithm to converge
257 is typically 2-4 so I don't view it as that expensive (relatively speaking).
258
259 PRE GCSE depends heavily on the second CSE pass to clean up the copies
260 we create. To make an expression reach the place where it's redundant,
261 the result of the expression is copied to a new register, and the redundant
262 expression is deleted by replacing it with this new register. Classic GCSE
263 doesn't have this problem as much, because it computes the reaching defs of
264 each register in each block and thus can try to use an existing register.
265
266 **********************
267
268 A fair bit of simplicity is gained by writing small functions for simple
269 tasks, even when the function is only called in one place. This may
270 measurably slow things down [or may not] by creating more function call
271 overhead than is necessary. The source is laid out so that it's trivial
272 to make the affected functions inline so that one can measure what speed
273 up, if any, can be achieved, and maybe later when things settle things can
274 be rearranged.
275
276 Help stamp out big monolithic functions! */
277 \f
278 /* GCSE global vars. */
279
280 /* -dG dump file. */
281 static FILE *gcse_file;
282
283 /* Note whether or not we should run jump optimization after gcse. We
284 want to do this for two cases.
285
286 * If we changed any jumps via cprop.
287
288 * If we added any labels via edge splitting. */
289
290 static int run_jump_opt_after_gcse;
291
292 /* Bitmaps are normally not included in debugging dumps.
293 However it's useful to be able to print them from GDB.
294 We could create special functions for this, but it's simpler to
295 just allow passing stderr to the dump_foo fns. Since stderr can
296 be a macro, we store a copy here. */
297 static FILE *debug_stderr;
298
299 /* An obstack for our working variables. */
300 static struct obstack gcse_obstack;
301
302 /* Nonzero for each mode that supports (set (reg) (reg)).
303 This is trivially true for integer and floating point values.
304 It may or may not be true for condition codes. */
305 static char can_copy_p[(int) NUM_MACHINE_MODES];
306
307 /* Nonzero if can_copy_p has been initialized. */
308 static int can_copy_init_p;
309
310 struct reg_use {rtx reg_rtx; };
311
312 /* Hash table of expressions. */
313
314 struct expr
315 {
316 /* The expression (SET_SRC for expressions, PATTERN for assignments). */
317 rtx expr;
318 /* Index in the available expression bitmaps. */
319 int bitmap_index;
320 /* Next entry with the same hash. */
321 struct expr *next_same_hash;
322 /* List of anticipatable occurrences in basic blocks in the function.
323 An "anticipatable occurrence" is one that is the first occurrence in the
324 basic block, the operands are not modified in the basic block prior
325 to the occurrence and the output is not used between the start of
326 the block and the occurrence. */
327 struct occr *antic_occr;
328 /* List of available occurrences in basic blocks in the function.
329 An "available occurrence" is one that is the last occurrence in the
330 basic block and the operands are not modified by following statements in
331 the basic block [including this insn]. */
332 struct occr *avail_occr;
333 /* Non-null if the computation is PRE redundant.
334 The value is the newly created pseudo-reg to record a copy of the
335 expression in all the places that reach the redundant copy. */
336 rtx reaching_reg;
337 };
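
/* Illustration of the two occurrence lists above (a sketch only): in a
   basic block

       r5 = r1 + r2;    <- anticipatable occurrence of r1 + r2 (first in the
                           block, operands unmodified before it)
       r1 = ...;
       r6 = r1 + r2;    <- available occurrence of r1 + r2 (last in the
                           block, operands unmodified after it)

   the same expression has one entry on antic_occr and one on avail_occr,
   and here they are different insns.  */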
338
339 /* Occurrence of an expression.
340 There is one per basic block. If a pattern appears more than once the
341 last appearance is used [or first for anticipatable expressions]. */
342
343 struct occr
344 {
345 /* Next occurrence of this expression. */
346 struct occr *next;
347 /* The insn that computes the expression. */
348 rtx insn;
349 /* Nonzero if this [anticipatable] occurrence has been deleted. */
350 char deleted_p;
351 /* Nonzero if this [available] occurrence has been copied to
352 reaching_reg. */
353 /* ??? This is mutually exclusive with deleted_p, so they could share
354 the same byte. */
355 char copied_p;
356 };
357
358 /* Expression and copy propagation hash tables.
359 Each hash table is an array of buckets.
360 ??? It is known that if it were an array of entries, structure elements
361 `next_same_hash' and `bitmap_index' wouldn't be necessary. However, it is
362 not clear whether in the final analysis a sufficient amount of memory would
363 be saved as the size of the available expression bitmaps would be larger
364 [one could build a mapping table without holes afterwards though].
365 Someday I'll perform the computation and figure it out. */
366
367 struct hash_table
368 {
369 /* The table itself.
370 This is an array of `expr_hash_table_size' elements. */
371 struct expr **table;
372
373 /* Size of the hash table, in elements. */
374 unsigned int size;
375
376 /* Number of hash table elements. */
377 unsigned int n_elems;
378
379 /* Whether the table is the expression or the copy propagation one. */
380 int set_p;
381 };
382
383 /* Expression hash table. */
384 static struct hash_table expr_hash_table;
385
386 /* Copy propagation hash table. */
387 static struct hash_table set_hash_table;
388
389 /* Mapping of uids to cuids.
390 Only real insns get cuids. */
391 static int *uid_cuid;
392
393 /* Highest UID in UID_CUID. */
394 static int max_uid;
395
396 /* Get the cuid of an insn. */
397 #ifdef ENABLE_CHECKING
398 #define INSN_CUID(INSN) (INSN_UID (INSN) > max_uid ? (abort (), 0) : uid_cuid[INSN_UID (INSN)])
399 #else
400 #define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
401 #endif
402
403 /* Number of cuids. */
404 static int max_cuid;
405
406 /* Mapping of cuids to insns. */
407 static rtx *cuid_insn;
408
409 /* Get insn from cuid. */
410 #define CUID_INSN(CUID) (cuid_insn[CUID])
411
412 /* Maximum register number in function prior to doing gcse + 1.
413 Registers created during this pass have regno >= max_gcse_regno.
414 This is named with "gcse" to not collide with a global of the same name. */
415 static unsigned int max_gcse_regno;
416
417 /* Table of registers that are modified.
418
419 For each register, each element is a list of places where the pseudo-reg
420 is set.
421
422 For simplicity, GCSE is done on sets of pseudo-regs only. PRE GCSE only
423 requires knowledge of which blocks kill which regs [and thus could use
424 a bitmap instead of the lists `reg_set_table' uses].
425
426 `reg_set_table' could be turned into an array of bitmaps (num-bbs x
427 num-regs) [however perhaps it may be useful to keep the data as is]. One
428 advantage of recording things this way is that `reg_set_table' is fairly
429 sparse with respect to pseudo regs but for hard regs could be fairly dense
430 [relatively speaking]. And recording sets of pseudo-regs in lists speeds
431 up functions like compute_transp since in the case of pseudo-regs we only
432 need to iterate over the number of times a pseudo-reg is set, not over the
433 number of basic blocks [clearly there is a bit of a slow down in the cases
434 where a pseudo is set more than once in a block, however it is believed
435 that the net effect is to speed things up]. This isn't done for hard-regs
436 because recording call-clobbered hard-regs in `reg_set_table' at each
437 function call can consume a fair bit of memory, and iterating over
438 hard-regs stored this way in compute_transp will be more expensive. */
439
440 typedef struct reg_set
441 {
442 /* The next setting of this register. */
443 struct reg_set *next;
444 /* The insn where it was set. */
445 rtx insn;
446 } reg_set;
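
/* A sketch (not a copy of the actual code in this file) of how such a
   per-register list is typically walked, e.g. to clear transparency bits
   for every block in which REGNO is set:

       reg_set *r;
       for (r = reg_set_table[regno]; r != NULL; r = r->next)
         RESET_BIT (bmap[BLOCK_NUM (r->insn)], indx);

   Here bmap and indx are hypothetical names for the caller's bitmap
   vector and expression index.  */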
447
448 static reg_set **reg_set_table;
449
450 /* Size of `reg_set_table'.
451 The table starts out at max_gcse_regno + slop, and is enlarged as
452 necessary. */
453 static int reg_set_table_size;
454
455 /* Amount to grow `reg_set_table' by when it's full. */
456 #define REG_SET_TABLE_SLOP 100
457
458 /* This is a list of expressions which are MEMs and will be used by load
459 or store motion.
460 Load motion tracks MEMs which aren't killed by
461 anything except themselves (i.e., loads and stores to a single location).
462 We can then allow movement of these MEM refs with a little special
463 allowance. (all stores copy the same value to the reaching reg used
464 for the loads). This means all values used to store into memory must have
465 no side effects so we can re-issue the setter value.
466 Store Motion uses this structure as an expression table to track stores
467 which look interesting, and might be moveable towards the exit block. */
468
469 struct ls_expr
470 {
471 struct expr * expr; /* Gcse expression reference for LM. */
472 rtx pattern; /* Pattern of this mem. */
473 rtx loads; /* INSN list of loads seen. */
474 rtx stores; /* INSN list of stores seen. */
475 struct ls_expr * next; /* Next in the list. */
476 int invalid; /* Invalid for some reason. */
477 int index; /* If it maps to a bitmap index. */
478 int hash_index; /* Index when in a hash table. */
479 rtx reaching_reg; /* Register to use when re-writing. */
480 };
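
/* Load motion illustration (a sketch only): for a tracked MEM location M
   whose only references are simple loads and stores,

       store v -> M     becomes     r = v; store r -> M
       ...                          ...
       x = load M       becomes     x = r

   every store first copies its value into the reaching register r, so a
   later load can simply use r.  */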
481
482 /* Array of implicit set patterns indexed by basic block index. */
483 static rtx *implicit_sets;
484
485 /* Head of the list of load/store memory refs. */
486 static struct ls_expr * pre_ldst_mems = NULL;
487
488 /* Bitmap containing one bit for each register in the program.
489 Used when performing GCSE to track which registers have been set since
490 the start of the basic block. */
491 static regset reg_set_bitmap;
492
493 /* For each block, a bitmap of registers set in the block.
494 This is used by expr_killed_p and compute_transp.
495 It is computed during hash table computation and not by compute_sets
496 as it includes registers added since the last pass (or between cprop and
497 gcse) and it's currently not easy to realloc sbitmap vectors. */
498 static sbitmap *reg_set_in_block;
499
500 /* Array, indexed by basic block number, giving a list of insns which modify
501 memory within that block. */
502 static rtx * modify_mem_list;
503 bitmap modify_mem_list_set;
504
505 /* This array parallels modify_mem_list, but is kept canonicalized. */
506 static rtx * canon_modify_mem_list;
507 bitmap canon_modify_mem_list_set;
508 /* Various variables for statistics gathering. */
509
510 /* Memory used in a pass.
511 This isn't intended to be absolutely precise. Its intent is only
512 to keep an eye on memory usage. */
513 static int bytes_used;
514
515 /* GCSE substitutions made. */
516 static int gcse_subst_count;
517 /* Number of copy instructions created. */
518 static int gcse_create_count;
519 /* Number of constants propagated. */
520 static int const_prop_count;
521 /* Number of copies propagated. */
522 static int copy_prop_count;
523 \f
524 /* These variables are used by classic GCSE.
525 Normally they'd be defined a bit later, but `rd_gen' needs to
526 be declared sooner. */
527
528 /* Each block has a bitmap of each type.
529 The length of each block's bitmap is:
530
531 max_cuid - for reaching definitions
532 n_exprs - for available expressions
533
534 Thus we view the bitmaps as 2 dimensional arrays. i.e.
535 rd_kill[block_num][cuid_num]
536 ae_kill[block_num][expr_num] */
537
538 /* For reaching defs */
539 static sbitmap *rd_kill, *rd_gen, *reaching_defs, *rd_out;
540
541 /* for available exprs */
542 static sbitmap *ae_kill, *ae_gen, *ae_in, *ae_out;
543
544 /* Objects of this type are passed around by the null-pointer check
545 removal routines. */
546 struct null_pointer_info
547 {
548 /* The basic block being processed. */
549 basic_block current_block;
550 /* The first register to be handled in this pass. */
551 unsigned int min_reg;
552 /* One greater than the last register to be handled in this pass. */
553 unsigned int max_reg;
554 sbitmap *nonnull_local;
555 sbitmap *nonnull_killed;
556 };
557 \f
558 static void compute_can_copy PARAMS ((void));
559 static char *gmalloc PARAMS ((unsigned int));
560 static char *grealloc PARAMS ((char *, unsigned int));
561 static char *gcse_alloc PARAMS ((unsigned long));
562 static void alloc_gcse_mem PARAMS ((rtx));
563 static void free_gcse_mem PARAMS ((void));
564 static void alloc_reg_set_mem PARAMS ((int));
565 static void free_reg_set_mem PARAMS ((void));
566 static int get_bitmap_width PARAMS ((int, int, int));
567 static void record_one_set PARAMS ((int, rtx));
568 static void record_set_info PARAMS ((rtx, rtx, void *));
569 static void compute_sets PARAMS ((rtx));
570 static void hash_scan_insn PARAMS ((rtx, struct hash_table *, int));
571 static void hash_scan_set PARAMS ((rtx, rtx, struct hash_table *));
572 static void hash_scan_clobber PARAMS ((rtx, rtx, struct hash_table *));
573 static void hash_scan_call PARAMS ((rtx, rtx, struct hash_table *));
574 static int want_to_gcse_p PARAMS ((rtx));
575 static int oprs_unchanged_p PARAMS ((rtx, rtx, int));
576 static int oprs_anticipatable_p PARAMS ((rtx, rtx));
577 static int oprs_available_p PARAMS ((rtx, rtx));
578 static void insert_expr_in_table PARAMS ((rtx, enum machine_mode, rtx,
579 int, int, struct hash_table *));
580 static void insert_set_in_table PARAMS ((rtx, rtx, struct hash_table *));
581 static unsigned int hash_expr PARAMS ((rtx, enum machine_mode, int *, int));
582 static unsigned int hash_expr_1 PARAMS ((rtx, enum machine_mode, int *));
583 static unsigned int hash_string_1 PARAMS ((const char *));
584 static unsigned int hash_set PARAMS ((int, int));
585 static int expr_equiv_p PARAMS ((rtx, rtx));
586 static void record_last_reg_set_info PARAMS ((rtx, int));
587 static void record_last_mem_set_info PARAMS ((rtx));
588 static void record_last_set_info PARAMS ((rtx, rtx, void *));
589 static void compute_hash_table PARAMS ((struct hash_table *));
590 static void alloc_hash_table PARAMS ((int, struct hash_table *, int));
591 static void free_hash_table PARAMS ((struct hash_table *));
592 static void compute_hash_table_work PARAMS ((struct hash_table *));
593 static void dump_hash_table PARAMS ((FILE *, const char *,
594 struct hash_table *));
595 static struct expr *lookup_expr PARAMS ((rtx, struct hash_table *));
596 static struct expr *lookup_set PARAMS ((unsigned int, struct hash_table *));
597 static struct expr *next_set PARAMS ((unsigned int, struct expr *));
598 static void reset_opr_set_tables PARAMS ((void));
599 static int oprs_not_set_p PARAMS ((rtx, rtx));
600 static void mark_call PARAMS ((rtx));
601 static void mark_set PARAMS ((rtx, rtx));
602 static void mark_clobber PARAMS ((rtx, rtx));
603 static void mark_oprs_set PARAMS ((rtx));
604 static void alloc_cprop_mem PARAMS ((int, int));
605 static void free_cprop_mem PARAMS ((void));
606 static void compute_transp PARAMS ((rtx, int, sbitmap *, int));
607 static void compute_transpout PARAMS ((void));
608 static void compute_local_properties PARAMS ((sbitmap *, sbitmap *, sbitmap *,
609 struct hash_table *));
610 static void compute_cprop_data PARAMS ((void));
611 static void find_used_regs PARAMS ((rtx *, void *));
612 static int try_replace_reg PARAMS ((rtx, rtx, rtx));
613 static struct expr *find_avail_set PARAMS ((int, rtx));
614 static int cprop_jump PARAMS ((basic_block, rtx, rtx, rtx, rtx));
615 static void mems_conflict_for_gcse_p PARAMS ((rtx, rtx, void *));
616 static int load_killed_in_block_p PARAMS ((basic_block, int, rtx, int));
617 static void canon_list_insert PARAMS ((rtx, rtx, void *));
618 static int cprop_insn PARAMS ((rtx, int));
619 static int cprop PARAMS ((int));
620 static rtx fis_get_condition PARAMS ((rtx));
621 static void find_implicit_sets PARAMS ((void));
622 static int one_cprop_pass PARAMS ((int, int, int));
623 static bool constprop_register PARAMS ((rtx, rtx, rtx, int));
624 static struct expr *find_bypass_set PARAMS ((int, int));
625 static int bypass_block PARAMS ((basic_block, rtx, rtx));
626 static int bypass_conditional_jumps PARAMS ((void));
627 static void alloc_pre_mem PARAMS ((int, int));
628 static void free_pre_mem PARAMS ((void));
629 static void compute_pre_data PARAMS ((void));
630 static int pre_expr_reaches_here_p PARAMS ((basic_block, struct expr *,
631 basic_block));
632 static void insert_insn_end_bb PARAMS ((struct expr *, basic_block, int));
633 static void pre_insert_copy_insn PARAMS ((struct expr *, rtx));
634 static void pre_insert_copies PARAMS ((void));
635 static int pre_delete PARAMS ((void));
636 static int pre_gcse PARAMS ((void));
637 static int one_pre_gcse_pass PARAMS ((int));
638 static void add_label_notes PARAMS ((rtx, rtx));
639 static void alloc_code_hoist_mem PARAMS ((int, int));
640 static void free_code_hoist_mem PARAMS ((void));
641 static void compute_code_hoist_vbeinout PARAMS ((void));
642 static void compute_code_hoist_data PARAMS ((void));
643 static int hoist_expr_reaches_here_p PARAMS ((basic_block, int, basic_block,
644 char *));
645 static void hoist_code PARAMS ((void));
646 static int one_code_hoisting_pass PARAMS ((void));
647 static void alloc_rd_mem PARAMS ((int, int));
648 static void free_rd_mem PARAMS ((void));
649 static void handle_rd_kill_set PARAMS ((rtx, int, basic_block));
650 static void compute_kill_rd PARAMS ((void));
651 static void compute_rd PARAMS ((void));
652 static void alloc_avail_expr_mem PARAMS ((int, int));
653 static void free_avail_expr_mem PARAMS ((void));
654 static void compute_ae_gen PARAMS ((struct hash_table *));
655 static int expr_killed_p PARAMS ((rtx, basic_block));
656 static void compute_ae_kill PARAMS ((sbitmap *, sbitmap *, struct hash_table *));
657 static int expr_reaches_here_p PARAMS ((struct occr *, struct expr *,
658 basic_block, int));
659 static rtx computing_insn PARAMS ((struct expr *, rtx));
660 static int def_reaches_here_p PARAMS ((rtx, rtx));
661 static int can_disregard_other_sets PARAMS ((struct reg_set **, rtx, int));
662 static int handle_avail_expr PARAMS ((rtx, struct expr *));
663 static int classic_gcse PARAMS ((void));
664 static int one_classic_gcse_pass PARAMS ((int));
665 static void invalidate_nonnull_info PARAMS ((rtx, rtx, void *));
666 static int delete_null_pointer_checks_1 PARAMS ((unsigned int *,
667 sbitmap *, sbitmap *,
668 struct null_pointer_info *));
669 static rtx process_insert_insn PARAMS ((struct expr *));
670 static int pre_edge_insert PARAMS ((struct edge_list *, struct expr **));
671 static int expr_reaches_here_p_work PARAMS ((struct occr *, struct expr *,
672 basic_block, int, char *));
673 static int pre_expr_reaches_here_p_work PARAMS ((basic_block, struct expr *,
674 basic_block, char *));
675 static struct ls_expr * ldst_entry PARAMS ((rtx));
676 static void free_ldst_entry PARAMS ((struct ls_expr *));
677 static void free_ldst_mems PARAMS ((void));
678 static void print_ldst_list PARAMS ((FILE *));
679 static struct ls_expr * find_rtx_in_ldst PARAMS ((rtx));
680 static int enumerate_ldsts PARAMS ((void));
681 static inline struct ls_expr * first_ls_expr PARAMS ((void));
682 static inline struct ls_expr * next_ls_expr PARAMS ((struct ls_expr *));
683 static int simple_mem PARAMS ((rtx));
684 static void invalidate_any_buried_refs PARAMS ((rtx));
685 static void compute_ld_motion_mems PARAMS ((void));
686 static void trim_ld_motion_mems PARAMS ((void));
687 static void update_ld_motion_stores PARAMS ((struct expr *));
688 static void reg_set_info PARAMS ((rtx, rtx, void *));
689 static int store_ops_ok PARAMS ((rtx, basic_block));
690 static void find_moveable_store PARAMS ((rtx));
691 static int compute_store_table PARAMS ((void));
692 static int load_kills_store PARAMS ((rtx, rtx));
693 static int find_loads PARAMS ((rtx, rtx));
694 static int store_killed_in_insn PARAMS ((rtx, rtx));
695 static int store_killed_after PARAMS ((rtx, rtx, basic_block));
696 static int store_killed_before PARAMS ((rtx, rtx, basic_block));
697 static void build_store_vectors PARAMS ((void));
698 static void insert_insn_start_bb PARAMS ((rtx, basic_block));
699 static int insert_store PARAMS ((struct ls_expr *, edge));
700 static void replace_store_insn PARAMS ((rtx, rtx, basic_block));
701 static void delete_store PARAMS ((struct ls_expr *,
702 basic_block));
703 static void free_store_memory PARAMS ((void));
704 static void store_motion PARAMS ((void));
705 static void free_insn_expr_list_list PARAMS ((rtx *));
706 static void clear_modify_mem_tables PARAMS ((void));
707 static void free_modify_mem_tables PARAMS ((void));
708 static rtx gcse_emit_move_after PARAMS ((rtx, rtx, rtx));
709 static void local_cprop_find_used_regs PARAMS ((rtx *, void *));
710 static bool do_local_cprop PARAMS ((rtx, rtx, int, rtx*));
711 static bool adjust_libcall_notes PARAMS ((rtx, rtx, rtx, rtx*));
712 static void local_cprop_pass PARAMS ((int));
713 \f
714 /* Entry point for global common subexpression elimination.
715 F is the first instruction in the function. */
716
717 int
718 gcse_main (f, file)
719 rtx f;
720 FILE *file;
721 {
722 int changed, pass;
723 /* Bytes used at start of pass. */
724 int initial_bytes_used;
725 /* Maximum number of bytes used by a pass. */
726 int max_pass_bytes;
727 /* Point to release obstack data from for each pass. */
728 char *gcse_obstack_bottom;
729
730 /* We do not construct an accurate cfg in functions which call
731 setjmp, so just punt to be safe. */
732 if (current_function_calls_setjmp)
733 return 0;
734
735 /* Assume that we do not need to run jump optimizations after gcse. */
736 run_jump_opt_after_gcse = 0;
737
738 /* For calling dump_foo fns from gdb. */
739 debug_stderr = stderr;
740 gcse_file = file;
741
742 /* Identify the basic block information for this function, including
743 successors and predecessors. */
744 max_gcse_regno = max_reg_num ();
745
746 if (file)
747 dump_flow_info (file);
748
749 /* Return if there's nothing to do. */
750 if (n_basic_blocks <= 1)
751 return 0;
752
753 /* Trying to perform global optimizations on flow graphs which have
754 a high connectivity will take a long time and is unlikely to be
755 particularly useful.
756
757 In normal circumstances a cfg should have about twice as many edges
758 as blocks. But we do not want to punish small functions which have
759 a couple of switch statements. So we require a relatively large number
760 of basic blocks and the ratio of edges to blocks to be high. */
761 if (n_basic_blocks > 1000 && n_edges / n_basic_blocks >= 20)
762 {
763 if (warn_disabled_optimization)
764 warning ("GCSE disabled: %d > 1000 basic blocks and %d >= 20 edges/basic block",
765 n_basic_blocks, n_edges / n_basic_blocks);
766 return 0;
767 }
768
769 /* If allocating memory for the cprop bitmap would take up too much
770 storage it's better just to disable the optimization. */
771 if ((n_basic_blocks
772 * SBITMAP_SET_SIZE (max_gcse_regno)
773 * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
774 {
775 if (warn_disabled_optimization)
776 warning ("GCSE disabled: %d basic blocks and %d registers",
777 n_basic_blocks, max_gcse_regno);
778
779 return 0;
780 }
781
782 /* See what modes support reg/reg copy operations. */
783 if (! can_copy_init_p)
784 {
785 compute_can_copy ();
786 can_copy_init_p = 1;
787 }
788
789 gcc_obstack_init (&gcse_obstack);
790 bytes_used = 0;
791
792 /* We need alias. */
793 init_alias_analysis ();
794 /* Record where pseudo-registers are set. This data is kept accurate
795 during each pass. ??? We could also record hard-reg information here
796 [since it's unchanging], however it is currently done during hash table
797 computation.
798
799 It may be tempting to compute MEM set information here too, but MEM sets
800 will be subject to code motion one day and thus we need to compute
801 information about memory sets when we build the hash tables. */
802
803 alloc_reg_set_mem (max_gcse_regno);
804 compute_sets (f);
805
806 pass = 0;
807 initial_bytes_used = bytes_used;
808 max_pass_bytes = 0;
809 gcse_obstack_bottom = gcse_alloc (1);
810 changed = 1;
811 while (changed && pass < MAX_GCSE_PASSES)
812 {
813 changed = 0;
814 if (file)
815 fprintf (file, "GCSE pass %d\n\n", pass + 1);
816
817 /* Initialize bytes_used to the space for the pred/succ lists,
818 and the reg_set_table data. */
819 bytes_used = initial_bytes_used;
820
821 /* Each pass may create new registers, so recalculate each time. */
822 max_gcse_regno = max_reg_num ();
823
824 alloc_gcse_mem (f);
825
826 /* Don't allow constant propagation to modify jumps
827 during this pass. */
828 changed = one_cprop_pass (pass + 1, 0, 0);
829
830 if (optimize_size)
831 changed |= one_classic_gcse_pass (pass + 1);
832 else
833 {
834 changed |= one_pre_gcse_pass (pass + 1);
835 /* We may have just created new basic blocks. Release and
836 recompute various things which are sized on the number of
837 basic blocks. */
838 if (changed)
839 {
840 free_modify_mem_tables ();
841 modify_mem_list
842 = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
843 canon_modify_mem_list
844 = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
845 memset ((char *) modify_mem_list, 0, last_basic_block * sizeof (rtx));
846 memset ((char *) canon_modify_mem_list, 0, last_basic_block * sizeof (rtx));
847 }
848 free_reg_set_mem ();
849 alloc_reg_set_mem (max_reg_num ());
850 compute_sets (f);
851 run_jump_opt_after_gcse = 1;
852 }
853
854 if (max_pass_bytes < bytes_used)
855 max_pass_bytes = bytes_used;
856
857 /* Free up memory, then reallocate for code hoisting. We can
858 not re-use the existing allocated memory because the tables
859 will not have info for the insns or registers created by
860 partial redundancy elimination. */
861 free_gcse_mem ();
862
863 /* It does not make sense to run code hoisting unless we are optimizing
864 for code size -- it rarely makes programs faster, and can make
865 them bigger if we did partial redundancy elimination (when optimizing
866 for space, we use a classic gcse algorithm instead of partial
867 redundancy algorithms). */
868 if (optimize_size)
869 {
870 max_gcse_regno = max_reg_num ();
871 alloc_gcse_mem (f);
872 changed |= one_code_hoisting_pass ();
873 free_gcse_mem ();
874
875 if (max_pass_bytes < bytes_used)
876 max_pass_bytes = bytes_used;
877 }
878
879 if (file)
880 {
881 fprintf (file, "\n");
882 fflush (file);
883 }
884
885 obstack_free (&gcse_obstack, gcse_obstack_bottom);
886 pass++;
887 }
888
889 /* Do one last pass of copy propagation, including cprop into
890 conditional jumps. */
891
892 max_gcse_regno = max_reg_num ();
893 alloc_gcse_mem (f);
894 /* This time, go ahead and allow cprop to alter jumps. */
895 one_cprop_pass (pass + 1, 1, 0);
896 free_gcse_mem ();
897
898 if (file)
899 {
900 fprintf (file, "GCSE of %s: %d basic blocks, ",
901 current_function_name, n_basic_blocks);
902 fprintf (file, "%d pass%s, %d bytes\n\n",
903 pass, pass > 1 ? "es" : "", max_pass_bytes);
904 }
905
906 obstack_free (&gcse_obstack, NULL);
907 free_reg_set_mem ();
908 /* We are finished with alias. */
909 end_alias_analysis ();
910 allocate_reg_info (max_reg_num (), FALSE, FALSE);
911
912 /* Store motion disabled until it is fixed. */
913 if (0 && !optimize_size && flag_gcse_sm)
914 store_motion ();
915 /* Record where pseudo-registers are set. */
916 return run_jump_opt_after_gcse;
917 }
918 \f
919 /* Misc. utilities. */
920
921 /* Compute which modes support reg/reg copy operations. */
922
923 static void
924 compute_can_copy ()
925 {
926 int i;
927 #ifndef AVOID_CCMODE_COPIES
928 rtx reg, insn;
929 #endif
930 memset (can_copy_p, 0, NUM_MACHINE_MODES);
931
932 start_sequence ();
933 for (i = 0; i < NUM_MACHINE_MODES; i++)
934 if (GET_MODE_CLASS (i) == MODE_CC)
935 {
936 #ifdef AVOID_CCMODE_COPIES
937 can_copy_p[i] = 0;
938 #else
939 reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
940 insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
941 if (recog (PATTERN (insn), insn, NULL) >= 0)
942 can_copy_p[i] = 1;
943 #endif
944 }
945 else
946 can_copy_p[i] = 1;
947
948 end_sequence ();
949 }
950 \f
951 /* Cover function to xmalloc to record bytes allocated. */
952
953 static char *
954 gmalloc (size)
955 unsigned int size;
956 {
957 bytes_used += size;
958 return xmalloc (size);
959 }
960
961 /* Cover function to xrealloc.
962 We don't record the additional size since we don't know it.
963 It won't affect memory usage stats much anyway. */
964
965 static char *
966 grealloc (ptr, size)
967 char *ptr;
968 unsigned int size;
969 {
970 return xrealloc (ptr, size);
971 }
972
973 /* Cover function to obstack_alloc. */
974
975 static char *
976 gcse_alloc (size)
977 unsigned long size;
978 {
979 bytes_used += size;
980 return (char *) obstack_alloc (&gcse_obstack, size);
981 }
982
983 /* Allocate memory for the cuid mapping array,
984 and reg/memory set tracking tables.
985
986 This is called at the start of each pass. */
987
988 static void
989 alloc_gcse_mem (f)
990 rtx f;
991 {
992 int i, n;
993 rtx insn;
994
995 /* Find the largest UID and create a mapping from UIDs to CUIDs.
996 CUIDs are like UIDs except they increase monotonically, have no gaps,
997 and only apply to real insns. */
998
999 max_uid = get_max_uid ();
1000 n = (max_uid + 1) * sizeof (int);
1001 uid_cuid = (int *) gmalloc (n);
1002 memset ((char *) uid_cuid, 0, n);
1003 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
1004 {
1005 if (INSN_P (insn))
1006 uid_cuid[INSN_UID (insn)] = i++;
1007 else
1008 uid_cuid[INSN_UID (insn)] = i;
1009 }
1010
1011 /* Create a table mapping cuids to insns. */
1012
1013 max_cuid = i;
1014 n = (max_cuid + 1) * sizeof (rtx);
1015 cuid_insn = (rtx *) gmalloc (n);
1016 memset ((char *) cuid_insn, 0, n);
1017 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
1018 if (INSN_P (insn))
1019 CUID_INSN (i++) = insn;
1020
1021 /* Allocate vars to track sets of regs. */
1022 reg_set_bitmap = BITMAP_XMALLOC ();
1023
1024 /* Allocate vars to track sets of regs, memory per block. */
1025 reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (last_basic_block,
1026 max_gcse_regno);
1027 /* Allocate array to keep a list of insns which modify memory in each
1028 basic block. */
1029 modify_mem_list = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
1030 canon_modify_mem_list = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
1031 memset ((char *) modify_mem_list, 0, last_basic_block * sizeof (rtx));
1032 memset ((char *) canon_modify_mem_list, 0, last_basic_block * sizeof (rtx));
1033 modify_mem_list_set = BITMAP_XMALLOC ();
1034 canon_modify_mem_list_set = BITMAP_XMALLOC ();
1035 }
1036
1037 /* Free memory allocated by alloc_gcse_mem. */
1038
1039 static void
1040 free_gcse_mem ()
1041 {
1042 free (uid_cuid);
1043 free (cuid_insn);
1044
1045 BITMAP_XFREE (reg_set_bitmap);
1046
1047 sbitmap_vector_free (reg_set_in_block);
1048 free_modify_mem_tables ();
1049 BITMAP_XFREE (modify_mem_list_set);
1050 BITMAP_XFREE (canon_modify_mem_list_set);
1051 }
1052
1053 /* Many of the global optimization algorithms work by solving dataflow
1054 equations for various expressions. Initially, some local value is
1055 computed for each expression in each block. Then, the values across the
1056 various blocks are combined (by following flow graph edges) to arrive at
1057 global values. Conceptually, each set of equations is independent. We
1058 may therefore solve all the equations in parallel, solve them one at a
1059 time, or pick any intermediate approach.
1060
1061 When you're going to need N two-dimensional bitmaps, each X (say, the
1062 number of blocks) by Y (say, the number of expressions), call this
1063 function. It's not important what X and Y represent; only that Y
1064 correspond to the things that can be done in parallel. This function will
1065 return an appropriate chunking factor C; you should solve C sets of
1066 equations in parallel. By going through this function, we can easily
1067 trade space against time; by solving fewer equations in parallel we use
1068 less space. */
1069
1070 static int
1071 get_bitmap_width (n, x, y)
1072 int n;
1073 int x;
1074 int y;
1075 {
1076 /* It's not really worth figuring out *exactly* how much memory will
1077 be used by a particular choice. The important thing is to get
1078 something approximately right. */
1079 size_t max_bitmap_memory = 10 * 1024 * 1024;
1080
1081 /* The number of bytes we'd use for a single column of minimum
1082 width. */
1083 size_t column_size = n * x * sizeof (SBITMAP_ELT_TYPE);
1084
1085 /* Often, it's reasonable just to solve all the equations in
1086 parallel. */
1087 if (column_size * SBITMAP_SET_SIZE (y) <= max_bitmap_memory)
1088 return y;
1089
1090 /* Otherwise, pick the largest width we can, without going over the
1091 limit. */
1092 return SBITMAP_ELT_BITS * ((max_bitmap_memory + column_size - 1)
1093 / column_size);
1094 }
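
/* Illustrative use of get_bitmap_width (a sketch, not copied from an
   actual caller): to solve n_exprs equations over n_blocks blocks, needing
   two bitmaps per block,

       int base, width;
       width = get_bitmap_width (2, n_blocks, n_exprs);
       for (base = 0; base < n_exprs; base += width)
         solve the equations in [base, MIN (base + width, n_exprs)) together;

   n_blocks and n_exprs are hypothetical names for the caller's sizes.  */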
1095 \f
1096 /* Compute the local properties of each recorded expression.
1097
1098 Local properties are those that are defined by the block, irrespective of
1099 other blocks.
1100
1101 An expression is transparent in a block if its operands are not modified
1102 in the block.
1103
1104 An expression is computed (locally available) in a block if it is computed
1105 at least once and the expression would contain the same value if the
1106 computation were moved to the end of the block.
1107
1108 An expression is locally anticipatable in a block if it is computed at
1109 least once and the expression would contain the same value if the
1110 computation were moved to the beginning of the block.
1111
1112 We call this routine for cprop, pre and code hoisting. They all compute
1113 basically the same information and thus can easily share this code.
1114
1115 TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
1116 properties. If NULL, then it is not necessary to compute or record that
1117 particular property.
1118
1119 TABLE controls which hash table to look at. If it is the SET hash table,
1120 TRANSP is additionally computed as ~TRANSP, since this is really cprop's
1121 ABSALTERED. */
1122
1123 static void
1124 compute_local_properties (transp, comp, antloc, table)
1125 sbitmap *transp;
1126 sbitmap *comp;
1127 sbitmap *antloc;
1128 struct hash_table *table;
1129 {
1130 unsigned int i;
1131
1132 /* Initialize any bitmaps that were passed in. */
1133 if (transp)
1134 {
1135 if (table->set_p)
1136 sbitmap_vector_zero (transp, last_basic_block);
1137 else
1138 sbitmap_vector_ones (transp, last_basic_block);
1139 }
1140
1141 if (comp)
1142 sbitmap_vector_zero (comp, last_basic_block);
1143 if (antloc)
1144 sbitmap_vector_zero (antloc, last_basic_block);
1145
1146 for (i = 0; i < table->size; i++)
1147 {
1148 struct expr *expr;
1149
1150 for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
1151 {
1152 int indx = expr->bitmap_index;
1153 struct occr *occr;
1154
1155 /* The expression is transparent in this block if it is not killed.
1156 We start by assuming all are transparent [none are killed], and
1157 then reset the bits for those that are. */
1158 if (transp)
1159 compute_transp (expr->expr, indx, transp, table->set_p);
1160
1161 /* The occurrences recorded in antic_occr are exactly those that
1162 we want to set to nonzero in ANTLOC. */
1163 if (antloc)
1164 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
1165 {
1166 SET_BIT (antloc[BLOCK_NUM (occr->insn)], indx);
1167
1168 /* While we're scanning the table, this is a good place to
1169 initialize this. */
1170 occr->deleted_p = 0;
1171 }
1172
1173 /* The occurrences recorded in avail_occr are exactly those that
1174 we want to set to nonzero in COMP. */
1175 if (comp)
1176 for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
1177 {
1178 SET_BIT (comp[BLOCK_NUM (occr->insn)], indx);
1179
1180 /* While we're scanning the table, this is a good place to
1181 initialize this. */
1182 occr->copied_p = 0;
1183 }
1184
1185 /* While we're scanning the table, this is a good place to
1186 initialize this. */
1187 expr->reaching_reg = 0;
1188 }
1189 }
1190 }
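
/* A tiny illustration of the three local properties above (sketch only):
   for the block

       r1 = ...;
       r3 = r1 + r2;

   the expression r1 + r2 is computed (COMP: nothing modifies its operands
   afterwards), but neither transparent (TRANSP: r1 is set in the block)
   nor locally anticipatable (ANTLOC: r1 is modified before the
   occurrence).  */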
1191 \f
1192 /* Register set information.
1193
1194 `reg_set_table' records where each register is set or otherwise
1195 modified. */
1196
1197 static struct obstack reg_set_obstack;
1198
1199 static void
1200 alloc_reg_set_mem (n_regs)
1201 int n_regs;
1202 {
1203 unsigned int n;
1204
1205 reg_set_table_size = n_regs + REG_SET_TABLE_SLOP;
1206 n = reg_set_table_size * sizeof (struct reg_set *);
1207 reg_set_table = (struct reg_set **) gmalloc (n);
1208 memset ((char *) reg_set_table, 0, n);
1209
1210 gcc_obstack_init (&reg_set_obstack);
1211 }
1212
1213 static void
1214 free_reg_set_mem ()
1215 {
1216 free (reg_set_table);
1217 obstack_free (&reg_set_obstack, NULL);
1218 }
1219
1220 /* Record REGNO in the reg_set table. */
1221
1222 static void
1223 record_one_set (regno, insn)
1224 int regno;
1225 rtx insn;
1226 {
1227 /* Allocate a new reg_set element and link it onto the list. */
1228 struct reg_set *new_reg_info;
1229
1230 /* If the table isn't big enough, enlarge it. */
1231 if (regno >= reg_set_table_size)
1232 {
1233 int new_size = regno + REG_SET_TABLE_SLOP;
1234
1235 reg_set_table
1236 = (struct reg_set **) grealloc ((char *) reg_set_table,
1237 new_size * sizeof (struct reg_set *));
1238 memset ((char *) (reg_set_table + reg_set_table_size), 0,
1239 (new_size - reg_set_table_size) * sizeof (struct reg_set *));
1240 reg_set_table_size = new_size;
1241 }
1242
1243 new_reg_info = (struct reg_set *) obstack_alloc (&reg_set_obstack,
1244 sizeof (struct reg_set));
1245 bytes_used += sizeof (struct reg_set);
1246 new_reg_info->insn = insn;
1247 new_reg_info->next = reg_set_table[regno];
1248 reg_set_table[regno] = new_reg_info;
1249 }
1250
1251 /* Called from compute_sets via note_stores to handle one SET or CLOBBER in
1252 an insn. The DATA is really the instruction in which the SET is
1253 occurring. */
1254
1255 static void
1256 record_set_info (dest, setter, data)
1257 rtx dest, setter ATTRIBUTE_UNUSED;
1258 void *data;
1259 {
1260 rtx record_set_insn = (rtx) data;
1261
1262 if (GET_CODE (dest) == REG && REGNO (dest) >= FIRST_PSEUDO_REGISTER)
1263 record_one_set (REGNO (dest), record_set_insn);
1264 }
1265
1266 /* Scan the function and record each set of each pseudo-register.
1267
1268 This is called once, at the start of the gcse pass. See the comments for
1269 `reg_set_table' for further documentation. */
1270
1271 static void
1272 compute_sets (f)
1273 rtx f;
1274 {
1275 rtx insn;
1276
1277 for (insn = f; insn != 0; insn = NEXT_INSN (insn))
1278 if (INSN_P (insn))
1279 note_stores (PATTERN (insn), record_set_info, insn);
1280 }
1281 \f
1282 /* Hash table support. */
1283
1284 struct reg_avail_info
1285 {
1286 basic_block last_bb;
1287 int first_set;
1288 int last_set;
1289 };
1290
1291 static struct reg_avail_info *reg_avail_info;
1292 static basic_block current_bb;
1293
1294
1295 /* See whether X, the source of a set, is something we want to consider for
1296 GCSE. */
1297
1298 static GTY(()) rtx test_insn;
1299 static int
1300 want_to_gcse_p (x)
1301 rtx x;
1302 {
1303 int num_clobbers = 0;
1304 int icode;
1305
1306 switch (GET_CODE (x))
1307 {
1308 case REG:
1309 case SUBREG:
1310 case CONST_INT:
1311 case CONST_DOUBLE:
1312 case CONST_VECTOR:
1313 case CALL:
1314 case CONSTANT_P_RTX:
1315 return 0;
1316
1317 default:
1318 break;
1319 }
1320
1321 /* If this is a valid operand, we are OK. If it's VOIDmode, we aren't. */
1322 if (general_operand (x, GET_MODE (x)))
1323 return 1;
1324 else if (GET_MODE (x) == VOIDmode)
1325 return 0;
1326
1327 /* Otherwise, check if we can make a valid insn from it. First initialize
1328 our test insn if we haven't already. */
1329 if (test_insn == 0)
1330 {
1331 test_insn
1332 = make_insn_raw (gen_rtx_SET (VOIDmode,
1333 gen_rtx_REG (word_mode,
1334 FIRST_PSEUDO_REGISTER * 2),
1335 const0_rtx));
1336 NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
1337 }
1338
1339 /* Now make an insn like the one we would make when GCSE'ing and see if
1340 valid. */
1341 PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
1342 SET_SRC (PATTERN (test_insn)) = x;
1343 return ((icode = recog (PATTERN (test_insn), test_insn, &num_clobbers)) >= 0
1344 && (num_clobbers == 0 || ! added_clobbers_hard_reg_p (icode)));
1345 }
1346
1347 /* Return nonzero if the operands of expression X are unchanged from the
1348 start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
1349 or from INSN to the end of INSN's basic block (if AVAIL_P != 0). */
1350
1351 static int
1352 oprs_unchanged_p (x, insn, avail_p)
1353 rtx x, insn;
1354 int avail_p;
1355 {
1356 int i, j;
1357 enum rtx_code code;
1358 const char *fmt;
1359
1360 if (x == 0)
1361 return 1;
1362
1363 code = GET_CODE (x);
1364 switch (code)
1365 {
1366 case REG:
1367 {
1368 struct reg_avail_info *info = &reg_avail_info[REGNO (x)];
1369
1370 if (info->last_bb != current_bb)
1371 return 1;
1372 if (avail_p)
1373 return info->last_set < INSN_CUID (insn);
1374 else
1375 return info->first_set >= INSN_CUID (insn);
1376 }
1377
1378 case MEM:
1379 if (load_killed_in_block_p (current_bb, INSN_CUID (insn),
1380 x, avail_p))
1381 return 0;
1382 else
1383 return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);
1384
1385 case PRE_DEC:
1386 case PRE_INC:
1387 case POST_DEC:
1388 case POST_INC:
1389 case PRE_MODIFY:
1390 case POST_MODIFY:
1391 return 0;
1392
1393 case PC:
1394 case CC0: /*FIXME*/
1395 case CONST:
1396 case CONST_INT:
1397 case CONST_DOUBLE:
1398 case CONST_VECTOR:
1399 case SYMBOL_REF:
1400 case LABEL_REF:
1401 case ADDR_VEC:
1402 case ADDR_DIFF_VEC:
1403 return 1;
1404
1405 default:
1406 break;
1407 }
1408
1409 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
1410 {
1411 if (fmt[i] == 'e')
1412 {
1413 /* If we are about to do the last recursive call needed at this
1414 level, change it into iteration. This function is called enough
1415 to be worth it. */
1416 if (i == 0)
1417 return oprs_unchanged_p (XEXP (x, i), insn, avail_p);
1418
1419 else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
1420 return 0;
1421 }
1422 else if (fmt[i] == 'E')
1423 for (j = 0; j < XVECLEN (x, i); j++)
1424 if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
1425 return 0;
1426 }
1427
1428 return 1;
1429 }
1430
1431 /* Used for communication between mems_conflict_for_gcse_p and
1432 load_killed_in_block_p. Nonzero if mems_conflict_for_gcse_p finds a
1433 conflict between two memory references. */
1434 static int gcse_mems_conflict_p;
1435
1436 /* Used for communication between mems_conflict_for_gcse_p and
1437 load_killed_in_block_p. This is the memory reference for a load insn;
1438 mems_conflict_for_gcse_p will see if a memory store conflicts with
1439 this memory load. */
1440 static rtx gcse_mem_operand;
1441
1442 /* DEST is the output of an instruction. If it is a memory reference, and
1443 possibly conflicts with the load found in gcse_mem_operand, then set
1444 gcse_mems_conflict_p to a nonzero value. */
1445
1446 static void
1447 mems_conflict_for_gcse_p (dest, setter, data)
1448 rtx dest, setter ATTRIBUTE_UNUSED;
1449 void *data ATTRIBUTE_UNUSED;
1450 {
1451 while (GET_CODE (dest) == SUBREG
1452 || GET_CODE (dest) == ZERO_EXTRACT
1453 || GET_CODE (dest) == SIGN_EXTRACT
1454 || GET_CODE (dest) == STRICT_LOW_PART)
1455 dest = XEXP (dest, 0);
1456
1457 /* If DEST is not a MEM, then it will not conflict with the load. Note
1458 that function calls are assumed to clobber memory, but are handled
1459 elsewhere. */
1460 if (GET_CODE (dest) != MEM)
1461 return;
1462
1463 /* If we are setting a MEM in our list of specially recognized MEMs,
1464 don't mark as killed this time. */
1465
1466 if (dest == gcse_mem_operand && pre_ldst_mems != NULL)
1467 {
1468 if (!find_rtx_in_ldst (dest))
1469 gcse_mems_conflict_p = 1;
1470 return;
1471 }
1472
1473 if (true_dependence (dest, GET_MODE (dest), gcse_mem_operand,
1474 rtx_addr_varies_p))
1475 gcse_mems_conflict_p = 1;
1476 }
1477
1478 /* Return nonzero if the expression in X (a memory reference) is killed
1479 in block BB before or after the insn with the CUID in UID_LIMIT.
1480 AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
1481 before UID_LIMIT.
1482
1483 To check the entire block, set UID_LIMIT to max_uid + 1 and
1484 AVAIL_P to 0. */
1485
1486 static int
1487 load_killed_in_block_p (bb, uid_limit, x, avail_p)
1488 basic_block bb;
1489 int uid_limit;
1490 rtx x;
1491 int avail_p;
1492 {
1493 rtx list_entry = modify_mem_list[bb->index];
1494 while (list_entry)
1495 {
1496 rtx setter;
1497 /* Ignore entries in the list that do not apply. */
1498 if ((avail_p
1499 && INSN_CUID (XEXP (list_entry, 0)) < uid_limit)
1500 || (! avail_p
1501 && INSN_CUID (XEXP (list_entry, 0)) > uid_limit))
1502 {
1503 list_entry = XEXP (list_entry, 1);
1504 continue;
1505 }
1506
1507 setter = XEXP (list_entry, 0);
1508
1509 /* If SETTER is a call everything is clobbered. Note that calls
1510 to pure functions are never put on the list, so we need not
1511 worry about them. */
1512 if (GET_CODE (setter) == CALL_INSN)
1513 return 1;
1514
1515 /* SETTER must be an INSN of some kind that sets memory. Call
1516 note_stores to examine each hunk of memory that is modified.
1517
1518 The note_stores interface is pretty limited, so we have to
1519 communicate via global variables. Yuk. */
1520 gcse_mem_operand = x;
1521 gcse_mems_conflict_p = 0;
1522 note_stores (PATTERN (setter), mems_conflict_for_gcse_p, NULL);
1523 if (gcse_mems_conflict_p)
1524 return 1;
1525 list_entry = XEXP (list_entry, 1);
1526 }
1527 return 0;
1528 }
1529
1530 /* Return nonzero if the operands of expression X are unchanged from
1531 the start of INSN's basic block up to but not including INSN. */
1532
1533 static int
1534 oprs_anticipatable_p (x, insn)
1535 rtx x, insn;
1536 {
1537 return oprs_unchanged_p (x, insn, 0);
1538 }
1539
1540 /* Return nonzero if the operands of expression X are unchanged from
1541 INSN to the end of INSN's basic block. */
1542
1543 static int
1544 oprs_available_p (x, insn)
1545 rtx x, insn;
1546 {
1547 return oprs_unchanged_p (x, insn, 1);
1548 }
1549
1550 /* Hash expression X.
1551
1552 MODE is only used if X is a CONST_INT. DO_NOT_RECORD_P is a boolean
1553 indicating if a volatile operand is found or if the expression contains
1554 something we don't want to insert in the table.
1555
1556 ??? One might want to merge this with canon_hash. Later. */
1557
1558 static unsigned int
1559 hash_expr (x, mode, do_not_record_p, hash_table_size)
1560 rtx x;
1561 enum machine_mode mode;
1562 int *do_not_record_p;
1563 int hash_table_size;
1564 {
1565 unsigned int hash;
1566
1567 *do_not_record_p = 0;
1568
1569 hash = hash_expr_1 (x, mode, do_not_record_p);
1570 return hash % hash_table_size;
1571 }
1572
1573 /* Hash a string. Just add its bytes up. */
1574
1575 static inline unsigned
1576 hash_string_1 (ps)
1577 const char *ps;
1578 {
1579 unsigned hash = 0;
1580 const unsigned char *p = (const unsigned char *) ps;
1581
1582 if (p)
1583 while (*p)
1584 hash += *p++;
1585
1586 return hash;
1587 }
1588
1589 /* Subroutine of hash_expr to do the actual work. */
1590
1591 static unsigned int
1592 hash_expr_1 (x, mode, do_not_record_p)
1593 rtx x;
1594 enum machine_mode mode;
1595 int *do_not_record_p;
1596 {
1597 int i, j;
1598 unsigned hash = 0;
1599 enum rtx_code code;
1600 const char *fmt;
1601
1602 /* Used to turn recursion into iteration. We can't rely on GCC's
1603 tail-recursion elimination since we need to keep accumulating values
1604 in HASH. */
1605
1606 if (x == 0)
1607 return hash;
1608
1609 repeat:
1610 code = GET_CODE (x);
1611 switch (code)
1612 {
1613 case REG:
1614 hash += ((unsigned int) REG << 7) + REGNO (x);
1615 return hash;
1616
1617 case CONST_INT:
1618 hash += (((unsigned int) CONST_INT << 7) + (unsigned int) mode
1619 + (unsigned int) INTVAL (x));
1620 return hash;
1621
1622 case CONST_DOUBLE:
1623 /* This is like the general case, except that it only counts
1624 the integers representing the constant. */
1625 hash += (unsigned int) code + (unsigned int) GET_MODE (x);
1626 if (GET_MODE (x) != VOIDmode)
1627 for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
1628 hash += (unsigned int) XWINT (x, i);
1629 else
1630 hash += ((unsigned int) CONST_DOUBLE_LOW (x)
1631 + (unsigned int) CONST_DOUBLE_HIGH (x));
1632 return hash;
1633
1634 case CONST_VECTOR:
1635 {
1636 int units;
1637 rtx elt;
1638
1639 units = CONST_VECTOR_NUNITS (x);
1640
1641 for (i = 0; i < units; ++i)
1642 {
1643 elt = CONST_VECTOR_ELT (x, i);
1644 hash += hash_expr_1 (elt, GET_MODE (elt), do_not_record_p);
1645 }
1646
1647 return hash;
1648 }
1649
1650 /* Assume there is only one rtx object for any given label. */
1651 case LABEL_REF:
1652 /* We don't hash on the address of the CODE_LABEL to avoid bootstrap
1653 differences and differences between each stage's debugging dumps. */
1654 hash += (((unsigned int) LABEL_REF << 7)
1655 + CODE_LABEL_NUMBER (XEXP (x, 0)));
1656 return hash;
1657
1658 case SYMBOL_REF:
1659 {
1660 /* Don't hash on the symbol's address to avoid bootstrap differences.
1661 Different hash values may cause expressions to be recorded in
1662 different orders and thus different registers to be used in the
1663 final assembler. This also avoids differences in the dump files
1664 between various stages. */
1665 unsigned int h = 0;
1666 const unsigned char *p = (const unsigned char *) XSTR (x, 0);
1667
1668 while (*p)
1669 h += (h << 7) + *p++; /* ??? revisit */
1670
1671 hash += ((unsigned int) SYMBOL_REF << 7) + h;
1672 return hash;
1673 }
1674
1675 case MEM:
1676 if (MEM_VOLATILE_P (x))
1677 {
1678 *do_not_record_p = 1;
1679 return 0;
1680 }
1681
1682 hash += (unsigned int) MEM;
1683 /* We used to hash on the alias set as well, but that is not a good idea,
1684 since the alias set may differ between -fprofile-arcs and
1685 -fbranch-probabilities compilations, causing the profiles to fail to match. */
1686 x = XEXP (x, 0);
1687 goto repeat;
1688
1689 case PRE_DEC:
1690 case PRE_INC:
1691 case POST_DEC:
1692 case POST_INC:
1693 case PC:
1694 case CC0:
1695 case CALL:
1696 case UNSPEC_VOLATILE:
1697 *do_not_record_p = 1;
1698 return 0;
1699
1700 case ASM_OPERANDS:
1701 if (MEM_VOLATILE_P (x))
1702 {
1703 *do_not_record_p = 1;
1704 return 0;
1705 }
1706 else
1707 {
1708 /* We don't want to take the filename and line into account. */
1709 hash += (unsigned) code + (unsigned) GET_MODE (x)
1710 + hash_string_1 (ASM_OPERANDS_TEMPLATE (x))
1711 + hash_string_1 (ASM_OPERANDS_OUTPUT_CONSTRAINT (x))
1712 + (unsigned) ASM_OPERANDS_OUTPUT_IDX (x);
1713
1714 if (ASM_OPERANDS_INPUT_LENGTH (x))
1715 {
1716 for (i = 1; i < ASM_OPERANDS_INPUT_LENGTH (x); i++)
1717 {
1718 hash += (hash_expr_1 (ASM_OPERANDS_INPUT (x, i),
1719 GET_MODE (ASM_OPERANDS_INPUT (x, i)),
1720 do_not_record_p)
1721 + hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT
1722 (x, i)));
1723 }
1724
1725 hash += hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT (x, 0));
1726 x = ASM_OPERANDS_INPUT (x, 0);
1727 mode = GET_MODE (x);
1728 goto repeat;
1729 }
1730 return hash;
1731 }
1732
1733 default:
1734 break;
1735 }
1736
1737 hash += (unsigned) code + (unsigned) GET_MODE (x);
1738 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
1739 {
1740 if (fmt[i] == 'e')
1741 {
1742 /* If we are about to do the last recursive call
1743 needed at this level, change it into iteration.
1744 This function is called enough to be worth it. */
1745 if (i == 0)
1746 {
1747 x = XEXP (x, i);
1748 goto repeat;
1749 }
1750
1751 hash += hash_expr_1 (XEXP (x, i), 0, do_not_record_p);
1752 if (*do_not_record_p)
1753 return 0;
1754 }
1755
1756 else if (fmt[i] == 'E')
1757 for (j = 0; j < XVECLEN (x, i); j++)
1758 {
1759 hash += hash_expr_1 (XVECEXP (x, i, j), 0, do_not_record_p);
1760 if (*do_not_record_p)
1761 return 0;
1762 }
1763
1764 else if (fmt[i] == 's')
1765 hash += hash_string_1 (XSTR (x, i));
1766 else if (fmt[i] == 'i')
1767 hash += (unsigned int) XINT (x, i);
1768 else
1769 abort ();
1770 }
1771
1772 return hash;
1773 }
1774
1775 /* Hash a set of register REGNO.
1776
1777 Sets are hashed on the register that is set. This simplifies the PRE copy
1778 propagation code.
1779
1780 ??? May need to make things more elaborate. Later, as necessary. */
1781
1782 static unsigned int
1783 hash_set (regno, hash_table_size)
1784 int regno;
1785 int hash_table_size;
1786 {
1787 unsigned int hash;
1788
1789 hash = regno;
1790 return hash % hash_table_size;
1791 }
1792
1793 /* Return nonzero if exp1 is equivalent to exp2.
1794 ??? Borrowed from cse.c. Might want to remerge with cse.c. Later. */
1795
1796 static int
1797 expr_equiv_p (x, y)
1798 rtx x, y;
1799 {
1800 int i, j;
1801 enum rtx_code code;
1802 const char *fmt;
1803
1804 if (x == y)
1805 return 1;
1806
1807 if (x == 0 || y == 0)
1808 return x == y;
1809
1810 code = GET_CODE (x);
1811 if (code != GET_CODE (y))
1812 return 0;
1813
1814 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
1815 if (GET_MODE (x) != GET_MODE (y))
1816 return 0;
1817
1818 switch (code)
1819 {
1820 case PC:
1821 case CC0:
1822 return x == y;
1823
1824 case CONST_INT:
1825 return INTVAL (x) == INTVAL (y);
1826
1827 case LABEL_REF:
1828 return XEXP (x, 0) == XEXP (y, 0);
1829
1830 case SYMBOL_REF:
1831 return XSTR (x, 0) == XSTR (y, 0);
1832
1833 case REG:
1834 return REGNO (x) == REGNO (y);
1835
1836 case MEM:
1837 /* Can't merge two expressions in different alias sets, since we can
1838 decide that the expression is transparent in a block when it isn't,
1839 due to it being set with the different alias set. */
1840 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
1841 return 0;
1842 break;
1843
1844 /* For commutative operations, check both orders. */
1845 case PLUS:
1846 case MULT:
1847 case AND:
1848 case IOR:
1849 case XOR:
1850 case NE:
1851 case EQ:
1852 return ((expr_equiv_p (XEXP (x, 0), XEXP (y, 0))
1853 && expr_equiv_p (XEXP (x, 1), XEXP (y, 1)))
1854 || (expr_equiv_p (XEXP (x, 0), XEXP (y, 1))
1855 && expr_equiv_p (XEXP (x, 1), XEXP (y, 0))));
1856
1857 case ASM_OPERANDS:
1858 /* We don't use the generic code below because we want to
1859 disregard filename and line numbers. */
1860
1861 /* A volatile asm isn't equivalent to any other. */
1862 if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
1863 return 0;
1864
1865 if (GET_MODE (x) != GET_MODE (y)
1866 || strcmp (ASM_OPERANDS_TEMPLATE (x), ASM_OPERANDS_TEMPLATE (y))
1867 || strcmp (ASM_OPERANDS_OUTPUT_CONSTRAINT (x),
1868 ASM_OPERANDS_OUTPUT_CONSTRAINT (y))
1869 || ASM_OPERANDS_OUTPUT_IDX (x) != ASM_OPERANDS_OUTPUT_IDX (y)
1870 || ASM_OPERANDS_INPUT_LENGTH (x) != ASM_OPERANDS_INPUT_LENGTH (y))
1871 return 0;
1872
1873 if (ASM_OPERANDS_INPUT_LENGTH (x))
1874 {
1875 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
1876 if (! expr_equiv_p (ASM_OPERANDS_INPUT (x, i),
1877 ASM_OPERANDS_INPUT (y, i))
1878 || strcmp (ASM_OPERANDS_INPUT_CONSTRAINT (x, i),
1879 ASM_OPERANDS_INPUT_CONSTRAINT (y, i)))
1880 return 0;
1881 }
1882
1883 return 1;
1884
1885 default:
1886 break;
1887 }
1888
1889 /* Compare the elements. If any pair of corresponding elements
1890 fail to match, return 0 for the whole thing. */
1891
1892 fmt = GET_RTX_FORMAT (code);
1893 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1894 {
1895 switch (fmt[i])
1896 {
1897 case 'e':
1898 if (! expr_equiv_p (XEXP (x, i), XEXP (y, i)))
1899 return 0;
1900 break;
1901
1902 case 'E':
1903 if (XVECLEN (x, i) != XVECLEN (y, i))
1904 return 0;
1905 for (j = 0; j < XVECLEN (x, i); j++)
1906 if (! expr_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j)))
1907 return 0;
1908 break;
1909
1910 case 's':
1911 if (strcmp (XSTR (x, i), XSTR (y, i)))
1912 return 0;
1913 break;
1914
1915 case 'i':
1916 if (XINT (x, i) != XINT (y, i))
1917 return 0;
1918 break;
1919
1920 case 'w':
1921 if (XWINT (x, i) != XWINT (y, i))
1922 return 0;
1923 break;
1924
1925 case '0':
1926 break;
1927
1928 default:
1929 abort ();
1930 }
1931 }
1932
1933 return 1;
1934 }
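
/* As a concrete illustration (register numbers are made up): the
   commutative cases above make (plus:SI (reg:SI 100) (reg:SI 101))
   equivalent to (plus:SI (reg:SI 101) (reg:SI 100)); the mode check near
   the top keeps (plus:SI ...) distinct from (plus:HI ...); and two
   structurally identical MEMs are still rejected if their alias sets
   differ.  */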
1935
1936 /* Insert expression X in INSN in the hash TABLE.
1937 If it is already present, record it as the last occurrence in INSN's
1938 basic block.
1939
1940 MODE is the mode of the value X is being stored into.
1941 It is only used if X is a CONST_INT.
1942
1943 ANTIC_P is nonzero if X is an anticipatable expression.
1944 AVAIL_P is nonzero if X is an available expression. */
1945
1946 static void
1947 insert_expr_in_table (x, mode, insn, antic_p, avail_p, table)
1948 rtx x;
1949 enum machine_mode mode;
1950 rtx insn;
1951 int antic_p, avail_p;
1952 struct hash_table *table;
1953 {
1954 int found, do_not_record_p;
1955 unsigned int hash;
1956 struct expr *cur_expr, *last_expr = NULL;
1957 struct occr *antic_occr, *avail_occr;
1958 struct occr *last_occr = NULL;
1959
1960 hash = hash_expr (x, mode, &do_not_record_p, table->size);
1961
1962 /* Do not insert expression in table if it contains volatile operands,
1963 or if hash_expr determines the expression is something we don't want
1964 to or can't handle. */
1965 if (do_not_record_p)
1966 return;
1967
1968 cur_expr = table->table[hash];
1969 found = 0;
1970
1971 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
1972 {
1973 /* If the expression isn't found, save a pointer to the end of
1974 the list. */
1975 last_expr = cur_expr;
1976 cur_expr = cur_expr->next_same_hash;
1977 }
1978
1979 if (! found)
1980 {
1981 cur_expr = (struct expr *) gcse_alloc (sizeof (struct expr));
1982 bytes_used += sizeof (struct expr);
1983 if (table->table[hash] == NULL)
1984 /* This is the first pattern that hashed to this index. */
1985 table->table[hash] = cur_expr;
1986 else
1987 /* Add EXPR to end of this hash chain. */
1988 last_expr->next_same_hash = cur_expr;
1989
1990 /* Set the fields of the expr element. */
1991 cur_expr->expr = x;
1992 cur_expr->bitmap_index = table->n_elems++;
1993 cur_expr->next_same_hash = NULL;
1994 cur_expr->antic_occr = NULL;
1995 cur_expr->avail_occr = NULL;
1996 }
1997
1998 /* Now record the occurrence(s). */
1999 if (antic_p)
2000 {
2001 antic_occr = cur_expr->antic_occr;
2002
2003 /* Search for another occurrence in the same basic block. */
2004 while (antic_occr && BLOCK_NUM (antic_occr->insn) != BLOCK_NUM (insn))
2005 {
2006 /* If an occurrence isn't found, save a pointer to the end of
2007 the list. */
2008 last_occr = antic_occr;
2009 antic_occr = antic_occr->next;
2010 }
2011
2012 if (antic_occr)
2013 /* Found another instance of the expression in the same basic block.
2014 Prefer the currently recorded one. We want the first one in the
2015 block and the block is scanned from start to end. */
2016 ; /* nothing to do */
2017 else
2018 {
2019 /* First occurrence of this expression in this basic block. */
2020 antic_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
2021 bytes_used += sizeof (struct occr);
2022 /* First occurrence of this expression in any block? */
2023 if (cur_expr->antic_occr == NULL)
2024 cur_expr->antic_occr = antic_occr;
2025 else
2026 last_occr->next = antic_occr;
2027
2028 antic_occr->insn = insn;
2029 antic_occr->next = NULL;
2030 }
2031 }
2032
2033 if (avail_p)
2034 {
2035 avail_occr = cur_expr->avail_occr;
2036
2037 /* Search for another occurrence in the same basic block. */
2038 while (avail_occr && BLOCK_NUM (avail_occr->insn) != BLOCK_NUM (insn))
2039 {
2040 /* If an occurrence isn't found, save a pointer to the end of
2041 the list. */
2042 last_occr = avail_occr;
2043 avail_occr = avail_occr->next;
2044 }
2045
2046 if (avail_occr)
2047 /* Found another instance of the expression in the same basic block.
2048 Prefer this occurrence to the currently recorded one. We want
2049 the last one in the block and the block is scanned from start
2050 to end. */
2051 avail_occr->insn = insn;
2052 else
2053 {
2054 /* First occurrence of this expression in this basic block. */
2055 avail_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
2056 bytes_used += sizeof (struct occr);
2057
2058 /* First occurrence of this expression in any block? */
2059 if (cur_expr->avail_occr == NULL)
2060 cur_expr->avail_occr = avail_occr;
2061 else
2062 last_occr->next = avail_occr;
2063
2064 avail_occr->insn = insn;
2065 avail_occr->next = NULL;
2066 }
2067 }
2068 }
2069
2070 /* Insert pattern X in INSN in the hash table.
2071 X is a SET of a reg to either another reg or a constant.
2072 If it is already present, record it as the last occurrence in INSN's
2073 basic block. */
2074
2075 static void
2076 insert_set_in_table (x, insn, table)
2077 rtx x;
2078 rtx insn;
2079 struct hash_table *table;
2080 {
2081 int found;
2082 unsigned int hash;
2083 struct expr *cur_expr, *last_expr = NULL;
2084 struct occr *cur_occr, *last_occr = NULL;
2085
2086 if (GET_CODE (x) != SET
2087 || GET_CODE (SET_DEST (x)) != REG)
2088 abort ();
2089
2090 hash = hash_set (REGNO (SET_DEST (x)), table->size);
2091
2092 cur_expr = table->table[hash];
2093 found = 0;
2094
2095 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
2096 {
2097 /* If the expression isn't found, save a pointer to the end of
2098 the list. */
2099 last_expr = cur_expr;
2100 cur_expr = cur_expr->next_same_hash;
2101 }
2102
2103 if (! found)
2104 {
2105 cur_expr = (struct expr *) gcse_alloc (sizeof (struct expr));
2106 bytes_used += sizeof (struct expr);
2107 if (table->table[hash] == NULL)
2108 /* This is the first pattern that hashed to this index. */
2109 table->table[hash] = cur_expr;
2110 else
2111 /* Add EXPR to end of this hash chain. */
2112 last_expr->next_same_hash = cur_expr;
2113
2114 /* Set the fields of the expr element.
2115 We must copy X because it can be modified when copy propagation is
2116 performed on its operands. */
2117 cur_expr->expr = copy_rtx (x);
2118 cur_expr->bitmap_index = table->n_elems++;
2119 cur_expr->next_same_hash = NULL;
2120 cur_expr->antic_occr = NULL;
2121 cur_expr->avail_occr = NULL;
2122 }
2123
2124 /* Now record the occurrence. */
2125 cur_occr = cur_expr->avail_occr;
2126
2127 /* Search for another occurrence in the same basic block. */
2128 while (cur_occr && BLOCK_NUM (cur_occr->insn) != BLOCK_NUM (insn))
2129 {
2130 /* If an occurrence isn't found, save a pointer to the end of
2131 the list. */
2132 last_occr = cur_occr;
2133 cur_occr = cur_occr->next;
2134 }
2135
2136 if (cur_occr)
2137 /* Found another instance of the expression in the same basic block.
2138 Prefer this occurrence to the currently recorded one. We want the
2139 last one in the block and the block is scanned from start to end. */
2140 cur_occr->insn = insn;
2141 else
2142 {
2143 /* First occurrence of this expression in this basic block. */
2144 cur_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
2145 bytes_used += sizeof (struct occr);
2146
2147 /* First occurrence of this expression in any block? */
2148 if (cur_expr->avail_occr == NULL)
2149 cur_expr->avail_occr = cur_occr;
2150 else
2151 last_occr->next = cur_occr;
2152
2153 cur_occr->insn = insn;
2154 cur_occr->next = NULL;
2155 }
2156 }
2157
2158 /* Scan pattern PAT of INSN and add an entry to the hash TABLE (set or
2159 expression one). */
2160
2161 static void
2162 hash_scan_set (pat, insn, table)
2163 rtx pat, insn;
2164 struct hash_table *table;
2165 {
2166 rtx src = SET_SRC (pat);
2167 rtx dest = SET_DEST (pat);
2168 rtx note;
2169
2170 if (GET_CODE (src) == CALL)
2171 hash_scan_call (src, insn, table);
2172
2173 else if (GET_CODE (dest) == REG)
2174 {
2175 unsigned int regno = REGNO (dest);
2176 rtx tmp;
2177
2178 /* If this is a single set and we are doing constant propagation,
2179 see if a REG_NOTE shows this to be equivalent to a constant. */
2180 if (table->set_p && (note = find_reg_equal_equiv_note (insn)) != 0
2181 && CONSTANT_P (XEXP (note, 0)))
2182 src = XEXP (note, 0), pat = gen_rtx_SET (VOIDmode, dest, src);
2183
2184 /* Only record sets of pseudo-regs in the hash table. */
2185 if (! table->set_p
2186 && regno >= FIRST_PSEUDO_REGISTER
2187 /* Don't GCSE something if we can't do a reg/reg copy. */
2188 && can_copy_p [GET_MODE (dest)]
2189 /* GCSE commonly inserts instructions after the insn. We can't
2190 do that easily for EH_REGION notes, so disable GCSE on these
2191 for now. */
2192 && !find_reg_note (insn, REG_EH_REGION, NULL_RTX)
2193 /* Is SET_SRC something we want to gcse? */
2194 && want_to_gcse_p (src)
2195 /* Don't CSE a nop. */
2196 && ! set_noop_p (pat)
2197 /* Don't GCSE if it has an attached REG_EQUIV note.
2198 At this point only function parameters should have
2199 REG_EQUIV notes, and if the argument slot is used somewhere
2200 explicitly, it means the address of the parameter has been
2201 taken, so we should not extend the lifetime of the pseudo. */
2202 && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
2203 || GET_CODE (XEXP (note, 0)) != MEM))
2204 {
2205 /* An expression is not anticipatable if its operands are
2206 modified before this insn or if this is not the only SET in
2207 this insn. */
2208 int antic_p = oprs_anticipatable_p (src, insn) && single_set (insn);
2209 /* An expression is not available if its operands are
2210 subsequently modified, including this insn. It's also not
2211 available if this is a branch, because we can't insert
2212 a set after the branch. */
2213 int avail_p = (oprs_available_p (src, insn)
2214 && ! JUMP_P (insn));
2215
2216 insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p, table);
2217 }
2218
2219 /* Record sets for constant/copy propagation. */
2220 else if (table->set_p
2221 && regno >= FIRST_PSEUDO_REGISTER
2222 && ((GET_CODE (src) == REG
2223 && REGNO (src) >= FIRST_PSEUDO_REGISTER
2224 && can_copy_p [GET_MODE (dest)]
2225 && REGNO (src) != regno)
2226 || (CONSTANT_P (src)
2227 && GET_CODE (src) != CONSTANT_P_RTX))
2228 /* A copy is not available if its src or dest is subsequently
2229 modified. Here we want to search from INSN+1 on, but
2230 oprs_available_p searches from INSN on. */
2231 && (insn == BLOCK_END (BLOCK_NUM (insn))
2232 || ((tmp = next_nonnote_insn (insn)) != NULL_RTX
2233 && oprs_available_p (pat, tmp))))
2234 insert_set_in_table (pat, insn, table);
2235 }
2236 }
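
/* To make the anticipatable/available distinction above concrete, consider
   this hypothetical block (register numbers invented):

	(set (reg 200) (plus (reg 100) (reg 101)))	;; insn A
	(set (reg 100) (const_int 0))			;; insn B
	(set (reg 201) (plus (reg 100) (reg 101)))	;; insn C

   At insn A the expression (plus (reg 100) (reg 101)) is anticipatable,
   since its operands are unchanged from the start of the block, but not
   available, since reg 100 is modified afterwards by insn B.  At insn C
   the same expression is available but not anticipatable.  */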
2237
2238 static void
2239 hash_scan_clobber (x, insn, table)
2240 rtx x ATTRIBUTE_UNUSED, insn ATTRIBUTE_UNUSED;
2241 struct hash_table *table ATTRIBUTE_UNUSED;
2242 {
2243 /* Currently nothing to do. */
2244 }
2245
2246 static void
2247 hash_scan_call (x, insn, table)
2248 rtx x ATTRIBUTE_UNUSED, insn ATTRIBUTE_UNUSED;
2249 struct hash_table *table ATTRIBUTE_UNUSED;
2250 {
2251 /* Currently nothing to do. */
2252 }
2253
2254 /* Process INSN and add hash table entries as appropriate.
2255
2256 Only available expressions that set a single pseudo-reg are recorded.
2257
2258 Single sets in a PARALLEL could be handled, but it's an extra complication
2259 that isn't dealt with right now. The trick is handling the CLOBBERs that
2260 are also in the PARALLEL. Later.
2261
2262 If TABLE->set_p is nonzero, this is for the assignment hash table,
2263 otherwise it is for the expression hash table.
2264 If IN_LIBCALL_BLOCK is nonzero, we are in a libcall block and should
2265 not record any expressions. */
2266
2267 static void
2268 hash_scan_insn (insn, table, in_libcall_block)
2269 rtx insn;
2270 struct hash_table *table;
2271 int in_libcall_block;
2272 {
2273 rtx pat = PATTERN (insn);
2274 int i;
2275
2276 if (in_libcall_block)
2277 return;
2278
2279 /* Pick out the sets of INSN and for other forms of instructions record
2280 what's been modified. */
2281
2282 if (GET_CODE (pat) == SET)
2283 hash_scan_set (pat, insn, table);
2284 else if (GET_CODE (pat) == PARALLEL)
2285 for (i = 0; i < XVECLEN (pat, 0); i++)
2286 {
2287 rtx x = XVECEXP (pat, 0, i);
2288
2289 if (GET_CODE (x) == SET)
2290 hash_scan_set (x, insn, table);
2291 else if (GET_CODE (x) == CLOBBER)
2292 hash_scan_clobber (x, insn, table);
2293 else if (GET_CODE (x) == CALL)
2294 hash_scan_call (x, insn, table);
2295 }
2296
2297 else if (GET_CODE (pat) == CLOBBER)
2298 hash_scan_clobber (pat, insn, table);
2299 else if (GET_CODE (pat) == CALL)
2300 hash_scan_call (pat, insn, table);
2301 }
2302
2303 static void
2304 dump_hash_table (file, name, table)
2305 FILE *file;
2306 const char *name;
2307 struct hash_table *table;
2308 {
2309 int i;
2310 /* Flattened out table, so it's printed in proper order. */
2311 struct expr **flat_table;
2312 unsigned int *hash_val;
2313 struct expr *expr;
2314
2315 flat_table
2316 = (struct expr **) xcalloc (table->n_elems, sizeof (struct expr *));
2317 hash_val = (unsigned int *) xmalloc (table->n_elems * sizeof (unsigned int));
2318
2319 for (i = 0; i < (int) table->size; i++)
2320 for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
2321 {
2322 flat_table[expr->bitmap_index] = expr;
2323 hash_val[expr->bitmap_index] = i;
2324 }
2325
2326 fprintf (file, "%s hash table (%d buckets, %d entries)\n",
2327 name, table->size, table->n_elems);
2328
2329 for (i = 0; i < (int) table->n_elems; i++)
2330 if (flat_table[i] != 0)
2331 {
2332 expr = flat_table[i];
2333 fprintf (file, "Index %d (hash value %d)\n ",
2334 expr->bitmap_index, hash_val[i]);
2335 print_rtl (file, expr->expr);
2336 fprintf (file, "\n");
2337 }
2338
2339 fprintf (file, "\n");
2340
2341 free (flat_table);
2342 free (hash_val);
2343 }
2344
2345 /* Record register first/last/block set information for REGNO in INSN.
2346
2347 first_set records the first place in the block where the register
2348 is set and is used to compute "anticipatability".
2349
2350 last_set records the last place in the block where the register
2351 is set and is used to compute "availability".
2352
2353 last_bb records the block for which first_set and last_set are
2354 valid, as a quick test to invalidate them.
2355
2356 reg_set_in_block records whether the register is set in the block
2357 and is used to compute "transparency". */
2358
2359 static void
2360 record_last_reg_set_info (insn, regno)
2361 rtx insn;
2362 int regno;
2363 {
2364 struct reg_avail_info *info = &reg_avail_info[regno];
2365 int cuid = INSN_CUID (insn);
2366
2367 info->last_set = cuid;
2368 if (info->last_bb != current_bb)
2369 {
2370 info->last_bb = current_bb;
2371 info->first_set = cuid;
2372 SET_BIT (reg_set_in_block[current_bb->index], regno);
2373 }
2374 }
2375
2376
2377 /* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
2378 Note we store a pair of elements in the list, so they have to be
2379 taken off pairwise. */
2380
2381 static void
2382 canon_list_insert (dest, unused1, v_insn)
2383 rtx dest ATTRIBUTE_UNUSED;
2384 rtx unused1 ATTRIBUTE_UNUSED;
2385 void * v_insn;
2386 {
2387 rtx dest_addr, insn;
2388 int bb;
2389
2390 while (GET_CODE (dest) == SUBREG
2391 || GET_CODE (dest) == ZERO_EXTRACT
2392 || GET_CODE (dest) == SIGN_EXTRACT
2393 || GET_CODE (dest) == STRICT_LOW_PART)
2394 dest = XEXP (dest, 0);
2395
2396 /* If DEST is not a MEM, then it will not conflict with a load. Note
2397 that function calls are assumed to clobber memory, but are handled
2398 elsewhere. */
2399
2400 if (GET_CODE (dest) != MEM)
2401 return;
2402
2403 dest_addr = get_addr (XEXP (dest, 0));
2404 dest_addr = canon_rtx (dest_addr);
2405 insn = (rtx) v_insn;
2406 bb = BLOCK_NUM (insn);
2407
2408 canon_modify_mem_list[bb] =
2409 alloc_EXPR_LIST (VOIDmode, dest_addr, canon_modify_mem_list[bb]);
2410 canon_modify_mem_list[bb] =
2411 alloc_EXPR_LIST (VOIDmode, dest, canon_modify_mem_list[bb]);
2412 bitmap_set_bit (canon_modify_mem_list_set, bb);
2413 }
2414
2415 /* Record memory modification information for INSN. We do not actually care
2416 about the memory location(s) that are set, or even how they are set (consider
2417 a CALL_INSN). We merely need to record which insns modify memory. */
2418
2419 static void
2420 record_last_mem_set_info (insn)
2421 rtx insn;
2422 {
2423 int bb = BLOCK_NUM (insn);
2424
2425 /* load_killed_in_block_p will handle the case of calls clobbering
2426 everything. */
2427 modify_mem_list[bb] = alloc_INSN_LIST (insn, modify_mem_list[bb]);
2428 bitmap_set_bit (modify_mem_list_set, bb);
2429
2430 if (GET_CODE (insn) == CALL_INSN)
2431 {
2432 /* Note that traversals of this list (other than for freeing)
2433 will stop after encountering a CALL_INSN, so there is no
2434 need to insert a pair of items, as canon_list_insert does. */
2435 canon_modify_mem_list[bb] =
2436 alloc_INSN_LIST (insn, canon_modify_mem_list[bb]);
2437 bitmap_set_bit (canon_modify_mem_list_set, bb);
2438 }
2439 else
2440 note_stores (PATTERN (insn), canon_list_insert, (void*) insn);
2441 }
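
/* A sketch of the resulting list shapes: modify_mem_list[bb] is a plain
   INSN_LIST of the insns in BB recorded as modifying memory, while
   canon_modify_mem_list[bb] holds, for an ordinary insn, pairs of entries
   (the stored MEM and its canonicalized address) pushed by
   canon_list_insert, and for a CALL_INSN a single INSN_LIST node marking
   everything as clobbered.  */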
2442
2443 /* Called from compute_hash_table via note_stores to handle one
2444 SET or CLOBBER in an insn. DATA is really the instruction in which
2445 the SET is taking place. */
2446
2447 static void
2448 record_last_set_info (dest, setter, data)
2449 rtx dest, setter ATTRIBUTE_UNUSED;
2450 void *data;
2451 {
2452 rtx last_set_insn = (rtx) data;
2453
2454 if (GET_CODE (dest) == SUBREG)
2455 dest = SUBREG_REG (dest);
2456
2457 if (GET_CODE (dest) == REG)
2458 record_last_reg_set_info (last_set_insn, REGNO (dest));
2459 else if (GET_CODE (dest) == MEM
2460 /* Ignore pushes, they clobber nothing. */
2461 && ! push_operand (dest, GET_MODE (dest)))
2462 record_last_mem_set_info (last_set_insn);
2463 }
2464
2465 /* Top level function to create an expression or assignment hash table.
2466
2467 Expression entries are placed in the hash table if
2468 - they are of the form (set (pseudo-reg) src),
2469 - src is something we want to perform GCSE on,
2470 - none of the operands are subsequently modified in the block
2471
2472 Assignment entries are placed in the hash table if
2473 - they are of the form (set (pseudo-reg) src),
2474 - src is something we want to perform const/copy propagation on,
2475 - none of the operands or target are subsequently modified in the block
2476
2477 Currently src must be a pseudo-reg or a const_int.
2478
2479 TABLE is the table computed. */
2480
2481 static void
2482 compute_hash_table_work (table)
2483 struct hash_table *table;
2484 {
2485 unsigned int i;
2486
2487 /* While we compute the hash table we also compute a bit array of which
2488 registers are set in which blocks.
2489 ??? This isn't needed during const/copy propagation, but it's cheap to
2490 compute. Later. */
2491 sbitmap_vector_zero (reg_set_in_block, last_basic_block);
2492
2493 /* Re-cache any INSN_LIST nodes we have allocated. */
2494 clear_modify_mem_tables ();
2495 /* Some working arrays used to track first and last set in each block. */
2496 reg_avail_info = (struct reg_avail_info*)
2497 gmalloc (max_gcse_regno * sizeof (struct reg_avail_info));
2498
2499 for (i = 0; i < max_gcse_regno; ++i)
2500 reg_avail_info[i].last_bb = NULL;
2501
2502 FOR_EACH_BB (current_bb)
2503 {
2504 rtx insn;
2505 unsigned int regno;
2506 int in_libcall_block;
2507
2508 /* First pass over the instructions records information used to
2509 determine when registers and memory are first and last set.
2510 ??? hard-reg reg_set_in_block computation
2511 could be moved to compute_sets since they currently don't change. */
2512
2513 for (insn = current_bb->head;
2514 insn && insn != NEXT_INSN (current_bb->end);
2515 insn = NEXT_INSN (insn))
2516 {
2517 if (! INSN_P (insn))
2518 continue;
2519
2520 if (GET_CODE (insn) == CALL_INSN)
2521 {
2522 bool clobbers_all = false;
2523 #ifdef NON_SAVING_SETJMP
2524 if (NON_SAVING_SETJMP
2525 && find_reg_note (insn, REG_SETJMP, NULL_RTX))
2526 clobbers_all = true;
2527 #endif
2528
2529 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2530 if (clobbers_all
2531 || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
2532 record_last_reg_set_info (insn, regno);
2533
2534 mark_call (insn);
2535 }
2536
2537 note_stores (PATTERN (insn), record_last_set_info, insn);
2538 }
2539
2540 /* Insert implicit sets in the hash table. */
2541 if (table->set_p
2542 && implicit_sets[current_bb->index] != NULL_RTX)
2543 hash_scan_set (implicit_sets[current_bb->index],
2544 current_bb->head, table);
2545
2546 /* The next pass builds the hash table. */
2547
2548 for (insn = current_bb->head, in_libcall_block = 0;
2549 insn && insn != NEXT_INSN (current_bb->end);
2550 insn = NEXT_INSN (insn))
2551 if (INSN_P (insn))
2552 {
2553 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
2554 in_libcall_block = 1;
2555 else if (table->set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX))
2556 in_libcall_block = 0;
2557 hash_scan_insn (insn, table, in_libcall_block);
2558 if (!table->set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX))
2559 in_libcall_block = 0;
2560 }
2561 }
2562
2563 free (reg_avail_info);
2564 reg_avail_info = NULL;
2565 }
2566
2567 /* Allocate space for the set/expr hash TABLE.
2568 N_INSNS is the number of instructions in the function.
2569 It is used to determine the number of buckets to use.
2570 SET_P determines whether set or expression table will
2571 be created. */
2572
2573 static void
2574 alloc_hash_table (n_insns, table, set_p)
2575 int n_insns;
2576 struct hash_table *table;
2577 int set_p;
2578 {
2579 int n;
2580
2581 table->size = n_insns / 4;
2582 if (table->size < 11)
2583 table->size = 11;
2584
2585 /* Attempt to maintain efficient use of hash table.
2586 Making it an odd number is simplest for now.
2587 ??? Later take some measurements. */
2588 table->size |= 1;
2589 n = table->size * sizeof (struct expr *);
2590 table->table = (struct expr **) gmalloc (n);
2591 table->set_p = set_p;
2592 }
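
/* For example, a function with 1000 insns gets 1000 / 4 = 250 buckets,
   which the |= 1 above bumps to the odd value 251; very small functions
   fall back to the 11-bucket minimum.  */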
2593
2594 /* Free things allocated by alloc_hash_table. */
2595
2596 static void
2597 free_hash_table (table)
2598 struct hash_table *table;
2599 {
2600 free (table->table);
2601 }
2602
2603 /* Compute the hash TABLE for doing copy/const propagation or
2604 expression hash table. */
2605
2606 static void
2607 compute_hash_table (table)
2608 struct hash_table *table;
2609 {
2610 /* Initialize count of number of entries in hash table. */
2611 table->n_elems = 0;
2612 memset ((char *) table->table, 0,
2613 table->size * sizeof (struct expr *));
2614
2615 compute_hash_table_work (table);
2616 }
2617 \f
2618 /* Expression tracking support. */
2619
2620 /* Lookup pattern PAT in the expression TABLE.
2621 The result is a pointer to the table entry, or NULL if not found. */
2622
2623 static struct expr *
2624 lookup_expr (pat, table)
2625 rtx pat;
2626 struct hash_table *table;
2627 {
2628 int do_not_record_p;
2629 unsigned int hash = hash_expr (pat, GET_MODE (pat), &do_not_record_p,
2630 table->size);
2631 struct expr *expr;
2632
2633 if (do_not_record_p)
2634 return NULL;
2635
2636 expr = table->table[hash];
2637
2638 while (expr && ! expr_equiv_p (expr->expr, pat))
2639 expr = expr->next_same_hash;
2640
2641 return expr;
2642 }
2643
2644 /* Lookup REGNO in the set TABLE. The result is a pointer to the
2645 table entry, or NULL if not found. */
2646
2647 static struct expr *
2648 lookup_set (regno, table)
2649 unsigned int regno;
2650 struct hash_table *table;
2651 {
2652 unsigned int hash = hash_set (regno, table->size);
2653 struct expr *expr;
2654
2655 expr = table->table[hash];
2656
2657 while (expr && REGNO (SET_DEST (expr->expr)) != regno)
2658 expr = expr->next_same_hash;
2659
2660 return expr;
2661 }
2662
2663 /* Return the next entry for REGNO in list EXPR. */
2664
2665 static struct expr *
2666 next_set (regno, expr)
2667 unsigned int regno;
2668 struct expr *expr;
2669 {
2670 do
2671 expr = expr->next_same_hash;
2672 while (expr && REGNO (SET_DEST (expr->expr)) != regno);
2673
2674 return expr;
2675 }
2676
2677 /* Like free_INSN_LIST_list or free_EXPR_LIST_list, except that the node
2678 types may be mixed. */
2679
2680 static void
2681 free_insn_expr_list_list (listp)
2682 rtx *listp;
2683 {
2684 rtx list, next;
2685
2686 for (list = *listp; list ; list = next)
2687 {
2688 next = XEXP (list, 1);
2689 if (GET_CODE (list) == EXPR_LIST)
2690 free_EXPR_LIST_node (list);
2691 else
2692 free_INSN_LIST_node (list);
2693 }
2694
2695 *listp = NULL;
2696 }
2697
2698 /* Clear canon_modify_mem_list and modify_mem_list tables. */
2699 static void
2700 clear_modify_mem_tables ()
2701 {
2702 int i;
2703
2704 EXECUTE_IF_SET_IN_BITMAP
2705 (modify_mem_list_set, 0, i, free_INSN_LIST_list (modify_mem_list + i));
2706 bitmap_clear (modify_mem_list_set);
2707
2708 EXECUTE_IF_SET_IN_BITMAP
2709 (canon_modify_mem_list_set, 0, i,
2710 free_insn_expr_list_list (canon_modify_mem_list + i));
2711 bitmap_clear (canon_modify_mem_list_set);
2712 }
2713
2714 /* Release memory used by modify_mem_list_set and canon_modify_mem_list_set. */
2715
2716 static void
2717 free_modify_mem_tables ()
2718 {
2719 clear_modify_mem_tables ();
2720 free (modify_mem_list);
2721 free (canon_modify_mem_list);
2722 modify_mem_list = 0;
2723 canon_modify_mem_list = 0;
2724 }
2725
2726 /* Reset tables used to keep track of what's still available [since the
2727 start of the block]. */
2728
2729 static void
2730 reset_opr_set_tables ()
2731 {
2732 /* Maintain a bitmap of which regs have been set since beginning of
2733 the block. */
2734 CLEAR_REG_SET (reg_set_bitmap);
2735
2736 /* Also keep a record of the last instruction to modify memory.
2737 For now this is very trivial, we only record whether any memory
2738 location has been modified. */
2739 clear_modify_mem_tables ();
2740 }
2741
2742 /* Return nonzero if the operands of X are not set before INSN in
2743 INSN's basic block. */
2744
2745 static int
2746 oprs_not_set_p (x, insn)
2747 rtx x, insn;
2748 {
2749 int i, j;
2750 enum rtx_code code;
2751 const char *fmt;
2752
2753 if (x == 0)
2754 return 1;
2755
2756 code = GET_CODE (x);
2757 switch (code)
2758 {
2759 case PC:
2760 case CC0:
2761 case CONST:
2762 case CONST_INT:
2763 case CONST_DOUBLE:
2764 case CONST_VECTOR:
2765 case SYMBOL_REF:
2766 case LABEL_REF:
2767 case ADDR_VEC:
2768 case ADDR_DIFF_VEC:
2769 return 1;
2770
2771 case MEM:
2772 if (load_killed_in_block_p (BLOCK_FOR_INSN (insn),
2773 INSN_CUID (insn), x, 0))
2774 return 0;
2775 else
2776 return oprs_not_set_p (XEXP (x, 0), insn);
2777
2778 case REG:
2779 return ! REGNO_REG_SET_P (reg_set_bitmap, REGNO (x));
2780
2781 default:
2782 break;
2783 }
2784
2785 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2786 {
2787 if (fmt[i] == 'e')
2788 {
2789 /* If we are about to do the last recursive call
2790 needed at this level, change it into iteration.
2791 This function is called enough to be worth it. */
2792 if (i == 0)
2793 return oprs_not_set_p (XEXP (x, i), insn);
2794
2795 if (! oprs_not_set_p (XEXP (x, i), insn))
2796 return 0;
2797 }
2798 else if (fmt[i] == 'E')
2799 for (j = 0; j < XVECLEN (x, i); j++)
2800 if (! oprs_not_set_p (XVECEXP (x, i, j), insn))
2801 return 0;
2802 }
2803
2804 return 1;
2805 }
2806
2807 /* Mark things set by a CALL. */
2808
2809 static void
2810 mark_call (insn)
2811 rtx insn;
2812 {
2813 if (! CONST_OR_PURE_CALL_P (insn))
2814 record_last_mem_set_info (insn);
2815 }
2816
2817 /* Mark things set by a SET. */
2818
2819 static void
2820 mark_set (pat, insn)
2821 rtx pat, insn;
2822 {
2823 rtx dest = SET_DEST (pat);
2824
2825 while (GET_CODE (dest) == SUBREG
2826 || GET_CODE (dest) == ZERO_EXTRACT
2827 || GET_CODE (dest) == SIGN_EXTRACT
2828 || GET_CODE (dest) == STRICT_LOW_PART)
2829 dest = XEXP (dest, 0);
2830
2831 if (GET_CODE (dest) == REG)
2832 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (dest));
2833 else if (GET_CODE (dest) == MEM)
2834 record_last_mem_set_info (insn);
2835
2836 if (GET_CODE (SET_SRC (pat)) == CALL)
2837 mark_call (insn);
2838 }
2839
2840 /* Record things set by a CLOBBER. */
2841
2842 static void
2843 mark_clobber (pat, insn)
2844 rtx pat, insn;
2845 {
2846 rtx clob = XEXP (pat, 0);
2847
2848 while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART)
2849 clob = XEXP (clob, 0);
2850
2851 if (GET_CODE (clob) == REG)
2852 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (clob));
2853 else
2854 record_last_mem_set_info (insn);
2855 }
2856
2857 /* Record things set by INSN.
2858 This data is used by oprs_not_set_p. */
2859
2860 static void
2861 mark_oprs_set (insn)
2862 rtx insn;
2863 {
2864 rtx pat = PATTERN (insn);
2865 int i;
2866
2867 if (GET_CODE (pat) == SET)
2868 mark_set (pat, insn);
2869 else if (GET_CODE (pat) == PARALLEL)
2870 for (i = 0; i < XVECLEN (pat, 0); i++)
2871 {
2872 rtx x = XVECEXP (pat, 0, i);
2873
2874 if (GET_CODE (x) == SET)
2875 mark_set (x, insn);
2876 else if (GET_CODE (x) == CLOBBER)
2877 mark_clobber (x, insn);
2878 else if (GET_CODE (x) == CALL)
2879 mark_call (insn);
2880 }
2881
2882 else if (GET_CODE (pat) == CLOBBER)
2883 mark_clobber (pat, insn);
2884 else if (GET_CODE (pat) == CALL)
2885 mark_call (insn);
2886 }
2887
2888 \f
2889 /* Classic GCSE reaching definition support. */
2890
2891 /* Allocate reaching def variables. */
2892
2893 static void
2894 alloc_rd_mem (n_blocks, n_insns)
2895 int n_blocks, n_insns;
2896 {
2897 rd_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2898 sbitmap_vector_zero (rd_kill, n_blocks);
2899
2900 rd_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2901 sbitmap_vector_zero (rd_gen, n_blocks);
2902
2903 reaching_defs = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2904 sbitmap_vector_zero (reaching_defs, n_blocks);
2905
2906 rd_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2907 sbitmap_vector_zero (rd_out, n_blocks);
2908 }
2909
2910 /* Free reaching def variables. */
2911
2912 static void
2913 free_rd_mem ()
2914 {
2915 sbitmap_vector_free (rd_kill);
2916 sbitmap_vector_free (rd_gen);
2917 sbitmap_vector_free (reaching_defs);
2918 sbitmap_vector_free (rd_out);
2919 }
2920
2921 /* Add INSN to the kills of BB. REGNO, set in BB, is killed by INSN. */
2922
2923 static void
2924 handle_rd_kill_set (insn, regno, bb)
2925 rtx insn;
2926 int regno;
2927 basic_block bb;
2928 {
2929 struct reg_set *this_reg;
2930
2931 for (this_reg = reg_set_table[regno]; this_reg; this_reg = this_reg ->next)
2932 if (BLOCK_NUM (this_reg->insn) != BLOCK_NUM (insn))
2933 SET_BIT (rd_kill[bb->index], INSN_CUID (this_reg->insn));
2934 }
2935
2936 /* Compute the set of kills for reaching definitions. */
2937
2938 static void
2939 compute_kill_rd ()
2940 {
2941 int cuid;
2942 unsigned int regno;
2943 int i;
2944 basic_block bb;
2945
2946 /* For each block
2947 For each set bit in `gen' of the block (i.e. each insn which
2948 generates a definition in the block)
2949 Call the reg set by the insn corresponding to that bit regx
2950 Look at the linked list starting at reg_set_table[regx]
2951 For each setting of regx in the linked list, which is not in
2952 this block
2953 Set the bit in `kill' corresponding to that insn. */
2954 FOR_EACH_BB (bb)
2955 for (cuid = 0; cuid < max_cuid; cuid++)
2956 if (TEST_BIT (rd_gen[bb->index], cuid))
2957 {
2958 rtx insn = CUID_INSN (cuid);
2959 rtx pat = PATTERN (insn);
2960
2961 if (GET_CODE (insn) == CALL_INSN)
2962 {
2963 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2964 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
2965 handle_rd_kill_set (insn, regno, bb);
2966 }
2967
2968 if (GET_CODE (pat) == PARALLEL)
2969 {
2970 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
2971 {
2972 enum rtx_code code = GET_CODE (XVECEXP (pat, 0, i));
2973
2974 if ((code == SET || code == CLOBBER)
2975 && GET_CODE (XEXP (XVECEXP (pat, 0, i), 0)) == REG)
2976 handle_rd_kill_set (insn,
2977 REGNO (XEXP (XVECEXP (pat, 0, i), 0)),
2978 bb);
2979 }
2980 }
2981 else if (GET_CODE (pat) == SET && GET_CODE (SET_DEST (pat)) == REG)
2982 /* Each setting of this register outside of this block
2983 must be marked in the set of kills in this block. */
2984 handle_rd_kill_set (insn, REGNO (SET_DEST (pat)), bb);
2985 }
2986 }
2987
2988 /* Compute the reaching definitions as in
2989 Compilers Principles, Techniques, and Tools. Aho, Sethi, Ullman,
2990 Chapter 10. It is the same algorithm as used for computing available
2991 expressions but applied to the gens and kills of reaching definitions. */
2992
2993 static void
2994 compute_rd ()
2995 {
2996 int changed, passes;
2997 basic_block bb;
2998
2999 FOR_EACH_BB (bb)
3000 sbitmap_copy (rd_out[bb->index] /*dst*/, rd_gen[bb->index] /*src*/);
3001
3002 passes = 0;
3003 changed = 1;
3004 while (changed)
3005 {
3006 changed = 0;
3007 FOR_EACH_BB (bb)
3008 {
3009 sbitmap_union_of_preds (reaching_defs[bb->index], rd_out, bb->index);
3010 changed |= sbitmap_union_of_diff_cg (rd_out[bb->index], rd_gen[bb->index],
3011 reaching_defs[bb->index], rd_kill[bb->index]);
3012 }
3013 passes++;
3014 }
3015
3016 if (gcse_file)
3017 fprintf (gcse_file, "reaching def computation: %d passes\n", passes);
3018 }
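
/* Written out, the loop above solves the usual forward dataflow equations,
   where IN, OUT, GEN and KILL correspond to reaching_defs, rd_out, rd_gen
   and rd_kill:

	IN[b]  = union over p in pred(b) of OUT[p]
	OUT[b] = GEN[b] | (IN[b] & ~KILL[b])

   iterated until no OUT set changes.  */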
3019 \f
3020 /* Classic GCSE available expression support. */
3021
3022 /* Allocate memory for available expression computation. */
3023
3024 static void
3025 alloc_avail_expr_mem (n_blocks, n_exprs)
3026 int n_blocks, n_exprs;
3027 {
3028 ae_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3029 sbitmap_vector_zero (ae_kill, n_blocks);
3030
3031 ae_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3032 sbitmap_vector_zero (ae_gen, n_blocks);
3033
3034 ae_in = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3035 sbitmap_vector_zero (ae_in, n_blocks);
3036
3037 ae_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3038 sbitmap_vector_zero (ae_out, n_blocks);
3039 }
3040
3041 static void
3042 free_avail_expr_mem ()
3043 {
3044 sbitmap_vector_free (ae_kill);
3045 sbitmap_vector_free (ae_gen);
3046 sbitmap_vector_free (ae_in);
3047 sbitmap_vector_free (ae_out);
3048 }
3049
3050 /* Compute the set of available expressions generated in each basic block. */
3051
3052 static void
3053 compute_ae_gen (expr_hash_table)
3054 struct hash_table *expr_hash_table;
3055 {
3056 unsigned int i;
3057 struct expr *expr;
3058 struct occr *occr;
3059
3060 /* For each recorded occurrence of each expression, set ae_gen[bb][expr].
3061 This is all we have to do because an expression is not recorded if it
3062 is not available, and the only expressions we want to work with are the
3063 ones that are recorded. */
3064 for (i = 0; i < expr_hash_table->size; i++)
3065 for (expr = expr_hash_table->table[i]; expr != 0; expr = expr->next_same_hash)
3066 for (occr = expr->avail_occr; occr != 0; occr = occr->next)
3067 SET_BIT (ae_gen[BLOCK_NUM (occr->insn)], expr->bitmap_index);
3068 }
3069
3070 /* Return nonzero if expression X is killed in BB. */
3071
3072 static int
3073 expr_killed_p (x, bb)
3074 rtx x;
3075 basic_block bb;
3076 {
3077 int i, j;
3078 enum rtx_code code;
3079 const char *fmt;
3080
3081 if (x == 0)
3082 return 1;
3083
3084 code = GET_CODE (x);
3085 switch (code)
3086 {
3087 case REG:
3088 return TEST_BIT (reg_set_in_block[bb->index], REGNO (x));
3089
3090 case MEM:
3091 if (load_killed_in_block_p (bb, get_max_uid () + 1, x, 0))
3092 return 1;
3093 else
3094 return expr_killed_p (XEXP (x, 0), bb);
3095
3096 case PC:
3097 case CC0: /*FIXME*/
3098 case CONST:
3099 case CONST_INT:
3100 case CONST_DOUBLE:
3101 case CONST_VECTOR:
3102 case SYMBOL_REF:
3103 case LABEL_REF:
3104 case ADDR_VEC:
3105 case ADDR_DIFF_VEC:
3106 return 0;
3107
3108 default:
3109 break;
3110 }
3111
3112 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3113 {
3114 if (fmt[i] == 'e')
3115 {
3116 /* If we are about to do the last recursive call
3117 needed at this level, change it into iteration.
3118 This function is called enough to be worth it. */
3119 if (i == 0)
3120 return expr_killed_p (XEXP (x, i), bb);
3121 else if (expr_killed_p (XEXP (x, i), bb))
3122 return 1;
3123 }
3124 else if (fmt[i] == 'E')
3125 for (j = 0; j < XVECLEN (x, i); j++)
3126 if (expr_killed_p (XVECEXP (x, i, j), bb))
3127 return 1;
3128 }
3129
3130 return 0;
3131 }
3132
3133 /* Compute the set of available expressions killed in each basic block. */
3134
3135 static void
3136 compute_ae_kill (ae_gen, ae_kill, expr_hash_table)
3137 sbitmap *ae_gen, *ae_kill;
3138 struct hash_table *expr_hash_table;
3139 {
3140 basic_block bb;
3141 unsigned int i;
3142 struct expr *expr;
3143
3144 FOR_EACH_BB (bb)
3145 for (i = 0; i < expr_hash_table->size; i++)
3146 for (expr = expr_hash_table->table[i]; expr; expr = expr->next_same_hash)
3147 {
3148 /* Skip EXPR if generated in this block. */
3149 if (TEST_BIT (ae_gen[bb->index], expr->bitmap_index))
3150 continue;
3151
3152 if (expr_killed_p (expr->expr, bb))
3153 SET_BIT (ae_kill[bb->index], expr->bitmap_index);
3154 }
3155 }
3156 \f
3157 /* Actually perform the Classic GCSE optimizations. */
3158
3159 /* Return nonzero if occurrence OCCR of expression EXPR reaches block BB.
3160
3161 CHECK_SELF_LOOP is nonzero if we should consider a block reaching itself
3162 as a positive reach. We want to do this when there are two computations
3163 of the expression in the block.
3164
3165 VISITED is a pointer to a working buffer for tracking which BB's have
3166 been visited. It is NULL for the top-level call.
3167
3168 We treat reaching expressions that go through blocks containing the same
3169 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
3170 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
3171 2 as not reaching. The intent is to improve the probability of finding
3172 only one reaching expression and to reduce register lifetimes by picking
3173 the closest such expression. */
3174
3175 static int
3176 expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited)
3177 struct occr *occr;
3178 struct expr *expr;
3179 basic_block bb;
3180 int check_self_loop;
3181 char *visited;
3182 {
3183 edge pred;
3184
3185 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
3186 {
3187 basic_block pred_bb = pred->src;
3188
3189 if (visited[pred_bb->index])
3190 /* This predecessor has already been visited. Nothing to do. */
3191 ;
3192 else if (pred_bb == bb)
3193 {
3194 /* BB loops on itself. */
3195 if (check_self_loop
3196 && TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index)
3197 && BLOCK_NUM (occr->insn) == pred_bb->index)
3198 return 1;
3199
3200 visited[pred_bb->index] = 1;
3201 }
3202
3203 /* Ignore this predecessor if it kills the expression. */
3204 else if (TEST_BIT (ae_kill[pred_bb->index], expr->bitmap_index))
3205 visited[pred_bb->index] = 1;
3206
3207 /* Does this predecessor generate this expression? */
3208 else if (TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index))
3209 {
3210 /* Is this the occurrence we're looking for?
3211 Note that there's only one generating occurrence per block
3212 so we just need to check the block number. */
3213 if (BLOCK_NUM (occr->insn) == pred_bb->index)
3214 return 1;
3215
3216 visited[pred_bb->index] = 1;
3217 }
3218
3219 /* Neither gen nor kill. */
3220 else
3221 {
3222 visited[pred_bb->index] = 1;
3223 if (expr_reaches_here_p_work (occr, expr, pred_bb, check_self_loop,
3224 visited))
3225
3226 return 1;
3227 }
3228 }
3229
3230 /* All paths have been checked. */
3231 return 0;
3232 }
3233
3234 /* This wrapper for expr_reaches_here_p_work() is to ensure that any
3235 memory allocated for that function is returned. */
3236
3237 static int
3238 expr_reaches_here_p (occr, expr, bb, check_self_loop)
3239 struct occr *occr;
3240 struct expr *expr;
3241 basic_block bb;
3242 int check_self_loop;
3243 {
3244 int rval;
3245 char *visited = (char *) xcalloc (last_basic_block, 1);
3246
3247 rval = expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited);
3248
3249 free (visited);
3250 return rval;
3251 }
3252
3253 /* Return the instruction that computes EXPR that reaches INSN's basic block.
3254 If there is more than one such instruction, return NULL.
3255
3256 Called only by handle_avail_expr. */
3257
3258 static rtx
3259 computing_insn (expr, insn)
3260 struct expr *expr;
3261 rtx insn;
3262 {
3263 basic_block bb = BLOCK_FOR_INSN (insn);
3264
3265 if (expr->avail_occr->next == NULL)
3266 {
3267 if (BLOCK_FOR_INSN (expr->avail_occr->insn) == bb)
3268 /* The available expression is actually itself
3269 (i.e. a loop in the flow graph) so do nothing. */
3270 return NULL;
3271
3272 /* (FIXME) This is the case where we found a pattern that was
3273 created by a substitution that has already taken place. */
3274 return expr->avail_occr->insn;
3275 }
3276 else
3277 {
3278 /* Pattern is computed more than once.
3279 Search backwards from this insn to see how many of these
3280 computations actually reach this insn. */
3281 struct occr *occr;
3282 rtx insn_computes_expr = NULL;
3283 int can_reach = 0;
3284
3285 for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
3286 {
3287 if (BLOCK_FOR_INSN (occr->insn) == bb)
3288 {
3289 /* The expression is generated in this block.
3290 The only time we care about this is when the expression
3291 is generated later in the block [and thus there's a loop].
3292 We let the normal cse pass handle the other cases. */
3293 if (INSN_CUID (insn) < INSN_CUID (occr->insn)
3294 && expr_reaches_here_p (occr, expr, bb, 1))
3295 {
3296 can_reach++;
3297 if (can_reach > 1)
3298 return NULL;
3299
3300 insn_computes_expr = occr->insn;
3301 }
3302 }
3303 else if (expr_reaches_here_p (occr, expr, bb, 0))
3304 {
3305 can_reach++;
3306 if (can_reach > 1)
3307 return NULL;
3308
3309 insn_computes_expr = occr->insn;
3310 }
3311 }
3312
3313 if (insn_computes_expr == NULL)
3314 abort ();
3315
3316 return insn_computes_expr;
3317 }
3318 }
3319
3320 /* Return nonzero if the definition in DEF_INSN can reach INSN.
3321 Only called by can_disregard_other_sets. */
3322
3323 static int
3324 def_reaches_here_p (insn, def_insn)
3325 rtx insn, def_insn;
3326 {
3327 rtx reg;
3328
3329 if (TEST_BIT (reaching_defs[BLOCK_NUM (insn)], INSN_CUID (def_insn)))
3330 return 1;
3331
3332 if (BLOCK_NUM (insn) == BLOCK_NUM (def_insn))
3333 {
3334 if (INSN_CUID (def_insn) < INSN_CUID (insn))
3335 {
3336 if (GET_CODE (PATTERN (def_insn)) == PARALLEL)
3337 return 1;
3338 else if (GET_CODE (PATTERN (def_insn)) == CLOBBER)
3339 reg = XEXP (PATTERN (def_insn), 0);
3340 else if (GET_CODE (PATTERN (def_insn)) == SET)
3341 reg = SET_DEST (PATTERN (def_insn));
3342 else
3343 abort ();
3344
3345 return ! reg_set_between_p (reg, NEXT_INSN (def_insn), insn);
3346 }
3347 else
3348 return 0;
3349 }
3350
3351 return 0;
3352 }
3353
3354 /* Return nonzero if *ADDR_THIS_REG can only have one value at INSN. The
3355 value returned is the number of definitions that reach INSN. Returning a
3356 value of zero means that more than one definition may reach INSN, so
3357 the caller can't perform whatever optimization it is trying; i.e. it is
3358 always safe to return zero. */
3359
3360 static int
3361 can_disregard_other_sets (addr_this_reg, insn, for_combine)
3362 struct reg_set **addr_this_reg;
3363 rtx insn;
3364 int for_combine;
3365 {
3366 int number_of_reaching_defs = 0;
3367 struct reg_set *this_reg;
3368
3369 for (this_reg = *addr_this_reg; this_reg != 0; this_reg = this_reg->next)
3370 if (def_reaches_here_p (insn, this_reg->insn))
3371 {
3372 number_of_reaching_defs++;
3373 /* Ignore parallels for now. */
3374 if (GET_CODE (PATTERN (this_reg->insn)) == PARALLEL)
3375 return 0;
3376
3377 if (!for_combine
3378 && (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER
3379 || ! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)),
3380 SET_SRC (PATTERN (insn)))))
3381 /* A setting of the reg to a different value reaches INSN. */
3382 return 0;
3383
3384 if (number_of_reaching_defs > 1)
3385 {
3386 /* If the value this setting assigns to the register is equal
3387 to the value of the previous setting, and this setting
3388 reaches the insn on which we are trying to do the
3389 substitution, then we are OK. */
3390 if (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER)
3391 return 0;
3392 else if (! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)),
3393 SET_SRC (PATTERN (insn))))
3394 return 0;
3395 }
3396
3397 *addr_this_reg = this_reg;
3398 }
3399
3400 return number_of_reaching_defs;
3401 }
3402
3403 /* Expression computed by insn is available and the substitution is legal,
3404 so try to perform the substitution.
3405
3406 The result is nonzero if any changes were made. */
3407
3408 static int
3409 handle_avail_expr (insn, expr)
3410 rtx insn;
3411 struct expr *expr;
3412 {
3413 rtx pat, insn_computes_expr, expr_set;
3414 rtx to;
3415 struct reg_set *this_reg;
3416 int found_setting, use_src;
3417 int changed = 0;
3418
3419 /* We only handle the case where one computation of the expression
3420 reaches this instruction. */
3421 insn_computes_expr = computing_insn (expr, insn);
3422 if (insn_computes_expr == NULL)
3423 return 0;
3424 expr_set = single_set (insn_computes_expr);
3425 if (!expr_set)
3426 abort ();
3427
3428 found_setting = 0;
3429 use_src = 0;
3430
3431 /* At this point we know only one computation of EXPR outside of this
3432 block reaches this insn. Now try to find a register that the
3433 expression is computed into. */
3434 if (GET_CODE (SET_SRC (expr_set)) == REG)
3435 {
3436 /* This is the case when the available expression that reaches
3437 here has already been handled as an available expression. */
3438 unsigned int regnum_for_replacing
3439 = REGNO (SET_SRC (expr_set));
3440
3441 /* If the register was created by GCSE we can't use `reg_set_table';
3442 however, we know it's set only once. */
3443 if (regnum_for_replacing >= max_gcse_regno
3444 /* If the register the expression is computed into is set only once,
3445 or only one set reaches this insn, we can use it. */
3446 || (((this_reg = reg_set_table[regnum_for_replacing]),
3447 this_reg->next == NULL)
3448 || can_disregard_other_sets (&this_reg, insn, 0)))
3449 {
3450 use_src = 1;
3451 found_setting = 1;
3452 }
3453 }
3454
3455 if (!found_setting)
3456 {
3457 unsigned int regnum_for_replacing
3458 = REGNO (SET_DEST (expr_set));
3459
3460 /* This shouldn't happen. */
3461 if (regnum_for_replacing >= max_gcse_regno)
3462 abort ();
3463
3464 this_reg = reg_set_table[regnum_for_replacing];
3465
3466 /* If the register the expression is computed into is set only once,
3467 or only one set reaches this insn, use it. */
3468 if (this_reg->next == NULL
3469 || can_disregard_other_sets (&this_reg, insn, 0))
3470 found_setting = 1;
3471 }
3472
3473 if (found_setting)
3474 {
3475 pat = PATTERN (insn);
3476 if (use_src)
3477 to = SET_SRC (expr_set);
3478 else
3479 to = SET_DEST (expr_set);
3480 changed = validate_change (insn, &SET_SRC (pat), to, 0);
3481
3482 /* We should be able to ignore the return code from validate_change but
3483 to play it safe we check. */
3484 if (changed)
3485 {
3486 gcse_subst_count++;
3487 if (gcse_file != NULL)
3488 {
3489 fprintf (gcse_file, "GCSE: Replacing the source in insn %d with",
3490 INSN_UID (insn));
3491 fprintf (gcse_file, " reg %d %s insn %d\n",
3492 REGNO (to), use_src ? "from" : "set in",
3493 INSN_UID (insn_computes_expr));
3494 }
3495 }
3496 }
3497
3498 /* The register that the expr is computed into is set more than once. */
3499 else if (1 /*expensive_op(this_pattrn->op) && do_expensive_gcse)*/)
3500 {
3501 /* Insert an insn after INSN_COMPUTES_EXPR that copies the reg
3502 set there (call it REGB) into a new pseudo register (call it
3503 REGN). From that point until the end of the basic block, or
3504 until REGB is set again, replace all uses of REGB with REGN. */
3505 rtx new_insn;
3506
3507 to = gen_reg_rtx (GET_MODE (SET_DEST (expr_set)));
3508
3509 /* Generate the new insn. */
3510 /* ??? If the change fails, we return 0, even though we created
3511 an insn. I think this is ok. */
3512 new_insn
3513 = emit_insn_after (gen_rtx_SET (VOIDmode, to,
3514 SET_DEST (expr_set)),
3515 insn_computes_expr);
3516
3517 /* Keep register set table up to date. */
3518 record_one_set (REGNO (to), new_insn);
3519
3520 gcse_create_count++;
3521 if (gcse_file != NULL)
3522 {
3523 fprintf (gcse_file, "GCSE: Creating insn %d to copy value of reg %d",
3524 INSN_UID (NEXT_INSN (insn_computes_expr)),
3525 REGNO (SET_SRC (PATTERN (NEXT_INSN (insn_computes_expr)))));
3526 fprintf (gcse_file, ", computed in insn %d,\n",
3527 INSN_UID (insn_computes_expr));
3528 fprintf (gcse_file, " into newly allocated reg %d\n",
3529 REGNO (to));
3530 }
3531
3532 pat = PATTERN (insn);
3533
3534 /* Do register replacement for INSN. */
3535 changed = validate_change (insn, &SET_SRC (pat),
3536 SET_DEST (PATTERN
3537 (NEXT_INSN (insn_computes_expr))),
3538 0);
3539
3540 /* We should be able to ignore the return code from validate_change but
3541 to play it safe we check. */
3542 if (changed)
3543 {
3544 gcse_subst_count++;
3545 if (gcse_file != NULL)
3546 {
3547 fprintf (gcse_file,
3548 "GCSE: Replacing the source in insn %d with reg %d ",
3549 INSN_UID (insn),
3550 REGNO (SET_DEST (PATTERN (NEXT_INSN
3551 (insn_computes_expr)))));
3552 fprintf (gcse_file, "set in insn %d\n",
3553 INSN_UID (insn_computes_expr));
3554 }
3555 }
3556 }
3557
3558 return changed;
3559 }
3560
3561 /* Perform classic GCSE. This is called by one_classic_gcse_pass after all
3562 the dataflow analysis has been done.
3563
3564 The result is nonzero if a change was made. */
3565
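/* As a rough illustration of the transformation below (register and insn
   numbers are invented for the example): if insn 10 computes
   (set (reg 65) (plus (reg 60) (reg 61))) and that expression is still
   available, with unchanged operands, when insn 20 computes
   (set (reg 70) (plus (reg 60) (reg 61))), then handle_avail_expr may
   rewrite insn 20 as (set (reg 70) (reg 65)), leaving later passes to
   propagate or delete the copy.  */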
3566 static int
3567 classic_gcse ()
3568 {
3569 int changed;
3570 rtx insn;
3571 basic_block bb;
3572
3573 /* Note we start at block 1. */
3574
3575 if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
3576 return 0;
3577
3578 changed = 0;
3579 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb)
3580 {
3581 /* Reset tables used to keep track of what's still valid [since the
3582 start of the block]. */
3583 reset_opr_set_tables ();
3584
3585 for (insn = bb->head;
3586 insn != NULL && insn != NEXT_INSN (bb->end);
3587 insn = NEXT_INSN (insn))
3588 {
3589 /* Is insn of form (set (pseudo-reg) ...)? */
3590 if (GET_CODE (insn) == INSN
3591 && GET_CODE (PATTERN (insn)) == SET
3592 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
3593 && REGNO (SET_DEST (PATTERN (insn))) >= FIRST_PSEUDO_REGISTER)
3594 {
3595 rtx pat = PATTERN (insn);
3596 rtx src = SET_SRC (pat);
3597 struct expr *expr;
3598
3599 if (want_to_gcse_p (src)
3600 /* Is the expression recorded? */
3601 && ((expr = lookup_expr (src, &expr_hash_table)) != NULL)
3602 /* Is the expression available [at the start of the
3603 block]? */
3604 && TEST_BIT (ae_in[bb->index], expr->bitmap_index)
3605 /* Are the operands unchanged since the start of the
3606 block? */
3607 && oprs_not_set_p (src, insn))
3608 changed |= handle_avail_expr (insn, expr);
3609 }
3610
3611 /* Keep track of everything modified by this insn. */
3612 /* ??? Need to be careful w.r.t. mods done to INSN. */
3613 if (INSN_P (insn))
3614 mark_oprs_set (insn);
3615 }
3616 }
3617
3618 return changed;
3619 }
3620
3621 /* Top level routine to perform one classic GCSE pass.
3622
3623 Return nonzero if a change was made. */
3624
3625 static int
3626 one_classic_gcse_pass (pass)
3627 int pass;
3628 {
3629 int changed = 0;
3630
3631 gcse_subst_count = 0;
3632 gcse_create_count = 0;
3633
3634 alloc_hash_table (max_cuid, &expr_hash_table, 0);
3635 alloc_rd_mem (last_basic_block, max_cuid);
3636 compute_hash_table (&expr_hash_table);
3637 if (gcse_file)
3638 dump_hash_table (gcse_file, "Expression", &expr_hash_table);
3639
3640 if (expr_hash_table.n_elems > 0)
3641 {
3642 compute_kill_rd ();
3643 compute_rd ();
3644 alloc_avail_expr_mem (last_basic_block, expr_hash_table.n_elems);
3645 compute_ae_gen (&expr_hash_table);
3646 compute_ae_kill (ae_gen, ae_kill, &expr_hash_table);
3647 compute_available (ae_gen, ae_kill, ae_out, ae_in);
3648 changed = classic_gcse ();
3649 free_avail_expr_mem ();
3650 }
3651
3652 free_rd_mem ();
3653 free_hash_table (&expr_hash_table);
3654
3655 if (gcse_file)
3656 {
3657 fprintf (gcse_file, "\n");
3658 fprintf (gcse_file, "GCSE of %s, pass %d: %d bytes needed, %d substs,",
3659 current_function_name, pass, bytes_used, gcse_subst_count);
3660 fprintf (gcse_file, "%d insns created\n", gcse_create_count);
3661 }
3662
3663 return changed;
3664 }
3665 \f
3666 /* Compute copy/constant propagation working variables. */
3667
3668 /* Local properties of assignments. */
3669 static sbitmap *cprop_pavloc;
3670 static sbitmap *cprop_absaltered;
3671
3672 /* Global properties of assignments (computed from the local properties). */
3673 static sbitmap *cprop_avin;
3674 static sbitmap *cprop_avout;
3675
3676 /* Allocate vars used for copy/const propagation. N_BLOCKS is the number of
3677 basic blocks. N_SETS is the number of sets. */
3678
3679 static void
3680 alloc_cprop_mem (n_blocks, n_sets)
3681 int n_blocks, n_sets;
3682 {
3683 cprop_pavloc = sbitmap_vector_alloc (n_blocks, n_sets);
3684 cprop_absaltered = sbitmap_vector_alloc (n_blocks, n_sets);
3685
3686 cprop_avin = sbitmap_vector_alloc (n_blocks, n_sets);
3687 cprop_avout = sbitmap_vector_alloc (n_blocks, n_sets);
3688 }
3689
3690 /* Free vars used by copy/const propagation. */
3691
3692 static void
3693 free_cprop_mem ()
3694 {
3695 sbitmap_vector_free (cprop_pavloc);
3696 sbitmap_vector_free (cprop_absaltered);
3697 sbitmap_vector_free (cprop_avin);
3698 sbitmap_vector_free (cprop_avout);
3699 }
3700
3701 /* For each block, compute whether X is transparent. X is either an
3702 expression or an assignment [though we don't care which, for this context
3703 an assignment is treated as an expression]. For each block where an
3704 element of X is modified, set (SET_P == 1) or reset (SET_P == 0) the INDX
3705 bit in BMAP. */
3706
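/* A schematic example (register numbers are invented): the expression
   (plus (reg 65) (const_int 4)) is not transparent in any block that sets
   reg 65, so the INDX bit is set or reset in BMAP for each such block.
   For a MEM, any block whose recorded stores may alias the MEM likewise
   makes it non-transparent.  */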
3707 static void
3708 compute_transp (x, indx, bmap, set_p)
3709 rtx x;
3710 int indx;
3711 sbitmap *bmap;
3712 int set_p;
3713 {
3714 int i, j;
3715 basic_block bb;
3716 enum rtx_code code;
3717 reg_set *r;
3718 const char *fmt;
3719
3720 /* repeat is used to turn tail-recursion into iteration since GCC
3721 can't do it when there's no return value. */
3722 repeat:
3723
3724 if (x == 0)
3725 return;
3726
3727 code = GET_CODE (x);
3728 switch (code)
3729 {
3730 case REG:
3731 if (set_p)
3732 {
3733 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
3734 {
3735 FOR_EACH_BB (bb)
3736 if (TEST_BIT (reg_set_in_block[bb->index], REGNO (x)))
3737 SET_BIT (bmap[bb->index], indx);
3738 }
3739 else
3740 {
3741 for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
3742 SET_BIT (bmap[BLOCK_NUM (r->insn)], indx);
3743 }
3744 }
3745 else
3746 {
3747 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
3748 {
3749 FOR_EACH_BB (bb)
3750 if (TEST_BIT (reg_set_in_block[bb->index], REGNO (x)))
3751 RESET_BIT (bmap[bb->index], indx);
3752 }
3753 else
3754 {
3755 for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
3756 RESET_BIT (bmap[BLOCK_NUM (r->insn)], indx);
3757 }
3758 }
3759
3760 return;
3761
3762 case MEM:
3763 FOR_EACH_BB (bb)
3764 {
3765 rtx list_entry = canon_modify_mem_list[bb->index];
3766
3767 while (list_entry)
3768 {
3769 rtx dest, dest_addr;
3770
3771 if (GET_CODE (XEXP (list_entry, 0)) == CALL_INSN)
3772 {
3773 if (set_p)
3774 SET_BIT (bmap[bb->index], indx);
3775 else
3776 RESET_BIT (bmap[bb->index], indx);
3777 break;
3778 }
3779 /* LIST_ENTRY must be an INSN of some kind that sets memory.
3780 Examine each hunk of memory that is modified. */
3781
3782 dest = XEXP (list_entry, 0);
3783 list_entry = XEXP (list_entry, 1);
3784 dest_addr = XEXP (list_entry, 0);
3785
3786 if (canon_true_dependence (dest, GET_MODE (dest), dest_addr,
3787 x, rtx_addr_varies_p))
3788 {
3789 if (set_p)
3790 SET_BIT (bmap[bb->index], indx);
3791 else
3792 RESET_BIT (bmap[bb->index], indx);
3793 break;
3794 }
3795 list_entry = XEXP (list_entry, 1);
3796 }
3797 }
3798
3799 x = XEXP (x, 0);
3800 goto repeat;
3801
3802 case PC:
3803 case CC0: /*FIXME*/
3804 case CONST:
3805 case CONST_INT:
3806 case CONST_DOUBLE:
3807 case CONST_VECTOR:
3808 case SYMBOL_REF:
3809 case LABEL_REF:
3810 case ADDR_VEC:
3811 case ADDR_DIFF_VEC:
3812 return;
3813
3814 default:
3815 break;
3816 }
3817
3818 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3819 {
3820 if (fmt[i] == 'e')
3821 {
3822 /* If we are about to do the last recursive call
3823 needed at this level, change it into iteration.
3824 This function is called enough to be worth it. */
3825 if (i == 0)
3826 {
3827 x = XEXP (x, i);
3828 goto repeat;
3829 }
3830
3831 compute_transp (XEXP (x, i), indx, bmap, set_p);
3832 }
3833 else if (fmt[i] == 'E')
3834 for (j = 0; j < XVECLEN (x, i); j++)
3835 compute_transp (XVECEXP (x, i, j), indx, bmap, set_p);
3836 }
3837 }
3838
3839 /* Top level routine to do the dataflow analysis needed by copy/const
3840 propagation. */
3841
3842 static void
3843 compute_cprop_data ()
3844 {
3845 compute_local_properties (cprop_absaltered, cprop_pavloc, NULL, &set_hash_table);
3846 compute_available (cprop_pavloc, cprop_absaltered,
3847 cprop_avout, cprop_avin);
3848 }
3849 \f
3850 /* Copy/constant propagation. */
3851
3852 /* Maximum number of register uses in an insn that we handle. */
3853 #define MAX_USES 8
3854
3855 /* Table of uses found in an insn.
3856 Allocated statically to avoid alloc/free complexity and overhead. */
3857 static struct reg_use reg_use_table[MAX_USES];
3858
3859 /* Index into `reg_use_table' while building it. */
3860 static int reg_use_count;
3861
3862 /* Set up a list of register numbers used in INSN. The found uses are stored
3863 in `reg_use_table'. `reg_use_count' is initialized to zero before entry,
3864 and contains the number of uses in the table upon exit.
3865
3866 ??? If a register appears multiple times we will record it multiple times.
3867 This doesn't hurt anything but it will slow things down. */
3868
3869 static void
3870 find_used_regs (xptr, data)
3871 rtx *xptr;
3872 void *data ATTRIBUTE_UNUSED;
3873 {
3874 int i, j;
3875 enum rtx_code code;
3876 const char *fmt;
3877 rtx x = *xptr;
3878
3879 /* repeat is used to turn tail-recursion into iteration since GCC
3880 can't do it when there's no return value. */
3881 repeat:
3882 if (x == 0)
3883 return;
3884
3885 code = GET_CODE (x);
3886 if (REG_P (x))
3887 {
3888 if (reg_use_count == MAX_USES)
3889 return;
3890
3891 reg_use_table[reg_use_count].reg_rtx = x;
3892 reg_use_count++;
3893 }
3894
3895 /* Recursively scan the operands of this expression. */
3896
3897 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3898 {
3899 if (fmt[i] == 'e')
3900 {
3901 /* If we are about to do the last recursive call
3902 needed at this level, change it into iteration.
3903 This function is called enough to be worth it. */
3904 if (i == 0)
3905 {
3906 x = XEXP (x, 0);
3907 goto repeat;
3908 }
3909
3910 find_used_regs (&XEXP (x, i), data);
3911 }
3912 else if (fmt[i] == 'E')
3913 for (j = 0; j < XVECLEN (x, i); j++)
3914 find_used_regs (&XVECEXP (x, i, j), data);
3915 }
3916 }
3917
3918 /* Try to replace all non-SET_DEST occurrences of FROM in INSN with TO.
3919 Returns nonzero if successful. */
3920
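/* A sketch of the fallback behavior (register numbers are invented): if
   replacing reg 68 by (const_int 5) in
   (set (reg 70) (plus (reg 68) (reg 69))) does not yield a recognizable
   insn, the code below tries to simplify the whole SET_SRC with the
   substitution applied, and failing that records the simplified value in
   a REG_EQUAL note so the information is not lost.  */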
3921 static int
3922 try_replace_reg (from, to, insn)
3923 rtx from, to, insn;
3924 {
3925 rtx note = find_reg_equal_equiv_note (insn);
3926 rtx src = 0;
3927 int success = 0;
3928 rtx set = single_set (insn);
3929
3930 validate_replace_src_group (from, to, insn);
3931 if (num_changes_pending () && apply_change_group ())
3932 success = 1;
3933
3934 if (!success && set && reg_mentioned_p (from, SET_SRC (set)))
3935 {
3936 /* If the above failed and this is a single set, try to simplify the source of
3937 the set given our substitution. We could perhaps try this for multiple
3938 SETs, but it probably won't buy us anything. */
3939 src = simplify_replace_rtx (SET_SRC (set), from, to);
3940
3941 if (!rtx_equal_p (src, SET_SRC (set))
3942 && validate_change (insn, &SET_SRC (set), src, 0))
3943 success = 1;
3944
3945 /* If we've failed to do replacement, have a single SET, and don't already
3946 have a note, add a REG_EQUAL note to not lose information. */
3947 if (!success && note == 0 && set != 0)
3948 note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
3949 }
3950
3951 /* If there is already a NOTE, update the expression in it with our
3952 replacement. */
3953 else if (note != 0)
3954 XEXP (note, 0) = simplify_replace_rtx (XEXP (note, 0), from, to);
3955
3956 /* A REG_EQUAL note may get simplified into a bare register.
3957 We don't allow that, so remove the note. This case ought
3958 not to happen, because previous code ought to synthesize a
3959 reg-reg move, but be on the safe side. */
3960 if (note && REG_P (XEXP (note, 0)))
3961 remove_note (insn, note);
3962
3963 return success;
3964 }
3965
3966 /* Find a set of register REGNO that is available on entry to INSN's block.
3967 Returns NULL if no such set is found. */
3968
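/* For example (register numbers are invented): if both
   (set (reg 68) (const_int 5)) and (set (reg 70) (reg 68)) are available
   at the start of INSN's block, a query for regno 70 follows the copy
   chain back through reg 68 and may ultimately return the set of the
   constant 5, provided the intermediate sources are unchanged between
   the block start and INSN.  */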
3969 static struct expr *
3970 find_avail_set (regno, insn)
3971 int regno;
3972 rtx insn;
3973 {
3974 /* SET1 contains the last set found that can be returned to the caller for
3975 use in a substitution. */
3976 struct expr *set1 = 0;
3977
3978 /* Loops are not possible here. To get a loop we would need two sets
3979 available at the start of the block containing INSN. ie we would
3980 need two sets like this available at the start of the block:
3981
3982 (set (reg X) (reg Y))
3983 (set (reg Y) (reg X))
3984
3985 This can not happen since the set of (reg Y) would have killed the
3986 set of (reg X) making it unavailable at the start of this block. */
3987 while (1)
3988 {
3989 rtx src;
3990 struct expr *set = lookup_set (regno, &set_hash_table);
3991
3992 /* Find a set that is available at the start of the block
3993 which contains INSN. */
3994 while (set)
3995 {
3996 if (TEST_BIT (cprop_avin[BLOCK_NUM (insn)], set->bitmap_index))
3997 break;
3998 set = next_set (regno, set);
3999 }
4000
4001 /* If no available set was found we've reached the end of the
4002 (possibly empty) copy chain. */
4003 if (set == 0)
4004 break;
4005
4006 if (GET_CODE (set->expr) != SET)
4007 abort ();
4008
4009 src = SET_SRC (set->expr);
4010
4011 /* We know the set is available.
4012 Now check that SRC is ANTLOC (i.e. none of the source operands
4013 have changed since the start of the block).
4014
4015 If the source operand changed, we may still use it for the next
4016 iteration of this loop, but we may not use it for substitutions. */
4017
4018 if (CONSTANT_P (src) || oprs_not_set_p (src, insn))
4019 set1 = set;
4020
4021 /* If the source of the set is anything except a register, then
4022 we have reached the end of the copy chain. */
4023 if (GET_CODE (src) != REG)
4024 break;
4025
4026 /* Follow the copy chain, ie start another iteration of the loop
4027 and see if we have an available copy into SRC. */
4028 regno = REGNO (src);
4029 }
4030
4031 /* SET1 holds the last set that was available and anticipatable at
4032 INSN. */
4033 return set1;
4034 }
4035
4036 /* Subroutine of cprop_insn that tries to propagate constants into
4037 JUMP_INSNS. JUMP must be a conditional jump. If SETCC is non-NULL
4038 it is the instruction that immediately precedes JUMP, and must be a
4039 single SET of a register. FROM is what we will try to replace,
4040 SRC is the constant we will try to substitute for it. Returns nonzero
4041 if a change was made. */
4042
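/* Schematically (register and label numbers are invented): with SETCC
   (set (reg 100) (eq (reg 60) (const_int 0))) and JUMP
   (set (pc) (if_then_else (ne (reg 100) (const_int 0))
                           (label_ref 23) (pc))),
   substituting a known constant for reg 60 may fold the condition so that
   the SET_SRC becomes either pc_rtx (the jump is deleted) or a bare
   label_ref (the jump becomes unconditional and a barrier is added).  */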
4043 static int
4044 cprop_jump (bb, setcc, jump, from, src)
4045 basic_block bb;
4046 rtx setcc;
4047 rtx jump;
4048 rtx from;
4049 rtx src;
4050 {
4051 rtx new, new_set;
4052 rtx set = pc_set (jump);
4053
4054 /* First substitute the SETCC condition into the SET_SRC of the JUMP,
4055 then substitute SRC for FROM in this expanded JUMP. */
4056 if (setcc != NULL
4057 && !modified_between_p (from, setcc, jump)
4058 && !modified_between_p (src, setcc, jump))
4059 {
4060 rtx setcc_set = single_set (setcc);
4061 new_set = simplify_replace_rtx (SET_SRC (set),
4062 SET_DEST (setcc_set),
4063 SET_SRC (setcc_set));
4064 }
4065 else
4066 new_set = set;
4067
4068 new = simplify_replace_rtx (new_set, from, src);
4069
4070 /* If no simplification can be made, then try the next
4071 register. */
4072 if (rtx_equal_p (new, new_set) || rtx_equal_p (new, SET_SRC (set)))
4073 return 0;
4074
4075 /* If this is now a no-op delete it, otherwise this must be a valid insn. */
4076 if (new == pc_rtx)
4077 delete_insn (jump);
4078 else
4079 {
4080 /* Ensure that the value computed inside the jump insn is equivalent
4081 to the one computed by setcc. */
4082 if (setcc
4083 && modified_in_p (new, setcc))
4084 return 0;
4085 if (! validate_change (jump, &SET_SRC (set), new, 0))
4086 return 0;
4087
4088 /* If this has turned into an unconditional jump,
4089 then put a barrier after it so that the unreachable
4090 code will be deleted. */
4091 if (GET_CODE (SET_SRC (set)) == LABEL_REF)
4092 emit_barrier_after (jump);
4093 }
4094
4095 #ifdef HAVE_cc0
4096 /* Delete the cc0 setter. */
4097 if (setcc != NULL && CC0_P (SET_DEST (single_set (setcc))))
4098 delete_insn (setcc);
4099 #endif
4100
4101 run_jump_opt_after_gcse = 1;
4102
4103 const_prop_count++;
4104 if (gcse_file != NULL)
4105 {
4106 fprintf (gcse_file,
4107 "CONST-PROP: Replacing reg %d in jump_insn %d with constant ",
4108 REGNO (from), INSN_UID (jump));
4109 print_rtl (gcse_file, src);
4110 fprintf (gcse_file, "\n");
4111 }
4112 purge_dead_edges (bb);
4113
4114 return 1;
4115 }
4116
4117 static bool
4118 constprop_register (insn, from, to, alter_jumps)
4119 rtx insn;
4120 rtx from;
4121 rtx to;
4122 int alter_jumps;
4123 {
4124 rtx sset;
4125
4126 /* Check for reg or cc0 setting instructions followed by
4127 conditional branch instructions first. */
4128 if (alter_jumps
4129 && (sset = single_set (insn)) != NULL
4130 && NEXT_INSN (insn)
4131 && any_condjump_p (NEXT_INSN (insn)) && onlyjump_p (NEXT_INSN (insn)))
4132 {
4133 rtx dest = SET_DEST (sset);
4134 if ((REG_P (dest) || CC0_P (dest))
4135 && cprop_jump (BLOCK_FOR_INSN (insn), insn, NEXT_INSN (insn), from, to))
4136 return 1;
4137 }
4138
4139 /* Handle normal insns next. */
4140 if (GET_CODE (insn) == INSN
4141 && try_replace_reg (from, to, insn))
4142 return 1;
4143
4144 /* Try to propagate a CONST_INT into a conditional jump.
4145 We're pretty specific about what we will handle in this
4146 code; we can extend this as necessary over time.
4147
4148 Right now the insn in question must look like
4149 (set (pc) (if_then_else ...)) */
4150 else if (alter_jumps && any_condjump_p (insn) && onlyjump_p (insn))
4151 return cprop_jump (BLOCK_FOR_INSN (insn), NULL, insn, from, to);
4152 return 0;
4153 }
4154
4155 /* Perform constant and copy propagation on INSN.
4156 The result is nonzero if a change was made. */
4157
4158 static int
4159 cprop_insn (insn, alter_jumps)
4160 rtx insn;
4161 int alter_jumps;
4162 {
4163 struct reg_use *reg_used;
4164 int changed = 0;
4165 rtx note;
4166
4167 if (!INSN_P (insn))
4168 return 0;
4169
4170 reg_use_count = 0;
4171 note_uses (&PATTERN (insn), find_used_regs, NULL);
4172
4173 note = find_reg_equal_equiv_note (insn);
4174
4175 /* We may win even when propagating constants into notes. */
4176 if (note)
4177 find_used_regs (&XEXP (note, 0), NULL);
4178
4179 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
4180 reg_used++, reg_use_count--)
4181 {
4182 unsigned int regno = REGNO (reg_used->reg_rtx);
4183 rtx pat, src;
4184 struct expr *set;
4185
4186 /* Ignore registers created by GCSE; they do not appear in the
4187 dataflow tables computed before GCSE allocated them. */
4188 if (regno >= max_gcse_regno)
4189 continue;
4190
4191 /* If the register has already been set in this block, there's
4192 nothing we can do. */
4193 if (! oprs_not_set_p (reg_used->reg_rtx, insn))
4194 continue;
4195
4196 /* Find an assignment that sets reg_used and is available
4197 at the start of the block. */
4198 set = find_avail_set (regno, insn);
4199 if (! set)
4200 continue;
4201
4202 pat = set->expr;
4203 /* ??? We might be able to handle PARALLELs. Later. */
4204 if (GET_CODE (pat) != SET)
4205 abort ();
4206
4207 src = SET_SRC (pat);
4208
4209 /* Constant propagation. */
4210 if (CONSTANT_P (src))
4211 {
4212 if (constprop_register (insn, reg_used->reg_rtx, src, alter_jumps))
4213 {
4214 changed = 1;
4215 const_prop_count++;
4216 if (gcse_file != NULL)
4217 {
4218 fprintf (gcse_file, "GLOBAL CONST-PROP: Replacing reg %d in ", regno);
4219 fprintf (gcse_file, "insn %d with constant ", INSN_UID (insn));
4220 print_rtl (gcse_file, src);
4221 fprintf (gcse_file, "\n");
4222 }
4223 }
4224 }
4225 else if (GET_CODE (src) == REG
4226 && REGNO (src) >= FIRST_PSEUDO_REGISTER
4227 && REGNO (src) != regno)
4228 {
4229 if (try_replace_reg (reg_used->reg_rtx, src, insn))
4230 {
4231 changed = 1;
4232 copy_prop_count++;
4233 if (gcse_file != NULL)
4234 {
4235 fprintf (gcse_file, "GLOBAL COPY-PROP: Replacing reg %d in insn %d",
4236 regno, INSN_UID (insn));
4237 fprintf (gcse_file, " with reg %d\n", REGNO (src));
4238 }
4239
4240 /* The original insn setting reg_used may or may not now be
4241 deletable. We leave the deletion to flow. */
4242 /* FIXME: If it turns out that the insn isn't deletable,
4243 then we may have unnecessarily extended register lifetimes
4244 and made things worse. */
4245 }
4246 }
4247 }
4248
4249 return changed;
4250 }
4251
4252 /* Like find_used_regs, but avoid recording uses that appear in
4253 input-output contexts such as zero_extract or pre_dec. This
4254 restricts the cases we consider to those for which local cprop
4255 can legitimately make replacements. */
4256
4257 static void
4258 local_cprop_find_used_regs (xptr, data)
4259 rtx *xptr;
4260 void *data;
4261 {
4262 rtx x = *xptr;
4263
4264 if (x == 0)
4265 return;
4266
4267 switch (GET_CODE (x))
4268 {
4269 case ZERO_EXTRACT:
4270 case SIGN_EXTRACT:
4271 case STRICT_LOW_PART:
4272 return;
4273
4274 case PRE_DEC:
4275 case PRE_INC:
4276 case POST_DEC:
4277 case POST_INC:
4278 case PRE_MODIFY:
4279 case POST_MODIFY:
4280 /* Can only legitimately appear this early in the context of
4281 stack pushes for function arguments, but handle all of the
4282 codes nonetheless. */
4283 return;
4284
4285 case SUBREG:
4286 /* Setting a subreg of a register larger than word_mode leaves
4287 the non-written words unchanged. */
4288 if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) > BITS_PER_WORD)
4289 return;
4290 break;
4291
4292 default:
4293 break;
4294 }
4295
4296 find_used_regs (xptr, data);
4297 }
4298
4299 /* LIBCALL_SP is a zero-terminated array of insns at the end of a libcall;
4300 their REG_EQUAL notes need updating. */
4301
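/* Roughly what happens below (register numbers are invented): if cselib
   currently knows that reg 68 holds (const_int 0), a use of reg 68 in
   INSN can be replaced with the constant via constprop_register; if
   instead another live pseudo, say reg 72, holds the same value, the
   substitution is attempted with try_replace_reg.  */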
4302 static bool
4303 do_local_cprop (x, insn, alter_jumps, libcall_sp)
4304 rtx x;
4305 rtx insn;
4306 int alter_jumps;
4307 rtx *libcall_sp;
4308 {
4309 rtx newreg = NULL, newcnst = NULL;
4310
4311 /* Rule out USE instructions and ASM statements as we don't want to
4312 change the hard registers mentioned. */
4313 if (GET_CODE (x) == REG
4314 && (REGNO (x) >= FIRST_PSEUDO_REGISTER
4315 || (GET_CODE (PATTERN (insn)) != USE
4316 && asm_noperands (PATTERN (insn)) < 0)))
4317 {
4318 cselib_val *val = cselib_lookup (x, GET_MODE (x), 0);
4319 struct elt_loc_list *l;
4320
4321 if (!val)
4322 return false;
4323 for (l = val->locs; l; l = l->next)
4324 {
4325 rtx this_rtx = l->loc;
4326 rtx note;
4327
4328 if (l->in_libcall)
4329 continue;
4330
4331 if (CONSTANT_P (this_rtx)
4332 && GET_CODE (this_rtx) != CONSTANT_P_RTX)
4333 newcnst = this_rtx;
4334 if (REG_P (this_rtx) && REGNO (this_rtx) >= FIRST_PSEUDO_REGISTER
4335 /* Don't copy propagate if the register has a REG_EQUIV note
4336 attached. At this point only function parameters should have
4337 REG_EQUIV notes, and if the argument slot is used somewhere
4338 explicitly, it means the address of the parameter has been
4339 taken, so we should not extend the lifetime of the pseudo. */
4340 && (!(note = find_reg_note (l->setting_insn, REG_EQUIV, NULL_RTX))
4341 || GET_CODE (XEXP (note, 0)) != MEM))
4342 newreg = this_rtx;
4343 }
4344 if (newcnst && constprop_register (insn, x, newcnst, alter_jumps))
4345 {
4346 /* If we find a case where we can't make the retval REG_EQUAL notes
4347 match the new register, we either have to abandon this replacement,
4348 fix delete_trivially_dead_insns to preserve the setting insn,
4349 or make it delete the REG_EQUAL note and fix up all passes that
4350 require the REG_EQUAL note there. */
4351 if (!adjust_libcall_notes (x, newcnst, insn, libcall_sp))
4352 abort ();
4353 if (gcse_file != NULL)
4354 {
4355 fprintf (gcse_file, "LOCAL CONST-PROP: Replacing reg %d in ",
4356 REGNO (x));
4357 fprintf (gcse_file, "insn %d with constant ",
4358 INSN_UID (insn));
4359 print_rtl (gcse_file, newcnst);
4360 fprintf (gcse_file, "\n");
4361 }
4362 const_prop_count++;
4363 return true;
4364 }
4365 else if (newreg && newreg != x && try_replace_reg (x, newreg, insn))
4366 {
4367 adjust_libcall_notes (x, newreg, insn, libcall_sp);
4368 if (gcse_file != NULL)
4369 {
4370 fprintf (gcse_file,
4371 "LOCAL COPY-PROP: Replacing reg %d in insn %d",
4372 REGNO (x), INSN_UID (insn));
4373 fprintf (gcse_file, " with reg %d\n", REGNO (newreg));
4374 }
4375 copy_prop_count++;
4376 return true;
4377 }
4378 }
4379 return false;
4380 }
4381
4382 /* LIBCALL_SP is a zero-terminated array of insns at the end of a libcall;
4383 their REG_EQUAL notes need updating to reflect that OLDREG has been
4384 replaced with NEWVAL in INSN. Return true if all substitutions could
4385 be made. */
4386 static bool
4387 adjust_libcall_notes (oldreg, newval, insn, libcall_sp)
4388 rtx oldreg, newval, insn, *libcall_sp;
4389 {
4390 rtx end;
4391
4392 while ((end = *libcall_sp++))
4393 {
4394 rtx note = find_reg_equal_equiv_note (end);
4395
4396 if (! note)
4397 continue;
4398
4399 if (REG_P (newval))
4400 {
4401 if (reg_set_between_p (newval, PREV_INSN (insn), end))
4402 {
4403 do
4404 {
4405 note = find_reg_equal_equiv_note (end);
4406 if (! note)
4407 continue;
4408 if (reg_mentioned_p (newval, XEXP (note, 0)))
4409 return false;
4410 }
4411 while ((end = *libcall_sp++));
4412 return true;
4413 }
4414 }
4415 XEXP (note, 0) = replace_rtx (XEXP (note, 0), oldreg, newval);
4416 insn = end;
4417 }
4418 return true;
4419 }
4420
4421 #define MAX_NESTED_LIBCALLS 9
4422
4423 static void
4424 local_cprop_pass (alter_jumps)
4425 int alter_jumps;
4426 {
4427 rtx insn;
4428 struct reg_use *reg_used;
4429 rtx libcall_stack[MAX_NESTED_LIBCALLS + 1], *libcall_sp;
4430 bool changed = false;
4431
4432 cselib_init ();
4433 libcall_sp = &libcall_stack[MAX_NESTED_LIBCALLS];
4434 *libcall_sp = 0;
4435 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
4436 {
4437 if (INSN_P (insn))
4438 {
4439 rtx note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
4440
4441 if (note)
4442 {
4443 if (libcall_sp == libcall_stack)
4444 abort ();
4445 *--libcall_sp = XEXP (note, 0);
4446 }
4447 note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
4448 if (note)
4449 libcall_sp++;
4450 note = find_reg_equal_equiv_note (insn);
4451 do
4452 {
4453 reg_use_count = 0;
4454 note_uses (&PATTERN (insn), local_cprop_find_used_regs, NULL);
4455 if (note)
4456 local_cprop_find_used_regs (&XEXP (note, 0), NULL);
4457
4458 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
4459 reg_used++, reg_use_count--)
4460 if (do_local_cprop (reg_used->reg_rtx, insn, alter_jumps,
4461 libcall_sp))
4462 {
4463 changed = true;
4464 break;
4465 }
4466 }
4467 while (reg_use_count);
4468 }
4469 cselib_process_insn (insn);
4470 }
4471 cselib_finish ();
4472 /* Global analysis may get into infinite loops for unreachable blocks. */
4473 if (changed && alter_jumps)
4474 {
4475 delete_unreachable_blocks ();
4476 free_reg_set_mem ();
4477 alloc_reg_set_mem (max_reg_num ());
4478 compute_sets (get_insns ());
4479 }
4480 }
4481
4482 /* Forward propagate copies. This includes copies and constants. Return
4483 nonzero if a change was made. */
4484
4485 static int
4486 cprop (alter_jumps)
4487 int alter_jumps;
4488 {
4489 int changed;
4490 basic_block bb;
4491 rtx insn;
4492
4493 /* Note we start at block 1. */
4494 if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
4495 {
4496 if (gcse_file != NULL)
4497 fprintf (gcse_file, "\n");
4498 return 0;
4499 }
4500
4501 changed = 0;
4502 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb)
4503 {
4504 /* Reset tables used to keep track of what's still valid [since the
4505 start of the block]. */
4506 reset_opr_set_tables ();
4507
4508 for (insn = bb->head;
4509 insn != NULL && insn != NEXT_INSN (bb->end);
4510 insn = NEXT_INSN (insn))
4511 if (INSN_P (insn))
4512 {
4513 changed |= cprop_insn (insn, alter_jumps);
4514
4515 /* Keep track of everything modified by this insn. */
4516 /* ??? Need to be careful w.r.t. mods done to INSN. Don't
4517 call mark_oprs_set if we turned the insn into a NOTE. */
4518 if (GET_CODE (insn) != NOTE)
4519 mark_oprs_set (insn);
4520 }
4521 }
4522
4523 if (gcse_file != NULL)
4524 fprintf (gcse_file, "\n");
4525
4526 return changed;
4527 }
4528
4529 /* Similar to get_condition, only the resulting condition must be
4530 valid at JUMP, instead of at EARLIEST.
4531
4532 This differs from noce_get_condition in ifcvt.c in that we prefer not to
4533 settle for the condition variable in the jump instruction being integral.
4534 We prefer to be able to record the value of a user variable, rather than
4535 the value of a temporary used in a condition. This could be solved by
4536 recording the value of *every* register scanned by canonicalize_condition,
4537 but this would require some code reorganization. */
4538
4539 static rtx
4540 fis_get_condition (jump)
4541 rtx jump;
4542 {
4543 rtx cond, set, tmp, insn, earliest;
4544 bool reverse;
4545
4546 if (! any_condjump_p (jump))
4547 return NULL_RTX;
4548
4549 set = pc_set (jump);
4550 cond = XEXP (SET_SRC (set), 0);
4551
4552 /* If this branches to JUMP_LABEL when the condition is false,
4553 reverse the condition. */
4554 reverse = (GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
4555 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump));
4556
4557 /* Use canonicalize_condition to do the dirty work of manipulating
4558 MODE_CC values and COMPARE rtx codes. */
4559 tmp = canonicalize_condition (jump, cond, reverse, &earliest, NULL_RTX);
4560 if (!tmp)
4561 return NULL_RTX;
4562
4563 /* Verify that the given condition is valid at JUMP by virtue of not
4564 having been modified since EARLIEST. */
4565 for (insn = earliest; insn != jump; insn = NEXT_INSN (insn))
4566 if (INSN_P (insn) && modified_in_p (tmp, insn))
4567 break;
4568 if (insn == jump)
4569 return tmp;
4570
4571 /* The condition was modified. See if we can get a partial result
4572 that doesn't follow all the reversals. Perhaps combine can fold
4573 them together later. */
4574 tmp = XEXP (tmp, 0);
4575 if (!REG_P (tmp) || GET_MODE_CLASS (GET_MODE (tmp)) != MODE_INT)
4576 return NULL_RTX;
4577 tmp = canonicalize_condition (jump, cond, reverse, &earliest, tmp);
4578 if (!tmp)
4579 return NULL_RTX;
4580
4581 /* For sanity's sake, re-validate the new result. */
4582 for (insn = earliest; insn != jump; insn = NEXT_INSN (insn))
4583 if (INSN_P (insn) && modified_in_p (tmp, insn))
4584 return NULL_RTX;
4585
4586 return tmp;
4587 }
4588
4589 /* Find the implicit sets of a function. An "implicit set" is a constraint
4590 on the value of a variable, implied by a conditional jump. For example,
4591 following "if (x == 2)", the then branch may be optimized as though the
4592 conditional performed an "explicit set", in this example, "x = 2". This
4593 function records the set patterns that are implicit at the start of each
4594 basic block. */
4595
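/* In RTL terms (register number invented), the example above corresponds
   to a block ending in a conditional jump whose condition is
   (eq (reg 60) (const_int 2)); if the branch target has this block as its
   only predecessor, the implicit set recorded for the target is
   (set (reg 60) (const_int 2)).  */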
4596 static void
4597 find_implicit_sets ()
4598 {
4599 basic_block bb, dest;
4600 unsigned int count;
4601 rtx cond, new;
4602
4603 count = 0;
4604 FOR_EACH_BB (bb)
4605 /* Check for more than one successor. */
4606 if (bb->succ && bb->succ->succ_next)
4607 {
4608 cond = fis_get_condition (bb->end);
4609
4610 if (cond
4611 && (GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
4612 && GET_CODE (XEXP (cond, 0)) == REG
4613 && REGNO (XEXP (cond, 0)) >= FIRST_PSEUDO_REGISTER
4614 && CONSTANT_P (XEXP (cond, 1)))
4615 {
4616 dest = GET_CODE (cond) == EQ ? BRANCH_EDGE (bb)->dest
4617 : FALLTHRU_EDGE (bb)->dest;
4618
4619 if (dest && ! dest->pred->pred_next
4620 && dest != EXIT_BLOCK_PTR)
4621 {
4622 new = gen_rtx_SET (VOIDmode, XEXP (cond, 0),
4623 XEXP (cond, 1));
4624 implicit_sets[dest->index] = new;
4625 if (gcse_file)
4626 {
4627 fprintf(gcse_file, "Implicit set of reg %d in ",
4628 REGNO (XEXP (cond, 0)));
4629 fprintf(gcse_file, "basic block %d\n", dest->index);
4630 }
4631 count++;
4632 }
4633 }
4634 }
4635
4636 if (gcse_file)
4637 fprintf (gcse_file, "Found %d implicit sets\n", count);
4638 }
4639
4640 /* Perform one copy/constant propagation pass.
4641 PASS is the pass count. If CPROP_JUMPS is true, perform constant
4642 propagation into conditional jumps. If BYPASS_JUMPS is true,
4643 perform conditional jump bypassing optimizations. */
4644
4645 static int
4646 one_cprop_pass (pass, cprop_jumps, bypass_jumps)
4647 int pass;
4648 int cprop_jumps;
4649 int bypass_jumps;
4650 {
4651 int changed = 0;
4652
4653 const_prop_count = 0;
4654 copy_prop_count = 0;
4655
4656 local_cprop_pass (cprop_jumps);
4657
4658 /* Determine implicit sets. */
4659 implicit_sets = (rtx *) xcalloc (last_basic_block, sizeof (rtx));
4660 find_implicit_sets ();
4661
4662 alloc_hash_table (max_cuid, &set_hash_table, 1);
4663 compute_hash_table (&set_hash_table);
4664
4665 /* Free implicit_sets before peak usage. */
4666 free (implicit_sets);
4667 implicit_sets = NULL;
4668
4669 if (gcse_file)
4670 dump_hash_table (gcse_file, "SET", &set_hash_table);
4671 if (set_hash_table.n_elems > 0)
4672 {
4673 alloc_cprop_mem (last_basic_block, set_hash_table.n_elems);
4674 compute_cprop_data ();
4675 changed = cprop (cprop_jumps);
4676 if (bypass_jumps)
4677 changed |= bypass_conditional_jumps ();
4678 free_cprop_mem ();
4679 }
4680
4681 free_hash_table (&set_hash_table);
4682
4683 if (gcse_file)
4684 {
4685 fprintf (gcse_file, "CPROP of %s, pass %d: %d bytes needed, ",
4686 current_function_name, pass, bytes_used);
4687 fprintf (gcse_file, "%d const props, %d copy props\n\n",
4688 const_prop_count, copy_prop_count);
4689 }
4690 /* Global analysis may get into infinite loops for unreachable blocks. */
4691 if (changed && cprop_jumps)
4692 delete_unreachable_blocks ();
4693
4694 return changed;
4695 }
4696 \f
4697 /* Bypass conditional jumps. */
4698
4699 /* The value of last_basic_block at the beginning of the jump_bypass
4700 pass. The use of redirect_edge_and_branch_force may introduce new
4701 basic blocks, but the data flow analysis is only valid for basic
4702 block indices less than bypass_last_basic_block. */
4703
4704 static int bypass_last_basic_block;
4705
4706 /* Find a set of REGNO to a constant that is available at the end of basic
4707 block BB. Returns NULL if no such set is found. Based heavily upon
4708 find_avail_set. */
4709
4710 static struct expr *
4711 find_bypass_set (regno, bb)
4712 int regno;
4713 int bb;
4714 {
4715 struct expr *result = 0;
4716
4717 for (;;)
4718 {
4719 rtx src;
4720 struct expr *set = lookup_set (regno, &set_hash_table);
4721
4722 while (set)
4723 {
4724 if (TEST_BIT (cprop_avout[bb], set->bitmap_index))
4725 break;
4726 set = next_set (regno, set);
4727 }
4728
4729 if (set == 0)
4730 break;
4731
4732 if (GET_CODE (set->expr) != SET)
4733 abort ();
4734
4735 src = SET_SRC (set->expr);
4736 if (CONSTANT_P (src))
4737 result = set;
4738
4739 if (GET_CODE (src) != REG)
4740 break;
4741
4742 regno = REGNO (src);
4743 }
4744 return result;
4745 }
4746
4747
4748 /* Subroutine of bypass_conditional_jumps that attempts to bypass the given
4749 basic block BB which has more than one predecessor. If not NULL, SETCC
4750 is the first instruction of BB, which is immediately followed by JUMP_INSN
4751 JUMP. Otherwise, SETCC is NULL, and JUMP is the first insn of BB.
4752 Returns nonzero if a change was made. */
4753
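/* A sketch of the bypass (register numbers are invented): suppose JUMP
   tests (eq (reg 60) (const_int 0)) and one incoming edge comes from a
   block at whose end the set (set (reg 60) (const_int 0)) is available.
   Substituting the constant decides the branch along that edge, so the
   edge can be redirected straight to the appropriate target, skipping
   the conditional jump entirely.  */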
4754 static int
4755 bypass_block (bb, setcc, jump)
4756 basic_block bb;
4757 rtx setcc, jump;
4758 {
4759 rtx insn, note;
4760 edge e, enext;
4761 int i, change;
4762 int may_be_loop_header;
4763
4764 insn = (setcc != NULL) ? setcc : jump;
4765
4766 /* Determine set of register uses in INSN. */
4767 reg_use_count = 0;
4768 note_uses (&PATTERN (insn), find_used_regs, NULL);
4769 note = find_reg_equal_equiv_note (insn);
4770 if (note)
4771 find_used_regs (&XEXP (note, 0), NULL);
4772
4773 may_be_loop_header = false;
4774 for (e = bb->pred; e; e = e->pred_next)
4775 if (e->flags & EDGE_DFS_BACK)
4776 {
4777 may_be_loop_header = true;
4778 break;
4779 }
4780
4781 change = 0;
4782 for (e = bb->pred; e; e = enext)
4783 {
4784 enext = e->pred_next;
4785 if (e->flags & EDGE_COMPLEX)
4786 continue;
4787
4788 /* We can't redirect edges from new basic blocks. */
4789 if (e->src->index >= bypass_last_basic_block)
4790 continue;
4791
4792 /* The irreducible loops created by redirecting edges entering the
4793 loop from outside would decrease the effectiveness of some of the
4794 following optimizations, so prevent this. */
4795 if (may_be_loop_header
4796 && !(e->flags & EDGE_DFS_BACK))
4797 continue;
4798
4799 for (i = 0; i < reg_use_count; i++)
4800 {
4801 struct reg_use *reg_used = &reg_use_table[i];
4802 unsigned int regno = REGNO (reg_used->reg_rtx);
4803 basic_block dest, old_dest;
4804 struct expr *set;
4805 rtx src, new;
4806
4807 if (regno >= max_gcse_regno)
4808 continue;
4809
4810 set = find_bypass_set (regno, e->src->index);
4811
4812 if (! set)
4813 continue;
4814
4815 src = SET_SRC (pc_set (jump));
4816
4817 if (setcc != NULL)
4818 src = simplify_replace_rtx (src,
4819 SET_DEST (PATTERN (setcc)),
4820 SET_SRC (PATTERN (setcc)));
4821
4822 new = simplify_replace_rtx (src, reg_used->reg_rtx,
4823 SET_SRC (set->expr));
4824
4825 if (new == pc_rtx)
4826 dest = FALLTHRU_EDGE (bb)->dest;
4827 else if (GET_CODE (new) == LABEL_REF)
4828 dest = BLOCK_FOR_INSN (XEXP (new, 0));
4829 else
4830 dest = NULL;
4831
4832 old_dest = e->dest;
4833 if (dest != NULL
4834 && dest != old_dest
4835 && dest != EXIT_BLOCK_PTR)
4836 {
4837 redirect_edge_and_branch_force (e, dest);
4838
4839 /* Copy the register setter to the redirected edge.
4840 Don't copy CC0 setters, as CC0 is dead after jump. */
4841 if (setcc)
4842 {
4843 rtx pat = PATTERN (setcc);
4844 if (!CC0_P (SET_DEST (pat)))
4845 insert_insn_on_edge (copy_insn (pat), e);
4846 }
4847
4848 if (gcse_file != NULL)
4849 {
4850 fprintf (gcse_file, "JUMP-BYPASS: Proved reg %d in jump_insn %d equals constant ",
4851 regno, INSN_UID (jump));
4852 print_rtl (gcse_file, SET_SRC (set->expr));
4853 fprintf (gcse_file, "\nBypass edge from %d->%d to %d\n",
4854 e->src->index, old_dest->index, dest->index);
4855 }
4856 change = 1;
4857 break;
4858 }
4859 }
4860 }
4861 return change;
4862 }
4863
4864 /* Find basic blocks with more than one predecessor that only contain a
4865 single conditional jump. If the result of the comparison is known at
4866 compile-time from any incoming edge, redirect that edge to the
4867 appropriate target. Returns nonzero if a change was made.
4868
4869 This function is now mis-named, because we also handle indirect jumps. */
4870
4871 static int
4872 bypass_conditional_jumps ()
4873 {
4874 basic_block bb;
4875 int changed;
4876 rtx setcc;
4877 rtx insn;
4878 rtx dest;
4879
4880 /* Note we start at block 1. */
4881 if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
4882 return 0;
4883
4884 bypass_last_basic_block = last_basic_block;
4885 mark_dfs_back_edges ();
4886
4887 changed = 0;
4888 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb,
4889 EXIT_BLOCK_PTR, next_bb)
4890 {
4891 /* Check for more than one predecessor. */
4892 if (bb->pred && bb->pred->pred_next)
4893 {
4894 setcc = NULL_RTX;
4895 for (insn = bb->head;
4896 insn != NULL && insn != NEXT_INSN (bb->end);
4897 insn = NEXT_INSN (insn))
4898 if (GET_CODE (insn) == INSN)
4899 {
4900 if (setcc)
4901 break;
4902 if (GET_CODE (PATTERN (insn)) != SET)
4903 break;
4904
4905 dest = SET_DEST (PATTERN (insn));
4906 if (REG_P (dest) || CC0_P (dest))
4907 setcc = insn;
4908 else
4909 break;
4910 }
4911 else if (GET_CODE (insn) == JUMP_INSN)
4912 {
4913 if ((any_condjump_p (insn) || computed_jump_p (insn))
4914 && onlyjump_p (insn))
4915 changed |= bypass_block (bb, setcc, insn);
4916 break;
4917 }
4918 else if (INSN_P (insn))
4919 break;
4920 }
4921 }
4922
4923 /* If we bypassed any register setting insns, we inserted a
4924 copy on the redirected edge. These need to be committed. */
4925 if (changed)
4926 commit_edge_insertions ();
4927
4928 return changed;
4929 }
4930 \f
4931 /* Compute PRE+LCM working variables. */
4932
4933 /* Local properties of expressions. */
4934 /* Nonzero for expressions that are transparent in the block. */
4935 static sbitmap *transp;
4936
4937 /* Nonzero for expressions that are transparent at the end of the block.
4938 This is only zero for expressions killed by an abnormal critical edge
4939 created by a call. */
4940 static sbitmap *transpout;
4941
4942 /* Nonzero for expressions that are computed (available) in the block. */
4943 static sbitmap *comp;
4944
4945 /* Nonzero for expressions that are locally anticipatable in the block. */
4946 static sbitmap *antloc;
4947
4948 /* Nonzero for expressions where this block is an optimal computation
4949 point. */
4950 static sbitmap *pre_optimal;
4951
4952 /* Nonzero for expressions which are redundant in a particular block. */
4953 static sbitmap *pre_redundant;
4954
4955 /* Nonzero for expressions which should be inserted on a specific edge. */
4956 static sbitmap *pre_insert_map;
4957
4958 /* Nonzero for expressions which should be deleted in a specific block. */
4959 static sbitmap *pre_delete_map;
4960
4961 /* Contains the edge_list returned by pre_edge_lcm. */
4962 static struct edge_list *edge_list;
4963
4964 /* Redundant insns. */
4965 static sbitmap pre_redundant_insns;
4966
4967 /* Allocate vars used for PRE analysis. */
4968
4969 static void
4970 alloc_pre_mem (n_blocks, n_exprs)
4971 int n_blocks, n_exprs;
4972 {
4973 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
4974 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
4975 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
4976
4977 pre_optimal = NULL;
4978 pre_redundant = NULL;
4979 pre_insert_map = NULL;
4980 pre_delete_map = NULL;
4981 ae_in = NULL;
4982 ae_out = NULL;
4983 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
4984
4985 /* pre_insert and pre_delete are allocated later. */
4986 }
4987
4988 /* Free vars used for PRE analysis. */
4989
4990 static void
4991 free_pre_mem ()
4992 {
4993 sbitmap_vector_free (transp);
4994 sbitmap_vector_free (comp);
4995
4996 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
4997
4998 if (pre_optimal)
4999 sbitmap_vector_free (pre_optimal);
5000 if (pre_redundant)
5001 sbitmap_vector_free (pre_redundant);
5002 if (pre_insert_map)
5003 sbitmap_vector_free (pre_insert_map);
5004 if (pre_delete_map)
5005 sbitmap_vector_free (pre_delete_map);
5006 if (ae_in)
5007 sbitmap_vector_free (ae_in);
5008 if (ae_out)
5009 sbitmap_vector_free (ae_out);
5010
5011 transp = comp = NULL;
5012 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
5013 ae_in = ae_out = NULL;
5014 }
5015
5016 /* Top level routine to do the dataflow analysis needed by PRE. */
5017
5018 static void
5019 compute_pre_data ()
5020 {
5021 sbitmap trapping_expr;
5022 basic_block bb;
5023 unsigned int ui;
5024
5025 compute_local_properties (transp, comp, antloc, &expr_hash_table);
5026 sbitmap_vector_zero (ae_kill, last_basic_block);
5027
5028 /* Collect expressions which might trap. */
5029 trapping_expr = sbitmap_alloc (expr_hash_table.n_elems);
5030 sbitmap_zero (trapping_expr);
5031 for (ui = 0; ui < expr_hash_table.size; ui++)
5032 {
5033 struct expr *e;
5034 for (e = expr_hash_table.table[ui]; e != NULL; e = e->next_same_hash)
5035 if (may_trap_p (e->expr))
5036 SET_BIT (trapping_expr, e->bitmap_index);
5037 }
5038
5039 /* Compute ae_kill for each basic block using:
5040
5041 ~(TRANSP | COMP)
5042
5043 This is significantly faster than compute_ae_kill. */
5044
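  /* In other words, an expression is marked as killed at the end of a
     block only when it is neither transparent through the block nor
     computed within it; when it is computed, the corresponding COMP (gen)
     bit re-establishes availability at the block end anyway.  */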
5045 FOR_EACH_BB (bb)
5046 {
5047 edge e;
5048
5049 /* If the current block is the destination of an abnormal edge, we
5050 kill all trapping expressions because we won't be able to properly
5051 place the instruction on the edge. So make them neither
5052 anticipatable nor transparent. This is fairly conservative. */
5053 for (e = bb->pred; e ; e = e->pred_next)
5054 if (e->flags & EDGE_ABNORMAL)
5055 {
5056 sbitmap_difference (antloc[bb->index], antloc[bb->index], trapping_expr);
5057 sbitmap_difference (transp[bb->index], transp[bb->index], trapping_expr);
5058 break;
5059 }
5060
5061 sbitmap_a_or_b (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
5062 sbitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
5063 }
5064
5065 edge_list = pre_edge_lcm (gcse_file, expr_hash_table.n_elems, transp, comp, antloc,
5066 ae_kill, &pre_insert_map, &pre_delete_map);
5067 sbitmap_vector_free (antloc);
5068 antloc = NULL;
5069 sbitmap_vector_free (ae_kill);
5070 ae_kill = NULL;
5071 sbitmap_free (trapping_expr);
5072 }
5073 \f
5074 /* PRE utilities */
5075
5076 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
5077 block BB.
5078
5079 VISITED is a pointer to a working buffer for tracking which BB's have
5080 been visited. It is NULL for the top-level call.
5081
5082 We treat reaching expressions that go through blocks containing the same
5083 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
5084 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
5085 2 as not reaching. The intent is to improve the probability of finding
5086 only one reaching expression and to reduce register lifetimes by picking
5087 the closest such expression. */
5088
5089 static int
5090 pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited)
5091 basic_block occr_bb;
5092 struct expr *expr;
5093 basic_block bb;
5094 char *visited;
5095 {
5096 edge pred;
5097
5098 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
5099 {
5100 basic_block pred_bb = pred->src;
5101
5102 if (pred->src == ENTRY_BLOCK_PTR
5103 /* Has this predecessor already been visited? */
5104 || visited[pred_bb->index])
5105 ;/* Nothing to do. */
5106
5107 /* Does this predecessor generate this expression? */
5108 else if (TEST_BIT (comp[pred_bb->index], expr->bitmap_index))
5109 {
5110 /* Is this the occurrence we're looking for?
5111 Note that there's only one generating occurrence per block
5112 so we just need to check the block number. */
5113 if (occr_bb == pred_bb)
5114 return 1;
5115
5116 visited[pred_bb->index] = 1;
5117 }
5118 /* Ignore this predecessor if it kills the expression. */
5119 else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index))
5120 visited[pred_bb->index] = 1;
5121
5122 /* Neither gen nor kill. */
5123 else
5124 {
5125 visited[pred_bb->index] = 1;
5126 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
5127 return 1;
5128 }
5129 }
5130
5131 /* All paths have been checked. */
5132 return 0;
5133 }
5134
5135 /* The wrapper for pre_expr_reaches_here_p_work that ensures that any
5136 memory allocated by that function is freed. */
5137
5138 static int
5139 pre_expr_reaches_here_p (occr_bb, expr, bb)
5140 basic_block occr_bb;
5141 struct expr *expr;
5142 basic_block bb;
5143 {
5144 int rval;
5145 char *visited = (char *) xcalloc (last_basic_block, 1);
5146
5147 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
5148
5149 free (visited);
5150 return rval;
5151 }
5152 \f
5153
5154 /* Given an expr, generate RTL which we can insert at the end of a BB,
5155 or on an edge. Set the block number of any insns generated to
5156 the value of BB. */
5157
5158 static rtx
5159 process_insert_insn (expr)
5160 struct expr *expr;
5161 {
5162 rtx reg = expr->reaching_reg;
5163 rtx exp = copy_rtx (expr->expr);
5164 rtx pat;
5165
5166 start_sequence ();
5167
5168 /* If the expression is something that's an operand, like a constant,
5169 just copy it to a register. */
5170 if (general_operand (exp, GET_MODE (reg)))
5171 emit_move_insn (reg, exp);
5172
5173 /* Otherwise, make a new insn to compute this expression and make sure the
5174 insn will be recognized (this also adds any needed CLOBBERs). Copy the
5175 expression to make sure we don't have any sharing issues. */
5176 else if (insn_invalid_p (emit_insn (gen_rtx_SET (VOIDmode, reg, exp))))
5177 abort ();
5178
5179 pat = get_insns ();
5180 end_sequence ();
5181
5182 return pat;
5183 }
5184
5185 /* Add EXPR to the end of basic block BB.
5186
5187 This is used by both the PRE and code hoisting.
5188
5189 For PRE, we want to verify that the expr is either transparent
5190 or locally anticipatable in the target block. This check makes
5191 no sense for code hoisting. */
5192
5193 static void
5194 insert_insn_end_bb (expr, bb, pre)
5195 struct expr *expr;
5196 basic_block bb;
5197 int pre;
5198 {
5199 rtx insn = bb->end;
5200 rtx new_insn;
5201 rtx reg = expr->reaching_reg;
5202 int regno = REGNO (reg);
5203 rtx pat, pat_end;
5204
5205 pat = process_insert_insn (expr);
5206 if (pat == NULL_RTX || ! INSN_P (pat))
5207 abort ();
5208
5209 pat_end = pat;
5210 while (NEXT_INSN (pat_end) != NULL_RTX)
5211 pat_end = NEXT_INSN (pat_end);
5212
5213 /* If the last insn is a jump, insert EXPR in front [taking care to
5214 handle cc0, etc. properly]. Similarly we need to handle trapping
5215 instructions in the presence of non-call exceptions. */
5216
5217 if (GET_CODE (insn) == JUMP_INSN
5218 || (GET_CODE (insn) == INSN
5219 && (bb->succ->succ_next || (bb->succ->flags & EDGE_ABNORMAL))))
5220 {
5221 #ifdef HAVE_cc0
5222 rtx note;
5223 #endif
5224 /* It should always be the case that we can put these instructions
5225 anywhere in the basic block when performing PRE optimizations.
5226 Check this. */
5227 if (GET_CODE (insn) == INSN && pre
5228 && !TEST_BIT (antloc[bb->index], expr->bitmap_index)
5229 && !TEST_BIT (transp[bb->index], expr->bitmap_index))
5230 abort ();
5231
5232 /* If this is a jump table, then we can't insert stuff here. Since
5233 we know the previous real insn must be the tablejump, we insert
5234 the new instruction just before the tablejump. */
5235 if (GET_CODE (PATTERN (insn)) == ADDR_VEC
5236 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
5237 insn = prev_real_insn (insn);
5238
5239 #ifdef HAVE_cc0
5240 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
5241 if cc0 isn't set. */
5242 note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
5243 if (note)
5244 insn = XEXP (note, 0);
5245 else
5246 {
5247 rtx maybe_cc0_setter = prev_nonnote_insn (insn);
5248 if (maybe_cc0_setter
5249 && INSN_P (maybe_cc0_setter)
5250 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
5251 insn = maybe_cc0_setter;
5252 }
5253 #endif
5254 /* FIXME: What if something in cc0/jump uses value set in new insn? */
5255 new_insn = emit_insn_before (pat, insn);
5256 }
5257
5258 /* Likewise if the last insn is a call, as will happen in the presence
5259 of exception handling. */
5260 else if (GET_CODE (insn) == CALL_INSN
5261 && (bb->succ->succ_next || (bb->succ->flags & EDGE_ABNORMAL)))
5262 {
5263 /* Keeping in mind SMALL_REGISTER_CLASSES and parameters in registers,
5264 we search backward and place the instructions before the first
5265 parameter is loaded. Do this for everyone for consistency and on the
5266 presumption that we'll get better code elsewhere as well.
5267
5268 It should always be the case that we can put these instructions
5269 anywhere in the basic block when performing PRE optimizations.
5270 Check this. */
5271
5272 if (pre
5273 && !TEST_BIT (antloc[bb->index], expr->bitmap_index)
5274 && !TEST_BIT (transp[bb->index], expr->bitmap_index))
5275 abort ();
5276
5277 /* Since different machines initialize their parameter registers
5278 in different orders, assume nothing. Collect the set of all
5279 parameter registers. */
5280 insn = find_first_parameter_load (insn, bb->head);
5281
5282 /* If we found all the parameter loads, then we want to insert
5283 before the first parameter load.
5284
5285 If we did not find all the parameter loads, then we might have
5286 stopped on the head of the block, which could be a CODE_LABEL.
5287 If we inserted before the CODE_LABEL, then we would be putting
5288 the insn in the wrong basic block. In that case, put the insn
5289 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
5290 while (GET_CODE (insn) == CODE_LABEL
5291 || NOTE_INSN_BASIC_BLOCK_P (insn))
5292 insn = NEXT_INSN (insn);
5293
5294 new_insn = emit_insn_before (pat, insn);
5295 }
5296 else
5297 new_insn = emit_insn_after (pat, insn);
5298
5299 while (1)
5300 {
5301 if (INSN_P (pat))
5302 {
5303 add_label_notes (PATTERN (pat), new_insn);
5304 note_stores (PATTERN (pat), record_set_info, pat);
5305 }
5306 if (pat == pat_end)
5307 break;
5308 pat = NEXT_INSN (pat);
5309 }
5310
5311 gcse_create_count++;
5312
5313 if (gcse_file)
5314 {
5315 fprintf (gcse_file, "PRE/HOIST: end of bb %d, insn %d, ",
5316 bb->index, INSN_UID (new_insn));
5317 fprintf (gcse_file, "copying expression %d to reg %d\n",
5318 expr->bitmap_index, regno);
5319 }
5320 }
5321
5322 /* Insert partially redundant expressions on edges in the CFG to make
5323 the expressions fully redundant. */
5324
5325 static int
5326 pre_edge_insert (edge_list, index_map)
5327 struct edge_list *edge_list;
5328 struct expr **index_map;
5329 {
5330 int e, i, j, num_edges, set_size, did_insert = 0;
5331 sbitmap *inserted;
5332
5333 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
5334 if it reaches any of the deleted expressions. */
5335
5336 set_size = pre_insert_map[0]->size;
5337 num_edges = NUM_EDGES (edge_list);
5338 inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
5339 sbitmap_vector_zero (inserted, num_edges);
5340
5341 for (e = 0; e < num_edges; e++)
5342 {
5343 int indx;
5344 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
5345
5346 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
5347 {
5348 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
5349
5350 for (j = indx; insert && j < (int) expr_hash_table.n_elems; j++, insert >>= 1)
5351 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
5352 {
5353 struct expr *expr = index_map[j];
5354 struct occr *occr;
5355
5356 /* Now look at each deleted occurrence of this expression. */
5357 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
5358 {
5359 if (! occr->deleted_p)
5360 continue;
5361
5362 /* Insert this expression on this edge if it would
5363 reach the deleted occurrence in BB. */
5364 if (!TEST_BIT (inserted[e], j))
5365 {
5366 rtx insn;
5367 edge eg = INDEX_EDGE (edge_list, e);
5368
5369 /* We can't insert anything on an abnormal and
5370 critical edge, so we insert the insn at the end of
5371 the previous block. There are several alternatives
5372 detailed in Morgan's book, p. 277 (sec. 10.5), for
5373 handling this situation. This one is easiest for
5374 now. */
5375
5376 if ((eg->flags & EDGE_ABNORMAL) == EDGE_ABNORMAL)
5377 insert_insn_end_bb (index_map[j], bb, 0);
5378 else
5379 {
5380 insn = process_insert_insn (index_map[j]);
5381 insert_insn_on_edge (insn, eg);
5382 }
5383
5384 if (gcse_file)
5385 {
5386 fprintf (gcse_file, "PRE/HOIST: edge (%d,%d), ",
5387 bb->index,
5388 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
5389 fprintf (gcse_file, "copy expression %d\n",
5390 expr->bitmap_index);
5391 }
5392
5393 update_ld_motion_stores (expr);
5394 SET_BIT (inserted[e], j);
5395 did_insert = 1;
5396 gcse_create_count++;
5397 }
5398 }
5399 }
5400 }
5401 }
5402
5403 sbitmap_vector_free (inserted);
5404 return did_insert;
5405 }
5406
5407 /* Copy the result of INSN into the reaching register of EXPR. */
5408
5409 static void
5410 pre_insert_copy_insn (expr, insn)
5411 struct expr *expr;
5412 rtx insn;
5413 {
5414 rtx reg = expr->reaching_reg;
5415 int regno = REGNO (reg);
5416 int indx = expr->bitmap_index;
5417 rtx set = single_set (insn);
5418 rtx new_insn;
5419
5420 if (!set)
5421 abort ();
5422
5423 new_insn = emit_insn_after (gen_move_insn (reg, SET_DEST (set)), insn);
5424
5425 /* Keep register set table up to date. */
5426 record_one_set (regno, new_insn);
5427
5428 gcse_create_count++;
5429
5430 if (gcse_file)
5431 fprintf (gcse_file,
5432 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
5433 BLOCK_NUM (insn), INSN_UID (new_insn), indx,
5434 INSN_UID (insn), regno);
5435 update_ld_motion_stores (expr);
5436 }
5437
5438 /* Copy available expressions that reach the redundant expression
5439 to `reaching_reg'. */
5440
5441 static void
5442 pre_insert_copies ()
5443 {
5444 unsigned int i;
5445 struct expr *expr;
5446 struct occr *occr;
5447 struct occr *avail;
5448
5449 /* For each available expression in the table, copy the result to
5450 `reaching_reg' if the expression reaches a deleted one.
5451
5452 ??? The current algorithm is rather brute force.
5453 Need to do some profiling. */
5454
5455 for (i = 0; i < expr_hash_table.size; i++)
5456 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
5457 {
5458 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
5459 we don't want to insert a copy here because the expression may not
5460 really be redundant. So only insert an insn if the expression was
5461 deleted. This test also avoids further processing if the
5462 expression wasn't deleted anywhere. */
5463 if (expr->reaching_reg == NULL)
5464 continue;
5465
5466 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
5467 {
5468 if (! occr->deleted_p)
5469 continue;
5470
5471 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
5472 {
5473 rtx insn = avail->insn;
5474
5475 /* No need to handle this one if handled already. */
5476 if (avail->copied_p)
5477 continue;
5478
5479 /* Don't handle this one if it's a redundant one. */
5480 if (TEST_BIT (pre_redundant_insns, INSN_CUID (insn)))
5481 continue;
5482
5483 /* Or if the expression doesn't reach the deleted one. */
5484 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
5485 expr,
5486 BLOCK_FOR_INSN (occr->insn)))
5487 continue;
5488
5489 /* Copy the result of avail to reaching_reg. */
5490 pre_insert_copy_insn (expr, insn);
5491 avail->copied_p = 1;
5492 }
5493 }
5494 }
5495 }
5496
5497 /* Emit move from SRC to DEST noting the equivalence with expression computed
5498 in INSN. */
5499 static rtx
5500 gcse_emit_move_after (src, dest, insn)
5501 rtx src, dest, insn;
5502 {
5503 rtx new;
5504 rtx set = single_set (insn), set2;
5505 rtx note;
5506 rtx eqv;
5507
5508 /* This should never fail since we're creating a reg->reg copy
5509 we've verified to be valid. */
5510
5511 new = emit_insn_after (gen_move_insn (dest, src), insn);
5512
5513 /* Note the equivalence for local CSE pass. */
5514 set2 = single_set (new);
5515 if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
5516 return new;
5517 if ((note = find_reg_equal_equiv_note (insn)))
5518 eqv = XEXP (note, 0);
5519 else
5520 eqv = SET_SRC (set);
5521
5522 set_unique_reg_note (new, REG_EQUAL, copy_insn_1 (eqv));
5523
5524 return new;
5525 }
5526
5527 /* Delete redundant computations.
5528 Deletion is done by changing the insn to copy the `reaching_reg' of
5529 the expression into the result of the SET. It is left to later passes
5530 (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
5531
5532 Returns nonzero if a change is made. */
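/* A minimal sketch of the transformation (insn UIDs and register
   numbers here are hypothetical, shown only for illustration):

     before:  (set (reg 65) (plus:SI (reg 60) (reg 61)))
     after:   (set (reg 65) (reg 70))

   where (reg 70) is EXPR->reaching_reg; the computation of the
   expression into (reg 70) is emitted elsewhere, by pre_edge_insert
   and pre_insert_copies.  */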
5533
5534 static int
5535 pre_delete ()
5536 {
5537 unsigned int i;
5538 int changed;
5539 struct expr *expr;
5540 struct occr *occr;
5541
5542 changed = 0;
5543 for (i = 0; i < expr_hash_table.size; i++)
5544 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
5545 {
5546 int indx = expr->bitmap_index;
5547
5548 /* We only need to search antic_occr since we require
5549 ANTLOC != 0. */
5550
5551 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
5552 {
5553 rtx insn = occr->insn;
5554 rtx set;
5555 basic_block bb = BLOCK_FOR_INSN (insn);
5556
5557 if (TEST_BIT (pre_delete_map[bb->index], indx))
5558 {
5559 set = single_set (insn);
5560 if (! set)
5561 abort ();
5562
5563 /* Create a pseudo-reg to store the result of reaching
5564 expressions into. Get the mode for the new pseudo from
5565 the mode of the original destination pseudo. */
5566 if (expr->reaching_reg == NULL)
5567 expr->reaching_reg
5568 = gen_reg_rtx (GET_MODE (SET_DEST (set)));
5569
5570 gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
5571 delete_insn (insn);
5572 occr->deleted_p = 1;
5573 SET_BIT (pre_redundant_insns, INSN_CUID (insn));
5574 changed = 1;
5575 gcse_subst_count++;
5576
5577 if (gcse_file)
5578 {
5579 fprintf (gcse_file,
5580 "PRE: redundant insn %d (expression %d) in ",
5581 INSN_UID (insn), indx);
5582 fprintf (gcse_file, "bb %d, reaching reg is %d\n",
5583 bb->index, REGNO (expr->reaching_reg));
5584 }
5585 }
5586 }
5587 }
5588
5589 return changed;
5590 }
5591
5592 /* Perform GCSE optimizations using PRE.
5593 This is called by one_pre_gcse_pass after all the dataflow analysis
5594 has been done.
5595
5596 This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
5597 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
5598 Compiler Design and Implementation.
5599
5600 ??? A new pseudo reg is created to hold the reaching expression. The nice
5601 thing about the classical approach is that it would try to use an existing
5602 reg. If the register can't be adequately optimized [i.e. we introduce
5603 reload problems], one could add a pass here to propagate the new register
5604 through the block.
5605
5606 ??? We don't handle single sets in PARALLELs because we're [currently] not
5607 able to copy the rest of the parallel when we insert copies to create full
5608 redundancies from partial redundancies. However, there's no reason why we
5609 can't handle PARALLELs in the cases where there are no partial
5610 redundancies. */
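/* The steps performed below, in order: delete the redundant
   computations (pre_delete), insert new computations on the edges
   chosen by the LCM dataflow (pre_edge_insert), copy available
   occurrences into the reaching register where they feed a deleted one
   (pre_insert_copies), and finally commit any pending edge insertions.  */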
5611
5612 static int
5613 pre_gcse ()
5614 {
5615 unsigned int i;
5616 int did_insert, changed;
5617 struct expr **index_map;
5618 struct expr *expr;
5619
5620 /* Compute a mapping from expression number (`bitmap_index') to
5621 hash table entry. */
5622
5623 index_map = (struct expr **) xcalloc (expr_hash_table.n_elems, sizeof (struct expr *));
5624 for (i = 0; i < expr_hash_table.size; i++)
5625 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
5626 index_map[expr->bitmap_index] = expr;
5627
5628 /* Reset bitmap used to track which insns are redundant. */
5629 pre_redundant_insns = sbitmap_alloc (max_cuid);
5630 sbitmap_zero (pre_redundant_insns);
5631
5632 /* Delete the redundant insns first so that
5633 - we know what register to use for the new insns and for the other
5634 ones with reaching expressions
5635 - we know which insns are redundant when we go to create copies */
5636
5637 changed = pre_delete ();
5638
5639 did_insert = pre_edge_insert (edge_list, index_map);
5640
5641 /* In other places with reaching expressions, copy the expression to the
5642 specially allocated pseudo-reg that reaches the redundant expr. */
5643 pre_insert_copies ();
5644 if (did_insert)
5645 {
5646 commit_edge_insertions ();
5647 changed = 1;
5648 }
5649
5650 free (index_map);
5651 sbitmap_free (pre_redundant_insns);
5652 return changed;
5653 }
5654
5655 /* Top level routine to perform one PRE GCSE pass.
5656
5657 Return nonzero if a change was made. */
5658
5659 static int
5660 one_pre_gcse_pass (pass)
5661 int pass;
5662 {
5663 int changed = 0;
5664
5665 gcse_subst_count = 0;
5666 gcse_create_count = 0;
5667
5668 alloc_hash_table (max_cuid, &expr_hash_table, 0);
5669 add_noreturn_fake_exit_edges ();
5670 if (flag_gcse_lm)
5671 compute_ld_motion_mems ();
5672
5673 compute_hash_table (&expr_hash_table);
5674 trim_ld_motion_mems ();
5675 if (gcse_file)
5676 dump_hash_table (gcse_file, "Expression", &expr_hash_table);
5677
5678 if (expr_hash_table.n_elems > 0)
5679 {
5680 alloc_pre_mem (last_basic_block, expr_hash_table.n_elems);
5681 compute_pre_data ();
5682 changed |= pre_gcse ();
5683 free_edge_list (edge_list);
5684 free_pre_mem ();
5685 }
5686
5687 free_ldst_mems ();
5688 remove_fake_edges ();
5689 free_hash_table (&expr_hash_table);
5690
5691 if (gcse_file)
5692 {
5693 fprintf (gcse_file, "\nPRE GCSE of %s, pass %d: %d bytes needed, ",
5694 current_function_name, pass, bytes_used);
5695 fprintf (gcse_file, "%d substs, %d insns created\n",
5696 gcse_subst_count, gcse_create_count);
5697 }
5698
5699 return changed;
5700 }
5701 \f
5702 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to INSN.
5703 If notes are added to an insn which references a CODE_LABEL, the
5704 LABEL_NUSES count is incremented. We have to add REG_LABEL notes,
5705 because the following loop optimization pass requires them. */
5706
5707 /* ??? This is very similar to the loop.c add_label_notes function. We
5708 could probably share code here. */
5709
5710 /* ??? If there was a jump optimization pass after gcse and before loop,
5711 then we would not need to do this here, because jump would add the
5712 necessary REG_LABEL notes. */
5713
5714 static void
5715 add_label_notes (x, insn)
5716 rtx x;
5717 rtx insn;
5718 {
5719 enum rtx_code code = GET_CODE (x);
5720 int i, j;
5721 const char *fmt;
5722
5723 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
5724 {
5725 /* This code used to ignore labels that referred to dispatch tables to
5726 avoid flow generating (slightly) worse code.
5727
5728 We no longer ignore such label references (see LABEL_REF handling in
5729 mark_jump_label for additional information). */
5730
5731 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
5732 REG_NOTES (insn));
5733 if (LABEL_P (XEXP (x, 0)))
5734 LABEL_NUSES (XEXP (x, 0))++;
5735 return;
5736 }
5737
5738 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
5739 {
5740 if (fmt[i] == 'e')
5741 add_label_notes (XEXP (x, i), insn);
5742 else if (fmt[i] == 'E')
5743 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5744 add_label_notes (XVECEXP (x, i, j), insn);
5745 }
5746 }
5747
5748 /* Compute transparent outgoing information for each block.
5749
5750 An expression is transparent to an edge unless it is killed by
5751 the edge itself. This can only happen with abnormal control flow,
5752 when the edge is traversed through a call. This happens with
5753 non-local labels and exceptions.
5754
5755 This would not be necessary if we split the edge. While this is
5756 normally impossible for abnormal critical edges, with some effort
5757 it should be possible with exception handling, since we still have
5758 control over which handler should be invoked. But due to increased
5759 EH table sizes, this may not be worthwhile. */
5760
5761 static void
5762 compute_transpout ()
5763 {
5764 basic_block bb;
5765 unsigned int i;
5766 struct expr *expr;
5767
5768 sbitmap_vector_ones (transpout, last_basic_block);
5769
5770 FOR_EACH_BB (bb)
5771 {
5772 /* Note that flow inserted a nop at the end of basic blocks that
5773 end in call instructions for reasons other than abnormal
5774 control flow. */
5775 if (GET_CODE (bb->end) != CALL_INSN)
5776 continue;
5777
5778 for (i = 0; i < expr_hash_table.size; i++)
5779 for (expr = expr_hash_table.table[i]; expr ; expr = expr->next_same_hash)
5780 if (GET_CODE (expr->expr) == MEM)
5781 {
5782 if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
5783 && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
5784 continue;
5785
5786 /* ??? Optimally, we would use interprocedural alias
5787 analysis to determine if this mem is actually killed
5788 by this call. */
5789 RESET_BIT (transpout[bb->index], expr->bitmap_index);
5790 }
5791 }
5792 }
5793
5794 /* Removal of useless null pointer checks */
5795
5796 /* Called via note_stores. X is set by SETTER. If X is a register we must
5797 invalidate nonnull_local and set nonnull_killed. DATA is really a
5798 `null_pointer_info *'.
5799
5800 We ignore hard registers. */
5801
5802 static void
5803 invalidate_nonnull_info (x, setter, data)
5804 rtx x;
5805 rtx setter ATTRIBUTE_UNUSED;
5806 void *data;
5807 {
5808 unsigned int regno;
5809 struct null_pointer_info *npi = (struct null_pointer_info *) data;
5810
5811 while (GET_CODE (x) == SUBREG)
5812 x = SUBREG_REG (x);
5813
5814 /* Ignore anything that is not a register or is a hard register. */
5815 if (GET_CODE (x) != REG
5816 || REGNO (x) < npi->min_reg
5817 || REGNO (x) >= npi->max_reg)
5818 return;
5819
5820 regno = REGNO (x) - npi->min_reg;
5821
5822 RESET_BIT (npi->nonnull_local[npi->current_block->index], regno);
5823 SET_BIT (npi->nonnull_killed[npi->current_block->index], regno);
5824 }
5825
5826 /* Do null-pointer check elimination for the registers indicated in
5827 NPI. NONNULL_AVIN and NONNULL_AVOUT are pre-allocated sbitmaps;
5828 they are not our responsibility to free. */
5829
5830 static int
5831 delete_null_pointer_checks_1 (block_reg, nonnull_avin,
5832 nonnull_avout, npi)
5833 unsigned int *block_reg;
5834 sbitmap *nonnull_avin;
5835 sbitmap *nonnull_avout;
5836 struct null_pointer_info *npi;
5837 {
5838 basic_block bb, current_block;
5839 sbitmap *nonnull_local = npi->nonnull_local;
5840 sbitmap *nonnull_killed = npi->nonnull_killed;
5841 int something_changed = 0;
5842
5843 /* Compute local properties, nonnull and killed. A register will have
5844 the nonnull property if at the end of the current block its value is
5845 known to be nonnull. The killed property indicates that somewhere in
5846 the block any information we had about the register is killed.
5847
5848 Note that a register can have both properties in a single block. That
5849 indicates that it's killed, then later in the block a new value is
5850 computed. */
5851 sbitmap_vector_zero (nonnull_local, last_basic_block);
5852 sbitmap_vector_zero (nonnull_killed, last_basic_block);
5853
5854 FOR_EACH_BB (current_block)
5855 {
5856 rtx insn, stop_insn;
5857
5858 /* Set the current block for invalidate_nonnull_info. */
5859 npi->current_block = current_block;
5860
5861 /* Scan each insn in the basic block looking for memory references and
5862 register sets. */
5863 stop_insn = NEXT_INSN (current_block->end);
5864 for (insn = current_block->head;
5865 insn != stop_insn;
5866 insn = NEXT_INSN (insn))
5867 {
5868 rtx set;
5869 rtx reg;
5870
5871 /* Ignore anything that is not a normal insn. */
5872 if (! INSN_P (insn))
5873 continue;
5874
5875 /* Basically ignore anything that is not a simple SET. We do have
5876 to make sure to invalidate nonnull_local and set nonnull_killed
5877 for such insns though. */
5878 set = single_set (insn);
5879 if (!set)
5880 {
5881 note_stores (PATTERN (insn), invalidate_nonnull_info, npi);
5882 continue;
5883 }
5884
5885 /* See if we've got a usable memory load. We handle it first
5886 in case it uses its address register as a dest (which kills
5887 the nonnull property). */
5888 if (GET_CODE (SET_SRC (set)) == MEM
5889 && GET_CODE ((reg = XEXP (SET_SRC (set), 0))) == REG
5890 && REGNO (reg) >= npi->min_reg
5891 && REGNO (reg) < npi->max_reg)
5892 SET_BIT (nonnull_local[current_block->index],
5893 REGNO (reg) - npi->min_reg);
5894
5895 /* Now invalidate stuff clobbered by this insn. */
5896 note_stores (PATTERN (insn), invalidate_nonnull_info, npi);
5897
5898 /* And handle stores; we do these last since any sets in INSN cannot
5899 kill the nonnull property if it is derived from a MEM
5900 appearing in a SET_DEST. */
5901 if (GET_CODE (SET_DEST (set)) == MEM
5902 && GET_CODE ((reg = XEXP (SET_DEST (set), 0))) == REG
5903 && REGNO (reg) >= npi->min_reg
5904 && REGNO (reg) < npi->max_reg)
5905 SET_BIT (nonnull_local[current_block->index],
5906 REGNO (reg) - npi->min_reg);
5907 }
5908 }
5909
5910 /* Now compute global properties based on the local properties. This
5911 is a classic global availability algorithm. */
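  /* Restated for reference, assuming the usual formulation of the
     availability equations that compute_available implements:

	 AVOUT(bb) = NONNULL_LOCAL(bb) | (AVIN(bb) & ~NONNULL_KILLED(bb))
	 AVIN(bb)  = intersection of AVOUT(p) over all predecessors p.  */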
5912 compute_available (nonnull_local, nonnull_killed,
5913 nonnull_avout, nonnull_avin);
5914
5915 /* Now look at each bb and see if it ends with a compare of a value
5916 against zero. */
5917 FOR_EACH_BB (bb)
5918 {
5919 rtx last_insn = bb->end;
5920 rtx condition, earliest;
5921 int compare_and_branch;
5922
5923 /* Since MIN_REG is always at least FIRST_PSEUDO_REGISTER, and
5924 since BLOCK_REG[BB] is zero if this block did not end with a
5925 comparison against zero, this condition works. */
5926 if (block_reg[bb->index] < npi->min_reg
5927 || block_reg[bb->index] >= npi->max_reg)
5928 continue;
5929
5930 /* LAST_INSN is a conditional jump. Get its condition. */
5931 condition = get_condition (last_insn, &earliest);
5932
5933 /* If we can't determine the condition then skip. */
5934 if (! condition)
5935 continue;
5936
5937 /* Is the register known to have a nonzero value? */
5938 if (!TEST_BIT (nonnull_avout[bb->index], block_reg[bb->index] - npi->min_reg))
5939 continue;
5940
5941 /* Try to compute whether the compare/branch at the end of this block is one or
5942 two instructions. */
5943 if (earliest == last_insn)
5944 compare_and_branch = 1;
5945 else if (earliest == prev_nonnote_insn (last_insn))
5946 compare_and_branch = 2;
5947 else
5948 continue;
5949
5950 /* We know the register in this comparison is nonnull at exit from
5951 this block. We can optimize this comparison. */
5952 if (GET_CODE (condition) == NE)
5953 {
5954 rtx new_jump;
5955
5956 new_jump = emit_jump_insn_after (gen_jump (JUMP_LABEL (last_insn)),
5957 last_insn);
5958 JUMP_LABEL (new_jump) = JUMP_LABEL (last_insn);
5959 LABEL_NUSES (JUMP_LABEL (new_jump))++;
5960 emit_barrier_after (new_jump);
5961 }
5962
5963 something_changed = 1;
5964 delete_insn (last_insn);
5965 if (compare_and_branch == 2)
5966 delete_insn (earliest);
5967 purge_dead_edges (bb);
5968
5969 /* Don't check this block again. (Note that BLOCK_END is
5970 invalid here; we deleted the last instruction in the
5971 block.) */
5972 block_reg[bb->index] = 0;
5973 }
5974
5975 return something_changed;
5976 }
5977
5978 /* Find EQ/NE comparisons against zero which can be (indirectly) evaluated
5979 at compile time.
5980
5981 This is conceptually similar to global constant/copy propagation and
5982 classic global CSE (it even uses the same dataflow equations as cprop).
5983
5984 If a register is used as a memory address with the form (mem (reg)), then we
5985 know that REG cannot be zero at that point in the program.  Any instruction
5986 which sets REG "kills" this property.
5987
5988 So, if every path leading to a conditional branch has an available memory
5989 reference of that form, then we know the register cannot have the value
5990 zero at the conditional branch.
5991
5992 So we merely need to compute the local properties and propagate that data
5993 around the cfg, then optimize where possible.
5994
5995 We run this pass twice: once before CSE, then again after CSE.  This
5996 has proven to be the most profitable approach. It is rare for new
5997 optimization opportunities of this nature to appear after the first CSE
5998 pass.
5999
6000 This could probably be integrated with global cprop with a little work. */
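/* A minimal illustration (hypothetical pseudo-RTL; register names are
   made up):

       (set (reg x) (mem (reg p)))        ; P is dereferenced => nonnull here
       ...                                ; no intervening set of P
       (jump_insn ... (ne (reg p) (const_int 0)) ...)

   If the dereference is available on every path to the branch, the NE
   test is known to be true, so the conditional jump is replaced by an
   unconditional one; an EQ test against zero is simply deleted.  */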
6001
6002 int
6003 delete_null_pointer_checks (f)
6004 rtx f ATTRIBUTE_UNUSED;
6005 {
6006 sbitmap *nonnull_avin, *nonnull_avout;
6007 unsigned int *block_reg;
6008 basic_block bb;
6009 int reg;
6010 int regs_per_pass;
6011 int max_reg;
6012 struct null_pointer_info npi;
6013 int something_changed = 0;
6014
6015 /* If we have only a single block, then there's nothing to do. */
6016 if (n_basic_blocks <= 1)
6017 return 0;
6018
6019 /* Trying to perform global optimizations on flow graphs which have
6020 a high connectivity will take a long time and is unlikely to be
6021 particularly useful.
6022
6023 In normal circumstances a cfg should have about twice as many edges
6024 as blocks. But we do not want to punish small functions which have
6025 a couple of switch statements.  So we require a relatively large number
6026 of basic blocks and the ratio of edges to blocks to be high. */
6027 if (n_basic_blocks > 1000 && n_edges / n_basic_blocks >= 20)
6028 return 0;
6029
6030 /* We need four bitmaps, each with a bit for each register in each
6031 basic block. */
6032 max_reg = max_reg_num ();
6033 regs_per_pass = get_bitmap_width (4, last_basic_block, max_reg);
6034
6035 /* Allocate bitmaps to hold local and global properties. */
6036 npi.nonnull_local = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
6037 npi.nonnull_killed = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
6038 nonnull_avin = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
6039 nonnull_avout = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
6040
6041 /* Go through the basic blocks, seeing whether or not each block
6042 ends with a conditional branch whose condition is a comparison
6043 against zero. Record the register compared in BLOCK_REG. */
6044 block_reg = (unsigned int *) xcalloc (last_basic_block, sizeof (int));
6045 FOR_EACH_BB (bb)
6046 {
6047 rtx last_insn = bb->end;
6048 rtx condition, earliest, reg;
6049
6050 /* We only want conditional branches. */
6051 if (GET_CODE (last_insn) != JUMP_INSN
6052 || !any_condjump_p (last_insn)
6053 || !onlyjump_p (last_insn))
6054 continue;
6055
6056 /* LAST_INSN is a conditional jump. Get its condition. */
6057 condition = get_condition (last_insn, &earliest);
6058
6059 /* If we were unable to get the condition, or it is not an equality
6060 comparison against zero then there's nothing we can do. */
6061 if (!condition
6062 || (GET_CODE (condition) != NE && GET_CODE (condition) != EQ)
6063 || GET_CODE (XEXP (condition, 1)) != CONST_INT
6064 || (XEXP (condition, 1)
6065 != CONST0_RTX (GET_MODE (XEXP (condition, 0)))))
6066 continue;
6067
6068 /* We must be checking a register against zero. */
6069 reg = XEXP (condition, 0);
6070 if (GET_CODE (reg) != REG)
6071 continue;
6072
6073 block_reg[bb->index] = REGNO (reg);
6074 }
6075
6076 /* Go through the algorithm for each block of registers. */
6077 for (reg = FIRST_PSEUDO_REGISTER; reg < max_reg; reg += regs_per_pass)
6078 {
6079 npi.min_reg = reg;
6080 npi.max_reg = MIN (reg + regs_per_pass, max_reg);
6081 something_changed |= delete_null_pointer_checks_1 (block_reg,
6082 nonnull_avin,
6083 nonnull_avout,
6084 &npi);
6085 }
6086
6087 /* Free the table of registers compared at the end of every block. */
6088 free (block_reg);
6089
6090 /* Free bitmaps. */
6091 sbitmap_vector_free (npi.nonnull_local);
6092 sbitmap_vector_free (npi.nonnull_killed);
6093 sbitmap_vector_free (nonnull_avin);
6094 sbitmap_vector_free (nonnull_avout);
6095
6096 return something_changed;
6097 }
6098
6099 /* Code Hoisting variables and subroutines. */
6100
6101 /* Very busy expressions. */
6102 static sbitmap *hoist_vbein;
6103 static sbitmap *hoist_vbeout;
6104
6105 /* Hoistable expressions. */
6106 static sbitmap *hoist_exprs;
6107
6108 /* Dominator information.  */
6109 dominance_info dominators;
6110
6111 /* ??? We could compute post dominators and run this algorithm in
6112 reverse to perform tail merging, doing so would probably be
6113 more effective than the tail merging code in jump.c.
6114
6115 It's unclear if tail merging could be run in parallel with
6116 code hoisting. It would be nice. */
6117
6118 /* Allocate vars used for code hoisting analysis. */
6119
6120 static void
6121 alloc_code_hoist_mem (n_blocks, n_exprs)
6122 int n_blocks, n_exprs;
6123 {
6124 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
6125 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
6126 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
6127
6128 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
6129 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
6130 hoist_exprs = sbitmap_vector_alloc (n_blocks, n_exprs);
6131 transpout = sbitmap_vector_alloc (n_blocks, n_exprs);
6132 }
6133
6134 /* Free vars used for code hoisting analysis. */
6135
6136 static void
6137 free_code_hoist_mem ()
6138 {
6139 sbitmap_vector_free (antloc);
6140 sbitmap_vector_free (transp);
6141 sbitmap_vector_free (comp);
6142
6143 sbitmap_vector_free (hoist_vbein);
6144 sbitmap_vector_free (hoist_vbeout);
6145 sbitmap_vector_free (hoist_exprs);
6146 sbitmap_vector_free (transpout);
6147
6148 free_dominance_info (dominators);
6149 }
6150
6151 /* Compute the very busy expressions at entry/exit from each block.
6152
6153 An expression is very busy if all paths from a given point
6154 compute the expression. */
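/* The fixed point computed below, restated from the code for
   reference:

       VBEIN(bb)  = ANTLOC(bb) | (VBEOUT(bb) & TRANSP(bb))
       VBEOUT(bb) = intersection of VBEIN(s) over all successors s of bb
		    (the exit block's VBEOUT stays empty)

   iterating over the blocks in reverse order until nothing changes.  */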
6155
6156 static void
6157 compute_code_hoist_vbeinout ()
6158 {
6159 int changed, passes;
6160 basic_block bb;
6161
6162 sbitmap_vector_zero (hoist_vbeout, last_basic_block);
6163 sbitmap_vector_zero (hoist_vbein, last_basic_block);
6164
6165 passes = 0;
6166 changed = 1;
6167
6168 while (changed)
6169 {
6170 changed = 0;
6171
6172 /* We scan the blocks in the reverse order to speed up
6173 the convergence. */
6174 FOR_EACH_BB_REVERSE (bb)
6175 {
6176 changed |= sbitmap_a_or_b_and_c_cg (hoist_vbein[bb->index], antloc[bb->index],
6177 hoist_vbeout[bb->index], transp[bb->index]);
6178 if (bb->next_bb != EXIT_BLOCK_PTR)
6179 sbitmap_intersection_of_succs (hoist_vbeout[bb->index], hoist_vbein, bb->index);
6180 }
6181
6182 passes++;
6183 }
6184
6185 if (gcse_file)
6186 fprintf (gcse_file, "hoisting vbeinout computation: %d passes\n", passes);
6187 }
6188
6189 /* Top level routine to do the dataflow analysis needed by code hoisting. */
6190
6191 static void
6192 compute_code_hoist_data ()
6193 {
6194 compute_local_properties (transp, comp, antloc, &expr_hash_table);
6195 compute_transpout ();
6196 compute_code_hoist_vbeinout ();
6197 dominators = calculate_dominance_info (CDI_DOMINATORS);
6198 if (gcse_file)
6199 fprintf (gcse_file, "\n");
6200 }
6201
6202 /* Determine if the expression identified by EXPR_INDEX would
6203 reach BB unimpaired if it was placed at the end of EXPR_BB.
6204
6205 It's unclear exactly what Muchnick meant by "unimpaired".  It seems
6206 to me that the expression must either be computed or transparent in
6207 *every* block in the path(s) from EXPR_BB to BB. Any other definition
6208 would allow the expression to be hoisted out of loops, even if
6209 the expression wasn't a loop invariant.
6210
6211 Contrast this to reachability for PRE where an expression is
6212 considered reachable if *any* path reaches instead of *all*
6213 paths. */
6214
6215 static int
6216 hoist_expr_reaches_here_p (expr_bb, expr_index, bb, visited)
6217 basic_block expr_bb;
6218 int expr_index;
6219 basic_block bb;
6220 char *visited;
6221 {
6222 edge pred;
6223 int visited_allocated_locally = 0;
6224
6225
6226 if (visited == NULL)
6227 {
6228 visited_allocated_locally = 1;
6229 visited = xcalloc (last_basic_block, 1);
6230 }
6231
6232 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
6233 {
6234 basic_block pred_bb = pred->src;
6235
6236 if (pred->src == ENTRY_BLOCK_PTR)
6237 break;
6238 else if (pred_bb == expr_bb)
6239 continue;
6240 else if (visited[pred_bb->index])
6241 continue;
6242
6243 /* Does this predecessor generate this expression? */
6244 else if (TEST_BIT (comp[pred_bb->index], expr_index))
6245 break;
6246 else if (! TEST_BIT (transp[pred_bb->index], expr_index))
6247 break;
6248
6249 /* Not killed. */
6250 else
6251 {
6252 visited[pred_bb->index] = 1;
6253 if (! hoist_expr_reaches_here_p (expr_bb, expr_index,
6254 pred_bb, visited))
6255 break;
6256 }
6257 }
6258 if (visited_allocated_locally)
6259 free (visited);
6260
6261 return (pred == NULL);
6262 }
6263 \f
6264 /* Actually perform code hoisting. */
6265
6266 static void
6267 hoist_code ()
6268 {
6269 basic_block bb, dominated;
6270 basic_block *domby;
6271 unsigned int domby_len;
6272 unsigned int i,j;
6273 struct expr **index_map;
6274 struct expr *expr;
6275
6276 sbitmap_vector_zero (hoist_exprs, last_basic_block);
6277
6278 /* Compute a mapping from expression number (`bitmap_index') to
6279 hash table entry. */
6280
6281 index_map = (struct expr **) xcalloc (expr_hash_table.n_elems, sizeof (struct expr *));
6282 for (i = 0; i < expr_hash_table.size; i++)
6283 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
6284 index_map[expr->bitmap_index] = expr;
6285
6286 /* Walk over each basic block looking for potentially hoistable
6287 expressions; nothing gets hoisted from the entry block.  */
6288 FOR_EACH_BB (bb)
6289 {
6290 int found = 0;
6291 int insn_inserted_p;
6292
6293 domby_len = get_dominated_by (dominators, bb, &domby);
6294 /* Examine each expression that is very busy at the exit of this
6295 block. These are the potentially hoistable expressions. */
6296 for (i = 0; i < hoist_vbeout[bb->index]->n_bits; i++)
6297 {
6298 int hoistable = 0;
6299
6300 if (TEST_BIT (hoist_vbeout[bb->index], i)
6301 && TEST_BIT (transpout[bb->index], i))
6302 {
6303 /* We've found a potentially hoistable expression, now
6304 we look at every block BB dominates to see if it
6305 computes the expression. */
6306 for (j = 0; j < domby_len; j++)
6307 {
6308 dominated = domby[j];
6309 /* Ignore self dominance. */
6310 if (bb == dominated)
6311 continue;
6312 /* We've found a dominated block, now see if it computes
6313 the busy expression and whether or not moving that
6314 expression to the "beginning" of that block is safe. */
6315 if (!TEST_BIT (antloc[dominated->index], i))
6316 continue;
6317
6318 /* Note if the expression would reach the dominated block
6319 unimpaired if it was placed at the end of BB.
6320
6321 Keep track of how many times this expression is hoistable
6322 from a dominated block into BB. */
6323 if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
6324 hoistable++;
6325 }
6326
6327 /* If we found more than one hoistable occurrence of this
6328 expression, then note it in the bitmap of expressions to
6329 hoist. It makes no sense to hoist things which are computed
6330 in only one BB, and doing so tends to pessimize register
6331 allocation. One could increase this value to try harder
6332 to avoid any possible code expansion due to register
6333 allocation issues; however experiments have shown that
6334 the vast majority of hoistable expressions are only movable
6335 from two successors, so raising this threshold is likely
6336 to nullify any benefit we get from code hoisting. */
6337 if (hoistable > 1)
6338 {
6339 SET_BIT (hoist_exprs[bb->index], i);
6340 found = 1;
6341 }
6342 }
6343 }
6344 /* If we found nothing to hoist, then quit now. */
6345 if (! found)
6346 {
6347 free (domby);
6348 continue;
6349 }
6350
6351 /* Loop over all the hoistable expressions. */
6352 for (i = 0; i < hoist_exprs[bb->index]->n_bits; i++)
6353 {
6354 /* We want to insert the expression into BB only once, so
6355 note when we've inserted it. */
6356 insn_inserted_p = 0;
6357
6358 /* These tests should be the same as the tests above. */
6359 if (TEST_BIT (hoist_vbeout[bb->index], i))
6360 {
6361 /* We've found a potentially hoistable expression, now
6362 we look at every block BB dominates to see if it
6363 computes the expression. */
6364 for (j = 0; j < domby_len; j++)
6365 {
6366 dominated = domby[j];
6367 /* Ignore self dominance. */
6368 if (bb == dominated)
6369 continue;
6370
6371 /* We've found a dominated block, now see if it computes
6372 the busy expression and whether or not moving that
6373 expression to the "beginning" of that block is safe. */
6374 if (!TEST_BIT (antloc[dominated->index], i))
6375 continue;
6376
6377 /* The expression is computed in the dominated block and
6378 it would be safe to compute it at the start of the
6379 dominated block. Now we have to determine if the
6380 expression would reach the dominated block if it was
6381 placed at the end of BB. */
6382 if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
6383 {
6384 struct expr *expr = index_map[i];
6385 struct occr *occr = expr->antic_occr;
6386 rtx insn;
6387 rtx set;
6388
6389 /* Find the right occurrence of this expression. */
6390 while (occr && BLOCK_FOR_INSN (occr->insn) != dominated)
6391 occr = occr->next;
6392
6393 /* Should never happen. */
6394 if (!occr)
6395 abort ();
6396
6397 insn = occr->insn;
6398
6399 set = single_set (insn);
6400 if (! set)
6401 abort ();
6402
6403 /* Create a pseudo-reg to store the result of reaching
6404 expressions into. Get the mode for the new pseudo
6405 from the mode of the original destination pseudo. */
6406 if (expr->reaching_reg == NULL)
6407 expr->reaching_reg
6408 = gen_reg_rtx (GET_MODE (SET_DEST (set)));
6409
6410 gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
6411 delete_insn (insn);
6412 occr->deleted_p = 1;
6413 if (!insn_inserted_p)
6414 {
6415 insert_insn_end_bb (index_map[i], bb, 0);
6416 insn_inserted_p = 1;
6417 }
6418 }
6419 }
6420 }
6421 }
6422 free (domby);
6423 }
6424
6425 free (index_map);
6426 }
6427
6428 /* Top level routine to perform one code hoisting (aka unification) pass.
6429
6430 Return nonzero if a change was made. */
6431
6432 static int
6433 one_code_hoisting_pass ()
6434 {
6435 int changed = 0;
6436
6437 alloc_hash_table (max_cuid, &expr_hash_table, 0);
6438 compute_hash_table (&expr_hash_table);
6439 if (gcse_file)
6440 dump_hash_table (gcse_file, "Code Hoisting Expressions", &expr_hash_table);
6441
6442 if (expr_hash_table.n_elems > 0)
6443 {
6444 alloc_code_hoist_mem (last_basic_block, expr_hash_table.n_elems);
6445 compute_code_hoist_data ();
6446 hoist_code ();
6447 free_code_hoist_mem ();
6448 }
6449
6450 free_hash_table (&expr_hash_table);
6451
6452 return changed;
6453 }
6454 \f
6455 /* Here we provide the things required to do store motion towards
6456 the exit. In order for this to be effective, gcse also needed to
6457 be taught how to move a load when it is killed only by a store to itself.
6458
6459 int i;
6460 float a[10];
6461
6462 void foo(float scale)
6463 {
6464 for (i=0; i<10; i++)
6465 a[i] *= scale;
6466 }
6467
6468 'i' is both loaded and stored to in the loop. Normally, gcse cannot move
6469 the load out since it's live around the loop, and stored at the bottom
6470 of the loop.
6471
6472 The 'Load Motion' referred to and implemented in this file is
6473 an enhancement to gcse which, when using edge-based LCM, recognizes
6474 this situation and allows gcse to move the load out of the loop.
6475
6476 Once gcse has hoisted the load, store motion can then push this
6477 load towards the exit, and we end up with no loads or stores of 'i'
6478 in the loop. */
6479
6480 /* This will search the ldst list for a matching expression. If it
6481 doesn't find one, we create one and initialize it. */
6482
6483 static struct ls_expr *
6484 ldst_entry (x)
6485 rtx x;
6486 {
6487 struct ls_expr * ptr;
6488
6489 for (ptr = first_ls_expr(); ptr != NULL; ptr = next_ls_expr (ptr))
6490 if (expr_equiv_p (ptr->pattern, x))
6491 break;
6492
6493 if (!ptr)
6494 {
6495 ptr = (struct ls_expr *) xmalloc (sizeof (struct ls_expr));
6496
6497 ptr->next = pre_ldst_mems;
6498 ptr->expr = NULL;
6499 ptr->pattern = x;
6500 ptr->loads = NULL_RTX;
6501 ptr->stores = NULL_RTX;
6502 ptr->reaching_reg = NULL_RTX;
6503 ptr->invalid = 0;
6504 ptr->index = 0;
6505 ptr->hash_index = 0;
6506 pre_ldst_mems = ptr;
6507 }
6508
6509 return ptr;
6510 }
6511
6512 /* Free up an individual ldst entry. */
6513
6514 static void
6515 free_ldst_entry (ptr)
6516 struct ls_expr * ptr;
6517 {
6518 free_INSN_LIST_list (& ptr->loads);
6519 free_INSN_LIST_list (& ptr->stores);
6520
6521 free (ptr);
6522 }
6523
6524 /* Free up all memory associated with the ldst list. */
6525
6526 static void
6527 free_ldst_mems ()
6528 {
6529 while (pre_ldst_mems)
6530 {
6531 struct ls_expr * tmp = pre_ldst_mems;
6532
6533 pre_ldst_mems = pre_ldst_mems->next;
6534
6535 free_ldst_entry (tmp);
6536 }
6537
6538 pre_ldst_mems = NULL;
6539 }
6540
6541 /* Dump debugging info about the ldst list. */
6542
6543 static void
6544 print_ldst_list (file)
6545 FILE * file;
6546 {
6547 struct ls_expr * ptr;
6548
6549 fprintf (file, "LDST list: \n");
6550
6551 for (ptr = first_ls_expr(); ptr != NULL; ptr = next_ls_expr (ptr))
6552 {
6553 fprintf (file, " Pattern (%3d): ", ptr->index);
6554
6555 print_rtl (file, ptr->pattern);
6556
6557 fprintf (file, "\n Loads : ");
6558
6559 if (ptr->loads)
6560 print_rtl (file, ptr->loads);
6561 else
6562 fprintf (file, "(nil)");
6563
6564 fprintf (file, "\n Stores : ");
6565
6566 if (ptr->stores)
6567 print_rtl (file, ptr->stores);
6568 else
6569 fprintf (file, "(nil)");
6570
6571 fprintf (file, "\n\n");
6572 }
6573
6574 fprintf (file, "\n");
6575 }
6576
6577 /* Return the valid ldst list entry whose pattern matches X, or NULL if there is none.  */
6578
6579 static struct ls_expr *
6580 find_rtx_in_ldst (x)
6581 rtx x;
6582 {
6583 struct ls_expr * ptr;
6584
6585 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
6586 if (expr_equiv_p (ptr->pattern, x) && ! ptr->invalid)
6587 return ptr;
6588
6589 return NULL;
6590 }
6591
6592 /* Assign each element of the list of mems a monotonically increasing value. */
6593
6594 static int
6595 enumerate_ldsts ()
6596 {
6597 struct ls_expr * ptr;
6598 int n = 0;
6599
6600 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
6601 ptr->index = n++;
6602
6603 return n;
6604 }
6605
6606 /* Return first item in the list. */
6607
6608 static inline struct ls_expr *
6609 first_ls_expr ()
6610 {
6611 return pre_ldst_mems;
6612 }
6613
6614 /* Return the next item in the list after the specified one.  */
6615
6616 static inline struct ls_expr *
6617 next_ls_expr (ptr)
6618 struct ls_expr * ptr;
6619 {
6620 return ptr->next;
6621 }
6622 \f
6623 /* Load Motion for loads which only kill themselves. */
6624
6625 /* Return true if X is a simple MEM operation, with no registers or
6626 side effects.  These are the types of loads we consider for the
6627 ld_motion list; otherwise we let the usual aliasing take care of it.  */
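/* For instance, a reference like (mem:SI (symbol_ref "x")) would
   qualify, while one whose address can vary, such as (mem:SI (reg 60))
   for an ordinary pseudo, would not (modes and register numbers here
   are only illustrative).  */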
6628
6629 static int
6630 simple_mem (x)
6631 rtx x;
6632 {
6633 if (GET_CODE (x) != MEM)
6634 return 0;
6635
6636 if (MEM_VOLATILE_P (x))
6637 return 0;
6638
6639 if (GET_MODE (x) == BLKmode)
6640 return 0;
6641
6642 if (!rtx_varies_p (XEXP (x, 0), 0))
6643 return 1;
6644
6645 return 0;
6646 }
6647
6648 /* Make sure there isn't a buried reference in this pattern anywhere.
6649 If there is, invalidate the entry for it since we're not capable
6650 of fixing it up just yet.  We have to be sure we know about ALL
6651 loads since the aliasing code will allow each entry in the
6652 ld_motion list to not-alias itself.  If we miss a load, we will get
6653 the wrong value since gcse might common it and we won't know to
6654 fix it up. */
6655
6656 static void
6657 invalidate_any_buried_refs (x)
6658 rtx x;
6659 {
6660 const char * fmt;
6661 int i, j;
6662 struct ls_expr * ptr;
6663
6664 /* Invalidate it in the list. */
6665 if (GET_CODE (x) == MEM && simple_mem (x))
6666 {
6667 ptr = ldst_entry (x);
6668 ptr->invalid = 1;
6669 }
6670
6671 /* Recursively process the insn. */
6672 fmt = GET_RTX_FORMAT (GET_CODE (x));
6673
6674 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6675 {
6676 if (fmt[i] == 'e')
6677 invalidate_any_buried_refs (XEXP (x, i));
6678 else if (fmt[i] == 'E')
6679 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6680 invalidate_any_buried_refs (XVECEXP (x, i, j));
6681 }
6682 }
6683
6684 /* Find all the 'simple' MEMs which are used in LOADs and STORES. Simple
6685 being defined as MEM loads and stores to symbols, with no
6686 side effects and no registers in the expression. If there are any
6687 uses/defs which don't match these criteria, the entry is invalidated and
6688 trimmed out later. */
6689
6690 static void
6691 compute_ld_motion_mems ()
6692 {
6693 struct ls_expr * ptr;
6694 basic_block bb;
6695 rtx insn;
6696
6697 pre_ldst_mems = NULL;
6698
6699 FOR_EACH_BB (bb)
6700 {
6701 for (insn = bb->head;
6702 insn && insn != NEXT_INSN (bb->end);
6703 insn = NEXT_INSN (insn))
6704 {
6705 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
6706 {
6707 if (GET_CODE (PATTERN (insn)) == SET)
6708 {
6709 rtx src = SET_SRC (PATTERN (insn));
6710 rtx dest = SET_DEST (PATTERN (insn));
6711
6712 /* Check for a simple LOAD... */
6713 if (GET_CODE (src) == MEM && simple_mem (src))
6714 {
6715 ptr = ldst_entry (src);
6716 if (GET_CODE (dest) == REG)
6717 ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
6718 else
6719 ptr->invalid = 1;
6720 }
6721 else
6722 {
6723 /* Make sure there isn't a buried load somewhere. */
6724 invalidate_any_buried_refs (src);
6725 }
6726
6727 /* Check for stores. Don't worry about aliased ones, they
6728 will block any movement we might do later. We only care
6729 about this exact pattern since those are the only
6730 circumstance that we will ignore the aliasing info. */
6731 if (GET_CODE (dest) == MEM && simple_mem (dest))
6732 {
6733 ptr = ldst_entry (dest);
6734
6735 if (GET_CODE (src) != MEM
6736 && GET_CODE (src) != ASM_OPERANDS)
6737 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
6738 else
6739 ptr->invalid = 1;
6740 }
6741 }
6742 else
6743 invalidate_any_buried_refs (PATTERN (insn));
6744 }
6745 }
6746 }
6747 }
6748
6749 /* Remove any references that have been either invalidated or are not in the
6750 expression list for pre gcse. */
6751
6752 static void
6753 trim_ld_motion_mems ()
6754 {
6755 struct ls_expr * last = NULL;
6756 struct ls_expr * ptr = first_ls_expr ();
6757
6758 while (ptr != NULL)
6759 {
6760 int del = ptr->invalid;
6761 struct expr * expr = NULL;
6762
6763 /* Delete if entry has been made invalid. */
6764 if (!del)
6765 {
6766 unsigned int i;
6767
6768 del = 1;
6769 /* Delete if we cannot find this mem in the expression list. */
6770 for (i = 0; i < expr_hash_table.size && del; i++)
6771 {
6772 for (expr = expr_hash_table.table[i];
6773 expr != NULL;
6774 expr = expr->next_same_hash)
6775 if (expr_equiv_p (expr->expr, ptr->pattern))
6776 {
6777 del = 0;
6778 break;
6779 }
6780 }
6781 }
6782
6783 if (del)
6784 {
6785 if (last != NULL)
6786 {
6787 last->next = ptr->next;
6788 free_ldst_entry (ptr);
6789 ptr = last->next;
6790 }
6791 else
6792 {
6793 pre_ldst_mems = pre_ldst_mems->next;
6794 free_ldst_entry (ptr);
6795 ptr = pre_ldst_mems;
6796 }
6797 }
6798 else
6799 {
6800 /* Set the expression field if we are keeping it. */
6801 last = ptr;
6802 ptr->expr = expr;
6803 ptr = ptr->next;
6804 }
6805 }
6806
6807 /* Show the world what we've found. */
6808 if (gcse_file && pre_ldst_mems != NULL)
6809 print_ldst_list (gcse_file);
6810 }
6811
6812 /* This routine will take an expression which we are replacing with
6813 a reaching register, and update any stores that are needed if
6814 that expression is in the ld_motion list. Stores are updated by
6815 copying their SRC to the reaching register, and then storing
6816 the reaching register into the store location.  This keeps the
6817 correct value in the reaching register for the loads. */
6818
6819 static void
6820 update_ld_motion_stores (expr)
6821 struct expr * expr;
6822 {
6823 struct ls_expr * mem_ptr;
6824
6825 if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
6826 {
6827 /* We can try to find just the REACHED stores, but it shouldn't
6828 matter to set the reaching reg everywhere... some might be
6829 dead and should be eliminated later. */
6830
6831 /* We replace SET mem = expr with
6832 SET reg = expr
6833 SET mem = reg , where reg is the
6834 reaching reg used in the load. */
6835 rtx list = mem_ptr->stores;
6836
6837 for ( ; list != NULL_RTX; list = XEXP (list, 1))
6838 {
6839 rtx insn = XEXP (list, 0);
6840 rtx pat = PATTERN (insn);
6841 rtx src = SET_SRC (pat);
6842 rtx reg = expr->reaching_reg;
6843 rtx copy, new;
6844
6845 /* If we've already copied it, continue. */
6846 if (expr->reaching_reg == src)
6847 continue;
6848
6849 if (gcse_file)
6850 {
6851 fprintf (gcse_file, "PRE: store updated with reaching reg ");
6852 print_rtl (gcse_file, expr->reaching_reg);
6853 fprintf (gcse_file, ":\n ");
6854 print_inline_rtx (gcse_file, insn, 8);
6855 fprintf (gcse_file, "\n");
6856 }
6857
6858 copy = gen_move_insn ( reg, SET_SRC (pat));
6859 new = emit_insn_before (copy, insn);
6860 record_one_set (REGNO (reg), new);
6861 SET_SRC (pat) = reg;
6862
6863 /* un-recognize this pattern since it's probably different now. */
6864 INSN_CODE (insn) = -1;
6865 gcse_create_count++;
6866 }
6867 }
6868 }
6869 \f
6870 /* Store motion code. */
6871
6872 /* This is used to communicate the target bitvector we want to use in the
6873 reg_set_info routine when called via the note_stores mechanism. */
6874 static sbitmap * regvec;
6875
6876 /* Used in computing the reverse edge graph bit vectors. */
6877 static sbitmap * st_antloc;
6878
6879 /* Global holding the number of store expressions we are dealing with. */
6880 static int num_stores;
6881
6882 /* Check to see whether we need to mark a register set.  Called from note_stores.  */
6883
6884 static void
6885 reg_set_info (dest, setter, data)
6886 rtx dest, setter ATTRIBUTE_UNUSED;
6887 void * data ATTRIBUTE_UNUSED;
6888 {
6889 if (GET_CODE (dest) == SUBREG)
6890 dest = SUBREG_REG (dest);
6891
6892 if (GET_CODE (dest) == REG)
6893 SET_BIT (*regvec, REGNO (dest));
6894 }
6895
6896 /* Return nonzero if the register operands of expression X are killed
6897 anywhere in basic block BB. */
6898
6899 static int
6900 store_ops_ok (x, bb)
6901 rtx x;
6902 basic_block bb;
6903 {
6904 int i;
6905 enum rtx_code code;
6906 const char * fmt;
6907
6908 /* Repeat is used to turn tail-recursion into iteration. */
6909 repeat:
6910
6911 if (x == 0)
6912 return 1;
6913
6914 code = GET_CODE (x);
6915 switch (code)
6916 {
6917 case REG:
6918 /* If a reg has changed after us in this
6919 block, the operand has been killed. */
6920 return TEST_BIT (reg_set_in_block[bb->index], REGNO (x));
6921
6922 case MEM:
6923 x = XEXP (x, 0);
6924 goto repeat;
6925
6926 case PRE_DEC:
6927 case PRE_INC:
6928 case POST_DEC:
6929 case POST_INC:
6930 return 0;
6931
6932 case PC:
6933 case CC0: /*FIXME*/
6934 case CONST:
6935 case CONST_INT:
6936 case CONST_DOUBLE:
6937 case CONST_VECTOR:
6938 case SYMBOL_REF:
6939 case LABEL_REF:
6940 case ADDR_VEC:
6941 case ADDR_DIFF_VEC:
6942 return 1;
6943
6944 default:
6945 break;
6946 }
6947
6948 i = GET_RTX_LENGTH (code) - 1;
6949 fmt = GET_RTX_FORMAT (code);
6950
6951 for (; i >= 0; i--)
6952 {
6953 if (fmt[i] == 'e')
6954 {
6955 rtx tem = XEXP (x, i);
6956
6957 /* If we are about to do the last recursive call
6958 needed at this level, change it into iteration.
6959 This function is called enough to be worth it. */
6960 if (i == 0)
6961 {
6962 x = tem;
6963 goto repeat;
6964 }
6965
6966 if (! store_ops_ok (tem, bb))
6967 return 0;
6968 }
6969 else if (fmt[i] == 'E')
6970 {
6971 int j;
6972
6973 for (j = 0; j < XVECLEN (x, i); j++)
6974 {
6975 if (! store_ops_ok (XVECEXP (x, i, j), bb))
6976 return 0;
6977 }
6978 }
6979 }
6980
6981 return 1;
6982 }
6983
6984 /* Determine whether INSN is a MEM store pattern that we will consider moving.  */
6985
6986 static void
6987 find_moveable_store (insn)
6988 rtx insn;
6989 {
6990 struct ls_expr * ptr;
6991 rtx dest = PATTERN (insn);
6992
6993 if (GET_CODE (dest) != SET
6994 || GET_CODE (SET_SRC (dest)) == ASM_OPERANDS)
6995 return;
6996
6997 dest = SET_DEST (dest);
6998
6999 if (GET_CODE (dest) != MEM || MEM_VOLATILE_P (dest)
7000 || GET_MODE (dest) == BLKmode)
7001 return;
7002
7003 if (GET_CODE (XEXP (dest, 0)) != SYMBOL_REF)
7004 return;
7005
7006 if (rtx_varies_p (XEXP (dest, 0), 0))
7007 return;
7008
7009 ptr = ldst_entry (dest);
7010 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
7011 }
7012
7013 /* Build the store table used by store motion: find all the stores we care
7014 about and record which registers are set in each basic block.  Returns the number of store expressions found.  */
7015
7016 static int
7017 compute_store_table ()
7018 {
7019 int ret;
7020 basic_block bb;
7021 unsigned regno;
7022 rtx insn, pat;
7023
7024 max_gcse_regno = max_reg_num ();
7025
7026 reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (last_basic_block,
7027 max_gcse_regno);
7028 sbitmap_vector_zero (reg_set_in_block, last_basic_block);
7029 pre_ldst_mems = 0;
7030
7031 /* Find all the stores we care about. */
7032 FOR_EACH_BB (bb)
7033 {
7034 regvec = & (reg_set_in_block[bb->index]);
7035 for (insn = bb->end;
7036 insn && insn != PREV_INSN (bb->head);
7037 insn = PREV_INSN (insn))
7038 {
7039 /* Ignore anything that is not a normal insn. */
7040 if (! INSN_P (insn))
7041 continue;
7042
7043 if (GET_CODE (insn) == CALL_INSN)
7044 {
7045 bool clobbers_all = false;
7046 #ifdef NON_SAVING_SETJMP
7047 if (NON_SAVING_SETJMP
7048 && find_reg_note (insn, REG_SETJMP, NULL_RTX))
7049 clobbers_all = true;
7050 #endif
7051
7052 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7053 if (clobbers_all
7054 || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
7055 SET_BIT (reg_set_in_block[bb->index], regno);
7056 }
7057
7058 pat = PATTERN (insn);
7059 note_stores (pat, reg_set_info, NULL);
7060
7061 /* Now that we've marked regs, look for stores. */
7062 if (GET_CODE (pat) == SET)
7063 find_moveable_store (insn);
7064 }
7065 }
7066
7067 ret = enumerate_ldsts ();
7068
7069 if (gcse_file)
7070 {
7071 fprintf (gcse_file, "Store Motion Expressions.\n");
7072 print_ldst_list (gcse_file);
7073 }
7074
7075 return ret;
7076 }
7077
7078 /* Check to see if the load X is aliased with STORE_PATTERN. */
7079
7080 static int
7081 load_kills_store (x, store_pattern)
7082 rtx x, store_pattern;
7083 {
7084 if (true_dependence (x, GET_MODE (x), store_pattern, rtx_addr_varies_p))
7085 return 1;
7086 return 0;
7087 }
7088
7089 /* Go through the entire insn X, looking for any loads which might alias
7090 STORE_PATTERN. Return 1 if found. */
7091
7092 static int
7093 find_loads (x, store_pattern)
7094 rtx x, store_pattern;
7095 {
7096 const char * fmt;
7097 int i, j;
7098 int ret = 0;
7099
7100 if (!x)
7101 return 0;
7102
7103 if (GET_CODE (x) == SET)
7104 x = SET_SRC (x);
7105
7106 if (GET_CODE (x) == MEM)
7107 {
7108 if (load_kills_store (x, store_pattern))
7109 return 1;
7110 }
7111
7112 /* Recursively process the insn. */
7113 fmt = GET_RTX_FORMAT (GET_CODE (x));
7114
7115 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0 && !ret; i--)
7116 {
7117 if (fmt[i] == 'e')
7118 ret |= find_loads (XEXP (x, i), store_pattern);
7119 else if (fmt[i] == 'E')
7120 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
7121 ret |= find_loads (XVECEXP (x, i, j), store_pattern);
7122 }
7123 return ret;
7124 }
7125
7126 /* Check if INSN kills the store pattern X (is aliased with it).
7127 Return 1 if it does.  */
7128
7129 static int
7130 store_killed_in_insn (x, insn)
7131 rtx x, insn;
7132 {
7133 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
7134 return 0;
7135
7136 if (GET_CODE (insn) == CALL_INSN)
7137 {
7138 /* A normal or pure call might read from pattern,
7139 but a const call will not. */
7140 return ! CONST_OR_PURE_CALL_P (insn) || pure_call_p (insn);
7141 }
7142
7143 if (GET_CODE (PATTERN (insn)) == SET)
7144 {
7145 rtx pat = PATTERN (insn);
7146 /* Check for memory stores to aliased objects. */
7147 if (GET_CODE (SET_DEST (pat)) == MEM && !expr_equiv_p (SET_DEST (pat), x))
7148 /* Pretend it's a load and check for aliasing.  */
7149 if (find_loads (SET_DEST (pat), x))
7150 return 1;
7151 return find_loads (SET_SRC (pat), x);
7152 }
7153 else
7154 return find_loads (PATTERN (insn), x);
7155 }
7156
7157 /* Returns 1 if the expression X is loaded or clobbered on or after INSN
7158 within basic block BB. */
7159
7160 static int
7161 store_killed_after (x, insn, bb)
7162 rtx x, insn;
7163 basic_block bb;
7164 {
7165 rtx last = bb->end;
7166
7167 if (insn == last)
7168 return 0;
7169
7170 /* Check if the register operands of the store are OK in this block.
7171 Note that if registers are changed ANYWHERE in the block, we'll
7172 decide we can't move it, regardless of whether it changed above
7173 or below the store. This could be improved by checking the register
7174 operands while looking for aliasing in each insn. */
7175 if (!store_ops_ok (XEXP (x, 0), bb))
7176 return 1;
7177
7178 for ( ; insn && insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
7179 if (store_killed_in_insn (x, insn))
7180 return 1;
7181
7182 return 0;
7183 }
7184
7185 /* Returns 1 if the expression X is loaded or clobbered on or before INSN
7186 within basic block BB. */
7187 static int
7188 store_killed_before (x, insn, bb)
7189 rtx x, insn;
7190 basic_block bb;
7191 {
7192 rtx first = bb->head;
7193
7194 if (insn == first)
7195 return store_killed_in_insn (x, insn);
7196
7197 /* Check if the register operands of the store are OK in this block.
7198 Note that if registers are changed ANYWHERE in the block, we'll
7199 decide we can't move it, regardless of whether it changed above
7200 or below the store. This could be improved by checking the register
7201 operands while looking for aliasing in each insn. */
7202 if (!store_ops_ok (XEXP (x, 0), bb))
7203 return 1;
7204
7205 for ( ; insn && insn != PREV_INSN (first); insn = PREV_INSN (insn))
7206 if (store_killed_in_insn (x, insn))
7207 return 1;
7208
7209 return 0;
7210 }
7211
7212 #define ANTIC_STORE_LIST(x) ((x)->loads)
7213 #define AVAIL_STORE_LIST(x) ((x)->stores)
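/* For store motion the LOADS field of an ls_expr is reused to hold the
   anticipatable stores and the STORES field the available ones; the
   two macros above make that reuse explicit.  */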
7214
7215 /* Given the table of available store insns at the end of blocks,
7216 determine which ones are not killed by aliasing, and generate
7217 the appropriate vectors for gen and killed. */
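/* Restating the vectors built below:

     ae_gen[bb]    - store is available at the end of BB (not killed after it)
     st_antloc[bb] - store is anticipatable at the head of BB (not killed before it)
     ae_kill[bb]   - store is killed somewhere within BB
     transp[bb]    - store is transparent through BB (not killed at all).  */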
7218 static void
7219 build_store_vectors ()
7220 {
7221 basic_block bb, b;
7222 rtx insn, st;
7223 struct ls_expr * ptr;
7224
7225 /* Build the gen_vector. This is any store in the table which is not killed
7226 by aliasing later in its block. */
7227 ae_gen = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
7228 sbitmap_vector_zero (ae_gen, last_basic_block);
7229
7230 st_antloc = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
7231 sbitmap_vector_zero (st_antloc, last_basic_block);
7232
7233 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
7234 {
7235 /* Put all the stores into either the antic list, or the avail list,
7236 or both. */
7237 rtx store_list = ptr->stores;
7238 ptr->stores = NULL_RTX;
7239
7240 for (st = store_list; st != NULL; st = XEXP (st, 1))
7241 {
7242 insn = XEXP (st, 0);
7243 bb = BLOCK_FOR_INSN (insn);
7244
7245 if (!store_killed_after (ptr->pattern, insn, bb))
7246 {
7247 /* If we've already seen an available expression in this block,
7248 we can delete the one we saw already (it occurs earlier in
7249 the block) and replace it with this one.  We'll copy the
7250 old SRC expression to an unused register in case there
7251 are any side effects. */
7252 if (TEST_BIT (ae_gen[bb->index], ptr->index))
7253 {
7254 /* Find previous store. */
7255 rtx st;
7256 for (st = AVAIL_STORE_LIST (ptr); st ; st = XEXP (st, 1))
7257 if (BLOCK_FOR_INSN (XEXP (st, 0)) == bb)
7258 break;
7259 if (st)
7260 {
7261 rtx r = gen_reg_rtx (GET_MODE (ptr->pattern));
7262 if (gcse_file)
7263 fprintf (gcse_file, "Removing redundant store:\n");
7264 replace_store_insn (r, XEXP (st, 0), bb);
7265 XEXP (st, 0) = insn;
7266 continue;
7267 }
7268 }
7269 SET_BIT (ae_gen[bb->index], ptr->index);
7270 AVAIL_STORE_LIST (ptr) = alloc_INSN_LIST (insn,
7271 AVAIL_STORE_LIST (ptr));
7272 }
7273
7274 if (!store_killed_before (ptr->pattern, insn, bb))
7275 {
7276 SET_BIT (st_antloc[BLOCK_NUM (insn)], ptr->index);
7277 ANTIC_STORE_LIST (ptr) = alloc_INSN_LIST (insn,
7278 ANTIC_STORE_LIST (ptr));
7279 }
7280 }
7281
7282 /* Free the original list of store insns. */
7283 free_INSN_LIST_list (&store_list);
7284 }
7285
7286 ae_kill = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
7287 sbitmap_vector_zero (ae_kill, last_basic_block);
7288
7289 transp = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
7290 sbitmap_vector_zero (transp, last_basic_block);
7291
7292 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
7293 FOR_EACH_BB (b)
7294 {
7295 if (store_killed_after (ptr->pattern, b->head, b))
7296 {
7297 /* The anticipatable expression is not killed if it's gen'd. */
7298 /*
7299 We leave this check out for now. If we have a code sequence
7300 in a block which looks like:
7301 ST MEMa = x
7302 L y = MEMa
7303 ST MEMa = z
7304 We should flag this as having an ANTIC expression, NOT
7305 transparent, NOT killed, and AVAIL.
7306 Unfortunately, since we haven't re-written all loads to
7307 use the reaching reg, we'll end up doing an incorrect
7308 Load in the middle here if we push the store down. It happens in
7309 gcc.c-torture/execute/960311-1.c with -O3
7310 If we always kill it in this case, we'll sometimes do
7311 unnecessary work, but it shouldn't actually hurt anything.
7312 if (!TEST_BIT (ae_gen[b], ptr->index)). */
7313 SET_BIT (ae_kill[b->index], ptr->index);
7314 }
7315 else
7316 SET_BIT (transp[b->index], ptr->index);
7317 }
7318
7319 /* Any block with no exits calls some non-returning function, so
7320 we better mark the store killed here, or we might not store to
7321 it at all. If we knew it was abort, we wouldn't have to store,
7322 but we don't know that for sure. */
7323 if (gcse_file)
7324 {
7325 fprintf (gcse_file, "ST_avail and ST_antic (shown under loads..)\n");
7326 print_ldst_list (gcse_file);
7327 dump_sbitmap_vector (gcse_file, "st_antloc", "", st_antloc, last_basic_block);
7328 dump_sbitmap_vector (gcse_file, "st_kill", "", ae_kill, last_basic_block);
7329 dump_sbitmap_vector (gcse_file, "Transpt", "", transp, last_basic_block);
7330 dump_sbitmap_vector (gcse_file, "st_avloc", "", ae_gen, last_basic_block);
7331 }
7332 }
7333
7334 /* Insert an instruction at the beginning of a basic block, and update
7335 the BLOCK_HEAD if needed. */
7336
7337 static void
7338 insert_insn_start_bb (insn, bb)
7339 rtx insn;
7340 basic_block bb;
7341 {
7342 /* Insert at start of successor block. */
7343 rtx prev = PREV_INSN (bb->head);
7344 rtx before = bb->head;
7345 while (before != 0)
7346 {
7347 if (GET_CODE (before) != CODE_LABEL
7348 && (GET_CODE (before) != NOTE
7349 || NOTE_LINE_NUMBER (before) != NOTE_INSN_BASIC_BLOCK))
7350 break;
7351 prev = before;
7352 if (prev == bb->end)
7353 break;
7354 before = NEXT_INSN (before);
7355 }
7356
7357 insn = emit_insn_after (insn, prev);
7358
7359 if (gcse_file)
7360 {
7361 fprintf (gcse_file, "STORE_MOTION insert store at start of BB %d:\n",
7362 bb->index);
7363 print_inline_rtx (gcse_file, insn, 6);
7364 fprintf (gcse_file, "\n");
7365 }
7366 }
7367
7368 /* This routine will insert a store on an edge. EXPR is the ldst entry for
7369 the memory reference, and E is the edge to insert it on. Returns nonzero
7370 if an edge insertion was performed. */
7371
7372 static int
7373 insert_store (expr, e)
7374 struct ls_expr * expr;
7375 edge e;
7376 {
7377 rtx reg, insn;
7378 basic_block bb;
7379 edge tmp;
7380
7381 /* We did all the deletes before this insert, so if we didn't delete a
7382 store, then we haven't set the reaching reg yet either. */
7383 if (expr->reaching_reg == NULL_RTX)
7384 return 0;
7385
7386 reg = expr->reaching_reg;
7387 insn = gen_move_insn (expr->pattern, reg);
7388
7389 /* If we are inserting this expression on ALL predecessor edges of a BB,
7390 insert it at the start of the BB instead, and reset the insert bits on
7391 those edges so we don't insert it redundantly. */
7392 bb = e->dest;
7393 for (tmp = e->dest->pred; tmp ; tmp = tmp->pred_next)
7394 {
7395 int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
7396 if (index == EDGE_INDEX_NO_EDGE)
7397 abort ();
7398 if (! TEST_BIT (pre_insert_map[index], expr->index))
7399 break;
7400 }
7401
7402 /* If tmp is NULL, we found an insertion on every edge; blank the
7403 insertion vector for these edges, and insert at the start of the BB. */
7404 if (!tmp && bb != EXIT_BLOCK_PTR)
7405 {
7406 for (tmp = e->dest->pred; tmp ; tmp = tmp->pred_next)
7407 {
7408 int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
7409 RESET_BIT (pre_insert_map[index], expr->index);
7410 }
7411 insert_insn_start_bb (insn, bb);
7412 return 0;
7413 }
7414
7415 /* We can't insert on this edge, so we'll insert at the head of the
7416 successor block. See Morgan, sec 10.5. */
7417 if ((e->flags & EDGE_ABNORMAL) == EDGE_ABNORMAL)
7418 {
7419 insert_insn_start_bb (insn, bb);
7420 return 0;
7421 }
7422
7423 insert_insn_on_edge (insn, e);
7424
7425 if (gcse_file)
7426 {
7427 fprintf (gcse_file, "STORE_MOTION insert insn on edge (%d, %d):\n",
7428 e->src->index, e->dest->index);
7429 print_inline_rtx (gcse_file, insn, 6);
7430 fprintf (gcse_file, "\n");
7431 }
7432
7433 return 1;
7434 }
7435
7436 /* This routine will replace a store with a SET to a specified register. */
7437
7438 static void
7439 replace_store_insn (reg, del, bb)
7440 rtx reg, del;
7441 basic_block bb;
7442 {
7443 rtx insn;
7444
7445 insn = gen_move_insn (reg, SET_SRC (PATTERN (del)));
7446 insn = emit_insn_after (insn, del);
7447
7448 if (gcse_file)
7449 {
7450 fprintf (gcse_file,
7451 "STORE_MOTION delete insn in BB %d:\n ", bb->index);
7452 print_inline_rtx (gcse_file, del, 6);
7453 fprintf (gcse_file, "\nSTORE MOTION replaced with insn:\n ");
7454 print_inline_rtx (gcse_file, insn, 6);
7455 fprintf (gcse_file, "\n");
7456 }
7457
7458 delete_insn (del);
7459 }
7460
7461
7462 /* Delete a store, but copy the value that would have been stored into
7463 the reaching_reg for later storing. */
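/* A sketch of the transformation (register numbers are hypothetical):
   a deleted store such as

       (set (mem:SI (reg 50)) (reg 100))

   is replaced by replace_store_insn with

       (set (reg 200) (reg 100))    ;; reg 200 is expr->reaching_reg

   and insert_store later emits (set (mem:SI (reg 50)) (reg 200)) at the
   blocks or edges chosen by LCM, so the value still reaches memory.  */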
7464
7465 static void
7466 delete_store (expr, bb)
7467 struct ls_expr * expr;
7468 basic_block bb;
7469 {
7470 rtx reg, i, del;
7471
7472 if (expr->reaching_reg == NULL_RTX)
7473 expr->reaching_reg = gen_reg_rtx (GET_MODE (expr->pattern));
7474
7475
7476 /* If there is more than 1 store, the earlier ones will be dead,
7477 but it doesn't hurt to replace them here. */
7478 reg = expr->reaching_reg;
7479
7480 for (i = AVAIL_STORE_LIST (expr); i; i = XEXP (i, 1))
7481 {
7482 del = XEXP (i, 0);
7483 if (BLOCK_FOR_INSN (del) == bb)
7484 {
7485 /* We know there is only one since we deleted redundant
7486 ones during the available computation. */
7487 replace_store_insn (reg, del, bb);
7488 break;
7489 }
7490 }
7491 }
7492
7493 /* Free memory used by store motion. */
7494
7495 static void
7496 free_store_memory ()
7497 {
7498 free_ldst_mems ();
7499
7500 if (ae_gen)
7501 sbitmap_vector_free (ae_gen);
7502 if (ae_kill)
7503 sbitmap_vector_free (ae_kill);
7504 if (transp)
7505 sbitmap_vector_free (transp);
7506 if (st_antloc)
7507 sbitmap_vector_free (st_antloc);
7508 if (pre_insert_map)
7509 sbitmap_vector_free (pre_insert_map);
7510 if (pre_delete_map)
7511 sbitmap_vector_free (pre_delete_map);
7512 if (reg_set_in_block)
7513 sbitmap_vector_free (reg_set_in_block);
7514
7515 ae_gen = ae_kill = transp = st_antloc = NULL;
7516 pre_insert_map = pre_delete_map = reg_set_in_block = NULL;
7517 }
7518
7519 /* Perform store motion. Much like gcse, except we move expressions the
7520 other way by looking at the flowgraph in reverse. */
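/* A rough source-level illustration (hypothetical code, not a real
   test case):

       if (c) *p = x; else *p = y;

   can become

       if (c) t = x; else t = y;
       *p = t;

   The two original stores are deleted, their values are copied into the
   reaching register, and a single store is inserted at the point chosen
   by the reverse LCM dataflow below.  */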
7521
7522 static void
7523 store_motion ()
7524 {
7525 basic_block bb;
7526 int x;
7527 struct ls_expr * ptr;
7528 int update_flow = 0;
7529
7530 if (gcse_file)
7531 {
7532 fprintf (gcse_file, "before store motion\n");
7533 print_rtl (gcse_file, get_insns ());
7534 }
7535
7536
7537 init_alias_analysis ();
7538
7539 /* Find all the stores that are live to the end of their block. */
7540 num_stores = compute_store_table ();
7541 if (num_stores == 0)
7542 {
7543 sbitmap_vector_free (reg_set_in_block);
7544 end_alias_analysis ();
7545 return;
7546 }
7547
7548 /* Now compute what's actually available to move. */
7549 add_noreturn_fake_exit_edges ();
7550 build_store_vectors ();
7551
7552 edge_list = pre_edge_rev_lcm (gcse_file, num_stores, transp, ae_gen,
7553 st_antloc, ae_kill, &pre_insert_map,
7554 &pre_delete_map);
7555
7556 /* Now we want to insert the new stores which are going to be needed. */
7557 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
7558 {
7559 FOR_EACH_BB (bb)
7560 if (TEST_BIT (pre_delete_map[bb->index], ptr->index))
7561 delete_store (ptr, bb);
7562
7563 for (x = 0; x < NUM_EDGES (edge_list); x++)
7564 if (TEST_BIT (pre_insert_map[x], ptr->index))
7565 update_flow |= insert_store (ptr, INDEX_EDGE (edge_list, x));
7566 }
7567
7568 if (update_flow)
7569 commit_edge_insertions ();
7570
7571 free_store_memory ();
7572 free_edge_list (edge_list);
7573 remove_fake_edges ();
7574 end_alias_analysis ();
7575 }
7576
7577 \f
7578 /* Entry point for jump bypassing optimization pass. */
7579
7580 int
7581 bypass_jumps (file)
7582 FILE *file;
7583 {
7584 int changed;
7585
7586 /* We do not construct an accurate cfg in functions which call
7587 setjmp, so just punt to be safe. */
7588 if (current_function_calls_setjmp)
7589 return 0;
7590
7591 /* For calling dump_foo fns from gdb. */
7592 debug_stderr = stderr;
7593 gcse_file = file;
7594
7595 /* Identify the basic block information for this function, including
7596 successors and predecessors. */
7597 max_gcse_regno = max_reg_num ();
7598
7599 if (file)
7600 dump_flow_info (file);
7601
7602 /* Return if there's nothing to do. */
7603 if (n_basic_blocks <= 1)
7604 return 0;
7605
7606 /* Trying to perform global optimizations on flow graphs which have
7607 a high connectivity will take a long time and is unlikely to be
7608 particularly useful.
7609
7610 In normal circumstances a cfg should have about twice as many edges
7611 as blocks. But we do not want to punish small functions which have
7612 a couple of switch statements. So we require a relatively large number
7613 of basic blocks and the ratio of edges to blocks to be high. */
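/* For example (numbers purely illustrative): 1500 blocks with 40000
   edges gives an integer ratio of 26 edges per block, which trips the
   check below; 1500 blocks with 3000 edges (ratio 2) does not.  */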
7614 if (n_basic_blocks > 1000 && n_edges / n_basic_blocks >= 20)
7615 {
7616 if (warn_disabled_optimization)
7617 warning ("BYPASS disabled: %d > 1000 basic blocks and %d >= 20 edges/basic block",
7618 n_basic_blocks, n_edges / n_basic_blocks);
7619 return 0;
7620 }
7621
7622 /* If allocating memory for the cprop bitmap would take up too much
7623 storage it's better just to disable the optimization. */
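/* A rough, illustrative calculation, assuming a 32-bit bitmap element:
   2000 basic blocks with 64000 pseudo registers needs about
   2000 * (64000 / 32) * 4 bytes, i.e. roughly 16 MB of bitmap storage,
   which is the kind of figure compared against MAX_GCSE_MEMORY below.  */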
7624 if ((n_basic_blocks
7625 * SBITMAP_SET_SIZE (max_gcse_regno)
7626 * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
7627 {
7628 if (warn_disabled_optimization)
7629 warning ("GCSE disabled: %d basic blocks and %d registers",
7630 n_basic_blocks, max_gcse_regno);
7631
7632 return 0;
7633 }
7634
7635 /* See what modes support reg/reg copy operations. */
7636 if (! can_copy_init_p)
7637 {
7638 compute_can_copy ();
7639 can_copy_init_p = 1;
7640 }
7641
7642 gcc_obstack_init (&gcse_obstack);
7643 bytes_used = 0;
7644
7645 /* We need alias analysis. */
7646 init_alias_analysis ();
7647
7648 /* Record where pseudo-registers are set. This data is kept accurate
7649 during each pass. ??? We could also record hard-reg information here
7650 [since it's unchanging]; however, it is currently done during hash table
7651 computation.
7652
7653 It may be tempting to compute MEM set information here too, but MEM sets
7654 will be subject to code motion one day and thus we need to compute
7655 information about memory sets when we build the hash tables. */
7656
7657 alloc_reg_set_mem (max_gcse_regno);
7658 compute_sets (get_insns ());
7659
7660 max_gcse_regno = max_reg_num ();
7661 alloc_gcse_mem (get_insns ());
7662 changed = one_cprop_pass (1, 1, 1);
7663 free_gcse_mem ();
7664
7665 if (file)
7666 {
7667 fprintf (file, "BYPASS of %s: %d basic blocks, ",
7668 current_function_name, n_basic_blocks);
7669 fprintf (file, "%d bytes\n\n", bytes_used);
7670 }
7671
7672 obstack_free (&gcse_obstack, NULL);
7673 free_reg_set_mem ();
7674
7675 /* We are finished with alias analysis. */
7676 end_alias_analysis ();
7677 allocate_reg_info (max_reg_num (), FALSE, FALSE);
7678
7679 return changed;
7680 }
7681
7682 #include "gt-gcse.h"