1 /* Global common subexpression elimination/Partial redundancy elimination
2 and global constant/copy propagation for GNU compiler.
3 Copyright (C) 1997, 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
4
5 This file is part of GNU CC.
6
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
11
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
21
22 /* TODO
23 - reordering of memory allocation and freeing to be more space efficient
24 - do rough calc of how many regs are needed in each block, and a rough
25 calc of how many regs are available in each class and use that to
26 throttle back the code in cases where RTX_COST is minimal.
27 - a store to the same address as a load does not kill the load if the
28 source of the store is also the destination of the load. Handling this
29 allows more load motion, particularly out of loops.
30 - ability to realloc sbitmap vectors would allow one initial computation
31 of reg_set_in_block with only subsequent additions, rather than
32 recomputing it for each pass
33
34 */
35
36 /* References searched while implementing this.
37
38 Compilers: Principles, Techniques, and Tools
39 Aho, Sethi, Ullman
40 Addison-Wesley, 1988
41
42 Global Optimization by Suppression of Partial Redundancies
43 E. Morel, C. Renvoise
44 Communications of the ACM, Vol. 22, Num. 2, Feb. 1979
45
46 A Portable Machine-Independent Global Optimizer - Design and Measurements
47 Frederick Chow
48 Stanford Ph.D. thesis, Dec. 1983
49
50 A Fast Algorithm for Code Movement Optimization
51 D.M. Dhamdhere
52 SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988
53
54 A Solution to a Problem with Morel and Renvoise's
55 Global Optimization by Suppression of Partial Redundancies
56 K-H Drechsler, M.P. Stadel
57 ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988
58
59 Practical Adaptation of the Global Optimization
60 Algorithm of Morel and Renvoise
61 D.M. Dhamdhere
62 ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991
63
64 Efficiently Computing Static Single Assignment Form and the Control
65 Dependence Graph
66 R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
67 ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991
68
69 Lazy Code Motion
70 J. Knoop, O. Ruthing, B. Steffen
71 ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
72
73 What's In a Region? Or Computing Control Dependence Regions in Near-Linear
74 Time for Reducible Control Flow
75 Thomas Ball
76 ACM Letters on Programming Languages and Systems,
77 Vol. 2, Num. 1-4, Mar-Dec 1993
78
79 An Efficient Representation for Sparse Sets
80 Preston Briggs, Linda Torczon
81 ACM Letters on Programming Languages and Systems,
82 Vol. 2, Num. 1-4, Mar-Dec 1993
83
84 A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
85 K-H Drechsler, M.P. Stadel
86 ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993
87
88 Partial Dead Code Elimination
89 J. Knoop, O. Ruthing, B. Steffen
90 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
91
92 Effective Partial Redundancy Elimination
93 P. Briggs, K.D. Cooper
94 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
95
96 The Program Structure Tree: Computing Control Regions in Linear Time
97 R. Johnson, D. Pearson, K. Pingali
98 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
99
100 Optimal Code Motion: Theory and Practice
101 J. Knoop, O. Ruthing, B. Steffen
102 ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994
103
104 The power of assignment motion
105 J. Knoop, O. Ruthing, B. Steffen
106 ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
107
108 Global code motion / global value numbering
109 C. Click
110 ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
111
112 Value Driven Redundancy Elimination
113 L.T. Simpson
114 Rice University Ph.D. thesis, Apr. 1996
115
116 Value Numbering
117 L.T. Simpson
118 Massively Scalar Compiler Project, Rice University, Sep. 1996
119
120 High Performance Compilers for Parallel Computing
121 Michael Wolfe
122 Addison-Wesley, 1996
123
124 Advanced Compiler Design and Implementation
125 Steven Muchnick
126 Morgan Kaufmann, 1997
127
128 Building an Optimizing Compiler
129 Robert Morgan
130 Digital Press, 1998
131
132 People wishing to speed up the code here should read:
133 Elimination Algorithms for Data Flow Analysis
134 B.G. Ryder, M.C. Paull
135 ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986
136
137 How to Analyze Large Programs Efficiently and Informatively
138 D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
139 ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
140
141 People wishing to do something different can find various possibilities
142 in the above papers and elsewhere.
143 */
144
145 #include "config.h"
146 #include "system.h"
147 #include "toplev.h"
148
149 #include "rtl.h"
150 #include "tm_p.h"
151 #include "regs.h"
152 #include "hard-reg-set.h"
153 #include "flags.h"
154 #include "real.h"
155 #include "insn-config.h"
156 #include "recog.h"
157 #include "basic-block.h"
158 #include "output.h"
159 #include "function.h"
160 #include "expr.h"
161 #include "ggc.h"
162 #include "params.h"
163
164 #include "obstack.h"
165 #define obstack_chunk_alloc gmalloc
166 #define obstack_chunk_free free
167
168 /* Propagate flow information through back edges and thus enable PRE to
169 move loop-invariant calculations out of loops.
170
171 Originally this tended to create worse overall code, but several
172 improvements during the development of PRE seem to have made following
173 back edges generally a win.
174
175 Note much of the loop invariant code motion done here would normally
176 be done by loop.c, which has more heuristics for when to move invariants
177 out of loops. At some point we might need to move some of those
178 heuristics into gcse.c. */
179 #define FOLLOW_BACK_EDGES 1
180
181 /* We support GCSE via Partial Redundancy Elimination. PRE optimizations
182 are a superset of those done by GCSE.
183
184 We perform the following steps:
185
186 1) Compute basic block information.
187
188 2) Compute table of places where registers are set.
189
190 3) Perform copy/constant propagation.
191
192 4) Perform global cse.
193
194 5) Perform another pass of copy/constant propagation.
195
196 Two passes of copy/constant propagation are done because the first one
197 enables more GCSE and the second one helps to clean up the copies that
198 GCSE creates. This is needed more for PRE than for Classic because Classic
199 GCSE will try to use an existing register containing the common
200 subexpression rather than create a new one. This is harder to do for PRE
201 because of the code motion (which Classic GCSE doesn't do).
202
203 Expressions we are interested in GCSE-ing are of the form
204 (set (pseudo-reg) (expression)).
205 Function want_to_gcse_p says what these are.
206
207 PRE handles moving invariant expressions out of loops (by treating them as
208 partially redundant).
209
210 Eventually it would be nice to replace cse.c/gcse.c with SSA (static single
211 assignment) based GVN (global value numbering). L. T. Simpson's paper
212 (Rice University) on value numbering is a useful reference for this.
213
214 **********************
215
216 We used to support multiple passes but there are diminishing returns in
217 doing so. The first pass usually makes 90% of the changes that are doable.
218 A second pass can make a few more changes made possible by the first pass.
219 Experiments show any further passes don't make enough changes to justify
220 the expense.
221
222 A study of spec92 using an unlimited number of passes:
223 [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
224 [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
225 [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1
226
227 It was found that doing copy propagation between each pass enables
228 further substitutions.
229
230 PRE is quite expensive in complicated functions because the DFA can take
231 a while to converge. Hence we only perform one pass. The parameter
232 max-gcse-passes can be modified if one wants to experiment.
233
234 **********************
235
236 The steps for PRE are:
237
238 1) Build the hash table of expressions we wish to GCSE (expr_hash_table).
239
240 2) Perform the data flow analysis for PRE.
241
242 3) Delete the redundant instructions
243
244 4) Insert the required copies [if any] that make the partially
245 redundant instructions fully redundant.
246
247 5) For other reaching expressions, insert an instruction to copy the value
248 to a newly created pseudo that will reach the redundant instruction.
249
250 The deletion is done first so that when we do insertions we
251 know which pseudo reg to use.
252
253 Various papers have argued that PRE DFA is expensive (O(n^2)) and others
254 argue it is not. The number of iterations for the algorithm to converge
255 is typically 2-4 so I don't view it as that expensive (relatively speaking).
256
257 PRE GCSE depends heavily on the second CSE pass to clean up the copies
258 we create. To make an expression reach the place where it's redundant,
259 the result of the expression is copied to a new register, and the redundant
260 expression is deleted by replacing it with this new register. Classic GCSE
261 doesn't have this problem as much as it computes the reaching defs of
262 each register in each block and thus can try to use an existing register.
263
264 **********************
265
266 A fair bit of simplicity is gained by writing small functions for simple
267 tasks, even when the function is only called in one place. This may
268 measurably slow things down [or may not] by creating more function call
269 overhead than is necessary. The source is laid out so that it's trivial
270 to make the affected functions inline, so that one can measure what
271 speedup, if any, can be achieved, and maybe later, when things settle down,
272 things can be rearranged.
273
274 Help stamp out big monolithic functions! */
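
/* As a purely illustrative sketch (the register numbers below are invented,
   not taken from any real dump), consider an expression that is computed on
   only one of two paths into a block:

	bb1: (set (reg 70) (plus (reg 60) (reg 61)))    bb2: ...
	                  \                             /
	        bb3: (set (reg 80) (plus (reg 60) (reg 61)))

   The computation in bb3 is partially redundant.  PRE deletes it, replacing
   it with a copy from a newly created pseudo (the `reaching_reg' recorded in
   struct expr below), inserts the expression on the path where it was
   missing, and adds a copy after the occurrence that already existed:

	bb1: (set (reg 70) (plus (reg 60) (reg 61)))
	     (set (reg 90) (reg 70))
	bb2: (set (reg 90) (plus (reg 60) (reg 61)))   <- inserted on the edge
	bb3: (set (reg 80) (reg 90))                   <- now a simple copy

   The second cprop pass then cleans up copies like the one left in bb3.  */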
275 \f
276 /* GCSE global vars. */
277
278 /* -dG dump file. */
279 static FILE *gcse_file;
280
281 /* Note whether or not we should run jump optimization after gcse. We
282 want to do this for two cases.
283
284 * If we changed any jumps via cprop.
285
286 * If we added any labels via edge splitting. */
287
288 static int run_jump_opt_after_gcse;
289
290 /* Bitmaps are normally not included in debugging dumps.
291 However it's useful to be able to print them from GDB.
292 We could create special functions for this, but it's simpler to
293 just allow passing stderr to the dump_foo fns. Since stderr can
294 be a macro, we store a copy here. */
295 static FILE *debug_stderr;
296
297 /* An obstack for our working variables. */
298 static struct obstack gcse_obstack;
299
300 /* Non-zero for each mode that supports (set (reg) (reg)).
301 This is trivially true for integer and floating point values.
302 It may or may not be true for condition codes. */
303 static char can_copy_p[(int) NUM_MACHINE_MODES];
304
305 /* Non-zero if can_copy_p has been initialized. */
306 static int can_copy_init_p;
307
308 struct reg_use {rtx reg_rtx; };
309
310 /* Hash table of expressions. */
311
312 struct expr
313 {
314 /* The expression (SET_SRC for expressions, PATTERN for assignments). */
315 rtx expr;
316 /* Index in the available expression bitmaps. */
317 int bitmap_index;
318 /* Next entry with the same hash. */
319 struct expr *next_same_hash;
320 /* List of anticipatable occurrences in basic blocks in the function.
321 An "anticipatable occurrence" is one that is the first occurrence in the
322 basic block, the operands are not modified in the basic block prior
323 to the occurrence and the output is not used between the start of
324 the block and the occurrence. */
325 struct occr *antic_occr;
326 /* List of available occurrences in basic blocks in the function.
327 An "available occurrence" is one that is the last occurrence in the
328 basic block and the operands are not modified by following statements in
329 the basic block [including this insn]. */
330 struct occr *avail_occr;
331 /* Non-null if the computation is PRE redundant.
332 The value is the newly created pseudo-reg to record a copy of the
333 expression in all the places that reach the redundant copy. */
334 rtx reaching_reg;
335 };
336
337 /* Occurrence of an expression.
338 There is one per basic block. If a pattern appears more than once the
339 last appearance is used [or first for anticipatable expressions]. */
340
341 struct occr
342 {
343 /* Next occurrence of this expression. */
344 struct occr *next;
345 /* The insn that computes the expression. */
346 rtx insn;
347 /* Non-zero if this [anticipatable] occurrence has been deleted. */
348 char deleted_p;
349 /* Non-zero if this [available] occurrence has been copied to
350 reaching_reg. */
351 /* ??? This is mutually exclusive with deleted_p, so they could share
352 the same byte. */
353 char copied_p;
354 };
355
356 /* Expression and copy propagation hash tables.
357 Each hash table is an array of buckets.
358 ??? It is known that if it were an array of entries, structure elements
359 `next_same_hash' and `bitmap_index' wouldn't be necessary. However, it is
360 not clear whether in the final analysis a sufficient amount of memory would
361 be saved as the size of the available expression bitmaps would be larger
362 [one could build a mapping table without holes afterwards though].
363 Someday I'll perform the computation and figure it out. */
364
365 /* Total size of the expression hash table, in elements. */
366 static unsigned int expr_hash_table_size;
367
368 /* The table itself.
369 This is an array of `expr_hash_table_size' elements. */
370 static struct expr **expr_hash_table;
371
372 /* Total size of the copy propagation hash table, in elements. */
373 static unsigned int set_hash_table_size;
374
375 /* The table itself.
376 This is an array of `set_hash_table_size' elements. */
377 static struct expr **set_hash_table;
378
379 /* Mapping of uids to cuids.
380 Only real insns get cuids. */
381 static int *uid_cuid;
382
383 /* Highest UID in UID_CUID. */
384 static int max_uid;
385
386 /* Get the cuid of an insn. */
387 #ifdef ENABLE_CHECKING
388 #define INSN_CUID(INSN) (INSN_UID (INSN) > max_uid ? (abort (), 0) : uid_cuid[INSN_UID (INSN)])
389 #else
390 #define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
391 #endif
392
393 /* Number of cuids. */
394 static int max_cuid;
395
396 /* Mapping of cuids to insns. */
397 static rtx *cuid_insn;
398
399 /* Get insn from cuid. */
400 #define CUID_INSN(CUID) (cuid_insn[CUID])
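
/* A small example of the mapping (for illustration only; the UID values are
   made up): if a function's real insns have UIDs 3, 7, 8 and 12, with notes
   interleaved, then uid_cuid maps 3, 7, 8, 12 to the cuids 0, 1, 2, 3, and
   cuid_insn maps back.  Because cuids increase monotonically and have no
   gaps over the real insns, comparing INSN_CUIDs gives the order of two
   insns within the function, which is what the availability and
   anticipatability tests below rely on.  */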
401
402 /* Maximum register number in function prior to doing gcse + 1.
403 Registers created during this pass have regno >= max_gcse_regno.
404 This is named with "gcse" to not collide with a global of the same name. */
405 static unsigned int max_gcse_regno;
406
407 /* Maximum number of cse-able expressions found. */
408 static int n_exprs;
409
410 /* Maximum number of assignments for copy propagation found. */
411 static int n_sets;
412
413 /* Table of registers that are modified.
414
415 For each register, each element is a list of places where the pseudo-reg
416 is set.
417
418 For simplicity, GCSE is done on sets of pseudo-regs only. PRE GCSE only
419 requires knowledge of which blocks kill which regs [and thus could use
420 a bitmap instead of the lists `reg_set_table' uses].
421
422 `reg_set_table' could be turned into an array of bitmaps (num-bbs x
423 num-regs) [however perhaps it may be useful to keep the data as is]. One
424 advantage of recording things this way is that `reg_set_table' is fairly
425 sparse with respect to pseudo regs but for hard regs could be fairly dense
426 [relatively speaking]. And recording sets of pseudo-regs in lists speeds
427 up functions like compute_transp since in the case of pseudo-regs we only
428 need to iterate over the number of times a pseudo-reg is set, not over the
429 number of basic blocks [clearly there is a bit of a slow down in the cases
430 where a pseudo is set more than once in a block, however it is believed
431 that the net effect is to speed things up]. This isn't done for hard-regs
432 because recording call-clobbered hard-regs in `reg_set_table' at each
433 function call can consume a fair bit of memory, and iterating over
434 hard-regs stored this way in compute_transp will be more expensive. */
435
436 typedef struct reg_set
437 {
438 /* The next setting of this register. */
439 struct reg_set *next;
440 /* The insn where it was set. */
441 rtx insn;
442 } reg_set;
443
444 static reg_set **reg_set_table;
445
446 /* Size of `reg_set_table'.
447 The table starts out at max_gcse_regno + slop, and is enlarged as
448 necessary. */
449 static int reg_set_table_size;
450
451 /* Amount to grow `reg_set_table' by when it's full. */
452 #define REG_SET_TABLE_SLOP 100
453
454 /* This is a list of expressions which are MEMs and will be used by load
455 or store motion.
456 Load motion tracks MEMs which aren't killed by
457 anything except themselves (i.e., loads and stores to a single location).
458 We can then allow movement of these MEM refs with a little special
459 allowance (all stores copy the same value to the reaching reg used
460 for the loads). This means all values used to store into memory must have
461 no side effects, so we can re-issue the setter value.
462 Store motion uses this structure as an expression table to track stores
463 which look interesting, and might be movable towards the exit block. */
464
465 struct ls_expr
466 {
467 struct expr * expr; /* Gcse expression reference for LM. */
468 rtx pattern; /* Pattern of this mem. */
469 rtx loads; /* INSN list of loads seen. */
470 rtx stores; /* INSN list of stores seen. */
471 struct ls_expr * next; /* Next in the list. */
472 int invalid; /* Invalid for some reason. */
473 int index; /* If it maps to a bitmap index. */
474 int hash_index; /* Index when in a hash table. */
475 rtx reaching_reg; /* Register to use when re-writing. */
476 };
477
478 /* Head of the list of load/store memory refs. */
479 static struct ls_expr * pre_ldst_mems = NULL;
480
481 /* Bitmap containing one bit for each register in the program.
482 Used when performing GCSE to track which registers have been set since
483 the start of the basic block. */
484 static sbitmap reg_set_bitmap;
485
486 /* For each block, a bitmap of registers set in the block.
487 This is used by expr_killed_p and compute_transp.
488 It is computed during hash table computation and not by compute_sets
489 as it includes registers added since the last pass (or between cprop and
490 gcse) and it's currently not easy to realloc sbitmap vectors. */
491 static sbitmap *reg_set_in_block;
492
493 /* Array, indexed by basic block number for a list of insns which modify
494 memory within that block. */
495 static rtx * modify_mem_list;
496
497 /* This array parallels modify_mem_list, but is kept canonicalized. */
498 static rtx * canon_modify_mem_list;
499 /* Various variables for statistics gathering. */
500
501 /* Memory used in a pass.
502 This isn't intended to be absolutely precise. Its intent is only
503 to keep an eye on memory usage. */
504 static int bytes_used;
505
506 /* GCSE substitutions made. */
507 static int gcse_subst_count;
508 /* Number of copy instructions created. */
509 static int gcse_create_count;
510 /* Number of constants propagated. */
511 static int const_prop_count;
512 /* Number of copies propagated. */
513 static int copy_prop_count;
514 \f
515 /* These variables are used by classic GCSE.
516 Normally they'd be defined a bit later, but `rd_gen' needs to
517 be declared sooner. */
518
519 /* Each block has a bitmap of each type.
520 The length of each block's bitmap is:
521
522 max_cuid - for reaching definitions
523 n_exprs - for available expressions
524
525 Thus we view the bitmaps as 2-dimensional arrays, i.e.
526 rd_kill[block_num][cuid_num]
527 ae_kill[block_num][expr_num] */
528
529 /* For reaching defs */
530 static sbitmap *rd_kill, *rd_gen, *reaching_defs, *rd_out;
531
532 /* for available exprs */
533 static sbitmap *ae_kill, *ae_gen, *ae_in, *ae_out;
534
535 /* Objects of this type are passed around by the null-pointer check
536 removal routines. */
537 struct null_pointer_info
538 {
539 /* The basic block being processed. */
540 int current_block;
541 /* The first register to be handled in this pass. */
542 unsigned int min_reg;
543 /* One greater than the last register to be handled in this pass. */
544 unsigned int max_reg;
545 sbitmap *nonnull_local;
546 sbitmap *nonnull_killed;
547 };
548 \f
549 static void compute_can_copy PARAMS ((void));
550 static char *gmalloc PARAMS ((unsigned int));
551 static char *grealloc PARAMS ((char *, unsigned int));
552 static char *gcse_alloc PARAMS ((unsigned long));
553 static void alloc_gcse_mem PARAMS ((rtx));
554 static void free_gcse_mem PARAMS ((void));
555 static void alloc_reg_set_mem PARAMS ((int));
556 static void free_reg_set_mem PARAMS ((void));
557 static int get_bitmap_width PARAMS ((int, int, int));
558 static void record_one_set PARAMS ((int, rtx));
559 static void record_set_info PARAMS ((rtx, rtx, void *));
560 static void compute_sets PARAMS ((rtx));
561 static void hash_scan_insn PARAMS ((rtx, int, int));
562 static void hash_scan_set PARAMS ((rtx, rtx, int));
563 static void hash_scan_clobber PARAMS ((rtx, rtx));
564 static void hash_scan_call PARAMS ((rtx, rtx));
565 static int want_to_gcse_p PARAMS ((rtx));
566 static int oprs_unchanged_p PARAMS ((rtx, rtx, int));
567 static int oprs_anticipatable_p PARAMS ((rtx, rtx));
568 static int oprs_available_p PARAMS ((rtx, rtx));
569 static void insert_expr_in_table PARAMS ((rtx, enum machine_mode, rtx,
570 int, int));
571 static void insert_set_in_table PARAMS ((rtx, rtx));
572 static unsigned int hash_expr PARAMS ((rtx, enum machine_mode, int *, int));
573 static unsigned int hash_expr_1 PARAMS ((rtx, enum machine_mode, int *));
574 static unsigned int hash_string_1 PARAMS ((const char *));
575 static unsigned int hash_set PARAMS ((int, int));
576 static int expr_equiv_p PARAMS ((rtx, rtx));
577 static void record_last_reg_set_info PARAMS ((rtx, int));
578 static void record_last_mem_set_info PARAMS ((rtx));
579 static void record_last_set_info PARAMS ((rtx, rtx, void *));
580 static void compute_hash_table PARAMS ((int));
581 static void alloc_set_hash_table PARAMS ((int));
582 static void free_set_hash_table PARAMS ((void));
583 static void compute_set_hash_table PARAMS ((void));
584 static void alloc_expr_hash_table PARAMS ((unsigned int));
585 static void free_expr_hash_table PARAMS ((void));
586 static void compute_expr_hash_table PARAMS ((void));
587 static void dump_hash_table PARAMS ((FILE *, const char *, struct expr **,
588 int, int));
589 static struct expr *lookup_expr PARAMS ((rtx));
590 static struct expr *lookup_set PARAMS ((unsigned int, rtx));
591 static struct expr *next_set PARAMS ((unsigned int, struct expr *));
592 static void reset_opr_set_tables PARAMS ((void));
593 static int oprs_not_set_p PARAMS ((rtx, rtx));
594 static void mark_call PARAMS ((rtx));
595 static void mark_set PARAMS ((rtx, rtx));
596 static void mark_clobber PARAMS ((rtx, rtx));
597 static void mark_oprs_set PARAMS ((rtx));
598 static void alloc_cprop_mem PARAMS ((int, int));
599 static void free_cprop_mem PARAMS ((void));
600 static void compute_transp PARAMS ((rtx, int, sbitmap *, int));
601 static void compute_transpout PARAMS ((void));
602 static void compute_local_properties PARAMS ((sbitmap *, sbitmap *, sbitmap *,
603 int));
604 static void compute_cprop_data PARAMS ((void));
605 static void find_used_regs PARAMS ((rtx *, void *));
606 static int try_replace_reg PARAMS ((rtx, rtx, rtx));
607 static struct expr *find_avail_set PARAMS ((int, rtx));
608 static int cprop_jump PARAMS ((basic_block, rtx, rtx, rtx));
609 #ifdef HAVE_cc0
610 static int cprop_cc0_jump PARAMS ((basic_block, rtx, struct reg_use *, rtx));
611 #endif
612 static void mems_conflict_for_gcse_p PARAMS ((rtx, rtx, void *));
613 static int load_killed_in_block_p PARAMS ((basic_block, int, rtx, int));
614 static void canon_list_insert PARAMS ((rtx, rtx, void *));
615 static int cprop_insn PARAMS ((basic_block, rtx, int));
616 static int cprop PARAMS ((int));
617 static int one_cprop_pass PARAMS ((int, int));
618 static void alloc_pre_mem PARAMS ((int, int));
619 static void free_pre_mem PARAMS ((void));
620 static void compute_pre_data PARAMS ((void));
621 static int pre_expr_reaches_here_p PARAMS ((basic_block, struct expr *,
622 basic_block));
623 static void insert_insn_end_bb PARAMS ((struct expr *, basic_block, int));
624 static void pre_insert_copy_insn PARAMS ((struct expr *, rtx));
625 static void pre_insert_copies PARAMS ((void));
626 static int pre_delete PARAMS ((void));
627 static int pre_gcse PARAMS ((void));
628 static int one_pre_gcse_pass PARAMS ((int));
629 static void add_label_notes PARAMS ((rtx, rtx));
630 static void alloc_code_hoist_mem PARAMS ((int, int));
631 static void free_code_hoist_mem PARAMS ((void));
632 static void compute_code_hoist_vbeinout PARAMS ((void));
633 static void compute_code_hoist_data PARAMS ((void));
634 static int hoist_expr_reaches_here_p PARAMS ((basic_block, int, basic_block,
635 char *));
636 static void hoist_code PARAMS ((void));
637 static int one_code_hoisting_pass PARAMS ((void));
638 static void alloc_rd_mem PARAMS ((int, int));
639 static void free_rd_mem PARAMS ((void));
640 static void handle_rd_kill_set PARAMS ((rtx, int, basic_block));
641 static void compute_kill_rd PARAMS ((void));
642 static void compute_rd PARAMS ((void));
643 static void alloc_avail_expr_mem PARAMS ((int, int));
644 static void free_avail_expr_mem PARAMS ((void));
645 static void compute_ae_gen PARAMS ((void));
646 static int expr_killed_p PARAMS ((rtx, basic_block));
647 static void compute_ae_kill PARAMS ((sbitmap *, sbitmap *));
648 static int expr_reaches_here_p PARAMS ((struct occr *, struct expr *,
649 basic_block, int));
650 static rtx computing_insn PARAMS ((struct expr *, rtx));
651 static int def_reaches_here_p PARAMS ((rtx, rtx));
652 static int can_disregard_other_sets PARAMS ((struct reg_set **, rtx, int));
653 static int handle_avail_expr PARAMS ((rtx, struct expr *));
654 static int classic_gcse PARAMS ((void));
655 static int one_classic_gcse_pass PARAMS ((int));
656 static void invalidate_nonnull_info PARAMS ((rtx, rtx, void *));
657 static void delete_null_pointer_checks_1 PARAMS ((varray_type *, unsigned int *,
658 sbitmap *, sbitmap *,
659 struct null_pointer_info *));
660 static rtx process_insert_insn PARAMS ((struct expr *));
661 static int pre_edge_insert PARAMS ((struct edge_list *, struct expr **));
662 static int expr_reaches_here_p_work PARAMS ((struct occr *, struct expr *,
663 basic_block, int, char *));
664 static int pre_expr_reaches_here_p_work PARAMS ((basic_block, struct expr *,
665 basic_block, char *));
666 static struct ls_expr * ldst_entry PARAMS ((rtx));
667 static void free_ldst_entry PARAMS ((struct ls_expr *));
668 static void free_ldst_mems PARAMS ((void));
669 static void print_ldst_list PARAMS ((FILE *));
670 static struct ls_expr * find_rtx_in_ldst PARAMS ((rtx));
671 static int enumerate_ldsts PARAMS ((void));
672 static inline struct ls_expr * first_ls_expr PARAMS ((void));
673 static inline struct ls_expr * next_ls_expr PARAMS ((struct ls_expr *));
674 static int simple_mem PARAMS ((rtx));
675 static void invalidate_any_buried_refs PARAMS ((rtx));
676 static void compute_ld_motion_mems PARAMS ((void));
677 static void trim_ld_motion_mems PARAMS ((void));
678 static void update_ld_motion_stores PARAMS ((struct expr *));
679 static void reg_set_info PARAMS ((rtx, rtx, void *));
680 static int store_ops_ok PARAMS ((rtx, basic_block));
681 static void find_moveable_store PARAMS ((rtx));
682 static int compute_store_table PARAMS ((void));
683 static int load_kills_store PARAMS ((rtx, rtx));
684 static int find_loads PARAMS ((rtx, rtx));
685 static int store_killed_in_insn PARAMS ((rtx, rtx));
686 static int store_killed_after PARAMS ((rtx, rtx, basic_block));
687 static int store_killed_before PARAMS ((rtx, rtx, basic_block));
688 static void build_store_vectors PARAMS ((void));
689 static void insert_insn_start_bb PARAMS ((rtx, basic_block));
690 static int insert_store PARAMS ((struct ls_expr *, edge));
691 static void replace_store_insn PARAMS ((rtx, rtx, basic_block));
692 static void delete_store PARAMS ((struct ls_expr *,
693 basic_block));
694 static void free_store_memory PARAMS ((void));
695 static void store_motion PARAMS ((void));
696 \f
697 /* Entry point for global common subexpression elimination.
698 F is the first instruction in the function. */
699
700 int
701 gcse_main (f, file)
702 rtx f;
703 FILE *file;
704 {
705 int changed, pass;
706 /* Bytes used at start of pass. */
707 int initial_bytes_used;
708 /* Maximum number of bytes used by a pass. */
709 int max_pass_bytes;
710 /* Point to release obstack data from for each pass. */
711 char *gcse_obstack_bottom;
712
713 /* Insertion of instructions on edges can create new basic blocks; we
714 need the original basic block count so that we can properly deallocate
715 arrays sized on the number of basic blocks originally in the cfg. */
716 int orig_bb_count;
717 /* We do not construct an accurate cfg in functions which call
718 setjmp, so just punt to be safe. */
719 if (current_function_calls_setjmp)
720 return 0;
721
722 /* Assume that we do not need to run jump optimizations after gcse. */
723 run_jump_opt_after_gcse = 0;
724
725 /* For calling dump_foo fns from gdb. */
726 debug_stderr = stderr;
727 gcse_file = file;
728
729 /* Identify the basic block information for this function, including
730 successors and predecessors. */
731 max_gcse_regno = max_reg_num ();
732
733 if (file)
734 dump_flow_info (file);
735
736 orig_bb_count = n_basic_blocks;
737 /* Return if there's nothing to do. */
738 if (n_basic_blocks <= 1)
739 return 0;
740
741 /* Trying to perform global optimizations on flow graphs which have
742 a high connectivity will take a long time and is unlikely to be
743 particularly useful.
744
745 In normal circumstances a cfg should have about twice as many edges
746 as blocks. But we do not want to punish small functions which have
747 a couple of switch statements. So we require a relatively large number
748 of basic blocks and the ratio of edges to blocks to be high. */
749 if (n_basic_blocks > 1000 && n_edges / n_basic_blocks >= 20)
750 {
751 if (warn_disabled_optimization)
752 warning ("GCSE disabled: %d > 1000 basic blocks and %d >= 20 edges/basic block",
753 n_basic_blocks, n_edges / n_basic_blocks);
754 return 0;
755 }
756
757 /* If allocating memory for the cprop bitmap would take up too much
758 storage it's better just to disable the optimization. */
759 if ((n_basic_blocks
760 * SBITMAP_SET_SIZE (max_gcse_regno)
761 * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
762 {
763 if (warn_disabled_optimization)
764 warning ("GCSE disabled: %d basic blocks and %d registers",
765 n_basic_blocks, max_gcse_regno);
766
767 return 0;
768 }
769
770 /* See what modes support reg/reg copy operations. */
771 if (! can_copy_init_p)
772 {
773 compute_can_copy ();
774 can_copy_init_p = 1;
775 }
776
777 gcc_obstack_init (&gcse_obstack);
778 bytes_used = 0;
779
780 /* We need alias. */
781 init_alias_analysis ();
782 /* Record where pseudo-registers are set. This data is kept accurate
783 during each pass. ??? We could also record hard-reg information here
784 [since it's unchanging], however it is currently done during hash table
785 computation.
786
787 It may be tempting to compute MEM set information here too, but MEM sets
788 will be subject to code motion one day and thus we need to compute
789 information about memory sets when we build the hash tables. */
790
791 alloc_reg_set_mem (max_gcse_regno);
792 compute_sets (f);
793
794 pass = 0;
795 initial_bytes_used = bytes_used;
796 max_pass_bytes = 0;
797 gcse_obstack_bottom = gcse_alloc (1);
798 changed = 1;
799 while (changed && pass < MAX_GCSE_PASSES)
800 {
801 changed = 0;
802 if (file)
803 fprintf (file, "GCSE pass %d\n\n", pass + 1);
804
805 /* Initialize bytes_used to the space for the pred/succ lists,
806 and the reg_set_table data. */
807 bytes_used = initial_bytes_used;
808
809 /* Each pass may create new registers, so recalculate each time. */
810 max_gcse_regno = max_reg_num ();
811
812 alloc_gcse_mem (f);
813
814 /* Don't allow constant propagation to modify jumps
815 during this pass. */
816 changed = one_cprop_pass (pass + 1, 0);
817
818 if (optimize_size)
819 changed |= one_classic_gcse_pass (pass + 1);
820 else
821 {
822 changed |= one_pre_gcse_pass (pass + 1);
823 /* We may have just created new basic blocks. Release and
824 recompute various things which are sized on the number of
825 basic blocks. */
826 if (changed)
827 {
828 int i;
829
830 for (i = 0; i < orig_bb_count; i++)
831 {
832 if (modify_mem_list[i])
833 free_INSN_LIST_list (modify_mem_list + i);
834 if (canon_modify_mem_list[i])
835 free_INSN_LIST_list (canon_modify_mem_list + i);
836 }
837 modify_mem_list
838 = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx *));
839 canon_modify_mem_list
840 = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx *));
841 memset ((char *) modify_mem_list, 0, n_basic_blocks * sizeof (rtx *));
842 memset ((char *) canon_modify_mem_list, 0, n_basic_blocks * sizeof (rtx *));
843 orig_bb_count = n_basic_blocks;
844 }
845 free_reg_set_mem ();
846 alloc_reg_set_mem (max_reg_num ());
847 compute_sets (f);
848 run_jump_opt_after_gcse = 1;
849 }
850
851 if (max_pass_bytes < bytes_used)
852 max_pass_bytes = bytes_used;
853
854 /* Free up memory, then reallocate for code hoisting. We cannot
855 re-use the existing allocated memory because the tables
856 will not have info for the insns or registers created by
857 partial redundancy elimination. */
858 free_gcse_mem ();
859
860 /* It does not make sense to run code hoisting unless we are optimizing
861 for code size -- it rarely makes programs faster, and can make
862 them bigger if we did partial redundancy elimination (when optimizing
863 for space, we use a classic gcse algorithm instead of partial
864 redundancy algorithms). */
865 if (optimize_size)
866 {
867 max_gcse_regno = max_reg_num ();
868 alloc_gcse_mem (f);
869 changed |= one_code_hoisting_pass ();
870 free_gcse_mem ();
871
872 if (max_pass_bytes < bytes_used)
873 max_pass_bytes = bytes_used;
874 }
875
876 if (file)
877 {
878 fprintf (file, "\n");
879 fflush (file);
880 }
881
882 obstack_free (&gcse_obstack, gcse_obstack_bottom);
883 pass++;
884 }
885
886 /* Do one last pass of copy propagation, including cprop into
887 conditional jumps. */
888
889 max_gcse_regno = max_reg_num ();
890 alloc_gcse_mem (f);
891 /* This time, go ahead and allow cprop to alter jumps. */
892 one_cprop_pass (pass + 1, 1);
893 free_gcse_mem ();
894
895 if (file)
896 {
897 fprintf (file, "GCSE of %s: %d basic blocks, ",
898 current_function_name, n_basic_blocks);
899 fprintf (file, "%d pass%s, %d bytes\n\n",
900 pass, pass > 1 ? "es" : "", max_pass_bytes);
901 }
902
903 obstack_free (&gcse_obstack, NULL);
904 free_reg_set_mem ();
905 /* We are finished with alias. */
906 end_alias_analysis ();
907 allocate_reg_info (max_reg_num (), FALSE, FALSE);
908
909 if (!optimize_size && flag_gcse_sm)
910 store_motion ();
911 /* Record where pseudo-registers are set. */
912 return run_jump_opt_after_gcse;
913 }
914 \f
915 /* Misc. utilities. */
916
917 /* Compute which modes support reg/reg copy operations. */
918
919 static void
920 compute_can_copy ()
921 {
922 int i;
923 #ifndef AVOID_CCMODE_COPIES
924 rtx reg,insn;
925 #endif
926 memset (can_copy_p, 0, NUM_MACHINE_MODES);
927
928 start_sequence ();
929 for (i = 0; i < NUM_MACHINE_MODES; i++)
930 if (GET_MODE_CLASS (i) == MODE_CC)
931 {
932 #ifdef AVOID_CCMODE_COPIES
933 can_copy_p[i] = 0;
934 #else
935 reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
936 insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
937 if (recog (PATTERN (insn), insn, NULL) >= 0)
938 can_copy_p[i] = 1;
939 #endif
940 }
941 else
942 can_copy_p[i] = 1;
943
944 end_sequence ();
945 }
946 \f
947 /* Cover function to xmalloc to record bytes allocated. */
948
949 static char *
950 gmalloc (size)
951 unsigned int size;
952 {
953 bytes_used += size;
954 return xmalloc (size);
955 }
956
957 /* Cover function to xrealloc.
958 We don't record the additional size since we don't know it.
959 It won't affect memory usage stats much anyway. */
960
961 static char *
962 grealloc (ptr, size)
963 char *ptr;
964 unsigned int size;
965 {
966 return xrealloc (ptr, size);
967 }
968
969 /* Cover function to obstack_alloc.
970 We don't need to record the bytes allocated here since
971 obstack_chunk_alloc is set to gmalloc. */
972
973 static char *
974 gcse_alloc (size)
975 unsigned long size;
976 {
977 return (char *) obstack_alloc (&gcse_obstack, size);
978 }
979
980 /* Allocate memory for the cuid mapping array,
981 and reg/memory set tracking tables.
982
983 This is called at the start of each pass. */
984
985 static void
986 alloc_gcse_mem (f)
987 rtx f;
988 {
989 int i,n;
990 rtx insn;
991
992 /* Find the largest UID and create a mapping from UIDs to CUIDs.
993 CUIDs are like UIDs except they increase monotonically, have no gaps,
994 and only apply to real insns. */
995
996 max_uid = get_max_uid ();
997 n = (max_uid + 1) * sizeof (int);
998 uid_cuid = (int *) gmalloc (n);
999 memset ((char *) uid_cuid, 0, n);
1000 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
1001 {
1002 if (INSN_P (insn))
1003 uid_cuid[INSN_UID (insn)] = i++;
1004 else
1005 uid_cuid[INSN_UID (insn)] = i;
1006 }
1007
1008 /* Create a table mapping cuids to insns. */
1009
1010 max_cuid = i;
1011 n = (max_cuid + 1) * sizeof (rtx);
1012 cuid_insn = (rtx *) gmalloc (n);
1013 memset ((char *) cuid_insn, 0, n);
1014 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
1015 if (INSN_P (insn))
1016 CUID_INSN (i++) = insn;
1017
1018 /* Allocate vars to track sets of regs. */
1019 reg_set_bitmap = (sbitmap) sbitmap_alloc (max_gcse_regno);
1020
1021 /* Allocate vars to track sets of regs, memory per block. */
1022 reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks,
1023 max_gcse_regno);
1024 /* Allocate array to keep a list of insns which modify memory in each
1025 basic block. */
1026 modify_mem_list = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx *));
1027 canon_modify_mem_list = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx *));
1028 memset ((char *) modify_mem_list, 0, n_basic_blocks * sizeof (rtx *));
1029 memset ((char *) canon_modify_mem_list, 0, n_basic_blocks * sizeof (rtx *));
1030 }
1031
1032 /* Free memory allocated by alloc_gcse_mem. */
1033
1034 static void
1035 free_gcse_mem ()
1036 {
1037 free (uid_cuid);
1038 free (cuid_insn);
1039
1040 free (reg_set_bitmap);
1041
1042 sbitmap_vector_free (reg_set_in_block);
1043 /* Re-cache any INSN_LIST nodes we have allocated. */
1044 {
1045 int i;
1046
1047 for (i = 0; i < n_basic_blocks; i++)
1048 {
1049 if (modify_mem_list[i])
1050 free_INSN_LIST_list (modify_mem_list + i);
1051 if (canon_modify_mem_list[i])
1052 free_INSN_LIST_list (canon_modify_mem_list + i);
1053 }
1054
1055 free (modify_mem_list);
1056 free (canon_modify_mem_list);
1057 modify_mem_list = 0;
1058 canon_modify_mem_list = 0;
1059 }
1060 }
1061
1062 /* Many of the global optimization algorithms work by solving dataflow
1063 equations for various expressions. Initially, some local value is
1064 computed for each expression in each block. Then, the values across the
1065 various blocks are combined (by following flow graph edges) to arrive at
1066 global values. Conceptually, each set of equations is independent. We
1067 may therefore solve all the equations in parallel, solve them one at a
1068 time, or pick any intermediate approach.
1069
1070 When you're going to need N two-dimensional bitmaps, each X (say, the
1071 number of blocks) by Y (say, the number of expressions), call this
1072 function. It's not important what X and Y represent; only that Y
1073 correspond to the things that can be done in parallel. This function will
1074 return an appropriate chunking factor C; you should solve C sets of
1075 equations in parallel. By going through this function, we can easily
1076 trade space against time; by solving fewer equations in parallel we use
1077 less space. */
1078
1079 static int
1080 get_bitmap_width (n, x, y)
1081 int n;
1082 int x;
1083 int y;
1084 {
1085 /* It's not really worth figuring out *exactly* how much memory will
1086 be used by a particular choice. The important thing is to get
1087 something approximately right. */
1088 size_t max_bitmap_memory = 10 * 1024 * 1024;
1089
1090 /* The number of bytes we'd use for a single column of minimum
1091 width. */
1092 size_t column_size = n * x * sizeof (SBITMAP_ELT_TYPE);
1093
1094 /* Often, it's reasonable just to solve all the equations in
1095 parallel. */
1096 if (column_size * SBITMAP_SET_SIZE (y) <= max_bitmap_memory)
1097 return y;
1098
1099 /* Otherwise, pick the largest width we can, without going over the
1100 limit. */
1101 return SBITMAP_ELT_BITS * ((max_bitmap_memory + column_size - 1)
1102 / column_size);
1103 }
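
/* A worked example, assuming 8-byte SBITMAP_ELT_TYPE (so SBITMAP_ELT_BITS is
   64; the real values depend on the host): with N = 2 bitmaps and X = 1000
   blocks, column_size is 2 * 1000 * 8 = 16000 bytes.  For Y = 10000
   expressions, SBITMAP_SET_SIZE (Y) is 157 and 16000 * 157 is about 2.5MB,
   under the 10MB cap, so all 10000 equations are solved in parallel.  For
   Y = 100000 the product is about 25MB, so the chunk width becomes
   64 * ((10485760 + 15999) / 16000) = 64 * 656 = 41984 equations at a
   time.  */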
1104 \f
1105 /* Compute the local properties of each recorded expression.
1106
1107 Local properties are those that are defined by the block, irrespective of
1108 other blocks.
1109
1110 An expression is transparent in a block if its operands are not modified
1111 in the block.
1112
1113 An expression is computed (locally available) in a block if it is computed
1114 at least once and the expression would contain the same value if the
1115 computation was moved to the end of the block.
1116
1117 An expression is locally anticipatable in a block if it is computed at
1118 least once and the expression would contain the same value if the computation
1119 was moved to the beginning of the block.
1120
1121 We call this routine for cprop, pre and code hoisting. They all compute
1122 basically the same information and thus can easily share this code.
1123
1124 TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
1125 properties. If NULL, then it is not necessary to compute or record that
1126 particular property.
1127
1128 SETP controls which hash table to look at. If zero, this routine looks at
1129 the expr hash table; if nonzero this routine looks at the set hash table.
1130 Additionally, TRANSP is computed as ~TRANSP, since this is really cprop's
1131 ABSALTERED. */
1132
1133 static void
1134 compute_local_properties (transp, comp, antloc, setp)
1135 sbitmap *transp;
1136 sbitmap *comp;
1137 sbitmap *antloc;
1138 int setp;
1139 {
1140 unsigned int i, hash_table_size;
1141 struct expr **hash_table;
1142
1143 /* Initialize any bitmaps that were passed in. */
1144 if (transp)
1145 {
1146 if (setp)
1147 sbitmap_vector_zero (transp, n_basic_blocks);
1148 else
1149 sbitmap_vector_ones (transp, n_basic_blocks);
1150 }
1151
1152 if (comp)
1153 sbitmap_vector_zero (comp, n_basic_blocks);
1154 if (antloc)
1155 sbitmap_vector_zero (antloc, n_basic_blocks);
1156
1157 /* We use the same code for cprop, pre and hoisting. For cprop
1158 we care about the set hash table, for pre and hoisting we
1159 care about the expr hash table. */
1160 hash_table_size = setp ? set_hash_table_size : expr_hash_table_size;
1161 hash_table = setp ? set_hash_table : expr_hash_table;
1162
1163 for (i = 0; i < hash_table_size; i++)
1164 {
1165 struct expr *expr;
1166
1167 for (expr = hash_table[i]; expr != NULL; expr = expr->next_same_hash)
1168 {
1169 int indx = expr->bitmap_index;
1170 struct occr *occr;
1171
1172 /* The expression is transparent in this block if it is not killed.
1173 We start by assuming all are transparent [none are killed], and
1174 then reset the bits for those that are. */
1175 if (transp)
1176 compute_transp (expr->expr, indx, transp, setp);
1177
1178 /* The occurrences recorded in antic_occr are exactly those that
1179 we want to set to non-zero in ANTLOC. */
1180 if (antloc)
1181 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
1182 {
1183 SET_BIT (antloc[BLOCK_NUM (occr->insn)], indx);
1184
1185 /* While we're scanning the table, this is a good place to
1186 initialize this. */
1187 occr->deleted_p = 0;
1188 }
1189
1190 /* The occurrences recorded in avail_occr are exactly those that
1191 we want to set to non-zero in COMP. */
1192 if (comp)
1193 for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
1194 {
1195 SET_BIT (comp[BLOCK_NUM (occr->insn)], indx);
1196
1197 /* While we're scanning the table, this is a good place to
1198 initialize this. */
1199 occr->copied_p = 0;
1200 }
1201
1202 /* While we're scanning the table, this is a good place to
1203 initialize this. */
1204 expr->reaching_reg = 0;
1205 }
1206 }
1207 }
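
/* A small illustration of these local properties (the register numbers are
   arbitrary): in a block containing

	(set (reg 70) (plus (reg 60) (reg 61)))
	...
	(set (reg 60) (const_int 0))

   the PLUS expression is locally anticipatable (it is the first occurrence
   and its operands are not modified before it), but it is not transparent
   in the block and not computed/available at the block end, because reg 60
   is modified afterwards.  */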
1208 \f
1209 /* Register set information.
1210
1211 `reg_set_table' records where each register is set or otherwise
1212 modified. */
1213
1214 static struct obstack reg_set_obstack;
1215
1216 static void
1217 alloc_reg_set_mem (n_regs)
1218 int n_regs;
1219 {
1220 unsigned int n;
1221
1222 reg_set_table_size = n_regs + REG_SET_TABLE_SLOP;
1223 n = reg_set_table_size * sizeof (struct reg_set *);
1224 reg_set_table = (struct reg_set **) gmalloc (n);
1225 memset ((char *) reg_set_table, 0, n);
1226
1227 gcc_obstack_init (&reg_set_obstack);
1228 }
1229
1230 static void
1231 free_reg_set_mem ()
1232 {
1233 free (reg_set_table);
1234 obstack_free (&reg_set_obstack, NULL);
1235 }
1236
1237 /* Record REGNO in the reg_set table. */
1238
1239 static void
1240 record_one_set (regno, insn)
1241 int regno;
1242 rtx insn;
1243 {
1244 /* Allocate a new reg_set element and link it onto the list. */
1245 struct reg_set *new_reg_info;
1246
1247 /* If the table isn't big enough, enlarge it. */
1248 if (regno >= reg_set_table_size)
1249 {
1250 int new_size = regno + REG_SET_TABLE_SLOP;
1251
1252 reg_set_table
1253 = (struct reg_set **) grealloc ((char *) reg_set_table,
1254 new_size * sizeof (struct reg_set *));
1255 memset ((char *) (reg_set_table + reg_set_table_size), 0,
1256 (new_size - reg_set_table_size) * sizeof (struct reg_set *));
1257 reg_set_table_size = new_size;
1258 }
1259
1260 new_reg_info = (struct reg_set *) obstack_alloc (&reg_set_obstack,
1261 sizeof (struct reg_set));
1262 bytes_used += sizeof (struct reg_set);
1263 new_reg_info->insn = insn;
1264 new_reg_info->next = reg_set_table[regno];
1265 reg_set_table[regno] = new_reg_info;
1266 }
1267
1268 /* Called from compute_sets via note_stores to handle one SET or CLOBBER in
1269 an insn. The DATA is really the instruction in which the SET is
1270 occurring. */
1271
1272 static void
1273 record_set_info (dest, setter, data)
1274 rtx dest, setter ATTRIBUTE_UNUSED;
1275 void *data;
1276 {
1277 rtx record_set_insn = (rtx) data;
1278
1279 if (GET_CODE (dest) == REG && REGNO (dest) >= FIRST_PSEUDO_REGISTER)
1280 record_one_set (REGNO (dest), record_set_insn);
1281 }
1282
1283 /* Scan the function and record each set of each pseudo-register.
1284
1285 This is called once, at the start of the gcse pass. See the comments for
1286 `reg_set_table' for further documentation. */
1287
1288 static void
1289 compute_sets (f)
1290 rtx f;
1291 {
1292 rtx insn;
1293
1294 for (insn = f; insn != 0; insn = NEXT_INSN (insn))
1295 if (INSN_P (insn))
1296 note_stores (PATTERN (insn), record_set_info, insn);
1297 }
1298 \f
1299 /* Hash table support. */
1300
1301 /* For each register, the cuid of the first/last insn in the block to set it,
1302 or -1 if not set. */
1303 #define NEVER_SET -1
1304 static int *reg_first_set;
1305 static int *reg_last_set;
1306
1307
1308 /* See whether X, the source of a set, is something we want to consider for
1309 GCSE. */
1310
1311 static int
1312 want_to_gcse_p (x)
1313 rtx x;
1314 {
1315 static rtx test_insn = 0;
1316 int num_clobbers = 0;
1317 int icode;
1318
1319 switch (GET_CODE (x))
1320 {
1321 case REG:
1322 case SUBREG:
1323 case CONST_INT:
1324 case CONST_DOUBLE:
1325 case CALL:
1326 return 0;
1327
1328 default:
1329 break;
1330 }
1331
1332 /* If this is a valid operand, we are OK. If it's VOIDmode, we aren't. */
1333 if (general_operand (x, GET_MODE (x)))
1334 return 1;
1335 else if (GET_MODE (x) == VOIDmode)
1336 return 0;
1337
1338 /* Otherwise, check if we can make a valid insn from it. First initialize
1339 our test insn if we haven't already. */
1340 if (test_insn == 0)
1341 {
1342 test_insn
1343 = make_insn_raw (gen_rtx_SET (VOIDmode,
1344 gen_rtx_REG (word_mode,
1345 FIRST_PSEUDO_REGISTER * 2),
1346 const0_rtx));
1347 NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
1348 ggc_add_rtx_root (&test_insn, 1);
1349 }
1350
1351 /* Now make an insn like the one we would make when GCSE'ing and see if
1352 valid. */
1353 PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
1354 SET_SRC (PATTERN (test_insn)) = x;
1355 return ((icode = recog (PATTERN (test_insn), test_insn, &num_clobbers)) >= 0
1356 && (num_clobbers == 0 || ! added_clobbers_hard_reg_p (icode)));
1357 }
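
/* For illustration (the register numbers are arbitrary): given an insn
   (set (reg 80) (plus:SI (reg 70) (const_int 4))), X here is the PLUS.
   It is neither a bare REG nor a constant, so it becomes a GCSE candidate
   provided the target recognizes (set (reg:SI) (plus:SI (reg:SI)
   (const_int 4))), possibly with added clobbers that do not involve hard
   registers.  A source that is just (reg 70) or (const_int 4) is rejected
   up front, since there is nothing to gain from GCSE-ing it.  */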
1358
1359 /* Return non-zero if the operands of expression X are unchanged from the
1360 start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
1361 or from INSN to the end of INSN's basic block (if AVAIL_P != 0). */
1362
1363 static int
1364 oprs_unchanged_p (x, insn, avail_p)
1365 rtx x, insn;
1366 int avail_p;
1367 {
1368 int i, j;
1369 enum rtx_code code;
1370 const char *fmt;
1371
1372 if (x == 0)
1373 return 1;
1374
1375 code = GET_CODE (x);
1376 switch (code)
1377 {
1378 case REG:
1379 if (avail_p)
1380 return (reg_last_set[REGNO (x)] == NEVER_SET
1381 || reg_last_set[REGNO (x)] < INSN_CUID (insn));
1382 else
1383 return (reg_first_set[REGNO (x)] == NEVER_SET
1384 || reg_first_set[REGNO (x)] >= INSN_CUID (insn));
1385
1386 case MEM:
1387 if (load_killed_in_block_p (BLOCK_FOR_INSN (insn), INSN_CUID (insn),
1388 x, avail_p))
1389 return 0;
1390 else
1391 return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);
1392
1393 case PRE_DEC:
1394 case PRE_INC:
1395 case POST_DEC:
1396 case POST_INC:
1397 case PRE_MODIFY:
1398 case POST_MODIFY:
1399 return 0;
1400
1401 case PC:
1402 case CC0: /*FIXME*/
1403 case CONST:
1404 case CONST_INT:
1405 case CONST_DOUBLE:
1406 case SYMBOL_REF:
1407 case LABEL_REF:
1408 case ADDR_VEC:
1409 case ADDR_DIFF_VEC:
1410 return 1;
1411
1412 default:
1413 break;
1414 }
1415
1416 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
1417 {
1418 if (fmt[i] == 'e')
1419 {
1420 /* If we are about to do the last recursive call needed at this
1421 level, change it into iteration. This function is called enough
1422 to be worth it. */
1423 if (i == 0)
1424 return oprs_unchanged_p (XEXP (x, i), insn, avail_p);
1425
1426 else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
1427 return 0;
1428 }
1429 else if (fmt[i] == 'E')
1430 for (j = 0; j < XVECLEN (x, i); j++)
1431 if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
1432 return 0;
1433 }
1434
1435 return 1;
1436 }
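
/* For example, with AVAIL_P nonzero and X = (plus (reg 70) (reg 71))
   (arbitrary register numbers), the operands are unchanged from INSN to the
   end of INSN's block only if neither register is set at or after INSN in
   that block, i.e. reg_last_set[70] and reg_last_set[71] are each either
   NEVER_SET or less than INSN_CUID (insn).  */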
1437
1438 /* Used for communication between mems_conflict_for_gcse_p and
1439 load_killed_in_block_p. Nonzero if mems_conflict_for_gcse_p finds a
1440 conflict between two memory references. */
1441 static int gcse_mems_conflict_p;
1442
1443 /* Used for communication between mems_conflict_for_gcse_p and
1444 load_killed_in_block_p. This is the memory reference of a load instruction;
1445 mems_conflict_for_gcse_p will see if a memory store conflicts with
1446 this memory load. */
1447 static rtx gcse_mem_operand;
1448
1449 /* DEST is the output of an instruction. If it is a memory reference, and
1450 possibly conflicts with the load found in gcse_mem_operand, then set
1451 gcse_mems_conflict_p to a nonzero value. */
1452
1453 static void
1454 mems_conflict_for_gcse_p (dest, setter, data)
1455 rtx dest, setter ATTRIBUTE_UNUSED;
1456 void *data ATTRIBUTE_UNUSED;
1457 {
1458 while (GET_CODE (dest) == SUBREG
1459 || GET_CODE (dest) == ZERO_EXTRACT
1460 || GET_CODE (dest) == SIGN_EXTRACT
1461 || GET_CODE (dest) == STRICT_LOW_PART)
1462 dest = XEXP (dest, 0);
1463
1464 /* If DEST is not a MEM, then it will not conflict with the load. Note
1465 that function calls are assumed to clobber memory, but are handled
1466 elsewhere. */
1467 if (GET_CODE (dest) != MEM)
1468 return;
1469
1470 /* If we are setting a MEM in our list of specially recognized MEMs,
1471 don't mark as killed this time. */
1472
1473 if (dest == gcse_mem_operand && pre_ldst_mems != NULL)
1474 {
1475 if (!find_rtx_in_ldst (dest))
1476 gcse_mems_conflict_p = 1;
1477 return;
1478 }
1479
1480 if (true_dependence (dest, GET_MODE (dest), gcse_mem_operand,
1481 rtx_addr_varies_p))
1482 gcse_mems_conflict_p = 1;
1483 }
1484
1485 /* Return nonzero if the expression in X (a memory reference) is killed
1486 in block BB before or after the insn with the CUID in UID_LIMIT.
1487 AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
1488 before UID_LIMIT.
1489
1490 To check the entire block, set UID_LIMIT to max_uid + 1 and
1491 AVAIL_P to 0. */
1492
1493 static int
1494 load_killed_in_block_p (bb, uid_limit, x, avail_p)
1495 basic_block bb;
1496 int uid_limit;
1497 rtx x;
1498 int avail_p;
1499 {
1500 rtx list_entry = modify_mem_list[bb->index];
1501 while (list_entry)
1502 {
1503 rtx setter;
1504 /* Ignore entries in the list that do not apply. */
1505 if ((avail_p
1506 && INSN_CUID (XEXP (list_entry, 0)) < uid_limit)
1507 || (! avail_p
1508 && INSN_CUID (XEXP (list_entry, 0)) > uid_limit))
1509 {
1510 list_entry = XEXP (list_entry, 1);
1511 continue;
1512 }
1513
1514 setter = XEXP (list_entry, 0);
1515
1516 /* If SETTER is a call everything is clobbered. Note that calls
1517 to pure functions are never put on the list, so we need not
1518 worry about them. */
1519 if (GET_CODE (setter) == CALL_INSN)
1520 return 1;
1521
1522 /* SETTER must be an INSN of some kind that sets memory. Call
1523 note_stores to examine each hunk of memory that is modified.
1524
1525 The note_stores interface is pretty limited, so we have to
1526 communicate via global variables. Yuk. */
1527 gcse_mem_operand = x;
1528 gcse_mems_conflict_p = 0;
1529 note_stores (PATTERN (setter), mems_conflict_for_gcse_p, NULL);
1530 if (gcse_mems_conflict_p)
1531 return 1;
1532 list_entry = XEXP (list_entry, 1);
1533 }
1534 return 0;
1535 }
1536
1537 /* Return non-zero if the operands of expression X are unchanged from
1538 the start of INSN's basic block up to but not including INSN. */
1539
1540 static int
1541 oprs_anticipatable_p (x, insn)
1542 rtx x, insn;
1543 {
1544 return oprs_unchanged_p (x, insn, 0);
1545 }
1546
1547 /* Return non-zero if the operands of expression X are unchanged from
1548 INSN to the end of INSN's basic block. */
1549
1550 static int
1551 oprs_available_p (x, insn)
1552 rtx x, insn;
1553 {
1554 return oprs_unchanged_p (x, insn, 1);
1555 }
1556
1557 /* Hash expression X.
1558
1559 MODE is only used if X is a CONST_INT. DO_NOT_RECORD_P is a boolean
1560 indicating if a volatile operand is found or if the expression contains
1561 something we don't want to insert in the table.
1562
1563 ??? One might want to merge this with canon_hash. Later. */
1564
1565 static unsigned int
1566 hash_expr (x, mode, do_not_record_p, hash_table_size)
1567 rtx x;
1568 enum machine_mode mode;
1569 int *do_not_record_p;
1570 int hash_table_size;
1571 {
1572 unsigned int hash;
1573
1574 *do_not_record_p = 0;
1575
1576 hash = hash_expr_1 (x, mode, do_not_record_p);
1577 return hash % hash_table_size;
1578 }
1579
1580 /* Hash a string. Just add its bytes up. */
1581
1582 static inline unsigned
1583 hash_string_1 (ps)
1584 const char *ps;
1585 {
1586 unsigned hash = 0;
1587 const unsigned char *p = (const unsigned char *)ps;
1588
1589 if (p)
1590 while (*p)
1591 hash += *p++;
1592
1593 return hash;
1594 }
1595
1596 /* Subroutine of hash_expr to do the actual work. */
1597
1598 static unsigned int
1599 hash_expr_1 (x, mode, do_not_record_p)
1600 rtx x;
1601 enum machine_mode mode;
1602 int *do_not_record_p;
1603 {
1604 int i, j;
1605 unsigned hash = 0;
1606 enum rtx_code code;
1607 const char *fmt;
1608
1609 /* Used to turn recursion into iteration. We can't rely on GCC's
1610 tail-recursion elimination since we need to keep accumulating values
1611 in HASH. */
1612
1613 if (x == 0)
1614 return hash;
1615
1616 repeat:
1617 code = GET_CODE (x);
1618 switch (code)
1619 {
1620 case REG:
1621 hash += ((unsigned int) REG << 7) + REGNO (x);
1622 return hash;
1623
1624 case CONST_INT:
1625 hash += (((unsigned int) CONST_INT << 7) + (unsigned int) mode
1626 + (unsigned int) INTVAL (x));
1627 return hash;
1628
1629 case CONST_DOUBLE:
1630 /* This is like the general case, except that it only counts
1631 the integers representing the constant. */
1632 hash += (unsigned int) code + (unsigned int) GET_MODE (x);
1633 if (GET_MODE (x) != VOIDmode)
1634 for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
1635 hash += (unsigned int) XWINT (x, i);
1636 else
1637 hash += ((unsigned int) CONST_DOUBLE_LOW (x)
1638 + (unsigned int) CONST_DOUBLE_HIGH (x));
1639 return hash;
1640
1641 /* Assume there is only one rtx object for any given label. */
1642 case LABEL_REF:
1643 /* We don't hash on the address of the CODE_LABEL to avoid bootstrap
1644 differences and differences between each stage's debugging dumps. */
1645 hash += (((unsigned int) LABEL_REF << 7)
1646 + CODE_LABEL_NUMBER (XEXP (x, 0)));
1647 return hash;
1648
1649 case SYMBOL_REF:
1650 {
1651 /* Don't hash on the symbol's address to avoid bootstrap differences.
1652 Different hash values may cause expressions to be recorded in
1653 different orders and thus different registers to be used in the
1654 final assembler. This also avoids differences in the dump files
1655 between various stages. */
1656 unsigned int h = 0;
1657 const unsigned char *p = (const unsigned char *) XSTR (x, 0);
1658
1659 while (*p)
1660 h += (h << 7) + *p++; /* ??? revisit */
1661
1662 hash += ((unsigned int) SYMBOL_REF << 7) + h;
1663 return hash;
1664 }
1665
1666 case MEM:
1667 if (MEM_VOLATILE_P (x))
1668 {
1669 *do_not_record_p = 1;
1670 return 0;
1671 }
1672
1673 hash += (unsigned int) MEM;
1674 hash += MEM_ALIAS_SET (x);
1675 x = XEXP (x, 0);
1676 goto repeat;
1677
1678 case PRE_DEC:
1679 case PRE_INC:
1680 case POST_DEC:
1681 case POST_INC:
1682 case PC:
1683 case CC0:
1684 case CALL:
1685 case UNSPEC_VOLATILE:
1686 *do_not_record_p = 1;
1687 return 0;
1688
1689 case ASM_OPERANDS:
1690 if (MEM_VOLATILE_P (x))
1691 {
1692 *do_not_record_p = 1;
1693 return 0;
1694 }
1695 else
1696 {
1697 /* We don't want to take the filename and line into account. */
1698 hash += (unsigned) code + (unsigned) GET_MODE (x)
1699 + hash_string_1 (ASM_OPERANDS_TEMPLATE (x))
1700 + hash_string_1 (ASM_OPERANDS_OUTPUT_CONSTRAINT (x))
1701 + (unsigned) ASM_OPERANDS_OUTPUT_IDX (x);
1702
1703 if (ASM_OPERANDS_INPUT_LENGTH (x))
1704 {
1705 for (i = 1; i < ASM_OPERANDS_INPUT_LENGTH (x); i++)
1706 {
1707 hash += (hash_expr_1 (ASM_OPERANDS_INPUT (x, i),
1708 GET_MODE (ASM_OPERANDS_INPUT (x, i)),
1709 do_not_record_p)
1710 + hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT
1711 (x, i)));
1712 }
1713
1714 hash += hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT (x, 0));
1715 x = ASM_OPERANDS_INPUT (x, 0);
1716 mode = GET_MODE (x);
1717 goto repeat;
1718 }
1719 return hash;
1720 }
1721
1722 default:
1723 break;
1724 }
1725
1726 hash += (unsigned) code + (unsigned) GET_MODE (x);
1727 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
1728 {
1729 if (fmt[i] == 'e')
1730 {
1731 /* If we are about to do the last recursive call
1732 needed at this level, change it into iteration.
1733 This function is called enough to be worth it. */
1734 if (i == 0)
1735 {
1736 x = XEXP (x, i);
1737 goto repeat;
1738 }
1739
1740 hash += hash_expr_1 (XEXP (x, i), 0, do_not_record_p);
1741 if (*do_not_record_p)
1742 return 0;
1743 }
1744
1745 else if (fmt[i] == 'E')
1746 for (j = 0; j < XVECLEN (x, i); j++)
1747 {
1748 hash += hash_expr_1 (XVECEXP (x, i, j), 0, do_not_record_p);
1749 if (*do_not_record_p)
1750 return 0;
1751 }
1752
1753 else if (fmt[i] == 's')
1754 hash += hash_string_1 (XSTR (x, i));
1755 else if (fmt[i] == 'i')
1756 hash += (unsigned int) XINT (x, i);
1757 else
1758 abort ();
1759 }
1760
1761 return hash;
1762 }
1763
1764 /* Hash a set of register REGNO.
1765
1766 Sets are hashed on the register that is set. This simplifies the PRE copy
1767 propagation code.
1768
1769 ??? May need to make things more elaborate. Later, as necessary. */
1770
1771 static unsigned int
1772 hash_set (regno, hash_table_size)
1773 int regno;
1774 int hash_table_size;
1775 {
1776 unsigned int hash;
1777
1778 hash = regno;
1779 return hash % hash_table_size;
1780 }
1781
1782 /* Return non-zero if expression X is equivalent to expression Y.
1783 ??? Borrowed from cse.c. Might want to remerge with cse.c. Later. */
1784
1785 static int
1786 expr_equiv_p (x, y)
1787 rtx x, y;
1788 {
1789 register int i, j;
1790 register enum rtx_code code;
1791 register const char *fmt;
1792
1793 if (x == y)
1794 return 1;
1795
1796 if (x == 0 || y == 0)
1797 return x == y;
1798
1799 code = GET_CODE (x);
1800 if (code != GET_CODE (y))
1801 return 0;
1802
1803 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
1804 if (GET_MODE (x) != GET_MODE (y))
1805 return 0;
1806
1807 switch (code)
1808 {
1809 case PC:
1810 case CC0:
1811 return x == y;
1812
1813 case CONST_INT:
1814 return INTVAL (x) == INTVAL (y);
1815
1816 case LABEL_REF:
1817 return XEXP (x, 0) == XEXP (y, 0);
1818
1819 case SYMBOL_REF:
1820 return XSTR (x, 0) == XSTR (y, 0);
1821
1822 case REG:
1823 return REGNO (x) == REGNO (y);
1824
1825 case MEM:
1826 /* Can't merge two expressions in different alias sets, since we can
1827 decide that the expression is transparent in a block when it isn't,
1828 due to it being set with a different alias set. */
1829 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
1830 return 0;
1831 break;
1832
1833 /* For commutative operations, check both orders. */
1834 case PLUS:
1835 case MULT:
1836 case AND:
1837 case IOR:
1838 case XOR:
1839 case NE:
1840 case EQ:
1841 return ((expr_equiv_p (XEXP (x, 0), XEXP (y, 0))
1842 && expr_equiv_p (XEXP (x, 1), XEXP (y, 1)))
1843 || (expr_equiv_p (XEXP (x, 0), XEXP (y, 1))
1844 && expr_equiv_p (XEXP (x, 1), XEXP (y, 0))));
1845
1846 case ASM_OPERANDS:
1847 /* We don't use the generic code below because we want to
1848 disregard filename and line numbers. */
1849
1850 /* A volatile asm isn't equivalent to any other. */
1851 if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
1852 return 0;
1853
1854 if (GET_MODE (x) != GET_MODE (y)
1855 || strcmp (ASM_OPERANDS_TEMPLATE (x), ASM_OPERANDS_TEMPLATE (y))
1856 || strcmp (ASM_OPERANDS_OUTPUT_CONSTRAINT (x),
1857 ASM_OPERANDS_OUTPUT_CONSTRAINT (y))
1858 || ASM_OPERANDS_OUTPUT_IDX (x) != ASM_OPERANDS_OUTPUT_IDX (y)
1859 || ASM_OPERANDS_INPUT_LENGTH (x) != ASM_OPERANDS_INPUT_LENGTH (y))
1860 return 0;
1861
1862 if (ASM_OPERANDS_INPUT_LENGTH (x))
1863 {
1864 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
1865 if (! expr_equiv_p (ASM_OPERANDS_INPUT (x, i),
1866 ASM_OPERANDS_INPUT (y, i))
1867 || strcmp (ASM_OPERANDS_INPUT_CONSTRAINT (x, i),
1868 ASM_OPERANDS_INPUT_CONSTRAINT (y, i)))
1869 return 0;
1870 }
1871
1872 return 1;
1873
1874 default:
1875 break;
1876 }
1877
1878 /* Compare the elements. If any pair of corresponding elements
1879 fail to match, return 0 for the whole thing. */
1880
1881 fmt = GET_RTX_FORMAT (code);
1882 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1883 {
1884 switch (fmt[i])
1885 {
1886 case 'e':
1887 if (! expr_equiv_p (XEXP (x, i), XEXP (y, i)))
1888 return 0;
1889 break;
1890
1891 case 'E':
1892 if (XVECLEN (x, i) != XVECLEN (y, i))
1893 return 0;
1894 for (j = 0; j < XVECLEN (x, i); j++)
1895 if (! expr_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j)))
1896 return 0;
1897 break;
1898
1899 case 's':
1900 if (strcmp (XSTR (x, i), XSTR (y, i)))
1901 return 0;
1902 break;
1903
1904 case 'i':
1905 if (XINT (x, i) != XINT (y, i))
1906 return 0;
1907 break;
1908
1909 case 'w':
1910 if (XWINT (x, i) != XWINT (y, i))
1911 return 0;
1912 break;
1913
1914 case '0':
1915 break;
1916
1917 default:
1918 abort ();
1919 }
1920 }
1921
1922 return 1;
1923 }
1924
1925 /* Insert expression X in INSN in the hash table.
1926 If it is already present, record it as the last occurrence in INSN's
1927 basic block.
1928
1929 MODE is the mode of the value X is being stored into.
1930 It is only used if X is a CONST_INT.
1931
1932 ANTIC_P is non-zero if X is an anticipatable expression.
1933 AVAIL_P is non-zero if X is an available expression. */
1934
1935 static void
1936 insert_expr_in_table (x, mode, insn, antic_p, avail_p)
1937 rtx x;
1938 enum machine_mode mode;
1939 rtx insn;
1940 int antic_p, avail_p;
1941 {
1942 int found, do_not_record_p;
1943 unsigned int hash;
1944 struct expr *cur_expr, *last_expr = NULL;
1945 struct occr *antic_occr, *avail_occr;
1946 struct occr *last_occr = NULL;
1947
1948 hash = hash_expr (x, mode, &do_not_record_p, expr_hash_table_size);
1949
1950 /* Do not insert expression in table if it contains volatile operands,
1951 or if hash_expr determines the expression is something we don't want
1952 to or can't handle. */
1953 if (do_not_record_p)
1954 return;
1955
1956 cur_expr = expr_hash_table[hash];
1957 found = 0;
1958
1959 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
1960 {
1961 /* If the expression isn't found, save a pointer to the end of
1962 the list. */
1963 last_expr = cur_expr;
1964 cur_expr = cur_expr->next_same_hash;
1965 }
1966
1967 if (! found)
1968 {
1969 cur_expr = (struct expr *) gcse_alloc (sizeof (struct expr));
1970 bytes_used += sizeof (struct expr);
1971 if (expr_hash_table[hash] == NULL)
1972 /* This is the first pattern that hashed to this index. */
1973 expr_hash_table[hash] = cur_expr;
1974 else
1975 /* Add EXPR to end of this hash chain. */
1976 last_expr->next_same_hash = cur_expr;
1977
1978 /* Set the fields of the expr element. */
1979 cur_expr->expr = x;
1980 cur_expr->bitmap_index = n_exprs++;
1981 cur_expr->next_same_hash = NULL;
1982 cur_expr->antic_occr = NULL;
1983 cur_expr->avail_occr = NULL;
1984 }
1985
1986 /* Now record the occurrence(s). */
1987 if (antic_p)
1988 {
1989 antic_occr = cur_expr->antic_occr;
1990
1991 /* Search for another occurrence in the same basic block. */
1992 while (antic_occr && BLOCK_NUM (antic_occr->insn) != BLOCK_NUM (insn))
1993 {
1994 /* If an occurrence isn't found, save a pointer to the end of
1995 the list. */
1996 last_occr = antic_occr;
1997 antic_occr = antic_occr->next;
1998 }
1999
2000 if (antic_occr)
2001 /* Found another instance of the expression in the same basic block.
2002 Prefer the currently recorded one. We want the first one in the
2003 block and the block is scanned from start to end. */
2004 ; /* nothing to do */
2005 else
2006 {
2007 /* First occurrence of this expression in this basic block. */
2008 antic_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
2009 bytes_used += sizeof (struct occr);
2010 /* First occurrence of this expression in any block? */
2011 if (cur_expr->antic_occr == NULL)
2012 cur_expr->antic_occr = antic_occr;
2013 else
2014 last_occr->next = antic_occr;
2015
2016 antic_occr->insn = insn;
2017 antic_occr->next = NULL;
2018 }
2019 }
2020
2021 if (avail_p)
2022 {
2023 avail_occr = cur_expr->avail_occr;
2024
2025 /* Search for another occurrence in the same basic block. */
2026 while (avail_occr && BLOCK_NUM (avail_occr->insn) != BLOCK_NUM (insn))
2027 {
2028 /* If an occurrence isn't found, save a pointer to the end of
2029 the list. */
2030 last_occr = avail_occr;
2031 avail_occr = avail_occr->next;
2032 }
2033
2034 if (avail_occr)
2035 /* Found another instance of the expression in the same basic block.
2036 Prefer this occurrence to the currently recorded one. We want
2037 the last one in the block and the block is scanned from start
2038 to end. */
2039 avail_occr->insn = insn;
2040 else
2041 {
2042 /* First occurrence of this expression in this basic block. */
2043 avail_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
2044 bytes_used += sizeof (struct occr);
2045
2046 /* First occurrence of this expression in any block? */
2047 if (cur_expr->avail_occr == NULL)
2048 cur_expr->avail_occr = avail_occr;
2049 else
2050 last_occr->next = avail_occr;
2051
2052 avail_occr->insn = insn;
2053 avail_occr->next = NULL;
2054 }
2055 }
2056 }
2057
2058 /* Insert pattern X in INSN in the hash table.
2059 X is a SET of a reg to either another reg or a constant.
2060 If it is already present, record it as the last occurrence in INSN's
2061 basic block. */
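/* For example, X might look like (set (reg 110) (reg 109)) for a copy or
   (set (reg 110) (const_int 4)) for a constant; the register numbers are
   purely illustrative.  hash_scan_set only hands such patterns to this
   function.  */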
2062
2063 static void
2064 insert_set_in_table (x, insn)
2065 rtx x;
2066 rtx insn;
2067 {
2068 int found;
2069 unsigned int hash;
2070 struct expr *cur_expr, *last_expr = NULL;
2071 struct occr *cur_occr, *last_occr = NULL;
2072
2073 if (GET_CODE (x) != SET
2074 || GET_CODE (SET_DEST (x)) != REG)
2075 abort ();
2076
2077 hash = hash_set (REGNO (SET_DEST (x)), set_hash_table_size);
2078
2079 cur_expr = set_hash_table[hash];
2080 found = 0;
2081
2082 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
2083 {
2084 /* If the expression isn't found, save a pointer to the end of
2085 the list. */
2086 last_expr = cur_expr;
2087 cur_expr = cur_expr->next_same_hash;
2088 }
2089
2090 if (! found)
2091 {
2092 cur_expr = (struct expr *) gcse_alloc (sizeof (struct expr));
2093 bytes_used += sizeof (struct expr);
2094 if (set_hash_table[hash] == NULL)
2095 /* This is the first pattern that hashed to this index. */
2096 set_hash_table[hash] = cur_expr;
2097 else
2098 /* Add EXPR to end of this hash chain. */
2099 last_expr->next_same_hash = cur_expr;
2100
2101 /* Set the fields of the expr element.
2102 We must copy X because it can be modified when copy propagation is
2103 performed on its operands. */
2104 cur_expr->expr = copy_rtx (x);
2105 cur_expr->bitmap_index = n_sets++;
2106 cur_expr->next_same_hash = NULL;
2107 cur_expr->antic_occr = NULL;
2108 cur_expr->avail_occr = NULL;
2109 }
2110
2111 /* Now record the occurrence. */
2112 cur_occr = cur_expr->avail_occr;
2113
2114 /* Search for another occurrence in the same basic block. */
2115 while (cur_occr && BLOCK_NUM (cur_occr->insn) != BLOCK_NUM (insn))
2116 {
2117 /* If an occurrence isn't found, save a pointer to the end of
2118 the list. */
2119 last_occr = cur_occr;
2120 cur_occr = cur_occr->next;
2121 }
2122
2123 if (cur_occr)
2124 /* Found another instance of the expression in the same basic block.
2125 Prefer this occurrence to the currently recorded one. We want the
2126 last one in the block and the block is scanned from start to end. */
2127 cur_occr->insn = insn;
2128 else
2129 {
2130 /* First occurrence of this expression in this basic block. */
2131 cur_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
2132 bytes_used += sizeof (struct occr);
2133
2134 /* First occurrence of this expression in any block? */
2135 if (cur_expr->avail_occr == NULL)
2136 cur_expr->avail_occr = cur_occr;
2137 else
2138 last_occr->next = cur_occr;
2139
2140 cur_occr->insn = insn;
2141 cur_occr->next = NULL;
2142 }
2143 }
2144
2145 /* Scan pattern PAT of INSN and add an entry to the hash table. If SET_P is
2146 non-zero, this is for the assignment hash table, otherwise it is for the
2147 expression hash table. */
2148
2149 static void
2150 hash_scan_set (pat, insn, set_p)
2151 rtx pat, insn;
2152 int set_p;
2153 {
2154 rtx src = SET_SRC (pat);
2155 rtx dest = SET_DEST (pat);
2156 rtx note;
2157
2158 if (GET_CODE (src) == CALL)
2159 hash_scan_call (src, insn);
2160
2161 else if (GET_CODE (dest) == REG)
2162 {
2163 unsigned int regno = REGNO (dest);
2164 rtx tmp;
2165
2166 /* If this is a single set and we are doing constant propagation,
2167 see if a REG_NOTE shows this to be equivalent to a constant. */
2168 if (set_p && (note = find_reg_equal_equiv_note (insn)) != 0
2169 && CONSTANT_P (XEXP (note, 0)))
2170 src = XEXP (note, 0), pat = gen_rtx_SET (VOIDmode, dest, src);
2171
2172 /* Only record sets of pseudo-regs in the hash table. */
2173 if (! set_p
2174 && regno >= FIRST_PSEUDO_REGISTER
2175 /* Don't GCSE something if we can't do a reg/reg copy. */
2176 && can_copy_p [GET_MODE (dest)]
2177 /* Is SET_SRC something we want to gcse? */
2178 && want_to_gcse_p (src)
2179 /* Don't CSE a nop. */
2180 && ! set_noop_p (pat)
2181 /* Don't GCSE if it has an attached REG_EQUIV note.
2182 At this point only function parameters should have
2183 REG_EQUIV notes, and if the argument slot is used somewhere
2184 explicitly, it means the address of the parameter has been
2185 taken, so we should not extend the lifetime of the pseudo. */
2186 && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
2187 || GET_CODE (XEXP (note, 0)) != MEM))
2188 {
2189 /* An expression is not anticipatable if its operands are
2190 modified before this insn or if this is not the only SET in
2191 this insn. */
2192 int antic_p = oprs_anticipatable_p (src, insn) && single_set (insn);
2193 /* An expression is not available if its operands are
2194 subsequently modified, including this insn. */
2195 int avail_p = oprs_available_p (src, insn);
2196
2197 insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p);
2198 }
2199
2200 /* Record sets for constant/copy propagation. */
2201 else if (set_p
2202 && regno >= FIRST_PSEUDO_REGISTER
2203 && ((GET_CODE (src) == REG
2204 && REGNO (src) >= FIRST_PSEUDO_REGISTER
2205 && can_copy_p [GET_MODE (dest)]
2206 && REGNO (src) != regno)
2207 || GET_CODE (src) == CONST_INT
2208 || GET_CODE (src) == SYMBOL_REF
2209 || GET_CODE (src) == CONST_DOUBLE)
2210 /* A copy is not available if its src or dest is subsequently
2211 modified. Here we want to search from INSN+1 on, but
2212 oprs_available_p searches from INSN on. */
2213 && (insn == BLOCK_END (BLOCK_NUM (insn))
2214 || ((tmp = next_nonnote_insn (insn)) != NULL_RTX
2215 && oprs_available_p (pat, tmp))))
2216 insert_set_in_table (pat, insn);
2217 }
2218 }
2219
2220 static void
2221 hash_scan_clobber (x, insn)
2222 rtx x ATTRIBUTE_UNUSED, insn ATTRIBUTE_UNUSED;
2223 {
2224 /* Currently nothing to do. */
2225 }
2226
2227 static void
2228 hash_scan_call (x, insn)
2229 rtx x ATTRIBUTE_UNUSED, insn ATTRIBUTE_UNUSED;
2230 {
2231 /* Currently nothing to do. */
2232 }
2233
2234 /* Process INSN and add hash table entries as appropriate.
2235
2236 Only available expressions that set a single pseudo-reg are recorded.
2237
2238 Single sets in a PARALLEL could be handled, but it's an extra complication
2239 that isn't dealt with right now. The trick is handling the CLOBBERs that
2240 are also in the PARALLEL. Later.
2241
2242 If SET_P is non-zero, this is for the assignment hash table,
2243 otherwise it is for the expression hash table.
2244 If IN_LIBCALL_BLOCK is nonzero, we are in a libcall block, and should
2245 not record any expressions. */
2246
2247 static void
2248 hash_scan_insn (insn, set_p, in_libcall_block)
2249 rtx insn;
2250 int set_p;
2251 int in_libcall_block;
2252 {
2253 rtx pat = PATTERN (insn);
2254 int i;
2255
2256 if (in_libcall_block)
2257 return;
2258
2259 /* Pick out the sets of INSN and for other forms of instructions record
2260 what's been modified. */
2261
2262 if (GET_CODE (pat) == SET)
2263 hash_scan_set (pat, insn, set_p);
2264 else if (GET_CODE (pat) == PARALLEL)
2265 for (i = 0; i < XVECLEN (pat, 0); i++)
2266 {
2267 rtx x = XVECEXP (pat, 0, i);
2268
2269 if (GET_CODE (x) == SET)
2270 hash_scan_set (x, insn, set_p);
2271 else if (GET_CODE (x) == CLOBBER)
2272 hash_scan_clobber (x, insn);
2273 else if (GET_CODE (x) == CALL)
2274 hash_scan_call (x, insn);
2275 }
2276
2277 else if (GET_CODE (pat) == CLOBBER)
2278 hash_scan_clobber (pat, insn);
2279 else if (GET_CODE (pat) == CALL)
2280 hash_scan_call (pat, insn);
2281 }
2282
2283 static void
2284 dump_hash_table (file, name, table, table_size, total_size)
2285 FILE *file;
2286 const char *name;
2287 struct expr **table;
2288 int table_size, total_size;
2289 {
2290 int i;
2291 /* Flattened out table, so it's printed in proper order. */
2292 struct expr **flat_table;
2293 unsigned int *hash_val;
2294 struct expr *expr;
2295
2296 flat_table
2297 = (struct expr **) xcalloc (total_size, sizeof (struct expr *));
2298 hash_val = (unsigned int *) xmalloc (total_size * sizeof (unsigned int));
2299
2300 for (i = 0; i < table_size; i++)
2301 for (expr = table[i]; expr != NULL; expr = expr->next_same_hash)
2302 {
2303 flat_table[expr->bitmap_index] = expr;
2304 hash_val[expr->bitmap_index] = i;
2305 }
2306
2307 fprintf (file, "%s hash table (%d buckets, %d entries)\n",
2308 name, table_size, total_size);
2309
2310 for (i = 0; i < total_size; i++)
2311 if (flat_table[i] != 0)
2312 {
2313 expr = flat_table[i];
2314 fprintf (file, "Index %d (hash value %d)\n ",
2315 expr->bitmap_index, hash_val[i]);
2316 print_rtl (file, expr->expr);
2317 fprintf (file, "\n");
2318 }
2319
2320 fprintf (file, "\n");
2321
2322 free (flat_table);
2323 free (hash_val);
2324 }
2325
2326 /* Record register first/last/block set information for REGNO in INSN.
2327
2328 reg_first_set records the first place in the block where the register
2329 is set and is used to compute "anticipatability".
2330
2331 reg_last_set records the last place in the block where the register
2332 is set and is used to compute "availability".
2333
2334 reg_set_in_block records whether the register is set in the block
2335 and is used to compute "transparency". */
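/* As a purely illustrative example: if register 110 is set by the insns
   with CUIDs 5 and 9 in block 3, then after scanning that block
   reg_first_set[110] == 5, reg_last_set[110] == 9, and bit 110 is set in
   reg_set_in_block[3].  */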
2336
2337 static void
2338 record_last_reg_set_info (insn, regno)
2339 rtx insn;
2340 int regno;
2341 {
2342 if (reg_first_set[regno] == NEVER_SET)
2343 reg_first_set[regno] = INSN_CUID (insn);
2344
2345 reg_last_set[regno] = INSN_CUID (insn);
2346 SET_BIT (reg_set_in_block[BLOCK_NUM (insn)], regno);
2347 }
2348
2349
2350 /* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
2351 Note we store a pair of elements in the list, so they have to be
2352 taken off pairwise. */
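/* Concretely, for each MEM destination the code below first pushes the
   MEM's canonicalized address and then the MEM itself, so the front of
   canon_modify_mem_list[bb] reads (mem, canon-addr, mem, canon-addr, ...),
   each MEM immediately followed by its canonicalized address.  */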
2353
2354 static void
2355 canon_list_insert (dest, unused1, v_insn)
2356 rtx dest ATTRIBUTE_UNUSED;
2357 rtx unused1 ATTRIBUTE_UNUSED;
2358 void * v_insn;
2359 {
2360 rtx dest_addr, insn;
2361
2362 while (GET_CODE (dest) == SUBREG
2363 || GET_CODE (dest) == ZERO_EXTRACT
2364 || GET_CODE (dest) == SIGN_EXTRACT
2365 || GET_CODE (dest) == STRICT_LOW_PART)
2366 dest = XEXP (dest, 0);
2367
2368 /* If DEST is not a MEM, then it will not conflict with a load. Note
2369 that function calls are assumed to clobber memory, but are handled
2370 elsewhere. */
2371
2372 if (GET_CODE (dest) != MEM)
2373 return;
2374
2375 dest_addr = get_addr (XEXP (dest, 0));
2376 dest_addr = canon_rtx (dest_addr);
2377 insn = (rtx) v_insn;
2378
2379 canon_modify_mem_list[BLOCK_NUM (insn)] =
2380 alloc_INSN_LIST (dest_addr, canon_modify_mem_list[BLOCK_NUM (insn)]);
2381 canon_modify_mem_list[BLOCK_NUM (insn)] =
2382 alloc_INSN_LIST (dest, canon_modify_mem_list[BLOCK_NUM (insn)]);
2383 }
2384
2385 /* Record memory modification information for INSN. We do not actually care
2386 about the memory location(s) that are set, or even how they are set (consider
2387 a CALL_INSN). We merely need to record which insns modify memory. */
2388
2389 static void
2390 record_last_mem_set_info (insn)
2391 rtx insn;
2392 {
2393 /* load_killed_in_block_p will handle the case of calls clobbering
2394 everything. */
2395 modify_mem_list[BLOCK_NUM (insn)] =
2396 alloc_INSN_LIST (insn, modify_mem_list[BLOCK_NUM (insn)]);
2397
2398 if (GET_CODE (insn) == CALL_INSN)
2399 {
2400 /* Note that traversals of this list (other than for freeing)
2401 will break after encountering a CALL_INSN. So, there's no
2402 need to insert a pair of items, as canon_list_insert does. */
2403 canon_modify_mem_list[BLOCK_NUM (insn)] =
2404 alloc_INSN_LIST (insn, canon_modify_mem_list[BLOCK_NUM (insn)]);
2405 }
2406 else
2407 note_stores (PATTERN (insn), canon_list_insert, (void *) insn);
2408 }
2409
2410 /* Called from compute_hash_table via note_stores to handle one
2411 SET or CLOBBER in an insn. DATA is really the instruction in which
2412 the SET is taking place. */
2413
2414 static void
2415 record_last_set_info (dest, setter, data)
2416 rtx dest, setter ATTRIBUTE_UNUSED;
2417 void *data;
2418 {
2419 rtx last_set_insn = (rtx) data;
2420
2421 if (GET_CODE (dest) == SUBREG)
2422 dest = SUBREG_REG (dest);
2423
2424 if (GET_CODE (dest) == REG)
2425 record_last_reg_set_info (last_set_insn, REGNO (dest));
2426 else if (GET_CODE (dest) == MEM
2427 /* Ignore pushes, they clobber nothing. */
2428 && ! push_operand (dest, GET_MODE (dest)))
2429 record_last_mem_set_info (last_set_insn);
2430 }
2431
2432 /* Top level function to create an expression or assignment hash table.
2433
2434 Expression entries are placed in the hash table if
2435 - they are of the form (set (pseudo-reg) src),
2436 - src is something we want to perform GCSE on,
2437 - none of the operands are subsequently modified in the block
2438
2439 Assignment entries are placed in the hash table if
2440 - they are of the form (set (pseudo-reg) src),
2441 - src is something we want to perform const/copy propagation on,
2442 - none of the operands or target are subsequently modified in the block
2443
2444 Currently src must be a pseudo-reg or a const_int.
2445
2446 F is the first insn.
2447 SET_P is non-zero for computing the assignment hash table. */
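/* As an illustration (register numbers are arbitrary):
     (set (reg 110) (plus:SI (reg 111) (const_int 4)))
   is a candidate for the expression hash table, the PLUS being something
   we may want to GCSE, whereas
     (set (reg 110) (reg 111))  or  (set (reg 110) (const_int 4))
   are candidates for the assignment hash table used for const/copy
   propagation.  */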
2448
2449 static void
2450 compute_hash_table (set_p)
2451 int set_p;
2452 {
2453 int bb;
2454
2455 /* While we compute the hash table we also compute a bit array of which
2456 registers are set in which blocks.
2457 ??? This isn't needed during const/copy propagation, but it's cheap to
2458 compute. Later. */
2459 sbitmap_vector_zero (reg_set_in_block, n_basic_blocks);
2460
2461 /* Re-cache any INSN_LIST nodes we have allocated. */
2462 {
2463 int i;
2464 for (i = 0; i < n_basic_blocks; i++)
2465 {
2466 if (modify_mem_list[i])
2467 free_INSN_LIST_list (modify_mem_list + i);
2468 if (canon_modify_mem_list[i])
2469 free_INSN_LIST_list (canon_modify_mem_list + i);
2470 }
2471 }
2472 /* Some working arrays used to track first and last set in each block. */
2473 /* ??? One could use alloca here, but at some size a threshold is crossed
2474 beyond which one should use malloc. Are we at that threshold here? */
2475 reg_first_set = (int *) gmalloc (max_gcse_regno * sizeof (int));
2476 reg_last_set = (int *) gmalloc (max_gcse_regno * sizeof (int));
2477
2478 for (bb = 0; bb < n_basic_blocks; bb++)
2479 {
2480 rtx insn;
2481 unsigned int regno;
2482 int in_libcall_block;
2483 unsigned int i;
2484
2485 /* First pass over the instructions records information used to
2486 determine when registers and memory are first and last set.
2487 ??? hard-reg reg_set_in_block computation
2488 could be moved to compute_sets since they currently don't change. */
2489
2490 for (i = 0; i < max_gcse_regno; i++)
2491 reg_first_set[i] = reg_last_set[i] = NEVER_SET;
2492
2493
2494 for (insn = BLOCK_HEAD (bb);
2495 insn && insn != NEXT_INSN (BLOCK_END (bb));
2496 insn = NEXT_INSN (insn))
2497 {
2498 #ifdef NON_SAVING_SETJMP
2499 if (NON_SAVING_SETJMP && GET_CODE (insn) == NOTE
2500 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP)
2501 {
2502 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2503 record_last_reg_set_info (insn, regno);
2504 continue;
2505 }
2506 #endif
2507
2508 if (! INSN_P (insn))
2509 continue;
2510
2511 if (GET_CODE (insn) == CALL_INSN)
2512 {
2513 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2514 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
2515 record_last_reg_set_info (insn, regno);
2516
2517 if (! CONST_CALL_P (insn))
2518 record_last_mem_set_info (insn);
2519 }
2520
2521 note_stores (PATTERN (insn), record_last_set_info, insn);
2522 }
2523
2524 /* The next pass builds the hash table. */
2525
2526 for (insn = BLOCK_HEAD (bb), in_libcall_block = 0;
2527 insn && insn != NEXT_INSN (BLOCK_END (bb));
2528 insn = NEXT_INSN (insn))
2529 if (INSN_P (insn))
2530 {
2531 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
2532 in_libcall_block = 1;
2533 else if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
2534 in_libcall_block = 0;
2535 hash_scan_insn (insn, set_p, in_libcall_block);
2536 }
2537 }
2538
2539 free (reg_first_set);
2540 free (reg_last_set);
2541
2542 /* Catch bugs early. */
2543 reg_first_set = reg_last_set = 0;
2544 }
2545
2546 /* Allocate space for the set hash table.
2547 N_INSNS is the number of instructions in the function.
2548 It is used to determine the number of buckets to use. */
2549
2550 static void
2551 alloc_set_hash_table (n_insns)
2552 int n_insns;
2553 {
2554 int n;
2555
2556 set_hash_table_size = n_insns / 4;
2557 if (set_hash_table_size < 11)
2558 set_hash_table_size = 11;
2559
2560 /* Attempt to maintain efficient use of hash table.
2561 Making it an odd number is simplest for now.
2562 ??? Later take some measurements. */
2563 set_hash_table_size |= 1;
2564 n = set_hash_table_size * sizeof (struct expr *);
2565 set_hash_table = (struct expr **) gmalloc (n);
2566 }
2567
2568 /* Free things allocated by alloc_set_hash_table. */
2569
2570 static void
2571 free_set_hash_table ()
2572 {
2573 free (set_hash_table);
2574 }
2575
2576 /* Compute the hash table for doing copy/const propagation. */
2577
2578 static void
2579 compute_set_hash_table ()
2580 {
2581 /* Initialize count of number of entries in hash table. */
2582 n_sets = 0;
2583 memset ((char *) set_hash_table, 0,
2584 set_hash_table_size * sizeof (struct expr *));
2585
2586 compute_hash_table (1);
2587 }
2588
2589 /* Allocate space for the expression hash table.
2590 N_INSNS is the number of instructions in the function.
2591 It is used to determine the number of buckets to use. */
2592
2593 static void
2594 alloc_expr_hash_table (n_insns)
2595 unsigned int n_insns;
2596 {
2597 int n;
2598
2599 expr_hash_table_size = n_insns / 2;
2600 /* Make sure the amount is usable. */
2601 if (expr_hash_table_size < 11)
2602 expr_hash_table_size = 11;
2603
2604 /* Attempt to maintain efficient use of hash table.
2605 Making it an odd number is simplest for now.
2606 ??? Later take some measurements. */
2607 expr_hash_table_size |= 1;
2608 n = expr_hash_table_size * sizeof (struct expr *);
2609 expr_hash_table = (struct expr **) gmalloc (n);
2610 }
2611
2612 /* Free things allocated by alloc_expr_hash_table. */
2613
2614 static void
2615 free_expr_hash_table ()
2616 {
2617 free (expr_hash_table);
2618 }
2619
2620 /* Compute the hash table for doing GCSE. */
2621
2622 static void
2623 compute_expr_hash_table ()
2624 {
2625 /* Initialize count of number of entries in hash table. */
2626 n_exprs = 0;
2627 memset ((char *) expr_hash_table, 0,
2628 expr_hash_table_size * sizeof (struct expr *));
2629
2630 compute_hash_table (0);
2631 }
2632 \f
2633 /* Expression tracking support. */
2634
2635 /* Lookup pattern PAT in the expression table.
2636 The result is a pointer to the table entry, or NULL if not found. */
2637
2638 static struct expr *
2639 lookup_expr (pat)
2640 rtx pat;
2641 {
2642 int do_not_record_p;
2643 unsigned int hash = hash_expr (pat, GET_MODE (pat), &do_not_record_p,
2644 expr_hash_table_size);
2645 struct expr *expr;
2646
2647 if (do_not_record_p)
2648 return NULL;
2649
2650 expr = expr_hash_table[hash];
2651
2652 while (expr && ! expr_equiv_p (expr->expr, pat))
2653 expr = expr->next_same_hash;
2654
2655 return expr;
2656 }
2657
2658 /* Lookup REGNO in the set table. If PAT is non-NULL look for the entry that
2659 matches it, otherwise return the first entry for REGNO. The result is a
2660 pointer to the table entry, or NULL if not found. */
2661
2662 static struct expr *
2663 lookup_set (regno, pat)
2664 unsigned int regno;
2665 rtx pat;
2666 {
2667 unsigned int hash = hash_set (regno, set_hash_table_size);
2668 struct expr *expr;
2669
2670 expr = set_hash_table[hash];
2671
2672 if (pat)
2673 {
2674 while (expr && ! expr_equiv_p (expr->expr, pat))
2675 expr = expr->next_same_hash;
2676 }
2677 else
2678 {
2679 while (expr && REGNO (SET_DEST (expr->expr)) != regno)
2680 expr = expr->next_same_hash;
2681 }
2682
2683 return expr;
2684 }
2685
2686 /* Return the next entry for REGNO in list EXPR. */
2687
2688 static struct expr *
2689 next_set (regno, expr)
2690 unsigned int regno;
2691 struct expr *expr;
2692 {
2693 do
2694 expr = expr->next_same_hash;
2695 while (expr && REGNO (SET_DEST (expr->expr)) != regno);
2696
2697 return expr;
2698 }
2699
2700 /* Reset tables used to keep track of what's still available [since the
2701 start of the block]. */
2702
2703 static void
2704 reset_opr_set_tables ()
2705 {
2706 /* Maintain a bitmap of which regs have been set since beginning of
2707 the block. */
2708 sbitmap_zero (reg_set_bitmap);
2709
2710 /* Also reset the per-block lists that record which instructions
2711 modify memory; free any INSN_LIST nodes we have allocated for
2712 them. */
2713 {
2714 int i;
2715
2716 /* Re-cache any INSN_LIST nodes we have allocated. */
2717 for (i = 0; i < n_basic_blocks; i++)
2718 {
2719 if (modify_mem_list[i])
2720 free_INSN_LIST_list (modify_mem_list + i);
2721 if (canon_modify_mem_list[i])
2722 free_INSN_LIST_list (canon_modify_mem_list + i);
2723 }
2724 }
2725 }
2726
2727 /* Return non-zero if the operands of X are not set before INSN in
2728 INSN's basic block. */
2729
2730 static int
2731 oprs_not_set_p (x, insn)
2732 rtx x, insn;
2733 {
2734 int i, j;
2735 enum rtx_code code;
2736 const char *fmt;
2737
2738 if (x == 0)
2739 return 1;
2740
2741 code = GET_CODE (x);
2742 switch (code)
2743 {
2744 case PC:
2745 case CC0:
2746 case CONST:
2747 case CONST_INT:
2748 case CONST_DOUBLE:
2749 case SYMBOL_REF:
2750 case LABEL_REF:
2751 case ADDR_VEC:
2752 case ADDR_DIFF_VEC:
2753 return 1;
2754
2755 case MEM:
2756 if (load_killed_in_block_p (BLOCK_FOR_INSN (insn),
2757 INSN_CUID (insn), x, 0))
2758 return 0;
2759 else
2760 return oprs_not_set_p (XEXP (x, 0), insn);
2761
2762 case REG:
2763 return ! TEST_BIT (reg_set_bitmap, REGNO (x));
2764
2765 default:
2766 break;
2767 }
2768
2769 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2770 {
2771 if (fmt[i] == 'e')
2772 {
2773 /* If we are about to do the last recursive call
2774 needed at this level, change it into iteration.
2775 This function is called enough to be worth it. */
2776 if (i == 0)
2777 return oprs_not_set_p (XEXP (x, i), insn);
2778
2779 if (! oprs_not_set_p (XEXP (x, i), insn))
2780 return 0;
2781 }
2782 else if (fmt[i] == 'E')
2783 for (j = 0; j < XVECLEN (x, i); j++)
2784 if (! oprs_not_set_p (XVECEXP (x, i, j), insn))
2785 return 0;
2786 }
2787
2788 return 1;
2789 }
2790
2791 /* Mark things set by a CALL. */
2792
2793 static void
2794 mark_call (insn)
2795 rtx insn;
2796 {
2797 if (! CONST_CALL_P (insn))
2798 record_last_mem_set_info (insn);
2799 }
2800
2801 /* Mark things set by a SET. */
2802
2803 static void
2804 mark_set (pat, insn)
2805 rtx pat, insn;
2806 {
2807 rtx dest = SET_DEST (pat);
2808
2809 while (GET_CODE (dest) == SUBREG
2810 || GET_CODE (dest) == ZERO_EXTRACT
2811 || GET_CODE (dest) == SIGN_EXTRACT
2812 || GET_CODE (dest) == STRICT_LOW_PART)
2813 dest = XEXP (dest, 0);
2814
2815 if (GET_CODE (dest) == REG)
2816 SET_BIT (reg_set_bitmap, REGNO (dest));
2817 else if (GET_CODE (dest) == MEM)
2818 record_last_mem_set_info (insn);
2819
2820 if (GET_CODE (SET_SRC (pat)) == CALL)
2821 mark_call (insn);
2822 }
2823
2824 /* Record things set by a CLOBBER. */
2825
2826 static void
2827 mark_clobber (pat, insn)
2828 rtx pat, insn;
2829 {
2830 rtx clob = XEXP (pat, 0);
2831
2832 while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART)
2833 clob = XEXP (clob, 0);
2834
2835 if (GET_CODE (clob) == REG)
2836 SET_BIT (reg_set_bitmap, REGNO (clob));
2837 else
2838 record_last_mem_set_info (insn);
2839 }
2840
2841 /* Record things set by INSN.
2842 This data is used by oprs_not_set_p. */
2843
2844 static void
2845 mark_oprs_set (insn)
2846 rtx insn;
2847 {
2848 rtx pat = PATTERN (insn);
2849 int i;
2850
2851 if (GET_CODE (pat) == SET)
2852 mark_set (pat, insn);
2853 else if (GET_CODE (pat) == PARALLEL)
2854 for (i = 0; i < XVECLEN (pat, 0); i++)
2855 {
2856 rtx x = XVECEXP (pat, 0, i);
2857
2858 if (GET_CODE (x) == SET)
2859 mark_set (x, insn);
2860 else if (GET_CODE (x) == CLOBBER)
2861 mark_clobber (x, insn);
2862 else if (GET_CODE (x) == CALL)
2863 mark_call (insn);
2864 }
2865
2866 else if (GET_CODE (pat) == CLOBBER)
2867 mark_clobber (pat, insn);
2868 else if (GET_CODE (pat) == CALL)
2869 mark_call (insn);
2870 }
2871
2872 \f
2873 /* Classic GCSE reaching definition support. */
2874
2875 /* Allocate reaching def variables. */
2876
2877 static void
2878 alloc_rd_mem (n_blocks, n_insns)
2879 int n_blocks, n_insns;
2880 {
2881 rd_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2882 sbitmap_vector_zero (rd_kill, n_basic_blocks);
2883
2884 rd_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2885 sbitmap_vector_zero (rd_gen, n_basic_blocks);
2886
2887 reaching_defs = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2888 sbitmap_vector_zero (reaching_defs, n_basic_blocks);
2889
2890 rd_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2891 sbitmap_vector_zero (rd_out, n_basic_blocks);
2892 }
2893
2894 /* Free reaching def variables. */
2895
2896 static void
2897 free_rd_mem ()
2898 {
2899 sbitmap_vector_free (rd_kill);
2900 sbitmap_vector_free (rd_gen);
2901 sbitmap_vector_free (reaching_defs);
2902 sbitmap_vector_free (rd_out);
2903 }
2904
2905 /* INSN sets REGNO in BB. Record in BB's kill set all definitions of REGNO made outside BB. */
2906
2907 static void
2908 handle_rd_kill_set (insn, regno, bb)
2909 rtx insn;
2910 int regno;
2911 basic_block bb;
2912 {
2913 struct reg_set *this_reg;
2914
2915 for (this_reg = reg_set_table[regno]; this_reg; this_reg = this_reg ->next)
2916 if (BLOCK_NUM (this_reg->insn) != BLOCK_NUM (insn))
2917 SET_BIT (rd_kill[bb->index], INSN_CUID (this_reg->insn));
2918 }
2919
2920 /* Compute the set of kills for reaching definitions. */
2921
2922 static void
2923 compute_kill_rd ()
2924 {
2925 int bb, cuid;
2926 unsigned int regno;
2927 int i;
2928
2929 /* For each block
2930 For each set bit in `gen' of the block (i.e. each insn which
2931 generates a definition in the block)
2932 Let regx be the register set by the insn corresponding to that bit
2933 Look at the linked list starting at reg_set_table[regx]
2934 For each setting of regx in the linked list, which is not in
2935 this block
2936 Set the bit in `kill' corresponding to that insn. */
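/* Roughly, this computes for each block BB

     rd_kill[BB] = { INSN_CUID (I) : I is an insn outside BB that sets a
                     register which is also set by a definition generated
                     inside BB }.  */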
2937 for (bb = 0; bb < n_basic_blocks; bb++)
2938 for (cuid = 0; cuid < max_cuid; cuid++)
2939 if (TEST_BIT (rd_gen[bb], cuid))
2940 {
2941 rtx insn = CUID_INSN (cuid);
2942 rtx pat = PATTERN (insn);
2943
2944 if (GET_CODE (insn) == CALL_INSN)
2945 {
2946 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2947 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
2948 handle_rd_kill_set (insn, regno, BASIC_BLOCK (bb));
2949 }
2950
2951 if (GET_CODE (pat) == PARALLEL)
2952 {
2953 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
2954 {
2955 enum rtx_code code = GET_CODE (XVECEXP (pat, 0, i));
2956
2957 if ((code == SET || code == CLOBBER)
2958 && GET_CODE (XEXP (XVECEXP (pat, 0, i), 0)) == REG)
2959 handle_rd_kill_set (insn,
2960 REGNO (XEXP (XVECEXP (pat, 0, i), 0)),
2961 BASIC_BLOCK (bb));
2962 }
2963 }
2964 else if (GET_CODE (pat) == SET && GET_CODE (SET_DEST (pat)) == REG)
2965 /* Each setting of this register outside of this block
2966 must be marked in the set of kills in this block. */
2967 handle_rd_kill_set (insn, REGNO (SET_DEST (pat)), BASIC_BLOCK (bb));
2968 }
2969 }
2970
2971 /* Compute the reaching definitions as in
2972 Compilers Principles, Techniques, and Tools. Aho, Sethi, Ullman,
2973 Chapter 10. It is the same algorithm as used for computing available
2974 expressions but applied to the gens and kills of reaching definitions. */
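/* In dataflow terms, the iteration below computes, for every block BB,

     reaching_defs[BB] = union of rd_out[P] over all predecessors P of BB
     rd_out[BB]        = rd_gen[BB] | (reaching_defs[BB] & ~rd_kill[BB])

   and repeats until no rd_out[BB] changes.  */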
2975
2976 static void
2977 compute_rd ()
2978 {
2979 int bb, changed, passes;
2980
2981 for (bb = 0; bb < n_basic_blocks; bb++)
2982 sbitmap_copy (rd_out[bb] /*dst*/, rd_gen[bb] /*src*/);
2983
2984 passes = 0;
2985 changed = 1;
2986 while (changed)
2987 {
2988 changed = 0;
2989 for (bb = 0; bb < n_basic_blocks; bb++)
2990 {
2991 sbitmap_union_of_preds (reaching_defs[bb], rd_out, bb);
2992 changed |= sbitmap_union_of_diff (rd_out[bb], rd_gen[bb],
2993 reaching_defs[bb], rd_kill[bb]);
2994 }
2995 passes++;
2996 }
2997
2998 if (gcse_file)
2999 fprintf (gcse_file, "reaching def computation: %d passes\n", passes);
3000 }
3001 \f
3002 /* Classic GCSE available expression support. */
3003
3004 /* Allocate memory for available expression computation. */
3005
3006 static void
3007 alloc_avail_expr_mem (n_blocks, n_exprs)
3008 int n_blocks, n_exprs;
3009 {
3010 ae_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3011 sbitmap_vector_zero (ae_kill, n_basic_blocks);
3012
3013 ae_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3014 sbitmap_vector_zero (ae_gen, n_basic_blocks);
3015
3016 ae_in = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3017 sbitmap_vector_zero (ae_in, n_basic_blocks);
3018
3019 ae_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3020 sbitmap_vector_zero (ae_out, n_basic_blocks);
3021 }
3022
3023 static void
3024 free_avail_expr_mem ()
3025 {
3026 sbitmap_vector_free (ae_kill);
3027 sbitmap_vector_free (ae_gen);
3028 sbitmap_vector_free (ae_in);
3029 sbitmap_vector_free (ae_out);
3030 }
3031
3032 /* Compute the set of available expressions generated in each basic block. */
3033
3034 static void
3035 compute_ae_gen ()
3036 {
3037 unsigned int i;
3038 struct expr *expr;
3039 struct occr *occr;
3040
3041 /* For each recorded occurrence of each expression, set ae_gen[bb][expr].
3042 This is all we have to do because an expression is not recorded if it
3043 is not available, and the only expressions we want to work with are the
3044 ones that are recorded. */
3045 for (i = 0; i < expr_hash_table_size; i++)
3046 for (expr = expr_hash_table[i]; expr != 0; expr = expr->next_same_hash)
3047 for (occr = expr->avail_occr; occr != 0; occr = occr->next)
3048 SET_BIT (ae_gen[BLOCK_NUM (occr->insn)], expr->bitmap_index);
3049 }
3050
3051 /* Return non-zero if expression X is killed in BB. */
3052
3053 static int
3054 expr_killed_p (x, bb)
3055 rtx x;
3056 basic_block bb;
3057 {
3058 int i, j;
3059 enum rtx_code code;
3060 const char *fmt;
3061
3062 if (x == 0)
3063 return 1;
3064
3065 code = GET_CODE (x);
3066 switch (code)
3067 {
3068 case REG:
3069 return TEST_BIT (reg_set_in_block[bb->index], REGNO (x));
3070
3071 case MEM:
3072 if (load_killed_in_block_p (bb, get_max_uid () + 1, x, 0))
3073 return 1;
3074 else
3075 return expr_killed_p (XEXP (x, 0), bb);
3076
3077 case PC:
3078 case CC0: /* FIXME */
3079 case CONST:
3080 case CONST_INT:
3081 case CONST_DOUBLE:
3082 case SYMBOL_REF:
3083 case LABEL_REF:
3084 case ADDR_VEC:
3085 case ADDR_DIFF_VEC:
3086 return 0;
3087
3088 default:
3089 break;
3090 }
3091
3092 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3093 {
3094 if (fmt[i] == 'e')
3095 {
3096 /* If we are about to do the last recursive call
3097 needed at this level, change it into iteration.
3098 This function is called enough to be worth it. */
3099 if (i == 0)
3100 return expr_killed_p (XEXP (x, i), bb);
3101 else if (expr_killed_p (XEXP (x, i), bb))
3102 return 1;
3103 }
3104 else if (fmt[i] == 'E')
3105 for (j = 0; j < XVECLEN (x, i); j++)
3106 if (expr_killed_p (XVECEXP (x, i, j), bb))
3107 return 1;
3108 }
3109
3110 return 0;
3111 }
3112
3113 /* Compute the set of available expressions killed in each basic block. */
3114
3115 static void
3116 compute_ae_kill (ae_gen, ae_kill)
3117 sbitmap *ae_gen, *ae_kill;
3118 {
3119 int bb;
3120 unsigned int i;
3121 struct expr *expr;
3122
3123 for (bb = 0; bb < n_basic_blocks; bb++)
3124 for (i = 0; i < expr_hash_table_size; i++)
3125 for (expr = expr_hash_table[i]; expr; expr = expr->next_same_hash)
3126 {
3127 /* Skip EXPR if generated in this block. */
3128 if (TEST_BIT (ae_gen[bb], expr->bitmap_index))
3129 continue;
3130
3131 if (expr_killed_p (expr->expr, BASIC_BLOCK (bb)))
3132 SET_BIT (ae_kill[bb], expr->bitmap_index);
3133 }
3134 }
3135 \f
3136 /* Actually perform the Classic GCSE optimizations. */
3137
3138 /* Return non-zero if occurrence OCCR of expression EXPR reaches block BB.
3139
3140 CHECK_SELF_LOOP is non-zero if we should consider a block reaching itself
3141 as a positive reach. We want to do this when there are two computations
3142 of the expression in the block.
3143
3144 VISITED is a pointer to a working buffer for tracking which BB's have
3145 been visited. It is NULL for the top-level call.
3146
3147 We treat reaching expressions that go through blocks containing the same
3148 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
3149 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
3150 2 as not reaching. The intent is to improve the probability of finding
3151 only one reaching expression and to reduce register lifetimes by picking
3152 the closest such expression. */
3153
3154 static int
3155 expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited)
3156 struct occr *occr;
3157 struct expr *expr;
3158 basic_block bb;
3159 int check_self_loop;
3160 char *visited;
3161 {
3162 edge pred;
3163
3164 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
3165 {
3166 basic_block pred_bb = pred->src;
3167
3168 if (visited[pred_bb->index])
3169 /* This predecessor has already been visited. Nothing to do. */
3170 ;
3171 else if (pred_bb == bb)
3172 {
3173 /* BB loops on itself. */
3174 if (check_self_loop
3175 && TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index)
3176 && BLOCK_NUM (occr->insn) == pred_bb->index)
3177 return 1;
3178
3179 visited[pred_bb->index] = 1;
3180 }
3181
3182 /* Ignore this predecessor if it kills the expression. */
3183 else if (TEST_BIT (ae_kill[pred_bb->index], expr->bitmap_index))
3184 visited[pred_bb->index] = 1;
3185
3186 /* Does this predecessor generate this expression? */
3187 else if (TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index))
3188 {
3189 /* Is this the occurrence we're looking for?
3190 Note that there's only one generating occurrence per block
3191 so we just need to check the block number. */
3192 if (BLOCK_NUM (occr->insn) == pred_bb->index)
3193 return 1;
3194
3195 visited[pred_bb->index] = 1;
3196 }
3197
3198 /* Neither gen nor kill. */
3199 else
3200 {
3201 visited[pred_bb->index] = 1;
3202 if (expr_reaches_here_p_work (occr, expr, pred_bb, check_self_loop,
3203 visited))
3204
3205 return 1;
3206 }
3207 }
3208
3209 /* All paths have been checked. */
3210 return 0;
3211 }
3212
3213 /* This wrapper for expr_reaches_here_p_work ensures that any memory
3214 allocated for that function is freed when it returns. */
3215
3216 static int
3217 expr_reaches_here_p (occr, expr, bb, check_self_loop)
3218 struct occr *occr;
3219 struct expr *expr;
3220 basic_block bb;
3221 int check_self_loop;
3222 {
3223 int rval;
3224 char *visited = (char *) xcalloc (n_basic_blocks, 1);
3225
3226 rval = expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited);
3227
3228 free (visited);
3229 return rval;
3230 }
3231
3232 /* Return the instruction that computes EXPR that reaches INSN's basic block.
3233 If there is more than one such instruction, return NULL.
3234
3235 Called only by handle_avail_expr. */
3236
3237 static rtx
3238 computing_insn (expr, insn)
3239 struct expr *expr;
3240 rtx insn;
3241 {
3242 basic_block bb = BLOCK_FOR_INSN (insn);
3243
3244 if (expr->avail_occr->next == NULL)
3245 {
3246 if (BLOCK_FOR_INSN (expr->avail_occr->insn) == bb)
3247 /* The available expression is actually itself
3248 (i.e. a loop in the flow graph) so do nothing. */
3249 return NULL;
3250
3251 /* (FIXME) Case that we found a pattern that was created by
3252 a substitution that took place. */
3253 return expr->avail_occr->insn;
3254 }
3255 else
3256 {
3257 /* Pattern is computed more than once.
3258 Search backwards from this insn to see how many of these
3259 computations actually reach this insn. */
3260 struct occr *occr;
3261 rtx insn_computes_expr = NULL;
3262 int can_reach = 0;
3263
3264 for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
3265 {
3266 if (BLOCK_FOR_INSN (occr->insn) == bb)
3267 {
3268 /* The expression is generated in this block.
3269 The only time we care about this is when the expression
3270 is generated later in the block [and thus there's a loop].
3271 We let the normal cse pass handle the other cases. */
3272 if (INSN_CUID (insn) < INSN_CUID (occr->insn)
3273 && expr_reaches_here_p (occr, expr, bb, 1))
3274 {
3275 can_reach++;
3276 if (can_reach > 1)
3277 return NULL;
3278
3279 insn_computes_expr = occr->insn;
3280 }
3281 }
3282 else if (expr_reaches_here_p (occr, expr, bb, 0))
3283 {
3284 can_reach++;
3285 if (can_reach > 1)
3286 return NULL;
3287
3288 insn_computes_expr = occr->insn;
3289 }
3290 }
3291
3292 if (insn_computes_expr == NULL)
3293 abort ();
3294
3295 return insn_computes_expr;
3296 }
3297 }
3298
3299 /* Return non-zero if the definition in DEF_INSN can reach INSN.
3300 Only called by can_disregard_other_sets. */
3301
3302 static int
3303 def_reaches_here_p (insn, def_insn)
3304 rtx insn, def_insn;
3305 {
3306 rtx reg;
3307
3308 if (TEST_BIT (reaching_defs[BLOCK_NUM (insn)], INSN_CUID (def_insn)))
3309 return 1;
3310
3311 if (BLOCK_NUM (insn) == BLOCK_NUM (def_insn))
3312 {
3313 if (INSN_CUID (def_insn) < INSN_CUID (insn))
3314 {
3315 if (GET_CODE (PATTERN (def_insn)) == PARALLEL)
3316 return 1;
3317 else if (GET_CODE (PATTERN (def_insn)) == CLOBBER)
3318 reg = XEXP (PATTERN (def_insn), 0);
3319 else if (GET_CODE (PATTERN (def_insn)) == SET)
3320 reg = SET_DEST (PATTERN (def_insn));
3321 else
3322 abort ();
3323
3324 return ! reg_set_between_p (reg, NEXT_INSN (def_insn), insn);
3325 }
3326 else
3327 return 0;
3328 }
3329
3330 return 0;
3331 }
3332
3333 /* Return non-zero if *ADDR_THIS_REG can only have one value at INSN. The
3334 value returned is the number of definitions that reach INSN. Returning a
3335 value of zero means that [maybe] more than one definition reaches INSN and
3336 the caller can't perform whatever optimization it is trying. i.e. it is
3337 always safe to return zero. */
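/* For example, handle_avail_expr below calls
     can_disregard_other_sets (&this_reg, insn, 0)
   and goes ahead with its substitution only when the return value is
   non-zero (or when the register in question is set exactly once).  */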
3338
3339 static int
3340 can_disregard_other_sets (addr_this_reg, insn, for_combine)
3341 struct reg_set **addr_this_reg;
3342 rtx insn;
3343 int for_combine;
3344 {
3345 int number_of_reaching_defs = 0;
3346 struct reg_set *this_reg;
3347
3348 for (this_reg = *addr_this_reg; this_reg != 0; this_reg = this_reg->next)
3349 if (def_reaches_here_p (insn, this_reg->insn))
3350 {
3351 number_of_reaching_defs++;
3352 /* Ignore parallels for now. */
3353 if (GET_CODE (PATTERN (this_reg->insn)) == PARALLEL)
3354 return 0;
3355
3356 if (!for_combine
3357 && (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER
3358 || ! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)),
3359 SET_SRC (PATTERN (insn)))))
3360 /* A setting of the reg to a different value reaches INSN. */
3361 return 0;
3362
3363 if (number_of_reaching_defs > 1)
3364 {
3365 /* We are OK only if, in this setting, the value the register is
3366 being set to equals the value it was previously set to, and this
3367 setting reaches the insn on which we are trying to do the
3368 substitution. */
3369 if (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER)
3370 return 0;
3371 else if (! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)),
3372 SET_SRC (PATTERN (insn))))
3373 return 0;
3374 }
3375
3376 *addr_this_reg = this_reg;
3377 }
3378
3379 return number_of_reaching_defs;
3380 }
3381
3382 /* The expression computed by INSN is available and the substitution
3383 is legal, so try to perform the substitution.
3384
3385 The result is non-zero if any changes were made. */
3386
3387 static int
3388 handle_avail_expr (insn, expr)
3389 rtx insn;
3390 struct expr *expr;
3391 {
3392 rtx pat, insn_computes_expr, expr_set;
3393 rtx to;
3394 struct reg_set *this_reg;
3395 int found_setting, use_src;
3396 int changed = 0;
3397
3398 /* We only handle the case where one computation of the expression
3399 reaches this instruction. */
3400 insn_computes_expr = computing_insn (expr, insn);
3401 if (insn_computes_expr == NULL)
3402 return 0;
3403 expr_set = single_set (insn_computes_expr);
3404 if (!expr_set)
3405 abort ();
3406
3407 found_setting = 0;
3408 use_src = 0;
3409
3410 /* At this point we know only one computation of EXPR outside of this
3411 block reaches this insn. Now try to find a register that the
3412 expression is computed into. */
3413 if (GET_CODE (SET_SRC (expr_set)) == REG)
3414 {
3415 /* This is the case when the available expression that reaches
3416 here has already been handled as an available expression. */
3417 unsigned int regnum_for_replacing
3418 = REGNO (SET_SRC (expr_set));
3419
3420 /* If the register was created by GCSE we can't use `reg_set_table';
3421 however, we know it's set only once. */
3422 if (regnum_for_replacing >= max_gcse_regno
3423 /* If the register the expression is computed into is set only once,
3424 or only one set reaches this insn, we can use it. */
3425 || (((this_reg = reg_set_table[regnum_for_replacing]),
3426 this_reg->next == NULL)
3427 || can_disregard_other_sets (&this_reg, insn, 0)))
3428 {
3429 use_src = 1;
3430 found_setting = 1;
3431 }
3432 }
3433
3434 if (!found_setting)
3435 {
3436 unsigned int regnum_for_replacing
3437 = REGNO (SET_DEST (expr_set));
3438
3439 /* This shouldn't happen. */
3440 if (regnum_for_replacing >= max_gcse_regno)
3441 abort ();
3442
3443 this_reg = reg_set_table[regnum_for_replacing];
3444
3445 /* If the register the expression is computed into is set only once,
3446 or only one set reaches this insn, use it. */
3447 if (this_reg->next == NULL
3448 || can_disregard_other_sets (&this_reg, insn, 0))
3449 found_setting = 1;
3450 }
3451
3452 if (found_setting)
3453 {
3454 pat = PATTERN (insn);
3455 if (use_src)
3456 to = SET_SRC (expr_set);
3457 else
3458 to = SET_DEST (expr_set);
3459 changed = validate_change (insn, &SET_SRC (pat), to, 0);
3460
3461 /* We should be able to ignore the return code from validate_change but
3462 to play it safe we check. */
3463 if (changed)
3464 {
3465 gcse_subst_count++;
3466 if (gcse_file != NULL)
3467 {
3468 fprintf (gcse_file, "GCSE: Replacing the source in insn %d with",
3469 INSN_UID (insn));
3470 fprintf (gcse_file, " reg %d %s insn %d\n",
3471 REGNO (to), use_src ? "from" : "set in",
3472 INSN_UID (insn_computes_expr));
3473 }
3474 }
3475 }
3476
3477 /* The register that the expr is computed into is set more than once. */
3478 else if (1 /*expensive_op(this_pattrn->op) && do_expensive_gcse)*/)
3479 {
3480 /* Insert an insn after INSNX that copies the reg set in INSNX
3481 into a new pseudo register; call this new register REGN.
3482 From INSNB until the end of the basic block, or until REGB is
3483 set, replace all uses of REGB with REGN. */
3484 rtx new_insn;
3485
3486 to = gen_reg_rtx (GET_MODE (SET_DEST (expr_set)));
3487
3488 /* Generate the new insn. */
3489 /* ??? If the change fails, we return 0, even though we created
3490 an insn. I think this is ok. */
3491 new_insn
3492 = emit_insn_after (gen_rtx_SET (VOIDmode, to,
3493 SET_DEST (expr_set)),
3494 insn_computes_expr);
3495
3496 /* Keep block number table up to date. */
3497 set_block_for_new_insns (new_insn, BLOCK_FOR_INSN (insn_computes_expr));
3498
3499 /* Keep register set table up to date. */
3500 record_one_set (REGNO (to), new_insn);
3501
3502 gcse_create_count++;
3503 if (gcse_file != NULL)
3504 {
3505 fprintf (gcse_file, "GCSE: Creating insn %d to copy value of reg %d",
3506 INSN_UID (NEXT_INSN (insn_computes_expr)),
3507 REGNO (SET_SRC (PATTERN (NEXT_INSN (insn_computes_expr)))));
3508 fprintf (gcse_file, ", computed in insn %d,\n",
3509 INSN_UID (insn_computes_expr));
3510 fprintf (gcse_file, " into newly allocated reg %d\n",
3511 REGNO (to));
3512 }
3513
3514 pat = PATTERN (insn);
3515
3516 /* Do register replacement for INSN. */
3517 changed = validate_change (insn, &SET_SRC (pat),
3518 SET_DEST (PATTERN
3519 (NEXT_INSN (insn_computes_expr))),
3520 0);
3521
3522 /* We should be able to ignore the return code from validate_change but
3523 to play it safe we check. */
3524 if (changed)
3525 {
3526 gcse_subst_count++;
3527 if (gcse_file != NULL)
3528 {
3529 fprintf (gcse_file,
3530 "GCSE: Replacing the source in insn %d with reg %d ",
3531 INSN_UID (insn),
3532 REGNO (SET_DEST (PATTERN (NEXT_INSN
3533 (insn_computes_expr)))));
3534 fprintf (gcse_file, "set in insn %d\n",
3535 INSN_UID (insn_computes_expr));
3536 }
3537 }
3538 }
3539
3540 return changed;
3541 }
3542
3543 /* Perform classic GCSE. This is called by one_classic_gcse_pass after all
3544 the dataflow analysis has been done.
3545
3546 The result is non-zero if a change was made. */
3547
3548 static int
3549 classic_gcse ()
3550 {
3551 int bb, changed;
3552 rtx insn;
3553
3554 /* Note we start at block 1. */
3555
3556 changed = 0;
3557 for (bb = 1; bb < n_basic_blocks; bb++)
3558 {
3559 /* Reset tables used to keep track of what's still valid [since the
3560 start of the block]. */
3561 reset_opr_set_tables ();
3562
3563 for (insn = BLOCK_HEAD (bb);
3564 insn != NULL && insn != NEXT_INSN (BLOCK_END (bb));
3565 insn = NEXT_INSN (insn))
3566 {
3567 /* Is insn of form (set (pseudo-reg) ...)? */
3568 if (GET_CODE (insn) == INSN
3569 && GET_CODE (PATTERN (insn)) == SET
3570 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
3571 && REGNO (SET_DEST (PATTERN (insn))) >= FIRST_PSEUDO_REGISTER)
3572 {
3573 rtx pat = PATTERN (insn);
3574 rtx src = SET_SRC (pat);
3575 struct expr *expr;
3576
3577 if (want_to_gcse_p (src)
3578 /* Is the expression recorded? */
3579 && ((expr = lookup_expr (src)) != NULL)
3580 /* Is the expression available [at the start of the
3581 block]? */
3582 && TEST_BIT (ae_in[bb], expr->bitmap_index)
3583 /* Are the operands unchanged since the start of the
3584 block? */
3585 && oprs_not_set_p (src, insn))
3586 changed |= handle_avail_expr (insn, expr);
3587 }
3588
3589 /* Keep track of everything modified by this insn. */
3590 /* ??? Need to be careful w.r.t. mods done to INSN. */
3591 if (INSN_P (insn))
3592 mark_oprs_set (insn);
3593 }
3594 }
3595
3596 return changed;
3597 }
3598
3599 /* Top level routine to perform one classic GCSE pass.
3600
3601 Return non-zero if a change was made. */
3602
3603 static int
3604 one_classic_gcse_pass (pass)
3605 int pass;
3606 {
3607 int changed = 0;
3608
3609 gcse_subst_count = 0;
3610 gcse_create_count = 0;
3611
3612 alloc_expr_hash_table (max_cuid);
3613 alloc_rd_mem (n_basic_blocks, max_cuid);
3614 compute_expr_hash_table ();
3615 if (gcse_file)
3616 dump_hash_table (gcse_file, "Expression", expr_hash_table,
3617 expr_hash_table_size, n_exprs);
3618
3619 if (n_exprs > 0)
3620 {
3621 compute_kill_rd ();
3622 compute_rd ();
3623 alloc_avail_expr_mem (n_basic_blocks, n_exprs);
3624 compute_ae_gen ();
3625 compute_ae_kill (ae_gen, ae_kill);
3626 compute_available (ae_gen, ae_kill, ae_out, ae_in);
3627 changed = classic_gcse ();
3628 free_avail_expr_mem ();
3629 }
3630
3631 free_rd_mem ();
3632 free_expr_hash_table ();
3633
3634 if (gcse_file)
3635 {
3636 fprintf (gcse_file, "\n");
3637 fprintf (gcse_file, "GCSE of %s, pass %d: %d bytes needed, %d substs,",
3638 current_function_name, pass, bytes_used, gcse_subst_count);
3639 fprintf (gcse_file, "%d insns created\n", gcse_create_count);
3640 }
3641
3642 return changed;
3643 }
3644 \f
3645 /* Compute copy/constant propagation working variables. */
3646
3647 /* Local properties of assignments. */
3648 static sbitmap *cprop_pavloc;
3649 static sbitmap *cprop_absaltered;
3650
3651 /* Global properties of assignments (computed from the local properties). */
3652 static sbitmap *cprop_avin;
3653 static sbitmap *cprop_avout;
3654
3655 /* Allocate vars used for copy/const propagation. N_BLOCKS is the number of
3656 basic blocks. N_SETS is the number of sets. */
3657
3658 static void
3659 alloc_cprop_mem (n_blocks, n_sets)
3660 int n_blocks, n_sets;
3661 {
3662 cprop_pavloc = sbitmap_vector_alloc (n_blocks, n_sets);
3663 cprop_absaltered = sbitmap_vector_alloc (n_blocks, n_sets);
3664
3665 cprop_avin = sbitmap_vector_alloc (n_blocks, n_sets);
3666 cprop_avout = sbitmap_vector_alloc (n_blocks, n_sets);
3667 }
3668
3669 /* Free vars used by copy/const propagation. */
3670
3671 static void
3672 free_cprop_mem ()
3673 {
3674 sbitmap_vector_free (cprop_pavloc);
3675 sbitmap_vector_free (cprop_absaltered);
3676 sbitmap_vector_free (cprop_avin);
3677 sbitmap_vector_free (cprop_avout);
3678 }
3679
3680 /* For each block, compute whether X is transparent. X is either an
3681 expression or an assignment [though we don't care which, for this context
3682 an assignment is treated as an expression]. For each block where an
3683 element of X is modified, set (SET_P == 1) or reset (SET_P == 0) the INDX
3684 bit in BMAP. */
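/* For example (register number invented), the expression
   (plus (reg 100) (const_int 4)) is not transparent in any block that
   contains a set of (reg 100); likewise a MEM expression is not transparent
   in any block containing a recorded store that may conflict with it.  */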
3685
3686 static void
3687 compute_transp (x, indx, bmap, set_p)
3688 rtx x;
3689 int indx;
3690 sbitmap *bmap;
3691 int set_p;
3692 {
3693 int bb, i, j;
3694 enum rtx_code code;
3695 reg_set *r;
3696 const char *fmt;
3697
3698 /* repeat is used to turn tail-recursion into iteration since GCC
3699 can't do it when there's no return value. */
3700 repeat:
3701
3702 if (x == 0)
3703 return;
3704
3705 code = GET_CODE (x);
3706 switch (code)
3707 {
3708 case REG:
3709 if (set_p)
3710 {
3711 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
3712 {
3713 for (bb = 0; bb < n_basic_blocks; bb++)
3714 if (TEST_BIT (reg_set_in_block[bb], REGNO (x)))
3715 SET_BIT (bmap[bb], indx);
3716 }
3717 else
3718 {
3719 for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
3720 SET_BIT (bmap[BLOCK_NUM (r->insn)], indx);
3721 }
3722 }
3723 else
3724 {
3725 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
3726 {
3727 for (bb = 0; bb < n_basic_blocks; bb++)
3728 if (TEST_BIT (reg_set_in_block[bb], REGNO (x)))
3729 RESET_BIT (bmap[bb], indx);
3730 }
3731 else
3732 {
3733 for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
3734 RESET_BIT (bmap[BLOCK_NUM (r->insn)], indx);
3735 }
3736 }
3737
3738 return;
3739
3740 case MEM:
3741 for (bb = 0; bb < n_basic_blocks; bb++)
3742 {
3743 rtx list_entry = canon_modify_mem_list[bb];
3744
3745 while (list_entry)
3746 {
3747 rtx dest, dest_addr;
3748
3749 if (GET_CODE (XEXP (list_entry, 0)) == CALL_INSN)
3750 {
3751 if (set_p)
3752 SET_BIT (bmap[bb], indx);
3753 else
3754 RESET_BIT (bmap[bb], indx);
3755 break;
3756 }
3757 /* LIST_ENTRY must be an INSN of some kind that sets memory.
3758 Examine each hunk of memory that is modified. */
3759
3760 dest = XEXP (list_entry, 0);
3761 list_entry = XEXP (list_entry, 1);
3762 dest_addr = XEXP (list_entry, 0);
3763
3764 if (canon_true_dependence (dest, GET_MODE (dest), dest_addr,
3765 x, rtx_addr_varies_p))
3766 {
3767 if (set_p)
3768 SET_BIT (bmap[bb], indx);
3769 else
3770 RESET_BIT (bmap[bb], indx);
3771 break;
3772 }
3773 list_entry = XEXP (list_entry, 1);
3774 }
3775 }
3776
3777 x = XEXP (x, 0);
3778 goto repeat;
3779
3780 case PC:
3781 case CC0: /*FIXME*/
3782 case CONST:
3783 case CONST_INT:
3784 case CONST_DOUBLE:
3785 case SYMBOL_REF:
3786 case LABEL_REF:
3787 case ADDR_VEC:
3788 case ADDR_DIFF_VEC:
3789 return;
3790
3791 default:
3792 break;
3793 }
3794
3795 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3796 {
3797 if (fmt[i] == 'e')
3798 {
3799 /* If we are about to do the last recursive call
3800 needed at this level, change it into iteration.
3801 This function is called enough to be worth it. */
3802 if (i == 0)
3803 {
3804 x = XEXP (x, i);
3805 goto repeat;
3806 }
3807
3808 compute_transp (XEXP (x, i), indx, bmap, set_p);
3809 }
3810 else if (fmt[i] == 'E')
3811 for (j = 0; j < XVECLEN (x, i); j++)
3812 compute_transp (XVECEXP (x, i, j), indx, bmap, set_p);
3813 }
3814 }
3815
3816 /* Top level routine to do the dataflow analysis needed by copy/const
3817 propagation. */
3818
3819 static void
3820 compute_cprop_data ()
3821 {
3822 compute_local_properties (cprop_absaltered, cprop_pavloc, NULL, 1);
3823 compute_available (cprop_pavloc, cprop_absaltered,
3824 cprop_avout, cprop_avin);
3825 }
3826 \f
3827 /* Copy/constant propagation. */
3828
3829 /* Maximum number of register uses in an insn that we handle. */
3830 #define MAX_USES 8
3831
3832 /* Table of uses found in an insn.
3833 Allocated statically to avoid alloc/free complexity and overhead. */
3834 static struct reg_use reg_use_table[MAX_USES];
3835
3836 /* Index into `reg_use_table' while building it. */
3837 static int reg_use_count;
3838
3839 /* Set up a list of register numbers used in INSN. The found uses are stored
3840 in `reg_use_table'. `reg_use_count' is initialized to zero before entry,
3841 and contains the number of uses in the table upon exit.
3842
3843 ??? If a register appears multiple times we will record it multiple times.
3844 This doesn't hurt anything but it will slow things down. */
3845
3846 static void
3847 find_used_regs (xptr, data)
3848 rtx *xptr;
3849 void *data ATTRIBUTE_UNUSED;
3850 {
3851 int i, j;
3852 enum rtx_code code;
3853 const char *fmt;
3854 rtx x = *xptr;
3855
3856 /* repeat is used to turn tail-recursion into iteration since GCC
3857 can't do it when there's no return value. */
3858 repeat:
3859 if (x == 0)
3860 return;
3861
3862 code = GET_CODE (x);
3863 if (REG_P (x))
3864 {
3865 if (reg_use_count == MAX_USES)
3866 return;
3867
3868 reg_use_table[reg_use_count].reg_rtx = x;
3869 reg_use_count++;
3870 }
3871
3872 /* Recursively scan the operands of this expression. */
3873
3874 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3875 {
3876 if (fmt[i] == 'e')
3877 {
3878 /* If we are about to do the last recursive call
3879 needed at this level, change it into iteration.
3880 This function is called enough to be worth it. */
3881 if (i == 0)
3882 {
3883 x = XEXP (x, 0);
3884 goto repeat;
3885 }
3886
3887 find_used_regs (&XEXP (x, i), data);
3888 }
3889 else if (fmt[i] == 'E')
3890 for (j = 0; j < XVECLEN (x, i); j++)
3891 find_used_regs (&XVECEXP (x, i, j), data);
3892 }
3893 }
3894
3895 /* Try to replace all non-SET_DEST occurrences of FROM in INSN with TO.
3896 Returns non-zero if successful. */
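/* A rough sketch of the strategy (register numbers invented): first let
   validate_replace_src try the literal replacement; if that fails for a
   single_set insn, substitute FROM -> TO in SET_SRC and simplify, e.g.
   (plus (reg 100) (const_int 4)) with (reg 100) -> (const_int 1) becomes
   (const_int 5), and try the simplified form.  If even that fails, record
   the simplified source in a REG_EQUAL note so the information survives.  */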
3897
3898 static int
3899 try_replace_reg (from, to, insn)
3900 rtx from, to, insn;
3901 {
3902 rtx note = find_reg_equal_equiv_note (insn);
3903 rtx src = 0;
3904 int success = 0;
3905 rtx set = single_set (insn);
3906
3907 success = validate_replace_src (from, to, insn);
3908
3909 /* If above failed and this is a single set, try to simplify the source of
3910 the set given our substitution. We could perhaps try this for multiple
3911 SETs, but it probably won't buy us anything. */
3912 if (!success && set != 0)
3913 {
3914 src = simplify_replace_rtx (SET_SRC (set), from, to);
3915
3916 if (!rtx_equal_p (src, SET_SRC (set))
3917 && validate_change (insn, &SET_SRC (set), src, 0))
3918 success = 1;
3919 }
3920
3921 /* If we've failed to do replacement, have a single SET, and don't already
3922 have a note, add a REG_EQUAL note to not lose information. */
3923 if (!success && note == 0 && set != 0)
3924 note = REG_NOTES (insn)
3925 = gen_rtx_EXPR_LIST (REG_EQUAL, src, REG_NOTES (insn));
3926
3927 /* If there is already a NOTE, update the expression in it with our
3928 replacement. */
3929 else if (note != 0)
3930 XEXP (note, 0) = simplify_replace_rtx (XEXP (note, 0), from, to);
3931
3932 /* The REG_EQUAL note may get simplified into a register.
3933 We don't allow that. Remove the note in that case. This ought
3934 not to happen, because the previous code ought to synthesize a
3935 reg-reg move, but be on the safe side. */
3936 if (note && REG_P (XEXP (note, 0)))
3937 remove_note (insn, note);
3938
3939 return success;
3940 }
3941
3942 /* Find a set of REGNO that is available on entry to INSN's block. Returns
3943 NULL if no such set is found. */
3944
3945 static struct expr *
3946 find_avail_set (regno, insn)
3947 int regno;
3948 rtx insn;
3949 {
3950 /* SET1 contains the last set found that can be returned to the caller for
3951 use in a substitution. */
3952 struct expr *set1 = 0;
3953
3954 /* Loops are not possible here. To get a loop we would need two sets
3955 available at the start of the block containing INSN, i.e. we would
3956 need two sets like this available at the start of the block:
3957
3958 (set (reg X) (reg Y))
3959 (set (reg Y) (reg X))
3960
3961 This cannot happen since the set of (reg Y) would have killed the
3962 set of (reg X) making it unavailable at the start of this block. */
3963 while (1)
3964 {
3965 rtx src;
3966 struct expr *set = lookup_set (regno, NULL_RTX);
3967
3968 /* Find a set that is available at the start of the block
3969 which contains INSN. */
3970 while (set)
3971 {
3972 if (TEST_BIT (cprop_avin[BLOCK_NUM (insn)], set->bitmap_index))
3973 break;
3974 set = next_set (regno, set);
3975 }
3976
3977 /* If no available set was found we've reached the end of the
3978 (possibly empty) copy chain. */
3979 if (set == 0)
3980 break;
3981
3982 if (GET_CODE (set->expr) != SET)
3983 abort ();
3984
3985 src = SET_SRC (set->expr);
3986
3987 /* We know the set is available.
3988 Now check that SRC is ANTLOC (i.e. none of the source operands
3989 have changed since the start of the block).
3990
3991 If the source operand changed, we may still use it for the next
3992 iteration of this loop, but we may not use it for substitutions. */
3993
3994 if (CONSTANT_P (src) || oprs_not_set_p (src, insn))
3995 set1 = set;
3996
3997 /* If the source of the set is anything except a register, then
3998 we have reached the end of the copy chain. */
3999 if (GET_CODE (src) != REG)
4000 break;
4001
4002 /* Follow the copy chain, i.e. start another iteration of the loop
4003 and see if we have an available copy into SRC. */
4004 regno = REGNO (src);
4005 }
4006
4007 /* SET1 holds the last set that was available and anticipatable at
4008 INSN. */
4009 return set1;
4010 }
4011
4012 /* Subroutine of cprop_insn that tries to propagate constants into
4013 JUMP_INSNS. INSN must be a conditional jump. FROM is what we will try to
4014 replace, SRC is the constant we will try to substitute for it. Returns
4015 nonzero if a change was made. We know INSN has just a SET. */
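/* A hypothetical example (label and register numbers invented): in

      (set (pc) (if_then_else (eq (reg 100) (const_int 0))
                              (label_ref 23) (pc)))

   substituting a known (const_int 0) for reg 100 may simplify the source to
   (label_ref 23), making the jump unconditional; if it simplifies to (pc)
   instead, the jump becomes a no-op.  */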
4016
4017 static int
4018 cprop_jump (bb, insn, from, src)
4019 rtx insn;
4020 rtx from;
4021 rtx src;
4022 basic_block bb;
4023 {
4024 rtx set = PATTERN (insn);
4025 rtx new = simplify_replace_rtx (SET_SRC (set), from, src);
4026
4027 /* If no simplification can be made, then try the next
4028 register. */
4029 if (rtx_equal_p (new, SET_SRC (set)))
4030 return 0;
4031
4032 /* If this is now a no-op, leave it that way, but update LABEL_NUSES if
4033 necessary. */
4034 if (new == pc_rtx)
4035 {
4036 SET_SRC (set) = new;
4037
4038 if (JUMP_LABEL (insn) != 0)
4039 --LABEL_NUSES (JUMP_LABEL (insn));
4040 }
4041
4042 /* Otherwise, this must be a valid instruction. */
4043 else if (! validate_change (insn, &SET_SRC (set), new, 0))
4044 return 0;
4045
4046 /* If this has turned into an unconditional jump,
4047 then put a barrier after it so that the unreachable
4048 code will be deleted. */
4049 if (GET_CODE (SET_SRC (set)) == LABEL_REF)
4050 emit_barrier_after (insn);
4051
4052 run_jump_opt_after_gcse = 1;
4053
4054 const_prop_count++;
4055 if (gcse_file != NULL)
4056 {
4057 fprintf (gcse_file,
4058 "CONST-PROP: Replacing reg %d in insn %d with constant ",
4059 REGNO (from), INSN_UID (insn));
4060 print_rtl (gcse_file, src);
4061 fprintf (gcse_file, "\n");
4062 }
4063 purge_dead_edges (bb);
4064
4065 return 1;
4066 }
4067
4068 #ifdef HAVE_cc0
4069
4070 /* Subroutine of cprop_insn that tries to propagate constants into JUMP_INSNS
4071 for machines that have CC0. INSN is a single set that stores into CC0;
4072 the insn following it is a conditional jump. REG_USED is the use we will
4073 try to replace, SRC is the constant we will try to substitute for it.
4074 Returns nonzero if a change was made. */
4075
4076 static int
4077 cprop_cc0_jump (bb, insn, reg_used, src)
4078 basic_block bb;
4079 rtx insn;
4080 struct reg_use *reg_used;
4081 rtx src;
4082 {
4083 /* First substitute in the SET_SRC of INSN, then substitute that for
4084 CC0 in JUMP. */
4085 rtx jump = NEXT_INSN (insn);
4086 rtx new_src = simplify_replace_rtx (SET_SRC (PATTERN (insn)),
4087 reg_used->reg_rtx, src);
4088
4089 if (! cprop_jump (bb, jump, cc0_rtx, new_src))
4090 return 0;
4091
4092 /* If we succeeded, delete the cc0 setter. */
4093 PUT_CODE (insn, NOTE);
4094 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
4095 NOTE_SOURCE_FILE (insn) = 0;
4096
4097 return 1;
4098 }
4099 #endif
4100
4101 /* Perform constant and copy propagation on INSN.
4102 The result is non-zero if a change was made. */
4103
4104 static int
4105 cprop_insn (bb, insn, alter_jumps)
4106 basic_block bb;
4107 rtx insn;
4108 int alter_jumps;
4109 {
4110 struct reg_use *reg_used;
4111 int changed = 0;
4112 rtx note;
4113
4114 if (!INSN_P (insn))
4115 return 0;
4116
4117 reg_use_count = 0;
4118 note_uses (&PATTERN (insn), find_used_regs, NULL);
4119
4120 note = find_reg_equal_equiv_note (insn);
4121
4122 /* We may win even when propagating constants into notes. */
4123 if (note)
4124 find_used_regs (&XEXP (note, 0), NULL);
4125
4126 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
4127 reg_used++, reg_use_count--)
4128 {
4129 unsigned int regno = REGNO (reg_used->reg_rtx);
4130 rtx pat, src;
4131 struct expr *set;
4132
4133 /* Ignore registers created by GCSE.
4134 We do this because ... */
4135 if (regno >= max_gcse_regno)
4136 continue;
4137
4138 /* If the register has already been set in this block, there's
4139 nothing we can do. */
4140 if (! oprs_not_set_p (reg_used->reg_rtx, insn))
4141 continue;
4142
4143 /* Find an assignment that sets reg_used and is available
4144 at the start of the block. */
4145 set = find_avail_set (regno, insn);
4146 if (! set)
4147 continue;
4148
4149 pat = set->expr;
4150 /* ??? We might be able to handle PARALLELs. Later. */
4151 if (GET_CODE (pat) != SET)
4152 abort ();
4153
4154 src = SET_SRC (pat);
4155
4156 /* Constant propagation. */
4157 if (GET_CODE (src) == CONST_INT || GET_CODE (src) == CONST_DOUBLE
4158 || GET_CODE (src) == SYMBOL_REF)
4159 {
4160 /* Handle normal insns first. */
4161 if (GET_CODE (insn) == INSN
4162 && try_replace_reg (reg_used->reg_rtx, src, insn))
4163 {
4164 changed = 1;
4165 const_prop_count++;
4166 if (gcse_file != NULL)
4167 {
4168 fprintf (gcse_file, "CONST-PROP: Replacing reg %d in ",
4169 regno);
4170 fprintf (gcse_file, "insn %d with constant ",
4171 INSN_UID (insn));
4172 print_rtl (gcse_file, src);
4173 fprintf (gcse_file, "\n");
4174 }
4175
4176 /* The original insn setting reg_used may or may not now be
4177 deletable. We leave the deletion to flow. */
4178 }
4179
4180 /* Try to propagate a CONST_INT into a conditional jump.
4181 We're pretty specific about what we will handle in this
4182 code; we can extend it as necessary over time.
4183
4184 Right now the insn in question must look like
4185 (set (pc) (if_then_else ...)) */
4186 else if (alter_jumps
4187 && GET_CODE (insn) == JUMP_INSN
4188 && condjump_p (insn)
4189 && ! simplejump_p (insn))
4190 changed |= cprop_jump (bb, insn, reg_used->reg_rtx, src);
4191
4192 #ifdef HAVE_cc0
4193 /* Similar code for machines that use a pair of CC0 setter and
4194 conditional jump insn. */
4195 else if (alter_jumps
4196 && GET_CODE (PATTERN (insn)) == SET
4197 && SET_DEST (PATTERN (insn)) == cc0_rtx
4198 && GET_CODE (NEXT_INSN (insn)) == JUMP_INSN
4199 && condjump_p (NEXT_INSN (insn))
4200 && ! simplejump_p (NEXT_INSN (insn))
4201 && cprop_cc0_jump (bb, insn, reg_used, src))
4202 {
4203 changed = 1;
4204 break;
4205 }
4206 #endif
4207 }
4208 else if (GET_CODE (src) == REG
4209 && REGNO (src) >= FIRST_PSEUDO_REGISTER
4210 && REGNO (src) != regno)
4211 {
4212 if (try_replace_reg (reg_used->reg_rtx, src, insn))
4213 {
4214 changed = 1;
4215 copy_prop_count++;
4216 if (gcse_file != NULL)
4217 {
4218 fprintf (gcse_file, "COPY-PROP: Replacing reg %d in insn %d",
4219 regno, INSN_UID (insn));
4220 fprintf (gcse_file, " with reg %d\n", REGNO (src));
4221 }
4222
4223 /* The original insn setting reg_used may or may not now be
4224 deletable. We leave the deletion to flow. */
4225 /* FIXME: If it turns out that the insn isn't deletable,
4226 then we may have unnecessarily extended register lifetimes
4227 and made things worse. */
4228 }
4229 }
4230 }
4231
4232 return changed;
4233 }
4234
4235 /* Forward propagate copies. This includes copies and constants. Return
4236 non-zero if a change was made. */
4237
4238 static int
4239 cprop (alter_jumps)
4240 int alter_jumps;
4241 {
4242 int bb, changed;
4243 rtx insn;
4244
4245 /* Note we start at block 1. */
4246
4247 changed = 0;
4248 for (bb = 1; bb < n_basic_blocks; bb++)
4249 {
4250 /* Reset tables used to keep track of what's still valid [since the
4251 start of the block]. */
4252 reset_opr_set_tables ();
4253
4254 for (insn = BLOCK_HEAD (bb);
4255 insn != NULL && insn != NEXT_INSN (BLOCK_END (bb));
4256 insn = NEXT_INSN (insn))
4257 if (INSN_P (insn))
4258 {
4259 changed |= cprop_insn (BASIC_BLOCK (bb), insn, alter_jumps);
4260
4261 /* Keep track of everything modified by this insn. */
4262 /* ??? Need to be careful w.r.t. mods done to INSN. Don't
4263 call mark_oprs_set if we turned the insn into a NOTE. */
4264 if (GET_CODE (insn) != NOTE)
4265 mark_oprs_set (insn);
4266 }
4267 }
4268
4269 if (gcse_file != NULL)
4270 fprintf (gcse_file, "\n");
4271
4272 return changed;
4273 }
4274
4275 /* Perform one copy/constant propagation pass.
4276 F is the first insn in the function.
4277 PASS is the pass count. */
4278
4279 static int
4280 one_cprop_pass (pass, alter_jumps)
4281 int pass;
4282 int alter_jumps;
4283 {
4284 int changed = 0;
4285
4286 const_prop_count = 0;
4287 copy_prop_count = 0;
4288
4289 alloc_set_hash_table (max_cuid);
4290 compute_set_hash_table ();
4291 if (gcse_file)
4292 dump_hash_table (gcse_file, "SET", set_hash_table, set_hash_table_size,
4293 n_sets);
4294 if (n_sets > 0)
4295 {
4296 alloc_cprop_mem (n_basic_blocks, n_sets);
4297 compute_cprop_data ();
4298 changed = cprop (alter_jumps);
4299 free_cprop_mem ();
4300 }
4301
4302 free_set_hash_table ();
4303
4304 if (gcse_file)
4305 {
4306 fprintf (gcse_file, "CPROP of %s, pass %d: %d bytes needed, ",
4307 current_function_name, pass, bytes_used);
4308 fprintf (gcse_file, "%d const props, %d copy props\n\n",
4309 const_prop_count, copy_prop_count);
4310 }
4311
4312 return changed;
4313 }
4314 \f
4315 /* Compute PRE+LCM working variables. */
4316
4317 /* Local properties of expressions. */
4318 /* Nonzero for expressions that are transparent in the block. */
4319 static sbitmap *transp;
4320
4321 /* Nonzero for expressions that are transparent at the end of the block.
4322 This is only zero for expressions killed by an abnormal critical edge
4323 created by a call. */
4324 static sbitmap *transpout;
4325
4326 /* Nonzero for expressions that are computed (available) in the block. */
4327 static sbitmap *comp;
4328
4329 /* Nonzero for expressions that are locally anticipatable in the block. */
4330 static sbitmap *antloc;
4331
4332 /* Nonzero for expressions where this block is an optimal computation
4333 point. */
4334 static sbitmap *pre_optimal;
4335
4336 /* Nonzero for expressions which are redundant in a particular block. */
4337 static sbitmap *pre_redundant;
4338
4339 /* Nonzero for expressions which should be inserted on a specific edge. */
4340 static sbitmap *pre_insert_map;
4341
4342 /* Nonzero for expressions which should be deleted in a specific block. */
4343 static sbitmap *pre_delete_map;
4344
4345 /* Contains the edge_list returned by pre_edge_lcm. */
4346 static struct edge_list *edge_list;
4347
4348 /* Redundant insns. */
4349 static sbitmap pre_redundant_insns;
4350
4351 /* Allocate vars used for PRE analysis. */
4352
4353 static void
4354 alloc_pre_mem (n_blocks, n_exprs)
4355 int n_blocks, n_exprs;
4356 {
4357 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
4358 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
4359 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
4360
4361 pre_optimal = NULL;
4362 pre_redundant = NULL;
4363 pre_insert_map = NULL;
4364 pre_delete_map = NULL;
4365 ae_in = NULL;
4366 ae_out = NULL;
4367 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
4368
4369 /* pre_insert and pre_delete are allocated later. */
4370 }
4371
4372 /* Free vars used for PRE analysis. */
4373
4374 static void
4375 free_pre_mem ()
4376 {
4377 sbitmap_vector_free (transp);
4378 sbitmap_vector_free (comp);
4379
4380 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
4381
4382 if (pre_optimal)
4383 sbitmap_vector_free (pre_optimal);
4384 if (pre_redundant)
4385 sbitmap_vector_free (pre_redundant);
4386 if (pre_insert_map)
4387 sbitmap_vector_free (pre_insert_map);
4388 if (pre_delete_map)
4389 sbitmap_vector_free (pre_delete_map);
4390 if (ae_in)
4391 sbitmap_vector_free (ae_in);
4392 if (ae_out)
4393 sbitmap_vector_free (ae_out);
4394
4395 transp = comp = NULL;
4396 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
4397 ae_in = ae_out = NULL;
4398 }
4399
4400 /* Top level routine to do the dataflow analysis needed by PRE. */
4401
4402 static void
4403 compute_pre_data ()
4404 {
4405 sbitmap trapping_expr;
4406 int i;
4407 unsigned int ui;
4408
4409 compute_local_properties (transp, comp, antloc, 0);
4410 sbitmap_vector_zero (ae_kill, n_basic_blocks);
4411
4412 /* Collect expressions which might trap. */
4413 trapping_expr = sbitmap_alloc (n_exprs);
4414 sbitmap_zero (trapping_expr);
4415 for (ui = 0; ui < expr_hash_table_size; ui++)
4416 {
4417 struct expr *e;
4418 for (e = expr_hash_table[ui]; e != NULL; e = e->next_same_hash)
4419 if (may_trap_p (e->expr))
4420 SET_BIT (trapping_expr, e->bitmap_index);
4421 }
4422
4423 /* Compute ae_kill for each basic block using:
4424
4425 ~(TRANSP | COMP)
4426
4427 This is significantly faster than compute_ae_kill. */
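/* That is, an expression is killed at the end of a block exactly when it is
   neither transparent through the block nor computed within it.  */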
4428
4429 for (i = 0; i < n_basic_blocks; i++)
4430 {
4431 edge e;
4432
4433 /* If the current block is the destination of an abnormal edge, we
4434 kill all trapping expressions because we won't be able to properly
4435 place the instruction on the edge. So make them neither
4436 anticipatable nor transparent. This is fairly conservative. */
4437 for (e = BASIC_BLOCK (i)->pred; e ; e = e->pred_next)
4438 if (e->flags & EDGE_ABNORMAL)
4439 {
4440 sbitmap_difference (antloc[i], antloc[i], trapping_expr);
4441 sbitmap_difference (transp[i], transp[i], trapping_expr);
4442 break;
4443 }
4444
4445 sbitmap_a_or_b (ae_kill[i], transp[i], comp[i]);
4446 sbitmap_not (ae_kill[i], ae_kill[i]);
4447 }
4448
4449 edge_list = pre_edge_lcm (gcse_file, n_exprs, transp, comp, antloc,
4450 ae_kill, &pre_insert_map, &pre_delete_map);
4451 sbitmap_vector_free (antloc);
4452 antloc = NULL;
4453 sbitmap_vector_free (ae_kill);
4454 ae_kill = NULL;
4455 free (trapping_expr);
4456 }
4457 \f
4458 /* PRE utilities */
4459
4460 /* Return non-zero if an occurrence of expression EXPR in OCCR_BB would reach
4461 block BB.
4462
4463 VISITED is a pointer to a working buffer for tracking which BB's have
4464 been visited. It is NULL for the top-level call.
4465
4466 We treat reaching expressions that go through blocks containing the same
4467 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
4468 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
4469 2 as not reaching. The intent is to improve the probability of finding
4470 only one reaching expression and to reduce register lifetimes by picking
4471 the closest such expression. */
4472
4473 static int
4474 pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited)
4475 basic_block occr_bb;
4476 struct expr *expr;
4477 basic_block bb;
4478 char *visited;
4479 {
4480 edge pred;
4481
4482 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
4483 {
4484 basic_block pred_bb = pred->src;
4485
4486 if (pred->src == ENTRY_BLOCK_PTR
4487 /* Has this predecessor already been visited? */
4488 || visited[pred_bb->index])
4489 ;/* Nothing to do. */
4490
4491 /* Does this predecessor generate this expression? */
4492 else if (TEST_BIT (comp[pred_bb->index], expr->bitmap_index))
4493 {
4494 /* Is this the occurrence we're looking for?
4495 Note that there's only one generating occurrence per block
4496 so we just need to check the block number. */
4497 if (occr_bb == pred_bb)
4498 return 1;
4499
4500 visited[pred_bb->index] = 1;
4501 }
4502 /* Ignore this predecessor if it kills the expression. */
4503 else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index))
4504 visited[pred_bb->index] = 1;
4505
4506 /* Neither gen nor kill. */
4507 else
4508 {
4509 visited[pred_bb->index] = 1;
4510 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
4511 return 1;
4512 }
4513 }
4514
4515 /* All paths have been checked. */
4516 return 0;
4517 }
4518
4519 /* The wrapper for pre_expr_reaches_here_p_work that ensures that any
4520 memory allocated for that function is freed. */
4521
4522 static int
4523 pre_expr_reaches_here_p (occr_bb, expr, bb)
4524 basic_block occr_bb;
4525 struct expr *expr;
4526 basic_block bb;
4527 {
4528 int rval;
4529 char *visited = (char *) xcalloc (n_basic_blocks, 1);
4530
4531 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
4532
4533 free (visited);
4534 return rval;
4535 }
4536 \f
4537
4538 /* Given an expr, generate RTL which we can insert at the end of a BB,
4539 or on an edge. Set the block number of any insns generated to
4540 the value of BB. */
4541
4542 static rtx
4543 process_insert_insn (expr)
4544 struct expr *expr;
4545 {
4546 rtx reg = expr->reaching_reg;
4547 rtx exp = copy_rtx (expr->expr);
4548 rtx pat;
4549
4550 start_sequence ();
4551
4552 /* If the expression is something that's an operand, like a constant,
4553 just copy it to a register. */
4554 if (general_operand (exp, GET_MODE (reg)))
4555 emit_move_insn (reg, exp);
4556
4557 /* Otherwise, make a new insn to compute this expression and make sure the
4558 insn will be recognized (this also adds any needed CLOBBERs). Copy the
4559 expression to make sure we don't have any sharing issues. */
4560 else if (insn_invalid_p (emit_insn (gen_rtx_SET (VOIDmode, reg, exp))))
4561 abort ();
4562
4563 pat = gen_sequence ();
4564 end_sequence ();
4565
4566 return pat;
4567 }
4568
4569 /* Add EXPR to the end of basic block BB.
4570
4571 This is used by both PRE and code hoisting.
4572
4573 For PRE, we want to verify that the expr is either transparent
4574 or locally anticipatable in the target block. This check makes
4575 no sense for code hoisting. */
4576
4577 static void
4578 insert_insn_end_bb (expr, bb, pre)
4579 struct expr *expr;
4580 basic_block bb;
4581 int pre;
4582 {
4583 rtx insn = bb->end;
4584 rtx new_insn;
4585 rtx reg = expr->reaching_reg;
4586 int regno = REGNO (reg);
4587 rtx pat;
4588 int i;
4589
4590 pat = process_insert_insn (expr);
4591
4592 /* If the last insn is a jump, insert EXPR in front [taking care to
4593 handle cc0, etc. properly]. */
4594
4595 if (GET_CODE (insn) == JUMP_INSN)
4596 {
4597 #ifdef HAVE_cc0
4598 rtx note;
4599 #endif
4600
4601 /* If this is a jump table, then we can't insert stuff here. Since
4602 we know the previous real insn must be the tablejump, we insert
4603 the new instruction just before the tablejump. */
4604 if (GET_CODE (PATTERN (insn)) == ADDR_VEC
4605 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
4606 insn = prev_real_insn (insn);
4607
4608 #ifdef HAVE_cc0
4609 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
4610 if cc0 isn't set. */
4611 note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
4612 if (note)
4613 insn = XEXP (note, 0);
4614 else
4615 {
4616 rtx maybe_cc0_setter = prev_nonnote_insn (insn);
4617 if (maybe_cc0_setter
4618 && INSN_P (maybe_cc0_setter)
4619 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
4620 insn = maybe_cc0_setter;
4621 }
4622 #endif
4623 /* FIXME: What if something in cc0/jump uses value set in new insn? */
4624 new_insn = emit_block_insn_before (pat, insn, bb);
4625 }
4626
4627 /* Likewise if the last insn is a call, as will happen in the presence
4628 of exception handling. */
4629 else if (GET_CODE (insn) == CALL_INSN)
4630 {
4631 HARD_REG_SET parm_regs;
4632 int nparm_regs;
4633 rtx p;
4634
4635 /* Keeping in mind SMALL_REGISTER_CLASSES and parameters in registers,
4636 we search backward and place the instructions before the first
4637 parameter is loaded. Do this for everyone for consistency and a
4638 presumption that we'll get better code elsewhere as well.
4639
4640 It should always be the case that we can put these instructions
4641 anywhere in the basic block when performing PRE optimizations.
4642 Check this. */
4643
4644 if (pre
4645 && !TEST_BIT (antloc[bb->index], expr->bitmap_index)
4646 && !TEST_BIT (transp[bb->index], expr->bitmap_index))
4647 abort ();
4648
4649 /* Since different machines initialize their parameter registers
4650 in different orders, assume nothing. Collect the set of all
4651 parameter registers. */
4652 CLEAR_HARD_REG_SET (parm_regs);
4653 nparm_regs = 0;
4654 for (p = CALL_INSN_FUNCTION_USAGE (insn); p ; p = XEXP (p, 1))
4655 if (GET_CODE (XEXP (p, 0)) == USE
4656 && GET_CODE (XEXP (XEXP (p, 0), 0)) == REG)
4657 {
4658 if (REGNO (XEXP (XEXP (p, 0), 0)) >= FIRST_PSEUDO_REGISTER)
4659 abort ();
4660
4661 /* We only care about registers which can hold function
4662 arguments. */
4663 if (! FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
4664 continue;
4665
4666 SET_HARD_REG_BIT (parm_regs, REGNO (XEXP (XEXP (p, 0), 0)));
4667 nparm_regs++;
4668 }
4669
4670 /* Search backward for the first set of a register in this set. */
4671 while (nparm_regs && bb->head != insn)
4672 {
4673 insn = PREV_INSN (insn);
4674 p = single_set (insn);
4675 if (p && GET_CODE (SET_DEST (p)) == REG
4676 && REGNO (SET_DEST (p)) < FIRST_PSEUDO_REGISTER
4677 && TEST_HARD_REG_BIT (parm_regs, REGNO (SET_DEST (p))))
4678 {
4679 CLEAR_HARD_REG_BIT (parm_regs, REGNO (SET_DEST (p)));
4680 nparm_regs--;
4681 }
4682 }
4683
4684 /* If we found all the parameter loads, then we want to insert
4685 before the first parameter load.
4686
4687 If we did not find all the parameter loads, then we might have
4688 stopped on the head of the block, which could be a CODE_LABEL.
4689 If we inserted before the CODE_LABEL, then we would be putting
4690 the insn in the wrong basic block. In that case, put the insn
4691 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
4692 while (GET_CODE (insn) == CODE_LABEL
4693 || NOTE_INSN_BASIC_BLOCK_P (insn))
4694 insn = NEXT_INSN (insn);
4695
4696 new_insn = emit_block_insn_before (pat, insn, bb);
4697 }
4698 else
4699 {
4700 new_insn = emit_insn_after (pat, insn);
4701 bb->end = new_insn;
4702 }
4703
4704 /* Keep block number table up to date.
4705 Note that PAT could be a multiple insn sequence; we have to make
4706 sure that each insn in the sequence is handled. */
4707 if (GET_CODE (pat) == SEQUENCE)
4708 {
4709 for (i = 0; i < XVECLEN (pat, 0); i++)
4710 {
4711 rtx insn = XVECEXP (pat, 0, i);
4712
4713 set_block_for_insn (insn, bb);
4714 if (INSN_P (insn))
4715 add_label_notes (PATTERN (insn), new_insn);
4716
4717 note_stores (PATTERN (insn), record_set_info, insn);
4718 }
4719 }
4720 else
4721 {
4722 add_label_notes (SET_SRC (pat), new_insn);
4723 set_block_for_new_insns (new_insn, bb);
4724
4725 /* Keep register set table up to date. */
4726 record_one_set (regno, new_insn);
4727 }
4728
4729 gcse_create_count++;
4730
4731 if (gcse_file)
4732 {
4733 fprintf (gcse_file, "PRE/HOIST: end of bb %d, insn %d, ",
4734 bb->index, INSN_UID (new_insn));
4735 fprintf (gcse_file, "copying expression %d to reg %d\n",
4736 expr->bitmap_index, regno);
4737 }
4738 }
4739
4740 /* Insert partially redundant expressions on edges in the CFG to make
4741 the expressions fully redundant. */
4742
4743 static int
4744 pre_edge_insert (edge_list, index_map)
4745 struct edge_list *edge_list;
4746 struct expr **index_map;
4747 {
4748 int e, i, j, num_edges, set_size, did_insert = 0;
4749 sbitmap *inserted;
4750
4751 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
4752 if it reaches any of the deleted expressions. */
4753
4754 set_size = pre_insert_map[0]->size;
4755 num_edges = NUM_EDGES (edge_list);
4756 inserted = sbitmap_vector_alloc (num_edges, n_exprs);
4757 sbitmap_vector_zero (inserted, num_edges);
4758
4759 for (e = 0; e < num_edges; e++)
4760 {
4761 int indx;
4762 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
4763
4764 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
4765 {
4766 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
4767
4768 for (j = indx; insert && j < n_exprs; j++, insert >>= 1)
4769 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
4770 {
4771 struct expr *expr = index_map[j];
4772 struct occr *occr;
4773
4774 /* Now look at each deleted occurrence of this expression. */
4775 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
4776 {
4777 if (! occr->deleted_p)
4778 continue;
4779
4780 /* Insert this expression on this edge if it would
4781 reach the deleted occurrence in BB. */
4782 if (!TEST_BIT (inserted[e], j))
4783 {
4784 rtx insn;
4785 edge eg = INDEX_EDGE (edge_list, e);
4786
4787 /* We can't insert anything on an abnormal and
4788 critical edge, so we insert the insn at the end of
4789 the previous block. There are several alternatives
4790 detailed in Morgan's book, p. 277 (sec. 10.5), for
4791 handling this situation. This one is easiest for
4792 now. */
4793
4794 if ((eg->flags & EDGE_ABNORMAL) == EDGE_ABNORMAL)
4795 insert_insn_end_bb (index_map[j], bb, 0);
4796 else
4797 {
4798 insn = process_insert_insn (index_map[j]);
4799 insert_insn_on_edge (insn, eg);
4800 }
4801
4802 if (gcse_file)
4803 {
4804 fprintf (gcse_file, "PRE/HOIST: edge (%d,%d), ",
4805 bb->index,
4806 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
4807 fprintf (gcse_file, "copy expression %d\n",
4808 expr->bitmap_index);
4809 }
4810
4811 update_ld_motion_stores (expr);
4812 SET_BIT (inserted[e], j);
4813 did_insert = 1;
4814 gcse_create_count++;
4815 }
4816 }
4817 }
4818 }
4819 }
4820
4821 sbitmap_vector_free (inserted);
4822 return did_insert;
4823 }
4824
4825 /* Copy the result of INSN to EXPR->reaching_reg; INDX is the expression number. */
4826
4827 static void
4828 pre_insert_copy_insn (expr, insn)
4829 struct expr *expr;
4830 rtx insn;
4831 {
4832 rtx reg = expr->reaching_reg;
4833 int regno = REGNO (reg);
4834 int indx = expr->bitmap_index;
4835 rtx set = single_set (insn);
4836 rtx new_insn;
4837 basic_block bb = BLOCK_FOR_INSN (insn);
4838
4839 if (!set)
4840 abort ();
4841
4842 new_insn = emit_insn_after (gen_move_insn (reg, SET_DEST (set)), insn);
4843
4844 /* Keep block number table up to date. */
4845 set_block_for_new_insns (new_insn, bb);
4846
4847 /* Keep register set table up to date. */
4848 record_one_set (regno, new_insn);
4849 if (insn == bb->end)
4850 bb->end = new_insn;
4851
4852 gcse_create_count++;
4853
4854 if (gcse_file)
4855 fprintf (gcse_file,
4856 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
4857 BLOCK_NUM (insn), INSN_UID (new_insn), indx,
4858 INSN_UID (insn), regno);
4859 update_ld_motion_stores (expr);
4860 }
4861
4862 /* Copy available expressions that reach the redundant expression
4863 to `reaching_reg'. */
4864
4865 static void
4866 pre_insert_copies ()
4867 {
4868 unsigned int i;
4869 struct expr *expr;
4870 struct occr *occr;
4871 struct occr *avail;
4872
4873 /* For each available expression in the table, copy the result to
4874 `reaching_reg' if the expression reaches a deleted one.
4875
4876 ??? The current algorithm is rather brute force.
4877 Need to do some profiling. */
4878
4879 for (i = 0; i < expr_hash_table_size; i++)
4880 for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
4881 {
4882 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
4883 we don't want to insert a copy here because the expression may not
4884 really be redundant. So only insert an insn if the expression was
4885 deleted. This test also avoids further processing if the
4886 expression wasn't deleted anywhere. */
4887 if (expr->reaching_reg == NULL)
4888 continue;
4889
4890 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
4891 {
4892 if (! occr->deleted_p)
4893 continue;
4894
4895 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
4896 {
4897 rtx insn = avail->insn;
4898
4899 /* No need to handle this one if handled already. */
4900 if (avail->copied_p)
4901 continue;
4902
4903 /* Don't handle this one if it's a redundant one. */
4904 if (TEST_BIT (pre_redundant_insns, INSN_CUID (insn)))
4905 continue;
4906
4907 /* Or if the expression doesn't reach the deleted one. */
4908 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
4909 expr,
4910 BLOCK_FOR_INSN (occr->insn)))
4911 continue;
4912
4913 /* Copy the result of avail to reaching_reg. */
4914 pre_insert_copy_insn (expr, insn);
4915 avail->copied_p = 1;
4916 }
4917 }
4918 }
4919 }
4920
4921 /* Delete redundant computations.
4922 Deletion is done by changing the insn to copy the `reaching_reg' of
4923 the expression into the result of the SET. It is left to later passes
4924 (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
4925
4926 Returns non-zero if a change is made. */
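/* A minimal sketch (register numbers invented): a redundant computation

      (set (reg 102) (plus (reg 100) (const_int 4)))

   becomes

      (set (reg 102) (reg 105))

   where reg 105 is EXPR->reaching_reg, the pseudo that the inserted copies
   and edge insertions load with the expression's value.  */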
4927
4928 static int
4929 pre_delete ()
4930 {
4931 unsigned int i;
4932 int changed;
4933 struct expr *expr;
4934 struct occr *occr;
4935
4936 changed = 0;
4937 for (i = 0; i < expr_hash_table_size; i++)
4938 for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
4939 {
4940 int indx = expr->bitmap_index;
4941
4942 /* We only need to search antic_occr since we require
4943 ANTLOC != 0. */
4944
4945 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
4946 {
4947 rtx insn = occr->insn;
4948 rtx set;
4949 basic_block bb = BLOCK_FOR_INSN (insn);
4950
4951 if (TEST_BIT (pre_delete_map[bb->index], indx))
4952 {
4953 set = single_set (insn);
4954 if (! set)
4955 abort ();
4956
4957 /* Create a pseudo-reg to store the result of reaching
4958 expressions into. Get the mode for the new pseudo from
4959 the mode of the original destination pseudo. */
4960 if (expr->reaching_reg == NULL)
4961 expr->reaching_reg
4962 = gen_reg_rtx (GET_MODE (SET_DEST (set)));
4963
4964 /* In theory this should never fail since we're creating
4965 a reg->reg copy.
4966
4967 However, on the x86 some of the movXX patterns actually
4968 contain clobbers of scratch regs. This may cause the
4969 insn created by validate_change to not match any pattern
4970 and thus cause validate_change to fail. */
4971 if (validate_change (insn, &SET_SRC (set),
4972 expr->reaching_reg, 0))
4973 {
4974 occr->deleted_p = 1;
4975 SET_BIT (pre_redundant_insns, INSN_CUID (insn));
4976 changed = 1;
4977 gcse_subst_count++;
4978 }
4979
4980 if (gcse_file)
4981 {
4982 fprintf (gcse_file,
4983 "PRE: redundant insn %d (expression %d) in ",
4984 INSN_UID (insn), indx);
4985 fprintf (gcse_file, "bb %d, reaching reg is %d\n",
4986 bb->index, REGNO (expr->reaching_reg));
4987 }
4988 }
4989 }
4990 }
4991
4992 return changed;
4993 }
4994
4995 /* Perform GCSE optimizations using PRE.
4996 This is called by one_pre_gcse_pass after all the dataflow analysis
4997 has been done.
4998
4999 This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
5000 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
5001 Compiler Design and Implementation.
5002
5003 ??? A new pseudo reg is created to hold the reaching expression. The nice
5004 thing about the classical approach is that it would try to use an existing
5005 reg. If the register can't be adequately optimized [i.e. we introduce
5006 reload problems], one could add a pass here to propagate the new register
5007 through the block.
5008
5009 ??? We don't handle single sets in PARALLELs because we're [currently] not
5010 able to copy the rest of the parallel when we insert copies to create full
5011 redundancies from partial redundancies. However, there's no reason why we
5012 can't handle PARALLELs in the cases where there are no partial
5013 redundancies. */
5014
5015 static int
5016 pre_gcse ()
5017 {
5018 unsigned int i;
5019 int did_insert, changed;
5020 struct expr **index_map;
5021 struct expr *expr;
5022
5023 /* Compute a mapping from expression number (`bitmap_index') to
5024 hash table entry. */
5025
5026 index_map = (struct expr **) xcalloc (n_exprs, sizeof (struct expr *));
5027 for (i = 0; i < expr_hash_table_size; i++)
5028 for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
5029 index_map[expr->bitmap_index] = expr;
5030
5031 /* Reset bitmap used to track which insns are redundant. */
5032 pre_redundant_insns = sbitmap_alloc (max_cuid);
5033 sbitmap_zero (pre_redundant_insns);
5034
5035 /* Delete the redundant insns first so that
5036 - we know what register to use for the new insns and for the other
5037 ones with reaching expressions
5038 - we know which insns are redundant when we go to create copies */
5039
5040 changed = pre_delete ();
5041
5042 did_insert = pre_edge_insert (edge_list, index_map);
5043
5044 /* In other places with reaching expressions, copy the expression to the
5045 specially allocated pseudo-reg that reaches the redundant expr. */
5046 pre_insert_copies ();
5047 if (did_insert)
5048 {
5049 commit_edge_insertions ();
5050 changed = 1;
5051 }
5052
5053 free (index_map);
5054 free (pre_redundant_insns);
5055 return changed;
5056 }
5057
5058 /* Top level routine to perform one PRE GCSE pass.
5059
5060 Return non-zero if a change was made. */
5061
5062 static int
5063 one_pre_gcse_pass (pass)
5064 int pass;
5065 {
5066 int changed = 0;
5067
5068 gcse_subst_count = 0;
5069 gcse_create_count = 0;
5070
5071 alloc_expr_hash_table (max_cuid);
5072 add_noreturn_fake_exit_edges ();
5073 if (flag_gcse_lm)
5074 compute_ld_motion_mems ();
5075
5076 compute_expr_hash_table ();
5077 trim_ld_motion_mems ();
5078 if (gcse_file)
5079 dump_hash_table (gcse_file, "Expression", expr_hash_table,
5080 expr_hash_table_size, n_exprs);
5081
5082 if (n_exprs > 0)
5083 {
5084 alloc_pre_mem (n_basic_blocks, n_exprs);
5085 compute_pre_data ();
5086 changed |= pre_gcse ();
5087 free_edge_list (edge_list);
5088 free_pre_mem ();
5089 }
5090
5091 free_ldst_mems ();
5092 remove_fake_edges ();
5093 free_expr_hash_table ();
5094
5095 if (gcse_file)
5096 {
5097 fprintf (gcse_file, "\nPRE GCSE of %s, pass %d: %d bytes needed, ",
5098 current_function_name, pass, bytes_used);
5099 fprintf (gcse_file, "%d substs, %d insns created\n",
5100 gcse_subst_count, gcse_create_count);
5101 }
5102
5103 return changed;
5104 }
5105 \f
5106 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to INSN.
5107 If notes are added to an insn which references a CODE_LABEL, the
5108 LABEL_NUSES count is incremented. We have to add REG_LABEL notes,
5109 because the following loop optimization pass requires them. */
5110
5111 /* ??? This is very similar to the loop.c add_label_notes function. We
5112 could probably share code here. */
5113
5114 /* ??? If there was a jump optimization pass after gcse and before loop,
5115 then we would not need to do this here, because jump would add the
5116 necessary REG_LABEL notes. */
5117
5118 static void
5119 add_label_notes (x, insn)
5120 rtx x;
5121 rtx insn;
5122 {
5123 enum rtx_code code = GET_CODE (x);
5124 int i, j;
5125 const char *fmt;
5126
5127 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
5128 {
5129 /* This code used to ignore labels that referred to dispatch tables to
5130 avoid flow generating (slightly) worse code.
5131
5132 We no longer ignore such label references (see LABEL_REF handling in
5133 mark_jump_label for additional information). */
5134
5135 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0),
5136 REG_NOTES (insn));
5137 if (LABEL_P (XEXP (x, 0)))
5138 LABEL_NUSES (XEXP (x, 0))++;
5139 return;
5140 }
5141
5142 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
5143 {
5144 if (fmt[i] == 'e')
5145 add_label_notes (XEXP (x, i), insn);
5146 else if (fmt[i] == 'E')
5147 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5148 add_label_notes (XVECEXP (x, i, j), insn);
5149 }
5150 }
5151
5152 /* Compute transparent outgoing information for each block.
5153
5154 An expression is transparent to an edge unless it is killed by
5155 the edge itself. This can only happen with abnormal control flow,
5156 when the edge is traversed through a call. This happens with
5157 non-local labels and exceptions.
5158
5159 This would not be necessary if we split the edge. While this is
5160 normally impossible for abnormal critical edges, with some effort
5161 it should be possible with exception handling, since we still have
5162 control over which handler should be invoked. But due to increased
5163 EH table sizes, this may not be worthwhile. */
5164
5165 static void
5166 compute_transpout ()
5167 {
5168 int bb;
5169 unsigned int i;
5170 struct expr *expr;
5171
5172 sbitmap_vector_ones (transpout, n_basic_blocks);
5173
5174 for (bb = 0; bb < n_basic_blocks; ++bb)
5175 {
5176 /* Note that flow inserted a nop at the end of basic blocks that
5177 end in call instructions for reasons other than abnormal
5178 control flow. */
5179 if (GET_CODE (BLOCK_END (bb)) != CALL_INSN)
5180 continue;
5181
5182 for (i = 0; i < expr_hash_table_size; i++)
5183 for (expr = expr_hash_table[i]; expr ; expr = expr->next_same_hash)
5184 if (GET_CODE (expr->expr) == MEM)
5185 {
5186 if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
5187 && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
5188 continue;
5189
5190 /* ??? Optimally, we would use interprocedural alias
5191 analysis to determine if this mem is actually killed
5192 by this call. */
5193 RESET_BIT (transpout[bb], expr->bitmap_index);
5194 }
5195 }
5196 }
5197
5198 /* Removal of useless null pointer checks */
5199
5200 /* Called via note_stores. X is set by SETTER. If X is a register we must
5201 invalidate nonnull_local and set nonnull_killed. DATA is really a
5202 `null_pointer_info *'.
5203
5204 We ignore hard registers. */
5205
5206 static void
5207 invalidate_nonnull_info (x, setter, data)
5208 rtx x;
5209 rtx setter ATTRIBUTE_UNUSED;
5210 void *data;
5211 {
5212 unsigned int regno;
5213 struct null_pointer_info *npi = (struct null_pointer_info *) data;
5214
5215 while (GET_CODE (x) == SUBREG)
5216 x = SUBREG_REG (x);
5217
5218 /* Ignore anything that is not a register or is a hard register. */
5219 if (GET_CODE (x) != REG
5220 || REGNO (x) < npi->min_reg
5221 || REGNO (x) >= npi->max_reg)
5222 return;
5223
5224 regno = REGNO (x) - npi->min_reg;
5225
5226 RESET_BIT (npi->nonnull_local[npi->current_block], regno);
5227 SET_BIT (npi->nonnull_killed[npi->current_block], regno);
5228 }
5229
5230 /* Do null-pointer check elimination for the registers indicated in
5231 NPI. NONNULL_AVIN and NONNULL_AVOUT are pre-allocated sbitmaps;
5232 they are not our responsibility to free. */
5233
5234 static void
5235 delete_null_pointer_checks_1 (delete_list, block_reg, nonnull_avin,
5236 nonnull_avout, npi)
5237 varray_type *delete_list;
5238 unsigned int *block_reg;
5239 sbitmap *nonnull_avin;
5240 sbitmap *nonnull_avout;
5241 struct null_pointer_info *npi;
5242 {
5243 int bb;
5244 int current_block;
5245 sbitmap *nonnull_local = npi->nonnull_local;
5246 sbitmap *nonnull_killed = npi->nonnull_killed;
5247
5248 /* Compute local properties, nonnull and killed. A register will have
5249 the nonnull property if at the end of the current block its value is
5250 known to be nonnull. The killed property indicates that somewhere in
5251 the block any information we had about the register is killed.
5252
5253 Note that a register can have both properties in a single block. That
5254 indicates that it's killed, then later in the block a new value is
5255 computed. */
5256 sbitmap_vector_zero (nonnull_local, n_basic_blocks);
5257 sbitmap_vector_zero (nonnull_killed, n_basic_blocks);
5258
5259 for (current_block = 0; current_block < n_basic_blocks; current_block++)
5260 {
5261 rtx insn, stop_insn;
5262
5263 /* Set the current block for invalidate_nonnull_info. */
5264 npi->current_block = current_block;
5265
5266 /* Scan each insn in the basic block looking for memory references and
5267 register sets. */
5268 stop_insn = NEXT_INSN (BLOCK_END (current_block));
5269 for (insn = BLOCK_HEAD (current_block);
5270 insn != stop_insn;
5271 insn = NEXT_INSN (insn))
5272 {
5273 rtx set;
5274 rtx reg;
5275
5276 /* Ignore anything that is not a normal insn. */
5277 if (! INSN_P (insn))
5278 continue;
5279
5280 /* Basically ignore anything that is not a simple SET. We do have
5281 to make sure to invalidate nonnull_local and set nonnull_killed
5282 for such insns though. */
5283 set = single_set (insn);
5284 if (!set)
5285 {
5286 note_stores (PATTERN (insn), invalidate_nonnull_info, npi);
5287 continue;
5288 }
5289
5290 /* See if we've got a usable memory load. We handle it first
5291 in case it uses its address register as a dest (which kills
5292 the nonnull property). */
5293 if (GET_CODE (SET_SRC (set)) == MEM
5294 && GET_CODE ((reg = XEXP (SET_SRC (set), 0))) == REG
5295 && REGNO (reg) >= npi->min_reg
5296 && REGNO (reg) < npi->max_reg)
5297 SET_BIT (nonnull_local[current_block],
5298 REGNO (reg) - npi->min_reg);
5299
5300 /* Now invalidate stuff clobbered by this insn. */
5301 note_stores (PATTERN (insn), invalidate_nonnull_info, npi);
5302
5303 /* And handle stores; we do these last since any sets in INSN
5304 cannot kill the nonnull property if it is derived from a MEM
5305 appearing in a SET_DEST. */
5306 if (GET_CODE (SET_DEST (set)) == MEM
5307 && GET_CODE ((reg = XEXP (SET_DEST (set), 0))) == REG
5308 && REGNO (reg) >= npi->min_reg
5309 && REGNO (reg) < npi->max_reg)
5310 SET_BIT (nonnull_local[current_block],
5311 REGNO (reg) - npi->min_reg);
5312 }
5313 }
5314
5315 /* Now compute global properties based on the local properties. This
5316 is a classic global availability algorithm. */
5317 compute_available (nonnull_local, nonnull_killed,
5318 nonnull_avout, nonnull_avin);
5319
5320 /* Now look at each bb and see if it ends with a compare of a value
5321 against zero. */
5322 for (bb = 0; bb < n_basic_blocks; bb++)
5323 {
5324 rtx last_insn = BLOCK_END (bb);
5325 rtx condition, earliest;
5326 int compare_and_branch;
5327
5328 /* Since MIN_REG is always at least FIRST_PSEUDO_REGISTER, and
5329 since BLOCK_REG[BB] is zero if this block did not end with a
5330 comparison against zero, this condition works. */
5331 if (block_reg[bb] < npi->min_reg
5332 || block_reg[bb] >= npi->max_reg)
5333 continue;
5334
5335 /* LAST_INSN is a conditional jump. Get its condition. */
5336 condition = get_condition (last_insn, &earliest);
5337
5338 /* If we can't determine the condition then skip. */
5339 if (! condition)
5340 continue;
5341
5342 /* Is the register known to have a nonzero value? */
5343 if (!TEST_BIT (nonnull_avout[bb], block_reg[bb] - npi->min_reg))
5344 continue;
5345
5346 /* Try to compute whether the compare/branch at the end of the block is one or
5347 two instructions. */
5348 if (earliest == last_insn)
5349 compare_and_branch = 1;
5350 else if (earliest == prev_nonnote_insn (last_insn))
5351 compare_and_branch = 2;
5352 else
5353 continue;
5354
5355 /* We know the register in this comparison is nonnull at exit from
5356 this block. We can optimize this comparison. */
5357 if (GET_CODE (condition) == NE)
5358 {
5359 rtx new_jump;
5360
5361 new_jump = emit_jump_insn_before (gen_jump (JUMP_LABEL (last_insn)),
5362 last_insn);
5363 JUMP_LABEL (new_jump) = JUMP_LABEL (last_insn);
5364 LABEL_NUSES (JUMP_LABEL (new_jump))++;
5365 emit_barrier_after (new_jump);
5366 }
5367 if (!*delete_list)
5368 VARRAY_RTX_INIT (*delete_list, 10, "delete_list");
5369
5370 VARRAY_PUSH_RTX (*delete_list, last_insn);
5371 if (compare_and_branch == 2)
5372 VARRAY_PUSH_RTX (*delete_list, earliest);
5373
5374 /* Don't check this block again. (Note that BLOCK_END is
5375 invalid here; we deleted the last instruction in the
5376 block.) */
5377 block_reg[bb] = 0;
5378 }
5379 }
5380
5381 /* Find EQ/NE comparisons against zero which can be (indirectly) evaluated
5382 at compile time.
5383
5384 This is conceptually similar to global constant/copy propagation and
5385 classic global CSE (it even uses the same dataflow equations as cprop).
5386
5387 If a register is used as a memory address with the form (mem (reg)), then we
5388 know that REG cannot be zero at that point in the program. Any instruction
5389 which sets REG "kills" this property.
5390 
5391 So, if every path leading to a conditional branch has an available memory
5392 reference of that form, then we know the register cannot have the value
5393 zero at the conditional branch.
5394
5395 So we merely need to compute the local properties and propagate that data
5396 around the CFG, then optimize where possible.
5397
5398 We run this pass two times. Once before CSE, then again after CSE. This
5399 has proven to be the most profitable approach. It is rare for new
5400 optimization opportunities of this nature to appear after the first CSE
5401 pass.
5402
5403 This could probably be integrated with global cprop with a little work. */
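/* An illustrative fragment (register numbers invented): once

      (set (reg 100) (mem (reg 105)))

   has executed, reg 105 is known to be nonnull, so if that property is
   available on every path to a later

      (set (pc) (if_then_else (eq (reg 105) (const_int 0)) ...))

   the comparison can be resolved at compile time and the compare/branch
   removed (or turned into an unconditional jump in the NE case).  */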
5404
5405 void
5406 delete_null_pointer_checks (f)
5407 rtx f ATTRIBUTE_UNUSED;
5408 {
5409 sbitmap *nonnull_avin, *nonnull_avout;
5410 unsigned int *block_reg;
5411 varray_type delete_list = NULL;
5412 int bb;
5413 int reg;
5414 int regs_per_pass;
5415 int max_reg;
5416 unsigned int i;
5417 struct null_pointer_info npi;
5418
5419 /* If we have only a single block, then there's nothing to do. */
5420 if (n_basic_blocks <= 1)
5421 return;
5422
5423 /* Trying to perform global optimizations on flow graphs which have
5424 a high connectivity will take a long time and is unlikely to be
5425 particularly useful.
5426
5427 In normal circumstances a cfg should have about twice as many edges
5428 as blocks. But we do not want to punish small functions which have
5429 a couple of switch statements.  So we require a relatively large number
5430 of basic blocks and the ratio of edges to blocks to be high. */
5431 if (n_basic_blocks > 1000 && n_edges / n_basic_blocks >= 20)
5432 return;
5433
5434 /* We need four bitmaps, each with a bit for each register in each
5435 basic block. */
5436 max_reg = max_reg_num ();
5437 regs_per_pass = get_bitmap_width (4, n_basic_blocks, max_reg);
5438
5439 /* Allocate bitmaps to hold local and global properties. */
5440 npi.nonnull_local = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
5441 npi.nonnull_killed = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
5442 nonnull_avin = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
5443 nonnull_avout = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
5444
5445 /* Go through the basic blocks, seeing whether or not each block
5446 ends with a conditional branch whose condition is a comparison
5447 against zero. Record the register compared in BLOCK_REG. */
5448 block_reg = (unsigned int *) xcalloc (n_basic_blocks, sizeof (int));
5449 for (bb = 0; bb < n_basic_blocks; bb++)
5450 {
5451 rtx last_insn = BLOCK_END (bb);
5452 rtx condition, earliest, reg;
5453
5454 /* We only want conditional branches. */
5455 if (GET_CODE (last_insn) != JUMP_INSN
5456 || !any_condjump_p (last_insn)
5457 || !onlyjump_p (last_insn))
5458 continue;
5459
5460 /* LAST_INSN is a conditional jump. Get its condition. */
5461 condition = get_condition (last_insn, &earliest);
5462
5463 /* If we were unable to get the condition, or it is not an equality
5464 comparison against zero, then there's nothing we can do. */
5465 if (!condition
5466 || (GET_CODE (condition) != NE && GET_CODE (condition) != EQ)
5467 || GET_CODE (XEXP (condition, 1)) != CONST_INT
5468 || (XEXP (condition, 1)
5469 != CONST0_RTX (GET_MODE (XEXP (condition, 0)))))
5470 continue;
5471
5472 /* We must be checking a register against zero. */
5473 reg = XEXP (condition, 0);
5474 if (GET_CODE (reg) != REG)
5475 continue;
5476
5477 block_reg[bb] = REGNO (reg);
5478 }
5479
5480 /* Go through the algorithm for each block of registers. */
5481 for (reg = FIRST_PSEUDO_REGISTER; reg < max_reg; reg += regs_per_pass)
5482 {
5483 npi.min_reg = reg;
5484 npi.max_reg = MIN (reg + regs_per_pass, max_reg);
5485 delete_null_pointer_checks_1 (&delete_list, block_reg, nonnull_avin,
5486 nonnull_avout, &npi);
5487 }
5488
5489 /* Now delete the instructions all at once. This breaks the CFG. */
5490 if (delete_list)
5491 {
5492 for (i = 0; i < VARRAY_ACTIVE_SIZE (delete_list); i++)
5493 delete_insn (VARRAY_RTX (delete_list, i));
5494 VARRAY_FREE (delete_list);
5495 }
5496
5497 /* Free the table of registers compared at the end of every block. */
5498 free (block_reg);
5499
5500 /* Free bitmaps. */
5501 sbitmap_vector_free (npi.nonnull_local);
5502 sbitmap_vector_free (npi.nonnull_killed);
5503 sbitmap_vector_free (nonnull_avin);
5504 sbitmap_vector_free (nonnull_avout);
5505 }
5506
5507 /* Code Hoisting variables and subroutines. */
5508
5509 /* Very busy expressions. */
5510 static sbitmap *hoist_vbein;
5511 static sbitmap *hoist_vbeout;
5512
5513 /* Hoistable expressions. */
5514 static sbitmap *hoist_exprs;
5515
5516 /* Dominator bitmaps. */
5517 static sbitmap *dominators;
5518
5519 /* ??? We could compute post dominators and run this algorithm in
5520 reverse to perform tail merging; doing so would probably be
5521 more effective than the tail merging code in jump.c.
5522
5523 It's unclear if tail merging could be run in parallel with
5524 code hoisting. It would be nice. */
5525
5526 /* Allocate vars used for code hoisting analysis. */
5527
5528 static void
5529 alloc_code_hoist_mem (n_blocks, n_exprs)
5530 int n_blocks, n_exprs;
5531 {
5532 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
5533 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
5534 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
5535
5536 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
5537 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
5538 hoist_exprs = sbitmap_vector_alloc (n_blocks, n_exprs);
5539 transpout = sbitmap_vector_alloc (n_blocks, n_exprs);
5540
5541 dominators = sbitmap_vector_alloc (n_blocks, n_blocks);
5542 }
5543
5544 /* Free vars used for code hoisting analysis. */
5545
5546 static void
5547 free_code_hoist_mem ()
5548 {
5549 sbitmap_vector_free (antloc);
5550 sbitmap_vector_free (transp);
5551 sbitmap_vector_free (comp);
5552
5553 sbitmap_vector_free (hoist_vbein);
5554 sbitmap_vector_free (hoist_vbeout);
5555 sbitmap_vector_free (hoist_exprs);
5556 sbitmap_vector_free (transpout);
5557
5558 sbitmap_vector_free (dominators);
5559 }
5560
5561 /* Compute the very busy expressions at entry/exit from each block.
5562
5563 An expression is very busy if all paths from a given point
5564 compute the expression. */
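
/* Restating the iteration below (a summary of the code, not an
   additional invariant):

	vbein[bb]  = antloc[bb] | (vbeout[bb] & transp[bb])
	vbeout[bb] = intersection of vbein over bb's successors
		     (the last block's vbeout is left empty)

   computed to a fixed point by scanning the blocks in reverse order.  */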
5565
5566 static void
5567 compute_code_hoist_vbeinout ()
5568 {
5569 int bb, changed, passes;
5570
5571 sbitmap_vector_zero (hoist_vbeout, n_basic_blocks);
5572 sbitmap_vector_zero (hoist_vbein, n_basic_blocks);
5573
5574 passes = 0;
5575 changed = 1;
5576
5577 while (changed)
5578 {
5579 changed = 0;
5580
5581 /* We scan the blocks in reverse order to speed up
5582 convergence. */
5583 for (bb = n_basic_blocks - 1; bb >= 0; bb--)
5584 {
5585 changed |= sbitmap_a_or_b_and_c (hoist_vbein[bb], antloc[bb],
5586 hoist_vbeout[bb], transp[bb]);
5587 if (bb != n_basic_blocks - 1)
5588 sbitmap_intersection_of_succs (hoist_vbeout[bb], hoist_vbein, bb);
5589 }
5590
5591 passes++;
5592 }
5593
5594 if (gcse_file)
5595 fprintf (gcse_file, "hoisting vbeinout computation: %d passes\n", passes);
5596 }
5597
5598 /* Top level routine to do the dataflow analysis needed by code hoisting. */
5599
5600 static void
5601 compute_code_hoist_data ()
5602 {
5603 compute_local_properties (transp, comp, antloc, 0);
5604 compute_transpout ();
5605 compute_code_hoist_vbeinout ();
5606 calculate_dominance_info (NULL, dominators, CDI_DOMINATORS);
5607 if (gcse_file)
5608 fprintf (gcse_file, "\n");
5609 }
5610
5611 /* Determine if the expression identified by EXPR_INDEX would
5612 reach BB unimpaired if it was placed at the end of EXPR_BB.
5613
5614 It's unclear exactly what Muchnick meant by "unimpaired".  It seems
5615 to me that the expression must either be computed or transparent in
5616 *every* block in the path(s) from EXPR_BB to BB.  Any other definition
5617 would allow the expression to be hoisted out of loops, even if
5618 the expression wasn't a loop invariant.
5619
5620 Contrast this to reachability for PRE where an expression is
5621 considered reachable if *any* path reaches it instead of *all*
5622 paths. */
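
/* For instance (a hypothetical example, just to illustrate the test):
   if BB and two blocks D1 and D2 dominated by BB all compute a + b,
   the occurrences in D1 and D2 count as reached from BB only if, on
   every path from BB to them, each intervening block either leaves A
   and B unchanged (transparent) or recomputes a + b itself.  */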
5623
5624 static int
5625 hoist_expr_reaches_here_p (expr_bb, expr_index, bb, visited)
5626 basic_block expr_bb;
5627 int expr_index;
5628 basic_block bb;
5629 char *visited;
5630 {
5631 edge pred;
5632 int visited_allocated_locally = 0;
5633
5634
5635 if (visited == NULL)
5636 {
5637 visited_allocated_locally = 1;
5638 visited = xcalloc (n_basic_blocks, 1);
5639 }
5640
5641 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
5642 {
5643 basic_block pred_bb = pred->src;
5644
5645 if (pred->src == ENTRY_BLOCK_PTR)
5646 break;
5647 else if (visited[pred_bb->index])
5648 continue;
5649
5650 /* Does this predecessor generate this expression? */
5651 else if (TEST_BIT (comp[pred_bb->index], expr_index))
5652 break;
5653 else if (! TEST_BIT (transp[pred_bb->index], expr_index))
5654 break;
5655
5656 /* Not killed. */
5657 else
5658 {
5659 visited[pred_bb->index] = 1;
5660 if (! hoist_expr_reaches_here_p (expr_bb, expr_index,
5661 pred_bb, visited))
5662 break;
5663 }
5664 }
5665 if (visited_allocated_locally)
5666 free (visited);
5667
5668 return (pred == NULL);
5669 }
5670 \f
5671 /* Actually perform code hoisting. */
5672
5673 static void
5674 hoist_code ()
5675 {
5676 int bb, dominated;
5677 unsigned int i;
5678 struct expr **index_map;
5679 struct expr *expr;
5680
5681 sbitmap_vector_zero (hoist_exprs, n_basic_blocks);
5682
5683 /* Compute a mapping from expression number (`bitmap_index') to
5684 hash table entry. */
5685
5686 index_map = (struct expr **) xcalloc (n_exprs, sizeof (struct expr *));
5687 for (i = 0; i < expr_hash_table_size; i++)
5688 for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
5689 index_map[expr->bitmap_index] = expr;
5690
5691 /* Walk over each basic block looking for potentially hoistable
5692 expressions; nothing gets hoisted from the entry block. */
5693 for (bb = 0; bb < n_basic_blocks; bb++)
5694 {
5695 int found = 0;
5696 int insn_inserted_p;
5697
5698 /* Examine each expression that is very busy at the exit of this
5699 block. These are the potentially hoistable expressions. */
5700 for (i = 0; i < hoist_vbeout[bb]->n_bits; i++)
5701 {
5702 int hoistable = 0;
5703
5704 if (TEST_BIT (hoist_vbeout[bb], i) && TEST_BIT (transpout[bb], i))
5705 {
5706 /* We've found a potentially hoistable expression, now
5707 we look at every block BB dominates to see if it
5708 computes the expression. */
5709 for (dominated = 0; dominated < n_basic_blocks; dominated++)
5710 {
5711 /* Ignore self dominance. */
5712 if (bb == dominated
5713 || ! TEST_BIT (dominators[dominated], bb))
5714 continue;
5715
5716 /* We've found a dominated block, now see if it computes
5717 the busy expression and whether or not moving that
5718 expression to the "beginning" of that block is safe. */
5719 if (!TEST_BIT (antloc[dominated], i))
5720 continue;
5721
5722 /* Note if the expression would reach the dominated block
5723 unimpaired if it was placed at the end of BB.
5724
5725 Keep track of how many times this expression is hoistable
5726 from a dominated block into BB. */
5727 if (hoist_expr_reaches_here_p (BASIC_BLOCK (bb), i,
5728 BASIC_BLOCK (dominated), NULL))
5729 hoistable++;
5730 }
5731
5732 /* If we found more than one hoistable occurrence of this
5733 expression, then note it in the bitmap of expressions to
5734 hoist. It makes no sense to hoist things which are computed
5735 in only one BB, and doing so tends to pessimize register
5736 allocation. One could increase this value to try harder
5737 to avoid any possible code expansion due to register
5738 allocation issues; however experiments have shown that
5739 the vast majority of hoistable expressions are only movable
5740 from two successors, so raising this threshold is likely
5741 to nullify any benefit we get from code hoisting. */
5742 if (hoistable > 1)
5743 {
5744 SET_BIT (hoist_exprs[bb], i);
5745 found = 1;
5746 }
5747 }
5748 }
5749
5750 /* If we found nothing to hoist, then quit now. */
5751 if (! found)
5752 continue;
5753
5754 /* Loop over all the hoistable expressions. */
5755 for (i = 0; i < hoist_exprs[bb]->n_bits; i++)
5756 {
5757 /* We want to insert the expression into BB only once, so
5758 note when we've inserted it. */
5759 insn_inserted_p = 0;
5760
5761 /* These tests should be the same as the tests above. */
5762 if (TEST_BIT (hoist_vbeout[bb], i))
5763 {
5764 /* We've found a potentially hoistable expression, now
5765 we look at every block BB dominates to see if it
5766 computes the expression. */
5767 for (dominated = 0; dominated < n_basic_blocks; dominated++)
5768 {
5769 /* Ignore self dominance. */
5770 if (bb == dominated
5771 || ! TEST_BIT (dominators[dominated], bb))
5772 continue;
5773
5774 /* We've found a dominated block, now see if it computes
5775 the busy expression and whether or not moving that
5776 expression to the "beginning" of that block is safe. */
5777 if (!TEST_BIT (antloc[dominated], i))
5778 continue;
5779
5780 /* The expression is computed in the dominated block and
5781 it would be safe to compute it at the start of the
5782 dominated block. Now we have to determine if the
5783 expression would reach the dominated block if it was
5784 placed at the end of BB. */
5785 if (hoist_expr_reaches_here_p (BASIC_BLOCK (bb), i,
5786 BASIC_BLOCK (dominated), NULL))
5787 {
5788 struct expr *expr = index_map[i];
5789 struct occr *occr = expr->antic_occr;
5790 rtx insn;
5791 rtx set;
5792
5793 /* Find the right occurrence of this expression. */
5794 while (occr && BLOCK_NUM (occr->insn) != dominated)
5795 occr = occr->next;
5796
5797 /* Should never happen. */
5798 if (!occr)
5799 abort ();
5800
5801 insn = occr->insn;
5802
5803 set = single_set (insn);
5804 if (! set)
5805 abort ();
5806
5807 /* Create a pseudo-reg to store the result of reaching
5808 expressions into. Get the mode for the new pseudo
5809 from the mode of the original destination pseudo. */
5810 if (expr->reaching_reg == NULL)
5811 expr->reaching_reg
5812 = gen_reg_rtx (GET_MODE (SET_DEST (set)));
5813
5814 /* In theory this should never fail since we're creating
5815 a reg->reg copy.
5816
5817 However, on the x86 some of the movXX patterns
5818 actually contain clobbers of scratch regs. This may
5819 cause the insn created by validate_change to not
5820 match any pattern and thus cause validate_change to
5821 fail. */
5822 if (validate_change (insn, &SET_SRC (set),
5823 expr->reaching_reg, 0))
5824 {
5825 occr->deleted_p = 1;
5826 if (!insn_inserted_p)
5827 {
5828 insert_insn_end_bb (index_map[i],
5829 BASIC_BLOCK (bb), 0);
5830 insn_inserted_p = 1;
5831 }
5832 }
5833 }
5834 }
5835 }
5836 }
5837 }
5838
5839 free (index_map);
5840 }
5841
5842 /* Top level routine to perform one code hoisting (aka unification) pass
5843
5844 Return non-zero if a change was made. */
5845
5846 static int
5847 one_code_hoisting_pass ()
5848 {
5849 int changed = 0;
5850
5851 alloc_expr_hash_table (max_cuid);
5852 compute_expr_hash_table ();
5853 if (gcse_file)
5854 dump_hash_table (gcse_file, "Code Hoisting Expressions", expr_hash_table,
5855 expr_hash_table_size, n_exprs);
5856
5857 if (n_exprs > 0)
5858 {
5859 alloc_code_hoist_mem (n_basic_blocks, n_exprs);
5860 compute_code_hoist_data ();
5861 hoist_code ();
5862 free_code_hoist_mem ();
5863 }
5864
5865 free_expr_hash_table ();
5866
5867 return changed;
5868 }
5869 \f
5870 /* Here we provide the things required to do store motion towards
5871 the exit.  In order for this to be effective, gcse also needed to
5872 be taught how to move a load when it is killed only by a store to itself.
5873
5874 int i;
5875 float a[10];
5876
5877 void foo(float scale)
5878 {
5879 for (i=0; i<10; i++)
5880 a[i] *= scale;
5881 }
5882
5883 'i' is both loaded and stored to in the loop. Normally, gcse cannot move
5884 the load out since it's live around the loop, and stored at the bottom
5885 of the loop.
5886
5887 The 'Load Motion' referred to and implemented in this file is
5888 an enhancement to gcse which, when using edge-based LCM, recognizes
5889 this situation and allows gcse to move the load out of the loop.
5890
5891 Once gcse has hoisted the load, store motion can then push this
5892 load towards the exit, and we end up with no loads or stores of 'i'
5893 in the loop. */
5894
5895 /* This will search the ldst list for a matching expression.  If it
5896 doesn't find one, we create one and initialize it. */
5897
5898 static struct ls_expr *
5899 ldst_entry (x)
5900 rtx x;
5901 {
5902 struct ls_expr * ptr;
5903
5904 for (ptr = first_ls_expr(); ptr != NULL; ptr = next_ls_expr (ptr))
5905 if (expr_equiv_p (ptr->pattern, x))
5906 break;
5907
5908 if (!ptr)
5909 {
5910 ptr = (struct ls_expr *) xmalloc (sizeof (struct ls_expr));
5911
5912 ptr->next = pre_ldst_mems;
5913 ptr->expr = NULL;
5914 ptr->pattern = x;
5915 ptr->loads = NULL_RTX;
5916 ptr->stores = NULL_RTX;
5917 ptr->reaching_reg = NULL_RTX;
5918 ptr->invalid = 0;
5919 ptr->index = 0;
5920 ptr->hash_index = 0;
5921 pre_ldst_mems = ptr;
5922 }
5923
5924 return ptr;
5925 }
5926
5927 /* Free up an individual ldst entry. */
5928
5929 static void
5930 free_ldst_entry (ptr)
5931 struct ls_expr * ptr;
5932 {
5933 free_INSN_LIST_list (& ptr->loads);
5934 free_INSN_LIST_list (& ptr->stores);
5935
5936 free (ptr);
5937 }
5938
5939 /* Free up all memory associated with the ldst list. */
5940
5941 static void
5942 free_ldst_mems ()
5943 {
5944 while (pre_ldst_mems)
5945 {
5946 struct ls_expr * tmp = pre_ldst_mems;
5947
5948 pre_ldst_mems = pre_ldst_mems->next;
5949
5950 free_ldst_entry (tmp);
5951 }
5952
5953 pre_ldst_mems = NULL;
5954 }
5955
5956 /* Dump debugging info about the ldst list. */
5957
5958 static void
5959 print_ldst_list (file)
5960 FILE * file;
5961 {
5962 struct ls_expr * ptr;
5963
5964 fprintf (file, "LDST list: \n");
5965
5966 for (ptr = first_ls_expr(); ptr != NULL; ptr = next_ls_expr (ptr))
5967 {
5968 fprintf (file, " Pattern (%3d): ", ptr->index);
5969
5970 print_rtl (file, ptr->pattern);
5971
5972 fprintf (file, "\n Loads : ");
5973
5974 if (ptr->loads)
5975 print_rtl (file, ptr->loads);
5976 else
5977 fprintf (file, "(nil)");
5978
5979 fprintf (file, "\n Stores : ");
5980
5981 if (ptr->stores)
5982 print_rtl (file, ptr->stores);
5983 else
5984 fprintf (file, "(nil)");
5985
5986 fprintf (file, "\n\n");
5987 }
5988
5989 fprintf (file, "\n");
5990 }
5991
5992 /* Returns 1 if X is in the list of ldst only expressions. */
5993
5994 static struct ls_expr *
5995 find_rtx_in_ldst (x)
5996 rtx x;
5997 {
5998 struct ls_expr * ptr;
5999
6000 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
6001 if (expr_equiv_p (ptr->pattern, x) && ! ptr->invalid)
6002 return ptr;
6003
6004 return NULL;
6005 }
6006
6007 /* Assign each element of the list of mems a monotonically increasing value. */
6008
6009 static int
6010 enumerate_ldsts ()
6011 {
6012 struct ls_expr * ptr;
6013 int n = 0;
6014
6015 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
6016 ptr->index = n++;
6017
6018 return n;
6019 }
6020
6021 /* Return first item in the list. */
6022
6023 static inline struct ls_expr *
6024 first_ls_expr ()
6025 {
6026 return pre_ldst_mems;
6027 }
6028
6029 /* Return the next item in the list after the specified one. */
6030
6031 static inline struct ls_expr *
6032 next_ls_expr (ptr)
6033 struct ls_expr * ptr;
6034 {
6035 return ptr->next;
6036 }
6037 \f
6038 /* Load Motion for loads which only kill themselves. */
6039
6040 /* Return true if x is a simple MEM operation, with no registers or
6041 side effects. These are the types of loads we consider for the
6042 ld_motion list, otherwise we let the usual aliasing take care of it. */
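
/* For example (a sketch of the intent, not an exhaustive list): a load
   such as (mem (symbol_ref "x")) from a fixed global address is
   "simple", while a volatile MEM, a BLKmode MEM, or a MEM whose address
   involves a register that may vary, e.g. (mem (reg N)), is not, and is
   left to the normal aliasing machinery.  */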
6043
6044 static int
6045 simple_mem (x)
6046 rtx x;
6047 {
6048 if (GET_CODE (x) != MEM)
6049 return 0;
6050
6051 if (MEM_VOLATILE_P (x))
6052 return 0;
6053
6054 if (GET_MODE (x) == BLKmode)
6055 return 0;
6056
6057 if (!rtx_varies_p (XEXP (x, 0), 0))
6058 return 1;
6059
6060 return 0;
6061 }
6062
6063 /* Make sure there isn't a buried reference in this pattern anywhere.
6064 If there is, invalidate the entry for it since we're not capable
6065 of fixing it up just yet.  We have to be sure we know about ALL
6066 loads since the aliasing code will allow all entries in the
6067 ld_motion list to not alias themselves.  If we miss a load, we will get
6068 the wrong value since gcse might common it and we won't know to
6069 fix it up. */
6070
6071 static void
6072 invalidate_any_buried_refs (x)
6073 rtx x;
6074 {
6075 const char * fmt;
6076 int i,j;
6077 struct ls_expr * ptr;
6078
6079 /* Invalidate it in the list. */
6080 if (GET_CODE (x) == MEM && simple_mem (x))
6081 {
6082 ptr = ldst_entry (x);
6083 ptr->invalid = 1;
6084 }
6085
6086 /* Recursively process the insn. */
6087 fmt = GET_RTX_FORMAT (GET_CODE (x));
6088
6089 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6090 {
6091 if (fmt[i] == 'e')
6092 invalidate_any_buried_refs (XEXP (x, i));
6093 else if (fmt[i] == 'E')
6094 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6095 invalidate_any_buried_refs (XVECEXP (x, i, j));
6096 }
6097 }
6098
6099 /* Find all the 'simple' MEMs which are used in LOADs and STORES.  Simple
6100 being defined as MEM loads and stores to symbols, with no
6101 side effects and no registers in the expression.  If there are any
6102 uses/defs which don't match these criteria, the entry is invalidated and
6103 trimmed out later. */
6104
6105 static void
6106 compute_ld_motion_mems ()
6107 {
6108 struct ls_expr * ptr;
6109 int bb;
6110 rtx insn;
6111
6112 pre_ldst_mems = NULL;
6113
6114 for (bb = 0; bb < n_basic_blocks; bb++)
6115 {
6116 for (insn = BLOCK_HEAD (bb);
6117 insn && insn != NEXT_INSN (BLOCK_END (bb));
6118 insn = NEXT_INSN (insn))
6119 {
6120 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
6121 {
6122 if (GET_CODE (PATTERN (insn)) == SET)
6123 {
6124 rtx src = SET_SRC (PATTERN (insn));
6125 rtx dest = SET_DEST (PATTERN (insn));
6126
6127 /* Check for a simple LOAD... */
6128 if (GET_CODE (src) == MEM && simple_mem (src))
6129 {
6130 ptr = ldst_entry (src);
6131 if (GET_CODE (dest) == REG)
6132 ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
6133 else
6134 ptr->invalid = 1;
6135 }
6136 else
6137 {
6138 /* Make sure there isn't a buried load somewhere. */
6139 invalidate_any_buried_refs (src);
6140 }
6141
6142 /* Check for stores. Don't worry about aliased ones, they
6143 will block any movement we might do later. We only care
6144 about this exact pattern since those are the only
6145 circumstances in which we will ignore the aliasing info. */
6146 if (GET_CODE (dest) == MEM && simple_mem (dest))
6147 {
6148 ptr = ldst_entry (dest);
6149
6150 if (GET_CODE (src) != MEM
6151 && GET_CODE (src) != ASM_OPERANDS)
6152 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
6153 else
6154 ptr->invalid = 1;
6155 }
6156 }
6157 else
6158 invalidate_any_buried_refs (PATTERN (insn));
6159 }
6160 }
6161 }
6162 }
6163
6164 /* Remove any references that have been either invalidated or are not in the
6165 expression list for pre gcse. */
6166
6167 static void
6168 trim_ld_motion_mems ()
6169 {
6170 struct ls_expr * last = NULL;
6171 struct ls_expr * ptr = first_ls_expr ();
6172
6173 while (ptr != NULL)
6174 {
6175 int del = ptr->invalid;
6176 struct expr * expr = NULL;
6177
6178 /* Delete if entry has been made invalid. */
6179 if (!del)
6180 {
6181 unsigned int i;
6182
6183 del = 1;
6184 /* Delete if we cannot find this mem in the expression list. */
6185 for (i = 0; i < expr_hash_table_size && del; i++)
6186 {
6187 for (expr = expr_hash_table[i];
6188 expr != NULL;
6189 expr = expr->next_same_hash)
6190 if (expr_equiv_p (expr->expr, ptr->pattern))
6191 {
6192 del = 0;
6193 break;
6194 }
6195 }
6196 }
6197
6198 if (del)
6199 {
6200 if (last != NULL)
6201 {
6202 last->next = ptr->next;
6203 free_ldst_entry (ptr);
6204 ptr = last->next;
6205 }
6206 else
6207 {
6208 pre_ldst_mems = pre_ldst_mems->next;
6209 free_ldst_entry (ptr);
6210 ptr = pre_ldst_mems;
6211 }
6212 }
6213 else
6214 {
6215 /* Set the expression field if we are keeping it. */
6216 last = ptr;
6217 ptr->expr = expr;
6218 ptr = ptr->next;
6219 }
6220 }
6221
6222 /* Show the world what we've found. */
6223 if (gcse_file && pre_ldst_mems != NULL)
6224 print_ldst_list (gcse_file);
6225 }
6226
6227 /* This routine will take an expression which we are replacing with
6228 a reaching register, and update any stores that are needed if
6229 that expression is in the ld_motion list.  Stores are updated by
6230 copying their SRC to the reaching register, and then storing
6231 the reaching register into the store location.  This keeps the
6232 correct value in the reaching register for the loads. */
6233
6234 static void
6235 update_ld_motion_stores (expr)
6236 struct expr * expr;
6237 {
6238 struct ls_expr * mem_ptr;
6239
6240 if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
6241 {
6242 /* We can try to find just the REACHED stores, but it shouldn't
6243 matter if we set the reaching reg everywhere...  some might be
6244 dead and should be eliminated later. */
6245
6246 /* We replace SET mem = expr with
6247 SET reg = expr
6248 SET mem = reg , where reg is the
6249 reaching reg used in the load. */
6250 rtx list = mem_ptr->stores;
6251
6252 for ( ; list != NULL_RTX; list = XEXP (list, 1))
6253 {
6254 rtx insn = XEXP (list, 0);
6255 rtx pat = PATTERN (insn);
6256 rtx src = SET_SRC (pat);
6257 rtx reg = expr->reaching_reg;
6258 rtx copy, new;
6259
6260 /* If we've already copied it, continue. */
6261 if (expr->reaching_reg == src)
6262 continue;
6263
6264 if (gcse_file)
6265 {
6266 fprintf (gcse_file, "PRE: store updated with reaching reg ");
6267 print_rtl (gcse_file, expr->reaching_reg);
6268 fprintf (gcse_file, ":\n ");
6269 print_inline_rtx (gcse_file, insn, 8);
6270 fprintf (gcse_file, "\n");
6271 }
6272
6273 copy = gen_move_insn ( reg, SET_SRC (pat));
6274 new = emit_insn_before (copy, insn);
6275 record_one_set (REGNO (reg), new);
6276 set_block_for_new_insns (new, BLOCK_FOR_INSN (insn));
6277 SET_SRC (pat) = reg;
6278
6279 /* un-recognize this pattern since it's probably different now. */
6280 INSN_CODE (insn) = -1;
6281 gcse_create_count++;
6282 }
6283 }
6284 }
6285 \f
6286 /* Store motion code. */
6287
6288 /* This is used to communicate the target bitvector we want to use in the
6289 reg_set_info routine when called via the note_stores mechanism. */
6290 static sbitmap * regvec;
6291
6292 /* Used in computing the reverse edge graph bit vectors. */
6293 static sbitmap * st_antloc;
6294
6295 /* Global holding the number of store expressions we are dealing with. */
6296 static int num_stores;
6297
6298 /* Check to see if we need to mark a register set.  Called from note_stores. */
6299
6300 static void
6301 reg_set_info (dest, setter, data)
6302 rtx dest, setter ATTRIBUTE_UNUSED;
6303 void * data ATTRIBUTE_UNUSED;
6304 {
6305 if (GET_CODE (dest) == SUBREG)
6306 dest = SUBREG_REG (dest);
6307
6308 if (GET_CODE (dest) == REG)
6309 SET_BIT (*regvec, REGNO (dest));
6310 }
6311
6312 /* Return non-zero if the register operands of expression X are killed
6313 anywhere in basic block BB. */
6314
6315 static int
6316 store_ops_ok (x, bb)
6317 rtx x;
6318 basic_block bb;
6319 {
6320 int i;
6321 enum rtx_code code;
6322 const char * fmt;
6323
6324 /* Repeat is used to turn tail-recursion into iteration. */
6325 repeat:
6326
6327 if (x == 0)
6328 return 1;
6329
6330 code = GET_CODE (x);
6331 switch (code)
6332 {
6333 case REG:
6334 /* If a reg has changed after us in this
6335 block, the operand has been killed. */
6336 return TEST_BIT (reg_set_in_block[bb->index], REGNO (x));
6337
6338 case MEM:
6339 x = XEXP (x, 0);
6340 goto repeat;
6341
6342 case PRE_DEC:
6343 case PRE_INC:
6344 case POST_DEC:
6345 case POST_INC:
6346 return 0;
6347
6348 case PC:
6349 case CC0: /*FIXME*/
6350 case CONST:
6351 case CONST_INT:
6352 case CONST_DOUBLE:
6353 case SYMBOL_REF:
6354 case LABEL_REF:
6355 case ADDR_VEC:
6356 case ADDR_DIFF_VEC:
6357 return 1;
6358
6359 default:
6360 break;
6361 }
6362
6363 i = GET_RTX_LENGTH (code) - 1;
6364 fmt = GET_RTX_FORMAT (code);
6365
6366 for (; i >= 0; i--)
6367 {
6368 if (fmt[i] == 'e')
6369 {
6370 rtx tem = XEXP (x, i);
6371
6372 /* If we are about to do the last recursive call
6373 needed at this level, change it into iteration.
6374 This function is called enough to be worth it. */
6375 if (i == 0)
6376 {
6377 x = tem;
6378 goto repeat;
6379 }
6380
6381 if (! store_ops_ok (tem, bb))
6382 return 0;
6383 }
6384 else if (fmt[i] == 'E')
6385 {
6386 int j;
6387
6388 for (j = 0; j < XVECLEN (x, i); j++)
6389 {
6390 if (! store_ops_ok (XVECEXP (x, i, j), bb))
6391 return 0;
6392 }
6393 }
6394 }
6395
6396 return 1;
6397 }
6398
6399 /* Determine whether INSN is a MEM store pattern that we will consider moving. */
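
/* Roughly, the only stores accepted here look like
   (set (mem (symbol_ref "g")) (reg N)) -- a non-volatile, non-BLKmode
   store to a fixed symbolic address whose source is not an
   ASM_OPERANDS.  (This is a paraphrase of the tests below, not an
   additional restriction.)  */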
6400
6401 static void
6402 find_moveable_store (insn)
6403 rtx insn;
6404 {
6405 struct ls_expr * ptr;
6406 rtx dest = PATTERN (insn);
6407
6408 if (GET_CODE (dest) != SET
6409 || GET_CODE (SET_SRC (dest)) == ASM_OPERANDS)
6410 return;
6411
6412 dest = SET_DEST (dest);
6413
6414 if (GET_CODE (dest) != MEM || MEM_VOLATILE_P (dest)
6415 || GET_MODE (dest) == BLKmode)
6416 return;
6417
6418 if (GET_CODE (XEXP (dest, 0)) != SYMBOL_REF)
6419 return;
6420
6421 if (rtx_varies_p (XEXP (dest, 0), 0))
6422 return;
6423
6424 ptr = ldst_entry (dest);
6425 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
6426 }
6427
6428 /* Build the store table: record which registers are set in each basic
6429 block, and collect the stores we may consider moving. */
6430
6431 static int
6432 compute_store_table ()
6433 {
6434 int bb, ret;
6435 unsigned regno;
6436 rtx insn, pat;
6437
6438 max_gcse_regno = max_reg_num ();
6439
6440 reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks,
6441 max_gcse_regno);
6442 sbitmap_vector_zero (reg_set_in_block, n_basic_blocks);
6443 pre_ldst_mems = 0;
6444
6445 /* Find all the stores we care about. */
6446 for (bb = 0; bb < n_basic_blocks; bb++)
6447 {
6448 regvec = & (reg_set_in_block[bb]);
6449 for (insn = BLOCK_END (bb);
6450 insn && insn != PREV_INSN (BLOCK_HEAD (bb));
6451 insn = PREV_INSN (insn))
6452 {
6453 #ifdef NON_SAVING_SETJMP
6454 if (NON_SAVING_SETJMP && GET_CODE (insn) == NOTE
6455 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP)
6456 {
6457 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
6458 SET_BIT (reg_set_in_block[bb], regno);
6459 continue;
6460 }
6461 #endif
6462 /* Ignore anything that is not a normal insn. */
6463 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
6464 continue;
6465
6466 if (GET_CODE (insn) == CALL_INSN)
6467 {
6468 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
6469 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
6470 SET_BIT (reg_set_in_block[bb], regno);
6471 }
6472
6473 pat = PATTERN (insn);
6474 note_stores (pat, reg_set_info, NULL);
6475
6476 /* Now that we've marked regs, look for stores. */
6477 if (GET_CODE (pat) == SET)
6478 find_moveable_store (insn);
6479 }
6480 }
6481
6482 ret = enumerate_ldsts ();
6483
6484 if (gcse_file)
6485 {
6486 fprintf (gcse_file, "Store Motion Expressions.\n");
6487 print_ldst_list (gcse_file);
6488 }
6489
6490 return ret;
6491 }
6492
6493 /* Check to see if the load X is aliased with STORE_PATTERN. */
6494
6495 static int
6496 load_kills_store (x, store_pattern)
6497 rtx x, store_pattern;
6498 {
6499 if (true_dependence (x, GET_MODE (x), store_pattern, rtx_addr_varies_p))
6500 return 1;
6501 return 0;
6502 }
6503
6504 /* Go through the entire insn X, looking for any loads which might alias
6505 STORE_PATTERN. Return 1 if found. */
6506
6507 static int
6508 find_loads (x, store_pattern)
6509 rtx x, store_pattern;
6510 {
6511 const char * fmt;
6512 int i,j;
6513 int ret = 0;
6514
6515 if (GET_CODE (x) == SET)
6516 x = SET_SRC (x);
6517
6518 if (GET_CODE (x) == MEM)
6519 {
6520 if (load_kills_store (x, store_pattern))
6521 return 1;
6522 }
6523
6524 /* Recursively process the insn. */
6525 fmt = GET_RTX_FORMAT (GET_CODE (x));
6526
6527 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0 && !ret; i--)
6528 {
6529 if (fmt[i] == 'e')
6530 ret |= find_loads (XEXP (x, i), store_pattern);
6531 else if (fmt[i] == 'E')
6532 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6533 ret |= find_loads (XVECEXP (x, i, j), store_pattern);
6534 }
6535 return ret;
6536 }
6537
6538 /* Check if INSN kills the store pattern X (is aliased with it).
6539 Return 1 if it does. */
6540
6541 static int
6542 store_killed_in_insn (x, insn)
6543 rtx x, insn;
6544 {
6545 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
6546 return 0;
6547
6548 if (GET_CODE (insn) == CALL_INSN)
6549 {
6550 if (CONST_CALL_P (insn))
6551 return 0;
6552 else
6553 return 1;
6554 }
6555
6556 if (GET_CODE (PATTERN (insn)) == SET)
6557 {
6558 rtx pat = PATTERN (insn);
6559 /* Check for memory stores to aliased objects. */
6560 if (GET_CODE (SET_DEST (pat)) == MEM && !expr_equiv_p (SET_DEST (pat), x))
6561 /* Pretend it's a load and check for aliasing. */
6562 if (find_loads (SET_DEST (pat), x))
6563 return 1;
6564 return find_loads (SET_SRC (pat), x);
6565 }
6566 else
6567 return find_loads (PATTERN (insn), x);
6568 }
6569
6570 /* Returns 1 if the expression X is loaded or clobbered on or after INSN
6571 within basic block BB. */
6572
6573 static int
6574 store_killed_after (x, insn, bb)
6575 rtx x, insn;
6576 basic_block bb;
6577 {
6578 rtx last = bb->end;
6579
6580 if (insn == last)
6581 return 0;
6582
6583 /* Check if the register operands of the store are OK in this block.
6584 Note that if registers are changed ANYWHERE in the block, we'll
6585 decide we can't move it, regardless of whether it changed above
6586 or below the store. This could be improved by checking the register
6587 operands while looking for aliasing in each insn. */
6588 if (!store_ops_ok (XEXP (x, 0), bb))
6589 return 1;
6590
6591 for ( ; insn && insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
6592 if (store_killed_in_insn (x, insn))
6593 return 1;
6594
6595 return 0;
6596 }
6597
6598 /* Returns 1 if the expression X is loaded or clobbered on or before INSN
6599 within basic block BB. */
6600 static int
6601 store_killed_before (x, insn, bb)
6602 rtx x, insn;
6603 basic_block bb;
6604 {
6605 rtx first = bb->head;
6606
6607 if (insn == first)
6608 return store_killed_in_insn (x, insn);
6609
6610 /* Check if the register operands of the store are OK in this block.
6611 Note that if registers are changed ANYWHERE in the block, we'll
6612 decide we can't move it, regardless of whether it changed above
6613 or below the store. This could be improved by checking the register
6614 operands while looking for aliasing in each insn. */
6615 if (!store_ops_ok (XEXP (x, 0), bb))
6616 return 1;
6617
6618 for ( ; insn && insn != PREV_INSN (first); insn = PREV_INSN (insn))
6619 if (store_killed_in_insn (x, insn))
6620 return 1;
6621
6622 return 0;
6623 }
6624
6625 #define ANTIC_STORE_LIST(x) ((x)->loads)
6626 #define AVAIL_STORE_LIST(x) ((x)->stores)
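
/* For store motion these macros reuse the LOADS and STORES fields of
   struct ls_expr for a new purpose: the "loads" chain holds the
   anticipatable stores and the "stores" chain holds the available
   stores collected by build_store_vectors below.  */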
6627
6628 /* Given the table of available store insns at the end of blocks,
6629 determine which ones are not killed by aliasing, and generate
6630 the appropriate vectors for gen and killed. */
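
/* As a rough summary of what the code below computes (restating the
   code, not adding new requirements):

     ae_gen[bb]    -- stores which reach the end of BB unchallenged
                      (not killed by aliasing after their occurrence)
     st_antloc[bb] -- stores anticipatable at the start of BB
                      (not killed by aliasing before their occurrence)
     ae_kill[bb]   -- store expressions killed somewhere in BB
     transp[bb]    -- store expressions BB is transparent for

   These vectors are then fed to pre_edge_rev_lcm by store_motion.  */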
6631 static void
6632 build_store_vectors ()
6633 {
6634 basic_block bb;
6635 int b;
6636 rtx insn, st;
6637 struct ls_expr * ptr;
6638
6639 /* Build the gen_vector. This is any store in the table which is not killed
6640 by aliasing later in its block. */
6641 ae_gen = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
6642 sbitmap_vector_zero (ae_gen, n_basic_blocks);
6643
6644 st_antloc = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
6645 sbitmap_vector_zero (st_antloc, n_basic_blocks);
6646
6647 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
6648 {
6649 /* Put all the stores into either the antic list, or the avail list,
6650 or both. */
6651 rtx store_list = ptr->stores;
6652 ptr->stores = NULL_RTX;
6653
6654 for (st = store_list; st != NULL; st = XEXP (st, 1))
6655 {
6656 insn = XEXP (st, 0);
6657 bb = BLOCK_FOR_INSN (insn);
6658
6659 if (!store_killed_after (ptr->pattern, insn, bb))
6660 {
6661 /* If we've already seen an available expression in this block,
6662 we can delete the one we saw already (it occurs earlier in
6663 the block) and replace it with this one.  We'll copy the
6664 old SRC expression to an unused register in case there
6665 are any side effects. */
6666 if (TEST_BIT (ae_gen[bb->index], ptr->index))
6667 {
6668 /* Find previous store. */
6669 rtx st;
6670 for (st = AVAIL_STORE_LIST (ptr); st ; st = XEXP (st, 1))
6671 if (BLOCK_FOR_INSN (XEXP (st, 0)) == bb)
6672 break;
6673 if (st)
6674 {
6675 rtx r = gen_reg_rtx (GET_MODE (ptr->pattern));
6676 if (gcse_file)
6677 fprintf (gcse_file, "Removing redundant store:\n");
6678 replace_store_insn (r, XEXP (st, 0), bb);
6679 XEXP (st, 0) = insn;
6680 continue;
6681 }
6682 }
6683 SET_BIT (ae_gen[bb->index], ptr->index);
6684 AVAIL_STORE_LIST (ptr) = alloc_INSN_LIST (insn,
6685 AVAIL_STORE_LIST (ptr));
6686 }
6687
6688 if (!store_killed_before (ptr->pattern, insn, bb))
6689 {
6690 SET_BIT (st_antloc[BLOCK_NUM (insn)], ptr->index);
6691 ANTIC_STORE_LIST (ptr) = alloc_INSN_LIST (insn,
6692 ANTIC_STORE_LIST (ptr));
6693 }
6694 }
6695
6696 /* Free the original list of store insns. */
6697 free_INSN_LIST_list (&store_list);
6698 }
6699
6700 ae_kill = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
6701 sbitmap_vector_zero (ae_kill, n_basic_blocks);
6702
6703 transp = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
6704 sbitmap_vector_zero (transp, n_basic_blocks);
6705
6706 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
6707 for (b = 0; b < n_basic_blocks; b++)
6708 {
6709 if (store_killed_after (ptr->pattern, BLOCK_HEAD (b), BASIC_BLOCK (b)))
6710 {
6711 /* The anticipatable expression is not killed if it's gen'd. */
6712 /*
6713 We leave this check out for now. If we have a code sequence
6714 in a block which looks like:
6715 ST MEMa = x
6716 L y = MEMa
6717 ST MEMa = z
6718 We should flag this as having an ANTIC expression, NOT
6719 transparent, NOT killed, and AVAIL.
6720 Unfortunately, since we haven't re-written all loads to
6721 use the reaching reg, we'll end up doing an incorrect
6722 Load in the middle here if we push the store down. It happens in
6723 gcc.c-torture/execute/960311-1.c with -O3
6724 If we always kill it in this case, we'll sometimes do
6725 unnecessary work, but it shouldn't actually hurt anything.
6726 if (!TEST_BIT (ae_gen[b], ptr->index)). */
6727 SET_BIT (ae_kill[b], ptr->index);
6728 }
6729 else
6730 SET_BIT (transp[b], ptr->index);
6731 }
6732
6733 /* Any block with no exits calls some non-returning function, so
6734 we'd better mark the store killed here, or we might not store to
6735 it at all. If we knew it was abort, we wouldn't have to store,
6736 but we don't know that for sure. */
6737 if (gcse_file)
6738 {
6739 fprintf (gcse_file, "ST_avail and ST_antic (shown under loads..)\n");
6740 print_ldst_list (gcse_file);
6741 dump_sbitmap_vector (gcse_file, "st_antloc", "", st_antloc, n_basic_blocks);
6742 dump_sbitmap_vector (gcse_file, "st_kill", "", ae_kill, n_basic_blocks);
6743 dump_sbitmap_vector (gcse_file, "Transpt", "", transp, n_basic_blocks);
6744 dump_sbitmap_vector (gcse_file, "st_avloc", "", ae_gen, n_basic_blocks);
6745 }
6746 }
6747
6748 /* Insert an instruction at the beginning of a basic block, and update
6749 the BLOCK_HEAD if needed. */
6750
6751 static void
6752 insert_insn_start_bb (insn, bb)
6753 rtx insn;
6754 basic_block bb;
6755 {
6756 /* Insert at start of successor block. */
6757 rtx prev = PREV_INSN (bb->head);
6758 rtx before = bb->head;
6759 while (before != 0)
6760 {
6761 if (GET_CODE (before) != CODE_LABEL
6762 && (GET_CODE (before) != NOTE
6763 || NOTE_LINE_NUMBER (before) != NOTE_INSN_BASIC_BLOCK))
6764 break;
6765 prev = before;
6766 if (prev == bb->end)
6767 break;
6768 before = NEXT_INSN (before);
6769 }
6770
6771 insn = emit_insn_after (insn, prev);
6772
6773 if (prev == bb->end)
6774 bb->end = insn;
6775
6776 set_block_for_new_insns (insn, bb);
6777
6778 if (gcse_file)
6779 {
6780 fprintf (gcse_file, "STORE_MOTION insert store at start of BB %d:\n",
6781 bb->index);
6782 print_inline_rtx (gcse_file, insn, 6);
6783 fprintf (gcse_file, "\n");
6784 }
6785 }
6786
6787 /* This routine will insert a store on an edge. EXPR is the ldst entry for
6788 the memory reference, and E is the edge to insert it on. Returns non-zero
6789 if an edge insertion was performed. */
6790
6791 static int
6792 insert_store (expr, e)
6793 struct ls_expr * expr;
6794 edge e;
6795 {
6796 rtx reg, insn;
6797 basic_block bb;
6798 edge tmp;
6799
6800 /* We did all the deletes before this insert, so if we didn't delete a
6801 store, then we haven't set the reaching reg yet either. */
6802 if (expr->reaching_reg == NULL_RTX)
6803 return 0;
6804
6805 reg = expr->reaching_reg;
6806 insn = gen_move_insn (expr->pattern, reg);
6807
6808 /* If we are inserting this expression on ALL predecessor edges of a BB,
6809 insert it at the start of the BB, and reset the insert bits on the other
6810 edges so we don't try to insert it on them as well. */
6811 bb = e->dest;
6812 for (tmp = e->dest->pred; tmp ; tmp = tmp->pred_next)
6813 {
6814 int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
6815 if (index == EDGE_INDEX_NO_EDGE)
6816 abort ();
6817 if (! TEST_BIT (pre_insert_map[index], expr->index))
6818 break;
6819 }
6820
6821 /* If tmp is NULL, we found an insertion on every edge, blank the
6822 insertion vector for these edges, and insert at the start of the BB. */
6823 if (!tmp && bb != EXIT_BLOCK_PTR)
6824 {
6825 for (tmp = e->dest->pred; tmp ; tmp = tmp->pred_next)
6826 {
6827 int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
6828 RESET_BIT (pre_insert_map[index], expr->index);
6829 }
6830 insert_insn_start_bb (insn, bb);
6831 return 0;
6832 }
6833
6834 /* We can't insert on this edge, so we'll insert at the head of the
6835 successor block.  See Morgan, sec 10.5. */
6836 if ((e->flags & EDGE_ABNORMAL) == EDGE_ABNORMAL)
6837 {
6838 insert_insn_start_bb (insn, bb);
6839 return 0;
6840 }
6841
6842 insert_insn_on_edge (insn, e);
6843
6844 if (gcse_file)
6845 {
6846 fprintf (gcse_file, "STORE_MOTION insert insn on edge (%d, %d):\n",
6847 e->src->index, e->dest->index);
6848 print_inline_rtx (gcse_file, insn, 6);
6849 fprintf (gcse_file, "\n");
6850 }
6851
6852 return 1;
6853 }
6854
6855 /* This routine will replace a store with a SET to a specified register. */
6856
6857 static void
6858 replace_store_insn (reg, del, bb)
6859 rtx reg, del;
6860 basic_block bb;
6861 {
6862 rtx insn;
6863
6864 insn = gen_move_insn (reg, SET_SRC (PATTERN (del)));
6865 insn = emit_insn_after (insn, del);
6866 set_block_for_new_insns (insn, bb);
6867
6868 if (gcse_file)
6869 {
6870 fprintf (gcse_file,
6871 "STORE_MOTION delete insn in BB %d:\n ", bb->index);
6872 print_inline_rtx (gcse_file, del, 6);
6873 fprintf (gcse_file, "\nSTORE_MOTION replaced with insn:\n ");
6874 print_inline_rtx (gcse_file, insn, 6);
6875 fprintf (gcse_file, "\n");
6876 }
6877
6878 if (bb->end == del)
6879 bb->end = insn;
6880
6881 if (bb->head == del)
6882 bb->head = insn;
6883
6884 delete_insn (del);
6885 }
6886
6887
6888 /* Delete a store, but copy the value that would have been stored into
6889 the reaching_reg for later storing. */
6890
6891 static void
6892 delete_store (expr, bb)
6893 struct ls_expr * expr;
6894 basic_block bb;
6895 {
6896 rtx reg, i, del;
6897
6898 if (expr->reaching_reg == NULL_RTX)
6899 expr->reaching_reg = gen_reg_rtx (GET_MODE (expr->pattern));
6900
6901
6902 /* If there is more than 1 store, the earlier ones will be dead,
6903 but it doesn't hurt to replace them here. */
6904 reg = expr->reaching_reg;
6905
6906 for (i = AVAIL_STORE_LIST (expr); i; i = XEXP (i, 1))
6907 {
6908 del = XEXP (i, 0);
6909 if (BLOCK_FOR_INSN (del) == bb)
6910 {
6911 /* We know there is only one since we deleted redundant
6912 ones during the available computation. */
6913 replace_store_insn (reg, del, bb);
6914 break;
6915 }
6916 }
6917 }
6918
6919 /* Free memory used by store motion. */
6920
6921 static void
6922 free_store_memory ()
6923 {
6924 free_ldst_mems ();
6925
6926 if (ae_gen)
6927 sbitmap_vector_free (ae_gen);
6928 if (ae_kill)
6929 sbitmap_vector_free (ae_kill);
6930 if (transp)
6931 sbitmap_vector_free (transp);
6932 if (st_antloc)
6933 sbitmap_vector_free (st_antloc);
6934 if (pre_insert_map)
6935 sbitmap_vector_free (pre_insert_map);
6936 if (pre_delete_map)
6937 sbitmap_vector_free (pre_delete_map);
6938 if (reg_set_in_block)
6939 sbitmap_vector_free (reg_set_in_block);
6940
6941 ae_gen = ae_kill = transp = st_antloc = NULL;
6942 pre_insert_map = pre_delete_map = reg_set_in_block = NULL;
6943 }
6944
6945 /* Perform store motion. Much like gcse, except we move expressions the
6946 other way by looking at the flowgraph in reverse. */
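
/* In outline (a summary of the code below, not a separate algorithm):
   build the store table and the antic/avail/kill/transp vectors, run
   the reverse edge-based LCM (pre_edge_rev_lcm) to obtain insert and
   delete maps, replace the stores selected for deletion with copies
   into a reaching register, insert the moved stores on the chosen
   edges (or at block starts when every incoming edge needs one), and
   finally commit any pending edge insertions.  */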
6947
6948 static void
6949 store_motion ()
6950 {
6951 int x;
6952 struct ls_expr * ptr;
6953 int update_flow = 0;
6954
6955 if (gcse_file)
6956 {
6957 fprintf (gcse_file, "before store motion\n");
6958 print_rtl (gcse_file, get_insns ());
6959 }
6960
6961
6962 init_alias_analysis ();
6963
6964 /* Find all the stores that are live to the end of their block. */
6965 num_stores = compute_store_table ();
6966 if (num_stores == 0)
6967 {
6968 sbitmap_vector_free (reg_set_in_block);
6969 end_alias_analysis ();
6970 return;
6971 }
6972
6973 /* Now compute what's actually available to move. */
6974 add_noreturn_fake_exit_edges ();
6975 build_store_vectors ();
6976
6977 edge_list = pre_edge_rev_lcm (gcse_file, num_stores, transp, ae_gen,
6978 st_antloc, ae_kill, &pre_insert_map,
6979 &pre_delete_map);
6980
6981 /* Now we want to insert the new stores which are going to be needed. */
6982 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
6983 {
6984 for (x = 0; x < n_basic_blocks; x++)
6985 if (TEST_BIT (pre_delete_map[x], ptr->index))
6986 delete_store (ptr, BASIC_BLOCK (x));
6987
6988 for (x = 0; x < NUM_EDGES (edge_list); x++)
6989 if (TEST_BIT (pre_insert_map[x], ptr->index))
6990 update_flow |= insert_store (ptr, INDEX_EDGE (edge_list, x));
6991 }
6992
6993 if (update_flow)
6994 commit_edge_insertions ();
6995
6996 free_store_memory ();
6997 free_edge_list (edge_list);
6998 remove_fake_edges ();
6999 end_alias_analysis ();
7000 }