re PR rtl-optimization/11741 (internal compiler error at gcse.c:5318)
[gcc.git] / gcc / gcse.c
1 /* Global common subexpression elimination/Partial redundancy elimination
2 and global constant/copy propagation for GNU compiler.
3 Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
21 02111-1307, USA. */
22
23 /* TODO
24 - reordering of memory allocation and freeing to be more space efficient
25 - do rough calc of how many regs are needed in each block, and a rough
26 calc of how many regs are available in each class and use that to
27 throttle back the code in cases where RTX_COST is minimal.
28 - a store to the same address as a load does not kill the load if the
29 source of the store is also the destination of the load. Handling this
30 allows more load motion, particularly out of loops.
31 - ability to realloc sbitmap vectors would allow one initial computation
32 of reg_set_in_block with only subsequent additions, rather than
33 recomputing it for each pass
34
35 */
36
37 /* References searched while implementing this.
38
39 Compilers: Principles, Techniques, and Tools
40 Aho, Sethi, Ullman
41 Addison-Wesley, 1988
42
43 Global Optimization by Suppression of Partial Redundancies
44 E. Morel, C. Renvoise
45 Communications of the ACM, Vol. 22, Num. 2, Feb. 1979
46
47 A Portable Machine-Independent Global Optimizer - Design and Measurements
48 Frederick Chow
49 Stanford Ph.D. thesis, Dec. 1983
50
51 A Fast Algorithm for Code Movement Optimization
52 D.M. Dhamdhere
53 SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988
54
55 A Solution to a Problem with Morel and Renvoise's
56 Global Optimization by Suppression of Partial Redundancies
57 K-H Drechsler, M.P. Stadel
58 ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988
59
60 Practical Adaptation of the Global Optimization
61 Algorithm of Morel and Renvoise
62 D.M. Dhamdhere
63 ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991
64
65 Efficiently Computing Static Single Assignment Form and the Control
66 Dependence Graph
67 R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
68 ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991
69
70 Lazy Code Motion
71 J. Knoop, O. Ruthing, B. Steffen
72 ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
73
74 What's In a Region? Or Computing Control Dependence Regions in Near-Linear
75 Time for Reducible Flow Control
76 Thomas Ball
77 ACM Letters on Programming Languages and Systems,
78 Vol. 2, Num. 1-4, Mar-Dec 1993
79
80 An Efficient Representation for Sparse Sets
81 Preston Briggs, Linda Torczon
82 ACM Letters on Programming Languages and Systems,
83 Vol. 2, Num. 1-4, Mar-Dec 1993
84
85 A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
86 K-H Drechsler, M.P. Stadel
87 ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993
88
89 Partial Dead Code Elimination
90 J. Knoop, O. Ruthing, B. Steffen
91 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
92
93 Effective Partial Redundancy Elimination
94 P. Briggs, K.D. Cooper
95 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
96
97 The Program Structure Tree: Computing Control Regions in Linear Time
98 R. Johnson, D. Pearson, K. Pingali
99 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
100
101 Optimal Code Motion: Theory and Practice
102 J. Knoop, O. Ruthing, B. Steffen
103 ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994
104
105 The power of assignment motion
106 J. Knoop, O. Ruthing, B. Steffen
107 ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
108
109 Global code motion / global value numbering
110 C. Click
111 ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
112
113 Value Driven Redundancy Elimination
114 L.T. Simpson
115 Rice University Ph.D. thesis, Apr. 1996
116
117 Value Numbering
118 L.T. Simpson
119 Massively Scalar Compiler Project, Rice University, Sep. 1996
120
121 High Performance Compilers for Parallel Computing
122 Michael Wolfe
123 Addison-Wesley, 1996
124
125 Advanced Compiler Design and Implementation
126 Steven Muchnick
127 Morgan Kaufmann, 1997
128
129 Building an Optimizing Compiler
130 Robert Morgan
131 Digital Press, 1998
132
133 People wishing to speed up the code here should read:
134 Elimination Algorithms for Data Flow Analysis
135 B.G. Ryder, M.C. Paull
136 ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986
137
138 How to Analyze Large Programs Efficiently and Informatively
139 D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
140 ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
141
142 People wishing to do something different can find various possibilities
143 in the above papers and elsewhere.
144 */
145
146 #include "config.h"
147 #include "system.h"
148 #include "coretypes.h"
149 #include "tm.h"
150 #include "toplev.h"
151
152 #include "rtl.h"
153 #include "tm_p.h"
154 #include "regs.h"
155 #include "hard-reg-set.h"
156 #include "flags.h"
157 #include "real.h"
158 #include "insn-config.h"
159 #include "recog.h"
160 #include "basic-block.h"
161 #include "output.h"
162 #include "function.h"
163 #include "expr.h"
164 #include "except.h"
165 #include "ggc.h"
166 #include "params.h"
167 #include "cselib.h"
168 #include "intl.h"
169 #include "obstack.h"
170
171 /* Propagate flow information through back edges and thus enable PRE's
172 moving loop invariant calculations out of loops.
173
174 Originally this tended to create worse overall code, but several
175 improvements during the development of PRE seem to have made following
176 back edges generally a win.
177
178 Note much of the loop invariant code motion done here would normally
179 be done by loop.c, which has more heuristics for when to move invariants
180 out of loops. At some point we might need to move some of those
181 heuristics into gcse.c. */
182
183 /* We support GCSE via Partial Redundancy Elimination. PRE optimizations
184 are a superset of those done by GCSE.
185
186 We perform the following steps:
187
188 1) Compute basic block information.
189
190 2) Compute table of places where registers are set.
191
192 3) Perform copy/constant propagation.
193
194 4) Perform global cse.
195
196 5) Perform another pass of copy/constant propagation.
197
198 Two passes of copy/constant propagation are done because the first one
199 enables more GCSE and the second one helps to clean up the copies that
200 GCSE creates. This is needed more for PRE than for Classic because Classic
201 GCSE will try to use an existing register containing the common
202 subexpression rather than create a new one. This is harder to do for PRE
203 because of the code motion (which Classic GCSE doesn't do).
204
205 Expressions we are interested in GCSE-ing are of the form
206 (set (pseudo-reg) (expression)).
207 Function want_to_gcse_p says what these are.
208
209 PRE handles moving invariant expressions out of loops (by treating them as
210 partially redundant).
211
212 Eventually it would be nice to replace cse.c/gcse.c with SSA (static single
213 assignment) based GVN (global value numbering). L. T. Simpson's paper
214 (Rice University) on value numbering is a useful reference for this.
215
216 **********************
217
218 We used to support multiple passes but there are diminishing returns in
219 doing so. The first pass usually makes 90% of the changes that are doable.
220 A second pass can make a few more changes made possible by the first pass.
221 Experiments show any further passes don't make enough changes to justify
222 the expense.
223
224 A study of spec92 using an unlimited number of passes:
225 [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
226 [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
227 [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1
228
229 It was found doing copy propagation between each pass enables further
230 substitutions.
231
232 PRE is quite expensive in complicated functions because the DFA can take
233 a while to converge. Hence we only perform one pass. The parameter
234 max-gcse-passes can be modified if one wants to experiment.
235
236 **********************
237
238 The steps for PRE are:
239
240 1) Build the hash table of expressions we wish to GCSE (expr_hash_table).
241
242 2) Perform the data flow analysis for PRE.
243
244 3) Delete the redundant instructions
245
246 4) Insert the required copies [if any] that make the partially
247 redundant instructions fully redundant.
248
249 5) For other reaching expressions, insert an instruction to copy the value
250 to a newly created pseudo that will reach the redundant instruction.
251
252 The deletion is done first so that when we do insertions we
253 know which pseudo reg to use.
254
255 Various papers have argued that PRE DFA is expensive (O(n^2)) and others
256 argue it is not. The number of iterations for the algorithm to converge
257 is typically 2-4 so I don't view it as that expensive (relatively speaking).
258
259 PRE GCSE depends heavily on the second CSE pass to clean up the copies
260 we create. To make an expression reach the place where it's redundant,
261 the result of the expression is copied to a new register, and the redundant
262 expression is deleted by replacing it with this new register. Classic GCSE
263 doesn't have this problem as much as it computes the reaching defs of
264 each register in each block and thus can try to use an existing register.
265
266 **********************
267
268 A fair bit of simplicity is gained by creating small functions for simple
269 tasks, even when the function is only called in one place. This may
270 measurably slow things down [or may not] by creating more function call
271 overhead than is necessary. The source is laid out so that it's trivial
272 to make the affected functions inline so that one can measure what speedup,
273 if any, can be achieved; later, when things settle, things can be
274 rearranged.
275
276 Help stamp out big monolithic functions! */
277 \f
278 /* GCSE global vars. */
279
280 /* -dG dump file. */
281 static FILE *gcse_file;
282
283 /* Note whether or not we should run jump optimization after gcse. We
284 want to do this for two cases.
285
286 * If we changed any jumps via cprop.
287
288 * If we added any labels via edge splitting. */
289
290 static int run_jump_opt_after_gcse;
291
292 /* Bitmaps are normally not included in debugging dumps.
293 However it's useful to be able to print them from GDB.
294 We could create special functions for this, but it's simpler to
295 just allow passing stderr to the dump_foo fns. Since stderr can
296 be a macro, we store a copy here. */
297 static FILE *debug_stderr;
298
299 /* An obstack for our working variables. */
300 static struct obstack gcse_obstack;
301
302 struct reg_use {rtx reg_rtx; };
303
304 /* Hash table of expressions. */
305
306 struct expr
307 {
308 /* The expression (SET_SRC for expressions, PATTERN for assignments). */
309 rtx expr;
310 /* Index in the available expression bitmaps. */
311 int bitmap_index;
312 /* Next entry with the same hash. */
313 struct expr *next_same_hash;
314 /* List of anticipatable occurrences in basic blocks in the function.
315 An "anticipatable occurrence" is one that is the first occurrence in the
316 basic block, the operands are not modified in the basic block prior
317 to the occurrence and the output is not used between the start of
318 the block and the occurrence. */
319 struct occr *antic_occr;
320 /* List of available occurrences in basic blocks in the function.
321 An "available occurrence" is one that is the last occurrence in the
322 basic block and the operands are not modified by following statements in
323 the basic block [including this insn]. */
324 struct occr *avail_occr;
325 /* Non-null if the computation is PRE redundant.
326 The value is the newly created pseudo-reg to record a copy of the
327 expression in all the places that reach the redundant copy. */
328 rtx reaching_reg;
329 };
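
/* For illustration: in a block containing

     r5 = r1 + r2
     r1 = r6
     r7 = r1 + r2

   the first computation of r1 + r2 is an anticipatable occurrence (its
   operands are unmodified earlier in the block and r5 is not used before
   it), while the second is an available occurrence (r1 and r2 are not
   modified by it or by anything after it in the block).  The two
   occurrences are recorded on antic_occr and avail_occr respectively.  */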
330
331 /* Occurrence of an expression.
332 There is one per basic block. If a pattern appears more than once the
333 last appearance is used [or first for anticipatable expressions]. */
334
335 struct occr
336 {
337 /* Next occurrence of this expression. */
338 struct occr *next;
339 /* The insn that computes the expression. */
340 rtx insn;
341 /* Nonzero if this [anticipatable] occurrence has been deleted. */
342 char deleted_p;
343 /* Nonzero if this [available] occurrence has been copied to
344 reaching_reg. */
345 /* ??? This is mutually exclusive with deleted_p, so they could share
346 the same byte. */
347 char copied_p;
348 };
349
350 /* Expression and copy propagation hash tables.
351 Each hash table is an array of buckets.
352 ??? It is known that if it were an array of entries, structure elements
353 `next_same_hash' and `bitmap_index' wouldn't be necessary. However, it is
354 not clear whether in the final analysis a sufficient amount of memory would
355 be saved as the size of the available expression bitmaps would be larger
356 [one could build a mapping table without holes afterwards though].
357 Someday I'll perform the computation and figure it out. */
358
359 struct hash_table
360 {
361 /* The table itself.
362 This is an array of `expr_hash_table_size' elements. */
363 struct expr **table;
364
365 /* Size of the hash table, in elements. */
366 unsigned int size;
367
368 /* Number of hash table elements. */
369 unsigned int n_elems;
370
371 /* Whether the table is the expression hash table or the copy propagation one. */
372 int set_p;
373 };
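
/* A minimal sketch (illustrative only, mirroring what lookup_expr below
   does) of how a bucket chain in one of these tables is walked, assuming
   the hash_expr and expr_equiv_p routines declared later in this file:

     static struct expr *
     lookup_expr_sketch (rtx pat, struct hash_table *table)
     {
       int do_not_record_p;
       unsigned int hash
         = hash_expr (pat, GET_MODE (pat), &do_not_record_p, table->size);
       struct expr *expr;

       for (expr = table->table[hash]; expr; expr = expr->next_same_hash)
         if (expr_equiv_p (expr->expr, pat))
           return expr;
       return NULL;
     }

   Entries that collide are chained through next_same_hash, and bitmap_index
   gives each entry its column in the dataflow bitmaps.  */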
374
375 /* Expression hash table. */
376 static struct hash_table expr_hash_table;
377
378 /* Copy propagation hash table. */
379 static struct hash_table set_hash_table;
380
381 /* Mapping of uids to cuids.
382 Only real insns get cuids. */
383 static int *uid_cuid;
384
385 /* Highest UID in UID_CUID. */
386 static int max_uid;
387
388 /* Get the cuid of an insn. */
389 #ifdef ENABLE_CHECKING
390 #define INSN_CUID(INSN) (INSN_UID (INSN) > max_uid ? (abort (), 0) : uid_cuid[INSN_UID (INSN)])
391 #else
392 #define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
393 #endif
394
395 /* Number of cuids. */
396 static int max_cuid;
397
398 /* Mapping of cuids to insns. */
399 static rtx *cuid_insn;
400
401 /* Get insn from cuid. */
402 #define CUID_INSN(CUID) (cuid_insn[CUID])
403
404 /* Maximum register number in function prior to doing gcse + 1.
405 Registers created during this pass have regno >= max_gcse_regno.
406 This is named with "gcse" to not collide with global of same name. */
407 static unsigned int max_gcse_regno;
408
409 /* Table of registers that are modified.
410
411 For each register, each element is a list of places where the pseudo-reg
412 is set.
413
414 For simplicity, GCSE is done on sets of pseudo-regs only. PRE GCSE only
415 requires knowledge of which blocks kill which regs [and thus could use
416 a bitmap instead of the lists `reg_set_table' uses].
417
418 `reg_set_table' could be turned into an array of bitmaps (num-bbs x
419 num-regs) [however perhaps it may be useful to keep the data as is]. One
420 advantage of recording things this way is that `reg_set_table' is fairly
421 sparse with respect to pseudo regs but for hard regs could be fairly dense
422 [relatively speaking]. And recording sets of pseudo-regs in lists speeds
423 up functions like compute_transp since in the case of pseudo-regs we only
424 need to iterate over the number of times a pseudo-reg is set, not over the
425 number of basic blocks [clearly there is a bit of a slow down in the cases
426 where a pseudo is set more than once in a block, however it is believed
427 that the net effect is to speed things up]. This isn't done for hard-regs
428 because recording call-clobbered hard-regs in `reg_set_table' at each
429 function call can consume a fair bit of memory, and iterating over
430 hard-regs stored this way in compute_transp will be more expensive. */
431
432 typedef struct reg_set
433 {
434 /* The next setting of this register. */
435 struct reg_set *next;
436 /* The insn where it was set. */
437 rtx insn;
438 } reg_set;
439
440 static reg_set **reg_set_table;
441
442 /* Size of `reg_set_table'.
443 The table starts out at max_gcse_regno + slop, and is enlarged as
444 necessary. */
445 static int reg_set_table_size;
446
447 /* Amount to grow `reg_set_table' by when it's full. */
448 #define REG_SET_TABLE_SLOP 100
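
/* A minimal sketch (hypothetical helper, for illustration only) of walking
   the per-register chains above, e.g. to visit every insn that sets pseudo
   REGNO:

     struct reg_set *r;

     if (regno < reg_set_table_size)
       for (r = reg_set_table[regno]; r != NULL; r = r->next)
         visit (r->insn);       <- visit is a hypothetical callback

   This is the iteration pattern that makes compute_transp cheap for
   pseudos: it visits each set of the register rather than each basic
   block.  */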
449
450 /* This is a list of expressions which are MEMs and will be used by load
451 or store motion.
452 Load motion tracks MEMs which aren't killed by
453 anything except themselves (i.e., loads and stores to a single location).
454 We can then allow movement of these MEM refs with a little special
455 allowance. (all stores copy the same value to the reaching reg used
456 for the loads). This means all values used to store into memory must have
457 no side effects so we can re-issue the setter value.
458 Store Motion uses this structure as an expression table to track stores
459 which look interesting, and might be moveable towards the exit block. */
460
461 struct ls_expr
462 {
463 struct expr * expr; /* Gcse expression reference for LM. */
464 rtx pattern; /* Pattern of this mem. */
465 rtx pattern_regs; /* List of registers mentioned by the mem. */
466 rtx loads; /* INSN list of loads seen. */
467 rtx stores; /* INSN list of stores seen. */
468 struct ls_expr * next; /* Next in the list. */
469 int invalid; /* Invalid for some reason. */
470 int index; /* If it maps to a bitmap index. */
471 int hash_index; /* Index when in a hash table. */
472 rtx reaching_reg; /* Register to use when re-writing. */
473 };
474
475 /* Array of implicit set patterns indexed by basic block index. */
476 static rtx *implicit_sets;
477
478 /* Head of the list of load/store memory refs. */
479 static struct ls_expr * pre_ldst_mems = NULL;
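
/* A minimal sketch (illustrative only) of walking this list; the real
   iterators first_ls_expr and next_ls_expr are declared further below:

     struct ls_expr *ptr;

     for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
       if (! ptr->invalid)
         consider (ptr->pattern);   <- consider is a hypothetical callback

   Only entries that were never marked invalid are candidates for load or
   store motion.  */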
480
481 /* Bitmap containing one bit for each register in the program.
482 Used when performing GCSE to track which registers have been set since
483 the start of the basic block. */
484 static regset reg_set_bitmap;
485
486 /* For each block, a bitmap of registers set in the block.
487 This is used by expr_killed_p and compute_transp.
488 It is computed during hash table computation and not by compute_sets
489 as it includes registers added since the last pass (or between cprop and
490 gcse) and it's currently not easy to realloc sbitmap vectors. */
491 static sbitmap *reg_set_in_block;
492
493 /* Array, indexed by basic block number, of lists of insns which modify
494 memory within that block. */
495 static rtx * modify_mem_list;
496 bitmap modify_mem_list_set;
497
498 /* This array parallels modify_mem_list, but is kept canonicalized. */
499 static rtx * canon_modify_mem_list;
500 bitmap canon_modify_mem_list_set;
501 /* Various variables for statistics gathering. */
502
503 /* Memory used in a pass.
504 This isn't intended to be absolutely precise. Its intent is only
505 to keep an eye on memory usage. */
506 static int bytes_used;
507
508 /* GCSE substitutions made. */
509 static int gcse_subst_count;
510 /* Number of copy instructions created. */
511 static int gcse_create_count;
512 /* Number of constants propagated. */
513 static int const_prop_count;
514 /* Number of copies propagated. */
515 static int copy_prop_count;
516 \f
517 /* These variables are used by classic GCSE.
518 Normally they'd be defined a bit later, but `rd_gen' needs to
519 be declared sooner. */
520
521 /* Each block has a bitmap of each type.
522 The length of each block's bitmap is:
523
524 max_cuid - for reaching definitions
525 n_exprs - for available expressions
526
527 Thus we view the bitmaps as 2 dimensional arrays. i.e.
528 rd_kill[block_num][cuid_num]
529 ae_kill[block_num][expr_num] */
530
531 /* For reaching defs */
532 static sbitmap *rd_kill, *rd_gen, *reaching_defs, *rd_out;
533
534 /* for available exprs */
535 static sbitmap *ae_kill, *ae_gen, *ae_in, *ae_out;
536
537 /* Objects of this type are passed around by the null-pointer check
538 removal routines. */
539 struct null_pointer_info
540 {
541 /* The basic block being processed. */
542 basic_block current_block;
543 /* The first register to be handled in this pass. */
544 unsigned int min_reg;
545 /* One greater than the last register to be handled in this pass. */
546 unsigned int max_reg;
547 sbitmap *nonnull_local;
548 sbitmap *nonnull_killed;
549 };
550 \f
551 static void compute_can_copy (void);
552 static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
553 static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
554 static void *grealloc (void *, size_t);
555 static void *gcse_alloc (unsigned long);
556 static void alloc_gcse_mem (rtx);
557 static void free_gcse_mem (void);
558 static void alloc_reg_set_mem (int);
559 static void free_reg_set_mem (void);
560 static int get_bitmap_width (int, int, int);
561 static void record_one_set (int, rtx);
562 static void replace_one_set (int, rtx, rtx);
563 static void record_set_info (rtx, rtx, void *);
564 static void compute_sets (rtx);
565 static void hash_scan_insn (rtx, struct hash_table *, int);
566 static void hash_scan_set (rtx, rtx, struct hash_table *);
567 static void hash_scan_clobber (rtx, rtx, struct hash_table *);
568 static void hash_scan_call (rtx, rtx, struct hash_table *);
569 static int want_to_gcse_p (rtx);
570 static bool gcse_constant_p (rtx);
571 static int oprs_unchanged_p (rtx, rtx, int);
572 static int oprs_anticipatable_p (rtx, rtx);
573 static int oprs_available_p (rtx, rtx);
574 static void insert_expr_in_table (rtx, enum machine_mode, rtx, int, int,
575 struct hash_table *);
576 static void insert_set_in_table (rtx, rtx, struct hash_table *);
577 static unsigned int hash_expr (rtx, enum machine_mode, int *, int);
578 static unsigned int hash_expr_1 (rtx, enum machine_mode, int *);
579 static unsigned int hash_string_1 (const char *);
580 static unsigned int hash_set (int, int);
581 static int expr_equiv_p (rtx, rtx);
582 static void record_last_reg_set_info (rtx, int);
583 static void record_last_mem_set_info (rtx);
584 static void record_last_set_info (rtx, rtx, void *);
585 static void compute_hash_table (struct hash_table *);
586 static void alloc_hash_table (int, struct hash_table *, int);
587 static void free_hash_table (struct hash_table *);
588 static void compute_hash_table_work (struct hash_table *);
589 static void dump_hash_table (FILE *, const char *, struct hash_table *);
590 static struct expr *lookup_expr (rtx, struct hash_table *);
591 static struct expr *lookup_set (unsigned int, struct hash_table *);
592 static struct expr *next_set (unsigned int, struct expr *);
593 static void reset_opr_set_tables (void);
594 static int oprs_not_set_p (rtx, rtx);
595 static void mark_call (rtx);
596 static void mark_set (rtx, rtx);
597 static void mark_clobber (rtx, rtx);
598 static void mark_oprs_set (rtx);
599 static void alloc_cprop_mem (int, int);
600 static void free_cprop_mem (void);
601 static void compute_transp (rtx, int, sbitmap *, int);
602 static void compute_transpout (void);
603 static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
604 struct hash_table *);
605 static void compute_cprop_data (void);
606 static void find_used_regs (rtx *, void *);
607 static int try_replace_reg (rtx, rtx, rtx);
608 static struct expr *find_avail_set (int, rtx);
609 static int cprop_jump (basic_block, rtx, rtx, rtx, rtx);
610 static void mems_conflict_for_gcse_p (rtx, rtx, void *);
611 static int load_killed_in_block_p (basic_block, int, rtx, int);
612 static void canon_list_insert (rtx, rtx, void *);
613 static int cprop_insn (rtx, int);
614 static int cprop (int);
615 static void find_implicit_sets (void);
616 static int one_cprop_pass (int, int, int);
617 static bool constprop_register (rtx, rtx, rtx, int);
618 static struct expr *find_bypass_set (int, int);
619 static bool reg_killed_on_edge (rtx, edge);
620 static int bypass_block (basic_block, rtx, rtx);
621 static int bypass_conditional_jumps (void);
622 static void alloc_pre_mem (int, int);
623 static void free_pre_mem (void);
624 static void compute_pre_data (void);
625 static int pre_expr_reaches_here_p (basic_block, struct expr *,
626 basic_block);
627 static void insert_insn_end_bb (struct expr *, basic_block, int);
628 static void pre_insert_copy_insn (struct expr *, rtx);
629 static void pre_insert_copies (void);
630 static int pre_delete (void);
631 static int pre_gcse (void);
632 static int one_pre_gcse_pass (int);
633 static void add_label_notes (rtx, rtx);
634 static void alloc_code_hoist_mem (int, int);
635 static void free_code_hoist_mem (void);
636 static void compute_code_hoist_vbeinout (void);
637 static void compute_code_hoist_data (void);
638 static int hoist_expr_reaches_here_p (basic_block, int, basic_block, char *);
639 static void hoist_code (void);
640 static int one_code_hoisting_pass (void);
641 static void alloc_rd_mem (int, int);
642 static void free_rd_mem (void);
643 static void handle_rd_kill_set (rtx, int, basic_block);
644 static void compute_kill_rd (void);
645 static void compute_rd (void);
646 static void alloc_avail_expr_mem (int, int);
647 static void free_avail_expr_mem (void);
648 static void compute_ae_gen (struct hash_table *);
649 static int expr_killed_p (rtx, basic_block);
650 static void compute_ae_kill (sbitmap *, sbitmap *, struct hash_table *);
651 static int expr_reaches_here_p (struct occr *, struct expr *, basic_block,
652 int);
653 static rtx computing_insn (struct expr *, rtx);
654 static int def_reaches_here_p (rtx, rtx);
655 static int can_disregard_other_sets (struct reg_set **, rtx, int);
656 static int handle_avail_expr (rtx, struct expr *);
657 static int classic_gcse (void);
658 static int one_classic_gcse_pass (int);
659 static void invalidate_nonnull_info (rtx, rtx, void *);
660 static int delete_null_pointer_checks_1 (unsigned int *, sbitmap *, sbitmap *,
661 struct null_pointer_info *);
662 static rtx process_insert_insn (struct expr *);
663 static int pre_edge_insert (struct edge_list *, struct expr **);
664 static int expr_reaches_here_p_work (struct occr *, struct expr *,
665 basic_block, int, char *);
666 static int pre_expr_reaches_here_p_work (basic_block, struct expr *,
667 basic_block, char *);
668 static struct ls_expr * ldst_entry (rtx);
669 static void free_ldst_entry (struct ls_expr *);
670 static void free_ldst_mems (void);
671 static void print_ldst_list (FILE *);
672 static struct ls_expr * find_rtx_in_ldst (rtx);
673 static int enumerate_ldsts (void);
674 static inline struct ls_expr * first_ls_expr (void);
675 static inline struct ls_expr * next_ls_expr (struct ls_expr *);
676 static int simple_mem (rtx);
677 static void invalidate_any_buried_refs (rtx);
678 static void compute_ld_motion_mems (void);
679 static void trim_ld_motion_mems (void);
680 static void update_ld_motion_stores (struct expr *);
681 static void reg_set_info (rtx, rtx, void *);
682 static bool store_ops_ok (rtx, int *);
683 static rtx extract_mentioned_regs (rtx);
684 static rtx extract_mentioned_regs_helper (rtx, rtx);
685 static void find_moveable_store (rtx, int *, int *);
686 static int compute_store_table (void);
687 static bool load_kills_store (rtx, rtx, int);
688 static bool find_loads (rtx, rtx, int);
689 static bool store_killed_in_insn (rtx, rtx, rtx, int);
690 static bool store_killed_after (rtx, rtx, rtx, basic_block, int *, rtx *);
691 static bool store_killed_before (rtx, rtx, rtx, basic_block, int *);
692 static void build_store_vectors (void);
693 static void insert_insn_start_bb (rtx, basic_block);
694 static int insert_store (struct ls_expr *, edge);
695 static void remove_reachable_equiv_notes (basic_block, struct ls_expr *);
696 static void replace_store_insn (rtx, rtx, basic_block, struct ls_expr *);
697 static void delete_store (struct ls_expr *, basic_block);
698 static void free_store_memory (void);
699 static void store_motion (void);
700 static void free_insn_expr_list_list (rtx *);
701 static void clear_modify_mem_tables (void);
702 static void free_modify_mem_tables (void);
703 static rtx gcse_emit_move_after (rtx, rtx, rtx);
704 static void local_cprop_find_used_regs (rtx *, void *);
705 static bool do_local_cprop (rtx, rtx, int, rtx*);
706 static bool adjust_libcall_notes (rtx, rtx, rtx, rtx*);
707 static void local_cprop_pass (int);
708 static bool is_too_expensive (const char *);
709 \f
710
711 /* Entry point for global common subexpression elimination.
712 F is the first instruction in the function. */
713
714 int
715 gcse_main (rtx f, FILE *file)
716 {
717 int changed, pass;
718 /* Bytes used at start of pass. */
719 int initial_bytes_used;
720 /* Maximum number of bytes used by a pass. */
721 int max_pass_bytes;
722 /* Point to release obstack data from for each pass. */
723 char *gcse_obstack_bottom;
724
725 /* We do not construct an accurate cfg in functions which call
726 setjmp, so just punt to be safe. */
727 if (current_function_calls_setjmp)
728 return 0;
729
730 /* Assume that we do not need to run jump optimizations after gcse. */
731 run_jump_opt_after_gcse = 0;
732
733 /* For calling dump_foo fns from gdb. */
734 debug_stderr = stderr;
735 gcse_file = file;
736
737 /* Identify the basic block information for this function, including
738 successors and predecessors. */
739 max_gcse_regno = max_reg_num ();
740
741 if (file)
742 dump_flow_info (file);
743
744 /* Return if there's nothing to do, or it is too expensive. */
745 if (n_basic_blocks <= 1 || is_too_expensive (_("GCSE disabled")))
746 return 0;
747
748 gcc_obstack_init (&gcse_obstack);
749 bytes_used = 0;
750
751 /* We need alias. */
752 init_alias_analysis ();
753 /* Record where pseudo-registers are set. This data is kept accurate
754 during each pass. ??? We could also record hard-reg information here
755 [since it's unchanging], however it is currently done during hash table
756 computation.
757
758 It may be tempting to compute MEM set information here too, but MEM sets
759 will be subject to code motion one day and thus we need to compute
760 information about memory sets when we build the hash tables. */
761
762 alloc_reg_set_mem (max_gcse_regno);
763 compute_sets (f);
764
765 pass = 0;
766 initial_bytes_used = bytes_used;
767 max_pass_bytes = 0;
768 gcse_obstack_bottom = gcse_alloc (1);
769 changed = 1;
770 while (changed && pass < MAX_GCSE_PASSES)
771 {
772 changed = 0;
773 if (file)
774 fprintf (file, "GCSE pass %d\n\n", pass + 1);
775
776 /* Initialize bytes_used to the space for the pred/succ lists,
777 and the reg_set_table data. */
778 bytes_used = initial_bytes_used;
779
780 /* Each pass may create new registers, so recalculate each time. */
781 max_gcse_regno = max_reg_num ();
782
783 alloc_gcse_mem (f);
784
785 /* Don't allow constant propagation to modify jumps
786 during this pass. */
787 changed = one_cprop_pass (pass + 1, 0, 0);
788
789 if (optimize_size)
790 changed |= one_classic_gcse_pass (pass + 1);
791 else
792 {
793 changed |= one_pre_gcse_pass (pass + 1);
794 /* We may have just created new basic blocks. Release and
795 recompute various things which are sized on the number of
796 basic blocks. */
797 if (changed)
798 {
799 free_modify_mem_tables ();
800 modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
801 canon_modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
802 }
803 free_reg_set_mem ();
804 alloc_reg_set_mem (max_reg_num ());
805 compute_sets (f);
806 run_jump_opt_after_gcse = 1;
807 }
808
809 if (max_pass_bytes < bytes_used)
810 max_pass_bytes = bytes_used;
811
812 /* Free up memory, then reallocate for code hoisting. We cannot
813 re-use the existing allocated memory because the tables
814 will not have info for the insns or registers created by
815 partial redundancy elimination. */
816 free_gcse_mem ();
817
818 /* It does not make sense to run code hoisting unless we are optimizing
819 for code size -- it rarely makes programs faster, and can make
820 them bigger if we did partial redundancy elimination (when optimizing
821 for space, we use a classic gcse algorithm instead of partial
822 redundancy algorithms). */
823 if (optimize_size)
824 {
825 max_gcse_regno = max_reg_num ();
826 alloc_gcse_mem (f);
827 changed |= one_code_hoisting_pass ();
828 free_gcse_mem ();
829
830 if (max_pass_bytes < bytes_used)
831 max_pass_bytes = bytes_used;
832 }
833
834 if (file)
835 {
836 fprintf (file, "\n");
837 fflush (file);
838 }
839
840 obstack_free (&gcse_obstack, gcse_obstack_bottom);
841 pass++;
842 }
843
844 /* Do one last pass of copy propagation, including cprop into
845 conditional jumps. */
846
847 max_gcse_regno = max_reg_num ();
848 alloc_gcse_mem (f);
849 /* This time, go ahead and allow cprop to alter jumps. */
850 one_cprop_pass (pass + 1, 1, 0);
851 free_gcse_mem ();
852
853 if (file)
854 {
855 fprintf (file, "GCSE of %s: %d basic blocks, ",
856 current_function_name, n_basic_blocks);
857 fprintf (file, "%d pass%s, %d bytes\n\n",
858 pass, pass > 1 ? "es" : "", max_pass_bytes);
859 }
860
861 obstack_free (&gcse_obstack, NULL);
862 free_reg_set_mem ();
863 /* We are finished with alias. */
864 end_alias_analysis ();
865 allocate_reg_info (max_reg_num (), FALSE, FALSE);
866
867 if (!optimize_size && flag_gcse_sm)
868 store_motion ();
869
870 /* Tell the caller whether jump optimization should be rerun after gcse. */
871 return run_jump_opt_after_gcse;
872 }
873 \f
874 /* Misc. utilities. */
875
876 /* Nonzero for each mode that supports (set (reg) (reg)).
877 This is trivially true for integer and floating point values.
878 It may or may not be true for condition codes. */
879 static char can_copy[(int) NUM_MACHINE_MODES];
880
881 /* Compute which modes support reg/reg copy operations. */
882
883 static void
884 compute_can_copy (void)
885 {
886 int i;
887 #ifndef AVOID_CCMODE_COPIES
888 rtx reg, insn;
889 #endif
890 memset (can_copy, 0, NUM_MACHINE_MODES);
891
892 start_sequence ();
893 for (i = 0; i < NUM_MACHINE_MODES; i++)
894 if (GET_MODE_CLASS (i) == MODE_CC)
895 {
896 #ifdef AVOID_CCMODE_COPIES
897 can_copy[i] = 0;
898 #else
899 reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
900 insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
901 if (recog (PATTERN (insn), insn, NULL) >= 0)
902 can_copy[i] = 1;
903 #endif
904 }
905 else
906 can_copy[i] = 1;
907
908 end_sequence ();
909 }
910
911 /* Returns whether the mode supports reg/reg copy operations. */
912
913 bool
914 can_copy_p (enum machine_mode mode)
915 {
916 static bool can_copy_init_p = false;
917
918 if (! can_copy_init_p)
919 {
920 compute_can_copy ();
921 can_copy_init_p = true;
922 }
923
924 return can_copy[mode] != 0;
925 }
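
/* Typical use (illustrative): before recording or moving a (set (reg) (reg))
   style copy, callers ask whether the mode can be copied at all, e.g.

     if (! can_copy_p (GET_MODE (dest)))
       return;

   which keeps CCmode values from being copied on targets that define
   AVOID_CCMODE_COPIES.  */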
926 \f
927 /* Cover function to xmalloc to record bytes allocated. */
928
929 static void *
930 gmalloc (size_t size)
931 {
932 bytes_used += size;
933 return xmalloc (size);
934 }
935
936 /* Cover function to xcalloc to record bytes allocated. */
937
938 static void *
939 gcalloc (size_t nelem, size_t elsize)
940 {
941 bytes_used += nelem * elsize;
942 return xcalloc (nelem, elsize);
943 }
944
945 /* Cover function to xrealloc.
946 We don't record the additional size since we don't know it.
947 It won't affect memory usage stats much anyway. */
948
949 static void *
950 grealloc (void *ptr, size_t size)
951 {
952 return xrealloc (ptr, size);
953 }
954
955 /* Cover function to obstack_alloc. */
956
957 static void *
958 gcse_alloc (unsigned long size)
959 {
960 bytes_used += size;
961 return obstack_alloc (&gcse_obstack, size);
962 }
963
964 /* Allocate memory for the cuid mapping array,
965 and reg/memory set tracking tables.
966
967 This is called at the start of each pass. */
968
969 static void
970 alloc_gcse_mem (rtx f)
971 {
972 int i;
973 rtx insn;
974
975 /* Find the largest UID and create a mapping from UIDs to CUIDs.
976 CUIDs are like UIDs except they increase monotonically, have no gaps,
977 and only apply to real insns. */
978
979 max_uid = get_max_uid ();
980 uid_cuid = gcalloc (max_uid + 1, sizeof (int));
981 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
982 {
983 if (INSN_P (insn))
984 uid_cuid[INSN_UID (insn)] = i++;
985 else
986 uid_cuid[INSN_UID (insn)] = i;
987 }
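
  /* Every real insn now has its own cuid; notes and other non-insns share
     the cuid of the next real insn, since I is not incremented for them.
     E.g. the stream I1, NOTE, I2 gets cuids 0, 1, 1.  */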
988
989 /* Create a table mapping cuids to insns. */
990
991 max_cuid = i;
992 cuid_insn = gcalloc (max_cuid + 1, sizeof (rtx));
993 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
994 if (INSN_P (insn))
995 CUID_INSN (i++) = insn;
996
997 /* Allocate vars to track sets of regs. */
998 reg_set_bitmap = BITMAP_XMALLOC ();
999
1000 /* Allocate vars to track sets of regs, memory per block. */
1001 reg_set_in_block = sbitmap_vector_alloc (last_basic_block, max_gcse_regno);
1002 /* Allocate array to keep a list of insns which modify memory in each
1003 basic block. */
1004 modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
1005 canon_modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
1006 modify_mem_list_set = BITMAP_XMALLOC ();
1007 canon_modify_mem_list_set = BITMAP_XMALLOC ();
1008 }
1009
1010 /* Free memory allocated by alloc_gcse_mem. */
1011
1012 static void
1013 free_gcse_mem (void)
1014 {
1015 free (uid_cuid);
1016 free (cuid_insn);
1017
1018 BITMAP_XFREE (reg_set_bitmap);
1019
1020 sbitmap_vector_free (reg_set_in_block);
1021 free_modify_mem_tables ();
1022 BITMAP_XFREE (modify_mem_list_set);
1023 BITMAP_XFREE (canon_modify_mem_list_set);
1024 }
1025
1026 /* Many of the global optimization algorithms work by solving dataflow
1027 equations for various expressions. Initially, some local value is
1028 computed for each expression in each block. Then, the values across the
1029 various blocks are combined (by following flow graph edges) to arrive at
1030 global values. Conceptually, each set of equations is independent. We
1031 may therefore solve all the equations in parallel, solve them one at a
1032 time, or pick any intermediate approach.
1033
1034 When you're going to need N two-dimensional bitmaps, each X (say, the
1035 number of blocks) by Y (say, the number of expressions), call this
1036 function. It's not important what X and Y represent; only that Y
1037 correspond to the things that can be done in parallel. This function will
1038 return an appropriate chunking factor C; you should solve C sets of
1039 equations in parallel. By going through this function, we can easily
1040 trade space against time; by solving fewer equations in parallel we use
1041 less space. */
1042
1043 static int
1044 get_bitmap_width (int n, int x, int y)
1045 {
1046 /* It's not really worth figuring out *exactly* how much memory will
1047 be used by a particular choice. The important thing is to get
1048 something approximately right. */
1049 size_t max_bitmap_memory = 10 * 1024 * 1024;
1050
1051 /* The number of bytes we'd use for a single column of minimum
1052 width. */
1053 size_t column_size = n * x * sizeof (SBITMAP_ELT_TYPE);
1054
1055 /* Often, it's reasonable just to solve all the equations in
1056 parallel. */
1057 if (column_size * SBITMAP_SET_SIZE (y) <= max_bitmap_memory)
1058 return y;
1059
1060 /* Otherwise, pick the largest width we can, without going over the
1061 limit. */
1062 return SBITMAP_ELT_BITS * ((max_bitmap_memory + column_size - 1)
1063 / column_size);
1064 }
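
/* Worked example (illustrative, assuming 64-bit sbitmap elements): with
   N = 2 bitmaps, X = 1000 blocks and Y = 5000 expressions, COLUMN_SIZE is
   2 * 1000 * 8 = 16000 bytes and SBITMAP_SET_SIZE (5000) is 79, so
   16000 * 79 bytes is well under the 10MB cap and all 5000 equations are
   solved in parallel.  With Y = 100000 the full width would need roughly
   25MB, so instead 64 * ((10485760 + 15999) / 16000) = 41984 equations are
   solved per chunk.  */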
1065 \f
1066 /* Compute the local properties of each recorded expression.
1067
1068 Local properties are those that are defined by the block, irrespective of
1069 other blocks.
1070
1071 An expression is transparent in a block if its operands are not modified
1072 in the block.
1073
1074 An expression is computed (locally available) in a block if it is computed
1075 at least once and the expression would contain the same value if the
1076 computation was moved to the end of the block.
1077
1078 An expression is locally anticipatable in a block if it is computed at
1079 least once and the expression would contain the same value if the computation
1080 was moved to the beginning of the block.
1081
1082 We call this routine for cprop, pre and code hoisting. They all compute
1083 basically the same information and thus can easily share this code.
1084
1085 TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
1086 properties. If NULL, then it is not necessary to compute or record that
1087 particular property.
1088
1089 TABLE controls which hash table to look at. If it is the set hash table,
1090 TRANSP is additionally computed as ~TRANSP, since this is really cprop's
1091 ABSALTERED. */
1092
1093 static void
1094 compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc, struct hash_table *table)
1095 {
1096 unsigned int i;
1097
1098 /* Initialize any bitmaps that were passed in. */
1099 if (transp)
1100 {
1101 if (table->set_p)
1102 sbitmap_vector_zero (transp, last_basic_block);
1103 else
1104 sbitmap_vector_ones (transp, last_basic_block);
1105 }
1106
1107 if (comp)
1108 sbitmap_vector_zero (comp, last_basic_block);
1109 if (antloc)
1110 sbitmap_vector_zero (antloc, last_basic_block);
1111
1112 for (i = 0; i < table->size; i++)
1113 {
1114 struct expr *expr;
1115
1116 for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
1117 {
1118 int indx = expr->bitmap_index;
1119 struct occr *occr;
1120
1121 /* The expression is transparent in this block if it is not killed.
1122 We start by assuming all are transparent [none are killed], and
1123 then reset the bits for those that are. */
1124 if (transp)
1125 compute_transp (expr->expr, indx, transp, table->set_p);
1126
1127 /* The occurrences recorded in antic_occr are exactly those that
1128 we want to set to nonzero in ANTLOC. */
1129 if (antloc)
1130 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
1131 {
1132 SET_BIT (antloc[BLOCK_NUM (occr->insn)], indx);
1133
1134 /* While we're scanning the table, this is a good place to
1135 initialize this. */
1136 occr->deleted_p = 0;
1137 }
1138
1139 /* The occurrences recorded in avail_occr are exactly those that
1140 we want to set to nonzero in COMP. */
1141 if (comp)
1142 for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
1143 {
1144 SET_BIT (comp[BLOCK_NUM (occr->insn)], indx);
1145
1146 /* While we're scanning the table, this is a good place to
1147 initialize this. */
1148 occr->copied_p = 0;
1149 }
1150
1151 /* While we're scanning the table, this is a good place to
1152 initialize this. */
1153 expr->reaching_reg = 0;
1154 }
1155 }
1156 }
1157 \f
1158 /* Register set information.
1159
1160 `reg_set_table' records where each register is set or otherwise
1161 modified. */
1162
1163 static struct obstack reg_set_obstack;
1164
1165 static void
1166 alloc_reg_set_mem (int n_regs)
1167 {
1168 reg_set_table_size = n_regs + REG_SET_TABLE_SLOP;
1169 reg_set_table = gcalloc (reg_set_table_size, sizeof (struct reg_set *));
1170
1171 gcc_obstack_init (&reg_set_obstack);
1172 }
1173
1174 static void
1175 free_reg_set_mem (void)
1176 {
1177 free (reg_set_table);
1178 obstack_free (&reg_set_obstack, NULL);
1179 }
1180
1181 /* An OLD_INSN that used to set REGNO was replaced by NEW_INSN.
1182 Update the corresponding `reg_set_table' entry accordingly.
1183 We assume that NEW_INSN is not already recorded in reg_set_table[regno]. */
1184
1185 static void
1186 replace_one_set (int regno, rtx old_insn, rtx new_insn)
1187 {
1188 struct reg_set *reg_info;
1189 if (regno >= reg_set_table_size)
1190 return;
1191 for (reg_info = reg_set_table[regno]; reg_info; reg_info = reg_info->next)
1192 if (reg_info->insn == old_insn)
1193 {
1194 reg_info->insn = new_insn;
1195 break;
1196 }
1197 }
1198
1199 /* Record REGNO in the reg_set table. */
1200
1201 static void
1202 record_one_set (int regno, rtx insn)
1203 {
1204 /* Allocate a new reg_set element and link it onto the list. */
1205 struct reg_set *new_reg_info;
1206
1207 /* If the table isn't big enough, enlarge it. */
1208 if (regno >= reg_set_table_size)
1209 {
1210 int new_size = regno + REG_SET_TABLE_SLOP;
1211
1212 reg_set_table = grealloc (reg_set_table,
1213 new_size * sizeof (struct reg_set *));
1214 memset (reg_set_table + reg_set_table_size, 0,
1215 (new_size - reg_set_table_size) * sizeof (struct reg_set *));
1216 reg_set_table_size = new_size;
1217 }
1218
1219 new_reg_info = obstack_alloc (&reg_set_obstack, sizeof (struct reg_set));
1220 bytes_used += sizeof (struct reg_set);
1221 new_reg_info->insn = insn;
1222 new_reg_info->next = reg_set_table[regno];
1223 reg_set_table[regno] = new_reg_info;
1224 }
1225
1226 /* Called from compute_sets via note_stores to handle one SET or CLOBBER in
1227 an insn. The DATA is really the instruction in which the SET is
1228 occurring. */
1229
1230 static void
1231 record_set_info (rtx dest, rtx setter ATTRIBUTE_UNUSED, void *data)
1232 {
1233 rtx record_set_insn = (rtx) data;
1234
1235 if (GET_CODE (dest) == REG && REGNO (dest) >= FIRST_PSEUDO_REGISTER)
1236 record_one_set (REGNO (dest), record_set_insn);
1237 }
1238
1239 /* Scan the function and record each set of each pseudo-register.
1240
1241 This is called once, at the start of the gcse pass. See the comments for
1242 `reg_set_table' for further documentation. */
1243
1244 static void
1245 compute_sets (rtx f)
1246 {
1247 rtx insn;
1248
1249 for (insn = f; insn != 0; insn = NEXT_INSN (insn))
1250 if (INSN_P (insn))
1251 note_stores (PATTERN (insn), record_set_info, insn);
1252 }
1253 \f
1254 /* Hash table support. */
1255
1256 struct reg_avail_info
1257 {
1258 basic_block last_bb;
1259 int first_set;
1260 int last_set;
1261 };
1262
1263 static struct reg_avail_info *reg_avail_info;
1264 static basic_block current_bb;
1265
1266
1267 /* See whether X, the source of a set, is something we want to consider for
1268 GCSE. */
1269
1270 static GTY(()) rtx test_insn;
1271 static int
1272 want_to_gcse_p (rtx x)
1273 {
1274 int num_clobbers = 0;
1275 int icode;
1276
1277 switch (GET_CODE (x))
1278 {
1279 case REG:
1280 case SUBREG:
1281 case CONST_INT:
1282 case CONST_DOUBLE:
1283 case CONST_VECTOR:
1284 case CALL:
1285 case CONSTANT_P_RTX:
1286 return 0;
1287
1288 default:
1289 break;
1290 }
1291
1292 /* If this is a valid operand, we are OK. If it's VOIDmode, we aren't. */
1293 if (general_operand (x, GET_MODE (x)))
1294 return 1;
1295 else if (GET_MODE (x) == VOIDmode)
1296 return 0;
1297
1298 /* Otherwise, check if we can make a valid insn from it. First initialize
1299 our test insn if we haven't already. */
1300 if (test_insn == 0)
1301 {
1302 test_insn
1303 = make_insn_raw (gen_rtx_SET (VOIDmode,
1304 gen_rtx_REG (word_mode,
1305 FIRST_PSEUDO_REGISTER * 2),
1306 const0_rtx));
1307 NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
1308 }
1309
1310 /* Now make an insn like the one we would make when GCSE'ing and see if
1311 valid. */
1312 PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
1313 SET_SRC (PATTERN (test_insn)) = x;
1314 return ((icode = recog (PATTERN (test_insn), test_insn, &num_clobbers)) >= 0
1315 && (num_clobbers == 0 || ! added_clobbers_hard_reg_p (icode)));
1316 }
1317
1318 /* Return nonzero if the operands of expression X are unchanged from the
1319 start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
1320 or from INSN to the end of INSN's basic block (if AVAIL_P != 0). */
1321
1322 static int
1323 oprs_unchanged_p (rtx x, rtx insn, int avail_p)
1324 {
1325 int i, j;
1326 enum rtx_code code;
1327 const char *fmt;
1328
1329 if (x == 0)
1330 return 1;
1331
1332 code = GET_CODE (x);
1333 switch (code)
1334 {
1335 case REG:
1336 {
1337 struct reg_avail_info *info = &reg_avail_info[REGNO (x)];
1338
1339 if (info->last_bb != current_bb)
1340 return 1;
1341 if (avail_p)
1342 return info->last_set < INSN_CUID (insn);
1343 else
1344 return info->first_set >= INSN_CUID (insn);
1345 }
1346
1347 case MEM:
1348 if (load_killed_in_block_p (current_bb, INSN_CUID (insn),
1349 x, avail_p))
1350 return 0;
1351 else
1352 return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);
1353
1354 case PRE_DEC:
1355 case PRE_INC:
1356 case POST_DEC:
1357 case POST_INC:
1358 case PRE_MODIFY:
1359 case POST_MODIFY:
1360 return 0;
1361
1362 case PC:
1363 case CC0: /*FIXME*/
1364 case CONST:
1365 case CONST_INT:
1366 case CONST_DOUBLE:
1367 case CONST_VECTOR:
1368 case SYMBOL_REF:
1369 case LABEL_REF:
1370 case ADDR_VEC:
1371 case ADDR_DIFF_VEC:
1372 return 1;
1373
1374 default:
1375 break;
1376 }
1377
1378 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
1379 {
1380 if (fmt[i] == 'e')
1381 {
1382 /* If we are about to do the last recursive call needed at this
1383 level, change it into iteration. This function is called enough
1384 to be worth it. */
1385 if (i == 0)
1386 return oprs_unchanged_p (XEXP (x, i), insn, avail_p);
1387
1388 else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
1389 return 0;
1390 }
1391 else if (fmt[i] == 'E')
1392 for (j = 0; j < XVECLEN (x, i); j++)
1393 if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
1394 return 0;
1395 }
1396
1397 return 1;
1398 }
1399
1400 /* Used for communication between mems_conflict_for_gcse_p and
1401 load_killed_in_block_p. Nonzero if mems_conflict_for_gcse_p finds a
1402 conflict between two memory references. */
1403 static int gcse_mems_conflict_p;
1404
1405 /* Used for communication between mems_conflict_for_gcse_p and
1406 load_killed_in_block_p. A memory reference for a load instruction;
1407 mems_conflict_for_gcse_p will see if a memory store conflicts with
1408 this memory load. */
1409 static rtx gcse_mem_operand;
1410
1411 /* DEST is the output of an instruction. If it is a memory reference, and
1412 possibly conflicts with the load found in gcse_mem_operand, then set
1413 gcse_mems_conflict_p to a nonzero value. */
1414
1415 static void
1416 mems_conflict_for_gcse_p (rtx dest, rtx setter ATTRIBUTE_UNUSED,
1417 void *data ATTRIBUTE_UNUSED)
1418 {
1419 while (GET_CODE (dest) == SUBREG
1420 || GET_CODE (dest) == ZERO_EXTRACT
1421 || GET_CODE (dest) == SIGN_EXTRACT
1422 || GET_CODE (dest) == STRICT_LOW_PART)
1423 dest = XEXP (dest, 0);
1424
1425 /* If DEST is not a MEM, then it will not conflict with the load. Note
1426 that function calls are assumed to clobber memory, but are handled
1427 elsewhere. */
1428 if (GET_CODE (dest) != MEM)
1429 return;
1430
1431 /* If we are setting a MEM in our list of specially recognized MEMs,
1432 don't mark as killed this time. */
1433
1434 if (expr_equiv_p (dest, gcse_mem_operand) && pre_ldst_mems != NULL)
1435 {
1436 if (!find_rtx_in_ldst (dest))
1437 gcse_mems_conflict_p = 1;
1438 return;
1439 }
1440
1441 if (true_dependence (dest, GET_MODE (dest), gcse_mem_operand,
1442 rtx_addr_varies_p))
1443 gcse_mems_conflict_p = 1;
1444 }
1445
1446 /* Return nonzero if the expression in X (a memory reference) is killed
1447 in block BB before or after the insn with the CUID in UID_LIMIT.
1448 AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
1449 before UID_LIMIT.
1450
1451 To check the entire block, set UID_LIMIT to max_uid + 1 and
1452 AVAIL_P to 0. */
1453
1454 static int
1455 load_killed_in_block_p (basic_block bb, int uid_limit, rtx x, int avail_p)
1456 {
1457 rtx list_entry = modify_mem_list[bb->index];
1458 while (list_entry)
1459 {
1460 rtx setter;
1461 /* Ignore entries in the list that do not apply. */
1462 if ((avail_p
1463 && INSN_CUID (XEXP (list_entry, 0)) < uid_limit)
1464 || (! avail_p
1465 && INSN_CUID (XEXP (list_entry, 0)) > uid_limit))
1466 {
1467 list_entry = XEXP (list_entry, 1);
1468 continue;
1469 }
1470
1471 setter = XEXP (list_entry, 0);
1472
1473 /* If SETTER is a call everything is clobbered. Note that calls
1474 to pure functions are never put on the list, so we need not
1475 worry about them. */
1476 if (GET_CODE (setter) == CALL_INSN)
1477 return 1;
1478
1479 /* SETTER must be an INSN of some kind that sets memory. Call
1480 note_stores to examine each hunk of memory that is modified.
1481
1482 The note_stores interface is pretty limited, so we have to
1483 communicate via global variables. Yuk. */
1484 gcse_mem_operand = x;
1485 gcse_mems_conflict_p = 0;
1486 note_stores (PATTERN (setter), mems_conflict_for_gcse_p, NULL);
1487 if (gcse_mems_conflict_p)
1488 return 1;
1489 list_entry = XEXP (list_entry, 1);
1490 }
1491 return 0;
1492 }
1493
1494 /* Return nonzero if the operands of expression X are unchanged from
1495 the start of INSN's basic block up to but not including INSN. */
1496
1497 static int
1498 oprs_anticipatable_p (rtx x, rtx insn)
1499 {
1500 return oprs_unchanged_p (x, insn, 0);
1501 }
1502
1503 /* Return nonzero if the operands of expression X are unchanged from
1504 INSN to the end of INSN's basic block. */
1505
1506 static int
1507 oprs_available_p (rtx x, rtx insn)
1508 {
1509 return oprs_unchanged_p (x, insn, 1);
1510 }
1511
1512 /* Hash expression X.
1513
1514 MODE is only used if X is a CONST_INT. DO_NOT_RECORD_P is a boolean
1515 indicating if a volatile operand is found or if the expression contains
1516 something we don't want to insert in the table.
1517
1518 ??? One might want to merge this with canon_hash. Later. */
1519
1520 static unsigned int
1521 hash_expr (rtx x, enum machine_mode mode, int *do_not_record_p, int hash_table_size)
1522 {
1523 unsigned int hash;
1524
1525 *do_not_record_p = 0;
1526
1527 hash = hash_expr_1 (x, mode, do_not_record_p);
1528 return hash % hash_table_size;
1529 }
1530
1531 /* Hash a string. Just add its bytes up. */
1532
1533 static inline unsigned
1534 hash_string_1 (const char *ps)
1535 {
1536 unsigned hash = 0;
1537 const unsigned char *p = (const unsigned char *) ps;
1538
1539 if (p)
1540 while (*p)
1541 hash += *p++;
1542
1543 return hash;
1544 }
1545
1546 /* Subroutine of hash_expr to do the actual work. */
1547
1548 static unsigned int
1549 hash_expr_1 (rtx x, enum machine_mode mode, int *do_not_record_p)
1550 {
1551 int i, j;
1552 unsigned hash = 0;
1553 enum rtx_code code;
1554 const char *fmt;
1555
1556 /* Used to turn recursion into iteration. We can't rely on GCC's
1557 tail-recursion elimination since we need to keep accumulating values
1558 in HASH. */
1559
1560 if (x == 0)
1561 return hash;
1562
1563 repeat:
1564 code = GET_CODE (x);
1565 switch (code)
1566 {
1567 case REG:
1568 hash += ((unsigned int) REG << 7) + REGNO (x);
1569 return hash;
1570
1571 case CONST_INT:
1572 hash += (((unsigned int) CONST_INT << 7) + (unsigned int) mode
1573 + (unsigned int) INTVAL (x));
1574 return hash;
1575
1576 case CONST_DOUBLE:
1577 /* This is like the general case, except that it only counts
1578 the integers representing the constant. */
1579 hash += (unsigned int) code + (unsigned int) GET_MODE (x);
1580 if (GET_MODE (x) != VOIDmode)
1581 for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
1582 hash += (unsigned int) XWINT (x, i);
1583 else
1584 hash += ((unsigned int) CONST_DOUBLE_LOW (x)
1585 + (unsigned int) CONST_DOUBLE_HIGH (x));
1586 return hash;
1587
1588 case CONST_VECTOR:
1589 {
1590 int units;
1591 rtx elt;
1592
1593 units = CONST_VECTOR_NUNITS (x);
1594
1595 for (i = 0; i < units; ++i)
1596 {
1597 elt = CONST_VECTOR_ELT (x, i);
1598 hash += hash_expr_1 (elt, GET_MODE (elt), do_not_record_p);
1599 }
1600
1601 return hash;
1602 }
1603
1604 /* Assume there is only one rtx object for any given label. */
1605 case LABEL_REF:
1606 /* We don't hash on the address of the CODE_LABEL to avoid bootstrap
1607 differences and differences between each stage's debugging dumps. */
1608 hash += (((unsigned int) LABEL_REF << 7)
1609 + CODE_LABEL_NUMBER (XEXP (x, 0)));
1610 return hash;
1611
1612 case SYMBOL_REF:
1613 {
1614 /* Don't hash on the symbol's address to avoid bootstrap differences.
1615 Different hash values may cause expressions to be recorded in
1616 different orders and thus different registers to be used in the
1617 final assembler. This also avoids differences in the dump files
1618 between various stages. */
1619 unsigned int h = 0;
1620 const unsigned char *p = (const unsigned char *) XSTR (x, 0);
1621
1622 while (*p)
1623 h += (h << 7) + *p++; /* ??? revisit */
1624
1625 hash += ((unsigned int) SYMBOL_REF << 7) + h;
1626 return hash;
1627 }
1628
1629 case MEM:
1630 if (MEM_VOLATILE_P (x))
1631 {
1632 *do_not_record_p = 1;
1633 return 0;
1634 }
1635
1636 hash += (unsigned int) MEM;
1637 /* We used to use the alias set for hashing, but this is not good, since
1638 the alias set may differ between the -fprofile-arcs and
1639 -fbranch-probabilities compilations, causing the profiles to fail to match. */
1640 x = XEXP (x, 0);
1641 goto repeat;
1642
1643 case PRE_DEC:
1644 case PRE_INC:
1645 case POST_DEC:
1646 case POST_INC:
1647 case PC:
1648 case CC0:
1649 case CALL:
1650 case UNSPEC_VOLATILE:
1651 *do_not_record_p = 1;
1652 return 0;
1653
1654 case ASM_OPERANDS:
1655 if (MEM_VOLATILE_P (x))
1656 {
1657 *do_not_record_p = 1;
1658 return 0;
1659 }
1660 else
1661 {
1662 /* We don't want to take the filename and line into account. */
1663 hash += (unsigned) code + (unsigned) GET_MODE (x)
1664 + hash_string_1 (ASM_OPERANDS_TEMPLATE (x))
1665 + hash_string_1 (ASM_OPERANDS_OUTPUT_CONSTRAINT (x))
1666 + (unsigned) ASM_OPERANDS_OUTPUT_IDX (x);
1667
1668 if (ASM_OPERANDS_INPUT_LENGTH (x))
1669 {
1670 for (i = 1; i < ASM_OPERANDS_INPUT_LENGTH (x); i++)
1671 {
1672 hash += (hash_expr_1 (ASM_OPERANDS_INPUT (x, i),
1673 GET_MODE (ASM_OPERANDS_INPUT (x, i)),
1674 do_not_record_p)
1675 + hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT
1676 (x, i)));
1677 }
1678
1679 hash += hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT (x, 0));
1680 x = ASM_OPERANDS_INPUT (x, 0);
1681 mode = GET_MODE (x);
1682 goto repeat;
1683 }
1684 return hash;
1685 }
1686
1687 default:
1688 break;
1689 }
1690
1691 hash += (unsigned) code + (unsigned) GET_MODE (x);
1692 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
1693 {
1694 if (fmt[i] == 'e')
1695 {
1696 /* If we are about to do the last recursive call
1697 needed at this level, change it into iteration.
1698 This function is called enough to be worth it. */
1699 if (i == 0)
1700 {
1701 x = XEXP (x, i);
1702 goto repeat;
1703 }
1704
1705 hash += hash_expr_1 (XEXP (x, i), 0, do_not_record_p);
1706 if (*do_not_record_p)
1707 return 0;
1708 }
1709
1710 else if (fmt[i] == 'E')
1711 for (j = 0; j < XVECLEN (x, i); j++)
1712 {
1713 hash += hash_expr_1 (XVECEXP (x, i, j), 0, do_not_record_p);
1714 if (*do_not_record_p)
1715 return 0;
1716 }
1717
1718 else if (fmt[i] == 's')
1719 hash += hash_string_1 (XSTR (x, i));
1720 else if (fmt[i] == 'i')
1721 hash += (unsigned int) XINT (x, i);
1722 else
1723 abort ();
1724 }
1725
1726 return hash;
1727 }
1728
1729 /* Hash a set of register REGNO.
1730
1731 Sets are hashed on the register that is set. This simplifies the PRE copy
1732 propagation code.
1733
1734 ??? May need to make things more elaborate. Later, as necessary. */
1735
1736 static unsigned int
1737 hash_set (int regno, int hash_table_size)
1738 {
1739 unsigned int hash;
1740
1741 hash = regno;
1742 return hash % hash_table_size;
1743 }
1744
1745 /* Return nonzero if X is equivalent to Y.
1746 ??? Borrowed from cse.c. Might want to remerge with cse.c. Later. */
1747
1748 static int
1749 expr_equiv_p (rtx x, rtx y)
1750 {
1751 int i, j;
1752 enum rtx_code code;
1753 const char *fmt;
1754
1755 if (x == y)
1756 return 1;
1757
1758 if (x == 0 || y == 0)
1759 return 0;
1760
1761 code = GET_CODE (x);
1762 if (code != GET_CODE (y))
1763 return 0;
1764
1765 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
1766 if (GET_MODE (x) != GET_MODE (y))
1767 return 0;
1768
1769 switch (code)
1770 {
1771 case PC:
1772 case CC0:
1773 case CONST_INT:
1774 return 0;
1775
1776 case LABEL_REF:
1777 return XEXP (x, 0) == XEXP (y, 0);
1778
1779 case SYMBOL_REF:
1780 return XSTR (x, 0) == XSTR (y, 0);
1781
1782 case REG:
1783 return REGNO (x) == REGNO (y);
1784
1785 case MEM:
1786 /* Can't merge two expressions in different alias sets, since we can
1787 decide that the expression is transparent in a block when it isn't,
1788 due to it being set with a different alias set. */
1789 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
1790 return 0;
1791
1792 /* A volatile mem should not be considered equivalent to any other. */
1793 if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
1794 return 0;
1795 break;
1796
1797 /* For commutative operations, check both orders. */
1798 case PLUS:
1799 case MULT:
1800 case AND:
1801 case IOR:
1802 case XOR:
1803 case NE:
1804 case EQ:
1805 return ((expr_equiv_p (XEXP (x, 0), XEXP (y, 0))
1806 && expr_equiv_p (XEXP (x, 1), XEXP (y, 1)))
1807 || (expr_equiv_p (XEXP (x, 0), XEXP (y, 1))
1808 && expr_equiv_p (XEXP (x, 1), XEXP (y, 0))));
1809
1810 case ASM_OPERANDS:
1811 /* We don't use the generic code below because we want to
1812 disregard filename and line numbers. */
1813
1814 /* A volatile asm isn't equivalent to any other. */
1815 if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
1816 return 0;
1817
1818 if (GET_MODE (x) != GET_MODE (y)
1819 || strcmp (ASM_OPERANDS_TEMPLATE (x), ASM_OPERANDS_TEMPLATE (y))
1820 || strcmp (ASM_OPERANDS_OUTPUT_CONSTRAINT (x),
1821 ASM_OPERANDS_OUTPUT_CONSTRAINT (y))
1822 || ASM_OPERANDS_OUTPUT_IDX (x) != ASM_OPERANDS_OUTPUT_IDX (y)
1823 || ASM_OPERANDS_INPUT_LENGTH (x) != ASM_OPERANDS_INPUT_LENGTH (y))
1824 return 0;
1825
1826 if (ASM_OPERANDS_INPUT_LENGTH (x))
1827 {
1828 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
1829 if (! expr_equiv_p (ASM_OPERANDS_INPUT (x, i),
1830 ASM_OPERANDS_INPUT (y, i))
1831 || strcmp (ASM_OPERANDS_INPUT_CONSTRAINT (x, i),
1832 ASM_OPERANDS_INPUT_CONSTRAINT (y, i)))
1833 return 0;
1834 }
1835
1836 return 1;
1837
1838 default:
1839 break;
1840 }
1841
1842 /* Compare the elements. If any pair of corresponding elements
1843 fail to match, return 0 for the whole thing. */
1844
1845 fmt = GET_RTX_FORMAT (code);
1846 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1847 {
1848 switch (fmt[i])
1849 {
1850 case 'e':
1851 if (! expr_equiv_p (XEXP (x, i), XEXP (y, i)))
1852 return 0;
1853 break;
1854
1855 case 'E':
1856 if (XVECLEN (x, i) != XVECLEN (y, i))
1857 return 0;
1858 for (j = 0; j < XVECLEN (x, i); j++)
1859 if (! expr_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j)))
1860 return 0;
1861 break;
1862
1863 case 's':
1864 if (strcmp (XSTR (x, i), XSTR (y, i)))
1865 return 0;
1866 break;
1867
1868 case 'i':
1869 if (XINT (x, i) != XINT (y, i))
1870 return 0;
1871 break;
1872
1873 case 'w':
1874 if (XWINT (x, i) != XWINT (y, i))
1875 return 0;
1876 break;
1877
1878 case '0':
1879 break;
1880
1881 default:
1882 abort ();
1883 }
1884 }
1885
1886 return 1;
1887 }
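/* For example (illustration only): under the commutative check above,
   (plus:SI (reg 65) (const_int 4)) is considered equivalent to
   (plus:SI (const_int 4) (reg 65)), whereas (mult:SI (reg 65) (reg 66))
   and (mult:HI (reg 65) (reg 66)) are not equivalent because their
   modes differ.  */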
1888
1889 /* Insert expression X in INSN in the hash TABLE.
1890 If it is already present, record it as the last occurrence in INSN's
1891 basic block.
1892
1893 MODE is the mode of the value X is being stored into.
1894 It is only used if X is a CONST_INT.
1895
1896 ANTIC_P is nonzero if X is an anticipatable expression.
1897 AVAIL_P is nonzero if X is an available expression. */
1898
1899 static void
1900 insert_expr_in_table (rtx x, enum machine_mode mode, rtx insn, int antic_p,
1901 int avail_p, struct hash_table *table)
1902 {
1903 int found, do_not_record_p;
1904 unsigned int hash;
1905 struct expr *cur_expr, *last_expr = NULL;
1906 struct occr *antic_occr, *avail_occr;
1907 struct occr *last_occr = NULL;
1908
1909 hash = hash_expr (x, mode, &do_not_record_p, table->size);
1910
1911 /* Do not insert expression in table if it contains volatile operands,
1912 or if hash_expr determines the expression is something we don't want
1913 to or can't handle. */
1914 if (do_not_record_p)
1915 return;
1916
1917 cur_expr = table->table[hash];
1918 found = 0;
1919
1920 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
1921 {
1922 /* If the expression isn't found, save a pointer to the end of
1923 the list. */
1924 last_expr = cur_expr;
1925 cur_expr = cur_expr->next_same_hash;
1926 }
1927
1928 if (! found)
1929 {
1930 cur_expr = gcse_alloc (sizeof (struct expr));
1931 bytes_used += sizeof (struct expr);
1932 if (table->table[hash] == NULL)
1933 /* This is the first pattern that hashed to this index. */
1934 table->table[hash] = cur_expr;
1935 else
1936 /* Add EXPR to end of this hash chain. */
1937 last_expr->next_same_hash = cur_expr;
1938
1939 /* Set the fields of the expr element. */
1940 cur_expr->expr = x;
1941 cur_expr->bitmap_index = table->n_elems++;
1942 cur_expr->next_same_hash = NULL;
1943 cur_expr->antic_occr = NULL;
1944 cur_expr->avail_occr = NULL;
1945 }
1946
1947 /* Now record the occurrence(s). */
1948 if (antic_p)
1949 {
1950 antic_occr = cur_expr->antic_occr;
1951
1952 /* Search for another occurrence in the same basic block. */
1953 while (antic_occr && BLOCK_NUM (antic_occr->insn) != BLOCK_NUM (insn))
1954 {
1955 /* If an occurrence isn't found, save a pointer to the end of
1956 the list. */
1957 last_occr = antic_occr;
1958 antic_occr = antic_occr->next;
1959 }
1960
1961 if (antic_occr)
1962 /* Found another instance of the expression in the same basic block.
1963 Prefer the currently recorded one. We want the first one in the
1964 block and the block is scanned from start to end. */
1965 ; /* nothing to do */
1966 else
1967 {
1968 /* First occurrence of this expression in this basic block. */
1969 antic_occr = gcse_alloc (sizeof (struct occr));
1970 bytes_used += sizeof (struct occr);
1971 /* First occurrence of this expression in any block? */
1972 if (cur_expr->antic_occr == NULL)
1973 cur_expr->antic_occr = antic_occr;
1974 else
1975 last_occr->next = antic_occr;
1976
1977 antic_occr->insn = insn;
1978 antic_occr->next = NULL;
1979 }
1980 }
1981
1982 if (avail_p)
1983 {
1984 avail_occr = cur_expr->avail_occr;
1985
1986 /* Search for another occurrence in the same basic block. */
1987 while (avail_occr && BLOCK_NUM (avail_occr->insn) != BLOCK_NUM (insn))
1988 {
1989 /* If an occurrence isn't found, save a pointer to the end of
1990 the list. */
1991 last_occr = avail_occr;
1992 avail_occr = avail_occr->next;
1993 }
1994
1995 if (avail_occr)
1996 /* Found another instance of the expression in the same basic block.
1997 Prefer this occurrence to the currently recorded one. We want
1998 the last one in the block and the block is scanned from start
1999 to end. */
2000 avail_occr->insn = insn;
2001 else
2002 {
2003 /* First occurrence of this expression in this basic block. */
2004 avail_occr = gcse_alloc (sizeof (struct occr));
2005 bytes_used += sizeof (struct occr);
2006
2007 /* First occurrence of this expression in any block? */
2008 if (cur_expr->avail_occr == NULL)
2009 cur_expr->avail_occr = avail_occr;
2010 else
2011 last_occr->next = avail_occr;
2012
2013 avail_occr->insn = insn;
2014 avail_occr->next = NULL;
2015 }
2016 }
2017 }
2018
2019 /* Insert pattern X in INSN in the hash table.
2020 X is a SET of a reg to either another reg or a constant.
2021 If it is already present, record it as the last occurrence in INSN's
2022 basic block. */
2023
2024 static void
2025 insert_set_in_table (rtx x, rtx insn, struct hash_table *table)
2026 {
2027 int found;
2028 unsigned int hash;
2029 struct expr *cur_expr, *last_expr = NULL;
2030 struct occr *cur_occr, *last_occr = NULL;
2031
2032 if (GET_CODE (x) != SET
2033 || GET_CODE (SET_DEST (x)) != REG)
2034 abort ();
2035
2036 hash = hash_set (REGNO (SET_DEST (x)), table->size);
2037
2038 cur_expr = table->table[hash];
2039 found = 0;
2040
2041 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
2042 {
2043 /* If the expression isn't found, save a pointer to the end of
2044 the list. */
2045 last_expr = cur_expr;
2046 cur_expr = cur_expr->next_same_hash;
2047 }
2048
2049 if (! found)
2050 {
2051 cur_expr = gcse_alloc (sizeof (struct expr));
2052 bytes_used += sizeof (struct expr);
2053 if (table->table[hash] == NULL)
2054 /* This is the first pattern that hashed to this index. */
2055 table->table[hash] = cur_expr;
2056 else
2057 /* Add EXPR to end of this hash chain. */
2058 last_expr->next_same_hash = cur_expr;
2059
2060 /* Set the fields of the expr element.
2061 We must copy X because it can be modified when copy propagation is
2062 performed on its operands. */
2063 cur_expr->expr = copy_rtx (x);
2064 cur_expr->bitmap_index = table->n_elems++;
2065 cur_expr->next_same_hash = NULL;
2066 cur_expr->antic_occr = NULL;
2067 cur_expr->avail_occr = NULL;
2068 }
2069
2070 /* Now record the occurrence. */
2071 cur_occr = cur_expr->avail_occr;
2072
2073 /* Search for another occurrence in the same basic block. */
2074 while (cur_occr && BLOCK_NUM (cur_occr->insn) != BLOCK_NUM (insn))
2075 {
2076 /* If an occurrence isn't found, save a pointer to the end of
2077 the list. */
2078 last_occr = cur_occr;
2079 cur_occr = cur_occr->next;
2080 }
2081
2082 if (cur_occr)
2083 /* Found another instance of the expression in the same basic block.
2084 Prefer this occurrence to the currently recorded one. We want the
2085 last one in the block and the block is scanned from start to end. */
2086 cur_occr->insn = insn;
2087 else
2088 {
2089 /* First occurrence of this expression in this basic block. */
2090 cur_occr = gcse_alloc (sizeof (struct occr));
2091 bytes_used += sizeof (struct occr);
2092
2093 /* First occurrence of this expression in any block? */
2094 if (cur_expr->avail_occr == NULL)
2095 cur_expr->avail_occr = cur_occr;
2096 else
2097 last_occr->next = cur_occr;
2098
2099 cur_occr->insn = insn;
2100 cur_occr->next = NULL;
2101 }
2102 }
2103
2104 /* Determine whether the rtx X should be treated as a constant for
2105 the purposes of GCSE's constant propagation. */
2106
2107 static bool
2108 gcse_constant_p (rtx x)
2109 {
2110 /* Consider a COMPARE of two integers constant. */
2111 if (GET_CODE (x) == COMPARE
2112 && GET_CODE (XEXP (x, 0)) == CONST_INT
2113 && GET_CODE (XEXP (x, 1)) == CONST_INT)
2114 return true;
2115
2116
2117 /* Consider a COMPARE of the same (non floating point) registers to be
2118 a constant; for floating point, x == x need not hold because of NaNs. */
2119 if (GET_CODE (x) == COMPARE
2120 && GET_CODE (XEXP (x, 0)) == REG
2121 && GET_CODE (XEXP (x, 1)) == REG
2122 && REGNO (XEXP (x, 0)) == REGNO (XEXP (x, 1))
2123 && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 0)))
2124 && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 1))))
2125 return true;
2126
2127 if (GET_CODE (x) == CONSTANT_P_RTX)
2128 return false;
2129
2130 return CONSTANT_P (x);
2131 }
2132
2133 /* Scan pattern PAT of INSN and add an entry to the hash TABLE (set or
2134 expression one). */
2135
2136 static void
2137 hash_scan_set (rtx pat, rtx insn, struct hash_table *table)
2138 {
2139 rtx src = SET_SRC (pat);
2140 rtx dest = SET_DEST (pat);
2141 rtx note;
2142
2143 if (GET_CODE (src) == CALL)
2144 hash_scan_call (src, insn, table);
2145
2146 else if (GET_CODE (dest) == REG)
2147 {
2148 unsigned int regno = REGNO (dest);
2149 rtx tmp;
2150
2151 /* If this is a single set and we are doing constant propagation,
2152 see if a REG_NOTE shows this equivalent to a constant. */
2153 if (table->set_p && (note = find_reg_equal_equiv_note (insn)) != 0
2154 && gcse_constant_p (XEXP (note, 0)))
2155 src = XEXP (note, 0), pat = gen_rtx_SET (VOIDmode, dest, src);
2156
2157 /* Only record sets of pseudo-regs in the hash table. */
2158 if (! table->set_p
2159 && regno >= FIRST_PSEUDO_REGISTER
2160 /* Don't GCSE something if we can't do a reg/reg copy. */
2161 && can_copy_p (GET_MODE (dest))
2162 /* GCSE commonly inserts instructions after the insn. We can't
2163 do that easily for EH_REGION notes, so disable GCSE on these
2164 for now. */
2165 && !find_reg_note (insn, REG_EH_REGION, NULL_RTX)
2166 /* Is SET_SRC something we want to gcse? */
2167 && want_to_gcse_p (src)
2168 /* Don't CSE a nop. */
2169 && ! set_noop_p (pat)
2170 /* Don't GCSE if it has an attached REG_EQUIV note.
2171 At this point only function parameters should have
2172 REG_EQUIV notes, and if the argument slot is used somewhere
2173 explicitly, it means the address of the parameter has been taken,
2174 so we should not extend the lifetime of the pseudo. */
2175 && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
2176 || GET_CODE (XEXP (note, 0)) != MEM))
2177 {
2178 /* An expression is not anticipatable if its operands are
2179 modified before this insn or if this is not the only SET in
2180 this insn. */
2181 int antic_p = oprs_anticipatable_p (src, insn) && single_set (insn);
2182 /* An expression is not available if its operands are
2183 subsequently modified, including this insn. It's also not
2184 available if this is a branch, because we can't insert
2185 a set after the branch. */
2186 int avail_p = (oprs_available_p (src, insn)
2187 && ! JUMP_P (insn));
2188
2189 insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p, table);
2190 }
2191
2192 /* Record sets for constant/copy propagation. */
2193 else if (table->set_p
2194 && regno >= FIRST_PSEUDO_REGISTER
2195 && ((GET_CODE (src) == REG
2196 && REGNO (src) >= FIRST_PSEUDO_REGISTER
2197 && can_copy_p (GET_MODE (dest))
2198 && REGNO (src) != regno)
2199 || gcse_constant_p (src))
2200 /* A copy is not available if its src or dest is subsequently
2201 modified. Here we want to search from INSN+1 on, but
2202 oprs_available_p searches from INSN on. */
2203 && (insn == BLOCK_END (BLOCK_NUM (insn))
2204 || ((tmp = next_nonnote_insn (insn)) != NULL_RTX
2205 && oprs_available_p (pat, tmp))))
2206 insert_set_in_table (pat, insn, table);
2207 }
2208 }
2209
2210 static void
2211 hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
2212 struct hash_table *table ATTRIBUTE_UNUSED)
2213 {
2214 /* Currently nothing to do. */
2215 }
2216
2217 static void
2218 hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
2219 struct hash_table *table ATTRIBUTE_UNUSED)
2220 {
2221 /* Currently nothing to do. */
2222 }
2223
2224 /* Process INSN and add hash table entries as appropriate.
2225
2226 Only available expressions that set a single pseudo-reg are recorded.
2227
2228 Single sets in a PARALLEL could be handled, but it's an extra complication
2229 that isn't dealt with right now. The trick is handling the CLOBBERs that
2230 are also in the PARALLEL. Later.
2231
2232 If TABLE->SET_P is nonzero, this is for the assignment hash table,
2233 otherwise it is for the expression hash table.
2234 If IN_LIBCALL_BLOCK is nonzero, we are in a libcall block and should
2235 not record any expressions. */
2236
2237 static void
2238 hash_scan_insn (rtx insn, struct hash_table *table, int in_libcall_block)
2239 {
2240 rtx pat = PATTERN (insn);
2241 int i;
2242
2243 if (in_libcall_block)
2244 return;
2245
2246 /* Pick out the sets of INSN and for other forms of instructions record
2247 what's been modified. */
2248
2249 if (GET_CODE (pat) == SET)
2250 hash_scan_set (pat, insn, table);
2251 else if (GET_CODE (pat) == PARALLEL)
2252 for (i = 0; i < XVECLEN (pat, 0); i++)
2253 {
2254 rtx x = XVECEXP (pat, 0, i);
2255
2256 if (GET_CODE (x) == SET)
2257 hash_scan_set (x, insn, table);
2258 else if (GET_CODE (x) == CLOBBER)
2259 hash_scan_clobber (x, insn, table);
2260 else if (GET_CODE (x) == CALL)
2261 hash_scan_call (x, insn, table);
2262 }
2263
2264 else if (GET_CODE (pat) == CLOBBER)
2265 hash_scan_clobber (pat, insn, table);
2266 else if (GET_CODE (pat) == CALL)
2267 hash_scan_call (pat, insn, table);
2268 }
2269
2270 static void
2271 dump_hash_table (FILE *file, const char *name, struct hash_table *table)
2272 {
2273 int i;
2274 /* Flattened out table, so it's printed in proper order. */
2275 struct expr **flat_table;
2276 unsigned int *hash_val;
2277 struct expr *expr;
2278
2279 flat_table = xcalloc (table->n_elems, sizeof (struct expr *));
2280 hash_val = xmalloc (table->n_elems * sizeof (unsigned int));
2281
2282 for (i = 0; i < (int) table->size; i++)
2283 for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
2284 {
2285 flat_table[expr->bitmap_index] = expr;
2286 hash_val[expr->bitmap_index] = i;
2287 }
2288
2289 fprintf (file, "%s hash table (%d buckets, %d entries)\n",
2290 name, table->size, table->n_elems);
2291
2292 for (i = 0; i < (int) table->n_elems; i++)
2293 if (flat_table[i] != 0)
2294 {
2295 expr = flat_table[i];
2296 fprintf (file, "Index %d (hash value %d)\n ",
2297 expr->bitmap_index, hash_val[i]);
2298 print_rtl (file, expr->expr);
2299 fprintf (file, "\n");
2300 }
2301
2302 fprintf (file, "\n");
2303
2304 free (flat_table);
2305 free (hash_val);
2306 }
2307
2308 /* Record register first/last/block set information for REGNO in INSN.
2309
2310 first_set records the first place in the block where the register
2311 is set and is used to compute "anticipatability".
2312
2313 last_set records the last place in the block where the register
2314 is set and is used to compute "availability".
2315
2316 last_bb records the block for which first_set and last_set are
2317 valid, as a quick test to invalidate them.
2318
2319 reg_set_in_block records whether the register is set in the block
2320 and is used to compute "transparency". */
2321
2322 static void
2323 record_last_reg_set_info (rtx insn, int regno)
2324 {
2325 struct reg_avail_info *info = &reg_avail_info[regno];
2326 int cuid = INSN_CUID (insn);
2327
2328 info->last_set = cuid;
2329 if (info->last_bb != current_bb)
2330 {
2331 info->last_bb = current_bb;
2332 info->first_set = cuid;
2333 SET_BIT (reg_set_in_block[current_bb->index], regno);
2334 }
2335 }
2336
2337
2338 /* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
2339 Note we store a pair of elements in the list, so they have to be
2340 taken off pairwise. */
2341
2342 static void
2343 canon_list_insert (rtx dest ATTRIBUTE_UNUSED, rtx unused1 ATTRIBUTE_UNUSED,
2344 void * v_insn)
2345 {
2346 rtx dest_addr, insn;
2347 int bb;
2348
2349 while (GET_CODE (dest) == SUBREG
2350 || GET_CODE (dest) == ZERO_EXTRACT
2351 || GET_CODE (dest) == SIGN_EXTRACT
2352 || GET_CODE (dest) == STRICT_LOW_PART)
2353 dest = XEXP (dest, 0);
2354
2355 /* If DEST is not a MEM, then it will not conflict with a load. Note
2356 that function calls are assumed to clobber memory, but are handled
2357 elsewhere. */
2358
2359 if (GET_CODE (dest) != MEM)
2360 return;
2361
2362 dest_addr = get_addr (XEXP (dest, 0));
2363 dest_addr = canon_rtx (dest_addr);
2364 insn = (rtx) v_insn;
2365 bb = BLOCK_NUM (insn);
2366
2367 canon_modify_mem_list[bb] =
2368 alloc_EXPR_LIST (VOIDmode, dest_addr, canon_modify_mem_list[bb]);
2369 canon_modify_mem_list[bb] =
2370 alloc_EXPR_LIST (VOIDmode, dest, canon_modify_mem_list[bb]);
2371 bitmap_set_bit (canon_modify_mem_list_set, bb);
2372 }
2373
2374 /* Record memory modification information for INSN. We do not actually care
2375 about the memory location(s) that are set, or even how they are set (consider
2376 a CALL_INSN). We merely need to record which insns modify memory. */
2377
2378 static void
2379 record_last_mem_set_info (rtx insn)
2380 {
2381 int bb = BLOCK_NUM (insn);
2382
2383 /* load_killed_in_block_p will handle the case of calls clobbering
2384 everything. */
2385 modify_mem_list[bb] = alloc_INSN_LIST (insn, modify_mem_list[bb]);
2386 bitmap_set_bit (modify_mem_list_set, bb);
2387
2388 if (GET_CODE (insn) == CALL_INSN)
2389 {
2390 /* Note that traversals of this list (other than for freeing)
2391 will break after encountering a CALL_INSN. So, there's no
2392 need to insert a pair of items, as canon_list_insert does. */
2393 canon_modify_mem_list[bb] =
2394 alloc_INSN_LIST (insn, canon_modify_mem_list[bb]);
2395 bitmap_set_bit (canon_modify_mem_list_set, bb);
2396 }
2397 else
2398 note_stores (PATTERN (insn), canon_list_insert, (void*) insn);
2399 }
2400
2401 /* Called from compute_hash_table via note_stores to handle one
2402 SET or CLOBBER in an insn. DATA is really the instruction in which
2403 the SET is taking place. */
2404
2405 static void
2406 record_last_set_info (rtx dest, rtx setter ATTRIBUTE_UNUSED, void *data)
2407 {
2408 rtx last_set_insn = (rtx) data;
2409
2410 if (GET_CODE (dest) == SUBREG)
2411 dest = SUBREG_REG (dest);
2412
2413 if (GET_CODE (dest) == REG)
2414 record_last_reg_set_info (last_set_insn, REGNO (dest));
2415 else if (GET_CODE (dest) == MEM
2416 /* Ignore pushes, they clobber nothing. */
2417 && ! push_operand (dest, GET_MODE (dest)))
2418 record_last_mem_set_info (last_set_insn);
2419 }
2420
2421 /* Top level function to create an expression or assignment hash table.
2422
2423 Expression entries are placed in the hash table if
2424 - they are of the form (set (pseudo-reg) src),
2425 - src is something we want to perform GCSE on,
2426 - none of the operands are subsequently modified in the block
2427
2428 Assignment entries are placed in the hash table if
2429 - they are of the form (set (pseudo-reg) src),
2430 - src is something we want to perform const/copy propagation on,
2431 - none of the operands or target are subsequently modified in the block
2432
2433 Currently src must be a pseudo-reg or a const_int.
2434
2435 TABLE is the table computed. */
2436
2437 static void
2438 compute_hash_table_work (struct hash_table *table)
2439 {
2440 unsigned int i;
2441
2442 /* While we compute the hash table we also compute a bit array of which
2443 registers are set in which blocks.
2444 ??? This isn't needed during const/copy propagation, but it's cheap to
2445 compute. Later. */
2446 sbitmap_vector_zero (reg_set_in_block, last_basic_block);
2447
2448 /* Re-cache any INSN_LIST nodes we have allocated. */
2449 clear_modify_mem_tables ();
2450 /* Some working arrays used to track first and last set in each block. */
2451 reg_avail_info = gmalloc (max_gcse_regno * sizeof (struct reg_avail_info));
2452
2453 for (i = 0; i < max_gcse_regno; ++i)
2454 reg_avail_info[i].last_bb = NULL;
2455
2456 FOR_EACH_BB (current_bb)
2457 {
2458 rtx insn;
2459 unsigned int regno;
2460 int in_libcall_block;
2461
2462 /* First pass over the instructions records information used to
2463 determine when registers and memory are first and last set.
2464 ??? hard-reg reg_set_in_block computation
2465 could be moved to compute_sets since they currently don't change. */
2466
2467 for (insn = current_bb->head;
2468 insn && insn != NEXT_INSN (current_bb->end);
2469 insn = NEXT_INSN (insn))
2470 {
2471 if (! INSN_P (insn))
2472 continue;
2473
2474 if (GET_CODE (insn) == CALL_INSN)
2475 {
2476 bool clobbers_all = false;
2477 #ifdef NON_SAVING_SETJMP
2478 if (NON_SAVING_SETJMP
2479 && find_reg_note (insn, REG_SETJMP, NULL_RTX))
2480 clobbers_all = true;
2481 #endif
2482
2483 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2484 if (clobbers_all
2485 || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
2486 record_last_reg_set_info (insn, regno);
2487
2488 mark_call (insn);
2489 }
2490
2491 note_stores (PATTERN (insn), record_last_set_info, insn);
2492 }
2493
2494 /* Insert implicit sets in the hash table. */
2495 if (table->set_p
2496 && implicit_sets[current_bb->index] != NULL_RTX)
2497 hash_scan_set (implicit_sets[current_bb->index],
2498 current_bb->head, table);
2499
2500 /* The next pass builds the hash table. */
2501
2502 for (insn = current_bb->head, in_libcall_block = 0;
2503 insn && insn != NEXT_INSN (current_bb->end);
2504 insn = NEXT_INSN (insn))
2505 if (INSN_P (insn))
2506 {
2507 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
2508 in_libcall_block = 1;
2509 else if (table->set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX))
2510 in_libcall_block = 0;
2511 hash_scan_insn (insn, table, in_libcall_block);
2512 if (!table->set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX))
2513 in_libcall_block = 0;
2514 }
2515 }
2516
2517 free (reg_avail_info);
2518 reg_avail_info = NULL;
2519 }
2520
2521 /* Allocate space for the set/expr hash TABLE.
2522 N_INSNS is the number of instructions in the function.
2523 It is used to determine the number of buckets to use.
2524 SET_P determines whether a set table or an expression table
2525 will be created. */
2526
2527 static void
2528 alloc_hash_table (int n_insns, struct hash_table *table, int set_p)
2529 {
2530 int n;
2531
2532 table->size = n_insns / 4;
2533 if (table->size < 11)
2534 table->size = 11;
2535
2536 /* Attempt to maintain efficient use of hash table.
2537 Making it an odd number is simplest for now.
2538 ??? Later take some measurements. */
2539 table->size |= 1;
2540 n = table->size * sizeof (struct expr *);
2541 table->table = gmalloc (n);
2542 table->set_p = set_p;
2543 }
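/* A worked example of the sizing above (illustration only): a function
   with 100 insns gets 100 / 4 = 25 buckets, which is already >= 11 and
   already odd (25 | 1 == 25); a tiny function with 20 insns gets
   20 / 4 = 5, which is bumped to the minimum of 11.  */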
2544
2545 /* Free things allocated by alloc_hash_table. */
2546
2547 static void
2548 free_hash_table (struct hash_table *table)
2549 {
2550 free (table->table);
2551 }
2552
2553 /* Compute the contents of the hash TABLE, which is either the
2554 copy/const propagation table or the expression hash table. */
2555
2556 static void
2557 compute_hash_table (struct hash_table *table)
2558 {
2559 /* Initialize count of number of entries in hash table. */
2560 table->n_elems = 0;
2561 memset (table->table, 0, table->size * sizeof (struct expr *));
2562
2563 compute_hash_table_work (table);
2564 }
2565 \f
2566 /* Expression tracking support. */
2567
2568 /* Lookup pattern PAT in the expression TABLE.
2569 The result is a pointer to the table entry, or NULL if not found. */
2570
2571 static struct expr *
2572 lookup_expr (rtx pat, struct hash_table *table)
2573 {
2574 int do_not_record_p;
2575 unsigned int hash = hash_expr (pat, GET_MODE (pat), &do_not_record_p,
2576 table->size);
2577 struct expr *expr;
2578
2579 if (do_not_record_p)
2580 return NULL;
2581
2582 expr = table->table[hash];
2583
2584 while (expr && ! expr_equiv_p (expr->expr, pat))
2585 expr = expr->next_same_hash;
2586
2587 return expr;
2588 }
2589
2590 /* Lookup REGNO in the set TABLE. The result is a pointer to the
2591 table entry, or NULL if not found. */
2592
2593 static struct expr *
2594 lookup_set (unsigned int regno, struct hash_table *table)
2595 {
2596 unsigned int hash = hash_set (regno, table->size);
2597 struct expr *expr;
2598
2599 expr = table->table[hash];
2600
2601 while (expr && REGNO (SET_DEST (expr->expr)) != regno)
2602 expr = expr->next_same_hash;
2603
2604 return expr;
2605 }
2606
2607 /* Return the next entry for REGNO in list EXPR. */
2608
2609 static struct expr *
2610 next_set (unsigned int regno, struct expr *expr)
2611 {
2612 do
2613 expr = expr->next_same_hash;
2614 while (expr && REGNO (SET_DEST (expr->expr)) != regno);
2615
2616 return expr;
2617 }
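/* Usage sketch (illustration only; the real callers are in the copy/const
   propagation code later in this file): every recorded SET of pseudo REGNO
   in the set hash table can be walked with

	for (set = lookup_set (regno, &set_hash_table);
	     set != NULL;
	     set = next_set (regno, set))
	  ... examine SET_SRC (set->expr) ...
*/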
2618
2619 /* Like free_INSN_LIST_list or free_EXPR_LIST_list, except that the node
2620 types may be mixed. */
2621
2622 static void
2623 free_insn_expr_list_list (rtx *listp)
2624 {
2625 rtx list, next;
2626
2627 for (list = *listp; list ; list = next)
2628 {
2629 next = XEXP (list, 1);
2630 if (GET_CODE (list) == EXPR_LIST)
2631 free_EXPR_LIST_node (list);
2632 else
2633 free_INSN_LIST_node (list);
2634 }
2635
2636 *listp = NULL;
2637 }
2638
2639 /* Clear canon_modify_mem_list and modify_mem_list tables. */
2640 static void
2641 clear_modify_mem_tables (void)
2642 {
2643 int i;
2644
2645 EXECUTE_IF_SET_IN_BITMAP
2646 (modify_mem_list_set, 0, i, free_INSN_LIST_list (modify_mem_list + i));
2647 bitmap_clear (modify_mem_list_set);
2648
2649 EXECUTE_IF_SET_IN_BITMAP
2650 (canon_modify_mem_list_set, 0, i,
2651 free_insn_expr_list_list (canon_modify_mem_list + i));
2652 bitmap_clear (canon_modify_mem_list_set);
2653 }
2654
2655 /* Release memory used by modify_mem_list_set and canon_modify_mem_list_set. */
2656
2657 static void
2658 free_modify_mem_tables (void)
2659 {
2660 clear_modify_mem_tables ();
2661 free (modify_mem_list);
2662 free (canon_modify_mem_list);
2663 modify_mem_list = 0;
2664 canon_modify_mem_list = 0;
2665 }
2666
2667 /* Reset tables used to keep track of what's still available [since the
2668 start of the block]. */
2669
2670 static void
2671 reset_opr_set_tables (void)
2672 {
2673 /* Maintain a bitmap of which regs have been set since beginning of
2674 the block. */
2675 CLEAR_REG_SET (reg_set_bitmap);
2676
2677 /* Also keep a record of the last instruction to modify memory.
2678 For now this is very trivial; we only record whether any memory
2679 location has been modified. */
2680 clear_modify_mem_tables ();
2681 }
2682
2683 /* Return nonzero if the operands of X are not set before INSN in
2684 INSN's basic block. */
2685
2686 static int
2687 oprs_not_set_p (rtx x, rtx insn)
2688 {
2689 int i, j;
2690 enum rtx_code code;
2691 const char *fmt;
2692
2693 if (x == 0)
2694 return 1;
2695
2696 code = GET_CODE (x);
2697 switch (code)
2698 {
2699 case PC:
2700 case CC0:
2701 case CONST:
2702 case CONST_INT:
2703 case CONST_DOUBLE:
2704 case CONST_VECTOR:
2705 case SYMBOL_REF:
2706 case LABEL_REF:
2707 case ADDR_VEC:
2708 case ADDR_DIFF_VEC:
2709 return 1;
2710
2711 case MEM:
2712 if (load_killed_in_block_p (BLOCK_FOR_INSN (insn),
2713 INSN_CUID (insn), x, 0))
2714 return 0;
2715 else
2716 return oprs_not_set_p (XEXP (x, 0), insn);
2717
2718 case REG:
2719 return ! REGNO_REG_SET_P (reg_set_bitmap, REGNO (x));
2720
2721 default:
2722 break;
2723 }
2724
2725 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2726 {
2727 if (fmt[i] == 'e')
2728 {
2729 /* If we are about to do the last recursive call
2730 needed at this level, change it into iteration.
2731 This function is called enough to be worth it. */
2732 if (i == 0)
2733 return oprs_not_set_p (XEXP (x, i), insn);
2734
2735 if (! oprs_not_set_p (XEXP (x, i), insn))
2736 return 0;
2737 }
2738 else if (fmt[i] == 'E')
2739 for (j = 0; j < XVECLEN (x, i); j++)
2740 if (! oprs_not_set_p (XVECEXP (x, i, j), insn))
2741 return 0;
2742 }
2743
2744 return 1;
2745 }
2746
2747 /* Mark things set by a CALL. */
2748
2749 static void
2750 mark_call (rtx insn)
2751 {
2752 if (! CONST_OR_PURE_CALL_P (insn))
2753 record_last_mem_set_info (insn);
2754 }
2755
2756 /* Mark things set by a SET. */
2757
2758 static void
2759 mark_set (rtx pat, rtx insn)
2760 {
2761 rtx dest = SET_DEST (pat);
2762
2763 while (GET_CODE (dest) == SUBREG
2764 || GET_CODE (dest) == ZERO_EXTRACT
2765 || GET_CODE (dest) == SIGN_EXTRACT
2766 || GET_CODE (dest) == STRICT_LOW_PART)
2767 dest = XEXP (dest, 0);
2768
2769 if (GET_CODE (dest) == REG)
2770 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (dest));
2771 else if (GET_CODE (dest) == MEM)
2772 record_last_mem_set_info (insn);
2773
2774 if (GET_CODE (SET_SRC (pat)) == CALL)
2775 mark_call (insn);
2776 }
2777
2778 /* Record things set by a CLOBBER. */
2779
2780 static void
2781 mark_clobber (rtx pat, rtx insn)
2782 {
2783 rtx clob = XEXP (pat, 0);
2784
2785 while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART)
2786 clob = XEXP (clob, 0);
2787
2788 if (GET_CODE (clob) == REG)
2789 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (clob));
2790 else
2791 record_last_mem_set_info (insn);
2792 }
2793
2794 /* Record things set by INSN.
2795 This data is used by oprs_not_set_p. */
2796
2797 static void
2798 mark_oprs_set (rtx insn)
2799 {
2800 rtx pat = PATTERN (insn);
2801 int i;
2802
2803 if (GET_CODE (pat) == SET)
2804 mark_set (pat, insn);
2805 else if (GET_CODE (pat) == PARALLEL)
2806 for (i = 0; i < XVECLEN (pat, 0); i++)
2807 {
2808 rtx x = XVECEXP (pat, 0, i);
2809
2810 if (GET_CODE (x) == SET)
2811 mark_set (x, insn);
2812 else if (GET_CODE (x) == CLOBBER)
2813 mark_clobber (x, insn);
2814 else if (GET_CODE (x) == CALL)
2815 mark_call (insn);
2816 }
2817
2818 else if (GET_CODE (pat) == CLOBBER)
2819 mark_clobber (pat, insn);
2820 else if (GET_CODE (pat) == CALL)
2821 mark_call (insn);
2822 }
2823
2824 \f
2825 /* Classic GCSE reaching definition support. */
2826
2827 /* Allocate reaching def variables. */
2828
2829 static void
2830 alloc_rd_mem (int n_blocks, int n_insns)
2831 {
2832 rd_kill = sbitmap_vector_alloc (n_blocks, n_insns);
2833 sbitmap_vector_zero (rd_kill, n_blocks);
2834
2835 rd_gen = sbitmap_vector_alloc (n_blocks, n_insns);
2836 sbitmap_vector_zero (rd_gen, n_blocks);
2837
2838 reaching_defs = sbitmap_vector_alloc (n_blocks, n_insns);
2839 sbitmap_vector_zero (reaching_defs, n_blocks);
2840
2841 rd_out = sbitmap_vector_alloc (n_blocks, n_insns);
2842 sbitmap_vector_zero (rd_out, n_blocks);
2843 }
2844
2845 /* Free reaching def variables. */
2846
2847 static void
2848 free_rd_mem (void)
2849 {
2850 sbitmap_vector_free (rd_kill);
2851 sbitmap_vector_free (rd_gen);
2852 sbitmap_vector_free (reaching_defs);
2853 sbitmap_vector_free (rd_out);
2854 }
2855
2856 /* INSN sets REGNO in BB; record in rd_kill[BB] every definition of REGNO made outside BB, since INSN's definition kills them. */
2857
2858 static void
2859 handle_rd_kill_set (rtx insn, int regno, basic_block bb)
2860 {
2861 struct reg_set *this_reg;
2862
2863 for (this_reg = reg_set_table[regno]; this_reg; this_reg = this_reg ->next)
2864 if (BLOCK_NUM (this_reg->insn) != BLOCK_NUM (insn))
2865 SET_BIT (rd_kill[bb->index], INSN_CUID (this_reg->insn));
2866 }
2867
2868 /* Compute the set of kill's for reaching definitions. */
2869
2870 static void
2871 compute_kill_rd (void)
2872 {
2873 int cuid;
2874 unsigned int regno;
2875 int i;
2876 basic_block bb;
2877
2878 /* For each block
2879 For each set bit in `gen' of the block (i.e. each insn which
2880 generates a definition in the block)
2881 Call the reg set by the insn corresponding to that bit regx
2882 Look at the linked list starting at reg_set_table[regx]
2883 For each setting of regx in the linked list, which is not in
2884 this block
2885 Set the bit in `kill' corresponding to that insn. */
2886 FOR_EACH_BB (bb)
2887 for (cuid = 0; cuid < max_cuid; cuid++)
2888 if (TEST_BIT (rd_gen[bb->index], cuid))
2889 {
2890 rtx insn = CUID_INSN (cuid);
2891 rtx pat = PATTERN (insn);
2892
2893 if (GET_CODE (insn) == CALL_INSN)
2894 {
2895 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2896 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
2897 handle_rd_kill_set (insn, regno, bb);
2898 }
2899
2900 if (GET_CODE (pat) == PARALLEL)
2901 {
2902 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
2903 {
2904 enum rtx_code code = GET_CODE (XVECEXP (pat, 0, i));
2905
2906 if ((code == SET || code == CLOBBER)
2907 && GET_CODE (XEXP (XVECEXP (pat, 0, i), 0)) == REG)
2908 handle_rd_kill_set (insn,
2909 REGNO (XEXP (XVECEXP (pat, 0, i), 0)),
2910 bb);
2911 }
2912 }
2913 else if (GET_CODE (pat) == SET && GET_CODE (SET_DEST (pat)) == REG)
2914 /* Each setting of this register outside of this block
2915 must be marked in the set of kills in this block. */
2916 handle_rd_kill_set (insn, REGNO (SET_DEST (pat)), bb);
2917 }
2918 }
2919
2920 /* Compute the reaching definitions as in
2921 Compilers Principles, Techniques, and Tools. Aho, Sethi, Ullman,
2922 Chapter 10. It is the same algorithm as used for computing available
2923 expressions but applied to the gens and kills of reaching definitions. */
2924
2925 static void
2926 compute_rd (void)
2927 {
2928 int changed, passes;
2929 basic_block bb;
2930
2931 FOR_EACH_BB (bb)
2932 sbitmap_copy (rd_out[bb->index] /*dst*/, rd_gen[bb->index] /*src*/);
2933
2934 passes = 0;
2935 changed = 1;
2936 while (changed)
2937 {
2938 changed = 0;
2939 FOR_EACH_BB (bb)
2940 {
2941 sbitmap_union_of_preds (reaching_defs[bb->index], rd_out, bb->index);
2942 changed |= sbitmap_union_of_diff_cg (rd_out[bb->index], rd_gen[bb->index],
2943 reaching_defs[bb->index], rd_kill[bb->index]);
2944 }
2945 passes++;
2946 }
2947
2948 if (gcse_file)
2949 fprintf (gcse_file, "reaching def computation: %d passes\n", passes);
2950 }
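/* For illustration only (not compiled into the pass): compute_rd solves the
   classic forward "union" dataflow problem
	IN[b]  = union over predecessors p of OUT[p]
	OUT[b] = GEN[b] | (IN[b] & ~KILL[b])
   iterated until no OUT changes.  A self-contained sketch with word-sized
   bitmasks in place of sbitmaps, for a two-block CFG 0 -> 1 with a self
   loop on block 1:  */
#if 0
static void
toy_compute_rd (void)
{
  unsigned int gen[2], kill[2], in[2], out[2];
  int changed;

  gen[0] = 0x1; kill[0] = 0x2;	/* block 0 defines d0 and kills d1 */
  gen[1] = 0x2; kill[1] = 0x1;	/* block 1 defines d1 and kills d0 */
  out[0] = gen[0]; out[1] = gen[1];
  in[0] = in[1] = 0;

  do
    {
      unsigned int new_out;

      changed = 0;
      /* Block 1's predecessors are block 0 and block 1 itself.  */
      in[1] = out[0] | out[1];
      /* OUT = GEN U (IN - KILL), as in compute_rd above.  */
      new_out = gen[1] | (in[1] & ~kill[1]);
      if (new_out != out[1])
	out[1] = new_out, changed = 1;
    }
  while (changed);
}
#endif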
2951 \f
2952 /* Classic GCSE available expression support. */
2953
2954 /* Allocate memory for available expression computation. */
2955
2956 static void
2957 alloc_avail_expr_mem (int n_blocks, int n_exprs)
2958 {
2959 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
2960 sbitmap_vector_zero (ae_kill, n_blocks);
2961
2962 ae_gen = sbitmap_vector_alloc (n_blocks, n_exprs);
2963 sbitmap_vector_zero (ae_gen, n_blocks);
2964
2965 ae_in = sbitmap_vector_alloc (n_blocks, n_exprs);
2966 sbitmap_vector_zero (ae_in, n_blocks);
2967
2968 ae_out = sbitmap_vector_alloc (n_blocks, n_exprs);
2969 sbitmap_vector_zero (ae_out, n_blocks);
2970 }
2971
2972 static void
2973 free_avail_expr_mem (void)
2974 {
2975 sbitmap_vector_free (ae_kill);
2976 sbitmap_vector_free (ae_gen);
2977 sbitmap_vector_free (ae_in);
2978 sbitmap_vector_free (ae_out);
2979 }
2980
2981 /* Compute the set of available expressions generated in each basic block. */
2982
2983 static void
2984 compute_ae_gen (struct hash_table *expr_hash_table)
2985 {
2986 unsigned int i;
2987 struct expr *expr;
2988 struct occr *occr;
2989
2990 /* For each recorded occurrence of each expression, set ae_gen[bb][expr].
2991 This is all we have to do because an expression is not recorded if it
2992 is not available, and the only expressions we want to work with are the
2993 ones that are recorded. */
2994 for (i = 0; i < expr_hash_table->size; i++)
2995 for (expr = expr_hash_table->table[i]; expr != 0; expr = expr->next_same_hash)
2996 for (occr = expr->avail_occr; occr != 0; occr = occr->next)
2997 SET_BIT (ae_gen[BLOCK_NUM (occr->insn)], expr->bitmap_index);
2998 }
2999
3000 /* Return nonzero if expression X is killed in BB. */
3001
3002 static int
3003 expr_killed_p (rtx x, basic_block bb)
3004 {
3005 int i, j;
3006 enum rtx_code code;
3007 const char *fmt;
3008
3009 if (x == 0)
3010 return 1;
3011
3012 code = GET_CODE (x);
3013 switch (code)
3014 {
3015 case REG:
3016 return TEST_BIT (reg_set_in_block[bb->index], REGNO (x));
3017
3018 case MEM:
3019 if (load_killed_in_block_p (bb, get_max_uid () + 1, x, 0))
3020 return 1;
3021 else
3022 return expr_killed_p (XEXP (x, 0), bb);
3023
3024 case PC:
3025 case CC0: /*FIXME*/
3026 case CONST:
3027 case CONST_INT:
3028 case CONST_DOUBLE:
3029 case CONST_VECTOR:
3030 case SYMBOL_REF:
3031 case LABEL_REF:
3032 case ADDR_VEC:
3033 case ADDR_DIFF_VEC:
3034 return 0;
3035
3036 default:
3037 break;
3038 }
3039
3040 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3041 {
3042 if (fmt[i] == 'e')
3043 {
3044 /* If we are about to do the last recursive call
3045 needed at this level, change it into iteration.
3046 This function is called enough to be worth it. */
3047 if (i == 0)
3048 return expr_killed_p (XEXP (x, i), bb);
3049 else if (expr_killed_p (XEXP (x, i), bb))
3050 return 1;
3051 }
3052 else if (fmt[i] == 'E')
3053 for (j = 0; j < XVECLEN (x, i); j++)
3054 if (expr_killed_p (XVECEXP (x, i, j), bb))
3055 return 1;
3056 }
3057
3058 return 0;
3059 }
3060
3061 /* Compute the set of available expressions killed in each basic block. */
3062
3063 static void
3064 compute_ae_kill (sbitmap *ae_gen, sbitmap *ae_kill,
3065 struct hash_table *expr_hash_table)
3066 {
3067 basic_block bb;
3068 unsigned int i;
3069 struct expr *expr;
3070
3071 FOR_EACH_BB (bb)
3072 for (i = 0; i < expr_hash_table->size; i++)
3073 for (expr = expr_hash_table->table[i]; expr; expr = expr->next_same_hash)
3074 {
3075 /* Skip EXPR if generated in this block. */
3076 if (TEST_BIT (ae_gen[bb->index], expr->bitmap_index))
3077 continue;
3078
3079 if (expr_killed_p (expr->expr, bb))
3080 SET_BIT (ae_kill[bb->index], expr->bitmap_index);
3081 }
3082 }
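/* For reference (the actual solver, compute_available, lives elsewhere in
   this file): the AE_GEN and AE_KILL sets computed above feed the standard
   available expression equations, which use intersection over predecessors
   because an expression is available only if it reaches along every path:
	AVIN[b]  = intersection over predecessors p of AVOUT[p]
	AVOUT[b] = AE_GEN[b] | (AVIN[b] & ~AE_KILL[b])
   iterated to a fixed point, with AVIN of the entry block empty.  */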
3083 \f
3084 /* Actually perform the Classic GCSE optimizations. */
3085
3086 /* Return nonzero if occurrence OCCR of expression EXPR reaches block BB.
3087
3088 CHECK_SELF_LOOP is nonzero if we should consider a block reaching itself
3089 as a positive reach. We want to do this when there are two computations
3090 of the expression in the block.
3091
3092 VISITED is a pointer to a working buffer for tracking which BB's have
3093 been visited. It is NULL for the top-level call.
3094
3095 We treat reaching expressions that go through blocks containing the same
3096 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
3097 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
3098 2 as not reaching. The intent is to improve the probability of finding
3099 only one reaching expression and to reduce register lifetimes by picking
3100 the closest such expression. */
3101
3102 static int
3103 expr_reaches_here_p_work (struct occr *occr, struct expr *expr,
3104 basic_block bb, int check_self_loop, char *visited)
3105 {
3106 edge pred;
3107
3108 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
3109 {
3110 basic_block pred_bb = pred->src;
3111
3112 if (visited[pred_bb->index])
3113 /* This predecessor has already been visited. Nothing to do. */
3114 ;
3115 else if (pred_bb == bb)
3116 {
3117 /* BB loops on itself. */
3118 if (check_self_loop
3119 && TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index)
3120 && BLOCK_NUM (occr->insn) == pred_bb->index)
3121 return 1;
3122
3123 visited[pred_bb->index] = 1;
3124 }
3125
3126 /* Ignore this predecessor if it kills the expression. */
3127 else if (TEST_BIT (ae_kill[pred_bb->index], expr->bitmap_index))
3128 visited[pred_bb->index] = 1;
3129
3130 /* Does this predecessor generate this expression? */
3131 else if (TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index))
3132 {
3133 /* Is this the occurrence we're looking for?
3134 Note that there's only one generating occurrence per block
3135 so we just need to check the block number. */
3136 if (BLOCK_NUM (occr->insn) == pred_bb->index)
3137 return 1;
3138
3139 visited[pred_bb->index] = 1;
3140 }
3141
3142 /* Neither gen nor kill. */
3143 else
3144 {
3145 visited[pred_bb->index] = 1;
3146 if (expr_reaches_here_p_work (occr, expr, pred_bb, check_self_loop,
3147 visited))
3148
3149 return 1;
3150 }
3151 }
3152
3153 /* All paths have been checked. */
3154 return 0;
3155 }
3156
3157 /* This wrapper for expr_reaches_here_p_work () exists to ensure that the
3158 memory it allocates for the VISITED buffer is freed. */
3159
3160 static int
3161 expr_reaches_here_p (struct occr *occr, struct expr *expr, basic_block bb,
3162 int check_self_loop)
3163 {
3164 int rval;
3165 char *visited = xcalloc (last_basic_block, 1);
3166
3167 rval = expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited);
3168
3169 free (visited);
3170 return rval;
3171 }
3172
3173 /* Return the instruction that computes EXPR that reaches INSN's basic block.
3174 If there is more than one such instruction, return NULL.
3175
3176 Called only by handle_avail_expr. */
3177
3178 static rtx
3179 computing_insn (struct expr *expr, rtx insn)
3180 {
3181 basic_block bb = BLOCK_FOR_INSN (insn);
3182
3183 if (expr->avail_occr->next == NULL)
3184 {
3185 if (BLOCK_FOR_INSN (expr->avail_occr->insn) == bb)
3186 /* The available expression is actually itself
3187 (i.e. a loop in the flow graph) so do nothing. */
3188 return NULL;
3189
3190 /* (FIXME) This is the case where we found a pattern that was
3191 created by an earlier substitution. */
3192 return expr->avail_occr->insn;
3193 }
3194 else
3195 {
3196 /* Pattern is computed more than once.
3197 Search backwards from this insn to see how many of these
3198 computations actually reach this insn. */
3199 struct occr *occr;
3200 rtx insn_computes_expr = NULL;
3201 int can_reach = 0;
3202
3203 for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
3204 {
3205 if (BLOCK_FOR_INSN (occr->insn) == bb)
3206 {
3207 /* The expression is generated in this block.
3208 The only time we care about this is when the expression
3209 is generated later in the block [and thus there's a loop].
3210 We let the normal cse pass handle the other cases. */
3211 if (INSN_CUID (insn) < INSN_CUID (occr->insn)
3212 && expr_reaches_here_p (occr, expr, bb, 1))
3213 {
3214 can_reach++;
3215 if (can_reach > 1)
3216 return NULL;
3217
3218 insn_computes_expr = occr->insn;
3219 }
3220 }
3221 else if (expr_reaches_here_p (occr, expr, bb, 0))
3222 {
3223 can_reach++;
3224 if (can_reach > 1)
3225 return NULL;
3226
3227 insn_computes_expr = occr->insn;
3228 }
3229 }
3230
3231 if (insn_computes_expr == NULL)
3232 abort ();
3233
3234 return insn_computes_expr;
3235 }
3236 }
3237
3238 /* Return nonzero if the definition in DEF_INSN can reach INSN.
3239 Only called by can_disregard_other_sets. */
3240
3241 static int
3242 def_reaches_here_p (rtx insn, rtx def_insn)
3243 {
3244 rtx reg;
3245
3246 if (TEST_BIT (reaching_defs[BLOCK_NUM (insn)], INSN_CUID (def_insn)))
3247 return 1;
3248
3249 if (BLOCK_NUM (insn) == BLOCK_NUM (def_insn))
3250 {
3251 if (INSN_CUID (def_insn) < INSN_CUID (insn))
3252 {
3253 if (GET_CODE (PATTERN (def_insn)) == PARALLEL)
3254 return 1;
3255 else if (GET_CODE (PATTERN (def_insn)) == CLOBBER)
3256 reg = XEXP (PATTERN (def_insn), 0);
3257 else if (GET_CODE (PATTERN (def_insn)) == SET)
3258 reg = SET_DEST (PATTERN (def_insn));
3259 else
3260 abort ();
3261
3262 return ! reg_set_between_p (reg, NEXT_INSN (def_insn), insn);
3263 }
3264 else
3265 return 0;
3266 }
3267
3268 return 0;
3269 }
3270
3271 /* Return nonzero if *ADDR_THIS_REG can only have one value at INSN. The
3272 value returned is the number of definitions that reach INSN. Returning a
3273 value of zero means that [maybe] more than one definition reaches INSN and
3274 the caller can't perform whatever optimization it is trying; i.e., it is
3275 always safe to return zero. */
3276
3277 static int
3278 can_disregard_other_sets (struct reg_set **addr_this_reg, rtx insn, int for_combine)
3279 {
3280 int number_of_reaching_defs = 0;
3281 struct reg_set *this_reg;
3282
3283 for (this_reg = *addr_this_reg; this_reg != 0; this_reg = this_reg->next)
3284 if (def_reaches_here_p (insn, this_reg->insn))
3285 {
3286 number_of_reaching_defs++;
3287 /* Ignore parallels for now. */
3288 if (GET_CODE (PATTERN (this_reg->insn)) == PARALLEL)
3289 return 0;
3290
3291 if (!for_combine
3292 && (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER
3293 || ! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)),
3294 SET_SRC (PATTERN (insn)))))
3295 /* A setting of the reg to a different value reaches INSN. */
3296 return 0;
3297
3298 if (number_of_reaching_defs > 1)
3299 {
3300 /* If this setting of the register assigns it the same value as
3301 the previous setting did, and this setting reaches the insn we
3302 are trying to do the substitution on, then the substitution
3303 is still OK. */
3304 if (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER)
3305 return 0;
3306 else if (! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)),
3307 SET_SRC (PATTERN (insn))))
3308 return 0;
3309 }
3310
3311 *addr_this_reg = this_reg;
3312 }
3313
3314 return number_of_reaching_defs;
3315 }
3316
3317 /* The expression computed by INSN is available and the substitution is
3318 legal, so try to perform the substitution.
3319
3320 The result is nonzero if any changes were made. */
3321
3322 static int
3323 handle_avail_expr (rtx insn, struct expr *expr)
3324 {
3325 rtx pat, insn_computes_expr, expr_set;
3326 rtx to;
3327 struct reg_set *this_reg;
3328 int found_setting, use_src;
3329 int changed = 0;
3330
3331 /* We only handle the case where one computation of the expression
3332 reaches this instruction. */
3333 insn_computes_expr = computing_insn (expr, insn);
3334 if (insn_computes_expr == NULL)
3335 return 0;
3336 expr_set = single_set (insn_computes_expr);
3337 if (!expr_set)
3338 abort ();
3339
3340 found_setting = 0;
3341 use_src = 0;
3342
3343 /* At this point we know only one computation of EXPR outside of this
3344 block reaches this insn. Now try to find a register that the
3345 expression is computed into. */
3346 if (GET_CODE (SET_SRC (expr_set)) == REG)
3347 {
3348 /* This is the case when the available expression that reaches
3349 here has already been handled as an available expression. */
3350 unsigned int regnum_for_replacing
3351 = REGNO (SET_SRC (expr_set));
3352
3353 /* If the register was created by GCSE we can't use `reg_set_table';
3354 however, we know it's set only once. */
3355 if (regnum_for_replacing >= max_gcse_regno
3356 /* If the register the expression is computed into is set only once,
3357 or only one set reaches this insn, we can use it. */
3358 || (((this_reg = reg_set_table[regnum_for_replacing]),
3359 this_reg->next == NULL)
3360 || can_disregard_other_sets (&this_reg, insn, 0)))
3361 {
3362 use_src = 1;
3363 found_setting = 1;
3364 }
3365 }
3366
3367 if (!found_setting)
3368 {
3369 unsigned int regnum_for_replacing
3370 = REGNO (SET_DEST (expr_set));
3371
3372 /* This shouldn't happen. */
3373 if (regnum_for_replacing >= max_gcse_regno)
3374 abort ();
3375
3376 this_reg = reg_set_table[regnum_for_replacing];
3377
3378 /* If the register the expression is computed into is set only once,
3379 or only one set reaches this insn, use it. */
3380 if (this_reg->next == NULL
3381 || can_disregard_other_sets (&this_reg, insn, 0))
3382 found_setting = 1;
3383 }
3384
3385 if (found_setting)
3386 {
3387 pat = PATTERN (insn);
3388 if (use_src)
3389 to = SET_SRC (expr_set);
3390 else
3391 to = SET_DEST (expr_set);
3392 changed = validate_change (insn, &SET_SRC (pat), to, 0);
3393
3394 /* We should be able to ignore the return code from validate_change but
3395 to play it safe we check. */
3396 if (changed)
3397 {
3398 gcse_subst_count++;
3399 if (gcse_file != NULL)
3400 {
3401 fprintf (gcse_file, "GCSE: Replacing the source in insn %d with",
3402 INSN_UID (insn));
3403 fprintf (gcse_file, " reg %d %s insn %d\n",
3404 REGNO (to), use_src ? "from" : "set in",
3405 INSN_UID (insn_computes_expr));
3406 }
3407 }
3408 }
3409
3410 /* The register that the expr is computed into is set more than once. */
3411 else if (1 /* expensive_op (this_pattern->op) && do_expensive_gcse */)
3412 {
3413 /* Insert an insn after INSN_COMPUTES_EXPR that copies the register
3414 set there (call it REGB) into a new pseudo register (call it REGN).
3415 From that point until the end of the basic block, or until REGB is
3416 set again, replace all uses of REGB with REGN. */
3417 rtx new_insn;
3418
3419 to = gen_reg_rtx (GET_MODE (SET_DEST (expr_set)));
3420
3421 /* Generate the new insn. */
3422 /* ??? If the change fails, we return 0, even though we created
3423 an insn. I think this is ok. */
3424 new_insn
3425 = emit_insn_after (gen_rtx_SET (VOIDmode, to,
3426 SET_DEST (expr_set)),
3427 insn_computes_expr);
3428
3429 /* Keep register set table up to date. */
3430 record_one_set (REGNO (to), new_insn);
3431
3432 gcse_create_count++;
3433 if (gcse_file != NULL)
3434 {
3435 fprintf (gcse_file, "GCSE: Creating insn %d to copy value of reg %d",
3436 INSN_UID (NEXT_INSN (insn_computes_expr)),
3437 REGNO (SET_SRC (PATTERN (NEXT_INSN (insn_computes_expr)))));
3438 fprintf (gcse_file, ", computed in insn %d,\n",
3439 INSN_UID (insn_computes_expr));
3440 fprintf (gcse_file, " into newly allocated reg %d\n",
3441 REGNO (to));
3442 }
3443
3444 pat = PATTERN (insn);
3445
3446 /* Do register replacement for INSN. */
3447 changed = validate_change (insn, &SET_SRC (pat),
3448 SET_DEST (PATTERN
3449 (NEXT_INSN (insn_computes_expr))),
3450 0);
3451
3452 /* We should be able to ignore the return code from validate_change but
3453 to play it safe we check. */
3454 if (changed)
3455 {
3456 gcse_subst_count++;
3457 if (gcse_file != NULL)
3458 {
3459 fprintf (gcse_file,
3460 "GCSE: Replacing the source in insn %d with reg %d ",
3461 INSN_UID (insn),
3462 REGNO (SET_DEST (PATTERN (NEXT_INSN
3463 (insn_computes_expr)))));
3464 fprintf (gcse_file, "set in insn %d\n",
3465 INSN_UID (insn_computes_expr));
3466 }
3467 }
3468 }
3469
3470 return changed;
3471 }
3472
3473 /* Perform classic GCSE. This is called by one_classic_gcse_pass after all
3474 the dataflow analysis has been done.
3475
3476 The result is nonzero if a change was made. */
3477
3478 static int
3479 classic_gcse (void)
3480 {
3481 int changed;
3482 rtx insn;
3483 basic_block bb;
3484
3485 /* Note we start at block 1. */
3486
3487 if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
3488 return 0;
3489
3490 changed = 0;
3491 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb)
3492 {
3493 /* Reset tables used to keep track of what's still valid [since the
3494 start of the block]. */
3495 reset_opr_set_tables ();
3496
3497 for (insn = bb->head;
3498 insn != NULL && insn != NEXT_INSN (bb->end);
3499 insn = NEXT_INSN (insn))
3500 {
3501 /* Is insn of form (set (pseudo-reg) ...)? */
3502 if (GET_CODE (insn) == INSN
3503 && GET_CODE (PATTERN (insn)) == SET
3504 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
3505 && REGNO (SET_DEST (PATTERN (insn))) >= FIRST_PSEUDO_REGISTER)
3506 {
3507 rtx pat = PATTERN (insn);
3508 rtx src = SET_SRC (pat);
3509 struct expr *expr;
3510
3511 if (want_to_gcse_p (src)
3512 /* Is the expression recorded? */
3513 && ((expr = lookup_expr (src, &expr_hash_table)) != NULL)
3514 /* Is the expression available [at the start of the
3515 block]? */
3516 && TEST_BIT (ae_in[bb->index], expr->bitmap_index)
3517 /* Are the operands unchanged since the start of the
3518 block? */
3519 && oprs_not_set_p (src, insn))
3520 changed |= handle_avail_expr (insn, expr);
3521 }
3522
3523 /* Keep track of everything modified by this insn. */
3524 /* ??? Need to be careful w.r.t. mods done to INSN. */
3525 if (INSN_P (insn))
3526 mark_oprs_set (insn);
3527 }
3528 }
3529
3530 return changed;
3531 }
3532
3533 /* Top level routine to perform one classic GCSE pass.
3534
3535 Return nonzero if a change was made. */
3536
3537 static int
3538 one_classic_gcse_pass (int pass)
3539 {
3540 int changed = 0;
3541
3542 gcse_subst_count = 0;
3543 gcse_create_count = 0;
3544
3545 alloc_hash_table (max_cuid, &expr_hash_table, 0);
3546 alloc_rd_mem (last_basic_block, max_cuid);
3547 compute_hash_table (&expr_hash_table);
3548 if (gcse_file)
3549 dump_hash_table (gcse_file, "Expression", &expr_hash_table);
3550
3551 if (expr_hash_table.n_elems > 0)
3552 {
3553 compute_kill_rd ();
3554 compute_rd ();
3555 alloc_avail_expr_mem (last_basic_block, expr_hash_table.n_elems);
3556 compute_ae_gen (&expr_hash_table);
3557 compute_ae_kill (ae_gen, ae_kill, &expr_hash_table);
3558 compute_available (ae_gen, ae_kill, ae_out, ae_in);
3559 changed = classic_gcse ();
3560 free_avail_expr_mem ();
3561 }
3562
3563 free_rd_mem ();
3564 free_hash_table (&expr_hash_table);
3565
3566 if (gcse_file)
3567 {
3568 fprintf (gcse_file, "\n");
3569 fprintf (gcse_file, "GCSE of %s, pass %d: %d bytes needed, %d substs,",
3570 current_function_name, pass, bytes_used, gcse_subst_count);
3571 fprintf (gcse_file, "%d insns created\n", gcse_create_count);
3572 }
3573
3574 return changed;
3575 }
3576 \f
3577 /* Compute copy/constant propagation working variables. */
3578
3579 /* Local properties of assignments. */
3580 static sbitmap *cprop_pavloc;
3581 static sbitmap *cprop_absaltered;
3582
3583 /* Global properties of assignments (computed from the local properties). */
3584 static sbitmap *cprop_avin;
3585 static sbitmap *cprop_avout;
3586
3587 /* Allocate vars used for copy/const propagation. N_BLOCKS is the number of
3588 basic blocks. N_SETS is the number of sets. */
3589
3590 static void
3591 alloc_cprop_mem (int n_blocks, int n_sets)
3592 {
3593 cprop_pavloc = sbitmap_vector_alloc (n_blocks, n_sets);
3594 cprop_absaltered = sbitmap_vector_alloc (n_blocks, n_sets);
3595
3596 cprop_avin = sbitmap_vector_alloc (n_blocks, n_sets);
3597 cprop_avout = sbitmap_vector_alloc (n_blocks, n_sets);
3598 }
3599
3600 /* Free vars used by copy/const propagation. */
3601
3602 static void
3603 free_cprop_mem (void)
3604 {
3605 sbitmap_vector_free (cprop_pavloc);
3606 sbitmap_vector_free (cprop_absaltered);
3607 sbitmap_vector_free (cprop_avin);
3608 sbitmap_vector_free (cprop_avout);
3609 }
3610
3611 /* For each block, compute whether X is transparent. X is either an
3612 expression or an assignment [though we don't care which, for this context
3613 an assignment is treated as an expression]. For each block where an
3614 element of X is modified, set (SET_P == 1) or reset (SET_P == 0) the INDX
3615 bit in BMAP. */
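/* For example, if X is (plus (reg 100) (const_int 4)) and SET_P is zero,
   the INDX bit is cleared in BMAP for every block containing a set of
   (reg 100), i.e. every block in which X is not transparent.  */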
3616
3617 static void
3618 compute_transp (rtx x, int indx, sbitmap *bmap, int set_p)
3619 {
3620 int i, j;
3621 basic_block bb;
3622 enum rtx_code code;
3623 reg_set *r;
3624 const char *fmt;
3625
3626 /* repeat is used to turn tail-recursion into iteration since GCC
3627 can't do it when there's no return value. */
3628 repeat:
3629
3630 if (x == 0)
3631 return;
3632
3633 code = GET_CODE (x);
3634 switch (code)
3635 {
3636 case REG:
3637 if (set_p)
3638 {
3639 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
3640 {
3641 FOR_EACH_BB (bb)
3642 if (TEST_BIT (reg_set_in_block[bb->index], REGNO (x)))
3643 SET_BIT (bmap[bb->index], indx);
3644 }
3645 else
3646 {
3647 for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
3648 SET_BIT (bmap[BLOCK_NUM (r->insn)], indx);
3649 }
3650 }
3651 else
3652 {
3653 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
3654 {
3655 FOR_EACH_BB (bb)
3656 if (TEST_BIT (reg_set_in_block[bb->index], REGNO (x)))
3657 RESET_BIT (bmap[bb->index], indx);
3658 }
3659 else
3660 {
3661 for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
3662 RESET_BIT (bmap[BLOCK_NUM (r->insn)], indx);
3663 }
3664 }
3665
3666 return;
3667
3668 case MEM:
3669 FOR_EACH_BB (bb)
3670 {
3671 rtx list_entry = canon_modify_mem_list[bb->index];
3672
3673 while (list_entry)
3674 {
3675 rtx dest, dest_addr;
3676
3677 if (GET_CODE (XEXP (list_entry, 0)) == CALL_INSN)
3678 {
3679 if (set_p)
3680 SET_BIT (bmap[bb->index], indx);
3681 else
3682 RESET_BIT (bmap[bb->index], indx);
3683 break;
3684 }
3685 /* LIST_ENTRY must be an INSN of some kind that sets memory.
3686 Examine each hunk of memory that is modified. */
3687
3688 dest = XEXP (list_entry, 0);
3689 list_entry = XEXP (list_entry, 1);
3690 dest_addr = XEXP (list_entry, 0);
3691
3692 if (canon_true_dependence (dest, GET_MODE (dest), dest_addr,
3693 x, rtx_addr_varies_p))
3694 {
3695 if (set_p)
3696 SET_BIT (bmap[bb->index], indx);
3697 else
3698 RESET_BIT (bmap[bb->index], indx);
3699 break;
3700 }
3701 list_entry = XEXP (list_entry, 1);
3702 }
3703 }
3704
3705 x = XEXP (x, 0);
3706 goto repeat;
3707
3708 case PC:
3709 case CC0: /*FIXME*/
3710 case CONST:
3711 case CONST_INT:
3712 case CONST_DOUBLE:
3713 case CONST_VECTOR:
3714 case SYMBOL_REF:
3715 case LABEL_REF:
3716 case ADDR_VEC:
3717 case ADDR_DIFF_VEC:
3718 return;
3719
3720 default:
3721 break;
3722 }
3723
3724 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3725 {
3726 if (fmt[i] == 'e')
3727 {
3728 /* If we are about to do the last recursive call
3729 needed at this level, change it into iteration.
3730 This function is called enough to be worth it. */
3731 if (i == 0)
3732 {
3733 x = XEXP (x, i);
3734 goto repeat;
3735 }
3736
3737 compute_transp (XEXP (x, i), indx, bmap, set_p);
3738 }
3739 else if (fmt[i] == 'E')
3740 for (j = 0; j < XVECLEN (x, i); j++)
3741 compute_transp (XVECEXP (x, i, j), indx, bmap, set_p);
3742 }
3743 }
3744
3745 /* Top level routine to do the dataflow analysis needed by copy/const
3746 propagation. */
3747
3748 static void
3749 compute_cprop_data (void)
3750 {
3751 compute_local_properties (cprop_absaltered, cprop_pavloc, NULL, &set_hash_table);
3752 compute_available (cprop_pavloc, cprop_absaltered,
3753 cprop_avout, cprop_avin);
3754 }
3755 \f
3756 /* Copy/constant propagation. */
3757
3758 /* Maximum number of register uses in an insn that we handle. */
3759 #define MAX_USES 8
3760
3761 /* Table of uses found in an insn.
3762 Allocated statically to avoid alloc/free complexity and overhead. */
3763 static struct reg_use reg_use_table[MAX_USES];
3764
3765 /* Index into `reg_use_table' while building it. */
3766 static int reg_use_count;
3767
3768 /* Set up a list of register numbers used in INSN. The found uses are stored
3769 in `reg_use_table'. `reg_use_count' is initialized to zero before entry,
3770 and contains the number of uses in the table upon exit.
3771
3772 ??? If a register appears multiple times we will record it multiple times.
3773 This doesn't hurt anything but it will slow things down. */
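/* For example, scanning the SET_SRC of
   (set (reg 105) (plus (reg 100) (reg 100))) records (reg 100) twice in
   reg_use_table and leaves reg_use_count at 2.  */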
3774
3775 static void
3776 find_used_regs (rtx *xptr, void *data ATTRIBUTE_UNUSED)
3777 {
3778 int i, j;
3779 enum rtx_code code;
3780 const char *fmt;
3781 rtx x = *xptr;
3782
3783 /* repeat is used to turn tail-recursion into iteration since GCC
3784 can't do it when there's no return value. */
3785 repeat:
3786 if (x == 0)
3787 return;
3788
3789 code = GET_CODE (x);
3790 if (REG_P (x))
3791 {
3792 if (reg_use_count == MAX_USES)
3793 return;
3794
3795 reg_use_table[reg_use_count].reg_rtx = x;
3796 reg_use_count++;
3797 }
3798
3799 /* Recursively scan the operands of this expression. */
3800
3801 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3802 {
3803 if (fmt[i] == 'e')
3804 {
3805 /* If we are about to do the last recursive call
3806 needed at this level, change it into iteration.
3807 This function is called enough to be worth it. */
3808 if (i == 0)
3809 {
3810 x = XEXP (x, 0);
3811 goto repeat;
3812 }
3813
3814 find_used_regs (&XEXP (x, i), data);
3815 }
3816 else if (fmt[i] == 'E')
3817 for (j = 0; j < XVECLEN (x, i); j++)
3818 find_used_regs (&XVECEXP (x, i, j), data);
3819 }
3820 }
3821
3822 /* Try to replace all non-SET_DEST occurrences of FROM in INSN with TO.
3823 Returns nonzero if successful. */
3824
3825 static int
3826 try_replace_reg (rtx from, rtx to, rtx insn)
3827 {
3828 rtx note = find_reg_equal_equiv_note (insn);
3829 rtx src = 0;
3830 int success = 0;
3831 rtx set = single_set (insn);
3832
3833 validate_replace_src_group (from, to, insn);
3834 if (num_changes_pending () && apply_change_group ())
3835 success = 1;
3836
3837 /* Try to simplify SET_SRC if we have substituted a constant. */
3838 if (success && set && CONSTANT_P (to))
3839 {
3840 src = simplify_rtx (SET_SRC (set));
3841
3842 if (src)
3843 validate_change (insn, &SET_SRC (set), src, 0);
3844 }
3845
3846 /* If there is already a NOTE, update the expression in it with our
3847 replacement. */
3848 if (note != 0)
3849 XEXP (note, 0) = simplify_replace_rtx (XEXP (note, 0), from, to);
3850
3851 if (!success && set && reg_mentioned_p (from, SET_SRC (set)))
3852 {
3853 /* If the above failed and this is a single set, try to simplify the source of
3854 the set given our substitution. We could perhaps try this for multiple
3855 SETs, but it probably won't buy us anything. */
3856 src = simplify_replace_rtx (SET_SRC (set), from, to);
3857
3858 if (!rtx_equal_p (src, SET_SRC (set))
3859 && validate_change (insn, &SET_SRC (set), src, 0))
3860 success = 1;
3861
3862 /* If we've failed to do replacement, have a single SET, don't already
3863 have a note, and have no special SET, add a REG_EQUAL note to not
3864 lose information. */
3865 if (!success && note == 0 && set != 0
3866 && GET_CODE (XEXP (set, 0)) != ZERO_EXTRACT
3867 && GET_CODE (XEXP (set, 0)) != SIGN_EXTRACT)
3868 note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
3869 }
3870
3871 /* A REG_EQUAL note may get simplified into a bare register.
3872 We don't allow that; remove such a note. This case ought
3873 not to happen, because the code above ought to have synthesized
3874 a reg-reg move, but be on the safe side. */
3875 if (note && REG_P (XEXP (note, 0)))
3876 remove_note (insn, note);
3877
3878 return success;
3879 }
3880
3881 /* Find a set of register REGNO that is available on entry to INSN's block.
3882 Returns NULL if no such set is found. */
3883
3884 static struct expr *
3885 find_avail_set (int regno, rtx insn)
3886 {
3887 /* SET1 contains the last set found that can be returned to the caller for
3888 use in a substitution. */
3889 struct expr *set1 = 0;
3890
3891 /* Loops are not possible here. To get a loop we would need two sets
3892 available at the start of the block containing INSN, i.e. we would
3893 need two sets like this available at the start of the block:
3894
3895 (set (reg X) (reg Y))
3896 (set (reg Y) (reg X))
3897
3898 This cannot happen since the set of (reg Y) would have killed the
3899 set of (reg X) making it unavailable at the start of this block. */
3900 while (1)
3901 {
3902 rtx src;
3903 struct expr *set = lookup_set (regno, &set_hash_table);
3904
3905 /* Find a set that is available at the start of the block
3906 which contains INSN. */
3907 while (set)
3908 {
3909 if (TEST_BIT (cprop_avin[BLOCK_NUM (insn)], set->bitmap_index))
3910 break;
3911 set = next_set (regno, set);
3912 }
3913
3914 /* If no available set was found we've reached the end of the
3915 (possibly empty) copy chain. */
3916 if (set == 0)
3917 break;
3918
3919 if (GET_CODE (set->expr) != SET)
3920 abort ();
3921
3922 src = SET_SRC (set->expr);
3923
3924 /* We know the set is available.
3925 Now check that SRC is ANTLOC (i.e. none of the source operands
3926 have changed since the start of the block).
3927
3928 If the source operand changed, we may still use it for the next
3929 iteration of this loop, but we may not use it for substitutions. */
3930
3931 if (gcse_constant_p (src) || oprs_not_set_p (src, insn))
3932 set1 = set;
3933
3934 /* If the source of the set is anything except a register, then
3935 we have reached the end of the copy chain. */
3936 if (GET_CODE (src) != REG)
3937 break;
3938
3939 /* Follow the copy chain, i.e. start another iteration of the loop
3940 and see if we have an available copy into SRC. */
3941 regno = REGNO (src);
3942 }
3943
3944 /* SET1 holds the last set that was available and anticipatable at
3945 INSN. */
3946 return set1;
3947 }
3948
3949 /* Subroutine of cprop_insn that tries to propagate constants into
3950 JUMP_INSNS. JUMP must be a conditional jump. If SETCC is non-NULL
3951 it is the instruction that immediately precedes JUMP, and must be a
3952 single SET of a register. FROM is what we will try to replace,
3953 SRC is the constant we will try to substitute for it. Returns nonzero
3954 if a change was made. */
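/* For example, with SETCC (set (reg 80) (lt (reg 100) (const_int 0))) and
   JUMP (set (pc) (if_then_else (ne (reg 80) (const_int 0)) (label_ref L)
   (pc))), substituting a known constant for (reg 100) may fold the
   condition to a constant, so that the jump either becomes unconditional
   or turns into a no-op and is deleted.  */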
3955
3956 static int
3957 cprop_jump (basic_block bb, rtx setcc, rtx jump, rtx from, rtx src)
3958 {
3959 rtx new, set_src, note_src;
3960 rtx set = pc_set (jump);
3961 rtx note = find_reg_equal_equiv_note (jump);
3962
3963 if (note)
3964 {
3965 note_src = XEXP (note, 0);
3966 if (GET_CODE (note_src) == EXPR_LIST)
3967 note_src = NULL_RTX;
3968 }
3969 else note_src = NULL_RTX;
3970
3971 /* Prefer REG_EQUAL notes except those containing EXPR_LISTs. */
3972 set_src = note_src ? note_src : SET_SRC (set);
3973
3974 /* First substitute the SETCC condition into the JUMP instruction,
3975 then substitute the given value into this expanded JUMP. */
3976 if (setcc != NULL_RTX
3977 && !modified_between_p (from, setcc, jump)
3978 && !modified_between_p (src, setcc, jump))
3979 {
3980 rtx setcc_src;
3981 rtx setcc_set = single_set (setcc);
3982 rtx setcc_note = find_reg_equal_equiv_note (setcc);
3983 setcc_src = (setcc_note && GET_CODE (XEXP (setcc_note, 0)) != EXPR_LIST)
3984 ? XEXP (setcc_note, 0) : SET_SRC (setcc_set);
3985 set_src = simplify_replace_rtx (set_src, SET_DEST (setcc_set),
3986 setcc_src);
3987 }
3988 else
3989 setcc = NULL_RTX;
3990
3991 new = simplify_replace_rtx (set_src, from, src);
3992
3993 /* If no simplification can be made, then try the next register. */
3994 if (rtx_equal_p (new, SET_SRC (set)))
3995 return 0;
3996
3997 /* If this is now a no-op delete it, otherwise this must be a valid insn. */
3998 if (new == pc_rtx)
3999 delete_insn (jump);
4000 else
4001 {
4002 /* Ensure the value computed inside the jump insn is equivalent
4003 to the one computed by setcc. */
4004 if (setcc && modified_in_p (new, setcc))
4005 return 0;
4006 if (! validate_change (jump, &SET_SRC (set), new, 0))
4007 {
4008 /* When (some) constants are not valid in a comparison, and there
4009 are two registers to be replaced by constants before the entire
4010 comparison can be folded into a constant, we need to keep
4011 intermediate information in REG_EQUAL notes. For targets with
4012 separate compare insns, such notes are added by try_replace_reg.
4013 When we have a combined compare-and-branch instruction, however,
4014 we need to attach a note to the branch itself to make this
4015 optimization work. */
4016
4017 if (!rtx_equal_p (new, note_src))
4018 set_unique_reg_note (jump, REG_EQUAL, copy_rtx (new));
4019 return 0;
4020 }
4021
4022 /* Remove REG_EQUAL note after simplification. */
4023 if (note_src)
4024 remove_note (jump, note);
4025
4026 /* If this has turned into an unconditional jump,
4027 then put a barrier after it so that the unreachable
4028 code will be deleted. */
4029 if (GET_CODE (SET_SRC (set)) == LABEL_REF)
4030 emit_barrier_after (jump);
4031 }
4032
4033 #ifdef HAVE_cc0
4034 /* Delete the cc0 setter. */
4035 if (setcc != NULL && CC0_P (SET_DEST (single_set (setcc))))
4036 delete_insn (setcc);
4037 #endif
4038
4039 run_jump_opt_after_gcse = 1;
4040
4041 const_prop_count++;
4042 if (gcse_file != NULL)
4043 {
4044 fprintf (gcse_file,
4045 "CONST-PROP: Replacing reg %d in jump_insn %d with constant ",
4046 REGNO (from), INSN_UID (jump));
4047 print_rtl (gcse_file, src);
4048 fprintf (gcse_file, "\n");
4049 }
4050 purge_dead_edges (bb);
4051
4052 return 1;
4053 }
4054
4055 static bool
4056 constprop_register (rtx insn, rtx from, rtx to, int alter_jumps)
4057 {
4058 rtx sset;
4059
4060 /* Check for reg or cc0 setting instructions followed by
4061 conditional branch instructions first. */
4062 if (alter_jumps
4063 && (sset = single_set (insn)) != NULL
4064 && NEXT_INSN (insn)
4065 && any_condjump_p (NEXT_INSN (insn)) && onlyjump_p (NEXT_INSN (insn)))
4066 {
4067 rtx dest = SET_DEST (sset);
4068 if ((REG_P (dest) || CC0_P (dest))
4069 && cprop_jump (BLOCK_FOR_INSN (insn), insn, NEXT_INSN (insn), from, to))
4070 return 1;
4071 }
4072
4073 /* Handle normal insns next. */
4074 if (GET_CODE (insn) == INSN
4075 && try_replace_reg (from, to, insn))
4076 return 1;
4077
4078 /* Try to propagate a CONST_INT into a conditional jump.
4079 We're pretty specific about what we will handle in this
4080 code; we can extend it as necessary over time.
4081
4082 Right now the insn in question must look like
4083 (set (pc) (if_then_else ...)) */
4084 else if (alter_jumps && any_condjump_p (insn) && onlyjump_p (insn))
4085 return cprop_jump (BLOCK_FOR_INSN (insn), NULL, insn, from, to);
4086 return 0;
4087 }
4088
4089 /* Perform constant and copy propagation on INSN.
4090 The result is nonzero if a change was made. */
4091
4092 static int
4093 cprop_insn (rtx insn, int alter_jumps)
4094 {
4095 struct reg_use *reg_used;
4096 int changed = 0;
4097 rtx note;
4098
4099 if (!INSN_P (insn))
4100 return 0;
4101
4102 reg_use_count = 0;
4103 note_uses (&PATTERN (insn), find_used_regs, NULL);
4104
4105 note = find_reg_equal_equiv_note (insn);
4106
4107 /* We may win even when propagating constants into notes. */
4108 if (note)
4109 find_used_regs (&XEXP (note, 0), NULL);
4110
4111 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
4112 reg_used++, reg_use_count--)
4113 {
4114 unsigned int regno = REGNO (reg_used->reg_rtx);
4115 rtx pat, src;
4116 struct expr *set;
4117
4118 /* Ignore registers created by GCSE.
4119 We do this because ... */
4120 if (regno >= max_gcse_regno)
4121 continue;
4122
4123 /* If the register has already been set in this block, there's
4124 nothing we can do. */
4125 if (! oprs_not_set_p (reg_used->reg_rtx, insn))
4126 continue;
4127
4128 /* Find an assignment that sets reg_used and is available
4129 at the start of the block. */
4130 set = find_avail_set (regno, insn);
4131 if (! set)
4132 continue;
4133
4134 pat = set->expr;
4135 /* ??? We might be able to handle PARALLELs. Later. */
4136 if (GET_CODE (pat) != SET)
4137 abort ();
4138
4139 src = SET_SRC (pat);
4140
4141 /* Constant propagation. */
4142 if (gcse_constant_p (src))
4143 {
4144 if (constprop_register (insn, reg_used->reg_rtx, src, alter_jumps))
4145 {
4146 changed = 1;
4147 const_prop_count++;
4148 if (gcse_file != NULL)
4149 {
4150 fprintf (gcse_file, "GLOBAL CONST-PROP: Replacing reg %d in ", regno);
4151 fprintf (gcse_file, "insn %d with constant ", INSN_UID (insn));
4152 print_rtl (gcse_file, src);
4153 fprintf (gcse_file, "\n");
4154 }
4155 if (INSN_DELETED_P (insn))
4156 return 1;
4157 }
4158 }
4159 else if (GET_CODE (src) == REG
4160 && REGNO (src) >= FIRST_PSEUDO_REGISTER
4161 && REGNO (src) != regno)
4162 {
4163 if (try_replace_reg (reg_used->reg_rtx, src, insn))
4164 {
4165 changed = 1;
4166 copy_prop_count++;
4167 if (gcse_file != NULL)
4168 {
4169 fprintf (gcse_file, "GLOBAL COPY-PROP: Replacing reg %d in insn %d",
4170 regno, INSN_UID (insn));
4171 fprintf (gcse_file, " with reg %d\n", REGNO (src));
4172 }
4173
4174 /* The original insn setting reg_used may or may not now be
4175 deletable. We leave the deletion to flow. */
4176 /* FIXME: If it turns out that the insn isn't deletable,
4177 then we may have unnecessarily extended register lifetimes
4178 and made things worse. */
4179 }
4180 }
4181 }
4182
4183 return changed;
4184 }
4185
4186 /* Like find_used_regs, but avoid recording uses that appear in
4187 input-output contexts such as zero_extract or pre_dec. This
4188 restricts the cases we consider to those for which local cprop
4189 can legitimately make replacements. */
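/* For instance, the register in an auto-increment address such as
   (mem (pre_dec (reg 100))) is both read and modified, so replacing it
   would be invalid; such uses are skipped here.  */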
4190
4191 static void
4192 local_cprop_find_used_regs (rtx *xptr, void *data)
4193 {
4194 rtx x = *xptr;
4195
4196 if (x == 0)
4197 return;
4198
4199 switch (GET_CODE (x))
4200 {
4201 case ZERO_EXTRACT:
4202 case SIGN_EXTRACT:
4203 case STRICT_LOW_PART:
4204 return;
4205
4206 case PRE_DEC:
4207 case PRE_INC:
4208 case POST_DEC:
4209 case POST_INC:
4210 case PRE_MODIFY:
4211 case POST_MODIFY:
4212 /* Can only legitimately appear this early in the context of
4213 stack pushes for function arguments, but handle all of the
4214 codes nonetheless. */
4215 return;
4216
4217 case SUBREG:
4218 /* Setting a subreg of a register larger than word_mode leaves
4219 the non-written words unchanged. */
4220 if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) > BITS_PER_WORD)
4221 return;
4222 break;
4223
4224 default:
4225 break;
4226 }
4227
4228 find_used_regs (xptr, data);
4229 }
4230
4231 /* LIBCALL_SP is a zero-terminated array of insns at the end of a libcall;
4232 their REG_EQUAL notes need updating. */
4233
4234 static bool
4235 do_local_cprop (rtx x, rtx insn, int alter_jumps, rtx *libcall_sp)
4236 {
4237 rtx newreg = NULL, newcnst = NULL;
4238
4239 /* Rule out USE instructions and ASM statements as we don't want to
4240 change the hard registers mentioned. */
4241 if (GET_CODE (x) == REG
4242 && (REGNO (x) >= FIRST_PSEUDO_REGISTER
4243 || (GET_CODE (PATTERN (insn)) != USE
4244 && asm_noperands (PATTERN (insn)) < 0)))
4245 {
4246 cselib_val *val = cselib_lookup (x, GET_MODE (x), 0);
4247 struct elt_loc_list *l;
4248
4249 if (!val)
4250 return false;
4251 for (l = val->locs; l; l = l->next)
4252 {
4253 rtx this_rtx = l->loc;
4254 rtx note;
4255
4256 if (l->in_libcall)
4257 continue;
4258
4259 if (gcse_constant_p (this_rtx))
4260 newcnst = this_rtx;
4261 if (REG_P (this_rtx) && REGNO (this_rtx) >= FIRST_PSEUDO_REGISTER
4262 /* Don't copy propagate if the register has a REG_EQUIV note attached.
4263 At this point only function parameters should have
4264 REG_EQUIV notes, and if the argument slot is used somewhere
4265 explicitly, it means the address of the parameter has been taken,
4266 so we should not extend the lifetime of the pseudo. */
4267 && (!(note = find_reg_note (l->setting_insn, REG_EQUIV, NULL_RTX))
4268 || GET_CODE (XEXP (note, 0)) != MEM))
4269 newreg = this_rtx;
4270 }
4271 if (newcnst && constprop_register (insn, x, newcnst, alter_jumps))
4272 {
4273 /* If we find a case where we can't make the retval REG_EQUAL notes
4274 match the new register, we either have to abandon this replacement,
4275 or fix delete_trivially_dead_insns to preserve the setting insn,
4276 or make it delete the REG_EQUAL note, and fix up all passes that
4277 require the REG_EQUAL note there. */
4278 if (!adjust_libcall_notes (x, newcnst, insn, libcall_sp))
4279 abort ();
4280 if (gcse_file != NULL)
4281 {
4282 fprintf (gcse_file, "LOCAL CONST-PROP: Replacing reg %d in ",
4283 REGNO (x));
4284 fprintf (gcse_file, "insn %d with constant ",
4285 INSN_UID (insn));
4286 print_rtl (gcse_file, newcnst);
4287 fprintf (gcse_file, "\n");
4288 }
4289 const_prop_count++;
4290 return true;
4291 }
4292 else if (newreg && newreg != x && try_replace_reg (x, newreg, insn))
4293 {
4294 adjust_libcall_notes (x, newreg, insn, libcall_sp);
4295 if (gcse_file != NULL)
4296 {
4297 fprintf (gcse_file,
4298 "LOCAL COPY-PROP: Replacing reg %d in insn %d",
4299 REGNO (x), INSN_UID (insn));
4300 fprintf (gcse_file, " with reg %d\n", REGNO (newreg));
4301 }
4302 copy_prop_count++;
4303 return true;
4304 }
4305 }
4306 return false;
4307 }
4308
4309 /* LIBCALL_SP is a zero-terminated array of insns at the end of a libcall;
4310 their REG_EQUAL notes need updating to reflect that OLDREG has been
4311 replaced with NEWVAL in INSN. Return true if all substitutions could
4312 be made. */
4313 static bool
4314 adjust_libcall_notes (rtx oldreg, rtx newval, rtx insn, rtx *libcall_sp)
4315 {
4316 rtx end;
4317
4318 while ((end = *libcall_sp++))
4319 {
4320 rtx note = find_reg_equal_equiv_note (end);
4321
4322 if (! note)
4323 continue;
4324
4325 if (REG_P (newval))
4326 {
4327 if (reg_set_between_p (newval, PREV_INSN (insn), end))
4328 {
4329 do
4330 {
4331 note = find_reg_equal_equiv_note (end);
4332 if (! note)
4333 continue;
4334 if (reg_mentioned_p (newval, XEXP (note, 0)))
4335 return false;
4336 }
4337 while ((end = *libcall_sp++));
4338 return true;
4339 }
4340 }
4341 XEXP (note, 0) = replace_rtx (XEXP (note, 0), oldreg, newval);
4342 insn = end;
4343 }
4344 return true;
4345 }
4346
4347 #define MAX_NESTED_LIBCALLS 9
4348
4349 static void
4350 local_cprop_pass (int alter_jumps)
4351 {
4352 rtx insn;
4353 struct reg_use *reg_used;
4354 rtx libcall_stack[MAX_NESTED_LIBCALLS + 1], *libcall_sp;
4355 bool changed = false;
4356
4357 cselib_init ();
4358 libcall_sp = &libcall_stack[MAX_NESTED_LIBCALLS];
4359 *libcall_sp = 0;
4360 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
4361 {
4362 if (INSN_P (insn))
4363 {
4364 rtx note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
4365
4366 if (note)
4367 {
4368 if (libcall_sp == libcall_stack)
4369 abort ();
4370 *--libcall_sp = XEXP (note, 0);
4371 }
4372 note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
4373 if (note)
4374 libcall_sp++;
4375 note = find_reg_equal_equiv_note (insn);
4376 do
4377 {
4378 reg_use_count = 0;
4379 note_uses (&PATTERN (insn), local_cprop_find_used_regs, NULL);
4380 if (note)
4381 local_cprop_find_used_regs (&XEXP (note, 0), NULL);
4382
4383 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
4384 reg_used++, reg_use_count--)
4385 if (do_local_cprop (reg_used->reg_rtx, insn, alter_jumps,
4386 libcall_sp))
4387 {
4388 changed = true;
4389 break;
4390 }
4391 if (INSN_DELETED_P (insn))
4392 break;
4393 }
4394 while (reg_use_count);
4395 }
4396 cselib_process_insn (insn);
4397 }
4398 cselib_finish ();
4399 /* Global analysis may get into infinite loops for unreachable blocks. */
4400 if (changed && alter_jumps)
4401 {
4402 delete_unreachable_blocks ();
4403 free_reg_set_mem ();
4404 alloc_reg_set_mem (max_reg_num ());
4405 compute_sets (get_insns ());
4406 }
4407 }
4408
4409 /* Forward propagate copies. This includes copies and constants. Return
4410 nonzero if a change was made. */
4411
4412 static int
4413 cprop (int alter_jumps)
4414 {
4415 int changed;
4416 basic_block bb;
4417 rtx insn;
4418
4419 /* Note we start at block 1. */
4420 if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
4421 {
4422 if (gcse_file != NULL)
4423 fprintf (gcse_file, "\n");
4424 return 0;
4425 }
4426
4427 changed = 0;
4428 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb)
4429 {
4430 /* Reset tables used to keep track of what's still valid [since the
4431 start of the block]. */
4432 reset_opr_set_tables ();
4433
4434 for (insn = bb->head;
4435 insn != NULL && insn != NEXT_INSN (bb->end);
4436 insn = NEXT_INSN (insn))
4437 if (INSN_P (insn))
4438 {
4439 changed |= cprop_insn (insn, alter_jumps);
4440
4441 /* Keep track of everything modified by this insn. */
4442 /* ??? Need to be careful w.r.t. mods done to INSN. Don't
4443 call mark_oprs_set if we turned the insn into a NOTE. */
4444 if (GET_CODE (insn) != NOTE)
4445 mark_oprs_set (insn);
4446 }
4447 }
4448
4449 if (gcse_file != NULL)
4450 fprintf (gcse_file, "\n");
4451
4452 return changed;
4453 }
4454
4455 /* Similar to get_condition, only the resulting condition must be
4456 valid at JUMP, instead of at EARLIEST.
4457
4458 This differs from noce_get_condition in ifcvt.c in that we prefer not to
4459 settle for the condition variable in the jump instruction being integral.
4460 We prefer to be able to record the value of a user variable, rather than
4461 the value of a temporary used in a condition. This could be solved by
4462 recording the value of *every* register scanned by canonicalize_condition,
4463 but this would require some code reorganization. */
4464
4465 rtx
4466 fis_get_condition (rtx jump)
4467 {
4468 rtx cond, set, tmp, insn, earliest;
4469 bool reverse;
4470
4471 if (! any_condjump_p (jump))
4472 return NULL_RTX;
4473
4474 set = pc_set (jump);
4475 cond = XEXP (SET_SRC (set), 0);
4476
4477 /* If this branches to JUMP_LABEL when the condition is false,
4478 reverse the condition. */
4479 reverse = (GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
4480 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump));
4481
4482 /* Use canonicalize_condition to do the dirty work of manipulating
4483 MODE_CC values and COMPARE rtx codes. */
4484 tmp = canonicalize_condition (jump, cond, reverse, &earliest, NULL_RTX,
4485 false);
4486 if (!tmp)
4487 return NULL_RTX;
4488
4489 /* Verify that the given condition is valid at JUMP by virtue of not
4490 having been modified since EARLIEST. */
4491 for (insn = earliest; insn != jump; insn = NEXT_INSN (insn))
4492 if (INSN_P (insn) && modified_in_p (tmp, insn))
4493 break;
4494 if (insn == jump)
4495 return tmp;
4496
4497 /* The condition was modified. See if we can get a partial result
4498 that doesn't follow all the reversals. Perhaps combine can fold
4499 them together later. */
4500 tmp = XEXP (tmp, 0);
4501 if (!REG_P (tmp) || GET_MODE_CLASS (GET_MODE (tmp)) != MODE_INT)
4502 return NULL_RTX;
4503 tmp = canonicalize_condition (jump, cond, reverse, &earliest, tmp,
4504 false);
4505 if (!tmp)
4506 return NULL_RTX;
4507
4508 /* For sanity's sake, re-validate the new result. */
4509 for (insn = earliest; insn != jump; insn = NEXT_INSN (insn))
4510 if (INSN_P (insn) && modified_in_p (tmp, insn))
4511 return NULL_RTX;
4512
4513 return tmp;
4514 }
4515
4516 /* Find the implicit sets of a function. An "implicit set" is a constraint
4517 on the value of a variable, implied by a conditional jump. For example,
4518 following "if (x == 2)", the then branch may be optimized as though the
4519 conditional performed an "explicit set", in this example, "x = 2". This
4520 function records the set patterns that are implicit at the start of each
4521 basic block. */
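/* For the "if (x == 2)" example above, the recorded pattern is
   (set (reg X) (const_int 2)), where (reg X) holds x; it is stored in
   implicit_sets[] indexed by the block reached only when the comparison
   is known to be true.  */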
4522
4523 static void
4524 find_implicit_sets (void)
4525 {
4526 basic_block bb, dest;
4527 unsigned int count;
4528 rtx cond, new;
4529
4530 count = 0;
4531 FOR_EACH_BB (bb)
4532 /* Check for more than one successor. */
4533 if (bb->succ && bb->succ->succ_next)
4534 {
4535 cond = fis_get_condition (bb->end);
4536
4537 if (cond
4538 && (GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
4539 && GET_CODE (XEXP (cond, 0)) == REG
4540 && REGNO (XEXP (cond, 0)) >= FIRST_PSEUDO_REGISTER
4541 && gcse_constant_p (XEXP (cond, 1)))
4542 {
4543 dest = GET_CODE (cond) == EQ ? BRANCH_EDGE (bb)->dest
4544 : FALLTHRU_EDGE (bb)->dest;
4545
4546 if (dest && ! dest->pred->pred_next
4547 && dest != EXIT_BLOCK_PTR)
4548 {
4549 new = gen_rtx_SET (VOIDmode, XEXP (cond, 0),
4550 XEXP (cond, 1));
4551 implicit_sets[dest->index] = new;
4552 if (gcse_file)
4553 {
4554 fprintf(gcse_file, "Implicit set of reg %d in ",
4555 REGNO (XEXP (cond, 0)));
4556 fprintf(gcse_file, "basic block %d\n", dest->index);
4557 }
4558 count++;
4559 }
4560 }
4561 }
4562
4563 if (gcse_file)
4564 fprintf (gcse_file, "Found %d implicit sets\n", count);
4565 }
4566
4567 /* Perform one copy/constant propagation pass.
4568 PASS is the pass count. If CPROP_JUMPS is true, perform constant
4569 propagation into conditional jumps. If BYPASS_JUMPS is true,
4570 perform conditional jump bypassing optimizations. */
4571
4572 static int
4573 one_cprop_pass (int pass, int cprop_jumps, int bypass_jumps)
4574 {
4575 int changed = 0;
4576
4577 const_prop_count = 0;
4578 copy_prop_count = 0;
4579
4580 local_cprop_pass (cprop_jumps);
4581
4582 /* Determine implicit sets. */
4583 implicit_sets = xcalloc (last_basic_block, sizeof (rtx));
4584 find_implicit_sets ();
4585
4586 alloc_hash_table (max_cuid, &set_hash_table, 1);
4587 compute_hash_table (&set_hash_table);
4588
4589 /* Free implicit_sets before peak usage. */
4590 free (implicit_sets);
4591 implicit_sets = NULL;
4592
4593 if (gcse_file)
4594 dump_hash_table (gcse_file, "SET", &set_hash_table);
4595 if (set_hash_table.n_elems > 0)
4596 {
4597 alloc_cprop_mem (last_basic_block, set_hash_table.n_elems);
4598 compute_cprop_data ();
4599 changed = cprop (cprop_jumps);
4600 if (bypass_jumps)
4601 changed |= bypass_conditional_jumps ();
4602 free_cprop_mem ();
4603 }
4604
4605 free_hash_table (&set_hash_table);
4606
4607 if (gcse_file)
4608 {
4609 fprintf (gcse_file, "CPROP of %s, pass %d: %d bytes needed, ",
4610 current_function_name, pass, bytes_used);
4611 fprintf (gcse_file, "%d const props, %d copy props\n\n",
4612 const_prop_count, copy_prop_count);
4613 }
4614 /* Global analysis may get into infinite loops for unreachable blocks. */
4615 if (changed && cprop_jumps)
4616 delete_unreachable_blocks ();
4617
4618 return changed;
4619 }
4620 \f
4621 /* Bypass conditional jumps. */
4622
4623 /* The value of last_basic_block at the beginning of the jump_bypass
4624 pass. The use of redirect_edge_and_branch_force may introduce new
4625 basic blocks, but the data flow analysis is only valid for basic
4626 block indices less than bypass_last_basic_block. */
4627
4628 static int bypass_last_basic_block;
4629
4630 /* Find a set of REGNO to a constant that is available at the end of basic
4631 block BB. Returns NULL if no such set is found. Based heavily upon
4632 find_avail_set. */
4633
4634 static struct expr *
4635 find_bypass_set (int regno, int bb)
4636 {
4637 struct expr *result = 0;
4638
4639 for (;;)
4640 {
4641 rtx src;
4642 struct expr *set = lookup_set (regno, &set_hash_table);
4643
4644 while (set)
4645 {
4646 if (TEST_BIT (cprop_avout[bb], set->bitmap_index))
4647 break;
4648 set = next_set (regno, set);
4649 }
4650
4651 if (set == 0)
4652 break;
4653
4654 if (GET_CODE (set->expr) != SET)
4655 abort ();
4656
4657 src = SET_SRC (set->expr);
4658 if (gcse_constant_p (src))
4659 result = set;
4660
4661 if (GET_CODE (src) != REG)
4662 break;
4663
4664 regno = REGNO (src);
4665 }
4666 return result;
4667 }
4668
4669
4670 /* Subroutine of bypass_block that checks whether a pseudo is killed by
4671 any of the instructions inserted on an edge. Jump bypassing places
4672 condition code setters on CFG edges using insert_insn_on_edge. This
4673 function is required to check that our data flow analysis is still
4674 valid prior to commit_edge_insertions. */
4675
4676 static bool
4677 reg_killed_on_edge (rtx reg, edge e)
4678 {
4679 rtx insn;
4680
4681 for (insn = e->insns; insn; insn = NEXT_INSN (insn))
4682 if (INSN_P (insn) && reg_set_p (reg, insn))
4683 return true;
4684
4685 return false;
4686 }
4687
4688 /* Subroutine of bypass_conditional_jumps that attempts to bypass the given
4689 basic block BB which has more than one predecessor. If not NULL, SETCC
4690 is the first instruction of BB, which is immediately followed by JUMP_INSN
4691 JUMP. Otherwise, SETCC is NULL, and JUMP is the first insn of BB.
4692 Returns nonzero if a change was made.
4693
4694 During the jump bypassing pass, we may place copies of SETCC instructions
4695 on CFG edges. The following routine must be careful to pay attention to
4696 these inserted insns when performing its transformations. */
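/* For example, if the set reaching the end of E->src is
   (set (reg 100) (const_int 0)) and JUMP is
   (set (pc) (if_then_else (eq (reg 100) (const_int 0)) (label_ref L) (pc))),
   the condition is known to be true along E, so E can be redirected
   directly to the block containing L.  */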
4697
4698 static int
4699 bypass_block (basic_block bb, rtx setcc, rtx jump)
4700 {
4701 rtx insn, note;
4702 edge e, enext, edest;
4703 int i, change;
4704 int may_be_loop_header;
4705
4706 insn = (setcc != NULL) ? setcc : jump;
4707
4708 /* Determine set of register uses in INSN. */
4709 reg_use_count = 0;
4710 note_uses (&PATTERN (insn), find_used_regs, NULL);
4711 note = find_reg_equal_equiv_note (insn);
4712 if (note)
4713 find_used_regs (&XEXP (note, 0), NULL);
4714
4715 may_be_loop_header = false;
4716 for (e = bb->pred; e; e = e->pred_next)
4717 if (e->flags & EDGE_DFS_BACK)
4718 {
4719 may_be_loop_header = true;
4720 break;
4721 }
4722
4723 change = 0;
4724 for (e = bb->pred; e; e = enext)
4725 {
4726 enext = e->pred_next;
4727 if (e->flags & EDGE_COMPLEX)
4728 continue;
4729
4730 /* We can't redirect edges from new basic blocks. */
4731 if (e->src->index >= bypass_last_basic_block)
4732 continue;
4733
4734 /* The irreducible loops created by redirecting edges entering the
4735 loop from outside would decrease the effectiveness of some of the
4736 following optimizations, so prevent this. */
4737 if (may_be_loop_header
4738 && !(e->flags & EDGE_DFS_BACK))
4739 continue;
4740
4741 for (i = 0; i < reg_use_count; i++)
4742 {
4743 struct reg_use *reg_used = &reg_use_table[i];
4744 unsigned int regno = REGNO (reg_used->reg_rtx);
4745 basic_block dest, old_dest;
4746 struct expr *set;
4747 rtx src, new;
4748
4749 if (regno >= max_gcse_regno)
4750 continue;
4751
4752 set = find_bypass_set (regno, e->src->index);
4753
4754 if (! set)
4755 continue;
4756
4757 /* Check the data flow is valid after edge insertions. */
4758 if (e->insns && reg_killed_on_edge (reg_used->reg_rtx, e))
4759 continue;
4760
4761 src = SET_SRC (pc_set (jump));
4762
4763 if (setcc != NULL)
4764 src = simplify_replace_rtx (src,
4765 SET_DEST (PATTERN (setcc)),
4766 SET_SRC (PATTERN (setcc)));
4767
4768 new = simplify_replace_rtx (src, reg_used->reg_rtx,
4769 SET_SRC (set->expr));
4770
4771 /* Jump bypassing may have already placed instructions on
4772 edges of the CFG. We can't bypass an outgoing edge that
4773 has instructions associated with it, as these insns won't
4774 get executed if the incoming edge is redirected. */
4775
4776 if (new == pc_rtx)
4777 {
4778 edest = FALLTHRU_EDGE (bb);
4779 dest = edest->insns ? NULL : edest->dest;
4780 }
4781 else if (GET_CODE (new) == LABEL_REF)
4782 {
4783 dest = BLOCK_FOR_INSN (XEXP (new, 0));
4784 /* Don't bypass edges containing instructions. */
4785 for (edest = bb->succ; edest; edest = edest->succ_next)
4786 if (edest->dest == dest && edest->insns)
4787 {
4788 dest = NULL;
4789 break;
4790 }
4791 }
4792 else
4793 dest = NULL;
4794
4795 old_dest = e->dest;
4796 if (dest != NULL
4797 && dest != old_dest
4798 && dest != EXIT_BLOCK_PTR)
4799 {
4800 redirect_edge_and_branch_force (e, dest);
4801
4802 /* Copy the register setter to the redirected edge.
4803 Don't copy CC0 setters, as CC0 is dead after jump. */
4804 if (setcc)
4805 {
4806 rtx pat = PATTERN (setcc);
4807 if (!CC0_P (SET_DEST (pat)))
4808 insert_insn_on_edge (copy_insn (pat), e);
4809 }
4810
4811 if (gcse_file != NULL)
4812 {
4813 fprintf (gcse_file, "JUMP-BYPASS: Proved reg %d in jump_insn %d equals constant ",
4814 regno, INSN_UID (jump));
4815 print_rtl (gcse_file, SET_SRC (set->expr));
4816 fprintf (gcse_file, "\nBypass edge from %d->%d to %d\n",
4817 e->src->index, old_dest->index, dest->index);
4818 }
4819 change = 1;
4820 break;
4821 }
4822 }
4823 }
4824 return change;
4825 }
4826
4827 /* Find basic blocks with more than one predecessor that only contain a
4828 single conditional jump. If the result of the comparison is known at
4829 compile-time from any incoming edge, redirect that edge to the
4830 appropriate target. Returns nonzero if a change was made.
4831
4832 This function is now mis-named, because we also handle indirect jumps. */
4833
4834 static int
4835 bypass_conditional_jumps (void)
4836 {
4837 basic_block bb;
4838 int changed;
4839 rtx setcc;
4840 rtx insn;
4841 rtx dest;
4842
4843 /* Note we start at block 1. */
4844 if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
4845 return 0;
4846
4847 bypass_last_basic_block = last_basic_block;
4848 mark_dfs_back_edges ();
4849
4850 changed = 0;
4851 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb,
4852 EXIT_BLOCK_PTR, next_bb)
4853 {
4854 /* Check for more than one predecessor. */
4855 if (bb->pred && bb->pred->pred_next)
4856 {
4857 setcc = NULL_RTX;
4858 for (insn = bb->head;
4859 insn != NULL && insn != NEXT_INSN (bb->end);
4860 insn = NEXT_INSN (insn))
4861 if (GET_CODE (insn) == INSN)
4862 {
4863 if (setcc)
4864 break;
4865 if (GET_CODE (PATTERN (insn)) != SET)
4866 break;
4867
4868 dest = SET_DEST (PATTERN (insn));
4869 if (REG_P (dest) || CC0_P (dest))
4870 setcc = insn;
4871 else
4872 break;
4873 }
4874 else if (GET_CODE (insn) == JUMP_INSN)
4875 {
4876 if ((any_condjump_p (insn) || computed_jump_p (insn))
4877 && onlyjump_p (insn))
4878 changed |= bypass_block (bb, setcc, insn);
4879 break;
4880 }
4881 else if (INSN_P (insn))
4882 break;
4883 }
4884 }
4885
4886 /* If we bypassed any register setting insns, we inserted a
4887 copy on the redirected edge. These need to be committed. */
4888 if (changed)
4889 commit_edge_insertions ();
4890
4891 return changed;
4892 }
4893 \f
4894 /* Compute PRE+LCM working variables. */
4895
4896 /* Local properties of expressions. */
4897 /* Nonzero for expressions that are transparent in the block. */
4898 static sbitmap *transp;
4899
4900 /* Nonzero for expressions that are transparent at the end of the block.
4901 This is only zero for expressions killed by an abnormal critical edge
4902 created by a call. */
4903 static sbitmap *transpout;
4904
4905 /* Nonzero for expressions that are computed (available) in the block. */
4906 static sbitmap *comp;
4907
4908 /* Nonzero for expressions that are locally anticipatable in the block. */
4909 static sbitmap *antloc;
4910
4911 /* Nonzero for expressions where this block is an optimal computation
4912 point. */
4913 static sbitmap *pre_optimal;
4914
4915 /* Nonzero for expressions which are redundant in a particular block. */
4916 static sbitmap *pre_redundant;
4917
4918 /* Nonzero for expressions which should be inserted on a specific edge. */
4919 static sbitmap *pre_insert_map;
4920
4921 /* Nonzero for expressions which should be deleted in a specific block. */
4922 static sbitmap *pre_delete_map;
4923
4924 /* Contains the edge_list returned by pre_edge_lcm. */
4925 static struct edge_list *edge_list;
4926
4927 /* Redundant insns. */
4928 static sbitmap pre_redundant_insns;
4929
4930 /* Allocate vars used for PRE analysis. */
4931
4932 static void
4933 alloc_pre_mem (int n_blocks, int n_exprs)
4934 {
4935 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
4936 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
4937 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
4938
4939 pre_optimal = NULL;
4940 pre_redundant = NULL;
4941 pre_insert_map = NULL;
4942 pre_delete_map = NULL;
4943 ae_in = NULL;
4944 ae_out = NULL;
4945 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
4946
4947 /* pre_insert and pre_delete are allocated later. */
4948 }
4949
4950 /* Free vars used for PRE analysis. */
4951
4952 static void
4953 free_pre_mem (void)
4954 {
4955 sbitmap_vector_free (transp);
4956 sbitmap_vector_free (comp);
4957
4958 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
4959
4960 if (pre_optimal)
4961 sbitmap_vector_free (pre_optimal);
4962 if (pre_redundant)
4963 sbitmap_vector_free (pre_redundant);
4964 if (pre_insert_map)
4965 sbitmap_vector_free (pre_insert_map);
4966 if (pre_delete_map)
4967 sbitmap_vector_free (pre_delete_map);
4968 if (ae_in)
4969 sbitmap_vector_free (ae_in);
4970 if (ae_out)
4971 sbitmap_vector_free (ae_out);
4972
4973 transp = comp = NULL;
4974 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
4975 ae_in = ae_out = NULL;
4976 }
4977
4978 /* Top level routine to do the dataflow analysis needed by PRE. */
4979
4980 static void
4981 compute_pre_data (void)
4982 {
4983 sbitmap trapping_expr;
4984 basic_block bb;
4985 unsigned int ui;
4986
4987 compute_local_properties (transp, comp, antloc, &expr_hash_table);
4988 sbitmap_vector_zero (ae_kill, last_basic_block);
4989
4990 /* Collect expressions which might trap. */
4991 trapping_expr = sbitmap_alloc (expr_hash_table.n_elems);
4992 sbitmap_zero (trapping_expr);
4993 for (ui = 0; ui < expr_hash_table.size; ui++)
4994 {
4995 struct expr *e;
4996 for (e = expr_hash_table.table[ui]; e != NULL; e = e->next_same_hash)
4997 if (may_trap_p (e->expr))
4998 SET_BIT (trapping_expr, e->bitmap_index);
4999 }
5000
5001 /* Compute ae_kill for each basic block using:
5002
5003 ~(TRANSP | COMP)
5004
5005 This is significantly faster than compute_ae_kill. */
5006
5007 FOR_EACH_BB (bb)
5008 {
5009 edge e;
5010
5011 /* If the current block is the destination of an abnormal edge, we
5012 kill all trapping expressions because we won't be able to properly
5013 place the instruction on the edge. So make them neither
5014 anticipatable nor transparent. This is fairly conservative. */
5015 for (e = bb->pred; e ; e = e->pred_next)
5016 if (e->flags & EDGE_ABNORMAL)
5017 {
5018 sbitmap_difference (antloc[bb->index], antloc[bb->index], trapping_expr);
5019 sbitmap_difference (transp[bb->index], transp[bb->index], trapping_expr);
5020 break;
5021 }
5022
5023 sbitmap_a_or_b (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
5024 sbitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
5025 }
5026
5027 edge_list = pre_edge_lcm (gcse_file, expr_hash_table.n_elems, transp, comp, antloc,
5028 ae_kill, &pre_insert_map, &pre_delete_map);
5029 sbitmap_vector_free (antloc);
5030 antloc = NULL;
5031 sbitmap_vector_free (ae_kill);
5032 ae_kill = NULL;
5033 sbitmap_free (trapping_expr);
5034 }
5035 \f
5036 /* PRE utilities */
5037
5038 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
5039 block BB.
5040
5041 VISITED is a pointer to a working buffer for tracking which BB's have
5042 been visited. It is NULL for the top-level call.
5043
5044 We treat reaching expressions that go through blocks containing the same
5045 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
5046 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
5047 2 as not reaching. The intent is to improve the probability of finding
5048 only one reaching expression and to reduce register lifetimes by picking
5049 the closest such expression. */
5050
5051 static int
5052 pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr, basic_block bb, char *visited)
5053 {
5054 edge pred;
5055
5056 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
5057 {
5058 basic_block pred_bb = pred->src;
5059
5060 if (pred->src == ENTRY_BLOCK_PTR
5061 /* Has this predecessor already been visited? */
5062 || visited[pred_bb->index])
5063 ;/* Nothing to do. */
5064
5065 /* Does this predecessor generate this expression? */
5066 else if (TEST_BIT (comp[pred_bb->index], expr->bitmap_index))
5067 {
5068 /* Is this the occurrence we're looking for?
5069 Note that there's only one generating occurrence per block
5070 so we just need to check the block number. */
5071 if (occr_bb == pred_bb)
5072 return 1;
5073
5074 visited[pred_bb->index] = 1;
5075 }
5076 /* Ignore this predecessor if it kills the expression. */
5077 else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index))
5078 visited[pred_bb->index] = 1;
5079
5080 /* Neither gen nor kill. */
5081 else
5082 {
5083 visited[pred_bb->index] = 1;
5084 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
5085 return 1;
5086 }
5087 }
5088
5089 /* All paths have been checked. */
5090 return 0;
5091 }
5092
5093 /* The wrapper for pre_expr_reaches_here_p_work that ensures that any
5094 memory allocated for that function is returned. */
5095
5096 static int
5097 pre_expr_reaches_here_p (basic_block occr_bb, struct expr *expr, basic_block bb)
5098 {
5099 int rval;
5100 char *visited = xcalloc (last_basic_block, 1);
5101
5102 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
5103
5104 free (visited);
5105 return rval;
5106 }
5107 \f
5108
5109 /* Given an expr, generate RTL which we can insert at the end of a BB,
5110 or on an edge. Set the block number of any insns generated to
5111 the value of BB. */
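/* E.g. for an expression (plus (reg 100) (const_int 4)) with reaching_reg
   (reg 107), the generated sequence is roughly
   (set (reg 107) (plus (reg 100) (const_int 4))), possibly with CLOBBERs
   added when the insn is recognized.  */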
5112
5113 static rtx
5114 process_insert_insn (struct expr *expr)
5115 {
5116 rtx reg = expr->reaching_reg;
5117 rtx exp = copy_rtx (expr->expr);
5118 rtx pat;
5119
5120 start_sequence ();
5121
5122 /* If the expression is something that's an operand, like a constant,
5123 just copy it to a register. */
5124 if (general_operand (exp, GET_MODE (reg)))
5125 emit_move_insn (reg, exp);
5126
5127 /* Otherwise, make a new insn to compute this expression and make sure the
5128 insn will be recognized (this also adds any needed CLOBBERs). Copy the
5129 expression to make sure we don't have any sharing issues. */
5130 else if (insn_invalid_p (emit_insn (gen_rtx_SET (VOIDmode, reg, exp))))
5131 abort ();
5132
5133 pat = get_insns ();
5134 end_sequence ();
5135
5136 return pat;
5137 }
5138
5139 /* Add EXPR to the end of basic block BB.
5140
5141 This is used by both PRE and code hoisting.
5142
5143 For PRE, we want to verify that the expr is either transparent
5144 or locally anticipatable in the target block. This check makes
5145 no sense for code hoisting. */
5146
5147 static void
5148 insert_insn_end_bb (struct expr *expr, basic_block bb, int pre)
5149 {
5150 rtx insn = bb->end;
5151 rtx new_insn;
5152 rtx reg = expr->reaching_reg;
5153 int regno = REGNO (reg);
5154 rtx pat, pat_end;
5155
5156 pat = process_insert_insn (expr);
5157 if (pat == NULL_RTX || ! INSN_P (pat))
5158 abort ();
5159
5160 pat_end = pat;
5161 while (NEXT_INSN (pat_end) != NULL_RTX)
5162 pat_end = NEXT_INSN (pat_end);
5163
5164 /* If the last insn is a jump, insert EXPR in front [taking care to
5165 handle cc0, etc. properly]. Similarly we need to take care of trapping
5166 instructions in the presence of non-call exceptions. */
5167
5168 if (GET_CODE (insn) == JUMP_INSN
5169 || (GET_CODE (insn) == INSN
5170 && (bb->succ->succ_next || (bb->succ->flags & EDGE_ABNORMAL))))
5171 {
5172 #ifdef HAVE_cc0
5173 rtx note;
5174 #endif
5175 /* It should always be the case that we can put these instructions
5176 anywhere in the basic block when performing PRE optimizations.
5177 Check this. */
5178 if (GET_CODE (insn) == INSN && pre
5179 && !TEST_BIT (antloc[bb->index], expr->bitmap_index)
5180 && !TEST_BIT (transp[bb->index], expr->bitmap_index))
5181 abort ();
5182
5183 /* If this is a jump table, then we can't insert stuff here. Since
5184 we know the previous real insn must be the tablejump, we insert
5185 the new instruction just before the tablejump. */
5186 if (GET_CODE (PATTERN (insn)) == ADDR_VEC
5187 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
5188 insn = prev_real_insn (insn);
5189
5190 #ifdef HAVE_cc0
5191 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
5192 if cc0 isn't set. */
5193 note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
5194 if (note)
5195 insn = XEXP (note, 0);
5196 else
5197 {
5198 rtx maybe_cc0_setter = prev_nonnote_insn (insn);
5199 if (maybe_cc0_setter
5200 && INSN_P (maybe_cc0_setter)
5201 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
5202 insn = maybe_cc0_setter;
5203 }
5204 #endif
5205 /* FIXME: What if something in cc0/jump uses value set in new insn? */
5206 new_insn = emit_insn_before (pat, insn);
5207 }
5208
5209 /* Likewise if the last insn is a call, as will happen in the presence
5210 of exception handling. */
5211 else if (GET_CODE (insn) == CALL_INSN
5212 && (bb->succ->succ_next || (bb->succ->flags & EDGE_ABNORMAL)))
5213 {
5214 /* Keeping in mind SMALL_REGISTER_CLASSES and parameters in registers,
5215 we search backward and place the instructions before the first
5216 parameter is loaded. Do this for everyone for consistency and a
5217 presumption that we'll get better code elsewhere as well.
5218
5219 It should always be the case that we can put these instructions
5220 anywhere in the basic block when performing PRE optimizations.
5221 Check this. */
5222
5223 if (pre
5224 && !TEST_BIT (antloc[bb->index], expr->bitmap_index)
5225 && !TEST_BIT (transp[bb->index], expr->bitmap_index))
5226 abort ();
5227
5228 /* Since different machines initialize their parameter registers
5229 in different orders, assume nothing. Collect the set of all
5230 parameter registers. */
5231 insn = find_first_parameter_load (insn, bb->head);
5232
5233 /* If we found all the parameter loads, then we want to insert
5234 before the first parameter load.
5235
5236 If we did not find all the parameter loads, then we might have
5237 stopped on the head of the block, which could be a CODE_LABEL.
5238 If we inserted before the CODE_LABEL, then we would be putting
5239 the insn in the wrong basic block. In that case, put the insn
5240 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
5241 while (GET_CODE (insn) == CODE_LABEL
5242 || NOTE_INSN_BASIC_BLOCK_P (insn))
5243 insn = NEXT_INSN (insn);
5244
5245 new_insn = emit_insn_before (pat, insn);
5246 }
5247 else
5248 new_insn = emit_insn_after (pat, insn);
5249
5250 while (1)
5251 {
5252 if (INSN_P (pat))
5253 {
5254 add_label_notes (PATTERN (pat), new_insn);
5255 note_stores (PATTERN (pat), record_set_info, pat);
5256 }
5257 if (pat == pat_end)
5258 break;
5259 pat = NEXT_INSN (pat);
5260 }
5261
5262 gcse_create_count++;
5263
5264 if (gcse_file)
5265 {
5266 fprintf (gcse_file, "PRE/HOIST: end of bb %d, insn %d, ",
5267 bb->index, INSN_UID (new_insn));
5268 fprintf (gcse_file, "copying expression %d to reg %d\n",
5269 expr->bitmap_index, regno);
5270 }
5271 }
5272
5273 /* Insert partially redundant expressions on edges in the CFG to make
5274 the expressions fully redundant. */
5275
5276 static int
5277 pre_edge_insert (struct edge_list *edge_list, struct expr **index_map)
5278 {
5279 int e, i, j, num_edges, set_size, did_insert = 0;
5280 sbitmap *inserted;
5281
5282 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
5283 if it reaches any of the deleted expressions. */
5284
5285 set_size = pre_insert_map[0]->size;
5286 num_edges = NUM_EDGES (edge_list);
5287 inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
5288 sbitmap_vector_zero (inserted, num_edges);
5289
5290 for (e = 0; e < num_edges; e++)
5291 {
5292 int indx;
5293 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
5294
5295 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
5296 {
5297 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
5298
5299 for (j = indx; insert && j < (int) expr_hash_table.n_elems; j++, insert >>= 1)
5300 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
5301 {
5302 struct expr *expr = index_map[j];
5303 struct occr *occr;
5304
5305 /* Now look at each deleted occurrence of this expression. */
5306 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
5307 {
5308 if (! occr->deleted_p)
5309 continue;
5310
5311 /* Insert this expression on this edge if it would
5312 reach the deleted occurrence in BB. */
5313 if (!TEST_BIT (inserted[e], j))
5314 {
5315 rtx insn;
5316 edge eg = INDEX_EDGE (edge_list, e);
5317
5318 /* We can't insert anything on an abnormal and
5319 critical edge, so we insert the insn at the end of
5320 the previous block. There are several alternatives
5321 detailed in Morgan's book, p. 277 (sec. 10.5), for
5322 handling this situation. This one is easiest for
5323 now. */
5324
5325 if ((eg->flags & EDGE_ABNORMAL) == EDGE_ABNORMAL)
5326 insert_insn_end_bb (index_map[j], bb, 0);
5327 else
5328 {
5329 insn = process_insert_insn (index_map[j]);
5330 insert_insn_on_edge (insn, eg);
5331 }
5332
5333 if (gcse_file)
5334 {
5335 fprintf (gcse_file, "PRE/HOIST: edge (%d,%d), ",
5336 bb->index,
5337 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
5338 fprintf (gcse_file, "copy expression %d\n",
5339 expr->bitmap_index);
5340 }
5341
5342 update_ld_motion_stores (expr);
5343 SET_BIT (inserted[e], j);
5344 did_insert = 1;
5345 gcse_create_count++;
5346 }
5347 }
5348 }
5349 }
5350 }
5351
5352 sbitmap_vector_free (inserted);
5353 return did_insert;
5354 }
5355
5356 /* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
5357 Given "old_reg <- expr" (INSN), instead of adding after it
5358 reaching_reg <- old_reg
5359 it's better to do the following:
5360 reaching_reg <- expr
5361 old_reg <- reaching_reg
5362 because this way copy propagation can discover additional PRE
5363 opportunities. But if this fails, we try the old way. */
5364
5365 static void
5366 pre_insert_copy_insn (struct expr *expr, rtx insn)
5367 {
5368 rtx reg = expr->reaching_reg;
5369 int regno = REGNO (reg);
5370 int indx = expr->bitmap_index;
5371 rtx pat = PATTERN (insn);
5372 rtx set, new_insn;
5373 rtx old_reg;
5374 int i;
5375
5376 /* This block matches the logic in hash_scan_insn. */
5377 if (GET_CODE (pat) == SET)
5378 set = pat;
5379 else if (GET_CODE (pat) == PARALLEL)
5380 {
5381 /* Search through the parallel looking for the set whose
5382 source was the expression that we're interested in. */
5383 set = NULL_RTX;
5384 for (i = 0; i < XVECLEN (pat, 0); i++)
5385 {
5386 rtx x = XVECEXP (pat, 0, i);
5387 if (GET_CODE (x) == SET
5388 && expr_equiv_p (SET_SRC (x), expr->expr))
5389 {
5390 set = x;
5391 break;
5392 }
5393 }
5394 }
5395 else
5396 abort ();
5397
5398 old_reg = SET_DEST (set);
5399
5400 /* Check if we can modify the set destination in the original insn. */
5401 if (validate_change (insn, &SET_DEST (set), reg, 0))
5402 {
5403 new_insn = gen_move_insn (old_reg, reg);
5404 new_insn = emit_insn_after (new_insn, insn);
5405
5406 /* Keep register set table up to date. */
5407 replace_one_set (REGNO (old_reg), insn, new_insn);
5408 record_one_set (regno, insn);
5409 }
5410 else
5411 {
5412 new_insn = gen_move_insn (reg, old_reg);
5413 new_insn = emit_insn_after (new_insn, insn);
5414
5415 /* Keep register set table up to date. */
5416 record_one_set (regno, new_insn);
5417 }
5418
5419 gcse_create_count++;
5420
5421 if (gcse_file)
5422 fprintf (gcse_file,
5423 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
5424 BLOCK_NUM (insn), INSN_UID (new_insn), indx,
5425 INSN_UID (insn), regno);
5426 update_ld_motion_stores (expr);
5427 }
5428
5429 /* Copy available expressions that reach the redundant expression
5430 to `reaching_reg'. */
5431
5432 static void
5433 pre_insert_copies (void)
5434 {
5435 unsigned int i;
5436 struct expr *expr;
5437 struct occr *occr;
5438 struct occr *avail;
5439
5440 /* For each available expression in the table, copy the result to
5441 `reaching_reg' if the expression reaches a deleted one.
5442
5443 ??? The current algorithm is rather brute force.
5444 Need to do some profiling. */
5445
5446 for (i = 0; i < expr_hash_table.size; i++)
5447 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
5448 {
5449 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
5450 we don't want to insert a copy here because the expression may not
5451 really be redundant. So only insert an insn if the expression was
5452 deleted. This test also avoids further processing if the
5453 expression wasn't deleted anywhere. */
5454 if (expr->reaching_reg == NULL)
5455 continue;
5456
5457 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
5458 {
5459 if (! occr->deleted_p)
5460 continue;
5461
5462 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
5463 {
5464 rtx insn = avail->insn;
5465
5466 /* No need to handle this one if handled already. */
5467 if (avail->copied_p)
5468 continue;
5469
5470 /* Don't handle this one if it's a redundant one. */
5471 if (TEST_BIT (pre_redundant_insns, INSN_CUID (insn)))
5472 continue;
5473
5474 /* Or if the expression doesn't reach the deleted one. */
5475 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
5476 expr,
5477 BLOCK_FOR_INSN (occr->insn)))
5478 continue;
5479
5480 /* Copy the result of avail to reaching_reg. */
5481 pre_insert_copy_insn (expr, insn);
5482 avail->copied_p = 1;
5483 }
5484 }
5485 }
5486 }
5487
5488 /* Emit move from SRC to DEST noting the equivalence with expression computed
5489 in INSN. */
5490 static rtx
5491 gcse_emit_move_after (rtx src, rtx dest, rtx insn)
5492 {
5493 rtx new;
5494 rtx set = single_set (insn), set2;
5495 rtx note;
5496 rtx eqv;
5497
5498 /* This should never fail since we're creating a reg->reg copy
5499 we've verified to be valid. */
5500
5501 new = emit_insn_after (gen_move_insn (dest, src), insn);
5502
5503 /* Note the equivalence for local CSE pass. */
5504 set2 = single_set (new);
5505 if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
5506 return new;
5507 if ((note = find_reg_equal_equiv_note (insn)))
5508 eqv = XEXP (note, 0);
5509 else
5510 eqv = SET_SRC (set);
5511
5512 set_unique_reg_note (new, REG_EQUAL, copy_insn_1 (eqv));
5513
5514 return new;
5515 }
5516
5517 /* Delete redundant computations.
5518 Deletion is done by changing the insn to copy the `reaching_reg' of
5519 the expression into the result of the SET. It is left to later passes
5520 (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
5521
5522 Returns nonzero if a change is made. */
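
/* A minimal sketch of the rewrite performed below (register numbers are
   hypothetical, purely for illustration):

       (set (reg 70) (plus (reg 60) (reg 61)))   ;; fully redundant insn

   becomes a copy from the pseudo that holds the expression's value,

       (set (reg 70) (reg 90))                   ;; reg 90 is expr->reaching_reg

   which the later passes mentioned above propagate or eliminate.  */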
5523
5524 static int
5525 pre_delete (void)
5526 {
5527 unsigned int i;
5528 int changed;
5529 struct expr *expr;
5530 struct occr *occr;
5531
5532 changed = 0;
5533 for (i = 0; i < expr_hash_table.size; i++)
5534 for (expr = expr_hash_table.table[i];
5535 expr != NULL;
5536 expr = expr->next_same_hash)
5537 {
5538 int indx = expr->bitmap_index;
5539
5540 /* We only need to search antic_occr since we require
5541 ANTLOC != 0. */
5542
5543 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
5544 {
5545 rtx insn = occr->insn;
5546 rtx set;
5547 basic_block bb = BLOCK_FOR_INSN (insn);
5548
5549 /* We only delete insns that have a single_set. */
5550 if (TEST_BIT (pre_delete_map[bb->index], indx)
5551 && (set = single_set (insn)) != 0)
5552 {
5553 /* Create a pseudo-reg to store the result of reaching
5554 expressions into. Get the mode for the new pseudo from
5555 the mode of the original destination pseudo. */
5556 if (expr->reaching_reg == NULL)
5557 expr->reaching_reg
5558 = gen_reg_rtx (GET_MODE (SET_DEST (set)));
5559
5560 gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
5561 delete_insn (insn);
5562 occr->deleted_p = 1;
5563 SET_BIT (pre_redundant_insns, INSN_CUID (insn));
5564 changed = 1;
5565 gcse_subst_count++;
5566
5567 if (gcse_file)
5568 {
5569 fprintf (gcse_file,
5570 "PRE: redundant insn %d (expression %d) in ",
5571 INSN_UID (insn), indx);
5572 fprintf (gcse_file, "bb %d, reaching reg is %d\n",
5573 bb->index, REGNO (expr->reaching_reg));
5574 }
5575 }
5576 }
5577 }
5578
5579 return changed;
5580 }
5581
5582 /* Perform GCSE optimizations using PRE.
5583 This is called by one_pre_gcse_pass after all the dataflow analysis
5584 has been done.
5585
5586 This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
5587 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
5588 Compiler Design and Implementation.
5589
5590 ??? A new pseudo reg is created to hold the reaching expression. The nice
5591 thing about the classical approach is that it would try to use an existing
5592 reg. If the register can't be adequately optimized [i.e. we introduce
5593 reload problems], one could add a pass here to propagate the new register
5594 through the block.
5595
5596 ??? We don't handle single sets in PARALLELs because we're [currently] not
5597 able to copy the rest of the parallel when we insert copies to create full
5598 redundancies from partial redundancies. However, there's no reason why we
5599 can't handle PARALLELs in the cases where there are no partial
5600 redundancies. */
5601
5602 static int
5603 pre_gcse (void)
5604 {
5605 unsigned int i;
5606 int did_insert, changed;
5607 struct expr **index_map;
5608 struct expr *expr;
5609
5610 /* Compute a mapping from expression number (`bitmap_index') to
5611 hash table entry. */
5612
5613 index_map = xcalloc (expr_hash_table.n_elems, sizeof (struct expr *));
5614 for (i = 0; i < expr_hash_table.size; i++)
5615 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
5616 index_map[expr->bitmap_index] = expr;
5617
5618 /* Reset bitmap used to track which insns are redundant. */
5619 pre_redundant_insns = sbitmap_alloc (max_cuid);
5620 sbitmap_zero (pre_redundant_insns);
5621
5622 /* Delete the redundant insns first so that
5623 - we know what register to use for the new insns and for the other
5624 ones with reaching expressions
5625 - we know which insns are redundant when we go to create copies */
5626
5627 changed = pre_delete ();
5628
5629 did_insert = pre_edge_insert (edge_list, index_map);
5630
5631 /* In other places with reaching expressions, copy the expression to the
5632 specially allocated pseudo-reg that reaches the redundant expr. */
5633 pre_insert_copies ();
5634 if (did_insert)
5635 {
5636 commit_edge_insertions ();
5637 changed = 1;
5638 }
5639
5640 free (index_map);
5641 sbitmap_free (pre_redundant_insns);
5642 return changed;
5643 }
5644
5645 /* Top level routine to perform one PRE GCSE pass.
5646
5647 Return nonzero if a change was made. */
5648
5649 static int
5650 one_pre_gcse_pass (int pass)
5651 {
5652 int changed = 0;
5653
5654 gcse_subst_count = 0;
5655 gcse_create_count = 0;
5656
5657 alloc_hash_table (max_cuid, &expr_hash_table, 0);
5658 add_noreturn_fake_exit_edges ();
5659 if (flag_gcse_lm)
5660 compute_ld_motion_mems ();
5661
5662 compute_hash_table (&expr_hash_table);
5663 trim_ld_motion_mems ();
5664 if (gcse_file)
5665 dump_hash_table (gcse_file, "Expression", &expr_hash_table);
5666
5667 if (expr_hash_table.n_elems > 0)
5668 {
5669 alloc_pre_mem (last_basic_block, expr_hash_table.n_elems);
5670 compute_pre_data ();
5671 changed |= pre_gcse ();
5672 free_edge_list (edge_list);
5673 free_pre_mem ();
5674 }
5675
5676 free_ldst_mems ();
5677 remove_fake_edges ();
5678 free_hash_table (&expr_hash_table);
5679
5680 if (gcse_file)
5681 {
5682 fprintf (gcse_file, "\nPRE GCSE of %s, pass %d: %d bytes needed, ",
5683 current_function_name, pass, bytes_used);
5684 fprintf (gcse_file, "%d substs, %d insns created\n",
5685 gcse_subst_count, gcse_create_count);
5686 }
5687
5688 return changed;
5689 }
5690 \f
5691 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to INSN.
5692 If notes are added to an insn which references a CODE_LABEL, the
5693 LABEL_NUSES count is incremented. We have to add REG_LABEL notes,
5694 because the following loop optimization pass requires them. */
5695
5696 /* ??? This is very similar to the loop.c add_label_notes function. We
5697 could probably share code here. */
5698
5699 /* ??? If there was a jump optimization pass after gcse and before loop,
5700 then we would not need to do this here, because jump would add the
5701 necessary REG_LABEL notes. */
5702
5703 static void
5704 add_label_notes (rtx x, rtx insn)
5705 {
5706 enum rtx_code code = GET_CODE (x);
5707 int i, j;
5708 const char *fmt;
5709
5710 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
5711 {
5712 /* This code used to ignore labels that referred to dispatch tables to
5713 avoid flow generating (slightly) worse code.
5714
5715 We no longer ignore such label references (see LABEL_REF handling in
5716 mark_jump_label for additional information). */
5717
5718 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
5719 REG_NOTES (insn));
5720 if (LABEL_P (XEXP (x, 0)))
5721 LABEL_NUSES (XEXP (x, 0))++;
5722 return;
5723 }
5724
5725 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
5726 {
5727 if (fmt[i] == 'e')
5728 add_label_notes (XEXP (x, i), insn);
5729 else if (fmt[i] == 'E')
5730 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5731 add_label_notes (XVECEXP (x, i, j), insn);
5732 }
5733 }
5734
5735 /* Compute transparent outgoing information for each block.
5736
5737 An expression is transparent to an edge unless it is killed by
5738 the edge itself. This can only happen with abnormal control flow,
5739 when the edge is traversed through a call. This happens with
5740 non-local labels and exceptions.
5741
5742 This would not be necessary if we split the edge. While this is
5743 normally impossible for abnormal critical edges, with some effort
5744 it should be possible with exception handling, since we still have
5745 control over which handler should be invoked. But due to increased
5746 EH table sizes, this may not be worthwhile. */
5747
5748 static void
5749 compute_transpout (void)
5750 {
5751 basic_block bb;
5752 unsigned int i;
5753 struct expr *expr;
5754
5755 sbitmap_vector_ones (transpout, last_basic_block);
5756
5757 FOR_EACH_BB (bb)
5758 {
5759 /* Note that flow inserted a nop at the end of basic blocks that
5760 end in call instructions for reasons other than abnormal
5761 control flow. */
5762 if (GET_CODE (bb->end) != CALL_INSN)
5763 continue;
5764
5765 for (i = 0; i < expr_hash_table.size; i++)
5766 for (expr = expr_hash_table.table[i]; expr ; expr = expr->next_same_hash)
5767 if (GET_CODE (expr->expr) == MEM)
5768 {
5769 if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
5770 && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
5771 continue;
5772
5773 /* ??? Optimally, we would use interprocedural alias
5774 analysis to determine if this mem is actually killed
5775 by this call. */
5776 RESET_BIT (transpout[bb->index], expr->bitmap_index);
5777 }
5778 }
5779 }
5780
5781 /* Removal of useless null pointer checks */
5782
5783 /* Called via note_stores. X is set by SETTER. If X is a register we must
5784 invalidate nonnull_local and set nonnull_killed. DATA is really a
5785 `null_pointer_info *'.
5786
5787 We ignore hard registers. */
5788
5789 static void
5790 invalidate_nonnull_info (rtx x, rtx setter ATTRIBUTE_UNUSED, void *data)
5791 {
5792 unsigned int regno;
5793 struct null_pointer_info *npi = (struct null_pointer_info *) data;
5794
5795 while (GET_CODE (x) == SUBREG)
5796 x = SUBREG_REG (x);
5797
5798 /* Ignore anything that is not a register or is a hard register. */
5799 if (GET_CODE (x) != REG
5800 || REGNO (x) < npi->min_reg
5801 || REGNO (x) >= npi->max_reg)
5802 return;
5803
5804 regno = REGNO (x) - npi->min_reg;
5805
5806 RESET_BIT (npi->nonnull_local[npi->current_block->index], regno);
5807 SET_BIT (npi->nonnull_killed[npi->current_block->index], regno);
5808 }
5809
5810 /* Do null-pointer check elimination for the registers indicated in
5811 NPI. NONNULL_AVIN and NONNULL_AVOUT are pre-allocated sbitmaps;
5812 they are not our responsibility to free. */
5813
5814 static int
5815 delete_null_pointer_checks_1 (unsigned int *block_reg, sbitmap *nonnull_avin,
5816 sbitmap *nonnull_avout,
5817 struct null_pointer_info *npi)
5818 {
5819 basic_block bb, current_block;
5820 sbitmap *nonnull_local = npi->nonnull_local;
5821 sbitmap *nonnull_killed = npi->nonnull_killed;
5822 int something_changed = 0;
5823
5824 /* Compute local properties, nonnull and killed. A register will have
5825 the nonnull property if at the end of the current block its value is
5826 known to be nonnull. The killed property indicates that somewhere in
5827 the block any information we had about the register is killed.
5828
5829 Note that a register can have both properties in a single block. That
5830 indicates that it's killed, then later in the block a new value is
5831 computed. */
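
  /* A hypothetical block showing both properties at once (names are
     illustrative only):

	 p = q;        kills whatever was known about p
	 x = *p;       (mem (reg p)) makes p nonnull from here on

     so at the end of this block p is marked both killed and nonnull.  */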
5832 sbitmap_vector_zero (nonnull_local, last_basic_block);
5833 sbitmap_vector_zero (nonnull_killed, last_basic_block);
5834
5835 FOR_EACH_BB (current_block)
5836 {
5837 rtx insn, stop_insn;
5838
5839 /* Set the current block for invalidate_nonnull_info. */
5840 npi->current_block = current_block;
5841
5842 /* Scan each insn in the basic block looking for memory references and
5843 register sets. */
5844 stop_insn = NEXT_INSN (current_block->end);
5845 for (insn = current_block->head;
5846 insn != stop_insn;
5847 insn = NEXT_INSN (insn))
5848 {
5849 rtx set;
5850 rtx reg;
5851
5852 /* Ignore anything that is not a normal insn. */
5853 if (! INSN_P (insn))
5854 continue;
5855
5856 /* Basically ignore anything that is not a simple SET. We do have
5857 to make sure to invalidate nonnull_local and set nonnull_killed
5858 for such insns though. */
5859 set = single_set (insn);
5860 if (!set)
5861 {
5862 note_stores (PATTERN (insn), invalidate_nonnull_info, npi);
5863 continue;
5864 }
5865
5866 /* See if we've got a usable memory load. We handle it first
5867 in case it uses its address register as a dest (which kills
5868 the nonnull property). */
5869 if (GET_CODE (SET_SRC (set)) == MEM
5870 && GET_CODE ((reg = XEXP (SET_SRC (set), 0))) == REG
5871 && REGNO (reg) >= npi->min_reg
5872 && REGNO (reg) < npi->max_reg)
5873 SET_BIT (nonnull_local[current_block->index],
5874 REGNO (reg) - npi->min_reg);
5875
5876 /* Now invalidate stuff clobbered by this insn. */
5877 note_stores (PATTERN (insn), invalidate_nonnull_info, npi);
5878
5879 /* And handle stores, we do these last since any sets in INSN can
5880 not kill the nonnull property if it is derived from a MEM
5881 appearing in a SET_DEST. */
5882 if (GET_CODE (SET_DEST (set)) == MEM
5883 && GET_CODE ((reg = XEXP (SET_DEST (set), 0))) == REG
5884 && REGNO (reg) >= npi->min_reg
5885 && REGNO (reg) < npi->max_reg)
5886 SET_BIT (nonnull_local[current_block->index],
5887 REGNO (reg) - npi->min_reg);
5888 }
5889 }
5890
5891 /* Now compute global properties based on the local properties. This
5892 is a classic global availability algorithm. */
5893 compute_available (nonnull_local, nonnull_killed,
5894 nonnull_avout, nonnull_avin);
5895
5896 /* Now look at each bb and see if it ends with a compare of a value
5897 against zero. */
5898 FOR_EACH_BB (bb)
5899 {
5900 rtx last_insn = bb->end;
5901 rtx condition, earliest;
5902 int compare_and_branch;
5903
5904 /* Since MIN_REG is always at least FIRST_PSEUDO_REGISTER, and
5905 since BLOCK_REG[BB] is zero if this block did not end with a
5906 comparison against zero, this condition works. */
5907 if (block_reg[bb->index] < npi->min_reg
5908 || block_reg[bb->index] >= npi->max_reg)
5909 continue;
5910
5911 /* LAST_INSN is a conditional jump. Get its condition. */
5912 condition = get_condition (last_insn, &earliest, false);
5913
5914 /* If we can't determine the condition then skip. */
5915 if (! condition)
5916 continue;
5917
5918 /* Is the register known to have a nonzero value? */
5919 if (!TEST_BIT (nonnull_avout[bb->index], block_reg[bb->index] - npi->min_reg))
5920 continue;
5921
5922 /* Try to compute whether the compare/branch at the loop end is one or
5923 two instructions. */
5924 if (earliest == last_insn)
5925 compare_and_branch = 1;
5926 else if (earliest == prev_nonnote_insn (last_insn))
5927 compare_and_branch = 2;
5928 else
5929 continue;
5930
5931 /* We know the register in this comparison is nonnull at exit from
5932 this block. We can optimize this comparison. */
5933 if (GET_CODE (condition) == NE)
5934 {
5935 rtx new_jump;
5936
5937 new_jump = emit_jump_insn_after (gen_jump (JUMP_LABEL (last_insn)),
5938 last_insn);
5939 JUMP_LABEL (new_jump) = JUMP_LABEL (last_insn);
5940 LABEL_NUSES (JUMP_LABEL (new_jump))++;
5941 emit_barrier_after (new_jump);
5942 }
5943
5944 something_changed = 1;
5945 delete_insn (last_insn);
5946 if (compare_and_branch == 2)
5947 delete_insn (earliest);
5948 purge_dead_edges (bb);
5949
5950 /* Don't check this block again. (Note that BLOCK_END is
5951 invalid here; we deleted the last instruction in the
5952 block.) */
5953 block_reg[bb->index] = 0;
5954 }
5955
5956 return something_changed;
5957 }
5958
5959 /* Find EQ/NE comparisons against zero which can be (indirectly) evaluated
5960 at compile time.
5961
5962 This is conceptually similar to global constant/copy propagation and
5963 classic global CSE (it even uses the same dataflow equations as cprop).
5964
5965 If a register is used as memory address with the form (mem (reg)), then we
5966 know that REG can not be zero at that point in the program. Any instruction
5967 which sets REG "kills" this property.
5968
5969 So, if every path leading to a conditional branch has an available memory
5970 reference of that form, then we know the register can not have the value
5971 zero at the conditional branch.
5972
5973 So we merely need to compute the local properties and propagate that data
5974 around the cfg, then optimize where possible.
5975
5976 We run this pass two times. Once before CSE, then again after CSE. This
5977 has proven to be the most profitable approach. It is rare for new
5978 optimization opportunities of this nature to appear after the first CSE
5979 pass.
5980
5981 This could probably be integrated with global cprop with a little work. */
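
/* A minimal sketch of the transformation (an assumed example, not taken
   from the testsuite):

       x = *p;         (mem (reg p)) - p cannot be zero here
       ...             no assignment to p on this path
       if (p == 0)     comparison is provably false on every path
         abort ();     reaching it, so the conditional branch is removed

   If some path to the branch lacks such a dereference, or p is set in
   between, the nonnull property is unavailable and the check is kept.  */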
5982
5983 int
5984 delete_null_pointer_checks (rtx f ATTRIBUTE_UNUSED)
5985 {
5986 sbitmap *nonnull_avin, *nonnull_avout;
5987 unsigned int *block_reg;
5988 basic_block bb;
5989 int reg;
5990 int regs_per_pass;
5991 int max_reg = max_reg_num ();
5992 struct null_pointer_info npi;
5993 int something_changed = 0;
5994
5995 /* If we have only a single block, or it is too expensive, give up. */
5996 if (n_basic_blocks <= 1
5997 || is_too_expensive (_ ("NULL pointer checks disabled")))
5998 return 0;
5999
6000 /* We need four bitmaps, each with a bit for each register in each
6001 basic block. */
6002 regs_per_pass = get_bitmap_width (4, last_basic_block, max_reg);
6003
6004 /* Allocate bitmaps to hold local and global properties. */
6005 npi.nonnull_local = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
6006 npi.nonnull_killed = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
6007 nonnull_avin = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
6008 nonnull_avout = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
6009
6010 /* Go through the basic blocks, seeing whether or not each block
6011 ends with a conditional branch whose condition is a comparison
6012 against zero. Record the register compared in BLOCK_REG. */
6013 block_reg = xcalloc (last_basic_block, sizeof (int));
6014 FOR_EACH_BB (bb)
6015 {
6016 rtx last_insn = bb->end;
6017 rtx condition, earliest, reg;
6018
6019 /* We only want conditional branches. */
6020 if (GET_CODE (last_insn) != JUMP_INSN
6021 || !any_condjump_p (last_insn)
6022 || !onlyjump_p (last_insn))
6023 continue;
6024
6025 /* LAST_INSN is a conditional jump. Get its condition. */
6026 condition = get_condition (last_insn, &earliest, false);
6027
6028 /* If we were unable to get the condition, or it is not an equality
6029 comparison against zero then there's nothing we can do. */
6030 if (!condition
6031 || (GET_CODE (condition) != NE && GET_CODE (condition) != EQ)
6032 || GET_CODE (XEXP (condition, 1)) != CONST_INT
6033 || (XEXP (condition, 1)
6034 != CONST0_RTX (GET_MODE (XEXP (condition, 0)))))
6035 continue;
6036
6037 /* We must be checking a register against zero. */
6038 reg = XEXP (condition, 0);
6039 if (GET_CODE (reg) != REG)
6040 continue;
6041
6042 block_reg[bb->index] = REGNO (reg);
6043 }
6044
6045 /* Go through the algorithm for each block of registers. */
6046 for (reg = FIRST_PSEUDO_REGISTER; reg < max_reg; reg += regs_per_pass)
6047 {
6048 npi.min_reg = reg;
6049 npi.max_reg = MIN (reg + regs_per_pass, max_reg);
6050 something_changed |= delete_null_pointer_checks_1 (block_reg,
6051 nonnull_avin,
6052 nonnull_avout,
6053 &npi);
6054 }
6055
6056 /* Free the table of registers compared at the end of every block. */
6057 free (block_reg);
6058
6059 /* Free bitmaps. */
6060 sbitmap_vector_free (npi.nonnull_local);
6061 sbitmap_vector_free (npi.nonnull_killed);
6062 sbitmap_vector_free (nonnull_avin);
6063 sbitmap_vector_free (nonnull_avout);
6064
6065 return something_changed;
6066 }
6067
6068 /* Code Hoisting variables and subroutines. */
6069
6070 /* Very busy expressions. */
6071 static sbitmap *hoist_vbein;
6072 static sbitmap *hoist_vbeout;
6073
6074 /* Hoistable expressions. */
6075 static sbitmap *hoist_exprs;
6076
6077 /* Dominator bitmaps. */
6078 dominance_info dominators;
6079
6080 /* ??? We could compute post dominators and run this algorithm in
6081 reverse to perform tail merging; doing so would probably be
6082 more effective than the tail merging code in jump.c.
6083
6084 It's unclear if tail merging could be run in parallel with
6085 code hoisting. It would be nice. */
6086
6087 /* Allocate vars used for code hoisting analysis. */
6088
6089 static void
6090 alloc_code_hoist_mem (int n_blocks, int n_exprs)
6091 {
6092 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
6093 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
6094 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
6095
6096 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
6097 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
6098 hoist_exprs = sbitmap_vector_alloc (n_blocks, n_exprs);
6099 transpout = sbitmap_vector_alloc (n_blocks, n_exprs);
6100 }
6101
6102 /* Free vars used for code hoisting analysis. */
6103
6104 static void
6105 free_code_hoist_mem (void)
6106 {
6107 sbitmap_vector_free (antloc);
6108 sbitmap_vector_free (transp);
6109 sbitmap_vector_free (comp);
6110
6111 sbitmap_vector_free (hoist_vbein);
6112 sbitmap_vector_free (hoist_vbeout);
6113 sbitmap_vector_free (hoist_exprs);
6114 sbitmap_vector_free (transpout);
6115
6116 free_dominance_info (dominators);
6117 }
6118
6119 /* Compute the very busy expressions at entry/exit from each block.
6120
6121 An expression is very busy if all paths from a given point
6122 compute the expression. */
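
/* A small illustrative example (assumed, not from the sources):

       if (cond)
         x = a * b;
       else
         y = a * b;

   a * b is very busy just before the branch because every path from that
   point computes it, which makes it a candidate for hoisting into the
   dominating block.  */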
6123
6124 static void
6125 compute_code_hoist_vbeinout (void)
6126 {
6127 int changed, passes;
6128 basic_block bb;
6129
6130 sbitmap_vector_zero (hoist_vbeout, last_basic_block);
6131 sbitmap_vector_zero (hoist_vbein, last_basic_block);
6132
6133 passes = 0;
6134 changed = 1;
6135
6136 while (changed)
6137 {
6138 changed = 0;
6139
6140 /* We scan the blocks in the reverse order to speed up
6141 the convergence. */
6142 FOR_EACH_BB_REVERSE (bb)
6143 {
6144 changed |= sbitmap_a_or_b_and_c_cg (hoist_vbein[bb->index], antloc[bb->index],
6145 hoist_vbeout[bb->index], transp[bb->index]);
6146 if (bb->next_bb != EXIT_BLOCK_PTR)
6147 sbitmap_intersection_of_succs (hoist_vbeout[bb->index], hoist_vbein, bb->index);
6148 }
6149
6150 passes++;
6151 }
6152
6153 if (gcse_file)
6154 fprintf (gcse_file, "hoisting vbeinout computation: %d passes\n", passes);
6155 }
6156
6157 /* Top level routine to do the dataflow analysis needed by code hoisting. */
6158
6159 static void
6160 compute_code_hoist_data (void)
6161 {
6162 compute_local_properties (transp, comp, antloc, &expr_hash_table);
6163 compute_transpout ();
6164 compute_code_hoist_vbeinout ();
6165 dominators = calculate_dominance_info (CDI_DOMINATORS);
6166 if (gcse_file)
6167 fprintf (gcse_file, "\n");
6168 }
6169
6170 /* Determine if the expression identified by EXPR_INDEX would
6171 reach BB unimpaired if it was placed at the end of EXPR_BB.
6172
6173 It's unclear exactly what Muchnick meant by "unimpaired". It seems
6174 to me that the expression must either be computed or transparent in
6175 *every* block in the path(s) from EXPR_BB to BB. Any other definition
6176 would allow the expression to be hoisted out of loops, even if
6177 the expression wasn't a loop invariant.
6178
6179 Contrast this to reachability for PRE where an expression is
6180 considered reachable if *any* path reaches instead of *all*
6181 paths. */
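
/* Hypothetical sketch of the "every block" requirement (block and
   variable names are illustrative only):

       EXPR_BB:  a + b is very busy at the exit
       MID:      a = f ();     a + b neither computed nor transparent here
       BB:       y = a + b;

   MID kills the expression on a path from EXPR_BB to BB, so the
   expression does not reach BB unimpaired and must not be hoisted.  */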
6182
6183 static int
6184 hoist_expr_reaches_here_p (basic_block expr_bb, int expr_index, basic_block bb, char *visited)
6185 {
6186 edge pred;
6187 int visited_allocated_locally = 0;
6188
6189
6190 if (visited == NULL)
6191 {
6192 visited_allocated_locally = 1;
6193 visited = xcalloc (last_basic_block, 1);
6194 }
6195
6196 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
6197 {
6198 basic_block pred_bb = pred->src;
6199
6200 if (pred->src == ENTRY_BLOCK_PTR)
6201 break;
6202 else if (pred_bb == expr_bb)
6203 continue;
6204 else if (visited[pred_bb->index])
6205 continue;
6206
6207 /* Does this predecessor generate this expression? */
6208 else if (TEST_BIT (comp[pred_bb->index], expr_index))
6209 break;
6210 else if (! TEST_BIT (transp[pred_bb->index], expr_index))
6211 break;
6212
6213 /* Not killed. */
6214 else
6215 {
6216 visited[pred_bb->index] = 1;
6217 if (! hoist_expr_reaches_here_p (expr_bb, expr_index,
6218 pred_bb, visited))
6219 break;
6220 }
6221 }
6222 if (visited_allocated_locally)
6223 free (visited);
6224
6225 return (pred == NULL);
6226 }
6227 \f
6228 /* Actually perform code hoisting. */
6229
6230 static void
6231 hoist_code (void)
6232 {
6233 basic_block bb, dominated;
6234 basic_block *domby;
6235 unsigned int domby_len;
6236 unsigned int i,j;
6237 struct expr **index_map;
6238 struct expr *expr;
6239
6240 sbitmap_vector_zero (hoist_exprs, last_basic_block);
6241
6242 /* Compute a mapping from expression number (`bitmap_index') to
6243 hash table entry. */
6244
6245 index_map = xcalloc (expr_hash_table.n_elems, sizeof (struct expr *));
6246 for (i = 0; i < expr_hash_table.size; i++)
6247 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
6248 index_map[expr->bitmap_index] = expr;
6249
6250 /* Walk over each basic block looking for potentially hoistable
6251 expressions; nothing gets hoisted from the entry block. */
6252 FOR_EACH_BB (bb)
6253 {
6254 int found = 0;
6255 int insn_inserted_p;
6256
6257 domby_len = get_dominated_by (dominators, bb, &domby);
6258 /* Examine each expression that is very busy at the exit of this
6259 block. These are the potentially hoistable expressions. */
6260 for (i = 0; i < hoist_vbeout[bb->index]->n_bits; i++)
6261 {
6262 int hoistable = 0;
6263
6264 if (TEST_BIT (hoist_vbeout[bb->index], i)
6265 && TEST_BIT (transpout[bb->index], i))
6266 {
6267 /* We've found a potentially hoistable expression, now
6268 we look at every block BB dominates to see if it
6269 computes the expression. */
6270 for (j = 0; j < domby_len; j++)
6271 {
6272 dominated = domby[j];
6273 /* Ignore self dominance. */
6274 if (bb == dominated)
6275 continue;
6276 /* We've found a dominated block, now see if it computes
6277 the busy expression and whether or not moving that
6278 expression to the "beginning" of that block is safe. */
6279 if (!TEST_BIT (antloc[dominated->index], i))
6280 continue;
6281
6282 /* Note if the expression would reach the dominated block
6283 unimpaired if it was placed at the end of BB.
6284
6285 Keep track of how many times this expression is hoistable
6286 from a dominated block into BB. */
6287 if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
6288 hoistable++;
6289 }
6290
6291 /* If we found more than one hoistable occurrence of this
6292 expression, then note it in the bitmap of expressions to
6293 hoist. It makes no sense to hoist things which are computed
6294 in only one BB, and doing so tends to pessimize register
6295 allocation. One could increase this value to try harder
6296 to avoid any possible code expansion due to register
6297 allocation issues; however experiments have shown that
6298 the vast majority of hoistable expressions are only movable
6299 from two successors, so raising this threshold is likely
6300 to nullify any benefit we get from code hoisting. */
6301 if (hoistable > 1)
6302 {
6303 SET_BIT (hoist_exprs[bb->index], i);
6304 found = 1;
6305 }
6306 }
6307 }
6308 /* If we found nothing to hoist, then quit now. */
6309 if (! found)
6310 {
6311 free (domby);
6312 continue;
6313 }
6314
6315 /* Loop over all the hoistable expressions. */
6316 for (i = 0; i < hoist_exprs[bb->index]->n_bits; i++)
6317 {
6318 /* We want to insert the expression into BB only once, so
6319 note when we've inserted it. */
6320 insn_inserted_p = 0;
6321
6322 /* These tests should be the same as the tests above. */
6323 if (TEST_BIT (hoist_vbeout[bb->index], i))
6324 {
6325 /* We've found a potentially hoistable expression, now
6326 we look at every block BB dominates to see if it
6327 computes the expression. */
6328 for (j = 0; j < domby_len; j++)
6329 {
6330 dominated = domby[j];
6331 /* Ignore self dominance. */
6332 if (bb == dominated)
6333 continue;
6334
6335 /* We've found a dominated block, now see if it computes
6336 the busy expression and whether or not moving that
6337 expression to the "beginning" of that block is safe. */
6338 if (!TEST_BIT (antloc[dominated->index], i))
6339 continue;
6340
6341 /* The expression is computed in the dominated block and
6342 it would be safe to compute it at the start of the
6343 dominated block. Now we have to determine if the
6344 expression would reach the dominated block if it was
6345 placed at the end of BB. */
6346 if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
6347 {
6348 struct expr *expr = index_map[i];
6349 struct occr *occr = expr->antic_occr;
6350 rtx insn;
6351 rtx set;
6352
6353 /* Find the right occurrence of this expression. */
6354 while (occr && BLOCK_FOR_INSN (occr->insn) != dominated)
6355 occr = occr->next;
6356
6357 /* Should never happen. */
6358 if (!occr)
6359 abort ();
6360
6361 insn = occr->insn;
6362
6363 set = single_set (insn);
6364 if (! set)
6365 abort ();
6366
6367 /* Create a pseudo-reg to store the result of reaching
6368 expressions into. Get the mode for the new pseudo
6369 from the mode of the original destination pseudo. */
6370 if (expr->reaching_reg == NULL)
6371 expr->reaching_reg
6372 = gen_reg_rtx (GET_MODE (SET_DEST (set)));
6373
6374 gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
6375 delete_insn (insn);
6376 occr->deleted_p = 1;
6377 if (!insn_inserted_p)
6378 {
6379 insert_insn_end_bb (index_map[i], bb, 0);
6380 insn_inserted_p = 1;
6381 }
6382 }
6383 }
6384 }
6385 }
6386 free (domby);
6387 }
6388
6389 free (index_map);
6390 }
6391
6392 /* Top level routine to perform one code hoisting (aka unification) pass
6393
6394 Return nonzero if a change was made. */
6395
6396 static int
6397 one_code_hoisting_pass (void)
6398 {
6399 int changed = 0;
6400
6401 alloc_hash_table (max_cuid, &expr_hash_table, 0);
6402 compute_hash_table (&expr_hash_table);
6403 if (gcse_file)
6404 dump_hash_table (gcse_file, "Code Hoisting Expressions", &expr_hash_table);
6405
6406 if (expr_hash_table.n_elems > 0)
6407 {
6408 alloc_code_hoist_mem (last_basic_block, expr_hash_table.n_elems);
6409 compute_code_hoist_data ();
6410 hoist_code ();
6411 free_code_hoist_mem ();
6412 }
6413
6414 free_hash_table (&expr_hash_table);
6415
6416 return changed;
6417 }
6418 \f
6419 /* Here we provide the things required to do store motion towards
6420 the exit. In order for this to be effective, gcse also needed to
6421 be taught how to move a load when it is killed only by a store to itself.
6422
6423 int i;
6424 float a[10];
6425
6426 void foo(float scale)
6427 {
6428 for (i=0; i<10; i++)
6429 a[i] *= scale;
6430 }
6431
6432 'i' is both loaded and stored to in the loop. Normally, gcse cannot move
6433 the load out since it's live around the loop, and stored at the bottom
6434 of the loop.
6435
6436 The 'Load Motion' referred to and implemented in this file is
6437 an enhancement to gcse which, when using edge based lcm, recognizes
6438 this situation and allows gcse to move the load out of the loop.
6439
6440 Once gcse has hoisted the load, store motion can then push this
6441 load towards the exit, and we end up with no loads or stores of 'i'
6442 in the loop. */
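
/* Sketch of the intended result for the example above (illustration only;
   the exact RTL differs): once load motion hoists the load of 'i' and
   store motion sinks the store, the loop body uses only a register:

       for (reg = 0; reg < 10; reg++)
         a[reg] *= scale;
       i = reg;

   leaving a single store to 'i' after the loop and none inside it.  */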
6443
6444 /* This will search the ldst list for a matching expression. If it
6445 doesn't find one, we create one and initialize it. */
6446
6447 static struct ls_expr *
6448 ldst_entry (rtx x)
6449 {
6450 struct ls_expr * ptr;
6451
6452 for (ptr = first_ls_expr(); ptr != NULL; ptr = next_ls_expr (ptr))
6453 if (expr_equiv_p (ptr->pattern, x))
6454 break;
6455
6456 if (!ptr)
6457 {
6458 ptr = xmalloc (sizeof (struct ls_expr));
6459
6460 ptr->next = pre_ldst_mems;
6461 ptr->expr = NULL;
6462 ptr->pattern = x;
6463 ptr->pattern_regs = NULL_RTX;
6464 ptr->loads = NULL_RTX;
6465 ptr->stores = NULL_RTX;
6466 ptr->reaching_reg = NULL_RTX;
6467 ptr->invalid = 0;
6468 ptr->index = 0;
6469 ptr->hash_index = 0;
6470 pre_ldst_mems = ptr;
6471 }
6472
6473 return ptr;
6474 }
6475
6476 /* Free up an individual ldst entry. */
6477
6478 static void
6479 free_ldst_entry (struct ls_expr * ptr)
6480 {
6481 free_INSN_LIST_list (& ptr->loads);
6482 free_INSN_LIST_list (& ptr->stores);
6483
6484 free (ptr);
6485 }
6486
6487 /* Free up all memory associated with the ldst list. */
6488
6489 static void
6490 free_ldst_mems (void)
6491 {
6492 while (pre_ldst_mems)
6493 {
6494 struct ls_expr * tmp = pre_ldst_mems;
6495
6496 pre_ldst_mems = pre_ldst_mems->next;
6497
6498 free_ldst_entry (tmp);
6499 }
6500
6501 pre_ldst_mems = NULL;
6502 }
6503
6504 /* Dump debugging info about the ldst list. */
6505
6506 static void
6507 print_ldst_list (FILE * file)
6508 {
6509 struct ls_expr * ptr;
6510
6511 fprintf (file, "LDST list: \n");
6512
6513 for (ptr = first_ls_expr(); ptr != NULL; ptr = next_ls_expr (ptr))
6514 {
6515 fprintf (file, " Pattern (%3d): ", ptr->index);
6516
6517 print_rtl (file, ptr->pattern);
6518
6519 fprintf (file, "\n Loads : ");
6520
6521 if (ptr->loads)
6522 print_rtl (file, ptr->loads);
6523 else
6524 fprintf (file, "(nil)");
6525
6526 fprintf (file, "\n Stores : ");
6527
6528 if (ptr->stores)
6529 print_rtl (file, ptr->stores);
6530 else
6531 fprintf (file, "(nil)");
6532
6533 fprintf (file, "\n\n");
6534 }
6535
6536 fprintf (file, "\n");
6537 }
6538
6539 /* Returns 1 if X is in the list of ldst only expressions. */
6540
6541 static struct ls_expr *
6542 find_rtx_in_ldst (rtx x)
6543 {
6544 struct ls_expr * ptr;
6545
6546 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
6547 if (expr_equiv_p (ptr->pattern, x) && ! ptr->invalid)
6548 return ptr;
6549
6550 return NULL;
6551 }
6552
6553 /* Assign each element of the list of mems a monotonically increasing value. */
6554
6555 static int
6556 enumerate_ldsts (void)
6557 {
6558 struct ls_expr * ptr;
6559 int n = 0;
6560
6561 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
6562 ptr->index = n++;
6563
6564 return n;
6565 }
6566
6567 /* Return first item in the list. */
6568
6569 static inline struct ls_expr *
6570 first_ls_expr (void)
6571 {
6572 return pre_ldst_mems;
6573 }
6574
6575 /* Return the next item in the list after the specified one. */
6576
6577 static inline struct ls_expr *
6578 next_ls_expr (struct ls_expr * ptr)
6579 {
6580 return ptr->next;
6581 }
6582 \f
6583 /* Load Motion for loads which only kill themselves. */
6584
6585 /* Return true if x is a simple MEM operation, with no registers or
6586 side effects. These are the types of loads we consider for the
6587 ld_motion list, otherwise we let the usual aliasing take care of it. */
6588
6589 static int
6590 simple_mem (rtx x)
6591 {
6592 if (GET_CODE (x) != MEM)
6593 return 0;
6594
6595 if (MEM_VOLATILE_P (x))
6596 return 0;
6597
6598 if (GET_MODE (x) == BLKmode)
6599 return 0;
6600
6601 /* If we are handling exceptions, we must be careful with memory references
6602 that may trap. If we are not, the behavior is undefined, so we may just
6603 continue. */
6604 if (flag_non_call_exceptions && may_trap_p (x))
6605 return 0;
6606
6607 if (side_effects_p (x))
6608 return 0;
6609
6610 /* Do not consider function arguments passed on the stack. */
6611 if (reg_mentioned_p (stack_pointer_rtx, x))
6612 return 0;
6613
6614 if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
6615 return 0;
6616
6617 return 1;
6618 }
6619
6620 /* Make sure there isn't a buried reference in this pattern anywhere.
6621 If there is, invalidate the entry for it since we're not capable
6622 of fixing it up just yet. We have to be sure we know about ALL
6623 loads since the aliasing code will allow all entries in the
6624 ld_motion list to not alias themselves. If we miss a load, we will get
6625 the wrong value since gcse might common it and we won't know to
6626 fix it up. */
6627
6628 static void
6629 invalidate_any_buried_refs (rtx x)
6630 {
6631 const char * fmt;
6632 int i, j;
6633 struct ls_expr * ptr;
6634
6635 /* Invalidate it in the list. */
6636 if (GET_CODE (x) == MEM && simple_mem (x))
6637 {
6638 ptr = ldst_entry (x);
6639 ptr->invalid = 1;
6640 }
6641
6642 /* Recursively process the insn. */
6643 fmt = GET_RTX_FORMAT (GET_CODE (x));
6644
6645 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6646 {
6647 if (fmt[i] == 'e')
6648 invalidate_any_buried_refs (XEXP (x, i));
6649 else if (fmt[i] == 'E')
6650 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6651 invalidate_any_buried_refs (XVECEXP (x, i, j));
6652 }
6653 }
6654
6655 /* Find all the 'simple' MEMs which are used in LOADs and STORES. Simple
6656 being defined as MEM loads and stores to symbols, with no side effects
6657 and no registers in the expression. For a MEM destination, we also
6658 check that the insn is still valid if we replace the destination with a
6659 REG, as is done in update_ld_motion_stores. If there are any uses/defs
6660 which don't match this criteria, they are invalidated and trimmed out
6661 later. */
6662
6663 static void
6664 compute_ld_motion_mems (void)
6665 {
6666 struct ls_expr * ptr;
6667 basic_block bb;
6668 rtx insn;
6669
6670 pre_ldst_mems = NULL;
6671
6672 FOR_EACH_BB (bb)
6673 {
6674 for (insn = bb->head;
6675 insn && insn != NEXT_INSN (bb->end);
6676 insn = NEXT_INSN (insn))
6677 {
6678 if (INSN_P (insn))
6679 {
6680 if (GET_CODE (PATTERN (insn)) == SET)
6681 {
6682 rtx src = SET_SRC (PATTERN (insn));
6683 rtx dest = SET_DEST (PATTERN (insn));
6684
6685 /* Check for a simple LOAD... */
6686 if (GET_CODE (src) == MEM && simple_mem (src))
6687 {
6688 ptr = ldst_entry (src);
6689 if (GET_CODE (dest) == REG)
6690 ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
6691 else
6692 ptr->invalid = 1;
6693 }
6694 else
6695 {
6696 /* Make sure there isn't a buried load somewhere. */
6697 invalidate_any_buried_refs (src);
6698 }
6699
6700 /* Check for stores. Don't worry about aliased ones, they
6701 will block any movement we might do later. We only care
6702 about this exact pattern since those are the only
6703 circumstance that we will ignore the aliasing info. */
6704 if (GET_CODE (dest) == MEM && simple_mem (dest))
6705 {
6706 ptr = ldst_entry (dest);
6707
6708 if (GET_CODE (src) != MEM
6709 && GET_CODE (src) != ASM_OPERANDS
6710 /* Check for REG manually since want_to_gcse_p
6711 returns 0 for all REGs. */
6712 && (REG_P (src) || want_to_gcse_p (src)))
6713 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
6714 else
6715 ptr->invalid = 1;
6716 }
6717 }
6718 else
6719 invalidate_any_buried_refs (PATTERN (insn));
6720 }
6721 }
6722 }
6723 }
6724
6725 /* Remove any references that have been either invalidated or are not in the
6726 expression list for pre gcse. */
6727
6728 static void
6729 trim_ld_motion_mems (void)
6730 {
6731 struct ls_expr * last = NULL;
6732 struct ls_expr * ptr = first_ls_expr ();
6733
6734 while (ptr != NULL)
6735 {
6736 int del = ptr->invalid;
6737 struct expr * expr = NULL;
6738
6739 /* Delete if entry has been made invalid. */
6740 if (!del)
6741 {
6742 unsigned int i;
6743
6744 del = 1;
6745 /* Delete if we cannot find this mem in the expression list. */
6746 for (i = 0; i < expr_hash_table.size && del; i++)
6747 {
6748 for (expr = expr_hash_table.table[i];
6749 expr != NULL;
6750 expr = expr->next_same_hash)
6751 if (expr_equiv_p (expr->expr, ptr->pattern))
6752 {
6753 del = 0;
6754 break;
6755 }
6756 }
6757 }
6758
6759 if (del)
6760 {
6761 if (last != NULL)
6762 {
6763 last->next = ptr->next;
6764 free_ldst_entry (ptr);
6765 ptr = last->next;
6766 }
6767 else
6768 {
6769 pre_ldst_mems = pre_ldst_mems->next;
6770 free_ldst_entry (ptr);
6771 ptr = pre_ldst_mems;
6772 }
6773 }
6774 else
6775 {
6776 /* Set the expression field if we are keeping it. */
6777 last = ptr;
6778 ptr->expr = expr;
6779 ptr = ptr->next;
6780 }
6781 }
6782
6783 /* Show the world what we've found. */
6784 if (gcse_file && pre_ldst_mems != NULL)
6785 print_ldst_list (gcse_file);
6786 }
6787
6788 /* This routine will take an expression which we are replacing with
6789 a reaching register, and update any stores that are needed if
6790 that expression is in the ld_motion list. Stores are updated by
6791 copying their SRC to the reaching register, and then storing
6792 the reaching register into the store location. This keeps the
6793 correct value in the reaching register for the loads. */
6794
6795 static void
6796 update_ld_motion_stores (struct expr * expr)
6797 {
6798 struct ls_expr * mem_ptr;
6799
6800 if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
6801 {
6802 /* We can try to find just the REACHED stores, but it shouldn't
6803 matter if we set the reaching reg everywhere... some might be
6804 dead and should be eliminated later. */
6805
6806 /* We replace (set mem expr) with (set reg expr) (set mem reg)
6807 where reg is the reaching reg used in the load. We checked in
6808 compute_ld_motion_mems that we can replace (set mem expr) with
6809 (set reg expr) in that insn. */
6810 rtx list = mem_ptr->stores;
6811
6812 for ( ; list != NULL_RTX; list = XEXP (list, 1))
6813 {
6814 rtx insn = XEXP (list, 0);
6815 rtx pat = PATTERN (insn);
6816 rtx src = SET_SRC (pat);
6817 rtx reg = expr->reaching_reg;
6818 rtx copy, new;
6819
6820 /* If we've already copied it, continue. */
6821 if (expr->reaching_reg == src)
6822 continue;
6823
6824 if (gcse_file)
6825 {
6826 fprintf (gcse_file, "PRE: store updated with reaching reg ");
6827 print_rtl (gcse_file, expr->reaching_reg);
6828 fprintf (gcse_file, ":\n ");
6829 print_inline_rtx (gcse_file, insn, 8);
6830 fprintf (gcse_file, "\n");
6831 }
6832
6833 copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
6834 new = emit_insn_before (copy, insn);
6835 record_one_set (REGNO (reg), new);
6836 SET_SRC (pat) = reg;
6837
6838 /* Un-recognize this pattern since it's probably different now. */
6839 INSN_CODE (insn) = -1;
6840 gcse_create_count++;
6841 }
6842 }
6843 }
6844 \f
6845 /* Store motion code. */
6846
6847 #define ANTIC_STORE_LIST(x) ((x)->loads)
6848 #define AVAIL_STORE_LIST(x) ((x)->stores)
6849 #define LAST_AVAIL_CHECK_FAILURE(x) ((x)->reaching_reg)
6850
6851 /* This is used to communicate the target bitvector we want to use in the
6852 reg_set_info routine when called via the note_stores mechanism. */
6853 static int * regvec;
6854
6855 /* And current insn, for the same routine. */
6856 static rtx compute_store_table_current_insn;
6857
6858 /* Used in computing the reverse edge graph bit vectors. */
6859 static sbitmap * st_antloc;
6860
6861 /* Global holding the number of store expressions we are dealing with. */
6862 static int num_stores;
6863
6864 /* Checks to see if we need to mark a register set. Called from note_stores. */
6865
6866 static void
6867 reg_set_info (rtx dest, rtx setter ATTRIBUTE_UNUSED,
6868 void *data ATTRIBUTE_UNUSED)
6869 {
6870 if (GET_CODE (dest) == SUBREG)
6871 dest = SUBREG_REG (dest);
6872
6873 if (GET_CODE (dest) == REG)
6874 regvec[REGNO (dest)] = INSN_UID (compute_store_table_current_insn);
6875 }
6876
6877 /* Return false if any of the registers in list X is killed
6878 because of a register set recorded in the REGS_SET array. */
6879
6880 static bool
6881 store_ops_ok (rtx x, int *regs_set)
6882 {
6883 rtx reg;
6884
6885 for (; x; x = XEXP (x, 1))
6886 {
6887 reg = XEXP (x, 0);
6888 if (regs_set[REGNO(reg)])
6889 return false;
6890 }
6891
6892 return true;
6893 }
6894
6895 /* Returns a list of registers mentioned in X. */
6896 static rtx
6897 extract_mentioned_regs (rtx x)
6898 {
6899 return extract_mentioned_regs_helper (x, NULL_RTX);
6900 }
6901
6902 /* Helper for extract_mentioned_regs; ACCUM is used to accumulate used
6903 registers. */
6904 static rtx
6905 extract_mentioned_regs_helper (rtx x, rtx accum)
6906 {
6907 int i;
6908 enum rtx_code code;
6909 const char * fmt;
6910
6911 /* Repeat is used to turn tail-recursion into iteration. */
6912 repeat:
6913
6914 if (x == 0)
6915 return accum;
6916
6917 code = GET_CODE (x);
6918 switch (code)
6919 {
6920 case REG:
6921 return alloc_EXPR_LIST (0, x, accum);
6922
6923 case MEM:
6924 x = XEXP (x, 0);
6925 goto repeat;
6926
6927 case PRE_DEC:
6928 case PRE_INC:
6929 case POST_DEC:
6930 case POST_INC:
6931 /* We do not run this function with arguments having side effects. */
6932 abort ();
6933
6934 case PC:
6935 case CC0: /*FIXME*/
6936 case CONST:
6937 case CONST_INT:
6938 case CONST_DOUBLE:
6939 case CONST_VECTOR:
6940 case SYMBOL_REF:
6941 case LABEL_REF:
6942 case ADDR_VEC:
6943 case ADDR_DIFF_VEC:
6944 return accum;
6945
6946 default:
6947 break;
6948 }
6949
6950 i = GET_RTX_LENGTH (code) - 1;
6951 fmt = GET_RTX_FORMAT (code);
6952
6953 for (; i >= 0; i--)
6954 {
6955 if (fmt[i] == 'e')
6956 {
6957 rtx tem = XEXP (x, i);
6958
6959 /* If we are about to do the last recursive call
6960 needed at this level, change it into iteration. */
6961 if (i == 0)
6962 {
6963 x = tem;
6964 goto repeat;
6965 }
6966
6967 accum = extract_mentioned_regs_helper (tem, accum);
6968 }
6969 else if (fmt[i] == 'E')
6970 {
6971 int j;
6972
6973 for (j = 0; j < XVECLEN (x, i); j++)
6974 accum = extract_mentioned_regs_helper (XVECEXP (x, i, j), accum);
6975 }
6976 }
6977
6978 return accum;
6979 }
6980
6981 /* Determine whether INSN is a MEM store pattern that we will consider moving.
6982 REGS_SET_BEFORE is bitmap of registers set before (and including) the
6983 current insn, REGS_SET_AFTER is bitmap of registers set after (and
6984 including) the insn in this basic block. We must be passing through BB from
6985 head to end, as we are using this fact to speed things up.
6986
6987 The results are stored this way:
6988
6989 -- the first anticipatable expression is added into ANTIC_STORE_LIST
6990 -- if the processed expression is not anticipatable, NULL_RTX is added
6991 there instead, so that we can use it as an indicator that no further
6992 expression of this type may be anticipatable
6993 -- if the expression is available, it is added as head of AVAIL_STORE_LIST;
6994 consequently, all of them but this head are dead and may be deleted.
6995 -- if the expression is not available, the insn that causes it to fail
6996 to be available is stored in reaching_reg.
6997
6998 Things are complicated a bit by the fact that there may already be stores
6999 to the same MEM from other blocks; also the caller must take care of the
7000 necessary cleanup of the temporary markers after the end of the basic block.
7001 */
7002
7003 static void
7004 find_moveable_store (rtx insn, int *regs_set_before, int *regs_set_after)
7005 {
7006 struct ls_expr * ptr;
7007 rtx dest, set, tmp;
7008 int check_anticipatable, check_available;
7009 basic_block bb = BLOCK_FOR_INSN (insn);
7010
7011 set = single_set (insn);
7012 if (!set)
7013 return;
7014
7015 dest = SET_DEST (set);
7016
7017 if (GET_CODE (dest) != MEM || MEM_VOLATILE_P (dest)
7018 || GET_MODE (dest) == BLKmode)
7019 return;
7020
7021 if (side_effects_p (dest))
7022 return;
7023
7024 /* If we are handling exceptions, we must be careful with memory references
7025 that may trap. If we are not, the behavior is undefined, so we may just
7026 continue. */
7027 if (flag_non_call_exceptions && may_trap_p (dest))
7028 return;
7029
7030 ptr = ldst_entry (dest);
7031 if (!ptr->pattern_regs)
7032 ptr->pattern_regs = extract_mentioned_regs (dest);
7033
7034 /* Do not check for anticipatability if we either found one anticipatable
7035 store already, or tested for one and found out that it was killed. */
7036 check_anticipatable = 0;
7037 if (!ANTIC_STORE_LIST (ptr))
7038 check_anticipatable = 1;
7039 else
7040 {
7041 tmp = XEXP (ANTIC_STORE_LIST (ptr), 0);
7042 if (tmp != NULL_RTX
7043 && BLOCK_FOR_INSN (tmp) != bb)
7044 check_anticipatable = 1;
7045 }
7046 if (check_anticipatable)
7047 {
7048 if (store_killed_before (dest, ptr->pattern_regs, insn, bb, regs_set_before))
7049 tmp = NULL_RTX;
7050 else
7051 tmp = insn;
7052 ANTIC_STORE_LIST (ptr) = alloc_INSN_LIST (tmp,
7053 ANTIC_STORE_LIST (ptr));
7054 }
7055
7056 /* It is not necessary to check whether store is available if we did
7057 it successfully before; if we failed before, do not bother to check
7058 until we reach the insn that caused us to fail. */
7059 check_available = 0;
7060 if (!AVAIL_STORE_LIST (ptr))
7061 check_available = 1;
7062 else
7063 {
7064 tmp = XEXP (AVAIL_STORE_LIST (ptr), 0);
7065 if (BLOCK_FOR_INSN (tmp) != bb)
7066 check_available = 1;
7067 }
7068 if (check_available)
7069 {
7070 /* Check whether we have already reached the insn at which the check
7071 failed last time. */
7072 if (LAST_AVAIL_CHECK_FAILURE (ptr))
7073 {
7074 for (tmp = bb->end;
7075 tmp != insn && tmp != LAST_AVAIL_CHECK_FAILURE (ptr);
7076 tmp = PREV_INSN (tmp))
7077 continue;
7078 if (tmp == insn)
7079 check_available = 0;
7080 }
7081 else
7082 check_available = store_killed_after (dest, ptr->pattern_regs, insn,
7083 bb, regs_set_after,
7084 &LAST_AVAIL_CHECK_FAILURE (ptr));
7085 }
7086 if (!check_available)
7087 AVAIL_STORE_LIST (ptr) = alloc_INSN_LIST (insn, AVAIL_STORE_LIST (ptr));
7088 }
7089
7090 /* Find available and anticipatable stores. */
7091
7092 static int
7093 compute_store_table (void)
7094 {
7095 int ret;
7096 basic_block bb;
7097 unsigned regno;
7098 rtx insn, pat, tmp;
7099 int *last_set_in, *already_set;
7100 struct ls_expr * ptr, **prev_next_ptr_ptr;
7101
7102 max_gcse_regno = max_reg_num ();
7103
7104 reg_set_in_block = sbitmap_vector_alloc (last_basic_block,
7105 max_gcse_regno);
7106 sbitmap_vector_zero (reg_set_in_block, last_basic_block);
7107 pre_ldst_mems = 0;
7108 last_set_in = xmalloc (sizeof (int) * max_gcse_regno);
7109 already_set = xmalloc (sizeof (int) * max_gcse_regno);
7110
7111 /* Find all the stores we care about. */
7112 FOR_EACH_BB (bb)
7113 {
7114 /* First compute the registers set in this block. */
7115 memset (last_set_in, 0, sizeof (int) * max_gcse_regno);
7116 regvec = last_set_in;
7117
7118 for (insn = bb->head;
7119 insn != NEXT_INSN (bb->end);
7120 insn = NEXT_INSN (insn))
7121 {
7122 if (! INSN_P (insn))
7123 continue;
7124
7125 if (GET_CODE (insn) == CALL_INSN)
7126 {
7127 bool clobbers_all = false;
7128 #ifdef NON_SAVING_SETJMP
7129 if (NON_SAVING_SETJMP
7130 && find_reg_note (insn, REG_SETJMP, NULL_RTX))
7131 clobbers_all = true;
7132 #endif
7133
7134 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7135 if (clobbers_all
7136 || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
7137 last_set_in[regno] = INSN_UID (insn);
7138 }
7139
7140 pat = PATTERN (insn);
7141 compute_store_table_current_insn = insn;
7142 note_stores (pat, reg_set_info, NULL);
7143 }
7144
7145 /* Record the set registers. */
7146 for (regno = 0; regno < max_gcse_regno; regno++)
7147 if (last_set_in[regno])
7148 SET_BIT (reg_set_in_block[bb->index], regno);
7149
7150 /* Now find the stores. */
7151 memset (already_set, 0, sizeof (int) * max_gcse_regno);
7152 regvec = already_set;
7153 for (insn = bb->head;
7154 insn != NEXT_INSN (bb->end);
7155 insn = NEXT_INSN (insn))
7156 {
7157 if (! INSN_P (insn))
7158 continue;
7159
7160 if (GET_CODE (insn) == CALL_INSN)
7161 {
7162 bool clobbers_all = false;
7163 #ifdef NON_SAVING_SETJMP
7164 if (NON_SAVING_SETJMP
7165 && find_reg_note (insn, REG_SETJMP, NULL_RTX))
7166 clobbers_all = true;
7167 #endif
7168
7169 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7170 if (clobbers_all
7171 || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
7172 already_set[regno] = 1;
7173 }
7174
7175 pat = PATTERN (insn);
7176 note_stores (pat, reg_set_info, NULL);
7177
7178 /* Now that we've marked regs, look for stores. */
7179 find_moveable_store (insn, already_set, last_set_in);
7180
7181 /* Unmark regs that are no longer set. */
7182 for (regno = 0; regno < max_gcse_regno; regno++)
7183 if (last_set_in[regno] == INSN_UID (insn))
7184 last_set_in[regno] = 0;
7185 }
7186
7187 /* Clear temporary marks. */
7188 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
7189 {
7190 LAST_AVAIL_CHECK_FAILURE(ptr) = NULL_RTX;
7191 if (ANTIC_STORE_LIST (ptr)
7192 && (tmp = XEXP (ANTIC_STORE_LIST (ptr), 0)) == NULL_RTX)
7193 ANTIC_STORE_LIST (ptr) = XEXP (ANTIC_STORE_LIST (ptr), 1);
7194 }
7195 }
7196
7197 /* Remove the stores that are not available anywhere, as there will
7198 be no opportunity to optimize them. */
7199 for (ptr = pre_ldst_mems, prev_next_ptr_ptr = &pre_ldst_mems;
7200 ptr != NULL;
7201 ptr = *prev_next_ptr_ptr)
7202 {
7203 if (!AVAIL_STORE_LIST (ptr))
7204 {
7205 *prev_next_ptr_ptr = ptr->next;
7206 free_ldst_entry (ptr);
7207 }
7208 else
7209 prev_next_ptr_ptr = &ptr->next;
7210 }
7211
7212 ret = enumerate_ldsts ();
7213
7214 if (gcse_file)
7215 {
7216 fprintf (gcse_file, "ST_avail and ST_antic (shown under loads..)\n");
7217 print_ldst_list (gcse_file);
7218 }
7219
7220 free (last_set_in);
7221 free (already_set);
7222 return ret;
7223 }
7224
7225 /* Check to see if the load X is aliased with STORE_PATTERN.
7226 AFTER is true if we are checking the case when STORE_PATTERN occurs
7227 after X. */
7228
7229 static bool
7230 load_kills_store (rtx x, rtx store_pattern, int after)
7231 {
7232 if (after)
7233 return anti_dependence (x, store_pattern);
7234 else
7235 return true_dependence (store_pattern, GET_MODE (store_pattern), x,
7236 rtx_addr_varies_p);
7237 }
7238
7239 /* Go through the entire insn X, looking for any loads which might alias
7240 STORE_PATTERN. Return true if found.
7241 AFTER is true if we are checking the case when STORE_PATTERN occurs
7242 after the insn X. */
7243
7244 static bool
7245 find_loads (rtx x, rtx store_pattern, int after)
7246 {
7247 const char * fmt;
7248 int i, j;
7249 int ret = false;
7250
7251 if (!x)
7252 return false;
7253
7254 if (GET_CODE (x) == SET)
7255 x = SET_SRC (x);
7256
7257 if (GET_CODE (x) == MEM)
7258 {
7259 if (load_kills_store (x, store_pattern, after))
7260 return true;
7261 }
7262
7263 /* Recursively process the insn. */
7264 fmt = GET_RTX_FORMAT (GET_CODE (x));
7265
7266 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0 && !ret; i--)
7267 {
7268 if (fmt[i] == 'e')
7269 ret |= find_loads (XEXP (x, i), store_pattern, after);
7270 else if (fmt[i] == 'E')
7271 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
7272 ret |= find_loads (XVECEXP (x, i, j), store_pattern, after);
7273 }
7274 return ret;
7275 }
7276
7277 /* Check if INSN kills the store pattern X (is aliased with it).
7278 AFTER is true if we are checking the case when store X occurs
7279 after the insn. Return true if it does. */
7280
7281 static bool
7282 store_killed_in_insn (rtx x, rtx x_regs, rtx insn, int after)
7283 {
7284 rtx reg, base, note;
7285
7286 if (!INSN_P (insn))
7287 return false;
7288
7289 if (GET_CODE (insn) == CALL_INSN)
7290 {
7291 /* A normal or pure call might read from pattern,
7292 but a const call will not. */
7293 if (! CONST_OR_PURE_CALL_P (insn) || pure_call_p (insn))
7294 return true;
7295
7296 /* But even a const call reads its parameters. Check whether the
7297 base of any of the registers used in the mem is the stack pointer. */
7298 for (reg = x_regs; reg; reg = XEXP (reg, 1))
7299 {
7300 base = find_base_term (XEXP (reg, 0));
7301 if (!base
7302 || (GET_CODE (base) == ADDRESS
7303 && GET_MODE (base) == Pmode
7304 && XEXP (base, 0) == stack_pointer_rtx))
7305 return true;
7306 }
7307
7308 return false;
7309 }
7310
7311 if (GET_CODE (PATTERN (insn)) == SET)
7312 {
7313 rtx pat = PATTERN (insn);
7314 rtx dest = SET_DEST (pat);
7315
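/* A store through a bit-field extract still writes the object
   underneath, so strip the extract and look at that object.  */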
7316 if (GET_CODE (dest) == SIGN_EXTRACT
7317 || GET_CODE (dest) == ZERO_EXTRACT)
7318 dest = XEXP (dest, 0);
7319
7320 /* Check for memory stores to aliased objects. */
7321 if (GET_CODE (dest) == MEM
7322 && !expr_equiv_p (dest, x))
7323 {
7324 if (after)
7325 {
7326 if (output_dependence (dest, x))
7327 return true;
7328 }
7329 else
7330 {
7331 if (output_dependence (x, dest))
7332 return true;
7333 }
7334 }
7335 if (find_loads (SET_SRC (pat), x, after))
7336 return true;
7337 }
7338 else if (find_loads (PATTERN (insn), x, after))
7339 return true;
7340
7341 /* If this insn has a REG_EQUAL or REG_EQUIV note referencing a memory
7342 location aliased with X, then this insn kills X. */
7343 note = find_reg_equal_equiv_note (insn);
7344 if (! note)
7345 return false;
7346 note = XEXP (note, 0);
7347
7348 /* However, if the note represents a must alias rather than a may
7349 alias relationship, then it does not kill X. */
7350 if (expr_equiv_p (note, x))
7351 return false;
7352
7353 /* See if there are any aliased loads in the note. */
7354 return find_loads (note, x, after);
7355 }
7356
7357 /* Returns true if the expression X is loaded or clobbered on or after INSN
7358 within basic block BB. REGS_SET_AFTER is a bitmap of registers set in
7359 or after the insn. X_REGS is the list of registers mentioned in X. If the
7360 store is killed, return in FAIL_INSN the last insn in which that happens. */
7361
7362 static bool
7363 store_killed_after (rtx x, rtx x_regs, rtx insn, basic_block bb,
7364 int *regs_set_after, rtx *fail_insn)
7365 {
7366 rtx last = bb->end, act;
7367
7368 if (!store_ops_ok (x_regs, regs_set_after))
7369 {
7370 /* We do not know where it will happen. */
7371 if (fail_insn)
7372 *fail_insn = NULL_RTX;
7373 return true;
7374 }
7375
7376 /* Scan from the end, so that fail_insn is determined correctly. */
7377 for (act = last; act != PREV_INSN (insn); act = PREV_INSN (act))
7378 if (store_killed_in_insn (x, x_regs, act, false))
7379 {
7380 if (fail_insn)
7381 *fail_insn = act;
7382 return true;
7383 }
7384
7385 return false;
7386 }
7387
7388 /* Returns true if the expression X is loaded or clobbered on or before INSN
7389 within basic block BB. X_REGS is the list of registers mentioned in X.
7390 REGS_SET_BEFORE is a bitmap of registers set before or in this insn. */
7391 static bool
7392 store_killed_before (rtx x, rtx x_regs, rtx insn, basic_block bb,
7393 int *regs_set_before)
7394 {
7395 rtx first = bb->head;
7396
7397 if (!store_ops_ok (x_regs, regs_set_before))
7398 return true;
7399
7400 for ( ; insn != PREV_INSN (first); insn = PREV_INSN (insn))
7401 if (store_killed_in_insn (x, x_regs, insn, true))
7402 return true;
7403
7404 return false;
7405 }
7406
7407 /* Fill in available, anticipatable, transparent and kill vectors in
7408 STORE_DATA, based on lists of available and anticipatable stores. */
7409 static void
7410 build_store_vectors (void)
7411 {
7412 basic_block bb;
7413 int *regs_set_in_block;
7414 rtx insn, st;
7415 struct ls_expr * ptr;
7416 unsigned regno;
7417
7418 /* Build the gen_vector. This is any store in the table which is not killed
7419 by aliasing later in its block. */
7420 ae_gen = sbitmap_vector_alloc (last_basic_block, num_stores);
7421 sbitmap_vector_zero (ae_gen, last_basic_block);
7422
7423 st_antloc = sbitmap_vector_alloc (last_basic_block, num_stores);
7424 sbitmap_vector_zero (st_antloc, last_basic_block);
7425
7426 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
7427 {
7428 for (st = AVAIL_STORE_LIST (ptr); st != NULL; st = XEXP (st, 1))
7429 {
7430 insn = XEXP (st, 0);
7431 bb = BLOCK_FOR_INSN (insn);
7432
7433 /* If we've already seen an available expression in this block,
7434 we can delete this one (it occurs earlier in the block). We'll
7435 copy the SRC expression to an unused register in case there
7436 are any side effects. */
7437 if (TEST_BIT (ae_gen[bb->index], ptr->index))
7438 {
7439 rtx r = gen_reg_rtx (GET_MODE (ptr->pattern));
7440 if (gcse_file)
7441 fprintf (gcse_file, "Removing redundant store:\n");
7442 replace_store_insn (r, XEXP (st, 0), bb, ptr);
7443 continue;
7444 }
7445 SET_BIT (ae_gen[bb->index], ptr->index);
7446 }
7447
7448 for (st = ANTIC_STORE_LIST (ptr); st != NULL; st = XEXP (st, 1))
7449 {
7450 insn = XEXP (st, 0);
7451 bb = BLOCK_FOR_INSN (insn);
7452 SET_BIT (st_antloc[bb->index], ptr->index);
7453 }
7454 }
7455
7456 ae_kill = sbitmap_vector_alloc (last_basic_block, num_stores);
7457 sbitmap_vector_zero (ae_kill, last_basic_block);
7458
7459 transp = sbitmap_vector_alloc (last_basic_block, num_stores);
7460 sbitmap_vector_zero (transp, last_basic_block);
7461 regs_set_in_block = xmalloc (sizeof (int) * max_gcse_regno);
7462
7463 FOR_EACH_BB (bb)
7464 {
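/* Flatten the per-block register-set bitmap into the int array form
   that store_ops_ok expects.  */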
7465 for (regno = 0; regno < max_gcse_regno; regno++)
7466 regs_set_in_block[regno] = TEST_BIT (reg_set_in_block[bb->index], regno);
7467
7468 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
7469 {
7470 if (store_killed_after (ptr->pattern, ptr->pattern_regs, bb->head,
7471 bb, regs_set_in_block, NULL))
7472 {
7473 /* It should not be necessary to consider the expression
7474 killed if it is both anticipatable and available. */
7475 if (!TEST_BIT (st_antloc[bb->index], ptr->index)
7476 || !TEST_BIT (ae_gen[bb->index], ptr->index))
7477 SET_BIT (ae_kill[bb->index], ptr->index);
7478 }
7479 else
7480 SET_BIT (transp[bb->index], ptr->index);
7481 }
7482 }
7483
7484 free (regs_set_in_block);
7485
7486 if (gcse_file)
7487 {
7488 dump_sbitmap_vector (gcse_file, "st_antloc", "", st_antloc, last_basic_block);
7489 dump_sbitmap_vector (gcse_file, "st_kill", "", ae_kill, last_basic_block);
7490 dump_sbitmap_vector (gcse_file, "Transpt", "", transp, last_basic_block);
7491 dump_sbitmap_vector (gcse_file, "st_avloc", "", ae_gen, last_basic_block);
7492 }
7493 }
7494
7495 /* Insert an instruction at the beginning of a basic block, and update
7496 the BLOCK_HEAD if needed. */
7497
7498 static void
7499 insert_insn_start_bb (rtx insn, basic_block bb)
7500 {
7501 /* Insert at start of successor block. */
7502 rtx prev = PREV_INSN (bb->head);
7503 rtx before = bb->head;
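/* Step past the block's CODE_LABEL and NOTE_INSN_BASIC_BLOCK note so
   the new insn is emitted after them.  */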
7504 while (before != 0)
7505 {
7506 if (GET_CODE (before) != CODE_LABEL
7507 && (GET_CODE (before) != NOTE
7508 || NOTE_LINE_NUMBER (before) != NOTE_INSN_BASIC_BLOCK))
7509 break;
7510 prev = before;
7511 if (prev == bb->end)
7512 break;
7513 before = NEXT_INSN (before);
7514 }
7515
7516 insn = emit_insn_after (insn, prev);
7517
7518 if (gcse_file)
7519 {
7520 fprintf (gcse_file, "STORE_MOTION insert store at start of BB %d:\n",
7521 bb->index);
7522 print_inline_rtx (gcse_file, insn, 6);
7523 fprintf (gcse_file, "\n");
7524 }
7525 }
7526
7527 /* This routine will insert a store on an edge. EXPR is the ldst entry for
7528 the memory reference, and E is the edge to insert it on. Returns nonzero
7529 if an edge insertion was performed. */
7530
7531 static int
7532 insert_store (struct ls_expr * expr, edge e)
7533 {
7534 rtx reg, insn;
7535 basic_block bb;
7536 edge tmp;
7537
7538 /* We did all the deletes before this insert, so if we didn't delete a
7539 store, then we haven't set the reaching reg yet either. */
7540 if (expr->reaching_reg == NULL_RTX)
7541 return 0;
7542
7543 if (e->flags & EDGE_FAKE)
7544 return 0;
7545
7546 reg = expr->reaching_reg;
7547 insn = gen_move_insn (copy_rtx (expr->pattern), reg);
7548
7549 /* If we are inserting this expression on ALL predecessor edges of a BB,
7550 insert it at the start of the BB, and reset the insert bits on the other
7551 edges so we don't try to insert it there as well. */
7552 bb = e->dest;
7553 for (tmp = e->dest->pred; tmp ; tmp = tmp->pred_next)
7554 if (!(tmp->flags & EDGE_FAKE))
7555 {
7556 int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
7557 if (index == EDGE_INDEX_NO_EDGE)
7558 abort ();
7559 if (! TEST_BIT (pre_insert_map[index], expr->index))
7560 break;
7561 }
7562
7563 /* If tmp is NULL, we found an insertion on every edge, blank the
7564 insertion vector for these edges, and insert at the start of the BB. */
7565 if (!tmp && bb != EXIT_BLOCK_PTR)
7566 {
7567 for (tmp = e->dest->pred; tmp ; tmp = tmp->pred_next)
7568 {
7569 int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
7570 RESET_BIT (pre_insert_map[index], expr->index);
7571 }
7572 insert_insn_start_bb (insn, bb);
7573 return 0;
7574 }
7575
7576 /* We can't insert on this edge, so we'll insert at the head of the
7577 successor block. See Morgan, sec 10.5. */
7578 if ((e->flags & EDGE_ABNORMAL) == EDGE_ABNORMAL)
7579 {
7580 insert_insn_start_bb (insn, bb);
7581 return 0;
7582 }
7583
7584 insert_insn_on_edge (insn, e);
7585
7586 if (gcse_file)
7587 {
7588 fprintf (gcse_file, "STORE_MOTION insert insn on edge (%d, %d):\n",
7589 e->src->index, e->dest->index);
7590 print_inline_rtx (gcse_file, insn, 6);
7591 fprintf (gcse_file, "\n");
7592 }
7593
7594 return 1;
7595 }
7596
7597 /* Remove any REG_EQUAL or REG_EQUIV notes containing a reference to the
7598 memory location of SMEXPR, in blocks reachable from the store in basic block BB.
7599
7600 This could be rather expensive. */
7601
7602 static void
7603 remove_reachable_equiv_notes (basic_block bb, struct ls_expr *smexpr)
7604 {
7605 edge *stack = xmalloc (sizeof (edge) * n_basic_blocks), act;
7606 sbitmap visited = sbitmap_alloc (last_basic_block);
7607 int stack_top = 0;
7608 rtx last, insn, note;
7609 rtx mem = smexpr->pattern;
7610
7611 sbitmap_zero (visited);
7612 act = bb->succ;
7613
7614 while (1)
7615 {
7616 if (!act)
7617 {
7618 if (!stack_top)
7619 {
7620 free (stack);
7621 sbitmap_free (visited);
7622 return;
7623 }
7624 act = stack[--stack_top];
7625 }
7626 bb = act->dest;
7627
7628 if (bb == EXIT_BLOCK_PTR
7629 || TEST_BIT (visited, bb->index)
7630 || TEST_BIT (ae_kill[bb->index], smexpr->index))
7631 {
7632 act = act->succ_next;
7633 continue;
7634 }
7635 SET_BIT (visited, bb->index);
7636
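/* Scan only up to the anticipatable store of this expression in BB,
   if there is one; otherwise scan the whole block.  */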
7637 if (TEST_BIT (st_antloc[bb->index], smexpr->index))
7638 {
7639 for (last = ANTIC_STORE_LIST (smexpr);
7640 BLOCK_FOR_INSN (XEXP (last, 0)) != bb;
7641 last = XEXP (last, 1))
7642 continue;
7643 last = XEXP (last, 0);
7644 }
7645 else
7646 last = NEXT_INSN (bb->end);
7647
7648 for (insn = bb->head; insn != last; insn = NEXT_INSN (insn))
7649 if (INSN_P (insn))
7650 {
7651 note = find_reg_equal_equiv_note (insn);
7652 if (!note || !expr_equiv_p (XEXP (note, 0), mem))
7653 continue;
7654
7655 if (gcse_file)
7656 fprintf (gcse_file, "STORE_MOTION drop REG_EQUAL note at insn %d:\n",
7657 INSN_UID (insn));
7658 remove_note (insn, note);
7659 }
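/* Move on to the next edge out of the parent block; if BB has
   successors of its own, stack the remaining parent edge and descend
   into BB first.  */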
7660 act = act->succ_next;
7661 if (bb->succ)
7662 {
7663 if (act)
7664 stack[stack_top++] = act;
7665 act = bb->succ;
7666 }
7667 }
7668 }
7669
7670 /* This routine will replace a store with a SET to a specified register. */
7671
7672 static void
7673 replace_store_insn (rtx reg, rtx del, basic_block bb, struct ls_expr *smexpr)
7674 {
7675 rtx insn, mem, note, set, ptr;
7676
7677 mem = smexpr->pattern;
7678 insn = gen_move_insn (reg, SET_SRC (single_set (del)));
7679 insn = emit_insn_after (insn, del);
7680
7681 if (gcse_file)
7682 {
7683 fprintf (gcse_file,
7684 "STORE_MOTION delete insn in BB %d:\n ", bb->index);
7685 print_inline_rtx (gcse_file, del, 6);
7686 fprintf (gcse_file, "\nSTORE MOTION replaced with insn:\n ");
7687 print_inline_rtx (gcse_file, insn, 6);
7688 fprintf (gcse_file, "\n");
7689 }
7690
7691 for (ptr = ANTIC_STORE_LIST (smexpr); ptr; ptr = XEXP (ptr, 1))
7692 if (XEXP (ptr, 0) == del)
7693 {
7694 XEXP (ptr, 0) = insn;
7695 break;
7696 }
7697 delete_insn (del);
7698
7699 /* Now we must handle REG_EQUAL notes whose contents are equal to the mem;
7700 they are no longer accurate once they are reached by this definition,
7701 so drop them. */
7702 for (; insn != NEXT_INSN (bb->end); insn = NEXT_INSN (insn))
7703 if (INSN_P (insn))
7704 {
7705 set = single_set (insn);
7706 if (!set)
7707 continue;
7708 if (expr_equiv_p (SET_DEST (set), mem))
7709 return;
7710 note = find_reg_equal_equiv_note (insn);
7711 if (!note || !expr_equiv_p (XEXP (note, 0), mem))
7712 continue;
7713
7714 if (gcse_file)
7715 fprintf (gcse_file, "STORE_MOTION drop REG_EQUAL note at insn %d:\n",
7716 INSN_UID (insn));
7717 remove_note (insn, note);
7718 }
7719 remove_reachable_equiv_notes (bb, smexpr);
7720 }
7721
7722
7723 /* Delete a store, but copy the value that would have been stored into
7724 the reaching_reg for later storing. */
7725
7726 static void
7727 delete_store (struct ls_expr * expr, basic_block bb)
7728 {
7729 rtx reg, i, del;
7730
7731 if (expr->reaching_reg == NULL_RTX)
7732 expr->reaching_reg = gen_reg_rtx (GET_MODE (expr->pattern));
7733
7734 reg = expr->reaching_reg;
7735
7736 for (i = AVAIL_STORE_LIST (expr); i; i = XEXP (i, 1))
7737 {
7738 del = XEXP (i, 0);
7739 if (BLOCK_FOR_INSN (del) == bb)
7740 {
7741 /* We know there is only one since we deleted redundant
7742 ones during the available computation. */
7743 replace_store_insn (reg, del, bb, expr);
7744 break;
7745 }
7746 }
7747 }
7748
7749 /* Free memory used by store motion. */
7750
7751 static void
7752 free_store_memory (void)
7753 {
7754 free_ldst_mems ();
7755
7756 if (ae_gen)
7757 sbitmap_vector_free (ae_gen);
7758 if (ae_kill)
7759 sbitmap_vector_free (ae_kill);
7760 if (transp)
7761 sbitmap_vector_free (transp);
7762 if (st_antloc)
7763 sbitmap_vector_free (st_antloc);
7764 if (pre_insert_map)
7765 sbitmap_vector_free (pre_insert_map);
7766 if (pre_delete_map)
7767 sbitmap_vector_free (pre_delete_map);
7768 if (reg_set_in_block)
7769 sbitmap_vector_free (reg_set_in_block);
7770
7771 ae_gen = ae_kill = transp = st_antloc = NULL;
7772 pre_insert_map = pre_delete_map = reg_set_in_block = NULL;
7773 }
7774
7775 /* Perform store motion. Much like gcse, except we move expressions the
7776 other way by looking at the flowgraph in reverse. */
7777
7778 static void
7779 store_motion (void)
7780 {
7781 basic_block bb;
7782 int x;
7783 struct ls_expr * ptr;
7784 int update_flow = 0;
7785
7786 if (gcse_file)
7787 {
7788 fprintf (gcse_file, "before store motion\n");
7789 print_rtl (gcse_file, get_insns ());
7790 }
7791
7792 init_alias_analysis ();
7793
7794 /* Find all the available and anticipatable stores. */
7795 num_stores = compute_store_table ();
7796 if (num_stores == 0)
7797 {
7798 sbitmap_vector_free (reg_set_in_block);
7799 end_alias_analysis ();
7800 return;
7801 }
7802
7803 /* Now compute kill & transp vectors. */
7804 build_store_vectors ();
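/* Add fake edges from noreturn calls and from infinite loops to the
   exit block, so that the reverse LCM computation below sees a path
   from every block to the exit.  */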
7805 add_noreturn_fake_exit_edges ();
7806 connect_infinite_loops_to_exit ();
7807
7808 edge_list = pre_edge_rev_lcm (gcse_file, num_stores, transp, ae_gen,
7809 st_antloc, ae_kill, &pre_insert_map,
7810 &pre_delete_map);
7811
7812 /* Now we want to insert the new stores which are going to be needed. */
7813 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
7814 {
7815 FOR_EACH_BB (bb)
7816 if (TEST_BIT (pre_delete_map[bb->index], ptr->index))
7817 delete_store (ptr, bb);
7818
7819 for (x = 0; x < NUM_EDGES (edge_list); x++)
7820 if (TEST_BIT (pre_insert_map[x], ptr->index))
7821 update_flow |= insert_store (ptr, INDEX_EDGE (edge_list, x));
7822 }
7823
7824 if (update_flow)
7825 commit_edge_insertions ();
7826
7827 free_store_memory ();
7828 free_edge_list (edge_list);
7829 remove_fake_edges ();
7830 end_alias_analysis ();
7831 }
7832
7833 \f
7834 /* Entry point for jump bypassing optimization pass. */
7835
7836 int
7837 bypass_jumps (FILE *file)
7838 {
7839 int changed;
7840
7841 /* We do not construct an accurate cfg in functions which call
7842 setjmp, so just punt to be safe. */
7843 if (current_function_calls_setjmp)
7844 return 0;
7845
7846 /* For calling dump_foo fns from gdb. */
7847 debug_stderr = stderr;
7848 gcse_file = file;
7849
7850 /* Identify the basic block information for this function, including
7851 successors and predecessors. */
7852 max_gcse_regno = max_reg_num ();
7853
7854 if (file)
7855 dump_flow_info (file);
7856
7857 /* Return if there's nothing to do, or it is too expensive. */
7858 if (n_basic_blocks <= 1 || is_too_expensive (_ ("jump bypassing disabled")))
7859 return 0;
7860
7861 gcc_obstack_init (&gcse_obstack);
7862 bytes_used = 0;
7863
7864 /* We need alias. */
7865 init_alias_analysis ();
7866
7867 /* Record where pseudo-registers are set. This data is kept accurate
7868 during each pass. ??? We could also record hard-reg information here
7869 [since it's unchanging], however it is currently done during hash table
7870 computation.
7871
7872 It may be tempting to compute MEM set information here too, but MEM sets
7873 will be subject to code motion one day and thus we need to compute
7874 information about memory sets when we build the hash tables. */
7875
7876 alloc_reg_set_mem (max_gcse_regno);
7877 compute_sets (get_insns ());
7878
7879 max_gcse_regno = max_reg_num ();
7880 alloc_gcse_mem (get_insns ());
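/* A single constant/copy propagation pass with jump bypassing enabled
   does all the work here.  */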
7881 changed = one_cprop_pass (1, 1, 1);
7882 free_gcse_mem ();
7883
7884 if (file)
7885 {
7886 fprintf (file, "BYPASS of %s: %d basic blocks, ",
7887 current_function_name, n_basic_blocks);
7888 fprintf (file, "%d bytes\n\n", bytes_used);
7889 }
7890
7891 obstack_free (&gcse_obstack, NULL);
7892 free_reg_set_mem ();
7893
7894 /* We are finished with alias. */
7895 end_alias_analysis ();
7896 allocate_reg_info (max_reg_num (), FALSE, FALSE);
7897
7898 return changed;
7899 }
7900
7901 /* Return true if the graph is too expensive to optimize. PASS is the
7902 optimization about to be performed. */
7903
7904 static bool
7905 is_too_expensive (const char *pass)
7906 {
7907 /* Trying to perform global optimizations on flow graphs which have
7908 a high connectivity will take a long time and is unlikely to be
7909 particularly useful.
7910
7911 In normal circumstances a cfg should have about twice as many
7912 edges as blocks. But we do not want to punish small functions
7913 which have a couple of switch statements. Rather than simply
7914 thresholding the number of blocks, use something with a more
7915 graceful degradation. */
7916 if (n_edges > 20000 + n_basic_blocks * 4)
7917 {
7918 if (warn_disabled_optimization)
7919 warning ("%s: %d basic blocks and %d edges/basic block",
7920 pass, n_basic_blocks, n_edges / n_basic_blocks);
7921
7922 return true;
7923 }
7924
7925 /* If allocating memory for the cprop bitmap would take up too much
7926 storage it's better just to disable the optimization. */
7927 if ((n_basic_blocks
7928 * SBITMAP_SET_SIZE (max_reg_num ())
7929 * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
7930 {
7931 if (warn_disabled_optimization)
7932 warning ("%s: %d basic blocks and %d registers",
7933 pass, n_basic_blocks, max_reg_num ());
7934
7935 return true;
7936 }
7937
7938 return false;
7939 }
7940
7941 #include "gt-gcse.h"