/* Global common subexpression elimination/Partial redundancy elimination
   and global constant/copy propagation for GNU compiler.
   Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
   2006, 2007, 2008, 2009 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* TODO
   - reordering of memory allocation and freeing to be more space efficient
   - do rough calc of how many regs are needed in each block, and a rough
     calc of how many regs are available in each class and use that to
     throttle back the code in cases where RTX_COST is minimal.
   - a store to the same address as a load does not kill the load if the
     source of the store is also the destination of the load.  Handling this
     allows more load motion, particularly out of loops.

*/

/* References searched while implementing this.

   Compilers: Principles, Techniques and Tools
   Aho, Sethi, Ullman
   Addison-Wesley, 1988

   Global Optimization by Suppression of Partial Redundancies
   E. Morel, C. Renvoise
   Communications of the ACM, Vol. 22, Num. 2, Feb. 1979

   A Portable Machine-Independent Global Optimizer - Design and Measurements
   Frederick Chow
   Stanford Ph.D. thesis, Dec. 1983

   A Fast Algorithm for Code Movement Optimization
   D.M. Dhamdhere
   SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988

   A Solution to a Problem with Morel and Renvoise's
   Global Optimization by Suppression of Partial Redundancies
   K-H Drechsler, M.P. Stadel
   ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988

   Practical Adaptation of the Global Optimization
   Algorithm of Morel and Renvoise
   D.M. Dhamdhere
   ACM TOPLAS, Vol. 13, Num. 2, Apr. 1991

   Efficiently Computing Static Single Assignment Form and the Control
   Dependence Graph
   R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
   ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991

   Lazy Code Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   What's In a Region? Or Computing Control Dependence Regions in Near-Linear
   Time for Reducible Flow Control
   Thomas Ball
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   An Efficient Representation for Sparse Sets
   Preston Briggs, Linda Torczon
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
   K-H Drechsler, M.P. Stadel
   ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993

   Partial Dead Code Elimination
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Effective Partial Redundancy Elimination
   P. Briggs, K.D. Cooper
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   The Program Structure Tree: Computing Control Regions in Linear Time
   R. Johnson, D. Pearson, K. Pingali
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Optimal Code Motion: Theory and Practice
   J. Knoop, O. Ruthing, B. Steffen
   ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994

   The Power of Assignment Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Global Code Motion / Global Value Numbering
   C. Click
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Value Driven Redundancy Elimination
   L.T. Simpson
   Rice University Ph.D. thesis, Apr. 1996

   Value Numbering
   L.T. Simpson
   Massively Scalar Compiler Project, Rice University, Sep. 1996

   High Performance Compilers for Parallel Computing
   Michael Wolfe
   Addison-Wesley, 1996

   Advanced Compiler Design and Implementation
   Steven Muchnick
   Morgan Kaufmann, 1997

   Building an Optimizing Compiler
   Robert Morgan
   Digital Press, 1998

   People wishing to speed up the code here should read:
     Elimination Algorithms for Data Flow Analysis
     B.G. Ryder, M.C. Paull
     ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986

     How to Analyze Large Programs Efficiently and Informatively
     D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
     ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   People wishing to do something different can find various possibilities
   in the above papers and elsewhere.
*/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"

#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "basic-block.h"
#include "output.h"
#include "function.h"
#include "expr.h"
#include "except.h"
#include "ggc.h"
#include "params.h"
#include "cselib.h"
#include "intl.h"
#include "obstack.h"
#include "timevar.h"
#include "tree-pass.h"
#include "hashtab.h"
#include "df.h"
#include "dbgcnt.h"
#include "target.h"

/* Propagate flow information through back edges and thus enable PRE to
   move loop invariant calculations out of loops.

   Originally this tended to create worse overall code, but several
   improvements during the development of PRE seem to have made following
   back edges generally a win.

   Note that much of the loop invariant code motion done here would normally
   be done by loop.c, which has more heuristics for when to move invariants
   out of loops.  At some point we might need to move some of those
   heuristics into gcse.c.  */

/* We support GCSE via Partial Redundancy Elimination.  PRE optimizations
   are a superset of those done by GCSE.

   We perform the following steps:

   1) Compute table of places where registers are set.

   2) Perform copy/constant propagation.

   3) Perform global cse using lazy code motion if not optimizing
      for size, or code hoisting if we are.

   4) Perform another pass of copy/constant propagation.  Try to bypass
      conditional jumps if the condition can be computed from a value of
      an incoming edge.

   5) Perform store motion.

   Two passes of copy/constant propagation are done because the first one
   enables more GCSE and the second one helps to clean up the copies that
   GCSE creates.  This is needed more for PRE than for Classic because Classic
   GCSE will try to use an existing register containing the common
   subexpression rather than create a new one.  This is harder to do for PRE
   because of the code motion (which Classic GCSE doesn't do).

   Expressions we are interested in GCSE-ing are of the form
   (set (pseudo-reg) (expression)).
   Function want_to_gcse_p says what these are.

   In addition, expressions in REG_EQUAL notes are candidates for GCSE-ing.
   This allows PRE to hoist expressions that are expressed in multiple insns,
   such as complex address calculations (e.g. for PIC code, or loads with a
   high part and a low part).

   PRE handles moving invariant expressions out of loops (by treating them as
   partially redundant).

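   As an illustrative sketch (an editorial addition, not part of the
   original commentary), consider a loop such as

	while (test)
	  {
	    x = a + b;	// a and b are not modified inside the loop
	    ...
	  }

   The expression "a + b" is available along the loop's back edge but not
   along the entry edge, i.e. it is partially redundant.  PRE inserts
   "tmp = a + b" on the entry edge and rewrites the body as "x = tmp",
   which is exactly loop invariant code motion.
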
   Eventually it would be nice to replace cse.c/gcse.c with SSA (static single
   assignment) based GVN (global value numbering).  L. T. Simpson's paper
   (Rice University) on value numbering is a useful reference for this.

   **********************

   We used to support multiple passes but there are diminishing returns in
   doing so.  The first pass usually makes 90% of the changes that are doable.
   A second pass can make a few more changes made possible by the first pass.
   Experiments show any further passes don't make enough changes to justify
   the expense.

   A study of spec92 using an unlimited number of passes:
   [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
   [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
   [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1

   It was found that doing copy propagation between each pass enables further
   substitutions.

   This study was done before expressions in REG_EQUAL notes were added as
   candidate expressions for optimization, and before the GIMPLE optimizers
   were added.  Multiple passes are probably even less efficient now than
   at the time when the study was conducted.

   PRE is quite expensive in complicated functions because the DFA can take
   a while to converge.  Hence we only perform one pass.

   **********************

   The steps for PRE are:

   1) Build the hash table of expressions we wish to GCSE (expr_hash_table).

   2) Perform the data flow analysis for PRE.

   3) Delete the redundant instructions.

   4) Insert the required copies [if any] that make the partially
      redundant instructions fully redundant.

   5) For other reaching expressions, insert an instruction to copy the value
      to a newly created pseudo that will reach the redundant instruction.

   The deletion is done first so that when we do insertions we
   know which pseudo reg to use.

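   An illustrative sketch of steps 3-5 (an editorial addition): consider a
   diamond in which only one predecessor computes the expression:

	bb1: x = a + b		bb2: (no computation of a + b)
	        \		       /
	         bb3: y = a + b        <- partially redundant

   Step 3 deletes the computation in bb3, rewriting it as "y = tmp" using
   the pseudo TMP chosen for the expression; step 4 inserts "tmp = a + b"
   on the edge from bb2; and step 5 inserts the copy "tmp = x" after the
   occurrence in bb1 so the value reaches bb3 on that path as well.
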
   Various papers have argued that PRE DFA is expensive (O(n^2)) and others
   argue it is not.  The number of iterations for the algorithm to converge
   is typically 2-4 so I don't view it as that expensive (relatively speaking).

   PRE GCSE depends heavily on the second CSE pass to clean up the copies
   we create.  To make an expression reach the place where it's redundant,
   the result of the expression is copied to a new register, and the redundant
   expression is deleted by replacing it with this new register.  Classic GCSE
   doesn't have this problem as much, because it computes the reaching defs of
   each register in each block and thus can try to use an existing
   register.  */
\f
/* GCSE global vars.  */

/* Set to non-zero if CSE should run after all GCSE optimizations are done.  */
int flag_rerun_cse_after_global_opts;

/* An obstack for our working variables.  */
static struct obstack gcse_obstack;

struct reg_use { rtx reg_rtx; };

/* Hash table of expressions.  */

struct expr
{
  /* The expression (SET_SRC for expressions, PATTERN for assignments).  */
  rtx expr;
  /* Index in the available expression bitmaps.  */
  int bitmap_index;
  /* Next entry with the same hash.  */
  struct expr *next_same_hash;
  /* List of anticipatable occurrences in basic blocks in the function.
     An "anticipatable occurrence" is one that is the first occurrence in the
     basic block, the operands are not modified in the basic block prior
     to the occurrence and the output is not used between the start of
     the block and the occurrence.  */
  struct occr *antic_occr;
  /* List of available occurrences in basic blocks in the function.
     An "available occurrence" is one that is the last occurrence in the
     basic block and the operands are not modified by following statements in
     the basic block [including this insn].  */
  struct occr *avail_occr;
  /* Non-null if the computation is PRE redundant.
     The value is the newly created pseudo-reg to record a copy of the
     expression in all the places that reach the redundant copy.  */
  rtx reaching_reg;
};

/* Occurrence of an expression.
   There is one per basic block.  If a pattern appears more than once the
   last appearance is used [or first for anticipatable expressions].  */

struct occr
{
  /* Next occurrence of this expression.  */
  struct occr *next;
  /* The insn that computes the expression.  */
  rtx insn;
  /* Nonzero if this [anticipatable] occurrence has been deleted.  */
  char deleted_p;
  /* Nonzero if this [available] occurrence has been copied to
     reaching_reg.  */
  /* ??? This is mutually exclusive with deleted_p, so they could share
     the same byte.  */
  char copied_p;
};

/* Expression and copy propagation hash tables.
   Each hash table is an array of buckets.
   ??? It is known that if it were an array of entries, structure elements
   `next_same_hash' and `bitmap_index' wouldn't be necessary.  However, it is
   not clear whether in the final analysis a sufficient amount of memory would
   be saved as the size of the available expression bitmaps would be larger
   [one could build a mapping table without holes afterwards though].
   Someday I'll perform the computation and figure it out.  */

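/* For illustration (an editorial sketch, not original commentary), a table
   of size 5 holding three expressions might look like:

	table[0] --> expr A --> expr C --> NULL    - A and C collide
	table[1] --> NULL
	table[2] --> expr B --> NULL
	table[3] --> NULL
	table[4] --> NULL

   Each expr's `bitmap_index' (0, 1, 2, ...) is its column in per-block
   sbitmaps such as ANTLOC, COMP and TRANSP.  */
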
struct hash_table_d
{
  /* The table itself.
     This is an array of `expr_hash_table_size' elements.  */
  struct expr **table;

  /* Size of the hash table, in elements.  */
  unsigned int size;

  /* Number of hash table elements.  */
  unsigned int n_elems;

  /* Nonzero for the copy propagation (set) hash table, zero for the
     expression hash table.  */
  int set_p;
};

/* Expression hash table.  */
static struct hash_table_d expr_hash_table;

/* Copy propagation hash table.  */
static struct hash_table_d set_hash_table;

/* This is a list of expressions which are MEMs and will be used by load
   or store motion.
   Load motion tracks MEMs which aren't killed by
   anything except itself (i.e., loads and stores to a single location).
   We can then allow movement of these MEM refs with a little special
   allowance.  (All stores copy the same value to the reaching reg used
   for the loads.)  This means all values used to store into memory must have
   no side effects so we can re-issue the setter value.
   Store Motion uses this structure as an expression table to track stores
   which look interesting, and might be movable towards the exit block.  */

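/* An illustrative sketch (editorial addition): given

	insn 10: (set (reg 100) (mem (reg 99)))    - a load
	insn 20: (set (mem (reg 99)) (reg 101))    - a store

   both insns reference the same tracked MEM, so a single ls_expr would
   record the pattern (mem (reg 99)) with insn 10 on its `loads' list and
   insn 20 on its `stores' list.  */
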
struct ls_expr
{
  struct expr *expr;		/* Gcse expression reference for LM.  */
  rtx pattern;			/* Pattern of this mem.  */
  rtx pattern_regs;		/* List of registers mentioned by the mem.  */
  rtx loads;			/* INSN list of loads seen.  */
  rtx stores;			/* INSN list of stores seen.  */
  struct ls_expr *next;		/* Next in the list.  */
  int invalid;			/* Invalid for some reason.  */
  int index;			/* If it maps to a bitmap index.  */
  unsigned int hash_index;	/* Index when in a hash table.  */
  rtx reaching_reg;		/* Register to use when re-writing.  */
};

/* Array of implicit set patterns indexed by basic block index.  */
static rtx *implicit_sets;

/* Head of the list of load/store memory refs.  */
static struct ls_expr *pre_ldst_mems = NULL;

/* Hashtable for the load/store memory refs.  */
static htab_t pre_ldst_table = NULL;

/* Bitmap containing one bit for each register in the program.
   Used when performing GCSE to track which registers have been set since
   the start of the basic block.  */
static regset reg_set_bitmap;

/* Array, indexed by basic block number, for a list of insns which modify
   memory within that block.  */
static rtx *modify_mem_list;
static bitmap modify_mem_list_set;

/* This array parallels modify_mem_list, but is kept canonicalized.  */
static rtx *canon_modify_mem_list;

/* Bitmap indexed by block numbers to record which blocks contain
   function calls.  */
static bitmap blocks_with_calls;

/* Various variables for statistics gathering.  */

/* Memory used in a pass.
   This isn't intended to be absolutely precise.  Its intent is only
   to keep an eye on memory usage.  */
static int bytes_used;

/* GCSE substitutions made.  */
static int gcse_subst_count;
/* Number of copy instructions created.  */
static int gcse_create_count;
/* Number of local constants propagated.  */
static int local_const_prop_count;
/* Number of local copies propagated.  */
static int local_copy_prop_count;
/* Number of global constants propagated.  */
static int global_const_prop_count;
/* Number of global copies propagated.  */
static int global_copy_prop_count;
\f
/* For available exprs.  */
static sbitmap *ae_kill;
\f
static void compute_can_copy (void);
static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
static void *gcse_alloc (unsigned long);
static void alloc_gcse_mem (void);
static void free_gcse_mem (void);
static void hash_scan_insn (rtx, struct hash_table_d *);
static void hash_scan_set (rtx, rtx, struct hash_table_d *);
static void hash_scan_clobber (rtx, rtx, struct hash_table_d *);
static void hash_scan_call (rtx, rtx, struct hash_table_d *);
static int want_to_gcse_p (rtx);
static bool gcse_constant_p (const_rtx);
static int oprs_unchanged_p (const_rtx, const_rtx, int);
static int oprs_anticipatable_p (const_rtx, const_rtx);
static int oprs_available_p (const_rtx, const_rtx);
static void insert_expr_in_table (rtx, enum machine_mode, rtx, int, int,
				  struct hash_table_d *);
static void insert_set_in_table (rtx, rtx, struct hash_table_d *);
static unsigned int hash_expr (const_rtx, enum machine_mode, int *, int);
static unsigned int hash_set (int, int);
static int expr_equiv_p (const_rtx, const_rtx);
static void record_last_reg_set_info (rtx, int);
static void record_last_mem_set_info (rtx);
static void record_last_set_info (rtx, const_rtx, void *);
static void compute_hash_table (struct hash_table_d *);
static void alloc_hash_table (struct hash_table_d *, int);
static void free_hash_table (struct hash_table_d *);
static void compute_hash_table_work (struct hash_table_d *);
static void dump_hash_table (FILE *, const char *, struct hash_table_d *);
static struct expr *lookup_set (unsigned int, struct hash_table_d *);
static struct expr *next_set (unsigned int, struct expr *);
static void reset_opr_set_tables (void);
static int oprs_not_set_p (const_rtx, const_rtx);
static void mark_call (rtx);
static void mark_set (rtx, rtx);
static void mark_clobber (rtx, rtx);
static void mark_oprs_set (rtx);
static void alloc_cprop_mem (int, int);
static void free_cprop_mem (void);
static void compute_transp (const_rtx, int, sbitmap *, int);
static void compute_transpout (void);
static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
				      struct hash_table_d *);
static void compute_cprop_data (void);
static void find_used_regs (rtx *, void *);
static int try_replace_reg (rtx, rtx, rtx);
static struct expr *find_avail_set (int, rtx);
static int cprop_jump (basic_block, rtx, rtx, rtx, rtx);
static void mems_conflict_for_gcse_p (rtx, const_rtx, void *);
static int load_killed_in_block_p (const_basic_block, int, const_rtx, int);
static void canon_list_insert (rtx, const_rtx, void *);
static int cprop_insn (rtx);
static void find_implicit_sets (void);
static int one_cprop_pass (void);
static bool constprop_register (rtx, rtx, rtx);
static struct expr *find_bypass_set (int, int);
static bool reg_killed_on_edge (const_rtx, const_edge);
static int bypass_block (basic_block, rtx, rtx);
static int bypass_conditional_jumps (void);
static void alloc_pre_mem (int, int);
static void free_pre_mem (void);
static void compute_pre_data (void);
static int pre_expr_reaches_here_p (basic_block, struct expr *,
				    basic_block);
static void insert_insn_end_basic_block (struct expr *, basic_block, int);
static void pre_insert_copy_insn (struct expr *, rtx);
static void pre_insert_copies (void);
static int pre_delete (void);
static int pre_gcse (void);
static int one_pre_gcse_pass (void);
static void add_label_notes (rtx, rtx);
static void alloc_code_hoist_mem (int, int);
static void free_code_hoist_mem (void);
static void compute_code_hoist_vbeinout (void);
static void compute_code_hoist_data (void);
static int hoist_expr_reaches_here_p (basic_block, int, basic_block, char *);
static int hoist_code (void);
static int one_code_hoisting_pass (void);
static rtx process_insert_insn (struct expr *);
static int pre_edge_insert (struct edge_list *, struct expr **);
static int pre_expr_reaches_here_p_work (basic_block, struct expr *,
					 basic_block, char *);
static struct ls_expr *ldst_entry (rtx);
static void free_ldst_entry (struct ls_expr *);
static void free_ldst_mems (void);
static void print_ldst_list (FILE *);
static struct ls_expr *find_rtx_in_ldst (rtx);
static inline struct ls_expr *first_ls_expr (void);
static inline struct ls_expr *next_ls_expr (struct ls_expr *);
static int simple_mem (const_rtx);
static void invalidate_any_buried_refs (rtx);
static void compute_ld_motion_mems (void);
static void trim_ld_motion_mems (void);
static void update_ld_motion_stores (struct expr *);
static void free_insn_expr_list_list (rtx *);
static void clear_modify_mem_tables (void);
static void free_modify_mem_tables (void);
static rtx gcse_emit_move_after (rtx, rtx, rtx);
static void local_cprop_find_used_regs (rtx *, void *);
static bool do_local_cprop (rtx, rtx);
static int local_cprop_pass (void);
static bool is_too_expensive (const char *);

#define GNEW(T)			((T *) gmalloc (sizeof (T)))
#define GCNEW(T)		((T *) gcalloc (1, sizeof (T)))

#define GNEWVEC(T, N)		((T *) gmalloc (sizeof (T) * (N)))
#define GCNEWVEC(T, N)		((T *) gcalloc ((N), sizeof (T)))

#define GNEWVAR(T, S)		((T *) gmalloc ((S)))
#define GCNEWVAR(T, S)		((T *) gcalloc (1, (S)))

#define GOBNEW(T)		((T *) gcse_alloc (sizeof (T)))
#define GOBNEWVAR(T, S)		((T *) gcse_alloc ((S)))
\f
/* Misc. utilities.  */

/* Nonzero for each mode that supports (set (reg) (reg)).
   This is trivially true for integer and floating point values.
   It may or may not be true for condition codes.  */
static char can_copy[(int) NUM_MACHINE_MODES];

/* Compute which modes support reg/reg copy operations.  */

static void
compute_can_copy (void)
{
  int i;
#ifndef AVOID_CCMODE_COPIES
  rtx reg, insn;
#endif
  memset (can_copy, 0, NUM_MACHINE_MODES);

  start_sequence ();
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    if (GET_MODE_CLASS (i) == MODE_CC)
      {
#ifdef AVOID_CCMODE_COPIES
	can_copy[i] = 0;
#else
	reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
	insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
	if (recog (PATTERN (insn), insn, NULL) >= 0)
	  can_copy[i] = 1;
#endif
      }
    else
      can_copy[i] = 1;

  end_sequence ();
}

/* Returns whether the mode supports reg/reg copy operations.  */

bool
can_copy_p (enum machine_mode mode)
{
  static bool can_copy_init_p = false;

  if (! can_copy_init_p)
    {
      compute_can_copy ();
      can_copy_init_p = true;
    }

  return can_copy[mode] != 0;
}

\f
/* Cover function to xmalloc to record bytes allocated.  */

static void *
gmalloc (size_t size)
{
  bytes_used += size;
  return xmalloc (size);
}

/* Cover function to xcalloc to record bytes allocated.  */

static void *
gcalloc (size_t nelem, size_t elsize)
{
  bytes_used += nelem * elsize;
  return xcalloc (nelem, elsize);
}

/* Cover function to obstack_alloc.  */

static void *
gcse_alloc (unsigned long size)
{
  bytes_used += size;
  return obstack_alloc (&gcse_obstack, size);
}

/* Allocate memory for the reg/memory set tracking tables.
   This is called at the start of each pass.  */

static void
alloc_gcse_mem (void)
{
  /* Allocate vars to track sets of regs.  */
  reg_set_bitmap = BITMAP_ALLOC (NULL);

  /* Allocate array to keep a list of insns which modify memory in each
     basic block.  */
  modify_mem_list = GCNEWVEC (rtx, last_basic_block);
  canon_modify_mem_list = GCNEWVEC (rtx, last_basic_block);
  modify_mem_list_set = BITMAP_ALLOC (NULL);
  blocks_with_calls = BITMAP_ALLOC (NULL);
}

/* Free memory allocated by alloc_gcse_mem.  */

static void
free_gcse_mem (void)
{
  free_modify_mem_tables ();
  BITMAP_FREE (modify_mem_list_set);
  BITMAP_FREE (blocks_with_calls);
}
\f
/* Compute the local properties of each recorded expression.

   Local properties are those that are defined by the block, irrespective of
   other blocks.

   An expression is transparent in a block if its operands are not modified
   in the block.

   An expression is computed (locally available) in a block if it is computed
   at least once and the expression would contain the same value if the
   computation was moved to the end of the block.

   An expression is locally anticipatable in a block if it is computed at
   least once and the expression would contain the same value if the
   computation was moved to the beginning of the block.

   We call this routine for cprop, pre and code hoisting.  They all compute
   basically the same information and thus can easily share this code.

   TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
   properties.  If NULL, then it is not necessary to compute or record that
   particular property.

   TABLE controls which hash table to look at.  If it is the set hash table,
   additionally TRANSP is computed as ~TRANSP, since this is really cprop's
   ABSALTERED.  */

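/* A worked sketch of the three local properties (an editorial addition):
   in a block containing

	r1 = r2 + r3
	r2 = ...
	r4 = r2 + r3

   the expression "r2 + r3" is locally anticipatable (its first occurrence
   comes before r2 is modified), locally available (its last occurrence is
   not followed by a modification of r2 or r3 before the block's end), and
   not transparent (r2 is set within the block).  */
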
static void
compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
			  struct hash_table_d *table)
{
  unsigned int i;

  /* Initialize any bitmaps that were passed in.  */
  if (transp)
    {
      if (table->set_p)
	sbitmap_vector_zero (transp, last_basic_block);
      else
	sbitmap_vector_ones (transp, last_basic_block);
    }

  if (comp)
    sbitmap_vector_zero (comp, last_basic_block);
  if (antloc)
    sbitmap_vector_zero (antloc, last_basic_block);

  for (i = 0; i < table->size; i++)
    {
      struct expr *expr;

      for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
	{
	  int indx = expr->bitmap_index;
	  struct occr *occr;

	  /* The expression is transparent in this block if it is not killed.
	     We start by assuming all are transparent [none are killed], and
	     then reset the bits for those that are.  */
	  if (transp)
	    compute_transp (expr->expr, indx, transp, table->set_p);

	  /* The occurrences recorded in antic_occr are exactly those that
	     we want to set to nonzero in ANTLOC.  */
	  if (antloc)
	    for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
	      {
		SET_BIT (antloc[BLOCK_NUM (occr->insn)], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->deleted_p = 0;
	      }

	  /* The occurrences recorded in avail_occr are exactly those that
	     we want to set to nonzero in COMP.  */
	  if (comp)
	    for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
	      {
		SET_BIT (comp[BLOCK_NUM (occr->insn)], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->copied_p = 0;
	      }

	  /* While we're scanning the table, this is a good place to
	     initialize this.  */
	  expr->reaching_reg = 0;
	}
    }
}
\f
/* Hash table support.  */

struct reg_avail_info
{
  basic_block last_bb;
  int first_set;
  int last_set;
};

static struct reg_avail_info *reg_avail_info;
static basic_block current_bb;


/* See whether X, the source of a set, is something we want to consider for
   GCSE.  */

static int
want_to_gcse_p (rtx x)
{
#ifdef STACK_REGS
  /* On register stack architectures, don't GCSE constants from the
     constant pool, as the benefits are often swamped by the overhead
     of shuffling the register stack between basic blocks.  */
  if (IS_STACK_MODE (GET_MODE (x)))
    x = avoid_constant_pool_reference (x);
#endif

  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case CALL:
      return 0;

    default:
      return can_assign_to_reg_without_clobbers_p (x);
    }
}

/* Used internally by can_assign_to_reg_without_clobbers_p.  */

static GTY(()) rtx test_insn;

/* Return true if we can assign X to a pseudo register such that the
   resulting insn does not result in clobbering a hard register as a
   side-effect.

   Additionally, if the target requires it, check that the resulting insn
   can be copied.  If it cannot, this means that X is special and probably
   has hidden side-effects we don't want to mess with.

   This function is typically used by code motion passes, to verify
   that it is safe to insert an insn without worrying about clobbering
   maybe live hard regs.  */

bool
can_assign_to_reg_without_clobbers_p (rtx x)
{
  int num_clobbers = 0;
  int icode;

  /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
  if (general_operand (x, GET_MODE (x)))
    return true;
  else if (GET_MODE (x) == VOIDmode)
    return false;

  /* Otherwise, check if we can make a valid insn from it.  First initialize
     our test insn if we haven't already.  */
  if (test_insn == 0)
    {
      test_insn
	= make_insn_raw (gen_rtx_SET (VOIDmode,
				      gen_rtx_REG (word_mode,
						   FIRST_PSEUDO_REGISTER * 2),
				      const0_rtx));
      NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
    }

  /* Now make an insn like the one we would make when GCSE'ing and see if
     valid.  */
  PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
  SET_SRC (PATTERN (test_insn)) = x;

  icode = recog (PATTERN (test_insn), test_insn, &num_clobbers);
  if (icode < 0)
    return false;

  if (num_clobbers > 0 && added_clobbers_hard_reg_p (icode))
    return false;

  if (targetm.cannot_copy_insn_p && targetm.cannot_copy_insn_p (test_insn))
    return false;

  return true;
}

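/* An illustrative (hypothetical) caller, sketching how a code motion pass
   might use the predicate above before placing a new computation on edge E;
   NEW_REG, EXPR and E are assumed to be provided by the pass:

	if (can_assign_to_reg_without_clobbers_p (expr))
	  insert_insn_on_edge (gen_rtx_SET (VOIDmode, new_reg, expr), e);
*/
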
/* Return nonzero if the operands of expression X are unchanged from the
   start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
   or from INSN to the end of INSN's basic block (if AVAIL_P != 0).  */

static int
oprs_unchanged_p (const_rtx x, const_rtx insn, int avail_p)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
	struct reg_avail_info *info = &reg_avail_info[REGNO (x)];

	if (info->last_bb != current_bb)
	  return 1;
	if (avail_p)
	  return info->last_set < DF_INSN_LUID (insn);
	else
	  return info->first_set >= DF_INSN_LUID (insn);
      }

    case MEM:
      if (load_killed_in_block_p (current_bb, DF_INSN_LUID (insn),
				  x, avail_p))
	return 0;
      else
	return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return 0;

    case PC:
    case CC0: /*FIXME*/
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 1;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call needed at this
	     level, change it into iteration.  This function is called enough
	     to be worth it.  */
	  if (i == 0)
	    return oprs_unchanged_p (XEXP (x, i), insn, avail_p);

	  else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
	    return 0;
    }

  return 1;
}

/* Used for communication between mems_conflict_for_gcse_p and
   load_killed_in_block_p.  Nonzero if mems_conflict_for_gcse_p finds a
   conflict between two memory references.  */
static int gcse_mems_conflict_p;

/* Used for communication between mems_conflict_for_gcse_p and
   load_killed_in_block_p.  This is the memory reference of a load
   instruction; mems_conflict_for_gcse_p checks whether a memory store
   conflicts with this load.  */
static const_rtx gcse_mem_operand;

/* DEST is the output of an instruction.  If it is a memory reference, and
   possibly conflicts with the load found in gcse_mem_operand, then set
   gcse_mems_conflict_p to a nonzero value.  */

static void
mems_conflict_for_gcse_p (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
			  void *data ATTRIBUTE_UNUSED)
{
  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with the load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */
  if (! MEM_P (dest))
    return;

  /* If we are setting a MEM in our list of specially recognized MEMs,
     don't mark as killed this time.  */

  if (expr_equiv_p (dest, gcse_mem_operand) && pre_ldst_mems != NULL)
    {
      if (!find_rtx_in_ldst (dest))
	gcse_mems_conflict_p = 1;
      return;
    }

  if (true_dependence (dest, GET_MODE (dest), gcse_mem_operand,
		       rtx_addr_varies_p))
    gcse_mems_conflict_p = 1;
}

/* Return nonzero if the expression in X (a memory reference) is killed
   in block BB before or after the insn with the LUID in UID_LIMIT.
   AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
   before UID_LIMIT.

   To check the entire block, set UID_LIMIT to max_uid + 1 and
   AVAIL_P to 0.  */

static int
load_killed_in_block_p (const_basic_block bb, int uid_limit, const_rtx x,
			int avail_p)
{
  rtx list_entry = modify_mem_list[bb->index];

  /* If this is a read-only MEM then we aren't going to be changing it.  */
  if (MEM_READONLY_P (x))
    return 0;

  while (list_entry)
    {
      rtx setter;
      /* Ignore entries in the list that do not apply.  */
      if ((avail_p
	   && DF_INSN_LUID (XEXP (list_entry, 0)) < uid_limit)
	  || (! avail_p
	      && DF_INSN_LUID (XEXP (list_entry, 0)) > uid_limit))
	{
	  list_entry = XEXP (list_entry, 1);
	  continue;
	}

      setter = XEXP (list_entry, 0);

      /* If SETTER is a call everything is clobbered.  Note that calls
	 to pure functions are never put on the list, so we need not
	 worry about them.  */
      if (CALL_P (setter))
	return 1;

      /* SETTER must be an INSN of some kind that sets memory.  Call
	 note_stores to examine each hunk of memory that is modified.

	 The note_stores interface is pretty limited, so we have to
	 communicate via global variables.  Yuk.  */
      gcse_mem_operand = x;
      gcse_mems_conflict_p = 0;
      note_stores (PATTERN (setter), mems_conflict_for_gcse_p, NULL);
      if (gcse_mems_conflict_p)
	return 1;
      list_entry = XEXP (list_entry, 1);
    }
  return 0;
}

/* Return nonzero if the operands of expression X are unchanged from
   the start of INSN's basic block up to but not including INSN.  */

static int
oprs_anticipatable_p (const_rtx x, const_rtx insn)
{
  return oprs_unchanged_p (x, insn, 0);
}

/* Return nonzero if the operands of expression X are unchanged from
   INSN to the end of INSN's basic block.  */

static int
oprs_available_p (const_rtx x, const_rtx insn)
{
  return oprs_unchanged_p (x, insn, 1);
}

/* Hash expression X.

   MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
   indicating if a volatile operand is found or if the expression contains
   something we don't want to insert in the table.  HASH_TABLE_SIZE is
   the current size of the hash table to be probed.  */

static unsigned int
hash_expr (const_rtx x, enum machine_mode mode, int *do_not_record_p,
	   int hash_table_size)
{
  unsigned int hash;

  *do_not_record_p = 0;

  hash = hash_rtx (x, mode, do_not_record_p,
		   NULL, /*have_reg_qty=*/false);
  return hash % hash_table_size;
}

/* Hash a set of register REGNO.

   Sets are hashed on the register that is set.  This simplifies the PRE copy
   propagation code.

   ??? May need to make things more elaborate.  Later, as necessary.  */

static unsigned int
hash_set (int regno, int hash_table_size)
{
  unsigned int hash;

  hash = regno;
  return hash % hash_table_size;
}

/* Return nonzero if X is equivalent to Y.  */

static int
expr_equiv_p (const_rtx x, const_rtx y)
{
  return exp_equiv_p (x, y, 0, true);
}

/* Insert expression X in INSN in the hash TABLE.
   If it is already present, record it as the last occurrence in INSN's
   basic block.

   MODE is the mode of the value X is being stored into.
   It is only used if X is a CONST_INT.

   ANTIC_P is nonzero if X is an anticipatable expression.
   AVAIL_P is nonzero if X is an available expression.  */

static void
insert_expr_in_table (rtx x, enum machine_mode mode, rtx insn, int antic_p,
		      int avail_p, struct hash_table_d *table)
{
  int found, do_not_record_p;
  unsigned int hash;
  struct expr *cur_expr, *last_expr = NULL;
  struct occr *antic_occr, *avail_occr;

  hash = hash_expr (x, mode, &do_not_record_p, table->size);

  /* Do not insert expression in table if it contains volatile operands,
     or if hash_expr determines the expression is something we don't want
     to or can't handle.  */
  if (do_not_record_p)
    return;

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
    {
      /* If the expression isn't found, save a pointer to the end of
	 the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = GOBNEW (struct expr);
      bytes_used += sizeof (struct expr);
      if (table->table[hash] == NULL)
	/* This is the first pattern that hashed to this index.  */
	table->table[hash] = cur_expr;
      else
	/* Add EXPR to end of this hash chain.  */
	last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.  */
      cur_expr->expr = x;
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
    }

  /* Now record the occurrence(s).  */
  if (antic_p)
    {
      antic_occr = cur_expr->antic_occr;

      if (antic_occr && BLOCK_NUM (antic_occr->insn) != BLOCK_NUM (insn))
	antic_occr = NULL;

      if (antic_occr)
	/* Found another instance of the expression in the same basic block.
	   Prefer the currently recorded one.  We want the first one in the
	   block and the block is scanned from start to end.  */
	; /* nothing to do */
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  antic_occr = GOBNEW (struct occr);
	  bytes_used += sizeof (struct occr);
	  antic_occr->insn = insn;
	  antic_occr->next = cur_expr->antic_occr;
	  antic_occr->deleted_p = 0;
	  cur_expr->antic_occr = antic_occr;
	}
    }

  if (avail_p)
    {
      avail_occr = cur_expr->avail_occr;

      if (avail_occr && BLOCK_NUM (avail_occr->insn) == BLOCK_NUM (insn))
	{
	  /* Found another instance of the expression in the same basic block.
	     Prefer this occurrence to the currently recorded one.  We want
	     the last one in the block and the block is scanned from start
	     to end.  */
	  avail_occr->insn = insn;
	}
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  avail_occr = GOBNEW (struct occr);
	  bytes_used += sizeof (struct occr);
	  avail_occr->insn = insn;
	  avail_occr->next = cur_expr->avail_occr;
	  avail_occr->deleted_p = 0;
	  cur_expr->avail_occr = avail_occr;
	}
    }
}

/* Insert pattern X in INSN in the hash table.
   X is a SET of a reg to either another reg or a constant.
   If it is already present, record it as the last occurrence in INSN's
   basic block.  */

static void
insert_set_in_table (rtx x, rtx insn, struct hash_table_d *table)
{
  int found;
  unsigned int hash;
  struct expr *cur_expr, *last_expr = NULL;
  struct occr *cur_occr;

  gcc_assert (GET_CODE (x) == SET && REG_P (SET_DEST (x)));

  hash = hash_set (REGNO (SET_DEST (x)), table->size);

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
    {
      /* If the expression isn't found, save a pointer to the end of
	 the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = GOBNEW (struct expr);
      bytes_used += sizeof (struct expr);
      if (table->table[hash] == NULL)
	/* This is the first pattern that hashed to this index.  */
	table->table[hash] = cur_expr;
      else
	/* Add EXPR to end of this hash chain.  */
	last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.
	 We must copy X because it can be modified when copy propagation is
	 performed on its operands.  */
      cur_expr->expr = copy_rtx (x);
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
    }

  /* Now record the occurrence.  */
  cur_occr = cur_expr->avail_occr;

  if (cur_occr && BLOCK_NUM (cur_occr->insn) == BLOCK_NUM (insn))
    {
      /* Found another instance of the expression in the same basic block.
	 Prefer this occurrence to the currently recorded one.  We want
	 the last one in the block and the block is scanned from start
	 to end.  */
      cur_occr->insn = insn;
    }
  else
    {
      /* First occurrence of this expression in this basic block.  */
      cur_occr = GOBNEW (struct occr);
      bytes_used += sizeof (struct occr);
      cur_occr->insn = insn;
      cur_occr->next = cur_expr->avail_occr;
      cur_occr->deleted_p = 0;
      cur_expr->avail_occr = cur_occr;
    }
}

/* Determine whether the rtx X should be treated as a constant for
   the purposes of GCSE's constant propagation.  */

static bool
gcse_constant_p (const_rtx x)
{
  /* Consider a COMPARE of two integers constant.  */
  if (GET_CODE (x) == COMPARE
      && CONST_INT_P (XEXP (x, 0))
      && CONST_INT_P (XEXP (x, 1)))
    return true;

  /* Consider a COMPARE of the same registers constant as well,
     as long as they are not floating point registers.  */
  if (GET_CODE (x) == COMPARE
      && REG_P (XEXP (x, 0)) && REG_P (XEXP (x, 1))
      && REGNO (XEXP (x, 0)) == REGNO (XEXP (x, 1))
      && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 0)))
      && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 1))))
    return true;

  /* Since X might be inserted more than once we have to take care that it
     is sharable.  */
  return CONSTANT_P (x) && (GET_CODE (x) != CONST || shared_const_p (x));
}

/* Scan pattern PAT of INSN and add an entry to the hash TABLE (either the
   set table or the expression table).  */

static void
hash_scan_set (rtx pat, rtx insn, struct hash_table_d *table)
{
  rtx src = SET_SRC (pat);
  rtx dest = SET_DEST (pat);
  rtx note;

  if (GET_CODE (src) == CALL)
    hash_scan_call (src, insn, table);

  else if (REG_P (dest))
    {
      unsigned int regno = REGNO (dest);
      rtx tmp;

      /* See if a REG_EQUAL note shows this equivalent to a simpler expression.

	 This allows us to do a single GCSE pass and still eliminate
	 redundant constants, addresses or other expressions that are
	 constructed with multiple instructions.

	 However, keep the original SRC if INSN is a simple reg-reg move.
	 In this case, there will almost always be a REG_EQUAL note on the
	 insn that sets SRC.  By recording the REG_EQUAL value here as SRC
	 for INSN, we miss copy propagation opportunities and we perform the
	 same PRE GCSE operation repeatedly on the same REG_EQUAL value if we
	 do more than one PRE GCSE pass.

	 Note that this does not impede profitable constant propagations.  We
	 "look through" reg-reg sets in lookup_avail_set.  */
      note = find_reg_equal_equiv_note (insn);
      if (note != 0
	  && REG_NOTE_KIND (note) == REG_EQUAL
	  && !REG_P (src)
	  && (table->set_p
	      ? gcse_constant_p (XEXP (note, 0))
	      : want_to_gcse_p (XEXP (note, 0))))
	src = XEXP (note, 0), pat = gen_rtx_SET (VOIDmode, dest, src);

      /* Only record sets of pseudo-regs in the hash table.  */
      if (! table->set_p
	  && regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (dest))
	  /* GCSE commonly inserts instructions after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  /* ??? We can now easily create new EH landing pads at the
	     gimple level, for splitting edges; there's no reason we
	     can't do the same thing at the rtl level.  */
	  && !can_throw_internal (insn)
	  /* Is SET_SRC something we want to gcse?  */
	  && want_to_gcse_p (src)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (pat)
	  /* Don't GCSE if it has attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes, and if the argument slot is used somewhere
	     explicitly, it means the address of the parameter has been
	     taken, so we should not extend the lifetime of the pseudo.  */
	  && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
	{
	  /* An expression is not anticipatable if its operands are
	     modified before this insn or if this is not the only SET in
	     this insn.  The latter condition does not have to mean that
	     SRC itself is not anticipatable, but we just will not be
	     able to handle code motion of insns with multiple sets.  */
	  int antic_p = oprs_anticipatable_p (src, insn)
			&& !multiple_sets (insn);
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = (oprs_available_p (src, insn)
			 && ! JUMP_P (insn));

	  insert_expr_in_table (src, GET_MODE (dest), insn, antic_p,
				avail_p, table);
	}

      /* Record sets for constant/copy propagation.  */
      else if (table->set_p
	       && regno >= FIRST_PSEUDO_REGISTER
	       && ((REG_P (src)
		    && REGNO (src) >= FIRST_PSEUDO_REGISTER
		    && can_copy_p (GET_MODE (dest))
		    && REGNO (src) != regno)
		   || gcse_constant_p (src))
	       /* A copy is not available if its src or dest is subsequently
		  modified.  Here we want to search from INSN+1 on, but
		  oprs_available_p searches from INSN on.  */
	       && (insn == BB_END (BLOCK_FOR_INSN (insn))
		   || (tmp = next_nonnote_insn (insn)) == NULL_RTX
		   || BLOCK_FOR_INSN (tmp) != BLOCK_FOR_INSN (insn)
		   || oprs_available_p (pat, tmp)))
	insert_set_in_table (pat, insn, table);
    }
  /* In case of store we want to consider the memory value as available in
     the REG stored in that memory.  This makes it possible to remove
     redundant loads due to stores to the same location.  */
  else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
    {
      unsigned int regno = REGNO (src);

      /* Do not do this for constant/copy propagation.  */
      if (! table->set_p
	  /* Only record sets of pseudo-regs in the hash table.  */
	  && regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (src))
	  /* GCSE commonly inserts instructions after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  && !can_throw_internal (insn)
	  /* Is SET_DEST something we want to gcse?  */
	  && want_to_gcse_p (dest)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (pat)
	  /* Don't GCSE if it has attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes, and if the argument slot is used somewhere
	     explicitly, it means the address of the parameter has been
	     taken, so we should not extend the lifetime of the pseudo.  */
	  && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
	      || ! MEM_P (XEXP (note, 0))))
	{
	  /* Stores are never anticipatable.  */
	  int antic_p = 0;
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = oprs_available_p (dest, insn)
			&& ! JUMP_P (insn);

	  /* Record the memory expression (DEST) in the hash table.  */
	  insert_expr_in_table (dest, GET_MODE (dest), insn,
				antic_p, avail_p, table);
	}
    }
}

static void
hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
		   struct hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

static void
hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
		struct hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

/* Process INSN and add hash table entries as appropriate.

   Only available expressions that set a single pseudo-reg are recorded.

   Single sets in a PARALLEL could be handled, but it's an extra complication
   that isn't dealt with right now.  The trick is handling the CLOBBERs that
   are also in the PARALLEL.  Later.

   If TABLE->set_p is nonzero, this is for the assignment hash table,
   otherwise it is for the expression hash table.  */

static void
hash_scan_insn (rtx insn, struct hash_table_d *table)
{
  rtx pat = PATTERN (insn);
  int i;

  /* Pick out the sets of INSN and for other forms of instructions record
     what's been modified.  */

  if (GET_CODE (pat) == SET)
    hash_scan_set (pat, insn, table);
  else if (GET_CODE (pat) == PARALLEL)
    for (i = 0; i < XVECLEN (pat, 0); i++)
      {
	rtx x = XVECEXP (pat, 0, i);

	if (GET_CODE (x) == SET)
	  hash_scan_set (x, insn, table);
	else if (GET_CODE (x) == CLOBBER)
	  hash_scan_clobber (x, insn, table);
	else if (GET_CODE (x) == CALL)
	  hash_scan_call (x, insn, table);
      }

  else if (GET_CODE (pat) == CLOBBER)
    hash_scan_clobber (pat, insn, table);
  else if (GET_CODE (pat) == CALL)
    hash_scan_call (pat, insn, table);
}

static void
dump_hash_table (FILE *file, const char *name, struct hash_table_d *table)
{
  int i;
  /* Flattened out table, so it's printed in proper order.  */
  struct expr **flat_table;
  unsigned int *hash_val;
  struct expr *expr;

  flat_table = XCNEWVEC (struct expr *, table->n_elems);
  hash_val = XNEWVEC (unsigned int, table->n_elems);

  for (i = 0; i < (int) table->size; i++)
    for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
      {
	flat_table[expr->bitmap_index] = expr;
	hash_val[expr->bitmap_index] = i;
      }

  fprintf (file, "%s hash table (%d buckets, %d entries)\n",
	   name, table->size, table->n_elems);

  for (i = 0; i < (int) table->n_elems; i++)
    if (flat_table[i] != 0)
      {
	expr = flat_table[i];
	fprintf (file, "Index %d (hash value %d)\n  ",
		 expr->bitmap_index, hash_val[i]);
	print_rtl (file, expr->expr);
	fprintf (file, "\n");
      }

  fprintf (file, "\n");

  free (flat_table);
  free (hash_val);
}

/* Record register first/last/block set information for REGNO in INSN.

   first_set records the first place in the block where the register
   is set and is used to compute "anticipatability".

   last_set records the last place in the block where the register
   is set and is used to compute "availability".

   last_bb records the block for which first_set and last_set are
   valid, as a quick test to invalidate them.  */

static void
record_last_reg_set_info (rtx insn, int regno)
{
  struct reg_avail_info *info = &reg_avail_info[regno];
  int luid = DF_INSN_LUID (insn);

  info->last_set = luid;
  if (info->last_bb != current_bb)
    {
      info->last_bb = current_bb;
      info->first_set = luid;
    }
}


/* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
   Note we store a pair of elements in the list, so they have to be
   taken off pairwise.  */

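/* Illustrative sketch of the resulting list layout (an editorial note
   derived from the code below): after recording one store, the per-block
   list reads, from its head,

	(mem ...), (canonicalized address), ... older pairs ...

   because the address is pushed first and the MEM second.  A CALL_INSN
   recorded by record_last_mem_set_info appears as a single INSN_LIST node
   instead, and traversals stop at it rather than expecting a pair.  */
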
static void
canon_list_insert (rtx dest, const_rtx unused1 ATTRIBUTE_UNUSED,
		   void *v_insn)
{
  rtx dest_addr, insn;
  int bb;

  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with a load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */

  if (! MEM_P (dest))
    return;

  dest_addr = get_addr (XEXP (dest, 0));
  dest_addr = canon_rtx (dest_addr);
  insn = (rtx) v_insn;
  bb = BLOCK_NUM (insn);

  canon_modify_mem_list[bb] =
    alloc_EXPR_LIST (VOIDmode, dest_addr, canon_modify_mem_list[bb]);
  canon_modify_mem_list[bb] =
    alloc_EXPR_LIST (VOIDmode, dest, canon_modify_mem_list[bb]);
}

/* Record memory modification information for INSN.  We do not actually care
   about the memory location(s) that are set, or even how they are set
   (consider a CALL_INSN).  We merely need to record which insns modify
   memory.  */

static void
record_last_mem_set_info (rtx insn)
{
  int bb = BLOCK_NUM (insn);

  /* load_killed_in_block_p will handle the case of calls clobbering
     everything.  */
  modify_mem_list[bb] = alloc_INSN_LIST (insn, modify_mem_list[bb]);
  bitmap_set_bit (modify_mem_list_set, bb);

  if (CALL_P (insn))
    {
      /* Note that traversals of this loop (other than for free-ing)
	 will break after encountering a CALL_INSN.  So, there's no
	 need to insert a pair of items, as canon_list_insert does.  */
      canon_modify_mem_list[bb] =
	alloc_INSN_LIST (insn, canon_modify_mem_list[bb]);
      bitmap_set_bit (blocks_with_calls, bb);
    }
  else
    note_stores (PATTERN (insn), canon_list_insert, (void *) insn);
}

/* Called from compute_hash_table via note_stores to handle one
   SET or CLOBBER in an insn.  DATA is really the instruction in which
   the SET is taking place.  */

static void
record_last_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
{
  rtx last_set_insn = (rtx) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (REG_P (dest))
    record_last_reg_set_info (last_set_insn, REGNO (dest));
  else if (MEM_P (dest)
	   /* Ignore pushes, they clobber nothing.  */
	   && ! push_operand (dest, GET_MODE (dest)))
    record_last_mem_set_info (last_set_insn);
}

1650 /* Top level function to create an expression or assignment hash table.
1651
1652 Expression entries are placed in the hash table if
1653 - they are of the form (set (pseudo-reg) src),
1654 - src is something we want to perform GCSE on,
1655 - none of the operands are subsequently modified in the block
1656
1657 Assignment entries are placed in the hash table if
1658 - they are of the form (set (pseudo-reg) src),
1659 - src is something we want to perform const/copy propagation on,
1660 - none of the operands or target are subsequently modified in the block
1661
1662 Currently src must be a pseudo-reg or a const_int.
1663
1664 TABLE is the table computed. */
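
/* For example (hypothetical RTL), an insn such as

     (set (reg 100) (plus:SI (reg 101) (const_int 4)))

   yields an expression entry for the PLUS if reg 101 is not modified
   later in the block.  An assignment entry additionally requires the
   target to stay unmodified, and its source must currently be a
   pseudo-reg or a constant, e.g. (set (reg 100) (reg 101)).  */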
1665
1666 static void
1667 compute_hash_table_work (struct hash_table_d *table)
1668 {
1669 int i;
1670
  /* Re-cache any INSN_LIST nodes we have allocated.  */
1672 clear_modify_mem_tables ();
1673 /* Some working arrays used to track first and last set in each block. */
1674 reg_avail_info = GNEWVEC (struct reg_avail_info, max_reg_num ());
1675
1676 for (i = 0; i < max_reg_num (); ++i)
1677 reg_avail_info[i].last_bb = NULL;
1678
1679 FOR_EACH_BB (current_bb)
1680 {
1681 rtx insn;
1682 unsigned int regno;
1683
1684 /* First pass over the instructions records information used to
1685 determine when registers and memory are first and last set. */
1686 FOR_BB_INSNS (current_bb, insn)
1687 {
1688 if (! INSN_P (insn))
1689 continue;
1690
1691 if (CALL_P (insn))
1692 {
1693 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
1694 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
1695 record_last_reg_set_info (insn, regno);
1696
1697 mark_call (insn);
1698 }
1699
1700 note_stores (PATTERN (insn), record_last_set_info, insn);
1701 }
1702
1703 /* Insert implicit sets in the hash table. */
1704 if (table->set_p
1705 && implicit_sets[current_bb->index] != NULL_RTX)
1706 hash_scan_set (implicit_sets[current_bb->index],
1707 BB_HEAD (current_bb), table);
1708
1709 /* The next pass builds the hash table. */
1710 FOR_BB_INSNS (current_bb, insn)
1711 if (INSN_P (insn))
1712 hash_scan_insn (insn, table);
1713 }
1714
1715 free (reg_avail_info);
1716 reg_avail_info = NULL;
1717 }
1718
/* Allocate space for the set/expr hash TABLE.
   The number of insns in the function is used to determine the number
   of buckets to use.  SET_P determines whether a set or an expression
   table will be created.  */
1723
1724 static void
1725 alloc_hash_table (struct hash_table_d *table, int set_p)
1726 {
1727 int n;
1728
1729 n = get_max_insn_count ();
1730
1731 table->size = n / 4;
1732 if (table->size < 11)
1733 table->size = 11;
1734
1735 /* Attempt to maintain efficient use of hash table.
1736 Making it an odd number is simplest for now.
1737 ??? Later take some measurements. */
1738 table->size |= 1;
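  /* E.g. (illustrative): a function with 1000 insns gets 1000/4 = 250,
     made odd -> 251 buckets; very small functions get at least 11.  */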
1739 n = table->size * sizeof (struct expr *);
1740 table->table = GNEWVAR (struct expr *, n);
1741 table->set_p = set_p;
1742 }
1743
1744 /* Free things allocated by alloc_hash_table. */
1745
1746 static void
1747 free_hash_table (struct hash_table_d *table)
1748 {
1749 free (table->table);
1750 }
1751
1752 /* Compute the hash TABLE for doing copy/const propagation or
1753 expression hash table. */
1754
1755 static void
1756 compute_hash_table (struct hash_table_d *table)
1757 {
1758 /* Initialize count of number of entries in hash table. */
1759 table->n_elems = 0;
1760 memset (table->table, 0, table->size * sizeof (struct expr *));
1761
1762 compute_hash_table_work (table);
1763 }
1764 \f
1765 /* Expression tracking support. */
1766
1767 /* Lookup REGNO in the set TABLE. The result is a pointer to the
1768 table entry, or NULL if not found. */
1769
1770 static struct expr *
1771 lookup_set (unsigned int regno, struct hash_table_d *table)
1772 {
1773 unsigned int hash = hash_set (regno, table->size);
1774 struct expr *expr;
1775
1776 expr = table->table[hash];
1777
1778 while (expr && REGNO (SET_DEST (expr->expr)) != regno)
1779 expr = expr->next_same_hash;
1780
1781 return expr;
1782 }
1783
1784 /* Return the next entry for REGNO in list EXPR. */
1785
1786 static struct expr *
1787 next_set (unsigned int regno, struct expr *expr)
1788 {
1789 do
1790 expr = expr->next_same_hash;
1791 while (expr && REGNO (SET_DEST (expr->expr)) != regno);
1792
1793 return expr;
1794 }
1795
1796 /* Like free_INSN_LIST_list or free_EXPR_LIST_list, except that the node
1797 types may be mixed. */
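
/* The mix arises because record_last_mem_set_info pushes a single
   INSN_LIST node onto canon_modify_mem_list for a CALL_INSN, while
   canon_list_insert pushes EXPR_LIST pairs for ordinary stores.  */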
1798
1799 static void
1800 free_insn_expr_list_list (rtx *listp)
1801 {
1802 rtx list, next;
1803
1804 for (list = *listp; list ; list = next)
1805 {
1806 next = XEXP (list, 1);
1807 if (GET_CODE (list) == EXPR_LIST)
1808 free_EXPR_LIST_node (list);
1809 else
1810 free_INSN_LIST_node (list);
1811 }
1812
1813 *listp = NULL;
1814 }
1815
1816 /* Clear canon_modify_mem_list and modify_mem_list tables. */
1817 static void
1818 clear_modify_mem_tables (void)
1819 {
1820 unsigned i;
1821 bitmap_iterator bi;
1822
1823 EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
1824 {
1825 free_INSN_LIST_list (modify_mem_list + i);
1826 free_insn_expr_list_list (canon_modify_mem_list + i);
1827 }
1828 bitmap_clear (modify_mem_list_set);
1829 bitmap_clear (blocks_with_calls);
1830 }
1831
1832 /* Release memory used by modify_mem_list_set. */
1833
1834 static void
1835 free_modify_mem_tables (void)
1836 {
1837 clear_modify_mem_tables ();
1838 free (modify_mem_list);
1839 free (canon_modify_mem_list);
1840 modify_mem_list = 0;
1841 canon_modify_mem_list = 0;
1842 }
1843
1844 /* Reset tables used to keep track of what's still available [since the
1845 start of the block]. */
1846
1847 static void
1848 reset_opr_set_tables (void)
1849 {
  /* Maintain a bitmap of which regs have been set since the beginning
     of the block.  */
1852 CLEAR_REG_SET (reg_set_bitmap);
1853
1854 /* Also keep a record of the last instruction to modify memory.
1855 For now this is very trivial, we only record whether any memory
1856 location has been modified. */
1857 clear_modify_mem_tables ();
1858 }
1859
1860 /* Return nonzero if the operands of X are not set before INSN in
1861 INSN's basic block. */
1862
1863 static int
1864 oprs_not_set_p (const_rtx x, const_rtx insn)
1865 {
1866 int i, j;
1867 enum rtx_code code;
1868 const char *fmt;
1869
1870 if (x == 0)
1871 return 1;
1872
1873 code = GET_CODE (x);
1874 switch (code)
1875 {
1876 case PC:
1877 case CC0:
1878 case CONST:
1879 case CONST_INT:
1880 case CONST_DOUBLE:
1881 case CONST_FIXED:
1882 case CONST_VECTOR:
1883 case SYMBOL_REF:
1884 case LABEL_REF:
1885 case ADDR_VEC:
1886 case ADDR_DIFF_VEC:
1887 return 1;
1888
1889 case MEM:
1890 if (load_killed_in_block_p (BLOCK_FOR_INSN (insn),
1891 DF_INSN_LUID (insn), x, 0))
1892 return 0;
1893 else
1894 return oprs_not_set_p (XEXP (x, 0), insn);
1895
1896 case REG:
1897 return ! REGNO_REG_SET_P (reg_set_bitmap, REGNO (x));
1898
1899 default:
1900 break;
1901 }
1902
1903 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
1904 {
1905 if (fmt[i] == 'e')
1906 {
1907 /* If we are about to do the last recursive call
1908 needed at this level, change it into iteration.
1909 This function is called enough to be worth it. */
1910 if (i == 0)
1911 return oprs_not_set_p (XEXP (x, i), insn);
1912
1913 if (! oprs_not_set_p (XEXP (x, i), insn))
1914 return 0;
1915 }
1916 else if (fmt[i] == 'E')
1917 for (j = 0; j < XVECLEN (x, i); j++)
1918 if (! oprs_not_set_p (XVECEXP (x, i, j), insn))
1919 return 0;
1920 }
1921
1922 return 1;
1923 }
1924
1925 /* Mark things set by a CALL. */
1926
1927 static void
1928 mark_call (rtx insn)
1929 {
1930 if (! RTL_CONST_OR_PURE_CALL_P (insn))
1931 record_last_mem_set_info (insn);
1932 }
1933
1934 /* Mark things set by a SET. */
1935
1936 static void
1937 mark_set (rtx pat, rtx insn)
1938 {
1939 rtx dest = SET_DEST (pat);
1940
1941 while (GET_CODE (dest) == SUBREG
1942 || GET_CODE (dest) == ZERO_EXTRACT
1943 || GET_CODE (dest) == STRICT_LOW_PART)
1944 dest = XEXP (dest, 0);
1945
1946 if (REG_P (dest))
1947 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (dest));
1948 else if (MEM_P (dest))
1949 record_last_mem_set_info (insn);
1950
1951 if (GET_CODE (SET_SRC (pat)) == CALL)
1952 mark_call (insn);
1953 }
1954
1955 /* Record things set by a CLOBBER. */
1956
1957 static void
1958 mark_clobber (rtx pat, rtx insn)
1959 {
1960 rtx clob = XEXP (pat, 0);
1961
1962 while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART)
1963 clob = XEXP (clob, 0);
1964
1965 if (REG_P (clob))
1966 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (clob));
1967 else
1968 record_last_mem_set_info (insn);
1969 }
1970
1971 /* Record things set by INSN.
1972 This data is used by oprs_not_set_p. */
1973
1974 static void
1975 mark_oprs_set (rtx insn)
1976 {
1977 rtx pat = PATTERN (insn);
1978 int i;
1979
1980 if (GET_CODE (pat) == SET)
1981 mark_set (pat, insn);
1982 else if (GET_CODE (pat) == PARALLEL)
1983 for (i = 0; i < XVECLEN (pat, 0); i++)
1984 {
1985 rtx x = XVECEXP (pat, 0, i);
1986
1987 if (GET_CODE (x) == SET)
1988 mark_set (x, insn);
1989 else if (GET_CODE (x) == CLOBBER)
1990 mark_clobber (x, insn);
1991 else if (GET_CODE (x) == CALL)
1992 mark_call (insn);
1993 }
1994
1995 else if (GET_CODE (pat) == CLOBBER)
1996 mark_clobber (pat, insn);
1997 else if (GET_CODE (pat) == CALL)
1998 mark_call (insn);
1999 }
2000
2001 \f
2002 /* Compute copy/constant propagation working variables. */
2003
2004 /* Local properties of assignments. */
2005 static sbitmap *cprop_pavloc;
2006 static sbitmap *cprop_absaltered;
2007
2008 /* Global properties of assignments (computed from the local properties). */
2009 static sbitmap *cprop_avin;
2010 static sbitmap *cprop_avout;
2011
2012 /* Allocate vars used for copy/const propagation. N_BLOCKS is the number of
2013 basic blocks. N_SETS is the number of sets. */
2014
2015 static void
2016 alloc_cprop_mem (int n_blocks, int n_sets)
2017 {
2018 cprop_pavloc = sbitmap_vector_alloc (n_blocks, n_sets);
2019 cprop_absaltered = sbitmap_vector_alloc (n_blocks, n_sets);
2020
2021 cprop_avin = sbitmap_vector_alloc (n_blocks, n_sets);
2022 cprop_avout = sbitmap_vector_alloc (n_blocks, n_sets);
2023 }
2024
2025 /* Free vars used by copy/const propagation. */
2026
2027 static void
2028 free_cprop_mem (void)
2029 {
2030 sbitmap_vector_free (cprop_pavloc);
2031 sbitmap_vector_free (cprop_absaltered);
2032 sbitmap_vector_free (cprop_avin);
2033 sbitmap_vector_free (cprop_avout);
2034 }
2035
2036 /* For each block, compute whether X is transparent. X is either an
2037 expression or an assignment [though we don't care which, for this context
2038 an assignment is treated as an expression]. For each block where an
2039 element of X is modified, set (SET_P == 1) or reset (SET_P == 0) the INDX
2040 bit in BMAP. */
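
/* E.g. (illustrative): the expression (plus:SI (reg 100) (const_int 4))
   is not transparent in any block containing a def of reg 100, and a
   MEM expression is not transparent in blocks with a call or with a
   store that may conflict with it.  */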
2041
2042 static void
2043 compute_transp (const_rtx x, int indx, sbitmap *bmap, int set_p)
2044 {
2045 int i, j;
2046 enum rtx_code code;
2047 const char *fmt;
2048
2049 /* repeat is used to turn tail-recursion into iteration since GCC
2050 can't do it when there's no return value. */
2051 repeat:
2052
2053 if (x == 0)
2054 return;
2055
2056 code = GET_CODE (x);
2057 switch (code)
2058 {
2059 case REG:
2060 if (set_p)
2061 {
2062 df_ref def;
2063 for (def = DF_REG_DEF_CHAIN (REGNO (x));
2064 def;
2065 def = DF_REF_NEXT_REG (def))
2066 SET_BIT (bmap[DF_REF_BB (def)->index], indx);
2067 }
2068 else
2069 {
2070 df_ref def;
2071 for (def = DF_REG_DEF_CHAIN (REGNO (x));
2072 def;
2073 def = DF_REF_NEXT_REG (def))
2074 RESET_BIT (bmap[DF_REF_BB (def)->index], indx);
2075 }
2076
2077 return;
2078
2079 case MEM:
2080 if (! MEM_READONLY_P (x))
2081 {
2082 bitmap_iterator bi;
2083 unsigned bb_index;
2084
2085 /* First handle all the blocks with calls. We don't need to
2086 do any list walking for them. */
2087 EXECUTE_IF_SET_IN_BITMAP (blocks_with_calls, 0, bb_index, bi)
2088 {
2089 if (set_p)
2090 SET_BIT (bmap[bb_index], indx);
2091 else
2092 RESET_BIT (bmap[bb_index], indx);
2093 }
2094
2095 /* Now iterate over the blocks which have memory modifications
2096 but which do not have any calls. */
2097 EXECUTE_IF_AND_COMPL_IN_BITMAP (modify_mem_list_set,
2098 blocks_with_calls,
2099 0, bb_index, bi)
2100 {
2101 rtx list_entry = canon_modify_mem_list[bb_index];
2102
2103 while (list_entry)
2104 {
2105 rtx dest, dest_addr;
2106
            /* Each pair of list entries gives the canonicalized MEM
               stored by some insn in this block, followed by that MEM's
               canonicalized address.  Examine each hunk of memory that
               is modified.  */
2109
2110 dest = XEXP (list_entry, 0);
2111 list_entry = XEXP (list_entry, 1);
2112 dest_addr = XEXP (list_entry, 0);
2113
2114 if (canon_true_dependence (dest, GET_MODE (dest), dest_addr,
2115 x, NULL_RTX, rtx_addr_varies_p))
2116 {
2117 if (set_p)
2118 SET_BIT (bmap[bb_index], indx);
2119 else
2120 RESET_BIT (bmap[bb_index], indx);
2121 break;
2122 }
2123 list_entry = XEXP (list_entry, 1);
2124 }
2125 }
2126 }
2127
2128 x = XEXP (x, 0);
2129 goto repeat;
2130
2131 case PC:
2132 case CC0: /*FIXME*/
2133 case CONST:
2134 case CONST_INT:
2135 case CONST_DOUBLE:
2136 case CONST_FIXED:
2137 case CONST_VECTOR:
2138 case SYMBOL_REF:
2139 case LABEL_REF:
2140 case ADDR_VEC:
2141 case ADDR_DIFF_VEC:
2142 return;
2143
2144 default:
2145 break;
2146 }
2147
2148 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2149 {
2150 if (fmt[i] == 'e')
2151 {
2152 /* If we are about to do the last recursive call
2153 needed at this level, change it into iteration.
2154 This function is called enough to be worth it. */
2155 if (i == 0)
2156 {
2157 x = XEXP (x, i);
2158 goto repeat;
2159 }
2160
2161 compute_transp (XEXP (x, i), indx, bmap, set_p);
2162 }
2163 else if (fmt[i] == 'E')
2164 for (j = 0; j < XVECLEN (x, i); j++)
2165 compute_transp (XVECEXP (x, i, j), indx, bmap, set_p);
2166 }
2167 }
2168
2169 /* Top level routine to do the dataflow analysis needed by copy/const
2170 propagation. */
2171
2172 static void
2173 compute_cprop_data (void)
2174 {
2175 compute_local_properties (cprop_absaltered, cprop_pavloc, NULL, &set_hash_table);
2176 compute_available (cprop_pavloc, cprop_absaltered,
2177 cprop_avout, cprop_avin);
2178 }
2179 \f
2180 /* Copy/constant propagation. */
2181
2182 /* Maximum number of register uses in an insn that we handle. */
2183 #define MAX_USES 8
2184
2185 /* Table of uses found in an insn.
2186 Allocated statically to avoid alloc/free complexity and overhead. */
2187 static struct reg_use reg_use_table[MAX_USES];
2188
2189 /* Index into `reg_use_table' while building it. */
2190 static int reg_use_count;
2191
2192 /* Set up a list of register numbers used in INSN. The found uses are stored
2193 in `reg_use_table'. `reg_use_count' is initialized to zero before entry,
2194 and contains the number of uses in the table upon exit.
2195
2196 ??? If a register appears multiple times we will record it multiple times.
2197 This doesn't hurt anything but it will slow things down. */
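
/* E.g. (illustrative): for (set (reg 100) (plus:SI (reg 101) (reg 102))),
   scanning the insn's uses records reg 101 and reg 102 in
   `reg_use_table'.  */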
2198
2199 static void
2200 find_used_regs (rtx *xptr, void *data ATTRIBUTE_UNUSED)
2201 {
2202 int i, j;
2203 enum rtx_code code;
2204 const char *fmt;
2205 rtx x = *xptr;
2206
2207 /* repeat is used to turn tail-recursion into iteration since GCC
2208 can't do it when there's no return value. */
2209 repeat:
2210 if (x == 0)
2211 return;
2212
2213 code = GET_CODE (x);
2214 if (REG_P (x))
2215 {
2216 if (reg_use_count == MAX_USES)
2217 return;
2218
2219 reg_use_table[reg_use_count].reg_rtx = x;
2220 reg_use_count++;
2221 }
2222
2223 /* Recursively scan the operands of this expression. */
2224
2225 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2226 {
2227 if (fmt[i] == 'e')
2228 {
2229 /* If we are about to do the last recursive call
2230 needed at this level, change it into iteration.
2231 This function is called enough to be worth it. */
2232 if (i == 0)
2233 {
2234 x = XEXP (x, 0);
2235 goto repeat;
2236 }
2237
2238 find_used_regs (&XEXP (x, i), data);
2239 }
2240 else if (fmt[i] == 'E')
2241 for (j = 0; j < XVECLEN (x, i); j++)
2242 find_used_regs (&XVECEXP (x, i, j), data);
2243 }
2244 }
2245
2246 /* Try to replace all non-SET_DEST occurrences of FROM in INSN with TO.
   Returns nonzero if successful.  */
2248
2249 static int
2250 try_replace_reg (rtx from, rtx to, rtx insn)
2251 {
2252 rtx note = find_reg_equal_equiv_note (insn);
2253 rtx src = 0;
2254 int success = 0;
2255 rtx set = single_set (insn);
2256
2257 /* Usually we substitute easy stuff, so we won't copy everything.
2258 We however need to take care to not duplicate non-trivial CONST
2259 expressions. */
2260 to = copy_rtx (to);
2261
2262 validate_replace_src_group (from, to, insn);
2263 if (num_changes_pending () && apply_change_group ())
2264 success = 1;
2265
2266 /* Try to simplify SET_SRC if we have substituted a constant. */
2267 if (success && set && CONSTANT_P (to))
2268 {
2269 src = simplify_rtx (SET_SRC (set));
2270
2271 if (src)
2272 validate_change (insn, &SET_SRC (set), src, 0);
2273 }
2274
2275 /* If there is already a REG_EQUAL note, update the expression in it
2276 with our replacement. */
2277 if (note != 0 && REG_NOTE_KIND (note) == REG_EQUAL)
2278 set_unique_reg_note (insn, REG_EQUAL,
2279 simplify_replace_rtx (XEXP (note, 0), from,
2280 copy_rtx (to)));
2281 if (!success && set && reg_mentioned_p (from, SET_SRC (set)))
2282 {
2283 /* If above failed and this is a single set, try to simplify the source of
2284 the set given our substitution. We could perhaps try this for multiple
2285 SETs, but it probably won't buy us anything. */
2286 src = simplify_replace_rtx (SET_SRC (set), from, to);
2287
2288 if (!rtx_equal_p (src, SET_SRC (set))
2289 && validate_change (insn, &SET_SRC (set), src, 0))
2290 success = 1;
2291
2292 /* If we've failed to do replacement, have a single SET, don't already
2293 have a note, and have no special SET, add a REG_EQUAL note to not
2294 lose information. */
2295 if (!success && note == 0 && set != 0
2296 && GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
2297 && GET_CODE (SET_DEST (set)) != STRICT_LOW_PART)
2298 note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
2299 }
2300
  /* A REG_EQUAL note may get simplified into a plain register.  We do
     not allow that, so remove the note.  This case ought not to happen,
     because the code above ought to have synthesized a reg-reg move,
     but be on the safe side.  */
2305 if (note && REG_NOTE_KIND (note) == REG_EQUAL && REG_P (XEXP (note, 0)))
2306 remove_note (insn, note);
2307
2308 return success;
2309 }
2310
/* Find a set to register REGNO that is available on entry to INSN's
   block.  Return NULL if no such set is found.  */
2313
2314 static struct expr *
2315 find_avail_set (int regno, rtx insn)
2316 {
2317 /* SET1 contains the last set found that can be returned to the caller for
2318 use in a substitution. */
2319 struct expr *set1 = 0;
2320
2321 /* Loops are not possible here. To get a loop we would need two sets
2322 available at the start of the block containing INSN. i.e. we would
2323 need two sets like this available at the start of the block:
2324
2325 (set (reg X) (reg Y))
2326 (set (reg Y) (reg X))
2327
     This cannot happen since the set of (reg Y) would have killed the
     set of (reg X), making it unavailable at the start of this block.  */
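  /* E.g. (illustrative): if (set (reg 3) (reg 2)) and
     (set (reg 2) (const_int 7)) are both available on entry, a lookup
     for register 3 follows the copy chain through reg 2 and returns the
     constant set, enabling constant rather than copy propagation.  */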
2330 while (1)
2331 {
2332 rtx src;
2333 struct expr *set = lookup_set (regno, &set_hash_table);
2334
2335 /* Find a set that is available at the start of the block
2336 which contains INSN. */
2337 while (set)
2338 {
2339 if (TEST_BIT (cprop_avin[BLOCK_NUM (insn)], set->bitmap_index))
2340 break;
2341 set = next_set (regno, set);
2342 }
2343
2344 /* If no available set was found we've reached the end of the
2345 (possibly empty) copy chain. */
2346 if (set == 0)
2347 break;
2348
2349 gcc_assert (GET_CODE (set->expr) == SET);
2350
2351 src = SET_SRC (set->expr);
2352
2353 /* We know the set is available.
2354 Now check that SRC is ANTLOC (i.e. none of the source operands
2355 have changed since the start of the block).
2356
2357 If the source operand changed, we may still use it for the next
2358 iteration of this loop, but we may not use it for substitutions. */
2359
2360 if (gcse_constant_p (src) || oprs_not_set_p (src, insn))
2361 set1 = set;
2362
2363 /* If the source of the set is anything except a register, then
2364 we have reached the end of the copy chain. */
2365 if (! REG_P (src))
2366 break;
2367
2368 /* Follow the copy chain, i.e. start another iteration of the loop
2369 and see if we have an available copy into SRC. */
2370 regno = REGNO (src);
2371 }
2372
2373 /* SET1 holds the last set that was available and anticipatable at
2374 INSN. */
2375 return set1;
2376 }
2377
2378 /* Subroutine of cprop_insn that tries to propagate constants into
2379 JUMP_INSNS. JUMP must be a conditional jump. If SETCC is non-NULL
2380 it is the instruction that immediately precedes JUMP, and must be a
2381 single SET of a register. FROM is what we will try to replace,
2382 SRC is the constant we will try to substitute for it. Returns nonzero
2383 if a change was made. */
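
/* A typical (hypothetical) SETCC/JUMP pair this handles:

     setcc: (set (reg 90) (lt:SI (reg 100) (const_int 10)))
     jump:  (set (pc) (if_then_else (ne (reg 90) (const_int 0))
                                    (label_ref 23) (pc)))

   Substituting the SETCC source into the jump condition, and then a
   known constant for reg 100, may fold the condition to a constant.  */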
2384
2385 static int
2386 cprop_jump (basic_block bb, rtx setcc, rtx jump, rtx from, rtx src)
2387 {
2388 rtx new_rtx, set_src, note_src;
2389 rtx set = pc_set (jump);
2390 rtx note = find_reg_equal_equiv_note (jump);
2391
2392 if (note)
2393 {
2394 note_src = XEXP (note, 0);
2395 if (GET_CODE (note_src) == EXPR_LIST)
2396 note_src = NULL_RTX;
2397 }
  else
    note_src = NULL_RTX;
2399
2400 /* Prefer REG_EQUAL notes except those containing EXPR_LISTs. */
2401 set_src = note_src ? note_src : SET_SRC (set);
2402
  /* First substitute the SETCC condition into the JUMP instruction,
     then substitute SRC for FROM in the expanded JUMP.  */
2405 if (setcc != NULL_RTX
2406 && !modified_between_p (from, setcc, jump)
2407 && !modified_between_p (src, setcc, jump))
2408 {
2409 rtx setcc_src;
2410 rtx setcc_set = single_set (setcc);
2411 rtx setcc_note = find_reg_equal_equiv_note (setcc);
2412 setcc_src = (setcc_note && GET_CODE (XEXP (setcc_note, 0)) != EXPR_LIST)
2413 ? XEXP (setcc_note, 0) : SET_SRC (setcc_set);
2414 set_src = simplify_replace_rtx (set_src, SET_DEST (setcc_set),
2415 setcc_src);
2416 }
2417 else
2418 setcc = NULL_RTX;
2419
2420 new_rtx = simplify_replace_rtx (set_src, from, src);
2421
2422 /* If no simplification can be made, then try the next register. */
2423 if (rtx_equal_p (new_rtx, SET_SRC (set)))
2424 return 0;
2425
  /* If this is now a no-op, delete it; otherwise it must be a valid insn.  */
2427 if (new_rtx == pc_rtx)
2428 delete_insn (jump);
2429 else
2430 {
      /* Ensure that the value computed inside the jump insn is
         equivalent to the one computed by SETCC.  */
2433 if (setcc && modified_in_p (new_rtx, setcc))
2434 return 0;
2435 if (! validate_unshare_change (jump, &SET_SRC (set), new_rtx, 0))
2436 {
2437 /* When (some) constants are not valid in a comparison, and there
2438 are two registers to be replaced by constants before the entire
2439 comparison can be folded into a constant, we need to keep
2440 intermediate information in REG_EQUAL notes. For targets with
2441 separate compare insns, such notes are added by try_replace_reg.
2442 When we have a combined compare-and-branch instruction, however,
2443 we need to attach a note to the branch itself to make this
2444 optimization work. */
2445
2446 if (!rtx_equal_p (new_rtx, note_src))
2447 set_unique_reg_note (jump, REG_EQUAL, copy_rtx (new_rtx));
2448 return 0;
2449 }
2450
2451 /* Remove REG_EQUAL note after simplification. */
2452 if (note_src)
2453 remove_note (jump, note);
2454 }
2455
2456 #ifdef HAVE_cc0
2457 /* Delete the cc0 setter. */
2458 if (setcc != NULL && CC0_P (SET_DEST (single_set (setcc))))
2459 delete_insn (setcc);
2460 #endif
2461
2462 global_const_prop_count++;
2463 if (dump_file != NULL)
2464 {
2465 fprintf (dump_file,
2466 "GLOBAL CONST-PROP: Replacing reg %d in jump_insn %d with constant ",
2467 REGNO (from), INSN_UID (jump));
2468 print_rtl (dump_file, src);
2469 fprintf (dump_file, "\n");
2470 }
2471 purge_dead_edges (bb);
2472
2473 /* If a conditional jump has been changed into unconditional jump, remove
2474 the jump and make the edge fallthru - this is always called in
2475 cfglayout mode. */
2476 if (new_rtx != pc_rtx && simplejump_p (jump))
2477 {
2478 edge e;
2479 edge_iterator ei;
2480
2481 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); ei_next (&ei))
2482 if (e->dest != EXIT_BLOCK_PTR
2483 && BB_HEAD (e->dest) == JUMP_LABEL (jump))
2484 {
2485 e->flags |= EDGE_FALLTHRU;
2486 break;
2487 }
2488 delete_insn (jump);
2489 }
2490
2491 return 1;
2492 }
2493
2494 static bool
2495 constprop_register (rtx insn, rtx from, rtx to)
2496 {
2497 rtx sset;
2498
2499 /* Check for reg or cc0 setting instructions followed by
2500 conditional branch instructions first. */
2501 if ((sset = single_set (insn)) != NULL
2502 && NEXT_INSN (insn)
2503 && any_condjump_p (NEXT_INSN (insn)) && onlyjump_p (NEXT_INSN (insn)))
2504 {
2505 rtx dest = SET_DEST (sset);
2506 if ((REG_P (dest) || CC0_P (dest))
2507 && cprop_jump (BLOCK_FOR_INSN (insn), insn, NEXT_INSN (insn), from, to))
2508 return 1;
2509 }
2510
2511 /* Handle normal insns next. */
2512 if (NONJUMP_INSN_P (insn)
2513 && try_replace_reg (from, to, insn))
2514 return 1;
2515
2516 /* Try to propagate a CONST_INT into a conditional jump.
2517 We're pretty specific about what we will handle in this
2518 code, we can extend this as necessary over time.
2519
2520 Right now the insn in question must look like
2521 (set (pc) (if_then_else ...)) */
2522 else if (any_condjump_p (insn) && onlyjump_p (insn))
2523 return cprop_jump (BLOCK_FOR_INSN (insn), NULL, insn, from, to);
2524 return 0;
2525 }
2526
2527 /* Perform constant and copy propagation on INSN.
2528 The result is nonzero if a change was made. */
2529
2530 static int
2531 cprop_insn (rtx insn)
2532 {
2533 struct reg_use *reg_used;
2534 int changed = 0;
2535 rtx note;
2536
2537 if (!INSN_P (insn))
2538 return 0;
2539
2540 reg_use_count = 0;
2541 note_uses (&PATTERN (insn), find_used_regs, NULL);
2542
2543 note = find_reg_equal_equiv_note (insn);
2544
2545 /* We may win even when propagating constants into notes. */
2546 if (note)
2547 find_used_regs (&XEXP (note, 0), NULL);
2548
2549 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
2550 reg_used++, reg_use_count--)
2551 {
2552 unsigned int regno = REGNO (reg_used->reg_rtx);
2553 rtx pat, src;
2554 struct expr *set;
2555
2556 /* If the register has already been set in this block, there's
2557 nothing we can do. */
2558 if (! oprs_not_set_p (reg_used->reg_rtx, insn))
2559 continue;
2560
2561 /* Find an assignment that sets reg_used and is available
2562 at the start of the block. */
2563 set = find_avail_set (regno, insn);
2564 if (! set)
2565 continue;
2566
2567 pat = set->expr;
2568 /* ??? We might be able to handle PARALLELs. Later. */
2569 gcc_assert (GET_CODE (pat) == SET);
2570
2571 src = SET_SRC (pat);
2572
2573 /* Constant propagation. */
2574 if (gcse_constant_p (src))
2575 {
2576 if (constprop_register (insn, reg_used->reg_rtx, src))
2577 {
2578 changed = 1;
2579 global_const_prop_count++;
2580 if (dump_file != NULL)
2581 {
2582 fprintf (dump_file, "GLOBAL CONST-PROP: Replacing reg %d in ", regno);
2583 fprintf (dump_file, "insn %d with constant ", INSN_UID (insn));
2584 print_rtl (dump_file, src);
2585 fprintf (dump_file, "\n");
2586 }
2587 if (INSN_DELETED_P (insn))
2588 return 1;
2589 }
2590 }
2591 else if (REG_P (src)
2592 && REGNO (src) >= FIRST_PSEUDO_REGISTER
2593 && REGNO (src) != regno)
2594 {
2595 if (try_replace_reg (reg_used->reg_rtx, src, insn))
2596 {
2597 changed = 1;
2598 global_copy_prop_count++;
2599 if (dump_file != NULL)
2600 {
2601 fprintf (dump_file, "GLOBAL COPY-PROP: Replacing reg %d in insn %d",
2602 regno, INSN_UID (insn));
2603 fprintf (dump_file, " with reg %d\n", REGNO (src));
2604 }
2605
2606 /* The original insn setting reg_used may or may not now be
2607 deletable. We leave the deletion to flow. */
2608 /* FIXME: If it turns out that the insn isn't deletable,
2609 then we may have unnecessarily extended register lifetimes
2610 and made things worse. */
2611 }
2612 }
2613 }
2614
2615 if (changed && DEBUG_INSN_P (insn))
2616 return 0;
2617
2618 return changed;
2619 }
2620
2621 /* Like find_used_regs, but avoid recording uses that appear in
2622 input-output contexts such as zero_extract or pre_dec. This
2623 restricts the cases we consider to those for which local cprop
2624 can legitimately make replacements. */
2625
2626 static void
2627 local_cprop_find_used_regs (rtx *xptr, void *data)
2628 {
2629 rtx x = *xptr;
2630
2631 if (x == 0)
2632 return;
2633
2634 switch (GET_CODE (x))
2635 {
2636 case ZERO_EXTRACT:
2637 case SIGN_EXTRACT:
2638 case STRICT_LOW_PART:
2639 return;
2640
2641 case PRE_DEC:
2642 case PRE_INC:
2643 case POST_DEC:
2644 case POST_INC:
2645 case PRE_MODIFY:
2646 case POST_MODIFY:
2647 /* Can only legitimately appear this early in the context of
2648 stack pushes for function arguments, but handle all of the
2649 codes nonetheless. */
2650 return;
2651
2652 case SUBREG:
2653 /* Setting a subreg of a register larger than word_mode leaves
2654 the non-written words unchanged. */
2655 if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) > BITS_PER_WORD)
2656 return;
2657 break;
2658
2659 default:
2660 break;
2661 }
2662
2663 find_used_regs (xptr, data);
2664 }
2665
2666 /* Try to perform local const/copy propagation on X in INSN. */
2667
2668 static bool
2669 do_local_cprop (rtx x, rtx insn)
2670 {
2671 rtx newreg = NULL, newcnst = NULL;
2672
2673 /* Rule out USE instructions and ASM statements as we don't want to
2674 change the hard registers mentioned. */
2675 if (REG_P (x)
2676 && (REGNO (x) >= FIRST_PSEUDO_REGISTER
2677 || (GET_CODE (PATTERN (insn)) != USE
2678 && asm_noperands (PATTERN (insn)) < 0)))
2679 {
2680 cselib_val *val = cselib_lookup (x, GET_MODE (x), 0);
2681 struct elt_loc_list *l;
2682
2683 if (!val)
2684 return false;
2685 for (l = val->locs; l; l = l->next)
2686 {
2687 rtx this_rtx = l->loc;
2688 rtx note;
2689
2690 if (gcse_constant_p (this_rtx))
2691 newcnst = this_rtx;
2692 if (REG_P (this_rtx) && REGNO (this_rtx) >= FIRST_PSEUDO_REGISTER
              /* Don't copy propagate if the register has a REG_EQUIV
                 note attached.  At this point only function parameters
                 should have REG_EQUIV notes, and if the argument slot
                 is used somewhere explicitly, the address of the
                 parameter has been taken, so we should not extend the
                 lifetime of the pseudo.  */
2698 && (!(note = find_reg_note (l->setting_insn, REG_EQUIV, NULL_RTX))
2699 || ! MEM_P (XEXP (note, 0))))
2700 newreg = this_rtx;
2701 }
2702 if (newcnst && constprop_register (insn, x, newcnst))
2703 {
2704 if (dump_file != NULL)
2705 {
2706 fprintf (dump_file, "LOCAL CONST-PROP: Replacing reg %d in ",
2707 REGNO (x));
2708 fprintf (dump_file, "insn %d with constant ",
2709 INSN_UID (insn));
2710 print_rtl (dump_file, newcnst);
2711 fprintf (dump_file, "\n");
2712 }
2713 local_const_prop_count++;
2714 return true;
2715 }
2716 else if (newreg && newreg != x && try_replace_reg (x, newreg, insn))
2717 {
2718 if (dump_file != NULL)
2719 {
2720 fprintf (dump_file,
2721 "LOCAL COPY-PROP: Replacing reg %d in insn %d",
2722 REGNO (x), INSN_UID (insn));
2723 fprintf (dump_file, " with reg %d\n", REGNO (newreg));
2724 }
2725 local_copy_prop_count++;
2726 return true;
2727 }
2728 }
2729 return false;
2730 }
2731
2732 /* Do local const/copy propagation (i.e. within each basic block). */
2733
2734 static int
2735 local_cprop_pass (void)
2736 {
2737 basic_block bb;
2738 rtx insn;
2739 struct reg_use *reg_used;
2740 bool changed = false;
2741
2742 cselib_init (false);
2743 FOR_EACH_BB (bb)
2744 {
2745 FOR_BB_INSNS (bb, insn)
2746 {
2747 if (INSN_P (insn))
2748 {
2749 rtx note = find_reg_equal_equiv_note (insn);
2750 do
2751 {
2752 reg_use_count = 0;
2753 note_uses (&PATTERN (insn), local_cprop_find_used_regs,
2754 NULL);
2755 if (note)
2756 local_cprop_find_used_regs (&XEXP (note, 0), NULL);
2757
2758 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
2759 reg_used++, reg_use_count--)
2760 {
2761 if (do_local_cprop (reg_used->reg_rtx, insn))
2762 {
2763 changed = true;
2764 break;
2765 }
2766 }
2767 if (INSN_DELETED_P (insn))
2768 break;
2769 }
2770 while (reg_use_count);
2771 }
2772 cselib_process_insn (insn);
2773 }
2774
2775 /* Forget everything at the end of a basic block. */
2776 cselib_clear_table ();
2777 }
2778
2779 cselib_finish ();
2780
2781 return changed;
2782 }
2783
2784 /* Similar to get_condition, only the resulting condition must be
2785 valid at JUMP, instead of at EARLIEST.
2786
2787 This differs from noce_get_condition in ifcvt.c in that we prefer not to
2788 settle for the condition variable in the jump instruction being integral.
2789 We prefer to be able to record the value of a user variable, rather than
2790 the value of a temporary used in a condition. This could be solved by
2791 recording the value of *every* register scanned by canonicalize_condition,
2792 but this would require some code reorganization. */
2793
2794 rtx
2795 fis_get_condition (rtx jump)
2796 {
2797 return get_condition (jump, NULL, false, true);
2798 }
2799
2800 /* Check the comparison COND to see if we can safely form an implicit set from
2801 it. COND is either an EQ or NE comparison. */
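
/* E.g. (illustrative): under IEEE arithmetic "x == 0.0" is true for
   both +0.0 and -0.0, so on the taken branch we may not assume x is
   +0.0; hence the HONOR_SIGNED_ZEROS check below.  */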
2802
2803 static bool
2804 implicit_set_cond_p (const_rtx cond)
2805 {
2806 const enum machine_mode mode = GET_MODE (XEXP (cond, 0));
2807 const_rtx cst = XEXP (cond, 1);
2808
2809 /* We can't perform this optimization if either operand might be or might
2810 contain a signed zero. */
2811 if (HONOR_SIGNED_ZEROS (mode))
2812 {
2813 /* It is sufficient to check if CST is or contains a zero. We must
2814 handle float, complex, and vector. If any subpart is a zero, then
2815 the optimization can't be performed. */
2816 /* ??? The complex and vector checks are not implemented yet. We just
2817 always return zero for them. */
2818 if (GET_CODE (cst) == CONST_DOUBLE)
2819 {
2820 REAL_VALUE_TYPE d;
2821 REAL_VALUE_FROM_CONST_DOUBLE (d, cst);
2822 if (REAL_VALUES_EQUAL (d, dconst0))
2823 return 0;
2824 }
2825 else
2826 return 0;
2827 }
2828
2829 return gcse_constant_p (cst);
2830 }
2831
2832 /* Find the implicit sets of a function. An "implicit set" is a constraint
2833 on the value of a variable, implied by a conditional jump. For example,
2834 following "if (x == 2)", the then branch may be optimized as though the
2835 conditional performed an "explicit set", in this example, "x = 2". This
2836 function records the set patterns that are implicit at the start of each
2837 basic block.
2838
2839 FIXME: This would be more effective if critical edges are pre-split. As
2840 it is now, we can't record implicit sets for blocks that have
2841 critical successor edges. This results in missed optimizations
2842 and in more (unnecessary) work in cfgcleanup.c:thread_jump(). */
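
/* Continuing the example above (illustrative): for a block ending in

     (set (pc) (if_then_else (eq (reg 100) (const_int 2)) ...))

   we record (set (reg 100) (const_int 2)) as the implicit set of the
   EQ edge's destination block, provided that block has no other
   predecessors.  */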
2843
2844 static void
2845 find_implicit_sets (void)
2846 {
2847 basic_block bb, dest;
2848 unsigned int count;
2849 rtx cond, new_rtx;
2850
2851 count = 0;
2852 FOR_EACH_BB (bb)
2853 /* Check for more than one successor. */
2854 if (EDGE_COUNT (bb->succs) > 1)
2855 {
2856 cond = fis_get_condition (BB_END (bb));
2857
2858 if (cond
2859 && (GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
2860 && REG_P (XEXP (cond, 0))
2861 && REGNO (XEXP (cond, 0)) >= FIRST_PSEUDO_REGISTER
2862 && implicit_set_cond_p (cond))
2863 {
2864 dest = GET_CODE (cond) == EQ ? BRANCH_EDGE (bb)->dest
2865 : FALLTHRU_EDGE (bb)->dest;
2866
2867 if (dest
2868 /* Record nothing for a critical edge. */
2869 && single_pred_p (dest)
2870 && dest != EXIT_BLOCK_PTR)
2871 {
2872 new_rtx = gen_rtx_SET (VOIDmode, XEXP (cond, 0),
2873 XEXP (cond, 1));
2874 implicit_sets[dest->index] = new_rtx;
2875 if (dump_file)
2876 {
                fprintf (dump_file, "Implicit set of reg %d in ",
                         REGNO (XEXP (cond, 0)));
                fprintf (dump_file, "basic block %d\n", dest->index);
2880 }
2881 count++;
2882 }
2883 }
2884 }
2885
2886 if (dump_file)
2887 fprintf (dump_file, "Found %d implicit sets\n", count);
2888 }
2889
2890 /* Bypass conditional jumps. */
2891
2892 /* The value of last_basic_block at the beginning of the jump_bypass
2893 pass. The use of redirect_edge_and_branch_force may introduce new
2894 basic blocks, but the data flow analysis is only valid for basic
2895 block indices less than bypass_last_basic_block. */
2896
2897 static int bypass_last_basic_block;
2898
2899 /* Find a set of REGNO to a constant that is available at the end of basic
2900 block BB. Returns NULL if no such set is found. Based heavily upon
2901 find_avail_set. */
2902
2903 static struct expr *
2904 find_bypass_set (int regno, int bb)
2905 {
2906 struct expr *result = 0;
2907
2908 for (;;)
2909 {
2910 rtx src;
2911 struct expr *set = lookup_set (regno, &set_hash_table);
2912
2913 while (set)
2914 {
2915 if (TEST_BIT (cprop_avout[bb], set->bitmap_index))
2916 break;
2917 set = next_set (regno, set);
2918 }
2919
2920 if (set == 0)
2921 break;
2922
2923 gcc_assert (GET_CODE (set->expr) == SET);
2924
2925 src = SET_SRC (set->expr);
2926 if (gcse_constant_p (src))
2927 result = set;
2928
2929 if (! REG_P (src))
2930 break;
2931
2932 regno = REGNO (src);
2933 }
2934 return result;
2935 }
2936
2937
2938 /* Subroutine of bypass_block that checks whether a pseudo is killed by
2939 any of the instructions inserted on an edge. Jump bypassing places
2940 condition code setters on CFG edges using insert_insn_on_edge. This
2941 function is required to check that our data flow analysis is still
2942 valid prior to commit_edge_insertions. */
2943
2944 static bool
2945 reg_killed_on_edge (const_rtx reg, const_edge e)
2946 {
2947 rtx insn;
2948
2949 for (insn = e->insns.r; insn; insn = NEXT_INSN (insn))
2950 if (INSN_P (insn) && reg_set_p (reg, insn))
2951 return true;
2952
2953 return false;
2954 }
2955
2956 /* Subroutine of bypass_conditional_jumps that attempts to bypass the given
2957 basic block BB which has more than one predecessor. If not NULL, SETCC
2958 is the first instruction of BB, which is immediately followed by JUMP_INSN
2959 JUMP. Otherwise, SETCC is NULL, and JUMP is the first insn of BB.
2960 Returns nonzero if a change was made.
2961
2962 During the jump bypassing pass, we may place copies of SETCC instructions
2963 on CFG edges. The following routine must be careful to pay attention to
2964 these inserted insns when performing its transformations. */
2965
2966 static int
2967 bypass_block (basic_block bb, rtx setcc, rtx jump)
2968 {
2969 rtx insn, note;
2970 edge e, edest;
2971 int i, change;
2972 int may_be_loop_header;
2973 unsigned removed_p;
2974 edge_iterator ei;
2975
2976 insn = (setcc != NULL) ? setcc : jump;
2977
2978 /* Determine set of register uses in INSN. */
2979 reg_use_count = 0;
2980 note_uses (&PATTERN (insn), find_used_regs, NULL);
2981 note = find_reg_equal_equiv_note (insn);
2982 if (note)
2983 find_used_regs (&XEXP (note, 0), NULL);
2984
2985 may_be_loop_header = false;
2986 FOR_EACH_EDGE (e, ei, bb->preds)
2987 if (e->flags & EDGE_DFS_BACK)
2988 {
2989 may_be_loop_header = true;
2990 break;
2991 }
2992
2993 change = 0;
2994 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
2995 {
2996 removed_p = 0;
2997
2998 if (e->flags & EDGE_COMPLEX)
2999 {
3000 ei_next (&ei);
3001 continue;
3002 }
3003
3004 /* We can't redirect edges from new basic blocks. */
3005 if (e->src->index >= bypass_last_basic_block)
3006 {
3007 ei_next (&ei);
3008 continue;
3009 }
3010
      /* The irreducible loops created by redirecting edges entering the
         loop from outside would decrease the effectiveness of some of
         the following optimizations, so prevent this.  */
3014 if (may_be_loop_header
3015 && !(e->flags & EDGE_DFS_BACK))
3016 {
3017 ei_next (&ei);
3018 continue;
3019 }
3020
3021 for (i = 0; i < reg_use_count; i++)
3022 {
3023 struct reg_use *reg_used = &reg_use_table[i];
3024 unsigned int regno = REGNO (reg_used->reg_rtx);
3025 basic_block dest, old_dest;
3026 struct expr *set;
3027 rtx src, new_rtx;
3028
3029 set = find_bypass_set (regno, e->src->index);
3030
3031 if (! set)
3032 continue;
3033
3034 /* Check the data flow is valid after edge insertions. */
3035 if (e->insns.r && reg_killed_on_edge (reg_used->reg_rtx, e))
3036 continue;
3037
3038 src = SET_SRC (pc_set (jump));
3039
3040 if (setcc != NULL)
3041 src = simplify_replace_rtx (src,
3042 SET_DEST (PATTERN (setcc)),
3043 SET_SRC (PATTERN (setcc)));
3044
3045 new_rtx = simplify_replace_rtx (src, reg_used->reg_rtx,
3046 SET_SRC (set->expr));
3047
3048 /* Jump bypassing may have already placed instructions on
3049 edges of the CFG. We can't bypass an outgoing edge that
3050 has instructions associated with it, as these insns won't
3051 get executed if the incoming edge is redirected. */
3052
3053 if (new_rtx == pc_rtx)
3054 {
3055 edest = FALLTHRU_EDGE (bb);
3056 dest = edest->insns.r ? NULL : edest->dest;
3057 }
3058 else if (GET_CODE (new_rtx) == LABEL_REF)
3059 {
3060 dest = BLOCK_FOR_INSN (XEXP (new_rtx, 0));
3061 /* Don't bypass edges containing instructions. */
3062 edest = find_edge (bb, dest);
3063 if (edest && edest->insns.r)
3064 dest = NULL;
3065 }
3066 else
3067 dest = NULL;
3068
3069 /* Avoid unification of the edge with other edges from original
3070 branch. We would end up emitting the instruction on "both"
3071 edges. */
3072
3073 if (dest && setcc && !CC0_P (SET_DEST (PATTERN (setcc)))
3074 && find_edge (e->src, dest))
3075 dest = NULL;
3076
3077 old_dest = e->dest;
3078 if (dest != NULL
3079 && dest != old_dest
3080 && dest != EXIT_BLOCK_PTR)
3081 {
3082 redirect_edge_and_branch_force (e, dest);
3083
3084 /* Copy the register setter to the redirected edge.
3085 Don't copy CC0 setters, as CC0 is dead after jump. */
3086 if (setcc)
3087 {
3088 rtx pat = PATTERN (setcc);
3089 if (!CC0_P (SET_DEST (pat)))
3090 insert_insn_on_edge (copy_insn (pat), e);
3091 }
3092
3093 if (dump_file != NULL)
3094 {
3095 fprintf (dump_file, "JUMP-BYPASS: Proved reg %d "
3096 "in jump_insn %d equals constant ",
3097 regno, INSN_UID (jump));
3098 print_rtl (dump_file, SET_SRC (set->expr));
3099 fprintf (dump_file, "\nBypass edge from %d->%d to %d\n",
3100 e->src->index, old_dest->index, dest->index);
3101 }
3102 change = 1;
3103 removed_p = 1;
3104 break;
3105 }
3106 }
3107 if (!removed_p)
3108 ei_next (&ei);
3109 }
3110 return change;
3111 }
3112
3113 /* Find basic blocks with more than one predecessor that only contain a
3114 single conditional jump. If the result of the comparison is known at
3115 compile-time from any incoming edge, redirect that edge to the
3116 appropriate target. Returns nonzero if a change was made.
3117
3118 This function is now mis-named, because we also handle indirect jumps. */
3119
3120 static int
3121 bypass_conditional_jumps (void)
3122 {
3123 basic_block bb;
3124 int changed;
3125 rtx setcc;
3126 rtx insn;
3127 rtx dest;
3128
3129 /* Note we start at block 1. */
3130 if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
3131 return 0;
3132
3133 bypass_last_basic_block = last_basic_block;
3134 mark_dfs_back_edges ();
3135
3136 changed = 0;
3137 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb,
3138 EXIT_BLOCK_PTR, next_bb)
3139 {
3140 /* Check for more than one predecessor. */
3141 if (!single_pred_p (bb))
3142 {
3143 setcc = NULL_RTX;
3144 FOR_BB_INSNS (bb, insn)
3145 if (DEBUG_INSN_P (insn))
3146 continue;
3147 else if (NONJUMP_INSN_P (insn))
3148 {
3149 if (setcc)
3150 break;
3151 if (GET_CODE (PATTERN (insn)) != SET)
3152 break;
3153
3154 dest = SET_DEST (PATTERN (insn));
3155 if (REG_P (dest) || CC0_P (dest))
3156 setcc = insn;
3157 else
3158 break;
3159 }
3160 else if (JUMP_P (insn))
3161 {
3162 if ((any_condjump_p (insn) || computed_jump_p (insn))
3163 && onlyjump_p (insn))
3164 changed |= bypass_block (bb, setcc, insn);
3165 break;
3166 }
3167 else if (INSN_P (insn))
3168 break;
3169 }
3170 }
3171
3172 /* If we bypassed any register setting insns, we inserted a
3173 copy on the redirected edge. These need to be committed. */
3174 if (changed)
3175 commit_edge_insertions ();
3176
3177 return changed;
3178 }
3179 \f
3180 /* Compute PRE+LCM working variables. */
3181
3182 /* Local properties of expressions. */
3183 /* Nonzero for expressions that are transparent in the block. */
3184 static sbitmap *transp;
3185
/* Nonzero for expressions that are transparent at the end of the block.
   This is only zero for expressions killed by an abnormal critical edge
   created by a call.  */
3189 static sbitmap *transpout;
3190
3191 /* Nonzero for expressions that are computed (available) in the block. */
3192 static sbitmap *comp;
3193
3194 /* Nonzero for expressions that are locally anticipatable in the block. */
3195 static sbitmap *antloc;
3196
3197 /* Nonzero for expressions where this block is an optimal computation
3198 point. */
3199 static sbitmap *pre_optimal;
3200
3201 /* Nonzero for expressions which are redundant in a particular block. */
3202 static sbitmap *pre_redundant;
3203
3204 /* Nonzero for expressions which should be inserted on a specific edge. */
3205 static sbitmap *pre_insert_map;
3206
3207 /* Nonzero for expressions which should be deleted in a specific block. */
3208 static sbitmap *pre_delete_map;
3209
3210 /* Contains the edge_list returned by pre_edge_lcm. */
3211 static struct edge_list *edge_list;
3212
3213 /* Allocate vars used for PRE analysis. */
3214
3215 static void
3216 alloc_pre_mem (int n_blocks, int n_exprs)
3217 {
3218 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
3219 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
3220 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
3221
3222 pre_optimal = NULL;
3223 pre_redundant = NULL;
3224 pre_insert_map = NULL;
3225 pre_delete_map = NULL;
3226 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
3227
3228 /* pre_insert and pre_delete are allocated later. */
3229 }
3230
3231 /* Free vars used for PRE analysis. */
3232
3233 static void
3234 free_pre_mem (void)
3235 {
3236 sbitmap_vector_free (transp);
3237 sbitmap_vector_free (comp);
3238
3239 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
3240
3241 if (pre_optimal)
3242 sbitmap_vector_free (pre_optimal);
3243 if (pre_redundant)
3244 sbitmap_vector_free (pre_redundant);
3245 if (pre_insert_map)
3246 sbitmap_vector_free (pre_insert_map);
3247 if (pre_delete_map)
3248 sbitmap_vector_free (pre_delete_map);
3249
3250 transp = comp = NULL;
3251 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
3252 }
3253
3254 /* Top level routine to do the dataflow analysis needed by PRE. */
3255
3256 static void
3257 compute_pre_data (void)
3258 {
3259 sbitmap trapping_expr;
3260 basic_block bb;
3261 unsigned int ui;
3262
3263 compute_local_properties (transp, comp, antloc, &expr_hash_table);
3264 sbitmap_vector_zero (ae_kill, last_basic_block);
3265
3266 /* Collect expressions which might trap. */
3267 trapping_expr = sbitmap_alloc (expr_hash_table.n_elems);
3268 sbitmap_zero (trapping_expr);
3269 for (ui = 0; ui < expr_hash_table.size; ui++)
3270 {
3271 struct expr *e;
3272 for (e = expr_hash_table.table[ui]; e != NULL; e = e->next_same_hash)
3273 if (may_trap_p (e->expr))
3274 SET_BIT (trapping_expr, e->bitmap_index);
3275 }
3276
3277 /* Compute ae_kill for each basic block using:
3278
3279 ~(TRANSP | COMP)
3280 */
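
  /* E.g. (illustrative): an expression whose operands are clobbered in
     a block (not TRANSP) and which is not recomputed there afterwards
     (not COMP) cannot be available at the block exit, so the block
     kills it.  */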
3281
3282 FOR_EACH_BB (bb)
3283 {
3284 edge e;
3285 edge_iterator ei;
3286
3287 /* If the current block is the destination of an abnormal edge, we
3288 kill all trapping expressions because we won't be able to properly
3289 place the instruction on the edge. So make them neither
3290 anticipatable nor transparent. This is fairly conservative. */
3291 FOR_EACH_EDGE (e, ei, bb->preds)
3292 if (e->flags & EDGE_ABNORMAL)
3293 {
3294 sbitmap_difference (antloc[bb->index], antloc[bb->index], trapping_expr);
3295 sbitmap_difference (transp[bb->index], transp[bb->index], trapping_expr);
3296 break;
3297 }
3298
3299 sbitmap_a_or_b (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
3300 sbitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
3301 }
3302
3303 edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
3304 ae_kill, &pre_insert_map, &pre_delete_map);
3305 sbitmap_vector_free (antloc);
3306 antloc = NULL;
3307 sbitmap_vector_free (ae_kill);
3308 ae_kill = NULL;
3309 sbitmap_free (trapping_expr);
3310 }
3311 \f
3312 /* PRE utilities */
3313
3314 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
3315 block BB.
3316
3317 VISITED is a pointer to a working buffer for tracking which BB's have
3318 been visited. It is NULL for the top-level call.
3319
3320 We treat reaching expressions that go through blocks containing the same
3321 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
3322 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
3323 2 as not reaching. The intent is to improve the probability of finding
3324 only one reaching expression and to reduce register lifetimes by picking
3325 the closest such expression. */
3326
3327 static int
3328 pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr, basic_block bb, char *visited)
3329 {
3330 edge pred;
3331 edge_iterator ei;
3332
3333 FOR_EACH_EDGE (pred, ei, bb->preds)
3334 {
3335 basic_block pred_bb = pred->src;
3336
3337 if (pred->src == ENTRY_BLOCK_PTR
          /* Has this predecessor already been visited?  */
3339 || visited[pred_bb->index])
3340 ;/* Nothing to do. */
3341
3342 /* Does this predecessor generate this expression? */
3343 else if (TEST_BIT (comp[pred_bb->index], expr->bitmap_index))
3344 {
3345 /* Is this the occurrence we're looking for?
3346 Note that there's only one generating occurrence per block
3347 so we just need to check the block number. */
3348 if (occr_bb == pred_bb)
3349 return 1;
3350
3351 visited[pred_bb->index] = 1;
3352 }
3353 /* Ignore this predecessor if it kills the expression. */
3354 else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index))
3355 visited[pred_bb->index] = 1;
3356
3357 /* Neither gen nor kill. */
3358 else
3359 {
3360 visited[pred_bb->index] = 1;
3361 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
3362 return 1;
3363 }
3364 }
3365
3366 /* All paths have been checked. */
3367 return 0;
3368 }
3369
/* The wrapper for pre_expr_reaches_here_p_work that ensures that any
   memory allocated for that function is freed.  */
3372
3373 static int
3374 pre_expr_reaches_here_p (basic_block occr_bb, struct expr *expr, basic_block bb)
3375 {
3376 int rval;
3377 char *visited = XCNEWVEC (char, last_basic_block);
3378
3379 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
3380
3381 free (visited);
3382 return rval;
3383 }
3384 \f
3385
3386 /* Given an expr, generate RTL which we can insert at the end of a BB,
3387 or on an edge. Set the block number of any insns generated to
3388 the value of BB. */
3389
3390 static rtx
3391 process_insert_insn (struct expr *expr)
3392 {
3393 rtx reg = expr->reaching_reg;
3394 rtx exp = copy_rtx (expr->expr);
3395 rtx pat;
3396
3397 start_sequence ();
3398
3399 /* If the expression is something that's an operand, like a constant,
3400 just copy it to a register. */
3401 if (general_operand (exp, GET_MODE (reg)))
3402 emit_move_insn (reg, exp);
3403
3404 /* Otherwise, make a new insn to compute this expression and make sure the
3405 insn will be recognized (this also adds any needed CLOBBERs). Copy the
3406 expression to make sure we don't have any sharing issues. */
3407 else
3408 {
3409 rtx insn = emit_insn (gen_rtx_SET (VOIDmode, reg, exp));
3410
3411 if (insn_invalid_p (insn))
3412 gcc_unreachable ();
3413 }
3414
3416 pat = get_insns ();
3417 end_sequence ();
3418
3419 return pat;
3420 }
3421
3422 /* Add EXPR to the end of basic block BB.
3423
   This is used by both PRE and code hoisting.
3425
3426 For PRE, we want to verify that the expr is either transparent
3427 or locally anticipatable in the target block. This check makes
3428 no sense for code hoisting. */
3429
3430 static void
3431 insert_insn_end_basic_block (struct expr *expr, basic_block bb, int pre)
3432 {
3433 rtx insn = BB_END (bb);
3434 rtx new_insn;
3435 rtx reg = expr->reaching_reg;
3436 int regno = REGNO (reg);
3437 rtx pat, pat_end;
3438
3439 pat = process_insert_insn (expr);
3440 gcc_assert (pat && INSN_P (pat));
3441
3442 pat_end = pat;
3443 while (NEXT_INSN (pat_end) != NULL_RTX)
3444 pat_end = NEXT_INSN (pat_end);
3445
  /* If the last insn is a jump, insert EXPR in front of it [taking care
     to handle cc0, etc. properly].  Similarly we need to take care of
     trapping instructions in the presence of non-call exceptions.  */
3449
3450 if (JUMP_P (insn)
3451 || (NONJUMP_INSN_P (insn)
3452 && (!single_succ_p (bb)
3453 || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
3454 {
3455 #ifdef HAVE_cc0
3456 rtx note;
3457 #endif
      /* It should always be the case that we can put these instructions
         anywhere in the basic block when performing PRE optimizations.
         Check this.  */
3461 gcc_assert (!NONJUMP_INSN_P (insn) || !pre
3462 || TEST_BIT (antloc[bb->index], expr->bitmap_index)
3463 || TEST_BIT (transp[bb->index], expr->bitmap_index));
3464
3465 /* If this is a jump table, then we can't insert stuff here. Since
3466 we know the previous real insn must be the tablejump, we insert
3467 the new instruction just before the tablejump. */
3468 if (GET_CODE (PATTERN (insn)) == ADDR_VEC
3469 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
3470 insn = prev_real_insn (insn);
3471
3472 #ifdef HAVE_cc0
3473 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
3474 if cc0 isn't set. */
3475 note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
3476 if (note)
3477 insn = XEXP (note, 0);
3478 else
3479 {
3480 rtx maybe_cc0_setter = prev_nonnote_insn (insn);
3481 if (maybe_cc0_setter
3482 && INSN_P (maybe_cc0_setter)
3483 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
3484 insn = maybe_cc0_setter;
3485 }
3486 #endif
3487 /* FIXME: What if something in cc0/jump uses value set in new insn? */
3488 new_insn = emit_insn_before_noloc (pat, insn, bb);
3489 }
3490
3491 /* Likewise if the last insn is a call, as will happen in the presence
3492 of exception handling. */
3493 else if (CALL_P (insn)
3494 && (!single_succ_p (bb)
3495 || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
3496 {
      /* Keeping in mind SMALL_REGISTER_CLASSES and parameters in registers,
         we search backward and place the instructions before the first
         parameter is loaded.  Do this for all targets for consistency, on
         the presumption that we'll get better code elsewhere as well.

         It should always be the case that we can put these instructions
         anywhere in the basic block when performing PRE optimizations.
         Check this.  */
3505
3506 gcc_assert (!pre
3507 || TEST_BIT (antloc[bb->index], expr->bitmap_index)
3508 || TEST_BIT (transp[bb->index], expr->bitmap_index));
3509
3510 /* Since different machines initialize their parameter registers
3511 in different orders, assume nothing. Collect the set of all
3512 parameter registers. */
3513 insn = find_first_parameter_load (insn, BB_HEAD (bb));
3514
3515 /* If we found all the parameter loads, then we want to insert
3516 before the first parameter load.
3517
3518 If we did not find all the parameter loads, then we might have
3519 stopped on the head of the block, which could be a CODE_LABEL.
3520 If we inserted before the CODE_LABEL, then we would be putting
3521 the insn in the wrong basic block. In that case, put the insn
3522 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
3523 while (LABEL_P (insn)
3524 || NOTE_INSN_BASIC_BLOCK_P (insn))
3525 insn = NEXT_INSN (insn);
3526
3527 new_insn = emit_insn_before_noloc (pat, insn, bb);
3528 }
3529 else
3530 new_insn = emit_insn_after_noloc (pat, insn, bb);
3531
3532 while (1)
3533 {
3534 if (INSN_P (pat))
3535 add_label_notes (PATTERN (pat), new_insn);
3536 if (pat == pat_end)
3537 break;
3538 pat = NEXT_INSN (pat);
3539 }
3540
3541 gcse_create_count++;
3542
3543 if (dump_file)
3544 {
3545 fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
3546 bb->index, INSN_UID (new_insn));
3547 fprintf (dump_file, "copying expression %d to reg %d\n",
3548 expr->bitmap_index, regno);
3549 }
3550 }
3551
3552 /* Insert partially redundant expressions on edges in the CFG to make
3553 the expressions fully redundant. */
3554
3555 static int
3556 pre_edge_insert (struct edge_list *edge_list, struct expr **index_map)
3557 {
3558 int e, i, j, num_edges, set_size, did_insert = 0;
3559 sbitmap *inserted;
3560
3561 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
3562 if it reaches any of the deleted expressions. */
3563
3564 set_size = pre_insert_map[0]->size;
3565 num_edges = NUM_EDGES (edge_list);
3566 inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
3567 sbitmap_vector_zero (inserted, num_edges);
3568
3569 for (e = 0; e < num_edges; e++)
3570 {
3571 int indx;
3572 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
3573
3574 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
3575 {
3576 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
3577
3578 for (j = indx; insert && j < (int) expr_hash_table.n_elems; j++, insert >>= 1)
3579 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
3580 {
3581 struct expr *expr = index_map[j];
3582 struct occr *occr;
3583
3584 /* Now look at each deleted occurrence of this expression. */
3585 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
3586 {
3587 if (! occr->deleted_p)
3588 continue;
3589
3590 /* Insert this expression on this edge if it would
3591 reach the deleted occurrence in BB. */
3592 if (!TEST_BIT (inserted[e], j))
3593 {
3594 rtx insn;
3595 edge eg = INDEX_EDGE (edge_list, e);
3596
3597 /* We can't insert anything on an abnormal and
3598 critical edge, so we insert the insn at the end of
3599 the previous block. There are several alternatives
3600 detailed in Morgan's book P277 (sec 10.5) for
3601 handling this situation. This one is easiest for
3602 now. */
3603
3604 if (eg->flags & EDGE_ABNORMAL)
3605 insert_insn_end_basic_block (index_map[j], bb, 0);
3606 else
3607 {
3608 insn = process_insert_insn (index_map[j]);
3609 insert_insn_on_edge (insn, eg);
3610 }
3611
3612 if (dump_file)
3613 {
3614 fprintf (dump_file, "PRE: edge (%d,%d), ",
3615 bb->index,
3616 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
3617 fprintf (dump_file, "copy expression %d\n",
3618 expr->bitmap_index);
3619 }
3620
3621 update_ld_motion_stores (expr);
3622 SET_BIT (inserted[e], j);
3623 did_insert = 1;
3624 gcse_create_count++;
3625 }
3626 }
3627 }
3628 }
3629 }
3630
3631 sbitmap_vector_free (inserted);
3632 return did_insert;
3633 }
3634
3635 /* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
3636 Given "old_reg <- expr" (INSN), instead of adding after it
3637 reaching_reg <- old_reg
3638 it's better to do the following:
3639 reaching_reg <- expr
3640 old_reg <- reaching_reg
3641 because this way copy propagation can discover additional PRE
3642 opportunities. But if this fails, we try the old way.
3643 When "expr" is a store, i.e.
3644 given "MEM <- old_reg", instead of adding after it
3645 reaching_reg <- old_reg
3646 it's better to add it before as follows:
3647 reaching_reg <- old_reg
3648 MEM <- reaching_reg. */
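
/* Concretely, with illustrative register numbers: for INSN

(set (reg 5) (plus (reg 2) (reg 3)))

and reaching_reg (reg 9), the preferred rewrite is

(set (reg 9) (plus (reg 2) (reg 3)))
(set (reg 5) (reg 9))

rather than appending (set (reg 9) (reg 5)) after the original insn. */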
3649
3650 static void
3651 pre_insert_copy_insn (struct expr *expr, rtx insn)
3652 {
3653 rtx reg = expr->reaching_reg;
3654 int regno = REGNO (reg);
3655 int indx = expr->bitmap_index;
3656 rtx pat = PATTERN (insn);
3657 rtx set, first_set, new_insn;
3658 rtx old_reg;
3659 int i;
3660
3661 /* This block matches the logic in hash_scan_insn. */
3662 switch (GET_CODE (pat))
3663 {
3664 case SET:
3665 set = pat;
3666 break;
3667
3668 case PARALLEL:
3669 /* Search through the parallel looking for the set whose
3670 source was the expression that we're interested in. */
3671 first_set = NULL_RTX;
3672 set = NULL_RTX;
3673 for (i = 0; i < XVECLEN (pat, 0); i++)
3674 {
3675 rtx x = XVECEXP (pat, 0, i);
3676 if (GET_CODE (x) == SET)
3677 {
3678 /* If the source was a REG_EQUAL or REG_EQUIV note, we
3679 may not find an equivalent expression, but in this
3680 case the PARALLEL will have a single set. */
3681 if (first_set == NULL_RTX)
3682 first_set = x;
3683 if (expr_equiv_p (SET_SRC (x), expr->expr))
3684 {
3685 set = x;
3686 break;
3687 }
3688 }
3689 }
3690
3691 gcc_assert (first_set);
3692 if (set == NULL_RTX)
3693 set = first_set;
3694 break;
3695
3696 default:
3697 gcc_unreachable ();
3698 }
3699
3700 if (REG_P (SET_DEST (set)))
3701 {
3702 old_reg = SET_DEST (set);
3703 /* Check if we can modify the set destination in the original insn. */
3704 if (validate_change (insn, &SET_DEST (set), reg, 0))
3705 {
3706 new_insn = gen_move_insn (old_reg, reg);
3707 new_insn = emit_insn_after (new_insn, insn);
3708 }
3709 else
3710 {
3711 new_insn = gen_move_insn (reg, old_reg);
3712 new_insn = emit_insn_after (new_insn, insn);
3713 }
3714 }
3715 else /* This is possible only in case of a store to memory. */
3716 {
3717 old_reg = SET_SRC (set);
3718 new_insn = gen_move_insn (reg, old_reg);
3719
3720 /* Check if we can modify the set source in the original insn. */
3721 if (validate_change (insn, &SET_SRC (set), reg, 0))
3722 new_insn = emit_insn_before (new_insn, insn);
3723 else
3724 new_insn = emit_insn_after (new_insn, insn);
3725 }
3726
3727 gcse_create_count++;
3728
3729 if (dump_file)
3730 fprintf (dump_file,
3731 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
3732 BLOCK_NUM (insn), INSN_UID (new_insn), indx,
3733 INSN_UID (insn), regno);
3734 }
3735
3736 /* Copy available expressions that reach the redundant expression
3737 to `reaching_reg'. */
3738
3739 static void
3740 pre_insert_copies (void)
3741 {
3742 unsigned int i, added_copy;
3743 struct expr *expr;
3744 struct occr *occr;
3745 struct occr *avail;
3746
3747 /* For each available expression in the table, copy the result to
3748 `reaching_reg' if the expression reaches a deleted one.
3749
3750 ??? The current algorithm is rather brute force.
3751 Need to do some profiling. */
3752
3753 for (i = 0; i < expr_hash_table.size; i++)
3754 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
3755 {
3756 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
3757 we don't want to insert a copy here because the expression may not
3758 really be redundant. So only insert an insn if the expression was
3759 deleted. This test also avoids further processing if the
3760 expression wasn't deleted anywhere. */
3761 if (expr->reaching_reg == NULL)
3762 continue;
3763
3764 /* Set when we add a copy for that expression. */
3765 added_copy = 0;
3766
3767 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
3768 {
3769 if (! occr->deleted_p)
3770 continue;
3771
3772 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
3773 {
3774 rtx insn = avail->insn;
3775
3776 /* No need to handle this one if handled already. */
3777 if (avail->copied_p)
3778 continue;
3779
3780 /* Don't handle this one if it's a redundant one. */
3781 if (INSN_DELETED_P (insn))
3782 continue;
3783
3784 /* Or if the expression doesn't reach the deleted one. */
3785 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
3786 expr,
3787 BLOCK_FOR_INSN (occr->insn)))
3788 continue;
3789
3790 added_copy = 1;
3791
3792 /* Copy the result of avail to reaching_reg. */
3793 pre_insert_copy_insn (expr, insn);
3794 avail->copied_p = 1;
3795 }
3796 }
3797
3798 if (added_copy)
3799 update_ld_motion_stores (expr);
3800 }
3801 }
3802
3803 /* Emit a move from SRC to DEST, noting the equivalence with the expression
3804 computed in INSN. */
3805 static rtx
3806 gcse_emit_move_after (rtx src, rtx dest, rtx insn)
3807 {
3808 rtx new_rtx;
3809 rtx set = single_set (insn), set2;
3810 rtx note;
3811 rtx eqv;
3812
3813 /* This should never fail since we're creating a reg->reg copy
3814 we've verified to be valid. */
3815
3816 new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);
3817
3818 /* Note the equivalence for local CSE pass. */
3819 set2 = single_set (new_rtx);
3820 if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
3821 return new_rtx;
3822 if ((note = find_reg_equal_equiv_note (insn)))
3823 eqv = XEXP (note, 0);
3824 else
3825 eqv = SET_SRC (set);
3826
3827 set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));
3828
3829 return new_rtx;
3830 }
3831
3832 /* Delete redundant computations.
3833 Deletion is done by changing the insn to copy the `reaching_reg' of
3834 the expression into the result of the SET. It is left to later passes
3835 (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
3836
3837 Returns nonzero if a change is made. */
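
/* For instance, with illustrative register numbers: a redundant

(set (reg 7) (plus (reg 1) (reg 2)))

becomes the copy (set (reg 7) (reg 9)), where (reg 9) is the reaching
register that holds the value of the expression on every path to this
point. */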
3838
3839 static int
3840 pre_delete (void)
3841 {
3842 unsigned int i;
3843 int changed;
3844 struct expr *expr;
3845 struct occr *occr;
3846
3847 changed = 0;
3848 for (i = 0; i < expr_hash_table.size; i++)
3849 for (expr = expr_hash_table.table[i];
3850 expr != NULL;
3851 expr = expr->next_same_hash)
3852 {
3853 int indx = expr->bitmap_index;
3854
3855 /* We only need to search antic_occr since we require
3856 ANTLOC != 0. */
3857
3858 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
3859 {
3860 rtx insn = occr->insn;
3861 rtx set;
3862 basic_block bb = BLOCK_FOR_INSN (insn);
3863
3864 /* We only delete insns that have a single_set. */
3865 if (TEST_BIT (pre_delete_map[bb->index], indx)
3866 && (set = single_set (insn)) != 0
3867 && dbg_cnt (pre_insn))
3868 {
3869 /* Create a pseudo-reg to store the result of reaching
3870 expressions into. Get the mode for the new pseudo from
3871 the mode of the original destination pseudo. */
3872 if (expr->reaching_reg == NULL)
3873 expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST (set));
3874
3875 gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
3876 delete_insn (insn);
3877 occr->deleted_p = 1;
3878 changed = 1;
3879 gcse_subst_count++;
3880
3881 if (dump_file)
3882 {
3883 fprintf (dump_file,
3884 "PRE: redundant insn %d (expression %d) in ",
3885 INSN_UID (insn), indx);
3886 fprintf (dump_file, "bb %d, reaching reg is %d\n",
3887 bb->index, REGNO (expr->reaching_reg));
3888 }
3889 }
3890 }
3891 }
3892
3893 return changed;
3894 }
3895
3896 /* Perform GCSE optimizations using PRE.
3897 This is called by one_pre_gcse_pass after all the dataflow analysis
3898 has been done.
3899
3900 This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
3901 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
3902 Compiler Design and Implementation.
3903
3904 ??? A new pseudo reg is created to hold the reaching expression. The nice
3905 thing about the classical approach is that it would try to use an existing
3906 reg. If the register can't be adequately optimized [i.e. we introduce
3907 reload problems], one could add a pass here to propagate the new register
3908 through the block.
3909
3910 ??? We don't handle single sets in PARALLELs because we're [currently] not
3911 able to copy the rest of the parallel when we insert copies to create full
3912 redundancies from partial redundancies. However, there's no reason why we
3913 can't handle PARALLELs in the cases where there are no partial
3914 redundancies. */
3915
3916 static int
3917 pre_gcse (void)
3918 {
3919 unsigned int i;
3920 int did_insert, changed;
3921 struct expr **index_map;
3922 struct expr *expr;
3923
3924 /* Compute a mapping from expression number (`bitmap_index') to
3925 hash table entry. */
3926
3927 index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
3928 for (i = 0; i < expr_hash_table.size; i++)
3929 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
3930 index_map[expr->bitmap_index] = expr;
3931
3932 /* Delete the redundant insns first so that
3933 - we know what register to use for the new insns and for the other
3934 ones with reaching expressions
3935 - we know which insns are redundant when we go to create copies */
3936
3937 changed = pre_delete ();
3938 did_insert = pre_edge_insert (edge_list, index_map);
3939
3940 /* In other places with reaching expressions, copy the expression to the
3941 specially allocated pseudo-reg that reaches the redundant expr. */
3942 pre_insert_copies ();
3943 if (did_insert)
3944 {
3945 commit_edge_insertions ();
3946 changed = 1;
3947 }
3948
3949 free (index_map);
3950 return changed;
3951 }
3952
3953 /* Top level routine to perform one PRE GCSE pass.
3954
3955 Return nonzero if a change was made. */
3956
3957 static int
3958 one_pre_gcse_pass (void)
3959 {
3960 int changed = 0;
3961
3962 gcse_subst_count = 0;
3963 gcse_create_count = 0;
3964
3965 /* Return if there's nothing to do, or it is too expensive. */
3966 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
3967 || is_too_expensive (_("PRE disabled")))
3968 return 0;
3969
3970 /* We need alias. */
3971 init_alias_analysis ();
3972
3973 bytes_used = 0;
3974 gcc_obstack_init (&gcse_obstack);
3975 alloc_gcse_mem ();
3976
3977 alloc_hash_table (&expr_hash_table, 0);
3978 add_noreturn_fake_exit_edges ();
3979 if (flag_gcse_lm)
3980 compute_ld_motion_mems ();
3981
3982 compute_hash_table (&expr_hash_table);
3983 trim_ld_motion_mems ();
3984 if (dump_file)
3985 dump_hash_table (dump_file, "Expression", &expr_hash_table);
3986
3987 if (expr_hash_table.n_elems > 0)
3988 {
3989 alloc_pre_mem (last_basic_block, expr_hash_table.n_elems);
3990 compute_pre_data ();
3991 changed |= pre_gcse ();
3992 free_edge_list (edge_list);
3993 free_pre_mem ();
3994 }
3995
3996 free_ldst_mems ();
3997 remove_fake_exit_edges ();
3998 free_hash_table (&expr_hash_table);
3999
4000 free_gcse_mem ();
4001 obstack_free (&gcse_obstack, NULL);
4002
4003 /* We are finished with alias. */
4004 end_alias_analysis ();
4005
4006 if (dump_file)
4007 {
4008 fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
4009 current_function_name (), n_basic_blocks, bytes_used);
4010 fprintf (dump_file, "%d substs, %d insns created\n",
4011 gcse_subst_count, gcse_create_count);
4012 }
4013
4014 return changed;
4015 }
4016 \f
4017 /* If X contains any LABEL_REF's, add REG_LABEL_OPERAND notes for them
4018 to INSN. If such notes are added to an insn which references a
4019 CODE_LABEL, the LABEL_NUSES count is incremented. We have to add
4020 these notes, because the following loop optimization pass requires
4021 them. */
4022
4023 /* ??? If there was a jump optimization pass after gcse and before loop,
4024 then we would not need to do this here, because jump would add the
4025 necessary REG_LABEL_OPERAND and REG_LABEL_TARGET notes. */
4026
4027 static void
4028 add_label_notes (rtx x, rtx insn)
4029 {
4030 enum rtx_code code = GET_CODE (x);
4031 int i, j;
4032 const char *fmt;
4033
4034 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
4035 {
4036 /* This code used to ignore labels that referred to dispatch tables to
4037 avoid flow generating (slightly) worse code.
4038
4039 We no longer ignore such label references (see LABEL_REF handling in
4040 mark_jump_label for additional information). */
4041
4042 /* There's no reason for current users to emit jump-insns with
4043 such a LABEL_REF, so we don't have to handle REG_LABEL_TARGET
4044 notes. */
4045 gcc_assert (!JUMP_P (insn));
4046 add_reg_note (insn, REG_LABEL_OPERAND, XEXP (x, 0));
4047
4048 if (LABEL_P (XEXP (x, 0)))
4049 LABEL_NUSES (XEXP (x, 0))++;
4050
4051 return;
4052 }
4053
4054 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
4055 {
4056 if (fmt[i] == 'e')
4057 add_label_notes (XEXP (x, i), insn);
4058 else if (fmt[i] == 'E')
4059 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4060 add_label_notes (XVECEXP (x, i, j), insn);
4061 }
4062 }
4063
4064 /* Compute transparent outgoing information for each block.
4065
4066 An expression is transparent to an edge unless it is killed by
4067 the edge itself. This can only happen with abnormal control flow,
4068 when the edge is traversed through a call. This happens with
4069 non-local labels and exceptions.
4070
4071 This would not be necessary if we split the edge. While this is
4072 normally impossible for abnormal critical edges, with some effort
4073 it should be possible with exception handling, since we still have
4074 control over which handler should be invoked. But due to increased
4075 EH table sizes, this may not be worthwhile. */
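
/* For example, a load from a global variable is not transparent at the
exit of a block that ends in a call, since the callee may store to it;
a load whose address is a constant-pool SYMBOL_REF is transparent,
because the constant pool is read-only. */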
4076
4077 static void
4078 compute_transpout (void)
4079 {
4080 basic_block bb;
4081 unsigned int i;
4082 struct expr *expr;
4083
4084 sbitmap_vector_ones (transpout, last_basic_block);
4085
4086 FOR_EACH_BB (bb)
4087 {
4088 /* Note that flow inserted a nop at the end of basic blocks that
4089 end in call instructions for reasons other than abnormal
4090 control flow. */
4091 if (! CALL_P (BB_END (bb)))
4092 continue;
4093
4094 for (i = 0; i < expr_hash_table.size; i++)
4095 for (expr = expr_hash_table.table[i]; expr ; expr = expr->next_same_hash)
4096 if (MEM_P (expr->expr))
4097 {
4098 if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
4099 && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
4100 continue;
4101
4102 /* ??? Optimally, we would use interprocedural alias
4103 analysis to determine if this mem is actually killed
4104 by this call. */
4105 RESET_BIT (transpout[bb->index], expr->bitmap_index);
4106 }
4107 }
4108 }
4109
4110 /* Code Hoisting variables and subroutines. */
4111
4112 /* Very busy expressions. */
4113 static sbitmap *hoist_vbein;
4114 static sbitmap *hoist_vbeout;
4115
4116 /* Hoistable expressions. */
4117 static sbitmap *hoist_exprs;
4118
4119 /* ??? We could compute post dominators and run this algorithm in
4120 reverse to perform tail merging; doing so would probably be
4121 more effective than the tail merging code in jump.c.
4122
4123 It's unclear if tail merging could be run in parallel with
4124 code hoisting. It would be nice. */
4125
4126 /* Allocate vars used for code hoisting analysis. */
4127
4128 static void
4129 alloc_code_hoist_mem (int n_blocks, int n_exprs)
4130 {
4131 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
4132 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
4133 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
4134
4135 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
4136 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
4137 hoist_exprs = sbitmap_vector_alloc (n_blocks, n_exprs);
4138 transpout = sbitmap_vector_alloc (n_blocks, n_exprs);
4139 }
4140
4141 /* Free vars used for code hoisting analysis. */
4142
4143 static void
4144 free_code_hoist_mem (void)
4145 {
4146 sbitmap_vector_free (antloc);
4147 sbitmap_vector_free (transp);
4148 sbitmap_vector_free (comp);
4149
4150 sbitmap_vector_free (hoist_vbein);
4151 sbitmap_vector_free (hoist_vbeout);
4152 sbitmap_vector_free (hoist_exprs);
4153 sbitmap_vector_free (transpout);
4154
4155 free_dominance_info (CDI_DOMINATORS);
4156 }
4157
4158 /* Compute the very busy expressions at entry/exit from each block.
4159
4160 An expression is very busy if all paths from a given point
4161 compute the expression. */
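
/* In outline, the fixed point computed below satisfies, for each block BB:

VBEOUT(BB) = intersection over all successors S of VBEIN(S)
VBEIN(BB) = ANTLOC(BB) | (VBEOUT(BB) & TRANSP(BB))

i.e. an expression is very busy at the exit of BB if it is very busy at
the entry of every successor, and very busy at the entry if it is either
computed locally, or transparent through BB and very busy at the exit. */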
4162
4163 static void
4164 compute_code_hoist_vbeinout (void)
4165 {
4166 int changed, passes;
4167 basic_block bb;
4168
4169 sbitmap_vector_zero (hoist_vbeout, last_basic_block);
4170 sbitmap_vector_zero (hoist_vbein, last_basic_block);
4171
4172 passes = 0;
4173 changed = 1;
4174
4175 while (changed)
4176 {
4177 changed = 0;
4178
4179 /* We scan the blocks in reverse order to speed up
4180 the convergence. */
4181 FOR_EACH_BB_REVERSE (bb)
4182 {
4183 if (bb->next_bb != EXIT_BLOCK_PTR)
4184 sbitmap_intersection_of_succs (hoist_vbeout[bb->index],
4185 hoist_vbein, bb->index);
4186
4187 changed |= sbitmap_a_or_b_and_c_cg (hoist_vbein[bb->index],
4188 antloc[bb->index],
4189 hoist_vbeout[bb->index],
4190 transp[bb->index]);
4191 }
4192
4193 passes++;
4194 }
4195
4196 if (dump_file)
4197 fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);
4198 }
4199
4200 /* Top level routine to do the dataflow analysis needed by code hoisting. */
4201
4202 static void
4203 compute_code_hoist_data (void)
4204 {
4205 compute_local_properties (transp, comp, antloc, &expr_hash_table);
4206 compute_transpout ();
4207 compute_code_hoist_vbeinout ();
4208 calculate_dominance_info (CDI_DOMINATORS);
4209 if (dump_file)
4210 fprintf (dump_file, "\n");
4211 }
4212
4213 /* Determine if the expression identified by EXPR_INDEX would
4214 reach BB unimpaired if it were placed at the end of EXPR_BB.
4215
4216 It's unclear exactly what Muchnick meant by "unimpaired". It seems
4217 to me that the expression must either be computed or transparent in
4218 *every* block in the path(s) from EXPR_BB to BB. Any other definition
4219 would allow the expression to be hoisted out of loops, even if
4220 the expression wasn't a loop invariant.
4221
4222 Contrast this to reachability for PRE where an expression is
4223 considered reachable if *any* path reaches it instead of *all*
4224 paths. */
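
/* For example, if the only paths are EXPR_BB -> B1 -> BB and
EXPR_BB -> B2 -> BB, the expression reaches BB unimpaired only when it
is computed or transparent in both B1 and B2, whereas PRE-style
reachability would be satisfied by either path alone. */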
4225
4226 static int
4227 hoist_expr_reaches_here_p (basic_block expr_bb, int expr_index, basic_block bb, char *visited)
4228 {
4229 edge pred;
4230 edge_iterator ei;
4231 int visited_allocated_locally = 0;
4232
4233
4234 if (visited == NULL)
4235 {
4236 visited_allocated_locally = 1;
4237 visited = XCNEWVEC (char, last_basic_block);
4238 }
4239
4240 FOR_EACH_EDGE (pred, ei, bb->preds)
4241 {
4242 basic_block pred_bb = pred->src;
4243
4244 if (pred->src == ENTRY_BLOCK_PTR)
4245 break;
4246 else if (pred_bb == expr_bb)
4247 continue;
4248 else if (visited[pred_bb->index])
4249 continue;
4250
4251 /* Does this predecessor generate this expression? */
4252 else if (TEST_BIT (comp[pred_bb->index], expr_index))
4253 break;
4254 else if (! TEST_BIT (transp[pred_bb->index], expr_index))
4255 break;
4256
4257 /* Not killed. */
4258 else
4259 {
4260 visited[pred_bb->index] = 1;
4261 if (! hoist_expr_reaches_here_p (expr_bb, expr_index,
4262 pred_bb, visited))
4263 break;
4264 }
4265 }
4266 if (visited_allocated_locally)
4267 free (visited);
4268
4269 return (pred == NULL);
4270 }
4271 \f
4272 /* Actually perform code hoisting. */
4273
4274 static int
4275 hoist_code (void)
4276 {
4277 basic_block bb, dominated;
4278 VEC (basic_block, heap) *domby;
4279 unsigned int i,j;
4280 struct expr **index_map;
4281 struct expr *expr;
4282 int changed = 0;
4283
4284 sbitmap_vector_zero (hoist_exprs, last_basic_block);
4285
4286 /* Compute a mapping from expression number (`bitmap_index') to
4287 hash table entry. */
4288
4289 index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
4290 for (i = 0; i < expr_hash_table.size; i++)
4291 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
4292 index_map[expr->bitmap_index] = expr;
4293
4294 /* Walk over each basic block looking for potentially hoistable
4295 expressions; nothing gets hoisted from the entry block. */
4296 FOR_EACH_BB (bb)
4297 {
4298 int found = 0;
4299 int insn_inserted_p;
4300
4301 domby = get_dominated_by (CDI_DOMINATORS, bb);
4302 /* Examine each expression that is very busy at the exit of this
4303 block. These are the potentially hoistable expressions. */
4304 for (i = 0; i < hoist_vbeout[bb->index]->n_bits; i++)
4305 {
4306 int hoistable = 0;
4307
4308 if (TEST_BIT (hoist_vbeout[bb->index], i)
4309 && TEST_BIT (transpout[bb->index], i))
4310 {
4311 /* We've found a potentially hoistable expression; now
4312 we look at every block BB dominates to see if it
4313 computes the expression. */
4314 for (j = 0; VEC_iterate (basic_block, domby, j, dominated); j++)
4315 {
4316 /* Ignore self dominance. */
4317 if (bb == dominated)
4318 continue;
4319 /* We've found a dominated block, now see if it computes
4320 the busy expression and whether or not moving that
4321 expression to the "beginning" of that block is safe. */
4322 if (!TEST_BIT (antloc[dominated->index], i))
4323 continue;
4324
4325 /* Note if the expression would reach the dominated block
4326 unimpaired if it were placed at the end of BB.
4327
4328 Keep track of how many times this expression is hoistable
4329 from a dominated block into BB. */
4330 if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
4331 hoistable++;
4332 }
4333
4334 /* If we found more than one hoistable occurrence of this
4335 expression, then note it in the bitmap of expressions to
4336 hoist. It makes no sense to hoist things which are computed
4337 in only one BB, and doing so tends to pessimize register
4338 allocation. One could increase this value to try harder
4339 to avoid any possible code expansion due to register
4340 allocation issues; however, experiments have shown that
4341 the vast majority of hoistable expressions are only movable
4342 from two successors, so raising this threshold is likely
4343 to nullify any benefit we get from code hoisting. */
4344 if (hoistable > 1)
4345 {
4346 SET_BIT (hoist_exprs[bb->index], i);
4347 found = 1;
4348 }
4349 }
4350 }
4351 /* If we found nothing to hoist, then quit now. */
4352 if (! found)
4353 {
4354 VEC_free (basic_block, heap, domby);
4355 continue;
4356 }
4357
4358 /* Loop over all the hoistable expressions. */
4359 for (i = 0; i < hoist_exprs[bb->index]->n_bits; i++)
4360 {
4361 /* We want to insert the expression into BB only once, so
4362 note when we've inserted it. */
4363 insn_inserted_p = 0;
4364
4365 /* These tests should be the same as the tests above. */
4366 if (TEST_BIT (hoist_exprs[bb->index], i))
4367 {
4368 /* We've found a potentially hoistable expression; now
4369 we look at every block BB dominates to see if it
4370 computes the expression. */
4371 for (j = 0; VEC_iterate (basic_block, domby, j, dominated); j++)
4372 {
4373 /* Ignore self dominance. */
4374 if (bb == dominated)
4375 continue;
4376
4377 /* We've found a dominated block, now see if it computes
4378 the busy expression and whether or not moving that
4379 expression to the "beginning" of that block is safe. */
4380 if (!TEST_BIT (antloc[dominated->index], i))
4381 continue;
4382
4383 /* The expression is computed in the dominated block and
4384 it would be safe to compute it at the start of the
4385 dominated block. Now we have to determine if the
4386 expression would reach the dominated block if it was
4387 placed at the end of BB. */
4388 if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
4389 {
4390 struct expr *expr = index_map[i];
4391 struct occr *occr = expr->antic_occr;
4392 rtx insn;
4393 rtx set;
4394
4395 /* Find the right occurrence of this expression. */
4396 while (occr && BLOCK_FOR_INSN (occr->insn) != dominated)
4397 occr = occr->next;
4398
4399 gcc_assert (occr);
4400 insn = occr->insn;
4401 set = single_set (insn);
4402 gcc_assert (set);
4403
4404 /* Create a pseudo-reg to store the result of reaching
4405 expressions into. Get the mode for the new pseudo
4406 from the mode of the original destination pseudo. */
4407 if (expr->reaching_reg == NULL)
4408 expr->reaching_reg
4409 = gen_reg_rtx_and_attrs (SET_DEST (set));
4410
4411 gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
4412 delete_insn (insn);
4413 occr->deleted_p = 1;
4414 changed = 1;
4415 gcse_subst_count++;
4416
4417 if (!insn_inserted_p)
4418 {
4419 insert_insn_end_basic_block (index_map[i], bb, 0);
4420 insn_inserted_p = 1;
4421 }
4422 }
4423 }
4424 }
4425 }
4426 VEC_free (basic_block, heap, domby);
4427 }
4428
4429 free (index_map);
4430
4431 return changed;
4432 }
4433
4434 /* Top level routine to perform one code hoisting (aka unification) pass.
4435
4436 Return nonzero if a change was made. */
4437
4438 static int
4439 one_code_hoisting_pass (void)
4440 {
4441 int changed = 0;
4442
4443 gcse_subst_count = 0;
4444 gcse_create_count = 0;
4445
4446 /* Return if there's nothing to do, or it is too expensive. */
4447 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
4448 || is_too_expensive (_("GCSE disabled")))
4449 return 0;
4450
4451 /* We need alias. */
4452 init_alias_analysis ();
4453
4454 bytes_used = 0;
4455 gcc_obstack_init (&gcse_obstack);
4456 alloc_gcse_mem ();
4457
4458 alloc_hash_table (&expr_hash_table, 0);
4459 compute_hash_table (&expr_hash_table);
4460 if (dump_file)
4461 dump_hash_table (dump_file, "Code Hoisting Expressions", &expr_hash_table);
4462
4463 if (expr_hash_table.n_elems > 0)
4464 {
4465 alloc_code_hoist_mem (last_basic_block, expr_hash_table.n_elems);
4466 compute_code_hoist_data ();
4467 changed = hoist_code ();
4468 free_code_hoist_mem ();
4469 }
4470
4471 free_hash_table (&expr_hash_table);
4472 free_gcse_mem ();
4473 obstack_free (&gcse_obstack, NULL);
4474
4475 /* We are finished with alias. */
4476 end_alias_analysis ();
4477
4478 if (dump_file)
4479 {
4480 fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
4481 current_function_name (), n_basic_blocks, bytes_used);
4482 fprintf (dump_file, "%d substs, %d insns created\n",
4483 gcse_subst_count, gcse_create_count);
4484 }
4485
4486 return changed;
4487 }
4488 \f
4489 /* Here we provide the things required to do store motion towards
4490 the exit. In order for this to be effective, gcse also needed to
4491 be taught how to move a load when it is killed only by a store to itself.
4492
4493 int i;
4494 float a[10];
4495
4496 void foo(float scale)
4497 {
4498 for (i=0; i<10; i++)
4499 a[i] *= scale;
4500 }
4501
4502 'i' is both loaded and stored to in the loop. Normally, gcse cannot move
4503 the load out since it's live around the loop and stored at the bottom
4504 of the loop.
4505
4506 The 'Load Motion' referred to and implemented in this file is
4507 an enhancement to gcse which, when using edge-based LCM, recognizes
4508 this situation and allows gcse to move the load out of the loop.
4509
4510 Once gcse has hoisted the load, store motion can then push this
4511 load towards the exit, and we end up with no loads or stores of 'i'
4512 in the loop. */
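
/* A sketch of the intended result (not literal compiler output): after
load motion hoists the load of 'i' and store motion sinks the store,
the loop behaves as if it were written

reg = 0;
for (; reg < 10; reg++)
a[reg] *= scale;
i = reg;

with 'i' kept in a register for the duration of the loop. */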
4513
4514 static hashval_t
4515 pre_ldst_expr_hash (const void *p)
4516 {
4517 int do_not_record_p = 0;
4518 const struct ls_expr *const x = (const struct ls_expr *) p;
4519 return hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
4520 }
4521
4522 static int
4523 pre_ldst_expr_eq (const void *p1, const void *p2)
4524 {
4525 const struct ls_expr *const ptr1 = (const struct ls_expr *) p1,
4526 *const ptr2 = (const struct ls_expr *) p2;
4527 return expr_equiv_p (ptr1->pattern, ptr2->pattern);
4528 }
4529
4530 /* This will search the ldst list for a matching expression. If it
4531 doesn't find one, we create one and initialize it. */
4532
4533 static struct ls_expr *
4534 ldst_entry (rtx x)
4535 {
4536 int do_not_record_p = 0;
4537 struct ls_expr * ptr;
4538 unsigned int hash;
4539 void **slot;
4540 struct ls_expr e;
4541
4542 hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
4543 NULL, /*have_reg_qty=*/false);
4544
4545 e.pattern = x;
4546 slot = htab_find_slot_with_hash (pre_ldst_table, &e, hash, INSERT);
4547 if (*slot)
4548 return (struct ls_expr *)*slot;
4549
4550 ptr = XNEW (struct ls_expr);
4551
4552 ptr->next = pre_ldst_mems;
4553 ptr->expr = NULL;
4554 ptr->pattern = x;
4555 ptr->pattern_regs = NULL_RTX;
4556 ptr->loads = NULL_RTX;
4557 ptr->stores = NULL_RTX;
4558 ptr->reaching_reg = NULL_RTX;
4559 ptr->invalid = 0;
4560 ptr->index = 0;
4561 ptr->hash_index = hash;
4562 pre_ldst_mems = ptr;
4563 *slot = ptr;
4564
4565 return ptr;
4566 }
4567
4568 /* Free up an individual ldst entry. */
4569
4570 static void
4571 free_ldst_entry (struct ls_expr * ptr)
4572 {
4573 free_INSN_LIST_list (& ptr->loads);
4574 free_INSN_LIST_list (& ptr->stores);
4575
4576 free (ptr);
4577 }
4578
4579 /* Free up all memory associated with the ldst list. */
4580
4581 static void
4582 free_ldst_mems (void)
4583 {
4584 if (pre_ldst_table)
4585 htab_delete (pre_ldst_table);
4586 pre_ldst_table = NULL;
4587
4588 while (pre_ldst_mems)
4589 {
4590 struct ls_expr * tmp = pre_ldst_mems;
4591
4592 pre_ldst_mems = pre_ldst_mems->next;
4593
4594 free_ldst_entry (tmp);
4595 }
4596
4597 pre_ldst_mems = NULL;
4598 }
4599
4600 /* Dump debugging info about the ldst list. */
4601
4602 static void
4603 print_ldst_list (FILE * file)
4604 {
4605 struct ls_expr * ptr;
4606
4607 fprintf (file, "LDST list: \n");
4608
4609 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
4610 {
4611 fprintf (file, " Pattern (%3d): ", ptr->index);
4612
4613 print_rtl (file, ptr->pattern);
4614
4615 fprintf (file, "\n Loads : ");
4616
4617 if (ptr->loads)
4618 print_rtl (file, ptr->loads);
4619 else
4620 fprintf (file, "(nil)");
4621
4622 fprintf (file, "\n Stores : ");
4623
4624 if (ptr->stores)
4625 print_rtl (file, ptr->stores);
4626 else
4627 fprintf (file, "(nil)");
4628
4629 fprintf (file, "\n\n");
4630 }
4631
4632 fprintf (file, "\n");
4633 }
4634
4635 /* Return the ldst list entry whose pattern matches X, or NULL if there is no such entry or it is invalid. */
4636
4637 static struct ls_expr *
4638 find_rtx_in_ldst (rtx x)
4639 {
4640 struct ls_expr e;
4641 void **slot;
4642 if (!pre_ldst_table)
4643 return NULL;
4644 e.pattern = x;
4645 slot = htab_find_slot (pre_ldst_table, &e, NO_INSERT);
4646 if (!slot || ((struct ls_expr *)*slot)->invalid)
4647 return NULL;
4648 return (struct ls_expr *) *slot;
4649 }
4650
4651 /* Return first item in the list. */
4652
4653 static inline struct ls_expr *
4654 first_ls_expr (void)
4655 {
4656 return pre_ldst_mems;
4657 }
4658
4659 /* Return the next item in the list after the specified one. */
4660
4661 static inline struct ls_expr *
4662 next_ls_expr (struct ls_expr * ptr)
4663 {
4664 return ptr->next;
4665 }
4666 \f
4667 /* Load Motion for loads which only kill themselves. */
4668
4669 /* Return true if x is a simple MEM operation, with no registers or
4670 side effects. These are the types of loads we consider for the
4671 ld_motion list; otherwise we let the usual aliasing take care of it. */
4672
4673 static int
4674 simple_mem (const_rtx x)
4675 {
4676 if (! MEM_P (x))
4677 return 0;
4678
4679 if (MEM_VOLATILE_P (x))
4680 return 0;
4681
4682 if (GET_MODE (x) == BLKmode)
4683 return 0;
4684
4685 /* If we are handling exceptions, we must be careful with memory references
4686 that may trap. If we are not, the behavior is undefined, so we may just
4687 continue. */
4688 if (flag_non_call_exceptions && may_trap_p (x))
4689 return 0;
4690
4691 if (side_effects_p (x))
4692 return 0;
4693
4694 /* Do not consider function arguments passed on the stack. */
4695 if (reg_mentioned_p (stack_pointer_rtx, x))
4696 return 0;
4697
4698 if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
4699 return 0;
4700
4701 return 1;
4702 }
4703
4704 /* Make sure there isn't a buried reference in this pattern anywhere.
4705 If there is, invalidate the entry for it since we're not capable
4706 of fixing it up just yet. We have to be sure we know about ALL
4707 loads since the aliasing code will allow all entries in the
4708 ld_motion list to not alias one another. If we miss a load, we will get
4709 the wrong value since gcse might common it and we won't know to
4710 fix it up. */
4711
4712 static void
4713 invalidate_any_buried_refs (rtx x)
4714 {
4715 const char * fmt;
4716 int i, j;
4717 struct ls_expr * ptr;
4718
4719 /* Invalidate it in the list. */
4720 if (MEM_P (x) && simple_mem (x))
4721 {
4722 ptr = ldst_entry (x);
4723 ptr->invalid = 1;
4724 }
4725
4726 /* Recursively process the insn. */
4727 fmt = GET_RTX_FORMAT (GET_CODE (x));
4728
4729 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
4730 {
4731 if (fmt[i] == 'e')
4732 invalidate_any_buried_refs (XEXP (x, i));
4733 else if (fmt[i] == 'E')
4734 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4735 invalidate_any_buried_refs (XVECEXP (x, i, j));
4736 }
4737 }
4738
4739 /* Find all the 'simple' MEMs which are used in LOADs and STORES. 'Simple'
4740 means MEM loads and stores to symbols, with no side effects
4741 and no registers in the expression. For a MEM destination, we also
4742 check that the insn is still valid if we replace the destination with a
4743 REG, as is done in update_ld_motion_stores. Any uses/defs
4744 which don't match these criteria are invalidated and trimmed out
4745 later. */
4746
4747 static void
4748 compute_ld_motion_mems (void)
4749 {
4750 struct ls_expr * ptr;
4751 basic_block bb;
4752 rtx insn;
4753
4754 pre_ldst_mems = NULL;
4755 pre_ldst_table = htab_create (13, pre_ldst_expr_hash,
4756 pre_ldst_expr_eq, NULL);
4757
4758 FOR_EACH_BB (bb)
4759 {
4760 FOR_BB_INSNS (bb, insn)
4761 {
4762 if (NONDEBUG_INSN_P (insn))
4763 {
4764 if (GET_CODE (PATTERN (insn)) == SET)
4765 {
4766 rtx src = SET_SRC (PATTERN (insn));
4767 rtx dest = SET_DEST (PATTERN (insn));
4768
4769 /* Check for a simple LOAD... */
4770 if (MEM_P (src) && simple_mem (src))
4771 {
4772 ptr = ldst_entry (src);
4773 if (REG_P (dest))
4774 ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
4775 else
4776 ptr->invalid = 1;
4777 }
4778 else
4779 {
4780 /* Make sure there isn't a buried load somewhere. */
4781 invalidate_any_buried_refs (src);
4782 }
4783
4784 /* Check for stores. Don't worry about aliased ones; they
4785 will block any movement we might do later. We only care
4786 about this exact pattern since it is the only
4787 circumstance in which we will ignore the aliasing info. */
4788 if (MEM_P (dest) && simple_mem (dest))
4789 {
4790 ptr = ldst_entry (dest);
4791
4792 if (! MEM_P (src)
4793 && GET_CODE (src) != ASM_OPERANDS
4794 /* Check for REG manually since want_to_gcse_p
4795 returns 0 for all REGs. */
4796 && can_assign_to_reg_without_clobbers_p (src))
4797 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
4798 else
4799 ptr->invalid = 1;
4800 }
4801 }
4802 else
4803 invalidate_any_buried_refs (PATTERN (insn));
4804 }
4805 }
4806 }
4807 }
4808
4809 /* Remove any references that either have been invalidated or are not in the
4810 expression list for pre gcse. */
4811
4812 static void
4813 trim_ld_motion_mems (void)
4814 {
4815 struct ls_expr * * last = & pre_ldst_mems;
4816 struct ls_expr * ptr = pre_ldst_mems;
4817
4818 while (ptr != NULL)
4819 {
4820 struct expr * expr;
4821
4822 /* Delete if entry has been made invalid. */
4823 if (! ptr->invalid)
4824 {
4825 /* Delete if we cannot find this mem in the expression list. */
4826 unsigned int hash = ptr->hash_index % expr_hash_table.size;
4827
4828 for (expr = expr_hash_table.table[hash];
4829 expr != NULL;
4830 expr = expr->next_same_hash)
4831 if (expr_equiv_p (expr->expr, ptr->pattern))
4832 break;
4833 }
4834 else
4835 expr = (struct expr *) 0;
4836
4837 if (expr)
4838 {
4839 /* Set the expression field if we are keeping it. */
4840 ptr->expr = expr;
4841 last = & ptr->next;
4842 ptr = ptr->next;
4843 }
4844 else
4845 {
4846 *last = ptr->next;
4847 htab_remove_elt_with_hash (pre_ldst_table, ptr, ptr->hash_index);
4848 free_ldst_entry (ptr);
4849 ptr = * last;
4850 }
4851 }
4852
4853 /* Show the world what we've found. */
4854 if (dump_file && pre_ldst_mems != NULL)
4855 print_ldst_list (dump_file);
4856 }
4857
4858 /* This routine will take an expression which we are replacing with
4859 a reaching register, and update any stores that are needed if
4860 that expression is in the ld_motion list. Stores are updated by
4861 copying their SRC to the reaching register, and then storing
4862 the reaching register into the store location. This keeps the
4863 correct value in the reaching register for the loads. */
4864
4865 static void
4866 update_ld_motion_stores (struct expr * expr)
4867 {
4868 struct ls_expr * mem_ptr;
4869
4870 if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
4871 {
4872 /* We can try to find just the REACHED stores, but it shouldn't
4873 matter if we set the reaching reg everywhere... some might be
4874 dead and should be eliminated later. */
4875
4876 /* We replace (set mem expr) with (set reg expr) (set mem reg)
4877 where reg is the reaching reg used in the load. We checked in
4878 compute_ld_motion_mems that we can replace (set mem expr) with
4879 (set reg expr) in that insn. */
4880 rtx list = mem_ptr->stores;
4881
4882 for ( ; list != NULL_RTX; list = XEXP (list, 1))
4883 {
4884 rtx insn = XEXP (list, 0);
4885 rtx pat = PATTERN (insn);
4886 rtx src = SET_SRC (pat);
4887 rtx reg = expr->reaching_reg;
4888 rtx copy, new_rtx;
4889
4890 /* If we've already copied it, continue. */
4891 if (expr->reaching_reg == src)
4892 continue;
4893
4894 if (dump_file)
4895 {
4896 fprintf (dump_file, "PRE: store updated with reaching reg ");
4897 print_rtl (dump_file, expr->reaching_reg);
4898 fprintf (dump_file, ":\n ");
4899 print_inline_rtx (dump_file, insn, 8);
4900 fprintf (dump_file, "\n");
4901 }
4902
4903 copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
4904 new_rtx = emit_insn_before (copy, insn);
4905 SET_SRC (pat) = reg;
4906 df_insn_rescan (insn);
4907
4908 /* Un-recognize this pattern since it's probably different now. */
4909 INSN_CODE (insn) = -1;
4910 gcse_create_count++;
4911 }
4912 }
4913 }
4914 \f
4915 /* Return true if the graph is too expensive to optimize. PASS is the
4916 optimization about to be performed. */
4917
4918 static bool
4919 is_too_expensive (const char *pass)
4920 {
4921 /* Trying to perform global optimizations on flow graphs which have
4922 a high connectivity will take a long time and is unlikely to be
4923 particularly useful.
4924
4925 In normal circumstances a cfg should have about twice as many
4926 edges as blocks. But we do not want to punish small functions
4927 which have a couple of switch statements. Rather than simply
4928 thresholding the number of blocks, use something with a more
4929 graceful degradation. */
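
/* For example, a function with 1000 basic blocks may have up to
20000 + 4 * 1000 = 24000 edges before the pass is disabled. */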
4930 if (n_edges > 20000 + n_basic_blocks * 4)
4931 {
4932 warning (OPT_Wdisabled_optimization,
4933 "%s: %d basic blocks and %d edges/basic block",
4934 pass, n_basic_blocks, n_edges / n_basic_blocks);
4935
4936 return true;
4937 }
4938
4939 /* If allocating memory for the cprop bitmap would take up too much
4940 storage, it's better just to disable the optimization. */
4941 if ((n_basic_blocks
4942 * SBITMAP_SET_SIZE (max_reg_num ())
4943 * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
4944 {
4945 warning (OPT_Wdisabled_optimization,
4946 "%s: %d basic blocks and %d registers",
4947 pass, n_basic_blocks, max_reg_num ());
4948
4949 return true;
4950 }
4951
4952 return false;
4953 }
4954
4955 \f
4956 /* Main function for the CPROP pass. */
4957
4958 static int
4959 one_cprop_pass (void)
4960 {
4961 int changed = 0;
4962
4963 /* Return if there's nothing to do, or it is too expensive. */
4964 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
4965 || is_too_expensive (_("const/copy propagation disabled")))
4966 return 0;
4967
4968 global_const_prop_count = local_const_prop_count = 0;
4969 global_copy_prop_count = local_copy_prop_count = 0;
4970
4971 bytes_used = 0;
4972 gcc_obstack_init (&gcse_obstack);
4973 alloc_gcse_mem ();
4974
4975 /* Do a local const/copy propagation pass first. The global pass
4976 only handles global opportunities.
4977 If the local pass changes something, remove any unreachable blocks
4978 because the CPROP global dataflow analysis may get into infinite
4979 loops for CFGs with unreachable blocks.
4980
4981 FIXME: This local pass should not be necessary after CSE (but for
4982 some reason it still is). It is also (proven) not necessary
4983 to run the local pass right after FWPWOP.
4984
4985 FIXME: The global analysis would not get into infinite loops if it
4986 would use the DF solver (via df_simple_dataflow) instead of
4987 the solver implemented in this file. */
4988 if (local_cprop_pass ())
4989 {
4990 delete_unreachable_blocks ();
4991 df_analyze ();
4992 }
4993
4994 /* Determine implicit sets. */
4995 implicit_sets = XCNEWVEC (rtx, last_basic_block);
4996 find_implicit_sets ();
4997
4998 alloc_hash_table (&set_hash_table, 1);
4999 compute_hash_table (&set_hash_table);
5000
5001 /* Free implicit_sets before peak usage. */
5002 free (implicit_sets);
5003 implicit_sets = NULL;
5004
5005 if (dump_file)
5006 dump_hash_table (dump_file, "SET", &set_hash_table);
5007 if (set_hash_table.n_elems > 0)
5008 {
5009 basic_block bb;
5010 rtx insn;
5011
5012 alloc_cprop_mem (last_basic_block, set_hash_table.n_elems);
5013 compute_cprop_data ();
5014
5015 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb)
5016 {
5017 /* Reset tables used to keep track of what's still valid [since
5018 the start of the block]. */
5019 reset_opr_set_tables ();
5020
5021 FOR_BB_INSNS (bb, insn)
5022 if (INSN_P (insn))
5023 {
5024 changed |= cprop_insn (insn);
5025
5026 /* Keep track of everything modified by this insn. */
5027 /* ??? Need to be careful w.r.t. mods done to INSN.
5028 Don't call mark_oprs_set if we turned the
5029 insn into a NOTE. */
5030 if (! NOTE_P (insn))
5031 mark_oprs_set (insn);
5032 }
5033 }
5034
5035 changed |= bypass_conditional_jumps ();
5036 free_cprop_mem ();
5037 }
5038
5039 free_hash_table (&set_hash_table);
5040 free_gcse_mem ();
5041 obstack_free (&gcse_obstack, NULL);
5042
5043 if (dump_file)
5044 {
5045 fprintf (dump_file, "CPROP of %s, %d basic blocks, %d bytes needed, ",
5046 current_function_name (), n_basic_blocks, bytes_used);
5047 fprintf (dump_file, "%d local const props, %d local copy props, ",
5048 local_const_prop_count, local_copy_prop_count);
5049 fprintf (dump_file, "%d global const props, %d global copy props\n\n",
5050 global_const_prop_count, global_copy_prop_count);
5051 }
5052
5053 return changed;
5054 }
5055
5056 \f
5057 /* All the passes implemented in this file. Each pass has its
5058 own gate and execute function, and at the end of the file a
5059 pass definition for passes.c.
5060
5061 We do not construct an accurate cfg in functions which call
5062 setjmp, so none of these passes runs if the function calls
5063 setjmp.
5064 FIXME: Should just handle setjmp via REG_SETJMP notes. */
5065
5066 static bool
5067 gate_rtl_cprop (void)
5068 {
5069 return optimize > 0 && flag_gcse
5070 && !cfun->calls_setjmp
5071 && dbg_cnt (cprop);
5072 }
5073
5074 static unsigned int
5075 execute_rtl_cprop (void)
5076 {
5077 delete_unreachable_blocks ();
5078 df_note_add_problem ();
5079 df_set_flags (DF_LR_RUN_DCE);
5080 df_analyze ();
5081 flag_rerun_cse_after_global_opts |= one_cprop_pass ();
5082 return 0;
5083 }
5084
5085 static bool
5086 gate_rtl_pre (void)
5087 {
5088 return optimize > 0 && flag_gcse
5089 && !cfun->calls_setjmp
5090 && optimize_function_for_speed_p (cfun)
5091 && dbg_cnt (pre);
5092 }
5093
5094 static unsigned int
5095 execute_rtl_pre (void)
5096 {
5097 delete_unreachable_blocks ();
5098 df_note_add_problem ();
5099 df_analyze ();
5100 flag_rerun_cse_after_global_opts |= one_pre_gcse_pass ();
5101 return 0;
5102 }
5103
5104 static bool
5105 gate_rtl_hoist (void)
5106 {
5107 return optimize > 0 && flag_gcse
5108 && !cfun->calls_setjmp
5109 /* It does not make sense to run code hoisting unless we are optimizing
5110 for code size -- it rarely makes programs faster, and can make them
5111 bigger if we did PRE (when optimizing for space, we don't run PRE). */
5112 && optimize_function_for_size_p (cfun)
5113 && dbg_cnt (hoist);
5114 }
5115
5116 static unsigned int
5117 execute_rtl_hoist (void)
5118 {
5119 delete_unreachable_blocks ();
5120 df_note_add_problem ();
5121 df_analyze ();
5122 flag_rerun_cse_after_global_opts |= one_code_hoisting_pass ();
5123 return 0;
5124 }
5125
5126 struct rtl_opt_pass pass_rtl_cprop =
5127 {
5128 {
5129 RTL_PASS,
5130 "cprop", /* name */
5131 gate_rtl_cprop, /* gate */
5132 execute_rtl_cprop, /* execute */
5133 NULL, /* sub */
5134 NULL, /* next */
5135 0, /* static_pass_number */
5136 TV_CPROP, /* tv_id */
5137 PROP_cfglayout, /* properties_required */
5138 0, /* properties_provided */
5139 0, /* properties_destroyed */
5140 0, /* todo_flags_start */
5141 TODO_df_finish | TODO_verify_rtl_sharing |
5142 TODO_dump_func |
5143 TODO_verify_flow | TODO_ggc_collect /* todo_flags_finish */
5144 }
5145 };
5146
5147 struct rtl_opt_pass pass_rtl_pre =
5148 {
5149 {
5150 RTL_PASS,
5151 "pre", /* name */
5152 gate_rtl_pre, /* gate */
5153 execute_rtl_pre, /* execute */
5154 NULL, /* sub */
5155 NULL, /* next */
5156 0, /* static_pass_number */
5157 TV_PRE, /* tv_id */
5158 PROP_cfglayout, /* properties_required */
5159 0, /* properties_provided */
5160 0, /* properties_destroyed */
5161 0, /* todo_flags_start */
5162 TODO_df_finish | TODO_verify_rtl_sharing |
5163 TODO_dump_func |
5164 TODO_verify_flow | TODO_ggc_collect /* todo_flags_finish */
5165 }
5166 };
5167
5168 struct rtl_opt_pass pass_rtl_hoist =
5169 {
5170 {
5171 RTL_PASS,
5172 "hoist", /* name */
5173 gate_rtl_hoist, /* gate */
5174 execute_rtl_hoist, /* execute */
5175 NULL, /* sub */
5176 NULL, /* next */
5177 0, /* static_pass_number */
5178 TV_HOIST, /* tv_id */
5179 PROP_cfglayout, /* properties_required */
5180 0, /* properties_provided */
5181 0, /* properties_destroyed */
5182 0, /* todo_flags_start */
5183 TODO_df_finish | TODO_verify_rtl_sharing |
5184 TODO_dump_func |
5185 TODO_verify_flow | TODO_ggc_collect /* todo_flags_finish */
5186 }
5187 };
5188
5189 #include "gt-gcse.h"