/* Partial redundancy elimination / Hoisting for RTL.
   Copyright (C) 1997-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* TODO
   - reordering of memory allocation and freeing to be more space efficient
   - calc rough register pressure information and use the info to drive all
     kinds of code motion (including code hoisting) in a unified way.
*/

/* References searched while implementing this.

   Compilers Principles, Techniques and Tools
   Aho, Sethi, Ullman
   Addison-Wesley, 1988

   Global Optimization by Suppression of Partial Redundancies
   E. Morel, C. Renvoise
   Communications of the ACM, Vol. 22, Num. 2, Feb. 1979

   A Portable Machine-Independent Global Optimizer - Design and Measurements
   Frederick Chow
   Stanford Ph.D. thesis, Dec. 1983

   A Fast Algorithm for Code Movement Optimization
   D.M. Dhamdhere
   SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988

   A Solution to a Problem with Morel and Renvoise's
   Global Optimization by Suppression of Partial Redundancies
   K-H Drechsler, M.P. Stadel
   ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988

   Practical Adaptation of the Global Optimization
   Algorithm of Morel and Renvoise
   D.M. Dhamdhere
   ACM TOPLAS, Vol. 13, Num. 2, Apr. 1991

   Efficiently Computing Static Single Assignment Form and the Control
   Dependence Graph
   R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
   ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991

   Lazy Code Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   What's In a Region?  Or Computing Control Dependence Regions in Near-Linear
   Time for Reducible Flow Control
   Thomas Ball
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   An Efficient Representation for Sparse Sets
   Preston Briggs, Linda Torczon
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
   K-H Drechsler, M.P. Stadel
   ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993

   Partial Dead Code Elimination
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Effective Partial Redundancy Elimination
   P. Briggs, K.D. Cooper
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   The Program Structure Tree: Computing Control Regions in Linear Time
   R. Johnson, D. Pearson, K. Pingali
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Optimal Code Motion: Theory and Practice
   J. Knoop, O. Ruthing, B. Steffen
   ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994

   The power of assignment motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Global code motion / global value numbering
   C. Click
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Value Driven Redundancy Elimination
   L.T. Simpson
   Rice University Ph.D. thesis, Apr. 1996

   Value Numbering
   L.T. Simpson
   Massively Scalar Compiler Project, Rice University, Sep. 1996

   High Performance Compilers for Parallel Computing
   Michael Wolfe
   Addison-Wesley, 1996

   Advanced Compiler Design and Implementation
   Steven Muchnick
   Morgan Kaufmann, 1997

   Building an Optimizing Compiler
   Robert Morgan
   Digital Press, 1998

   People wishing to speed up the code here should read:
     Elimination Algorithms for Data Flow Analysis
     B.G. Ryder, M.C. Paull
     ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986

     How to Analyze Large Programs Efficiently and Informatively
     D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
     ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   People wishing to do something different can find various possibilities
   in the above papers and elsewhere.
*/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "hard-reg-set.h"
#include "rtl.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "ira.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "predict.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "lcm.h"
#include "cfgcleanup.h"
#include "basic-block.h"
#include "hashtab.h"
#include "statistics.h"
#include "real.h"
#include "fixed-value.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "except.h"
#include "ggc.h"
#include "params.h"
#include "cselib.h"
#include "intl.h"
#include "obstack.h"
#include "tree-pass.h"
#include "hash-table.h"
#include "df.h"
#include "dbgcnt.h"
#include "target.h"
#include "gcse.h"
#include "gcse-common.h"

/* We support GCSE via Partial Redundancy Elimination.  PRE optimizations
   are a superset of those done by classic GCSE.

   Two passes of copy/constant propagation are done around PRE or hoisting
   because the first one enables more GCSE and the second one helps to clean
   up the copies that PRE and HOIST create.  This is needed more for PRE than
   for HOIST because code hoisting will try to use an existing register
   containing the common subexpression rather than create a new one.  This is
   harder to do for PRE because of the code motion (which HOIST doesn't do).

   Expressions we are interested in GCSE-ing are of the form
   (set (pseudo-reg) (expression)).
   Function want_to_gcse_p says what these are.

   In addition, expressions in REG_EQUAL notes are candidates for GCSE-ing.
   This allows PRE to hoist expressions that are expressed in multiple insns,
   such as complex address calculations (e.g. for PIC code, or loads with a
   high part and a low part).

   PRE handles moving invariant expressions out of loops (by treating them as
   partially redundant).

   **********************

   We used to support multiple passes but there are diminishing returns in
   doing so.  The first pass usually makes 90% of the changes that are doable.
   A second pass can make a few more changes made possible by the first pass.
   Experiments show any further passes don't make enough changes to justify
   the expense.

   A study of spec92 using an unlimited number of passes:
   [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
   [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
   [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1

   It was found that doing copy propagation between passes enables further
   substitutions.

   This study was done before expressions in REG_EQUAL notes were added as
   candidate expressions for optimization, and before the GIMPLE optimizers
   were added.  Multiple passes are probably even less efficient now than
   they were at the time the study was conducted.

   PRE is quite expensive in complicated functions because the DFA can take
   a while to converge.  Hence we only perform one pass.

   **********************

   The steps for PRE are:

   1) Build the hash table of expressions we wish to GCSE (expr_hash_table).

   2) Perform the data flow analysis for PRE.

   3) Delete the redundant instructions.

   4) Insert the required copies [if any] that make the partially
      redundant instructions fully redundant.

   5) For other reaching expressions, insert an instruction to copy the value
      to a newly created pseudo that will reach the redundant instruction.

   The deletion is done first so that when we do insertions we
   know which pseudo reg to use.

   Various papers have argued that PRE DFA is expensive (O(n^2)) and others
   argue it is not.  The number of iterations for the algorithm to converge
   is typically 2-4 so I don't view it as that expensive (relatively speaking).

   PRE GCSE depends heavily on the second CPROP pass to clean up the copies
   we create.  To make an expression reach the place where it's redundant,
   the result of the expression is copied to a new register, and the redundant
   expression is deleted by replacing it with this new register.  Classic GCSE
   doesn't have this problem as much because it computes the reaching defs of
   each register in each block and thus can try to use an existing
   register.  */
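
/* An illustrative sketch (not compiler output) of the transformation
   described above, at the C level.  In pre_example_before, `a + b' is
   computed on only one path into the join point, so the computation
   below the join is partially redundant.  PRE inserts the expression on
   the path where it was missing and funnels both copies through one
   temporary, which plays the role of the new pseudo-reg; the names here
   are hypothetical.  */

static int
pre_example_before (int a, int b, int p)
{
  int r = 0;
  if (p)
    r = a + b;		/* a + b computed on this path only.  */
  return r + (a + b);	/* Partially redundant: recomputed after the join.  */
}

static int
pre_example_after (int a, int b, int p)
{
  int t, r = 0;		/* T stands in for expr->reaching_reg.  */
  if (p)
    {
      t = a + b;	/* The existing computation now also sets T.  */
      r = t;
    }
  else
    t = a + b;		/* Copy inserted on the path that lacked it.  */
  return r + t;		/* Fully redundant computation deleted, uses T.  */
}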
\f
/* GCSE global vars.  */

struct target_gcse default_target_gcse;
#if SWITCHABLE_TARGET
struct target_gcse *this_target_gcse = &default_target_gcse;
#endif

/* Set to non-zero if CSE should run after all GCSE optimizations are done.  */
int flag_rerun_cse_after_global_opts;

/* An obstack for our working variables.  */
static struct obstack gcse_obstack;

/* Hash table of expressions.  */

struct gcse_expr
{
  /* The expression.  */
  rtx expr;
  /* Index in the available expression bitmaps.  */
  int bitmap_index;
  /* Next entry with the same hash.  */
  struct gcse_expr *next_same_hash;
  /* List of anticipatable occurrences in basic blocks in the function.
     An "anticipatable occurrence" is one that is the first occurrence in the
     basic block, the operands are not modified in the basic block prior
     to the occurrence and the output is not used between the start of
     the block and the occurrence.  */
  struct gcse_occr *antic_occr;
  /* List of available occurrences in basic blocks in the function.
     An "available occurrence" is one that is the last occurrence in the
     basic block and the operands are not modified by following statements in
     the basic block [including this insn].  */
  struct gcse_occr *avail_occr;
  /* Non-null if the computation is PRE redundant.
     The value is the newly created pseudo-reg to record a copy of the
     expression in all the places that reach the redundant copy.  */
  rtx reaching_reg;
  /* Maximum distance in instructions this expression can travel.
     We avoid moving simple expressions for more than a few instructions
     to keep register pressure under control.
     A value of "0" removes restrictions on how far the expression can
     travel.  */
  int max_distance;
};

/* Occurrence of an expression.
   There is one per basic block.  If a pattern appears more than once the
   last appearance is used [or first for anticipatable expressions].  */

struct gcse_occr
{
  /* Next occurrence of this expression.  */
  struct gcse_occr *next;
  /* The insn that computes the expression.  */
  rtx_insn *insn;
  /* Nonzero if this [anticipatable] occurrence has been deleted.  */
  char deleted_p;
  /* Nonzero if this [available] occurrence has been copied to
     reaching_reg.  */
  /* ??? This is mutually exclusive with deleted_p, so they could share
     the same byte.  */
  char copied_p;
};
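
/* A toy function (purely illustrative, not part of the pass) showing
   the two kinds of occurrence defined above for the expression
   `a + b':  */

static int
occr_example (int a, int b, int *out)
{
  int x = a + b;	/* Anticipatable: first occurrence, operands not
			   modified earlier in the block.  Not available,
			   because A is modified below.  */
  out[0] = x;
  a = 0;		/* Modifies an operand of a + b.  */
  int y = a + b;	/* Available: last occurrence, operands unchanged
			   from here to the block end.  */
  return y;
}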

typedef struct gcse_occr *occr_t;

/* Expression hash tables.
   Each hash table is an array of buckets.
   ??? It is known that if it were an array of entries, structure elements
   `next_same_hash' and `bitmap_index' wouldn't be necessary.  However, it is
   not clear whether in the final analysis a sufficient amount of memory would
   be saved as the size of the available expression bitmaps would be larger
   [one could build a mapping table without holes afterwards though].
   Someday I'll perform the computation and figure it out.  */

struct gcse_hash_table_d
{
  /* The table itself.
     This is an array of `expr_hash_table_size' elements.  */
  struct gcse_expr **table;

  /* Size of the hash table, in elements.  */
  unsigned int size;

  /* Number of hash table elements.  */
  unsigned int n_elems;
};

/* Expression hash table.  */
static struct gcse_hash_table_d expr_hash_table;

/* This is a list of expressions which are MEMs and will be used by load
   or store motion.
   Load motion tracks MEMs which aren't killed by anything except itself,
   i.e. loads and stores to a single location.
   We can then allow movement of these MEM refs with a little special
   allowance.  (All stores copy the same value to the reaching reg used
   for the loads.)  This means all values used to store into memory must have
   no side effects so we can re-issue the setter value.  */

struct ls_expr
{
  struct gcse_expr *expr;	/* Gcse expression reference for LM.  */
  rtx pattern;			/* Pattern of this mem.  */
  rtx pattern_regs;		/* List of registers mentioned by the mem.  */
  rtx_insn_list *loads;		/* INSN list of loads seen.  */
  rtx_insn_list *stores;	/* INSN list of stores seen.  */
  struct ls_expr *next;		/* Next in the list.  */
  int invalid;			/* Invalid for some reason.  */
  int index;			/* If it maps to a bitmap index.  */
  unsigned int hash_index;	/* Index when in a hash table.  */
  rtx reaching_reg;		/* Register to use when re-writing.  */
};

/* Head of the list of load/store memory refs.  */
static struct ls_expr *pre_ldst_mems = NULL;

struct pre_ldst_expr_hasher : typed_noop_remove <ls_expr>
{
  typedef ls_expr value_type;
  typedef value_type compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

/* Hashtable helpers.  */
inline hashval_t
pre_ldst_expr_hasher::hash (const value_type *x)
{
  int do_not_record_p = 0;
  return
    hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
}

static int expr_equiv_p (const_rtx, const_rtx);

inline bool
pre_ldst_expr_hasher::equal (const value_type *ptr1,
			     const compare_type *ptr2)
{
  return expr_equiv_p (ptr1->pattern, ptr2->pattern);
}

/* Hashtable for the load/store memory refs.  */
static hash_table<pre_ldst_expr_hasher> *pre_ldst_table;

/* Bitmap containing one bit for each register in the program.
   Used when performing GCSE to track which registers have been set since
   the start of the basic block.  */
static regset reg_set_bitmap;

/* Array, indexed by basic block number for a list of insns which modify
   memory within that block.  */
static vec<rtx_insn *> *modify_mem_list;
static bitmap modify_mem_list_set;

/* This array parallels modify_mem_list, except that it stores MEMs
   being set and their canonicalized memory addresses.  */
static vec<modify_pair> *canon_modify_mem_list;

/* Bitmap indexed by block numbers to record which blocks contain
   function calls.  */
static bitmap blocks_with_calls;

/* Various variables for statistics gathering.  */

/* Memory used in a pass.
   This isn't intended to be absolutely precise.  Its intent is only
   to keep an eye on memory usage.  */
static int bytes_used;

/* GCSE substitutions made.  */
static int gcse_subst_count;
/* Number of copy instructions created.  */
static int gcse_create_count;
\f
/* Doing code hoisting.  */
static bool doing_code_hoisting_p = false;
\f
/* For available exprs.  */
static sbitmap *ae_kill;
\f
/* Data stored for each basic block.  */
struct bb_data
{
  /* Maximal register pressure inside basic block for given register class
     (defined only for the pressure classes).  */
  int max_reg_pressure[N_REG_CLASSES];
  /* Recorded register pressure of basic block before trying to hoist
     an expression.  Will be used to restore the register pressure
     if the expression should not be hoisted.  */
  int old_pressure;
  /* Recorded register live_in info of basic block during code hoisting
     process.  BACKUP is used to record live_in info before trying to
     hoist an expression, and will be used to restore LIVE_IN if the
     expression should not be hoisted.  */
  bitmap live_in, backup;
};

#define BB_DATA(bb) ((struct bb_data *) (bb)->aux)

static basic_block curr_bb;

/* Current register pressure for each pressure class.  */
static int curr_reg_pressure[N_REG_CLASSES];
\f

static void compute_can_copy (void);
static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
static void *gcse_alloc (unsigned long);
static void alloc_gcse_mem (void);
static void free_gcse_mem (void);
static void hash_scan_insn (rtx_insn *, struct gcse_hash_table_d *);
static void hash_scan_set (rtx, rtx_insn *, struct gcse_hash_table_d *);
static void hash_scan_clobber (rtx, rtx_insn *, struct gcse_hash_table_d *);
static void hash_scan_call (rtx, rtx_insn *, struct gcse_hash_table_d *);
static int want_to_gcse_p (rtx, int *);
static int oprs_unchanged_p (const_rtx, const rtx_insn *, int);
static int oprs_anticipatable_p (const_rtx, const rtx_insn *);
static int oprs_available_p (const_rtx, const rtx_insn *);
static void insert_expr_in_table (rtx, machine_mode, rtx_insn *, int, int,
				  int, struct gcse_hash_table_d *);
static unsigned int hash_expr (const_rtx, machine_mode, int *, int);
static void record_last_reg_set_info (rtx, int);
static void record_last_mem_set_info (rtx_insn *);
static void record_last_set_info (rtx, const_rtx, void *);
static void compute_hash_table (struct gcse_hash_table_d *);
static void alloc_hash_table (struct gcse_hash_table_d *);
static void free_hash_table (struct gcse_hash_table_d *);
static void compute_hash_table_work (struct gcse_hash_table_d *);
static void dump_hash_table (FILE *, const char *, struct gcse_hash_table_d *);
static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
				      struct gcse_hash_table_d *);
static void mems_conflict_for_gcse_p (rtx, const_rtx, void *);
static int load_killed_in_block_p (const_basic_block, int, const_rtx, int);
static void alloc_pre_mem (int, int);
static void free_pre_mem (void);
static struct edge_list *compute_pre_data (void);
static int pre_expr_reaches_here_p (basic_block, struct gcse_expr *,
				    basic_block);
static void insert_insn_end_basic_block (struct gcse_expr *, basic_block);
static void pre_insert_copy_insn (struct gcse_expr *, rtx_insn *);
static void pre_insert_copies (void);
static int pre_delete (void);
static int pre_gcse (struct edge_list *);
static int one_pre_gcse_pass (void);
static void add_label_notes (rtx, rtx);
static void alloc_code_hoist_mem (int, int);
static void free_code_hoist_mem (void);
static void compute_code_hoist_vbeinout (void);
static void compute_code_hoist_data (void);
static int should_hoist_expr_to_dom (basic_block, struct gcse_expr *, basic_block,
				     sbitmap, int, int *, enum reg_class,
				     int *, bitmap, rtx_insn *);
static int hoist_code (void);
static enum reg_class get_regno_pressure_class (int regno, int *nregs);
static enum reg_class get_pressure_class_and_nregs (rtx_insn *insn, int *nregs);
static int one_code_hoisting_pass (void);
static rtx_insn *process_insert_insn (struct gcse_expr *);
static int pre_edge_insert (struct edge_list *, struct gcse_expr **);
static int pre_expr_reaches_here_p_work (basic_block, struct gcse_expr *,
					 basic_block, char *);
static struct ls_expr *ldst_entry (rtx);
static void free_ldst_entry (struct ls_expr *);
static void free_ld_motion_mems (void);
static void print_ldst_list (FILE *);
static struct ls_expr *find_rtx_in_ldst (rtx);
static int simple_mem (const_rtx);
static void invalidate_any_buried_refs (rtx);
static void compute_ld_motion_mems (void);
static void trim_ld_motion_mems (void);
static void update_ld_motion_stores (struct gcse_expr *);
static void clear_modify_mem_tables (void);
static void free_modify_mem_tables (void);
static rtx gcse_emit_move_after (rtx, rtx, rtx_insn *);
static bool is_too_expensive (const char *);

#define GNEW(T)			((T *) gmalloc (sizeof (T)))
#define GCNEW(T)		((T *) gcalloc (1, sizeof (T)))

#define GNEWVEC(T, N)		((T *) gmalloc (sizeof (T) * (N)))
#define GCNEWVEC(T, N)		((T *) gcalloc ((N), sizeof (T)))

#define GNEWVAR(T, S)		((T *) gmalloc ((S)))
#define GCNEWVAR(T, S)		((T *) gcalloc (1, (S)))

#define GOBNEW(T)		((T *) gcse_alloc (sizeof (T)))
#define GOBNEWVAR(T, S)		((T *) gcse_alloc ((S)))
\f
/* Misc. utilities.  */

#define can_copy \
  (this_target_gcse->x_can_copy)
#define can_copy_init_p \
  (this_target_gcse->x_can_copy_init_p)

/* Compute which modes support reg/reg copy operations.  */

static void
compute_can_copy (void)
{
  int i;
#ifndef AVOID_CCMODE_COPIES
  rtx reg, insn;
#endif
  memset (can_copy, 0, NUM_MACHINE_MODES);

  start_sequence ();
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    if (GET_MODE_CLASS (i) == MODE_CC)
      {
#ifdef AVOID_CCMODE_COPIES
	can_copy[i] = 0;
#else
	reg = gen_rtx_REG ((machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
	insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
	if (recog (PATTERN (insn), insn, NULL) >= 0)
	  can_copy[i] = 1;
#endif
      }
    else
      can_copy[i] = 1;

  end_sequence ();
}

/* Returns whether the mode supports reg/reg copy operations.  */

bool
can_copy_p (machine_mode mode)
{
  if (! can_copy_init_p)
    {
      compute_can_copy ();
      can_copy_init_p = true;
    }

  return can_copy[mode] != 0;
}
\f
/* Cover function to xmalloc to record bytes allocated.  */

static void *
gmalloc (size_t size)
{
  bytes_used += size;
  return xmalloc (size);
}

/* Cover function to xcalloc to record bytes allocated.  */

static void *
gcalloc (size_t nelem, size_t elsize)
{
  bytes_used += nelem * elsize;
  return xcalloc (nelem, elsize);
}

/* Cover function to obstack_alloc.  */

static void *
gcse_alloc (unsigned long size)
{
  bytes_used += size;
  return obstack_alloc (&gcse_obstack, size);
}

/* Allocate memory for the reg/memory set tracking tables.
   This is called at the start of each pass.  */

static void
alloc_gcse_mem (void)
{
  /* Allocate vars to track sets of regs.  */
  reg_set_bitmap = ALLOC_REG_SET (NULL);

  /* Allocate array to keep a list of insns which modify memory in each
     basic block.  The two typedefs are needed to work around the
     pre-processor limitation with template types in macro arguments.  */
  typedef vec<rtx_insn *> vec_rtx_heap;
  typedef vec<modify_pair> vec_modify_pair_heap;
  modify_mem_list = GCNEWVEC (vec_rtx_heap, last_basic_block_for_fn (cfun));
  canon_modify_mem_list = GCNEWVEC (vec_modify_pair_heap,
				    last_basic_block_for_fn (cfun));
  modify_mem_list_set = BITMAP_ALLOC (NULL);
  blocks_with_calls = BITMAP_ALLOC (NULL);
}
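
/* Why the typedefs above help: the preprocessor splits macro arguments
   at top-level commas and does not treat template angle brackets as
   grouping, so a template-id containing a comma cannot be handed to a
   function-like macro directly.  A hypothetical sketch (std::pair used
   only for illustration, not in this file):

     #define ELT_SIZE(T) sizeof (T)
     ELT_SIZE (std::pair<int, long>)    // error: macro receives two args
     typedef std::pair<int, long> int_long_pair;
     ELT_SIZE (int_long_pair)           // OK: the comma is hidden

   Hiding the template-id behind a typedef sidesteps the problem for any
   macro, which is why the locals above are typedef'd before being
   passed to GCNEWVEC.  */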

/* Free memory allocated by alloc_gcse_mem.  */

static void
free_gcse_mem (void)
{
  FREE_REG_SET (reg_set_bitmap);

  free_modify_mem_tables ();
  BITMAP_FREE (modify_mem_list_set);
  BITMAP_FREE (blocks_with_calls);
}
\f
/* Compute the local properties of each recorded expression.

   Local properties are those that are defined by the block, irrespective of
   other blocks.

   An expression is transparent in a block if its operands are not modified
   in the block.

   An expression is computed (locally available) in a block if it is computed
   at least once and the expression would contain the same value if the
   computation was moved to the end of the block.

   An expression is locally anticipatable in a block if it is computed at
   least once and the expression would contain the same value if the
   computation was moved to the beginning of the block.

   We call this routine for PRE and code hoisting.  Both compute
   basically the same information and thus can easily share this code.

   TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
   properties.  If NULL, then it is not necessary to compute or record that
   particular property.

   TABLE controls which hash table to look at.  */

static void
compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
			  struct gcse_hash_table_d *table)
{
  unsigned int i;

  /* Initialize any bitmaps that were passed in.  */
  if (transp)
    {
      bitmap_vector_ones (transp, last_basic_block_for_fn (cfun));
    }

  if (comp)
    bitmap_vector_clear (comp, last_basic_block_for_fn (cfun));
  if (antloc)
    bitmap_vector_clear (antloc, last_basic_block_for_fn (cfun));

  for (i = 0; i < table->size; i++)
    {
      struct gcse_expr *expr;

      for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
	{
	  int indx = expr->bitmap_index;
	  struct gcse_occr *occr;

	  /* The expression is transparent in this block if it is not killed.
	     We start by assuming all are transparent [none are killed], and
	     then reset the bits for those that are.  */
	  if (transp)
	    compute_transp (expr->expr, indx, transp,
			    blocks_with_calls,
			    modify_mem_list_set,
			    canon_modify_mem_list);

	  /* The occurrences recorded in antic_occr are exactly those that
	     we want to set to nonzero in ANTLOC.  */
	  if (antloc)
	    for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
	      {
		bitmap_set_bit (antloc[BLOCK_FOR_INSN (occr->insn)->index], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->deleted_p = 0;
	      }

	  /* The occurrences recorded in avail_occr are exactly those that
	     we want to set to nonzero in COMP.  */
	  if (comp)
	    for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
	      {
		bitmap_set_bit (comp[BLOCK_FOR_INSN (occr->insn)->index], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->copied_p = 0;
	      }

	  /* While we're scanning the table, this is a good place to
	     initialize this.  */
	  expr->reaching_reg = 0;
	}
    }
}
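
/* A worked example (illustrative only) of the three local properties
   for the expression `a + b', bitmap index 0, in the single block
   below:

     - ANTLOC bit 0 is set: the first occurrence computes a + b before
       either operand changes.
     - COMP bit 0 is clear: A is modified after the occurrence, so
       recomputing a + b at the block end would give a different value.
     - TRANSP bit 0 is clear: an operand is modified inside the block.  */

static int
local_properties_example (int a, int b)
{
  int x = a + b;	/* Anticipatable occurrence of a + b.  */
  a = x + 1;		/* Operand modified: kills TRANSP and COMP.  */
  return a;
}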
\f
/* Hash table support.  */

struct reg_avail_info
{
  basic_block last_bb;
  int first_set;
  int last_set;
};

static struct reg_avail_info *reg_avail_info;
static basic_block current_bb;

/* See whether X, the source of a set, is something we want to consider for
   GCSE.  */

static int
want_to_gcse_p (rtx x, int *max_distance_ptr)
{
#ifdef STACK_REGS
  /* On register stack architectures, don't GCSE constants from the
     constant pool, as the benefits are often swamped by the overhead
     of shuffling the register stack between basic blocks.  */
  if (IS_STACK_MODE (GET_MODE (x)))
    x = avoid_constant_pool_reference (x);
#endif

  /* GCSE'ing constants:

     We do not specifically distinguish between constant and non-constant
     expressions in PRE and Hoist.  We use set_src_cost below to limit
     the maximum distance simple expressions can travel.

     Nevertheless, constants are much easier to GCSE, and, hence,
     it is easy to overdo the optimizations.  Usually, excessive PRE and
     Hoisting of constants leads to increased register pressure.

     RA can deal with this by rematerializing some of the constants.
     Therefore, it is important that the back-end generates sets of constants
     in a way that allows reload to rematerialize them under high register
     pressure, i.e., a pseudo register with a REG_EQUAL note to a constant
     is set only once.  Failing to do so will result in IRA/reload
     spilling such constants under high register pressure instead of
     rematerializing them.  */

  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
    case CALL:
      return 0;

    CASE_CONST_ANY:
      if (!doing_code_hoisting_p)
	/* Do not PRE constants.  */
	return 0;

      /* FALLTHRU */

    default:
      if (doing_code_hoisting_p)
	/* PRE doesn't implement max_distance restriction.  */
	{
	  int cost;
	  int max_distance;

	  gcc_assert (!optimize_function_for_speed_p (cfun)
		      && optimize_function_for_size_p (cfun));
	  cost = set_src_cost (x, 0);

	  if (cost < COSTS_N_INSNS (GCSE_UNRESTRICTED_COST))
	    {
	      max_distance = (GCSE_COST_DISTANCE_RATIO * cost) / 10;
	      if (max_distance == 0)
		return 0;

	      gcc_assert (max_distance > 0);
	    }
	  else
	    max_distance = 0;

	  if (max_distance_ptr)
	    *max_distance_ptr = max_distance;
	}

      return can_assign_to_reg_without_clobbers_p (x);
    }
}
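
/* A sketch of the distance computation above.  COSTS_N_INSNS and the
   two --param values (gcse-cost-distance-ratio, gcse-unrestricted-cost)
   are modeled by plain arguments here; this helper is hypothetical and
   only restates the arithmetic, it is not the pass's interface.  */

static int
max_distance_for_cost (int cost, int ratio, int unrestricted_cost)
{
  if (cost < unrestricted_cost)
    /* Cheap expressions get a cap proportional to their cost.  A result
       of 0 here means the caller should reject the expression, matching
       the `return 0' above.  */
    return ratio * cost / 10;

  /* Expensive expressions travel freely: per struct gcse_expr, a
     max_distance of 0 removes the restriction.  */
  return 0;
}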

/* Used internally by can_assign_to_reg_without_clobbers_p.  */

static GTY(()) rtx_insn *test_insn;

/* Return true if we can assign X to a pseudo register such that the
   resulting insn does not result in clobbering a hard register as a
   side-effect.

   Additionally, if the target requires it, check that the resulting insn
   can be copied.  If it cannot, this means that X is special and probably
   has hidden side-effects we don't want to mess with.

   This function is typically used by code motion passes, to verify
   that it is safe to insert an insn without worrying about clobbering
   maybe live hard regs.  */

bool
can_assign_to_reg_without_clobbers_p (rtx x)
{
  int num_clobbers = 0;
  int icode;
  bool can_assign = false;

  /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
  if (general_operand (x, GET_MODE (x)))
    return true;
  else if (GET_MODE (x) == VOIDmode)
    return false;

  /* Otherwise, check if we can make a valid insn from it.  First initialize
     our test insn if we haven't already.  */
  if (test_insn == 0)
    {
      test_insn
	= make_insn_raw (gen_rtx_SET (VOIDmode,
				      gen_rtx_REG (word_mode,
						   FIRST_PSEUDO_REGISTER * 2),
				      const0_rtx));
      SET_NEXT_INSN (test_insn) = SET_PREV_INSN (test_insn) = 0;
      INSN_LOCATION (test_insn) = UNKNOWN_LOCATION;
    }

  /* Now make an insn like the one we would make when GCSE'ing and see if
     it is valid.  */
  PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
  SET_SRC (PATTERN (test_insn)) = x;

  icode = recog (PATTERN (test_insn), test_insn, &num_clobbers);

  /* If the test insn is valid and doesn't need clobbers, and the target also
     has no objections, we're good.  */
  if (icode >= 0
      && (num_clobbers == 0 || !added_clobbers_hard_reg_p (icode))
      && ! (targetm.cannot_copy_insn_p
	    && targetm.cannot_copy_insn_p (test_insn)))
    can_assign = true;

  /* Make sure test_insn doesn't have any pointers into GC space.  */
  SET_SRC (PATTERN (test_insn)) = NULL_RTX;

  return can_assign;
}

/* Return nonzero if the operands of expression X are unchanged from the
   start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
   or from INSN to the end of INSN's basic block (if AVAIL_P != 0).  */

static int
oprs_unchanged_p (const_rtx x, const rtx_insn *insn, int avail_p)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
	struct reg_avail_info *info = &reg_avail_info[REGNO (x)];

	if (info->last_bb != current_bb)
	  return 1;
	if (avail_p)
	  return info->last_set < DF_INSN_LUID (insn);
	else
	  return info->first_set >= DF_INSN_LUID (insn);
      }

    case MEM:
      if (! flag_gcse_lm
	  || load_killed_in_block_p (current_bb, DF_INSN_LUID (insn),
				     x, avail_p))
	return 0;
      else
	return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return 0;

    case PC:
    case CC0: /*FIXME*/
    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 1;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call needed at this
	     level, change it into iteration.  This function is called enough
	     to be worth it.  */
	  if (i == 0)
	    return oprs_unchanged_p (XEXP (x, i), insn, avail_p);

	  else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
	    return 0;
    }

  return 1;
}
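
/* One way to realize the "turn the last recursive call into iteration"
   idiom mentioned above, shown on a toy binary tree (hypothetical type,
   not an RTL structure).  Instead of recursing on every child, the walk
   recurses on all but one child and loops on the last, bounding stack
   depth on right-leaning chains.  */

struct toy_node
{
  int ok;
  struct toy_node *left, *right;
};

static int
toy_all_ok (const struct toy_node *n)
{
  while (n)
    {
      if (!n->ok)
	return 0;
      if (n->left && !toy_all_ok (n->left))	/* Recurse on one child...  */
	return 0;
      n = n->right;				/* ...iterate on the last.  */
    }
  return 1;
}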

/* Info passed from load_killed_in_block_p to mems_conflict_for_gcse_p.  */

struct mem_conflict_info
{
  /* A memory reference for a load instruction, mems_conflict_for_gcse_p will
     see if a memory store conflicts with this memory load.  */
  const_rtx mem;

  /* True if mems_conflict_for_gcse_p finds a conflict between two memory
     references.  */
  bool conflict;
};

/* DEST is the output of an instruction.  If it is a memory reference and
   possibly conflicts with the load found in DATA, then communicate this
   information back through DATA.  */

static void
mems_conflict_for_gcse_p (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
			  void *data)
{
  struct mem_conflict_info *mci = (struct mem_conflict_info *) data;

  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with the load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */
  if (! MEM_P (dest))
    return;

  /* If we are setting a MEM in our list of specially recognized MEMs,
     don't mark as killed this time.  */
  if (pre_ldst_mems != NULL && expr_equiv_p (dest, mci->mem))
    {
      if (!find_rtx_in_ldst (dest))
	mci->conflict = true;
      return;
    }

  if (true_dependence (dest, GET_MODE (dest), mci->mem))
    mci->conflict = true;
}

/* Return nonzero if the expression in X (a memory reference) is killed
   in block BB before or after the insn with the LUID in UID_LIMIT.
   AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
   before UID_LIMIT.

   To check the entire block, set UID_LIMIT to max_uid + 1 and
   AVAIL_P to 0.  */

static int
load_killed_in_block_p (const_basic_block bb, int uid_limit, const_rtx x,
			int avail_p)
{
  vec<rtx_insn *> list = modify_mem_list[bb->index];
  rtx_insn *setter;
  unsigned ix;

  /* If this is a read-only reference, then we aren't going to be changing
     it.  */
  if (MEM_READONLY_P (x))
    return 0;

  FOR_EACH_VEC_ELT_REVERSE (list, ix, setter)
    {
      struct mem_conflict_info mci;

      /* Ignore entries in the list that do not apply.  */
      if ((avail_p
	   && DF_INSN_LUID (setter) < uid_limit)
	  || (! avail_p
	      && DF_INSN_LUID (setter) > uid_limit))
	continue;

      /* If SETTER is a call everything is clobbered.  Note that calls
	 to pure functions are never put on the list, so we need not
	 worry about them.  */
      if (CALL_P (setter))
	return 1;

      /* SETTER must be an INSN of some kind that sets memory.  Call
	 note_stores to examine each hunk of memory that is modified.  */
      mci.mem = x;
      mci.conflict = false;
      note_stores (PATTERN (setter), mems_conflict_for_gcse_p, &mci);
      if (mci.conflict)
	return 1;
    }
  return 0;
}
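
/* The callback-plus-context idiom used with note_stores above, reduced
   to a freestanding sketch: the walker enumerates "stores", the
   callback owns the policy, and a small struct carries the verdict
   back through the void pointer (toy types, not the RTL API).  */

struct toy_conflict_info
{
  int watched;		/* The value we care about, like mci.mem.  */
  bool conflict;	/* Set by the callback, like mci.conflict.  */
};

static void
toy_store_cb (int stored, void *data)
{
  struct toy_conflict_info *ci = (struct toy_conflict_info *) data;
  if (stored == ci->watched)
    ci->conflict = true;
}

static bool
toy_killed_p (const int *stores, int n, int watched)
{
  struct toy_conflict_info ci = { watched, false };
  for (int i = 0; i < n; i++)
    toy_store_cb (stores[i], &ci);	/* Walker invokes the callback.  */
  return ci.conflict;
}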

/* Return nonzero if the operands of expression X are unchanged from
   the start of INSN's basic block up to but not including INSN.  */

static int
oprs_anticipatable_p (const_rtx x, const rtx_insn *insn)
{
  return oprs_unchanged_p (x, insn, 0);
}

/* Return nonzero if the operands of expression X are unchanged from
   INSN to the end of INSN's basic block.  */

static int
oprs_available_p (const_rtx x, const rtx_insn *insn)
{
  return oprs_unchanged_p (x, insn, 1);
}

/* Hash expression X.

   MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
   indicating if a volatile operand is found or if the expression contains
   something we don't want to insert in the table.  HASH_TABLE_SIZE is
   the current size of the hash table to be probed.  */

static unsigned int
hash_expr (const_rtx x, machine_mode mode, int *do_not_record_p,
	   int hash_table_size)
{
  unsigned int hash;

  *do_not_record_p = 0;

  hash = hash_rtx (x, mode, do_not_record_p, NULL, /*have_reg_qty=*/false);
  return hash % hash_table_size;
}

/* Return nonzero if X is equivalent to Y.  */

static int
expr_equiv_p (const_rtx x, const_rtx y)
{
  return exp_equiv_p (x, y, 0, true);
}

/* Insert expression X in INSN in the hash TABLE.
   If it is already present, record it as the last occurrence in INSN's
   basic block.

   MODE is the mode of the value X is being stored into.
   It is only used if X is a CONST_INT.

   ANTIC_P is nonzero if X is an anticipatable expression.
   AVAIL_P is nonzero if X is an available expression.

   MAX_DISTANCE is the maximum distance in instructions this expression can
   be moved.  */

static void
insert_expr_in_table (rtx x, machine_mode mode, rtx_insn *insn,
		      int antic_p, int avail_p, int max_distance,
		      struct gcse_hash_table_d *table)
{
  int found, do_not_record_p;
  unsigned int hash;
  struct gcse_expr *cur_expr, *last_expr = NULL;
  struct gcse_occr *antic_occr, *avail_occr;

  hash = hash_expr (x, mode, &do_not_record_p, table->size);

  /* Do not insert expression in table if it contains volatile operands,
     or if hash_expr determines the expression is something we don't want
     to or can't handle.  */
  if (do_not_record_p)
    return;

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
    {
      /* If the expression isn't found, save a pointer to the end of
	 the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = GOBNEW (struct gcse_expr);
      bytes_used += sizeof (struct gcse_expr);
      if (table->table[hash] == NULL)
	/* This is the first pattern that hashed to this index.  */
	table->table[hash] = cur_expr;
      else
	/* Add EXPR to end of this hash chain.  */
	last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.  */
      cur_expr->expr = x;
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
      gcc_assert (max_distance >= 0);
      cur_expr->max_distance = max_distance;
    }
  else
    gcc_assert (cur_expr->max_distance == max_distance);

  /* Now record the occurrence(s).  */
  if (antic_p)
    {
      antic_occr = cur_expr->antic_occr;

      if (antic_occr
	  && BLOCK_FOR_INSN (antic_occr->insn) != BLOCK_FOR_INSN (insn))
	antic_occr = NULL;

      if (antic_occr)
	/* Found another instance of the expression in the same basic block.
	   Prefer the currently recorded one.  We want the first one in the
	   block and the block is scanned from start to end.  */
	; /* nothing to do */
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  antic_occr = GOBNEW (struct gcse_occr);
	  bytes_used += sizeof (struct gcse_occr);
	  antic_occr->insn = insn;
	  antic_occr->next = cur_expr->antic_occr;
	  antic_occr->deleted_p = 0;
	  cur_expr->antic_occr = antic_occr;
	}
    }

  if (avail_p)
    {
      avail_occr = cur_expr->avail_occr;

      if (avail_occr
	  && BLOCK_FOR_INSN (avail_occr->insn) == BLOCK_FOR_INSN (insn))
	{
	  /* Found another instance of the expression in the same basic block.
	     Prefer this occurrence to the currently recorded one.  We want
	     the last one in the block and the block is scanned from start
	     to end.  */
	  avail_occr->insn = insn;
	}
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  avail_occr = GOBNEW (struct gcse_occr);
	  bytes_used += sizeof (struct gcse_occr);
	  avail_occr->insn = insn;
	  avail_occr->next = cur_expr->avail_occr;
	  avail_occr->deleted_p = 0;
	  cur_expr->avail_occr = avail_occr;
	}
    }
}
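
/* The find-or-append shape of insert_expr_in_table, on toy types: walk
   the bucket's chain remembering the tail, then either reuse the match
   or link a fresh entry where the walk stopped.  A hypothetical sketch,
   assuming libiberty's XCNEW for zeroed allocation.  */

struct toy_entry
{
  int key;
  struct toy_entry *next_same_hash;
};

static struct toy_entry *
toy_find_or_append (struct toy_entry **bucket, int key)
{
  struct toy_entry *e = *bucket, *last = NULL;

  while (e && e->key != key)
    {
      last = e;				/* Remember the chain tail.  */
      e = e->next_same_hash;
    }

  if (!e)
    {
      e = XCNEW (struct toy_entry);
      e->key = key;
      if (last)
	last->next_same_hash = e;	/* Append to an existing chain.  */
      else
	*bucket = e;			/* First entry for this hash.  */
    }
  return e;
}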

/* Scan SET present in INSN and add an entry to the hash TABLE.  */

static void
hash_scan_set (rtx set, rtx_insn *insn, struct gcse_hash_table_d *table)
{
  rtx src = SET_SRC (set);
  rtx dest = SET_DEST (set);
  rtx note;

  if (GET_CODE (src) == CALL)
    hash_scan_call (src, insn, table);

  else if (REG_P (dest))
    {
      unsigned int regno = REGNO (dest);
      int max_distance = 0;

      /* See if a REG_EQUAL note shows this equivalent to a simpler expression.

	 This allows us to do a single GCSE pass and still eliminate
	 redundant constants, addresses or other expressions that are
	 constructed with multiple instructions.

	 However, keep the original SRC if INSN is a simple reg-reg move.
	 In this case, there will almost always be a REG_EQUAL note on the
	 insn that sets SRC.  By recording the REG_EQUAL value here as SRC
	 for INSN, we miss copy propagation opportunities and we perform the
	 same PRE GCSE operation repeatedly on the same REG_EQUAL value if we
	 do more than one PRE GCSE pass.

	 Note that this does not impede profitable constant propagations.  We
	 "look through" reg-reg sets in lookup_avail_set.  */
      note = find_reg_equal_equiv_note (insn);
      if (note != 0
	  && REG_NOTE_KIND (note) == REG_EQUAL
	  && !REG_P (src)
	  && want_to_gcse_p (XEXP (note, 0), NULL))
	src = XEXP (note, 0), set = gen_rtx_SET (VOIDmode, dest, src);

      /* Only record sets of pseudo-regs in the hash table.  */
      if (regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (dest))
	  /* GCSE commonly inserts instructions after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  /* ??? We can now easily create new EH landing pads at the
	     gimple level, for splitting edges; there's no reason we
	     can't do the same thing at the rtl level.  */
	  && !can_throw_internal (insn)
	  /* Is SET_SRC something we want to gcse?  */
	  && want_to_gcse_p (src, &max_distance)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (set)
	  /* Don't GCSE if it has an attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes, and if the argument slot is used somewhere
	     explicitly, it means the address of the parameter has been
	     taken, so we should not extend the lifetime of the pseudo.  */
	  && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
	{
	  /* An expression is not anticipatable if its operands are
	     modified before this insn or if this is not the only SET in
	     this insn.  The latter condition does not have to mean that
	     SRC itself is not anticipatable, but we just will not be
	     able to handle code motion of insns with multiple sets.  */
	  int antic_p = oprs_anticipatable_p (src, insn)
			&& !multiple_sets (insn);
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = (oprs_available_p (src, insn)
			 && ! JUMP_P (insn));

	  insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p,
				max_distance, table);
	}
    }
  /* In case of store we want to consider the memory value as available in
     the REG stored in that memory.  This makes it possible to remove
     loads made redundant by stores to the same location.  */
  else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
    {
      unsigned int regno = REGNO (src);
      int max_distance = 0;

      /* Only record sets of pseudo-regs in the hash table.  */
      if (regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (src))
	  /* GCSE commonly inserts instructions after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  && !can_throw_internal (insn)
	  /* Is SET_DEST something we want to gcse?  */
	  && want_to_gcse_p (dest, &max_distance)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (set)
	  /* Don't GCSE if it has an attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes, and if the argument slot is used somewhere
	     explicitly, it means the address of the parameter has been
	     taken, so we should not extend the lifetime of the pseudo.  */
	  && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
	      || ! MEM_P (XEXP (note, 0))))
	{
	  /* Stores are never anticipatable.  */
	  int antic_p = 0;
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = oprs_available_p (dest, insn)
			&& ! JUMP_P (insn);

	  /* Record the memory expression (DEST) in the hash table.  */
	  insert_expr_in_table (dest, GET_MODE (dest), insn,
				antic_p, avail_p, max_distance, table);
	}
    }
}

static void
hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx_insn *insn ATTRIBUTE_UNUSED,
		   struct gcse_hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

static void
hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx_insn *insn ATTRIBUTE_UNUSED,
		struct gcse_hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

/* Process INSN and add hash table entries as appropriate.  */

static void
hash_scan_insn (rtx_insn *insn, struct gcse_hash_table_d *table)
{
  rtx pat = PATTERN (insn);
  int i;

  /* Pick out the sets of INSN and for other forms of instructions record
     what's been modified.  */

  if (GET_CODE (pat) == SET)
    hash_scan_set (pat, insn, table);

  else if (GET_CODE (pat) == CLOBBER)
    hash_scan_clobber (pat, insn, table);

  else if (GET_CODE (pat) == CALL)
    hash_scan_call (pat, insn, table);

  else if (GET_CODE (pat) == PARALLEL)
    for (i = 0; i < XVECLEN (pat, 0); i++)
      {
	rtx x = XVECEXP (pat, 0, i);

	if (GET_CODE (x) == SET)
	  hash_scan_set (x, insn, table);
	else if (GET_CODE (x) == CLOBBER)
	  hash_scan_clobber (x, insn, table);
	else if (GET_CODE (x) == CALL)
	  hash_scan_call (x, insn, table);
      }
}

/* Dump the hash table TABLE to file FILE under the name NAME.  */

static void
dump_hash_table (FILE *file, const char *name, struct gcse_hash_table_d *table)
{
  int i;
  /* Flattened out table, so it's printed in proper order.  */
  struct gcse_expr **flat_table;
  unsigned int *hash_val;
  struct gcse_expr *expr;

  flat_table = XCNEWVEC (struct gcse_expr *, table->n_elems);
  hash_val = XNEWVEC (unsigned int, table->n_elems);

  for (i = 0; i < (int) table->size; i++)
    for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
      {
	flat_table[expr->bitmap_index] = expr;
	hash_val[expr->bitmap_index] = i;
      }

  fprintf (file, "%s hash table (%d buckets, %d entries)\n",
	   name, table->size, table->n_elems);

  for (i = 0; i < (int) table->n_elems; i++)
    if (flat_table[i] != 0)
      {
	expr = flat_table[i];
	fprintf (file, "Index %d (hash value %d; max distance %d)\n  ",
		 expr->bitmap_index, hash_val[i], expr->max_distance);
	print_rtl (file, expr->expr);
	fprintf (file, "\n");
      }

  fprintf (file, "\n");

  free (flat_table);
  free (hash_val);
}

/* Record register first/last/block set information for REGNO in INSN.

   first_set records the first place in the block where the register
   is set and is used to compute "anticipatability".

   last_set records the last place in the block where the register
   is set and is used to compute "availability".

   last_bb records the block for which first_set and last_set are
   valid, as a quick test to invalidate them.  */

static void
record_last_reg_set_info (rtx insn, int regno)
{
  struct reg_avail_info *info = &reg_avail_info[regno];
  int luid = DF_INSN_LUID (insn);

  info->last_set = luid;
  if (info->last_bb != current_bb)
    {
      info->last_bb = current_bb;
      info->first_set = luid;
    }
}

/* Record memory modification information for INSN.  We do not actually care
   about the memory location(s) that are set, or even how they are set
   (consider a CALL_INSN).  We merely need to record which insns modify
   memory.  */

static void
record_last_mem_set_info (rtx_insn *insn)
{
  if (! flag_gcse_lm)
    return;

  record_last_mem_set_info_common (insn, modify_mem_list,
				   canon_modify_mem_list,
				   modify_mem_list_set,
				   blocks_with_calls);
}

/* Called from compute_hash_table via note_stores to handle one
   SET or CLOBBER in an insn.  DATA is really the instruction in which
   the SET is taking place.  */

static void
record_last_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
{
  rtx_insn *last_set_insn = (rtx_insn *) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (REG_P (dest))
    record_last_reg_set_info (last_set_insn, REGNO (dest));
  else if (MEM_P (dest)
	   /* Ignore pushes, they clobber nothing.  */
	   && ! push_operand (dest, GET_MODE (dest)))
    record_last_mem_set_info (last_set_insn);
}

/* Top level function to create an expression hash table.

   Expression entries are placed in the hash table if
   - they are of the form (set (pseudo-reg) src),
   - src is something we want to perform GCSE on,
   - none of the operands are subsequently modified in the block.

   Currently src must be a pseudo-reg or a const_int.

   TABLE is the table computed.  */

static void
compute_hash_table_work (struct gcse_hash_table_d *table)
{
  int i;

  /* Clear out the tables of memory-modifying insns left over from a
     previous pass.  */
  clear_modify_mem_tables ();
  /* Some working arrays used to track first and last set in each block.  */
  reg_avail_info = GNEWVEC (struct reg_avail_info, max_reg_num ());

  for (i = 0; i < max_reg_num (); ++i)
    reg_avail_info[i].last_bb = NULL;

  FOR_EACH_BB_FN (current_bb, cfun)
    {
      rtx_insn *insn;
      unsigned int regno;

      /* First pass over the instructions records information used to
	 determine when registers and memory are first and last set.  */
      FOR_BB_INSNS (current_bb, insn)
	{
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  if (CALL_P (insn))
	    {
	      hard_reg_set_iterator hrsi;
	      EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call,
					      0, regno, hrsi)
		record_last_reg_set_info (insn, regno);

	      if (! RTL_CONST_OR_PURE_CALL_P (insn))
		record_last_mem_set_info (insn);
	    }

	  note_stores (PATTERN (insn), record_last_set_info, insn);
	}

      /* The next pass builds the hash table.  */
      FOR_BB_INSNS (current_bb, insn)
	if (NONDEBUG_INSN_P (insn))
	  hash_scan_insn (insn, table);
    }

  free (reg_avail_info);
  reg_avail_info = NULL;
}

/* Allocate space for the set/expr hash TABLE.
   The function's instruction count is used to determine the number of
   buckets to use.  */

static void
alloc_hash_table (struct gcse_hash_table_d *table)
{
  int n;

  n = get_max_insn_count ();

  table->size = n / 4;
  if (table->size < 11)
    table->size = 11;

  /* Attempt to maintain efficient use of hash table.
     Making it an odd number is simplest for now.
     ??? Later take some measurements.  */
  table->size |= 1;
  n = table->size * sizeof (struct gcse_expr *);
  table->table = GNEWVAR (struct gcse_expr *, n);
}
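
/* The sizing rule above, restated as arithmetic in a hypothetical
   helper: one bucket per four insns, floored at 11, then forced odd.
   For example, 1000 insns give 251 buckets and 20 insns hit the
   11-bucket floor.  */

static unsigned int
toy_table_size (int n_insns)
{
  unsigned int size = n_insns / 4;
  if (size < 11)
    size = 11;
  return size | 1;	/* An odd size spreads `hash % size' better.  */
}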
1604
1605 /* Free things allocated by alloc_hash_table. */
1606
1607 static void
1608 free_hash_table (struct gcse_hash_table_d *table)
1609 {
1610 free (table->table);
1611 }
1612
1613 /* Compute the expression hash table TABLE. */
1614
1615 static void
1616 compute_hash_table (struct gcse_hash_table_d *table)
1617 {
1618 /* Initialize count of number of entries in hash table. */
1619 table->n_elems = 0;
1620 memset (table->table, 0, table->size * sizeof (struct gcse_expr *));
1621
1622 compute_hash_table_work (table);
1623 }
1624 \f
1625 /* Expression tracking support. */
1626
1627 /* Clear canon_modify_mem_list and modify_mem_list tables. */
1628 static void
1629 clear_modify_mem_tables (void)
1630 {
1631 unsigned i;
1632 bitmap_iterator bi;
1633
1634 EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
1635 {
1636 modify_mem_list[i].release ();
1637 canon_modify_mem_list[i].release ();
1638 }
1639 bitmap_clear (modify_mem_list_set);
1640 bitmap_clear (blocks_with_calls);
1641 }
1642
1643 /* Release memory used by modify_mem_list_set. */
1644
1645 static void
1646 free_modify_mem_tables (void)
1647 {
1648 clear_modify_mem_tables ();
1649 free (modify_mem_list);
1650 free (canon_modify_mem_list);
1651 modify_mem_list = 0;
1652 canon_modify_mem_list = 0;
1653 }
1654 \f
1655 /* Compute PRE+LCM working variables. */
1656
1657 /* Local properties of expressions. */
1658
1659 /* Nonzero for expressions that are transparent in the block. */
1660 static sbitmap *transp;
1661
1662 /* Nonzero for expressions that are computed (available) in the block. */
1663 static sbitmap *comp;
1664
1665 /* Nonzero for expressions that are locally anticipatable in the block. */
1666 static sbitmap *antloc;
1667
1668 /* Nonzero for expressions where this block is an optimal computation
1669 point. */
1670 static sbitmap *pre_optimal;
1671
1672 /* Nonzero for expressions which are redundant in a particular block. */
1673 static sbitmap *pre_redundant;
1674
1675 /* Nonzero for expressions which should be inserted on a specific edge. */
1676 static sbitmap *pre_insert_map;
1677
1678 /* Nonzero for expressions which should be deleted in a specific block. */
1679 static sbitmap *pre_delete_map;
1680
1681 /* Allocate vars used for PRE analysis. */
1682
1683 static void
1684 alloc_pre_mem (int n_blocks, int n_exprs)
1685 {
1686 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
1687 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
1688 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
1689
1690 pre_optimal = NULL;
1691 pre_redundant = NULL;
1692 pre_insert_map = NULL;
1693 pre_delete_map = NULL;
1694 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
1695
1696 /* pre_insert and pre_delete are allocated later. */
1697 }
1698
1699 /* Free vars used for PRE analysis. */
1700
1701 static void
1702 free_pre_mem (void)
1703 {
1704 sbitmap_vector_free (transp);
1705 sbitmap_vector_free (comp);
1706
1707 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
1708
1709 if (pre_optimal)
1710 sbitmap_vector_free (pre_optimal);
1711 if (pre_redundant)
1712 sbitmap_vector_free (pre_redundant);
1713 if (pre_insert_map)
1714 sbitmap_vector_free (pre_insert_map);
1715 if (pre_delete_map)
1716 sbitmap_vector_free (pre_delete_map);
1717
1718 transp = comp = NULL;
1719 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
1720 }
1721
1722 /* Remove certain expressions from anticipatable and transparent
1723 sets of basic blocks that have an incoming abnormal edge.
1724 For PRE remove potentially trapping expressions to avoid placing
1725 them on abnormal edges. For hoisting remove memory references that
1726 can be clobbered by calls. */
1727
1728 static void
1729 prune_expressions (bool pre_p)
1730 {
1731 sbitmap prune_exprs;
1732 struct gcse_expr *expr;
1733 unsigned int ui;
1734 basic_block bb;
1735
1736 prune_exprs = sbitmap_alloc (expr_hash_table.n_elems);
1737 bitmap_clear (prune_exprs);
1738 for (ui = 0; ui < expr_hash_table.size; ui++)
1739 {
1740 for (expr = expr_hash_table.table[ui]; expr; expr = expr->next_same_hash)
1741 {
1742 /* Note potentially trapping expressions. */
1743 if (may_trap_p (expr->expr))
1744 {
1745 bitmap_set_bit (prune_exprs, expr->bitmap_index);
1746 continue;
1747 }
1748
1749 if (!pre_p && MEM_P (expr->expr))
1750 /* Note memory references that can be clobbered by a call.
1751 We do not split abnormal edges in hoisting, so if
1752 a memory reference were hoisted along an abnormal edge,
1753 it would be placed /before/ the call. Therefore, only
1754 constant memory references can be hoisted along abnormal
1755 edges. */
1756 {
1757 if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
1758 && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
1759 continue;
1760
1761 if (MEM_READONLY_P (expr->expr)
1762 && !MEM_VOLATILE_P (expr->expr)
1763 && MEM_NOTRAP_P (expr->expr))
1764 /* Constant memory reference, e.g., a PIC address. */
1765 continue;
1766
1767 /* ??? Optimally, we would use interprocedural alias
1768 analysis to determine if this mem is actually killed
1769 by this call. */
1770
1771 bitmap_set_bit (prune_exprs, expr->bitmap_index);
1772 }
1773 }
1774 }
1775
1776 FOR_EACH_BB_FN (bb, cfun)
1777 {
1778 edge e;
1779 edge_iterator ei;
1780
1781 /* If the current block is the destination of an abnormal edge, we
1782 kill all trapping (for PRE) and memory (for hoist) expressions
1783 because we won't be able to properly place the instruction on
1784 the edge. So make them neither anticipatable nor transparent.
1785 This is fairly conservative.
1786
1787 ??? For hoisting it may be necessary to check for set-and-jump
1788 instructions here, not just for abnormal edges. The general problem
1789 is that when an expression cannot be placed right at the end of
1790 a basic block we should account for any side-effects of a subsequent
1791 jump instruction that could clobber the expression. It would
1792 be best to implement this check along the lines of
1793 should_hoist_expr_to_dom where the target block is already known
1794 and, hence, there's no need to conservatively prune expressions on
1795 "intermediate" set-and-jump instructions. */
1796 FOR_EACH_EDGE (e, ei, bb->preds)
1797 if ((e->flags & EDGE_ABNORMAL)
1798 && (pre_p || CALL_P (BB_END (e->src))))
1799 {
1800 bitmap_and_compl (antloc[bb->index],
1801 antloc[bb->index], prune_exprs);
1802 bitmap_and_compl (transp[bb->index],
1803 transp[bb->index], prune_exprs);
1804 break;
1805 }
1806 }
1807
1808 sbitmap_free (prune_exprs);
1809 }
1810
1811 /* It may be necessary to insert a large number of insns on edges to
1812 make the existing occurrences of expressions fully redundant. This
1813 routine examines the set of insertions and deletions and if the ratio
1814 of insertions to deletions is too high for a particular expression, then
1815 the expression is removed from the insertion/deletion sets.
1816
1817 N_ELEMS is the number of elements in the hash table. */
1818
1819 static void
1820 prune_insertions_deletions (int n_elems)
1821 {
1822 sbitmap_iterator sbi;
1823 sbitmap prune_exprs;
1824
1825 /* We always use I to iterate over blocks/edges and J to iterate over
1826 expressions. */
1827 unsigned int i, j;
1828
1829 /* Counts for the number of times an expression needs to be inserted and
1830 number of times an expression can be removed as a result. */
1831 int *insertions = GCNEWVEC (int, n_elems);
1832 int *deletions = GCNEWVEC (int, n_elems);
1833
1834 /* Set of expressions which require too many insertions relative to
1835 the number of deletions achieved. We will prune these out of the
1836 insertion/deletion sets. */
1837 prune_exprs = sbitmap_alloc (n_elems);
1838 bitmap_clear (prune_exprs);
1839
1840 /* Iterate over the edges counting the number of times each expression
1841 needs to be inserted. */
1842 for (i = 0; i < (unsigned) n_edges_for_fn (cfun); i++)
1843 {
1844 EXECUTE_IF_SET_IN_BITMAP (pre_insert_map[i], 0, j, sbi)
1845 insertions[j]++;
1846 }
1847
1848 /* Similarly for deletions, but those occur in blocks rather than on
1849 edges. */
1850 for (i = 0; i < (unsigned) last_basic_block_for_fn (cfun); i++)
1851 {
1852 EXECUTE_IF_SET_IN_BITMAP (pre_delete_map[i], 0, j, sbi)
1853 deletions[j]++;
1854 }
1855
1856 /* Now that we have accurate counts, iterate over the elements in the
1857 hash table and see if any need too many insertions relative to the
1858 number of evaluations that can be removed. If so, mark them in
1859 PRUNE_EXPRS. */
1860 for (j = 0; j < (unsigned) n_elems; j++)
1861 if (deletions[j]
1862 && ((unsigned) insertions[j] / deletions[j]) > MAX_GCSE_INSERTION_RATIO)
1863 bitmap_set_bit (prune_exprs, j);
1864
1865 /* Now prune PRE_INSERT_MAP and PRE_DELETE_MAP based on PRUNE_EXPRS. */
1866 EXECUTE_IF_SET_IN_BITMAP (prune_exprs, 0, j, sbi)
1867 {
1868 for (i = 0; i < (unsigned) n_edges_for_fn (cfun); i++)
1869 bitmap_clear_bit (pre_insert_map[i], j);
1870
1871 for (i = 0; i < (unsigned) last_basic_block_for_fn (cfun); i++)
1872 bitmap_clear_bit (pre_delete_map[i], j);
1873 }
1874
1875 sbitmap_free (prune_exprs);
1876 free (insertions);
1877 free (deletions);
1878 }
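
/* A worked example of the rule above: suppose expression J would have
   to be inserted on 25 edges to make a single existing evaluation
   redundant, so insertions[J] = 25 and deletions[J] = 1.  If
   MAX_GCSE_INSERTION_RATIO is 20 (the precise value is a tunable
   parameter and may differ), then 25 / 1 > 20 and bit J is cleared
   from every pre_insert_map and pre_delete_map entry, leaving the
   expression alone.  */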
1879
1880 /* Top level routine to do the dataflow analysis needed by PRE. */
1881
1882 static struct edge_list *
1883 compute_pre_data (void)
1884 {
1885 struct edge_list *edge_list;
1886 basic_block bb;
1887
1888 compute_local_properties (transp, comp, antloc, &expr_hash_table);
1889 prune_expressions (true);
1890 bitmap_vector_clear (ae_kill, last_basic_block_for_fn (cfun));
1891
1892 /* Compute ae_kill for each basic block using:
1893
1894 ~(TRANSP | COMP)
1895 */
1896
1897 FOR_EACH_BB_FN (bb, cfun)
1898 {
1899 bitmap_ior (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
1900 bitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
1901 }
1902
1903 edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
1904 ae_kill, &pre_insert_map, &pre_delete_map);
1905 sbitmap_vector_free (antloc);
1906 antloc = NULL;
1907 sbitmap_vector_free (ae_kill);
1908 ae_kill = NULL;
1909
1910 prune_insertions_deletions (expr_hash_table.n_elems);
1911
1912 return edge_list;
1913 }
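
/* A truth-table view of the AE_KILL computation above:

     transp[B][e]  comp[B][e]  ->  ae_kill[B][e] = ~(transp | comp)
          0             0                 1  (clobbered, not recomputed)
          0             1                 0  (recomputed, available again)
          1             -                 0  (expression survives B)

   i.e. a block kills availability only when it clobbers the expression
   without computing it afresh.  */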
1914 \f
1915 /* PRE utilities */
1916
1917 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
1918 block BB.
1919
1920 VISITED is a pointer to a working buffer for tracking which BB's have
1921 been visited. It is NULL for the top-level call.
1922
1923 We treat reaching expressions that go through blocks containing the same
1924 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
1925 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
1926 2 as not reaching. The intent is to improve the probability of finding
1927 only one reaching expression and to reduce register lifetimes by picking
1928 the closest such expression. */
1929
1930 static int
1931 pre_expr_reaches_here_p_work (basic_block occr_bb, struct gcse_expr *expr,
1932 basic_block bb, char *visited)
1933 {
1934 edge pred;
1935 edge_iterator ei;
1936
1937 FOR_EACH_EDGE (pred, ei, bb->preds)
1938 {
1939 basic_block pred_bb = pred->src;
1940
1941 if (pred->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1942 /* Has the predecessor already been visited? */
1943 || visited[pred_bb->index])
1944 ;/* Nothing to do. */
1945
1946 /* Does this predecessor generate this expression? */
1947 else if (bitmap_bit_p (comp[pred_bb->index], expr->bitmap_index))
1948 {
1949 /* Is this the occurrence we're looking for?
1950 Note that there's only one generating occurrence per block
1951 so we just need to check the block number. */
1952 if (occr_bb == pred_bb)
1953 return 1;
1954
1955 visited[pred_bb->index] = 1;
1956 }
1957 /* Ignore this predecessor if it kills the expression. */
1958 else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
1959 visited[pred_bb->index] = 1;
1960
1961 /* Neither gen nor kill. */
1962 else
1963 {
1964 visited[pred_bb->index] = 1;
1965 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
1966 return 1;
1967 }
1968 }
1969
1970 /* All paths have been checked. */
1971 return 0;
1972 }
1973
1974 /* The wrapper for pre_expr_reaches_here_p_work that ensures that any
1975 memory allocated for that function is returned. */
1976
1977 static int
1978 pre_expr_reaches_here_p (basic_block occr_bb, struct gcse_expr *expr, basic_block bb)
1979 {
1980 int rval;
1981 char *visited = XCNEWVEC (char, last_basic_block_for_fn (cfun));
1982
1983 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
1984
1985 free (visited);
1986 return rval;
1987 }
1988 \f
1989 /* Generate RTL to copy an EXPR to its `reaching_reg' and return it. */
1990
1991 static rtx_insn *
1992 process_insert_insn (struct gcse_expr *expr)
1993 {
1994 rtx reg = expr->reaching_reg;
1995 /* Copy the expression to make sure we don't have any sharing issues. */
1996 rtx exp = copy_rtx (expr->expr);
1997 rtx_insn *pat;
1998
1999 start_sequence ();
2000
2001 /* If the expression is something that's an operand, like a constant,
2002 just copy it to a register. */
2003 if (general_operand (exp, GET_MODE (reg)))
2004 emit_move_insn (reg, exp);
2005
2006 /* Otherwise, make a new insn to compute this expression and make sure the
2007 insn will be recognized (this also adds any needed CLOBBERs). */
2008 else
2009 {
2010 rtx_insn *insn = emit_insn (gen_rtx_SET (VOIDmode, reg, exp));
2011
2012 if (insn_invalid_p (insn, false))
2013 gcc_unreachable ();
2014 }
2015
2016 pat = get_insns ();
2017 end_sequence ();
2018
2019 return pat;
2020 }
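
/* A sketch with invented register numbers: for an expression
   (plus:SI (reg:SI 60) (reg:SI 61)) whose reaching_reg is (reg:SI 100),
   the sequence built above contains

     (set (reg:SI 100) (plus:SI (reg:SI 60) (reg:SI 61)))

   together with any CLOBBERs that recognition added, whereas a
   general_operand expression such as (const_int 42) is emitted directly
   as a move into the register.  */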
2021
2022 /* Add EXPR to the end of basic block BB.
2023
2024 This is used by both PRE and code hoisting. */
2025
2026 static void
2027 insert_insn_end_basic_block (struct gcse_expr *expr, basic_block bb)
2028 {
2029 rtx_insn *insn = BB_END (bb);
2030 rtx_insn *new_insn;
2031 rtx reg = expr->reaching_reg;
2032 int regno = REGNO (reg);
2033 rtx_insn *pat, *pat_end;
2034
2035 pat = process_insert_insn (expr);
2036 gcc_assert (pat && INSN_P (pat));
2037
2038 pat_end = pat;
2039 while (NEXT_INSN (pat_end) != NULL_RTX)
2040 pat_end = NEXT_INSN (pat_end);
2041
2042 /* If the last insn is a jump, insert EXPR in front [taking care to
2043 handle cc0, etc. properly]. Similarly we need to take care of
2044 trapping instructions in the presence of non-call exceptions. */
2045
2046 if (JUMP_P (insn)
2047 || (NONJUMP_INSN_P (insn)
2048 && (!single_succ_p (bb)
2049 || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
2050 {
2051 #ifdef HAVE_cc0
2052 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
2053 if cc0 isn't set. */
2054 rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
2055 if (note)
2056 insn = safe_as_a <rtx_insn *> (XEXP (note, 0));
2057 else
2058 {
2059 rtx_insn *maybe_cc0_setter = prev_nonnote_insn (insn);
2060 if (maybe_cc0_setter
2061 && INSN_P (maybe_cc0_setter)
2062 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
2063 insn = maybe_cc0_setter;
2064 }
2065 #endif
2066 /* FIXME: What if something in cc0/jump uses value set in new insn? */
2067 new_insn = emit_insn_before_noloc (pat, insn, bb);
2068 }
2069
2070 /* Likewise if the last insn is a call, as will happen in the presence
2071 of exception handling. */
2072 else if (CALL_P (insn)
2073 && (!single_succ_p (bb)
2074 || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
2075 {
2076 /* Keeping in mind targets with small register classes and parameters
2077 in registers, we search backward and place the instructions before
2078 the first parameter is loaded. Do this for all targets for consistency,
2079 and on the presumption that we'll get better code elsewhere as well.
2080
2081 /* Since different machines initialize their parameter registers
2082 in different orders, assume nothing. Collect the set of all
2083 parameter registers. */
2084 insn = find_first_parameter_load (insn, BB_HEAD (bb));
2085
2086 /* If we found all the parameter loads, then we want to insert
2087 before the first parameter load.
2088
2089 If we did not find all the parameter loads, then we might have
2090 stopped on the head of the block, which could be a CODE_LABEL.
2091 If we inserted before the CODE_LABEL, then we would be putting
2092 the insn in the wrong basic block. In that case, put the insn
2093 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
2094 while (LABEL_P (insn)
2095 || NOTE_INSN_BASIC_BLOCK_P (insn))
2096 insn = NEXT_INSN (insn);
2097
2098 new_insn = emit_insn_before_noloc (pat, insn, bb);
2099 }
2100 else
2101 new_insn = emit_insn_after_noloc (pat, insn, bb);
2102
2103 while (1)
2104 {
2105 if (INSN_P (pat))
2106 add_label_notes (PATTERN (pat), new_insn);
2107 if (pat == pat_end)
2108 break;
2109 pat = NEXT_INSN (pat);
2110 }
2111
2112 gcse_create_count++;
2113
2114 if (dump_file)
2115 {
2116 fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
2117 bb->index, INSN_UID (new_insn));
2118 fprintf (dump_file, "copying expression %d to reg %d\n",
2119 expr->bitmap_index, regno);
2120 }
2121 }
2122
2123 /* Insert partially redundant expressions on edges in the CFG to make
2124 the expressions fully redundant. */
2125
2126 static int
2127 pre_edge_insert (struct edge_list *edge_list, struct gcse_expr **index_map)
2128 {
2129 int e, i, j, num_edges, set_size, did_insert = 0;
2130 sbitmap *inserted;
2131
2132 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
2133 if it reaches any of the deleted expressions. */
2134
2135 set_size = pre_insert_map[0]->size;
2136 num_edges = NUM_EDGES (edge_list);
2137 inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
2138 bitmap_vector_clear (inserted, num_edges);
2139
2140 for (e = 0; e < num_edges; e++)
2141 {
2142 int indx;
2143 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
2144
2145 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
2146 {
2147 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
2148
2149 for (j = indx;
2150 insert && j < (int) expr_hash_table.n_elems;
2151 j++, insert >>= 1)
2152 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
2153 {
2154 struct gcse_expr *expr = index_map[j];
2155 struct gcse_occr *occr;
2156
2157 /* Now look at each deleted occurrence of this expression. */
2158 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2159 {
2160 if (! occr->deleted_p)
2161 continue;
2162
2163 /* Insert this expression on this edge if it would
2164 reach the deleted occurrence in BB. */
2165 if (!bitmap_bit_p (inserted[e], j))
2166 {
2167 rtx_insn *insn;
2168 edge eg = INDEX_EDGE (edge_list, e);
2169
2170 /* We can't insert anything on an abnormal and
2171 critical edge, so we insert the insn at the end of
2172 the previous block. There are several alternatives
2173 detailed in Morgan's book, p. 277 (sec. 10.5), for
2174 handling this situation. This one is easiest for
2175 now. */
2176
2177 if (eg->flags & EDGE_ABNORMAL)
2178 insert_insn_end_basic_block (index_map[j], bb);
2179 else
2180 {
2181 insn = process_insert_insn (index_map[j]);
2182 insert_insn_on_edge (insn, eg);
2183 }
2184
2185 if (dump_file)
2186 {
2187 fprintf (dump_file, "PRE: edge (%d,%d), ",
2188 bb->index,
2189 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
2190 fprintf (dump_file, "copy expression %d\n",
2191 expr->bitmap_index);
2192 }
2193
2194 update_ld_motion_stores (expr);
2195 bitmap_set_bit (inserted[e], j);
2196 did_insert = 1;
2197 gcse_create_count++;
2198 }
2199 }
2200 }
2201 }
2202 }
2203
2204 sbitmap_vector_free (inserted);
2205 return did_insert;
2206 }
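
/* Indexing note for the word-by-word scan above, assuming for the sake
   of the example that SBITMAP_ELT_BITS is 64: an expression with
   bitmap_index 70 that must be inserted on edge E is bit 6 of word 1 of
   pre_insert_map[E], and the scan reaches it at i = 1, indx = 64,
   j = 70.  */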
2207
2208 /* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
2209 Given "old_reg <- expr" (INSN), instead of adding after it
2210 reaching_reg <- old_reg
2211 it's better to do the following:
2212 reaching_reg <- expr
2213 old_reg <- reaching_reg
2214 because this way copy propagation can discover additional PRE
2215 opportunities. But if this fails, we try the old way.
2216 When "expr" is a store, i.e.
2217 given "MEM <- old_reg", instead of adding after it
2218 reaching_reg <- old_reg
2219 it's better to add it before as follows:
2220 reaching_reg <- old_reg
2221 MEM <- reaching_reg. */
2222
2223 static void
2224 pre_insert_copy_insn (struct gcse_expr *expr, rtx_insn *insn)
2225 {
2226 rtx reg = expr->reaching_reg;
2227 int regno = REGNO (reg);
2228 int indx = expr->bitmap_index;
2229 rtx pat = PATTERN (insn);
2230 rtx set, first_set, new_insn;
2231 rtx old_reg;
2232 int i;
2233
2234 /* This block matches the logic in hash_scan_insn. */
2235 switch (GET_CODE (pat))
2236 {
2237 case SET:
2238 set = pat;
2239 break;
2240
2241 case PARALLEL:
2242 /* Search through the parallel looking for the set whose
2243 source was the expression that we're interested in. */
2244 first_set = NULL_RTX;
2245 set = NULL_RTX;
2246 for (i = 0; i < XVECLEN (pat, 0); i++)
2247 {
2248 rtx x = XVECEXP (pat, 0, i);
2249 if (GET_CODE (x) == SET)
2250 {
2251 /* If the source was a REG_EQUAL or REG_EQUIV note, we
2252 may not find an equivalent expression, but in this
2253 case the PARALLEL will have a single set. */
2254 if (first_set == NULL_RTX)
2255 first_set = x;
2256 if (expr_equiv_p (SET_SRC (x), expr->expr))
2257 {
2258 set = x;
2259 break;
2260 }
2261 }
2262 }
2263
2264 gcc_assert (first_set);
2265 if (set == NULL_RTX)
2266 set = first_set;
2267 break;
2268
2269 default:
2270 gcc_unreachable ();
2271 }
2272
2273 if (REG_P (SET_DEST (set)))
2274 {
2275 old_reg = SET_DEST (set);
2276 /* Check if we can modify the set destination in the original insn. */
2277 if (validate_change (insn, &SET_DEST (set), reg, 0))
2278 {
2279 new_insn = gen_move_insn (old_reg, reg);
2280 new_insn = emit_insn_after (new_insn, insn);
2281 }
2282 else
2283 {
2284 new_insn = gen_move_insn (reg, old_reg);
2285 new_insn = emit_insn_after (new_insn, insn);
2286 }
2287 }
2288 else /* This is possible only in case of a store to memory. */
2289 {
2290 old_reg = SET_SRC (set);
2291 new_insn = gen_move_insn (reg, old_reg);
2292
2293 /* Check if we can modify the set source in the original insn. */
2294 if (validate_change (insn, &SET_SRC (set), reg, 0))
2295 new_insn = emit_insn_before (new_insn, insn);
2296 else
2297 new_insn = emit_insn_after (new_insn, insn);
2298 }
2299
2300 gcse_create_count++;
2301
2302 if (dump_file)
2303 fprintf (dump_file,
2304 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
2305 BLOCK_FOR_INSN (insn)->index, INSN_UID (new_insn), indx,
2306 INSN_UID (insn), regno);
2307 }
2308
2309 /* Copy available expressions that reach the redundant expression
2310 to `reaching_reg'. */
2311
2312 static void
2313 pre_insert_copies (void)
2314 {
2315 unsigned int i, added_copy;
2316 struct gcse_expr *expr;
2317 struct gcse_occr *occr;
2318 struct gcse_occr *avail;
2319
2320 /* For each available expression in the table, copy the result to
2321 `reaching_reg' if the expression reaches a deleted one.
2322
2323 ??? The current algorithm is rather brute force.
2324 Need to do some profiling. */
2325
2326 for (i = 0; i < expr_hash_table.size; i++)
2327 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2328 {
2329 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
2330 we don't want to insert a copy here because the expression may not
2331 really be redundant. So only insert an insn if the expression was
2332 deleted. This test also avoids further processing if the
2333 expression wasn't deleted anywhere. */
2334 if (expr->reaching_reg == NULL)
2335 continue;
2336
2337 /* Set when we add a copy for that expression. */
2338 added_copy = 0;
2339
2340 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2341 {
2342 if (! occr->deleted_p)
2343 continue;
2344
2345 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
2346 {
2347 rtx_insn *insn = avail->insn;
2348
2349 /* No need to handle this one if handled already. */
2350 if (avail->copied_p)
2351 continue;
2352
2353 /* Don't handle this one if it's a redundant one. */
2354 if (insn->deleted ())
2355 continue;
2356
2357 /* Or if the expression doesn't reach the deleted one. */
2358 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
2359 expr,
2360 BLOCK_FOR_INSN (occr->insn)))
2361 continue;
2362
2363 added_copy = 1;
2364
2365 /* Copy the result of avail to reaching_reg. */
2366 pre_insert_copy_insn (expr, insn);
2367 avail->copied_p = 1;
2368 }
2369 }
2370
2371 if (added_copy)
2372 update_ld_motion_stores (expr);
2373 }
2374 }
2375
2376 struct set_data
2377 {
2378 rtx_insn *insn;
2379 const_rtx set;
2380 int nsets;
2381 };
2382
2383 /* Increment number of sets and record set in DATA. */
2384
2385 static void
2386 record_set_data (rtx dest, const_rtx set, void *data)
2387 {
2388 struct set_data *s = (struct set_data *)data;
2389
2390 if (GET_CODE (set) == SET)
2391 {
2392 /* We treat insns having multiple sets, where all but one are
2393 dead, as single set insns. In the common case only a single
2394 set is present, so we want to avoid checking for REG_UNUSED
2395 notes unless necessary. */
2396 if (s->nsets == 1
2397 && find_reg_note (s->insn, REG_UNUSED, SET_DEST (s->set))
2398 && !side_effects_p (s->set))
2399 s->nsets = 0;
2400
2401 if (!s->nsets)
2402 {
2403 /* Record this set. */
2404 s->nsets += 1;
2405 s->set = set;
2406 }
2407 else if (!find_reg_note (s->insn, REG_UNUSED, dest)
2408 || side_effects_p (set))
2409 s->nsets += 1;
2410 }
2411 }
2412
2413 static const_rtx
2414 single_set_gcse (rtx_insn *insn)
2415 {
2416 struct set_data s;
2417 rtx pattern;
2418
2419 gcc_assert (INSN_P (insn));
2420
2421 /* Optimize common case. */
2422 pattern = PATTERN (insn);
2423 if (GET_CODE (pattern) == SET)
2424 return pattern;
2425
2426 s.insn = insn;
2427 s.nsets = 0;
2428 note_stores (pattern, record_set_data, &s);
2429
2430 /* Considered invariant insns have exactly one set. */
2431 gcc_assert (s.nsets == 1);
2432 return s.set;
2433 }
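
/* A hypothetical RTL case accepted here: in

     (parallel [(set (reg:SI 100) (plus:SI (reg:SI 60) (reg:SI 61)))
                (set (reg:CC 17) (...))])

   where the second set carries a REG_UNUSED note and has no side
   effects, record_set_data discounts the dead set, so the insn is
   treated as a single set of (reg:SI 100).  */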
2434
2435 /* Emit move from SRC to DEST noting the equivalence with expression computed
2436 in INSN. */
2437
2438 static rtx
2439 gcse_emit_move_after (rtx dest, rtx src, rtx_insn *insn)
2440 {
2441 rtx_insn *new_rtx;
2442 const_rtx set = single_set_gcse (insn);
2443 rtx set2;
2444 rtx note;
2445 rtx eqv = NULL_RTX;
2446
2447 /* This should never fail since we're creating a reg->reg copy
2448 we've verified to be valid. */
2449
2450 new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);
2451
2452 /* Note the equivalence for local CSE pass. Take the note from the old
2453 set if there was one. Otherwise record the SET_SRC from the old set
2454 unless DEST is also an operand of the SET_SRC. */
2455 set2 = single_set (new_rtx);
2456 if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
2457 return new_rtx;
2458 if ((note = find_reg_equal_equiv_note (insn)))
2459 eqv = XEXP (note, 0);
2460 else if (! REG_P (dest)
2461 || ! reg_mentioned_p (dest, SET_SRC (set)))
2462 eqv = SET_SRC (set);
2463
2464 if (eqv != NULL_RTX)
2465 set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));
2466
2467 return new_rtx;
2468 }
2469
2470 /* Delete redundant computations.
2471 Deletion is done by changing the insn to copy the `reaching_reg' of
2472 the expression into the result of the SET. It is left to later passes
2473 to propagate the copy or eliminate it.
2474
2475 Return nonzero if a change is made. */
2476
2477 static int
2478 pre_delete (void)
2479 {
2480 unsigned int i;
2481 int changed;
2482 struct gcse_expr *expr;
2483 struct gcse_occr *occr;
2484
2485 changed = 0;
2486 for (i = 0; i < expr_hash_table.size; i++)
2487 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2488 {
2489 int indx = expr->bitmap_index;
2490
2491 /* We only need to search antic_occr since we require ANTLOC != 0. */
2492 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2493 {
2494 rtx_insn *insn = occr->insn;
2495 rtx set;
2496 basic_block bb = BLOCK_FOR_INSN (insn);
2497
2498 /* We only delete insns that have a single_set. */
2499 if (bitmap_bit_p (pre_delete_map[bb->index], indx)
2500 && (set = single_set (insn)) != 0
2501 && dbg_cnt (pre_insn))
2502 {
2503 /* Create a pseudo-reg to store the result of reaching
2504 expressions into. Get the mode for the new pseudo from
2505 the mode of the original destination pseudo. */
2506 if (expr->reaching_reg == NULL)
2507 expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST (set));
2508
2509 gcse_emit_move_after (SET_DEST (set), expr->reaching_reg, insn);
2510 delete_insn (insn);
2511 occr->deleted_p = 1;
2512 changed = 1;
2513 gcse_subst_count++;
2514
2515 if (dump_file)
2516 {
2517 fprintf (dump_file,
2518 "PRE: redundant insn %d (expression %d) in ",
2519 INSN_UID (insn), indx);
2520 fprintf (dump_file, "bb %d, reaching reg is %d\n",
2521 bb->index, REGNO (expr->reaching_reg));
2522 }
2523 }
2524 }
2525 }
2526
2527 return changed;
2528 }
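
/* Sketched on invented RTL, the replacement performed above turns a
   redundant occurrence

     (set (reg:SI 65) (plus:SI (reg:SI 60) (reg:SI 61)))

   into the copy

     (set (reg:SI 65) (reg:SI 100))

   where (reg:SI 100) is the expression's reaching_reg; later passes are
   left to propagate or eliminate the copy.  */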
2529
2530 /* Perform GCSE optimizations using PRE.
2531 This is called by one_pre_gcse_pass after all the dataflow analysis
2532 has been done.
2533
2534 This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
2535 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
2536 Compiler Design and Implementation.
2537
2538 ??? A new pseudo reg is created to hold the reaching expression. The nice
2539 thing about the classical approach is that it would try to use an existing
2540 reg. If the register can't be adequately optimized [i.e. we introduce
2541 reload problems], one could add a pass here to propagate the new register
2542 through the block.
2543
2544 ??? We don't handle single sets in PARALLELs because we're [currently] not
2545 able to copy the rest of the parallel when we insert copies to create full
2546 redundancies from partial redundancies. However, there's no reason why we
2547 can't handle PARALLELs in the cases where there are no partial
2548 redundancies. */
2549
2550 static int
2551 pre_gcse (struct edge_list *edge_list)
2552 {
2553 unsigned int i;
2554 int did_insert, changed;
2555 struct gcse_expr **index_map;
2556 struct gcse_expr *expr;
2557
2558 /* Compute a mapping from expression number (`bitmap_index') to
2559 hash table entry. */
2560
2561 index_map = XCNEWVEC (struct gcse_expr *, expr_hash_table.n_elems);
2562 for (i = 0; i < expr_hash_table.size; i++)
2563 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2564 index_map[expr->bitmap_index] = expr;
2565
2566 /* Delete the redundant insns first so that
2567 - we know what register to use for the new insns and for the other
2568 ones with reaching expressions
2569 - we know which insns are redundant when we go to create copies */
2570
2571 changed = pre_delete ();
2572 did_insert = pre_edge_insert (edge_list, index_map);
2573
2574 /* In other places with reaching expressions, copy the expression to the
2575 specially allocated pseudo-reg that reaches the redundant expr. */
2576 pre_insert_copies ();
2577 if (did_insert)
2578 {
2579 commit_edge_insertions ();
2580 changed = 1;
2581 }
2582
2583 free (index_map);
2584 return changed;
2585 }
2586
2587 /* Top level routine to perform one PRE GCSE pass.
2588
2589 Return nonzero if a change was made. */
2590
2591 static int
2592 one_pre_gcse_pass (void)
2593 {
2594 int changed = 0;
2595
2596 gcse_subst_count = 0;
2597 gcse_create_count = 0;
2598
2599 /* Return if there's nothing to do, or it is too expensive. */
2600 if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
2601 || is_too_expensive (_("PRE disabled")))
2602 return 0;
2603
2604 /* We need alias. */
2605 init_alias_analysis ();
2606
2607 bytes_used = 0;
2608 gcc_obstack_init (&gcse_obstack);
2609 alloc_gcse_mem ();
2610
2611 alloc_hash_table (&expr_hash_table);
2612 add_noreturn_fake_exit_edges ();
2613 if (flag_gcse_lm)
2614 compute_ld_motion_mems ();
2615
2616 compute_hash_table (&expr_hash_table);
2617 if (flag_gcse_lm)
2618 trim_ld_motion_mems ();
2619 if (dump_file)
2620 dump_hash_table (dump_file, "Expression", &expr_hash_table);
2621
2622 if (expr_hash_table.n_elems > 0)
2623 {
2624 struct edge_list *edge_list;
2625 alloc_pre_mem (last_basic_block_for_fn (cfun), expr_hash_table.n_elems);
2626 edge_list = compute_pre_data ();
2627 changed |= pre_gcse (edge_list);
2628 free_edge_list (edge_list);
2629 free_pre_mem ();
2630 }
2631
2632 if (flag_gcse_lm)
2633 free_ld_motion_mems ();
2634 remove_fake_exit_edges ();
2635 free_hash_table (&expr_hash_table);
2636
2637 free_gcse_mem ();
2638 obstack_free (&gcse_obstack, NULL);
2639
2640 /* We are finished with alias. */
2641 end_alias_analysis ();
2642
2643 if (dump_file)
2644 {
2645 fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
2646 current_function_name (), n_basic_blocks_for_fn (cfun),
2647 bytes_used);
2648 fprintf (dump_file, "%d substs, %d insns created\n",
2649 gcse_subst_count, gcse_create_count);
2650 }
2651
2652 return changed;
2653 }
2654 \f
2655 /* If X contains any LABEL_REF's, add REG_LABEL_OPERAND notes for them
2656 to INSN. If such notes are added to an insn which references a
2657 CODE_LABEL, the LABEL_NUSES count is incremented. We have to add
2658 these notes, because the following loop optimization pass requires
2659 them. */
2660
2661 /* ??? If there was a jump optimization pass after gcse and before loop,
2662 then we would not need to do this here, because jump would add the
2663 necessary REG_LABEL_OPERAND and REG_LABEL_TARGET notes. */
2664
2665 static void
2666 add_label_notes (rtx x, rtx insn)
2667 {
2668 enum rtx_code code = GET_CODE (x);
2669 int i, j;
2670 const char *fmt;
2671
2672 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
2673 {
2674 /* This code used to ignore labels that referred to dispatch tables to
2675 avoid flow generating (slightly) worse code.
2676
2677 We no longer ignore such label references (see LABEL_REF handling in
2678 mark_jump_label for additional information). */
2679
2680 /* There's no reason for current users to emit jump-insns with
2681 such a LABEL_REF, so we don't have to handle REG_LABEL_TARGET
2682 notes. */
2683 gcc_assert (!JUMP_P (insn));
2684 add_reg_note (insn, REG_LABEL_OPERAND, LABEL_REF_LABEL (x));
2685
2686 if (LABEL_P (LABEL_REF_LABEL (x)))
2687 LABEL_NUSES (LABEL_REF_LABEL (x))++;
2688
2689 return;
2690 }
2691
2692 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2693 {
2694 if (fmt[i] == 'e')
2695 add_label_notes (XEXP (x, i), insn);
2696 else if (fmt[i] == 'E')
2697 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2698 add_label_notes (XVECEXP (x, i, j), insn);
2699 }
2700 }
2701
2702 /* Code Hoisting variables and subroutines. */
2703
2704 /* Very busy expressions. */
2705 static sbitmap *hoist_vbein;
2706 static sbitmap *hoist_vbeout;
2707
2708 /* ??? We could compute post dominators and run this algorithm in
2709 reverse to perform tail merging; doing so would probably be
2710 more effective than the tail merging code in jump.c.
2711
2712 It's unclear if tail merging could be run in parallel with
2713 code hoisting. It would be nice. */
2714
2715 /* Allocate vars used for code hoisting analysis. */
2716
2717 static void
2718 alloc_code_hoist_mem (int n_blocks, int n_exprs)
2719 {
2720 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
2721 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
2722 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
2723
2724 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
2725 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
2726 }
2727
2728 /* Free vars used for code hoisting analysis. */
2729
2730 static void
2731 free_code_hoist_mem (void)
2732 {
2733 sbitmap_vector_free (antloc);
2734 sbitmap_vector_free (transp);
2735 sbitmap_vector_free (comp);
2736
2737 sbitmap_vector_free (hoist_vbein);
2738 sbitmap_vector_free (hoist_vbeout);
2739
2740 free_dominance_info (CDI_DOMINATORS);
2741 }
2742
2743 /* Compute the very busy expressions at entry/exit from each block.
2744
2745 An expression is very busy if all paths from a given point
2746 compute the expression. */
2747
2748 static void
2749 compute_code_hoist_vbeinout (void)
2750 {
2751 int changed, passes;
2752 basic_block bb;
2753
2754 bitmap_vector_clear (hoist_vbeout, last_basic_block_for_fn (cfun));
2755 bitmap_vector_clear (hoist_vbein, last_basic_block_for_fn (cfun));
2756
2757 passes = 0;
2758 changed = 1;
2759
2760 while (changed)
2761 {
2762 changed = 0;
2763
2764 /* We scan the blocks in reverse order to speed up
2765 convergence. */
2766 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2767 {
2768 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
2769 {
2770 bitmap_intersection_of_succs (hoist_vbeout[bb->index],
2771 hoist_vbein, bb);
2772
2773 /* Include expressions in VBEout that are calculated
2774 in BB and available at its end. */
2775 bitmap_ior (hoist_vbeout[bb->index],
2776 hoist_vbeout[bb->index], comp[bb->index]);
2777 }
2778
2779 changed |= bitmap_or_and (hoist_vbein[bb->index],
2780 antloc[bb->index],
2781 hoist_vbeout[bb->index],
2782 transp[bb->index]);
2783 }
2784
2785 passes++;
2786 }
2787
2788 if (dump_file)
2789 {
2790 fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);
2791
2792 FOR_EACH_BB_FN (bb, cfun)
2793 {
2794 fprintf (dump_file, "vbein (%d): ", bb->index);
2795 dump_bitmap_file (dump_file, hoist_vbein[bb->index]);
2796 fprintf (dump_file, "vbeout(%d): ", bb->index);
2797 dump_bitmap_file (dump_file, hoist_vbeout[bb->index]);
2798 }
2799 }
2800 }
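
/* A concrete (hypothetical) instance of the fixpoint above: if block B1
   branches to B2 and B3, and both B2 and B3 compute a+b before either
   operand is clobbered, then

     hoist_vbein[B2] = hoist_vbein[B3] = 1   (from antloc)
     hoist_vbeout[B1] = hoist_vbein[B2] & hoist_vbein[B3] = 1

   so a+b is very busy at the exit of B1 and becomes a candidate for
   hoisting into B1.  */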
2801
2802 /* Top level routine to do the dataflow analysis needed by code hoisting. */
2803
2804 static void
2805 compute_code_hoist_data (void)
2806 {
2807 compute_local_properties (transp, comp, antloc, &expr_hash_table);
2808 prune_expressions (false);
2809 compute_code_hoist_vbeinout ();
2810 calculate_dominance_info (CDI_DOMINATORS);
2811 if (dump_file)
2812 fprintf (dump_file, "\n");
2813 }
2814
2815 /* Update register pressure for BB when hoisting an expression from
2816 instruction FROM, if live ranges of inputs are shrunk. Also
2817 maintain live_in information if the live range of a register
2818 referred to in FROM is shrunk.
2819
2820 Return 0 if register pressure doesn't change, otherwise return
2821 the number by which register pressure is decreased.
2822
2823 NOTE: Register pressure won't be increased in this function. */
2824
2825 static int
2826 update_bb_reg_pressure (basic_block bb, rtx_insn *from)
2827 {
2828 rtx dreg;
2829 rtx_insn *insn;
2830 basic_block succ_bb;
2831 df_ref use, op_ref;
2832 edge succ;
2833 edge_iterator ei;
2834 int decreased_pressure = 0;
2835 int nregs;
2836 enum reg_class pressure_class;
2837
2838 FOR_EACH_INSN_USE (use, from)
2839 {
2840 dreg = DF_REF_REAL_REG (use);
2841 /* The live range of the register is shrunk only if it isn't:
2842 1. referred to on any path from the end of this block to EXIT, or
2843 2. referred to by insns other than FROM in this block. */
2844 FOR_EACH_EDGE (succ, ei, bb->succs)
2845 {
2846 succ_bb = succ->dest;
2847 if (succ_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
2848 continue;
2849
2850 if (bitmap_bit_p (BB_DATA (succ_bb)->live_in, REGNO (dreg)))
2851 break;
2852 }
2853 if (succ != NULL)
2854 continue;
2855
2856 op_ref = DF_REG_USE_CHAIN (REGNO (dreg));
2857 for (; op_ref; op_ref = DF_REF_NEXT_REG (op_ref))
2858 {
2859 if (!DF_REF_INSN_INFO (op_ref))
2860 continue;
2861
2862 insn = DF_REF_INSN (op_ref);
2863 if (BLOCK_FOR_INSN (insn) == bb
2864 && NONDEBUG_INSN_P (insn) && insn != from)
2865 break;
2866 }
2867
2868 pressure_class = get_regno_pressure_class (REGNO (dreg), &nregs);
2869 /* Decrease register pressure and update live_in information for
2870 this block. */
2871 if (!op_ref && pressure_class != NO_REGS)
2872 {
2873 decreased_pressure += nregs;
2874 BB_DATA (bb)->max_reg_pressure[pressure_class] -= nregs;
2875 bitmap_clear_bit (BB_DATA (bb)->live_in, REGNO (dreg));
2876 }
2877 }
2878 return decreased_pressure;
2879 }
2880
2881 /* Determine if the expression EXPR should be hoisted to EXPR_BB up in
2882 flow graph, if it can reach BB unimpaired. Stop the search if the
2883 expression would need to be moved more than DISTANCE instructions.
2884
2885 DISTANCE is the number of instructions through which EXPR can be
2886 hoisted up the flow graph.
2887
2888 BB_SIZE points to an array which contains the number of instructions
2889 for each basic block.
2890
2891 PRESSURE_CLASS and NREGS are register class and number of hard registers
2892 for storing EXPR.
2893
2894 HOISTED_BBS points to a bitmap indicating basic blocks through which
2895 EXPR is hoisted.
2896
2897 FROM is the instruction from which EXPR is hoisted.
2898
2899 It's unclear exactly what Muchnick meant by "unimpaired". It seems
2900 to me that the expression must either be computed or transparent in
2901 *every* block in the path(s) from EXPR_BB to BB. Any other definition
2902 would allow the expression to be hoisted out of loops, even if
2903 the expression wasn't a loop invariant.
2904
2905 Contrast this to reachability for PRE where an expression is
2906 considered reachable if *any* path reaches instead of *all*
2907 paths. */
2908
2909 static int
2910 should_hoist_expr_to_dom (basic_block expr_bb, struct gcse_expr *expr,
2911 basic_block bb, sbitmap visited, int distance,
2912 int *bb_size, enum reg_class pressure_class,
2913 int *nregs, bitmap hoisted_bbs, rtx_insn *from)
2914 {
2915 unsigned int i;
2916 edge pred;
2917 edge_iterator ei;
2918 sbitmap_iterator sbi;
2919 int visited_allocated_locally = 0;
2920 int decreased_pressure = 0;
2921
2922 if (flag_ira_hoist_pressure)
2923 {
2924 /* Record the old information of basic block BB when it is visited
2925 for the first time. */
2926 if (!bitmap_bit_p (hoisted_bbs, bb->index))
2927 {
2928 struct bb_data *data = BB_DATA (bb);
2929 bitmap_copy (data->backup, data->live_in);
2930 data->old_pressure = data->max_reg_pressure[pressure_class];
2931 }
2932 decreased_pressure = update_bb_reg_pressure (bb, from);
2933 }
2934 /* Terminate the search if the distance for which EXPR is allowed to
2935 move is exhausted. */
2936 if (distance > 0)
2937 {
2938 if (flag_ira_hoist_pressure)
2939 {
2940 /* Prefer to hoist EXPR if register pressure is decreased. */
2941 if (decreased_pressure > *nregs)
2942 distance += bb_size[bb->index];
2943 /* Let EXPR be hoisted through a basic block at no cost if one
2944 of the following conditions is satisfied:
2945 
2946 1. The basic block has low register pressure.
2947 2. Register pressure won't be increased after hoisting EXPR.
2948 
2949 Constant expressions are handled conservatively, because
2950 hoisting constant expressions aggressively results in worse
2951 code. This decision is based on observations of CSiBE
2952 on the ARM target; it has no obvious effect on other
2953 targets like x86, x86_64, mips and powerpc. */
2954 else if (CONST_INT_P (expr->expr)
2955 || (BB_DATA (bb)->max_reg_pressure[pressure_class]
2956 >= ira_class_hard_regs_num[pressure_class]
2957 && decreased_pressure < *nregs))
2958 distance -= bb_size[bb->index];
2959 }
2960 else
2961 distance -= bb_size[bb->index];
2962
2963 if (distance <= 0)
2964 return 0;
2965 }
2966 else
2967 gcc_assert (distance == 0);
2968
2969 if (visited == NULL)
2970 {
2971 visited_allocated_locally = 1;
2972 visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
2973 bitmap_clear (visited);
2974 }
2975
2976 FOR_EACH_EDGE (pred, ei, bb->preds)
2977 {
2978 basic_block pred_bb = pred->src;
2979
2980 if (pred->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2981 break;
2982 else if (pred_bb == expr_bb)
2983 continue;
2984 else if (bitmap_bit_p (visited, pred_bb->index))
2985 continue;
2986 else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
2987 break;
2988 /* Not killed. */
2989 else
2990 {
2991 bitmap_set_bit (visited, pred_bb->index);
2992 if (! should_hoist_expr_to_dom (expr_bb, expr, pred_bb,
2993 visited, distance, bb_size,
2994 pressure_class, nregs,
2995 hoisted_bbs, from))
2996 break;
2997 }
2998 }
2999 if (visited_allocated_locally)
3000 {
3001 /* If EXPR can be hoisted to expr_bb, record basic blocks through
3002 which EXPR is hoisted in hoisted_bbs. */
3003 if (flag_ira_hoist_pressure && !pred)
3004 {
3005 /* Record the basic block from which EXPR is hoisted. */
3006 bitmap_set_bit (visited, bb->index);
3007 EXECUTE_IF_SET_IN_BITMAP (visited, 0, i, sbi)
3008 bitmap_set_bit (hoisted_bbs, i);
3009 }
3010 sbitmap_free (visited);
3011 }
3012
3013 return (pred == NULL);
3014 }
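
/* Distance bookkeeping example (all numbers invented): with distance 80
   for an expression, walking up through a 50-insn predecessor block
   under high register pressure leaves distance = 30; a second such
   block would drive it to -20 <= 0 and the walk answers "do not hoist".
   With -fira-hoist-pressure, a block where the hoist would lower
   pressure by more than the output needs adds its size back instead of
   subtracting it.  */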
3015 \f
3016 /* Find occurrence in BB. */
3017
3018 static struct gcse_occr *
3019 find_occr_in_bb (struct gcse_occr *occr, basic_block bb)
3020 {
3021 /* Find the right occurrence of this expression. */
3022 while (occr && BLOCK_FOR_INSN (occr->insn) != bb)
3023 occr = occr->next;
3024
3025 return occr;
3026 }
3027
3028 /* Actually perform code hoisting.
3029
3030 The code hoisting pass can hoist multiple computations of the same
3031 expression along dominated paths to a dominating basic block, like
3032 from b2/b3 to b1 as depicted below:
3033
3034 b1 ------
3035 /\ |
3036 / \ |
3037 bx by distance
3038 / \ |
3039 / \ |
3040 b2 b3 ------
3041
3042 Unfortunately code hoisting generally extends the live range of an
3043 output pseudo register, which increases register pressure and hurts
3044 register allocation. To address this issue, an attribute MAX_DISTANCE
3045 is computed and attached to each expression. The attribute is computed
3046 from the rtx cost of the corresponding expression and is used to control
3047 how far the expression can be hoisted up the flow graph. As the
3048 expression is hoisted up the flow graph, GCC decreases its DISTANCE
3049 and stops the hoist if DISTANCE reaches 0. Code hoisting can decrease
3050 register pressure if live ranges of inputs are shrunk.
3051
3052 Option "-fira-hoist-pressure" implements register-pressure-directed
3053 hoisting based on the method above. The rationale is:
3054 1. Calculate register pressure for each basic block by reusing the
3055 IRA facility.
3056 2. When an expression is hoisted through one basic block, GCC checks
3057 how the live ranges of its inputs and output change. The basic
3058 block's register pressure will be increased because of the extended
3059 live range of the output. However, register pressure will be
3060 decreased if the live ranges of the inputs are shrunk.
3061 3. Knowing how hoisting affects register pressure, GCC prefers to
3062 hoist the expression if doing so decreases register pressure, by
3063 increasing the DISTANCE of the corresponding expression.
3064 4. If hoisting the expression increases register pressure, GCC checks
3065 the register pressure of the basic block and decreases DISTANCE only
3066 if the register pressure is high. In other words, the expression
3067 will be hoisted through at no cost if the basic block has low
3068 register pressure.
3069 5. Update register pressure information for basic blocks through
3070 which the expression is hoisted. */
3071
3072 static int
3073 hoist_code (void)
3074 {
3075 basic_block bb, dominated;
3076 vec<basic_block> dom_tree_walk;
3077 unsigned int dom_tree_walk_index;
3078 vec<basic_block> domby;
3079 unsigned int i, j, k;
3080 struct gcse_expr **index_map;
3081 struct gcse_expr *expr;
3082 int *to_bb_head;
3083 int *bb_size;
3084 int changed = 0;
3085 struct bb_data *data;
3086 /* Basic blocks that have occurrences reachable from BB. */
3087 bitmap from_bbs;
3088 /* Basic blocks through which expr is hoisted. */
3089 bitmap hoisted_bbs = NULL;
3090 bitmap_iterator bi;
3091
3092 /* Compute a mapping from expression number (`bitmap_index') to
3093 hash table entry. */
3094
3095 index_map = XCNEWVEC (struct gcse_expr *, expr_hash_table.n_elems);
3096 for (i = 0; i < expr_hash_table.size; i++)
3097 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
3098 index_map[expr->bitmap_index] = expr;
3099
3100 /* Calculate sizes of basic blocks and note how far
3101 each instruction is from the start of its block. We then use this
3102 data to restrict the distance an expression can travel. */
3103
3104 to_bb_head = XCNEWVEC (int, get_max_uid ());
3105 bb_size = XCNEWVEC (int, last_basic_block_for_fn (cfun));
3106
3107 FOR_EACH_BB_FN (bb, cfun)
3108 {
3109 rtx_insn *insn;
3110 int to_head;
3111
3112 to_head = 0;
3113 FOR_BB_INSNS (bb, insn)
3114 {
3115 /* Don't count debug instructions, to keep them from
3116 affecting hoisting decisions. */
3117 if (NONDEBUG_INSN_P (insn))
3118 to_bb_head[INSN_UID (insn)] = to_head++;
3119 }
3120
3121 bb_size[bb->index] = to_head;
3122 }
3123
3124 gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs) == 1
3125 && (EDGE_SUCC (ENTRY_BLOCK_PTR_FOR_FN (cfun), 0)->dest
3126 == ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb));
3127
3128 from_bbs = BITMAP_ALLOC (NULL);
3129 if (flag_ira_hoist_pressure)
3130 hoisted_bbs = BITMAP_ALLOC (NULL);
3131
3132 dom_tree_walk = get_all_dominated_blocks (CDI_DOMINATORS,
3133 ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb);
3134
3135 /* Walk over each basic block looking for potentially hoistable
3136 expressions; nothing gets hoisted from the entry block. */
3137 FOR_EACH_VEC_ELT (dom_tree_walk, dom_tree_walk_index, bb)
3138 {
3139 domby = get_dominated_to_depth (CDI_DOMINATORS, bb, MAX_HOIST_DEPTH);
3140
3141 if (domby.length () == 0)
3142 continue;
3143
3144 /* Examine each expression that is very busy at the exit of this
3145 block. These are the potentially hoistable expressions. */
3146 for (i = 0; i < SBITMAP_SIZE (hoist_vbeout[bb->index]); i++)
3147 {
3148 if (bitmap_bit_p (hoist_vbeout[bb->index], i))
3149 {
3150 int nregs = 0;
3151 enum reg_class pressure_class = NO_REGS;
3152 /* Current expression. */
3153 struct gcse_expr *expr = index_map[i];
3154 /* Number of occurrences of EXPR that can be hoisted to BB. */
3155 int hoistable = 0;
3156 /* Occurrences reachable from BB. */
3157 vec<occr_t> occrs_to_hoist = vNULL;
3158 /* We want to insert the expression into BB only once, so
3159 note when we've inserted it. */
3160 int insn_inserted_p;
3161 occr_t occr;
3162
3163 /* If an expression is computed in BB and is available at end of
3164 BB, hoist all occurrences dominated by BB to BB. */
3165 if (bitmap_bit_p (comp[bb->index], i))
3166 {
3167 occr = find_occr_in_bb (expr->antic_occr, bb);
3168
3169 if (occr)
3170 {
3171 /* An occurrence might've been already deleted
3172 while processing a dominator of BB. */
3173 if (!occr->deleted_p)
3174 {
3175 gcc_assert (NONDEBUG_INSN_P (occr->insn));
3176 hoistable++;
3177 }
3178 }
3179 else
3180 hoistable++;
3181 }
3182
3183 /* We've found a potentially hoistable expression, now
3184 we look at every block BB dominates to see if it
3185 computes the expression. */
3186 FOR_EACH_VEC_ELT (domby, j, dominated)
3187 {
3188 int max_distance;
3189
3190 /* Ignore self dominance. */
3191 if (bb == dominated)
3192 continue;
3193 /* We've found a dominated block, now see if it computes
3194 the busy expression and whether or not moving that
3195 expression to the "beginning" of that block is safe. */
3196 if (!bitmap_bit_p (antloc[dominated->index], i))
3197 continue;
3198
3199 occr = find_occr_in_bb (expr->antic_occr, dominated);
3200 gcc_assert (occr);
3201
3202 /* An occurrence might've been already deleted
3203 while processing a dominator of BB. */
3204 if (occr->deleted_p)
3205 continue;
3206 gcc_assert (NONDEBUG_INSN_P (occr->insn));
3207
3208 max_distance = expr->max_distance;
3209 if (max_distance > 0)
3210 /* Adjust MAX_DISTANCE to account for the fact that
3211 OCCR won't have to travel all of DOMINATED, but
3212 only part of it. */
3213 max_distance += (bb_size[dominated->index]
3214 - to_bb_head[INSN_UID (occr->insn)]);
3215
3216 pressure_class = get_pressure_class_and_nregs (occr->insn,
3217 &nregs);
3218
3219 /* Note if the expression should be hoisted from the dominated
3220 block to BB if it can reach DOMINATED unimpaired.
3221
3222 Keep track of how many times this expression is hoistable
3223 from a dominated block into BB. */
3224 if (should_hoist_expr_to_dom (bb, expr, dominated, NULL,
3225 max_distance, bb_size,
3226 pressure_class, &nregs,
3227 hoisted_bbs, occr->insn))
3228 {
3229 hoistable++;
3230 occrs_to_hoist.safe_push (occr);
3231 bitmap_set_bit (from_bbs, dominated->index);
3232 }
3233 }
3234
3235 /* If we found more than one hoistable occurrence of this
3236 expression, then note it in the vector of expressions to
3237 hoist. It makes no sense to hoist things which are computed
3238 in only one BB, and doing so tends to pessimize register
3239 allocation. One could increase this value to try harder
3240 to avoid any possible code expansion due to register
3241 allocation issues; however experiments have shown that
3242 the vast majority of hoistable expressions are only movable
3243 from two successors, so raising this threshold is likely
3244 to nullify any benefit we get from code hoisting. */
3245 if (hoistable > 1 && dbg_cnt (hoist_insn))
3246 {
3247 /* If (hoistable != vec::length), then there is
3248 an occurrence of EXPR in BB itself. Don't waste
3249 time looking for LCA in this case. */
3250 if ((unsigned) hoistable == occrs_to_hoist.length ())
3251 {
3252 basic_block lca;
3253
3254 lca = nearest_common_dominator_for_set (CDI_DOMINATORS,
3255 from_bbs);
3256 if (lca != bb)
3257 /* Punt, it's better to hoist these occurrences to
3258 LCA. */
3259 occrs_to_hoist.release ();
3260 }
3261 }
3262 else
3263 /* Punt, no point hoisting a single occurrence. */
3264 occrs_to_hoist.release ();
3265
3266 if (flag_ira_hoist_pressure
3267 && !occrs_to_hoist.is_empty ())
3268 {
3269 /* Increase register pressure of basic blocks to which
3270 expr is hoisted because of extended live range of
3271 output. */
3272 data = BB_DATA (bb);
3273 data->max_reg_pressure[pressure_class] += nregs;
3274 EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
3275 {
3276 data = BB_DATA (BASIC_BLOCK_FOR_FN (cfun, k));
3277 data->max_reg_pressure[pressure_class] += nregs;
3278 }
3279 }
3280 else if (flag_ira_hoist_pressure)
3281 {
3282 /* Restore register pressure and live_in info for basic
3283 blocks recorded in hoisted_bbs when expr will not be
3284 hoisted. */
3285 EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
3286 {
3287 data = BB_DATA (BASIC_BLOCK_FOR_FN (cfun, k));
3288 bitmap_copy (data->live_in, data->backup);
3289 data->max_reg_pressure[pressure_class]
3290 = data->old_pressure;
3291 }
3292 }
3293
3294 if (flag_ira_hoist_pressure)
3295 bitmap_clear (hoisted_bbs);
3296
3297 insn_inserted_p = 0;
3298
3299 /* Walk through occurrences of the I'th expression we want
3300 to hoist to BB and make the transformations. */
3301 FOR_EACH_VEC_ELT (occrs_to_hoist, j, occr)
3302 {
3303 rtx_insn *insn;
3304 const_rtx set;
3305
3306 gcc_assert (!occr->deleted_p);
3307
3308 insn = occr->insn;
3309 set = single_set_gcse (insn);
3310
3311 /* Create a pseudo-reg to store the result of reaching
3312 expressions into. Get the mode for the new pseudo
3313 from the mode of the original destination pseudo.
3314
3315 It is important to use new pseudos whenever we
3316 emit a set. This will allow reload to use
3317 rematerialization for such registers. */
3318 if (!insn_inserted_p)
3319 expr->reaching_reg
3320 = gen_reg_rtx_and_attrs (SET_DEST (set));
3321
3322 gcse_emit_move_after (SET_DEST (set), expr->reaching_reg,
3323 insn);
3324 delete_insn (insn);
3325 occr->deleted_p = 1;
3326 changed = 1;
3327 gcse_subst_count++;
3328
3329 if (!insn_inserted_p)
3330 {
3331 insert_insn_end_basic_block (expr, bb);
3332 insn_inserted_p = 1;
3333 }
3334 }
3335
3336 occrs_to_hoist.release ();
3337 bitmap_clear (from_bbs);
3338 }
3339 }
3340 domby.release ();
3341 }
3342
3343 dom_tree_walk.release ();
3344 BITMAP_FREE (from_bbs);
3345 if (flag_ira_hoist_pressure)
3346 BITMAP_FREE (hoisted_bbs);
3347
3348 free (bb_size);
3349 free (to_bb_head);
3350 free (index_map);
3351
3352 return changed;
3353 }
3354
3355 /* Return pressure class and number of needed hard registers (through
3356 *NREGS) of register REGNO. */
3357 static enum reg_class
3358 get_regno_pressure_class (int regno, int *nregs)
3359 {
3360 if (regno >= FIRST_PSEUDO_REGISTER)
3361 {
3362 enum reg_class pressure_class;
3363
3364 pressure_class = reg_allocno_class (regno);
3365 pressure_class = ira_pressure_class_translate[pressure_class];
3366 *nregs
3367 = ira_reg_class_max_nregs[pressure_class][PSEUDO_REGNO_MODE (regno)];
3368 return pressure_class;
3369 }
3370 else if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)
3371 && ! TEST_HARD_REG_BIT (eliminable_regset, regno))
3372 {
3373 *nregs = 1;
3374 return ira_pressure_class_translate[REGNO_REG_CLASS (regno)];
3375 }
3376 else
3377 {
3378 *nregs = 0;
3379 return NO_REGS;
3380 }
3381 }
3382
3383 /* Return pressure class and number of hard registers (through *NREGS)
3384 for destination of INSN. */
3385 static enum reg_class
3386 get_pressure_class_and_nregs (rtx_insn *insn, int *nregs)
3387 {
3388 rtx reg;
3389 enum reg_class pressure_class;
3390 const_rtx set = single_set_gcse (insn);
3391
3392 reg = SET_DEST (set);
3393 if (GET_CODE (reg) == SUBREG)
3394 reg = SUBREG_REG (reg);
3395 if (MEM_P (reg))
3396 {
3397 *nregs = 0;
3398 pressure_class = NO_REGS;
3399 }
3400 else
3401 {
3402 gcc_assert (REG_P (reg));
3403 pressure_class = reg_allocno_class (REGNO (reg));
3404 pressure_class = ira_pressure_class_translate[pressure_class];
3405 *nregs
3406 = ira_reg_class_max_nregs[pressure_class][GET_MODE (SET_SRC (set))];
3407 }
3408 return pressure_class;
3409 }
3410
3411 /* Increase (if INCR_P) or decrease current register pressure for
3412 register REGNO. */
3413 static void
3414 change_pressure (int regno, bool incr_p)
3415 {
3416 int nregs;
3417 enum reg_class pressure_class;
3418
3419 pressure_class = get_regno_pressure_class (regno, &nregs);
3420 if (! incr_p)
3421 curr_reg_pressure[pressure_class] -= nregs;
3422 else
3423 {
3424 curr_reg_pressure[pressure_class] += nregs;
3425 if (BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
3426 < curr_reg_pressure[pressure_class])
3427 BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
3428 = curr_reg_pressure[pressure_class];
3429 }
3430 }
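
/* For example, if REGNO belongs to a pressure class needing two hard
   registers (nregs == 2), change_pressure (regno, true) adds 2 to
   curr_reg_pressure for that class and, when the new total exceeds the
   recorded maximum, raises max_reg_pressure for the current block
   accordingly.  */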
3431
3432 /* Calculate register pressure for each basic block by walking insns
3433 from last to first. */
static void
calculate_bb_reg_pressure (void)
{
  int i;
  unsigned int j;
  rtx_insn *insn;
  basic_block bb;
  bitmap curr_regs_live;
  bitmap_iterator bi;

  ira_setup_eliminable_regset ();
  curr_regs_live = BITMAP_ALLOC (&reg_obstack);
  FOR_EACH_BB_FN (bb, cfun)
    {
      curr_bb = bb;
      BB_DATA (bb)->live_in = BITMAP_ALLOC (NULL);
      BB_DATA (bb)->backup = BITMAP_ALLOC (NULL);
      bitmap_copy (BB_DATA (bb)->live_in, df_get_live_in (bb));
      bitmap_copy (curr_regs_live, df_get_live_out (bb));
      for (i = 0; i < ira_pressure_classes_num; i++)
        curr_reg_pressure[ira_pressure_classes[i]] = 0;
      EXECUTE_IF_SET_IN_BITMAP (curr_regs_live, 0, j, bi)
        change_pressure (j, true);

      FOR_BB_INSNS_REVERSE (bb, insn)
        {
          rtx dreg;
          int regno;
          df_ref def, use;

          if (! NONDEBUG_INSN_P (insn))
            continue;

          FOR_EACH_INSN_DEF (def, insn)
            {
              dreg = DF_REF_REAL_REG (def);
              gcc_assert (REG_P (dreg));
              regno = REGNO (dreg);
              if (!(DF_REF_FLAGS (def)
                    & (DF_REF_PARTIAL | DF_REF_CONDITIONAL)))
                {
                  if (bitmap_clear_bit (curr_regs_live, regno))
                    change_pressure (regno, false);
                }
            }

          FOR_EACH_INSN_USE (use, insn)
            {
              dreg = DF_REF_REAL_REG (use);
              gcc_assert (REG_P (dreg));
              regno = REGNO (dreg);
              if (bitmap_set_bit (curr_regs_live, regno))
                change_pressure (regno, true);
            }
        }
    }
  BITMAP_FREE (curr_regs_live);

  if (dump_file == NULL)
    return;

  fprintf (dump_file, "\nRegister Pressure: \n");
  FOR_EACH_BB_FN (bb, cfun)
    {
      fprintf (dump_file, " Basic block %d: \n", bb->index);
      for (i = 0; i < ira_pressure_classes_num; i++)
        {
          enum reg_class pressure_class;

          pressure_class = ira_pressure_classes[i];
          if (BB_DATA (bb)->max_reg_pressure[pressure_class] == 0)
            continue;

          fprintf (dump_file, " %s=%d\n", reg_class_names[pressure_class],
                   BB_DATA (bb)->max_reg_pressure[pressure_class]);
        }
    }
  fprintf (dump_file, "\n");
}

/* Top level routine to perform one code hoisting (aka unification) pass

   Return nonzero if a change was made.  */

static int
one_code_hoisting_pass (void)
{
  int changed = 0;

  gcse_subst_count = 0;
  gcse_create_count = 0;

  /* Return if there's nothing to do, or it is too expensive.  */
  if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
      || is_too_expensive (_("GCSE disabled")))
    return 0;

  doing_code_hoisting_p = true;

  /* Calculate register pressure for each basic block.  */
  if (flag_ira_hoist_pressure)
    {
      regstat_init_n_sets_and_refs ();
      ira_set_pseudo_classes (false, dump_file);
      alloc_aux_for_blocks (sizeof (struct bb_data));
      calculate_bb_reg_pressure ();
      regstat_free_n_sets_and_refs ();
    }

  /* We need alias analysis.  */
  init_alias_analysis ();

  bytes_used = 0;
  gcc_obstack_init (&gcse_obstack);
  alloc_gcse_mem ();

  alloc_hash_table (&expr_hash_table);
  compute_hash_table (&expr_hash_table);
  if (dump_file)
    dump_hash_table (dump_file, "Code Hoisting Expressions", &expr_hash_table);

  if (expr_hash_table.n_elems > 0)
    {
      alloc_code_hoist_mem (last_basic_block_for_fn (cfun),
                            expr_hash_table.n_elems);
      compute_code_hoist_data ();
      changed = hoist_code ();
      free_code_hoist_mem ();
    }

  if (flag_ira_hoist_pressure)
    {
      free_aux_for_blocks ();
      free_reg_info ();
    }
  free_hash_table (&expr_hash_table);
  free_gcse_mem ();
  obstack_free (&gcse_obstack, NULL);

  /* We are finished with alias analysis.  */
  end_alias_analysis ();

  if (dump_file)
    {
      fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
               current_function_name (), n_basic_blocks_for_fn (cfun),
               bytes_used);
      fprintf (dump_file, "%d substs, %d insns created\n",
               gcse_subst_count, gcse_create_count);
    }

  doing_code_hoisting_p = false;

  return changed;
}
\f
/* Here we provide the things required to do store motion towards the exit.
   In order for this to be effective, gcse also needed to be taught how to
   move a load when it is killed only by a store to itself.

            int i;
            float a[10];

            void foo(float scale)
            {
              for (i=0; i<10; i++)
                a[i] *= scale;
            }

   'i' is both loaded and stored to in the loop.  Normally, gcse cannot move
   the load out since it's live around the loop, and stored at the bottom
   of the loop.

   The 'Load Motion' referred to and implemented in this file is
   an enhancement to gcse which, when using edge-based LCM, recognizes
   this situation and allows gcse to move the load out of the loop.

   Once gcse has hoisted the load, store motion can then push this
   load towards the exit, and we end up with no loads or stores of 'i'
   in the loop.  */
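/* A sketch of the end result we are after for the example above; the
   temporary is illustrative (in practice it is a pseudo register):

            void foo(float scale)
            {
              int i_tmp;
              for (i_tmp = 0; i_tmp < 10; i_tmp++)
                a[i_tmp] *= scale;
              i = i_tmp;        // the only remaining access to 'i'
            }  */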

/* Search the ldst list for a matching expression; if none is found,
   create a new entry and initialize it.  */

static struct ls_expr *
ldst_entry (rtx x)
{
  int do_not_record_p = 0;
  struct ls_expr * ptr;
  unsigned int hash;
  ls_expr **slot;
  struct ls_expr e;

  hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
                   NULL, /*have_reg_qty=*/false);

  e.pattern = x;
  slot = pre_ldst_table->find_slot_with_hash (&e, hash, INSERT);
  if (*slot)
    return *slot;

  ptr = XNEW (struct ls_expr);

  ptr->next = pre_ldst_mems;
  ptr->expr = NULL;
  ptr->pattern = x;
  ptr->pattern_regs = NULL_RTX;
  ptr->loads = NULL;
  ptr->stores = NULL;
  ptr->reaching_reg = NULL_RTX;
  ptr->invalid = 0;
  ptr->index = 0;
  ptr->hash_index = hash;
  pre_ldst_mems = ptr;
  *slot = ptr;

  return ptr;
}

/* Free up an individual ldst entry.  */

static void
free_ldst_entry (struct ls_expr * ptr)
{
  free_INSN_LIST_list (& ptr->loads);
  free_INSN_LIST_list (& ptr->stores);

  free (ptr);
}

/* Free up all memory associated with the ldst list.  */

static void
free_ld_motion_mems (void)
{
  delete pre_ldst_table;
  pre_ldst_table = NULL;

  while (pre_ldst_mems)
    {
      struct ls_expr * tmp = pre_ldst_mems;

      pre_ldst_mems = pre_ldst_mems->next;

      free_ldst_entry (tmp);
    }

  pre_ldst_mems = NULL;
}

/* Dump debugging info about the ldst list.  */

static void
print_ldst_list (FILE * file)
{
  struct ls_expr * ptr;

  fprintf (file, "LDST list: \n");

  for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
    {
      fprintf (file, " Pattern (%3d): ", ptr->index);

      print_rtl (file, ptr->pattern);

      fprintf (file, "\n Loads : ");

      if (ptr->loads)
        print_rtl (file, ptr->loads);
      else
        fprintf (file, "(nil)");

      fprintf (file, "\n Stores : ");

      if (ptr->stores)
        print_rtl (file, ptr->stores);
      else
        fprintf (file, "(nil)");

      fprintf (file, "\n\n");
    }

  fprintf (file, "\n");
}

/* Return the entry for X in the ldst list, or NULL if there is no
   valid entry for it.  */

static struct ls_expr *
find_rtx_in_ldst (rtx x)
{
  struct ls_expr e;
  ls_expr **slot;
  if (!pre_ldst_table)
    return NULL;
  e.pattern = x;
  slot = pre_ldst_table->find_slot (&e, NO_INSERT);
  if (!slot || (*slot)->invalid)
    return NULL;
  return *slot;
}
\f
/* Load Motion for loads which only kill themselves.  */

/* Return true if X, a MEM, is a simple access with no side effects.
   These are the types of loads we consider for the ld_motion list;
   otherwise we let the usual aliasing take care of it.  */
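/* For instance (schematic RTL, simplified from real dumps): a load
   such as (mem:SI (symbol_ref ("x"))) passes all of the tests below,
   whereas a volatile MEM, a BLKmode MEM, or a MEM that mentions
   stack_pointer_rtx is rejected.  */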

static int
simple_mem (const_rtx x)
{
  if (MEM_VOLATILE_P (x))
    return 0;

  if (GET_MODE (x) == BLKmode)
    return 0;

  /* If we are handling exceptions, we must be careful with memory references
     that may trap.  If we are not, the behavior is undefined, so we may just
     continue.  */
  if (cfun->can_throw_non_call_exceptions && may_trap_p (x))
    return 0;

  if (side_effects_p (x))
    return 0;

  /* Do not consider function arguments passed on stack.  */
  if (reg_mentioned_p (stack_pointer_rtx, x))
    return 0;

  if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
    return 0;

  return 1;
}

/* Make sure there isn't a buried reference in this pattern anywhere.
   If there is, invalidate the entry for it since we're not capable
   of fixing it up just yet.  We have to be sure we know about ALL
   loads since the aliasing code will allow all entries in the
   ld_motion list to not alias themselves.  If we miss a load, we
   will get the wrong value since gcse might common it and we won't
   know to fix it up.  */

static void
invalidate_any_buried_refs (rtx x)
{
  const char * fmt;
  int i, j;
  struct ls_expr * ptr;

  /* Invalidate it in the list.  */
  if (MEM_P (x) && simple_mem (x))
    {
      ptr = ldst_entry (x);
      ptr->invalid = 1;
    }

  /* Recursively process the insn.  */
  fmt = GET_RTX_FORMAT (GET_CODE (x));

  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        invalidate_any_buried_refs (XEXP (x, i));
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          invalidate_any_buried_refs (XVECEXP (x, i, j));
    }
}

/* Find all the 'simple' MEMs which are used in LOADs and STORES.  Simple
   being defined as MEM loads and stores to symbols, with no side effects
   and no registers in the expression.  For a MEM destination, we also
   check that the insn is still valid if we replace the destination with a
   REG, as is done in update_ld_motion_stores.  Any uses/defs which don't
   match these criteria are invalidated and trimmed out later.  */
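/* Schematically (simplified RTL, with an invented register number):
   (set (reg:SI 100) (mem:SI (symbol_ref ("x")))) is recorded as a load
   of that MEM, and (set (mem:SI (symbol_ref ("x"))) (reg:SI 100)) as a
   store; any other appearance of a simple MEM invalidates its entry.  */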

static void
compute_ld_motion_mems (void)
{
  struct ls_expr * ptr;
  basic_block bb;
  rtx_insn *insn;

  pre_ldst_mems = NULL;
  pre_ldst_table = new hash_table<pre_ldst_expr_hasher> (13);

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS (bb, insn)
        {
          if (NONDEBUG_INSN_P (insn))
            {
              if (GET_CODE (PATTERN (insn)) == SET)
                {
                  rtx src = SET_SRC (PATTERN (insn));
                  rtx dest = SET_DEST (PATTERN (insn));
                  rtx note = find_reg_equal_equiv_note (insn);
                  rtx src_eq;

                  /* Check for a simple LOAD...  */
                  if (MEM_P (src) && simple_mem (src))
                    {
                      ptr = ldst_entry (src);
                      if (REG_P (dest))
                        ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
                      else
                        ptr->invalid = 1;
                    }
                  else
                    {
                      /* Make sure there isn't a buried load somewhere.  */
                      invalidate_any_buried_refs (src);
                    }

                  if (note != 0 && REG_NOTE_KIND (note) == REG_EQUAL)
                    src_eq = XEXP (note, 0);
                  else
                    src_eq = NULL_RTX;

                  if (src_eq != NULL_RTX
                      && !(MEM_P (src_eq) && simple_mem (src_eq)))
                    invalidate_any_buried_refs (src_eq);

                  /* Check for stores.  Don't worry about aliased ones, they
                     will block any movement we might do later.  We only care
                     about this exact pattern since those are the only
                     circumstances in which we will ignore the aliasing
                     info.  */
                  if (MEM_P (dest) && simple_mem (dest))
                    {
                      ptr = ldst_entry (dest);

                      if (! MEM_P (src)
                          && GET_CODE (src) != ASM_OPERANDS
                          /* Check for REG manually since want_to_gcse_p
                             returns 0 for all REGs.  */
                          && can_assign_to_reg_without_clobbers_p (src))
                        ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
                      else
                        ptr->invalid = 1;
                    }
                }
              else
                invalidate_any_buried_refs (PATTERN (insn));
            }
        }
    }
}

/* Remove any references that have been either invalidated or are not in the
   expression list for pre gcse.  */

static void
trim_ld_motion_mems (void)
{
  struct ls_expr * * last = & pre_ldst_mems;
  struct ls_expr * ptr = pre_ldst_mems;

  while (ptr != NULL)
    {
      struct gcse_expr * expr;

      /* Delete if entry has been made invalid.  */
      if (! ptr->invalid)
        {
          /* Delete if we cannot find this mem in the expression list.  */
          unsigned int hash = ptr->hash_index % expr_hash_table.size;

          for (expr = expr_hash_table.table[hash];
               expr != NULL;
               expr = expr->next_same_hash)
            if (expr_equiv_p (expr->expr, ptr->pattern))
              break;
        }
      else
        expr = (struct gcse_expr *) 0;

      if (expr)
        {
          /* Set the expression field if we are keeping it.  */
          ptr->expr = expr;
          last = & ptr->next;
          ptr = ptr->next;
        }
      else
        {
          *last = ptr->next;
          pre_ldst_table->remove_elt_with_hash (ptr, ptr->hash_index);
          free_ldst_entry (ptr);
          ptr = * last;
        }
    }

  /* Show the world what we've found.  */
  if (dump_file && pre_ldst_mems != NULL)
    print_ldst_list (dump_file);
}

/* This routine will take an expression which we are replacing with
   a reaching register, and update any stores that are needed if
   that expression is in the ld_motion list.  Stores are updated by
   copying their SRC to the reaching register, and then storing
   the reaching register into the store location.  This keeps the
   correct value in the reaching register for the loads.  */
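/* Schematically (the register number is invented), a recorded store

        (set (mem:SI (symbol_ref ("x"))) (expr))

   is rewritten below into the pair

        (set (reg:SI 100) (expr))
        (set (mem:SI (symbol_ref ("x"))) (reg:SI 100))

   where (reg:SI 100) is EXPR's reaching register.  */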

static void
update_ld_motion_stores (struct gcse_expr * expr)
{
  struct ls_expr * mem_ptr;

  if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
    {
      /* We can try to find just the REACHED stores, but it shouldn't
         matter if we set the reaching reg everywhere...  some might be
         dead and should be eliminated later.  */

      /* We replace (set mem expr) with (set reg expr) (set mem reg)
         where reg is the reaching reg used in the load.  We checked in
         compute_ld_motion_mems that we can replace (set mem expr) with
         (set reg expr) in that insn.  */
      rtx list = mem_ptr->stores;

      for ( ; list != NULL_RTX; list = XEXP (list, 1))
        {
          rtx_insn *insn = as_a <rtx_insn *> (XEXP (list, 0));
          rtx pat = PATTERN (insn);
          rtx src = SET_SRC (pat);
          rtx reg = expr->reaching_reg;
          rtx copy;

          /* If we've already copied it, continue.  */
          if (expr->reaching_reg == src)
            continue;

          if (dump_file)
            {
              fprintf (dump_file, "PRE: store updated with reaching reg ");
              print_rtl (dump_file, reg);
              fprintf (dump_file, ":\n ");
              print_inline_rtx (dump_file, insn, 8);
              fprintf (dump_file, "\n");
            }

          copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
          emit_insn_before (copy, insn);
          SET_SRC (pat) = reg;
          df_insn_rescan (insn);

          /* Un-recognize this pattern since it's probably different now.  */
          INSN_CODE (insn) = -1;
          gcse_create_count++;
        }
    }
}
\f
/* Return true if the graph is too expensive to optimize.  PASS is the
   optimization about to be performed.  */

static bool
is_too_expensive (const char *pass)
{
  /* Trying to perform global optimizations on flow graphs which have
     a high connectivity will take a long time and is unlikely to be
     particularly useful.

     In normal circumstances a cfg should have about twice as many
     edges as blocks.  But we do not want to punish small functions
     which have a couple of switch statements.  Rather than simply
     thresholding the number of blocks, we use something with a more
     graceful degradation.  */
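  /* As an illustration of the bound below: a function with 1,000 basic
     blocks (an invented count) may have up to 20000 + 4 * 1000 = 24,000
     edges before the pass declines to run.  */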
  if (n_edges_for_fn (cfun) > 20000 + n_basic_blocks_for_fn (cfun) * 4)
    {
      warning (OPT_Wdisabled_optimization,
               "%s: %d basic blocks and %d edges/basic block",
               pass, n_basic_blocks_for_fn (cfun),
               n_edges_for_fn (cfun) / n_basic_blocks_for_fn (cfun));

      return true;
    }

  /* If allocating memory for the dataflow bitmaps would take up too much
     storage it's better just to disable the optimization.  */
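  /* As an illustration (the counts are invented): with 10,000 blocks
     and 10,000 registers, each block needs a bitmap of 10,000 bits,
     i.e. 1,250 bytes, so the total is about 12.5 MB, which is then
     checked against MAX_GCSE_MEMORY.  */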
  if ((n_basic_blocks_for_fn (cfun)
       * SBITMAP_SET_SIZE (max_reg_num ())
       * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
    {
      warning (OPT_Wdisabled_optimization,
               "%s: %d basic blocks and %d registers",
               pass, n_basic_blocks_for_fn (cfun), max_reg_num ());

      return true;
    }

  return false;
}
\f
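/* Delete unreachable blocks, refresh the dataflow information, and run
   one PRE pass; if anything changed, request a rerun of CSE and clean
   up the CFG.  */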
static unsigned int
execute_rtl_pre (void)
{
  int changed;
  delete_unreachable_blocks ();
  df_analyze ();
  changed = one_pre_gcse_pass ();
  flag_rerun_cse_after_global_opts |= changed;
  if (changed)
    cleanup_cfg (0);
  return 0;
}

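/* Likewise, but run one code hoisting pass instead of PRE.  */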
static unsigned int
execute_rtl_hoist (void)
{
  int changed;
  delete_unreachable_blocks ();
  df_analyze ();
  changed = one_code_hoisting_pass ();
  flag_rerun_cse_after_global_opts |= changed;
  if (changed)
    cleanup_cfg (0);
  return 0;
}

namespace {

const pass_data pass_data_rtl_pre =
{
  RTL_PASS, /* type */
  "rtl pre", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_PRE, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_rtl_pre : public rtl_opt_pass
{
public:
  pass_rtl_pre (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_rtl_pre, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_rtl_pre (); }

}; // class pass_rtl_pre

/* We do not construct an accurate cfg in functions which call
   setjmp, so none of these passes runs if the function calls
   setjmp.
   FIXME: Should just handle setjmp via REG_SETJMP notes.  */

bool
pass_rtl_pre::gate (function *fun)
{
  return optimize > 0 && flag_gcse
    && !fun->calls_setjmp
    && optimize_function_for_speed_p (fun)
    && dbg_cnt (pre);
}

} // anon namespace

rtl_opt_pass *
make_pass_rtl_pre (gcc::context *ctxt)
{
  return new pass_rtl_pre (ctxt);
}

namespace {

const pass_data pass_data_rtl_hoist =
{
  RTL_PASS, /* type */
  "hoist", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_HOIST, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_rtl_hoist : public rtl_opt_pass
{
public:
  pass_rtl_hoist (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_rtl_hoist, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_rtl_hoist (); }

}; // class pass_rtl_hoist

bool
pass_rtl_hoist::gate (function *)
{
  return optimize > 0 && flag_gcse
    && !cfun->calls_setjmp
    /* It does not make sense to run code hoisting unless we are optimizing
       for code size -- it rarely makes programs faster, and can make them
       bigger if we did PRE (when optimizing for space, we don't run PRE).  */
    && optimize_function_for_size_p (cfun)
    && dbg_cnt (hoist);
}

} // anon namespace

rtl_opt_pass *
make_pass_rtl_hoist (gcc::context *ctxt)
{
  return new pass_rtl_hoist (ctxt);
}

/* Reset all state within gcse.c so that we can rerun the compiler
   within the same process.  For use by toplev::finalize.  */

void
gcse_c_finalize (void)
{
  test_insn = NULL;
}

#include "gt-gcse.h"