/* Partial redundancy elimination / Hoisting for RTL.
   Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
   2006, 2007, 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* TODO
   - reordering of memory allocation and freeing to be more space efficient
   - calc rough register pressure information and use the info to drive all
     kinds of code motion (including code hoisting) in a unified way.
*/

/* References searched while implementing this.

   Compilers Principles, Techniques and Tools
   Aho, Sethi, Ullman
   Addison-Wesley, 1988

   Global Optimization by Suppression of Partial Redundancies
   E. Morel, C. Renvoise
   Communications of the ACM, Vol. 22, Num. 2, Feb. 1979

   A Portable Machine-Independent Global Optimizer - Design and Measurements
   Frederick Chow
   Stanford Ph.D. thesis, Dec. 1983

   A Fast Algorithm for Code Movement Optimization
   D.M. Dhamdhere
   SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988

   A Solution to a Problem with Morel and Renvoise's
   Global Optimization by Suppression of Partial Redundancies
   K-H Drechsler, M.P. Stadel
   ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988

   Practical Adaptation of the Global Optimization
   Algorithm of Morel and Renvoise
   D.M. Dhamdhere
   ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991

   Efficiently Computing Static Single Assignment Form and the Control
   Dependence Graph
   R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
   ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991

   Lazy Code Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   What's In a Region?  Or Computing Control Dependence Regions in Near-Linear
   Time for Reducible Flow Control
   Thomas Ball
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   An Efficient Representation for Sparse Sets
   Preston Briggs, Linda Torczon
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
   K-H Drechsler, M.P. Stadel
   ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993

   Partial Dead Code Elimination
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Effective Partial Redundancy Elimination
   P. Briggs, K.D. Cooper
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   The Program Structure Tree: Computing Control Regions in Linear Time
   R. Johnson, D. Pearson, K. Pingali
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Optimal Code Motion: Theory and Practice
   J. Knoop, O. Ruthing, B. Steffen
   ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994

   The power of assignment motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Global code motion / global value numbering
   C. Click
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Value Driven Redundancy Elimination
   L.T. Simpson
   Rice University Ph.D. thesis, Apr. 1996

   Value Numbering
   L.T. Simpson
   Massively Scalar Compiler Project, Rice University, Sep. 1996

   High Performance Compilers for Parallel Computing
   Michael Wolfe
   Addison-Wesley, 1996

   Advanced Compiler Design and Implementation
   Steven Muchnick
   Morgan Kaufmann, 1997

   Building an Optimizing Compiler
   Robert Morgan
   Digital Press, 1998

   People wishing to speed up the code here should read:
     Elimination Algorithms for Data Flow Analysis
     B.G. Ryder, M.C. Paull
     ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986

     How to Analyze Large Programs Efficiently and Informatively
     D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
     ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   People wishing to do something different can find various possibilities
   in the above papers and elsewhere.
*/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "toplev.h"

#include "hard-reg-set.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "ira.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "basic-block.h"
#include "function.h"
#include "expr.h"
#include "except.h"
#include "ggc.h"
#include "params.h"
#include "cselib.h"
#include "intl.h"
#include "obstack.h"
#include "tree-pass.h"
#include "hashtab.h"
#include "df.h"
#include "dbgcnt.h"
#include "target.h"
#include "gcse.h"

/* We support GCSE via Partial Redundancy Elimination.  PRE optimizations
   are a superset of those done by classic GCSE.

   Two passes of copy/constant propagation are done around PRE or hoisting
   because the first one enables more GCSE and the second one helps to clean
   up the copies that PRE and HOIST create.  This is needed more for PRE than
   for HOIST because code hoisting will try to use an existing register
   containing the common subexpression rather than create a new one.  This is
   harder to do for PRE because of the code motion (which HOIST doesn't do).

   Expressions we are interested in GCSE-ing are of the form
   (set (pseudo-reg) (expression)).
   Function want_to_gcse_p says what these are.

   In addition, expressions in REG_EQUAL notes are candidates for GCSE-ing.
   This allows PRE to hoist expressions that are expressed in multiple insns,
   such as complex address calculations (e.g. for PIC code, or loads with a
   high part and a low part).
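
   As a hypothetical illustration (the exact RTL is target-dependent), a
   PIC-style address might be built in two insns whose combined value is
   carried on a note:

     (set (reg:SI 200) (high:SI (symbol_ref:SI "x")))
     (set (reg:SI 201) (lo_sum:SI (reg:SI 200) (symbol_ref:SI "x")))
        with a (expr_list:REG_EQUAL (symbol_ref:SI "x")) note

   By recording the REG_EQUAL value as the candidate expression, PRE can
   treat the whole two-insn address computation as a single unit.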

   PRE handles moving invariant expressions out of loops (by treating them as
   partially redundant).

   **********************

   We used to support multiple passes but there are diminishing returns in
   doing so.  The first pass usually makes 90% of the changes that are doable.
   A second pass can make a few more changes made possible by the first pass.
   Experiments show any further passes don't make enough changes to justify
   the expense.

   A study of spec92 using an unlimited number of passes:
   [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
   [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
   [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1

   It was found doing copy propagation between each pass enables further
   substitutions.

   This study was done before expressions in REG_EQUAL notes were added as
   candidate expressions for optimization, and before the GIMPLE optimizers
   were added.  Multiple passes are probably even less efficient now than
   at the time when the study was conducted.

   PRE is quite expensive in complicated functions because the DFA can take
   a while to converge.  Hence we only perform one pass.

   **********************

   The steps for PRE are:

   1) Build the hash table of expressions we wish to GCSE (expr_hash_table).

   2) Perform the data flow analysis for PRE.

   3) Delete the redundant instructions.

   4) Insert the required copies [if any] that make the partially
      redundant instructions fully redundant.

   5) For other reaching expressions, insert an instruction to copy the value
      to a newly created pseudo that will reach the redundant instruction.

   The deletion is done first so that when we do insertions we
   know which pseudo reg to use.
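
   A minimal sketch of steps 3-5 on a diamond CFG (pseudo-RTL; the register
   names and block layout are invented for illustration):

     before PRE:                       after PRE:
       b1:  x = a + b                    b1:  t = a + b;  x = t
       b2:  (no computation)             b2:  t = a + b    <- inserted copy
       b3:  y = a + b                    b3:  y = t        <- deletion

   The partially redundant computation in b3 is deleted, and the inserted
   copy in b2 makes it fully redundant along both incoming paths.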

   Various papers have argued that PRE DFA is expensive (O(n^2)) and others
   argue it is not.  The number of iterations for the algorithm to converge
   is typically 2-4 so I don't view it as that expensive (relatively speaking).

   PRE GCSE depends heavily on the second CPROP pass to clean up the copies
   we create.  To make an expression reach the place where it's redundant,
   the result of the expression is copied to a new register, and the redundant
   expression is deleted by replacing it with this new register.  Classic GCSE
   doesn't have this problem as much as it computes the reaching defs of
   each register in each block and thus can try to use an existing
   register.  */
\f
/* GCSE global vars.  */

struct target_gcse default_target_gcse;
#if SWITCHABLE_TARGET
struct target_gcse *this_target_gcse = &default_target_gcse;
#endif

/* Set to non-zero if CSE should run after all GCSE optimizations are done.  */
int flag_rerun_cse_after_global_opts;

/* An obstack for our working variables.  */
static struct obstack gcse_obstack;

/* Hash table of expressions.  */

struct expr
{
  /* The expression.  */
  rtx expr;
  /* Index in the available expression bitmaps.  */
  int bitmap_index;
  /* Next entry with the same hash.  */
  struct expr *next_same_hash;
  /* List of anticipatable occurrences in basic blocks in the function.
     An "anticipatable occurrence" is one that is the first occurrence in the
     basic block, the operands are not modified in the basic block prior
     to the occurrence and the output is not used between the start of
     the block and the occurrence.  */
  struct occr *antic_occr;
  /* List of available occurrences in basic blocks in the function.
     An "available occurrence" is one that is the last occurrence in the
     basic block and the operands are not modified by following statements in
     the basic block [including this insn].  */
  struct occr *avail_occr;
  /* Non-null if the computation is PRE redundant.
     The value is the newly created pseudo-reg to record a copy of the
     expression in all the places that reach the redundant copy.  */
  rtx reaching_reg;
  /* Maximum distance in instructions this expression can travel.
     We avoid moving simple expressions for more than a few instructions
     to keep register pressure under control.
     A value of "0" removes restrictions on how far the expression can
     travel.  */
  int max_distance;
};

/* Occurrence of an expression.
   There is one per basic block.  If a pattern appears more than once the
   last appearance is used [or first for anticipatable expressions].  */

struct occr
{
  /* Next occurrence of this expression.  */
  struct occr *next;
  /* The insn that computes the expression.  */
  rtx insn;
  /* Nonzero if this [anticipatable] occurrence has been deleted.  */
  char deleted_p;
  /* Nonzero if this [available] occurrence has been copied to
     reaching_reg.  */
  /* ??? This is mutually exclusive with deleted_p, so they could share
     the same byte.  */
  char copied_p;
};

typedef struct occr *occr_t;

/* Expression hash tables.
   Each hash table is an array of buckets.
   ??? It is known that if it were an array of entries, structure elements
   `next_same_hash' and `bitmap_index' wouldn't be necessary.  However, it is
   not clear whether in the final analysis a sufficient amount of memory would
   be saved as the size of the available expression bitmaps would be larger
   [one could build a mapping table without holes afterwards though].
   Someday I'll perform the computation and figure it out.  */

struct hash_table_d
{
  /* The table itself.
     This is an array of `expr_hash_table_size' elements.  */
  struct expr **table;

  /* Size of the hash table, in elements.  */
  unsigned int size;

  /* Number of hash table elements.  */
  unsigned int n_elems;
};

/* Expression hash table.  */
static struct hash_table_d expr_hash_table;

/* This is a list of expressions which are MEMs and will be used by load
   or store motion.
   Load motion tracks MEMs which aren't killed by anything except itself,
   i.e. loads and stores to a single location.
   We can then allow movement of these MEM refs with a little special
   allowance.  (All stores copy the same value to the reaching reg used
   for the loads.)  This means all values used to store into memory must have
   no side effects so we can re-issue the setter value.  */

struct ls_expr
{
  struct expr * expr;		/* Gcse expression reference for LM.  */
  rtx pattern;			/* Pattern of this mem.  */
  rtx pattern_regs;		/* List of registers mentioned by the mem.  */
  rtx loads;			/* INSN list of loads seen.  */
  rtx stores;			/* INSN list of stores seen.  */
  struct ls_expr * next;	/* Next in the list.  */
  int invalid;			/* Invalid for some reason.  */
  int index;			/* If it maps to a bitmap index.  */
  unsigned int hash_index;	/* Index when in a hash table.  */
  rtx reaching_reg;		/* Register to use when re-writing.  */
};

/* Head of the list of load/store memory refs.  */
static struct ls_expr * pre_ldst_mems = NULL;

/* Hashtable for the load/store memory refs.  */
static htab_t pre_ldst_table = NULL;

/* Bitmap containing one bit for each register in the program.
   Used when performing GCSE to track which registers have been set since
   the start of the basic block.  */
static regset reg_set_bitmap;

/* Array, indexed by basic block number for a list of insns which modify
   memory within that block.  */
static vec<rtx> *modify_mem_list;
static bitmap modify_mem_list_set;

typedef struct modify_pair_s
{
  rtx dest;			/* A MEM.  */
  rtx dest_addr;		/* The canonical address of `dest'.  */
} modify_pair;


/* This array parallels modify_mem_list, except that it stores MEMs
   being set and their canonicalized memory addresses.  */
static vec<modify_pair> *canon_modify_mem_list;

/* Bitmap indexed by block numbers to record which blocks contain
   function calls.  */
static bitmap blocks_with_calls;

/* Various variables for statistics gathering.  */

/* Memory used in a pass.
   This isn't intended to be absolutely precise.  Its intent is only
   to keep an eye on memory usage.  */
static int bytes_used;

/* GCSE substitutions made.  */
static int gcse_subst_count;
/* Number of copy instructions created.  */
static int gcse_create_count;
\f
/* Doing code hoisting.  */
static bool doing_code_hoisting_p = false;
\f
/* For available exprs.  */
static sbitmap *ae_kill;
\f
/* Data stored for each basic block.  */
struct bb_data
{
  /* Maximal register pressure inside basic block for given register class
     (defined only for the pressure classes).  */
  int max_reg_pressure[N_REG_CLASSES];
  /* Recorded register pressure of basic block before trying to hoist
     an expression.  Will be used to restore the register pressure
     if the expression should not be hoisted.  */
  int old_pressure;
  /* Recorded register live_in info of basic block during code hoisting
     process.  BACKUP is used to record live_in info before trying to
     hoist an expression, and will be used to restore LIVE_IN if the
     expression should not be hoisted.  */
  bitmap live_in, backup;
};

#define BB_DATA(bb) ((struct bb_data *) (bb)->aux)

static basic_block curr_bb;

/* Current register pressure for each pressure class.  */
static int curr_reg_pressure[N_REG_CLASSES];
\f

static void compute_can_copy (void);
static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
static void *gcse_alloc (unsigned long);
static void alloc_gcse_mem (void);
static void free_gcse_mem (void);
static void hash_scan_insn (rtx, struct hash_table_d *);
static void hash_scan_set (rtx, rtx, struct hash_table_d *);
static void hash_scan_clobber (rtx, rtx, struct hash_table_d *);
static void hash_scan_call (rtx, rtx, struct hash_table_d *);
static int want_to_gcse_p (rtx, int *);
static int oprs_unchanged_p (const_rtx, const_rtx, int);
static int oprs_anticipatable_p (const_rtx, const_rtx);
static int oprs_available_p (const_rtx, const_rtx);
static void insert_expr_in_table (rtx, enum machine_mode, rtx, int, int, int,
				  struct hash_table_d *);
static unsigned int hash_expr (const_rtx, enum machine_mode, int *, int);
static int expr_equiv_p (const_rtx, const_rtx);
static void record_last_reg_set_info (rtx, int);
static void record_last_mem_set_info (rtx);
static void record_last_set_info (rtx, const_rtx, void *);
static void compute_hash_table (struct hash_table_d *);
static void alloc_hash_table (struct hash_table_d *);
static void free_hash_table (struct hash_table_d *);
static void compute_hash_table_work (struct hash_table_d *);
static void dump_hash_table (FILE *, const char *, struct hash_table_d *);
static void compute_transp (const_rtx, int, sbitmap *);
static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
				      struct hash_table_d *);
static void mems_conflict_for_gcse_p (rtx, const_rtx, void *);
static int load_killed_in_block_p (const_basic_block, int, const_rtx, int);
static void canon_list_insert (rtx, const_rtx, void *);
static void alloc_pre_mem (int, int);
static void free_pre_mem (void);
static struct edge_list *compute_pre_data (void);
static int pre_expr_reaches_here_p (basic_block, struct expr *,
				    basic_block);
static void insert_insn_end_basic_block (struct expr *, basic_block);
static void pre_insert_copy_insn (struct expr *, rtx);
static void pre_insert_copies (void);
static int pre_delete (void);
static int pre_gcse (struct edge_list *);
static int one_pre_gcse_pass (void);
static void add_label_notes (rtx, rtx);
static void alloc_code_hoist_mem (int, int);
static void free_code_hoist_mem (void);
static void compute_code_hoist_vbeinout (void);
static void compute_code_hoist_data (void);
static int should_hoist_expr_to_dom (basic_block, struct expr *, basic_block,
				     sbitmap, int, int *, enum reg_class,
				     int *, bitmap, rtx);
static int hoist_code (void);
static enum reg_class get_regno_pressure_class (int regno, int *nregs);
static enum reg_class get_pressure_class_and_nregs (rtx insn, int *nregs);
static int one_code_hoisting_pass (void);
static rtx process_insert_insn (struct expr *);
static int pre_edge_insert (struct edge_list *, struct expr **);
static int pre_expr_reaches_here_p_work (basic_block, struct expr *,
					 basic_block, char *);
static struct ls_expr * ldst_entry (rtx);
static void free_ldst_entry (struct ls_expr *);
static void free_ld_motion_mems (void);
static void print_ldst_list (FILE *);
static struct ls_expr * find_rtx_in_ldst (rtx);
static int simple_mem (const_rtx);
static void invalidate_any_buried_refs (rtx);
static void compute_ld_motion_mems (void);
static void trim_ld_motion_mems (void);
static void update_ld_motion_stores (struct expr *);
static void clear_modify_mem_tables (void);
static void free_modify_mem_tables (void);
static rtx gcse_emit_move_after (rtx, rtx, rtx);
static bool is_too_expensive (const char *);

#define GNEW(T)			((T *) gmalloc (sizeof (T)))
#define GCNEW(T)		((T *) gcalloc (1, sizeof (T)))

#define GNEWVEC(T, N)		((T *) gmalloc (sizeof (T) * (N)))
#define GCNEWVEC(T, N)		((T *) gcalloc ((N), sizeof (T)))

#define GNEWVAR(T, S)		((T *) gmalloc ((S)))
#define GCNEWVAR(T, S)		((T *) gcalloc (1, (S)))

#define GOBNEW(T)		((T *) gcse_alloc (sizeof (T)))
#define GOBNEWVAR(T, S)		((T *) gcse_alloc ((S)))
\f
/* Misc. utilities.  */

#define can_copy \
  (this_target_gcse->x_can_copy)
#define can_copy_init_p \
  (this_target_gcse->x_can_copy_init_p)

/* Compute which modes support reg/reg copy operations.  */

static void
compute_can_copy (void)
{
  int i;
#ifndef AVOID_CCMODE_COPIES
  rtx reg, insn;
#endif
  memset (can_copy, 0, NUM_MACHINE_MODES);

  start_sequence ();
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    if (GET_MODE_CLASS (i) == MODE_CC)
      {
#ifdef AVOID_CCMODE_COPIES
	can_copy[i] = 0;
#else
	reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
	insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
	if (recog (PATTERN (insn), insn, NULL) >= 0)
	  can_copy[i] = 1;
#endif
      }
    else
      can_copy[i] = 1;

  end_sequence ();
}

/* Returns whether the mode supports reg/reg copy operations.  */

bool
can_copy_p (enum machine_mode mode)
{
  if (! can_copy_init_p)
    {
      compute_can_copy ();
      can_copy_init_p = true;
    }

  return can_copy[mode] != 0;
}
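
/* For example (illustrative only), a caller screening a candidate set by
   the destination's mode can write:

     if (can_copy_p (GET_MODE (dest)))
       ... record the expression in the hash table ...

   hash_scan_set below uses exactly this guard.  */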
\f
/* Cover function to xmalloc to record bytes allocated.  */

static void *
gmalloc (size_t size)
{
  bytes_used += size;
  return xmalloc (size);
}

/* Cover function to xcalloc to record bytes allocated.  */

static void *
gcalloc (size_t nelem, size_t elsize)
{
  bytes_used += nelem * elsize;
  return xcalloc (nelem, elsize);
}

/* Cover function to obstack_alloc.  */

static void *
gcse_alloc (unsigned long size)
{
  bytes_used += size;
  return obstack_alloc (&gcse_obstack, size);
}

/* Allocate memory for the reg/memory set tracking tables.
   This is called at the start of each pass.  */

static void
alloc_gcse_mem (void)
{
  /* Allocate vars to track sets of regs.  */
  reg_set_bitmap = ALLOC_REG_SET (NULL);

  /* Allocate array to keep a list of insns which modify memory in each
     basic block.  The two typedefs are needed to work around the
     pre-processor limitation with template types in macro arguments.  */
  typedef vec<rtx> vec_rtx_heap;
  typedef vec<modify_pair> vec_modify_pair_heap;
  modify_mem_list = GCNEWVEC (vec_rtx_heap, last_basic_block);
  canon_modify_mem_list = GCNEWVEC (vec_modify_pair_heap, last_basic_block);
  modify_mem_list_set = BITMAP_ALLOC (NULL);
  blocks_with_calls = BITMAP_ALLOC (NULL);
}

/* Free memory allocated by alloc_gcse_mem.  */

static void
free_gcse_mem (void)
{
  FREE_REG_SET (reg_set_bitmap);

  free_modify_mem_tables ();
  BITMAP_FREE (modify_mem_list_set);
  BITMAP_FREE (blocks_with_calls);
}
\f
/* Compute the local properties of each recorded expression.

   Local properties are those that are defined by the block, irrespective of
   other blocks.

   An expression is transparent in a block if its operands are not modified
   in the block.

   An expression is computed (locally available) in a block if it is computed
   at least once, and the expression would contain the same value if the
   computation were moved to the end of the block.

   An expression is locally anticipatable in a block if it is computed at
   least once, and the expression would contain the same value if the
   computation were moved to the beginning of the block.

   We call this routine for PRE and code hoisting.  Both compute
   basically the same information and thus can easily share this code.

   TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
   properties.  If NULL, then it is not necessary to compute or record that
   particular property.

   TABLE controls which hash table to look at.  */

static void
compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
			  struct hash_table_d *table)
{
  unsigned int i;

  /* Initialize any bitmaps that were passed in.  */
  if (transp)
    {
      bitmap_vector_ones (transp, last_basic_block);
    }

  if (comp)
    bitmap_vector_clear (comp, last_basic_block);
  if (antloc)
    bitmap_vector_clear (antloc, last_basic_block);

  for (i = 0; i < table->size; i++)
    {
      struct expr *expr;

      for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
	{
	  int indx = expr->bitmap_index;
	  struct occr *occr;

	  /* The expression is transparent in this block if it is not killed.
	     We start by assuming all are transparent [none are killed], and
	     then reset the bits for those that are.  */
	  if (transp)
	    compute_transp (expr->expr, indx, transp);

	  /* The occurrences recorded in antic_occr are exactly those that
	     we want to set to nonzero in ANTLOC.  */
	  if (antloc)
	    for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
	      {
		bitmap_set_bit (antloc[BLOCK_FOR_INSN (occr->insn)->index], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->deleted_p = 0;
	      }

	  /* The occurrences recorded in avail_occr are exactly those that
	     we want to set to nonzero in COMP.  */
	  if (comp)
	    for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
	      {
		bitmap_set_bit (comp[BLOCK_FOR_INSN (occr->insn)->index], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->copied_p = 0;
	      }

	  /* While we're scanning the table, this is a good place to
	     initialize this.  */
	  expr->reaching_reg = 0;
	}
    }
}
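
/* As a worked example (hypothetical block contents, pseudo registers
   invented), consider one block:

     r1 = r2 + r3;   <- first occurrence: locally anticipatable (ANTLOC)
     r2 = ...;       <- operand modified: the expression is not
                        transparent here (its TRANSP bit gets cleared)
     r1 = r2 + r3;   <- last occurrence, operands unchanged afterwards:
                        locally available (COMP)

   So an expression can be both locally anticipatable and locally available
   in a block without being transparent in it.  */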
\f
/* Hash table support.  */

struct reg_avail_info
{
  basic_block last_bb;
  int first_set;
  int last_set;
};

static struct reg_avail_info *reg_avail_info;
static basic_block current_bb;

/* See whether X, the source of a set, is something we want to consider for
   GCSE.  */

static int
want_to_gcse_p (rtx x, int *max_distance_ptr)
{
#ifdef STACK_REGS
  /* On register stack architectures, don't GCSE constants from the
     constant pool, as the benefits are often swamped by the overhead
     of shuffling the register stack between basic blocks.  */
  if (IS_STACK_MODE (GET_MODE (x)))
    x = avoid_constant_pool_reference (x);
#endif

  /* GCSE'ing constants:

     We do not specifically distinguish between constant and non-constant
     expressions in PRE and Hoist.  We use set_src_cost below to limit
     the maximum distance simple expressions can travel.

     Nevertheless, constants are much easier to GCSE, and, hence,
     it is easy to overdo the optimizations.  Usually, excessive PRE and
     hoisting of constants leads to increased register pressure.

     RA can deal with this by rematerializing some of the constants.
     Therefore, it is important that the back-end generates sets of constants
     in a way that allows reload to rematerialize them under high register
     pressure, i.e., a pseudo register with REG_EQUAL to constant
     is set only once.  Failing to do so will result in IRA/reload
     spilling such constants under high register pressure instead of
     rematerializing them.  */

  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
    case CALL:
      return 0;

    CASE_CONST_ANY:
      if (!doing_code_hoisting_p)
	/* Do not PRE constants.  */
	return 0;

      /* FALLTHRU */

    default:
      if (doing_code_hoisting_p)
	/* PRE doesn't implement max_distance restriction.  */
	{
	  int cost;
	  int max_distance;

	  gcc_assert (!optimize_function_for_speed_p (cfun)
		      && optimize_function_for_size_p (cfun));
	  cost = set_src_cost (x, 0);

	  if (cost < COSTS_N_INSNS (GCSE_UNRESTRICTED_COST))
	    {
	      max_distance = (GCSE_COST_DISTANCE_RATIO * cost) / 10;
	      if (max_distance == 0)
		return 0;

	      gcc_assert (max_distance > 0);
	    }
	  else
	    max_distance = 0;

	  if (max_distance_ptr)
	    *max_distance_ptr = max_distance;
	}

      return can_assign_to_reg_without_clobbers_p (x);
    }
}
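
/* For instance, assuming the default value 10 for the --param
   gcse-cost-distance-ratio knob behind GCSE_COST_DISTANCE_RATIO, an
   expression whose set_src_cost is 4 would get
   max_distance = (10 * 4) / 10 = 4 insns, while an expression costing
   COSTS_N_INSNS (GCSE_UNRESTRICTED_COST) or more travels unrestricted
   (max_distance == 0).  */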

/* Used internally by can_assign_to_reg_without_clobbers_p.  */

static GTY(()) rtx test_insn;

/* Return true if we can assign X to a pseudo register such that the
   resulting insn does not result in clobbering a hard register as a
   side-effect.

   Additionally, if the target requires it, check that the resulting insn
   can be copied.  If it cannot, this means that X is special and probably
   has hidden side-effects we don't want to mess with.

   This function is typically used by code motion passes, to verify
   that it is safe to insert an insn without worrying about clobbering
   maybe live hard regs.  */

bool
can_assign_to_reg_without_clobbers_p (rtx x)
{
  int num_clobbers = 0;
  int icode;

  /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
  if (general_operand (x, GET_MODE (x)))
    return true;
  else if (GET_MODE (x) == VOIDmode)
    return false;

  /* Otherwise, check if we can make a valid insn from it.  First initialize
     our test insn if we haven't already.  */
  if (test_insn == 0)
    {
      test_insn
	= make_insn_raw (gen_rtx_SET (VOIDmode,
				      gen_rtx_REG (word_mode,
						   FIRST_PSEUDO_REGISTER * 2),
				      const0_rtx));
      NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
    }

  /* Now make an insn like the one we would make when GCSE'ing and see if
     valid.  */
  PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
  SET_SRC (PATTERN (test_insn)) = x;

  icode = recog (PATTERN (test_insn), test_insn, &num_clobbers);
  if (icode < 0)
    return false;

  if (num_clobbers > 0 && added_clobbers_hard_reg_p (icode))
    return false;

  if (targetm.cannot_copy_insn_p && targetm.cannot_copy_insn_p (test_insn))
    return false;

  return true;
}

/* Return nonzero if the operands of expression X are unchanged from the
   start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
   or from INSN to the end of INSN's basic block (if AVAIL_P != 0).  */

static int
oprs_unchanged_p (const_rtx x, const_rtx insn, int avail_p)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
	struct reg_avail_info *info = &reg_avail_info[REGNO (x)];

	if (info->last_bb != current_bb)
	  return 1;
	if (avail_p)
	  return info->last_set < DF_INSN_LUID (insn);
	else
	  return info->first_set >= DF_INSN_LUID (insn);
      }

    case MEM:
      if (load_killed_in_block_p (current_bb, DF_INSN_LUID (insn),
				  x, avail_p))
	return 0;
      else
	return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return 0;

    case PC:
    case CC0: /*FIXME*/
    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 1;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call needed at this
	     level, change it into iteration.  This function is called enough
	     to be worth it.  */
	  if (i == 0)
	    return oprs_unchanged_p (XEXP (x, i), insn, avail_p);

	  else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
	    return 0;
    }

  return 1;
}
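
/* For example (insn stream invented for illustration), given

     insn 10:  r5 = r6 + 1
     insn 11:  r6 = 0

   oprs_unchanged_p on (plus (reg 6) (const_int 1)) at insn 10 returns 0
   with AVAIL_P != 0 (r6 changes later in the block), but 1 with
   AVAIL_P == 0 (r6 is unchanged before insn 10).  */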

/* Info passed from load_killed_in_block_p to mems_conflict_for_gcse_p.  */

struct mem_conflict_info
{
  /* A memory reference for a load instruction, mems_conflict_for_gcse_p will
     see if a memory store conflicts with this memory load.  */
  const_rtx mem;

  /* True if mems_conflict_for_gcse_p finds a conflict between two memory
     references.  */
  bool conflict;
};

/* DEST is the output of an instruction.  If it is a memory reference and
   possibly conflicts with the load found in DATA, then communicate this
   information back through DATA.  */

static void
mems_conflict_for_gcse_p (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
			  void *data)
{
  struct mem_conflict_info *mci = (struct mem_conflict_info *) data;

  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with the load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */
  if (! MEM_P (dest))
    return;

  /* If we are setting a MEM in our list of specially recognized MEMs,
     don't mark as killed this time.  */
  if (pre_ldst_mems != NULL && expr_equiv_p (dest, mci->mem))
    {
      if (!find_rtx_in_ldst (dest))
	mci->conflict = true;
      return;
    }

  if (true_dependence (dest, GET_MODE (dest), mci->mem))
    mci->conflict = true;
}

/* Return nonzero if the expression in X (a memory reference) is killed
   in block BB before or after the insn with the LUID in UID_LIMIT.
   AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
   before UID_LIMIT.

   To check the entire block, set UID_LIMIT to max_uid + 1 and
   AVAIL_P to 0.  */

static int
load_killed_in_block_p (const_basic_block bb, int uid_limit, const_rtx x,
			int avail_p)
{
  vec<rtx> list = modify_mem_list[bb->index];
  rtx setter;
  unsigned ix;
  /* If this is a read-only MEM, we aren't going to be changing it.  */
  if (MEM_READONLY_P (x))
    return 0;

  FOR_EACH_VEC_ELT_REVERSE (list, ix, setter)
    {
      struct mem_conflict_info mci;

      /* Ignore entries in the list that do not apply.  */
      if ((avail_p
	   && DF_INSN_LUID (setter) < uid_limit)
	  || (! avail_p
	      && DF_INSN_LUID (setter) > uid_limit))
	continue;

      /* If SETTER is a call everything is clobbered.  Note that calls
	 to pure functions are never put on the list, so we need not
	 worry about them.  */
      if (CALL_P (setter))
	return 1;

      /* SETTER must be an INSN of some kind that sets memory.  Call
	 note_stores to examine each hunk of memory that is modified.  */
      mci.mem = x;
      mci.conflict = false;
      note_stores (PATTERN (setter), mems_conflict_for_gcse_p, &mci);
      if (mci.conflict)
	return 1;
    }
  return 0;
}
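
/* For instance, the whole-block query described in the comment above
   looks like (illustrative call):

     load_killed_in_block_p (bb, get_max_uid () + 1, x, 0);

   i.e. "is the load X killed anywhere in BB?".  */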

/* Return nonzero if the operands of expression X are unchanged from
   the start of INSN's basic block up to but not including INSN.  */

static int
oprs_anticipatable_p (const_rtx x, const_rtx insn)
{
  return oprs_unchanged_p (x, insn, 0);
}

/* Return nonzero if the operands of expression X are unchanged from
   INSN to the end of INSN's basic block.  */

static int
oprs_available_p (const_rtx x, const_rtx insn)
{
  return oprs_unchanged_p (x, insn, 1);
}

/* Hash expression X.

   MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
   indicating if a volatile operand is found or if the expression contains
   something we don't want to insert in the table.  HASH_TABLE_SIZE is
   the current size of the hash table to be probed.  */

static unsigned int
hash_expr (const_rtx x, enum machine_mode mode, int *do_not_record_p,
	   int hash_table_size)
{
  unsigned int hash;

  *do_not_record_p = 0;

  hash = hash_rtx (x, mode, do_not_record_p, NULL, /*have_reg_qty=*/false);
  return hash % hash_table_size;
}

/* Return nonzero if exp1 is equivalent to exp2.  */

static int
expr_equiv_p (const_rtx x, const_rtx y)
{
  return exp_equiv_p (x, y, 0, true);
}

/* Insert expression X in INSN in the hash TABLE.
   If it is already present, record it as the last occurrence in INSN's
   basic block.

   MODE is the mode of the value X is being stored into.
   It is only used if X is a CONST_INT.

   ANTIC_P is nonzero if X is an anticipatable expression.
   AVAIL_P is nonzero if X is an available expression.

   MAX_DISTANCE is the maximum distance in instructions this expression can
   be moved.  */

static void
insert_expr_in_table (rtx x, enum machine_mode mode, rtx insn, int antic_p,
		      int avail_p, int max_distance, struct hash_table_d *table)
{
  int found, do_not_record_p;
  unsigned int hash;
  struct expr *cur_expr, *last_expr = NULL;
  struct occr *antic_occr, *avail_occr;

  hash = hash_expr (x, mode, &do_not_record_p, table->size);

  /* Do not insert expression in table if it contains volatile operands,
     or if hash_expr determines the expression is something we don't want
     to or can't handle.  */
  if (do_not_record_p)
    return;

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
    {
      /* If the expression isn't found, save a pointer to the end of
	 the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = GOBNEW (struct expr);
      bytes_used += sizeof (struct expr);
      if (table->table[hash] == NULL)
	/* This is the first pattern that hashed to this index.  */
	table->table[hash] = cur_expr;
      else
	/* Add EXPR to end of this hash chain.  */
	last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.  */
      cur_expr->expr = x;
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
      gcc_assert (max_distance >= 0);
      cur_expr->max_distance = max_distance;
    }
  else
    gcc_assert (cur_expr->max_distance == max_distance);

  /* Now record the occurrence(s).  */
  if (antic_p)
    {
      antic_occr = cur_expr->antic_occr;

      if (antic_occr
	  && BLOCK_FOR_INSN (antic_occr->insn) != BLOCK_FOR_INSN (insn))
	antic_occr = NULL;

      if (antic_occr)
	/* Found another instance of the expression in the same basic block.
	   Prefer the currently recorded one.  We want the first one in the
	   block and the block is scanned from start to end.  */
	; /* nothing to do */
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  antic_occr = GOBNEW (struct occr);
	  bytes_used += sizeof (struct occr);
	  antic_occr->insn = insn;
	  antic_occr->next = cur_expr->antic_occr;
	  antic_occr->deleted_p = 0;
	  cur_expr->antic_occr = antic_occr;
	}
    }

  if (avail_p)
    {
      avail_occr = cur_expr->avail_occr;

      if (avail_occr
	  && BLOCK_FOR_INSN (avail_occr->insn) == BLOCK_FOR_INSN (insn))
	{
	  /* Found another instance of the expression in the same basic block.
	     Prefer this occurrence to the currently recorded one.  We want
	     the last one in the block and the block is scanned from start
	     to end.  */
	  avail_occr->insn = insn;
	}
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  avail_occr = GOBNEW (struct occr);
	  bytes_used += sizeof (struct occr);
	  avail_occr->insn = insn;
	  avail_occr->next = cur_expr->avail_occr;
	  avail_occr->deleted_p = 0;
	  cur_expr->avail_occr = avail_occr;
	}
    }
}

/* Scan SET present in INSN and add an entry to the hash TABLE.  */

static void
hash_scan_set (rtx set, rtx insn, struct hash_table_d *table)
{
  rtx src = SET_SRC (set);
  rtx dest = SET_DEST (set);
  rtx note;

  if (GET_CODE (src) == CALL)
    hash_scan_call (src, insn, table);

  else if (REG_P (dest))
    {
      unsigned int regno = REGNO (dest);
      int max_distance = 0;

      /* See if a REG_EQUAL note shows this equivalent to a simpler expression.

	 This allows us to do a single GCSE pass and still eliminate
	 redundant constants, addresses or other expressions that are
	 constructed with multiple instructions.

	 However, keep the original SRC if INSN is a simple reg-reg move.
	 In this case, there will almost always be a REG_EQUAL note on the
	 insn that sets SRC.  By recording the REG_EQUAL value here as SRC
	 for INSN, we miss copy propagation opportunities and we perform the
	 same PRE GCSE operation repeatedly on the same REG_EQUAL value if we
	 do more than one PRE GCSE pass.

	 Note that this does not impede profitable constant propagations.  We
	 "look through" reg-reg sets in lookup_avail_set.  */
      note = find_reg_equal_equiv_note (insn);
      if (note != 0
	  && REG_NOTE_KIND (note) == REG_EQUAL
	  && !REG_P (src)
	  && want_to_gcse_p (XEXP (note, 0), NULL))
	src = XEXP (note, 0), set = gen_rtx_SET (VOIDmode, dest, src);

      /* Only record sets of pseudo-regs in the hash table.  */
      if (regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (dest))
	  /* GCSE commonly inserts instructions after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  /* ??? We can now easily create new EH landing pads at the
	     gimple level, for splitting edges; there's no reason we
	     can't do the same thing at the rtl level.  */
	  && !can_throw_internal (insn)
	  /* Is SET_SRC something we want to gcse?  */
	  && want_to_gcse_p (src, &max_distance)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (set)
	  /* Don't GCSE if it has attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes, and if the argument slot is used somewhere
	     explicitly, it means the address of the parameter has been
	     taken, so we should not extend the lifetime of the pseudo.  */
	  && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
	{
	  /* An expression is not anticipatable if its operands are
	     modified before this insn or if this is not the only SET in
	     this insn.  The latter condition does not have to mean that
	     SRC itself is not anticipatable, but we just will not be
	     able to handle code motion of insns with multiple sets.  */
	  int antic_p = oprs_anticipatable_p (src, insn)
			&& !multiple_sets (insn);
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = (oprs_available_p (src, insn)
			 && ! JUMP_P (insn));

	  insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p,
				max_distance, table);
	}
    }
  /* In case of store we want to consider the memory value as available in
     the REG stored in that memory.  This makes it possible to remove
     redundant loads due to stores to the same location.  */
  else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
    {
      unsigned int regno = REGNO (src);
      int max_distance = 0;

      /* Only record sets of pseudo-regs in the hash table.  */
      if (regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (src))
	  /* GCSE commonly inserts instructions after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  && !can_throw_internal (insn)
	  /* Is SET_DEST something we want to gcse?  */
	  && want_to_gcse_p (dest, &max_distance)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (set)
	  /* Don't GCSE if it has attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes, and if the argument slot is used somewhere
	     explicitly, it means the address of the parameter has been
	     taken, so we should not extend the lifetime of the pseudo.  */
	  && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
	      || ! MEM_P (XEXP (note, 0))))
	{
	  /* Stores are never anticipatable.  */
	  int antic_p = 0;
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = oprs_available_p (dest, insn)
			&& ! JUMP_P (insn);

	  /* Record the memory expression (DEST) in the hash table.  */
	  insert_expr_in_table (dest, GET_MODE (dest), insn,
				antic_p, avail_p, max_distance, table);
	}
    }
}

static void
hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
		   struct hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

static void
hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
		struct hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

/* Process INSN and add hash table entries as appropriate.  */

static void
hash_scan_insn (rtx insn, struct hash_table_d *table)
{
  rtx pat = PATTERN (insn);
  int i;

  /* Pick out the sets of INSN and for other forms of instructions record
     what's been modified.  */

  if (GET_CODE (pat) == SET)
    hash_scan_set (pat, insn, table);

  else if (GET_CODE (pat) == CLOBBER)
    hash_scan_clobber (pat, insn, table);

  else if (GET_CODE (pat) == CALL)
    hash_scan_call (pat, insn, table);

  else if (GET_CODE (pat) == PARALLEL)
    for (i = 0; i < XVECLEN (pat, 0); i++)
      {
	rtx x = XVECEXP (pat, 0, i);

	if (GET_CODE (x) == SET)
	  hash_scan_set (x, insn, table);
	else if (GET_CODE (x) == CLOBBER)
	  hash_scan_clobber (x, insn, table);
	else if (GET_CODE (x) == CALL)
	  hash_scan_call (x, insn, table);
      }
}

/* Dump the hash table TABLE to file FILE under the name NAME.  */

static void
dump_hash_table (FILE *file, const char *name, struct hash_table_d *table)
{
  int i;
  /* Flattened out table, so it's printed in proper order.  */
  struct expr **flat_table;
  unsigned int *hash_val;
  struct expr *expr;

  flat_table = XCNEWVEC (struct expr *, table->n_elems);
  hash_val = XNEWVEC (unsigned int, table->n_elems);

  for (i = 0; i < (int) table->size; i++)
    for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
      {
	flat_table[expr->bitmap_index] = expr;
	hash_val[expr->bitmap_index] = i;
      }

  fprintf (file, "%s hash table (%d buckets, %d entries)\n",
	   name, table->size, table->n_elems);

  for (i = 0; i < (int) table->n_elems; i++)
    if (flat_table[i] != 0)
      {
	expr = flat_table[i];
	fprintf (file, "Index %d (hash value %d; max distance %d)\n  ",
		 expr->bitmap_index, hash_val[i], expr->max_distance);
	print_rtl (file, expr->expr);
	fprintf (file, "\n");
      }

  fprintf (file, "\n");

  free (flat_table);
  free (hash_val);
}
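
/* Sample of the dump format produced above (values invented):

     foo hash table (31 buckets, 2 entries)
     Index 0 (hash value 7; max distance 0)
       (plus:SI (reg:SI 60) (const_int 4 [0x4]))
     Index 1 (hash value 12; max distance 0)
       (mult:SI (reg:SI 61) (reg:SI 62))
   */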

/* Record register first/last/block set information for REGNO in INSN.

   first_set records the first place in the block where the register
   is set and is used to compute "anticipatability".

   last_set records the last place in the block where the register
   is set and is used to compute "availability".

   last_bb records the block for which first_set and last_set are
   valid, as a quick test to invalidate them.  */

static void
record_last_reg_set_info (rtx insn, int regno)
{
  struct reg_avail_info *info = &reg_avail_info[regno];
  int luid = DF_INSN_LUID (insn);

  info->last_set = luid;
  if (info->last_bb != current_bb)
    {
      info->last_bb = current_bb;
      info->first_set = luid;
    }
}

/* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
   Note we store a pair of elements in the list, so they have to be
   taken off pairwise.  */

static void
canon_list_insert (rtx dest ATTRIBUTE_UNUSED, const_rtx x ATTRIBUTE_UNUSED,
		   void * v_insn)
{
  rtx dest_addr, insn;
  int bb;
  modify_pair pair;

  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with a load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */

  if (! MEM_P (dest))
    return;

  dest_addr = get_addr (XEXP (dest, 0));
  dest_addr = canon_rtx (dest_addr);
  insn = (rtx) v_insn;
  bb = BLOCK_FOR_INSN (insn)->index;

  pair.dest = dest;
  pair.dest_addr = dest_addr;
  canon_modify_mem_list[bb].safe_push (pair);
}

/* Record memory modification information for INSN.  We do not actually care
   about the memory location(s) that are set, or even how they are set (consider
   a CALL_INSN).  We merely need to record which insns modify memory.  */

static void
record_last_mem_set_info (rtx insn)
{
  int bb = BLOCK_FOR_INSN (insn)->index;

  /* load_killed_in_block_p will handle the case of calls clobbering
     everything.  */
  modify_mem_list[bb].safe_push (insn);
  bitmap_set_bit (modify_mem_list_set, bb);

  if (CALL_P (insn))
    bitmap_set_bit (blocks_with_calls, bb);
  else
    note_stores (PATTERN (insn), canon_list_insert, (void*) insn);
}

/* Called from compute_hash_table via note_stores to handle one
   SET or CLOBBER in an insn.  DATA is really the instruction in which
   the SET is taking place.  */

static void
record_last_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
{
  rtx last_set_insn = (rtx) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (REG_P (dest))
    record_last_reg_set_info (last_set_insn, REGNO (dest));
  else if (MEM_P (dest)
	   /* Ignore pushes, they clobber nothing.  */
	   && ! push_operand (dest, GET_MODE (dest)))
    record_last_mem_set_info (last_set_insn);
}

/* Top level function to create an expression hash table.

   Expression entries are placed in the hash table if
   - they are of the form (set (pseudo-reg) src),
   - src is something we want to perform GCSE on,
   - none of the operands are subsequently modified in the block

   Currently src must be a pseudo-reg or a const_int.

   TABLE is the table computed.  */

static void
compute_hash_table_work (struct hash_table_d *table)
{
  int i;
  /* Clear out any entries cached by a previous pass.  */
  clear_modify_mem_tables ();
  /* Some working arrays used to track first and last set in each block.  */
  reg_avail_info = GNEWVEC (struct reg_avail_info, max_reg_num ());

  for (i = 0; i < max_reg_num (); ++i)
    reg_avail_info[i].last_bb = NULL;

  FOR_EACH_BB (current_bb)
    {
      rtx insn;
      unsigned int regno;

      /* First pass over the instructions records information used to
	 determine when registers and memory are first and last set.  */
      FOR_BB_INSNS (current_bb, insn)
	{
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  if (CALL_P (insn))
	    {
	      hard_reg_set_iterator hrsi;
	      EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call,
					      0, regno, hrsi)
		record_last_reg_set_info (insn, regno);

	      if (! RTL_CONST_OR_PURE_CALL_P (insn))
		record_last_mem_set_info (insn);
	    }

	  note_stores (PATTERN (insn), record_last_set_info, insn);
	}

      /* The next pass builds the hash table.  */
      FOR_BB_INSNS (current_bb, insn)
	if (NONDEBUG_INSN_P (insn))
	  hash_scan_insn (insn, table);
    }

  free (reg_avail_info);
  reg_avail_info = NULL;
}

/* Allocate space for the set/expr hash TABLE.
   The function's insn count is used to determine the number of buckets.  */

static void
alloc_hash_table (struct hash_table_d *table)
{
  int n;

  n = get_max_insn_count ();

  table->size = n / 4;
  if (table->size < 11)
    table->size = 11;

  /* Attempt to maintain efficient use of hash table.
     Making it an odd number is simplest for now.
     ??? Later take some measurements.  */
  table->size |= 1;
  n = table->size * sizeof (struct expr *);
  table->table = GNEWVAR (struct expr *, n);
}
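
/* E.g. a function with 1000 insns gets 1000 / 4 = 250 buckets, made odd
   to give 251, while tiny functions are clamped to the 11-bucket
   minimum.  */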

/* Free things allocated by alloc_hash_table.  */

static void
free_hash_table (struct hash_table_d *table)
{
  free (table->table);
}

/* Compute the expression hash table TABLE.  */

static void
compute_hash_table (struct hash_table_d *table)
{
  /* Initialize count of number of entries in hash table.  */
  table->n_elems = 0;
  memset (table->table, 0, table->size * sizeof (struct expr *));

  compute_hash_table_work (table);
}
\f
/* Expression tracking support.  */

/* Clear canon_modify_mem_list and modify_mem_list tables.  */
static void
clear_modify_mem_tables (void)
{
  unsigned i;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
    {
      modify_mem_list[i].release ();
      canon_modify_mem_list[i].release ();
    }
  bitmap_clear (modify_mem_list_set);
  bitmap_clear (blocks_with_calls);
}

/* Release memory used by modify_mem_list_set.  */

static void
free_modify_mem_tables (void)
{
  clear_modify_mem_tables ();
  free (modify_mem_list);
  free (canon_modify_mem_list);
  modify_mem_list = 0;
  canon_modify_mem_list = 0;
}
\f
/* For each block, compute whether X is transparent.  X is either an
   expression or an assignment [though we don't care which, for this context
   an assignment is treated as an expression].  For each block where an
   element of X is modified, reset the INDX bit in BMAP.  */

static void
compute_transp (const_rtx x, int indx, sbitmap *bmap)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  /* repeat is used to turn tail-recursion into iteration since GCC
     can't do it when there's no return value.  */
 repeat:

  if (x == 0)
    return;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
	df_ref def;
	for (def = DF_REG_DEF_CHAIN (REGNO (x));
	     def;
	     def = DF_REF_NEXT_REG (def))
	  bitmap_clear_bit (bmap[DF_REF_BB (def)->index], indx);
      }

      return;

    case MEM:
      if (! MEM_READONLY_P (x))
	{
	  bitmap_iterator bi;
	  unsigned bb_index;
	  rtx x_addr;

	  x_addr = get_addr (XEXP (x, 0));
	  x_addr = canon_rtx (x_addr);

	  /* First handle all the blocks with calls.  We don't need to
	     do any list walking for them.  */
	  EXECUTE_IF_SET_IN_BITMAP (blocks_with_calls, 0, bb_index, bi)
	    {
	      bitmap_clear_bit (bmap[bb_index], indx);
	    }

	  /* Now iterate over the blocks which have memory modifications
	     but which do not have any calls.  */
	  EXECUTE_IF_AND_COMPL_IN_BITMAP (modify_mem_list_set,
					  blocks_with_calls,
					  0, bb_index, bi)
	    {
	      vec<modify_pair> list
		= canon_modify_mem_list[bb_index];
	      modify_pair *pair;
	      unsigned ix;

	      FOR_EACH_VEC_ELT_REVERSE (list, ix, pair)
		{
		  rtx dest = pair->dest;
		  rtx dest_addr = pair->dest_addr;

		  if (canon_true_dependence (dest, GET_MODE (dest),
					     dest_addr, x, x_addr))
		    bitmap_clear_bit (bmap[bb_index], indx);
		}
	    }
	}

      x = XEXP (x, 0);
      goto repeat;

    case PC:
    case CC0: /*FIXME*/
    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call
	     needed at this level, change it into iteration.
	     This function is called enough to be worth it.  */
	  if (i == 0)
	    {
	      x = XEXP (x, i);
	      goto repeat;
	    }

	  compute_transp (XEXP (x, i), indx, bmap);
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  compute_transp (XVECEXP (x, i, j), indx, bmap);
    }
}
1750 \f
1751 /* Compute PRE+LCM working variables. */
1752
1753 /* Local properties of expressions. */
1754
1755 /* Nonzero for expressions that are transparent in the block. */
1756 static sbitmap *transp;
1757
1758 /* Nonzero for expressions that are computed (available) in the block. */
1759 static sbitmap *comp;
1760
1761 /* Nonzero for expressions that are locally anticipatable in the block. */
1762 static sbitmap *antloc;
1763
1764 /* Nonzero for expressions where this block is an optimal computation
1765 point. */
1766 static sbitmap *pre_optimal;
1767
1768 /* Nonzero for expressions which are redundant in a particular block. */
1769 static sbitmap *pre_redundant;
1770
1771 /* Nonzero for expressions which should be inserted on a specific edge. */
1772 static sbitmap *pre_insert_map;
1773
1774 /* Nonzero for expressions which should be deleted in a specific block. */
1775 static sbitmap *pre_delete_map;
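
/* A classic partially redundant expression, for orientation (an
   editorial example, making no claims about the generated RTL):

	int g (int a, int b, int c)
	{
	  int x;
	  if (c)
	    x = a + b;
	  else
	    x = 0;
	  return x + (a + b);
	}

   "a + b" is computed on the then-path but not the else-path, so the
   final occurrence is only partially redundant.  LCM records an
   insertion of "a + b" on the else-edge in pre_insert_map and the
   deletion of the last computation in pre_delete_map.  */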
1776
1777 /* Allocate vars used for PRE analysis. */
1778
1779 static void
1780 alloc_pre_mem (int n_blocks, int n_exprs)
1781 {
1782 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
1783 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
1784 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
1785
1786 pre_optimal = NULL;
1787 pre_redundant = NULL;
1788 pre_insert_map = NULL;
1789 pre_delete_map = NULL;
1790 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
1791
1792 /* pre_insert and pre_delete are allocated later. */
1793 }
1794
1795 /* Free vars used for PRE analysis. */
1796
1797 static void
1798 free_pre_mem (void)
1799 {
1800 sbitmap_vector_free (transp);
1801 sbitmap_vector_free (comp);
1802
1803 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
1804
1805 if (pre_optimal)
1806 sbitmap_vector_free (pre_optimal);
1807 if (pre_redundant)
1808 sbitmap_vector_free (pre_redundant);
1809 if (pre_insert_map)
1810 sbitmap_vector_free (pre_insert_map);
1811 if (pre_delete_map)
1812 sbitmap_vector_free (pre_delete_map);
1813
1814 transp = comp = NULL;
1815 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
1816 }
1817
1818 /* Remove certain expressions from the anticipatable and transparent
1819 sets of basic blocks that have an incoming abnormal edge.
1820 For PRE remove potentially trapping expressions to avoid placing
1821 them on abnormal edges. For hoisting remove memory references that
1822 can be clobbered by calls. */
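
/* For example (an editorial sketch), in

	int h (int *p, int q)
	{
	  return *p / q;
	}

   both the load "*p" and the division may trap, so may_trap_p is true
   for them; for PRE their bits are set in PRUNE_EXPRS below and they
   are removed from the ANTLOC and TRANSP sets of blocks with incoming
   abnormal edges.  */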
1823
1824 static void
1825 prune_expressions (bool pre_p)
1826 {
1827 sbitmap prune_exprs;
1828 struct expr *expr;
1829 unsigned int ui;
1830 basic_block bb;
1831
1832 prune_exprs = sbitmap_alloc (expr_hash_table.n_elems);
1833 bitmap_clear (prune_exprs);
1834 for (ui = 0; ui < expr_hash_table.size; ui++)
1835 {
1836 for (expr = expr_hash_table.table[ui]; expr; expr = expr->next_same_hash)
1837 {
1838 /* Note potentially trapping expressions. */
1839 if (may_trap_p (expr->expr))
1840 {
1841 bitmap_set_bit (prune_exprs, expr->bitmap_index);
1842 continue;
1843 }
1844
1845 if (!pre_p && MEM_P (expr->expr))
1846 /* Note memory references that can be clobbered by a call.
1847 We do not split abnormal edges in hoisting, so were a
1848 memory reference to be hoisted along an abnormal edge,
1849 it would be placed /before/ the call. Therefore, only
1850 constant memory references can be hoisted along abnormal
1851 edges. */
1852 {
1853 if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
1854 && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
1855 continue;
1856
1857 if (MEM_READONLY_P (expr->expr)
1858 && !MEM_VOLATILE_P (expr->expr)
1859 && MEM_NOTRAP_P (expr->expr))
1860 /* Constant memory reference, e.g., a PIC address. */
1861 continue;
1862
1863 /* ??? Optimally, we would use interprocedural alias
1864 analysis to determine if this mem is actually killed
1865 by this call. */
1866
1867 bitmap_set_bit (prune_exprs, expr->bitmap_index);
1868 }
1869 }
1870 }
1871
1872 FOR_EACH_BB (bb)
1873 {
1874 edge e;
1875 edge_iterator ei;
1876
1877 /* If the current block is the destination of an abnormal edge, we
1878 kill all trapping (for PRE) and memory (for hoist) expressions
1879 because we won't be able to properly place the instruction on
1880 the edge. So make them neither anticipatable nor transparent.
1881 This is fairly conservative.
1882
1883 ??? For hoisting it may be necessary to check for set-and-jump
1884 instructions here, not just for abnormal edges. The general problem
1885 is that when an expression cannot be placed right at the end of
1886 a basic block we should account for any side-effects of a subsequent
1887 jump instruction that could clobber the expression. It would
1888 be best to implement this check along the lines of
1889 should_hoist_expr_to_dom where the target block is already known
1890 and, hence, there's no need to conservatively prune expressions on
1891 "intermediate" set-and-jump instructions. */
1892 FOR_EACH_EDGE (e, ei, bb->preds)
1893 if ((e->flags & EDGE_ABNORMAL)
1894 && (pre_p || CALL_P (BB_END (e->src))))
1895 {
1896 bitmap_and_compl (antloc[bb->index],
1897 antloc[bb->index], prune_exprs);
1898 bitmap_and_compl (transp[bb->index],
1899 transp[bb->index], prune_exprs);
1900 break;
1901 }
1902 }
1903
1904 sbitmap_free (prune_exprs);
1905 }
1906
1907 /* It may be necessary to insert a large number of insns on edges to
1908 make the existing occurrences of expressions fully redundant. This
1909 routine examines the set of insertions and deletions and if the ratio
1910 of insertions to deletions is too high for a particular expression, then
1911 the expression is removed from the insertion/deletion sets.
1912
1913 N_ELEMS is the number of elements in the hash table. */
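
/* A worked example with hypothetical numbers: if expression J must be
   inserted on 25 edges (insertions[J] == 25) to make a single
   occurrence redundant (deletions[J] == 1), the ratio 25/1 exceeds a
   MAX_GCSE_INSERTION_RATIO of, say, 20, so J is cleared from both
   pre_insert_map and pre_delete_map; one saved evaluation is not worth
   that much code growth.  */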
1914
1915 static void
1916 prune_insertions_deletions (int n_elems)
1917 {
1918 sbitmap_iterator sbi;
1919 sbitmap prune_exprs;
1920
1921 /* We always use I to iterate over blocks/edges and J to iterate over
1922 expressions. */
1923 unsigned int i, j;
1924
1925 /* Counts of the number of times an expression needs to be inserted and
1926 the number of times an expression can be removed as a result. */
1927 int *insertions = GCNEWVEC (int, n_elems);
1928 int *deletions = GCNEWVEC (int, n_elems);
1929
1930 /* Set of expressions which require too many insertions relative to
1931 the number of deletions achieved. We will prune these out of the
1932 insertion/deletion sets. */
1933 prune_exprs = sbitmap_alloc (n_elems);
1934 bitmap_clear (prune_exprs);
1935
1936 /* Iterate over the edges counting the number of times each expression
1937 needs to be inserted. */
1938 for (i = 0; i < (unsigned) n_edges; i++)
1939 {
1940 EXECUTE_IF_SET_IN_BITMAP (pre_insert_map[i], 0, j, sbi)
1941 insertions[j]++;
1942 }
1943
1944 /* Similarly for deletions, but those occur in blocks rather than on
1945 edges. */
1946 for (i = 0; i < (unsigned) last_basic_block; i++)
1947 {
1948 EXECUTE_IF_SET_IN_BITMAP (pre_delete_map[i], 0, j, sbi)
1949 deletions[j]++;
1950 }
1951
1952 /* Now that we have accurate counts, iterate over the elements in the
1953 hash table and see if any need too many insertions relative to the
1954 number of evaluations that can be removed. If so, mark them in
1955 PRUNE_EXPRS. */
1956 for (j = 0; j < (unsigned) n_elems; j++)
1957 if (deletions[j]
1958 && ((unsigned) insertions[j] / deletions[j]) > MAX_GCSE_INSERTION_RATIO)
1959 bitmap_set_bit (prune_exprs, j);
1960
1961 /* Now prune PRE_INSERT_MAP and PRE_DELETE_MAP based on PRUNE_EXPRS. */
1962 EXECUTE_IF_SET_IN_BITMAP (prune_exprs, 0, j, sbi)
1963 {
1964 for (i = 0; i < (unsigned) n_edges; i++)
1965 bitmap_clear_bit (pre_insert_map[i], j);
1966
1967 for (i = 0; i < (unsigned) last_basic_block; i++)
1968 bitmap_clear_bit (pre_delete_map[i], j);
1969 }
1970
1971 sbitmap_free (prune_exprs);
1972 free (insertions);
1973 free (deletions);
1974 }
1975
1976 /* Top level routine to do the dataflow analysis needed by PRE. */
1977
1978 static struct edge_list *
1979 compute_pre_data (void)
1980 {
1981 struct edge_list *edge_list;
1982 basic_block bb;
1983
1984 compute_local_properties (transp, comp, antloc, &expr_hash_table);
1985 prune_expressions (true);
1986 bitmap_vector_clear (ae_kill, last_basic_block);
1987
1988 /* Compute ae_kill for each basic block using:
1989
1990 ~(TRANSP | COMP)
1991 */
1992
1993 FOR_EACH_BB (bb)
1994 {
1995 bitmap_ior (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
1996 bitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
1997 }
1998
1999 edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
2000 ae_kill, &pre_insert_map, &pre_delete_map);
2001 sbitmap_vector_free (antloc);
2002 antloc = NULL;
2003 sbitmap_vector_free (ae_kill);
2004 ae_kill = NULL;
2005
2006 prune_insertions_deletions (expr_hash_table.n_elems);
2007
2008 return edge_list;
2009 }
2010 \f
2011 /* PRE utilities */
2012
2013 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
2014 block BB.
2015
2016 VISITED is a pointer to a working buffer for tracking which BB's have
2017 been visited. It is NULL for the top-level call.
2018
2019 We treat reaching expressions that go through blocks containing the same
2020 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
2021 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
2022 2 as not reaching. The intent is to improve the probability of finding
2023 only one reaching expression and to reduce register lifetimes by picking
2024 the closest such expression. */
2025
2026 static int
2027 pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr,
2028 basic_block bb, char *visited)
2029 {
2030 edge pred;
2031 edge_iterator ei;
2032
2033 FOR_EACH_EDGE (pred, ei, bb->preds)
2034 {
2035 basic_block pred_bb = pred->src;
2036
2037 if (pred->src == ENTRY_BLOCK_PTR
2038 /* Has this predecessor already been visited? */
2039 || visited[pred_bb->index])
2040 ;/* Nothing to do. */
2041
2042 /* Does this predecessor generate this expression? */
2043 else if (bitmap_bit_p (comp[pred_bb->index], expr->bitmap_index))
2044 {
2045 /* Is this the occurrence we're looking for?
2046 Note that there's only one generating occurrence per block
2047 so we just need to check the block number. */
2048 if (occr_bb == pred_bb)
2049 return 1;
2050
2051 visited[pred_bb->index] = 1;
2052 }
2053 /* Ignore this predecessor if it kills the expression. */
2054 else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
2055 visited[pred_bb->index] = 1;
2056
2057 /* Neither gen nor kill. */
2058 else
2059 {
2060 visited[pred_bb->index] = 1;
2061 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
2062 return 1;
2063 }
2064 }
2065
2066 /* All paths have been checked. */
2067 return 0;
2068 }
2069
2070 /* The wrapper for pre_expr_reaches_here_p_work that ensures that any
2071 memory allocated for that function is freed. */
2072
2073 static int
2074 pre_expr_reaches_here_p (basic_block occr_bb, struct expr *expr, basic_block bb)
2075 {
2076 int rval;
2077 char *visited = XCNEWVEC (char, last_basic_block);
2078
2079 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
2080
2081 free (visited);
2082 return rval;
2083 }
2084 \f
2085 /* Generate RTL to copy an EXPR to its `reaching_reg' and return it. */
2086
2087 static rtx
2088 process_insert_insn (struct expr *expr)
2089 {
2090 rtx reg = expr->reaching_reg;
2091 /* Copy the expression to make sure we don't have any sharing issues. */
2092 rtx exp = copy_rtx (expr->expr);
2093 rtx pat;
2094
2095 start_sequence ();
2096
2097 /* If the expression is something that's an operand, like a constant,
2098 just copy it to a register. */
2099 if (general_operand (exp, GET_MODE (reg)))
2100 emit_move_insn (reg, exp);
2101
2102 /* Otherwise, make a new insn to compute this expression and make sure the
2103 insn will be recognized (this also adds any needed CLOBBERs). */
2104 else
2105 {
2106 rtx insn = emit_insn (gen_rtx_SET (VOIDmode, reg, exp));
2107
2108 if (insn_invalid_p (insn, false))
2109 gcc_unreachable ();
2110 }
2111
2112 pat = get_insns ();
2113 end_sequence ();
2114
2115 return pat;
2116 }
2117
2118 /* Add EXPR to the end of basic block BB.
2119
2120 This is used by both PRE and code hoisting. */
2121
2122 static void
2123 insert_insn_end_basic_block (struct expr *expr, basic_block bb)
2124 {
2125 rtx insn = BB_END (bb);
2126 rtx new_insn;
2127 rtx reg = expr->reaching_reg;
2128 int regno = REGNO (reg);
2129 rtx pat, pat_end;
2130
2131 pat = process_insert_insn (expr);
2132 gcc_assert (pat && INSN_P (pat));
2133
2134 pat_end = pat;
2135 while (NEXT_INSN (pat_end) != NULL_RTX)
2136 pat_end = NEXT_INSN (pat_end);
2137
2138 /* If the last insn is a jump, insert EXPR in front [taking care to
2139 handle cc0, etc. properly]. Similarly we need to take care of trapping
2140 instructions in the presence of non-call exceptions. */
2141
2142 if (JUMP_P (insn)
2143 || (NONJUMP_INSN_P (insn)
2144 && (!single_succ_p (bb)
2145 || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
2146 {
2147 #ifdef HAVE_cc0
2148 rtx note;
2149 #endif
2150
2151 /* If this is a jump table, then we can't insert stuff here. Since
2152 we know the previous real insn must be the tablejump, we insert
2153 the new instruction just before the tablejump. */
2154 if (GET_CODE (PATTERN (insn)) == ADDR_VEC
2155 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
2156 insn = prev_active_insn (insn);
2157
2158 #ifdef HAVE_cc0
2159 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
2160 if cc0 isn't set. */
2161 note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
2162 if (note)
2163 insn = XEXP (note, 0);
2164 else
2165 {
2166 rtx maybe_cc0_setter = prev_nonnote_insn (insn);
2167 if (maybe_cc0_setter
2168 && INSN_P (maybe_cc0_setter)
2169 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
2170 insn = maybe_cc0_setter;
2171 }
2172 #endif
2173 /* FIXME: What if something in cc0/jump uses value set in new insn? */
2174 new_insn = emit_insn_before_noloc (pat, insn, bb);
2175 }
2176
2177 /* Likewise if the last insn is a call, as will happen in the presence
2178 of exception handling. */
2179 else if (CALL_P (insn)
2180 && (!single_succ_p (bb)
2181 || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
2182 {
2183 /* Keeping in mind targets with small register classes and parameters
2184 in registers, we search backward and place the instructions before
2185 the first parameter is loaded. Do this for all targets for consistency,
2186 on the presumption that we'll get better code elsewhere as well. */
2187
2188 /* Since different machines initialize their parameter registers
2189 in different orders, assume nothing. Collect the set of all
2190 parameter registers. */
2191 insn = find_first_parameter_load (insn, BB_HEAD (bb));
2192
2193 /* If we found all the parameter loads, then we want to insert
2194 before the first parameter load.
2195
2196 If we did not find all the parameter loads, then we might have
2197 stopped on the head of the block, which could be a CODE_LABEL.
2198 If we inserted before the CODE_LABEL, then we would be putting
2199 the insn in the wrong basic block. In that case, put the insn
2200 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
2201 while (LABEL_P (insn)
2202 || NOTE_INSN_BASIC_BLOCK_P (insn))
2203 insn = NEXT_INSN (insn);
2204
2205 new_insn = emit_insn_before_noloc (pat, insn, bb);
2206 }
2207 else
2208 new_insn = emit_insn_after_noloc (pat, insn, bb);
2209
2210 while (1)
2211 {
2212 if (INSN_P (pat))
2213 add_label_notes (PATTERN (pat), new_insn);
2214 if (pat == pat_end)
2215 break;
2216 pat = NEXT_INSN (pat);
2217 }
2218
2219 gcse_create_count++;
2220
2221 if (dump_file)
2222 {
2223 fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
2224 bb->index, INSN_UID (new_insn));
2225 fprintf (dump_file, "copying expression %d to reg %d\n",
2226 expr->bitmap_index, regno);
2227 }
2228 }
2229
2230 /* Insert partially redundant expressions on edges in the CFG to make
2231 the expressions fully redundant. */
2232
2233 static int
2234 pre_edge_insert (struct edge_list *edge_list, struct expr **index_map)
2235 {
2236 int e, i, j, num_edges, set_size, did_insert = 0;
2237 sbitmap *inserted;
2238
2239 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
2240 if it reaches any of the deleted expressions. */
2241
2242 set_size = pre_insert_map[0]->size;
2243 num_edges = NUM_EDGES (edge_list);
2244 inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
2245 bitmap_vector_clear (inserted, num_edges);
2246
2247 for (e = 0; e < num_edges; e++)
2248 {
2249 int indx;
2250 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
2251
2252 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
2253 {
2254 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
2255
2256 for (j = indx;
2257 insert && j < (int) expr_hash_table.n_elems;
2258 j++, insert >>= 1)
2259 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
2260 {
2261 struct expr *expr = index_map[j];
2262 struct occr *occr;
2263
2264 /* Now look at each deleted occurrence of this expression. */
2265 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2266 {
2267 if (! occr->deleted_p)
2268 continue;
2269
2270 /* Insert this expression on this edge if it would
2271 reach the deleted occurrence in BB. */
2272 if (!bitmap_bit_p (inserted[e], j))
2273 {
2274 rtx insn;
2275 edge eg = INDEX_EDGE (edge_list, e);
2276
2277 /* We can't insert anything on an abnormal and
2278 critical edge, so we insert the insn at the end of
2279 the previous block. There are several alternatives
2280 detailed in Morgan's book, p. 277 (sec. 10.5), for
2281 handling this situation. This one is easiest for
2282 now. */
2283
2284 if (eg->flags & EDGE_ABNORMAL)
2285 insert_insn_end_basic_block (index_map[j], bb);
2286 else
2287 {
2288 insn = process_insert_insn (index_map[j]);
2289 insert_insn_on_edge (insn, eg);
2290 }
2291
2292 if (dump_file)
2293 {
2294 fprintf (dump_file, "PRE: edge (%d,%d), ",
2295 bb->index,
2296 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
2297 fprintf (dump_file, "copy expression %d\n",
2298 expr->bitmap_index);
2299 }
2300
2301 update_ld_motion_stores (expr);
2302 bitmap_set_bit (inserted[e], j);
2303 did_insert = 1;
2304 gcse_create_count++;
2305 }
2306 }
2307 }
2308 }
2309 }
2310
2311 sbitmap_vector_free (inserted);
2312 return did_insert;
2313 }
2314
2315 /* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
2316 Given "old_reg <- expr" (INSN), instead of adding after it
2317 reaching_reg <- old_reg
2318 it's better to do the following:
2319 reaching_reg <- expr
2320 old_reg <- reaching_reg
2321 because this way copy propagation can discover additional PRE
2322 opportunities. But if this fails, we try the old way.
2323 When "expr" is a store, i.e.
2324 given "MEM <- old_reg", instead of adding after it
2325 reaching_reg <- old_reg
2326 it's better to add it before as follows:
2327 reaching_reg <- old_reg
2328 MEM <- reaching_reg. */
2329
2330 static void
2331 pre_insert_copy_insn (struct expr *expr, rtx insn)
2332 {
2333 rtx reg = expr->reaching_reg;
2334 int regno = REGNO (reg);
2335 int indx = expr->bitmap_index;
2336 rtx pat = PATTERN (insn);
2337 rtx set, first_set, new_insn;
2338 rtx old_reg;
2339 int i;
2340
2341 /* This block matches the logic in hash_scan_insn. */
2342 switch (GET_CODE (pat))
2343 {
2344 case SET:
2345 set = pat;
2346 break;
2347
2348 case PARALLEL:
2349 /* Search through the parallel looking for the set whose
2350 source was the expression that we're interested in. */
2351 first_set = NULL_RTX;
2352 set = NULL_RTX;
2353 for (i = 0; i < XVECLEN (pat, 0); i++)
2354 {
2355 rtx x = XVECEXP (pat, 0, i);
2356 if (GET_CODE (x) == SET)
2357 {
2358 /* If the source was a REG_EQUAL or REG_EQUIV note, we
2359 may not find an equivalent expression, but in this
2360 case the PARALLEL will have a single set. */
2361 if (first_set == NULL_RTX)
2362 first_set = x;
2363 if (expr_equiv_p (SET_SRC (x), expr->expr))
2364 {
2365 set = x;
2366 break;
2367 }
2368 }
2369 }
2370
2371 gcc_assert (first_set);
2372 if (set == NULL_RTX)
2373 set = first_set;
2374 break;
2375
2376 default:
2377 gcc_unreachable ();
2378 }
2379
2380 if (REG_P (SET_DEST (set)))
2381 {
2382 old_reg = SET_DEST (set);
2383 /* Check if we can modify the set destination in the original insn. */
2384 if (validate_change (insn, &SET_DEST (set), reg, 0))
2385 {
2386 new_insn = gen_move_insn (old_reg, reg);
2387 new_insn = emit_insn_after (new_insn, insn);
2388 }
2389 else
2390 {
2391 new_insn = gen_move_insn (reg, old_reg);
2392 new_insn = emit_insn_after (new_insn, insn);
2393 }
2394 }
2395 else /* This is possible only in the case of a store to memory. */
2396 {
2397 old_reg = SET_SRC (set);
2398 new_insn = gen_move_insn (reg, old_reg);
2399
2400 /* Check if we can modify the set source in the original insn. */
2401 if (validate_change (insn, &SET_SRC (set), reg, 0))
2402 new_insn = emit_insn_before (new_insn, insn);
2403 else
2404 new_insn = emit_insn_after (new_insn, insn);
2405 }
2406
2407 gcse_create_count++;
2408
2409 if (dump_file)
2410 fprintf (dump_file,
2411 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
2412 BLOCK_FOR_INSN (insn)->index, INSN_UID (new_insn), indx,
2413 INSN_UID (insn), regno);
2414 }
2415
2416 /* Copy available expressions that reach the redundant expression
2417 to `reaching_reg'. */
2418
2419 static void
2420 pre_insert_copies (void)
2421 {
2422 unsigned int i, added_copy;
2423 struct expr *expr;
2424 struct occr *occr;
2425 struct occr *avail;
2426
2427 /* For each available expression in the table, copy the result to
2428 `reaching_reg' if the expression reaches a deleted one.
2429
2430 ??? The current algorithm is rather brute force.
2431 Need to do some profiling. */
2432
2433 for (i = 0; i < expr_hash_table.size; i++)
2434 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2435 {
2436 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
2437 we don't want to insert a copy here because the expression may not
2438 really be redundant. So only insert an insn if the expression was
2439 deleted. This test also avoids further processing if the
2440 expression wasn't deleted anywhere. */
2441 if (expr->reaching_reg == NULL)
2442 continue;
2443
2444 /* Set when we add a copy for that expression. */
2445 added_copy = 0;
2446
2447 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2448 {
2449 if (! occr->deleted_p)
2450 continue;
2451
2452 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
2453 {
2454 rtx insn = avail->insn;
2455
2456 /* No need to handle this one if handled already. */
2457 if (avail->copied_p)
2458 continue;
2459
2460 /* Don't handle this one if it's a redundant one. */
2461 if (INSN_DELETED_P (insn))
2462 continue;
2463
2464 /* Or if the expression doesn't reach the deleted one. */
2465 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
2466 expr,
2467 BLOCK_FOR_INSN (occr->insn)))
2468 continue;
2469
2470 added_copy = 1;
2471
2472 /* Copy the result of avail to reaching_reg. */
2473 pre_insert_copy_insn (expr, insn);
2474 avail->copied_p = 1;
2475 }
2476 }
2477
2478 if (added_copy)
2479 update_ld_motion_stores (expr);
2480 }
2481 }
2482
2483 /* Emit move from SRC to DEST noting the equivalence with expression computed
2484 in INSN. */
2485
2486 static rtx
2487 gcse_emit_move_after (rtx dest, rtx src, rtx insn)
2488 {
2489 rtx new_rtx;
2490 rtx set = single_set (insn), set2;
2491 rtx note;
2492 rtx eqv = NULL_RTX;
2493
2494 /* This should never fail since we're creating a reg->reg copy
2495 we've verified to be valid. */
2496
2497 new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);
2498
2499 /* Note the equivalence for the local CSE pass. Take the note from the old
2500 set if there was one. Otherwise record the SET_SRC from the old set
2501 unless DEST is also an operand of the SET_SRC. */
2502 set2 = single_set (new_rtx);
2503 if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
2504 return new_rtx;
2505 if ((note = find_reg_equal_equiv_note (insn)))
2506 eqv = XEXP (note, 0);
2507 else if (! REG_P (dest)
2508 || ! reg_mentioned_p (dest, SET_SRC (set)))
2509 eqv = SET_SRC (set);
2510
2511 if (eqv != NULL_RTX)
2512 set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));
2513
2514 return new_rtx;
2515 }
2516
2517 /* Delete redundant computations.
2518 Deletion is done by changing the insn to copy the `reaching_reg' of
2519 the expression into the result of the SET. It is left to later passes
2520 (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
2521
2522 Return nonzero if a change is made. */
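
/* Schematically, in the "reaching_reg <- expr" notation used elsewhere
   in this file (an editorial sketch):

	before:  y <- a + b	(occurrence marked in pre_delete_map)
	after:   y <- reaching_reg

   where some non-redundant computation of "a + b" has been made to
   store its result in `reaching_reg' as well.  The copy is left for
   cprop/cse2 to clean up.  */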
2523
2524 static int
2525 pre_delete (void)
2526 {
2527 unsigned int i;
2528 int changed;
2529 struct expr *expr;
2530 struct occr *occr;
2531
2532 changed = 0;
2533 for (i = 0; i < expr_hash_table.size; i++)
2534 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2535 {
2536 int indx = expr->bitmap_index;
2537
2538 /* We only need to search antic_occr since we require ANTLOC != 0. */
2539 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2540 {
2541 rtx insn = occr->insn;
2542 rtx set;
2543 basic_block bb = BLOCK_FOR_INSN (insn);
2544
2545 /* We only delete insns that have a single_set. */
2546 if (bitmap_bit_p (pre_delete_map[bb->index], indx)
2547 && (set = single_set (insn)) != 0
2548 && dbg_cnt (pre_insn))
2549 {
2550 /* Create a pseudo-reg to store the result of reaching
2551 expressions into. Get the mode for the new pseudo from
2552 the mode of the original destination pseudo. */
2553 if (expr->reaching_reg == NULL)
2554 expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST (set));
2555
2556 gcse_emit_move_after (SET_DEST (set), expr->reaching_reg, insn);
2557 delete_insn (insn);
2558 occr->deleted_p = 1;
2559 changed = 1;
2560 gcse_subst_count++;
2561
2562 if (dump_file)
2563 {
2564 fprintf (dump_file,
2565 "PRE: redundant insn %d (expression %d) in ",
2566 INSN_UID (insn), indx);
2567 fprintf (dump_file, "bb %d, reaching reg is %d\n",
2568 bb->index, REGNO (expr->reaching_reg));
2569 }
2570 }
2571 }
2572 }
2573
2574 return changed;
2575 }
2576
2577 /* Perform GCSE optimizations using PRE.
2578 This is called by one_pre_gcse_pass after all the dataflow analysis
2579 has been done.
2580
2581 This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
2582 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
2583 Compiler Design and Implementation.
2584
2585 ??? A new pseudo reg is created to hold the reaching expression. The nice
2586 thing about the classical approach is that it would try to use an existing
2587 reg. If the register can't be adequately optimized [i.e. we introduce
2588 reload problems], one could add a pass here to propagate the new register
2589 through the block.
2590
2591 ??? We don't handle single sets in PARALLELs because we're [currently] not
2592 able to copy the rest of the parallel when we insert copies to create full
2593 redundancies from partial redundancies. However, there's no reason why we
2594 can't handle PARALLELs in the cases where there are no partial
2595 redundancies. */
2596
2597 static int
2598 pre_gcse (struct edge_list *edge_list)
2599 {
2600 unsigned int i;
2601 int did_insert, changed;
2602 struct expr **index_map;
2603 struct expr *expr;
2604
2605 /* Compute a mapping from expression number (`bitmap_index') to
2606 hash table entry. */
2607
2608 index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
2609 for (i = 0; i < expr_hash_table.size; i++)
2610 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2611 index_map[expr->bitmap_index] = expr;
2612
2613 /* Delete the redundant insns first so that
2614 - we know what register to use for the new insns and for the other
2615 ones with reaching expressions
2616 - we know which insns are redundant when we go to create copies */
2617
2618 changed = pre_delete ();
2619 did_insert = pre_edge_insert (edge_list, index_map);
2620
2621 /* In other places with reaching expressions, copy the expression to the
2622 specially allocated pseudo-reg that reaches the redundant expr. */
2623 pre_insert_copies ();
2624 if (did_insert)
2625 {
2626 commit_edge_insertions ();
2627 changed = 1;
2628 }
2629
2630 free (index_map);
2631 return changed;
2632 }
2633
2634 /* Top level routine to perform one PRE GCSE pass.
2635
2636 Return nonzero if a change was made. */
2637
2638 static int
2639 one_pre_gcse_pass (void)
2640 {
2641 int changed = 0;
2642
2643 gcse_subst_count = 0;
2644 gcse_create_count = 0;
2645
2646 /* Return if there's nothing to do, or it is too expensive. */
2647 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
2648 || is_too_expensive (_("PRE disabled")))
2649 return 0;
2650
2651 /* We need alias. */
2652 init_alias_analysis ();
2653
2654 bytes_used = 0;
2655 gcc_obstack_init (&gcse_obstack);
2656 alloc_gcse_mem ();
2657
2658 alloc_hash_table (&expr_hash_table);
2659 add_noreturn_fake_exit_edges ();
2660 if (flag_gcse_lm)
2661 compute_ld_motion_mems ();
2662
2663 compute_hash_table (&expr_hash_table);
2664 if (flag_gcse_lm)
2665 trim_ld_motion_mems ();
2666 if (dump_file)
2667 dump_hash_table (dump_file, "Expression", &expr_hash_table);
2668
2669 if (expr_hash_table.n_elems > 0)
2670 {
2671 struct edge_list *edge_list;
2672 alloc_pre_mem (last_basic_block, expr_hash_table.n_elems);
2673 edge_list = compute_pre_data ();
2674 changed |= pre_gcse (edge_list);
2675 free_edge_list (edge_list);
2676 free_pre_mem ();
2677 }
2678
2679 if (flag_gcse_lm)
2680 free_ld_motion_mems ();
2681 remove_fake_exit_edges ();
2682 free_hash_table (&expr_hash_table);
2683
2684 free_gcse_mem ();
2685 obstack_free (&gcse_obstack, NULL);
2686
2687 /* We are finished with alias. */
2688 end_alias_analysis ();
2689
2690 if (dump_file)
2691 {
2692 fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
2693 current_function_name (), n_basic_blocks, bytes_used);
2694 fprintf (dump_file, "%d substs, %d insns created\n",
2695 gcse_subst_count, gcse_create_count);
2696 }
2697
2698 return changed;
2699 }
2700 \f
2701 /* If X contains any LABEL_REF's, add REG_LABEL_OPERAND notes for them
2702 to INSN. If such notes are added to an insn which references a
2703 CODE_LABEL, the LABEL_NUSES count is incremented. We have to add
2704 that note, because the following loop optimization pass requires
2705 them. */
2706
2707 /* ??? If there were a jump optimization pass after gcse and before loop,
2708 then we would not need to do this here, because jump would add the
2709 necessary REG_LABEL_OPERAND and REG_LABEL_TARGET notes. */
2710
2711 static void
2712 add_label_notes (rtx x, rtx insn)
2713 {
2714 enum rtx_code code = GET_CODE (x);
2715 int i, j;
2716 const char *fmt;
2717
2718 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
2719 {
2720 /* This code used to ignore labels that referred to dispatch tables to
2721 avoid flow generating (slightly) worse code.
2722
2723 We no longer ignore such label references (see LABEL_REF handling in
2724 mark_jump_label for additional information). */
2725
2726 /* There's no reason for current users to emit jump-insns with
2727 such a LABEL_REF, so we don't have to handle REG_LABEL_TARGET
2728 notes. */
2729 gcc_assert (!JUMP_P (insn));
2730 add_reg_note (insn, REG_LABEL_OPERAND, XEXP (x, 0));
2731
2732 if (LABEL_P (XEXP (x, 0)))
2733 LABEL_NUSES (XEXP (x, 0))++;
2734
2735 return;
2736 }
2737
2738 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2739 {
2740 if (fmt[i] == 'e')
2741 add_label_notes (XEXP (x, i), insn);
2742 else if (fmt[i] == 'E')
2743 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2744 add_label_notes (XVECEXP (x, i, j), insn);
2745 }
2746 }
2747
2748 /* Code Hoisting variables and subroutines. */
2749
2750 /* Very busy expressions. */
2751 static sbitmap *hoist_vbein;
2752 static sbitmap *hoist_vbeout;
2753
2754 /* ??? We could compute post dominators and run this algorithm in
2755 reverse to perform tail merging; doing so would probably be
2756 more effective than the tail merging code in jump.c.
2757
2758 It's unclear if tail merging could be run in parallel with
2759 code hoisting. It would be nice. */
2760
2761 /* Allocate vars used for code hoisting analysis. */
2762
2763 static void
2764 alloc_code_hoist_mem (int n_blocks, int n_exprs)
2765 {
2766 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
2767 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
2768 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
2769
2770 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
2771 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
2772 }
2773
2774 /* Free vars used for code hoisting analysis. */
2775
2776 static void
2777 free_code_hoist_mem (void)
2778 {
2779 sbitmap_vector_free (antloc);
2780 sbitmap_vector_free (transp);
2781 sbitmap_vector_free (comp);
2782
2783 sbitmap_vector_free (hoist_vbein);
2784 sbitmap_vector_free (hoist_vbeout);
2785
2786 free_dominance_info (CDI_DOMINATORS);
2787 }
2788
2789 /* Compute the very busy expressions at entry/exit from each block.
2790
2791 An expression is very busy if all paths from a given point
2792 compute the expression. */
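
/* The loop below computes the usual backward dataflow equations (an
   editorial restatement):

	VBEout(BB) = intersection over successors S of VBEin(S)
			union COMP(BB)
	VBEin(BB)  = ANTLOC(BB) union (VBEout(BB) intersect TRANSP(BB))

   where the COMP term corresponds to the bitmap_ior call that also
   counts expressions computed in BB and available at its end.  */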
2793
2794 static void
2795 compute_code_hoist_vbeinout (void)
2796 {
2797 int changed, passes;
2798 basic_block bb;
2799
2800 bitmap_vector_clear (hoist_vbeout, last_basic_block);
2801 bitmap_vector_clear (hoist_vbein, last_basic_block);
2802
2803 passes = 0;
2804 changed = 1;
2805
2806 while (changed)
2807 {
2808 changed = 0;
2809
2810 /* We scan the blocks in reverse order to speed up
2811 convergence. */
2812 FOR_EACH_BB_REVERSE (bb)
2813 {
2814 if (bb->next_bb != EXIT_BLOCK_PTR)
2815 {
2816 bitmap_intersection_of_succs (hoist_vbeout[bb->index],
2817 hoist_vbein, bb);
2818
2819 /* Include expressions in VBEout that are calculated
2820 in BB and available at its end. */
2821 bitmap_ior (hoist_vbeout[bb->index],
2822 hoist_vbeout[bb->index], comp[bb->index]);
2823 }
2824
2825 changed |= bitmap_or_and (hoist_vbein[bb->index],
2826 antloc[bb->index],
2827 hoist_vbeout[bb->index],
2828 transp[bb->index]);
2829 }
2830
2831 passes++;
2832 }
2833
2834 if (dump_file)
2835 {
2836 fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);
2837
2838 FOR_EACH_BB (bb)
2839 {
2840 fprintf (dump_file, "vbein (%d): ", bb->index);
2841 dump_bitmap_file (dump_file, hoist_vbein[bb->index]);
2842 fprintf (dump_file, "vbeout(%d): ", bb->index);
2843 dump_bitmap_file (dump_file, hoist_vbeout[bb->index]);
2844 }
2845 }
2846 }
2847
2848 /* Top level routine to do the dataflow analysis needed by code hoisting. */
2849
2850 static void
2851 compute_code_hoist_data (void)
2852 {
2853 compute_local_properties (transp, comp, antloc, &expr_hash_table);
2854 prune_expressions (false);
2855 compute_code_hoist_vbeinout ();
2856 calculate_dominance_info (CDI_DOMINATORS);
2857 if (dump_file)
2858 fprintf (dump_file, "\n");
2859 }
2860
2861 /* Update register pressure for BB when hoisting an expression from
2862 instruction FROM, if the live ranges of its inputs are shrunk. Also
2863 maintain live_in information if the live range of a register referenced
2864 in FROM is shrunk.
2865
2866 Return 0 if register pressure doesn't change; otherwise return
2867 the number by which register pressure is decreased.
2868
2869 NOTE: Register pressure won't be increased in this function. */
2870
2871 static int
2872 update_bb_reg_pressure (basic_block bb, rtx from)
2873 {
2874 rtx dreg, insn;
2875 basic_block succ_bb;
2876 df_ref *op, op_ref;
2877 edge succ;
2878 edge_iterator ei;
2879 int decreased_pressure = 0;
2880 int nregs;
2881 enum reg_class pressure_class;
2882
2883 for (op = DF_INSN_USES (from); *op; op++)
2884 {
2885 dreg = DF_REF_REAL_REG (*op);
2886 /* The live range of the register is shrunk only if it isn't:
2887 1. referenced on any path from the end of this block to EXIT, or
2888 2. referenced by insns other than FROM in this block. */
2889 FOR_EACH_EDGE (succ, ei, bb->succs)
2890 {
2891 succ_bb = succ->dest;
2892 if (succ_bb == EXIT_BLOCK_PTR)
2893 continue;
2894
2895 if (bitmap_bit_p (BB_DATA (succ_bb)->live_in, REGNO (dreg)))
2896 break;
2897 }
2898 if (succ != NULL)
2899 continue;
2900
2901 op_ref = DF_REG_USE_CHAIN (REGNO (dreg));
2902 for (; op_ref; op_ref = DF_REF_NEXT_REG (op_ref))
2903 {
2904 if (!DF_REF_INSN_INFO (op_ref))
2905 continue;
2906
2907 insn = DF_REF_INSN (op_ref);
2908 if (BLOCK_FOR_INSN (insn) == bb
2909 && NONDEBUG_INSN_P (insn) && insn != from)
2910 break;
2911 }
2912
2913 pressure_class = get_regno_pressure_class (REGNO (dreg), &nregs);
2914 /* Decrease register pressure and update live_in information for
2915 this block. */
2916 if (!op_ref && pressure_class != NO_REGS)
2917 {
2918 decreased_pressure += nregs;
2919 BB_DATA (bb)->max_reg_pressure[pressure_class] -= nregs;
2920 bitmap_clear_bit (BB_DATA (bb)->live_in, REGNO (dreg));
2921 }
2922 }
2923 return decreased_pressure;
2924 }
2925
2926 /* Determine if the expression EXPR should be hoisted to EXPR_BB up in
2927 flow graph, if it can reach BB unimpaired. Stop the search if the
2928 expression would need to be moved more than DISTANCE instructions.
2929
2930 DISTANCE is the number of instructions through which EXPR can be
2931 hoisted up in flow graph.
2932
2933 BB_SIZE points to an array which contains the number of instructions
2934 for each basic block.
2935
2936 PRESSURE_CLASS and NREGS are register class and number of hard registers
2937 for storing EXPR.
2938
2939 HOISTED_BBS points to a bitmap indicating basic blocks through which
2940 EXPR is hoisted.
2941
2942 FROM is the instruction from which EXPR is hoisted.
2943
2944 It's unclear exactly what Muchnick meant by "unimpaired". It seems
2945 to me that the expression must either be computed or transparent in
2946 *every* block in the path(s) from EXPR_BB to BB. Any other definition
2947 would allow the expression to be hoisted out of loops, even if
2948 the expression wasn't a loop invariant.
2949
2950 Contrast this to reachability for PRE where an expression is
2951 considered reachable if *any* path reaches instead of *all*
2952 paths. */
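
/* The *all* paths reading matters for loops (an editorial example):

	int k (int a, int b, int n)
	{
	  int i, s = 0;
	  for (i = 0; i < n; i++)
	    {
	      s += a;
	      a = s;
	    }
	  return s + a * b;
	}

   "a * b" after the loop must not be hoisted above it, because "a" is
   modified inside the loop; the loop body neither computes "a * b" nor
   is transparent for it, so the walk below stops there.  */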
2953
2954 static int
2955 should_hoist_expr_to_dom (basic_block expr_bb, struct expr *expr,
2956 basic_block bb, sbitmap visited, int distance,
2957 int *bb_size, enum reg_class pressure_class,
2958 int *nregs, bitmap hoisted_bbs, rtx from)
2959 {
2960 unsigned int i;
2961 edge pred;
2962 edge_iterator ei;
2963 sbitmap_iterator sbi;
2964 int visited_allocated_locally = 0;
2965 int decreased_pressure = 0;
2966
2967 if (flag_ira_hoist_pressure)
2968 {
2969 /* Record the old information of basic block BB when it is visited
2970 for the first time. */
2971 if (!bitmap_bit_p (hoisted_bbs, bb->index))
2972 {
2973 struct bb_data *data = BB_DATA (bb);
2974 bitmap_copy (data->backup, data->live_in);
2975 data->old_pressure = data->max_reg_pressure[pressure_class];
2976 }
2977 decreased_pressure = update_bb_reg_pressure (bb, from);
2978 }
2979 /* Terminate the search if the distance through which EXPR is allowed
2980 to move is exhausted. */
2981 if (distance > 0)
2982 {
2983 if (flag_ira_hoist_pressure)
2984 {
2985 /* Prefer to hoist EXPR if register pressure is decreased. */
2986 if (decreased_pressure > *nregs)
2987 distance += bb_size[bb->index];
2988 /* Let EXPR be hoisted through the basic block at no cost if one
2989 of the following conditions is satisfied:
2990
2991 1. The basic block has low register pressure.
2992 2. Register pressure won't be increased after hoisting EXPR.
2993
2994 Constant expressions are handled conservatively, because
2995 hoisting constant expressions aggressively results in worse
2996 code. This decision is based on observations of CSiBE
2997 on the ARM target; it has no obvious effect on other
2998 targets like x86, x86_64, mips and powerpc. */
2999 else if (CONST_INT_P (expr->expr)
3000 || (BB_DATA (bb)->max_reg_pressure[pressure_class]
3001 >= ira_class_hard_regs_num[pressure_class]
3002 && decreased_pressure < *nregs))
3003 distance -= bb_size[bb->index];
3004 }
3005 else
3006 distance -= bb_size[bb->index];
3007
3008 if (distance <= 0)
3009 return 0;
3010 }
3011 else
3012 gcc_assert (distance == 0);
3013
3014 if (visited == NULL)
3015 {
3016 visited_allocated_locally = 1;
3017 visited = sbitmap_alloc (last_basic_block);
3018 bitmap_clear (visited);
3019 }
3020
3021 FOR_EACH_EDGE (pred, ei, bb->preds)
3022 {
3023 basic_block pred_bb = pred->src;
3024
3025 if (pred->src == ENTRY_BLOCK_PTR)
3026 break;
3027 else if (pred_bb == expr_bb)
3028 continue;
3029 else if (bitmap_bit_p (visited, pred_bb->index))
3030 continue;
3031 else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
3032 break;
3033 /* Not killed. */
3034 else
3035 {
3036 bitmap_set_bit (visited, pred_bb->index);
3037 if (! should_hoist_expr_to_dom (expr_bb, expr, pred_bb,
3038 visited, distance, bb_size,
3039 pressure_class, nregs,
3040 hoisted_bbs, from))
3041 break;
3042 }
3043 }
3044 if (visited_allocated_locally)
3045 {
3046 /* If EXPR can be hoisted to expr_bb, record basic blocks through
3047 which EXPR is hoisted in hoisted_bbs. */
3048 if (flag_ira_hoist_pressure && !pred)
3049 {
3050 /* Record the basic block from which EXPR is hoisted. */
3051 bitmap_set_bit (visited, bb->index);
3052 EXECUTE_IF_SET_IN_BITMAP (visited, 0, i, sbi)
3053 bitmap_set_bit (hoisted_bbs, i);
3054 }
3055 sbitmap_free (visited);
3056 }
3057
3058 return (pred == NULL);
3059 }
3060 \f
3061 /* Find occurrence in BB. */
3062
3063 static struct occr *
3064 find_occr_in_bb (struct occr *occr, basic_block bb)
3065 {
3066 /* Find the right occurrence of this expression. */
3067 while (occr && BLOCK_FOR_INSN (occr->insn) != bb)
3068 occr = occr->next;
3069
3070 return occr;
3071 }
3072
3073 /* Actually perform code hoisting.
3074
3075 The code hoisting pass can hoist multiple computations of the same
3076 expression along dominated paths to a dominating basic block, like
3077 from b2/b3 to b1 as depicted below:
3078
3079 b1 ------
3080 /\ |
3081 / \ |
3082 bx by distance
3083 / \ |
3084 / \ |
3085 b2 b3 ------
3086
3087 Unfortunately code hoisting generally extends the live range of an
3088 output pseudo register, which increases register pressure and hurts
3089 register allocation. To address this issue, an attribute MAX_DISTANCE
3090 is computed and attached to each expression. The attribute is computed
3091 from the rtx cost of the corresponding expression and is used to control
3092 how far the expression can be hoisted up the flow graph. As the
3093 expression is hoisted up the flow graph, GCC decreases its DISTANCE
3094 and stops the hoist if DISTANCE reaches 0. Code hoisting can decrease
3095 register pressure if live ranges of inputs are shrunk.
3096
3097 Option "-fira-hoist-pressure" implements register-pressure-directed
3098 hoisting based on the above method. The rationale is:
3099 1. Calculate register pressure for each basic block by reusing the IRA
3100 facility.
3101 2. When an expression is hoisted through one basic block, GCC checks
3102 the change of live ranges for its inputs/output. The basic block's
3103 register pressure will be increased because of the extended live
3104 range of the output. However, register pressure will be decreased
3105 if the live ranges of inputs are shrunk.
3106 3. Once it knows how hoisting affects register pressure, GCC prefers
3107 to hoist the expression if doing so can decrease register pressure,
3108 by increasing the DISTANCE of the corresponding expression.
3109 4. If hoisting the expression increases register pressure, GCC checks
3110 the register pressure of the basic block and decreases DISTANCE only
3111 if the register pressure is high. In other words, the expression will
3112 be hoisted through the block at no cost if the basic block has low
3113 register pressure.
3114 5. Update register pressure information for basic blocks through
3115 which the expression is hoisted. */
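
/* Source-level effect of the basic transformation (an editorial
   sketch):

	before:			after:
	  if (c)		  t = a + b;
	    x = a + b;		  if (c)
	  else			    x = t;
	    y = a + b;		  else
				    y = t;

   "a + b" is very busy at the end of the dominating block, so both
   dominated computations become copies from the new pseudo T and a
   single evaluation is inserted at the end of the dominator.  */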
3116
3117 static int
3118 hoist_code (void)
3119 {
3120 basic_block bb, dominated;
3121 vec<basic_block> dom_tree_walk;
3122 unsigned int dom_tree_walk_index;
3123 vec<basic_block> domby;
3124 unsigned int i, j, k;
3125 struct expr **index_map;
3126 struct expr *expr;
3127 int *to_bb_head;
3128 int *bb_size;
3129 int changed = 0;
3130 struct bb_data *data;
3131 /* Basic blocks that have occurrences reachable from BB. */
3132 bitmap from_bbs;
3133 /* Basic blocks through which expr is hoisted. */
3134 bitmap hoisted_bbs = NULL;
3135 bitmap_iterator bi;
3136
3137 /* Compute a mapping from expression number (`bitmap_index') to
3138 hash table entry. */
3139
3140 index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
3141 for (i = 0; i < expr_hash_table.size; i++)
3142 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
3143 index_map[expr->bitmap_index] = expr;
3144
3145 /* Calculate sizes of basic blocks and note how far
3146 each instruction is from the start of its block. We then use this
3147 data to restrict the distance an expression can travel. */
3148
3149 to_bb_head = XCNEWVEC (int, get_max_uid ());
3150 bb_size = XCNEWVEC (int, last_basic_block);
3151
3152 FOR_EACH_BB (bb)
3153 {
3154 rtx insn;
3155 int to_head;
3156
3157 to_head = 0;
3158 FOR_BB_INSNS (bb, insn)
3159 {
3160 /* Don't count debug instructions, so that they do not affect
3161 hoisting decisions. */
3162 if (NONDEBUG_INSN_P (insn))
3163 to_bb_head[INSN_UID (insn)] = to_head++;
3164 }
3165
3166 bb_size[bb->index] = to_head;
3167 }
3168
3169 gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR->succs) == 1
3170 && (EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest
3171 == ENTRY_BLOCK_PTR->next_bb));
3172
3173 from_bbs = BITMAP_ALLOC (NULL);
3174 if (flag_ira_hoist_pressure)
3175 hoisted_bbs = BITMAP_ALLOC (NULL);
3176
3177 dom_tree_walk = get_all_dominated_blocks (CDI_DOMINATORS,
3178 ENTRY_BLOCK_PTR->next_bb);
3179
3180 /* Walk over each basic block looking for potentially hoistable
3181 expressions; nothing gets hoisted from the entry block. */
3182 FOR_EACH_VEC_ELT (dom_tree_walk, dom_tree_walk_index, bb)
3183 {
3184 domby = get_dominated_to_depth (CDI_DOMINATORS, bb, MAX_HOIST_DEPTH);
3185
3186 if (domby.length () == 0)
3187 continue;
3188
3189 /* Examine each expression that is very busy at the exit of this
3190 block. These are the potentially hoistable expressions. */
3191 for (i = 0; i < SBITMAP_SIZE (hoist_vbeout[bb->index]); i++)
3192 {
3193 if (bitmap_bit_p (hoist_vbeout[bb->index], i))
3194 {
3195 int nregs = 0;
3196 enum reg_class pressure_class = NO_REGS;
3197 /* Current expression. */
3198 struct expr *expr = index_map[i];
3199 /* Number of occurrences of EXPR that can be hoisted to BB. */
3200 int hoistable = 0;
3201 /* Occurrences reachable from BB. */
3202 vec<occr_t> occrs_to_hoist = vNULL;
3203 /* We want to insert the expression into BB only once, so
3204 note when we've inserted it. */
3205 int insn_inserted_p;
3206 occr_t occr;
3207
3208 /* If an expression is computed in BB and is available at end of
3209 BB, hoist all occurrences dominated by BB to BB. */
3210 if (bitmap_bit_p (comp[bb->index], i))
3211 {
3212 occr = find_occr_in_bb (expr->antic_occr, bb);
3213
3214 if (occr)
3215 {
3216 /* An occurrence might've been already deleted
3217 while processing a dominator of BB. */
3218 if (!occr->deleted_p)
3219 {
3220 gcc_assert (NONDEBUG_INSN_P (occr->insn));
3221 hoistable++;
3222 }
3223 }
3224 else
3225 hoistable++;
3226 }
3227
3228 /* We've found a potentially hoistable expression; now
3229 we look at every block BB dominates to see if it
3230 computes the expression. */
3231 FOR_EACH_VEC_ELT (domby, j, dominated)
3232 {
3233 int max_distance;
3234
3235 /* Ignore self dominance. */
3236 if (bb == dominated)
3237 continue;
3238 /* We've found a dominated block; now see if it computes
3239 the busy expression and whether or not moving that
3240 expression to the "beginning" of that block is safe. */
3241 if (!bitmap_bit_p (antloc[dominated->index], i))
3242 continue;
3243
3244 occr = find_occr_in_bb (expr->antic_occr, dominated);
3245 gcc_assert (occr);
3246
3247 /* An occurrence might've been already deleted
3248 while processing a dominator of BB. */
3249 if (occr->deleted_p)
3250 continue;
3251 gcc_assert (NONDEBUG_INSN_P (occr->insn));
3252
3253 max_distance = expr->max_distance;
3254 if (max_distance > 0)
3255 /* Adjust MAX_DISTANCE to account for the fact that
3256 OCCR won't have to travel all of DOMINATED, but
3257 only part of it. */
3258 max_distance += (bb_size[dominated->index]
3259 - to_bb_head[INSN_UID (occr->insn)]);
3260
3261 pressure_class = get_pressure_class_and_nregs (occr->insn,
3262 &nregs);
3263
3264 /* Note if the expression should be hoisted from the dominated
3265 block to BB if it can reach DOMINATED unimpaired.
3266
3267 Keep track of how many times this expression is hoistable
3268 from a dominated block into BB. */
3269 if (should_hoist_expr_to_dom (bb, expr, dominated, NULL,
3270 max_distance, bb_size,
3271 pressure_class, &nregs,
3272 hoisted_bbs, occr->insn))
3273 {
3274 hoistable++;
3275 occrs_to_hoist.safe_push (occr);
3276 bitmap_set_bit (from_bbs, dominated->index);
3277 }
3278 }
3279
3280 /* If we found more than one hoistable occurrence of this
3281 expression, then note it in the vector of expressions to
3282 hoist. It makes no sense to hoist things which are computed
3283 in only one BB, and doing so tends to pessimize register
3284 allocation. One could increase this value to try harder
3285 to avoid any possible code expansion due to register
3286 allocation issues; however experiments have shown that
3287 the vast majority of hoistable expressions are only movable
3288 from two successors, so raising this threshold is likely
3289 to nullify any benefit we get from code hoisting. */
3290 if (hoistable > 1 && dbg_cnt (hoist_insn))
3291 {
3292 /* If (hoistable != vec::length), then there is
3293 an occurrence of EXPR in BB itself. Don't waste
3294 time looking for LCA in this case. */
3295 if ((unsigned) hoistable == occrs_to_hoist.length ())
3296 {
3297 basic_block lca;
3298
3299 lca = nearest_common_dominator_for_set (CDI_DOMINATORS,
3300 from_bbs);
3301 if (lca != bb)
3302 /* Punt, it's better to hoist these occurrences to
3303 LCA. */
3304 occrs_to_hoist.release ();
3305 }
3306 }
3307 else
3308 /* Punt, no point hoisting a single occurrence. */
3309 occrs_to_hoist.release ();
3310
3311 if (flag_ira_hoist_pressure
3312 && !occrs_to_hoist.is_empty ())
3313 {
3314 /* Increase register pressure of basic blocks to which
3315 expr is hoisted because of extended live range of
3316 output. */
3317 data = BB_DATA (bb);
3318 data->max_reg_pressure[pressure_class] += nregs;
3319 EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
3320 {
3321 data = BB_DATA (BASIC_BLOCK (k));
3322 data->max_reg_pressure[pressure_class] += nregs;
3323 }
3324 }
3325 else if (flag_ira_hoist_pressure)
3326 {
3327 /* Restore register pressure and live_in info for basic
3328 blocks recorded in hoisted_bbs when expr will not be
3329 hoisted. */
3330 EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
3331 {
3332 data = BB_DATA (BASIC_BLOCK (k));
3333 bitmap_copy (data->live_in, data->backup);
3334 data->max_reg_pressure[pressure_class]
3335 = data->old_pressure;
3336 }
3337 }
3338
3339 if (flag_ira_hoist_pressure)
3340 bitmap_clear (hoisted_bbs);
3341
3342 insn_inserted_p = 0;
3343
3344 /* Walk through occurrences of the I'th expression we want
3345 to hoist to BB and make the transformations. */
3346 FOR_EACH_VEC_ELT (occrs_to_hoist, j, occr)
3347 {
3348 rtx insn;
3349 rtx set;
3350
3351 gcc_assert (!occr->deleted_p);
3352
3353 insn = occr->insn;
3354 set = single_set (insn);
3355 gcc_assert (set);
3356
3357 /* Create a pseudo-reg to store the result of reaching
3358 expressions into. Get the mode for the new pseudo
3359 from the mode of the original destination pseudo.
3360
3361 It is important to use new pseudos whenever we
3362 emit a set. This will allow reload to use
3363 rematerialization for such registers. */
3364 if (!insn_inserted_p)
3365 expr->reaching_reg
3366 = gen_reg_rtx_and_attrs (SET_DEST (set));
3367
3368 gcse_emit_move_after (SET_DEST (set), expr->reaching_reg,
3369 insn);
3370 delete_insn (insn);
3371 occr->deleted_p = 1;
3372 changed = 1;
3373 gcse_subst_count++;
3374
3375 if (!insn_inserted_p)
3376 {
3377 insert_insn_end_basic_block (expr, bb);
3378 insn_inserted_p = 1;
3379 }
3380 }
3381
3382 occrs_to_hoist.release ();
3383 bitmap_clear (from_bbs);
3384 }
3385 }
3386 domby.release ();
3387 }
3388
3389 dom_tree_walk.release ();
3390 BITMAP_FREE (from_bbs);
3391 if (flag_ira_hoist_pressure)
3392 BITMAP_FREE (hoisted_bbs);
3393
3394 free (bb_size);
3395 free (to_bb_head);
3396 free (index_map);
3397
3398 return changed;
3399 }
3400
3401 /* Return the pressure class and the number of hard registers needed
3402 (through *NREGS) for register REGNO. */
3403 static enum reg_class
3404 get_regno_pressure_class (int regno, int *nregs)
3405 {
3406 if (regno >= FIRST_PSEUDO_REGISTER)
3407 {
3408 enum reg_class pressure_class;
3409
3410 pressure_class = reg_allocno_class (regno);
3411 pressure_class = ira_pressure_class_translate[pressure_class];
3412 *nregs
3413 = ira_reg_class_max_nregs[pressure_class][PSEUDO_REGNO_MODE (regno)];
3414 return pressure_class;
3415 }
3416 else if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)
3417 && ! TEST_HARD_REG_BIT (eliminable_regset, regno))
3418 {
3419 *nregs = 1;
3420 return ira_pressure_class_translate[REGNO_REG_CLASS (regno)];
3421 }
3422 else
3423 {
3424 *nregs = 0;
3425 return NO_REGS;
3426 }
3427 }
3428
3429 /* Return the pressure class and number of hard registers (through *NREGS)
3430 for the destination of INSN. */
3431 static enum reg_class
3432 get_pressure_class_and_nregs (rtx insn, int *nregs)
3433 {
3434 rtx reg;
3435 enum reg_class pressure_class;
3436 rtx set = single_set (insn);
3437
3438 /* Insns considered invariant have only one set. */
3439 gcc_assert (set != NULL_RTX);
3440 reg = SET_DEST (set);
3441 if (GET_CODE (reg) == SUBREG)
3442 reg = SUBREG_REG (reg);
3443 if (MEM_P (reg))
3444 {
3445 *nregs = 0;
3446 pressure_class = NO_REGS;
3447 }
3448 else
3449 {
3450 gcc_assert (REG_P (reg));
3451 pressure_class = reg_allocno_class (REGNO (reg));
3452 pressure_class = ira_pressure_class_translate[pressure_class];
3453 *nregs
3454 = ira_reg_class_max_nregs[pressure_class][GET_MODE (SET_SRC (set))];
3455 }
3456 return pressure_class;
3457 }
3458
3459 /* Increase (if INCR_P) or decrease current register pressure for
3460 register REGNO. */
3461 static void
3462 change_pressure (int regno, bool incr_p)
3463 {
3464 int nregs;
3465 enum reg_class pressure_class;
3466
3467 pressure_class = get_regno_pressure_class (regno, &nregs);
3468 if (! incr_p)
3469 curr_reg_pressure[pressure_class] -= nregs;
3470 else
3471 {
3472 curr_reg_pressure[pressure_class] += nregs;
3473 if (BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
3474 < curr_reg_pressure[pressure_class])
3475 BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
3476 = curr_reg_pressure[pressure_class];
3477 }
3478 }
3479
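/* A mini-trace of the backward scan implemented below (hypothetical
   pseudos, all in one pressure class):

	live-out = {r2}				pressure = 1
	r2 <- r1 + 1	def r2 dies, use r1 born: live = {r1}
	r1 <- r3	def r1 dies, use r3 born: live = {r3}

   max_reg_pressure records the largest pressure seen at any point in
   the block; here it stays at 1 throughout.  */
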
3480 /* Calculate register pressure for each basic block by walking insns
3481 from last to first. */
3482 static void
3483 calculate_bb_reg_pressure (void)
3484 {
3485 int i;
3486 unsigned int j;
3487 rtx insn;
3488 basic_block bb;
3489 bitmap curr_regs_live;
3490 bitmap_iterator bi;
3491
3492
3493 ira_setup_eliminable_regset (false);
3494 curr_regs_live = BITMAP_ALLOC (&reg_obstack);
3495 FOR_EACH_BB (bb)
3496 {
3497 curr_bb = bb;
3498 BB_DATA (bb)->live_in = BITMAP_ALLOC (NULL);
3499 BB_DATA (bb)->backup = BITMAP_ALLOC (NULL);
3500 bitmap_copy (BB_DATA (bb)->live_in, df_get_live_in (bb));
3501 bitmap_copy (curr_regs_live, df_get_live_out (bb));
3502 for (i = 0; i < ira_pressure_classes_num; i++)
3503 curr_reg_pressure[ira_pressure_classes[i]] = 0;
3504 EXECUTE_IF_SET_IN_BITMAP (curr_regs_live, 0, j, bi)
3505 change_pressure (j, true);
3506
3507 FOR_BB_INSNS_REVERSE (bb, insn)
3508 {
3509 rtx dreg;
3510 int regno;
3511 df_ref *def_rec, *use_rec;
3512
3513 if (! NONDEBUG_INSN_P (insn))
3514 continue;
3515
3516 for (def_rec = DF_INSN_DEFS (insn); *def_rec; def_rec++)
3517 {
3518 dreg = DF_REF_REAL_REG (*def_rec);
3519 gcc_assert (REG_P (dreg));
3520 regno = REGNO (dreg);
3521 if (!(DF_REF_FLAGS (*def_rec)
3522 & (DF_REF_PARTIAL | DF_REF_CONDITIONAL)))
3523 {
3524 if (bitmap_clear_bit (curr_regs_live, regno))
3525 change_pressure (regno, false);
3526 }
3527 }
3528
3529 for (use_rec = DF_INSN_USES (insn); *use_rec; use_rec++)
3530 {
3531 dreg = DF_REF_REAL_REG (*use_rec);
3532 gcc_assert (REG_P (dreg));
3533 regno = REGNO (dreg);
3534 if (bitmap_set_bit (curr_regs_live, regno))
3535 change_pressure (regno, true);
3536 }
3537 }
3538 }
3539 BITMAP_FREE (curr_regs_live);
3540
3541 if (dump_file == NULL)
3542 return;
3543
3544 fprintf (dump_file, "\nRegister Pressure: \n");
3545 FOR_EACH_BB (bb)
3546 {
3547 fprintf (dump_file, " Basic block %d: \n", bb->index);
3548 for (i = 0; i < ira_pressure_classes_num; i++)
3549 {
3550 enum reg_class pressure_class;
3551
3552 pressure_class = ira_pressure_classes[i];
3553 if (BB_DATA (bb)->max_reg_pressure[pressure_class] == 0)
3554 continue;
3555
3556 fprintf (dump_file, " %s=%d\n", reg_class_names[pressure_class],
3557 BB_DATA (bb)->max_reg_pressure[pressure_class]);
3558 }
3559 }
3560 fprintf (dump_file, "\n");
3561 }
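/* The dump emitted above looks like this (a sketch; the class names
and pressure values are target- and function-dependent):

Register Pressure:
Basic block 2:
GENERAL_REGS=3 */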
3562
3563 /* Top level routine to perform one code hoisting (aka unification) pass.
3564
3565 Return nonzero if a change was made. */
3566
3567 static int
3568 one_code_hoisting_pass (void)
3569 {
3570 int changed = 0;
3571
3572 gcse_subst_count = 0;
3573 gcse_create_count = 0;
3574
3575 /* Return if there's nothing to do, or it is too expensive. */
3576 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
3577 || is_too_expensive (_("GCSE disabled")))
3578 return 0;
3579
3580 doing_code_hoisting_p = true;
3581
3582 /* Calculate register pressure for each basic block. */
3583 if (flag_ira_hoist_pressure)
3584 {
3585 regstat_init_n_sets_and_refs ();
3586 ira_set_pseudo_classes (false, dump_file);
3587 alloc_aux_for_blocks (sizeof (struct bb_data));
3588 calculate_bb_reg_pressure ();
3589 regstat_free_n_sets_and_refs ();
3590 }
3591
3592 /* We need alias. */
3593 init_alias_analysis ();
3594
3595 bytes_used = 0;
3596 gcc_obstack_init (&gcse_obstack);
3597 alloc_gcse_mem ();
3598
3599 alloc_hash_table (&expr_hash_table);
3600 compute_hash_table (&expr_hash_table);
3601 if (dump_file)
3602 dump_hash_table (dump_file, "Code Hoisting Expressions", &expr_hash_table);
3603
3604 if (expr_hash_table.n_elems > 0)
3605 {
3606 alloc_code_hoist_mem (last_basic_block, expr_hash_table.n_elems);
3607 compute_code_hoist_data ();
3608 changed = hoist_code ();
3609 free_code_hoist_mem ();
3610 }
3611
3612 if (flag_ira_hoist_pressure)
3613 {
3614 free_aux_for_blocks ();
3615 free_reg_info ();
3616 }
3617 free_hash_table (&expr_hash_table);
3618 free_gcse_mem ();
3619 obstack_free (&gcse_obstack, NULL);
3620
3621 /* We are finished with alias. */
3622 end_alias_analysis ();
3623
3624 if (dump_file)
3625 {
3626 fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
3627 current_function_name (), n_basic_blocks, bytes_used);
3628 fprintf (dump_file, "%d substs, %d insns created\n",
3629 gcse_subst_count, gcse_create_count);
3630 }
3631
3632 doing_code_hoisting_p = false;
3633
3634 return changed;
3635 }
3636 \f
3637 /* Here we provide the things required to do store motion towards the exit.
3638 In order for this to be effective, gcse also needed to be taught how to
3639 move a load when it is killed only by a store to itself.
3640
3641 int i;
3642 float a[10];
3643
3644 void foo(float scale)
3645 {
3646 for (i=0; i<10; i++)
3647 a[i] *= scale;
3648 }
3649
3650 'i' is both loaded and stored to in the loop. Normally, gcse cannot move
3651 the load out since it's live around the loop, and stored at the bottom
3652 of the loop.
3653
3654 The 'Load Motion' referred to and implemented in this file is
3655 an enhancement to gcse which when using edge based LCM, recognizes
3656 this situation and allows gcse to move the load out of the loop.
3657
3658 Once gcse has hoisted the load, store motion can then push this
3659 load towards the exit, and we end up with no loads or stores of 'i'
3660 in the loop. */
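/* For the example above, the combined effect is as if the source had
been rewritten to (a sketch; 'tmp' stands for the reaching register):

void foo (float scale)
{
int tmp;
for (tmp = 0; tmp < 10; tmp++)
a[tmp] *= scale;
i = tmp;
} */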
3661
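/* Hash an ls_expr by the hash of its MEM pattern, for use as the
pre_ldst_table htab hash callback. */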
3662 static hashval_t
3663 pre_ldst_expr_hash (const void *p)
3664 {
3665 int do_not_record_p = 0;
3666 const struct ls_expr *const x = (const struct ls_expr *) p;
3667 return
3668 hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
3669 }
3670
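/* Equality callback for the pre_ldst_table htab: two ls_expr entries
are equal when their MEM patterns are equivalent. */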
3671 static int
3672 pre_ldst_expr_eq (const void *p1, const void *p2)
3673 {
3674 const struct ls_expr *const ptr1 = (const struct ls_expr *) p1,
3675 *const ptr2 = (const struct ls_expr *) p2;
3676 return expr_equiv_p (ptr1->pattern, ptr2->pattern);
3677 }
3678
3679 /* This will search the ldst list for a matching expression. If it
3680 doesn't find one, we create one and initialize it. */
3681
3682 static struct ls_expr *
3683 ldst_entry (rtx x)
3684 {
3685 int do_not_record_p = 0;
3686 struct ls_expr * ptr;
3687 unsigned int hash;
3688 void **slot;
3689 struct ls_expr e;
3690
3691 hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
3692 NULL, /*have_reg_qty=*/false);
3693
3694 e.pattern = x;
3695 slot = htab_find_slot_with_hash (pre_ldst_table, &e, hash, INSERT);
3696 if (*slot)
3697 return (struct ls_expr *)*slot;
3698
3699 ptr = XNEW (struct ls_expr);
3700
3701 ptr->next = pre_ldst_mems;
3702 ptr->expr = NULL;
3703 ptr->pattern = x;
3704 ptr->pattern_regs = NULL_RTX;
3705 ptr->loads = NULL_RTX;
3706 ptr->stores = NULL_RTX;
3707 ptr->reaching_reg = NULL_RTX;
3708 ptr->invalid = 0;
3709 ptr->index = 0;
3710 ptr->hash_index = hash;
3711 pre_ldst_mems = ptr;
3712 *slot = ptr;
3713
3714 return ptr;
3715 }
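/* A typical use, as in compute_ld_motion_mems below, records the insn
containing the access on the returned entry:

ptr = ldst_entry (src);
ptr->loads = alloc_INSN_LIST (insn, ptr->loads); */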
3716
3717 /* Free up an individual ldst entry. */
3718
3719 static void
3720 free_ldst_entry (struct ls_expr * ptr)
3721 {
3722 free_INSN_LIST_list (& ptr->loads);
3723 free_INSN_LIST_list (& ptr->stores);
3724
3725 free (ptr);
3726 }
3727
3728 /* Free up all memory associated with the ldst list. */
3729
3730 static void
3731 free_ld_motion_mems (void)
3732 {
3733 if (pre_ldst_table)
3734 htab_delete (pre_ldst_table);
3735 pre_ldst_table = NULL;
3736
3737 while (pre_ldst_mems)
3738 {
3739 struct ls_expr * tmp = pre_ldst_mems;
3740
3741 pre_ldst_mems = pre_ldst_mems->next;
3742
3743 free_ldst_entry (tmp);
3744 }
3745
3746 pre_ldst_mems = NULL;
3747 }
3748
3749 /* Dump debugging info about the ldst list. */
3750
3751 static void
3752 print_ldst_list (FILE * file)
3753 {
3754 struct ls_expr * ptr;
3755
3756 fprintf (file, "LDST list: \n");
3757
3758 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
3759 {
3760 fprintf (file, " Pattern (%3d): ", ptr->index);
3761
3762 print_rtl (file, ptr->pattern);
3763
3764 fprintf (file, "\n Loads : ");
3765
3766 if (ptr->loads)
3767 print_rtl (file, ptr->loads);
3768 else
3769 fprintf (file, "(nil)");
3770
3771 fprintf (file, "\n Stores : ");
3772
3773 if (ptr->stores)
3774 print_rtl (file, ptr->stores);
3775 else
3776 fprintf (file, "(nil)");
3777
3778 fprintf (file, "\n\n");
3779 }
3780
3781 fprintf (file, "\n");
3782 }
3783
3784 /* Return the ldst list entry for X, or NULL if there is none or it is invalid. */
3785
3786 static struct ls_expr *
3787 find_rtx_in_ldst (rtx x)
3788 {
3789 struct ls_expr e;
3790 void **slot;
3791 if (!pre_ldst_table)
3792 return NULL;
3793 e.pattern = x;
3794 slot = htab_find_slot (pre_ldst_table, &e, NO_INSERT);
3795 if (!slot || ((struct ls_expr *)*slot)->invalid)
3796 return NULL;
3797 return (struct ls_expr *) *slot;
3798 }
3799 \f
3800 /* Load Motion for loads which only kill themselves. */
3801
3802 /* Return 1 if X, a MEM, is a simple access with no side effects.
3803 These are the types of accesses we consider for the ld_motion list;
3804 otherwise we let the usual aliasing take care of it. */
3805
3806 static int
3807 simple_mem (const_rtx x)
3808 {
3809 if (MEM_VOLATILE_P (x))
3810 return 0;
3811
3812 if (GET_MODE (x) == BLKmode)
3813 return 0;
3814
3815 /* If we are handling exceptions, we must be careful with memory references
3816 that may trap. If we are not, the behavior is undefined, so we may just
3817 continue. */
3818 if (cfun->can_throw_non_call_exceptions && may_trap_p (x))
3819 return 0;
3820
3821 if (side_effects_p (x))
3822 return 0;
3823
3824 /* Do not consider function arguments passed on the stack. */
3825 if (reg_mentioned_p (stack_pointer_rtx, x))
3826 return 0;
3827
3828 if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
3829 return 0;
3830
3831 return 1;
3832 }
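/* By these rules a plain global access such as
(mem:SI (symbol_ref:SI ("x"))) is simple, while a volatile MEM, a
BLKmode MEM, or a stack slot like
(mem:SI (plus:SI (reg:SI sp) (const_int 4))) is not (a sketch; the
precise RTL shapes are target-dependent). */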
3833
3834 /* Make sure there isn't a buried reference in this pattern anywhere.
3835 If there is, invalidate the entry for it since we're not capable
3836 of fixing it up just yet. We have to be sure we know about ALL
3837 loads since the aliasing code will allow each entry in the
3838 ld_motion list to not-alias itself. If we miss a load, we will get
3839 the wrong value since gcse might common it and we won't know to
3840 fix it up. */
3841
3842 static void
3843 invalidate_any_buried_refs (rtx x)
3844 {
3845 const char * fmt;
3846 int i, j;
3847 struct ls_expr * ptr;
3848
3849 /* Invalidate it in the list. */
3850 if (MEM_P (x) && simple_mem (x))
3851 {
3852 ptr = ldst_entry (x);
3853 ptr->invalid = 1;
3854 }
3855
3856 /* Recursively process the insn. */
3857 fmt = GET_RTX_FORMAT (GET_CODE (x));
3858
3859 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3860 {
3861 if (fmt[i] == 'e')
3862 invalidate_any_buried_refs (XEXP (x, i));
3863 else if (fmt[i] == 'E')
3864 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3865 invalidate_any_buried_refs (XVECEXP (x, i, j));
3866 }
3867 }
3868
3869 /* Find all the 'simple' MEMs which are used in LOADs and STORES. Simple
3870 means a MEM load or store to a symbol, with no side effects
3871 and no registers in the expression. For a MEM destination, we also
3872 check that the insn is still valid if we replace the destination with a
3873 REG, as is done in update_ld_motion_stores. If there are any uses/defs
3874 which don't match these criteria, they are invalidated and trimmed out
3875 later. */
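/* In RTL terms, the patterns recognized below have the shapes (sketch):

(set (reg:SI 100) (mem:SI (symbol_ref:SI ("x")))) simple LOAD
(set (mem:SI (symbol_ref:SI ("x"))) (reg:SI 100)) simple STORE */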
3876
3877 static void
3878 compute_ld_motion_mems (void)
3879 {
3880 struct ls_expr * ptr;
3881 basic_block bb;
3882 rtx insn;
3883
3884 pre_ldst_mems = NULL;
3885 pre_ldst_table
3886 = htab_create (13, pre_ldst_expr_hash, pre_ldst_expr_eq, NULL);
3887
3888 FOR_EACH_BB (bb)
3889 {
3890 FOR_BB_INSNS (bb, insn)
3891 {
3892 if (NONDEBUG_INSN_P (insn))
3893 {
3894 if (GET_CODE (PATTERN (insn)) == SET)
3895 {
3896 rtx src = SET_SRC (PATTERN (insn));
3897 rtx dest = SET_DEST (PATTERN (insn));
3898
3899 /* Check for a simple LOAD... */
3900 if (MEM_P (src) && simple_mem (src))
3901 {
3902 ptr = ldst_entry (src);
3903 if (REG_P (dest))
3904 ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
3905 else
3906 ptr->invalid = 1;
3907 }
3908 else
3909 {
3910 /* Make sure there isn't a buried load somewhere. */
3911 invalidate_any_buried_refs (src);
3912 }
3913
3914 /* Check for stores. Don't worry about aliased ones, they
3915 will block any movement we might do later. We only care
3916 about this exact pattern since that is the only
3917 circumstance in which we will ignore the aliasing info. */
3918 if (MEM_P (dest) && simple_mem (dest))
3919 {
3920 ptr = ldst_entry (dest);
3921
3922 if (! MEM_P (src)
3923 && GET_CODE (src) != ASM_OPERANDS
3924 /* Check for REG manually since want_to_gcse_p
3925 returns 0 for all REGs. */
3926 && can_assign_to_reg_without_clobbers_p (src))
3927 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
3928 else
3929 ptr->invalid = 1;
3930 }
3931 }
3932 else
3933 invalidate_any_buried_refs (PATTERN (insn));
3934 }
3935 }
3936 }
3937 }
3938
3939 /* Remove any references that have been either invalidated or are not in the
3940 expression list for pre gcse. */
3941
3942 static void
3943 trim_ld_motion_mems (void)
3944 {
3945 struct ls_expr * * last = & pre_ldst_mems;
3946 struct ls_expr * ptr = pre_ldst_mems;
3947
3948 while (ptr != NULL)
3949 {
3950 struct expr * expr;
3951
3952 /* Delete if entry has been made invalid. */
3953 if (! ptr->invalid)
3954 {
3955 /* Delete if we cannot find this mem in the expression list. */
3956 unsigned int hash = ptr->hash_index % expr_hash_table.size;
3957
3958 for (expr = expr_hash_table.table[hash];
3959 expr != NULL;
3960 expr = expr->next_same_hash)
3961 if (expr_equiv_p (expr->expr, ptr->pattern))
3962 break;
3963 }
3964 else
3965 expr = (struct expr *) 0;
3966
3967 if (expr)
3968 {
3969 /* Set the expression field if we are keeping it. */
3970 ptr->expr = expr;
3971 last = & ptr->next;
3972 ptr = ptr->next;
3973 }
3974 else
3975 {
3976 *last = ptr->next;
3977 htab_remove_elt_with_hash (pre_ldst_table, ptr, ptr->hash_index);
3978 free_ldst_entry (ptr);
3979 ptr = * last;
3980 }
3981 }
3982
3983 /* Show the world what we've found. */
3984 if (dump_file && pre_ldst_mems != NULL)
3985 print_ldst_list (dump_file);
3986 }
3987
3988 /* This routine will take an expression which we are replacing with
3989 a reaching register, and update any stores that are needed if
3990 that expression is in the ld_motion list. Stores are updated by
3991 copying their SRC to the reaching register, and then storing
3992 the reaching register into the store location. This keeps the
3993 correct value in the reaching register for the loads. */
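/* Schematically, each recorded store

(set (mem) (expr))

is rewritten below into the pair

(set (reg) (expr))
(set (mem) (reg))

where reg is the expression's reaching register. */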
3994
3995 static void
3996 update_ld_motion_stores (struct expr * expr)
3997 {
3998 struct ls_expr * mem_ptr;
3999
4000 if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
4001 {
4002 /* We could try to find just the REACHED stores, but it shouldn't
4003 matter if we set the reaching reg everywhere... some might be
4004 dead and should be eliminated later. */
4005
4006 /* We replace (set mem expr) with (set reg expr) (set mem reg)
4007 where reg is the reaching reg used in the load. We checked in
4008 compute_ld_motion_mems that we can replace (set mem expr) with
4009 (set reg expr) in that insn. */
4010 rtx list = mem_ptr->stores;
4011
4012 for ( ; list != NULL_RTX; list = XEXP (list, 1))
4013 {
4014 rtx insn = XEXP (list, 0);
4015 rtx pat = PATTERN (insn);
4016 rtx src = SET_SRC (pat);
4017 rtx reg = expr->reaching_reg;
4018 rtx copy;
4019
4020 /* If we've already copied it, continue. */
4021 if (expr->reaching_reg == src)
4022 continue;
4023
4024 if (dump_file)
4025 {
4026 fprintf (dump_file, "PRE: store updated with reaching reg ");
4027 print_rtl (dump_file, reg);
4028 fprintf (dump_file, ":\n ");
4029 print_inline_rtx (dump_file, insn, 8);
4030 fprintf (dump_file, "\n");
4031 }
4032
4033 copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
4034 emit_insn_before (copy, insn);
4035 SET_SRC (pat) = reg;
4036 df_insn_rescan (insn);
4037
4038 /* Un-recognize this pattern since it's probably different now. */
4039 INSN_CODE (insn) = -1;
4040 gcse_create_count++;
4041 }
4042 }
4043 }
4044 \f
4045 /* Return true if the graph is too expensive to optimize. PASS is the
4046 optimization about to be performed. */
4047
4048 static bool
4049 is_too_expensive (const char *pass)
4050 {
4051 /* Trying to perform global optimizations on flow graphs which have
4052 a high connectivity will take a long time and is unlikely to be
4053 particularly useful.
4054
4055 In normal circumstances a cfg should have about twice as many
4056 edges as blocks. But we do not want to punish small functions
4057 which have a couple of switch statements. Rather than simply
4058 thresholding the number of blocks, use something that degrades
4059 more gracefully. */
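/* For example, a function with 1000 basic blocks may have up to
20000 + 4 * 1000 = 24000 edges before this check fires. */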
4060 if (n_edges > 20000 + n_basic_blocks * 4)
4061 {
4062 warning (OPT_Wdisabled_optimization,
4063 "%s: %d basic blocks and %d edges/basic block",
4064 pass, n_basic_blocks, n_edges / n_basic_blocks);
4065
4066 return true;
4067 }
4068
4069 /* If allocating memory for the dataflow bitmaps would take up too much
4070 storage it's better just to disable the optimization. */
4071 if ((n_basic_blocks
4072 * SBITMAP_SET_SIZE (max_reg_num ())
4073 * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
4074 {
4075 warning (OPT_Wdisabled_optimization,
4076 "%s: %d basic blocks and %d registers",
4077 pass, n_basic_blocks, max_reg_num ());
4078
4079 return true;
4080 }
4081
4082 return false;
4083 }
4084 \f
4085 /* All the passes implemented in this file. Each pass has its
4086 own gate and execute function, and at the end of the file a
4087 pass definition for passes.c.
4088
4089 We do not construct an accurate cfg in functions which call
4090 setjmp, so none of these passes runs if the function calls
4091 setjmp.
4092 FIXME: Should just handle setjmp via REG_SETJMP notes. */
4093
4094 static bool
4095 gate_rtl_pre (void)
4096 {
4097 return optimize > 0 && flag_gcse
4098 && !cfun->calls_setjmp
4099 && optimize_function_for_speed_p (cfun)
4100 && dbg_cnt (pre);
4101 }
4102
4103 static unsigned int
4104 execute_rtl_pre (void)
4105 {
4106 int changed;
4107 delete_unreachable_blocks ();
4108 df_analyze ();
4109 changed = one_pre_gcse_pass ();
4110 flag_rerun_cse_after_global_opts |= changed;
4111 if (changed)
4112 cleanup_cfg (0);
4113 return 0;
4114 }
4115
4116 static bool
4117 gate_rtl_hoist (void)
4118 {
4119 return optimize > 0 && flag_gcse
4120 && !cfun->calls_setjmp
4121 /* It does not make sense to run code hoisting unless we are optimizing
4122 for code size -- it rarely makes programs faster, and can make them
4123 bigger if we did PRE (when optimizing for space, we don't run PRE). */
4124 && optimize_function_for_size_p (cfun)
4125 && dbg_cnt (hoist);
4126 }
4127
4128 static unsigned int
4129 execute_rtl_hoist (void)
4130 {
4131 int changed;
4132 delete_unreachable_blocks ();
4133 df_analyze ();
4134 changed = one_code_hoisting_pass ();
4135 flag_rerun_cse_after_global_opts |= changed;
4136 if (changed)
4137 cleanup_cfg (0);
4138 return 0;
4139 }
4140
4141 struct rtl_opt_pass pass_rtl_pre =
4142 {
4143 {
4144 RTL_PASS,
4145 "rtl pre", /* name */
4146 OPTGROUP_NONE, /* optinfo_flags */
4147 gate_rtl_pre, /* gate */
4148 execute_rtl_pre, /* execute */
4149 NULL, /* sub */
4150 NULL, /* next */
4151 0, /* static_pass_number */
4152 TV_PRE, /* tv_id */
4153 PROP_cfglayout, /* properties_required */
4154 0, /* properties_provided */
4155 0, /* properties_destroyed */
4156 0, /* todo_flags_start */
4157 TODO_df_finish | TODO_verify_rtl_sharing |
4158 TODO_verify_flow | TODO_ggc_collect /* todo_flags_finish */
4159 }
4160 };
4161
4162 struct rtl_opt_pass pass_rtl_hoist =
4163 {
4164 {
4165 RTL_PASS,
4166 "hoist", /* name */
4167 OPTGROUP_NONE, /* optinfo_flags */
4168 gate_rtl_hoist, /* gate */
4169 execute_rtl_hoist, /* execute */
4170 NULL, /* sub */
4171 NULL, /* next */
4172 0, /* static_pass_number */
4173 TV_HOIST, /* tv_id */
4174 PROP_cfglayout, /* properties_required */
4175 0, /* properties_provided */
4176 0, /* properties_destroyed */
4177 0, /* todo_flags_start */
4178 TODO_df_finish | TODO_verify_rtl_sharing |
4179 TODO_verify_flow | TODO_ggc_collect /* todo_flags_finish */
4180 }
4181 };
4182
4183 #include "gt-gcse.h"