[multiple changes]
[gcc.git] / gcc / gcse.c
1 /* Partial redundancy elimination / Hoisting for RTL.
2 Copyright (C) 1997-2013 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* TODO
21 - reordering of memory allocation and freeing to be more space efficient
22 - calc rough register pressure information and use the info to drive all
23 kinds of code motion (including code hoisting) in a unified way.
24 */
25
26 /* References searched while implementing this.
27
28 Compilers Principles, Techniques and Tools
29 Aho, Sethi, Ullman
30 Addison-Wesley, 1988
31
32 Global Optimization by Suppression of Partial Redundancies
33 E. Morel, C. Renvoise
34 communications of the acm, Vol. 22, Num. 2, Feb. 1979
35
36 A Portable Machine-Independent Global Optimizer - Design and Measurements
37 Frederick Chow
38 Stanford Ph.D. thesis, Dec. 1983
39
40 A Fast Algorithm for Code Movement Optimization
41 D.M. Dhamdhere
42 SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988
43
44 A Solution to a Problem with Morel and Renvoise's
45 Global Optimization by Suppression of Partial Redundancies
46 K-H Drechsler, M.P. Stadel
47 ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988
48
49 Practical Adaptation of the Global Optimization
50 Algorithm of Morel and Renvoise
51 D.M. Dhamdhere
52 ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991
53
54 Efficiently Computing Static Single Assignment Form and the Control
55 Dependence Graph
56 R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
57 ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991
58
59 Lazy Code Motion
60 J. Knoop, O. Ruthing, B. Steffen
61 ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
62
63 What's In a Region? Or Computing Control Dependence Regions in Near-Linear
64 Time for Reducible Flow Control
65 Thomas Ball
66 ACM Letters on Programming Languages and Systems,
67 Vol. 2, Num. 1-4, Mar-Dec 1993
68
69 An Efficient Representation for Sparse Sets
70 Preston Briggs, Linda Torczon
71 ACM Letters on Programming Languages and Systems,
72 Vol. 2, Num. 1-4, Mar-Dec 1993
73
74 A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
75 K-H Drechsler, M.P. Stadel
76 ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993
77
78 Partial Dead Code Elimination
79 J. Knoop, O. Ruthing, B. Steffen
80 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
81
82 Effective Partial Redundancy Elimination
83 P. Briggs, K.D. Cooper
84 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
85
86 The Program Structure Tree: Computing Control Regions in Linear Time
87 R. Johnson, D. Pearson, K. Pingali
88 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
89
90 Optimal Code Motion: Theory and Practice
91 J. Knoop, O. Ruthing, B. Steffen
92 ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994
93
94 The power of assignment motion
95 J. Knoop, O. Ruthing, B. Steffen
96 ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
97
98 Global code motion / global value numbering
99 C. Click
100 ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
101
102 Value Driven Redundancy Elimination
103 L.T. Simpson
104 Rice University Ph.D. thesis, Apr. 1996
105
106 Value Numbering
107 L.T. Simpson
108 Massively Scalar Compiler Project, Rice University, Sep. 1996
109
110 High Performance Compilers for Parallel Computing
111 Michael Wolfe
112 Addison-Wesley, 1996
113
114 Advanced Compiler Design and Implementation
115 Steven Muchnick
116 Morgan Kaufmann, 1997
117
118 Building an Optimizing Compiler
119 Robert Morgan
120 Digital Press, 1998
121
122 People wishing to speed up the code here should read:
123 Elimination Algorithms for Data Flow Analysis
124 B.G. Ryder, M.C. Paull
125 ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986
126
127 How to Analyze Large Programs Efficiently and Informatively
128 D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
129 ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
130
131 People wishing to do something different can find various possibilities
132 in the above papers and elsewhere.
133 */
134
135 #include "config.h"
136 #include "system.h"
137 #include "coretypes.h"
138 #include "tm.h"
139 #include "diagnostic-core.h"
140 #include "toplev.h"
141
142 #include "hard-reg-set.h"
143 #include "rtl.h"
144 #include "tree.h"
145 #include "tm_p.h"
146 #include "regs.h"
147 #include "ira.h"
148 #include "flags.h"
149 #include "insn-config.h"
150 #include "recog.h"
151 #include "basic-block.h"
152 #include "function.h"
153 #include "expr.h"
154 #include "except.h"
155 #include "ggc.h"
156 #include "params.h"
157 #include "cselib.h"
158 #include "intl.h"
159 #include "obstack.h"
160 #include "tree-pass.h"
161 #include "hashtab.h"
162 #include "df.h"
163 #include "dbgcnt.h"
164 #include "target.h"
165 #include "gcse.h"
166
167 /* We support GCSE via Partial Redundancy Elimination. PRE optimizations
168 are a superset of those done by classic GCSE.
169
170 Two passes of copy/constant propagation are done around PRE or hoisting
171 because the first one enables more GCSE and the second one helps to clean
172 up the copies that PRE and HOIST create. This is needed more for PRE than
173 for HOIST because code hoisting will try to use an existing register
174 containing the common subexpression rather than create a new one. This is
175 harder to do for PRE because of the code motion (which HOIST doesn't do).
176
177 Expressions we are interested in GCSE-ing are of the form
178 (set (pseudo-reg) (expression)).
179 Function want_to_gcse_p says what these are.
180
181 In addition, expressions in REG_EQUAL notes are candidates for GCSE-ing.
182 This allows PRE to hoist expressions that are expressed in multiple insns,
183 such as complex address calculations (e.g. for PIC code, or loads with a
184 high part and a low part).
185
186 PRE handles moving invariant expressions out of loops (by treating them as
187 partially redundant).
188
189 **********************
190
191 We used to support multiple passes but there are diminishing returns in
192 doing so. The first pass usually makes 90% of the changes that are doable.
193 A second pass can make a few more changes made possible by the first pass.
194 Experiments show any further passes don't make enough changes to justify
195 the expense.
196
197 A study of spec92 using an unlimited number of passes:
198 [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
199 [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
200 [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1
201
202 It was found doing copy propagation between each pass enables further
203 substitutions.
204
205 This study was done before expressions in REG_EQUAL notes were added as
206 candidate expressions for optimization, and before the GIMPLE optimizers
207 were added. Probably, multiple passes is even less efficient now than
208 at the time when the study was conducted.
209
210 PRE is quite expensive in complicated functions because the DFA can take
211 a while to converge. Hence we only perform one pass.
212
213 **********************
214
215 The steps for PRE are:
216
217 1) Build the hash table of expressions we wish to GCSE (expr_hash_table).
218
219 2) Perform the data flow analysis for PRE.
220
221 3) Delete the redundant instructions
222
223 4) Insert the required copies [if any] that make the partially
224 redundant instructions fully redundant.
225
226 5) For other reaching expressions, insert an instruction to copy the value
227 to a newly created pseudo that will reach the redundant instruction.
228
229 The deletion is done first so that when we do insertions we
230 know which pseudo reg to use.
231
232 Various papers have argued that PRE DFA is expensive (O(n^2)) and others
233 argue it is not. The number of iterations for the algorithm to converge
234 is typically 2-4 so I don't view it as that expensive (relatively speaking).
235
236 PRE GCSE depends heavily on the second CPROP pass to clean up the copies
237 we create. To make an expression reach the place where it's redundant,
238 the result of the expression is copied to a new register, and the redundant
239 expression is deleted by replacing it with this new register. Classic GCSE
240 doesn't have this problem as much as it computes the reaching defs of
241 each register in each block and thus can try to use an existing
242 register. */
243 \f
244 /* GCSE global vars. */
245
246 struct target_gcse default_target_gcse;
247 #if SWITCHABLE_TARGET
248 struct target_gcse *this_target_gcse = &default_target_gcse;
249 #endif
250
251 /* Set to non-zero if CSE should run after all GCSE optimizations are done. */
252 int flag_rerun_cse_after_global_opts;
253
254 /* An obstack for our working variables. */
255 static struct obstack gcse_obstack;
256
257 /* Hash table of expressions. */
258
259 struct expr
260 {
261 /* The expression. */
262 rtx expr;
263 /* Index in the available expression bitmaps. */
264 int bitmap_index;
265 /* Next entry with the same hash. */
266 struct expr *next_same_hash;
267 /* List of anticipatable occurrences in basic blocks in the function.
268 An "anticipatable occurrence" is one that is the first occurrence in the
269 basic block, the operands are not modified in the basic block prior
270 to the occurrence and the output is not used between the start of
271 the block and the occurrence. */
272 struct occr *antic_occr;
273 /* List of available occurrence in basic blocks in the function.
274 An "available occurrence" is one that is the last occurrence in the
275 basic block and the operands are not modified by following statements in
276 the basic block [including this insn]. */
277 struct occr *avail_occr;
278 /* Non-null if the computation is PRE redundant.
279 The value is the newly created pseudo-reg to record a copy of the
280 expression in all the places that reach the redundant copy. */
281 rtx reaching_reg;
282 /* Maximum distance in instructions this expression can travel.
283 We avoid moving simple expressions for more than a few instructions
284 to keep register pressure under control.
285 A value of "0" removes restrictions on how far the expression can
286 travel. */
287 int max_distance;
288 };
289
290 /* Occurrence of an expression.
291 There is one per basic block. If a pattern appears more than once the
292 last appearance is used [or first for anticipatable expressions]. */
293
294 struct occr
295 {
296 /* Next occurrence of this expression. */
297 struct occr *next;
298 /* The insn that computes the expression. */
299 rtx insn;
300 /* Nonzero if this [anticipatable] occurrence has been deleted. */
301 char deleted_p;
302 /* Nonzero if this [available] occurrence has been copied to
303 reaching_reg. */
304 /* ??? This is mutually exclusive with deleted_p, so they could share
305 the same byte. */
306 char copied_p;
307 };
308
309 typedef struct occr *occr_t;
310
311 /* Expression hash tables.
312 Each hash table is an array of buckets.
313 ??? It is known that if it were an array of entries, structure elements
314 `next_same_hash' and `bitmap_index' wouldn't be necessary. However, it is
315 not clear whether in the final analysis a sufficient amount of memory would
316 be saved as the size of the available expression bitmaps would be larger
317 [one could build a mapping table without holes afterwards though].
318 Someday I'll perform the computation and figure it out. */
319
320 struct hash_table_d
321 {
322 /* The table itself.
323 This is an array of `expr_hash_table_size' elements. */
324 struct expr **table;
325
326 /* Size of the hash table, in elements. */
327 unsigned int size;
328
329 /* Number of hash table elements. */
330 unsigned int n_elems;
331 };
332
333 /* Expression hash table. */
334 static struct hash_table_d expr_hash_table;
335
336 /* This is a list of expressions which are MEMs and will be used by load
337 or store motion.
338 Load motion tracks MEMs which aren't killed by anything except itself,
339 i.e. loads and stores to a single location.
340 We can then allow movement of these MEM refs with a little special
341 allowance. (all stores copy the same value to the reaching reg used
342 for the loads). This means all values used to store into memory must have
343 no side effects so we can re-issue the setter value. */
344
345 struct ls_expr
346 {
347 struct expr * expr; /* Gcse expression reference for LM. */
348 rtx pattern; /* Pattern of this mem. */
349 rtx pattern_regs; /* List of registers mentioned by the mem. */
350 rtx loads; /* INSN list of loads seen. */
351 rtx stores; /* INSN list of stores seen. */
352 struct ls_expr * next; /* Next in the list. */
353 int invalid; /* Invalid for some reason. */
354 int index; /* If it maps to a bitmap index. */
355 unsigned int hash_index; /* Index when in a hash table. */
356 rtx reaching_reg; /* Register to use when re-writing. */
357 };
358
359 /* Head of the list of load/store memory refs. */
360 static struct ls_expr * pre_ldst_mems = NULL;
361
362 /* Hashtable for the load/store memory refs. */
363 static htab_t pre_ldst_table = NULL;
364
365 /* Bitmap containing one bit for each register in the program.
366 Used when performing GCSE to track which registers have been set since
367 the start of the basic block. */
368 static regset reg_set_bitmap;
369
370 /* Array, indexed by basic block number for a list of insns which modify
371 memory within that block. */
372 static vec<rtx> *modify_mem_list;
373 static bitmap modify_mem_list_set;
374
375 typedef struct modify_pair_s
376 {
377 rtx dest; /* A MEM. */
378 rtx dest_addr; /* The canonical address of `dest'. */
379 } modify_pair;
380
381
382 /* This array parallels modify_mem_list, except that it stores MEMs
383 being set and their canonicalized memory addresses. */
384 static vec<modify_pair> *canon_modify_mem_list;
385
386 /* Bitmap indexed by block numbers to record which blocks contain
387 function calls. */
388 static bitmap blocks_with_calls;
389
390 /* Various variables for statistics gathering. */
391
392 /* Memory used in a pass.
393 This isn't intended to be absolutely precise. Its intent is only
394 to keep an eye on memory usage. */
395 static int bytes_used;
396
397 /* GCSE substitutions made. */
398 static int gcse_subst_count;
399 /* Number of copy instructions created. */
400 static int gcse_create_count;
401 \f
402 /* Doing code hoisting. */
403 static bool doing_code_hoisting_p = false;
404 \f
405 /* For available exprs */
406 static sbitmap *ae_kill;
407 \f
408 /* Data stored for each basic block. */
409 struct bb_data
410 {
411 /* Maximal register pressure inside basic block for given register class
412 (defined only for the pressure classes). */
413 int max_reg_pressure[N_REG_CLASSES];
414 /* Recorded register pressure of basic block before trying to hoist
415 an expression. Will be used to restore the register pressure
416 if the expression should not be hoisted. */
417 int old_pressure;
418 /* Recorded register live_in info of basic block during code hoisting
419 process. BACKUP is used to record live_in info before trying to
420 hoist an expression, and will be used to restore LIVE_IN if the
421 expression should not be hoisted. */
422 bitmap live_in, backup;
423 };
424
425 #define BB_DATA(bb) ((struct bb_data *) (bb)->aux)
426
427 static basic_block curr_bb;
428
429 /* Current register pressure for each pressure class. */
430 static int curr_reg_pressure[N_REG_CLASSES];
431 \f
432
433 static void compute_can_copy (void);
434 static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
435 static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
436 static void *gcse_alloc (unsigned long);
437 static void alloc_gcse_mem (void);
438 static void free_gcse_mem (void);
439 static void hash_scan_insn (rtx, struct hash_table_d *);
440 static void hash_scan_set (rtx, rtx, struct hash_table_d *);
441 static void hash_scan_clobber (rtx, rtx, struct hash_table_d *);
442 static void hash_scan_call (rtx, rtx, struct hash_table_d *);
443 static int want_to_gcse_p (rtx, int *);
444 static int oprs_unchanged_p (const_rtx, const_rtx, int);
445 static int oprs_anticipatable_p (const_rtx, const_rtx);
446 static int oprs_available_p (const_rtx, const_rtx);
447 static void insert_expr_in_table (rtx, enum machine_mode, rtx, int, int, int,
448 struct hash_table_d *);
449 static unsigned int hash_expr (const_rtx, enum machine_mode, int *, int);
450 static int expr_equiv_p (const_rtx, const_rtx);
451 static void record_last_reg_set_info (rtx, int);
452 static void record_last_mem_set_info (rtx);
453 static void record_last_set_info (rtx, const_rtx, void *);
454 static void compute_hash_table (struct hash_table_d *);
455 static void alloc_hash_table (struct hash_table_d *);
456 static void free_hash_table (struct hash_table_d *);
457 static void compute_hash_table_work (struct hash_table_d *);
458 static void dump_hash_table (FILE *, const char *, struct hash_table_d *);
459 static void compute_transp (const_rtx, int, sbitmap *);
460 static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
461 struct hash_table_d *);
462 static void mems_conflict_for_gcse_p (rtx, const_rtx, void *);
463 static int load_killed_in_block_p (const_basic_block, int, const_rtx, int);
464 static void canon_list_insert (rtx, const_rtx, void *);
465 static void alloc_pre_mem (int, int);
466 static void free_pre_mem (void);
467 static struct edge_list *compute_pre_data (void);
468 static int pre_expr_reaches_here_p (basic_block, struct expr *,
469 basic_block);
470 static void insert_insn_end_basic_block (struct expr *, basic_block);
471 static void pre_insert_copy_insn (struct expr *, rtx);
472 static void pre_insert_copies (void);
473 static int pre_delete (void);
474 static int pre_gcse (struct edge_list *);
475 static int one_pre_gcse_pass (void);
476 static void add_label_notes (rtx, rtx);
477 static void alloc_code_hoist_mem (int, int);
478 static void free_code_hoist_mem (void);
479 static void compute_code_hoist_vbeinout (void);
480 static void compute_code_hoist_data (void);
481 static int should_hoist_expr_to_dom (basic_block, struct expr *, basic_block,
482 sbitmap, int, int *, enum reg_class,
483 int *, bitmap, rtx);
484 static int hoist_code (void);
485 static enum reg_class get_regno_pressure_class (int regno, int *nregs);
486 static enum reg_class get_pressure_class_and_nregs (rtx insn, int *nregs);
487 static int one_code_hoisting_pass (void);
488 static rtx process_insert_insn (struct expr *);
489 static int pre_edge_insert (struct edge_list *, struct expr **);
490 static int pre_expr_reaches_here_p_work (basic_block, struct expr *,
491 basic_block, char *);
492 static struct ls_expr * ldst_entry (rtx);
493 static void free_ldst_entry (struct ls_expr *);
494 static void free_ld_motion_mems (void);
495 static void print_ldst_list (FILE *);
496 static struct ls_expr * find_rtx_in_ldst (rtx);
497 static int simple_mem (const_rtx);
498 static void invalidate_any_buried_refs (rtx);
499 static void compute_ld_motion_mems (void);
500 static void trim_ld_motion_mems (void);
501 static void update_ld_motion_stores (struct expr *);
502 static void clear_modify_mem_tables (void);
503 static void free_modify_mem_tables (void);
504 static rtx gcse_emit_move_after (rtx, rtx, rtx);
505 static bool is_too_expensive (const char *);
506
507 #define GNEW(T) ((T *) gmalloc (sizeof (T)))
508 #define GCNEW(T) ((T *) gcalloc (1, sizeof (T)))
509
510 #define GNEWVEC(T, N) ((T *) gmalloc (sizeof (T) * (N)))
511 #define GCNEWVEC(T, N) ((T *) gcalloc ((N), sizeof (T)))
512
513 #define GNEWVAR(T, S) ((T *) gmalloc ((S)))
514 #define GCNEWVAR(T, S) ((T *) gcalloc (1, (S)))
515
516 #define GOBNEW(T) ((T *) gcse_alloc (sizeof (T)))
517 #define GOBNEWVAR(T, S) ((T *) gcse_alloc ((S)))
518 \f
519 /* Misc. utilities. */
520
521 #define can_copy \
522 (this_target_gcse->x_can_copy)
523 #define can_copy_init_p \
524 (this_target_gcse->x_can_copy_init_p)
525
526 /* Compute which modes support reg/reg copy operations. */
527
528 static void
529 compute_can_copy (void)
530 {
531 int i;
532 #ifndef AVOID_CCMODE_COPIES
533 rtx reg, insn;
534 #endif
535 memset (can_copy, 0, NUM_MACHINE_MODES);
536
537 start_sequence ();
538 for (i = 0; i < NUM_MACHINE_MODES; i++)
539 if (GET_MODE_CLASS (i) == MODE_CC)
540 {
541 #ifdef AVOID_CCMODE_COPIES
542 can_copy[i] = 0;
543 #else
544 reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
545 insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
546 if (recog (PATTERN (insn), insn, NULL) >= 0)
547 can_copy[i] = 1;
548 #endif
549 }
550 else
551 can_copy[i] = 1;
552
553 end_sequence ();
554 }
555
556 /* Returns whether the mode supports reg/reg copy operations. */
557
558 bool
559 can_copy_p (enum machine_mode mode)
560 {
561 if (! can_copy_init_p)
562 {
563 compute_can_copy ();
564 can_copy_init_p = true;
565 }
566
567 return can_copy[mode] != 0;
568 }
569 \f
570 /* Cover function to xmalloc to record bytes allocated. */
571
572 static void *
573 gmalloc (size_t size)
574 {
575 bytes_used += size;
576 return xmalloc (size);
577 }
578
579 /* Cover function to xcalloc to record bytes allocated. */
580
581 static void *
582 gcalloc (size_t nelem, size_t elsize)
583 {
584 bytes_used += nelem * elsize;
585 return xcalloc (nelem, elsize);
586 }
587
588 /* Cover function to obstack_alloc. */
589
590 static void *
591 gcse_alloc (unsigned long size)
592 {
593 bytes_used += size;
594 return obstack_alloc (&gcse_obstack, size);
595 }
596
597 /* Allocate memory for the reg/memory set tracking tables.
598 This is called at the start of each pass. */
599
600 static void
601 alloc_gcse_mem (void)
602 {
603 /* Allocate vars to track sets of regs. */
604 reg_set_bitmap = ALLOC_REG_SET (NULL);
605
606 /* Allocate array to keep a list of insns which modify memory in each
607 basic block. The two typedefs are needed to work around the
608 pre-processor limitation with template types in macro arguments. */
609 typedef vec<rtx> vec_rtx_heap;
610 typedef vec<modify_pair> vec_modify_pair_heap;
611 modify_mem_list = GCNEWVEC (vec_rtx_heap, last_basic_block);
612 canon_modify_mem_list = GCNEWVEC (vec_modify_pair_heap, last_basic_block);
613 modify_mem_list_set = BITMAP_ALLOC (NULL);
614 blocks_with_calls = BITMAP_ALLOC (NULL);
615 }
616
617 /* Free memory allocated by alloc_gcse_mem. */
618
619 static void
620 free_gcse_mem (void)
621 {
622 FREE_REG_SET (reg_set_bitmap);
623
624 free_modify_mem_tables ();
625 BITMAP_FREE (modify_mem_list_set);
626 BITMAP_FREE (blocks_with_calls);
627 }
628 \f
629 /* Compute the local properties of each recorded expression.
630
631 Local properties are those that are defined by the block, irrespective of
632 other blocks.
633
634 An expression is transparent in a block if its operands are not modified
635 in the block.
636
637 An expression is computed (locally available) in a block if it is computed
638 at least once and expression would contain the same value if the
639 computation was moved to the end of the block.
640
641 An expression is locally anticipatable in a block if it is computed at
642 least once and expression would contain the same value if the computation
643 was moved to the beginning of the block.
644
645 We call this routine for pre and code hoisting. They all compute
646 basically the same information and thus can easily share this code.
647
648 TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
649 properties. If NULL, then it is not necessary to compute or record that
650 particular property.
651
652 TABLE controls which hash table to look at. */
653
654 static void
655 compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
656 struct hash_table_d *table)
657 {
658 unsigned int i;
659
660 /* Initialize any bitmaps that were passed in. */
661 if (transp)
662 {
663 bitmap_vector_ones (transp, last_basic_block);
664 }
665
666 if (comp)
667 bitmap_vector_clear (comp, last_basic_block);
668 if (antloc)
669 bitmap_vector_clear (antloc, last_basic_block);
670
671 for (i = 0; i < table->size; i++)
672 {
673 struct expr *expr;
674
675 for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
676 {
677 int indx = expr->bitmap_index;
678 struct occr *occr;
679
680 /* The expression is transparent in this block if it is not killed.
681 We start by assuming all are transparent [none are killed], and
682 then reset the bits for those that are. */
683 if (transp)
684 compute_transp (expr->expr, indx, transp);
685
686 /* The occurrences recorded in antic_occr are exactly those that
687 we want to set to nonzero in ANTLOC. */
688 if (antloc)
689 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
690 {
691 bitmap_set_bit (antloc[BLOCK_FOR_INSN (occr->insn)->index], indx);
692
693 /* While we're scanning the table, this is a good place to
694 initialize this. */
695 occr->deleted_p = 0;
696 }
697
698 /* The occurrences recorded in avail_occr are exactly those that
699 we want to set to nonzero in COMP. */
700 if (comp)
701 for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
702 {
703 bitmap_set_bit (comp[BLOCK_FOR_INSN (occr->insn)->index], indx);
704
705 /* While we're scanning the table, this is a good place to
706 initialize this. */
707 occr->copied_p = 0;
708 }
709
710 /* While we're scanning the table, this is a good place to
711 initialize this. */
712 expr->reaching_reg = 0;
713 }
714 }
715 }
716 \f
717 /* Hash table support. */
718
719 struct reg_avail_info
720 {
721 basic_block last_bb;
722 int first_set;
723 int last_set;
724 };
725
726 static struct reg_avail_info *reg_avail_info;
727 static basic_block current_bb;
728
729 /* See whether X, the source of a set, is something we want to consider for
730 GCSE. */
731
732 static int
733 want_to_gcse_p (rtx x, int *max_distance_ptr)
734 {
735 #ifdef STACK_REGS
736 /* On register stack architectures, don't GCSE constants from the
737 constant pool, as the benefits are often swamped by the overhead
738 of shuffling the register stack between basic blocks. */
739 if (IS_STACK_MODE (GET_MODE (x)))
740 x = avoid_constant_pool_reference (x);
741 #endif
742
743 /* GCSE'ing constants:
744
745 We do not specifically distinguish between constant and non-constant
746 expressions in PRE and Hoist. We use set_src_cost below to limit
747 the maximum distance simple expressions can travel.
748
749 Nevertheless, constants are much easier to GCSE, and, hence,
750 it is easy to overdo the optimizations. Usually, excessive PRE and
751 Hoisting of constant leads to increased register pressure.
752
753 RA can deal with this by rematerialing some of the constants.
754 Therefore, it is important that the back-end generates sets of constants
755 in a way that allows reload rematerialize them under high register
756 pressure, i.e., a pseudo register with REG_EQUAL to constant
757 is set only once. Failing to do so will result in IRA/reload
758 spilling such constants under high register pressure instead of
759 rematerializing them. */
760
761 switch (GET_CODE (x))
762 {
763 case REG:
764 case SUBREG:
765 case CALL:
766 return 0;
767
768 CASE_CONST_ANY:
769 if (!doing_code_hoisting_p)
770 /* Do not PRE constants. */
771 return 0;
772
773 /* FALLTHRU */
774
775 default:
776 if (doing_code_hoisting_p)
777 /* PRE doesn't implement max_distance restriction. */
778 {
779 int cost;
780 int max_distance;
781
782 gcc_assert (!optimize_function_for_speed_p (cfun)
783 && optimize_function_for_size_p (cfun));
784 cost = set_src_cost (x, 0);
785
786 if (cost < COSTS_N_INSNS (GCSE_UNRESTRICTED_COST))
787 {
788 max_distance = (GCSE_COST_DISTANCE_RATIO * cost) / 10;
789 if (max_distance == 0)
790 return 0;
791
792 gcc_assert (max_distance > 0);
793 }
794 else
795 max_distance = 0;
796
797 if (max_distance_ptr)
798 *max_distance_ptr = max_distance;
799 }
800
801 return can_assign_to_reg_without_clobbers_p (x);
802 }
803 }
804
805 /* Used internally by can_assign_to_reg_without_clobbers_p. */
806
807 static GTY(()) rtx test_insn;
808
809 /* Return true if we can assign X to a pseudo register such that the
810 resulting insn does not result in clobbering a hard register as a
811 side-effect.
812
813 Additionally, if the target requires it, check that the resulting insn
814 can be copied. If it cannot, this means that X is special and probably
815 has hidden side-effects we don't want to mess with.
816
817 This function is typically used by code motion passes, to verify
818 that it is safe to insert an insn without worrying about clobbering
819 maybe live hard regs. */
820
821 bool
822 can_assign_to_reg_without_clobbers_p (rtx x)
823 {
824 int num_clobbers = 0;
825 int icode;
826
827 /* If this is a valid operand, we are OK. If it's VOIDmode, we aren't. */
828 if (general_operand (x, GET_MODE (x)))
829 return 1;
830 else if (GET_MODE (x) == VOIDmode)
831 return 0;
832
833 /* Otherwise, check if we can make a valid insn from it. First initialize
834 our test insn if we haven't already. */
835 if (test_insn == 0)
836 {
837 test_insn
838 = make_insn_raw (gen_rtx_SET (VOIDmode,
839 gen_rtx_REG (word_mode,
840 FIRST_PSEUDO_REGISTER * 2),
841 const0_rtx));
842 NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
843 }
844
845 /* Now make an insn like the one we would make when GCSE'ing and see if
846 valid. */
847 PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
848 SET_SRC (PATTERN (test_insn)) = x;
849
850 icode = recog (PATTERN (test_insn), test_insn, &num_clobbers);
851 if (icode < 0)
852 return false;
853
854 if (num_clobbers > 0 && added_clobbers_hard_reg_p (icode))
855 return false;
856
857 if (targetm.cannot_copy_insn_p && targetm.cannot_copy_insn_p (test_insn))
858 return false;
859
860 return true;
861 }
862
863 /* Return nonzero if the operands of expression X are unchanged from the
864 start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
865 or from INSN to the end of INSN's basic block (if AVAIL_P != 0). */
866
867 static int
868 oprs_unchanged_p (const_rtx x, const_rtx insn, int avail_p)
869 {
870 int i, j;
871 enum rtx_code code;
872 const char *fmt;
873
874 if (x == 0)
875 return 1;
876
877 code = GET_CODE (x);
878 switch (code)
879 {
880 case REG:
881 {
882 struct reg_avail_info *info = &reg_avail_info[REGNO (x)];
883
884 if (info->last_bb != current_bb)
885 return 1;
886 if (avail_p)
887 return info->last_set < DF_INSN_LUID (insn);
888 else
889 return info->first_set >= DF_INSN_LUID (insn);
890 }
891
892 case MEM:
893 if (! flag_gcse_lm
894 || load_killed_in_block_p (current_bb, DF_INSN_LUID (insn),
895 x, avail_p))
896 return 0;
897 else
898 return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);
899
900 case PRE_DEC:
901 case PRE_INC:
902 case POST_DEC:
903 case POST_INC:
904 case PRE_MODIFY:
905 case POST_MODIFY:
906 return 0;
907
908 case PC:
909 case CC0: /*FIXME*/
910 case CONST:
911 CASE_CONST_ANY:
912 case SYMBOL_REF:
913 case LABEL_REF:
914 case ADDR_VEC:
915 case ADDR_DIFF_VEC:
916 return 1;
917
918 default:
919 break;
920 }
921
922 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
923 {
924 if (fmt[i] == 'e')
925 {
926 /* If we are about to do the last recursive call needed at this
927 level, change it into iteration. This function is called enough
928 to be worth it. */
929 if (i == 0)
930 return oprs_unchanged_p (XEXP (x, i), insn, avail_p);
931
932 else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
933 return 0;
934 }
935 else if (fmt[i] == 'E')
936 for (j = 0; j < XVECLEN (x, i); j++)
937 if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
938 return 0;
939 }
940
941 return 1;
942 }
943
944 /* Info passed from load_killed_in_block_p to mems_conflict_for_gcse_p. */
945
946 struct mem_conflict_info
947 {
948 /* A memory reference for a load instruction, mems_conflict_for_gcse_p will
949 see if a memory store conflicts with this memory load. */
950 const_rtx mem;
951
952 /* True if mems_conflict_for_gcse_p finds a conflict between two memory
953 references. */
954 bool conflict;
955 };
956
957 /* DEST is the output of an instruction. If it is a memory reference and
958 possibly conflicts with the load found in DATA, then communicate this
959 information back through DATA. */
960
961 static void
962 mems_conflict_for_gcse_p (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
963 void *data)
964 {
965 struct mem_conflict_info *mci = (struct mem_conflict_info *) data;
966
967 while (GET_CODE (dest) == SUBREG
968 || GET_CODE (dest) == ZERO_EXTRACT
969 || GET_CODE (dest) == STRICT_LOW_PART)
970 dest = XEXP (dest, 0);
971
972 /* If DEST is not a MEM, then it will not conflict with the load. Note
973 that function calls are assumed to clobber memory, but are handled
974 elsewhere. */
975 if (! MEM_P (dest))
976 return;
977
978 /* If we are setting a MEM in our list of specially recognized MEMs,
979 don't mark as killed this time. */
980 if (pre_ldst_mems != NULL && expr_equiv_p (dest, mci->mem))
981 {
982 if (!find_rtx_in_ldst (dest))
983 mci->conflict = true;
984 return;
985 }
986
987 if (true_dependence (dest, GET_MODE (dest), mci->mem))
988 mci->conflict = true;
989 }
990
991 /* Return nonzero if the expression in X (a memory reference) is killed
992 in block BB before or after the insn with the LUID in UID_LIMIT.
993 AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
994 before UID_LIMIT.
995
996 To check the entire block, set UID_LIMIT to max_uid + 1 and
997 AVAIL_P to 0. */
998
999 static int
1000 load_killed_in_block_p (const_basic_block bb, int uid_limit, const_rtx x,
1001 int avail_p)
1002 {
1003 vec<rtx> list = modify_mem_list[bb->index];
1004 rtx setter;
1005 unsigned ix;
1006
1007 /* If this is a readonly then we aren't going to be changing it. */
1008 if (MEM_READONLY_P (x))
1009 return 0;
1010
1011 FOR_EACH_VEC_ELT_REVERSE (list, ix, setter)
1012 {
1013 struct mem_conflict_info mci;
1014
1015 /* Ignore entries in the list that do not apply. */
1016 if ((avail_p
1017 && DF_INSN_LUID (setter) < uid_limit)
1018 || (! avail_p
1019 && DF_INSN_LUID (setter) > uid_limit))
1020 continue;
1021
1022 /* If SETTER is a call everything is clobbered. Note that calls
1023 to pure functions are never put on the list, so we need not
1024 worry about them. */
1025 if (CALL_P (setter))
1026 return 1;
1027
1028 /* SETTER must be an INSN of some kind that sets memory. Call
1029 note_stores to examine each hunk of memory that is modified. */
1030 mci.mem = x;
1031 mci.conflict = false;
1032 note_stores (PATTERN (setter), mems_conflict_for_gcse_p, &mci);
1033 if (mci.conflict)
1034 return 1;
1035 }
1036 return 0;
1037 }
1038
1039 /* Return nonzero if the operands of expression X are unchanged from
1040 the start of INSN's basic block up to but not including INSN. */
1041
1042 static int
1043 oprs_anticipatable_p (const_rtx x, const_rtx insn)
1044 {
1045 return oprs_unchanged_p (x, insn, 0);
1046 }
1047
1048 /* Return nonzero if the operands of expression X are unchanged from
1049 INSN to the end of INSN's basic block. */
1050
1051 static int
1052 oprs_available_p (const_rtx x, const_rtx insn)
1053 {
1054 return oprs_unchanged_p (x, insn, 1);
1055 }
1056
1057 /* Hash expression X.
1058
1059 MODE is only used if X is a CONST_INT. DO_NOT_RECORD_P is a boolean
1060 indicating if a volatile operand is found or if the expression contains
1061 something we don't want to insert in the table. HASH_TABLE_SIZE is
1062 the current size of the hash table to be probed. */
1063
1064 static unsigned int
1065 hash_expr (const_rtx x, enum machine_mode mode, int *do_not_record_p,
1066 int hash_table_size)
1067 {
1068 unsigned int hash;
1069
1070 *do_not_record_p = 0;
1071
1072 hash = hash_rtx (x, mode, do_not_record_p, NULL, /*have_reg_qty=*/false);
1073 return hash % hash_table_size;
1074 }
1075
1076 /* Return nonzero if exp1 is equivalent to exp2. */
1077
1078 static int
1079 expr_equiv_p (const_rtx x, const_rtx y)
1080 {
1081 return exp_equiv_p (x, y, 0, true);
1082 }
1083
1084 /* Insert expression X in INSN in the hash TABLE.
1085 If it is already present, record it as the last occurrence in INSN's
1086 basic block.
1087
1088 MODE is the mode of the value X is being stored into.
1089 It is only used if X is a CONST_INT.
1090
1091 ANTIC_P is nonzero if X is an anticipatable expression.
1092 AVAIL_P is nonzero if X is an available expression.
1093
1094 MAX_DISTANCE is the maximum distance in instructions this expression can
1095 be moved. */
1096
1097 static void
1098 insert_expr_in_table (rtx x, enum machine_mode mode, rtx insn, int antic_p,
1099 int avail_p, int max_distance, struct hash_table_d *table)
1100 {
1101 int found, do_not_record_p;
1102 unsigned int hash;
1103 struct expr *cur_expr, *last_expr = NULL;
1104 struct occr *antic_occr, *avail_occr;
1105
1106 hash = hash_expr (x, mode, &do_not_record_p, table->size);
1107
1108 /* Do not insert expression in table if it contains volatile operands,
1109 or if hash_expr determines the expression is something we don't want
1110 to or can't handle. */
1111 if (do_not_record_p)
1112 return;
1113
1114 cur_expr = table->table[hash];
1115 found = 0;
1116
1117 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
1118 {
1119 /* If the expression isn't found, save a pointer to the end of
1120 the list. */
1121 last_expr = cur_expr;
1122 cur_expr = cur_expr->next_same_hash;
1123 }
1124
1125 if (! found)
1126 {
1127 cur_expr = GOBNEW (struct expr);
1128 bytes_used += sizeof (struct expr);
1129 if (table->table[hash] == NULL)
1130 /* This is the first pattern that hashed to this index. */
1131 table->table[hash] = cur_expr;
1132 else
1133 /* Add EXPR to end of this hash chain. */
1134 last_expr->next_same_hash = cur_expr;
1135
1136 /* Set the fields of the expr element. */
1137 cur_expr->expr = x;
1138 cur_expr->bitmap_index = table->n_elems++;
1139 cur_expr->next_same_hash = NULL;
1140 cur_expr->antic_occr = NULL;
1141 cur_expr->avail_occr = NULL;
1142 gcc_assert (max_distance >= 0);
1143 cur_expr->max_distance = max_distance;
1144 }
1145 else
1146 gcc_assert (cur_expr->max_distance == max_distance);
1147
1148 /* Now record the occurrence(s). */
1149 if (antic_p)
1150 {
1151 antic_occr = cur_expr->antic_occr;
1152
1153 if (antic_occr
1154 && BLOCK_FOR_INSN (antic_occr->insn) != BLOCK_FOR_INSN (insn))
1155 antic_occr = NULL;
1156
1157 if (antic_occr)
1158 /* Found another instance of the expression in the same basic block.
1159 Prefer the currently recorded one. We want the first one in the
1160 block and the block is scanned from start to end. */
1161 ; /* nothing to do */
1162 else
1163 {
1164 /* First occurrence of this expression in this basic block. */
1165 antic_occr = GOBNEW (struct occr);
1166 bytes_used += sizeof (struct occr);
1167 antic_occr->insn = insn;
1168 antic_occr->next = cur_expr->antic_occr;
1169 antic_occr->deleted_p = 0;
1170 cur_expr->antic_occr = antic_occr;
1171 }
1172 }
1173
1174 if (avail_p)
1175 {
1176 avail_occr = cur_expr->avail_occr;
1177
1178 if (avail_occr
1179 && BLOCK_FOR_INSN (avail_occr->insn) == BLOCK_FOR_INSN (insn))
1180 {
1181 /* Found another instance of the expression in the same basic block.
1182 Prefer this occurrence to the currently recorded one. We want
1183 the last one in the block and the block is scanned from start
1184 to end. */
1185 avail_occr->insn = insn;
1186 }
1187 else
1188 {
1189 /* First occurrence of this expression in this basic block. */
1190 avail_occr = GOBNEW (struct occr);
1191 bytes_used += sizeof (struct occr);
1192 avail_occr->insn = insn;
1193 avail_occr->next = cur_expr->avail_occr;
1194 avail_occr->deleted_p = 0;
1195 cur_expr->avail_occr = avail_occr;
1196 }
1197 }
1198 }
1199
1200 /* Scan SET present in INSN and add an entry to the hash TABLE. */
1201
1202 static void
1203 hash_scan_set (rtx set, rtx insn, struct hash_table_d *table)
1204 {
1205 rtx src = SET_SRC (set);
1206 rtx dest = SET_DEST (set);
1207 rtx note;
1208
1209 if (GET_CODE (src) == CALL)
1210 hash_scan_call (src, insn, table);
1211
1212 else if (REG_P (dest))
1213 {
1214 unsigned int regno = REGNO (dest);
1215 int max_distance = 0;
1216
1217 /* See if a REG_EQUAL note shows this equivalent to a simpler expression.
1218
1219 This allows us to do a single GCSE pass and still eliminate
1220 redundant constants, addresses or other expressions that are
1221 constructed with multiple instructions.
1222
1223 However, keep the original SRC if INSN is a simple reg-reg move.
1224 In this case, there will almost always be a REG_EQUAL note on the
1225 insn that sets SRC. By recording the REG_EQUAL value here as SRC
1226 for INSN, we miss copy propagation opportunities and we perform the
1227 same PRE GCSE operation repeatedly on the same REG_EQUAL value if we
1228 do more than one PRE GCSE pass.
1229
1230 Note that this does not impede profitable constant propagations. We
1231 "look through" reg-reg sets in lookup_avail_set. */
1232 note = find_reg_equal_equiv_note (insn);
1233 if (note != 0
1234 && REG_NOTE_KIND (note) == REG_EQUAL
1235 && !REG_P (src)
1236 && want_to_gcse_p (XEXP (note, 0), NULL))
1237 src = XEXP (note, 0), set = gen_rtx_SET (VOIDmode, dest, src);
1238
1239 /* Only record sets of pseudo-regs in the hash table. */
1240 if (regno >= FIRST_PSEUDO_REGISTER
1241 /* Don't GCSE something if we can't do a reg/reg copy. */
1242 && can_copy_p (GET_MODE (dest))
1243 /* GCSE commonly inserts instruction after the insn. We can't
1244 do that easily for EH edges so disable GCSE on these for now. */
1245 /* ??? We can now easily create new EH landing pads at the
1246 gimple level, for splitting edges; there's no reason we
1247 can't do the same thing at the rtl level. */
1248 && !can_throw_internal (insn)
1249 /* Is SET_SRC something we want to gcse? */
1250 && want_to_gcse_p (src, &max_distance)
1251 /* Don't CSE a nop. */
1252 && ! set_noop_p (set)
1253 /* Don't GCSE if it has attached REG_EQUIV note.
1254 At this point this only function parameters should have
1255 REG_EQUIV notes and if the argument slot is used somewhere
1256 explicitly, it means address of parameter has been taken,
1257 so we should not extend the lifetime of the pseudo. */
1258 && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
1259 {
1260 /* An expression is not anticipatable if its operands are
1261 modified before this insn or if this is not the only SET in
1262 this insn. The latter condition does not have to mean that
1263 SRC itself is not anticipatable, but we just will not be
1264 able to handle code motion of insns with multiple sets. */
1265 int antic_p = oprs_anticipatable_p (src, insn)
1266 && !multiple_sets (insn);
1267 /* An expression is not available if its operands are
1268 subsequently modified, including this insn. It's also not
1269 available if this is a branch, because we can't insert
1270 a set after the branch. */
1271 int avail_p = (oprs_available_p (src, insn)
1272 && ! JUMP_P (insn));
1273
1274 insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p,
1275 max_distance, table);
1276 }
1277 }
1278 /* In case of store we want to consider the memory value as available in
1279 the REG stored in that memory. This makes it possible to remove
1280 redundant loads from due to stores to the same location. */
1281 else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
1282 {
1283 unsigned int regno = REGNO (src);
1284 int max_distance = 0;
1285
1286 /* Only record sets of pseudo-regs in the hash table. */
1287 if (regno >= FIRST_PSEUDO_REGISTER
1288 /* Don't GCSE something if we can't do a reg/reg copy. */
1289 && can_copy_p (GET_MODE (src))
1290 /* GCSE commonly inserts instruction after the insn. We can't
1291 do that easily for EH edges so disable GCSE on these for now. */
1292 && !can_throw_internal (insn)
1293 /* Is SET_DEST something we want to gcse? */
1294 && want_to_gcse_p (dest, &max_distance)
1295 /* Don't CSE a nop. */
1296 && ! set_noop_p (set)
1297 /* Don't GCSE if it has attached REG_EQUIV note.
1298 At this point this only function parameters should have
1299 REG_EQUIV notes and if the argument slot is used somewhere
1300 explicitly, it means address of parameter has been taken,
1301 so we should not extend the lifetime of the pseudo. */
1302 && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
1303 || ! MEM_P (XEXP (note, 0))))
1304 {
1305 /* Stores are never anticipatable. */
1306 int antic_p = 0;
1307 /* An expression is not available if its operands are
1308 subsequently modified, including this insn. It's also not
1309 available if this is a branch, because we can't insert
1310 a set after the branch. */
1311 int avail_p = oprs_available_p (dest, insn)
1312 && ! JUMP_P (insn);
1313
1314 /* Record the memory expression (DEST) in the hash table. */
1315 insert_expr_in_table (dest, GET_MODE (dest), insn,
1316 antic_p, avail_p, max_distance, table);
1317 }
1318 }
1319 }
1320
1321 static void
1322 hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
1323 struct hash_table_d *table ATTRIBUTE_UNUSED)
1324 {
1325 /* Currently nothing to do. */
1326 }
1327
1328 static void
1329 hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
1330 struct hash_table_d *table ATTRIBUTE_UNUSED)
1331 {
1332 /* Currently nothing to do. */
1333 }
1334
1335 /* Process INSN and add hash table entries as appropriate. */
1336
1337 static void
1338 hash_scan_insn (rtx insn, struct hash_table_d *table)
1339 {
1340 rtx pat = PATTERN (insn);
1341 int i;
1342
1343 /* Pick out the sets of INSN and for other forms of instructions record
1344 what's been modified. */
1345
1346 if (GET_CODE (pat) == SET)
1347 hash_scan_set (pat, insn, table);
1348
1349 else if (GET_CODE (pat) == CLOBBER)
1350 hash_scan_clobber (pat, insn, table);
1351
1352 else if (GET_CODE (pat) == CALL)
1353 hash_scan_call (pat, insn, table);
1354
1355 else if (GET_CODE (pat) == PARALLEL)
1356 for (i = 0; i < XVECLEN (pat, 0); i++)
1357 {
1358 rtx x = XVECEXP (pat, 0, i);
1359
1360 if (GET_CODE (x) == SET)
1361 hash_scan_set (x, insn, table);
1362 else if (GET_CODE (x) == CLOBBER)
1363 hash_scan_clobber (x, insn, table);
1364 else if (GET_CODE (x) == CALL)
1365 hash_scan_call (x, insn, table);
1366 }
1367 }
1368
1369 /* Dump the hash table TABLE to file FILE under the name NAME. */
1370
1371 static void
1372 dump_hash_table (FILE *file, const char *name, struct hash_table_d *table)
1373 {
1374 int i;
1375 /* Flattened out table, so it's printed in proper order. */
1376 struct expr **flat_table;
1377 unsigned int *hash_val;
1378 struct expr *expr;
1379
1380 flat_table = XCNEWVEC (struct expr *, table->n_elems);
1381 hash_val = XNEWVEC (unsigned int, table->n_elems);
1382
1383 for (i = 0; i < (int) table->size; i++)
1384 for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
1385 {
1386 flat_table[expr->bitmap_index] = expr;
1387 hash_val[expr->bitmap_index] = i;
1388 }
1389
1390 fprintf (file, "%s hash table (%d buckets, %d entries)\n",
1391 name, table->size, table->n_elems);
1392
1393 for (i = 0; i < (int) table->n_elems; i++)
1394 if (flat_table[i] != 0)
1395 {
1396 expr = flat_table[i];
1397 fprintf (file, "Index %d (hash value %d; max distance %d)\n ",
1398 expr->bitmap_index, hash_val[i], expr->max_distance);
1399 print_rtl (file, expr->expr);
1400 fprintf (file, "\n");
1401 }
1402
1403 fprintf (file, "\n");
1404
1405 free (flat_table);
1406 free (hash_val);
1407 }
1408
1409 /* Record register first/last/block set information for REGNO in INSN.
1410
1411 first_set records the first place in the block where the register
1412 is set and is used to compute "anticipatability".
1413
1414 last_set records the last place in the block where the register
1415 is set and is used to compute "availability".
1416
1417 last_bb records the block for which first_set and last_set are
1418 valid, as a quick test to invalidate them. */
1419
1420 static void
1421 record_last_reg_set_info (rtx insn, int regno)
1422 {
1423 struct reg_avail_info *info = &reg_avail_info[regno];
1424 int luid = DF_INSN_LUID (insn);
1425
1426 info->last_set = luid;
1427 if (info->last_bb != current_bb)
1428 {
1429 info->last_bb = current_bb;
1430 info->first_set = luid;
1431 }
1432 }
1433
1434 /* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
1435 Note we store a pair of elements in the list, so they have to be
1436 taken off pairwise. */
1437
1438 static void
1439 canon_list_insert (rtx dest ATTRIBUTE_UNUSED, const_rtx x ATTRIBUTE_UNUSED,
1440 void * v_insn)
1441 {
1442 rtx dest_addr, insn;
1443 int bb;
1444 modify_pair pair;
1445
1446 while (GET_CODE (dest) == SUBREG
1447 || GET_CODE (dest) == ZERO_EXTRACT
1448 || GET_CODE (dest) == STRICT_LOW_PART)
1449 dest = XEXP (dest, 0);
1450
1451 /* If DEST is not a MEM, then it will not conflict with a load. Note
1452 that function calls are assumed to clobber memory, but are handled
1453 elsewhere. */
1454
1455 if (! MEM_P (dest))
1456 return;
1457
1458 dest_addr = get_addr (XEXP (dest, 0));
1459 dest_addr = canon_rtx (dest_addr);
1460 insn = (rtx) v_insn;
1461 bb = BLOCK_FOR_INSN (insn)->index;
1462
1463 pair.dest = dest;
1464 pair.dest_addr = dest_addr;
1465 canon_modify_mem_list[bb].safe_push (pair);
1466 }
1467
1468 /* Record memory modification information for INSN. We do not actually care
1469 about the memory location(s) that are set, or even how they are set (consider
1470 a CALL_INSN). We merely need to record which insns modify memory. */
1471
1472 static void
1473 record_last_mem_set_info (rtx insn)
1474 {
1475 int bb;
1476
1477 if (! flag_gcse_lm)
1478 return;
1479
1480 /* load_killed_in_block_p will handle the case of calls clobbering
1481 everything. */
1482 bb = BLOCK_FOR_INSN (insn)->index;
1483 modify_mem_list[bb].safe_push (insn);
1484 bitmap_set_bit (modify_mem_list_set, bb);
1485
1486 if (CALL_P (insn))
1487 bitmap_set_bit (blocks_with_calls, bb);
1488 else
1489 note_stores (PATTERN (insn), canon_list_insert, (void*) insn);
1490 }
1491
1492 /* Called from compute_hash_table via note_stores to handle one
1493 SET or CLOBBER in an insn. DATA is really the instruction in which
1494 the SET is taking place. */
1495
1496 static void
1497 record_last_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
1498 {
1499 rtx last_set_insn = (rtx) data;
1500
1501 if (GET_CODE (dest) == SUBREG)
1502 dest = SUBREG_REG (dest);
1503
1504 if (REG_P (dest))
1505 record_last_reg_set_info (last_set_insn, REGNO (dest));
1506 else if (MEM_P (dest)
1507 /* Ignore pushes, they clobber nothing. */
1508 && ! push_operand (dest, GET_MODE (dest)))
1509 record_last_mem_set_info (last_set_insn);
1510 }
1511
1512 /* Top level function to create an expression hash table.
1513
1514 Expression entries are placed in the hash table if
1515 - they are of the form (set (pseudo-reg) src),
1516 - src is something we want to perform GCSE on,
1517 - none of the operands are subsequently modified in the block
1518
1519 Currently src must be a pseudo-reg or a const_int.
1520
1521 TABLE is the table computed. */
1522
1523 static void
1524 compute_hash_table_work (struct hash_table_d *table)
1525 {
1526 int i;
1527
1528 /* re-Cache any INSN_LIST nodes we have allocated. */
1529 clear_modify_mem_tables ();
1530 /* Some working arrays used to track first and last set in each block. */
1531 reg_avail_info = GNEWVEC (struct reg_avail_info, max_reg_num ());
1532
1533 for (i = 0; i < max_reg_num (); ++i)
1534 reg_avail_info[i].last_bb = NULL;
1535
1536 FOR_EACH_BB (current_bb)
1537 {
1538 rtx insn;
1539 unsigned int regno;
1540
1541 /* First pass over the instructions records information used to
1542 determine when registers and memory are first and last set. */
1543 FOR_BB_INSNS (current_bb, insn)
1544 {
1545 if (!NONDEBUG_INSN_P (insn))
1546 continue;
1547
1548 if (CALL_P (insn))
1549 {
1550 hard_reg_set_iterator hrsi;
1551 EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call,
1552 0, regno, hrsi)
1553 record_last_reg_set_info (insn, regno);
1554
1555 if (! RTL_CONST_OR_PURE_CALL_P (insn))
1556 record_last_mem_set_info (insn);
1557 }
1558
1559 note_stores (PATTERN (insn), record_last_set_info, insn);
1560 }
1561
1562 /* The next pass builds the hash table. */
1563 FOR_BB_INSNS (current_bb, insn)
1564 if (NONDEBUG_INSN_P (insn))
1565 hash_scan_insn (insn, table);
1566 }
1567
1568 free (reg_avail_info);
1569 reg_avail_info = NULL;
1570 }
1571
1572 /* Allocate space for the set/expr hash TABLE.
1573 It is used to determine the number of buckets to use. */
1574
1575 static void
1576 alloc_hash_table (struct hash_table_d *table)
1577 {
1578 int n;
1579
1580 n = get_max_insn_count ();
1581
1582 table->size = n / 4;
1583 if (table->size < 11)
1584 table->size = 11;
1585
1586 /* Attempt to maintain efficient use of hash table.
1587 Making it an odd number is simplest for now.
1588 ??? Later take some measurements. */
1589 table->size |= 1;
1590 n = table->size * sizeof (struct expr *);
1591 table->table = GNEWVAR (struct expr *, n);
1592 }
1593
1594 /* Free things allocated by alloc_hash_table. */
1595
1596 static void
1597 free_hash_table (struct hash_table_d *table)
1598 {
1599 free (table->table);
1600 }
1601
1602 /* Compute the expression hash table TABLE. */
1603
1604 static void
1605 compute_hash_table (struct hash_table_d *table)
1606 {
1607 /* Initialize count of number of entries in hash table. */
1608 table->n_elems = 0;
1609 memset (table->table, 0, table->size * sizeof (struct expr *));
1610
1611 compute_hash_table_work (table);
1612 }
1613 \f
1614 /* Expression tracking support. */
1615
1616 /* Clear canon_modify_mem_list and modify_mem_list tables. */
1617 static void
1618 clear_modify_mem_tables (void)
1619 {
1620 unsigned i;
1621 bitmap_iterator bi;
1622
1623 EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
1624 {
1625 modify_mem_list[i].release ();
1626 canon_modify_mem_list[i].release ();
1627 }
1628 bitmap_clear (modify_mem_list_set);
1629 bitmap_clear (blocks_with_calls);
1630 }
1631
1632 /* Release memory used by modify_mem_list_set. */
1633
1634 static void
1635 free_modify_mem_tables (void)
1636 {
1637 clear_modify_mem_tables ();
1638 free (modify_mem_list);
1639 free (canon_modify_mem_list);
1640 modify_mem_list = 0;
1641 canon_modify_mem_list = 0;
1642 }
1643 \f
1644 /* For each block, compute whether X is transparent. X is either an
1645 expression or an assignment [though we don't care which, for this context
1646 an assignment is treated as an expression]. For each block where an
1647 element of X is modified, reset the INDX bit in BMAP. */
1648
1649 static void
1650 compute_transp (const_rtx x, int indx, sbitmap *bmap)
1651 {
1652 int i, j;
1653 enum rtx_code code;
1654 const char *fmt;
1655
1656 /* repeat is used to turn tail-recursion into iteration since GCC
1657 can't do it when there's no return value. */
1658 repeat:
1659
1660 if (x == 0)
1661 return;
1662
1663 code = GET_CODE (x);
1664 switch (code)
1665 {
1666 case REG:
1667 {
1668 df_ref def;
1669 for (def = DF_REG_DEF_CHAIN (REGNO (x));
1670 def;
1671 def = DF_REF_NEXT_REG (def))
1672 bitmap_clear_bit (bmap[DF_REF_BB (def)->index], indx);
1673 }
1674
1675 return;
1676
1677 case MEM:
1678 if (! MEM_READONLY_P (x))
1679 {
1680 bitmap_iterator bi;
1681 unsigned bb_index;
1682 rtx x_addr;
1683
1684 x_addr = get_addr (XEXP (x, 0));
1685 x_addr = canon_rtx (x_addr);
1686
1687 /* First handle all the blocks with calls. We don't need to
1688 do any list walking for them. */
1689 EXECUTE_IF_SET_IN_BITMAP (blocks_with_calls, 0, bb_index, bi)
1690 {
1691 bitmap_clear_bit (bmap[bb_index], indx);
1692 }
1693
1694 /* Now iterate over the blocks which have memory modifications
1695 but which do not have any calls. */
1696 EXECUTE_IF_AND_COMPL_IN_BITMAP (modify_mem_list_set,
1697 blocks_with_calls,
1698 0, bb_index, bi)
1699 {
1700 vec<modify_pair> list
1701 = canon_modify_mem_list[bb_index];
1702 modify_pair *pair;
1703 unsigned ix;
1704
1705 FOR_EACH_VEC_ELT_REVERSE (list, ix, pair)
1706 {
1707 rtx dest = pair->dest;
1708 rtx dest_addr = pair->dest_addr;
1709
1710 if (canon_true_dependence (dest, GET_MODE (dest),
1711 dest_addr, x, x_addr))
1712 bitmap_clear_bit (bmap[bb_index], indx);
1713 }
1714 }
1715 }
1716
1717 x = XEXP (x, 0);
1718 goto repeat;
1719
1720 case PC:
1721 case CC0: /*FIXME*/
1722 case CONST:
1723 CASE_CONST_ANY:
1724 case SYMBOL_REF:
1725 case LABEL_REF:
1726 case ADDR_VEC:
1727 case ADDR_DIFF_VEC:
1728 return;
1729
1730 default:
1731 break;
1732 }
1733
1734 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
1735 {
1736 if (fmt[i] == 'e')
1737 {
1738 /* If we are about to do the last recursive call
1739 needed at this level, change it into iteration.
1740 This function is called enough to be worth it. */
1741 if (i == 0)
1742 {
1743 x = XEXP (x, i);
1744 goto repeat;
1745 }
1746
1747 compute_transp (XEXP (x, i), indx, bmap);
1748 }
1749 else if (fmt[i] == 'E')
1750 for (j = 0; j < XVECLEN (x, i); j++)
1751 compute_transp (XVECEXP (x, i, j), indx, bmap);
1752 }
1753 }
1754 \f
1755 /* Compute PRE+LCM working variables. */
1756
1757 /* Local properties of expressions. */
1758
1759 /* Nonzero for expressions that are transparent in the block. */
1760 static sbitmap *transp;
1761
1762 /* Nonzero for expressions that are computed (available) in the block. */
1763 static sbitmap *comp;
1764
1765 /* Nonzero for expressions that are locally anticipatable in the block. */
1766 static sbitmap *antloc;
1767
1768 /* Nonzero for expressions where this block is an optimal computation
1769 point. */
1770 static sbitmap *pre_optimal;
1771
1772 /* Nonzero for expressions which are redundant in a particular block. */
1773 static sbitmap *pre_redundant;
1774
1775 /* Nonzero for expressions which should be inserted on a specific edge. */
1776 static sbitmap *pre_insert_map;
1777
1778 /* Nonzero for expressions which should be deleted in a specific block. */
1779 static sbitmap *pre_delete_map;
1780
1781 /* Allocate vars used for PRE analysis. */
1782
1783 static void
1784 alloc_pre_mem (int n_blocks, int n_exprs)
1785 {
1786 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
1787 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
1788 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
1789
1790 pre_optimal = NULL;
1791 pre_redundant = NULL;
1792 pre_insert_map = NULL;
1793 pre_delete_map = NULL;
1794 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
1795
1796 /* pre_insert and pre_delete are allocated later. */
1797 }
1798
1799 /* Free vars used for PRE analysis. */
1800
1801 static void
1802 free_pre_mem (void)
1803 {
1804 sbitmap_vector_free (transp);
1805 sbitmap_vector_free (comp);
1806
1807 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
1808
1809 if (pre_optimal)
1810 sbitmap_vector_free (pre_optimal);
1811 if (pre_redundant)
1812 sbitmap_vector_free (pre_redundant);
1813 if (pre_insert_map)
1814 sbitmap_vector_free (pre_insert_map);
1815 if (pre_delete_map)
1816 sbitmap_vector_free (pre_delete_map);
1817
1818 transp = comp = NULL;
1819 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
1820 }
1821
1822 /* Remove certain expressions from anticipatable and transparent
1823 sets of basic blocks that have incoming abnormal edge.
1824 For PRE remove potentially trapping expressions to avoid placing
1825 them on abnormal edges. For hoisting remove memory references that
1826 can be clobbered by calls. */
1827
1828 static void
1829 prune_expressions (bool pre_p)
1830 {
1831 sbitmap prune_exprs;
1832 struct expr *expr;
1833 unsigned int ui;
1834 basic_block bb;
1835
1836 prune_exprs = sbitmap_alloc (expr_hash_table.n_elems);
1837 bitmap_clear (prune_exprs);
1838 for (ui = 0; ui < expr_hash_table.size; ui++)
1839 {
1840 for (expr = expr_hash_table.table[ui]; expr; expr = expr->next_same_hash)
1841 {
1842 /* Note potentially trapping expressions. */
1843 if (may_trap_p (expr->expr))
1844 {
1845 bitmap_set_bit (prune_exprs, expr->bitmap_index);
1846 continue;
1847 }
1848
1849 if (!pre_p && MEM_P (expr->expr))
1850 /* Note memory references that can be clobbered by a call.
1851 We do not split abnormal edges in hoisting, so would
1852 a memory reference get hoisted along an abnormal edge,
1853 it would be placed /before/ the call. Therefore, only
1854 constant memory references can be hoisted along abnormal
1855 edges. */
1856 {
1857 if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
1858 && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
1859 continue;
1860
1861 if (MEM_READONLY_P (expr->expr)
1862 && !MEM_VOLATILE_P (expr->expr)
1863 && MEM_NOTRAP_P (expr->expr))
1864 /* Constant memory reference, e.g., a PIC address. */
1865 continue;
1866
1867 /* ??? Optimally, we would use interprocedural alias
1868 analysis to determine if this mem is actually killed
1869 by this call. */
1870
1871 bitmap_set_bit (prune_exprs, expr->bitmap_index);
1872 }
1873 }
1874 }
1875
1876 FOR_EACH_BB (bb)
1877 {
1878 edge e;
1879 edge_iterator ei;
1880
1881 /* If the current block is the destination of an abnormal edge, we
1882 kill all trapping (for PRE) and memory (for hoist) expressions
1883 because we won't be able to properly place the instruction on
1884 the edge. So make them neither anticipatable nor transparent.
1885 This is fairly conservative.
1886
1887 ??? For hoisting it may be necessary to check for set-and-jump
1888 instructions here, not just for abnormal edges. The general problem
1889 is that when an expression cannot not be placed right at the end of
1890 a basic block we should account for any side-effects of a subsequent
1891 jump instructions that could clobber the expression. It would
1892 be best to implement this check along the lines of
1893 should_hoist_expr_to_dom where the target block is already known
1894 and, hence, there's no need to conservatively prune expressions on
1895 "intermediate" set-and-jump instructions. */
1896 FOR_EACH_EDGE (e, ei, bb->preds)
1897 if ((e->flags & EDGE_ABNORMAL)
1898 && (pre_p || CALL_P (BB_END (e->src))))
1899 {
1900 bitmap_and_compl (antloc[bb->index],
1901 antloc[bb->index], prune_exprs);
1902 bitmap_and_compl (transp[bb->index],
1903 transp[bb->index], prune_exprs);
1904 break;
1905 }
1906 }
1907
1908 sbitmap_free (prune_exprs);
1909 }
1910
1911 /* It may be necessary to insert a large number of insns on edges to
1912 make the existing occurrences of expressions fully redundant. This
1913 routine examines the set of insertions and deletions and if the ratio
1914 of insertions to deletions is too high for a particular expression, then
1915 the expression is removed from the insertion/deletion sets.
1916
1917 N_ELEMS is the number of elements in the hash table. */
1918
1919 static void
1920 prune_insertions_deletions (int n_elems)
1921 {
1922 sbitmap_iterator sbi;
1923 sbitmap prune_exprs;
1924
1925 /* We always use I to iterate over blocks/edges and J to iterate over
1926 expressions. */
1927 unsigned int i, j;
1928
1929 /* Counts for the number of times an expression needs to be inserted and
1930 number of times an expression can be removed as a result. */
1931 int *insertions = GCNEWVEC (int, n_elems);
1932 int *deletions = GCNEWVEC (int, n_elems);
1933
1934 /* Set of expressions which require too many insertions relative to
1935 the number of deletions achieved. We will prune these out of the
1936 insertion/deletion sets. */
1937 prune_exprs = sbitmap_alloc (n_elems);
1938 bitmap_clear (prune_exprs);
1939
1940 /* Iterate over the edges counting the number of times each expression
1941 needs to be inserted. */
1942 for (i = 0; i < (unsigned) n_edges; i++)
1943 {
1944 EXECUTE_IF_SET_IN_BITMAP (pre_insert_map[i], 0, j, sbi)
1945 insertions[j]++;
1946 }
1947
1948 /* Similarly for deletions, but those occur in blocks rather than on
1949 edges. */
1950 for (i = 0; i < (unsigned) last_basic_block; i++)
1951 {
1952 EXECUTE_IF_SET_IN_BITMAP (pre_delete_map[i], 0, j, sbi)
1953 deletions[j]++;
1954 }
1955
1956 /* Now that we have accurate counts, iterate over the elements in the
1957 hash table and see if any need too many insertions relative to the
1958 number of evaluations that can be removed. If so, mark them in
1959 PRUNE_EXPRS. */
1960 for (j = 0; j < (unsigned) n_elems; j++)
1961 if (deletions[j]
1962 && ((unsigned) insertions[j] / deletions[j]) > MAX_GCSE_INSERTION_RATIO)
1963 bitmap_set_bit (prune_exprs, j);
1964
1965 /* Now prune PRE_INSERT_MAP and PRE_DELETE_MAP based on PRUNE_EXPRS. */
1966 EXECUTE_IF_SET_IN_BITMAP (prune_exprs, 0, j, sbi)
1967 {
1968 for (i = 0; i < (unsigned) n_edges; i++)
1969 bitmap_clear_bit (pre_insert_map[i], j);
1970
1971 for (i = 0; i < (unsigned) last_basic_block; i++)
1972 bitmap_clear_bit (pre_delete_map[i], j);
1973 }
1974
1975 sbitmap_free (prune_exprs);
1976 free (insertions);
1977 free (deletions);
1978 }
1979
1980 /* Top level routine to do the dataflow analysis needed by PRE. */
1981
1982 static struct edge_list *
1983 compute_pre_data (void)
1984 {
1985 struct edge_list *edge_list;
1986 basic_block bb;
1987
1988 compute_local_properties (transp, comp, antloc, &expr_hash_table);
1989 prune_expressions (true);
1990 bitmap_vector_clear (ae_kill, last_basic_block);
1991
1992 /* Compute ae_kill for each basic block using:
1993
1994 ~(TRANSP | COMP)
1995 */
1996
1997 FOR_EACH_BB (bb)
1998 {
1999 bitmap_ior (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
2000 bitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
2001 }
2002
2003 edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
2004 ae_kill, &pre_insert_map, &pre_delete_map);
2005 sbitmap_vector_free (antloc);
2006 antloc = NULL;
2007 sbitmap_vector_free (ae_kill);
2008 ae_kill = NULL;
2009
2010 prune_insertions_deletions (expr_hash_table.n_elems);
2011
2012 return edge_list;
2013 }
2014 \f
2015 /* PRE utilities */
2016
2017 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
2018 block BB.
2019
2020 VISITED is a pointer to a working buffer for tracking which BB's have
2021 been visited. It is NULL for the top-level call.
2022
2023 We treat reaching expressions that go through blocks containing the same
2024 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
2025 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
2026 2 as not reaching. The intent is to improve the probability of finding
2027 only one reaching expression and to reduce register lifetimes by picking
2028 the closest such expression. */
2029
2030 static int
2031 pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr,
2032 basic_block bb, char *visited)
2033 {
2034 edge pred;
2035 edge_iterator ei;
2036
2037 FOR_EACH_EDGE (pred, ei, bb->preds)
2038 {
2039 basic_block pred_bb = pred->src;
2040
2041 if (pred->src == ENTRY_BLOCK_PTR
2042 /* Has predecessor has already been visited? */
2043 || visited[pred_bb->index])
2044 ;/* Nothing to do. */
2045
2046 /* Does this predecessor generate this expression? */
2047 else if (bitmap_bit_p (comp[pred_bb->index], expr->bitmap_index))
2048 {
2049 /* Is this the occurrence we're looking for?
2050 Note that there's only one generating occurrence per block
2051 so we just need to check the block number. */
2052 if (occr_bb == pred_bb)
2053 return 1;
2054
2055 visited[pred_bb->index] = 1;
2056 }
2057 /* Ignore this predecessor if it kills the expression. */
2058 else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
2059 visited[pred_bb->index] = 1;
2060
2061 /* Neither gen nor kill. */
2062 else
2063 {
2064 visited[pred_bb->index] = 1;
2065 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
2066 return 1;
2067 }
2068 }
2069
2070 /* All paths have been checked. */
2071 return 0;
2072 }
2073
2074 /* The wrapper for pre_expr_reaches_here_work that ensures that any
2075 memory allocated for that function is returned. */
2076
2077 static int
2078 pre_expr_reaches_here_p (basic_block occr_bb, struct expr *expr, basic_block bb)
2079 {
2080 int rval;
2081 char *visited = XCNEWVEC (char, last_basic_block);
2082
2083 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
2084
2085 free (visited);
2086 return rval;
2087 }
2088 \f
2089 /* Generate RTL to copy an EXPR to its `reaching_reg' and return it. */
2090
2091 static rtx
2092 process_insert_insn (struct expr *expr)
2093 {
2094 rtx reg = expr->reaching_reg;
2095 /* Copy the expression to make sure we don't have any sharing issues. */
2096 rtx exp = copy_rtx (expr->expr);
2097 rtx pat;
2098
2099 start_sequence ();
2100
2101 /* If the expression is something that's an operand, like a constant,
2102 just copy it to a register. */
2103 if (general_operand (exp, GET_MODE (reg)))
2104 emit_move_insn (reg, exp);
2105
2106 /* Otherwise, make a new insn to compute this expression and make sure the
2107 insn will be recognized (this also adds any needed CLOBBERs). */
2108 else
2109 {
2110 rtx insn = emit_insn (gen_rtx_SET (VOIDmode, reg, exp));
2111
2112 if (insn_invalid_p (insn, false))
2113 gcc_unreachable ();
2114 }
2115
2116 pat = get_insns ();
2117 end_sequence ();
2118
2119 return pat;
2120 }
2121
2122 /* Add EXPR to the end of basic block BB.
2123
2124 This is used by both the PRE and code hoisting. */
2125
2126 static void
2127 insert_insn_end_basic_block (struct expr *expr, basic_block bb)
2128 {
2129 rtx insn = BB_END (bb);
2130 rtx new_insn;
2131 rtx reg = expr->reaching_reg;
2132 int regno = REGNO (reg);
2133 rtx pat, pat_end;
2134
2135 pat = process_insert_insn (expr);
2136 gcc_assert (pat && INSN_P (pat));
2137
2138 pat_end = pat;
2139 while (NEXT_INSN (pat_end) != NULL_RTX)
2140 pat_end = NEXT_INSN (pat_end);
2141
2142 /* If the last insn is a jump, insert EXPR in front [taking care to
2143 handle cc0, etc. properly]. Similarly we need to care trapping
2144 instructions in presence of non-call exceptions. */
2145
2146 if (JUMP_P (insn)
2147 || (NONJUMP_INSN_P (insn)
2148 && (!single_succ_p (bb)
2149 || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
2150 {
2151 #ifdef HAVE_cc0
2152 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
2153 if cc0 isn't set. */
2154 rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
2155 if (note)
2156 insn = XEXP (note, 0);
2157 else
2158 {
2159 rtx maybe_cc0_setter = prev_nonnote_insn (insn);
2160 if (maybe_cc0_setter
2161 && INSN_P (maybe_cc0_setter)
2162 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
2163 insn = maybe_cc0_setter;
2164 }
2165 #endif
2166 /* FIXME: What if something in cc0/jump uses value set in new insn? */
2167 new_insn = emit_insn_before_noloc (pat, insn, bb);
2168 }
2169
2170 /* Likewise if the last insn is a call, as will happen in the presence
2171 of exception handling. */
2172 else if (CALL_P (insn)
2173 && (!single_succ_p (bb)
2174 || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
2175 {
2176 /* Keeping in mind targets with small register classes and parameters
2177 in registers, we search backward and place the instructions before
2178 the first parameter is loaded. Do this for everyone for consistency
2179 and a presumption that we'll get better code elsewhere as well. */
2180
2181 /* Since different machines initialize their parameter registers
2182 in different orders, assume nothing. Collect the set of all
2183 parameter registers. */
2184 insn = find_first_parameter_load (insn, BB_HEAD (bb));
2185
2186 /* If we found all the parameter loads, then we want to insert
2187 before the first parameter load.
2188
2189 If we did not find all the parameter loads, then we might have
2190 stopped on the head of the block, which could be a CODE_LABEL.
2191 If we inserted before the CODE_LABEL, then we would be putting
2192 the insn in the wrong basic block. In that case, put the insn
2193 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
2194 while (LABEL_P (insn)
2195 || NOTE_INSN_BASIC_BLOCK_P (insn))
2196 insn = NEXT_INSN (insn);
2197
2198 new_insn = emit_insn_before_noloc (pat, insn, bb);
2199 }
2200 else
2201 new_insn = emit_insn_after_noloc (pat, insn, bb);
2202
2203 while (1)
2204 {
2205 if (INSN_P (pat))
2206 add_label_notes (PATTERN (pat), new_insn);
2207 if (pat == pat_end)
2208 break;
2209 pat = NEXT_INSN (pat);
2210 }
2211
2212 gcse_create_count++;
2213
2214 if (dump_file)
2215 {
2216 fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
2217 bb->index, INSN_UID (new_insn));
2218 fprintf (dump_file, "copying expression %d to reg %d\n",
2219 expr->bitmap_index, regno);
2220 }
2221 }
2222
2223 /* Insert partially redundant expressions on edges in the CFG to make
2224 the expressions fully redundant. */
2225
2226 static int
2227 pre_edge_insert (struct edge_list *edge_list, struct expr **index_map)
2228 {
2229 int e, i, j, num_edges, set_size, did_insert = 0;
2230 sbitmap *inserted;
2231
2232 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
2233 if it reaches any of the deleted expressions. */
2234
2235 set_size = pre_insert_map[0]->size;
2236 num_edges = NUM_EDGES (edge_list);
2237 inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
2238 bitmap_vector_clear (inserted, num_edges);
2239
2240 for (e = 0; e < num_edges; e++)
2241 {
2242 int indx;
2243 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
2244
2245 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
2246 {
2247 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
2248
2249 for (j = indx;
2250 insert && j < (int) expr_hash_table.n_elems;
2251 j++, insert >>= 1)
2252 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
2253 {
2254 struct expr *expr = index_map[j];
2255 struct occr *occr;
2256
2257 /* Now look at each deleted occurrence of this expression. */
2258 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2259 {
2260 if (! occr->deleted_p)
2261 continue;
2262
2263 /* Insert this expression on this edge if it would
2264 reach the deleted occurrence in BB. */
2265 if (!bitmap_bit_p (inserted[e], j))
2266 {
2267 rtx insn;
2268 edge eg = INDEX_EDGE (edge_list, e);
2269
2270 /* We can't insert anything on an abnormal and
2271 critical edge, so we insert the insn at the end of
2272 the previous block. There are several alternatives
2273 detailed in Morgans book P277 (sec 10.5) for
2274 handling this situation. This one is easiest for
2275 now. */
2276
2277 if (eg->flags & EDGE_ABNORMAL)
2278 insert_insn_end_basic_block (index_map[j], bb);
2279 else
2280 {
2281 insn = process_insert_insn (index_map[j]);
2282 insert_insn_on_edge (insn, eg);
2283 }
2284
2285 if (dump_file)
2286 {
2287 fprintf (dump_file, "PRE: edge (%d,%d), ",
2288 bb->index,
2289 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
2290 fprintf (dump_file, "copy expression %d\n",
2291 expr->bitmap_index);
2292 }
2293
2294 update_ld_motion_stores (expr);
2295 bitmap_set_bit (inserted[e], j);
2296 did_insert = 1;
2297 gcse_create_count++;
2298 }
2299 }
2300 }
2301 }
2302 }
2303
2304 sbitmap_vector_free (inserted);
2305 return did_insert;
2306 }
2307
2308 /* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
2309 Given "old_reg <- expr" (INSN), instead of adding after it
2310 reaching_reg <- old_reg
2311 it's better to do the following:
2312 reaching_reg <- expr
2313 old_reg <- reaching_reg
2314 because this way copy propagation can discover additional PRE
2315 opportunities. But if this fails, we try the old way.
2316 When "expr" is a store, i.e.
2317 given "MEM <- old_reg", instead of adding after it
2318 reaching_reg <- old_reg
2319 it's better to add it before as follows:
2320 reaching_reg <- old_reg
2321 MEM <- reaching_reg. */
2322
2323 static void
2324 pre_insert_copy_insn (struct expr *expr, rtx insn)
2325 {
2326 rtx reg = expr->reaching_reg;
2327 int regno = REGNO (reg);
2328 int indx = expr->bitmap_index;
2329 rtx pat = PATTERN (insn);
2330 rtx set, first_set, new_insn;
2331 rtx old_reg;
2332 int i;
2333
2334 /* This block matches the logic in hash_scan_insn. */
2335 switch (GET_CODE (pat))
2336 {
2337 case SET:
2338 set = pat;
2339 break;
2340
2341 case PARALLEL:
2342 /* Search through the parallel looking for the set whose
2343 source was the expression that we're interested in. */
2344 first_set = NULL_RTX;
2345 set = NULL_RTX;
2346 for (i = 0; i < XVECLEN (pat, 0); i++)
2347 {
2348 rtx x = XVECEXP (pat, 0, i);
2349 if (GET_CODE (x) == SET)
2350 {
2351 /* If the source was a REG_EQUAL or REG_EQUIV note, we
2352 may not find an equivalent expression, but in this
2353 case the PARALLEL will have a single set. */
2354 if (first_set == NULL_RTX)
2355 first_set = x;
2356 if (expr_equiv_p (SET_SRC (x), expr->expr))
2357 {
2358 set = x;
2359 break;
2360 }
2361 }
2362 }
2363
2364 gcc_assert (first_set);
2365 if (set == NULL_RTX)
2366 set = first_set;
2367 break;
2368
2369 default:
2370 gcc_unreachable ();
2371 }
2372
2373 if (REG_P (SET_DEST (set)))
2374 {
2375 old_reg = SET_DEST (set);
2376 /* Check if we can modify the set destination in the original insn. */
2377 if (validate_change (insn, &SET_DEST (set), reg, 0))
2378 {
2379 new_insn = gen_move_insn (old_reg, reg);
2380 new_insn = emit_insn_after (new_insn, insn);
2381 }
2382 else
2383 {
2384 new_insn = gen_move_insn (reg, old_reg);
2385 new_insn = emit_insn_after (new_insn, insn);
2386 }
2387 }
2388 else /* This is possible only in case of a store to memory. */
2389 {
2390 old_reg = SET_SRC (set);
2391 new_insn = gen_move_insn (reg, old_reg);
2392
2393 /* Check if we can modify the set source in the original insn. */
2394 if (validate_change (insn, &SET_SRC (set), reg, 0))
2395 new_insn = emit_insn_before (new_insn, insn);
2396 else
2397 new_insn = emit_insn_after (new_insn, insn);
2398 }
2399
2400 gcse_create_count++;
2401
2402 if (dump_file)
2403 fprintf (dump_file,
2404 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
2405 BLOCK_FOR_INSN (insn)->index, INSN_UID (new_insn), indx,
2406 INSN_UID (insn), regno);
2407 }
2408
2409 /* Copy available expressions that reach the redundant expression
2410 to `reaching_reg'. */
2411
2412 static void
2413 pre_insert_copies (void)
2414 {
2415 unsigned int i, added_copy;
2416 struct expr *expr;
2417 struct occr *occr;
2418 struct occr *avail;
2419
2420 /* For each available expression in the table, copy the result to
2421 `reaching_reg' if the expression reaches a deleted one.
2422
2423 ??? The current algorithm is rather brute force.
2424 Need to do some profiling. */
2425
2426 for (i = 0; i < expr_hash_table.size; i++)
2427 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2428 {
2429 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
2430 we don't want to insert a copy here because the expression may not
2431 really be redundant. So only insert an insn if the expression was
2432 deleted. This test also avoids further processing if the
2433 expression wasn't deleted anywhere. */
2434 if (expr->reaching_reg == NULL)
2435 continue;
2436
2437 /* Set when we add a copy for that expression. */
2438 added_copy = 0;
2439
2440 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2441 {
2442 if (! occr->deleted_p)
2443 continue;
2444
2445 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
2446 {
2447 rtx insn = avail->insn;
2448
2449 /* No need to handle this one if handled already. */
2450 if (avail->copied_p)
2451 continue;
2452
2453 /* Don't handle this one if it's a redundant one. */
2454 if (INSN_DELETED_P (insn))
2455 continue;
2456
2457 /* Or if the expression doesn't reach the deleted one. */
2458 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
2459 expr,
2460 BLOCK_FOR_INSN (occr->insn)))
2461 continue;
2462
2463 added_copy = 1;
2464
2465 /* Copy the result of avail to reaching_reg. */
2466 pre_insert_copy_insn (expr, insn);
2467 avail->copied_p = 1;
2468 }
2469 }
2470
2471 if (added_copy)
2472 update_ld_motion_stores (expr);
2473 }
2474 }
2475
2476 /* Emit move from SRC to DEST noting the equivalence with expression computed
2477 in INSN. */
2478
2479 static rtx
2480 gcse_emit_move_after (rtx dest, rtx src, rtx insn)
2481 {
2482 rtx new_rtx;
2483 rtx set = single_set (insn), set2;
2484 rtx note;
2485 rtx eqv = NULL_RTX;
2486
2487 /* This should never fail since we're creating a reg->reg copy
2488 we've verified to be valid. */
2489
2490 new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);
2491
2492 /* Note the equivalence for local CSE pass. Take the note from the old
2493 set if there was one. Otherwise record the SET_SRC from the old set
2494 unless DEST is also an operand of the SET_SRC. */
2495 set2 = single_set (new_rtx);
2496 if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
2497 return new_rtx;
2498 if ((note = find_reg_equal_equiv_note (insn)))
2499 eqv = XEXP (note, 0);
2500 else if (! REG_P (dest)
2501 || ! reg_mentioned_p (dest, SET_SRC (set)))
2502 eqv = SET_SRC (set);
2503
2504 if (eqv != NULL_RTX)
2505 set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));
2506
2507 return new_rtx;
2508 }
2509
2510 /* Delete redundant computations.
2511 Deletion is done by changing the insn to copy the `reaching_reg' of
2512 the expression into the result of the SET. It is left to later passes
2513 (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
2514
2515 Return nonzero if a change is made. */
2516
2517 static int
2518 pre_delete (void)
2519 {
2520 unsigned int i;
2521 int changed;
2522 struct expr *expr;
2523 struct occr *occr;
2524
2525 changed = 0;
2526 for (i = 0; i < expr_hash_table.size; i++)
2527 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2528 {
2529 int indx = expr->bitmap_index;
2530
2531 /* We only need to search antic_occr since we require ANTLOC != 0. */
2532 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2533 {
2534 rtx insn = occr->insn;
2535 rtx set;
2536 basic_block bb = BLOCK_FOR_INSN (insn);
2537
2538 /* We only delete insns that have a single_set. */
2539 if (bitmap_bit_p (pre_delete_map[bb->index], indx)
2540 && (set = single_set (insn)) != 0
2541 && dbg_cnt (pre_insn))
2542 {
2543 /* Create a pseudo-reg to store the result of reaching
2544 expressions into. Get the mode for the new pseudo from
2545 the mode of the original destination pseudo. */
2546 if (expr->reaching_reg == NULL)
2547 expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST (set));
2548
2549 gcse_emit_move_after (SET_DEST (set), expr->reaching_reg, insn);
2550 delete_insn (insn);
2551 occr->deleted_p = 1;
2552 changed = 1;
2553 gcse_subst_count++;
2554
2555 if (dump_file)
2556 {
2557 fprintf (dump_file,
2558 "PRE: redundant insn %d (expression %d) in ",
2559 INSN_UID (insn), indx);
2560 fprintf (dump_file, "bb %d, reaching reg is %d\n",
2561 bb->index, REGNO (expr->reaching_reg));
2562 }
2563 }
2564 }
2565 }
2566
2567 return changed;
2568 }
2569
2570 /* Perform GCSE optimizations using PRE.
2571 This is called by one_pre_gcse_pass after all the dataflow analysis
2572 has been done.
2573
2574 This is based on the original Morel-Renvoise paper Fred Chow's thesis, and
2575 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
2576 Compiler Design and Implementation.
2577
2578 ??? A new pseudo reg is created to hold the reaching expression. The nice
2579 thing about the classical approach is that it would try to use an existing
2580 reg. If the register can't be adequately optimized [i.e. we introduce
2581 reload problems], one could add a pass here to propagate the new register
2582 through the block.
2583
2584 ??? We don't handle single sets in PARALLELs because we're [currently] not
2585 able to copy the rest of the parallel when we insert copies to create full
2586 redundancies from partial redundancies. However, there's no reason why we
2587 can't handle PARALLELs in the cases where there are no partial
2588 redundancies. */
2589
2590 static int
2591 pre_gcse (struct edge_list *edge_list)
2592 {
2593 unsigned int i;
2594 int did_insert, changed;
2595 struct expr **index_map;
2596 struct expr *expr;
2597
2598 /* Compute a mapping from expression number (`bitmap_index') to
2599 hash table entry. */
2600
2601 index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
2602 for (i = 0; i < expr_hash_table.size; i++)
2603 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2604 index_map[expr->bitmap_index] = expr;
2605
2606 /* Delete the redundant insns first so that
2607 - we know what register to use for the new insns and for the other
2608 ones with reaching expressions
2609 - we know which insns are redundant when we go to create copies */
2610
2611 changed = pre_delete ();
2612 did_insert = pre_edge_insert (edge_list, index_map);
2613
2614 /* In other places with reaching expressions, copy the expression to the
2615 specially allocated pseudo-reg that reaches the redundant expr. */
2616 pre_insert_copies ();
2617 if (did_insert)
2618 {
2619 commit_edge_insertions ();
2620 changed = 1;
2621 }
2622
2623 free (index_map);
2624 return changed;
2625 }
2626
2627 /* Top level routine to perform one PRE GCSE pass.
2628
2629 Return nonzero if a change was made. */
2630
2631 static int
2632 one_pre_gcse_pass (void)
2633 {
2634 int changed = 0;
2635
2636 gcse_subst_count = 0;
2637 gcse_create_count = 0;
2638
2639 /* Return if there's nothing to do, or it is too expensive. */
2640 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
2641 || is_too_expensive (_("PRE disabled")))
2642 return 0;
2643
2644 /* We need alias. */
2645 init_alias_analysis ();
2646
2647 bytes_used = 0;
2648 gcc_obstack_init (&gcse_obstack);
2649 alloc_gcse_mem ();
2650
2651 alloc_hash_table (&expr_hash_table);
2652 add_noreturn_fake_exit_edges ();
2653 if (flag_gcse_lm)
2654 compute_ld_motion_mems ();
2655
2656 compute_hash_table (&expr_hash_table);
2657 if (flag_gcse_lm)
2658 trim_ld_motion_mems ();
2659 if (dump_file)
2660 dump_hash_table (dump_file, "Expression", &expr_hash_table);
2661
2662 if (expr_hash_table.n_elems > 0)
2663 {
2664 struct edge_list *edge_list;
2665 alloc_pre_mem (last_basic_block, expr_hash_table.n_elems);
2666 edge_list = compute_pre_data ();
2667 changed |= pre_gcse (edge_list);
2668 free_edge_list (edge_list);
2669 free_pre_mem ();
2670 }
2671
2672 if (flag_gcse_lm)
2673 free_ld_motion_mems ();
2674 remove_fake_exit_edges ();
2675 free_hash_table (&expr_hash_table);
2676
2677 free_gcse_mem ();
2678 obstack_free (&gcse_obstack, NULL);
2679
2680 /* We are finished with alias. */
2681 end_alias_analysis ();
2682
2683 if (dump_file)
2684 {
2685 fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
2686 current_function_name (), n_basic_blocks, bytes_used);
2687 fprintf (dump_file, "%d substs, %d insns created\n",
2688 gcse_subst_count, gcse_create_count);
2689 }
2690
2691 return changed;
2692 }
2693 \f
2694 /* If X contains any LABEL_REF's, add REG_LABEL_OPERAND notes for them
2695 to INSN. If such notes are added to an insn which references a
2696 CODE_LABEL, the LABEL_NUSES count is incremented. We have to add
2697 that note, because the following loop optimization pass requires
2698 them. */
2699
2700 /* ??? If there was a jump optimization pass after gcse and before loop,
2701 then we would not need to do this here, because jump would add the
2702 necessary REG_LABEL_OPERAND and REG_LABEL_TARGET notes. */
2703
2704 static void
2705 add_label_notes (rtx x, rtx insn)
2706 {
2707 enum rtx_code code = GET_CODE (x);
2708 int i, j;
2709 const char *fmt;
2710
2711 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
2712 {
2713 /* This code used to ignore labels that referred to dispatch tables to
2714 avoid flow generating (slightly) worse code.
2715
2716 We no longer ignore such label references (see LABEL_REF handling in
2717 mark_jump_label for additional information). */
2718
2719 /* There's no reason for current users to emit jump-insns with
2720 such a LABEL_REF, so we don't have to handle REG_LABEL_TARGET
2721 notes. */
2722 gcc_assert (!JUMP_P (insn));
2723 add_reg_note (insn, REG_LABEL_OPERAND, XEXP (x, 0));
2724
2725 if (LABEL_P (XEXP (x, 0)))
2726 LABEL_NUSES (XEXP (x, 0))++;
2727
2728 return;
2729 }
2730
2731 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2732 {
2733 if (fmt[i] == 'e')
2734 add_label_notes (XEXP (x, i), insn);
2735 else if (fmt[i] == 'E')
2736 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2737 add_label_notes (XVECEXP (x, i, j), insn);
2738 }
2739 }
2740
2741 /* Code Hoisting variables and subroutines. */
2742
2743 /* Very busy expressions. */
2744 static sbitmap *hoist_vbein;
2745 static sbitmap *hoist_vbeout;
2746
2747 /* ??? We could compute post dominators and run this algorithm in
2748 reverse to perform tail merging, doing so would probably be
2749 more effective than the tail merging code in jump.c.
2750
2751 It's unclear if tail merging could be run in parallel with
2752 code hoisting. It would be nice. */
2753
2754 /* Allocate vars used for code hoisting analysis. */
2755
2756 static void
2757 alloc_code_hoist_mem (int n_blocks, int n_exprs)
2758 {
2759 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
2760 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
2761 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
2762
2763 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
2764 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
2765 }
2766
2767 /* Free vars used for code hoisting analysis. */
2768
2769 static void
2770 free_code_hoist_mem (void)
2771 {
2772 sbitmap_vector_free (antloc);
2773 sbitmap_vector_free (transp);
2774 sbitmap_vector_free (comp);
2775
2776 sbitmap_vector_free (hoist_vbein);
2777 sbitmap_vector_free (hoist_vbeout);
2778
2779 free_dominance_info (CDI_DOMINATORS);
2780 }
2781
2782 /* Compute the very busy expressions at entry/exit from each block.
2783
2784 An expression is very busy if all paths from a given point
2785 compute the expression. */
2786
2787 static void
2788 compute_code_hoist_vbeinout (void)
2789 {
2790 int changed, passes;
2791 basic_block bb;
2792
2793 bitmap_vector_clear (hoist_vbeout, last_basic_block);
2794 bitmap_vector_clear (hoist_vbein, last_basic_block);
2795
2796 passes = 0;
2797 changed = 1;
2798
2799 while (changed)
2800 {
2801 changed = 0;
2802
2803 /* We scan the blocks in the reverse order to speed up
2804 the convergence. */
2805 FOR_EACH_BB_REVERSE (bb)
2806 {
2807 if (bb->next_bb != EXIT_BLOCK_PTR)
2808 {
2809 bitmap_intersection_of_succs (hoist_vbeout[bb->index],
2810 hoist_vbein, bb);
2811
2812 /* Include expressions in VBEout that are calculated
2813 in BB and available at its end. */
2814 bitmap_ior (hoist_vbeout[bb->index],
2815 hoist_vbeout[bb->index], comp[bb->index]);
2816 }
2817
2818 changed |= bitmap_or_and (hoist_vbein[bb->index],
2819 antloc[bb->index],
2820 hoist_vbeout[bb->index],
2821 transp[bb->index]);
2822 }
2823
2824 passes++;
2825 }
2826
2827 if (dump_file)
2828 {
2829 fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);
2830
2831 FOR_EACH_BB (bb)
2832 {
2833 fprintf (dump_file, "vbein (%d): ", bb->index);
2834 dump_bitmap_file (dump_file, hoist_vbein[bb->index]);
2835 fprintf (dump_file, "vbeout(%d): ", bb->index);
2836 dump_bitmap_file (dump_file, hoist_vbeout[bb->index]);
2837 }
2838 }
2839 }
2840
2841 /* Top level routine to do the dataflow analysis needed by code hoisting. */
2842
2843 static void
2844 compute_code_hoist_data (void)
2845 {
2846 compute_local_properties (transp, comp, antloc, &expr_hash_table);
2847 prune_expressions (false);
2848 compute_code_hoist_vbeinout ();
2849 calculate_dominance_info (CDI_DOMINATORS);
2850 if (dump_file)
2851 fprintf (dump_file, "\n");
2852 }
2853
2854 /* Update register pressure for BB when hoisting an expression from
2855 instruction FROM, if live ranges of inputs are shrunk. Also
2856 maintain live_in information if live range of register referred
2857 in FROM is shrunk.
2858
2859 Return 0 if register pressure doesn't change, otherwise return
2860 the number by which register pressure is decreased.
2861
2862 NOTE: Register pressure won't be increased in this function. */
2863
2864 static int
2865 update_bb_reg_pressure (basic_block bb, rtx from)
2866 {
2867 rtx dreg, insn;
2868 basic_block succ_bb;
2869 df_ref *op, op_ref;
2870 edge succ;
2871 edge_iterator ei;
2872 int decreased_pressure = 0;
2873 int nregs;
2874 enum reg_class pressure_class;
2875
2876 for (op = DF_INSN_USES (from); *op; op++)
2877 {
2878 dreg = DF_REF_REAL_REG (*op);
2879 /* The live range of register is shrunk only if it isn't:
2880 1. referred on any path from the end of this block to EXIT, or
2881 2. referred by insns other than FROM in this block. */
2882 FOR_EACH_EDGE (succ, ei, bb->succs)
2883 {
2884 succ_bb = succ->dest;
2885 if (succ_bb == EXIT_BLOCK_PTR)
2886 continue;
2887
2888 if (bitmap_bit_p (BB_DATA (succ_bb)->live_in, REGNO (dreg)))
2889 break;
2890 }
2891 if (succ != NULL)
2892 continue;
2893
2894 op_ref = DF_REG_USE_CHAIN (REGNO (dreg));
2895 for (; op_ref; op_ref = DF_REF_NEXT_REG (op_ref))
2896 {
2897 if (!DF_REF_INSN_INFO (op_ref))
2898 continue;
2899
2900 insn = DF_REF_INSN (op_ref);
2901 if (BLOCK_FOR_INSN (insn) == bb
2902 && NONDEBUG_INSN_P (insn) && insn != from)
2903 break;
2904 }
2905
2906 pressure_class = get_regno_pressure_class (REGNO (dreg), &nregs);
2907 /* Decrease register pressure and update live_in information for
2908 this block. */
2909 if (!op_ref && pressure_class != NO_REGS)
2910 {
2911 decreased_pressure += nregs;
2912 BB_DATA (bb)->max_reg_pressure[pressure_class] -= nregs;
2913 bitmap_clear_bit (BB_DATA (bb)->live_in, REGNO (dreg));
2914 }
2915 }
2916 return decreased_pressure;
2917 }
2918
2919 /* Determine if the expression EXPR should be hoisted to EXPR_BB up in
2920 flow graph, if it can reach BB unimpared. Stop the search if the
2921 expression would need to be moved more than DISTANCE instructions.
2922
2923 DISTANCE is the number of instructions through which EXPR can be
2924 hoisted up in flow graph.
2925
2926 BB_SIZE points to an array which contains the number of instructions
2927 for each basic block.
2928
2929 PRESSURE_CLASS and NREGS are register class and number of hard registers
2930 for storing EXPR.
2931
2932 HOISTED_BBS points to a bitmap indicating basic blocks through which
2933 EXPR is hoisted.
2934
2935 FROM is the instruction from which EXPR is hoisted.
2936
2937 It's unclear exactly what Muchnick meant by "unimpared". It seems
2938 to me that the expression must either be computed or transparent in
2939 *every* block in the path(s) from EXPR_BB to BB. Any other definition
2940 would allow the expression to be hoisted out of loops, even if
2941 the expression wasn't a loop invariant.
2942
2943 Contrast this to reachability for PRE where an expression is
2944 considered reachable if *any* path reaches instead of *all*
2945 paths. */
2946
2947 static int
2948 should_hoist_expr_to_dom (basic_block expr_bb, struct expr *expr,
2949 basic_block bb, sbitmap visited, int distance,
2950 int *bb_size, enum reg_class pressure_class,
2951 int *nregs, bitmap hoisted_bbs, rtx from)
2952 {
2953 unsigned int i;
2954 edge pred;
2955 edge_iterator ei;
2956 sbitmap_iterator sbi;
2957 int visited_allocated_locally = 0;
2958 int decreased_pressure = 0;
2959
2960 if (flag_ira_hoist_pressure)
2961 {
2962 /* Record old information of basic block BB when it is visited
2963 at the first time. */
2964 if (!bitmap_bit_p (hoisted_bbs, bb->index))
2965 {
2966 struct bb_data *data = BB_DATA (bb);
2967 bitmap_copy (data->backup, data->live_in);
2968 data->old_pressure = data->max_reg_pressure[pressure_class];
2969 }
2970 decreased_pressure = update_bb_reg_pressure (bb, from);
2971 }
2972 /* Terminate the search if distance, for which EXPR is allowed to move,
2973 is exhausted. */
2974 if (distance > 0)
2975 {
2976 if (flag_ira_hoist_pressure)
2977 {
2978 /* Prefer to hoist EXPR if register pressure is decreased. */
2979 if (decreased_pressure > *nregs)
2980 distance += bb_size[bb->index];
2981 /* Let EXPR be hoisted through basic block at no cost if one
2982 of following conditions is satisfied:
2983
2984 1. The basic block has low register pressure.
2985 2. Register pressure won't be increases after hoisting EXPR.
2986
2987 Constant expressions is handled conservatively, because
2988 hoisting constant expression aggressively results in worse
2989 code. This decision is made by the observation of CSiBE
2990 on ARM target, while it has no obvious effect on other
2991 targets like x86, x86_64, mips and powerpc. */
2992 else if (CONST_INT_P (expr->expr)
2993 || (BB_DATA (bb)->max_reg_pressure[pressure_class]
2994 >= ira_class_hard_regs_num[pressure_class]
2995 && decreased_pressure < *nregs))
2996 distance -= bb_size[bb->index];
2997 }
2998 else
2999 distance -= bb_size[bb->index];
3000
3001 if (distance <= 0)
3002 return 0;
3003 }
3004 else
3005 gcc_assert (distance == 0);
3006
3007 if (visited == NULL)
3008 {
3009 visited_allocated_locally = 1;
3010 visited = sbitmap_alloc (last_basic_block);
3011 bitmap_clear (visited);
3012 }
3013
3014 FOR_EACH_EDGE (pred, ei, bb->preds)
3015 {
3016 basic_block pred_bb = pred->src;
3017
3018 if (pred->src == ENTRY_BLOCK_PTR)
3019 break;
3020 else if (pred_bb == expr_bb)
3021 continue;
3022 else if (bitmap_bit_p (visited, pred_bb->index))
3023 continue;
3024 else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
3025 break;
3026 /* Not killed. */
3027 else
3028 {
3029 bitmap_set_bit (visited, pred_bb->index);
3030 if (! should_hoist_expr_to_dom (expr_bb, expr, pred_bb,
3031 visited, distance, bb_size,
3032 pressure_class, nregs,
3033 hoisted_bbs, from))
3034 break;
3035 }
3036 }
3037 if (visited_allocated_locally)
3038 {
3039 /* If EXPR can be hoisted to expr_bb, record basic blocks through
3040 which EXPR is hoisted in hoisted_bbs. */
3041 if (flag_ira_hoist_pressure && !pred)
3042 {
3043 /* Record the basic block from which EXPR is hoisted. */
3044 bitmap_set_bit (visited, bb->index);
3045 EXECUTE_IF_SET_IN_BITMAP (visited, 0, i, sbi)
3046 bitmap_set_bit (hoisted_bbs, i);
3047 }
3048 sbitmap_free (visited);
3049 }
3050
3051 return (pred == NULL);
3052 }
3053 \f
3054 /* Find occurrence in BB. */
3055
3056 static struct occr *
3057 find_occr_in_bb (struct occr *occr, basic_block bb)
3058 {
3059 /* Find the right occurrence of this expression. */
3060 while (occr && BLOCK_FOR_INSN (occr->insn) != bb)
3061 occr = occr->next;
3062
3063 return occr;
3064 }
3065
3066 /* Actually perform code hoisting.
3067
3068 The code hoisting pass can hoist multiple computations of the same
3069 expression along dominated path to a dominating basic block, like
3070 from b2/b3 to b1 as depicted below:
3071
3072 b1 ------
3073 /\ |
3074 / \ |
3075 bx by distance
3076 / \ |
3077 / \ |
3078 b2 b3 ------
3079
3080 Unfortunately code hoisting generally extends the live range of an
3081 output pseudo register, which increases register pressure and hurts
3082 register allocation. To address this issue, an attribute MAX_DISTANCE
3083 is computed and attached to each expression. The attribute is computed
3084 from rtx cost of the corresponding expression and it's used to control
3085 how long the expression can be hoisted up in flow graph. As the
3086 expression is hoisted up in flow graph, GCC decreases its DISTANCE
3087 and stops the hoist if DISTANCE reaches 0. Code hoisting can decrease
3088 register pressure if live ranges of inputs are shrunk.
3089
3090 Option "-fira-hoist-pressure" implements register pressure directed
3091 hoist based on upper method. The rationale is:
3092 1. Calculate register pressure for each basic block by reusing IRA
3093 facility.
3094 2. When expression is hoisted through one basic block, GCC checks
3095 the change of live ranges for inputs/output. The basic block's
3096 register pressure will be increased because of extended live
3097 range of output. However, register pressure will be decreased
3098 if the live ranges of inputs are shrunk.
3099 3. After knowing how hoisting affects register pressure, GCC prefers
3100 to hoist the expression if it can decrease register pressure, by
3101 increasing DISTANCE of the corresponding expression.
3102 4. If hoisting the expression increases register pressure, GCC checks
3103 register pressure of the basic block and decrease DISTANCE only if
3104 the register pressure is high. In other words, expression will be
3105 hoisted through at no cost if the basic block has low register
3106 pressure.
3107 5. Update register pressure information for basic blocks through
3108 which expression is hoisted. */
3109
3110 static int
3111 hoist_code (void)
3112 {
3113 basic_block bb, dominated;
3114 vec<basic_block> dom_tree_walk;
3115 unsigned int dom_tree_walk_index;
3116 vec<basic_block> domby;
3117 unsigned int i, j, k;
3118 struct expr **index_map;
3119 struct expr *expr;
3120 int *to_bb_head;
3121 int *bb_size;
3122 int changed = 0;
3123 struct bb_data *data;
3124 /* Basic blocks that have occurrences reachable from BB. */
3125 bitmap from_bbs;
3126 /* Basic blocks through which expr is hoisted. */
3127 bitmap hoisted_bbs = NULL;
3128 bitmap_iterator bi;
3129
3130 /* Compute a mapping from expression number (`bitmap_index') to
3131 hash table entry. */
3132
3133 index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
3134 for (i = 0; i < expr_hash_table.size; i++)
3135 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
3136 index_map[expr->bitmap_index] = expr;
3137
3138 /* Calculate sizes of basic blocks and note how far
3139 each instruction is from the start of its block. We then use this
3140 data to restrict distance an expression can travel. */
3141
3142 to_bb_head = XCNEWVEC (int, get_max_uid ());
3143 bb_size = XCNEWVEC (int, last_basic_block);
3144
3145 FOR_EACH_BB (bb)
3146 {
3147 rtx insn;
3148 int to_head;
3149
3150 to_head = 0;
3151 FOR_BB_INSNS (bb, insn)
3152 {
3153 /* Don't count debug instructions to avoid them affecting
3154 decision choices. */
3155 if (NONDEBUG_INSN_P (insn))
3156 to_bb_head[INSN_UID (insn)] = to_head++;
3157 }
3158
3159 bb_size[bb->index] = to_head;
3160 }
3161
3162 gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR->succs) == 1
3163 && (EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest
3164 == ENTRY_BLOCK_PTR->next_bb));
3165
3166 from_bbs = BITMAP_ALLOC (NULL);
3167 if (flag_ira_hoist_pressure)
3168 hoisted_bbs = BITMAP_ALLOC (NULL);
3169
3170 dom_tree_walk = get_all_dominated_blocks (CDI_DOMINATORS,
3171 ENTRY_BLOCK_PTR->next_bb);
3172
3173 /* Walk over each basic block looking for potentially hoistable
3174 expressions, nothing gets hoisted from the entry block. */
3175 FOR_EACH_VEC_ELT (dom_tree_walk, dom_tree_walk_index, bb)
3176 {
3177 domby = get_dominated_to_depth (CDI_DOMINATORS, bb, MAX_HOIST_DEPTH);
3178
3179 if (domby.length () == 0)
3180 continue;
3181
3182 /* Examine each expression that is very busy at the exit of this
3183 block. These are the potentially hoistable expressions. */
3184 for (i = 0; i < SBITMAP_SIZE (hoist_vbeout[bb->index]); i++)
3185 {
3186 if (bitmap_bit_p (hoist_vbeout[bb->index], i))
3187 {
3188 int nregs = 0;
3189 enum reg_class pressure_class = NO_REGS;
3190 /* Current expression. */
3191 struct expr *expr = index_map[i];
3192 /* Number of occurrences of EXPR that can be hoisted to BB. */
3193 int hoistable = 0;
3194 /* Occurrences reachable from BB. */
3195 vec<occr_t> occrs_to_hoist = vNULL;
3196 /* We want to insert the expression into BB only once, so
3197 note when we've inserted it. */
3198 int insn_inserted_p;
3199 occr_t occr;
3200
3201 /* If an expression is computed in BB and is available at end of
3202 BB, hoist all occurrences dominated by BB to BB. */
3203 if (bitmap_bit_p (comp[bb->index], i))
3204 {
3205 occr = find_occr_in_bb (expr->antic_occr, bb);
3206
3207 if (occr)
3208 {
3209 /* An occurrence might've been already deleted
3210 while processing a dominator of BB. */
3211 if (!occr->deleted_p)
3212 {
3213 gcc_assert (NONDEBUG_INSN_P (occr->insn));
3214 hoistable++;
3215 }
3216 }
3217 else
3218 hoistable++;
3219 }
3220
3221 /* We've found a potentially hoistable expression, now
3222 we look at every block BB dominates to see if it
3223 computes the expression. */
3224 FOR_EACH_VEC_ELT (domby, j, dominated)
3225 {
3226 int max_distance;
3227
3228 /* Ignore self dominance. */
3229 if (bb == dominated)
3230 continue;
3231 /* We've found a dominated block, now see if it computes
3232 the busy expression and whether or not moving that
3233 expression to the "beginning" of that block is safe. */
3234 if (!bitmap_bit_p (antloc[dominated->index], i))
3235 continue;
3236
3237 occr = find_occr_in_bb (expr->antic_occr, dominated);
3238 gcc_assert (occr);
3239
3240 /* An occurrence might've been already deleted
3241 while processing a dominator of BB. */
3242 if (occr->deleted_p)
3243 continue;
3244 gcc_assert (NONDEBUG_INSN_P (occr->insn));
3245
3246 max_distance = expr->max_distance;
3247 if (max_distance > 0)
3248 /* Adjust MAX_DISTANCE to account for the fact that
3249 OCCR won't have to travel all of DOMINATED, but
3250 only part of it. */
3251 max_distance += (bb_size[dominated->index]
3252 - to_bb_head[INSN_UID (occr->insn)]);
3253
3254 pressure_class = get_pressure_class_and_nregs (occr->insn,
3255 &nregs);
3256
3257 /* Note if the expression should be hoisted from the dominated
3258 block to BB if it can reach DOMINATED unimpared.
3259
3260 Keep track of how many times this expression is hoistable
3261 from a dominated block into BB. */
3262 if (should_hoist_expr_to_dom (bb, expr, dominated, NULL,
3263 max_distance, bb_size,
3264 pressure_class, &nregs,
3265 hoisted_bbs, occr->insn))
3266 {
3267 hoistable++;
3268 occrs_to_hoist.safe_push (occr);
3269 bitmap_set_bit (from_bbs, dominated->index);
3270 }
3271 }
3272
3273 /* If we found more than one hoistable occurrence of this
3274 expression, then note it in the vector of expressions to
3275 hoist. It makes no sense to hoist things which are computed
3276 in only one BB, and doing so tends to pessimize register
3277 allocation. One could increase this value to try harder
3278 to avoid any possible code expansion due to register
3279 allocation issues; however experiments have shown that
3280 the vast majority of hoistable expressions are only movable
3281 from two successors, so raising this threshold is likely
3282 to nullify any benefit we get from code hoisting. */
3283 if (hoistable > 1 && dbg_cnt (hoist_insn))
3284 {
3285 /* If (hoistable != vec::length), then there is
3286 an occurrence of EXPR in BB itself. Don't waste
3287 time looking for LCA in this case. */
3288 if ((unsigned) hoistable == occrs_to_hoist.length ())
3289 {
3290 basic_block lca;
3291
3292 lca = nearest_common_dominator_for_set (CDI_DOMINATORS,
3293 from_bbs);
3294 if (lca != bb)
3295 /* Punt, it's better to hoist these occurrences to
3296 LCA. */
3297 occrs_to_hoist.release ();
3298 }
3299 }
3300 else
3301 /* Punt, no point hoisting a single occurence. */
3302 occrs_to_hoist.release ();
3303
3304 if (flag_ira_hoist_pressure
3305 && !occrs_to_hoist.is_empty ())
3306 {
3307 /* Increase register pressure of basic blocks to which
3308 expr is hoisted because of extended live range of
3309 output. */
3310 data = BB_DATA (bb);
3311 data->max_reg_pressure[pressure_class] += nregs;
3312 EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
3313 {
3314 data = BB_DATA (BASIC_BLOCK (k));
3315 data->max_reg_pressure[pressure_class] += nregs;
3316 }
3317 }
3318 else if (flag_ira_hoist_pressure)
3319 {
3320 /* Restore register pressure and live_in info for basic
3321 blocks recorded in hoisted_bbs when expr will not be
3322 hoisted. */
3323 EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
3324 {
3325 data = BB_DATA (BASIC_BLOCK (k));
3326 bitmap_copy (data->live_in, data->backup);
3327 data->max_reg_pressure[pressure_class]
3328 = data->old_pressure;
3329 }
3330 }
3331
3332 if (flag_ira_hoist_pressure)
3333 bitmap_clear (hoisted_bbs);
3334
3335 insn_inserted_p = 0;
3336
3337 /* Walk through occurrences of I'th expressions we want
3338 to hoist to BB and make the transformations. */
3339 FOR_EACH_VEC_ELT (occrs_to_hoist, j, occr)
3340 {
3341 rtx insn;
3342 rtx set;
3343
3344 gcc_assert (!occr->deleted_p);
3345
3346 insn = occr->insn;
3347 set = single_set (insn);
3348 gcc_assert (set);
3349
3350 /* Create a pseudo-reg to store the result of reaching
3351 expressions into. Get the mode for the new pseudo
3352 from the mode of the original destination pseudo.
3353
3354 It is important to use new pseudos whenever we
3355 emit a set. This will allow reload to use
3356 rematerialization for such registers. */
3357 if (!insn_inserted_p)
3358 expr->reaching_reg
3359 = gen_reg_rtx_and_attrs (SET_DEST (set));
3360
3361 gcse_emit_move_after (SET_DEST (set), expr->reaching_reg,
3362 insn);
3363 delete_insn (insn);
3364 occr->deleted_p = 1;
3365 changed = 1;
3366 gcse_subst_count++;
3367
3368 if (!insn_inserted_p)
3369 {
3370 insert_insn_end_basic_block (expr, bb);
3371 insn_inserted_p = 1;
3372 }
3373 }
3374
3375 occrs_to_hoist.release ();
3376 bitmap_clear (from_bbs);
3377 }
3378 }
3379 domby.release ();
3380 }
3381
3382 dom_tree_walk.release ();
3383 BITMAP_FREE (from_bbs);
3384 if (flag_ira_hoist_pressure)
3385 BITMAP_FREE (hoisted_bbs);
3386
3387 free (bb_size);
3388 free (to_bb_head);
3389 free (index_map);
3390
3391 return changed;
3392 }
3393
3394 /* Return pressure class and number of needed hard registers (through
3395 *NREGS) of register REGNO. */
3396 static enum reg_class
3397 get_regno_pressure_class (int regno, int *nregs)
3398 {
3399 if (regno >= FIRST_PSEUDO_REGISTER)
3400 {
3401 enum reg_class pressure_class;
3402
3403 pressure_class = reg_allocno_class (regno);
3404 pressure_class = ira_pressure_class_translate[pressure_class];
3405 *nregs
3406 = ira_reg_class_max_nregs[pressure_class][PSEUDO_REGNO_MODE (regno)];
3407 return pressure_class;
3408 }
3409 else if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)
3410 && ! TEST_HARD_REG_BIT (eliminable_regset, regno))
3411 {
3412 *nregs = 1;
3413 return ira_pressure_class_translate[REGNO_REG_CLASS (regno)];
3414 }
3415 else
3416 {
3417 *nregs = 0;
3418 return NO_REGS;
3419 }
3420 }
3421
3422 /* Return pressure class and number of hard registers (through *NREGS)
3423 for destination of INSN. */
3424 static enum reg_class
3425 get_pressure_class_and_nregs (rtx insn, int *nregs)
3426 {
3427 rtx reg;
3428 enum reg_class pressure_class;
3429 rtx set = single_set (insn);
3430
3431 /* Considered invariant insns have only one set. */
3432 gcc_assert (set != NULL_RTX);
3433 reg = SET_DEST (set);
3434 if (GET_CODE (reg) == SUBREG)
3435 reg = SUBREG_REG (reg);
3436 if (MEM_P (reg))
3437 {
3438 *nregs = 0;
3439 pressure_class = NO_REGS;
3440 }
3441 else
3442 {
3443 gcc_assert (REG_P (reg));
3444 pressure_class = reg_allocno_class (REGNO (reg));
3445 pressure_class = ira_pressure_class_translate[pressure_class];
3446 *nregs
3447 = ira_reg_class_max_nregs[pressure_class][GET_MODE (SET_SRC (set))];
3448 }
3449 return pressure_class;
3450 }
3451
3452 /* Increase (if INCR_P) or decrease current register pressure for
3453 register REGNO. */
3454 static void
3455 change_pressure (int regno, bool incr_p)
3456 {
3457 int nregs;
3458 enum reg_class pressure_class;
3459
3460 pressure_class = get_regno_pressure_class (regno, &nregs);
3461 if (! incr_p)
3462 curr_reg_pressure[pressure_class] -= nregs;
3463 else
3464 {
3465 curr_reg_pressure[pressure_class] += nregs;
3466 if (BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
3467 < curr_reg_pressure[pressure_class])
3468 BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
3469 = curr_reg_pressure[pressure_class];
3470 }
3471 }
3472
3473 /* Calculate register pressure for each basic block by walking insns
3474 from last to first. */
3475 static void
3476 calculate_bb_reg_pressure (void)
3477 {
3478 int i;
3479 unsigned int j;
3480 rtx insn;
3481 basic_block bb;
3482 bitmap curr_regs_live;
3483 bitmap_iterator bi;
3484
3485
3486 ira_setup_eliminable_regset (false);
3487 curr_regs_live = BITMAP_ALLOC (&reg_obstack);
3488 FOR_EACH_BB (bb)
3489 {
3490 curr_bb = bb;
3491 BB_DATA (bb)->live_in = BITMAP_ALLOC (NULL);
3492 BB_DATA (bb)->backup = BITMAP_ALLOC (NULL);
3493 bitmap_copy (BB_DATA (bb)->live_in, df_get_live_in (bb));
3494 bitmap_copy (curr_regs_live, df_get_live_out (bb));
3495 for (i = 0; i < ira_pressure_classes_num; i++)
3496 curr_reg_pressure[ira_pressure_classes[i]] = 0;
3497 EXECUTE_IF_SET_IN_BITMAP (curr_regs_live, 0, j, bi)
3498 change_pressure (j, true);
3499
3500 FOR_BB_INSNS_REVERSE (bb, insn)
3501 {
3502 rtx dreg;
3503 int regno;
3504 df_ref *def_rec, *use_rec;
3505
3506 if (! NONDEBUG_INSN_P (insn))
3507 continue;
3508
3509 for (def_rec = DF_INSN_DEFS (insn); *def_rec; def_rec++)
3510 {
3511 dreg = DF_REF_REAL_REG (*def_rec);
3512 gcc_assert (REG_P (dreg));
3513 regno = REGNO (dreg);
3514 if (!(DF_REF_FLAGS (*def_rec)
3515 & (DF_REF_PARTIAL | DF_REF_CONDITIONAL)))
3516 {
3517 if (bitmap_clear_bit (curr_regs_live, regno))
3518 change_pressure (regno, false);
3519 }
3520 }
3521
3522 for (use_rec = DF_INSN_USES (insn); *use_rec; use_rec++)
3523 {
3524 dreg = DF_REF_REAL_REG (*use_rec);
3525 gcc_assert (REG_P (dreg));
3526 regno = REGNO (dreg);
3527 if (bitmap_set_bit (curr_regs_live, regno))
3528 change_pressure (regno, true);
3529 }
3530 }
3531 }
3532 BITMAP_FREE (curr_regs_live);
3533
3534 if (dump_file == NULL)
3535 return;
3536
3537 fprintf (dump_file, "\nRegister Pressure: \n");
3538 FOR_EACH_BB (bb)
3539 {
3540 fprintf (dump_file, " Basic block %d: \n", bb->index);
3541 for (i = 0; (int) i < ira_pressure_classes_num; i++)
3542 {
3543 enum reg_class pressure_class;
3544
3545 pressure_class = ira_pressure_classes[i];
3546 if (BB_DATA (bb)->max_reg_pressure[pressure_class] == 0)
3547 continue;
3548
3549 fprintf (dump_file, " %s=%d\n", reg_class_names[pressure_class],
3550 BB_DATA (bb)->max_reg_pressure[pressure_class]);
3551 }
3552 }
3553 fprintf (dump_file, "\n");
3554 }
3555
3556 /* Top level routine to perform one code hoisting (aka unification) pass
3557
3558 Return nonzero if a change was made. */
3559
3560 static int
3561 one_code_hoisting_pass (void)
3562 {
3563 int changed = 0;
3564
3565 gcse_subst_count = 0;
3566 gcse_create_count = 0;
3567
3568 /* Return if there's nothing to do, or it is too expensive. */
3569 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
3570 || is_too_expensive (_("GCSE disabled")))
3571 return 0;
3572
3573 doing_code_hoisting_p = true;
3574
3575 /* Calculate register pressure for each basic block. */
3576 if (flag_ira_hoist_pressure)
3577 {
3578 regstat_init_n_sets_and_refs ();
3579 ira_set_pseudo_classes (false, dump_file);
3580 alloc_aux_for_blocks (sizeof (struct bb_data));
3581 calculate_bb_reg_pressure ();
3582 regstat_free_n_sets_and_refs ();
3583 }
3584
3585 /* We need alias. */
3586 init_alias_analysis ();
3587
3588 bytes_used = 0;
3589 gcc_obstack_init (&gcse_obstack);
3590 alloc_gcse_mem ();
3591
3592 alloc_hash_table (&expr_hash_table);
3593 compute_hash_table (&expr_hash_table);
3594 if (dump_file)
3595 dump_hash_table (dump_file, "Code Hosting Expressions", &expr_hash_table);
3596
3597 if (expr_hash_table.n_elems > 0)
3598 {
3599 alloc_code_hoist_mem (last_basic_block, expr_hash_table.n_elems);
3600 compute_code_hoist_data ();
3601 changed = hoist_code ();
3602 free_code_hoist_mem ();
3603 }
3604
3605 if (flag_ira_hoist_pressure)
3606 {
3607 free_aux_for_blocks ();
3608 free_reg_info ();
3609 }
3610 free_hash_table (&expr_hash_table);
3611 free_gcse_mem ();
3612 obstack_free (&gcse_obstack, NULL);
3613
3614 /* We are finished with alias. */
3615 end_alias_analysis ();
3616
3617 if (dump_file)
3618 {
3619 fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
3620 current_function_name (), n_basic_blocks, bytes_used);
3621 fprintf (dump_file, "%d substs, %d insns created\n",
3622 gcse_subst_count, gcse_create_count);
3623 }
3624
3625 doing_code_hoisting_p = false;
3626
3627 return changed;
3628 }
3629 \f
3630 /* Here we provide the things required to do store motion towards the exit.
3631 In order for this to be effective, gcse also needed to be taught how to
3632 move a load when it is killed only by a store to itself.
3633
3634 int i;
3635 float a[10];
3636
3637 void foo(float scale)
3638 {
3639 for (i=0; i<10; i++)
3640 a[i] *= scale;
3641 }
3642
3643 'i' is both loaded and stored to in the loop. Normally, gcse cannot move
3644 the load out since its live around the loop, and stored at the bottom
3645 of the loop.
3646
3647 The 'Load Motion' referred to and implemented in this file is
3648 an enhancement to gcse which when using edge based LCM, recognizes
3649 this situation and allows gcse to move the load out of the loop.
3650
3651 Once gcse has hoisted the load, store motion can then push this
3652 load towards the exit, and we end up with no loads or stores of 'i'
3653 in the loop. */
3654
3655 static hashval_t
3656 pre_ldst_expr_hash (const void *p)
3657 {
3658 int do_not_record_p = 0;
3659 const struct ls_expr *const x = (const struct ls_expr *) p;
3660 return
3661 hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
3662 }
3663
3664 static int
3665 pre_ldst_expr_eq (const void *p1, const void *p2)
3666 {
3667 const struct ls_expr *const ptr1 = (const struct ls_expr *) p1,
3668 *const ptr2 = (const struct ls_expr *) p2;
3669 return expr_equiv_p (ptr1->pattern, ptr2->pattern);
3670 }
3671
3672 /* This will search the ldst list for a matching expression. If it
3673 doesn't find one, we create one and initialize it. */
3674
3675 static struct ls_expr *
3676 ldst_entry (rtx x)
3677 {
3678 int do_not_record_p = 0;
3679 struct ls_expr * ptr;
3680 unsigned int hash;
3681 void **slot;
3682 struct ls_expr e;
3683
3684 hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
3685 NULL, /*have_reg_qty=*/false);
3686
3687 e.pattern = x;
3688 slot = htab_find_slot_with_hash (pre_ldst_table, &e, hash, INSERT);
3689 if (*slot)
3690 return (struct ls_expr *)*slot;
3691
3692 ptr = XNEW (struct ls_expr);
3693
3694 ptr->next = pre_ldst_mems;
3695 ptr->expr = NULL;
3696 ptr->pattern = x;
3697 ptr->pattern_regs = NULL_RTX;
3698 ptr->loads = NULL_RTX;
3699 ptr->stores = NULL_RTX;
3700 ptr->reaching_reg = NULL_RTX;
3701 ptr->invalid = 0;
3702 ptr->index = 0;
3703 ptr->hash_index = hash;
3704 pre_ldst_mems = ptr;
3705 *slot = ptr;
3706
3707 return ptr;
3708 }
3709
3710 /* Free up an individual ldst entry. */
3711
3712 static void
3713 free_ldst_entry (struct ls_expr * ptr)
3714 {
3715 free_INSN_LIST_list (& ptr->loads);
3716 free_INSN_LIST_list (& ptr->stores);
3717
3718 free (ptr);
3719 }
3720
3721 /* Free up all memory associated with the ldst list. */
3722
3723 static void
3724 free_ld_motion_mems (void)
3725 {
3726 if (pre_ldst_table)
3727 htab_delete (pre_ldst_table);
3728 pre_ldst_table = NULL;
3729
3730 while (pre_ldst_mems)
3731 {
3732 struct ls_expr * tmp = pre_ldst_mems;
3733
3734 pre_ldst_mems = pre_ldst_mems->next;
3735
3736 free_ldst_entry (tmp);
3737 }
3738
3739 pre_ldst_mems = NULL;
3740 }
3741
3742 /* Dump debugging info about the ldst list. */
3743
3744 static void
3745 print_ldst_list (FILE * file)
3746 {
3747 struct ls_expr * ptr;
3748
3749 fprintf (file, "LDST list: \n");
3750
3751 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
3752 {
3753 fprintf (file, " Pattern (%3d): ", ptr->index);
3754
3755 print_rtl (file, ptr->pattern);
3756
3757 fprintf (file, "\n Loads : ");
3758
3759 if (ptr->loads)
3760 print_rtl (file, ptr->loads);
3761 else
3762 fprintf (file, "(nil)");
3763
3764 fprintf (file, "\n Stores : ");
3765
3766 if (ptr->stores)
3767 print_rtl (file, ptr->stores);
3768 else
3769 fprintf (file, "(nil)");
3770
3771 fprintf (file, "\n\n");
3772 }
3773
3774 fprintf (file, "\n");
3775 }
3776
3777 /* Returns 1 if X is in the list of ldst only expressions. */
3778
3779 static struct ls_expr *
3780 find_rtx_in_ldst (rtx x)
3781 {
3782 struct ls_expr e;
3783 void **slot;
3784 if (!pre_ldst_table)
3785 return NULL;
3786 e.pattern = x;
3787 slot = htab_find_slot (pre_ldst_table, &e, NO_INSERT);
3788 if (!slot || ((struct ls_expr *)*slot)->invalid)
3789 return NULL;
3790 return (struct ls_expr *) *slot;
3791 }
3792 \f
3793 /* Load Motion for loads which only kill themselves. */
3794
3795 /* Return true if x, a MEM, is a simple access with no side effects.
3796 These are the types of loads we consider for the ld_motion list,
3797 otherwise we let the usual aliasing take care of it. */
3798
3799 static int
3800 simple_mem (const_rtx x)
3801 {
3802 if (MEM_VOLATILE_P (x))
3803 return 0;
3804
3805 if (GET_MODE (x) == BLKmode)
3806 return 0;
3807
3808 /* If we are handling exceptions, we must be careful with memory references
3809 that may trap. If we are not, the behavior is undefined, so we may just
3810 continue. */
3811 if (cfun->can_throw_non_call_exceptions && may_trap_p (x))
3812 return 0;
3813
3814 if (side_effects_p (x))
3815 return 0;
3816
3817 /* Do not consider function arguments passed on stack. */
3818 if (reg_mentioned_p (stack_pointer_rtx, x))
3819 return 0;
3820
3821 if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
3822 return 0;
3823
3824 return 1;
3825 }
3826
3827 /* Make sure there isn't a buried reference in this pattern anywhere.
3828 If there is, invalidate the entry for it since we're not capable
3829 of fixing it up just yet.. We have to be sure we know about ALL
3830 loads since the aliasing code will allow all entries in the
3831 ld_motion list to not-alias itself. If we miss a load, we will get
3832 the wrong value since gcse might common it and we won't know to
3833 fix it up. */
3834
3835 static void
3836 invalidate_any_buried_refs (rtx x)
3837 {
3838 const char * fmt;
3839 int i, j;
3840 struct ls_expr * ptr;
3841
3842 /* Invalidate it in the list. */
3843 if (MEM_P (x) && simple_mem (x))
3844 {
3845 ptr = ldst_entry (x);
3846 ptr->invalid = 1;
3847 }
3848
3849 /* Recursively process the insn. */
3850 fmt = GET_RTX_FORMAT (GET_CODE (x));
3851
3852 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3853 {
3854 if (fmt[i] == 'e')
3855 invalidate_any_buried_refs (XEXP (x, i));
3856 else if (fmt[i] == 'E')
3857 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3858 invalidate_any_buried_refs (XVECEXP (x, i, j));
3859 }
3860 }
3861
3862 /* Find all the 'simple' MEMs which are used in LOADs and STORES. Simple
3863 being defined as MEM loads and stores to symbols, with no side effects
3864 and no registers in the expression. For a MEM destination, we also
3865 check that the insn is still valid if we replace the destination with a
3866 REG, as is done in update_ld_motion_stores. If there are any uses/defs
3867 which don't match this criteria, they are invalidated and trimmed out
3868 later. */
3869
3870 static void
3871 compute_ld_motion_mems (void)
3872 {
3873 struct ls_expr * ptr;
3874 basic_block bb;
3875 rtx insn;
3876
3877 pre_ldst_mems = NULL;
3878 pre_ldst_table
3879 = htab_create (13, pre_ldst_expr_hash, pre_ldst_expr_eq, NULL);
3880
3881 FOR_EACH_BB (bb)
3882 {
3883 FOR_BB_INSNS (bb, insn)
3884 {
3885 if (NONDEBUG_INSN_P (insn))
3886 {
3887 if (GET_CODE (PATTERN (insn)) == SET)
3888 {
3889 rtx src = SET_SRC (PATTERN (insn));
3890 rtx dest = SET_DEST (PATTERN (insn));
3891
3892 /* Check for a simple LOAD... */
3893 if (MEM_P (src) && simple_mem (src))
3894 {
3895 ptr = ldst_entry (src);
3896 if (REG_P (dest))
3897 ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
3898 else
3899 ptr->invalid = 1;
3900 }
3901 else
3902 {
3903 /* Make sure there isn't a buried load somewhere. */
3904 invalidate_any_buried_refs (src);
3905 }
3906
3907 /* Check for stores. Don't worry about aliased ones, they
3908 will block any movement we might do later. We only care
3909 about this exact pattern since those are the only
3910 circumstance that we will ignore the aliasing info. */
3911 if (MEM_P (dest) && simple_mem (dest))
3912 {
3913 ptr = ldst_entry (dest);
3914
3915 if (! MEM_P (src)
3916 && GET_CODE (src) != ASM_OPERANDS
3917 /* Check for REG manually since want_to_gcse_p
3918 returns 0 for all REGs. */
3919 && can_assign_to_reg_without_clobbers_p (src))
3920 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
3921 else
3922 ptr->invalid = 1;
3923 }
3924 }
3925 else
3926 invalidate_any_buried_refs (PATTERN (insn));
3927 }
3928 }
3929 }
3930 }
3931
3932 /* Remove any references that have been either invalidated or are not in the
3933 expression list for pre gcse. */
3934
3935 static void
3936 trim_ld_motion_mems (void)
3937 {
3938 struct ls_expr * * last = & pre_ldst_mems;
3939 struct ls_expr * ptr = pre_ldst_mems;
3940
3941 while (ptr != NULL)
3942 {
3943 struct expr * expr;
3944
3945 /* Delete if entry has been made invalid. */
3946 if (! ptr->invalid)
3947 {
3948 /* Delete if we cannot find this mem in the expression list. */
3949 unsigned int hash = ptr->hash_index % expr_hash_table.size;
3950
3951 for (expr = expr_hash_table.table[hash];
3952 expr != NULL;
3953 expr = expr->next_same_hash)
3954 if (expr_equiv_p (expr->expr, ptr->pattern))
3955 break;
3956 }
3957 else
3958 expr = (struct expr *) 0;
3959
3960 if (expr)
3961 {
3962 /* Set the expression field if we are keeping it. */
3963 ptr->expr = expr;
3964 last = & ptr->next;
3965 ptr = ptr->next;
3966 }
3967 else
3968 {
3969 *last = ptr->next;
3970 htab_remove_elt_with_hash (pre_ldst_table, ptr, ptr->hash_index);
3971 free_ldst_entry (ptr);
3972 ptr = * last;
3973 }
3974 }
3975
3976 /* Show the world what we've found. */
3977 if (dump_file && pre_ldst_mems != NULL)
3978 print_ldst_list (dump_file);
3979 }
3980
3981 /* This routine will take an expression which we are replacing with
3982 a reaching register, and update any stores that are needed if
3983 that expression is in the ld_motion list. Stores are updated by
3984 copying their SRC to the reaching register, and then storing
3985 the reaching register into the store location. These keeps the
3986 correct value in the reaching register for the loads. */
3987
3988 static void
3989 update_ld_motion_stores (struct expr * expr)
3990 {
3991 struct ls_expr * mem_ptr;
3992
3993 if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
3994 {
3995 /* We can try to find just the REACHED stores, but is shouldn't
3996 matter to set the reaching reg everywhere... some might be
3997 dead and should be eliminated later. */
3998
3999 /* We replace (set mem expr) with (set reg expr) (set mem reg)
4000 where reg is the reaching reg used in the load. We checked in
4001 compute_ld_motion_mems that we can replace (set mem expr) with
4002 (set reg expr) in that insn. */
4003 rtx list = mem_ptr->stores;
4004
4005 for ( ; list != NULL_RTX; list = XEXP (list, 1))
4006 {
4007 rtx insn = XEXP (list, 0);
4008 rtx pat = PATTERN (insn);
4009 rtx src = SET_SRC (pat);
4010 rtx reg = expr->reaching_reg;
4011 rtx copy;
4012
4013 /* If we've already copied it, continue. */
4014 if (expr->reaching_reg == src)
4015 continue;
4016
4017 if (dump_file)
4018 {
4019 fprintf (dump_file, "PRE: store updated with reaching reg ");
4020 print_rtl (dump_file, reg);
4021 fprintf (dump_file, ":\n ");
4022 print_inline_rtx (dump_file, insn, 8);
4023 fprintf (dump_file, "\n");
4024 }
4025
4026 copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
4027 emit_insn_before (copy, insn);
4028 SET_SRC (pat) = reg;
4029 df_insn_rescan (insn);
4030
4031 /* un-recognize this pattern since it's probably different now. */
4032 INSN_CODE (insn) = -1;
4033 gcse_create_count++;
4034 }
4035 }
4036 }
4037 \f
4038 /* Return true if the graph is too expensive to optimize. PASS is the
4039 optimization about to be performed. */
4040
4041 static bool
4042 is_too_expensive (const char *pass)
4043 {
4044 /* Trying to perform global optimizations on flow graphs which have
4045 a high connectivity will take a long time and is unlikely to be
4046 particularly useful.
4047
4048 In normal circumstances a cfg should have about twice as many
4049 edges as blocks. But we do not want to punish small functions
4050 which have a couple switch statements. Rather than simply
4051 threshold the number of blocks, uses something with a more
4052 graceful degradation. */
4053 if (n_edges > 20000 + n_basic_blocks * 4)
4054 {
4055 warning (OPT_Wdisabled_optimization,
4056 "%s: %d basic blocks and %d edges/basic block",
4057 pass, n_basic_blocks, n_edges / n_basic_blocks);
4058
4059 return true;
4060 }
4061
4062 /* If allocating memory for the dataflow bitmaps would take up too much
4063 storage it's better just to disable the optimization. */
4064 if ((n_basic_blocks
4065 * SBITMAP_SET_SIZE (max_reg_num ())
4066 * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
4067 {
4068 warning (OPT_Wdisabled_optimization,
4069 "%s: %d basic blocks and %d registers",
4070 pass, n_basic_blocks, max_reg_num ());
4071
4072 return true;
4073 }
4074
4075 return false;
4076 }
4077 \f
4078 /* All the passes implemented in this file. Each pass has its
4079 own gate and execute function, and at the end of the file a
4080 pass definition for passes.c.
4081
4082 We do not construct an accurate cfg in functions which call
4083 setjmp, so none of these passes runs if the function calls
4084 setjmp.
4085 FIXME: Should just handle setjmp via REG_SETJMP notes. */
4086
4087 static bool
4088 gate_rtl_pre (void)
4089 {
4090 return optimize > 0 && flag_gcse
4091 && !cfun->calls_setjmp
4092 && optimize_function_for_speed_p (cfun)
4093 && dbg_cnt (pre);
4094 }
4095
4096 static unsigned int
4097 execute_rtl_pre (void)
4098 {
4099 int changed;
4100 delete_unreachable_blocks ();
4101 df_analyze ();
4102 changed = one_pre_gcse_pass ();
4103 flag_rerun_cse_after_global_opts |= changed;
4104 if (changed)
4105 cleanup_cfg (0);
4106 return 0;
4107 }
4108
4109 static bool
4110 gate_rtl_hoist (void)
4111 {
4112 return optimize > 0 && flag_gcse
4113 && !cfun->calls_setjmp
4114 /* It does not make sense to run code hoisting unless we are optimizing
4115 for code size -- it rarely makes programs faster, and can make then
4116 bigger if we did PRE (when optimizing for space, we don't run PRE). */
4117 && optimize_function_for_size_p (cfun)
4118 && dbg_cnt (hoist);
4119 }
4120
4121 static unsigned int
4122 execute_rtl_hoist (void)
4123 {
4124 int changed;
4125 delete_unreachable_blocks ();
4126 df_analyze ();
4127 changed = one_code_hoisting_pass ();
4128 flag_rerun_cse_after_global_opts |= changed;
4129 if (changed)
4130 cleanup_cfg (0);
4131 return 0;
4132 }
4133
4134 struct rtl_opt_pass pass_rtl_pre =
4135 {
4136 {
4137 RTL_PASS,
4138 "rtl pre", /* name */
4139 OPTGROUP_NONE, /* optinfo_flags */
4140 gate_rtl_pre, /* gate */
4141 execute_rtl_pre, /* execute */
4142 NULL, /* sub */
4143 NULL, /* next */
4144 0, /* static_pass_number */
4145 TV_PRE, /* tv_id */
4146 PROP_cfglayout, /* properties_required */
4147 0, /* properties_provided */
4148 0, /* properties_destroyed */
4149 0, /* todo_flags_start */
4150 TODO_df_finish | TODO_verify_rtl_sharing |
4151 TODO_verify_flow /* todo_flags_finish */
4152 }
4153 };
4154
4155 struct rtl_opt_pass pass_rtl_hoist =
4156 {
4157 {
4158 RTL_PASS,
4159 "hoist", /* name */
4160 OPTGROUP_NONE, /* optinfo_flags */
4161 gate_rtl_hoist, /* gate */
4162 execute_rtl_hoist, /* execute */
4163 NULL, /* sub */
4164 NULL, /* next */
4165 0, /* static_pass_number */
4166 TV_HOIST, /* tv_id */
4167 PROP_cfglayout, /* properties_required */
4168 0, /* properties_provided */
4169 0, /* properties_destroyed */
4170 0, /* todo_flags_start */
4171 TODO_df_finish | TODO_verify_rtl_sharing |
4172 TODO_verify_flow /* todo_flags_finish */
4173 }
4174 };
4175
4176 #include "gt-gcse.h"