From 7506f49132276e0bd064a169dca106b6436bde7c Mon Sep 17 00:00:00 2001 From: Doug Evans Date: Wed, 20 May 1998 00:24:32 +0000 Subject: [PATCH] * Global CSE and constant/copy propagation. * Makefile.in (OBJS): Add gcse.o (STAGESTUFF): Add *.gcse. (gcse.o): Add dependencies. (mostlyclean): Remove *.gcse and */*.gcse. * gcse.c: New file. * loop.c (loop_optimize): Move call to init_alias_analysis. * recog.c (validate_replace_src): New function. * toplev.c (gcse_dump): New global variable. (flag_gcse, gcse_time): Likewise. (compile_file): Initialize gcse_time and clean out the gcse dump file if necessary. (rest_of_compilation): Call gcse_main as requested. Dump RTL after gcse if requested. (main): Enable gcse for -O2 and above. Handle -dG. Enable gcse dumps for -da. * gcc.texi: Add gcse related internal documentation. * invoke.texi: Note new command line options for gcse. * tm.texi: Document AVOID_CCMODE_COPIES. * mips.h (AVOID_CCMODE_COPIES): Define. Co-Authored-By: Jeffrey A Law From-SVN: r19901 --- gcc/ChangeLog | 24 + gcc/Makefile.in | 11 +- gcc/config/mips/mips.h | 7 + gcc/gcc.texi | 16 + gcc/gcse.c | 4690 ++++++++++++++++++++++++++++++++++++++++ gcc/invoke.texi | 8 +- gcc/loop.c | 8 +- gcc/recog.c | 19 + gcc/tm.texi | 6 + gcc/toplev.c | 28 + 10 files changed, 4811 insertions(+), 6 deletions(-) create mode 100644 gcc/gcse.c diff --git a/gcc/ChangeLog b/gcc/ChangeLog index 8723d585789..f9fc5b9d4d3 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -1,3 +1,27 @@ +Wed May 20 01:11:02 1998 Doug Evans (devans@cygnus.com) + Jeff Law (law@cygnus.com) + + * Global CSE and constant/copy propagation. + * Makefile.in (OBJS): Add gcse.o + (STAGESTUFF): Add *.gcse. + (gcse.o): Add dependencies. + (mostlyclean): Remove *.gcse and */*.gcse. + * gcse.c: New file. + * loop.c (loop_optimize): Move call to init_alias_analysis. + * recog.c (validate_replace_src): New function. + * toplev.c (gcse_dump): New global variable. + (flag_gcse, gcse_time): Likewise. + (compile_file): Initialize gcse_time and clean out the gcse dump + file if necessary. + (rest_of_compilation): Call gcse_main as requested. Dump RTL + after gcse if requested. + (main): Enable gcse for -O2 and above. Handle -dG. Enable gcse + dumps for -da. + * gcc.texi: Add gcse related internal documentation. + * invoke.texi: Note new command line options for gcse. + * tm.texi: Document AVOID_CCMODE_COPIES. + * mips.h (AVOID_CCMODE_COPIES): Define. 
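To make the effect of the new pass concrete, here is a minimal C example (ours, not part of the patch) of the redundancy gcse.c targets. cse.c works on individual basic blocks and limited paths through them; the new pass works across the whole function:

  /* `a + b' is computed on the if-path and again after the join, so at
     the join it is partially redundant: available when p is nonzero but
     not otherwise.  PRE inserts a computation of `a + b' on the
     else-path, then reuses a single register at the join; the follow-up
     constant/copy propagation pass cleans up the copies this creates.  */
  int
  f (int a, int b, int p)
  {
    int x, y;
    if (p)
      x = a + b;
    else
      x = a - b;
    y = a + b;    /* partially redundant */
    return x + y;
  }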
+ Tue May 19 22:31:20 1998 Jeffrey A Law (law@cygnus.com) * Makefile.in (deduced.h): Only run scan-types if $(SYSTEM_HEADER_DIR) diff --git a/gcc/Makefile.in b/gcc/Makefile.in index 2b7416f95bf..6d6eeabbd35 100644 --- a/gcc/Makefile.in +++ b/gcc/Makefile.in @@ -652,7 +652,7 @@ OBJS = toplev.o version.o tree.o print-tree.o stor-layout.o fold-const.o \ varasm.o rtl.o print-rtl.o rtlanal.o emit-rtl.o genrtl.o real.o regmove.o \ dbxout.o sdbout.o dwarfout.o dwarf2out.o xcoffout.o bitmap.o alias.o \ integrate.o jump.o cse.o loop.o unroll.o flow.o stupid.o combine.o \ - regclass.o local-alloc.o global.o reload.o reload1.o caller-save.o \ + regclass.o local-alloc.o global.o reload.o reload1.o caller-save.o gcse.o \ insn-peep.o reorg.o $(SCHED_PREFIX)sched.o final.o recog.o reg-stack.o \ insn-opinit.o insn-recog.o insn-extract.o insn-output.o insn-emit.o \ profile.o insn-attrtab.o $(out_object_file) getpwd.o $(EXTRA_OBJS) convert.o @@ -685,7 +685,7 @@ STAGESTUFF = *$(objext) insn-flags.h insn-config.h insn-codes.h \ specs collect2$(exeext) $(USE_COLLECT2) underscore.c \ gcov$(exeext) *.bp \ *.greg *.lreg *.combine *.flow *.cse *.jump *.rtl *.tree *.loop \ - *.dbr *.jump2 *.sched *.cse2 *.sched2 *.stack \ + *.dbr *.jump2 *.sched *.cse2 *.sched2 *.stack *.gcse \ *.[si] \ $(LANG_STAGESTUFF) @@ -1449,6 +1449,8 @@ stupid.o : stupid.c $(CONFIG_H) system.h $(RTL_H) regs.h hard-reg-set.h flags.h cse.o : cse.c $(CONFIG_H) system.h $(RTL_H) regs.h hard-reg-set.h flags.h \ real.h insn-config.h insn-codes.h $(RECOG_H) expr.h +gcse.o : gcse.c $(CONFIG_H) system.h $(RTL_H) regs.h hard-reg-set.h flags.h \ + real.h insn-config.h insn-codes.h $(RECOG_H) expr.h basic-block.h profile.o : profile.c $(CONFIG_H) system.h $(RTL_H) flags.h insn-flags.h \ gcov-io.h $(TREE_H) output.h regs.h toplev.h loop.o : loop.c $(CONFIG_H) system.h $(RTL_H) flags.h loop.h insn-config.h \ @@ -2159,10 +2161,11 @@ mostlyclean: lang.mostlyclean -rm -f */stamp-* */tmp-* # Delete debugging dump files. -rm -f *.greg *.lreg *.combine *.flow *.cse *.jump *.rtl *.tree *.loop - -rm -f *.dbr *.jump2 *.sched *.cse2 *.sched2 *.stack *.addressof *.regmove *.mach *.bp + -rm -f *.dbr *.jump2 *.sched *.cse2 *.sched2 *.stack *.addressof + -rm -f *.regmove *.mach *.bp *.gcse -rm -f */*.greg */*.lreg */*.combine */*.flow */*.cse */*.jump */*.rtl -rm -f */*.tree */*.loop */*.dbr */*.jump2 */*.sched */*.cse2 - -rm -f */*.sched2 */*.stack */*.regmove + -rm -f */*.sched2 */*.stack */*.regmove */*.gcse # Delete some files made during installation. -rm -f specs gfloat.h float.h-* enquire SYSCALLS.c.X SYSCALLS.c -rm -f collect collect2 mips-tfile mips-tdump alloca.s diff --git a/gcc/config/mips/mips.h b/gcc/config/mips/mips.h index 78751e6fa97..c53d452fe99 100644 --- a/gcc/config/mips/mips.h +++ b/gcc/config/mips/mips.h @@ -3561,6 +3561,13 @@ while (0) (((mips_cpu == PROCESSOR_R4000 || mips_cpu == PROCESSOR_R6000) ? 6 : 4) \ + memory_move_secondary_cost ((MODE), (CLASS), (TO_P))) +/* Define if copies to/from condition code registers should be avoided. + + This is needed for the MIPS because reload_outcc is not complete; + it needs to handle cases where the source is a general or another + condition code register. */ +#define AVOID_CCMODE_COPIES + /* A C expression for the cost of a branch instruction. A value of 1 is the default; other values are interpreted relative to that. 
*/ diff --git a/gcc/gcc.texi b/gcc/gcc.texi index e0584aafc6e..62e32b2c28c 100644 --- a/gcc/gcc.texi +++ b/gcc/gcc.texi @@ -3302,6 +3302,22 @@ The option @samp{-ds} causes a debugging dump of the RTL code after this pass. This dump file's name is made by appending @samp{.cse} to the input file name. +@cindex global common subexpression elimination +@cindex constant propagation +@cindex copy propagation +@item +Global common subexpression elimination. This pass performs GCSE +using Morel-Renvoise Partial Redundancy Elimination, with the exception +that it does not try to move invariants out of loops - that is left to +the loop optimization pass. This pass also performs global constant +and copy propagation. + +The source file for this pass is gcse.c. + +The option @samp{-dG} causes a debugging dump of the RTL code after +this pass. This dump file's name is made by appending @samp{.gcse} to +the input file name. + @cindex loop optimization @cindex code motion @cindex strength-reduction diff --git a/gcc/gcse.c b/gcc/gcse.c new file mode 100644 index 00000000000..9deff471725 --- /dev/null +++ b/gcc/gcse.c @@ -0,0 +1,4690 @@ +/* CYGNUS LOCAL entire file */ +/* Global common subexpression elimination + and global constant/copy propagation for GNU compiler. + Copyright (C) 1997 Free Software Foundation, Inc. + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* TODO + - reordering of memory allocation and freeing to be more space efficient + - do rough calc of how many regs are needed in each block, and a rough + calc of how many regs are available in each class and use that to + throttle back the code in cases where RTX_COST is minimal. + - memory aliasing support + - ability to realloc sbitmap vectors would allow one initial computation + of reg_set_in_block with only subsequent additions, rather than + recomputing it for each pass + + NOTES + - the classic gcse implementation is kept in for now for comparison +*/ + +/* References searched while implementing this. + This list will eventually be deleted but I wanted to have a record of it + for now. + + Compilers Principles, Techniques and Tools + Aho, Sethi, Ullman + Addison-Wesley, 1988 + + Global Optimization by Suppression of Partial Redundancies + E. Morel, C. Renvoise + communications of the acm, Vol. 22, Num. 2, Feb. 1979 + + A Portable Machine-Independent Global Optimizer - Design and Measurements + Frederick Chow + Stanford Ph.D. thesis, Dec. 1983 + +xxx + Elimination Algorithms for Data Flow Analysis + B.G. Ryder, M.C. Paull + ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986 + +reread + A Fast Algorithm for Code Movement Optimization + D.M. Dhamdhere + SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988 + + A Solution to a Problem with Morel and Renvoise's + Global Optimization by Suppression of Partial Redundancies + K-H Drechsler, M.P. Stadel + ACM TOPLAS, Vol. 10, Num. 4, Oct. 
1988 + + Practical Adaptation of the Global Optimization + Algorithm of Morel and Renvoise + D.M. Dhamdhere + ACM TOPLAS, Vol. 13, Num. 2, Apr. 1991 + + Efficiently Computing Static Single Assignment Form and the Control + Dependence Graph + R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck + ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991 + +yyy + How to Analyze Large Programs Efficiently and Informatively + D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck + ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI + + Lazy Code Motion + J. Knoop, O. Ruthing, B. Steffen + ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI + + What's In a Region? Or Computing Control Dependence Regions in Near-Linear + Time for Reducible Flow Control + Thomas Ball + ACM Letters on Programming Languages and Systems, + Vol. 2, Num. 1-4, Mar-Dec 1993 + + An Efficient Representation for Sparse Sets + Preston Briggs, Linda Torczon + ACM Letters on Programming Languages and Systems, + Vol. 2, Num. 1-4, Mar-Dec 1993 + + A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion + K-H Drechsler, M.P. Stadel + ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993 + + Partial Dead Code Elimination + J. Knoop, O. Ruthing, B. Steffen + ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994 + + Effective Partial Redundancy Elimination + P. Briggs, K.D. Cooper + ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994 + + The Program Structure Tree: Computing Control Regions in Linear Time + R. Johnson, D. Pearson, K. Pingali + ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994 + + Optimal Code Motion: Theory and Practice + J. Knoop, O. Ruthing, B. Steffen + ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994 + + The power of assignment motion + J. Knoop, O. Ruthing, B. Steffen + ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI + + Global code motion / global value numbering + C. Click + ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI + + Value Driven Redundancy Elimination + L.T. Simpson + Rice University Ph.D. thesis, Apr. 1996 + + Value Numbering + L.T. Simpson + Massively Scalar Compiler Project, Rice University, Sep. 1996 + + High Performance Compilers for Parallel Computing + Michael Wolfe + Addison-Wesley, 1996 + + People wishing to speed up the code here should read xxx, yyy. + People wishing to do something different can find various possibilities + in the above papers and elsewhere. +*/ + +#include "config.h" +/* Must precede rtl.h for FFS. */ +#include <stdio.h> + +#include "rtl.h" +#include "regs.h" +#include "hard-reg-set.h" +#include "flags.h" +#include "real.h" +#include "insn-config.h" +#include "recog.h" +#include "basic-block.h" + +#include "obstack.h" +#define obstack_chunk_alloc gmalloc +#define obstack_chunk_free free + +/* Maximum number of passes to perform. */ +#define MAX_PASSES 1 + +/* Propagate flow information through back edges and thus enable PRE's + moving loop invariant calculations out of loops. + + Originally this tended to create worse overall code, but several + improvements during the development of PRE seem to have made following + back edges generally a win. + + Note much of the loop invariant code motion done here would normally + be done by loop.c, which has more heuristics for when to move invariants + out of loops. At some point we might need to move some of those + heuristics into gcse.c. */ +#define FOLLOW_BACK_EDGES 1 + +/* We support two GCSE implementations: Classic GCSE (i.e.
Dragon Book) + and PRE (Partial Redundancy Elimination) GCSE (based on Fred Chow's thesis). + The default is PRE. + + In either case we perform the following steps: + + 1) Compute basic block information. + + 2) Compute table of places where registers are set. + + 3) Perform copy/constant propagation. + + 4) Perform global cse. + + 5) Perform another pass of copy/constant propagation [only if PRE]. + + Two passes of copy/constant propagation are done because the first one + enables more GCSE and the second one helps to clean up the copies that + GCSE creates. This is needed more for PRE than for Classic because Classic + GCSE will try to use an existing register containing the common + subexpression rather than create a new one. This is harder to do for PRE + because of the code motion (which Classic GCSE doesn't do). + + Expressions we are interested in GCSE-ing are of the form + (set (pseudo-reg) (expression)). + Function want_to_gcse_p says what these are. + + PRE handles moving invariant expressions out of loops (by treating them as + partially redundant). This feature of PRE is disabled here (by not + propagating dataflow information along back edges) because loop.c has more + involved (and thus typically better) heuristics for what to move out of + loops. + + Eventually it would be nice to replace cse.c/gcse.c with SSA (static single + assignment) based GVN (global value numbering). L. T. Simpson's paper + (Rice University) on value numbering is a useful reference for this. + + ********************** + + We used to support multiple passes but there are diminishing returns in + doing so. The first pass usually makes 90% of the changes that are doable. + A second pass can make a few more changes made possible by the first pass. + Experiments show any further passes don't make enough changes to justify + the expense. + + A study of spec92 using an unlimited number of passes: + [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83, + [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2, + [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1 + + It was found that doing copy propagation between each pass enables further + substitutions. + + PRE is quite expensive in complicated functions because the DFA can take + a while to converge. Hence we only perform one pass. Macro MAX_PASSES can + be modified if one wants to experiment. + + ********************** + + The steps for PRE are: + + 1) Build the hash table of expressions we wish to GCSE (expr_hash_table). + + 2) Perform the data flow analysis for PRE. + + 3) Delete the redundant instructions. + + 4) Insert the required copies [if any] that make the partially + redundant instructions fully redundant. + + 5) For other reaching expressions, insert an instruction to copy the value + to a newly created pseudo that will reach the redundant instruction. + + The deletion is done first so that when we do insertions we + know which pseudo reg to use. + + Various papers have argued that PRE DFA is expensive (O(n^2)) and others + argue it is not. The number of iterations for the algorithm to converge + is typically 2-4 so I don't view it as that expensive (relatively speaking). + + PRE GCSE depends heavily on the second CSE pass to clean up the copies + we create. To make an expression reach the place where it's redundant, + the result of the expression is copied to a new register, and the redundant + expression is deleted by replacing it with this new register.
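For example (illustrative RTL, register and block numbers invented): suppose block 2 computes (plus (reg 101) (reg 102)) and block 4, reachable from both block 2 and block 3, computes it again:

     block 2:  (set (reg 103) (plus (reg 101) (reg 102)))
     block 4:  (set (reg 105) (plus (reg 101) (reg 102)))

   After PRE, with (reg 200) as the newly created pseudo (the
   expression's reaching_reg), this becomes:

     block 2:  (set (reg 103) (plus (reg 101) (reg 102)))
               (set (reg 200) (reg 103))                   ;; copy inserted
     block 3:  (set (reg 200) (plus (reg 101) (reg 102)))  ;; computation inserted
     block 4:  (set (reg 105) (reg 200))                   ;; redundancy deleted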
Classic GCSE + doesn't have this problem as much, since it computes the reaching defs of + each register in each block and thus can try to use an existing register. + + ********************** + + When -fclassic-gcse is specified, we perform a classic global CSE pass. + It is based on the algorithms in the Dragon book and on code + written by Devor Tevi at Intel. + + The steps for Classic GCSE are: + + 1) Build the hash table of expressions we wish to GCSE (expr_hash_table). + Also recorded are reaching definition "gen" statements (rd_gen). + + 2) Compute the reaching definitions (reaching_defs). + This is a bitmap for each basic block indexed by INSN_CUID that is 1 + for each statement containing a definition that reaches the block. + + 3) Compute the available expressions (ae_in). + This is a bitmap for each basic block indexed by expression number + that is 1 for each expression that is available at the beginning of + the block. + + 4) Perform GCSE. + This is done by scanning each instruction looking for sets of the form + (set (pseudo-reg) (expression)) and checking if `expression' is in the + hash table. If it is, and if the expression is available, and if only + one computation of the expression reaches the instruction, we replace + the expression with a register containing its value. If there is no + such register, but the expression is expensive enough, we create an + instruction to copy the result of the expression into a new register, + and use that register. + + ********************** + + A fair bit of simplicity is gained by creating small functions for simple + tasks, even when the function is only called in one place. This may + measurably slow things down [or may not] by creating more function call + overhead than is necessary. The source is laid out so that it's trivial + to make the affected functions inline so that one can measure what speedup, + if any, can be achieved, and maybe later, when things settle, things can + be rearranged. + + Help stamp out big monolithic functions! */ + +/* GCSE global vars. */ + +/* -dG dump file. */ +static FILE *gcse_file; + +/* Bitmaps are normally not included in debugging dumps. + However it's useful to be able to print them from GDB. + We could create special functions for this, but it's simpler to + just allow passing stderr to the dump_foo fns. Since stderr can + be a macro, we store a copy here. */ +static FILE *debug_stderr; + +/* An obstack for our working variables. */ +static struct obstack gcse_obstack; + +/* Non-zero for each mode that supports (set (reg) (reg)). + This is trivially true for integer and floating point values. + It may or may not be true for condition codes. */ +static char can_copy_p[(int) NUM_MACHINE_MODES]; + +/* Non-zero if can_copy_p has been initialized. */ +static int can_copy_init_p; + +/* Element I is a list of I's predecessors/successors. */ +static int_list_ptr *s_preds; +static int_list_ptr *s_succs; + +/* Element I is the number of predecessors/successors of basic block I. */ +static int *num_preds; +static int *num_succs; + +/* Hash table of expressions. */ + +struct expr +{ + /* The expression (SET_SRC for expressions, PATTERN for assignments). */ + rtx expr; + /* Index in the available expression bitmaps. */ + int bitmap_index; + /* Next entry with the same hash. */ + struct expr *next_same_hash; + /* List of anticipatable occurrences in basic blocks in the function.
+ An "anticipatable occurrence" is one that is the first occurrence in the + basic block and the operands are not modified in the basic block prior + to the occurrence. */ + struct occr *antic_occr; + /* List of available occurrence in basic blocks in the function. + An "available occurrence" is one that is the last occurrence in the + basic block and the operands are not modified by following statements in + the basic block [including this insn]. */ + struct occr *avail_occr; + /* Non-null if the computation is PRE redundant. + The value is the newly created pseudo-reg to record a copy of the + expression in all the places that reach the redundant copy. */ + rtx reaching_reg; +}; + +/* Occurrence of an expression. + There is one per basic block. If a pattern appears more than once the + last appearance is used [or first for anticipatable expressions]. */ + +struct occr +{ + /* Next occurrence of this expression. */ + struct occr *next; + /* The insn that computes the expression. */ + rtx insn; + /* Non-zero if this [anticipatable] occurrence has been deleted. */ + char deleted_p; + /* Non-zero if this [available] occurrence has been copied to + reaching_reg. */ + /* ??? This is mutually exclusive with deleted_p, so they could share + the same byte. */ + char copied_p; +}; + +/* Expression and copy propagation hash tables. + Each hash table is an array of buckets. + ??? It is known that if it were an array of entries, structure elements + `next_same_hash' and `bitmap_index' wouldn't be necessary. However, it is + not clear whether in the final analysis a sufficient amount of memory would + be saved as the size of the available expression bitmaps would be larger + [one could build a mapping table without holes afterwards though]. + Someday I'll perform the computation and figure it out. +*/ + +/* Total size of the expression hash table, in elements. */ +static int expr_hash_table_size; +/* The table itself. + This is an array of `expr_hash_table_size' elements. */ +static struct expr **expr_hash_table; + +/* Total size of the copy propagation hash table, in elements. */ +static int set_hash_table_size; +/* The table itself. + This is an array of `set_hash_table_size' elements. */ +static struct expr **set_hash_table; + +/* Mapping of uids to cuids. + Only real insns get cuids. */ +static int *uid_cuid; + +/* Highest UID in UID_CUID. */ +static int max_uid; + +/* Get the cuid of an insn. */ +#define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)]) + +/* Number of cuids. */ +static int max_cuid; + +/* Mapping of cuids to insns. */ +static rtx *cuid_insn; + +/* Get insn from cuid. */ +#define CUID_INSN(CUID) (cuid_insn[CUID]) + +/* Maximum register number in function prior to doing gcse + 1. + Registers created during this pass have regno >= max_gcse_regno. + This is named with "gcse" to not collide with global of same name. */ +static int max_gcse_regno; + +/* Maximum number of cse-able expressions found. */ +static int n_exprs; +/* Maximum number of assignments for copy propagation found. */ +static int n_sets; + +/* Table of registers that are modified. + For each register, each element is a list of places where the pseudo-reg + is set. + + For simplicity, GCSE is done on sets of pseudo-regs only. PRE GCSE only + requires knowledge of which blocks kill which regs [and thus could use + a bitmap instead of the lists `reg_set_table' uses]. The classic GCSE + uses the information in lists. 
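For example (register and insn numbers hypothetical): if pseudo-reg 104
   is set by insn 7 and insn 23 and nowhere else, then

     reg_set_table[104]  ->  { insn 7 }  ->  { insn 23 }  ->  NULL

   where each element is a `struct reg_set' (defined below) and the list
   is kept in the order the sets appear in the insn stream.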
+ + If the classic GCSE pass is deleted, `reg_set_table' could be turned + into an array of bitmaps (num-bbs x num-regs) + [however perhaps it may be useful to keep the data as is]. + One advantage of recording things this way is that `reg_set_table' is + fairly sparse with respect to pseudo regs but for hard regs could be + fairly dense [relatively speaking]. + And recording sets of pseudo-regs in lists speeds + up functions like compute_transp since in the case of pseudo-regs we only + need to iterate over the number of times a pseudo-reg is set, not over the + number of basic blocks [clearly there is a bit of a slowdown in the cases + where a pseudo is set more than once in a block, however it is believed + that the net effect is to speed things up]. This isn't done for hard-regs + because recording call-clobbered hard-regs in `reg_set_table' at each + function call can consume a fair bit of memory, and iterating over hard-regs + stored this way in compute_transp will be more expensive. */ + +typedef struct reg_set { + /* The next setting of this register. */ + struct reg_set *next; + /* The insn where it was set. */ + rtx insn; +} reg_set; +static reg_set **reg_set_table; +/* Size of `reg_set_table'. + The table starts out at max_gcse_regno + slop, and is enlarged as + necessary. */ +static int reg_set_table_size; +/* Amount to grow `reg_set_table' by when it's full. */ +#define REG_SET_TABLE_SLOP 100 + +/* Bitmap containing one bit for each register in the program. + Used when performing GCSE to track which registers have been set since + the start of the basic block. */ +static sbitmap reg_set_bitmap; + +/* For each block, a bitmap of registers set in the block. + This is used by expr_killed_p and compute_transp. + It is computed during hash table computation and not by compute_sets + as it includes registers added since the last pass (or between cprop and + gcse) and it's currently not easy to realloc sbitmap vectors. */ +static sbitmap *reg_set_in_block; + +/* For each block, non-zero if memory is set in that block. + This is computed during hash table computation and is used by + expr_killed_p and compute_transp. + ??? Handling of memory is very simple; we don't make any attempt + to optimize things (later). + ??? This can be computed by compute_sets since the information + doesn't change. */ +static char *mem_set_in_block; + +/* Various variables for statistics gathering. */ + +/* Memory used in a pass. + This isn't intended to be absolutely precise. Its intent is only + to keep an eye on memory usage. */ +static int bytes_used; +/* GCSE substitutions made. */ +static int gcse_subst_count; +/* Number of copy instructions created. */ +static int gcse_create_count; +/* Number of constants propagated. */ +static int const_prop_count; +/* Number of copies propagated. */ +static int copy_prop_count; + +extern char *current_function_name; +extern int current_function_calls_setjmp; + +/* These variables are used by classic GCSE. + Normally they'd be defined a bit later, but `rd_gen' needs to + be declared sooner. */ + +/* A bitmap of all ones for implementing the algorithm for available + expressions and reaching definitions. */ +/* ??? Available expression bitmaps have a different size than reaching + definition bitmaps. This should be the larger of the two, however, it + is not currently used for reaching definitions. */ +static sbitmap u_bitmap; + +/* Each block has a bitmap of each type.
+ The length of each blocks bitmap is: + + max_cuid - for reaching definitions + n_exprs - for available expressions + + Thus we view the bitmaps as 2 dimensional arrays. i.e. + rd_kill[block_num][cuid_num] + ae_kill[block_num][expr_num] +*/ + +/* For reaching defs */ +static sbitmap *rd_kill, *rd_gen, *reaching_defs, *rd_out; + +/* for available exprs */ +static sbitmap *ae_kill, *ae_gen, *ae_in, *ae_out; + +static void compute_can_copy PROTO ((void)); + +static char *gmalloc PROTO ((unsigned int)); +static char *grealloc PROTO ((char *, unsigned int)); +static char *gcse_alloc PROTO ((unsigned long)); +static void alloc_gcse_mem PROTO ((rtx)); +static void free_gcse_mem PROTO ((void)); +static void dump_cuid_table PROTO ((FILE *)); + +static void alloc_reg_set_mem PROTO ((int)); +static void free_reg_set_mem PROTO ((void)); +static void record_one_set PROTO ((int, rtx)); +static void record_set_info PROTO ((rtx, rtx)); +static void compute_sets PROTO ((rtx)); + +static void hash_scan_insn PROTO ((rtx, int)); +static void hash_scan_set PROTO ((rtx, rtx, int)); +static void hash_scan_clobber PROTO ((rtx, rtx)); +static void hash_scan_call PROTO ((rtx, rtx)); +static void maybe_set_rd_gen PROTO ((int, rtx)); +static int want_to_gcse_p PROTO ((rtx)); +static int oprs_unchanged_p PROTO ((rtx, rtx, int)); +static int oprs_anticipatable_p PROTO ((rtx, rtx)); +static int oprs_available_p PROTO ((rtx, rtx)); +static void insert_expr_in_table PROTO ((rtx, enum machine_mode, rtx, int, int)); +static void insert_set_in_table PROTO ((rtx, rtx)); +static unsigned int hash_expr PROTO ((rtx, enum machine_mode, int *, int)); +static unsigned int hash_expr_1 PROTO ((rtx, enum machine_mode, int *)); +static unsigned int hash_set PROTO ((int, int)); +static int expr_equiv_p PROTO ((rtx, rtx)); +static void record_last_reg_set_info PROTO ((rtx, int)); +static void record_last_mem_set_info PROTO ((rtx)); +static void record_last_set_info PROTO ((rtx, rtx)); +static void compute_hash_table PROTO ((rtx, int)); +static void alloc_set_hash_table PROTO ((int)); +static void free_set_hash_table PROTO ((void)); +static void compute_set_hash_table PROTO ((rtx)); +static void alloc_expr_hash_table PROTO ((int)); +static void free_expr_hash_table PROTO ((void)); +static void compute_expr_hash_table PROTO ((rtx)); +static void dump_hash_table PROTO ((FILE *, char *, struct expr **, int, int)); +static struct expr *lookup_expr PROTO ((rtx)); +static struct expr *lookup_set PROTO ((int, rtx)); +static struct expr *next_set PROTO ((int, struct expr *)); +static void reset_opr_set_tables PROTO ((void)); +static int oprs_not_set_p PROTO ((rtx, rtx)); +static void mark_call PROTO ((rtx, rtx)); +static void mark_set PROTO ((rtx, rtx)); +static void mark_clobber PROTO ((rtx, rtx)); +static void mark_oprs_set PROTO ((rtx)); + +static void alloc_rd_mem PROTO ((int, int)); +static void free_rd_mem PROTO ((void)); +static void compute_kill_rd PROTO ((void)); +static void handle_rd_kill_set PROTO ((rtx, int, int)); +static void compute_rd PROTO ((void)); +static void dump_rd_table PROTO ((FILE *, char *, sbitmap *)); + +static void alloc_avail_expr_mem PROTO ((int, int)); +static void free_avail_expr_mem PROTO ((void)); +static void compute_ae_gen PROTO ((void)); +static void compute_ae_kill PROTO ((void)); +static int expr_killed_p PROTO ((rtx, int)); +static void compute_available PROTO ((void)); + +static int expr_reaches_here_p PROTO ((struct occr *, struct expr *, + int, int, char *)); +static rtx computing_insn PROTO ((struct 
expr *, rtx)); +static int def_reaches_here_p PROTO ((rtx, rtx)); +static int can_disregard_other_sets PROTO ((struct reg_set **, rtx, int)); +static int handle_avail_expr PROTO ((rtx, struct expr *)); +static int classic_gcse PROTO ((void)); +static int one_classic_gcse_pass PROTO ((rtx, int)); + +static void alloc_cprop_mem PROTO ((int, int)); +static void free_cprop_mem PROTO ((void)); +static void dump_cprop_data PROTO ((FILE *)); +static void compute_transp PROTO ((rtx, int, sbitmap *, int)); +static void compute_cprop_local_properties PROTO ((void)); +static void compute_cprop_avinout PROTO ((void)); +static void compute_cprop_data PROTO ((void)); +static void find_used_regs PROTO ((rtx)); +static int try_replace_reg PROTO ((rtx, rtx, rtx)); +static struct expr *find_avail_set PROTO ((int, rtx)); +static int cprop_insn PROTO ((rtx)); +static int cprop PROTO ((void)); +static int one_cprop_pass PROTO ((rtx, int)); + +static void alloc_pre_mem PROTO ((int, int)); +static void free_pre_mem PROTO ((void)); +static void dump_pre_data PROTO ((FILE *)); +static void compute_pre_local_properties PROTO ((void)); +static void compute_pre_avinout PROTO ((void)); +static void compute_pre_antinout PROTO ((void)); +static void compute_pre_pavinout PROTO ((void)); +static void compute_pre_ppinout PROTO ((void)); +static void compute_pre_data PROTO ((void)); +static int pre_expr_reaches_here_p PROTO ((struct occr *, struct expr *, + int, char *)); +static void pre_insert_insn PROTO ((struct expr *, int)); +static void pre_insert PROTO ((struct expr **)); +static void pre_insert_copy_insn PROTO ((struct expr *, rtx)); +static void pre_insert_copies PROTO ((void)); +static int pre_delete PROTO ((void)); +static int pre_gcse PROTO ((void)); +static int one_pre_gcse_pass PROTO ((rtx, int)); + +/* Entry point for global common subexpression elimination. + F is the first instruction in the function. */ + +void +gcse_main (f, file) + rtx f; + FILE *file; +{ + int changed, pass; + /* Bytes used at start of pass. */ + int initial_bytes_used; + /* Maximum number of bytes used by a pass. */ + int max_pass_bytes; + /* Point to release obstack data from for each pass. */ + char *gcse_obstack_bottom; + + /* It's impossible to construct a correct control flow graph in the + presense of setjmp, so just punt to be safe. */ + if (current_function_calls_setjmp) + return; + + /* For calling dump_foo fns from gdb. */ + debug_stderr = stderr; + + max_gcse_regno = max_reg_num (); + find_basic_blocks (f, max_gcse_regno, file, 0); + + /* Return if there's nothing to do. */ + if (n_basic_blocks <= 1) + { + /* Free storage allocated by find_basic_blocks. */ + free_basic_block_vars (0); + return; + } + + /* See what modes support reg/reg copy operations. */ + if (! can_copy_init_p) + { + compute_can_copy (); + can_copy_init_p = 1; + } + + gcc_obstack_init (&gcse_obstack); + + gcse_file = file; + + /* Allocate and compute predecessors/successors. */ + + s_preds = (int_list_ptr *) alloca (n_basic_blocks * sizeof (int_list_ptr)); + s_succs = (int_list_ptr *) alloca (n_basic_blocks * sizeof (int_list_ptr)); + num_preds = (int *) alloca (n_basic_blocks * sizeof (int)); + num_succs = (int *) alloca (n_basic_blocks * sizeof (int)); + bytes_used = 4 * n_basic_blocks * sizeof (int_list_ptr); + compute_preds_succs (s_preds, s_succs, num_preds, num_succs); + + if (file) + { + dump_bb_data (file, s_preds, s_succs); + } + + /* Record where pseudo-registers are set. + This data is kept accurate during each pass. + ??? 
We could also record hard-reg and memory information here + [since it's unchanging], however it is currently done during + hash table computation. */ + + alloc_reg_set_mem (max_gcse_regno); + compute_sets (f); + + pass = 0; + initial_bytes_used = bytes_used; + max_pass_bytes = 0; + gcse_obstack_bottom = gcse_alloc (1); + changed = 1; + while (changed && pass < MAX_PASSES) + { + changed = 0; + if (file) + fprintf (file, "GCSE pass %d\n\n", pass + 1); + + /* Initialize bytes_used to the space for the pred/succ lists, + and the reg_set_table data. */ + bytes_used = initial_bytes_used; + + /* Each pass may create new registers, so recalculate each time. */ + max_gcse_regno = max_reg_num (); + + alloc_gcse_mem (f); + + changed = one_cprop_pass (f, pass + 1); + + if (optimize_size) + changed |= one_classic_gcse_pass (f, pass + 1); + else + changed |= one_pre_gcse_pass (f, pass + 1); + + if (max_pass_bytes < bytes_used) + max_pass_bytes = bytes_used; + + free_gcse_mem (); + + if (file) + { + fprintf (file, "\n"); + fflush (file); + } + obstack_free (&gcse_obstack, gcse_obstack_bottom); + pass++; + } + + /* If we're doing PRE, do one last pass of copy propagation. */ + if (! optimize_size) + { + max_gcse_regno = max_reg_num (); + alloc_gcse_mem (f); + one_cprop_pass (f, pass + 1); + free_gcse_mem (); + } + + if (file) + { + fprintf (file, "GCSE of %s: %d basic blocks, ", + current_function_name, n_basic_blocks); + fprintf (file, "%d pass%s, %d bytes\n\n", + pass, pass > 1 ? "es" : "", max_pass_bytes); + } + + /* Free our obstack. */ + obstack_free (&gcse_obstack, NULL_PTR); + /* Free reg_set_table. */ + free_reg_set_mem (); + /* Free storage used to record predecessor/successor data. */ + free_bb_mem (); + /* Free storage allocated by find_basic_blocks. */ + free_basic_block_vars (0); +} + +/* Misc. utilities. */ + +/* Compute which modes support reg/reg copy operations. */ + +static void +compute_can_copy () +{ + int i; + rtx reg,insn; + char *free_point = (char *) oballoc (1); + + bzero (can_copy_p, NUM_MACHINE_MODES); + + start_sequence (); + for (i = 0; i < NUM_MACHINE_MODES; i++) + { + switch (GET_MODE_CLASS (i)) + { + case MODE_CC : +#ifdef AVOID_CCMODE_COPIES + can_copy_p[i] = 0; +#else + reg = gen_rtx (REG, (enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1); + insn = emit_insn (gen_rtx (SET, VOIDmode, reg, reg)); + if (recog (PATTERN (insn), insn, NULL_PTR) >= 0) + can_copy_p[i] = 1; +#endif + break; + default : + can_copy_p[i] = 1; + break; + } + } + end_sequence (); + + /* Free the objects we just allocated. */ + obfree (free_point); +} + +/* Cover function to xmalloc to record bytes allocated. */ + +static char * +gmalloc (size) + unsigned int size; +{ + bytes_used += size; + return xmalloc (size); +} + +/* Cover function to xrealloc. + We don't record the additional size since we don't know it. + It won't affect memory usage stats much anyway. */ + +static char * +grealloc (ptr, size) + char *ptr; + unsigned int size; +{ + return xrealloc (ptr, size); +} + +/* Cover function to obstack_alloc. + We don't need to record the bytes allocated here since + obstack_chunk_alloc is set to gmalloc. */ + +static char * +gcse_alloc (size) + unsigned long size; +{ + return (char *) obstack_alloc (&gcse_obstack, size); +} + +/* Allocate memory for the cuid mapping array, + and reg/memory set tracking tables. + + This is called at the start of each pass. */ + +static void +alloc_gcse_mem (f) + rtx f; +{ + int i,n; + rtx insn; + + /* Find the largest UID and create a mapping from UIDs to CUIDs. 
+ CUIDs are like UIDs except they increase monotonically, have no gaps, + and only apply to real insns. */ + + max_uid = get_max_uid (); + n = (max_uid + 1) * sizeof (int); + uid_cuid = (int *) gmalloc (n); + bzero ((char *) uid_cuid, n); + for (insn = f, i = 0; insn; insn = NEXT_INSN (insn)) + { + if (GET_RTX_CLASS (GET_CODE (insn)) == 'i') + INSN_CUID (insn) = i++; + else + INSN_CUID (insn) = i; + } + + /* Create a table mapping cuids to insns. */ + + max_cuid = i; + n = (max_cuid + 1) * sizeof (rtx); + cuid_insn = (rtx *) gmalloc (n); + bzero ((char *) cuid_insn, n); + for (insn = f, i = 0; insn; insn = NEXT_INSN (insn)) + { + if (GET_RTX_CLASS (GET_CODE (insn)) == 'i') + { + CUID_INSN (i) = insn; + i++; + } + } + + /* Allocate vars to track sets of regs. */ + + reg_set_bitmap = (sbitmap) sbitmap_alloc (max_gcse_regno); + + /* Allocate vars to track sets of regs, memory per block. */ + + reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, + max_gcse_regno); + mem_set_in_block = (char *) gmalloc (n_basic_blocks); +} + +/* Free memory allocated by alloc_gcse_mem. */ + +static void +free_gcse_mem () +{ + free (uid_cuid); + free (cuid_insn); + + free (reg_set_bitmap); + + free (reg_set_in_block); + free (mem_set_in_block); +} + +static void +dump_cuid_table (file) + FILE *file; +{ + int i,n; + + fprintf (file, "CUID table\n"); + for (i = n = 0; i < max_cuid; i++) + { + rtx insn = CUID_INSN (i); + if (n != 0 && n % 10 == 0) + fprintf (file, "\n"); + if (insn != NULL) + fprintf (file, " %d", INSN_UID (insn)); + } + fprintf (file, "\n\n"); +} + +/* Register set information. + + `reg_set_table' records where each register is set or otherwise + modified. */ + +static struct obstack reg_set_obstack; + +static void +alloc_reg_set_mem (n_regs) + int n_regs; +{ + int n; + + reg_set_table_size = n_regs + REG_SET_TABLE_SLOP; + n = reg_set_table_size * sizeof (struct reg_set *); + reg_set_table = (struct reg_set **) gmalloc (n); + bzero ((char *) reg_set_table, n); + + gcc_obstack_init (®_set_obstack); +} + +static void +free_reg_set_mem () +{ + free (reg_set_table); + obstack_free (®_set_obstack, NULL_PTR); +} + +/* Record REGNO in the reg_set table. */ + +static void +record_one_set (regno, insn) + int regno; + rtx insn; +{ + /* allocate a new reg_set element and link it onto the list */ + struct reg_set *new_reg_info, *reg_info_ptr1, *reg_info_ptr2; + + /* If the table isn't big enough, enlarge it. */ + if (regno >= reg_set_table_size) + { + int new_size = regno + REG_SET_TABLE_SLOP; + reg_set_table = (struct reg_set **) + grealloc ((char *) reg_set_table, + new_size * sizeof (struct reg_set *)); + bzero ((char *) (reg_set_table + reg_set_table_size), + (new_size - reg_set_table_size) * sizeof (struct reg_set *)); + reg_set_table_size = new_size; + } + + new_reg_info = (struct reg_set *) obstack_alloc (®_set_obstack, + sizeof (struct reg_set)); + bytes_used += sizeof (struct reg_set); + new_reg_info->insn = insn; + new_reg_info->next = NULL; + if (reg_set_table[regno] == NULL) + reg_set_table[regno] = new_reg_info; + else + { + reg_info_ptr1 = reg_info_ptr2 = reg_set_table[regno]; + /* ??? One could keep a "last" pointer to speed this up. */ + while (reg_info_ptr1 != NULL) + { + reg_info_ptr2 = reg_info_ptr1; + reg_info_ptr1 = reg_info_ptr1->next; + } + reg_info_ptr2->next = new_reg_info; + } +} + +/* For communication between next two functions (via note_stores). 
*/ +static rtx record_set_insn; + +/* Called from compute_sets via note_stores to handle one + SET or CLOBBER in an insn. */ + +static void +record_set_info (dest, setter) + rtx dest, setter; +{ + if (GET_CODE (dest) == SUBREG) + dest = SUBREG_REG (dest); + + if (GET_CODE (dest) == REG) + { + if (REGNO (dest) >= FIRST_PSEUDO_REGISTER) + record_one_set (REGNO (dest), record_set_insn); + } +} + +/* Scan the function and record each set of each pseudo-register. + + This is called once, at the start of the gcse pass. + See the comments for `reg_set_table' for further docs. */ + +static void +compute_sets (f) + rtx f; +{ + rtx insn = f; + + while (insn) + { + if (GET_RTX_CLASS (GET_CODE (insn)) == 'i') + { + record_set_insn = insn; + note_stores (PATTERN (insn), record_set_info); + } + insn = NEXT_INSN (insn); + } +} + +/* Hash table support. */ + +/* For each register, the cuid of the first/last insn in the block to set it, + or zero if not set. */ +static int *reg_first_set; +static int *reg_last_set; + +/* While computing "first/last set" info, this is the CUID of first/last insn + to set memory or zero if not set. `mem_last_set' is also used when + performing GCSE to record whether memory has been set since the beginning + of the block. + Note that handling of memory is very simple, we don't make any attempt + to optimize things (later). */ +static int mem_first_set; +static int mem_last_set; + +/* Set the appropriate bit in `rd_gen' [the gen for reaching defs] if the + register set in this insn is not set after this insn in this block. */ + +static void +maybe_set_rd_gen (regno, insn) + int regno; + rtx insn; +{ + if (reg_last_set[regno] <= INSN_CUID (insn)) + SET_BIT (rd_gen[BLOCK_NUM (insn)], INSN_CUID (insn)); +} + +/* Perform a quick check whether X, the source of a set, is something + we want to consider for GCSE. */ + +static int +want_to_gcse_p (x) + rtx x; +{ + enum rtx_code code = GET_CODE (x); + + switch (code) + { + case REG: + case SUBREG: + case CONST_INT: + case CONST_DOUBLE: + case CALL: + return 0; + + default: + break; + } + + return 1; +} + +/* Return non-zero if the operands of expression X are unchanged from the + start of INSN's basic block up to but not including INSN (if AVAIL_P == 0), + or from INSN to the end of INSN's basic block (if AVAIL_P != 0). */ + +static int +oprs_unchanged_p (x, insn, avail_p) + rtx x, insn; + int avail_p; +{ + int i; + enum rtx_code code; + char *fmt; + + /* repeat is used to turn tail-recursion into iteration. 
*/ + repeat: + + if (x == 0) + return 1; + + code = GET_CODE (x); + switch (code) + { + case REG: + if (avail_p) + return (reg_last_set[REGNO (x)] == 0 + || reg_last_set[REGNO (x)] < INSN_CUID (insn)); + else + return (reg_first_set[REGNO (x)] == 0 + || reg_first_set[REGNO (x)] >= INSN_CUID (insn)); + + case MEM: + if (avail_p) + { + if (mem_last_set != 0 + && mem_last_set >= INSN_CUID (insn)) + return 0; + } + else + { + if (mem_first_set != 0 + && mem_first_set < INSN_CUID (insn)) + return 0; + } + x = XEXP (x, 0); + goto repeat; + + case PRE_DEC: + case PRE_INC: + case POST_DEC: + case POST_INC: + return 0; + + case PC: + case CC0: /*FIXME*/ + case CONST: + case CONST_INT: + case CONST_DOUBLE: + case SYMBOL_REF: + case LABEL_REF: + case ADDR_VEC: + case ADDR_DIFF_VEC: + return 1; + + default: + break; + } + + i = GET_RTX_LENGTH (code) - 1; + fmt = GET_RTX_FORMAT (code); + for (; i >= 0; i--) + { + if (fmt[i] == 'e') + { + rtx tem = XEXP (x, i); + + /* If we are about to do the last recursive call + needed at this level, change it into iteration. + This function is called enough to be worth it. */ + if (i == 0) + { + x = tem; + goto repeat; + } + if (! oprs_unchanged_p (tem, insn, avail_p)) + return 0; + } + else if (fmt[i] == 'E') + { + int j; + for (j = 0; j < XVECLEN (x, i); j++) + { + if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p)) + return 0; + } + } + } + + return 1; +} + +/* Return non-zero if the operands of expression X are unchanged from + the start of INSN's basic block up to but not including INSN. */ + +static int +oprs_anticipatable_p (x, insn) + rtx x, insn; +{ + return oprs_unchanged_p (x, insn, 0); +} + +/* Return non-zero if the operands of expression X are unchanged from + INSN to the end of INSN's basic block. */ + +static int +oprs_available_p (x, insn) + rtx x, insn; +{ + return oprs_unchanged_p (x, insn, 1); +} + +/* Hash expression X. + MODE is only used if X is a CONST_INT. + A boolean indicating if a volatile operand is found or if the expression + contains something we don't want to insert in the table is stored in + DO_NOT_RECORD_P. + + ??? One might want to merge this with canon_hash. Later. */ + +static unsigned int +hash_expr (x, mode, do_not_record_p, hash_table_size) + rtx x; + enum machine_mode mode; + int *do_not_record_p; + int hash_table_size; +{ + unsigned int hash; + + *do_not_record_p = 0; + + hash = hash_expr_1 (x, mode, do_not_record_p); + return hash % hash_table_size; +} + +/* Subroutine of hash_expr to do the actual work. */ + +static unsigned int +hash_expr_1 (x, mode, do_not_record_p) + rtx x; + enum machine_mode mode; + int *do_not_record_p; +{ + int i, j; + unsigned hash = 0; + enum rtx_code code; + char *fmt; + + /* repeat is used to turn tail-recursion into iteration. */ + repeat: + + if (x == 0) + return hash; + + code = GET_CODE (x); + switch (code) + { + case REG: + { + register int regno = REGNO (x); + hash += ((unsigned) REG << 7) + regno; + return hash; + } + + case CONST_INT: + { + unsigned HOST_WIDE_INT tem = INTVAL (x); + hash += ((unsigned) CONST_INT << 7) + (unsigned) mode + tem; + return hash; + } + + case CONST_DOUBLE: + /* This is like the general case, except that it only counts + the integers representing the constant. 
*/ + hash += (unsigned) code + (unsigned) GET_MODE (x); + if (GET_MODE (x) != VOIDmode) + for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++) + { + unsigned tem = XINT (x, i); + hash += tem; + } + else + hash += ((unsigned) CONST_DOUBLE_LOW (x) + + (unsigned) CONST_DOUBLE_HIGH (x)); + return hash; + + /* Assume there is only one rtx object for any given label. */ + case LABEL_REF: + /* We don't hash on the address of the CODE_LABEL to avoid bootstrap + differences and differences between each stage's debugging dumps. */ + hash += ((unsigned) LABEL_REF << 7) + CODE_LABEL_NUMBER (XEXP (x, 0)); + return hash; + + case SYMBOL_REF: + { + /* Don't hash on the symbol's address to avoid bootstrap differences. + Different hash values may cause expressions to be recorded in + different orders and thus different registers to be used in the + final assembler. This also avoids differences in the dump files + between various stages. */ + unsigned int h = 0; + unsigned char *p = (unsigned char *) XSTR (x, 0); + while (*p) + h += (h << 7) + *p++; /* ??? revisit */ + hash += ((unsigned) SYMBOL_REF << 7) + h; + return hash; + } + + case MEM: + if (MEM_VOLATILE_P (x)) + { + *do_not_record_p = 1; + return 0; + } + hash += (unsigned) MEM; + x = XEXP (x, 0); + goto repeat; + + case PRE_DEC: + case PRE_INC: + case POST_DEC: + case POST_INC: + case PC: + case CC0: + case CALL: + case UNSPEC_VOLATILE: + *do_not_record_p = 1; + return 0; + + case ASM_OPERANDS: + if (MEM_VOLATILE_P (x)) + { + *do_not_record_p = 1; + return 0; + } + + default: + break; + } + + i = GET_RTX_LENGTH (code) - 1; + hash += (unsigned) code + (unsigned) GET_MODE (x); + fmt = GET_RTX_FORMAT (code); + for (; i >= 0; i--) + { + if (fmt[i] == 'e') + { + rtx tem = XEXP (x, i); + + /* If we are about to do the last recursive call + needed at this level, change it into iteration. + This function is called enough to be worth it. */ + if (i == 0) + { + x = tem; + goto repeat; + } + hash += hash_expr_1 (tem, 0, do_not_record_p); + if (*do_not_record_p) + return 0; + } + else if (fmt[i] == 'E') + for (j = 0; j < XVECLEN (x, i); j++) + { + hash += hash_expr_1 (XVECEXP (x, i, j), 0, do_not_record_p); + if (*do_not_record_p) + return 0; + } + else if (fmt[i] == 's') + { + register unsigned char *p = (unsigned char *) XSTR (x, i); + if (p) + while (*p) + hash += *p++; + } + else if (fmt[i] == 'i') + { + register unsigned tem = XINT (x, i); + hash += tem; + } + else + abort (); + } + + return hash; +} + +/* Hash a set of register REGNO. + + Sets are hashed on the register that is set. + This simplifies the PRE copy propagation code. + + ??? May need to make things more elaborate. Later, as necessary. */ + +static unsigned int +hash_set (regno, hash_table_size) + int regno; + int hash_table_size; +{ + unsigned int hash; + + hash = regno; + return hash % hash_table_size; +} + +/* Return non-zero if exp1 is equivalent to exp2. + ??? Borrowed from cse.c. Might want to remerge with cse.c. Later. */ + +static int +expr_equiv_p (x, y) + rtx x, y; +{ + register int i, j; + register enum rtx_code code; + register char *fmt; + + if (x == y) + return 1; + if (x == 0 || y == 0) + return x == y; + + code = GET_CODE (x); + if (code != GET_CODE (y)) + return 0; + + /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. 
*/ + if (GET_MODE (x) != GET_MODE (y)) + return 0; + + switch (code) + { + case PC: + case CC0: + return x == y; + + case CONST_INT: + return INTVAL (x) == INTVAL (y); + + case LABEL_REF: + return XEXP (x, 0) == XEXP (y, 0); + + case SYMBOL_REF: + return XSTR (x, 0) == XSTR (y, 0); + + case REG: + return REGNO (x) == REGNO (y); + + /* For commutative operations, check both orders. */ + case PLUS: + case MULT: + case AND: + case IOR: + case XOR: + case NE: + case EQ: + return ((expr_equiv_p (XEXP (x, 0), XEXP (y, 0)) + && expr_equiv_p (XEXP (x, 1), XEXP (y, 1))) + || (expr_equiv_p (XEXP (x, 0), XEXP (y, 1)) + && expr_equiv_p (XEXP (x, 1), XEXP (y, 0)))); + + default: + break; + } + + /* Compare the elements. If any pair of corresponding elements + fail to match, return 0 for the whole thing. */ + + fmt = GET_RTX_FORMAT (code); + for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) + { + switch (fmt[i]) + { + case 'e': + if (! expr_equiv_p (XEXP (x, i), XEXP (y, i))) + return 0; + break; + + case 'E': + if (XVECLEN (x, i) != XVECLEN (y, i)) + return 0; + for (j = 0; j < XVECLEN (x, i); j++) + if (! expr_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j))) + return 0; + break; + + case 's': + if (strcmp (XSTR (x, i), XSTR (y, i))) + return 0; + break; + + case 'i': + if (XINT (x, i) != XINT (y, i)) + return 0; + break; + + case 'w': + if (XWINT (x, i) != XWINT (y, i)) + return 0; + break; + + case '0': + break; + + default: + abort (); + } + } + + return 1; +} + +/* Insert expression X in INSN in the hash table. + If it is already present, record it as the last occurrence in INSN's + basic block. + + MODE is the mode of the value X is being stored into. + It is only used if X is a CONST_INT. + + ANTIC_P is non-zero if X is an anticipatable expression. + AVAIL_P is non-zero if X is an available expression. */ + +static void +insert_expr_in_table (x, mode, insn, antic_p, avail_p) + rtx x; + enum machine_mode mode; + rtx insn; + int antic_p, avail_p; +{ + int found, do_not_record_p; + unsigned int hash; + struct expr *cur_expr, *last_expr = NULL; + struct occr *antic_occr, *avail_occr; + struct occr *last_occr = NULL; + + hash = hash_expr (x, mode, &do_not_record_p, expr_hash_table_size); + + /* Do not insert expression in table if it contains volatile operands, + or if hash_expr determines the expression is something we don't want + to or can't handle. */ + if (do_not_record_p) + return; + + cur_expr = expr_hash_table[hash]; + found = 0; + + while (cur_expr && ! (found = expr_equiv_p (cur_expr->expr, x))) + { + /* If the expression isn't found, save a pointer to the end of + the list. */ + last_expr = cur_expr; + cur_expr = cur_expr->next_same_hash; + } + + if (! found) + { + cur_expr = (struct expr *) gcse_alloc (sizeof (struct expr)); + bytes_used += sizeof (struct expr); + if (expr_hash_table[hash] == NULL) + { + /* This is the first pattern that hashed to this index. */ + expr_hash_table[hash] = cur_expr; + } + else + { + /* Add EXPR to end of this hash chain. */ + last_expr->next_same_hash = cur_expr; + } + /* Set the fields of the expr element. */ + cur_expr->expr = x; + cur_expr->bitmap_index = n_exprs++; + cur_expr->next_same_hash = NULL; + cur_expr->antic_occr = NULL; + cur_expr->avail_occr = NULL; + } + + /* Now record the occurrence(s). */ + + if (antic_p) + { + antic_occr = cur_expr->antic_occr; + + /* Search for another occurrence in the same basic block. 
*/ + while (antic_occr && BLOCK_NUM (antic_occr->insn) != BLOCK_NUM (insn)) + { + /* If an occurrence isn't found, save a pointer to the end of + the list. */ + last_occr = antic_occr; + antic_occr = antic_occr->next; + } + + if (antic_occr) + { + /* Found another instance of the expression in the same basic block. + Prefer the currently recorded one. We want the first one in the + block and the block is scanned from start to end. */ + ; /* nothing to do */ + } + else + { + /* First occurrence of this expression in this basic block. */ + antic_occr = (struct occr *) gcse_alloc (sizeof (struct occr)); + bytes_used += sizeof (struct occr); + /* First occurrence of this expression in any block? */ + if (cur_expr->antic_occr == NULL) + cur_expr->antic_occr = antic_occr; + else + last_occr->next = antic_occr; + antic_occr->insn = insn; + antic_occr->next = NULL; + } + } + + if (avail_p) + { + avail_occr = cur_expr->avail_occr; + + /* Search for another occurrence in the same basic block. */ + while (avail_occr && BLOCK_NUM (avail_occr->insn) != BLOCK_NUM (insn)) + { + /* If an occurrence isn't found, save a pointer to the end of + the list. */ + last_occr = avail_occr; + avail_occr = avail_occr->next; + } + + if (avail_occr) + { + /* Found another instance of the expression in the same basic block. + Prefer this occurrence to the currently recorded one. We want + the last one in the block and the block is scanned from start + to end. */ + avail_occr->insn = insn; + } + else + { + /* First occurrence of this expression in this basic block. */ + avail_occr = (struct occr *) gcse_alloc (sizeof (struct occr)); + bytes_used += sizeof (struct occr); + /* First occurrence of this expression in any block? */ + if (cur_expr->avail_occr == NULL) + cur_expr->avail_occr = avail_occr; + else + last_occr->next = avail_occr; + avail_occr->insn = insn; + avail_occr->next = NULL; + } + } +} + +/* Insert pattern X in INSN in the hash table. + X is a SET of a reg to either another reg or a constant. + If it is already present, record it as the last occurrence in INSN's + basic block. */ + +static void +insert_set_in_table (x, insn) + rtx x; + rtx insn; +{ + int found; + unsigned int hash; + struct expr *cur_expr, *last_expr = NULL; + struct occr *cur_occr, *last_occr = NULL; + + if (GET_CODE (x) != SET + || GET_CODE (SET_DEST (x)) != REG) + abort (); + + hash = hash_set (REGNO (SET_DEST (x)), set_hash_table_size); + + cur_expr = set_hash_table[hash]; + found = 0; + + while (cur_expr && ! (found = expr_equiv_p (cur_expr->expr, x))) + { + /* If the expression isn't found, save a pointer to the end of + the list. */ + last_expr = cur_expr; + cur_expr = cur_expr->next_same_hash; + } + + if (! found) + { + cur_expr = (struct expr *) gcse_alloc (sizeof (struct expr)); + bytes_used += sizeof (struct expr); + if (set_hash_table[hash] == NULL) + { + /* This is the first pattern that hashed to this index. */ + set_hash_table[hash] = cur_expr; + } + else + { + /* Add EXPR to end of this hash chain. */ + last_expr->next_same_hash = cur_expr; + } + /* Set the fields of the expr element. + We must copy X because it can be modified when copy propagation is + performed on its operands. */ + /* ??? Should this go in a different obstack? */ + cur_expr->expr = copy_rtx (x); + cur_expr->bitmap_index = n_sets++; + cur_expr->next_same_hash = NULL; + cur_expr->antic_occr = NULL; + cur_expr->avail_occr = NULL; + } + + /* Now record the occurrence. 
*/ + + cur_occr = cur_expr->avail_occr; + + /* Search for another occurrence in the same basic block. */ + while (cur_occr && BLOCK_NUM (cur_occr->insn) != BLOCK_NUM (insn)) + { + /* If an occurrence isn't found, save a pointer to the end of + the list. */ + last_occr = cur_occr; + cur_occr = cur_occr->next; + } + + if (cur_occr) + { + /* Found another instance of the expression in the same basic block. + Prefer this occurrence to the currently recorded one. We want + the last one in the block and the block is scanned from start + to end. */ + cur_occr->insn = insn; + } + else + { + /* First occurrence of this expression in this basic block. */ + cur_occr = (struct occr *) gcse_alloc (sizeof (struct occr)); + bytes_used += sizeof (struct occr); + /* First occurrence of this expression in any block? */ + if (cur_expr->avail_occr == NULL) + cur_expr->avail_occr = cur_occr; + else + last_occr->next = cur_occr; + cur_occr->insn = insn; + cur_occr->next = NULL; + } +} + +/* Scan pattern PAT of INSN and add an entry to the hash table. + If SET_P is non-zero, this is for the assignment hash table, + otherwise it is for the expression hash table. */ + +static void +hash_scan_set (pat, insn, set_p) + rtx pat, insn; + int set_p; +{ + rtx src = SET_SRC (pat); + rtx dest = SET_DEST (pat); + + if (GET_CODE (src) == CALL) + hash_scan_call (src, insn); + + if (GET_CODE (dest) == REG) + { + int regno = REGNO (dest); + rtx tmp; + + /* Only record sets of pseudo-regs in the hash table. */ + if (! set_p + && regno >= FIRST_PSEUDO_REGISTER + /* Don't GCSE something if we can't do a reg/reg copy. */ + && can_copy_p [GET_MODE (dest)] + /* Is SET_SRC something we want to gcse? */ + && want_to_gcse_p (src)) + { + /* An expression is not anticipatable if its operands are + modified before this insn. */ + int antic_p = ! optimize_size && oprs_anticipatable_p (src, insn); + /* An expression is not available if its operands are + subsequently modified, including this insn. */ + int avail_p = oprs_available_p (src, insn); + insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p); + } + /* Record sets for constant/copy propagation. */ + else if (set_p + && regno >= FIRST_PSEUDO_REGISTER + && ((GET_CODE (src) == REG + && REGNO (src) >= FIRST_PSEUDO_REGISTER + && can_copy_p [GET_MODE (dest)]) + /* ??? CONST_INT:wip */ + || GET_CODE (src) == CONST_INT) + /* A copy is not available if its src or dest is subsequently + modified. Here we want to search from INSN+1 on, but + oprs_available_p searches from INSN on. */ + && (insn == BLOCK_END (BLOCK_NUM (insn)) + || ((tmp = next_nonnote_insn (insn)) != NULL_RTX + && oprs_available_p (pat, tmp)))) + insert_set_in_table (pat, insn); + } + + /* Check if first/last set in this block for classic gcse, + but not for copy/constant propagation. */ + if (optimize_size && !set_p) + + { + rtx dest = SET_DEST (pat); + + while (GET_CODE (dest) == SUBREG + || GET_CODE (dest) == ZERO_EXTRACT + || GET_CODE (dest) == SIGN_EXTRACT + || GET_CODE (dest) == STRICT_LOW_PART) + dest = XEXP (dest, 0); + if (GET_CODE (dest) == REG) + maybe_set_rd_gen (REGNO (dest), insn); + } +} + +static void +hash_scan_clobber (x, insn) + rtx x, insn; +{ + /* Currently nothing to do. */ +} + +static void +hash_scan_call (x, insn) + rtx x, insn; +{ + /* Currently nothing to do. */ +} + +/* Process INSN and add hash table entries as appropriate. + + Only available expressions that set a single pseudo-reg are recorded. 
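For example (illustrative), in

     (set (reg 106) (plus (reg 101) (const_int 4)))

   the PLUS expression is a candidate for the expression hash table,
   whereas want_to_gcse_p rejects sources that are trivially cheap or
   not handled here (REG, SUBREG, CONST_INT, CONST_DOUBLE, CALL).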
+ + Single sets in a PARALLEL could be handled, but it's an extra complication + that isn't dealt with right now. The trick is handling the CLOBBERs that + are also in the PARALLEL. Later. + + If SET_P is non-zero, this is for the assignment hash table, + otherwise it is for the expression hash table. */ + +static void +hash_scan_insn (insn, set_p) + rtx insn; + int set_p; +{ + rtx pat = PATTERN (insn); + + /* Pick out the sets of INSN and for other forms of instructions record + what's been modified. */ + + if (GET_CODE (pat) == SET) + hash_scan_set (pat, insn, set_p); + else if (GET_CODE (pat) == PARALLEL) + { + int i; + + for (i = 0; i < XVECLEN (pat, 0); i++) + { + rtx x = XVECEXP (pat, 0, i); + + if (GET_CODE (x) == SET) + { + if (GET_CODE (SET_SRC (x)) == CALL) + hash_scan_call (SET_SRC (x), insn); + + /* Check if first/last set in this block for classic + gcse, but not for constant/copy propagation. */ + if (optimize_size && !set_p) + { + rtx dest = SET_DEST (x); + + while (GET_CODE (dest) == SUBREG + || GET_CODE (dest) == ZERO_EXTRACT + || GET_CODE (dest) == SIGN_EXTRACT + || GET_CODE (dest) == STRICT_LOW_PART) + dest = XEXP (dest, 0); + if (GET_CODE (dest) == REG) + maybe_set_rd_gen (REGNO (dest), insn); + } + } + else if (GET_CODE (x) == CLOBBER) + hash_scan_clobber (x, insn); + else if (GET_CODE (x) == CALL) + hash_scan_call (x, insn); + } + } + else if (GET_CODE (pat) == CLOBBER) + hash_scan_clobber (pat, insn); + else if (GET_CODE (pat) == CALL) + hash_scan_call (pat, insn); +} + +static void +dump_hash_table (file, name, table, table_size, total_size) + FILE *file; + char *name; + struct expr **table; + int table_size, total_size; +{ + int i; + /* Flattened out table, so it's printed in proper order. */ + struct expr **flat_table = (struct expr **) alloca (total_size * sizeof (struct expr *)); + unsigned int *hash_val = (unsigned int *) alloca (total_size * sizeof (unsigned int)); + + bzero ((char *) flat_table, total_size * sizeof (struct expr *)); + for (i = 0; i < table_size; i++) + { + struct expr *expr; + + for (expr = table[i]; expr != NULL; expr = expr->next_same_hash) + { + flat_table[expr->bitmap_index] = expr; + hash_val[expr->bitmap_index] = i; + } + } + + fprintf (file, "%s hash table (%d buckets, %d entries)\n", + name, table_size, total_size); + + for (i = 0; i < total_size; i++) + { + struct expr *expr = flat_table[i]; + + fprintf (file, "Index %d (hash value %d)\n ", + expr->bitmap_index, hash_val[i]); + print_rtl (file, expr->expr); + fprintf (file, "\n"); + } + + fprintf (file, "\n"); +} + +/* Record register first/last/block set information for REGNO in INSN. + reg_first_set records the first place in the block where the register + is set and is used to compute "anticipatability". + reg_last_set records the last place in the block where the register + is set and is used to compute "availability". + reg_set_in_block records whether the register is set in the block + and is used to compute "transparency". */ + +static void +record_last_reg_set_info (insn, regno) + rtx insn; + int regno; +{ + if (reg_first_set[regno] == 0) + reg_first_set[regno] = INSN_CUID (insn); + reg_last_set[regno] = INSN_CUID (insn); + SET_BIT (reg_set_in_block[BLOCK_NUM (insn)], regno); +} + +/* Record memory first/last/block set information for INSN. 
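+   Memory is currently treated as a single unit: without alias analysis,
+   a store is assumed to modify all of memory, so a single store in a
+   block kills every MEM expression in that block.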
*/ + +static void +record_last_mem_set_info (insn) + rtx insn; +{ + if (mem_first_set == 0) + mem_first_set = INSN_CUID (insn); + mem_last_set = INSN_CUID (insn); + mem_set_in_block[BLOCK_NUM (insn)] = 1; +} + +/* Used for communicating between next two routines. */ +static rtx last_set_insn; + +/* Called from compute_hash_table via note_stores to handle one + SET or CLOBBER in an insn. */ + +static void +record_last_set_info (dest, setter) + rtx dest, setter; +{ + if (GET_CODE (dest) == SUBREG) + dest = SUBREG_REG (dest); + + if (GET_CODE (dest) == REG) + record_last_reg_set_info (last_set_insn, REGNO (dest)); + else if (GET_CODE (dest) == MEM + /* Ignore pushes, they clobber nothing. */ + && ! push_operand (dest, GET_MODE (dest))) + record_last_mem_set_info (last_set_insn); +} + +/* Top level function to create an expression or assignment hash table. + + Expression entries are placed in the hash table if + - they are of the form (set (pseudo-reg) src), + - src is something we want to perform GCSE on, + - none of the operands are subsequently modified in the block + + Assignment entries are placed in the hash table if + - they are of the form (set (pseudo-reg) src), + - src is something we want to perform const/copy propagation on, + - none of the operands or target are subsequently modified in the block + Currently src must be a pseudo-reg or a const_int. + + F is the first insn. + SET_P is non-zero for computing the assignment hash table. */ + +static void +compute_hash_table (f, set_p) + rtx f; + int set_p; +{ + int bb; + + /* While we compute the hash table we also compute a bit array of which + registers are set in which blocks. + We also compute which blocks set memory, in the absence of aliasing + support [which is TODO]. + ??? This isn't needed during const/copy propagation, but it's cheap to + compute. Later. */ + sbitmap_vector_zero (reg_set_in_block, n_basic_blocks); + bzero ((char *) mem_set_in_block, n_basic_blocks); + + /* Some working arrays used to track first and last set in each block. */ + /* ??? One could use alloca here, but at some size a threshold is crossed + beyond which one should use malloc. Are we at that threshold here? */ + reg_first_set = (int *) gmalloc (max_gcse_regno * sizeof (int)); + reg_last_set = (int *) gmalloc (max_gcse_regno * sizeof (int)); + + for (bb = 0; bb < n_basic_blocks; bb++) + { + rtx insn; + int regno; + + /* First pass over the instructions records information used to + determine when registers and memory are first and last set. + ??? The mem_set_in_block and hard-reg reg_set_in_block computation + could be moved to compute_sets since they currently don't change. */ + + bzero ((char *) reg_first_set, max_gcse_regno * sizeof (int)); + bzero ((char *) reg_last_set, max_gcse_regno * sizeof (int)); + mem_first_set = 0; + mem_last_set = 0; + + for (insn = basic_block_head[bb]; + insn && insn != NEXT_INSN (basic_block_end[bb]); + insn = NEXT_INSN (insn)) + { +#ifdef NON_SAVING_SETJMP + if (NON_SAVING_SETJMP && GET_CODE (insn) == NOTE + && NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP) + { + for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) + record_last_reg_set_info (insn, regno); + continue; + } +#endif + + if (GET_RTX_CLASS (GET_CODE (insn)) != 'i') + continue; + + if (GET_CODE (insn) == CALL_INSN) + { + for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) + if (call_used_regs[regno]) + record_last_reg_set_info (insn, regno); + if (! 
CONST_CALL_P (insn)) + record_last_mem_set_info (insn); + } + + last_set_insn = insn; + note_stores (PATTERN (insn), record_last_set_info); + } + + /* The next pass builds the hash table. */ + + for (insn = basic_block_head[bb]; + insn && insn != NEXT_INSN (basic_block_end[bb]); + insn = NEXT_INSN (insn)) + { + if (GET_RTX_CLASS (GET_CODE (insn)) == 'i') + hash_scan_insn (insn, set_p); + } + } + + free (reg_first_set); + free (reg_last_set); + /* Catch bugs early. */ + reg_first_set = reg_last_set = 0; +} + +/* Allocate space for the set hash table. + N_INSNS is the number of instructions in the function. + It is used to determine the number of buckets to use. */ + +static void +alloc_set_hash_table (n_insns) + int n_insns; +{ + int n; + + set_hash_table_size = n_insns / 4; + if (set_hash_table_size < 11) + set_hash_table_size = 11; + /* Attempt to maintain efficient use of hash table. + Making it an odd number is simplest for now. + ??? Later take some measurements. */ + set_hash_table_size |= 1; + n = set_hash_table_size * sizeof (struct expr *); + set_hash_table = (struct expr **) gmalloc (n); +} + +/* Free things allocated by alloc_set_hash_table. */ + +static void +free_set_hash_table () +{ + free (set_hash_table); +} + +/* Compute the hash table for doing copy/const propagation. */ + +static void +compute_set_hash_table (f) + rtx f; +{ + /* Initialize count of number of entries in hash table. */ + n_sets = 0; + bzero ((char *) set_hash_table, set_hash_table_size * sizeof (struct expr *)); + + compute_hash_table (f, 1); +} + +/* Allocate space for the expression hash table. + N_INSNS is the number of instructions in the function. + It is used to determine the number of buckets to use. */ + +static void +alloc_expr_hash_table (n_insns) + int n_insns; +{ + int n; + + expr_hash_table_size = n_insns / 2; + /* Make sure the amount is usable. */ + if (expr_hash_table_size < 11) + expr_hash_table_size = 11; + /* Attempt to maintain efficient use of hash table. + Making it an odd number is simplest for now. + ??? Later take some measurements. */ + expr_hash_table_size |= 1; + n = expr_hash_table_size * sizeof (struct expr *); + expr_hash_table = (struct expr **) gmalloc (n); +} + +/* Free things allocated by alloc_expr_hash_table. */ + +static void +free_expr_hash_table () +{ + free (expr_hash_table); +} + +/* Compute the hash table for doing GCSE. */ + +static void +compute_expr_hash_table (f) + rtx f; +{ + /* Initialize count of number of entries in hash table. */ + n_exprs = 0; + bzero ((char *) expr_hash_table, expr_hash_table_size * sizeof (struct expr *)); + + compute_hash_table (f, 0); +} + +/* Expression tracking support. */ + +/* Lookup pattern PAT in the expression table. + The result is a pointer to the table entry, or NULL if not found. */ + +static struct expr * +lookup_expr (pat) + rtx pat; +{ + int do_not_record_p; + unsigned int hash = hash_expr (pat, GET_MODE (pat), &do_not_record_p, + expr_hash_table_size); + struct expr *expr; + + if (do_not_record_p) + return NULL; + + expr = expr_hash_table[hash]; + + while (expr && ! expr_equiv_p (expr->expr, pat)) + expr = expr->next_same_hash; + + return expr; +} + +/* Lookup REGNO in the set table. + If PAT is non-NULL look for the entry that matches it, otherwise return + the first entry for REGNO. + The result is a pointer to the table entry, or NULL if not found. 
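+   Several entries for REGNO can share one hash chain; next_set, below,
+   continues the walk from a given entry.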
*/ + +static struct expr * +lookup_set (regno, pat) + int regno; + rtx pat; +{ + unsigned int hash = hash_set (regno, set_hash_table_size); + struct expr *expr; + + expr = set_hash_table[hash]; + + if (pat) + { + while (expr && ! expr_equiv_p (expr->expr, pat)) + expr = expr->next_same_hash; + } + else + { + while (expr && REGNO (SET_DEST (expr->expr)) != regno) + expr = expr->next_same_hash; + } + + return expr; +} + +/* Return the next entry for REGNO in list EXPR. */ + +static struct expr * +next_set (regno, expr) + int regno; + struct expr *expr; +{ + do + expr = expr->next_same_hash; + while (expr && REGNO (SET_DEST (expr->expr)) != regno); + return expr; +} + +/* Reset tables used to keep track of what's still available [since the + start of the block]. */ + +static void +reset_opr_set_tables () +{ + /* Maintain a bitmap of which regs have been set since beginning of + the block. */ + sbitmap_zero (reg_set_bitmap); + /* Also keep a record of the last instruction to modify memory. + For now this is very trivial, we only record whether any memory + location has been modified. */ + mem_last_set = 0; +} + +/* Return non-zero if the operands of X are not set before INSN in + INSN's basic block. */ + +static int +oprs_not_set_p (x, insn) + rtx x, insn; +{ + int i; + enum rtx_code code; + char *fmt; + + /* repeat is used to turn tail-recursion into iteration. */ +repeat: + + if (x == 0) + return 1; + + code = GET_CODE (x); + switch (code) + { + case PC: + case CC0: + case CONST: + case CONST_INT: + case CONST_DOUBLE: + case SYMBOL_REF: + case LABEL_REF: + case ADDR_VEC: + case ADDR_DIFF_VEC: + return 1; + + case MEM: + if (mem_last_set != 0) + return 0; + x = XEXP (x, 0); + goto repeat; + + case REG: + return ! TEST_BIT (reg_set_bitmap, REGNO (x)); + + default: + break; + } + + fmt = GET_RTX_FORMAT (code); + for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) + { + if (fmt[i] == 'e') + { + int not_set_p; + /* If we are about to do the last recursive call + needed at this level, change it into iteration. + This function is called enough to be worth it. */ + if (i == 0) + { + x = XEXP (x, 0); + goto repeat; + } + not_set_p = oprs_not_set_p (XEXP (x, i), insn); + if (! not_set_p) + return 0; + } + else if (fmt[i] == 'E') + { + int j; + for (j = 0; j < XVECLEN (x, i); j++) + { + int not_set_p = oprs_not_set_p (XVECEXP (x, i, j), insn); + if (! not_set_p) + return 0; + } + } + } + + return 1; +} + +/* Mark things set by a CALL. */ + +static void +mark_call (pat, insn) + rtx pat, insn; +{ + mem_last_set = INSN_CUID (insn); +} + +/* Mark things set by a SET. */ + +static void +mark_set (pat, insn) + rtx pat, insn; +{ + rtx dest = SET_DEST (pat); + + while (GET_CODE (dest) == SUBREG + || GET_CODE (dest) == ZERO_EXTRACT + || GET_CODE (dest) == SIGN_EXTRACT + || GET_CODE (dest) == STRICT_LOW_PART) + dest = XEXP (dest, 0); + + if (GET_CODE (dest) == REG) + SET_BIT (reg_set_bitmap, REGNO (dest)); + else if (GET_CODE (dest) == MEM) + mem_last_set = INSN_CUID (insn); + + if (GET_CODE (SET_SRC (pat)) == CALL) + mark_call (SET_SRC (pat), insn); +} + +/* Record things set by a CLOBBER. */ + +static void +mark_clobber (pat, insn) + rtx pat, insn; +{ + rtx clob = XEXP (pat, 0); + + while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART) + clob = XEXP (clob, 0); + + if (GET_CODE (clob) == REG) + SET_BIT (reg_set_bitmap, REGNO (clob)); + else + mem_last_set = INSN_CUID (insn); +} + +/* Record things set by INSN. + This data is used by oprs_not_set_p. 
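+   It is called for each insn as a block is scanned, so at any point
+   oprs_not_set_p sees exactly the sets that occurred between the block
+   head and the current insn.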
*/ + +static void +mark_oprs_set (insn) + rtx insn; +{ + rtx pat = PATTERN (insn); + + if (GET_CODE (pat) == SET) + mark_set (pat, insn); + else if (GET_CODE (pat) == PARALLEL) + { + int i; + + for (i = 0; i < XVECLEN (pat, 0); i++) + { + rtx x = XVECEXP (pat, 0, i); + + if (GET_CODE (x) == SET) + mark_set (x, insn); + else if (GET_CODE (x) == CLOBBER) + mark_clobber (x, insn); + else if (GET_CODE (x) == CALL) + mark_call (x, insn); + } + } + else if (GET_CODE (pat) == CLOBBER) + mark_clobber (pat, insn); + else if (GET_CODE (pat) == CALL) + mark_call (pat, insn); +} + +/* Classic GCSE reaching definition support. */ + +/* Allocate reaching def variables. */ + +static void +alloc_rd_mem (n_blocks, n_insns) + int n_blocks, n_insns; +{ + rd_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns); + sbitmap_vector_zero (rd_kill, n_basic_blocks); + + rd_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns); + sbitmap_vector_zero (rd_gen, n_basic_blocks); + + reaching_defs = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns); + sbitmap_vector_zero (reaching_defs, n_basic_blocks); + + rd_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns); + sbitmap_vector_zero (rd_out, n_basic_blocks); +} + +/* Free reaching def variables. */ + +static void +free_rd_mem () +{ + free (rd_kill); + free (rd_gen); + free (reaching_defs); + free (rd_out); +} + +/* Add INSN to the kills of BB. + REGNO, set in BB, is killed by INSN. */ + +static void +handle_rd_kill_set (insn, regno, bb) + rtx insn; + int regno, bb; +{ + struct reg_set *this_reg = reg_set_table[regno]; + + while (this_reg) + { + if (BLOCK_NUM (this_reg->insn) != BLOCK_NUM (insn)) + SET_BIT (rd_kill[bb], INSN_CUID (this_reg->insn)); + this_reg = this_reg->next; + } +} + +static void +dump_rd_table (file, title, bmap) + FILE *file; + char *title; + sbitmap *bmap; +{ + int bb,cuid,i,j,n; + + fprintf (file, "%s\n", title); + for (bb = 0; bb < n_basic_blocks; bb++) + { + fprintf (file, "BB %d\n", bb); + dump_sbitmap (file, bmap[bb]); + for (i = n = cuid = 0; i < bmap[bb]->size; i++) + { + for (j = 0; j < SBITMAP_ELT_BITS; j++, cuid++) + { + if ((bmap[bb]->elms[i] & (1 << j)) != 0) + { + if (n % 10 == 0) + fprintf (file, " "); + fprintf (file, " %d", INSN_UID (CUID_INSN (cuid))); + n++; + } + } + } + if (n != 0) + fprintf (file, "\n"); + } + fprintf (file, "\n"); +} + +/* Compute the set of kill's for reaching definitions. */ + +static void +compute_kill_rd () +{ + int bb,cuid; + + /* For each block + For each set bit in `gen' of the block (i.e each insn which + generates a definition in the block) + Call the reg set by the insn corresponding to that bit regx + Look at the linked list starting at reg_set_table[regx] + For each setting of regx in the linked list, which is not in + this block + Set the bit in `kill' corresponding to that insn + */ + + for (bb = 0; bb < n_basic_blocks; bb++) + { + for (cuid = 0; cuid < max_cuid; cuid++) + { + if (TEST_BIT (rd_gen[bb], cuid)) + { + rtx insn = CUID_INSN (cuid); + rtx pat = PATTERN (insn); + + if (GET_CODE (insn) == CALL_INSN) + { + int regno; + + for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) + { + if (call_used_regs[regno]) + handle_rd_kill_set (insn, regno, bb); + } + } + + if (GET_CODE (pat) == PARALLEL) + { + int i; + + /* We work backwards because ... 
*/ + for (i = XVECLEN (pat, 0) - 1; i >= 0; i--) + { + enum rtx_code code = GET_CODE (XVECEXP (pat, 0, i)); + if ((code == SET || code == CLOBBER) + && GET_CODE (XEXP (XVECEXP (pat, 0, i), 0)) == REG) + handle_rd_kill_set (insn, + REGNO (XEXP (XVECEXP (pat, 0, i), 0)), + bb); + } + } + else if (GET_CODE (pat) == SET) + { + if (GET_CODE (SET_DEST (pat)) == REG) + { + /* Each setting of this register outside of this block + must be marked in the set of kills in this block. */ + handle_rd_kill_set (insn, REGNO (SET_DEST (pat)), bb); + } + } + /* FIXME: CLOBBER? */ + } + } + } +} + +/* Compute the reaching definitions as in + Compilers Principles, Techniques, and Tools. Aho, Sethi, Ullman, + Chapter 10. It is the same algorithm as used for computing available + expressions but applied to the gens and kills of reaching definitions. */ + +static void +compute_rd () +{ + int bb, changed, passes; + + for (bb = 0; bb < n_basic_blocks; bb++) + sbitmap_copy (rd_out[bb] /*dst*/, rd_gen[bb] /*src*/); + + passes = 0; + changed = 1; + while (changed) + { + changed = 0; + for (bb = 0; bb < n_basic_blocks; bb++) + { + sbitmap_union_of_predecessors (reaching_defs[bb], rd_out, + bb, s_preds); + changed |= sbitmap_union_of_diff (rd_out[bb], rd_gen[bb], + reaching_defs[bb], rd_kill[bb]); + } + passes++; + } + + if (gcse_file) + fprintf (gcse_file, "reaching def computation: %d passes\n", passes); +} + +/* Classic GCSE available expression support. */ + +/* Allocate memory for available expression computation. */ + +static void +alloc_avail_expr_mem (n_blocks, n_exprs) + int n_blocks, n_exprs; +{ + ae_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs); + sbitmap_vector_zero (ae_kill, n_basic_blocks); + + ae_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs); + sbitmap_vector_zero (ae_gen, n_basic_blocks); + + ae_in = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs); + sbitmap_vector_zero (ae_in, n_basic_blocks); + + ae_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs); + sbitmap_vector_zero (ae_out, n_basic_blocks); + + u_bitmap = (sbitmap) sbitmap_alloc (n_exprs); + sbitmap_ones (u_bitmap); +} + +static void +free_avail_expr_mem () +{ + free (ae_kill); + free (ae_gen); + free (ae_in); + free (ae_out); + free (u_bitmap); +} + +/* Compute the set of available expressions generated in each basic block. */ + +static void +compute_ae_gen () +{ + int i; + + /* For each recorded occurrence of each expression, set ae_gen[bb][expr]. + This is all we have to do because an expression is not recorded if it + is not available, and the only expressions we want to work with are the + ones that are recorded. */ + + for (i = 0; i < expr_hash_table_size; i++) + { + struct expr *expr = expr_hash_table[i]; + while (expr != NULL) + { + struct occr *occr = expr->avail_occr; + while (occr != NULL) + { + SET_BIT (ae_gen[BLOCK_NUM (occr->insn)], expr->bitmap_index); + occr = occr->next; + } + expr = expr->next_same_hash; + } + } +} + +/* Return non-zero if expression X is killed in BB. */ + +static int +expr_killed_p (x, bb) + rtx x; + int bb; +{ + int i; + enum rtx_code code; + char *fmt; + + /* repeat is used to turn tail-recursion into iteration. 
*/ + repeat: + + if (x == 0) + return 1; + + code = GET_CODE (x); + switch (code) + { + case REG: + return TEST_BIT (reg_set_in_block[bb], REGNO (x)); + + case MEM: + if (mem_set_in_block[bb]) + return 1; + x = XEXP (x, 0); + goto repeat; + + case PC: + case CC0: /*FIXME*/ + case CONST: + case CONST_INT: + case CONST_DOUBLE: + case SYMBOL_REF: + case LABEL_REF: + case ADDR_VEC: + case ADDR_DIFF_VEC: + return 0; + + default: + break; + } + + i = GET_RTX_LENGTH (code) - 1; + fmt = GET_RTX_FORMAT (code); + for (; i >= 0; i--) + { + if (fmt[i] == 'e') + { + rtx tem = XEXP (x, i); + + /* If we are about to do the last recursive call + needed at this level, change it into iteration. + This function is called enough to be worth it. */ + if (i == 0) + { + x = tem; + goto repeat; + } + if (expr_killed_p (tem, bb)) + return 1; + } + else if (fmt[i] == 'E') + { + int j; + for (j = 0; j < XVECLEN (x, i); j++) + { + if (expr_killed_p (XVECEXP (x, i, j), bb)) + return 1; + } + } + } + + return 0; +} + +/* Compute the set of available expressions killed in each basic block. */ + +static void +compute_ae_kill () +{ + int bb,i; + + for (bb = 0; bb < n_basic_blocks; bb++) + { + for (i = 0; i < expr_hash_table_size; i++) + { + struct expr *expr = expr_hash_table[i]; + + for ( ; expr != NULL; expr = expr->next_same_hash) + { + /* Skip EXPR if generated in this block. */ + if (TEST_BIT (ae_gen[bb], expr->bitmap_index)) + continue; + + if (expr_killed_p (expr->expr, bb)) + SET_BIT (ae_kill[bb], expr->bitmap_index); + } + } + } +} + +/* Compute available expressions. + + Implement the algorithm to find available expressions + as given in the Aho Sethi Ullman book, pages 627-631. */ + +static void +compute_available () +{ + int bb, changed, passes; + + sbitmap_zero (ae_in[0]); + + sbitmap_copy (ae_out[0] /*dst*/, ae_gen[0] /*src*/); + + for (bb = 1; bb < n_basic_blocks; bb++) + sbitmap_difference (ae_out[bb], u_bitmap, ae_kill[bb]); + + passes = 0; + changed = 1; + while (changed) + { + changed = 0; + for (bb = 1; bb < n_basic_blocks; bb++) + { + sbitmap_intersect_of_predecessors (ae_in[bb], ae_out, + bb, s_preds); + changed |= sbitmap_union_of_diff (ae_out[bb], ae_gen[bb], + ae_in[bb], ae_kill[bb]); + } + passes++; + } + + if (gcse_file) + fprintf (gcse_file, "avail expr computation: %d passes\n", passes); +} + +/* Actually perform the Classic GCSE optimizations. */ + +/* Return non-zero if occurrence OCCR of expression EXPR reaches block BB. + + CHECK_SELF_LOOP is non-zero if we should consider a block reaching itself + as a positive reach. We want to do this when there are two computations + of the expression in the block. + + VISITED is a pointer to a working buffer for tracking which BB's have + been visited. It is NULL for the top-level call. + + We treat reaching expressions that go through blocks containing the same + reaching expression as "not reaching". E.g. if EXPR is generated in blocks + 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block + 2 as not reaching. The intent is to improve the probability of finding + only one reaching expression and to reduce register lifetimes by picking + the closest such expression. 
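+
+   For instance, if a block computes the expression twice and loops back
+   to itself, CHECK_SELF_LOOP lets the occurrence from the previous
+   iteration make the later computation redundant.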
*/ + +static int +expr_reaches_here_p (occr, expr, bb, check_self_loop, visited) + struct occr *occr; + struct expr *expr; + int bb; + int check_self_loop; + char *visited; +{ + int_list_ptr pred; + + if (visited == NULL) + { + visited = (char *) alloca (n_basic_blocks); + bzero (visited, n_basic_blocks); + } + + for (pred = s_preds[bb]; pred != NULL; pred = pred->next) + { + int pred_bb = INT_LIST_VAL (pred); + + if (visited[pred_bb]) + { + /* This predecessor has already been visited. + Nothing to do. */ + ; + } + else if (pred_bb == bb) + { + /* BB loops on itself. */ + if (check_self_loop + && TEST_BIT (ae_gen[pred_bb], expr->bitmap_index) + && BLOCK_NUM (occr->insn) == pred_bb) + return 1; + visited[pred_bb] = 1; + } + /* Ignore this predecessor if it kills the expression. */ + else if (TEST_BIT (ae_kill[pred_bb], expr->bitmap_index)) + visited[pred_bb] = 1; + /* Does this predecessor generate this expression? */ + else if (TEST_BIT (ae_gen[pred_bb], expr->bitmap_index)) + { + /* Is this the occurrence we're looking for? + Note that there's only one generating occurrence per block + so we just need to check the block number. */ + if (BLOCK_NUM (occr->insn) == pred_bb) + return 1; + visited[pred_bb] = 1; + } + /* Neither gen nor kill. */ + else + { + visited[pred_bb] = 1; + if (expr_reaches_here_p (occr, expr, pred_bb, check_self_loop, visited)) + return 1; + } + } + + /* All paths have been checked. */ + return 0; +} + +/* Return the instruction that computes EXPR that reaches INSN's basic block. + If there is more than one such instruction, return NULL. + + Called only by handle_avail_expr. */ + +static rtx +computing_insn (expr, insn) + struct expr *expr; + rtx insn; +{ + int bb = BLOCK_NUM (insn); + + if (expr->avail_occr->next == NULL) + { + if (BLOCK_NUM (expr->avail_occr->insn) == bb) + { + /* The available expression is actually itself + (i.e. a loop in the flow graph) so do nothing. */ + return NULL; + } + /* (FIXME) Case that we found a pattern that was created by + a substitution that took place. */ + return expr->avail_occr->insn; + } + else + { + /* Pattern is computed more than once. + Search backwards from this insn to see how many of these + computations actually reach this insn. */ + struct occr *occr; + rtx insn_computes_expr = NULL; + int can_reach = 0; + + for (occr = expr->avail_occr; occr != NULL; occr = occr->next) + { + if (BLOCK_NUM (occr->insn) == bb) + { + /* The expression is generated in this block. + The only time we care about this is when the expression + is generated later in the block [and thus there's a loop]. + We let the normal cse pass handle the other cases. */ + if (INSN_CUID (insn) < INSN_CUID (occr->insn)) + { + if (expr_reaches_here_p (occr, expr, bb, 1, NULL)) + { + can_reach++; + if (can_reach > 1) + return NULL; + insn_computes_expr = occr->insn; + } + } + } + else /* Computation of the pattern outside this block. */ + { + if (expr_reaches_here_p (occr, expr, bb, 0, NULL)) + { + can_reach++; + if (can_reach > 1) + return NULL; + insn_computes_expr = occr->insn; + } + } + } + + if (insn_computes_expr == NULL) + abort (); + return insn_computes_expr; + } +} + +/* Return non-zero if the definition in DEF_INSN can reach INSN. + Only called by can_disregard_other_sets. 
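+   Within a single block this reduces to checking that DEF_INSN comes
+   before INSN and that the register is not set again between the two.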
*/ + +static int +def_reaches_here_p (insn, def_insn) + rtx insn, def_insn; +{ + rtx reg; + + if (TEST_BIT (reaching_defs[BLOCK_NUM (insn)], INSN_CUID (def_insn))) + return 1; + + if (BLOCK_NUM (insn) == BLOCK_NUM (def_insn)) + { + if (INSN_CUID (def_insn) < INSN_CUID (insn)) + { + if (GET_CODE (PATTERN (def_insn)) == PARALLEL) + return 1; + if (GET_CODE (PATTERN (def_insn)) == CLOBBER) + reg = XEXP (PATTERN (def_insn), 0); + else if (GET_CODE (PATTERN (def_insn)) == SET) + reg = SET_DEST (PATTERN (def_insn)); + else + abort (); + return ! reg_set_between_p (reg, NEXT_INSN (def_insn), insn); + } + else + return 0; + } + + return 0; +} + +/* Return non-zero if *ADDR_THIS_REG can only have one value at INSN. + The value returned is the number of definitions that reach INSN. + Returning a value of zero means that [maybe] more than one definition + reaches INSN and the caller can't perform whatever optimization it is + trying. i.e. it is always safe to return zero. */ + +static int +can_disregard_other_sets (addr_this_reg, insn, for_combine) + struct reg_set **addr_this_reg; + rtx insn; + int for_combine; +{ + int number_of_reaching_defs = 0; + struct reg_set *this_reg = *addr_this_reg; + + while (this_reg) + { + if (def_reaches_here_p (insn, this_reg->insn)) + { + number_of_reaching_defs++; + /* Ignore parallels for now. */ + if (GET_CODE (PATTERN (this_reg->insn)) == PARALLEL) + return 0; + if (!for_combine + && (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER + || ! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)), + SET_SRC (PATTERN (insn))))) + { + /* A setting of the reg to a different value reaches INSN. */ + return 0; + } + if (number_of_reaching_defs > 1) + { + /* If in this setting the value the register is being + set to is equal to the previous value the register + was set to and this setting reaches the insn we are + trying to do the substitution on then we are ok. */ + + if (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER) + return 0; + if (! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)), + SET_SRC (PATTERN (insn)))) + return 0; + } + *addr_this_reg = this_reg; + } + + /* prev_this_reg = this_reg; */ + this_reg = this_reg->next; + } + + return number_of_reaching_defs; +} + +/* Expression computed by insn is available and the substitution is legal, + so try to perform the substitution. + + The result is non-zero if any changes were made. */ + +static int +handle_avail_expr (insn, expr) + rtx insn; + struct expr *expr; +{ + rtx pat, insn_computes_expr; + rtx to; + struct reg_set *this_reg; + int found_setting, use_src; + int changed = 0; + + /* We only handle the case where one computation of the expression + reaches this instruction. */ + insn_computes_expr = computing_insn (expr, insn); + if (insn_computes_expr == NULL) + return 0; + + found_setting = 0; + use_src = 0; + + /* At this point we know only one computation of EXPR outside of this + block reaches this insn. Now try to find a register that the + expression is computed into. */ + + if (GET_CODE (SET_SRC (PATTERN (insn_computes_expr))) == REG) + { + /* This is the case when the available expression that reaches + here has already been handled as an available expression. */ + int regnum_for_replacing = REGNO (SET_SRC (PATTERN (insn_computes_expr))); + /* If the register was created by GCSE we can't use `reg_set_table', + however we know it's set only once. */ + if (regnum_for_replacing >= max_gcse_regno + /* If the register the expression is computed into is set only once, + or only one set reaches this insn, we can use it. 
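+   Either way the register is known to still hold the expression's
+   value at INSN.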
*/ + || (((this_reg = reg_set_table[regnum_for_replacing]), + this_reg->next == NULL) + || can_disregard_other_sets (&this_reg, insn, 0))) + { + use_src = 1; + found_setting = 1; + } + } + + if (!found_setting) + { + int regnum_for_replacing = REGNO (SET_DEST (PATTERN (insn_computes_expr))); + /* This shouldn't happen. */ + if (regnum_for_replacing >= max_gcse_regno) + abort (); + this_reg = reg_set_table[regnum_for_replacing]; + /* If the register the expression is computed into is set only once, + or only one set reaches this insn, use it. */ + if (this_reg->next == NULL + || can_disregard_other_sets (&this_reg, insn, 0)) + found_setting = 1; + } + + if (found_setting) + { + pat = PATTERN (insn); + if (use_src) + to = SET_SRC (PATTERN (insn_computes_expr)); + else + to = SET_DEST (PATTERN (insn_computes_expr)); + changed = validate_change (insn, &SET_SRC (pat), to, 0); + + /* We should be able to ignore the return code from validate_change but + to play it safe we check. */ + if (changed) + { + gcse_subst_count++; + if (gcse_file != NULL) + { + fprintf (gcse_file, "GCSE: Replacing the source in insn %d with reg %d %s insn %d\n", + INSN_UID (insn), REGNO (to), + use_src ? "from" : "set in", + INSN_UID (insn_computes_expr)); + } + + } + } + /* The register that the expr is computed into is set more than once. */ + else if (1 /*expensive_op(this_pattrn->op) && do_expensive_gcse)*/) + { + /* Insert an insn after insnx that copies the reg set in insnx + into a new pseudo register call this new register REGN. + From insnb until end of basic block or until REGB is set + replace all uses of REGB with REGN. */ + rtx new_insn; + + to = gen_reg_rtx (GET_MODE (SET_DEST (PATTERN (insn_computes_expr)))); + + /* Generate the new insn. */ + /* ??? If the change fails, we return 0, even though we created + an insn. I think this is ok. */ + new_insn = emit_insn_after (gen_rtx (SET, VOIDmode, to, + SET_DEST (PATTERN (insn_computes_expr))), + insn_computes_expr); + /* Keep block number table up to date. */ + set_block_num (new_insn, BLOCK_NUM (insn_computes_expr)); + /* Keep register set table up to date. */ + record_one_set (REGNO (to), new_insn); + + gcse_create_count++; + if (gcse_file != NULL) + { + fprintf (gcse_file, "GCSE: Creating insn %d to copy value of reg %d, computed in insn %d,\n", + INSN_UID (NEXT_INSN (insn_computes_expr)), + REGNO (SET_SRC (PATTERN (NEXT_INSN (insn_computes_expr)))), + INSN_UID (insn_computes_expr)); + fprintf (gcse_file, " into newly allocated reg %d\n", REGNO (to)); + } + + pat = PATTERN (insn); + + /* Do register replacement for INSN. */ + changed = validate_change (insn, &SET_SRC (pat), + SET_DEST (PATTERN (NEXT_INSN (insn_computes_expr))), + 0); + + /* We should be able to ignore the return code from validate_change but + to play it safe we check. */ + if (changed) + { + gcse_subst_count++; + if (gcse_file != NULL) + { + fprintf (gcse_file, "GCSE: Replacing the source in insn %d with reg %d set in insn %d\n", + INSN_UID (insn), + REGNO (SET_DEST (PATTERN (NEXT_INSN (insn_computes_expr)))), + INSN_UID (insn_computes_expr)); + } + + } + } + + return changed; +} + +/* Perform classic GCSE. + This is called by one_classic_gcse_pass after all the dataflow analysis + has been done. + + The result is non-zero if a change was made. */ + +static int +classic_gcse () +{ + int bb, changed; + rtx insn; + + /* Note we start at block 1. 
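+   Nothing can be available on entry to the first block (AE_IN is empty
+   there), so block 0 is skipped.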
*/ + + changed = 0; + for (bb = 1; bb < n_basic_blocks; bb++) + { + /* Reset tables used to keep track of what's still valid [since the + start of the block]. */ + reset_opr_set_tables (); + + for (insn = basic_block_head[bb]; + insn != NULL && insn != NEXT_INSN (basic_block_end[bb]); + insn = NEXT_INSN (insn)) + { + /* Is insn of form (set (pseudo-reg) ...)? */ + + if (GET_CODE (insn) == INSN + && GET_CODE (PATTERN (insn)) == SET + && GET_CODE (SET_DEST (PATTERN (insn))) == REG + && REGNO (SET_DEST (PATTERN (insn))) >= FIRST_PSEUDO_REGISTER) + { + rtx pat = PATTERN (insn); + rtx src = SET_SRC (pat); + struct expr *expr; + + if (want_to_gcse_p (src) + /* Is the expression recorded? */ + && ((expr = lookup_expr (src)) != NULL) + /* Is the expression available [at the start of the + block]? */ + && TEST_BIT (ae_in[bb], expr->bitmap_index) + /* Are the operands unchanged since the start of the + block? */ + && oprs_not_set_p (src, insn)) + changed |= handle_avail_expr (insn, expr); + } + + /* Keep track of everything modified by this insn. */ + /* ??? Need to be careful w.r.t. mods done to INSN. */ + if (GET_RTX_CLASS (GET_CODE (insn)) == 'i') + mark_oprs_set (insn); + } + } + + return changed; +} + +/* Top level routine to perform one classic GCSE pass. + + Return non-zero if a change was made. */ + +static int +one_classic_gcse_pass (f, pass) + rtx f; + int pass; +{ + int changed = 0; + + gcse_subst_count = 0; + gcse_create_count = 0; + + alloc_expr_hash_table (max_cuid); + alloc_rd_mem (n_basic_blocks, max_cuid); + compute_expr_hash_table (f); + if (gcse_file) + dump_hash_table (gcse_file, "Expression", expr_hash_table, + expr_hash_table_size, n_exprs); + if (n_exprs > 0) + { + compute_kill_rd (); + compute_rd (); + alloc_avail_expr_mem (n_basic_blocks, n_exprs); + compute_ae_gen (); + compute_ae_kill (); + compute_available (); + changed = classic_gcse (); + free_avail_expr_mem (); + } + free_rd_mem (); + free_expr_hash_table (); + + if (gcse_file) + { + fprintf (gcse_file, "\n"); + fprintf (gcse_file, "GCSE of %s, pass %d: %d bytes needed, %d substs, %d insns created\n", + current_function_name, pass, + bytes_used, gcse_subst_count, gcse_create_count); + } + + return changed; +} + +/* Compute copy/constant propagation working variables. */ + +/* Local properties of assignments. */ + +static sbitmap *cprop_pavloc; +static sbitmap *cprop_absaltered; + +/* Global properties of assignments (computed from the local properties). */ + +static sbitmap *cprop_avin; +static sbitmap *cprop_avout; + +/* Allocate vars used for copy/const propagation. + N_BLOCKS is the number of basic blocks. + N_SETS is the number of sets. */ + +static void +alloc_cprop_mem (n_blocks, n_sets) + int n_blocks, n_sets; +{ + cprop_pavloc = sbitmap_vector_alloc (n_blocks, n_sets); + cprop_absaltered = sbitmap_vector_alloc (n_blocks, n_sets); + + cprop_avin = sbitmap_vector_alloc (n_blocks, n_sets); + cprop_avout = sbitmap_vector_alloc (n_blocks, n_sets); +} + +/* Free vars used by copy/const propagation. */ + +static void +free_cprop_mem () +{ + free (cprop_pavloc); + free (cprop_absaltered); + free (cprop_avin); + free (cprop_avout); +} + +/* Dump copy/const propagation data. 
*/ + +static void +dump_cprop_data (file) + FILE *file; +{ + dump_sbitmap_vector (file, "CPROP partially locally available sets", "BB", + cprop_pavloc, n_basic_blocks); + dump_sbitmap_vector (file, "CPROP absolutely altered sets", "BB", + cprop_absaltered, n_basic_blocks); + + dump_sbitmap_vector (file, "CPROP available incoming sets", "BB", + cprop_avin, n_basic_blocks); + dump_sbitmap_vector (file, "CPROP available outgoing sets", "BB", + cprop_avout, n_basic_blocks); +} + +/* For each block, compute whether X is transparent. + X is either an expression or an assignment [though we don't care which, + for this context an assignment is treated as an expression]. + For each block where an element of X is modified, set (SET_P == 1) or reset + (SET_P == 0) the INDX bit in BMAP. */ + +static void +compute_transp (x, indx, bmap, set_p) + rtx x; + int indx; + sbitmap *bmap; + int set_p; +{ + int bb,i; + enum rtx_code code; + char *fmt; + + /* repeat is used to turn tail-recursion into iteration. */ + repeat: + + if (x == 0) + return; + + code = GET_CODE (x); + switch (code) + { + case REG: + { + reg_set *r; + int regno = REGNO (x); + + if (set_p) + { + if (regno < FIRST_PSEUDO_REGISTER) + { + for (bb = 0; bb < n_basic_blocks; bb++) + if (TEST_BIT (reg_set_in_block[bb], regno)) + SET_BIT (bmap[bb], indx); + } + else + { + for (r = reg_set_table[regno]; r != NULL; r = r->next) + { + bb = BLOCK_NUM (r->insn); + SET_BIT (bmap[bb], indx); + } + } + } + else + { + if (regno < FIRST_PSEUDO_REGISTER) + { + for (bb = 0; bb < n_basic_blocks; bb++) + if (TEST_BIT (reg_set_in_block[bb], regno)) + RESET_BIT (bmap[bb], indx); + } + else + { + for (r = reg_set_table[regno]; r != NULL; r = r->next) + { + bb = BLOCK_NUM (r->insn); + RESET_BIT (bmap[bb], indx); + } + } + } + return; + } + + case MEM: + if (set_p) + { + for (bb = 0; bb < n_basic_blocks; bb++) + if (mem_set_in_block[bb]) + SET_BIT (bmap[bb], indx); + } + else + { + for (bb = 0; bb < n_basic_blocks; bb++) + if (mem_set_in_block[bb]) + RESET_BIT (bmap[bb], indx); + } + x = XEXP (x, 0); + goto repeat; + + case PC: + case CC0: /*FIXME*/ + case CONST: + case CONST_INT: + case CONST_DOUBLE: + case SYMBOL_REF: + case LABEL_REF: + case ADDR_VEC: + case ADDR_DIFF_VEC: + return; + + default: + break; + } + + i = GET_RTX_LENGTH (code) - 1; + fmt = GET_RTX_FORMAT (code); + for (; i >= 0; i--) + { + if (fmt[i] == 'e') + { + rtx tem = XEXP (x, i); + + /* If we are about to do the last recursive call + needed at this level, change it into iteration. + This function is called enough to be worth it. */ + if (i == 0) + { + x = tem; + goto repeat; + } + compute_transp (tem, indx, bmap, set_p); + } + else if (fmt[i] == 'E') + { + int j; + for (j = 0; j < XVECLEN (x, i); j++) + compute_transp (XVECEXP (x, i, j), indx, bmap, set_p); + } + } +} + +static void +compute_cprop_local_properties () +{ + int i; + + sbitmap_vector_zero (cprop_absaltered, n_basic_blocks); + sbitmap_vector_zero (cprop_pavloc, n_basic_blocks); + + for (i = 0; i < set_hash_table_size; i++) + { + struct expr *expr; + + for (expr = set_hash_table[i]; expr != NULL; expr = expr->next_same_hash) + { + struct occr *occr; + int indx = expr->bitmap_index; + + /* The assignment is absolutely altered if any operand is modified + by this block [excluding the assignment itself]. + We start by assuming all are transparent [none are killed], and + then setting the bits for those that are. 
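+   Note this is the opposite sense from PRE's TRANSP below: here
+   compute_transp is called with SET_P == 1 to set bits in ABSALTERED,
+   while compute_pre_local_properties calls it with SET_P == 0 to clear
+   bits in TRANSP.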
*/
+
+          compute_transp (expr->expr, indx, cprop_absaltered, 1);
+
+          /* The occurrences recorded in avail_occr are exactly those that
+             we want to set to non-zero in PAVLOC. */
+
+          for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
+            {
+              int bb = BLOCK_NUM (occr->insn);
+              SET_BIT (cprop_pavloc[bb], indx);
+            }
+        }
+    }
+}
+
+static void
+compute_cprop_avinout ()
+{
+  int bb, changed, passes;
+
+  sbitmap_zero (cprop_avin[0]);
+  sbitmap_vector_ones (cprop_avout, n_basic_blocks);
+
+  passes = 0;
+  changed = 1;
+  while (changed)
+    {
+      changed = 0;
+      for (bb = 0; bb < n_basic_blocks; bb++)
+        {
+          if (bb != 0)
+            sbitmap_intersect_of_predecessors (cprop_avin[bb], cprop_avout,
+                                               bb, s_preds);
+          changed |= sbitmap_union_of_diff (cprop_avout[bb],
+                                            cprop_pavloc[bb],
+                                            cprop_avin[bb],
+                                            cprop_absaltered[bb]);
+        }
+      passes++;
+    }
+
+  if (gcse_file)
+    fprintf (gcse_file, "cprop avail expr computation: %d passes\n", passes);
+}
+
+/* Top level routine to do the dataflow analysis needed by copy/const
+   propagation. */
+
+static void
+compute_cprop_data ()
+{
+  compute_cprop_local_properties ();
+  compute_cprop_avinout ();
+}
+
+/* Copy/constant propagation. */
+
+struct reg_use {
+  rtx reg_rtx;
+};
+
+/* Maximum number of register uses in an insn that we handle. */
+#define MAX_USES 8
+
+/* Table of uses found in an insn.
+   Allocated statically to avoid alloc/free complexity and overhead. */
+static struct reg_use reg_use_table[MAX_USES];
+
+/* Index into `reg_use_table' while building it. */
+static int reg_use_count;
+
+/* Set up a list of register numbers used in INSN.
+   The found uses are stored in `reg_use_table'.
+   `reg_use_count' is initialized to zero before entry, and
+   contains the number of uses in the table upon exit.
+
+   ??? If a register appears multiple times we will record it multiple
+   times. This doesn't hurt anything but it will slow things down. */
+
+static void
+find_used_regs (x)
+     rtx x;
+{
+  int i;
+  enum rtx_code code;
+  char *fmt;
+
+  /* repeat is used to turn tail-recursion into iteration. */
+ repeat:
+
+  if (x == 0)
+    return;
+
+  code = GET_CODE (x);
+  switch (code)
+    {
+    case REG:
+      if (reg_use_count == MAX_USES)
+        return;
+      reg_use_table[reg_use_count].reg_rtx = x;
+      reg_use_count++;
+      return;
+
+    case MEM:
+      x = XEXP (x, 0);
+      goto repeat;
+
+    case PC:
+    case CC0:
+    case CONST:
+    case CONST_INT:
+    case CONST_DOUBLE:
+    case SYMBOL_REF:
+    case LABEL_REF:
+    case CLOBBER:
+    case ADDR_VEC:
+    case ADDR_DIFF_VEC:
+    case ASM_INPUT: /*FIXME*/
+      return;
+
+    case SET:
+      if (GET_CODE (SET_DEST (x)) == MEM)
+        find_used_regs (SET_DEST (x));
+      x = SET_SRC (x);
+      goto repeat;
+
+    default:
+      break;
+    }
+
+  /* Recursively scan the operands of this expression. */
+
+  fmt = GET_RTX_FORMAT (code);
+  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+    {
+      if (fmt[i] == 'e')
+        {
+          /* If we are about to do the last recursive call
+             needed at this level, change it into iteration.
+             This function is called enough to be worth it. */
+          if (i == 0)
+            {
+              x = XEXP (x, 0);
+              goto repeat;
+            }
+          find_used_regs (XEXP (x, i));
+        }
+      else if (fmt[i] == 'E')
+        {
+          int j;
+          for (j = 0; j < XVECLEN (x, i); j++)
+            find_used_regs (XVECEXP (x, i, j));
+        }
+    }
+}
+
+/* Try to replace all non-SET_DEST occurrences of FROM in INSN with TO.
+   Returns non-zero if successful. */
+
+static int
+try_replace_reg (from, to, insn)
+     rtx from, to, insn;
+{
+  return validate_replace_src (from, to, insn);
+}
+
+/* Find a set of REGNO that is available on entry to INSN's block.
+   Returns NULL if not found.
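+   More than one entry can match REGNO; the first whose bit is set in
+   CPROP_AVIN for INSN's block is the one returned.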
*/
+
+static struct expr *
+find_avail_set (regno, insn)
+     int regno;
+     rtx insn;
+{
+  struct expr *set = lookup_set (regno, NULL_RTX);
+
+  while (set)
+    {
+      if (TEST_BIT (cprop_avin[BLOCK_NUM (insn)], set->bitmap_index))
+        break;
+      set = next_set (regno, set);
+    }
+
+  return set;
+}
+
+/* Perform constant and copy propagation on INSN.
+   The result is non-zero if a change was made. */
+
+static int
+cprop_insn (insn)
+     rtx insn;
+{
+  struct reg_use *reg_used;
+  int changed = 0;
+
+  /* ??? For now only propagate into SETs. */
+  if (GET_CODE (insn) != INSN
+      || GET_CODE (PATTERN (insn)) != SET)
+    return 0;
+
+  reg_use_count = 0;
+  find_used_regs (PATTERN (insn));
+
+  reg_used = &reg_use_table[0];
+  for ( ; reg_use_count > 0; reg_used++, reg_use_count--)
+    {
+      rtx pat, src;
+      struct expr *set;
+      int regno = REGNO (reg_used->reg_rtx);
+
+      /* Ignore registers created by GCSE.
+         We do this because ... */
+      if (regno >= max_gcse_regno)
+        continue;
+
+      /* If the register has already been set in this block, there's
+         nothing we can do. */
+      if (! oprs_not_set_p (reg_used->reg_rtx, insn))
+        continue;
+
+      /* Find an assignment that sets reg_used and is available
+         at the start of the block. */
+      set = find_avail_set (regno, insn);
+      if (! set)
+        continue;
+
+      pat = set->expr;
+      /* ??? We might be able to handle PARALLELs. Later. */
+      if (GET_CODE (pat) != SET)
+        abort ();
+      src = SET_SRC (pat);
+
+      if (GET_CODE (src) == CONST_INT)
+        {
+          if (try_replace_reg (reg_used->reg_rtx, src, insn))
+            {
+              changed = 1;
+              const_prop_count++;
+              if (gcse_file != NULL)
+                {
+                  fprintf (gcse_file, "CONST-PROP: Replacing reg %d in insn %d with constant ",
+                           regno, INSN_UID (insn));
+                  fprintf (gcse_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (src));
+                  fprintf (gcse_file, "\n");
+                }
+
+              /* The original insn setting reg_used may or may not now be
+                 deletable. We leave the deletion to flow. */
+            }
+        }
+      else if (GET_CODE (src) == REG
+               && REGNO (src) >= FIRST_PSEUDO_REGISTER
+               && REGNO (src) != regno)
+        {
+          /* We know the set is available.
+             Now check that SET_SRC is ANTLOC (i.e. none of the source operands
+             have changed since the start of the block). */
+          if (oprs_not_set_p (src, insn))
+            {
+              if (try_replace_reg (reg_used->reg_rtx, src, insn))
+                {
+                  changed = 1;
+                  copy_prop_count++;
+                  if (gcse_file != NULL)
+                    {
+                      fprintf (gcse_file, "COPY-PROP: Replacing reg %d in insn %d with reg %d\n",
+                               regno, INSN_UID (insn), REGNO (src));
+                    }
+
+                  /* The original insn setting reg_used may or may not now be
+                     deletable. We leave the deletion to flow. */
+                  /* FIXME: If it turns out that the insn isn't deletable,
+                     then we may have unnecessarily extended register lifetimes
+                     and made things worse. */
+                }
+            }
+        }
+    }
+
+  return changed;
+}
+
+/* Forward propagate copies.
+   This includes copies and constants.
+   Return non-zero if a change was made. */
+
+static int
+cprop ()
+{
+  int bb, changed;
+  rtx insn;
+
+  /* Note we start at block 1: CPROP_AVIN of block 0 is always empty. */
+
+  changed = 0;
+  for (bb = 1; bb < n_basic_blocks; bb++)
+    {
+      /* Reset tables used to keep track of what's still valid [since the
+         start of the block]. */
+      reset_opr_set_tables ();
+
+      for (insn = basic_block_head[bb];
+           insn != NULL && insn != NEXT_INSN (basic_block_end[bb]);
+           insn = NEXT_INSN (insn))
+        {
+          if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+            {
+              changed |= cprop_insn (insn);
+
+              /* Keep track of everything modified by this insn. */
+              /* ??? Need to be careful w.r.t. mods done to INSN.
*/ + mark_oprs_set (insn); + } + } + } + + if (gcse_file != NULL) + fprintf (gcse_file, "\n"); + + return changed; +} + +/* Perform one copy/constant propagation pass. + F is the first insn in the function. + PASS is the pass count. */ + +static int +one_cprop_pass (f, pass) + rtx f; + int pass; +{ + int changed = 0; + + const_prop_count = 0; + copy_prop_count = 0; + + alloc_set_hash_table (max_cuid); + compute_set_hash_table (f); + if (gcse_file) + dump_hash_table (gcse_file, "SET", set_hash_table, set_hash_table_size, + n_sets); + if (n_sets > 0) + { + alloc_cprop_mem (n_basic_blocks, n_sets); + compute_cprop_data (); + changed = cprop (); + free_cprop_mem (); + } + free_set_hash_table (); + + if (gcse_file) + { + fprintf (gcse_file, "CPROP of %s, pass %d: %d bytes needed, %d const props, %d copy props\n", + current_function_name, pass, + bytes_used, const_prop_count, copy_prop_count); + fprintf (gcse_file, "\n"); + } + + return changed; +} + +/* Compute PRE working variables. */ + +/* Local properties of expressions. */ +/* Nonzero for expressions that are transparent in the block. */ +static sbitmap *pre_transp; +/* Nonzero for expressions that are computed (available) in the block. */ +static sbitmap *pre_comp; +/* Nonzero for expressions that are locally anticipatable in the block. */ +static sbitmap *pre_antloc; + +/* Global properties (computed from the expression local properties). */ +/* Nonzero for expressions that are available on block entry/exit. */ +static sbitmap *pre_avin; +static sbitmap *pre_avout; +/* Nonzero for expressions that are anticipatable on block entry/exit. */ +static sbitmap *pre_antin; +static sbitmap *pre_antout; +/* Nonzero for expressions that are partially available on block entry/exit. */ +static sbitmap *pre_pavin; +static sbitmap *pre_pavout; +/* Nonzero for expressions that are "placement possible" on block entry/exit. */ +static sbitmap *pre_ppin; +static sbitmap *pre_ppout; + +/* Used while performing PRE to denote which insns are redundant. */ +static sbitmap pre_redundant; + +/* Allocate vars used for PRE analysis. */ + +static void +alloc_pre_mem (n_blocks, n_exprs) + int n_blocks, n_exprs; +{ + pre_transp = sbitmap_vector_alloc (n_blocks, n_exprs); + pre_comp = sbitmap_vector_alloc (n_blocks, n_exprs); + pre_antloc = sbitmap_vector_alloc (n_blocks, n_exprs); + + pre_avin = sbitmap_vector_alloc (n_blocks, n_exprs); + pre_avout = sbitmap_vector_alloc (n_blocks, n_exprs); + pre_antin = sbitmap_vector_alloc (n_blocks, n_exprs); + pre_antout = sbitmap_vector_alloc (n_blocks, n_exprs); + + pre_pavin = sbitmap_vector_alloc (n_blocks, n_exprs); + pre_pavout = sbitmap_vector_alloc (n_blocks, n_exprs); + pre_ppin = sbitmap_vector_alloc (n_blocks, n_exprs); + pre_ppout = sbitmap_vector_alloc (n_blocks, n_exprs); +} + +/* Free vars used for PRE analysis. */ + +static void +free_pre_mem () +{ + free (pre_transp); + free (pre_comp); + free (pre_antloc); + + free (pre_avin); + free (pre_avout); + free (pre_antin); + free (pre_antout); + + free (pre_pavin); + free (pre_pavout); + free (pre_ppin); + free (pre_ppout); +} + +/* Dump PRE data. 
*/ + +static void +dump_pre_data (file) + FILE *file; +{ + dump_sbitmap_vector (file, "PRE locally transparent expressions", "BB", + pre_transp, n_basic_blocks); + dump_sbitmap_vector (file, "PRE locally available expressions", "BB", + pre_comp, n_basic_blocks); + dump_sbitmap_vector (file, "PRE locally anticipatable expressions", "BB", + pre_antloc, n_basic_blocks); + + dump_sbitmap_vector (file, "PRE available incoming expressions", "BB", + pre_avin, n_basic_blocks); + dump_sbitmap_vector (file, "PRE available outgoing expressions", "BB", + pre_avout, n_basic_blocks); + dump_sbitmap_vector (file, "PRE anticipatable incoming expressions", "BB", + pre_antin, n_basic_blocks); + dump_sbitmap_vector (file, "PRE anticipatable outgoing expressions", "BB", + pre_antout, n_basic_blocks); + + dump_sbitmap_vector (file, "PRE partially available incoming expressions", "BB", + pre_pavin, n_basic_blocks); + dump_sbitmap_vector (file, "PRE partially available outgoing expressions", "BB", + pre_pavout, n_basic_blocks); + dump_sbitmap_vector (file, "PRE placement possible on incoming", "BB", + pre_ppin, n_basic_blocks); + dump_sbitmap_vector (file, "PRE placement possible on outgoing", "BB", + pre_ppout, n_basic_blocks); +} + +/* Compute the local properties of each recorded expression. + Local properties are those that are defined by the block, irrespective + of other blocks. + + An expression is transparent in a block if its operands are not modified + in the block. + + An expression is computed (locally available) in a block if it is computed + at least once and expression would contain the same value if the + computation was moved to the end of the block. + + An expression is locally anticipatable in a block if it is computed at + least once and expression would contain the same value if the computation + was moved to the beginning of the block. */ + +static void +compute_pre_local_properties () +{ + int i; + + sbitmap_vector_ones (pre_transp, n_basic_blocks); + sbitmap_vector_zero (pre_comp, n_basic_blocks); + sbitmap_vector_zero (pre_antloc, n_basic_blocks); + + for (i = 0; i < expr_hash_table_size; i++) + { + struct expr *expr; + + for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash) + { + struct occr *occr; + int indx = expr->bitmap_index; + + /* The expression is transparent in this block if it is not killed. + We start by assuming all are transparent [none are killed], and then + reset the bits for those that are. */ + + compute_transp (expr->expr, indx, pre_transp, 0); + + /* The occurrences recorded in antic_occr are exactly those that + we want to set to non-zero in ANTLOC. */ + + for (occr = expr->antic_occr; occr != NULL; occr = occr->next) + { + int bb = BLOCK_NUM (occr->insn); + SET_BIT (pre_antloc[bb], indx); + + /* While we're scanning the table, this is a good place to + initialize this. */ + occr->deleted_p = 0; + } + + /* The occurrences recorded in avail_occr are exactly those that + we want to set to non-zero in COMP. */ + + for (occr = expr->avail_occr; occr != NULL; occr = occr->next) + { + int bb = BLOCK_NUM (occr->insn); + SET_BIT (pre_comp[bb], indx); + + /* While we're scanning the table, this is a good place to + initialize this. */ + occr->copied_p = 0; + } + + /* While we're scanning the table, this is a good place to + initialize this. */ + expr->reaching_reg = 0; + } + } +} + +/* Compute expression availability at entrance and exit of each block. 
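+   This is a standard forward dataflow problem, iterated to a fixed point:
+   AVIN[b] = intersection of AVOUT over b's predecessors (empty for block 0),
+   AVOUT[b] = COMP[b] | (TRANSP[b] & AVIN[b]).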
*/ + +static void +compute_pre_avinout () +{ + int bb, changed, passes; + + sbitmap_zero (pre_avin[0]); + sbitmap_vector_ones (pre_avout, n_basic_blocks); + + passes = 0; + changed = 1; + while (changed) + { + changed = 0; + for (bb = 0; bb < n_basic_blocks; bb++) + { + if (bb != 0) + sbitmap_intersect_of_predecessors (pre_avin[bb], pre_avout, + bb, s_preds); + changed |= sbitmap_a_or_b_and_c (pre_avout[bb], pre_comp[bb], + pre_transp[bb], pre_avin[bb]); + } + passes++; + } + + if (gcse_file) + fprintf (gcse_file, "avail expr computation: %d passes\n", passes); +} + +/* Compute expression anticipatability at entrance and exit of each block. */ + +static void +compute_pre_antinout () +{ + int bb, changed, passes; + + sbitmap_zero (pre_antout[n_basic_blocks - 1]); + sbitmap_vector_ones (pre_antin, n_basic_blocks); + + passes = 0; + changed = 1; + while (changed) + { + changed = 0; + /* We scan the blocks in the reverse order to speed up + the convergence. */ + for (bb = n_basic_blocks - 1; bb >= 0; bb--) + { + if (bb != n_basic_blocks - 1) + sbitmap_intersect_of_successors (pre_antout[bb], pre_antin, + bb, s_succs); + changed |= sbitmap_a_or_b_and_c (pre_antin[bb], pre_antloc[bb], + pre_transp[bb], pre_antout[bb]); + } + passes++; + } + + if (gcse_file) + fprintf (gcse_file, "antic expr computation: %d passes\n", passes); +} + +/* Compute expression partial availability at entrance and exit of + each block. */ + +static void +compute_pre_pavinout () +{ + int bb, changed, passes; + + sbitmap_zero (pre_pavin[0]); + sbitmap_vector_zero (pre_pavout, n_basic_blocks); + + passes = 0; + changed = 1; + while (changed) + { + changed = 0; + for (bb = 0; bb < n_basic_blocks; bb++) + { + if (bb != 0) + sbitmap_union_of_predecessors (pre_pavin[bb], pre_pavout, + bb, s_preds); + changed |= sbitmap_a_or_b_and_c (pre_pavout[bb], pre_comp[bb], + pre_transp[bb], pre_pavin[bb]); + } + passes++; + } + + if (gcse_file) + fprintf (gcse_file, "partially avail expr computation: %d passes\n", passes); +} + +/* Compute "placement possible" information on entrance and exit of + each block. + + From Fred Chow's Thesis: + A computation `e' is PP at a point `p' if it is anticipated at `p' and + all the anticipated e's can be rendered redundant by zero or more + insertions at that point and some other points in the procedure, and + these insertions satisfy the conditions that the insertions are always + at points that `e' is anticipated and the first anticipated e's after the + insertions are rendered redundant. */ + +static void +compute_pre_ppinout () +{ + int bb, i, changed, size, passes; + + sbitmap_vector_ones (pre_ppin, n_basic_blocks); + /* ??? Inefficient as we set pre_ppin[0] twice, but simple. */ + sbitmap_zero (pre_ppin[0]); + + sbitmap_vector_ones (pre_ppout, n_basic_blocks); + /* ??? Inefficient as we set pre_ppout[n_basic_blocks-1] twice, but simple. 
*/
+  sbitmap_zero (pre_ppout[n_basic_blocks - 1]);
+
+  size = pre_ppin[0]->size;
+  passes = 0;
+  changed = 1;
+  while (changed)
+    {
+      changed = 0;
+      for (bb = 1; bb < n_basic_blocks; bb++)
+        {
+          sbitmap_ptr antin = pre_antin[bb]->elms;
+          sbitmap_ptr pavin = pre_pavin[bb]->elms;
+          sbitmap_ptr antloc = pre_antloc[bb]->elms;
+          sbitmap_ptr transp = pre_transp[bb]->elms;
+          sbitmap_ptr ppout = pre_ppout[bb]->elms;
+          sbitmap_ptr ppin = pre_ppin[bb]->elms;
+
+          for (i = 0; i < size; i++)
+            {
+              int_list_ptr pred;
+              SBITMAP_ELT_TYPE tmp = *antin & *pavin & (*antloc | (*transp & *ppout));
+              SBITMAP_ELT_TYPE pred_val = -1L;
+
+              for (pred = s_preds[bb]; pred != NULL; pred = pred->next)
+                {
+                  int pred_bb = INT_LIST_VAL (pred);
+                  sbitmap_ptr ppout_j, avout_j;
+
+                  if (pred_bb == ENTRY_BLOCK)
+                    continue;
+
+                  /* If this is a back edge, propagate info along the back
+                     edge to allow for loop invariant code motion.
+
+                     See FOLLOW_BACK_EDGES at the top of this file for a longer
+                     discussion about loop invariant code motion in pre. */
+                  if (! FOLLOW_BACK_EDGES
+                      && (INSN_CUID (BLOCK_HEAD (bb))
+                          < INSN_CUID (BLOCK_END (pred_bb))))
+                    {
+                      pred_val = 0;
+                    }
+                  else
+                    {
+                      ppout_j = pre_ppout[pred_bb]->elms + i;
+                      avout_j = pre_avout[pred_bb]->elms + i;
+                      pred_val &= *ppout_j | *avout_j;
+                    }
+                }
+              tmp &= pred_val;
+              *ppin = tmp;
+              antin++; pavin++; antloc++; transp++; ppout++; ppin++;
+            }
+        }
+
+      for (bb = 0; bb < n_basic_blocks - 1; bb++)
+        {
+          sbitmap_ptr ppout = pre_ppout[bb]->elms;
+
+          for (i = 0; i < size; i++)
+            {
+              int_list_ptr succ;
+              SBITMAP_ELT_TYPE tmp = -1L;
+
+              for (succ = s_succs[bb]; succ != NULL; succ = succ->next)
+                {
+                  int succ_bb = INT_LIST_VAL (succ);
+                  sbitmap_ptr ppin;
+
+                  if (succ_bb == EXIT_BLOCK)
+                    continue;
+
+                  ppin = pre_ppin[succ_bb]->elms + i;
+                  tmp &= *ppin;
+                }
+              if (*ppout != tmp)
+                {
+                  changed = 1;
+                  *ppout++ = tmp;
+                }
+              else
+                ppout++;
+            }
+        }
+
+      passes++;
+    }
+
+  if (gcse_file)
+    fprintf (gcse_file, "placement possible computation: %d passes\n", passes);
+}
+
+/* Top level routine to do the dataflow analysis needed by PRE. */
+
+static void
+compute_pre_data ()
+{
+  compute_pre_local_properties ();
+  compute_pre_avinout ();
+  compute_pre_antinout ();
+  compute_pre_pavinout ();
+  compute_pre_ppinout ();
+  if (gcse_file)
+    fprintf (gcse_file, "\n");
+}
+
+/* PRE utilities */
+
+/* Return non-zero if occurrence OCCR of expression EXPR reaches block BB.
+
+   VISITED is a pointer to a working buffer for tracking which BB's have
+   been visited. It is NULL for the top-level call.
+
+   We treat reaching expressions that go through blocks containing the same
+   reaching expression as "not reaching". E.g. if EXPR is generated in blocks
+   2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
+   2 as not reaching. The intent is to improve the probability of finding
+   only one reaching expression and to reduce register lifetimes by picking
+   the closest such expression. */
+
+static int
+pre_expr_reaches_here_p (occr, expr, bb, visited)
+     struct occr *occr;
+     struct expr *expr;
+     int bb;
+     char *visited;
+{
+  int_list_ptr pred;
+
+  if (visited == NULL)
+    {
+      visited = (char *) alloca (n_basic_blocks);
+      bzero (visited, n_basic_blocks);
+    }
+
+  for (pred = s_preds[bb]; pred != NULL; pred = pred->next)
+    {
+      int pred_bb = INT_LIST_VAL (pred);
+
+      if (pred_bb == ENTRY_BLOCK
+          /* Has this predecessor already been visited? */
+          || visited[pred_bb])
+        {
+          /* Nothing to do. */
+        }
+      /* Does this predecessor generate this expression?
+      else if (TEST_BIT (pre_comp[pred_bb], expr->bitmap_index))
+        {
+          /* Is this the occurrence we're looking for?
+             Note that there's only one generating occurrence per block
+             so we just need to check the block number.  */
+          if (BLOCK_NUM (occr->insn) == pred_bb)
+            return 1;
+          visited[pred_bb] = 1;
+        }
+      /* Ignore this predecessor if it kills the expression.  */
+      else if (! TEST_BIT (pre_transp[pred_bb], expr->bitmap_index))
+        visited[pred_bb] = 1;
+      /* Neither gen nor kill.  */
+      else
+        {
+          visited[pred_bb] = 1;
+          if (pre_expr_reaches_here_p (occr, expr, pred_bb, visited))
+            return 1;
+        }
+    }
+
+  /* All paths have been checked.  */
+  return 0;
+}
+
+/* Add EXPR to the end of basic block BB.  */
+
+static void
+pre_insert_insn (expr, bb)
+     struct expr *expr;
+     int bb;
+{
+  rtx insn = BLOCK_END (bb);
+  rtx new_insn;
+  rtx reg = expr->reaching_reg;
+  int regno = REGNO (reg);
+  rtx pat;
+
+  pat = gen_rtx (SET, VOIDmode, reg, copy_rtx (expr->expr));
+
+  /* If the last insn is a jump, insert EXPR in front [taking care to
+     handle cc0, etc. properly].  */
+
+  if (GET_CODE (insn) == JUMP_INSN)
+    {
+      rtx note;
+
+      /* If this is a jump table, then we can't insert stuff here.  Since
+         we know the previous real insn must be the tablejump, we insert
+         the new instruction just before the tablejump.  */
+      if (GET_CODE (PATTERN (insn)) == ADDR_VEC
+          || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
+        insn = prev_real_insn (insn);
+
+#ifdef HAVE_cc0
+      /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
+         if cc0 isn't set.  */
+      note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
+      if (note)
+        insn = XEXP (note, 0);
+      else
+        {
+          rtx maybe_cc0_setter = prev_nonnote_insn (insn);
+          if (maybe_cc0_setter
+              && GET_RTX_CLASS (GET_CODE (maybe_cc0_setter)) == 'i'
+              && sets_cc0_p (PATTERN (maybe_cc0_setter)))
+            insn = maybe_cc0_setter;
+        }
+#endif
+      /* FIXME: What if something in cc0/jump uses value set in new insn?  */
+      new_insn = emit_insn_before (pat, insn);
+      if (BLOCK_HEAD (bb) == insn)
+        BLOCK_HEAD (bb) = new_insn;
+      /* Keep block number table up to date.  */
+      set_block_num (new_insn, bb);
+      /* Keep register set table up to date.  */
+      record_one_set (regno, new_insn);
+    }
+  else
+    {
+      new_insn = emit_insn_after (pat, insn);
+      BLOCK_END (bb) = new_insn;
+      /* Keep block number table up to date.  */
+      set_block_num (new_insn, bb);
+      /* Keep register set table up to date.  */
+      record_one_set (regno, new_insn);
+    }
+
+  gcse_create_count++;
+
+  if (gcse_file)
+    {
+      fprintf (gcse_file, "PRE: end of bb %d, insn %d, copying expression %d to reg %d\n",
+               bb, INSN_UID (new_insn), expr->bitmap_index, regno);
+    }
+}
+
+/* Insert partially redundant expressions at the ends of appropriate basic
+   blocks, making them fully redundant.  */
+
+static void
+pre_insert (index_map)
+     struct expr **index_map;
+{
+  int bb, i, size;
+
+  /* Compute INSERT = PPOUT & (~AVOUT) & (~PPIN | ~TRANSP) for each
+     expression.  Where INSERT == TRUE, add the expression at the end of
+     the basic block.  */
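+
+  /* In words: insert at the end of a block when placement there is
+     possible (PPOUT), the expression isn't already available on exit
+     (~AVOUT), and the insertion couldn't instead have been made at the
+     block's entrance and carried through it (~PPIN | ~TRANSP).  */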
+
+  size = pre_ppout[0]->size;
+  for (bb = 0; bb < n_basic_blocks; bb++)
+    {
+      int indx;
+      sbitmap_ptr ppout = pre_ppout[bb]->elms;
+      sbitmap_ptr avout = pre_avout[bb]->elms;
+      sbitmap_ptr ppin = pre_ppin[bb]->elms;
+      sbitmap_ptr transp = pre_transp[bb]->elms;
+
+      for (i = indx = 0;
+           i < size;
+           i++, indx += SBITMAP_ELT_BITS, ppout++, avout++, ppin++, transp++)
+        {
+          int j;
+          SBITMAP_ELT_TYPE insert = *ppout & (~*avout) & (~*ppin | ~*transp);
+
+          for (j = indx; insert != 0 && j < n_exprs; j++, insert >>= 1)
+            {
+              if ((insert & 1) != 0
+                  /* If the basic block isn't reachable, PPOUT will be TRUE.
+                     However, we don't want to insert a copy here because the
+                     expression may not really be redundant.  So only insert
+                     an insn if the expression was deleted.  */
+                  && index_map[j]->reaching_reg != NULL)
+                pre_insert_insn (index_map[j], bb);
+            }
+        }
+    }
+}
+
+/* Copy the result of INSN to REG.
+   INDX is the expression number.  */
+
+static void
+pre_insert_copy_insn (expr, insn)
+     struct expr *expr;
+     rtx insn;
+{
+  rtx reg = expr->reaching_reg;
+  int regno = REGNO (reg);
+  int indx = expr->bitmap_index;
+  rtx set = single_set (insn);
+  rtx new_insn;
+
+  if (!set)
+    abort ();
+  new_insn = emit_insn_after (gen_rtx (SET, VOIDmode, reg, SET_DEST (set)),
+                              insn);
+  /* Keep block number table up to date.  */
+  set_block_num (new_insn, BLOCK_NUM (insn));
+  /* Keep register set table up to date.  */
+  record_one_set (regno, new_insn);
+
+  gcse_create_count++;
+
+  if (gcse_file)
+    {
+      fprintf (gcse_file, "PRE: bb %d, insn %d, copying expression %d in insn %d to reg %d\n",
+               BLOCK_NUM (insn), INSN_UID (new_insn), indx, INSN_UID (insn), regno);
+    }
+}
+
+/* Copy available expressions that reach the redundant expression
+   to `reaching_reg'.  */
+
+static void
+pre_insert_copies ()
+{
+  int i;
+
+  /* For each available expression in the table, copy the result to
+     `reaching_reg' if the expression reaches a deleted one.
+
+     ??? The current algorithm is rather brute force.
+     Need to do some profiling.  */
+
+  for (i = 0; i < expr_hash_table_size; i++)
+    {
+      struct expr *expr;
+
+      for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
+        {
+          struct occr *occr;
+
+          /* If the basic block isn't reachable, PPOUT will be TRUE.
+             However, we don't want to insert a copy here because the
+             expression may not really be redundant.  So only insert
+             an insn if the expression was deleted.
+             This test also avoids further processing if the expression
+             wasn't deleted anywhere.  */
+          if (expr->reaching_reg == NULL)
+            continue;
+
+          for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
+            {
+              struct occr *avail;
+
+              if (! occr->deleted_p)
+                continue;
+
+              for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
+                {
+                  rtx insn = avail->insn;
+
+                  /* No need to handle this one if handled already.  */
+                  if (avail->copied_p)
+                    continue;
+                  /* Don't handle this one if it's a redundant one.  */
+                  if (TEST_BIT (pre_redundant, INSN_CUID (insn)))
+                    continue;
+                  /* Or if the expression doesn't reach the deleted one.  */
+                  if (! pre_expr_reaches_here_p (avail, expr,
+                                                 BLOCK_NUM (occr->insn),
+                                                 NULL))
+                    continue;
+
+                  /* Copy the result of avail to reaching_reg.  */
+                  pre_insert_copy_insn (expr, insn);
+                  avail->copied_p = 1;
+                }
+            }
+        }
+    }
+}
+
+/* Delete redundant computations.
+   These are ones that satisfy ANTLOC & PPIN.
+   Deletion is done by changing the insn to copy the `reaching_reg' of
+   the expression into the result of the SET.  It is left to later passes
+   (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
+
+   Returns non-zero if a change is made.  */
+
+static int
+pre_delete ()
+{
+  int i, changed;
+
+  changed = 0;
+  for (i = 0; i < expr_hash_table_size; i++)
+    {
+      struct expr *expr;
+
+      for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
+        {
+          struct occr *occr;
+          int indx = expr->bitmap_index;
+
+          /* We only need to search antic_occr since we require
+             ANTLOC != 0.  */
+
+          for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
+            {
+              rtx insn = occr->insn;
+              rtx set;
+              int bb = BLOCK_NUM (insn);
+              sbitmap ppin = pre_ppin[bb];
+
+              if (TEST_BIT (ppin, indx))
+                {
+                  /* Create a pseudo-reg to store the result of reaching
+                     expressions into.  */
+                  if (expr->reaching_reg == NULL)
+                    expr->reaching_reg = gen_reg_rtx (GET_MODE (expr->expr));
+
+                  set = single_set (insn);
+                  if (! set)
+                    abort ();
+
+                  /* In theory this should never fail since we're creating
+                     a reg->reg copy.
+
+                     However, on the x86 some of the movXX patterns actually
+                     contain clobbers of scratch regs.  This may cause the
+                     insn created by validate_change to not match any pattern
+                     and thus cause validate_change to fail.  */
+                  if (validate_change (insn, &SET_SRC (set),
+                                       expr->reaching_reg, 0))
+                    {
+                      occr->deleted_p = 1;
+                      SET_BIT (pre_redundant, INSN_CUID (insn));
+                      changed = 1;
+                      gcse_subst_count++;
+                    }
+
+                  if (gcse_file)
+                    {
+                      fprintf (gcse_file, "PRE: redundant insn %d (expression %d) in bb %d, reaching reg is %d\n",
+                               INSN_UID (insn), indx, bb, REGNO (expr->reaching_reg));
+                    }
+                }
+            }
+        }
+    }
+
+  return changed;
+}
+
+/* Perform GCSE optimizations using PRE.
+   This is called by one_pre_gcse_pass after all the dataflow analysis
+   has been done.
+
+   This is based on the original Morel-Renvoise paper and Fred Chow's thesis.
+
+   The M-R paper uses "TRANSP" to describe an expression as being transparent
+   in a block whereas Chow's thesis uses "ALTERED".  We use TRANSP.
+
+   ??? A new pseudo reg is created to hold the reaching expression.
+   The nice thing about the classical approach is that it would try to
+   use an existing reg.  If the register can't be adequately optimized
+   [i.e. we introduce reload problems], one could add a pass here to
+   propagate the new register through the block.
+
+   ??? We don't handle single sets in PARALLELs because we're [currently]
+   not able to copy the rest of the parallel when we insert copies to create
+   full redundancies from partial redundancies.  However, there's no reason
+   why we can't handle PARALLELs in the cases where there are no partial
+   redundancies.  */
+
+static int
+pre_gcse ()
+{
+  int i;
+  int changed;
+  struct expr **index_map;
+
+  /* Compute a mapping from expression number (`bitmap_index') to
+     hash table entry.  */
+
+  index_map = (struct expr **) alloca (n_exprs * sizeof (struct expr *));
+  bzero ((char *) index_map, n_exprs * sizeof (struct expr *));
+  for (i = 0; i < expr_hash_table_size; i++)
+    {
+      struct expr *expr;
+
+      for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
+        index_map[expr->bitmap_index] = expr;
+    }
+
+  /* Reset bitmap used to track which insns are redundant.  */
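+  /* The bitmap is indexed by INSN_CUID: pre_delete sets a bit for each
+     insn it turns into a copy, and pre_insert_copies tests those bits so
+     that a redundant occurrence is never used as the source of a copy.  */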
+  pre_redundant = sbitmap_alloc (max_cuid);
+  sbitmap_zero (pre_redundant);
+
+  /* Delete the redundant insns first so that
+     - we know what register to use for the new insns and for the other
+       ones with reaching expressions
+     - we know which insns are redundant when we go to create copies  */
+  changed = pre_delete ();
+
+  /* Insert insns in places that make partially redundant expressions
+     fully redundant.  */
+  pre_insert (index_map);
+
+  /* In other places with reaching expressions, copy the expression to the
+     specially allocated pseudo-reg that reaches the redundant expression.  */
+  pre_insert_copies ();
+
+  free (pre_redundant);
+
+  return changed;
+}
+
+/* Top level routine to perform one PRE GCSE pass.
+
+   Return non-zero if a change was made.  */
+
+static int
+one_pre_gcse_pass (f, pass)
+     rtx f;
+     int pass;
+{
+  int changed = 0;
+
+  gcse_subst_count = 0;
+  gcse_create_count = 0;
+
+  alloc_expr_hash_table (max_cuid);
+  compute_expr_hash_table (f);
+  if (gcse_file)
+    dump_hash_table (gcse_file, "Expression", expr_hash_table,
+                     expr_hash_table_size, n_exprs);
+  if (n_exprs > 0)
+    {
+      alloc_pre_mem (n_basic_blocks, n_exprs);
+      compute_pre_data ();
+      changed |= pre_gcse ();
+      free_pre_mem ();
+    }
+  free_expr_hash_table ();
+
+  if (gcse_file)
+    {
+      fprintf (gcse_file, "\n");
+      fprintf (gcse_file, "PRE GCSE of %s, pass %d: %d bytes needed, %d substs, %d insns created\n",
+               current_function_name, pass,
+               bytes_used, gcse_subst_count, gcse_create_count);
+    }
+
+  return changed;
+}
diff --git a/gcc/invoke.texi b/gcc/invoke.texi
index 894ffe7c3fc..2289f3c543d 100644
--- a/gcc/invoke.texi
+++ b/gcc/invoke.texi
@@ -149,7 +149,7 @@ in the following sections.
 -fcaller-saves -fcse-follow-jumps -fcse-skip-blocks
 -fdelayed-branch -fexpensive-optimizations
 -ffast-math -ffloat-store -fforce-addr -fforce-mem
--ffunction-sections -finline-functions
+-ffunction-sections -fgcse -finline-functions
 -fkeep-inline-functions -fno-default-inline -fno-defer-pop
 -fno-function-cse -fno-inline -fno-peephole
 -fomit-frame-pointer -fregmove
@@ -1986,6 +1986,8 @@ Dump after purging ADDRESSOF, to @file{@var{file}.addressof}.
 Dump after flow analysis, to @file{@var{file}.flow}.
 @item g
 Dump after global register allocation, to @file{@var{file}.greg}.
+@item G
+Dump after GCSE, to @file{@var{file}.gcse}.
 @item j
 Dump after first jump optimization, to @file{@var{file}.jump}.
 @item J
@@ -2299,6 +2301,10 @@ performed.
 @item -frerun-loop-opt
 Run the loop optimizer twice.
 
+@item -fgcse
+Perform a global common subexpression elimination pass.
+This pass also performs global constant and copy propagation.
+
 @item -fexpensive-optimizations
 Perform a number of minor optimizations that are relatively expensive.
 
diff --git a/gcc/loop.c b/gcc/loop.c
index ce0b9b02631..8bc4ef28fac 100644
--- a/gcc/loop.c
+++ b/gcc/loop.c
@@ -400,7 +400,6 @@ loop_optimize (f, dumpfile, unroll_p)
   loop_dump_stream = dumpfile;
 
   init_recog_no_volatile ();
-  init_alias_analysis ();
 
   max_reg_before_loop = max_reg_num ();
 
@@ -477,6 +476,13 @@ loop_optimize (f, dumpfile, unroll_p)
      function.  */
   reg_scan (f, max_reg_num (), 1);
 
+  /* This must occur after reg_scan so that registers created by gcse
+     will have entries in the register tables.
+
+     We could have added a call to reg_scan after gcse_main in toplev.c,
+     but moving this call to init_alias_analysis is more efficient.  */
+  init_alias_analysis ();
+
   /* See if we went too far.  */
   if (get_max_uid () > max_uid_for_loop)
     abort ();
diff --git a/gcc/recog.c b/gcc/recog.c
index 8b7d5ac9483..ded35f7b489 100644
--- a/gcc/recog.c
+++ b/gcc/recog.c
@@ -537,6 +537,25 @@ validate_replace_rtx (from, to, insn)
   validate_replace_rtx_1 (&PATTERN (insn), from, to, insn);
   return apply_change_group ();
 }
+
+/* Try replacing every occurrence of FROM in INSN with TO, avoiding
+   SET_DESTs.  After all changes have been made, validate by seeing if
+   INSN is still valid.  */
+
+int
+validate_replace_src (from, to, insn)
+     rtx from, to, insn;
+{
+  if ((GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
+      || GET_CODE (PATTERN (insn)) != SET)
+    abort ();
+
+  validate_replace_rtx_1 (&SET_SRC (PATTERN (insn)), from, to, insn);
+  if (GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
+    validate_replace_rtx_1 (&XEXP (SET_DEST (PATTERN (insn)), 0),
+                            from, to, insn);
+  return apply_change_group ();
+}
 
 #ifdef HAVE_cc0
 /* Return 1 if the insn using CC0 set by INSN does not contain
diff --git a/gcc/tm.texi b/gcc/tm.texi
index 9ae707403b5..120b497dbdb 100644
--- a/gcc/tm.texi
+++ b/gcc/tm.texi
@@ -1505,6 +1505,12 @@ accessibility of the value in a narrower mode.
 You should define this macro to return nonzero in as many cases as
 possible since doing so will allow GNU CC to perform better register
 allocation.
+
+@findex AVOID_CCMODE_COPIES
+@item AVOID_CCMODE_COPIES
+Define this macro if the compiler should avoid copies to/from @code{CCmode}
+registers.  You should only define this macro if support for copying to/from
+@code{CCmode} is incomplete.
 @end table
 
 @node Leaf Functions
diff --git a/gcc/toplev.c b/gcc/toplev.c
index 2542d1a23f2..ecd11f9b66a 100644
--- a/gcc/toplev.c
+++ b/gcc/toplev.c
@@ -267,6 +267,7 @@ int rtl_dump_and_exit = 0;
 int jump_opt_dump = 0;
 int addressof_dump = 0;
 int cse_dump = 0;
+int gcse_dump = 0;
 int loop_dump = 0;
 int cse2_dump = 0;
 int branch_prob_dump = 0;
@@ -538,6 +539,10 @@ int flag_volatile_global;
 
 int flag_syntax_only = 0;
 
+/* Nonzero means perform global cse.  */
+
+static int flag_gcse;
+
 /* Nonzero means to rerun cse after loop optimization.
    This increases compilation time about 20% and picks up
    a few more common expressions.  */
@@ -738,6 +743,7 @@ struct { char *string; int *variable; int on_value;} f_options[] =
   {"pcc-struct-return", &flag_pcc_struct_return, 1},
   {"reg-struct-return", &flag_pcc_struct_return, 0},
   {"delayed-branch", &flag_delayed_branch, 1},
+  {"gcse", &flag_gcse, 1},
   {"rerun-cse-after-loop", &flag_rerun_cse_after_loop, 1},
   {"rerun-loop-opt", &flag_rerun_loop_opt, 1},
   {"pretend-float", &flag_pretend_float, 1},
@@ -977,6 +983,7 @@ int varconst_time;
 int integration_time;
 int jump_time;
 int cse_time;
+int gcse_time;
 int loop_time;
 int cse2_time;
 int branch_prob_time;
@@ -2274,6 +2281,7 @@ compile_file (name)
   integration_time = 0;
   jump_time = 0;
   cse_time = 0;
+  gcse_time = 0;
   loop_time = 0;
   cse2_time = 0;
   branch_prob_time = 0;
@@ -2360,6 +2368,8 @@ compile_file (name)
   if (dbr_sched_dump)
     clean_dump_file (".dbr");
 #endif
+  if (gcse_dump)
+    clean_dump_file (".gcse");
 #ifdef STACK_REGS
   if (stack_reg_dump)
     clean_dump_file (".stack");
@@ -2840,6 +2850,7 @@ compile_file (name)
       print_time ("integration", integration_time);
       print_time ("jump", jump_time);
       print_time ("cse", cse_time);
+      print_time ("gcse", gcse_time);
       print_time ("loop", loop_time);
       print_time ("cse2", cse2_time);
       print_time ("branch-prob", branch_prob_time);
@@ -3257,6 +3268,18 @@ rest_of_compilation (decl)
   if (addressof_dump)
     dump_rtl (".addressof", decl, print_rtl, insns);
 
+  /* Perform global cse.  */
+
+  if (optimize > 0 && flag_gcse)
+    {
+      if (gcse_dump)
+        open_dump_file (".gcse", IDENTIFIER_POINTER (DECL_NAME (decl)));
+
+      TIMEVAR (gcse_time, gcse_main (insns, rtl_dump_file));
+
+      if (gcse_dump)
+        close_dump_file (print_rtl, insns);
+    }
   /* Move constant computations out of loops.  */
 
   if (optimize > 0)
@@ -3801,6 +3824,7 @@ main (argc, argv, envp)
     {
       flag_cse_follow_jumps = 1;
       flag_cse_skip_blocks = 1;
+      flag_gcse = 1;
       flag_expensive_optimizations = 1;
       flag_strength_reduce = 1;
       flag_rerun_cse_after_loop = 1;
@@ -3879,6 +3903,7 @@ main (argc, argv, envp)
          regmove_dump = 1;
          rtl_dump = 1;
          cse_dump = 1, cse2_dump = 1;
+         gcse_dump = 1;
          sched_dump = 1;
          sched2_dump = 1;
 #ifdef STACK_REGS
@@ -3911,6 +3936,9 @@ main (argc, argv, envp)
              case 'g':
                global_reg_dump = 1;
                break;
+             case 'G':
+               gcse_dump = 1;
+               break;
              case 'j':
                jump_opt_dump = 1;
                break;
-- 
2.30.2