/* Rtl-level induction variable analysis.
- Copyright (C) 2004, 2005 Free Software Foundation, Inc.
-
+ Copyright (C) 2004-2014 Free Software Foundation, Inc.
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
-Free Software Foundation; either version 2, or (at your option) any
+Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
-02110-1301, USA. */
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
/* This is a simple analysis of induction variables of the loop. The major use
is for determining the number of iterations of a loop for loop unrolling,
doloop optimization and branch prediction. The iv information is computed
on demand.
- Induction variable is analyzed by walking the use-def chains. When a biv
- is found, it is cached in the bivs hash table. When register is proved
- to be a giv, its description is stored to DF_REF_DATA of the def reference.
+ Induction variables are analyzed by walking the use-def chains. When
+ a basic induction variable (biv) is found, it is cached in the bivs
+ hash table. When a register is proved to be a biv, its description
+ is stored to DF_REF_DATA of the def reference.
 The analysis works always with one loop -- you must call
iv_analysis_loop_init (loop) for it.  All the other functions then work
with this loop.  When you are done, call iv_analysis_done () to clean up
the memory.
The available functions are:
-
+
iv_analyze (insn, reg, iv): Stores the description of the induction variable
corresponding to the use of register REG in INSN to IV. Returns true if
REG is an induction variable in INSN. false otherwise.
iv_analyze_expr (insn, rhs, mode, iv): Stores to IV the description of iv
 corresponding to expression EXPR evaluated at INSN. All registers used by
EXPR must also be used in INSN.
- iv_current_loop_df (): Returns the dataflow object for the current loop used
- by iv analysis. */
+*/
#include "config.h"
#include "system.h"
#include "cfgloop.h"
#include "expr.h"
#include "intl.h"
-#include "output.h"
-#include "toplev.h"
+#include "diagnostic-core.h"
#include "df.h"
-#include "hashtab.h"
+#include "hash-table.h"
+#include "dumpfile.h"
/* Possible return values of iv_get_reaching_def. */
struct rtx_iv iv; /* Value of the biv. */
};
+static bool clean_slate = true;
+
+static unsigned int iv_ref_table_size = 0;
+
+/* Table of rtx_ivs indexed by the df_ref uid field. */
+static struct rtx_iv ** iv_ref_table;
+
/* Induction variable stored at the reference. */
-#define DF_REF_IV(REF) ((struct rtx_iv *) DF_REF_DATA (REF))
-#define DF_REF_IV_SET(REF, IV) DF_REF_DATA (REF) = (IV)
+#define DF_REF_IV(REF) iv_ref_table[DF_REF_ID (REF)]
+#define DF_REF_IV_SET(REF, IV) iv_ref_table[DF_REF_ID (REF)] = (IV)
/* The current loop. */
static struct loop *current_loop;
-/* Dataflow information for the current loop. */
+/* Hashtable helper. */
-static struct df *df = NULL;
+struct biv_entry_hasher : typed_free_remove <biv_entry>
+{
+ typedef biv_entry value_type;
+ typedef rtx_def compare_type;
+ static inline hashval_t hash (const value_type *);
+ static inline bool equal (const value_type *, const compare_type *);
+};
-/* Bivs of the current loop. */
+/* Returns hash value for biv B. */
-static htab_t bivs;
+inline hashval_t
+biv_entry_hasher::hash (const value_type *b)
+{
+ return b->regno;
+}
+
+/* Compares biv B and register R. */
-/* Return the dataflow object for the current loop. */
-struct df *
-iv_current_loop_df (void)
+inline bool
+biv_entry_hasher::equal (const value_type *b, const compare_type *r)
{
- return df;
+ return b->regno == REGNO (r);
}
+/* Bivs of the current loop. */
+
+static hash_table <biv_entry_hasher> bivs;
+
static bool iv_analyze_op (rtx, rtx, struct rtx_iv *);
+/* Return the RTX code corresponding to the IV extend code EXTEND. */
+static inline enum rtx_code
+iv_extend_to_rtx_code (enum iv_extend_code extend)
+{
+ switch (extend)
+ {
+ case IV_SIGN_EXTEND:
+ return SIGN_EXTEND;
+ case IV_ZERO_EXTEND:
+ return ZERO_EXTEND;
+ case IV_UNKNOWN_EXTEND:
+ return UNKNOWN;
+ }
+ gcc_unreachable ();
+}
+
/* Dumps information about IV to FILE. */
extern void dump_iv_info (FILE *, struct rtx_iv *);
if (iv->mode != iv->extend_mode)
fprintf (file, " %s to %s",
- rtx_name[iv->extend],
+ rtx_name[iv_extend_to_rtx_code (iv->extend)],
GET_MODE_NAME (iv->extend_mode));
if (iv->mult != const1_rtx)
subreg_lowpart_offset (outer_mode, inner_mode));
}
+static void
+check_iv_ref_table_size (void)
+{
+ if (iv_ref_table_size < DF_DEFS_TABLE_SIZE ())
+ {
+ unsigned int new_size = DF_DEFS_TABLE_SIZE () + (DF_DEFS_TABLE_SIZE () / 4);
+ iv_ref_table = XRESIZEVEC (struct rtx_iv *, iv_ref_table, new_size);
+ memset (&iv_ref_table[iv_ref_table_size], 0,
+ (new_size - iv_ref_table_size) * sizeof (struct rtx_iv *));
+ iv_ref_table_size = new_size;
+ }
+}
+
+
/* Checks whether REG is a well-behaved register. */
static bool
static void
clear_iv_info (void)
{
- unsigned i, n_defs = DF_DEFS_SIZE (df);
+ unsigned i, n_defs = DF_DEFS_TABLE_SIZE ();
struct rtx_iv *iv;
- struct df_ref *def;
+ check_iv_ref_table_size ();
for (i = 0; i < n_defs; i++)
{
- def = DF_DEFS_GET (df, i);
- iv = DF_REF_IV (def);
- if (!iv)
- continue;
- free (iv);
- DF_REF_IV_SET (def, NULL);
+ iv = iv_ref_table[i];
+ if (iv)
+ {
+ free (iv);
+ iv_ref_table[i] = NULL;
+ }
}
- htab_empty (bivs);
+ bivs.empty ();
}
-/* Returns hash value for biv B. */
-
-static hashval_t
-biv_hash (const void *b)
-{
- return ((const struct biv_entry *) b)->regno;
-}
-
-/* Compares biv B and register R. */
-
-static int
-biv_eq (const void *b, const void *r)
-{
- return ((const struct biv_entry *) b)->regno == REGNO ((rtx) r);
-}
/* Prepare the data for an induction variable analysis of a LOOP. */
void
iv_analysis_loop_init (struct loop *loop)
{
- basic_block *body = get_loop_body_in_dom_order (loop), bb;
- bitmap blocks = BITMAP_ALLOC (NULL);
- unsigned i;
- bool first_time = (df == NULL);
-
current_loop = loop;
/* Clear the information from the analysis of the previous loop. */
- if (first_time)
+ if (clean_slate)
{
- df = df_init (DF_HARD_REGS | DF_EQUIV_NOTES);
- df_chain_add_problem (df, DF_UD_CHAIN);
- bivs = htab_create (10, biv_hash, biv_eq, free);
+ df_set_flags (DF_EQ_NOTES + DF_DEFER_INSN_RESCAN);
+ bivs.create (10);
+ clean_slate = false;
}
else
clear_iv_info ();
- for (i = 0; i < loop->num_nodes; i++)
- {
- bb = body[i];
- bitmap_set_bit (blocks, bb->index);
- }
- df_set_blocks (df, blocks);
- df_analyze (df);
- BITMAP_FREE (blocks);
- free (body);
+ /* Get rid of the ud chains before processing the rescans. Then add
+ the problem back. */
+ df_remove_problem (df_chain);
+ df_process_deferred_rescans ();
+ df_set_flags (DF_RD_PRUNE_DEAD_DEFS);
+ df_chain_add_problem (DF_UD_CHAIN);
+ df_note_add_problem ();
+ df_analyze_loop (loop);
+ if (dump_file)
+ df_dump_region (dump_file);
+
+ check_iv_ref_table_size ();
}
/* Finds the definition of REG that dominates loop latch and stores
is set to NULL and true is returned. */
static bool
-latch_dominating_def (rtx reg, struct df_ref **def)
+latch_dominating_def (rtx reg, df_ref *def)
{
- struct df_ref *single_rd = NULL, *adef;
+ df_ref single_rd = NULL, adef;
unsigned regno = REGNO (reg);
- struct df_reg_info *reg_info = DF_REG_DEF_GET (df, regno);
- struct df_rd_bb_info *bb_info = DF_RD_BB_INFO (df, current_loop->latch);
+ struct df_rd_bb_info *bb_info = DF_RD_BB_INFO (current_loop->latch);
- for (adef = reg_info->reg_chain; adef; adef = adef->next_reg)
+ for (adef = DF_REG_DEF_CHAIN (regno); adef; adef = DF_REF_NEXT_REG (adef))
{
- if (!bitmap_bit_p (bb_info->out, DF_REF_ID (adef)))
+ if (!bitmap_bit_p (df->blocks_to_analyze, DF_REF_BBNO (adef))
+ || !bitmap_bit_p (&bb_info->out, DF_REF_ID (adef)))
continue;
/* More than one reaching definition. */
/* Gets definition of REG reaching its use in INSN and stores it to DEF. */
static enum iv_grd_result
-iv_get_reaching_def (rtx insn, rtx reg, struct df_ref **def)
+iv_get_reaching_def (rtx insn, rtx reg, df_ref *def)
{
- struct df_ref *use, *adef;
+ df_ref use, adef;
basic_block def_bb, use_bb;
rtx def_insn;
bool dom_p;
-
+
*def = NULL;
if (!simple_reg_p (reg))
return GRD_INVALID;
reg = SUBREG_REG (reg);
gcc_assert (REG_P (reg));
- use = df_find_use (df, insn, reg);
+ use = df_find_use (insn, reg);
gcc_assert (use != NULL);
if (!DF_REF_CHAIN (use))
return GRD_INVALID;
adef = DF_REF_CHAIN (use)->ref;
+
+ /* We do not handle setting only part of the register. */
+ if (DF_REF_FLAGS (adef) & DF_REF_READ_WRITE)
+ return GRD_INVALID;
+
def_insn = DF_REF_INSN (adef);
def_bb = DF_REF_BB (adef);
use_bb = BLOCK_FOR_INSN (insn);
if (use_bb == def_bb)
- dom_p = (DF_INSN_LUID (df, def_insn) < DF_INSN_LUID (df, insn));
+ dom_p = (DF_INSN_LUID (def_insn) < DF_INSN_LUID (insn));
else
dom_p = dominated_by_p (CDI_DOMINATORS, use_bb, def_bb);
iv->base = cst;
iv->step = const0_rtx;
iv->first_special = false;
- iv->extend = UNKNOWN;
+ iv->extend = IV_UNKNOWN_EXTEND;
iv->extend_mode = iv->mode;
iv->delta = const0_rtx;
iv->mult = const1_rtx;
&& !iv->first_special)
{
rtx val = get_iv_value (iv, const0_rtx);
- val = lowpart_subreg (mode, val, iv->extend_mode);
+ val = lowpart_subreg (mode, val,
+ iv->extend == IV_UNKNOWN_EXTEND
+ ? iv->mode : iv->extend_mode);
iv->base = val;
- iv->extend = UNKNOWN;
+ iv->extend = IV_UNKNOWN_EXTEND;
iv->mode = iv->extend_mode = mode;
iv->delta = const0_rtx;
iv->mult = const1_rtx;
if (GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (iv->mode))
return false;
- iv->extend = UNKNOWN;
+ iv->extend = IV_UNKNOWN_EXTEND;
iv->mode = mode;
iv->base = simplify_gen_binary (PLUS, iv->extend_mode, iv->delta,
/* Evaluates application of EXTEND to MODE on IV. */
static bool
-iv_extend (struct rtx_iv *iv, enum rtx_code extend, enum machine_mode mode)
+iv_extend (struct rtx_iv *iv, enum iv_extend_code extend, enum machine_mode mode)
{
/* If iv is invariant, just calculate the new value. */
if (iv->step == const0_rtx
&& !iv->first_special)
{
rtx val = get_iv_value (iv, const0_rtx);
- val = simplify_gen_unary (extend, mode, val, iv->extend_mode);
-
+ if (iv->extend_mode != iv->mode
+ && iv->extend != IV_UNKNOWN_EXTEND
+ && iv->extend != extend)
+ val = lowpart_subreg (iv->mode, val, iv->extend_mode);
+ val = simplify_gen_unary (iv_extend_to_rtx_code (extend), mode,
+ val,
+ iv->extend == extend
+ ? iv->extend_mode : iv->mode);
iv->base = val;
- iv->extend = UNKNOWN;
+ iv->extend = IV_UNKNOWN_EXTEND;
iv->mode = iv->extend_mode = mode;
iv->delta = const0_rtx;
iv->mult = const1_rtx;
if (mode != iv->extend_mode)
return false;
- if (iv->extend != UNKNOWN
+ if (iv->extend != IV_UNKNOWN_EXTEND
&& iv->extend != extend)
return false;
static bool
iv_neg (struct rtx_iv *iv)
{
- if (iv->extend == UNKNOWN)
+ if (iv->extend == IV_UNKNOWN_EXTEND)
{
iv->base = simplify_gen_unary (NEG, iv->extend_mode,
iv->base, iv->extend_mode);
rtx arg;
/* Extend the constant to extend_mode of the other operand if necessary. */
- if (iv0->extend == UNKNOWN
+ if (iv0->extend == IV_UNKNOWN_EXTEND
&& iv0->mode == iv0->extend_mode
&& iv0->step == const0_rtx
&& GET_MODE_SIZE (iv0->extend_mode) < GET_MODE_SIZE (iv1->extend_mode))
iv0->base = simplify_gen_unary (ZERO_EXTEND, iv0->extend_mode,
iv0->base, iv0->mode);
}
- if (iv1->extend == UNKNOWN
+ if (iv1->extend == IV_UNKNOWN_EXTEND
&& iv1->mode == iv1->extend_mode
&& iv1->step == const0_rtx
&& GET_MODE_SIZE (iv1->extend_mode) < GET_MODE_SIZE (iv0->extend_mode))
if (mode != iv1->extend_mode)
return false;
- if (iv0->extend == UNKNOWN && iv1->extend == UNKNOWN)
+ if (iv0->extend == IV_UNKNOWN_EXTEND
+ && iv1->extend == IV_UNKNOWN_EXTEND)
{
if (iv0->mode != iv1->mode)
return false;
}
/* Handle addition of constant. */
- if (iv1->extend == UNKNOWN
+ if (iv1->extend == IV_UNKNOWN_EXTEND
&& iv1->mode == mode
&& iv1->step == const0_rtx)
{
return true;
}
- if (iv0->extend == UNKNOWN
+ if (iv0->extend == IV_UNKNOWN_EXTEND
&& iv0->mode == mode
&& iv0->step == const0_rtx)
{
&& GET_MODE (mby) != mode)
return false;
- if (iv->extend == UNKNOWN)
+ if (iv->extend == IV_UNKNOWN_EXTEND)
{
iv->base = simplify_gen_binary (MULT, mode, iv->base, mby);
iv->step = simplify_gen_binary (MULT, mode, iv->step, mby);
&& GET_MODE (mby) != mode)
return false;
- if (iv->extend == UNKNOWN)
+ if (iv->extend == IV_UNKNOWN_EXTEND)
{
iv->base = simplify_gen_binary (ASHIFT, mode, iv->base, mby);
iv->step = simplify_gen_binary (ASHIFT, mode, iv->step, mby);
at get_biv_step. */
static bool
-get_biv_step_1 (struct df_ref *def, rtx reg,
+get_biv_step_1 (df_ref def, rtx reg,
rtx *inner_step, enum machine_mode *inner_mode,
- enum rtx_code *extend, enum machine_mode outer_mode,
+ enum iv_extend_code *extend, enum machine_mode outer_mode,
rtx *outer_step)
{
rtx set, rhs, op0 = NULL_RTX, op1 = NULL_RTX;
rtx next, nextr, tmp;
enum rtx_code code;
rtx insn = DF_REF_INSN (def);
- struct df_ref *next_def;
+ df_ref next_def;
enum iv_grd_result res;
set = single_set (insn);
return false;
*inner_step = const0_rtx;
- *extend = UNKNOWN;
+ *extend = IV_UNKNOWN_EXTEND;
*inner_mode = outer_mode;
*outer_step = const0_rtx;
}
*inner_step = simplify_gen_binary (PLUS, outer_mode,
*inner_step, *outer_step);
*outer_step = const0_rtx;
- *extend = UNKNOWN;
+ *extend = IV_UNKNOWN_EXTEND;
}
switch (code)
case SIGN_EXTEND:
case ZERO_EXTEND:
gcc_assert (GET_MODE (op0) == *inner_mode
- && *extend == UNKNOWN
+ && *extend == IV_UNKNOWN_EXTEND
&& *outer_step == const0_rtx);
- *extend = code;
+ *extend = (code == SIGN_EXTEND) ? IV_SIGN_EXTEND : IV_ZERO_EXTEND;
break;
default:
LAST_DEF is the definition of REG that dominates loop latch. */
static bool
-get_biv_step (struct df_ref *last_def, rtx reg, rtx *inner_step,
- enum machine_mode *inner_mode, enum rtx_code *extend,
+get_biv_step (df_ref last_def, rtx reg, rtx *inner_step,
+ enum machine_mode *inner_mode, enum iv_extend_code *extend,
enum machine_mode *outer_mode, rtx *outer_step)
{
*outer_mode = GET_MODE (reg);
outer_step))
return false;
- gcc_assert ((*inner_mode == *outer_mode) != (*extend != UNKNOWN));
+ gcc_assert ((*inner_mode == *outer_mode) != (*extend != IV_UNKNOWN_EXTEND));
gcc_assert (*inner_mode != *outer_mode || *outer_step == const0_rtx);
return true;
/* Records information that DEF is induction variable IV. */
static void
-record_iv (struct df_ref *def, struct rtx_iv *iv)
+record_iv (df_ref def, struct rtx_iv *iv)
{
struct rtx_iv *recorded_iv = XNEW (struct rtx_iv);
*recorded_iv = *iv;
+ check_iv_ref_table_size ();
DF_REF_IV_SET (def, recorded_iv);
}
static bool
analyzed_for_bivness_p (rtx def, struct rtx_iv *iv)
{
- struct biv_entry *biv = htab_find_with_hash (bivs, def, REGNO (def));
+ struct biv_entry *biv = bivs.find_with_hash (def, REGNO (def));
if (!biv)
return false;
record_biv (rtx def, struct rtx_iv *iv)
{
struct biv_entry *biv = XNEW (struct biv_entry);
- void **slot = htab_find_slot_with_hash (bivs, def, REGNO (def), INSERT);
+ biv_entry **slot = bivs.find_slot_with_hash (def, REGNO (def), INSERT);
biv->regno = REGNO (def);
biv->iv = *iv;
{
rtx inner_step, outer_step;
enum machine_mode inner_mode, outer_mode;
- enum rtx_code extend;
- struct df_ref *last_def;
+ enum iv_extend_code extend;
+ df_ref last_def;
if (dump_file)
{
print_rtl (dump_file, def);
fprintf (dump_file, " for bivness.\n");
}
-
+
if (!REG_P (def))
{
if (!CONSTANT_P (def))
return iv->base != NULL_RTX;
}
-/* Analyzes expression RHS used at INSN and stores the result to *IV.
+/* Analyzes expression RHS used at INSN and stores the result to *IV.
The mode of the induction variable is MODE. */
bool
{
if (!iv_analyze_op (insn, rhs, iv))
return false;
-
+
if (iv->mode == VOIDmode)
{
iv->mode = mode;
switch (code)
{
case SIGN_EXTEND:
+ if (!iv_extend (&iv0, IV_SIGN_EXTEND, mode))
+ return false;
+ break;
+
case ZERO_EXTEND:
- if (!iv_extend (&iv0, code, mode))
+ if (!iv_extend (&iv0, IV_ZERO_EXTEND, mode))
return false;
break;
/* Analyzes iv DEF and stores the result to *IV. */
static bool
-iv_analyze_def (struct df_ref *def, struct rtx_iv *iv)
+iv_analyze_def (df_ref def, struct rtx_iv *iv)
{
rtx insn = DF_REF_INSN (def);
rtx reg = DF_REF_REG (def);
print_rtl_single (dump_file, insn);
}
+ check_iv_ref_table_size ();
if (DF_REF_IV (def))
{
if (dump_file)
iv->base = NULL_RTX;
iv->step = NULL_RTX;
+ if (!REG_P (reg))
+ return false;
+
set = single_set (insn);
- if (!set || SET_DEST (set) != reg)
+ if (!set)
return false;
+ if (!REG_P (SET_DEST (set)))
+ return false;
+
+ gcc_assert (SET_DEST (set) == reg);
rhs = find_reg_equal_equiv_note (insn);
if (rhs)
rhs = XEXP (rhs, 0);
static bool
iv_analyze_op (rtx insn, rtx op, struct rtx_iv *iv)
{
- struct df_ref *def = NULL;
+ df_ref def = NULL;
enum iv_grd_result res;
if (dump_file)
print_rtl_single (dump_file, insn);
}
- if (CONSTANT_P (op))
+ if (function_invariant_p (op))
res = GRD_INVARIANT;
else if (GET_CODE (op) == SUBREG)
{
else
reg = val;
- while (!df_find_use (df, insn, reg))
+ while (!df_find_use (insn, reg))
insn = NEXT_INSN (insn);
}
bool
iv_analyze_result (rtx insn, rtx def, struct rtx_iv *iv)
{
- struct df_ref *adef;
+ df_ref adef;
- adef = df_find_def (df, insn, def);
+ adef = df_find_def (insn, def);
if (!adef)
return false;
biv_p (rtx insn, rtx reg)
{
struct rtx_iv iv;
- struct df_ref *def, *last_def;
+ df_ref def, last_def;
if (!simple_reg_p (reg))
return false;
- def = df_find_def (df, insn, reg);
+ def = df_find_def (insn, reg);
gcc_assert (def != NULL);
if (!latch_dominating_def (reg, &last_def))
return false;
val = lowpart_subreg (iv->mode, val, iv->extend_mode);
- if (iv->extend == UNKNOWN)
+ if (iv->extend == IV_UNKNOWN_EXTEND)
return val;
- val = simplify_gen_unary (iv->extend, iv->extend_mode, val, iv->mode);
+ val = simplify_gen_unary (iv_extend_to_rtx_code (iv->extend),
+ iv->extend_mode, val, iv->mode);
val = simplify_gen_binary (PLUS, iv->extend_mode, iv->delta,
simplify_gen_binary (MULT, iv->extend_mode,
iv->mult, val));
void
iv_analysis_done (void)
{
- if (df)
+ if (!clean_slate)
{
clear_iv_info ();
- df_finish (df);
- df = NULL;
- htab_delete (bivs);
- bivs = NULL;
+ clean_slate = true;
+ df_finish_pass (true);
+ bivs.dispose ();
+ free (iv_ref_table);
+ iv_ref_table = NULL;
+ iv_ref_table_size = 0;
}
}
if (!REG_P (*reg))
return 0;
- return REGNO_REG_SET_P (alt, REGNO (*reg));
+ return REGNO_REG_SET_P ((bitmap) alt, REGNO (*reg));
}
/* Marks registers altered by EXPR in set ALT. */
static void
-mark_altered (rtx expr, rtx by ATTRIBUTE_UNUSED, void *alt)
+mark_altered (rtx expr, const_rtx by ATTRIBUTE_UNUSED, void *alt)
{
if (GET_CODE (expr) == SUBREG)
expr = SUBREG_REG (expr);
if (!REG_P (expr))
return;
- SET_REGNO_REG_SET (alt, REGNO (expr));
+ SET_REGNO_REG_SET ((bitmap) alt, REGNO (expr));
}
/* Checks whether RHS is simple enough to process. */
{
rtx op0, op1;
- if (CONSTANT_P (rhs)
+ if (function_invariant_p (rhs)
|| (REG_P (rhs) && !HARD_REGISTER_P (rhs)))
return true;
{
case PLUS:
case MINUS:
+ case AND:
op0 = XEXP (rhs, 0);
op1 = XEXP (rhs, 1);
- /* Allow reg + const sets only. */
- if (REG_P (op0) && !HARD_REGISTER_P (op0) && CONSTANT_P (op1))
- return true;
- if (REG_P (op1) && !HARD_REGISTER_P (op1) && CONSTANT_P (op0))
- return true;
+ /* Allow reg OP const and reg OP reg. */
+ if (!(REG_P (op0) && !HARD_REGISTER_P (op0))
+ && !function_invariant_p (op0))
+ return false;
+ if (!(REG_P (op1) && !HARD_REGISTER_P (op1))
+ && !function_invariant_p (op1))
+ return false;
- return false;
+ return true;
+
+ case ASHIFT:
+ case ASHIFTRT:
+ case LSHIFTRT:
+ case MULT:
+ op0 = XEXP (rhs, 0);
+ op1 = XEXP (rhs, 1);
+ /* Allow reg OP const. */
+ if (!(REG_P (op0) && !HARD_REGISTER_P (op0)))
+ return false;
+ if (!function_invariant_p (op1))
+ return false;
+
+ return true;
default:
return false;
}
}
-/* Simplifies *EXPR using assignment in INSN. ALTERED is the set of registers
- altered so far. */
+/* If REG has a single definition, replace it with its known value in EXPR.
+ Callback for for_each_rtx. */
-static void
-simplify_using_assignment (rtx insn, rtx *expr, regset altered)
+static int
+replace_single_def_regs (rtx *reg, void *expr1)
{
- rtx set = single_set (insn);
- rtx lhs = NULL_RTX, rhs;
- bool ret = false;
+ unsigned regno;
+ df_ref adef;
+ rtx set, src;
+ rtx *expr = (rtx *)expr1;
- if (set)
- {
- lhs = SET_DEST (set);
- if (!REG_P (lhs)
- || altered_reg_used (&lhs, altered))
- ret = true;
- }
- else
- ret = true;
+ if (!REG_P (*reg))
+ return 0;
- note_stores (PATTERN (insn), mark_altered, altered);
- if (CALL_P (insn))
+ regno = REGNO (*reg);
+ for (;;)
{
- int i;
+ rtx note;
+ adef = DF_REG_DEF_CHAIN (regno);
+ if (adef == NULL || DF_REF_NEXT_REG (adef) != NULL
+ || DF_REF_IS_ARTIFICIAL (adef))
+ return -1;
+
+ set = single_set (DF_REF_INSN (adef));
+ if (set == NULL || !REG_P (SET_DEST (set))
+ || REGNO (SET_DEST (set)) != regno)
+ return -1;
+
+ note = find_reg_equal_equiv_note (DF_REF_INSN (adef));
+
+ if (note && function_invariant_p (XEXP (note, 0)))
+ {
+ src = XEXP (note, 0);
+ break;
+ }
+ src = SET_SRC (set);
- /* Kill all call clobbered registers. */
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
- SET_REGNO_REG_SET (altered, i);
+ if (REG_P (src))
+ {
+ regno = REGNO (src);
+ continue;
+ }
+ break;
}
+ if (!function_invariant_p (src))
+ return -1;
- if (ret)
- return;
+ *expr = simplify_replace_rtx (*expr, *reg, src);
+ return 1;
+}
+
+/* A subroutine of simplify_using_initial_values, this function examines INSN
+ to see if it contains a suitable set that we can use to make a replacement.
+ If it is suitable, return true and set DEST and SRC to the lhs and rhs of
+ the set; return false otherwise. */
+
+static bool
+suitable_set_for_replacement (rtx insn, rtx *dest, rtx *src)
+{
+ rtx set = single_set (insn);
+ rtx lhs = NULL_RTX, rhs;
+
+ if (!set)
+ return false;
+
+ lhs = SET_DEST (set);
+ if (!REG_P (lhs))
+ return false;
rhs = find_reg_equal_equiv_note (insn);
if (rhs)
rhs = SET_SRC (set);
if (!simple_rhs_p (rhs))
- return;
+ return false;
- if (for_each_rtx (&rhs, altered_reg_used, altered))
- return;
+ *dest = lhs;
+ *src = rhs;
+ return true;
+}
- *expr = simplify_replace_rtx (*expr, lhs, rhs);
+/* Using the data returned by suitable_set_for_replacement, replace DEST
+ with SRC in *EXPR and return the new expression. Also call
+ replace_single_def_regs if the replacement changed something. */
+static void
+replace_in_expr (rtx *expr, rtx dest, rtx src)
+{
+ rtx old = *expr;
+ *expr = simplify_replace_rtx (*expr, dest, src);
+ if (old == *expr)
+ return;
+ while (for_each_rtx (expr, replace_single_def_regs, expr) != 0)
+ continue;
}
/* Checks whether A implies B. */
rtx op0, op1, opb0, opb1, r;
enum machine_mode mode;
+ if (rtx_equal_p (a, b))
+ return true;
+
if (GET_CODE (a) == EQ)
{
op0 = XEXP (a, 0);
op1 = XEXP (a, 1);
- if (REG_P (op0))
+ if (REG_P (op0)
+ || (GET_CODE (op0) == SUBREG
+ && REG_P (SUBREG_REG (op0))))
{
r = simplify_replace_rtx (b, op0, op1);
if (r == const_true_rtx)
return true;
}
- if (REG_P (op1))
+ if (REG_P (op1)
+ || (GET_CODE (op1) == SUBREG
+ && REG_P (SUBREG_REG (op1))))
{
r = simplify_replace_rtx (b, op1, op0);
if (r == const_true_rtx)
/* A != N is equivalent to A - (N + 1) <u -1. */
if (GET_CODE (a) == NE
- && GET_CODE (op1) == CONST_INT
+ && CONST_INT_P (op1)
&& GET_CODE (b) == LTU
&& opb1 == constm1_rtx
&& GET_CODE (opb0) == PLUS
- && GET_CODE (XEXP (opb0, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (opb0, 1))
/* Avoid overflows. */
&& ((unsigned HOST_WIDE_INT) INTVAL (XEXP (opb0, 1))
!= ((unsigned HOST_WIDE_INT)1
/* Likewise, A != N implies A - N > 0. */
if (GET_CODE (a) == NE
- && GET_CODE (op1) == CONST_INT)
+ && CONST_INT_P (op1))
{
if (GET_CODE (b) == GTU
&& GET_CODE (opb0) == PLUS
&& opb1 == const0_rtx
- && GET_CODE (XEXP (opb0, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (opb0, 1))
/* Avoid overflows. */
&& ((unsigned HOST_WIDE_INT) INTVAL (XEXP (opb0, 1))
!= ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)))
if (GET_CODE (b) == GEU
&& GET_CODE (opb0) == PLUS
&& opb1 == const1_rtx
- && GET_CODE (XEXP (opb0, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (opb0, 1))
/* Avoid overflows. */
&& ((unsigned HOST_WIDE_INT) INTVAL (XEXP (opb0, 1))
!= ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)))
/* A >s X, where X is positive, implies A <u Y, if Y is negative. */
if ((GET_CODE (a) == GT || GET_CODE (a) == GE)
- && GET_CODE (op1) == CONST_INT
+ && CONST_INT_P (op1)
&& ((GET_CODE (a) == GT && op1 == constm1_rtx)
|| INTVAL (op1) >= 0)
&& GET_CODE (b) == LTU
- && GET_CODE (opb1) == CONST_INT)
+ && CONST_INT_P (opb1)
+ && rtx_equal_p (op0, opb0))
return INTVAL (opb1) < 0;
return false;
mode = GET_MODE (op1);
gcc_assert (mode != VOIDmode);
- if (GET_CODE (op1) == CONST_INT
+ if (CONST_INT_P (op1)
&& GET_MODE_CLASS (mode) != MODE_CC
&& GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
{
{
rtx rev, reve, exp = *expr;
- if (!COMPARISON_P (exp))
- return;
-
/* If some register gets altered later, we do not really speak about its
value at the time of comparison. */
if (altered
&& for_each_rtx (&cond, altered_reg_used, altered))
return;
+ if (GET_CODE (cond) == EQ
+ && REG_P (XEXP (cond, 0)) && CONSTANT_P (XEXP (cond, 1)))
+ {
+ *expr = simplify_replace_rtx (*expr, XEXP (cond, 0), XEXP (cond, 1));
+ return;
+ }
+
+ if (!COMPARISON_P (exp))
+ return;
+
rev = reversed_condition (cond);
reve = reversed_condition (exp);
return;
}
-
if (rev && rtx_equal_p (exp, rev))
{
*expr = const0_rtx;
*expr = const_true_rtx;
return;
}
-
+
if (reve && implies_p (cond, reve))
{
*expr = const0_rtx;
static void
simplify_using_initial_values (struct loop *loop, enum rtx_code op, rtx *expr)
{
- rtx head, tail, insn;
+ bool expression_valid;
+ rtx head, tail, insn, cond_list, last_valid_expr;
rtx neutral, aggr;
- regset altered;
+ regset altered, this_altered;
edge e;
if (!*expr)
default:
gcc_unreachable ();
}
-
+
simplify_using_initial_values (loop, UNKNOWN, &head);
if (head == aggr)
{
*expr = tail;
return;
}
-
+
XEXP (*expr, 0) = head;
XEXP (*expr, 1) = tail;
return;
gcc_assert (op == UNKNOWN);
+ for (;;)
+ if (for_each_rtx (expr, replace_single_def_regs, expr) == 0)
+ break;
+ if (CONSTANT_P (*expr))
+ return;
+
e = loop_preheader_edge (loop);
- if (e->src == ENTRY_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
return;
altered = ALLOC_REG_SET (®_obstack);
+ this_altered = ALLOC_REG_SET (®_obstack);
+ expression_valid = true;
+ last_valid_expr = *expr;
+ cond_list = NULL_RTX;
while (1)
{
insn = BB_END (e->src);
if (any_condjump_p (insn))
{
rtx cond = get_condition (BB_END (e->src), NULL, false, true);
-
+
if (cond && (e->flags & EDGE_FALLTHRU))
cond = reversed_condition (cond);
if (cond)
{
+ rtx old = *expr;
simplify_using_condition (cond, expr, altered);
- if (CONSTANT_P (*expr))
+ if (old != *expr)
{
- FREE_REG_SET (altered);
- return;
+ rtx note;
+ if (CONSTANT_P (*expr))
+ goto out;
+ for (note = cond_list; note; note = XEXP (note, 1))
+ {
+ simplify_using_condition (XEXP (note, 0), expr, altered);
+ if (CONSTANT_P (*expr))
+ goto out;
+ }
}
+ cond_list = alloc_EXPR_LIST (0, cond, cond_list);
}
}
FOR_BB_INSNS_REVERSE (e->src, insn)
{
+ rtx src, dest;
+ rtx old = *expr;
+
if (!INSN_P (insn))
continue;
-
- simplify_using_assignment (insn, expr, altered);
- if (CONSTANT_P (*expr))
+
+ CLEAR_REG_SET (this_altered);
+ note_stores (PATTERN (insn), mark_altered, this_altered);
+ if (CALL_P (insn))
{
- FREE_REG_SET (altered);
- return;
+ /* Kill all call clobbered registers. */
+ unsigned int i;
+ hard_reg_set_iterator hrsi;
+ EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call,
+ 0, i, hrsi)
+ SET_REGNO_REG_SET (this_altered, i);
}
- if (for_each_rtx (expr, altered_reg_used, altered))
+
+ if (suitable_set_for_replacement (insn, &dest, &src))
{
- FREE_REG_SET (altered);
- return;
+ rtx *pnote, *pnote_next;
+
+ replace_in_expr (expr, dest, src);
+ if (CONSTANT_P (*expr))
+ goto out;
+
+ for (pnote = &cond_list; *pnote; pnote = pnote_next)
+ {
+ rtx note = *pnote;
+ rtx old_cond = XEXP (note, 0);
+
+ pnote_next = &XEXP (note, 1);
+ replace_in_expr (&XEXP (note, 0), dest, src);
+
+ /* We can no longer use a condition that has been simplified
+ to a constant, and simplify_using_condition will abort if
+ we try. */
+ if (CONSTANT_P (XEXP (note, 0)))
+ {
+ *pnote = *pnote_next;
+ pnote_next = pnote;
+ free_EXPR_LIST_node (note);
+ }
+ /* Retry simplifications with this condition if either the
+ expression or the condition changed. */
+ else if (old_cond != XEXP (note, 0) || old != *expr)
+ simplify_using_condition (XEXP (note, 0), expr, altered);
+ }
}
+ else
+ {
+ rtx *pnote, *pnote_next;
+
+ /* If we did not use this insn to make a replacement, any overlap
+ between stores in this insn and our expression will cause the
+ expression to become invalid. */
+ if (for_each_rtx (expr, altered_reg_used, this_altered))
+ goto out;
+
+ /* Likewise for the conditions. */
+ for (pnote = &cond_list; *pnote; pnote = pnote_next)
+ {
+ rtx note = *pnote;
+ rtx old_cond = XEXP (note, 0);
+
+ pnote_next = &XEXP (note, 1);
+ if (for_each_rtx (&old_cond, altered_reg_used, this_altered))
+ {
+ *pnote = *pnote_next;
+ pnote_next = pnote;
+ free_EXPR_LIST_node (note);
+ }
+ }
+ }
+
+ if (CONSTANT_P (*expr))
+ goto out;
+
+ IOR_REG_SET (altered, this_altered);
+
+ /* If the expression now contains regs that have been altered, we
+ can't return it to the caller. However, it is still valid for
+ further simplification, so keep searching to see if we can
+ eventually turn it into a constant. */
+ if (for_each_rtx (expr, altered_reg_used, altered))
+ expression_valid = false;
+ if (expression_valid)
+ last_valid_expr = *expr;
}
if (!single_pred_p (e->src)
- || single_pred (e->src) == ENTRY_BLOCK_PTR)
+ || single_pred (e->src) == ENTRY_BLOCK_PTR_FOR_FN (cfun))
break;
e = single_pred_edge (e->src);
}
+ out:
+ free_EXPR_LIST_list (&cond_list);
+ if (!CONSTANT_P (*expr))
+ *expr = last_valid_expr;
FREE_REG_SET (altered);
+ FREE_REG_SET (this_altered);
}
/* Transforms invariant IV into MODE. Adds assumptions based on the fact
}
iv->mode = mode;
- iv->extend = signed_p ? SIGN_EXTEND : ZERO_EXTEND;
+ iv->extend = signed_p ? IV_SIGN_EXTEND : IV_ZERO_EXTEND;
}
/* Transforms IV0 and IV1 compared by COND so that they are both compared as
{
case LE:
case LT:
- if (iv0->extend == ZERO_EXTEND
- || iv1->extend == ZERO_EXTEND)
+ if (iv0->extend == IV_ZERO_EXTEND
+ || iv1->extend == IV_ZERO_EXTEND)
return false;
signed_p = true;
break;
case LEU:
case LTU:
- if (iv0->extend == SIGN_EXTEND
- || iv1->extend == SIGN_EXTEND)
+ if (iv0->extend == IV_SIGN_EXTEND
+ || iv1->extend == IV_SIGN_EXTEND)
return false;
signed_p = false;
break;
case NE:
- if (iv0->extend != UNKNOWN
- && iv1->extend != UNKNOWN
+ if (iv0->extend != IV_UNKNOWN_EXTEND
+ && iv1->extend != IV_UNKNOWN_EXTEND
&& iv0->extend != iv1->extend)
return false;
signed_p = false;
- if (iv0->extend != UNKNOWN)
- signed_p = iv0->extend == SIGN_EXTEND;
- if (iv1->extend != UNKNOWN)
- signed_p = iv1->extend == SIGN_EXTEND;
+ if (iv0->extend != IV_UNKNOWN_EXTEND)
+ signed_p = iv0->extend == IV_SIGN_EXTEND;
+ if (iv1->extend != IV_UNKNOWN_EXTEND)
+ signed_p = iv1->extend == IV_SIGN_EXTEND;
break;
default:
and iv0 and iv1 are both ivs iterating in SI mode, but calculated
in different modes. This does not seem impossible to handle, but
it hardly ever occurs in practice.
-
+
The only exception is the case when one of operands is invariant.
For example pentium 3 generates comparisons like
(lt (subreg:HI (reg:SI)) 100). Here we assign HImode to 100, but we
return true;
}
-/* Tries to estimate the maximum number of iterations. */
+/* Tries to estimate the maximum number of iterations in LOOP, and return the
+ result. This function is called from iv_number_of_iterations with
+ a number of fields in DESC already filled in. OLD_NITER is the original
+ expression for the number of iterations, before we tried to simplify it. */
static unsigned HOST_WIDEST_INT
-determine_max_iter (struct loop *loop, struct niter_desc *desc)
+determine_max_iter (struct loop *loop, struct niter_desc *desc, rtx old_niter)
{
rtx niter = desc->niter_expr;
rtx mmin, mmax, cmp;
unsigned HOST_WIDEST_INT nmax, inc;
+ unsigned HOST_WIDEST_INT andmax = 0;
+
+ /* We used to look for constant operand 0 of AND,
+ but canonicalization should always make this impossible. */
+ gcc_checking_assert (GET_CODE (niter) != AND
+ || !CONST_INT_P (XEXP (niter, 0)));
if (GET_CODE (niter) == AND
- && GET_CODE (XEXP (niter, 0)) == CONST_INT)
+ && CONST_INT_P (XEXP (niter, 1)))
{
- nmax = INTVAL (XEXP (niter, 0));
- if (!(nmax & (nmax + 1)))
- {
- desc->niter_max = nmax;
- return nmax;
- }
+ andmax = UINTVAL (XEXP (niter, 1));
+ niter = XEXP (niter, 0);
}
get_mode_bounds (desc->mode, desc->signed_p, desc->mode, &mmin, &mmax);
if (GET_CODE (niter) == UDIV)
{
- if (GET_CODE (XEXP (niter, 1)) != CONST_INT)
- {
- desc->niter_max = nmax;
- return nmax;
- }
+ if (!CONST_INT_P (XEXP (niter, 1)))
+ return nmax;
inc = INTVAL (XEXP (niter, 1));
niter = XEXP (niter, 0);
}
/* We could use a binary search here, but for now improving the upper
bound by just one eliminates one important corner case. */
- cmp = gen_rtx_fmt_ee (desc->signed_p ? LT : LTU, VOIDmode, niter, mmax);
+ cmp = simplify_gen_relational (desc->signed_p ? LT : LTU, VOIDmode,
+ desc->mode, old_niter, mmax);
simplify_using_initial_values (loop, UNKNOWN, &cmp);
if (cmp == const_true_rtx)
{
if (dump_file)
fprintf (dump_file, ";; improved upper bound by one.\n");
}
- desc->niter_max = nmax / inc;
- return nmax / inc;
+ nmax /= inc;
+ if (andmax)
+ nmax = MIN (nmax, andmax);
+ if (dump_file)
+ fprintf (dump_file, ";; Determined upper bound "HOST_WIDEST_INT_PRINT_DEC".\n",
+ nmax);
+ return nmax;
}
/* Computes number of iterations of the CONDITION in INSN in LOOP and stores
enum rtx_code cond;
enum machine_mode mode, comp_mode;
rtx mmin, mmax, mode_mmin, mode_mmax;
- unsigned HOST_WIDEST_INT s, size, d, inv;
+ unsigned HOST_WIDEST_INT s, size, d, inv, max;
HOST_WIDEST_INT up, down, inc, step_val;
int was_sharp = false;
rtx old_niter;
desc->const_iter = false;
desc->niter_expr = NULL_RTX;
- desc->niter_max = 0;
cond = GET_CODE (condition);
gcc_assert (COMPARISON_P (condition));
goto fail;
if (iv0.extend_mode == VOIDmode)
iv0.mode = iv0.extend_mode = mode;
-
+
op1 = XEXP (condition, 1);
if (!iv_analyze (insn, op1, &iv1))
goto fail;
mode_mmin = lowpart_subreg (mode, mmin, comp_mode);
mode_mmax = lowpart_subreg (mode, mmax, comp_mode);
- if (GET_CODE (iv0.step) != CONST_INT || GET_CODE (iv1.step) != CONST_INT)
+ if (!CONST_INT_P (iv0.step) || !CONST_INT_P (iv1.step))
goto fail;
/* We can take care of the case of two induction variables chasing each other
iv1.step = const0_rtx;
}
+ iv0.step = lowpart_subreg (mode, iv0.step, comp_mode);
+ iv1.step = lowpart_subreg (mode, iv1.step, comp_mode);
+
/* This is either infinite loop or the one that ends immediately, depending
on initial values. Unswitching should remove this kind of conditions. */
if (iv0.step == const0_rtx && iv1.step == const0_rtx)
step = simplify_gen_unary (NEG, comp_mode, iv1.step, comp_mode);
else
step = iv0.step;
+ step = lowpart_subreg (mode, step, comp_mode);
delta = simplify_gen_binary (MINUS, comp_mode, iv1.base, iv0.base);
delta = lowpart_subreg (mode, delta, comp_mode);
delta = simplify_gen_binary (UMOD, mode, delta, step);
may_xform = const0_rtx;
may_not_xform = const_true_rtx;
- if (GET_CODE (delta) == CONST_INT)
+ if (CONST_INT_P (delta))
{
if (was_sharp && INTVAL (delta) == INTVAL (step) - 1)
{
number of iterations in this step, so record the information
here. */
inc = INTVAL (iv0.step) - INTVAL (iv1.step);
- if (GET_CODE (iv1.base) == CONST_INT)
+ if (CONST_INT_P (iv1.base))
up = INTVAL (iv1.base);
else
up = INTVAL (mode_mmax) - inc;
- down = INTVAL (GET_CODE (iv0.base) == CONST_INT
+ down = INTVAL (CONST_INT_P (iv0.base)
? iv0.base
: mode_mmin);
- desc->niter_max = (up - down) / inc + 1;
+ max = (up - down) / inc + 1;
+ if (!desc->infinite
+ && !desc->assumptions)
+ record_niter_bound (loop, double_int::from_uhwi (max),
+ false, true);
if (iv0.step == const0_rtx)
{
bound = GEN_INT (((unsigned HOST_WIDEST_INT) 1 << (size - 1 ) << 1) - 1);
tmp1 = lowpart_subreg (mode, iv1.base, comp_mode);
- tmp = simplify_gen_binary (UMOD, mode, tmp1, GEN_INT (d));
+ tmp = simplify_gen_binary (UMOD, mode, tmp1, gen_int_mode (d, mode));
assumption = simplify_gen_relational (NE, SImode, mode, tmp, const0_rtx);
desc->infinite = alloc_EXPR_LIST (0, assumption, desc->infinite);
- tmp = simplify_gen_binary (UDIV, mode, tmp1, GEN_INT (d));
+ tmp = simplify_gen_binary (UDIV, mode, tmp1, gen_int_mode (d, mode));
inv = inverse (s, size);
tmp = simplify_gen_binary (MULT, mode, tmp, gen_int_mode (inv, mode));
desc->niter_expr = simplify_gen_binary (AND, mode, tmp, bound);
&& XEXP (desc->noloop_assumptions, 0) == const_true_rtx)
goto zero_iter;
- if (GET_CODE (desc->niter_expr) == CONST_INT)
+ if (CONST_INT_P (desc->niter_expr))
{
unsigned HOST_WIDEST_INT val = INTVAL (desc->niter_expr);
desc->const_iter = true;
- desc->niter_max = desc->niter = val & GET_MODE_MASK (desc->mode);
+ desc->niter = val & GET_MODE_MASK (desc->mode);
+ if (!desc->infinite
+ && !desc->assumptions)
+ record_niter_bound (loop, double_int::from_uhwi (desc->niter),
+ false, true);
}
else
{
- if (!desc->niter_max)
- desc->niter_max = determine_max_iter (loop, desc);
+ max = determine_max_iter (loop, desc, old_niter);
+ if (!max)
+ goto zero_iter_simplify;
+ if (!desc->infinite
+ && !desc->assumptions)
+ record_niter_bound (loop, double_int::from_uhwi (max),
+ false, true);
/* simplify_using_initial_values does a copy propagation on the registers
in the expression for the number of iterations. This prolongs life
zero_iter:
desc->const_iter = true;
desc->niter = 0;
- desc->niter_max = 0;
+ record_niter_bound (loop, double_int_zero,
+ true, true);
desc->noloop_assumptions = NULL_RTX;
desc->niter_expr = const0_rtx;
return;
{
if (flow_bb_inside_loop_p (loop, e->dest))
continue;
-
+
check_simple_exit (loop, e, &act);
if (!act.simple_p)
continue;
if (act.infinite && !desc->infinite)
continue;
}
-
+
*desc = act;
}
}
print_rtl (dump_file, desc->niter_expr);
fprintf (dump_file, "\n");
- fprintf (dump_file, " upper bound: ");
- fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC, desc->niter_max);
- fprintf (dump_file, "\n");
+ fprintf (dump_file, " upper bound: %li\n",
+ (long)get_max_loop_iterations_int (loop));
+ fprintf (dump_file, " realistic bound: %li\n",
+ (long)get_estimated_loop_iterations_int (loop));
}
else
fprintf (dump_file, "Loop %d is not simple.\n", loop->num);
if (desc)
return desc;
- desc = XNEW (struct niter_desc);
+ /* At least desc->infinite is not always initialized by
+ find_simple_loop_exit. */
+ desc = ggc_alloc_cleared_niter_desc ();
iv_analysis_loop_init (loop);
find_simple_exit (loop, desc);
- loop->aux = desc;
+ loop->simple_loop_desc = desc;
if (desc->simple_p && (desc->assumptions || desc->infinite))
{
- const char *wording;
+ const char *wording;
- /* Assume that no overflow happens and that the loop is finite.
+ /* Assume that no overflow happens and that the loop is finite.
We already warned at the tree level if we ran optimizations there. */
if (!flag_tree_loop_optimize && warn_unsafe_loop_optimizations)
{
if (desc->infinite)
{
- wording =
+ wording =
flag_unsafe_loop_optimizations
? N_("assuming that the loop is not infinite")
: N_("cannot optimize possibly infinite loops");
}
if (desc->assumptions)
{
- wording =
+ wording =
flag_unsafe_loop_optimizations
? N_("assuming that the loop counter does not overflow")
: N_("cannot optimize loop, the loop counter may overflow");
if (!desc)
return;
- free (desc);
- loop->aux = NULL;
+ ggc_free (desc);
+ loop->simple_loop_desc = NULL;
}