/* Rtl-level induction variable analysis.
- Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010
- Free Software Foundation, Inc.
+ Copyright (C) 2004-2020 Free Software Foundation, Inc.
This file is part of GCC.
The available functions are:
- iv_analyze (insn, reg, iv): Stores the description of the induction variable
- corresponding to the use of register REG in INSN to IV. Returns true if
- REG is an induction variable in INSN. false otherwise.
- If use of REG is not found in INSN, following insns are scanned (so that
- we may call this function on insn returned by get_condition).
+ iv_analyze (insn, mode, reg, iv): Stores the description of the induction
+ variable corresponding to the use of register REG in INSN to IV, given
+ that REG has mode MODE. Returns true if REG is an induction variable
+ in INSN, false otherwise. If a use of REG is not found in INSN,
+ the following insns are scanned (so that we may call this function
+ on insns returned by get_condition).
iv_analyze_result (insn, def, iv): Stores to IV the description of the iv
corresponding to DEF, which is a register defined in INSN.
- iv_analyze_expr (insn, rhs, mode, iv): Stores to IV the description of iv
+ iv_analyze_expr (insn, mode, expr, iv): Stores to IV the description of iv
corresponding to expression EXPR evaluated at INSN. All registers used by
- EXPR must also be used in INSN.
+ EXPR must also be used in INSN. MODE is the mode of EXPR.
*/
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "tm.h"
+#include "backend.h"
#include "rtl.h"
-#include "hard-reg-set.h"
-#include "obstack.h"
-#include "basic-block.h"
+#include "df.h"
+#include "memmodel.h"
+#include "emit-rtl.h"
+#include "diagnostic-core.h"
#include "cfgloop.h"
-#include "expr.h"
#include "intl.h"
-#include "diagnostic-core.h"
-#include "df.h"
-#include "hashtab.h"
#include "dumpfile.h"
+#include "rtl-iter.h"
+#include "tree-ssa-loop-niter.h"
+#include "regs.h"
+#include "function-abi.h"
/* Possible return values of iv_get_reaching_def. */
/* Information about a biv. */
-struct biv_entry
+class biv_entry
{
+public:
unsigned regno; /* The register of the biv. */
- struct rtx_iv iv; /* Value of the biv. */
+ class rtx_iv iv; /* Value of the biv. */
};
static bool clean_slate = true;
static unsigned int iv_ref_table_size = 0;
/* Table of rtx_ivs indexed by the df_ref uid field. */
-static struct rtx_iv ** iv_ref_table;
+static class rtx_iv ** iv_ref_table;
/* Induction variable stored at the reference. */
-#define DF_REF_IV(REF) iv_ref_table[DF_REF_ID(REF)]
-#define DF_REF_IV_SET(REF, IV) iv_ref_table[DF_REF_ID(REF)] = (IV)
+#define DF_REF_IV(REF) iv_ref_table[DF_REF_ID (REF)]
+#define DF_REF_IV_SET(REF, IV) iv_ref_table[DF_REF_ID (REF)] = (IV)
/* The current loop. */
-static struct loop *current_loop;
+static class loop *current_loop;
+
+/* Hashtable helper. */
+
+struct biv_entry_hasher : free_ptr_hash <biv_entry>
+{
+ typedef rtx_def *compare_type;
+ static inline hashval_t hash (const biv_entry *);
+ static inline bool equal (const biv_entry *, const rtx_def *);
+};
+
+/* Returns hash value for biv B. */
+
+inline hashval_t
+biv_entry_hasher::hash (const biv_entry *b)
+{
+ return b->regno;
+}
+
+/* Compares biv B and register R. */
+
+inline bool
+biv_entry_hasher::equal (const biv_entry *b, const rtx_def *r)
+{
+ return b->regno == REGNO (r);
+}
/* Bivs of the current loop. */
-static htab_t bivs;
+static hash_table<biv_entry_hasher> *bivs;
-static bool iv_analyze_op (rtx, rtx, struct rtx_iv *);
+static bool iv_analyze_op (rtx_insn *, scalar_int_mode, rtx, class rtx_iv *);
/* Return the RTX code corresponding to the IV extend code EXTEND. */
static inline enum rtx_code
/* Dumps information about IV to FILE. */
-extern void dump_iv_info (FILE *, struct rtx_iv *);
+extern void dump_iv_info (FILE *, class rtx_iv *);
void
-dump_iv_info (FILE *file, struct rtx_iv *iv)
+dump_iv_info (FILE *file, class rtx_iv *iv)
{
if (!iv->base)
{
fprintf (file, " (first special)");
}
-/* Generates a subreg to get the least significant part of EXPR (in mode
- INNER_MODE) to OUTER_MODE. */
-
-rtx
-lowpart_subreg (enum machine_mode outer_mode, rtx expr,
- enum machine_mode inner_mode)
-{
- return simplify_gen_subreg (outer_mode, expr, inner_mode,
- subreg_lowpart_offset (outer_mode, inner_mode));
-}
-
static void
check_iv_ref_table_size (void)
{
- if (iv_ref_table_size < DF_DEFS_TABLE_SIZE())
+ if (iv_ref_table_size < DF_DEFS_TABLE_SIZE ())
{
unsigned int new_size = DF_DEFS_TABLE_SIZE () + (DF_DEFS_TABLE_SIZE () / 4);
- iv_ref_table = XRESIZEVEC (struct rtx_iv *, iv_ref_table, new_size);
+ iv_ref_table = XRESIZEVEC (class rtx_iv *, iv_ref_table, new_size);
memset (&iv_ref_table[iv_ref_table_size], 0,
- (new_size - iv_ref_table_size) * sizeof (struct rtx_iv *));
+ (new_size - iv_ref_table_size) * sizeof (class rtx_iv *));
iv_ref_table_size = new_size;
}
}
clear_iv_info (void)
{
unsigned i, n_defs = DF_DEFS_TABLE_SIZE ();
- struct rtx_iv *iv;
+ class rtx_iv *iv;
check_iv_ref_table_size ();
for (i = 0; i < n_defs; i++)
}
}
- htab_empty (bivs);
-}
-
-/* Returns hash value for biv B. */
-
-static hashval_t
-biv_hash (const void *b)
-{
- return ((const struct biv_entry *) b)->regno;
+ bivs->empty ();
}
-/* Compares biv B and register R. */
-
-static int
-biv_eq (const void *b, const void *r)
-{
- return ((const struct biv_entry *) b)->regno == REGNO ((const_rtx) r);
-}
/* Prepare the data for an induction variable analysis of a LOOP. */
void
-iv_analysis_loop_init (struct loop *loop)
+iv_analysis_loop_init (class loop *loop)
{
- basic_block *body = get_loop_body_in_dom_order (loop), bb;
- bitmap blocks = BITMAP_ALLOC (NULL);
- unsigned i;
-
current_loop = loop;
/* Clear the information from the analysis of the previous loop. */
if (clean_slate)
{
df_set_flags (DF_EQ_NOTES + DF_DEFER_INSN_RESCAN);
- bivs = htab_create (10, biv_hash, biv_eq, free);
+ bivs = new hash_table<biv_entry_hasher> (10);
clean_slate = false;
}
else
clear_iv_info ();
- for (i = 0; i < loop->num_nodes; i++)
- {
- bb = body[i];
- bitmap_set_bit (blocks, bb->index);
- }
/* Get rid of the ud chains before processing the rescans. Then add
the problem back. */
df_remove_problem (df_chain);
df_set_flags (DF_RD_PRUNE_DEAD_DEFS);
df_chain_add_problem (DF_UD_CHAIN);
df_note_add_problem ();
- df_set_blocks (blocks);
- df_analyze ();
+ df_analyze_loop (loop);
if (dump_file)
df_dump_region (dump_file);
check_iv_ref_table_size ();
- BITMAP_FREE (blocks);
- free (body);
}
/* Finds the definition of REG that dominates loop latch and stores
{
df_ref single_rd = NULL, adef;
unsigned regno = REGNO (reg);
- struct df_rd_bb_info *bb_info = DF_RD_BB_INFO (current_loop->latch);
+ class df_rd_bb_info *bb_info = DF_RD_BB_INFO (current_loop->latch);
for (adef = DF_REG_DEF_CHAIN (regno); adef; adef = DF_REF_NEXT_REG (adef))
{
/* Gets definition of REG reaching its use in INSN and stores it to DEF. */
static enum iv_grd_result
-iv_get_reaching_def (rtx insn, rtx reg, df_ref *def)
+iv_get_reaching_def (rtx_insn *insn, rtx reg, df_ref *def)
{
df_ref use, adef;
basic_block def_bb, use_bb;
- rtx def_insn;
+ rtx_insn *def_insn;
bool dom_p;
*def = NULL;
consistency with other iv manipulation functions that may fail). */
static bool
-iv_constant (struct rtx_iv *iv, rtx cst, enum machine_mode mode)
+iv_constant (class rtx_iv *iv, scalar_int_mode mode, rtx cst)
{
- if (mode == VOIDmode)
- mode = GET_MODE (cst);
-
iv->mode = mode;
iv->base = cst;
iv->step = const0_rtx;
/* Evaluates application of subreg to MODE on IV. */
static bool
-iv_subreg (struct rtx_iv *iv, enum machine_mode mode)
+iv_subreg (class rtx_iv *iv, scalar_int_mode mode)
{
/* If iv is invariant, just calculate the new value. */
if (iv->step == const0_rtx
&& !iv->first_special)
{
rtx val = get_iv_value (iv, const0_rtx);
- val = lowpart_subreg (mode, val, iv->extend_mode);
+ val = lowpart_subreg (mode, val,
+ iv->extend == IV_UNKNOWN_EXTEND
+ ? iv->mode : iv->extend_mode);
iv->base = val;
iv->extend = IV_UNKNOWN_EXTEND;
/* Evaluates application of EXTEND to MODE on IV. */
static bool
-iv_extend (struct rtx_iv *iv, enum iv_extend_code extend, enum machine_mode mode)
+iv_extend (class rtx_iv *iv, enum iv_extend_code extend, scalar_int_mode mode)
{
/* If iv is invariant, just calculate the new value. */
if (iv->step == const0_rtx
&& !iv->first_special)
{
rtx val = get_iv_value (iv, const0_rtx);
+ if (iv->extend_mode != iv->mode
+ && iv->extend != IV_UNKNOWN_EXTEND
+ && iv->extend != extend)
+ val = lowpart_subreg (iv->mode, val, iv->extend_mode);
val = simplify_gen_unary (iv_extend_to_rtx_code (extend), mode,
- val, iv->extend_mode);
+ val,
+ iv->extend == extend
+ ? iv->extend_mode : iv->mode);
iv->base = val;
iv->extend = IV_UNKNOWN_EXTEND;
iv->mode = iv->extend_mode = mode;
/* Evaluates negation of IV. */
static bool
-iv_neg (struct rtx_iv *iv)
+iv_neg (class rtx_iv *iv)
{
if (iv->extend == IV_UNKNOWN_EXTEND)
{
/* Evaluates addition or subtraction (according to OP) of IV1 to IV0. */
static bool
-iv_add (struct rtx_iv *iv0, struct rtx_iv *iv1, enum rtx_code op)
+iv_add (class rtx_iv *iv0, class rtx_iv *iv1, enum rtx_code op)
{
- enum machine_mode mode;
+ scalar_int_mode mode;
rtx arg;
/* Extend the constant to extend_mode of the other operand if necessary. */
/* Evaluates multiplication of IV by constant CST. */
static bool
-iv_mult (struct rtx_iv *iv, rtx mby)
+iv_mult (class rtx_iv *iv, rtx mby)
{
- enum machine_mode mode = iv->extend_mode;
+ scalar_int_mode mode = iv->extend_mode;
if (GET_MODE (mby) != VOIDmode
&& GET_MODE (mby) != mode)
/* Evaluates shift of IV by constant CST. */
static bool
-iv_shift (struct rtx_iv *iv, rtx mby)
+iv_shift (class rtx_iv *iv, rtx mby)
{
- enum machine_mode mode = iv->extend_mode;
+ scalar_int_mode mode = iv->extend_mode;
if (GET_MODE (mby) != VOIDmode
&& GET_MODE (mby) != mode)
at get_biv_step. */
static bool
-get_biv_step_1 (df_ref def, rtx reg,
- rtx *inner_step, enum machine_mode *inner_mode,
- enum iv_extend_code *extend, enum machine_mode outer_mode,
+get_biv_step_1 (df_ref def, scalar_int_mode outer_mode, rtx reg,
+ rtx *inner_step, scalar_int_mode *inner_mode,
+ enum iv_extend_code *extend,
rtx *outer_step)
{
rtx set, rhs, op0 = NULL_RTX, op1 = NULL_RTX;
- rtx next, nextr, tmp;
+ rtx next, nextr;
enum rtx_code code;
- rtx insn = DF_REF_INSN (def);
+ rtx_insn *insn = DF_REF_INSN (def);
df_ref next_def;
enum iv_grd_result res;
op1 = XEXP (rhs, 1);
if (code == PLUS && CONSTANT_P (op0))
- {
- tmp = op0; op0 = op1; op1 = tmp;
- }
+ std::swap (op0, op1);
if (!simple_reg_p (op0)
|| !CONSTANT_P (op1))
*inner_mode = outer_mode;
*outer_step = const0_rtx;
}
- else if (!get_biv_step_1 (next_def, reg,
- inner_step, inner_mode, extend, outer_mode,
+ else if (!get_biv_step_1 (next_def, outer_mode, reg,
+ inner_step, inner_mode, extend,
outer_step))
return false;
if (GET_CODE (next) == SUBREG)
{
- enum machine_mode amode = GET_MODE (next);
-
- if (GET_MODE_SIZE (amode) > GET_MODE_SIZE (*inner_mode))
+ scalar_int_mode amode;
+ if (!is_a <scalar_int_mode> (GET_MODE (next), &amode)
+ || GET_MODE_SIZE (amode) > GET_MODE_SIZE (*inner_mode))
return false;
*inner_mode = amode;
LAST_DEF is the definition of REG that dominates loop latch. */
static bool
-get_biv_step (df_ref last_def, rtx reg, rtx *inner_step,
- enum machine_mode *inner_mode, enum iv_extend_code *extend,
- enum machine_mode *outer_mode, rtx *outer_step)
+get_biv_step (df_ref last_def, scalar_int_mode outer_mode, rtx reg,
+ rtx *inner_step, scalar_int_mode *inner_mode,
+ enum iv_extend_code *extend, rtx *outer_step)
{
- *outer_mode = GET_MODE (reg);
-
- if (!get_biv_step_1 (last_def, reg,
- inner_step, inner_mode, extend, *outer_mode,
+ if (!get_biv_step_1 (last_def, outer_mode, reg,
+ inner_step, inner_mode, extend,
outer_step))
return false;
- gcc_assert ((*inner_mode == *outer_mode) != (*extend != IV_UNKNOWN_EXTEND));
- gcc_assert (*inner_mode != *outer_mode || *outer_step == const0_rtx);
+ gcc_assert ((*inner_mode == outer_mode) != (*extend != IV_UNKNOWN_EXTEND));
+ gcc_assert (*inner_mode != outer_mode || *outer_step == const0_rtx);
return true;
}
/* Records information that DEF is induction variable IV. */
static void
-record_iv (df_ref def, struct rtx_iv *iv)
+record_iv (df_ref def, class rtx_iv *iv)
{
- struct rtx_iv *recorded_iv = XNEW (struct rtx_iv);
+ class rtx_iv *recorded_iv = XNEW (class rtx_iv);
*recorded_iv = *iv;
check_iv_ref_table_size ();
IV and return true. Otherwise return false. */
static bool
-analyzed_for_bivness_p (rtx def, struct rtx_iv *iv)
+analyzed_for_bivness_p (rtx def, class rtx_iv *iv)
{
- struct biv_entry *biv =
- (struct biv_entry *) htab_find_with_hash (bivs, def, REGNO (def));
+ class biv_entry *biv = bivs->find_with_hash (def, REGNO (def));
if (!biv)
return false;
}
static void
-record_biv (rtx def, struct rtx_iv *iv)
+record_biv (rtx def, class rtx_iv *iv)
{
- struct biv_entry *biv = XNEW (struct biv_entry);
- void **slot = htab_find_slot_with_hash (bivs, def, REGNO (def), INSERT);
+ class biv_entry *biv = XNEW (class biv_entry);
+ biv_entry **slot = bivs->find_slot_with_hash (def, REGNO (def), INSERT);
biv->regno = REGNO (def);
biv->iv = *iv;
}
/* Determines whether DEF is a biv and if so, stores its description
- to *IV. */
+ to *IV. OUTER_MODE is the mode of DEF. */
static bool
-iv_analyze_biv (rtx def, struct rtx_iv *iv)
+iv_analyze_biv (scalar_int_mode outer_mode, rtx def, class rtx_iv *iv)
{
rtx inner_step, outer_step;
- enum machine_mode inner_mode, outer_mode;
+ scalar_int_mode inner_mode;
enum iv_extend_code extend;
df_ref last_def;
if (!CONSTANT_P (def))
return false;
- return iv_constant (iv, def, VOIDmode);
+ return iv_constant (iv, outer_mode, def);
}
if (!latch_dominating_def (def, &last_def))
}
if (!last_def)
- return iv_constant (iv, def, VOIDmode);
+ return iv_constant (iv, outer_mode, def);
if (analyzed_for_bivness_p (def, iv))
{
return iv->base != NULL_RTX;
}
- if (!get_biv_step (last_def, def, &inner_step, &inner_mode, &extend,
- &outer_mode, &outer_step))
+ if (!get_biv_step (last_def, outer_mode, def, &inner_step, &inner_mode,
+ &extend, &outer_step))
{
iv->base = NULL_RTX;
goto end;
The mode of the induction variable is MODE. */
bool
-iv_analyze_expr (rtx insn, rtx rhs, enum machine_mode mode, struct rtx_iv *iv)
+iv_analyze_expr (rtx_insn *insn, scalar_int_mode mode, rtx rhs,
+ class rtx_iv *iv)
{
- rtx mby = NULL_RTX, tmp;
+ rtx mby = NULL_RTX;
rtx op0 = NULL_RTX, op1 = NULL_RTX;
- struct rtx_iv iv0, iv1;
+ class rtx_iv iv0, iv1;
enum rtx_code code = GET_CODE (rhs);
- enum machine_mode omode = mode;
+ scalar_int_mode omode = mode;
- iv->mode = VOIDmode;
iv->base = NULL_RTX;
iv->step = NULL_RTX;
if (CONSTANT_P (rhs)
|| REG_P (rhs)
|| code == SUBREG)
- {
- if (!iv_analyze_op (insn, rhs, iv))
- return false;
-
- if (iv->mode == VOIDmode)
- {
- iv->mode = mode;
- iv->extend_mode = mode;
- }
-
- return true;
- }
+ return iv_analyze_op (insn, mode, rhs, iv);
switch (code)
{
case ZERO_EXTEND:
case NEG:
op0 = XEXP (rhs, 0);
- omode = GET_MODE (op0);
+ /* We don't know how many bits there are in a sign-extended constant. */
+ if (!is_a <scalar_int_mode> (GET_MODE (op0), &omode))
+ return false;
break;
case PLUS:
op0 = XEXP (rhs, 0);
mby = XEXP (rhs, 1);
if (!CONSTANT_P (mby))
- {
- tmp = op0;
- op0 = mby;
- mby = tmp;
- }
+ std::swap (op0, mby);
if (!CONSTANT_P (mby))
return false;
break;
}
if (op0
- && !iv_analyze_expr (insn, op0, omode, &iv0))
+ && !iv_analyze_expr (insn, omode, op0, &iv0))
return false;
if (op1
- && !iv_analyze_expr (insn, op1, omode, &iv1))
+ && !iv_analyze_expr (insn, omode, op1, &iv1))
return false;
switch (code)
/* Analyzes iv DEF and stores the result to *IV. */
static bool
-iv_analyze_def (df_ref def, struct rtx_iv *iv)
+iv_analyze_def (df_ref def, class rtx_iv *iv)
{
- rtx insn = DF_REF_INSN (def);
+ rtx_insn *insn = DF_REF_INSN (def);
rtx reg = DF_REF_REG (def);
rtx set, rhs;
return iv->base != NULL_RTX;
}
- iv->mode = VOIDmode;
iv->base = NULL_RTX;
iv->step = NULL_RTX;
- if (!REG_P (reg))
+ scalar_int_mode mode;
+ if (!REG_P (reg) || !is_a <scalar_int_mode> (GET_MODE (reg), &mode))
return false;
set = single_set (insn);
else
rhs = SET_SRC (set);
- iv_analyze_expr (insn, rhs, GET_MODE (reg), iv);
+ iv_analyze_expr (insn, mode, rhs, iv);
record_iv (def, iv);
if (dump_file)
return iv->base != NULL_RTX;
}
-/* Analyzes operand OP of INSN and stores the result to *IV. */
+/* Analyzes operand OP of INSN and stores the result to *IV. MODE is the
+ mode of OP. */
static bool
-iv_analyze_op (rtx insn, rtx op, struct rtx_iv *iv)
+iv_analyze_op (rtx_insn *insn, scalar_int_mode mode, rtx op, class rtx_iv *iv)
{
df_ref def = NULL;
enum iv_grd_result res;
res = GRD_INVARIANT;
else if (GET_CODE (op) == SUBREG)
{
- if (!subreg_lowpart_p (op))
+ scalar_int_mode inner_mode;
+ if (!subreg_lowpart_p (op)
+ || !is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &inner_mode))
return false;
- if (!iv_analyze_op (insn, SUBREG_REG (op), iv))
+ if (!iv_analyze_op (insn, inner_mode, SUBREG_REG (op), iv))
return false;
- return iv_subreg (iv, GET_MODE (op));
+ return iv_subreg (iv, mode);
}
else
{
if (res == GRD_INVARIANT)
{
- iv_constant (iv, op, VOIDmode);
+ iv_constant (iv, mode, op);
if (dump_file)
{
}
if (res == GRD_MAYBE_BIV)
- return iv_analyze_biv (op, iv);
+ return iv_analyze_biv (mode, op, iv);
return iv_analyze_def (def, iv);
}
-/* Analyzes value VAL at INSN and stores the result to *IV. */
+/* Analyzes value VAL at INSN and stores the result to *IV. MODE is the
+ mode of VAL. */
bool
-iv_analyze (rtx insn, rtx val, struct rtx_iv *iv)
+iv_analyze (rtx_insn *insn, scalar_int_mode mode, rtx val, class rtx_iv *iv)
{
rtx reg;
insn = NEXT_INSN (insn);
}
- return iv_analyze_op (insn, val, iv);
+ return iv_analyze_op (insn, mode, val, iv);
}
/* Analyzes definition of DEF in INSN and stores the result to IV. */
bool
-iv_analyze_result (rtx insn, rtx def, struct rtx_iv *iv)
+iv_analyze_result (rtx_insn *insn, rtx def, class rtx_iv *iv)
{
df_ref adef;
}
/* Checks whether definition of register REG in INSN is a basic induction
- variable. IV analysis must have been initialized (via a call to
+ variable. MODE is the mode of REG.
+
+ IV analysis must have been initialized (via a call to
iv_analysis_loop_init) for this function to produce a result. */
bool
-biv_p (rtx insn, rtx reg)
+biv_p (rtx_insn *insn, scalar_int_mode mode, rtx reg)
{
- struct rtx_iv iv;
+ class rtx_iv iv;
df_ref def, last_def;
if (!simple_reg_p (reg))
if (last_def != def)
return false;
- if (!iv_analyze_biv (reg, &iv))
+ if (!iv_analyze_biv (mode, reg, &iv))
return false;
return iv.step != const0_rtx;
/* Calculates value of IV at ITERATION-th iteration. */
rtx
-get_iv_value (struct rtx_iv *iv, rtx iteration)
+get_iv_value (class rtx_iv *iv, rtx iteration)
{
rtx val;
clear_iv_info ();
clean_slate = true;
df_finish_pass (true);
- htab_delete (bivs);
+ delete bivs;
+ bivs = NULL;
free (iv_ref_table);
iv_ref_table = NULL;
iv_ref_table_size = 0;
- bivs = NULL;
}
}
/* Computes inverse to X modulo (1 << MOD). */
-static unsigned HOST_WIDEST_INT
-inverse (unsigned HOST_WIDEST_INT x, int mod)
+static uint64_t
+inverse (uint64_t x, int mod)
{
- unsigned HOST_WIDEST_INT mask =
- ((unsigned HOST_WIDEST_INT) 1 << (mod - 1) << 1) - 1;
- unsigned HOST_WIDEST_INT rslt = 1;
+ uint64_t mask =
+ ((uint64_t) 1 << (mod - 1) << 1) - 1;
+ uint64_t rslt = 1;
int i;
for (i = 0; i < mod - 1; i++)
return rslt;
}
-/* Checks whether register *REG is in set ALT. Callback for for_each_rtx. */
+/* Checks whether any register in X is in set ALT. */
-static int
-altered_reg_used (rtx *reg, void *alt)
+static bool
+altered_reg_used (const_rtx x, bitmap alt)
{
- if (!REG_P (*reg))
- return 0;
-
- return REGNO_REG_SET_P ((bitmap) alt, REGNO (*reg));
+ subrtx_iterator::array_type array;
+ FOR_EACH_SUBRTX (iter, array, x, NONCONST)
+ {
+ const_rtx x = *iter;
+ if (REG_P (x) && REGNO_REG_SET_P (alt, REGNO (x)))
+ return true;
+ }
+ return false;
}
/* Marks registers altered by EXPR in set ALT. */
}
}
-/* If REG has a single definition, replace it with its known value in EXPR.
- Callback for for_each_rtx. */
+/* If REGNO has a single definition, return its known value, otherwise return
+ null. */
-static int
-replace_single_def_regs (rtx *reg, void *expr1)
+static rtx
+find_single_def_src (unsigned int regno)
{
- unsigned regno;
- df_ref adef;
- rtx set, src;
- rtx *expr = (rtx *)expr1;
+ rtx src = NULL_RTX;
- if (!REG_P (*reg))
- return 0;
-
- regno = REGNO (*reg);
- for (;;)
+ /* Don't look through unbounded number of single definition REG copies,
+ there might be loops for sources with uninitialized variables. */
+ for (int cnt = 0; cnt < 128; cnt++)
{
- rtx note;
- adef = DF_REG_DEF_CHAIN (regno);
+ df_ref adef = DF_REG_DEF_CHAIN (regno);
if (adef == NULL || DF_REF_NEXT_REG (adef) != NULL
- || DF_REF_IS_ARTIFICIAL (adef))
- return -1;
+ || DF_REF_IS_ARTIFICIAL (adef))
+ return NULL_RTX;
- set = single_set (DF_REF_INSN (adef));
+ rtx set = single_set (DF_REF_INSN (adef));
if (set == NULL || !REG_P (SET_DEST (set))
|| REGNO (SET_DEST (set)) != regno)
- return -1;
-
- note = find_reg_equal_equiv_note (DF_REF_INSN (adef));
+ return NULL_RTX;
+ rtx note = find_reg_equal_equiv_note (DF_REF_INSN (adef));
if (note && function_invariant_p (XEXP (note, 0)))
{
src = XEXP (note, 0);
break;
}
if (!function_invariant_p (src))
- return -1;
+ return NULL_RTX;
- *expr = simplify_replace_rtx (*expr, *reg, src);
- return 1;
+ return src;
+}
+
+/* If any registers in *EXPR that have a single definition, try to replace
+ them with the known-equivalent values. */
+
+static void
+replace_single_def_regs (rtx *expr)
+{
+ subrtx_var_iterator::array_type array;
+ repeat:
+ FOR_EACH_SUBRTX_VAR (iter, array, *expr, NONCONST)
+ {
+ rtx x = *iter;
+ if (REG_P (x))
+ if (rtx new_x = find_single_def_src (REGNO (x)))
+ {
+ *expr = simplify_replace_rtx (*expr, x, new_x);
+ goto repeat;
+ }
+ }
}
/* A subroutine of simplify_using_initial_values, this function examines INSN
the set; return false otherwise. */
static bool
-suitable_set_for_replacement (rtx insn, rtx *dest, rtx *src)
+suitable_set_for_replacement (rtx_insn *insn, rtx *dest, rtx *src)
{
rtx set = single_set (insn);
rtx lhs = NULL_RTX, rhs;
*expr = simplify_replace_rtx (*expr, dest, src);
if (old == *expr)
return;
- while (for_each_rtx (expr, replace_single_def_regs, expr) != 0)
- continue;
+ replace_single_def_regs (expr);
}
/* Checks whether A implies B. */
static bool
implies_p (rtx a, rtx b)
{
- rtx op0, op1, opb0, opb1, r;
- enum machine_mode mode;
+ rtx op0, op1, opb0, opb1;
+ machine_mode mode;
+
+ if (rtx_equal_p (a, b))
+ return true;
if (GET_CODE (a) == EQ)
{
op0 = XEXP (a, 0);
op1 = XEXP (a, 1);
- if (REG_P (op0))
+ if (REG_P (op0)
+ || (GET_CODE (op0) == SUBREG
+ && REG_P (SUBREG_REG (op0))))
{
- r = simplify_replace_rtx (b, op0, op1);
+ rtx r = simplify_replace_rtx (b, op0, op1);
if (r == const_true_rtx)
return true;
}
- if (REG_P (op1))
+ if (REG_P (op1)
+ || (GET_CODE (op1) == SUBREG
+ && REG_P (SUBREG_REG (op1))))
{
- r = simplify_replace_rtx (b, op1, op0);
+ rtx r = simplify_replace_rtx (b, op1, op0);
if (r == const_true_rtx)
return true;
}
{
if (GET_CODE (a) == GT)
- {
- r = op0;
- op0 = op1;
- op1 = r;
- }
+ std::swap (op0, op1);
if (GET_CODE (b) == GE)
- {
- r = opb0;
- opb0 = opb1;
- opb1 = r;
- }
+ std::swap (opb0, opb1);
if (SCALAR_INT_MODE_P (mode)
&& rtx_equal_p (op1, opb1)
&& CONST_INT_P (XEXP (opb0, 1))
/* Avoid overflows. */
&& ((unsigned HOST_WIDE_INT) INTVAL (XEXP (opb0, 1))
- != ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)))
+ != (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
&& rtx_equal_p (XEXP (opb0, 0), op0))
return INTVAL (op1) == -INTVAL (XEXP (opb0, 1));
if (GET_CODE (b) == GEU
&& CONST_INT_P (XEXP (opb0, 1))
/* Avoid overflows. */
&& ((unsigned HOST_WIDE_INT) INTVAL (XEXP (opb0, 1))
- != ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)))
+ != (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
&& rtx_equal_p (XEXP (opb0, 0), op0))
return INTVAL (op1) == -INTVAL (XEXP (opb0, 1));
}
rtx
canon_condition (rtx cond)
{
- rtx tem;
rtx op0, op1;
enum rtx_code code;
- enum machine_mode mode;
+ machine_mode mode;
code = GET_CODE (cond);
op0 = XEXP (cond, 0);
if (swap_commutative_operands_p (op0, op1))
{
code = swap_condition (code);
- tem = op0;
- op0 = op1;
- op1 = tem;
+ std::swap (op0, op1);
}
mode = GET_MODE (op0);
mode = GET_MODE (op1);
gcc_assert (mode != VOIDmode);
- if (CONST_INT_P (op1)
- && GET_MODE_CLASS (mode) != MODE_CC
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ if (CONST_SCALAR_INT_P (op1) && GET_MODE_CLASS (mode) != MODE_CC)
{
- HOST_WIDE_INT const_val = INTVAL (op1);
- unsigned HOST_WIDE_INT uconst_val = const_val;
- unsigned HOST_WIDE_INT max_val
- = (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode);
+ rtx_mode_t const_val (op1, mode);
switch (code)
{
case LE:
- if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
- code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
+ if (wi::ne_p (const_val, wi::max_value (mode, SIGNED)))
+ {
+ code = LT;
+ op1 = immed_wide_int_const (wi::add (const_val, 1), mode);
+ }
break;
- /* When cross-compiling, const_val might be sign-extended from
- BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
case GE:
- if ((HOST_WIDE_INT) (const_val & max_val)
- != (((HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
- code = GT, op1 = gen_int_mode (const_val - 1, mode);
+ if (wi::ne_p (const_val, wi::min_value (mode, SIGNED)))
+ {
+ code = GT;
+ op1 = immed_wide_int_const (wi::sub (const_val, 1), mode);
+ }
break;
case LEU:
- if (uconst_val < max_val)
- code = LTU, op1 = gen_int_mode (uconst_val + 1, mode);
+ if (wi::ne_p (const_val, -1))
+ {
+ code = LTU;
+ op1 = immed_wide_int_const (wi::add (const_val, 1), mode);
+ }
break;
case GEU:
- if (uconst_val != 0)
- code = GTU, op1 = gen_int_mode (uconst_val - 1, mode);
+ if (wi::ne_p (const_val, 0))
+ {
+ code = GTU;
+ op1 = immed_wide_int_const (wi::sub (const_val, 1), mode);
+ }
break;
default:
return cond;
}
+/* Reverses CONDition; returns NULL if we cannot. */
+
+static rtx
+reversed_condition (rtx cond)
+{
+ enum rtx_code reversed;
+ reversed = reversed_comparison_code (cond, NULL);
+ if (reversed == UNKNOWN)
+ return NULL_RTX;
+ else
+ return gen_rtx_fmt_ee (reversed,
+ GET_MODE (cond), XEXP (cond, 0),
+ XEXP (cond, 1));
+}
+
/* Tries to use the fact that COND holds to simplify EXPR. ALTERED is the
set of altered regs. */
/* If some register gets altered later, we do not really speak about its
value at the time of comparison. */
- if (altered
- && for_each_rtx (&cond, altered_reg_used, altered))
+ if (altered && altered_reg_used (cond, altered))
return;
if (GET_CODE (cond) == EQ
is a list, its elements are assumed to be combined using OP. */
static void
-simplify_using_initial_values (struct loop *loop, enum rtx_code op, rtx *expr)
+simplify_using_initial_values (class loop *loop, enum rtx_code op, rtx *expr)
{
bool expression_valid;
- rtx head, tail, insn, cond_list, last_valid_expr;
+ rtx head, tail, last_valid_expr;
+ rtx_expr_list *cond_list;
+ rtx_insn *insn;
rtx neutral, aggr;
regset altered, this_altered;
edge e;
gcc_assert (op == UNKNOWN);
- for (;;)
- if (for_each_rtx (expr, replace_single_def_regs, expr) == 0)
- break;
+ replace_single_def_regs (expr);
if (CONSTANT_P (*expr))
return;
e = loop_preheader_edge (loop);
- if (e->src == ENTRY_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
return;
altered = ALLOC_REG_SET (®_obstack);
expression_valid = true;
last_valid_expr = *expr;
- cond_list = NULL_RTX;
+ cond_list = NULL;
while (1)
{
insn = BB_END (e->src);
continue;
CLEAR_REG_SET (this_altered);
- note_stores (PATTERN (insn), mark_altered, this_altered);
+ note_stores (insn, mark_altered, this_altered);
if (CALL_P (insn))
{
- int i;
-
- /* Kill all call clobbered registers. */
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
- SET_REGNO_REG_SET (this_altered, i);
+ /* Kill all registers that might be clobbered by the call.
+ We don't track modes of hard registers, so we need to be
+ conservative and assume that partial kills are full kills. */
+ function_abi callee_abi = insn_callee_abi (insn);
+ IOR_REG_SET_HRS (this_altered,
+ callee_abi.full_and_partial_reg_clobbers ());
}
if (suitable_set_for_replacement (insn, &dest, &src))
{
- rtx *pnote, *pnote_next;
+ rtx_expr_list **pnote, **pnote_next;
replace_in_expr (expr, dest, src);
if (CONSTANT_P (*expr))
for (pnote = &cond_list; *pnote; pnote = pnote_next)
{
- rtx note = *pnote;
+ rtx_expr_list *note = *pnote;
rtx old_cond = XEXP (note, 0);
- pnote_next = &XEXP (note, 1);
+ pnote_next = (rtx_expr_list **)&XEXP (note, 1);
replace_in_expr (&XEXP (note, 0), dest, src);
/* We can no longer use a condition that has been simplified
}
}
else
- /* If we did not use this insn to make a replacement, any overlap
- between stores in this insn and our expression will cause the
- expression to become invalid. */
- if (for_each_rtx (expr, altered_reg_used, this_altered))
- goto out;
+ {
+ rtx_expr_list **pnote, **pnote_next;
+
+ /* If we did not use this insn to make a replacement, any overlap
+ between stores in this insn and our expression will cause the
+ expression to become invalid. */
+ if (altered_reg_used (*expr, this_altered))
+ goto out;
+
+ /* Likewise for the conditions. */
+ for (pnote = &cond_list; *pnote; pnote = pnote_next)
+ {
+ rtx_expr_list *note = *pnote;
+ rtx old_cond = XEXP (note, 0);
+
+ pnote_next = (rtx_expr_list **)&XEXP (note, 1);
+ if (altered_reg_used (old_cond, this_altered))
+ {
+ *pnote = *pnote_next;
+ pnote_next = pnote;
+ free_EXPR_LIST_node (note);
+ }
+ }
+ }
if (CONSTANT_P (*expr))
goto out;
can't return it to the caller. However, it is still valid for
further simplification, so keep searching to see if we can
eventually turn it into a constant. */
- if (for_each_rtx (expr, altered_reg_used, altered))
+ if (altered_reg_used (*expr, altered))
expression_valid = false;
if (expression_valid)
last_valid_expr = *expr;
}
if (!single_pred_p (e->src)
- || single_pred (e->src) == ENTRY_BLOCK_PTR)
+ || single_pred (e->src) == ENTRY_BLOCK_PTR_FOR_FN (cfun))
break;
e = single_pred_edge (e->src);
}
is SIGNED_P to DESC. */
static void
-shorten_into_mode (struct rtx_iv *iv, enum machine_mode mode,
- enum rtx_code cond, bool signed_p, struct niter_desc *desc)
+shorten_into_mode (class rtx_iv *iv, scalar_int_mode mode,
+ enum rtx_code cond, bool signed_p, class niter_desc *desc)
{
rtx mmin, mmax, cond_over, cond_under;
some assumptions to DESC). */
static bool
-canonicalize_iv_subregs (struct rtx_iv *iv0, struct rtx_iv *iv1,
- enum rtx_code cond, struct niter_desc *desc)
+canonicalize_iv_subregs (class rtx_iv *iv0, class rtx_iv *iv1,
+ enum rtx_code cond, class niter_desc *desc)
{
- enum machine_mode comp_mode;
+ scalar_int_mode comp_mode;
bool signed_p;
/* If the ivs behave specially in the first iteration, or are
a number of fields in DESC already filled in. OLD_NITER is the original
expression for the number of iterations, before we tried to simplify it. */
-static unsigned HOST_WIDEST_INT
-determine_max_iter (struct loop *loop, struct niter_desc *desc, rtx old_niter)
+static uint64_t
+determine_max_iter (class loop *loop, class niter_desc *desc, rtx old_niter)
{
rtx niter = desc->niter_expr;
rtx mmin, mmax, cmp;
- unsigned HOST_WIDEST_INT nmax, inc;
+ uint64_t nmax, inc;
+ uint64_t andmax = 0;
+
+ /* We used to look for constant operand 0 of AND,
+ but canonicalization should always make this impossible. */
+ gcc_checking_assert (GET_CODE (niter) != AND
+ || !CONST_INT_P (XEXP (niter, 0)));
if (GET_CODE (niter) == AND
- && CONST_INT_P (XEXP (niter, 0)))
+ && CONST_INT_P (XEXP (niter, 1)))
{
- nmax = INTVAL (XEXP (niter, 0));
- if (!(nmax & (nmax + 1)))
- return nmax;
+ andmax = UINTVAL (XEXP (niter, 1));
+ niter = XEXP (niter, 0);
}
get_mode_bounds (desc->mode, desc->signed_p, desc->mode, &mmin, &mmax);
- nmax = INTVAL (mmax) - INTVAL (mmin);
+ nmax = UINTVAL (mmax) - UINTVAL (mmin);
if (GET_CODE (niter) == UDIV)
{
if (dump_file)
fprintf (dump_file, ";; improved upper bound by one.\n");
}
- return nmax / inc;
+ nmax /= inc;
+ if (andmax)
+ nmax = MIN (nmax, andmax);
+ if (dump_file)
+ fprintf (dump_file, ";; Determined upper bound %" PRId64".\n",
+ nmax);
+ return nmax;
}
/* Computes number of iterations of the CONDITION in INSN in LOOP and stores
(basically its rtl version), complicated by things like subregs. */
static void
-iv_number_of_iterations (struct loop *loop, rtx insn, rtx condition,
- struct niter_desc *desc)
+iv_number_of_iterations (class loop *loop, rtx_insn *insn, rtx condition,
+ class niter_desc *desc)
{
rtx op0, op1, delta, step, bound, may_xform, tmp, tmp0, tmp1;
- struct rtx_iv iv0, iv1, tmp_iv;
+ class rtx_iv iv0, iv1;
rtx assumption, may_not_xform;
enum rtx_code cond;
- enum machine_mode mode, comp_mode;
+ machine_mode nonvoid_mode;
+ scalar_int_mode comp_mode;
rtx mmin, mmax, mode_mmin, mode_mmax;
- unsigned HOST_WIDEST_INT s, size, d, inv, max;
- HOST_WIDEST_INT up, down, inc, step_val;
+ uint64_t s, size, d, inv, max, up, down;
+ int64_t inc, step_val;
int was_sharp = false;
rtx old_niter;
bool step_is_pow2;
desc->const_iter = false;
desc->niter_expr = NULL_RTX;
- desc->niter_max = 0;
- if (loop->any_upper_bound
- && loop->nb_iterations_upper_bound.fits_uhwi ())
- desc->niter_max = loop->nb_iterations_upper_bound.low;
cond = GET_CODE (condition);
gcc_assert (COMPARISON_P (condition));
- mode = GET_MODE (XEXP (condition, 0));
- if (mode == VOIDmode)
- mode = GET_MODE (XEXP (condition, 1));
+ nonvoid_mode = GET_MODE (XEXP (condition, 0));
+ if (nonvoid_mode == VOIDmode)
+ nonvoid_mode = GET_MODE (XEXP (condition, 1));
/* The constant comparisons should be folded. */
- gcc_assert (mode != VOIDmode);
+ gcc_assert (nonvoid_mode != VOIDmode);
/* We only handle integers or pointers. */
- if (GET_MODE_CLASS (mode) != MODE_INT
- && GET_MODE_CLASS (mode) != MODE_PARTIAL_INT)
+ scalar_int_mode mode;
+ if (!is_a <scalar_int_mode> (nonvoid_mode, &mode))
goto fail;
op0 = XEXP (condition, 0);
- if (!iv_analyze (insn, op0, &iv0))
+ if (!iv_analyze (insn, mode, op0, &iv0))
goto fail;
- if (iv0.extend_mode == VOIDmode)
- iv0.mode = iv0.extend_mode = mode;
op1 = XEXP (condition, 1);
- if (!iv_analyze (insn, op1, &iv1))
+ if (!iv_analyze (insn, mode, op1, &iv1))
goto fail;
- if (iv1.extend_mode == VOIDmode)
- iv1.mode = iv1.extend_mode = mode;
if (GET_MODE_BITSIZE (iv0.extend_mode) > HOST_BITS_PER_WIDE_INT
|| GET_MODE_BITSIZE (iv1.extend_mode) > HOST_BITS_PER_WIDE_INT)
case GT:
case GEU:
case GTU:
- tmp_iv = iv0; iv0 = iv1; iv1 = tmp_iv;
+ std::swap (iv0, iv1);
cond = swap_condition (cond);
break;
case NE:
comp_mode = iv0.extend_mode;
mode = iv0.mode;
- size = GET_MODE_BITSIZE (mode);
+ size = GET_MODE_PRECISION (mode);
get_mode_bounds (mode, (cond == LE || cond == LT), comp_mode, &mmin, &mmax);
mode_mmin = lowpart_subreg (mode, mmin, comp_mode);
mode_mmax = lowpart_subreg (mode, mmax, comp_mode);
iv1.step = const0_rtx;
}
+ iv0.step = lowpart_subreg (mode, iv0.step, comp_mode);
+ iv1.step = lowpart_subreg (mode, iv1.step, comp_mode);
+
/* This is either infinite loop or the one that ends immediately, depending
on initial values. Unswitching should remove this kind of conditions. */
if (iv0.step == const0_rtx && iv1.step == const0_rtx)
step = simplify_gen_unary (NEG, comp_mode, iv1.step, comp_mode);
else
step = iv0.step;
+ step = lowpart_subreg (mode, step, comp_mode);
delta = simplify_gen_binary (MINUS, comp_mode, iv1.base, iv0.base);
delta = lowpart_subreg (mode, delta, comp_mode);
delta = simplify_gen_binary (UMOD, mode, delta, step);
? iv0.base
: mode_mmin);
max = (up - down) / inc + 1;
- if (!desc->niter_max
- || max < desc->niter_max)
- desc->niter_max = max;
+ if (!desc->infinite
+ && !desc->assumptions)
+ record_niter_bound (loop, max, false, true);
if (iv0.step == const0_rtx)
{
iv1.step = const0_rtx;
if (INTVAL (iv0.step) < 0)
{
- iv0.step = simplify_gen_unary (NEG, comp_mode, iv0.step, mode);
- iv1.base = simplify_gen_unary (NEG, comp_mode, iv1.base, mode);
+ iv0.step = simplify_gen_unary (NEG, comp_mode, iv0.step, comp_mode);
+ iv1.base = simplify_gen_unary (NEG, comp_mode, iv1.base, comp_mode);
}
iv0.step = lowpart_subreg (mode, iv0.step, comp_mode);
d *= 2;
size--;
}
- bound = GEN_INT (((unsigned HOST_WIDEST_INT) 1 << (size - 1 ) << 1) - 1);
+ bound = GEN_INT (((uint64_t) 1 << (size - 1 ) << 1) - 1);
tmp1 = lowpart_subreg (mode, iv1.base, comp_mode);
- tmp = simplify_gen_binary (UMOD, mode, tmp1, GEN_INT (d));
+ tmp = simplify_gen_binary (UMOD, mode, tmp1, gen_int_mode (d, mode));
assumption = simplify_gen_relational (NE, SImode, mode, tmp, const0_rtx);
desc->infinite = alloc_EXPR_LIST (0, assumption, desc->infinite);
- tmp = simplify_gen_binary (UDIV, mode, tmp1, GEN_INT (d));
+ tmp = simplify_gen_binary (UDIV, mode, tmp1, gen_int_mode (d, mode));
inv = inverse (s, size);
tmp = simplify_gen_binary (MULT, mode, tmp, gen_int_mode (inv, mode));
desc->niter_expr = simplify_gen_binary (AND, mode, tmp, bound);
if (CONST_INT_P (desc->niter_expr))
{
- unsigned HOST_WIDEST_INT val = INTVAL (desc->niter_expr);
+ uint64_t val = INTVAL (desc->niter_expr);
desc->const_iter = true;
- desc->niter_max = desc->niter = val & GET_MODE_MASK (desc->mode);
+ desc->niter = val & GET_MODE_MASK (desc->mode);
+ if (!desc->infinite
+ && !desc->assumptions)
+ record_niter_bound (loop, desc->niter, false, true);
}
else
{
max = determine_max_iter (loop, desc, old_niter);
- if (!desc->niter_max
- || max < desc->niter_max)
- desc->niter_max = max;
+ if (!max)
+ goto zero_iter_simplify;
+ if (!desc->infinite
+ && !desc->assumptions)
+ record_niter_bound (loop, max, false, true);
/* simplify_using_initial_values does a copy propagation on the registers
in the expression for the number of iterations. This prolongs life
zero_iter:
desc->const_iter = true;
desc->niter = 0;
- desc->niter_max = 0;
+ record_niter_bound (loop, 0, true, true);
desc->noloop_assumptions = NULL_RTX;
desc->niter_expr = const0_rtx;
return;
into DESC. */
static void
-check_simple_exit (struct loop *loop, edge e, struct niter_desc *desc)
+check_simple_exit (class loop *loop, edge e, class niter_desc *desc)
{
basic_block exit_bb;
- rtx condition, at;
+ rtx condition;
+ rtx_insn *at;
edge ein;
exit_bb = e->src;
/* Finds a simple exit of LOOP and stores its description into DESC. */
-void
-find_simple_exit (struct loop *loop, struct niter_desc *desc)
+static void
+find_simple_exit (class loop *loop, class niter_desc *desc)
{
unsigned i;
basic_block *body;
edge e;
- struct niter_desc act;
+ class niter_desc act;
bool any = false;
edge_iterator ei;
print_rtl (dump_file, desc->niter_expr);
fprintf (dump_file, "\n");
- fprintf (dump_file, " upper bound: ");
- fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC, desc->niter_max);
- fprintf (dump_file, "\n");
+ fprintf (dump_file, " upper bound: %li\n",
+ (long)get_max_loop_iterations_int (loop));
+ fprintf (dump_file, " likely upper bound: %li\n",
+ (long)get_likely_max_loop_iterations_int (loop));
+ fprintf (dump_file, " realistic bound: %li\n",
+ (long)get_estimated_loop_iterations_int (loop));
}
else
fprintf (dump_file, "Loop %d is not simple.\n", loop->num);
}
+  /* Fix up the finiteness if possible.  We can only do this for a single
+     exit: the loop as a whole is known to be finite, but with multiple
+     exits we might predicate one particular exit as finite even though
+     it cannot be determined as finite in the middle-end as well.  That
+     would attach incorrect predicate information to that exit's condition
+     expression.  For example, stating that [(int) _1 + -8, +, -8] != 0
+     is finite implies that _1 is exactly divisible by -8.  */
+ if (desc->infinite && single_exit (loop) && finite_loop_p (loop))
+ {
+ desc->infinite = NULL_RTX;
+ if (dump_file)
+ fprintf (dump_file, " infinite updated to finite.\n");
+ }
+
free (body);
}
/* Creates a simple loop description of LOOP if it was not computed
already. */
-struct niter_desc *
-get_simple_loop_desc (struct loop *loop)
+class niter_desc *
+get_simple_loop_desc (class loop *loop)
{
- struct niter_desc *desc = simple_loop_desc (loop);
+ class niter_desc *desc = simple_loop_desc (loop);
if (desc)
return desc;
/* At least desc->infinite is not always initialized by
find_simple_loop_exit. */
- desc = XCNEW (struct niter_desc);
+ desc = ggc_cleared_alloc<niter_desc> ();
iv_analysis_loop_init (loop);
find_simple_exit (loop, desc);
- loop->aux = desc;
-
- if (desc->simple_p && (desc->assumptions || desc->infinite))
- {
- const char *wording;
-
- /* Assume that no overflow happens and that the loop is finite.
- We already warned at the tree level if we ran optimizations there. */
- if (!flag_tree_loop_optimize && warn_unsafe_loop_optimizations)
- {
- if (desc->infinite)
- {
- wording =
- flag_unsafe_loop_optimizations
- ? N_("assuming that the loop is not infinite")
- : N_("cannot optimize possibly infinite loops");
- warning (OPT_Wunsafe_loop_optimizations, "%s",
- gettext (wording));
- }
- if (desc->assumptions)
- {
- wording =
- flag_unsafe_loop_optimizations
- ? N_("assuming that the loop counter does not overflow")
- : N_("cannot optimize loop, the loop counter may overflow");
- warning (OPT_Wunsafe_loop_optimizations, "%s",
- gettext (wording));
- }
- }
-
- if (flag_unsafe_loop_optimizations)
- {
- desc->assumptions = NULL_RTX;
- desc->infinite = NULL_RTX;
- }
- }
-
+ loop->simple_loop_desc = desc;
return desc;
}
/* Releases simple loop description for LOOP. */
void
-free_simple_loop_desc (struct loop *loop)
+free_simple_loop_desc (class loop *loop)
{
- struct niter_desc *desc = simple_loop_desc (loop);
+ class niter_desc *desc = simple_loop_desc (loop);
if (!desc)
return;
- free (desc);
- loop->aux = NULL;
+ ggc_free (desc);
+ loop->simple_loop_desc = NULL;
}