/* Induction variable optimizations.
- Copyright (C) 2003-2013 Free Software Foundation, Inc.
+ Copyright (C) 2003-2014 Free Software Foundation, Inc.
This file is part of GCC.
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
#include "tm_p.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
+#include "pointer-set.h"
+#include "hash-table.h"
+#include "tree-ssa-alias.h"
+#include "internal-fn.h"
+#include "tree-eh.h"
+#include "gimple-expr.h"
+#include "is-a.h"
+#include "gimple.h"
+#include "gimplify.h"
+#include "gimple-iterator.h"
+#include "gimplify-me.h"
+#include "gimple-ssa.h"
+#include "cgraph.h"
+#include "tree-cfg.h"
+#include "tree-phinodes.h"
+#include "ssa-iterators.h"
+#include "stringpool.h"
+#include "tree-ssanames.h"
+#include "tree-ssa-loop-ivopts.h"
+#include "tree-ssa-loop-manip.h"
+#include "tree-ssa-loop-niter.h"
+#include "tree-ssa-loop.h"
+#include "expr.h"
+#include "tree-dfa.h"
#include "tree-ssa.h"
#include "cfgloop.h"
#include "tree-pass.h"
-#include "ggc.h"
#include "insn-config.h"
-#include "pointer-set.h"
-#include "hash-table.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "cfgloop.h"
#include "tree-inline.h"
#include "tree-ssa-propagate.h"
#include "expmed.h"
+#include "tree-ssa-address.h"
/* FIXME: Expressions are expanded to RTL in this pass to determine the
   cost of different addressing modes.  This should be moved to a TBD
   interface between the GIMPLE and RTL worlds.  */
/* Dumps information about the induction variable IV to FILE. */
-extern void dump_iv (FILE *, struct iv *);
void
dump_iv (FILE *file, struct iv *iv)
{
/* Dumps information about the USE to FILE. */
-extern void dump_use (FILE *, struct iv_use *);
void
dump_use (FILE *file, struct iv_use *use)
{
/* Dumps information about the uses to FILE. */
-extern void dump_uses (FILE *, struct ivopts_data *);
void
dump_uses (FILE *file, struct ivopts_data *data)
{
/* Dumps information about induction variable candidate CAND to FILE. */
-extern void dump_cand (FILE *, struct iv_cand *);
void
dump_cand (FILE *file, struct iv_cand *cand)
{
static struct iv *
alloc_iv (tree base, tree step)
{
+ tree base_object = base;
struct iv *iv = XCNEW (struct iv);
gcc_assert (step != NULL_TREE);
+ /* Lower all address expressions except ones with DECL_P as operand.
+ By doing this:
+ 1) More accurate cost can be computed for address expressions;
+ 2) Duplicate candidates won't be created for bases in different
+ forms, like &a[0] and &a. */
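+     E.g. (illustrative): for 'int a[10]', the base '&a[0]' is lowered
+     to '&a' plus a zero offset, so it shares a candidate with a base
+     written directly as '&a'.  */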
+ STRIP_NOPS (base_object);
+ if (TREE_CODE (base_object) == ADDR_EXPR
+ && !DECL_P (TREE_OPERAND (base_object, 0)))
+ {
+ aff_tree comb;
+ double_int size;
+ base_object = get_inner_reference_aff (TREE_OPERAND (base_object, 0),
+ &comb, &size);
+ gcc_assert (base_object != NULL_TREE);
+ base_object = build_fold_addr_expr (base_object);
+ base = fold_convert (TREE_TYPE (base), aff_combination_to_tree (&comb));
+ }
+
iv->base = base;
- iv->base_object = determine_base_object (base);
+ iv->base_object = determine_base_object (base_object);
iv->step = step;
iv->biv_p = false;
iv->have_use_for = false;
static void
mark_bivs (struct ivopts_data *data)
{
- gimple phi;
+ gimple phi, def;
tree var;
struct iv *iv, *incr_iv;
struct loop *loop = data->current_loop;
continue;
var = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
+ def = SSA_NAME_DEF_STMT (var);
+  /* Don't mark an iv peeled from another iv as a biv.  */
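+  /* E.g. (hypothetical GIMPLE), with both PHIs in the loop header:
+	 i_1 = PHI <0(preheader), i_4(latch)>
+	 p_2 = PHI <x_3(preheader), i_1(latch)>
+     the latch value of p_2 is defined by the PHI for i_1, so p_2
+     is skipped below.  */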
+ if (def
+ && gimple_code (def) == GIMPLE_PHI
+ && gimple_bb (def) == loop->header)
+ continue;
+
incr_iv = get_iv (data, var);
if (!incr_iv)
continue;
return true;
}
-/* Returns true if statement STMT is obviously invariant in LOOP,
- i.e. if all its operands on the RHS are defined outside of the LOOP.
- LOOP should not be the function body. */
-
-bool
-stmt_invariant_in_loop_p (struct loop *loop, gimple stmt)
-{
- unsigned i;
- tree lhs;
-
- gcc_assert (loop_depth (loop) > 0);
-
- lhs = gimple_get_lhs (stmt);
- for (i = 0; i < gimple_num_ops (stmt); i++)
- {
- tree op = gimple_op (stmt, i);
- if (op != lhs && !expr_invariant_in_loop_p (loop, op))
- return false;
- }
-
- return true;
-}
-
/* Cumulates the steps of indices into DATA and replaces their values with the
initial ones. Returns false when the value of the index cannot be determined.
Callback for for_each_index. */
}
}
-/* Returns true if memory reference REF with step STEP may be unaligned. */
+/* Return true if memory reference REF with step STEP may be unaligned. */
static bool
may_be_unaligned_p (tree ref, tree step)
{
- tree base;
- tree base_type;
- HOST_WIDE_INT bitsize;
- HOST_WIDE_INT bitpos;
- tree toffset;
- enum machine_mode mode;
- int unsignedp, volatilep;
- unsigned base_align;
-
/* TARGET_MEM_REFs are translated directly to valid MEMs on the target,
thus they are not misaligned. */
if (TREE_CODE (ref) == TARGET_MEM_REF)
return false;
- /* The test below is basically copy of what expr.c:normal_inner_ref
- does to check whether the object must be loaded by parts when
- STRICT_ALIGNMENT is true. */
- base = get_inner_reference (ref, &bitsize, &bitpos, &toffset, &mode,
- &unsignedp, &volatilep, true);
- base_type = TREE_TYPE (base);
- base_align = get_object_alignment (base);
- base_align = MAX (base_align, TYPE_ALIGN (base_type));
+ unsigned int align = TYPE_ALIGN (TREE_TYPE (ref));
- if (mode != BLKmode)
- {
- unsigned mode_align = GET_MODE_ALIGNMENT (mode);
-
- if (base_align < mode_align
- || (bitpos % mode_align) != 0
- || (bitpos % BITS_PER_UNIT) != 0)
- return true;
-
- if (toffset
- && (highest_pow2_factor (toffset) * BITS_PER_UNIT) < mode_align)
- return true;
+ unsigned HOST_WIDE_INT bitpos;
+ unsigned int ref_align;
+ get_object_alignment_1 (ref, &ref_align, &bitpos);
+ if (ref_align < align
+ || (bitpos % align) != 0
+ || (bitpos % BITS_PER_UNIT) != 0)
+ return true;
- if ((highest_pow2_factor (step) * BITS_PER_UNIT) < mode_align)
- return true;
- }
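+  /* E.g. (illustrative numbers): a STEP of 24 has three trailing zero
+     bits, so each increment preserves 8-byte (64-bit) alignment,
+     while a STEP of 4 only guarantees 32 bits and would fail the
+     check below against a 64-bit ALIGN.  */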
+ unsigned int trailing_zeros = tree_ctz (step);
+ if (trailing_zeros < HOST_BITS_PER_INT
+ && (1U << trailing_zeros) * BITS_PER_UNIT < align)
+ return true;
return false;
}
bb = body[i];
FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->dest != EXIT_BLOCK_PTR
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& !flow_bb_inside_loop_p (data->current_loop, e->dest))
find_interesting_uses_outside (data, e);
static tree
strip_offset_1 (tree expr, bool inside_addr, bool top_compref,
- unsigned HOST_WIDE_INT *offset)
+ HOST_WIDE_INT *offset)
{
tree op0 = NULL_TREE, op1 = NULL_TREE, tmp, step;
enum tree_code code;
tree type, orig_type = TREE_TYPE (expr);
- unsigned HOST_WIDE_INT off0, off1, st;
+ HOST_WIDE_INT off0, off1, st;
tree orig_expr = expr;
STRIP_NOPS (expr);
break;
case COMPONENT_REF:
- if (!inside_addr)
- return orig_expr;
+ {
+ tree field;
- tmp = component_ref_field_offset (expr);
- if (top_compref
- && cst_and_fits_in_hwi (tmp))
- {
- /* Strip the component reference completely. */
- op0 = TREE_OPERAND (expr, 0);
- op0 = strip_offset_1 (op0, inside_addr, top_compref, &off0);
- *offset = off0 + int_cst_value (tmp);
- return op0;
- }
+ if (!inside_addr)
+ return orig_expr;
+
+ tmp = component_ref_field_offset (expr);
+ field = TREE_OPERAND (expr, 1);
+ if (top_compref
+ && cst_and_fits_in_hwi (tmp)
+ && cst_and_fits_in_hwi (DECL_FIELD_BIT_OFFSET (field)))
+ {
+ HOST_WIDE_INT boffset, abs_off;
+
+ /* Strip the component reference completely. */
+ op0 = TREE_OPERAND (expr, 0);
+ op0 = strip_offset_1 (op0, inside_addr, top_compref, &off0);
+ boffset = int_cst_value (DECL_FIELD_BIT_OFFSET (field));
+ abs_off = abs_hwi (boffset) / BITS_PER_UNIT;
+ if (boffset < 0)
+ abs_off = -abs_off;
+
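+	    /* E.g. (illustrative): a field with byte offset TMP of 4
+	       and DECL_FIELD_BIT_OFFSET of 64 contributes ABS_OFF
+	       of 64 / 8 = 8 bytes, for 12 bytes in total.  */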
+ *offset = off0 + int_cst_value (tmp) + abs_off;
+ return op0;
+ }
+ }
break;
case ADDR_EXPR:
static tree
strip_offset (tree expr, unsigned HOST_WIDE_INT *offset)
{
- return strip_offset_1 (expr, false, false, offset);
+ HOST_WIDE_INT off;
+ tree core = strip_offset_1 (expr, false, false, &off);
+ *offset = off;
+ return core;
}
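+/* E.g. (illustrative): stripping 'p + 16' yields core 'p' with
+   *OFFSET set to 16; an expression with no constant part is returned
+   unchanged with *OFFSET of 0.  */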
/* Returns variant of TYPE that can be used as base for different uses.
/* Additionally record the possibility of leaving the original iv
untouched. */
def = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (data->current_loop));
- cand = add_candidate_1 (data,
- iv->base, iv->step, true, IP_ORIGINAL, NULL,
- SSA_NAME_DEF_STMT (def));
- cand->var_before = iv->ssa_name;
- cand->var_after = def;
+  /* Don't add a candidate if DEF comes from another PHI node: that
+     means it is an affine iv appearing in the form of a PEELED_CHREC.  */
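+  /* (The peeled shape skipped here is the same one illustrated in
+     mark_bivs above.)  */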
+ phi = SSA_NAME_DEF_STMT (def);
+ if (gimple_code (phi) != GIMPLE_PHI)
+ {
+ cand = add_candidate_1 (data,
+ iv->base, iv->step, true, IP_ORIGINAL, NULL,
+ SSA_NAME_DEF_STMT (def));
+ cand->var_before = iv->ssa_name;
+ cand->var_after = def;
+ }
+ else
+ gcc_assert (gimple_bb (phi) == data->current_loop->header);
}
}
static bool
get_computation_aff (struct loop *loop,
struct iv_use *use, struct iv_cand *cand, gimple at,
- struct affine_tree_combination *aff)
+		     aff_tree *aff)
{
tree ubase = use->iv->base;
tree ustep = use->iv->step;
{
enum machine_mode address_mode = targetm.addr_space.address_mode (as);
rtx reg1 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 1);
- rtx addr;
+ rtx reg2 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 2);
+ rtx addr, scaled;
HOST_WIDE_INT i;
valid_mult = sbitmap_alloc (2 * MAX_RATIO + 1);
bitmap_clear (valid_mult);
- addr = gen_rtx_fmt_ee (MULT, address_mode, reg1, NULL_RTX);
+ scaled = gen_rtx_fmt_ee (MULT, address_mode, reg1, NULL_RTX);
+ addr = gen_rtx_fmt_ee (PLUS, address_mode, scaled, reg2);
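+      /* Probe both 'reg1 * ratio' and 'reg1 * ratio + reg2'; on some
+	 targets (an assumption about target quirks) a scaled index is
+	 legitimate only in combination with a base register.  */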
for (i = -MAX_RATIO; i <= MAX_RATIO; i++)
{
- XEXP (addr, 1) = gen_int_mode (i, address_mode);
- if (memory_address_addr_space_p (mode, addr, as))
+ XEXP (scaled, 1) = gen_int_mode (i, address_mode);
+ if (memory_address_addr_space_p (mode, addr, as)
+ || memory_address_addr_space_p (mode, scaled, as))
bitmap_set_bit (valid_mult, i + MAX_RATIO);
}
TODO -- there must be some better way. This all is quite crude. */
+enum ainc_type
+{
+ AINC_PRE_INC, /* Pre increment. */
+ AINC_PRE_DEC, /* Pre decrement. */
+ AINC_POST_INC, /* Post increment. */
+ AINC_POST_DEC, /* Post decrement. */
+ AINC_NONE /* Also the number of auto increment types. */
+};
+
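+/* In C terms (illustrative): AINC_PRE_INC corresponds to '*++p',
+   AINC_POST_INC to '*p++', AINC_PRE_DEC to '*--p', and AINC_POST_DEC
+   to '*p--', where the target provides such an addressing mode.  */
+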
typedef struct address_cost_data_s
{
HOST_WIDE_INT min_offset, max_offset;
unsigned costs[2][2][2][2];
+ unsigned ainc_costs[AINC_NONE];
} *address_cost_data;
static bool has_preinc[MAX_MACHINE_MODE], has_postinc[MAX_MACHINE_MODE];
static bool has_predec[MAX_MACHINE_MODE], has_postdec[MAX_MACHINE_MODE];
unsigned cost, acost, complexity;
+ enum ainc_type autoinc_type;
bool offset_p, ratio_p, autoinc;
HOST_WIDE_INT s_offset, autoinc_offset, msize;
unsigned HOST_WIDE_INT mask;
reg0 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 1);
reg1 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 2);
- if (USE_LOAD_PRE_DECREMENT (mem_mode)
+ if (USE_LOAD_PRE_DECREMENT (mem_mode)
|| USE_STORE_PRE_DECREMENT (mem_mode))
{
addr = gen_rtx_PRE_DEC (address_mode, reg0);
has_predec[mem_mode]
= memory_address_addr_space_p (mem_mode, addr, as);
+
+ if (has_predec[mem_mode])
+ data->ainc_costs[AINC_PRE_DEC]
+ = address_cost (addr, mem_mode, as, speed);
}
- if (USE_LOAD_POST_DECREMENT (mem_mode)
+ if (USE_LOAD_POST_DECREMENT (mem_mode)
|| USE_STORE_POST_DECREMENT (mem_mode))
{
addr = gen_rtx_POST_DEC (address_mode, reg0);
has_postdec[mem_mode]
= memory_address_addr_space_p (mem_mode, addr, as);
+
+ if (has_postdec[mem_mode])
+ data->ainc_costs[AINC_POST_DEC]
+ = address_cost (addr, mem_mode, as, speed);
}
- if (USE_LOAD_PRE_INCREMENT (mem_mode)
+ if (USE_LOAD_PRE_INCREMENT (mem_mode)
	  || USE_STORE_PRE_INCREMENT (mem_mode))
{
addr = gen_rtx_PRE_INC (address_mode, reg0);
has_preinc[mem_mode]
= memory_address_addr_space_p (mem_mode, addr, as);
+
+ if (has_preinc[mem_mode])
+ data->ainc_costs[AINC_PRE_INC]
+ = address_cost (addr, mem_mode, as, speed);
}
- if (USE_LOAD_POST_INCREMENT (mem_mode)
+ if (USE_LOAD_POST_INCREMENT (mem_mode)
|| USE_STORE_POST_INCREMENT (mem_mode))
{
addr = gen_rtx_POST_INC (address_mode, reg0);
has_postinc[mem_mode]
= memory_address_addr_space_p (mem_mode, addr, as);
+
+ if (has_postinc[mem_mode])
+ data->ainc_costs[AINC_POST_INC]
+ = address_cost (addr, mem_mode, as, speed);
}
for (i = 0; i < 16; i++)
{
s_offset = offset;
autoinc = false;
+ autoinc_type = AINC_NONE;
msize = GET_MODE_SIZE (mem_mode);
autoinc_offset = offset;
if (stmt_after_inc)
autoinc_offset += ratio * cstep;
if (symbol_present || var_present || ratio != 1)
autoinc = false;
- else if ((has_postinc[mem_mode] && autoinc_offset == 0
- && msize == cstep)
- || (has_postdec[mem_mode] && autoinc_offset == 0
+ else
+ {
+ if (has_postinc[mem_mode] && autoinc_offset == 0
+ && msize == cstep)
+ autoinc_type = AINC_POST_INC;
+ else if (has_postdec[mem_mode] && autoinc_offset == 0
&& msize == -cstep)
- || (has_preinc[mem_mode] && autoinc_offset == msize
+ autoinc_type = AINC_POST_DEC;
+ else if (has_preinc[mem_mode] && autoinc_offset == msize
&& msize == cstep)
- || (has_predec[mem_mode] && autoinc_offset == -msize
- && msize == -cstep))
- autoinc = true;
+ autoinc_type = AINC_PRE_INC;
+ else if (has_predec[mem_mode] && autoinc_offset == -msize
+ && msize == -cstep)
+ autoinc_type = AINC_PRE_DEC;
+
+ if (autoinc_type != AINC_NONE)
+ autoinc = true;
+ }
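+  /* Worked example (assumed numbers): for a 4-byte access, CSTEP 4
+     and AUTOINC_OFFSET 0 select AINC_POST_INC when the target has
+     post-increment; AUTOINC_OFFSET 4 (== MSIZE) with pre-increment
+     support selects AINC_PRE_INC instead.  */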
cost = 0;
offset_p = (s_offset != 0
if (may_autoinc)
*may_autoinc = autoinc;
- acost = data->costs[symbol_present][var_present][offset_p][ratio_p];
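+  /* E.g. (illustrative): on a target where a post-increment address is
+     as cheap as a plain register address, ainc_costs[AINC_POST_INC]
+     undercuts the generic table entry and the auto-inc form wins.  */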
+ if (autoinc)
+ acost = data->ainc_costs[autoinc_type];
+ else
+ acost = data->costs[symbol_present][var_present][offset_p][ratio_p];
complexity = (symbol_present != 0) + (var_present != 0) + offset_p + ratio_p;
return new_cost (cost + acost, complexity);
}
int m = exact_log2 (int_cst_value (cst));
int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
int sa_cost;
+ bool equal_p = false;
if (!(m >= 0 && m < maxm))
return false;
+ if (operand_equal_p (op1, mult, 0))
+ equal_p = true;
+
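+  /* E.g. (illustrative): for 'a * 8 + b', M is 3 and shiftadd_cost
+     applies; 'b - a * 8' (MULT equal to op1) uses shiftsub1_cost and
+     'a * 8 - b' uses shiftsub0_cost.  */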
sa_cost = (TREE_CODE (expr) != MINUS_EXPR
? shiftadd_cost (speed, mode, m)
- : (mult == op1
+ : (equal_p
? shiftsub1_cost (speed, mode, m)
: shiftsub0_cost (speed, mode, m)));
res = new_cost (sa_cost, 0);
- res = add_costs (res, mult == op1 ? cost0 : cost1);
+ res = add_costs (res, equal_p ? cost0 : cost1);
STRIP_NOPS (multop);
if (!is_gimple_val (multop))
op1 = TREE_OPERAND (expr, 1);
STRIP_NOPS (op0);
STRIP_NOPS (op1);
-
- if (is_gimple_val (op0))
- cost0 = no_cost;
- else
- cost0 = force_expr_to_var_cost (op0, speed);
-
- if (is_gimple_val (op1))
- cost1 = no_cost;
- else
- cost1 = force_expr_to_var_cost (op1, speed);
-
break;
+ CASE_CONVERT:
case NEGATE_EXPR:
op0 = TREE_OPERAND (expr, 0);
STRIP_NOPS (op0);
op1 = NULL_TREE;
-
- if (is_gimple_val (op0))
- cost0 = no_cost;
- else
- cost0 = force_expr_to_var_cost (op0, speed);
-
- cost1 = no_cost;
break;
default:
return new_cost (target_spill_cost[speed], 0);
}
+ if (op0 == NULL_TREE
+ || TREE_CODE (op0) == SSA_NAME || CONSTANT_CLASS_P (op0))
+ cost0 = no_cost;
+ else
+ cost0 = force_expr_to_var_cost (op0, speed);
+
+ if (op1 == NULL_TREE
+ || TREE_CODE (op1) == SSA_NAME || CONSTANT_CLASS_P (op1))
+ cost1 = no_cost;
+ else
+ cost1 = force_expr_to_var_cost (op1, speed);
+
mode = TYPE_MODE (TREE_TYPE (expr));
switch (TREE_CODE (expr))
{
}
break;
+ CASE_CONVERT:
+ {
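+	/* E.g. (illustrative): '(long) i' for 'int i' is costed via
+	   convert_cost between SImode and DImode on an LP64 target.  */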
+ tree inner_mode, outer_mode;
+ outer_mode = TREE_TYPE (expr);
+ inner_mode = TREE_TYPE (op0);
+ cost = new_cost (convert_cost (TYPE_MODE (outer_mode),
+ TYPE_MODE (inner_mode), speed), 0);
+ }
+ break;
+
case MULT_EXPR:
if (cst_and_fits_in_hwi (op0))
cost = new_cost (mult_by_coeff_cost (int_cst_value (op0),
if (ratio == 1)
{
- if(operand_equal_p (ubase, cbase, 0))
+ if (operand_equal_p (ubase, cbase, 0))
return -1;
if (TREE_CODE (ubase) == ADDR_EXPR
{
tree ind = TREE_OPERAND (usym, 1);
if (TREE_CODE (ind) == INTEGER_CST
- && host_integerp (ind, 0)
- && TREE_INT_CST_LOW (ind) == 0)
+ && tree_fits_shwi_p (ind)
+ && tree_to_shwi (ind) == 0)
usym = TREE_OPERAND (usym, 0);
}
if (TREE_CODE (csym) == ARRAY_REF)
{
tree ind = TREE_OPERAND (csym, 1);
if (TREE_CODE (ind) == INTEGER_CST
- && host_integerp (ind, 0)
- && TREE_INT_CST_LOW (ind) == 0)
+ && tree_fits_shwi_p (ind)
+ && tree_to_shwi (ind) == 0)
csym = TREE_OPERAND (csym, 0);
}
if (operand_equal_p (usym, csym, 0))
period = build_low_bits_mask (type,
(TYPE_PRECISION (type)
- - tree_low_cst (pow2div, 1)));
+ - tree_to_uhwi (pow2div)));
return period;
}
struct tree_niter_desc *niter)
{
tree cand_type, a, b, mbz, nit_type = TREE_TYPE (niter->niter), offset;
- struct affine_tree_combination nit, tmpa, tmpb;
+  aff_tree nit, tmpa, tmpb;
enum tree_code comp;
HOST_WIDE_INT step;
}
/* Try narrowing set IVS by removing CAND. Return the cost of
- the new set and store the differences in DELTA. */
+ the new set and store the differences in DELTA. START is
+ the candidate with which we start narrowing. */
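+/* (Illustrative sketch: with IVS = {c1, c2, c3} and CAND = c2, each
+   use currently served by c2 is re-assigned, starting from START's
+   cost pair, to whichever remaining candidate gives the cheapest
+   overall set; the changes are recorded in DELTA.)  */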
static comp_cost
iv_ca_narrow (struct ivopts_data *data, struct iv_ca *ivs,
- struct iv_cand *cand, struct iv_ca_delta **delta)
+ struct iv_cand *cand, struct iv_cand *start,
+ struct iv_ca_delta **delta)
{
unsigned i, ci;
struct iv_use *use;
struct cost_pair *old_cp, *new_cp, *cp;
bitmap_iterator bi;
struct iv_cand *cnd;
- comp_cost cost;
+ comp_cost cost, best_cost, acost;
*delta = NULL;
for (i = 0; i < n_iv_uses (data); i++)
if (old_cp->cand != cand)
continue;
- new_cp = NULL;
+ best_cost = iv_ca_cost (ivs);
+ /* Start narrowing with START. */
+ new_cp = get_use_iv_cost (data, use, start);
if (data->consider_all_candidates)
{
EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, ci, bi)
{
- if (ci == cand->id)
+ if (ci == cand->id || (start && ci == start->id))
continue;
cnd = iv_cand (data, ci);
if (!cp)
continue;
- if (!iv_ca_has_deps (ivs, cp))
- continue;
-
- if (!cheaper_cost_pair (cp, new_cp))
- continue;
+ iv_ca_set_cp (data, ivs, use, cp);
+ acost = iv_ca_cost (ivs);
- new_cp = cp;
+ if (compare_costs (acost, best_cost) < 0)
+ {
+ best_cost = acost;
+ new_cp = cp;
+ }
}
}
else
{
EXECUTE_IF_AND_IN_BITMAP (use->related_cands, ivs->cands, 0, ci, bi)
{
- if (ci == cand->id)
+ if (ci == cand->id || (start && ci == start->id))
continue;
cnd = iv_cand (data, ci);
cp = get_use_iv_cost (data, use, cnd);
if (!cp)
continue;
- if (!iv_ca_has_deps (ivs, cp))
- continue;
- if (!cheaper_cost_pair (cp, new_cp))
- continue;
+ iv_ca_set_cp (data, ivs, use, cp);
+ acost = iv_ca_cost (ivs);
- new_cp = cp;
+ if (compare_costs (acost, best_cost) < 0)
+ {
+ best_cost = acost;
+ new_cp = cp;
+ }
}
}
+      /* Restore USE to its original cost pair.  */
+ iv_ca_set_cp (data, ivs, use, old_cp);
if (!new_cp)
{
if (cand == except_cand)
continue;
- acost = iv_ca_narrow (data, ivs, cand, &act_delta);
+ acost = iv_ca_narrow (data, ivs, cand, except_cand, &act_delta);
if (compare_costs (acost, best_cost) < 0)
{
{
struct loop *loop;
struct ivopts_data data;
- loop_iterator li;
tree_ssa_iv_optimize_init (&data);
/* Optimize the loops starting with the innermost ones. */
- FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
+ FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
{
if (dump_file && (dump_flags & TDF_DETAILS))
flow_loop_dump (loop, dump_file, NULL, 1);