/* Loop unrolling.
- Copyright (C) 2002-2015 Free Software Foundation, Inc.
+ Copyright (C) 2002-2020 Free Software Foundation, Inc.
This file is part of GCC.
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "tm.h"
+#include "backend.h"
+#include "target.h"
#include "rtl.h"
-#include "alias.h"
-#include "symtab.h"
#include "tree.h"
-#include "hard-reg-set.h"
-#include "obstack.h"
+#include "cfghooks.h"
+#include "memmodel.h"
+#include "optabs.h"
+#include "emit-rtl.h"
+#include "recog.h"
#include "profile.h"
-#include "predict.h"
-#include "function.h"
-#include "dominance.h"
-#include "cfg.h"
#include "cfgrtl.h"
-#include "basic-block.h"
#include "cfgloop.h"
-#include "params.h"
-#include "insn-codes.h"
-#include "optabs.h"
-#include "flags.h"
-#include "insn-config.h"
-#include "expmed.h"
#include "dojump.h"
-#include "explow.h"
-#include "calls.h"
-#include "emit-rtl.h"
-#include "varasm.h"
-#include "stmt.h"
#include "expr.h"
-#include "recog.h"
-#include "target.h"
#include "dumpfile.h"
/* This pass performs loop unrolling. We only perform this
basic_block loop_preheader; /* The loop preheader basic block. */
};
-static void decide_unroll_stupid (struct loop *, int);
-static void decide_unroll_constant_iterations (struct loop *, int);
-static void decide_unroll_runtime_iterations (struct loop *, int);
-static void unroll_loop_stupid (struct loop *);
+static void decide_unroll_stupid (class loop *, int);
+static void decide_unroll_constant_iterations (class loop *, int);
+static void decide_unroll_runtime_iterations (class loop *, int);
+static void unroll_loop_stupid (class loop *);
static void decide_unrolling (int);
-static void unroll_loop_constant_iterations (struct loop *);
-static void unroll_loop_runtime_iterations (struct loop *);
-static struct opt_info *analyze_insns_in_loop (struct loop *);
+static void unroll_loop_constant_iterations (class loop *);
+static void unroll_loop_runtime_iterations (class loop *);
+static struct opt_info *analyze_insns_in_loop (class loop *);
static void opt_info_start_duplication (struct opt_info *);
static void apply_opt_in_copies (struct opt_info *, unsigned, bool, bool);
static void free_opt_info (struct opt_info *);
-static struct var_to_expand *analyze_insn_to_expand_var (struct loop*, rtx_insn *);
-static bool referenced_in_one_insn_in_loop_p (struct loop *, rtx, int *);
+static struct var_to_expand *analyze_insn_to_expand_var (class loop*, rtx_insn *);
+static bool referenced_in_one_insn_in_loop_p (class loop *, rtx, int *);
static struct iv_to_split *analyze_iv_to_split_insn (rtx_insn *);
static void expand_var_during_unrolling (struct var_to_expand *, rtx_insn *);
static void insert_var_expansion_initialization (struct var_to_expand *,
appropriate given the dump or -fopt-info settings. */
static void
-report_unroll (struct loop *loop, location_t locus)
+report_unroll (class loop *loop, dump_location_t locus)
{
- int report_flags = MSG_OPTIMIZED_LOCATIONS | TDF_RTL | TDF_DETAILS;
+ dump_flags_t report_flags = MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS;
if (loop->lpt_decision.decision == LPT_NONE)
return;
if (!dump_enabled_p ())
return;
- dump_printf_loc (report_flags, locus,
+ dump_metadata_t metadata (report_flags, locus.get_impl_location ());
+ dump_printf_loc (metadata, locus.get_user_location (),
"loop unrolled %d times",
loop->lpt_decision.times);
- if (profile_info)
- dump_printf (report_flags,
+ if (profile_info && loop->header->count.initialized_p ())
+ dump_printf (metadata,
" (header execution count %d)",
- (int)loop->header->count);
+ (int)loop->header->count.to_gcov_type ());
- dump_printf (report_flags, "\n");
+ dump_printf (metadata, "\n");
}
/* Decide whether unroll loops and how much. */
static void
decide_unrolling (int flags)
{
- struct loop *loop;
+ class loop *loop;
/* Scan the loops, inner ones first. */
FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
{
loop->lpt_decision.decision = LPT_NONE;
- location_t locus = get_loop_location (loop);
+ dump_user_location_t locus = get_loop_location (loop);
if (dump_enabled_p ())
- dump_printf_loc (TDF_RTL, locus,
- ";; *** Considering loop %d at BB %d for "
- "unrolling ***\n",
- loop->num, loop->header->index);
+ dump_printf_loc (MSG_NOTE, locus,
+ "considering unrolling loop %d at BB %d\n",
+ loop->num, loop->header->index);
+
+ if (loop->unroll == 1)
+ {
+ if (dump_file)
+ fprintf (dump_file,
+ ";; Not unrolling loop, user didn't want it unrolled\n");
+ continue;
+ }
/* Do not peel cold areas. */
if (optimize_loop_for_size_p (loop))
loop->ninsns = num_loop_insns (loop);
loop->av_ninsns = average_num_loop_insns (loop);
- /* Try transformations one by one in decreasing order of
- priority. */
-
+ /* Try transformations one by one in decreasing order of priority. */
decide_unroll_constant_iterations (loop, flags);
if (loop->lpt_decision.decision == LPT_NONE)
decide_unroll_runtime_iterations (loop, flags);
void
unroll_loops (int flags)
{
- struct loop *loop;
+ class loop *loop;
bool changed = false;
/* Now decide rest of unrolling. */
/* Check whether exit of the LOOP is at the end of loop body. */
static bool
-loop_exit_at_end_p (struct loop *loop)
+loop_exit_at_end_p (class loop *loop)
{
- struct niter_desc *desc = get_simple_loop_desc (loop);
+ class niter_desc *desc = get_simple_loop_desc (loop);
rtx_insn *insn;
/* We should never have conditional in latch block. */
and how much. */
static void
-decide_unroll_constant_iterations (struct loop *loop, int flags)
+decide_unroll_constant_iterations (class loop *loop, int flags)
{
unsigned nunroll, nunroll_by_av, best_copies, best_unroll = 0, n_copies, i;
- struct niter_desc *desc;
+ class niter_desc *desc;
widest_int iterations;
- if (!(flags & UAP_UNROLL))
- {
- /* We were not asked to, just return back silently. */
- return;
- }
+ /* If we were not asked to unroll this loop, just return back silently. */
+ if (!(flags & UAP_UNROLL) && !loop->unroll)
+ return;
- if (dump_file)
- fprintf (dump_file,
- "\n;; Considering unrolling loop with constant "
- "number of iterations\n");
+ if (dump_enabled_p ())
+ dump_printf (MSG_NOTE,
+ "considering unrolling loop with constant "
+ "number of iterations\n");
/* nunroll = total number of copies of the original loop body in
- unrolled loop (i.e. if it is 2, we have to duplicate loop body once. */
- nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
+ unrolled loop (i.e. if it is 2, we have to duplicate loop body once). */
+ nunroll = param_max_unrolled_insns / loop->ninsns;
nunroll_by_av
- = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
+ = param_max_average_unrolled_insns / loop->av_ninsns;
if (nunroll > nunroll_by_av)
nunroll = nunroll_by_av;
- if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
- nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
+ if (nunroll > (unsigned) param_max_unroll_times)
+ nunroll = param_max_unroll_times;
if (targetm.loop_unroll_adjust)
nunroll = targetm.loop_unroll_adjust (nunroll, loop);
return;
}
+ /* Check for an explicit unrolling factor. */
+ if (loop->unroll > 0 && loop->unroll < USHRT_MAX)
+ {
+ /* However, at the RTL level, we cannot completely unroll a loop with a
+ constant number of iterations; it should have been peeled instead. */
+ if (desc->niter == 0 || (unsigned) loop->unroll > desc->niter - 1)
+ {
+ if (dump_file)
+ fprintf (dump_file, ";; Loop should have been peeled\n");
+ }
+ else
+ {
+ loop->lpt_decision.decision = LPT_UNROLL_CONSTANT;
+ loop->lpt_decision.times = loop->unroll - 1;
+ }
+ return;
+ }
+
/* Check whether the loop rolls enough to consider.
Consult also loop bounds and profile; in the case the loop has more
than one exit it may well loop less than determined maximal number
of iterations. */
if (desc->niter < 2 * nunroll
|| ((get_estimated_loop_iterations (loop, &iterations)
- || get_max_loop_iterations (loop, &iterations))
+ || get_likely_max_loop_iterations (loop, &iterations))
&& wi::ltu_p (iterations, 2 * nunroll)))
{
if (dump_file)
best_copies = 2 * nunroll + 10;
i = 2 * nunroll + 2;
- if (i - 1 >= desc->niter)
+ if (i > desc->niter - 2)
i = desc->niter - 2;
for (; i >= nunroll - 1; i--)
}
*/
static void
-unroll_loop_constant_iterations (struct loop *loop)
+unroll_loop_constant_iterations (class loop *loop)
{
unsigned HOST_WIDE_INT niter;
unsigned exit_mod;
- sbitmap wont_exit;
unsigned i;
edge e;
unsigned max_unroll = loop->lpt_decision.times;
- struct niter_desc *desc = get_simple_loop_desc (loop);
+ class niter_desc *desc = get_simple_loop_desc (loop);
bool exit_at_end = loop_exit_at_end_p (loop);
struct opt_info *opt_info = NULL;
bool ok;
exit_mod = niter % (max_unroll + 1);
- wont_exit = sbitmap_alloc (max_unroll + 1);
+ auto_sbitmap wont_exit (max_unroll + 2);
bitmap_ones (wont_exit);
auto_vec<edge> remove_edges;
loop->nb_iterations_estimate -= exit_mod;
else
loop->any_estimate = false;
+ if (loop->any_likely_upper_bound
+ && wi::leu_p (exit_mod, loop->nb_iterations_likely_upper_bound))
+ loop->nb_iterations_likely_upper_bound -= exit_mod;
+ else
+ loop->any_likely_upper_bound = false;
}
bitmap_set_bit (wont_exit, 1);
loop->nb_iterations_estimate -= exit_mod + 1;
else
loop->any_estimate = false;
+ if (loop->any_likely_upper_bound
+ && wi::leu_p (exit_mod + 1, loop->nb_iterations_likely_upper_bound))
+ loop->nb_iterations_likely_upper_bound -= exit_mod + 1;
+ else
+ loop->any_likely_upper_bound = false;
desc->noloop_assumptions = NULL_RTX;
bitmap_set_bit (wont_exit, 0);
free_opt_info (opt_info);
}
- free (wont_exit);
-
if (exit_at_end)
{
basic_block exit_block = get_bb_copy (desc->in_edge->src);
if (loop->any_estimate)
loop->nb_iterations_estimate
= wi::udiv_trunc (loop->nb_iterations_estimate, max_unroll + 1);
- desc->niter_expr = GEN_INT (desc->niter);
+ if (loop->any_likely_upper_bound)
+ loop->nb_iterations_likely_upper_bound
+ = wi::udiv_trunc (loop->nb_iterations_likely_upper_bound, max_unroll + 1);
+ desc->niter_expr = gen_int_mode (desc->niter, desc->mode);
/* Remove the edges. */
FOR_EACH_VEC_ELT (remove_edges, i, e)
/* Decide whether to unroll LOOP iterating runtime computable number of times
and how much. */
static void
-decide_unroll_runtime_iterations (struct loop *loop, int flags)
+decide_unroll_runtime_iterations (class loop *loop, int flags)
{
unsigned nunroll, nunroll_by_av, i;
- struct niter_desc *desc;
+ class niter_desc *desc;
widest_int iterations;
- if (!(flags & UAP_UNROLL))
- {
- /* We were not asked to, just return back silently. */
- return;
- }
+ /* If we were not asked to unroll this loop, just return back silently. */
+ if (!(flags & UAP_UNROLL) && !loop->unroll)
+ return;
- if (dump_file)
- fprintf (dump_file,
- "\n;; Considering unrolling loop with runtime "
- "computable number of iterations\n");
+ if (dump_enabled_p ())
+ dump_printf (MSG_NOTE,
+ "considering unrolling loop with runtime-"
+ "computable number of iterations\n");
/* nunroll = total number of copies of the original loop body in
unrolled loop (i.e. if it is 2, we have to duplicate loop body once. */
- nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
- nunroll_by_av = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
+ nunroll = param_max_unrolled_insns / loop->ninsns;
+ nunroll_by_av = param_max_average_unrolled_insns / loop->av_ninsns;
if (nunroll > nunroll_by_av)
nunroll = nunroll_by_av;
- if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
- nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
+ if (nunroll > (unsigned) param_max_unroll_times)
+ nunroll = param_max_unroll_times;
if (targetm.loop_unroll_adjust)
nunroll = targetm.loop_unroll_adjust (nunroll, loop);
+ if (loop->unroll > 0 && loop->unroll < USHRT_MAX)
+ nunroll = loop->unroll;
+
/* Skip big loops. */
if (nunroll <= 1)
{
/* Check whether the loop rolls. */
if ((get_estimated_loop_iterations (loop, &iterations)
- || get_max_loop_iterations (loop, &iterations))
+ || get_likely_max_loop_iterations (loop, &iterations))
&& wi::ltu_p (iterations, 2 * nunroll))
{
if (dump_file)
return;
}
- /* Success; now force nunroll to be power of 2, as we are unable to
- cope with overflows in computation of number of iterations. */
+ /* Success; now force nunroll to be a power of 2, as code-gen
+ requires it; we are unable to cope with overflows in the
+ computation of the number of iterations. */
for (i = 1; 2 * i <= nunroll; i *= 2)
continue;
static rtx_insn *
compare_and_jump_seq (rtx op0, rtx op1, enum rtx_code comp,
- rtx_code_label *label, int prob, rtx_insn *cinsn)
+ rtx_code_label *label, profile_probability prob,
+ rtx_insn *cinsn)
{
rtx_insn *seq;
rtx_jump_insn *jump;
op0 = force_operand (op0, NULL_RTX);
op1 = force_operand (op1, NULL_RTX);
do_compare_rtx_and_jump (op0, op1, comp, 0,
- mode, NULL_RTX, NULL, label, -1);
+ mode, NULL_RTX, NULL, label,
+ profile_probability::uninitialized ());
jump = as_a <rtx_jump_insn *> (get_last_insn ());
jump->set_jump_target (label);
LABEL_NUSES (label)++;
}
- add_int_reg_note (jump, REG_BR_PROB, prob);
+ if (prob.initialized_p ())
+ add_reg_br_prob_note (jump, prob);
seq = get_insns ();
end_sequence ();
return seq;
}
-/* Unroll LOOP for which we are able to count number of iterations in runtime
- LOOP->LPT_DECISION.TIMES times. The transformation does this (with some
- extra care for case n < 0):
+/* Unroll LOOP for which we are able to count number of iterations in
+ runtime LOOP->LPT_DECISION.TIMES times. The times value must be a
+ power of two. The transformation does this (with some extra care
+ for case n < 0):
for (i = 0; i < n; i++)
body;
}
*/
static void
-unroll_loop_runtime_iterations (struct loop *loop)
+unroll_loop_runtime_iterations (class loop *loop)
{
rtx old_niter, niter, tmp;
rtx_insn *init_code, *branch_code;
- unsigned i, j, p;
- basic_block preheader, *body, swtch, ezc_swtch;
- sbitmap wont_exit;
+ unsigned i, j;
+ profile_probability p;
+ basic_block preheader, *body, swtch, ezc_swtch = NULL;
int may_exit_copy;
+ profile_count iter_count, new_count;
unsigned n_peel;
edge e;
bool extra_zero_check, last_may_exit;
unsigned max_unroll = loop->lpt_decision.times;
- struct niter_desc *desc = get_simple_loop_desc (loop);
+ class niter_desc *desc = get_simple_loop_desc (loop);
bool exit_at_end = loop_exit_at_end_p (loop);
struct opt_info *opt_info = NULL;
bool ok;
if (tmp != niter)
emit_move_insn (niter, tmp);
+ /* For loops that exit at end and whose number of iterations is reliable,
+ add one to niter to account for first pass through loop body before
+ reaching exit test. */
+ if (exit_at_end && !desc->noloop_assumptions)
+ {
+ niter = expand_simple_binop (desc->mode, PLUS,
+ niter, const1_rtx,
+ NULL_RTX, 0, OPTAB_LIB_WIDEN);
+ old_niter = niter;
+ }
+
/* Count modulo by ANDing it with max_unroll; we use the fact that
the number of unrollings is a power of two, and thus this is correct
even if there is overflow in the computation. */
auto_vec<edge> remove_edges;
- wont_exit = sbitmap_alloc (max_unroll + 2);
+ auto_sbitmap wont_exit (max_unroll + 2);
- /* Peel the first copy of loop body (almost always we must leave exit test
- here; the only exception is when we have extra zero check and the number
- of iterations is reliable. Also record the place of (possible) extra
- zero check. */
- bitmap_clear (wont_exit);
- if (extra_zero_check
- && !desc->noloop_assumptions)
- bitmap_set_bit (wont_exit, 1);
- ezc_swtch = loop_preheader_edge (loop)->src;
- ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
- 1, wont_exit, desc->out_edge,
- &remove_edges,
- DLTHE_FLAG_UPDATE_FREQ);
- gcc_assert (ok);
+ if (extra_zero_check || desc->noloop_assumptions)
+ {
+ /* Peel the first copy of loop body. Leave the exit test if the number
+ of iterations is not reliable. Also record the place of the extra zero
+ check. */
+ bitmap_clear (wont_exit);
+ if (!desc->noloop_assumptions)
+ bitmap_set_bit (wont_exit, 1);
+ ezc_swtch = loop_preheader_edge (loop)->src;
+ ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
+ 1, wont_exit, desc->out_edge,
+ &remove_edges,
+ DLTHE_FLAG_UPDATE_FREQ);
+ gcc_assert (ok);
+ }
/* Record the place where switch will be built for preconditioning. */
swtch = split_edge (loop_preheader_edge (loop));
+ /* Compute count increments for each switch block and initialize
+ innermost switch block. Switch blocks and peeled loop copies are built
+ from innermost outward. */
+ iter_count = new_count = swtch->count.apply_scale (1, max_unroll + 1);
+ swtch->count = new_count;
+
for (i = 0; i < n_peel; i++)
{
/* Peel the copy. */
/* Create item for switch. */
j = n_peel - i - (extra_zero_check ? 0 : 1);
- p = REG_BR_PROB_BASE / (i + 2);
+ p = profile_probability::always ().apply_scale (1, i + 2);
preheader = split_edge (loop_preheader_edge (loop));
- branch_code = compare_and_jump_seq (copy_rtx (niter), GEN_INT (j), EQ,
- block_label (preheader), p,
- NULL);
+ /* Add in count of edge from switch block. */
+ preheader->count += iter_count;
+ branch_code = compare_and_jump_seq (copy_rtx (niter),
+ gen_int_mode (j, desc->mode), EQ,
+ block_label (preheader), p, NULL);
/* We rely on the fact that the compare and jump cannot be optimized out,
and hence the cfg we create is correct. */
swtch = split_edge_and_insert (single_pred_edge (swtch), branch_code);
set_immediate_dominator (CDI_DOMINATORS, preheader, swtch);
- single_pred_edge (swtch)->probability = REG_BR_PROB_BASE - p;
+ single_succ_edge (swtch)->probability = p.invert ();
+ new_count += iter_count;
+ swtch->count = new_count;
e = make_edge (swtch, preheader,
single_succ_edge (swtch)->flags & EDGE_IRREDUCIBLE_LOOP);
- e->count = RDIV (preheader->count * REG_BR_PROB_BASE, p);
e->probability = p;
}
if (extra_zero_check)
{
/* Add branch for zero iterations. */
- p = REG_BR_PROB_BASE / (max_unroll + 1);
+ p = profile_probability::always ().apply_scale (1, max_unroll + 1);
swtch = ezc_swtch;
preheader = split_edge (loop_preheader_edge (loop));
+ /* Recompute count adjustments since initial peel copy may
+ have exited and reduced those values that were computed above. */
+ iter_count = swtch->count.apply_scale (1, max_unroll + 1);
+ /* Add in count of edge from switch block. */
+ preheader->count += iter_count;
branch_code = compare_and_jump_seq (copy_rtx (niter), const0_rtx, EQ,
block_label (preheader), p,
NULL);
swtch = split_edge_and_insert (single_succ_edge (swtch), branch_code);
set_immediate_dominator (CDI_DOMINATORS, preheader, swtch);
- single_succ_edge (swtch)->probability = REG_BR_PROB_BASE - p;
+ single_succ_edge (swtch)->probability = p.invert ();
e = make_edge (swtch, preheader,
single_succ_edge (swtch)->flags & EDGE_IRREDUCIBLE_LOOP);
- e->count = RDIV (preheader->count * REG_BR_PROB_BASE, p);
e->probability = p;
}
free_opt_info (opt_info);
}
- free (wont_exit);
-
if (exit_at_end)
{
basic_block exit_block = get_bb_copy (desc->in_edge->src);
if (loop->any_estimate)
loop->nb_iterations_estimate
= wi::udiv_trunc (loop->nb_iterations_estimate, max_unroll + 1);
+ if (loop->any_likely_upper_bound)
+ loop->nb_iterations_likely_upper_bound
+ = wi::udiv_trunc (loop->nb_iterations_likely_upper_bound, max_unroll + 1);
if (exit_at_end)
{
desc->niter_expr =
--loop->nb_iterations_estimate;
else
loop->any_estimate = false;
+ if (loop->any_likely_upper_bound
+ && loop->nb_iterations_likely_upper_bound != 0)
+ --loop->nb_iterations_likely_upper_bound;
+ else
+ loop->any_likely_upper_bound = false;
}
if (dump_file)
/* Decide whether to unroll LOOP stupidly and how much. */
static void
-decide_unroll_stupid (struct loop *loop, int flags)
+decide_unroll_stupid (class loop *loop, int flags)
{
unsigned nunroll, nunroll_by_av, i;
- struct niter_desc *desc;
+ class niter_desc *desc;
widest_int iterations;
- if (!(flags & UAP_UNROLL_ALL))
- {
- /* We were not asked to, just return back silently. */
- return;
- }
+ /* If we were not asked to unroll this loop, just return back silently. */
+ if (!(flags & UAP_UNROLL_ALL) && !loop->unroll)
+ return;
- if (dump_file)
- fprintf (dump_file, "\n;; Considering unrolling loop stupidly\n");
+ if (dump_enabled_p ())
+ dump_printf (MSG_NOTE, "considering unrolling loop stupidly\n");
/* nunroll = total number of copies of the original loop body in
unrolled loop (i.e. if it is 2, we have to duplicate loop body once. */
- nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
+ nunroll = param_max_unrolled_insns / loop->ninsns;
nunroll_by_av
- = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
+ = param_max_average_unrolled_insns / loop->av_ninsns;
if (nunroll > nunroll_by_av)
nunroll = nunroll_by_av;
- if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
- nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
+ if (nunroll > (unsigned) param_max_unroll_times)
+ nunroll = param_max_unroll_times;
if (targetm.loop_unroll_adjust)
nunroll = targetm.loop_unroll_adjust (nunroll, loop);
+ if (loop->unroll > 0 && loop->unroll < USHRT_MAX)
+ nunroll = loop->unroll;
+
/* Skip big loops. */
if (nunroll <= 1)
{
if (desc->simple_p && !desc->assumptions)
{
if (dump_file)
- fprintf (dump_file, ";; The loop is simple\n");
+ fprintf (dump_file, ";; Loop is simple\n");
return;
}
/* Check whether the loop rolls. */
if ((get_estimated_loop_iterations (loop, &iterations)
- || get_max_loop_iterations (loop, &iterations))
+ || get_likely_max_loop_iterations (loop, &iterations))
&& wi::ltu_p (iterations, 2 * nunroll))
{
if (dump_file)
}
*/
static void
-unroll_loop_stupid (struct loop *loop)
+unroll_loop_stupid (class loop *loop)
{
- sbitmap wont_exit;
unsigned nunroll = loop->lpt_decision.times;
- struct niter_desc *desc = get_simple_loop_desc (loop);
+ class niter_desc *desc = get_simple_loop_desc (loop);
struct opt_info *opt_info = NULL;
bool ok;
|| flag_variable_expansion_in_unroller)
opt_info = analyze_insns_in_loop (loop);
-
- wont_exit = sbitmap_alloc (nunroll + 1);
+ auto_sbitmap wont_exit (nunroll + 1);
bitmap_clear (wont_exit);
opt_info_start_duplication (opt_info);
free_opt_info (opt_info);
}
- free (wont_exit);
-
if (desc->simple_p)
{
/* We indeed may get here provided that there are nontrivial assumptions
variable. */
static bool
-referenced_in_one_insn_in_loop_p (struct loop *loop, rtx reg,
+referenced_in_one_insn_in_loop_p (class loop *loop, rtx reg,
int *debug_uses)
{
basic_block *body, bb;
/* Reset the DEBUG_USES debug insns in LOOP that reference REG. */
static void
-reset_debug_uses_in_loop (struct loop *loop, rtx reg, int debug_uses)
+reset_debug_uses_in_loop (class loop *loop, rtx reg, int debug_uses)
{
basic_block *body, bb;
unsigned i;
*/
static struct var_to_expand *
-analyze_insn_to_expand_var (struct loop *loop, rtx_insn *insn)
+analyze_insn_to_expand_var (class loop *loop, rtx_insn *insn)
{
rtx set, dest, src;
struct var_to_expand *ves;
}
/* Hmm, this is a bit paradoxical. We know that INSN is a valid insn
- in MD. But if there is no optab to generate the insn, we can not
+ in MD. But if there is no optab to generate the insn, we cannot
perform the variable expansion. This can happen if an MD provides
an insn but not a named pattern to generate it, for example to avoid
producing code that needs additional mode switches like for x87/mmx.
if (debug_uses)
/* Instead of resetting the debug insns, we could replace each
debug use in the loop with the sum or product of all expanded
- accummulators. Since we'll only know of all expansions at the
+ accumulators. Since we'll only know of all expansions at the
end, we'd have to keep track of which vars_to_expand a debug
insn in the loop references, take note of each copy of the
debug insn during unrolling, and when it's all done, compute
analyze_iv_to_split_insn (rtx_insn *insn)
{
rtx set, dest;
- struct rtx_iv iv;
+ class rtx_iv iv;
struct iv_to_split *ivts;
+ scalar_int_mode mode;
bool ok;
/* For now we just split the basic induction variables. Later this may be
return NULL;
dest = SET_DEST (set);
- if (!REG_P (dest))
+ if (!REG_P (dest) || !is_a <scalar_int_mode> (GET_MODE (dest), &mode))
return NULL;
- if (!biv_p (insn, dest))
+ if (!biv_p (insn, mode, dest))
return NULL;
ok = iv_analyze_result (insn, dest, &iv);
is undefined for the return value. */
static struct opt_info *
-analyze_insns_in_loop (struct loop *loop)
+analyze_insns_in_loop (class loop *loop)
{
basic_block *body, bb;
unsigned i;
else
{
incr = simplify_gen_binary (MULT, mode,
- ivts->step, gen_int_mode (delta, mode));
+ copy_rtx (ivts->step),
+ gen_int_mode (delta, mode));
expr = simplify_gen_binary (PLUS, GET_MODE (ivts->base_var),
ivts->base_var, incr);
}
/* Generate a new register only if the expansion limit has not been
reached. Else reuse an already existing expansion. */
- if (PARAM_VALUE (PARAM_MAX_VARIABLE_EXPANSIONS) > ve->expansion_count)
+ if (param_max_variable_expansions > ve->expansion_count)
{
really_new_expansion = true;
new_reg = gen_reg_rtx (GET_MODE (ve->reg));
if (ve->var_expansions.length () == 0)
return;
+ /* ve->reg might be SUBREG or some other non-shareable RTL, and we use
+ it both here and as the destination of the assignment. */
+ sum = copy_rtx (sum);
start_sequence ();
switch (ve->op)
{
FOR_BB_INSNS_SAFE (bb, insn, next)
{
if (!INSN_P (insn)
- || (DEBUG_INSN_P (insn)
+ || (DEBUG_BIND_INSN_P (insn)
+ && INSN_VAR_LOCATION_DECL (insn)
&& TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL))
continue;
while (!INSN_P (orig_insn)
- || (DEBUG_INSN_P (orig_insn)
+ || (DEBUG_BIND_INSN_P (orig_insn)
+ && INSN_VAR_LOCATION_DECL (orig_insn)
&& (TREE_CODE (INSN_VAR_LOCATION_DECL (orig_insn))
== LABEL_DECL)))
orig_insn = NEXT_INSN (orig_insn);