+
+/* A hash function for information about insns to split. */
+
+static hashval_t
+si_info_hash (const void *ivts)
+{
+ return (hashval_t) INSN_UID (((const struct iv_to_split *) ivts)->insn);
+}
+
+/* An equality function for information about insns to split. */
+
+static int
+si_info_eq (const void *ivts1, const void *ivts2)
+{
+ const struct iv_to_split *const i1 = (const struct iv_to_split *) ivts1;
+ const struct iv_to_split *const i2 = (const struct iv_to_split *) ivts2;
+
+ return i1->insn == i2->insn;
+}
+
+/* Return a hash for VES, which is really a "var_to_expand *". */
+
+static hashval_t
+ve_info_hash (const void *ves)
+{
+ return (hashval_t) INSN_UID (((const struct var_to_expand *) ves)->insn);
+}
+
+/* Return true if IVTS1 and IVTS2 (which are really both of type
+ "var_to_expand *") refer to the same instruction. */
+
+static int
+ve_info_eq (const void *ivts1, const void *ivts2)
+{
+ const struct var_to_expand *const i1 = (const struct var_to_expand *) ivts1;
+ const struct var_to_expand *const i2 = (const struct var_to_expand *) ivts2;
+
+ return i1->insn == i2->insn;
+}
+
+/* Return true if REG is referenced in exactly one nondebug insn in
+ LOOP. Set *DEBUG_USES to the number of debug insns that reference the
+ variable. */
+
+static bool
+referenced_in_one_insn_in_loop_p (struct loop *loop, rtx reg,
+ int *debug_uses)
+{
+ basic_block *body, bb;
+ unsigned i;
+ int count_ref = 0;
+ rtx insn;
+
+ body = get_loop_body (loop);
+ for (i = 0; i < loop->num_nodes; i++)
+ {
+ bb = body[i];
+
+ FOR_BB_INSNS (bb, insn)
+ if (!rtx_referenced_p (reg, insn))
+ continue;
+ else if (DEBUG_INSN_P (insn))
+ ++*debug_uses;
+ else if (++count_ref > 1)
+ break;
+ }
+ free (body);
+ return (count_ref == 1);
+}
+
+/* Reset the DEBUG_USES debug insns in LOOP that reference REG. */
+
+static void
+reset_debug_uses_in_loop (struct loop *loop, rtx reg, int debug_uses)
+{
+ basic_block *body, bb;
+ unsigned i;
+ rtx insn;
+
+ body = get_loop_body (loop);
+ for (i = 0; debug_uses && i < loop->num_nodes; i++)
+ {
+ bb = body[i];
+
+ FOR_BB_INSNS (bb, insn)
+ if (!DEBUG_INSN_P (insn) || !rtx_referenced_p (reg, insn))
+ continue;
+ else
+ {
+ validate_change (insn, &INSN_VAR_LOCATION_LOC (insn),
+ gen_rtx_UNKNOWN_VAR_LOC (), 0);
+ if (!--debug_uses)
+ break;
+ }
+ }
+ free (body);
+}
+
+/* Determine whether INSN contains an accumulator
+ which can be expanded into separate copies,
+ one for each copy of the LOOP body.
+
+ for (i = 0 ; i < n; i++)
+ sum += a[i];
+
+ ==>
+
+ sum += a[i]
+ ....
+ i = i+1;
+ sum1 += a[i]
+ ....
+ i = i+1
+ sum2 += a[i];
+ ....
+
+ Return NULL if INSN contains no opportunity for expansion of an
+ accumulator. Otherwise, allocate a VAR_TO_EXPAND structure, fill it
+ with the relevant information and return a pointer to it. */
+
+static struct var_to_expand *
+analyze_insn_to_expand_var (struct loop *loop, rtx insn)
+{
+ rtx set, dest, src;
+ struct var_to_expand *ves;
+ unsigned accum_pos;
+ enum rtx_code code;
+ int debug_uses = 0;
+
+ set = single_set (insn);
+ if (!set)
+ return NULL;
+
+ dest = SET_DEST (set);
+ src = SET_SRC (set);
+ code = GET_CODE (src);
+
+ if (code != PLUS && code != MINUS && code != MULT && code != FMA)
+ return NULL;
+
+ if (FLOAT_MODE_P (GET_MODE (dest)))
+ {
+ if (!flag_associative_math)
+ return NULL;
+ /* In the case of FMA, we're also changing the rounding. */
+ if (code == FMA && !flag_unsafe_math_optimizations)
+ return NULL;
+ }
+
+ /* Hmm, this is a bit paradoxical. We know that INSN is a valid insn
+ in MD. But if there is no optab to generate the insn, we cannot
+ perform the variable expansion. This can happen if an MD provides
+ an insn but not a named pattern to generate it, for example to avoid
+ producing code that needs additional mode switches like for x87/mmx.
+
+ So we check have_insn_for, which looks for an optab for the operation
+ in SRC. If it doesn't exist, we can't perform the expansion even
+ though INSN is valid. */
+ if (!have_insn_for (code, GET_MODE (src)))
+ return NULL;
+
+ if (!REG_P (dest)
+ && !(GET_CODE (dest) == SUBREG
+ && REG_P (SUBREG_REG (dest))))
+ return NULL;
+
+ /* Find the accumulator use within the operation. */
+ if (code == FMA)
+ {
+ /* We only support accumulation via FMA in the ADD position. */
+ if (!rtx_equal_p (dest, XEXP (src, 2)))
+ return NULL;
+ accum_pos = 2;
+ }
+ else if (rtx_equal_p (dest, XEXP (src, 0)))
+ accum_pos = 0;
+ else if (rtx_equal_p (dest, XEXP (src, 1)))
+ {
+ /* The method of expansion that we are using, which initializes
+ the expansions with zero and sums them at the end of the
+ computation, will yield wrong results for (x = something - x);
+ thus avoid using it in that case. E.g. two iterations of
+ x = 1. - x leave x unchanged, but a zero-initialized expansion
+ added in at the loop exit would not reproduce that. */
+ if (code == MINUS)
+ return NULL;
+ accum_pos = 1;
+ }
+ else
+ return NULL;
+
+ /* It must not otherwise be used. */
+ if (code == FMA)
+ {
+ if (rtx_referenced_p (dest, XEXP (src, 0))
+ || rtx_referenced_p (dest, XEXP (src, 1)))
+ return NULL;
+ }
+ else if (rtx_referenced_p (dest, XEXP (src, 1 - accum_pos)))
+ return NULL;
+
+ /* It must be used in exactly one insn. */
+ if (!referenced_in_one_insn_in_loop_p (loop, dest, &debug_uses))
+ return NULL;
+
+ if (dump_file)
+ {
+ fprintf (dump_file, "\n;; Expanding Accumulator ");
+ print_rtl (dump_file, dest);
+ fprintf (dump_file, "\n");
+ }
+
+ if (debug_uses)
+ /* Instead of resetting the debug insns, we could replace each
+ debug use in the loop with the sum or product of all expanded
+ accumulators. Since we'll only know of all expansions at the
+ end, we'd have to keep track of which vars_to_expand a debug
+ insn in the loop references, take note of each copy of the
+ debug insn during unrolling, and when it's all done, compute
+ the sum or product of each variable and adjust the original
+ debug insn and each copy thereof. What a pain! */
+ reset_debug_uses_in_loop (loop, dest, debug_uses);
+
+ /* Record the accumulator to expand. */
+ ves = XNEW (struct var_to_expand);
+ ves->insn = insn;
+ ves->reg = copy_rtx (dest);
+ ves->var_expansions.create (1);
+ ves->next = NULL;
+ ves->op = GET_CODE (src);
+ ves->expansion_count = 0;
+ ves->reuse_expansion = 0;
+ return ves;
+}
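+
+/* For illustration only: assuming the analysis above accepts the insn
+ for "sum += a[i]" and two expansions are later created for it, the
+ complete transformation (initialization is added by
+ insert_var_expansion_initialization, the combination by
+ combine_var_copies_in_loop_exit) amounts to
+
+ sum1 = 0; sum2 = 0; // in the loop preheader
+ for (i = 0; i < n; i += 3)
+ {
+ sum += a[i];
+ sum1 += a[i + 1]; // copies rewritten by expand_var_during_unrolling
+ sum2 += a[i + 2];
+ }
+ sum = sum + sum1 + sum2; // at the split loop exit */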
+
+/* Determine whether there is an induction variable in INSN that
+ we would like to split during unrolling.
+
+ I.e. replace
+
+ i = i + 1;
+ ...
+ i = i + 1;
+ ...
+ i = i + 1;
+ ...
+
+ chains of this type by
+
+ i0 = i + 1
+ ...
+ i = i0 + 1
+ ...
+ i = i0 + 2
+ ...
+
+ Return NULL if INSN contains no interesting IVs. Otherwise, allocate
+ an IV_TO_SPLIT structure, fill it with the relevant information and return a
+ pointer to it. */
+
+static struct iv_to_split *
+analyze_iv_to_split_insn (rtx insn)
+{
+ rtx set, dest;
+ struct rtx_iv iv;
+ struct iv_to_split *ivts;
+ bool ok;
+
+ /* For now we just split the basic induction variables. Later this may
+ be extended, for example to also select addresses of memory
+ references. */
+ set = single_set (insn);
+ if (!set)
+ return NULL;
+
+ dest = SET_DEST (set);
+ if (!REG_P (dest))
+ return NULL;
+
+ if (!biv_p (insn, dest))
+ return NULL;
+
+ ok = iv_analyze_result (insn, dest, &iv);
+
+ /* This used to be an assert under the assumption that if biv_p returns
+ true then iv_analyze_result must also return true. However, that
+ assumption is not strictly correct as evidenced by pr25569.
+
+ Returning NULL when iv_analyze_result returns false is safe and
+ avoids the problems in pr25569 until the iv_analyze_* routines
+ can be fixed, which is apparently hard and time-consuming
+ according to their author. */
+ if (! ok)
+ return NULL;
+
+ if (iv.step == const0_rtx
+ || iv.mode != iv.extend_mode)
+ return NULL;
+
+ /* Record the insn to split. */
+ ivts = XNEW (struct iv_to_split);
+ ivts->insn = insn;
+ ivts->orig_var = dest;
+ ivts->base_var = NULL_RTX;
+ ivts->step = iv.step;
+ ivts->next = NULL;
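+ /* Location of the IV within the single_set of the insn: operand 1,
+ i.e. the SET_SRC (see get_ivts_expr). */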
+ ivts->n_loc = 1;
+ ivts->loc[0] = 1;
+
+ return ivts;
+}
+
+/* Determine which insns in LOOP can be optimized. Return an OPT_INFO
+ struct with the relevant hash tables filled with all insns to be
+ optimized. The FIRST_NEW_BLOCK field is undefined for the return
+ value. */
+
+static struct opt_info *
+analyze_insns_in_loop (struct loop *loop)
+{
+ basic_block *body, bb;
+ unsigned i;
+ struct opt_info *opt_info = XCNEW (struct opt_info);
+ rtx insn;
+ struct iv_to_split *ivts = NULL;
+ struct var_to_expand *ves = NULL;
+ PTR *slot1;
+ PTR *slot2;
+ vec<edge> edges = get_loop_exit_edges (loop);
+ edge exit;
+ bool can_apply = false;
+
+ iv_analysis_loop_init (loop);
+
+ body = get_loop_body (loop);
+
+ if (flag_split_ivs_in_unroller)
+ {
+ opt_info->insns_to_split = htab_create (5 * loop->num_nodes,
+ si_info_hash, si_info_eq, free);
+ opt_info->iv_to_split_head = NULL;
+ opt_info->iv_to_split_tail = &opt_info->iv_to_split_head;
+ }
+
+ /* Record the loop exit bb and loop preheader before the unrolling. */
+ opt_info->loop_preheader = loop_preheader_edge (loop)->src;
+
+ if (edges.length () == 1)
+ {
+ exit = edges[0];
+ if (!(exit->flags & EDGE_COMPLEX))
+ {
+ opt_info->loop_exit = split_edge (exit);
+ can_apply = true;
+ }
+ }
+
+ if (flag_variable_expansion_in_unroller
+ && can_apply)
+ {
+ opt_info->insns_with_var_to_expand = htab_create (5 * loop->num_nodes,
+ ve_info_hash,
+ ve_info_eq, free);
+ opt_info->var_to_expand_head = NULL;
+ opt_info->var_to_expand_tail = &opt_info->var_to_expand_head;
+ }
+
+ for (i = 0; i < loop->num_nodes; i++)
+ {
+ bb = body[i];
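+ /* Only blocks that dominate the loop latch are executed in every
+ iteration, so only their insns are candidates for splitting or
+ expansion. */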
+ if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
+ continue;
+
+ FOR_BB_INSNS (bb, insn)
+ {
+ if (!INSN_P (insn))
+ continue;
+
+ if (opt_info->insns_to_split)
+ ivts = analyze_iv_to_split_insn (insn);
+
+ if (ivts)
+ {
+ slot1 = htab_find_slot (opt_info->insns_to_split, ivts, INSERT);
+ gcc_assert (*slot1 == NULL);
+ *slot1 = ivts;
+ *opt_info->iv_to_split_tail = ivts;
+ opt_info->iv_to_split_tail = &ivts->next;
+ continue;
+ }
+
+ if (opt_info->insns_with_var_to_expand)
+ ves = analyze_insn_to_expand_var (loop, insn);
+
+ if (ves)
+ {
+ slot2 = htab_find_slot (opt_info->insns_with_var_to_expand, ves, INSERT);
+ gcc_assert (*slot2 == NULL);
+ *slot2 = ves;
+ *opt_info->var_to_expand_tail = ves;
+ opt_info->var_to_expand_tail = &ves->next;
+ }
+ }
+ }
+
+ edges.release ();
+ free (body);
+ return opt_info;
+}
+
+/* Called just before loop duplication. Record the start of the
+ duplicated area in OPT_INFO. */
+
+static void
+opt_info_start_duplication (struct opt_info *opt_info)
+{
+ if (opt_info)
+ opt_info->first_new_block = last_basic_block;
+}
+
+/* Determine the number of iterations between initialization of the base
+ variable and the current copy (N_COPY). N_COPIES is the total number
+ of newly created copies. UNROLLING is true if we are unrolling
+ (not peeling) the loop. */
+
+static unsigned
+determine_split_iv_delta (unsigned n_copy, unsigned n_copies, bool unrolling)
+{
+ if (unrolling)
+ {
+ /* If we are unrolling, initialization is done in the original loop
+ body (number 0). */
+ return n_copy;
+ }
+ else
+ {
+ /* If we are peeling, the copy in which the initialization occurs has
+ number 1. The original loop (number 0) is the last. */
+ if (n_copy)
+ return n_copy - 1;
+ else
+ return n_copies;
+ }
+}
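+
+/* For example, when unrolling with three new copies (N_COPIES == 3),
+ determine_split_iv_delta gives the original body (copy 0, which also
+ holds the initialization) delta 0 and copies 1 .. 3 deltas 1 .. 3.
+ When peeling three copies, copy 1 (where the initialization occurs)
+ gets delta 0, copies 2 and 3 get deltas 1 and 2, and the original
+ body, which runs last, gets delta 3. */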
+
+/* Locate in EXPR the expression corresponding to the location recorded
+ in IVTS, and return a pointer to the RTX for this location. */
+
+static rtx *
+get_ivts_expr (rtx expr, struct iv_to_split *ivts)
+{
+ unsigned i;
+ rtx *ret = &expr;
+
+ for (i = 0; i < ivts->n_loc; i++)
+ ret = &XEXP (*ret, ivts->loc[i]);
+
+ return ret;
+}
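+
+/* E.g. for an insn whose single_set is (set (reg i) (plus (reg i)
+ (const_int 1))), N_LOC == 1 and LOC[0] == 1 make get_ivts_expr
+ return the address of the PLUS expression, i.e. of the SET_SRC. */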
+
+/* Allocate the basic variable for the induction variable chain of IVTS. */
+
+static void
+allocate_basic_variable (struct iv_to_split *ivts)
+{
+ rtx expr = *get_ivts_expr (single_set (ivts->insn), ivts);
+
+ ivts->base_var = gen_reg_rtx (GET_MODE (expr));
+}
+
+/* Insert initialization of basic variable of IVTS before INSN, taking
+ the initial value from INSN. */
+
+static void
+insert_base_initialization (struct iv_to_split *ivts, rtx insn)
+{
+ rtx expr = copy_rtx (*get_ivts_expr (single_set (insn), ivts));
+ rtx seq;
+
+ start_sequence ();
+ expr = force_operand (expr, ivts->base_var);
+ if (expr != ivts->base_var)
+ emit_move_insn (ivts->base_var, expr);
+ seq = get_insns ();
+ end_sequence ();
+
+ emit_insn_before (seq, insn);
+}
+
+/* Replace the use of the induction variable described in IVTS in INSN
+ with base variable + DELTA * step. */
+
+static void
+split_iv (struct iv_to_split *ivts, rtx insn, unsigned delta)
+{
+ rtx expr, *loc, seq, incr, var;
+ enum machine_mode mode = GET_MODE (ivts->base_var);
+ rtx src, dest, set;
+
+ /* Construct base + DELTA * step. */
+ if (!delta)
+ expr = ivts->base_var;
+ else
+ {
+ incr = simplify_gen_binary (MULT, mode,
+ ivts->step, gen_int_mode (delta, mode));
+ expr = simplify_gen_binary (PLUS, GET_MODE (ivts->base_var),
+ ivts->base_var, incr);
+ }
+
+ /* Figure out where to do the replacement. */
+ loc = get_ivts_expr (single_set (insn), ivts);
+
+ /* If we can make the replacement right away, we're done. */
+ if (validate_change (insn, loc, expr, 0))
+ return;
+
+ /* Otherwise, force EXPR into a register and try again. */
+ start_sequence ();
+ var = gen_reg_rtx (mode);
+ expr = force_operand (expr, var);
+ if (expr != var)
+ emit_move_insn (var, expr);
+ seq = get_insns ();
+ end_sequence ();
+ emit_insn_before (seq, insn);
+
+ if (validate_change (insn, loc, var, 0))
+ return;
+
+ /* As a last resort, try recreating the assignment in INSN
+ completely from scratch. */
+ set = single_set (insn);
+ gcc_assert (set);
+
+ start_sequence ();
+ *loc = var;
+ src = copy_rtx (SET_SRC (set));
+ dest = copy_rtx (SET_DEST (set));
+ src = force_operand (src, dest);
+ if (src != dest)
+ emit_move_insn (dest, src);
+ seq = get_insns ();
+ end_sequence ();
+
+ emit_insn_before (seq, insn);
+ delete_insn (insn);
+}
+
+/* Return one expansion of the accumulator recorded in struct VE. */
+
+static rtx
+get_expansion (struct var_to_expand *ve)
+{
+ rtx reg;
+
+ if (ve->reuse_expansion == 0)
+ reg = ve->reg;
+ else
+ reg = ve->var_expansions[ve->reuse_expansion - 1];
+
+ if (ve->var_expansions.length () == (unsigned) ve->reuse_expansion)
+ ve->reuse_expansion = 0;
+ else
+ ve->reuse_expansion++;
+
+ return reg;
+}
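+
+/* Once the expansion limit is reached, repeated calls to get_expansion
+ cycle round robin through VE->reg, var_expansions[0], ...,
+ var_expansions[n-1], VE->reg, and so on. */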
+
+/* Given INSN, replace the uses of the accumulator recorded in VE
+ with a new register. */
+
+static void
+expand_var_during_unrolling (struct var_to_expand *ve, rtx insn)
+{
+ rtx new_reg, set;
+ bool really_new_expansion = false;
+
+ set = single_set (insn);
+ gcc_assert (set);
+
+ /* Generate a new register only if the expansion limit has not been
+ reached. Else reuse an already existing expansion. */
+ if (PARAM_VALUE (PARAM_MAX_VARIABLE_EXPANSIONS) > ve->expansion_count)
+ {
+ really_new_expansion = true;
+ new_reg = gen_reg_rtx (GET_MODE (ve->reg));
+ }
+ else
+ new_reg = get_expansion (ve);
+
+ validate_replace_rtx_group (SET_DEST (set), new_reg, insn);
+ if (apply_change_group ())
+ if (really_new_expansion)
+ {
+ ve->var_expansions.safe_push (new_reg);
+ ve->expansion_count++;
+ }
+}
+
+/* Initialize the variable expansions in loop preheader. PLACE is the
+ loop-preheader basic block where the initialization of the
+ expansions should take place. The expansions are initialized with
+ (-0) when the operation is plus or minus to honor signed zero. This
+ way we can prevent cases where the sign of the final result is
+ affected by the sign of the expansion. Here is an example to
+ demonstrate this:
+
+ for (i = 0 ; i < n; i++)
+ sum += something;
+
+ ==>
+
+ sum += something
+ ....
+ i = i+1;
+ sum1 += something
+ ....
+ i = i+1
+ sum2 += something;
+ ....
+
+ When SUM is initialized with -zero and SOMETHING is also -zero, the
+ final result of sum should be -zero, thus the expansions sum1 and sum2
+ should be initialized with -zero as well (otherwise we will get +zero
+ as the final result). */
+
+static void
+insert_var_expansion_initialization (struct var_to_expand *ve,
+ basic_block place)
+{
+ rtx seq, var, zero_init;
+ unsigned i;
+ enum machine_mode mode = GET_MODE (ve->reg);
+ bool honor_signed_zero_p = HONOR_SIGNED_ZEROS (mode);
+
+ if (ve->var_expansions.length () == 0)
+ return;
+
+ start_sequence ();
+ switch (ve->op)
+ {
+ case FMA:
+ /* Note that we only accumulate FMA via the ADD operand. */
+ case PLUS:
+ case MINUS:
+ FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
+ {
+ if (honor_signed_zero_p)
+ zero_init = simplify_gen_unary (NEG, mode, CONST0_RTX (mode), mode);
+ else
+ zero_init = CONST0_RTX (mode);
+ emit_move_insn (var, zero_init);
+ }
+ break;
+
+ case MULT:
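+ /* Partial products are combined by multiplication at the loop
+ exit, so start the expansions from the multiplicative identity. */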
+ FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
+ {
+ zero_init = CONST1_RTX (GET_MODE (var));
+ emit_move_insn (var, zero_init);
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ seq = get_insns ();
+ end_sequence ();
+
+ emit_insn_after (seq, BB_END (place));
+}
+
+/* Combine the variable expansions at the loop exit. PLACE is the
+ loop exit basic block where the summation of the expansions should
+ take place. */
+
+static void
+combine_var_copies_in_loop_exit (struct var_to_expand *ve, basic_block place)
+{
+ rtx sum = ve->reg;
+ rtx expr, seq, var, insn;
+ unsigned i;
+
+ if (ve->var_expansions.length () == 0)
+ return;
+
+ start_sequence ();
+ switch (ve->op)
+ {
+ case FMA:
+ /* Note that we only accumulate FMA via the ADD operand. */
+ case PLUS:
+ case MINUS:
+ FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
+ sum = simplify_gen_binary (PLUS, GET_MODE (ve->reg), var, sum);
+ break;
+
+ case MULT:
+ FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
+ sum = simplify_gen_binary (MULT, GET_MODE (ve->reg), var, sum);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ expr = force_operand (sum, ve->reg);
+ if (expr != ve->reg)
+ emit_move_insn (ve->reg, expr);
+ seq = get_insns ();
+ end_sequence ();
+
+ insn = BB_HEAD (place);
+ while (!NOTE_INSN_BASIC_BLOCK_P (insn))
+ insn = NEXT_INSN (insn);
+
+ emit_insn_after (seq, insn);
+}
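+
+/* E.g. with two expansions and VE->op == PLUS,
+ combine_var_copies_in_loop_exit emits a sequence after the basic
+ block note of PLACE that effectively computes
+ sum = sum + sum1 + sum2. */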
+
+/* Strip away REG_EQUAL notes for IVs we're splitting.
+
+ Updating REG_EQUAL notes for IVs we split is tricky: We
+ cannot tell until after unrolling, DF-rescanning, and liveness
+ updating, whether an EQ_USE is reached by the split IV while
+ the IV reg is still live. See PR55006.
+
+ ??? We cannot use remove_reg_equal_equiv_notes_for_regno,
+ because RTL loop-iv requires us to defer rescanning insns and
+ any notes attached to them. So resort to old techniques... */
+
+static void
+maybe_strip_eq_note_for_split_iv (struct opt_info *opt_info, rtx insn)
+{
+ struct iv_to_split *ivts;
+ rtx note = find_reg_equal_equiv_note (insn);
+ if (! note)
+ return;
+ for (ivts = opt_info->iv_to_split_head; ivts; ivts = ivts->next)
+ if (reg_mentioned_p (ivts->orig_var, note))
+ {
+ remove_note (insn, note);
+ return;
+ }
+}
+
+/* Apply loop optimizations in loop copies using the
+ data gathered during the unrolling. Structure
+ OPT_INFO records that data.
+
+ UNROLLING is true if we unrolled (not peeled) the loop.
+ REWRITE_ORIGINAL_LOOP is true if we should also rewrite the original
+ body of the loop (as should happen in complete unrolling, but not in
+ ordinary peeling of the loop). */
+
+static void
+apply_opt_in_copies (struct opt_info *opt_info,
+ unsigned n_copies, bool unrolling,
+ bool rewrite_original_loop)
+{
+ unsigned i, delta;
+ basic_block bb, orig_bb;
+ rtx insn, orig_insn, next;
+ struct iv_to_split ivts_templ, *ivts;
+ struct var_to_expand ve_templ, *ves;
+
+ /* Sanity check -- we need to put initialization in the original loop
+ body. */
+ gcc_assert (!unrolling || rewrite_original_loop);
+
+ /* Allocate the basic variables (i0). */
+ if (opt_info->insns_to_split)
+ for (ivts = opt_info->iv_to_split_head; ivts; ivts = ivts->next)
+ allocate_basic_variable (ivts);
+
+ for (i = opt_info->first_new_block; i < (unsigned) last_basic_block; i++)
+ {
+ bb = BASIC_BLOCK (i);
+ orig_bb = get_bb_original (bb);
+
+ /* bb->aux holds position in copy sequence initialized by
+ duplicate_loop_to_header_edge. */
+ delta = determine_split_iv_delta ((size_t)bb->aux, n_copies,
+ unrolling);
+ bb->aux = 0;
+ orig_insn = BB_HEAD (orig_bb);
+ FOR_BB_INSNS_SAFE (bb, insn, next)
+ {
+ if (!INSN_P (insn)
+ || (DEBUG_INSN_P (insn)
+ && TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL))
+ continue;
+
+ while (!INSN_P (orig_insn)
+ || (DEBUG_INSN_P (orig_insn)
+ && (TREE_CODE (INSN_VAR_LOCATION_DECL (orig_insn))
+ == LABEL_DECL)))
+ orig_insn = NEXT_INSN (orig_insn);
+
+ ivts_templ.insn = orig_insn;
+ ve_templ.insn = orig_insn;
+
+ /* Apply splitting iv optimization. */
+ if (opt_info->insns_to_split)
+ {
+ maybe_strip_eq_note_for_split_iv (opt_info, insn);
+
+ ivts = (struct iv_to_split *)
+ htab_find (opt_info->insns_to_split, &ivts_templ);
+
+ if (ivts)
+ {
+ gcc_assert (GET_CODE (PATTERN (insn))
+ == GET_CODE (PATTERN (orig_insn)));
+
+ if (!delta)
+ insert_base_initialization (ivts, insn);
+ split_iv (ivts, insn, delta);
+ }
+ }
+ /* Apply variable expansion optimization. */
+ if (unrolling && opt_info->insns_with_var_to_expand)
+ {
+ ves = (struct var_to_expand *)
+ htab_find (opt_info->insns_with_var_to_expand, &ve_templ);
+ if (ves)
+ {
+ gcc_assert (GET_CODE (PATTERN (insn))
+ == GET_CODE (PATTERN (orig_insn)));
+ expand_var_during_unrolling (ves, insn);
+ }
+ }
+ orig_insn = NEXT_INSN (orig_insn);
+ }
+ }
+
+ if (!rewrite_original_loop)
+ return;
+
+ /* Initialize the variable expansions in the loop preheader
+ and take care of combining them at the loop exit. */
+ if (opt_info->insns_with_var_to_expand)
+ {
+ for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
+ insert_var_expansion_initialization (ves, opt_info->loop_preheader);
+ for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
+ combine_var_copies_in_loop_exit (ves, opt_info->loop_exit);
+ }
+
+ /* Rewrite also the original loop body. Its blocks are found as the
+ originals of the blocks in the last copied iteration, i.e. those that
+ have get_bb_copy (get_bb_original (bb)) == bb. */
+ for (i = opt_info->first_new_block; i < (unsigned) last_basic_block; i++)
+ {
+ bb = BASIC_BLOCK (i);
+ orig_bb = get_bb_original (bb);
+ if (get_bb_copy (orig_bb) != bb)
+ continue;
+
+ delta = determine_split_iv_delta (0, n_copies, unrolling);
+ for (orig_insn = BB_HEAD (orig_bb);
+ orig_insn != NEXT_INSN (BB_END (bb));
+ orig_insn = next)
+ {
+ next = NEXT_INSN (orig_insn);
+
+ if (!INSN_P (orig_insn))
+ continue;
+
+ ivts_templ.insn = orig_insn;
+ if (opt_info->insns_to_split)
+ {
+ maybe_strip_eq_note_for_split_iv (opt_info, orig_insn);
+
+ ivts = (struct iv_to_split *)
+ htab_find (opt_info->insns_to_split, &ivts_templ);
+ if (ivts)
+ {
+ if (!delta)
+ insert_base_initialization (ivts, orig_insn);
+ split_iv (ivts, orig_insn, delta);
+ continue;
+ }
+ }
+ }
+ }
+}
+
+/* Release OPT_INFO. */
+
+static void
+free_opt_info (struct opt_info *opt_info)
+{
+ if (opt_info->insns_to_split)
+ htab_delete (opt_info->insns_to_split);
+ if (opt_info->insns_with_var_to_expand)
+ {
+ struct var_to_expand *ves;
+
+ for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
+ ves->var_expansions.release ();
+ htab_delete (opt_info->insns_with_var_to_expand);
+ }
+ free (opt_info);
+}