X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=gcc%2Floop-unroll.c;h=de319c4f1d73cf29296f5284d5703ab423d777a9;hb=2cd45f0e6826ddcc92216a508104b2802eddece3;hp=8293448f5cff7ed77be9c19ed5dc8db1ed7bb1ce;hpb=178df94ff18a589455749496b4cd6888feb567b0;p=gcc.git diff --git a/gcc/loop-unroll.c b/gcc/loop-unroll.c index 8293448f5cf..de319c4f1d7 100644 --- a/gcc/loop-unroll.c +++ b/gcc/loop-unroll.c @@ -1,11 +1,12 @@ /* Loop unrolling and peeling. - Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc. + Copyright (C) 2002, 2003, 2004, 2005, 2007, 2008, 2010, 2011 + Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free -Software Foundation; either version 2, or (at your option) any later +Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY @@ -14,9 +15,8 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License -along with GCC; see the file COPYING. If not, write to the Free -Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA -02110-1301, USA. */ +along with GCC; see the file COPYING3. If not see +. */ #include "config.h" #include "system.h" @@ -27,13 +27,12 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA #include "obstack.h" #include "basic-block.h" #include "cfgloop.h" -#include "cfglayout.h" #include "params.h" -#include "output.h" #include "expr.h" #include "hashtab.h" -#include "recog.h" -#include "varray.h" +#include "recog.h" +#include "target.h" +#include "dumpfile.h" /* This pass performs loop unrolling and peeling. We only perform these optimizations on innermost loops (with single exception) because @@ -75,32 +74,32 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA struct iv_to_split { rtx insn; /* The insn in that the induction variable occurs. */ + rtx orig_var; /* The variable (register) for the IV before split. */ rtx base_var; /* The variable on that the values in the further iterations are based. */ rtx step; /* Step of the induction variable. */ + struct iv_to_split *next; /* Next entry in walking order. */ unsigned n_loc; unsigned loc[3]; /* Location where the definition of the induction variable occurs in the insn. For example if N_LOC is 2, the expression is located at - XEXP (XEXP (single_set, loc[0]), loc[1]). */ + XEXP (XEXP (single_set, loc[0]), loc[1]). */ }; -DEF_VEC_P(rtx); -DEF_VEC_ALLOC_P(rtx,heap); - /* Information about accumulators to expand. */ struct var_to_expand { rtx insn; /* The insn in that the variable expansion occurs. */ rtx reg; /* The accumulator which is expanded. */ - VEC(rtx,heap) *var_expansions; /* The copies of the accumulator which is expanded. */ - enum rtx_code op; /* The type of the accumulation - addition, subtraction + vec var_expansions; /* The copies of the accumulator which is expanded. */ + struct var_to_expand *next; /* Next entry in walking order. */ + enum rtx_code op; /* The type of the accumulation - addition, subtraction or multiplication. */ int expansion_count; /* Count the number of expansions generated so far. */ int reuse_expansion; /* The expansion we intend to reuse to expand - the accumulator. If REUSE_EXPANSION is 0 reuse - the original accumulator. Else use + the accumulator. 
If REUSE_EXPANSION is 0 reuse + the original accumulator. Else use var_expansions[REUSE_EXPANSION - 1]. */ }; @@ -110,70 +109,63 @@ struct var_to_expand struct opt_info { htab_t insns_to_split; /* A hashtable of insns to split. */ + struct iv_to_split *iv_to_split_head; /* The first iv to split. */ + struct iv_to_split **iv_to_split_tail; /* Pointer to the tail of the list. */ htab_t insns_with_var_to_expand; /* A hashtable of insns with accumulators to expand. */ + struct var_to_expand *var_to_expand_head; /* The first var to expand. */ + struct var_to_expand **var_to_expand_tail; /* Pointer to the tail of the list. */ unsigned first_new_block; /* The first basic block that was duplicated. */ basic_block loop_exit; /* The loop exit basic block. */ basic_block loop_preheader; /* The loop preheader basic block. */ }; -static void decide_unrolling_and_peeling (struct loops *, int); -static void peel_loops_completely (struct loops *, int); +static void decide_unrolling_and_peeling (int); +static void peel_loops_completely (int); static void decide_peel_simple (struct loop *, int); static void decide_peel_once_rolling (struct loop *, int); static void decide_peel_completely (struct loop *, int); static void decide_unroll_stupid (struct loop *, int); static void decide_unroll_constant_iterations (struct loop *, int); static void decide_unroll_runtime_iterations (struct loop *, int); -static void peel_loop_simple (struct loops *, struct loop *); -static void peel_loop_completely (struct loops *, struct loop *); -static void unroll_loop_stupid (struct loops *, struct loop *); -static void unroll_loop_constant_iterations (struct loops *, struct loop *); -static void unroll_loop_runtime_iterations (struct loops *, struct loop *); +static void peel_loop_simple (struct loop *); +static void peel_loop_completely (struct loop *); +static void unroll_loop_stupid (struct loop *); +static void unroll_loop_constant_iterations (struct loop *); +static void unroll_loop_runtime_iterations (struct loop *); static struct opt_info *analyze_insns_in_loop (struct loop *); static void opt_info_start_duplication (struct opt_info *); static void apply_opt_in_copies (struct opt_info *, unsigned, bool, bool); static void free_opt_info (struct opt_info *); static struct var_to_expand *analyze_insn_to_expand_var (struct loop*, rtx); -static bool referenced_in_one_insn_in_loop_p (struct loop *, rtx); +static bool referenced_in_one_insn_in_loop_p (struct loop *, rtx, int *); static struct iv_to_split *analyze_iv_to_split_insn (rtx); static void expand_var_during_unrolling (struct var_to_expand *, rtx); -static int insert_var_expansion_initialization (void **, void *); -static int combine_var_copies_in_loop_exit (void **, void *); -static int release_var_copies (void **, void *); +static void insert_var_expansion_initialization (struct var_to_expand *, + basic_block); +static void combine_var_copies_in_loop_exit (struct var_to_expand *, + basic_block); static rtx get_expansion (struct var_to_expand *); /* Unroll and/or peel (depending on FLAGS) LOOPS. */ void -unroll_and_peel_loops (struct loops *loops, int flags) +unroll_and_peel_loops (int flags) { - struct loop *loop, *next; + struct loop *loop; bool check; + loop_iterator li; /* First perform complete loop peeling (it is almost surely a win, and affects parameters for further decision a lot). */ - peel_loops_completely (loops, flags); + peel_loops_completely (flags); /* Now decide rest of unrolling and peeling. 
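   decide_unrolling_and_peeling tries its decision routines in a fixed
   order for every loop: constant-iteration unrolling, then runtime
   unrolling, then stupid unrolling, and finally simple peeling.  The
   first routine that succeeds records its verdict in
   LOOP->LPT_DECISION.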
*/ - decide_unrolling_and_peeling (loops, flags); - - loop = loops->tree_root; - while (loop->inner) - loop = loop->inner; + decide_unrolling_and_peeling (flags); /* Scan the loops, inner ones first. */ - while (loop != loops->tree_root) + FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST) { - if (loop->next) - { - next = loop->next; - while (next->inner) - next = next->inner; - } - else - next = loop->outer; - check = true; /* And perform the appropriate transformations. */ switch (loop->lpt_decision.decision) @@ -182,16 +174,16 @@ unroll_and_peel_loops (struct loops *loops, int flags) /* Already done. */ gcc_unreachable (); case LPT_PEEL_SIMPLE: - peel_loop_simple (loops, loop); + peel_loop_simple (loop); break; case LPT_UNROLL_CONSTANT: - unroll_loop_constant_iterations (loops, loop); + unroll_loop_constant_iterations (loop); break; case LPT_UNROLL_RUNTIME: - unroll_loop_runtime_iterations (loops, loop); + unroll_loop_runtime_iterations (loop); break; case LPT_UNROLL_STUPID: - unroll_loop_stupid (loops, loop); + unroll_loop_stupid (loop); break; case LPT_NONE: check = false; @@ -202,11 +194,9 @@ unroll_and_peel_loops (struct loops *loops, int flags) if (check) { #ifdef ENABLE_CHECKING - verify_dominators (CDI_DOMINATORS); - verify_loop_structure (loops); + verify_loop_structure (); #endif } - loop = next; } iv_analysis_done (); @@ -226,34 +216,23 @@ loop_exit_at_end_p (struct loop *loop) /* Check that the latch is empty. */ FOR_BB_INSNS (loop->latch, insn) { - if (INSN_P (insn)) + if (NONDEBUG_INSN_P (insn)) return false; } return true; } -/* Check whether to peel LOOPS (depending on FLAGS) completely and do so. */ +/* Depending on FLAGS, check whether to peel loops completely and do so. */ static void -peel_loops_completely (struct loops *loops, int flags) +peel_loops_completely (int flags) { - struct loop *loop, *next; - - loop = loops->tree_root; - while (loop->inner) - loop = loop->inner; + struct loop *loop; + loop_iterator li; - while (loop != loops->tree_root) + /* Scan the loops, the inner ones first. */ + FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST) { - if (loop->next) - { - next = loop->next; - while (next->inner) - next = next->inner; - } - else - next = loop->outer; - loop->lpt_decision.decision = LPT_NONE; if (dump_file) @@ -269,48 +248,34 @@ peel_loops_completely (struct loops *loops, int flags) if (loop->lpt_decision.decision == LPT_PEEL_COMPLETELY) { - peel_loop_completely (loops, loop); + peel_loop_completely (loop); #ifdef ENABLE_CHECKING - verify_dominators (CDI_DOMINATORS); - verify_loop_structure (loops); + verify_loop_structure (); #endif } - loop = next; } } -/* Decide whether unroll or peel LOOPS (depending on FLAGS) and how much. */ +/* Decide whether unroll or peel loops (depending on FLAGS) and how much. */ static void -decide_unrolling_and_peeling (struct loops *loops, int flags) +decide_unrolling_and_peeling (int flags) { - struct loop *loop = loops->tree_root, *next; - - while (loop->inner) - loop = loop->inner; + struct loop *loop; + loop_iterator li; /* Scan the loops, inner ones first. */ - while (loop != loops->tree_root) + FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST) { - if (loop->next) - { - next = loop->next; - while (next->inner) - next = next->inner; - } - else - next = loop->outer; - loop->lpt_decision.decision = LPT_NONE; if (dump_file) fprintf (dump_file, "\n;; *** Considering loop %d ***\n", loop->num); /* Do not peel cold areas. 
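   Such loops are being optimized for size (cold according to the
   profile, or compiled for size), so the code growth caused by peeling
   or unrolling is unlikely to pay off.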
*/ - if (!maybe_hot_bb_p (loop->header)) + if (optimize_loop_for_size_p (loop)) { if (dump_file) fprintf (dump_file, ";; Not considering loop, cold area\n"); - loop = next; continue; } @@ -320,7 +285,6 @@ decide_unrolling_and_peeling (struct loops *loops, int flags) if (dump_file) fprintf (dump_file, ";; Not considering loop, cannot duplicate\n"); - loop = next; continue; } @@ -329,7 +293,6 @@ decide_unrolling_and_peeling (struct loops *loops, int flags) { if (dump_file) fprintf (dump_file, ";; Not considering loop, is not innermost\n"); - loop = next; continue; } @@ -346,8 +309,6 @@ decide_unrolling_and_peeling (struct loops *loops, int flags) decide_unroll_stupid (loop, flags); if (loop->lpt_decision.decision == LPT_NONE) decide_peel_simple (loop, flags); - - loop = next; } } @@ -377,7 +338,8 @@ decide_peel_once_rolling (struct loop *loop, int flags ATTRIBUTE_UNUSED) || desc->assumptions || desc->infinite || !desc->const_iter - || desc->niter != 0) + || (desc->niter != 0 + && max_loop_iterations_int (loop) != 0)) { if (dump_file) fprintf (dump_file, @@ -410,7 +372,7 @@ decide_peel_completely (struct loop *loop, int flags ATTRIBUTE_UNUSED) } /* Do not peel cold areas. */ - if (!maybe_hot_bb_p (loop->header)) + if (optimize_loop_for_size_p (loop)) { if (dump_file) fprintf (dump_file, ";; Not considering loop, cold area\n"); @@ -487,38 +449,38 @@ decide_peel_completely (struct loop *loop, int flags ATTRIBUTE_UNUSED) body; i++; */ static void -peel_loop_completely (struct loops *loops, struct loop *loop) +peel_loop_completely (struct loop *loop) { sbitmap wont_exit; unsigned HOST_WIDE_INT npeel; - unsigned n_remove_edges, i; - edge *remove_edges, ein; + unsigned i; + vec remove_edges; + edge ein; struct niter_desc *desc = get_simple_loop_desc (loop); struct opt_info *opt_info = NULL; - + npeel = desc->niter; if (npeel) { bool ok; - + wont_exit = sbitmap_alloc (npeel + 1); - sbitmap_ones (wont_exit); - RESET_BIT (wont_exit, 0); + bitmap_ones (wont_exit); + bitmap_clear_bit (wont_exit, 0); if (desc->noloop_assumptions) - RESET_BIT (wont_exit, 1); + bitmap_clear_bit (wont_exit, 1); - remove_edges = xcalloc (npeel, sizeof (edge)); - n_remove_edges = 0; + remove_edges.create (0); if (flag_split_ivs_in_unroller) opt_info = analyze_insns_in_loop (loop); - + opt_info_start_duplication (opt_info); ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), - loops, npeel, + npeel, wont_exit, desc->out_edge, - remove_edges, &n_remove_edges, + &remove_edges, DLTHE_FLAG_UPDATE_FREQ | DLTHE_FLAG_COMPLETTE_PEEL | (opt_info @@ -526,7 +488,7 @@ peel_loop_completely (struct loops *loops, struct loop *loop) gcc_assert (ok); free (wont_exit); - + if (opt_info) { apply_opt_in_copies (opt_info, npeel, false, true); @@ -534,9 +496,9 @@ peel_loop_completely (struct loops *loops, struct loop *loop) } /* Remove the exit edges. */ - for (i = 0; i < n_remove_edges; i++) - remove_path (loops, remove_edges[i]); - free (remove_edges); + FOR_EACH_VEC_ELT (remove_edges, i, ein) + remove_path (ein); + remove_edges.release (); } ein = desc->in_edge; @@ -544,7 +506,7 @@ peel_loop_completely (struct loops *loops, struct loop *loop) /* Now remove the unreachable part of the last iteration and cancel the loop. 
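   All DESC->NITER executions of the body already happen in the peeled
   copies, so the in-edge that would start one more iteration can never
   be taken; removing that path removes the loop as well.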
*/ - remove_path (loops, ein); + remove_path (ein); if (dump_file) fprintf (dump_file, ";; Peeled loop completely, %d times\n", (int) npeel); @@ -558,6 +520,7 @@ decide_unroll_constant_iterations (struct loop *loop, int flags) { unsigned nunroll, nunroll_by_av, best_copies, best_unroll = 0, n_copies, i; struct niter_desc *desc; + double_int iterations; if (!(flags & UAP_UNROLL)) { @@ -600,8 +563,14 @@ decide_unroll_constant_iterations (struct loop *loop, int flags) return; } - /* Check whether the loop rolls enough to consider. */ - if (desc->niter < 2 * nunroll) + /* Check whether the loop rolls enough to consider. + Consult also loop bounds and profile; in the case the loop has more + than one exit it may well loop less than determined maximal number + of iterations. */ + if (desc->niter < 2 * nunroll + || ((estimated_loop_iterations (loop, &iterations) + || max_loop_iterations (loop, &iterations)) + && iterations.ult (double_int::from_shwi (2 * nunroll)))) { if (dump_file) fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n"); @@ -637,26 +606,21 @@ decide_unroll_constant_iterations (struct loop *loop, int flags) } } - if (dump_file) - fprintf (dump_file, ";; max_unroll %d (%d copies, initial %d).\n", - best_unroll + 1, best_copies, nunroll); - loop->lpt_decision.decision = LPT_UNROLL_CONSTANT; loop->lpt_decision.times = best_unroll; - + if (dump_file) - fprintf (dump_file, - ";; Decided to unroll the constant times rolling loop, %d times.\n", - loop->lpt_decision.times); + fprintf (dump_file, ";; Decided to unroll the loop %d times (%d copies).\n", + loop->lpt_decision.times, best_copies); } -/* Unroll LOOP with constant number of iterations LOOP->LPT_DECISION.TIMES + 1 - times. The transformation does this: +/* Unroll LOOP with constant number of iterations LOOP->LPT_DECISION.TIMES times. + The transformation does this: for (i = 0; i < 102; i++) body; - ==> + ==> (LOOP->LPT_DECISION.TIMES == 3) i = 0; body; i++; @@ -670,19 +634,20 @@ decide_unroll_constant_iterations (struct loop *loop, int flags) } */ static void -unroll_loop_constant_iterations (struct loops *loops, struct loop *loop) +unroll_loop_constant_iterations (struct loop *loop) { unsigned HOST_WIDE_INT niter; unsigned exit_mod; sbitmap wont_exit; - unsigned n_remove_edges, i; - edge *remove_edges; + unsigned i; + vec remove_edges; + edge e; unsigned max_unroll = loop->lpt_decision.times; struct niter_desc *desc = get_simple_loop_desc (loop); bool exit_at_end = loop_exit_at_end_p (loop); struct opt_info *opt_info = NULL; bool ok; - + niter = desc->niter; /* Should not get here (such loop should be peeled instead). */ @@ -691,14 +656,13 @@ unroll_loop_constant_iterations (struct loops *loops, struct loop *loop) exit_mod = niter % (max_unroll + 1); wont_exit = sbitmap_alloc (max_unroll + 1); - sbitmap_ones (wont_exit); + bitmap_ones (wont_exit); - remove_edges = xcalloc (max_unroll + exit_mod + 1, sizeof (edge)); - n_remove_edges = 0; - if (flag_split_ivs_in_unroller + remove_edges.create (0); + if (flag_split_ivs_in_unroller || flag_variable_expansion_in_unroller) opt_info = analyze_insns_in_loop (loop); - + if (!exit_at_end) { /* The exit is not at the end of the loop; leave exit test @@ -706,20 +670,20 @@ unroll_loop_constant_iterations (struct loops *loops, struct loop *loop) of exit condition have continuous body after unrolling. */ if (dump_file) - fprintf (dump_file, ";; Condition on beginning of loop.\n"); + fprintf (dump_file, ";; Condition at beginning of loop.\n"); /* Peel exit_mod iterations. 
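   E.g. for the 102-iteration loop from the comment above, unrolled
   with MAX_UNROLL == 3 (four body copies), exit_mod == 102 % 4 == 2:
   two iterations are peeled off in front, and the remaining 100
   iterations execute as 25 passes of the unrolled body.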
*/ - RESET_BIT (wont_exit, 0); + bitmap_clear_bit (wont_exit, 0); if (desc->noloop_assumptions) - RESET_BIT (wont_exit, 1); + bitmap_clear_bit (wont_exit, 1); if (exit_mod) { opt_info_start_duplication (opt_info); ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), - loops, exit_mod, + exit_mod, wont_exit, desc->out_edge, - remove_edges, &n_remove_edges, + &remove_edges, DLTHE_FLAG_UPDATE_FREQ | (opt_info && exit_mod > 1 ? DLTHE_RECORD_COPY_NUMBER @@ -727,14 +691,20 @@ unroll_loop_constant_iterations (struct loops *loops, struct loop *loop) gcc_assert (ok); if (opt_info && exit_mod > 1) - apply_opt_in_copies (opt_info, exit_mod, false, false); - + apply_opt_in_copies (opt_info, exit_mod, false, false); + desc->noloop_assumptions = NULL_RTX; desc->niter -= exit_mod; - desc->niter_max -= exit_mod; + loop->nb_iterations_upper_bound -= double_int::from_uhwi (exit_mod); + if (loop->any_estimate + && double_int::from_uhwi (exit_mod).ule + (loop->nb_iterations_estimate)) + loop->nb_iterations_estimate -= double_int::from_uhwi (exit_mod); + else + loop->any_estimate = false; } - SET_BIT (wont_exit, 1); + bitmap_set_bit (wont_exit, 1); } else { @@ -742,7 +712,7 @@ unroll_loop_constant_iterations (struct loops *loops, struct loop *loop) the loop tests the condition at the end of loop body. */ if (dump_file) - fprintf (dump_file, ";; Condition on end of loop.\n"); + fprintf (dump_file, ";; Condition at end of loop.\n"); /* We know that niter >= max_unroll + 2; so we do not need to care of case when we would exit before reaching the loop. So just peel @@ -750,42 +720,48 @@ unroll_loop_constant_iterations (struct loops *loops, struct loop *loop) if (exit_mod != max_unroll || desc->noloop_assumptions) { - RESET_BIT (wont_exit, 0); + bitmap_clear_bit (wont_exit, 0); if (desc->noloop_assumptions) - RESET_BIT (wont_exit, 1); - + bitmap_clear_bit (wont_exit, 1); + opt_info_start_duplication (opt_info); ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), - loops, exit_mod + 1, + exit_mod + 1, wont_exit, desc->out_edge, - remove_edges, &n_remove_edges, + &remove_edges, DLTHE_FLAG_UPDATE_FREQ | (opt_info && exit_mod > 0 ? DLTHE_RECORD_COPY_NUMBER : 0)); gcc_assert (ok); - + if (opt_info && exit_mod > 0) apply_opt_in_copies (opt_info, exit_mod + 1, false, false); desc->niter -= exit_mod + 1; - desc->niter_max -= exit_mod + 1; + loop->nb_iterations_upper_bound -= double_int::from_uhwi (exit_mod + 1); + if (loop->any_estimate + && double_int::from_uhwi (exit_mod + 1).ule + (loop->nb_iterations_estimate)) + loop->nb_iterations_estimate -= double_int::from_uhwi (exit_mod + 1); + else + loop->any_estimate = false; desc->noloop_assumptions = NULL_RTX; - SET_BIT (wont_exit, 0); - SET_BIT (wont_exit, 1); + bitmap_set_bit (wont_exit, 0); + bitmap_set_bit (wont_exit, 1); } - RESET_BIT (wont_exit, max_unroll); + bitmap_clear_bit (wont_exit, max_unroll); } /* Now unroll the loop. */ - + opt_info_start_duplication (opt_info); ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop), - loops, max_unroll, + max_unroll, wont_exit, desc->out_edge, - remove_edges, &n_remove_edges, + &remove_edges, DLTHE_FLAG_UPDATE_FREQ | (opt_info ? DLTHE_RECORD_COPY_NUMBER @@ -804,7 +780,7 @@ unroll_loop_constant_iterations (struct loops *loops, struct loop *loop) { basic_block exit_block = get_bb_copy (desc->in_edge->src); /* Find a new in and out edge; they are in the last copy we have made. 
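   The exit test survives only in the last of the MAX_UNROLL + 1
   copies, so DESC->IN_EDGE and DESC->OUT_EDGE must now be redirected
   to the corresponding edges of that copy, located via get_bb_copy.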
*/ - + if (EDGE_SUCC (exit_block, 0)->dest == desc->out_edge->dest) { desc->out_edge = EDGE_SUCC (exit_block, 0); @@ -818,13 +794,21 @@ unroll_loop_constant_iterations (struct loops *loops, struct loop *loop) } desc->niter /= max_unroll + 1; - desc->niter_max /= max_unroll + 1; + loop->nb_iterations_upper_bound + = loop->nb_iterations_upper_bound.udiv (double_int::from_uhwi (max_unroll + + 1), + TRUNC_DIV_EXPR); + if (loop->any_estimate) + loop->nb_iterations_estimate + = loop->nb_iterations_estimate.udiv (double_int::from_uhwi (max_unroll + + 1), + TRUNC_DIV_EXPR); desc->niter_expr = GEN_INT (desc->niter); /* Remove the edges. */ - for (i = 0; i < n_remove_edges; i++) - remove_path (loops, remove_edges[i]); - free (remove_edges); + FOR_EACH_VEC_ELT (remove_edges, i, e) + remove_path (e); + remove_edges.release (); if (dump_file) fprintf (dump_file, @@ -839,6 +823,7 @@ decide_unroll_runtime_iterations (struct loop *loop, int flags) { unsigned nunroll, nunroll_by_av, i; struct niter_desc *desc; + double_int iterations; if (!(flags & UAP_UNROLL)) { @@ -860,6 +845,9 @@ decide_unroll_runtime_iterations (struct loop *loop, int flags) if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES)) nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES); + if (targetm.loop_unroll_adjust) + nunroll = targetm.loop_unroll_adjust (nunroll, loop); + /* Skip big loops. */ if (nunroll <= 1) { @@ -888,8 +876,10 @@ decide_unroll_runtime_iterations (struct loop *loop, int flags) return; } - /* If we have profile feedback, check whether the loop rolls. */ - if (loop->header->count && expected_loop_iterations (loop) < 2 * nunroll) + /* Check whether the loop rolls. */ + if ((estimated_loop_iterations (loop, &iterations) + || max_loop_iterations (loop, &iterations)) + && iterations.ult (double_int::from_shwi (2 * nunroll))) { if (dump_file) fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n"); @@ -903,22 +893,67 @@ decide_unroll_runtime_iterations (struct loop *loop, int flags) loop->lpt_decision.decision = LPT_UNROLL_RUNTIME; loop->lpt_decision.times = i - 1; - + if (dump_file) - fprintf (dump_file, - ";; Decided to unroll the runtime computable " - "times rolling loop, %d times.\n", + fprintf (dump_file, ";; Decided to unroll the loop %d times.\n", loop->lpt_decision.times); } -/* Unroll LOOP for that we are able to count number of iterations in runtime - LOOP->LPT_DECISION.TIMES + 1 times. The transformation does this (with some +/* Splits edge E and inserts the sequence of instructions INSNS on it, and + returns the newly created block. If INSNS is NULL_RTX, nothing is changed + and NULL is returned instead. */ + +basic_block +split_edge_and_insert (edge e, rtx insns) +{ + basic_block bb; + + if (!insns) + return NULL; + bb = split_edge (e); + emit_insn_after (insns, BB_END (bb)); + + /* ??? We used to assume that INSNS can contain control flow insns, and + that we had to try to find sub basic blocks in BB to maintain a valid + CFG. For this purpose we used to set the BB_SUPERBLOCK flag on BB + and call break_superblocks when going out of cfglayout mode. But it + turns out that this never happens; and that if it does ever happen, + the TODO_verify_flow at the end of the RTL loop passes would fail. + + There are two reasons why we expected we could have control flow insns + in INSNS. The first is when a comparison has to be done in parts, and + the second is when the number of iterations is computed for loops with + the number of iterations known at runtime. 
In both cases, test cases + to get control flow in INSNS appear to be impossible to construct: + + * If do_compare_rtx_and_jump needs several branches to do comparison + in a mode that needs comparison by parts, we cannot analyze the + number of iterations of the loop, and we never get to unrolling it. + + * The code in expand_divmod that was suspected to cause creation of + branching code seems to be only accessed for signed division. The + divisions used by # of iterations analysis are always unsigned. + Problems might arise on architectures that emits branching code + for some operations that may appear in the unroller (especially + for division), but we have no such architectures. + + Considering all this, it was decided that we should for now assume + that INSNS can in theory contain control flow insns, but in practice + it never does. So we don't handle the theoretical case, and should + a real failure ever show up, we have a pretty good clue for how to + fix it. */ + + return bb; +} + +/* Unroll LOOP for which we are able to count number of iterations in runtime + LOOP->LPT_DECISION.TIMES times. The transformation does this (with some extra care for case n < 0): for (i = 0; i < n; i++) body; - ==> + ==> (LOOP->LPT_DECISION.TIMES == 3) i = 0; mod = n % 4; @@ -943,43 +978,43 @@ decide_unroll_runtime_iterations (struct loop *loop, int flags) } */ static void -unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop) +unroll_loop_runtime_iterations (struct loop *loop) { rtx old_niter, niter, init_code, branch_code, tmp; unsigned i, j, p; - basic_block preheader, *body, *dom_bbs, swtch, ezc_swtch; - unsigned n_dom_bbs; + basic_block preheader, *body, swtch, ezc_swtch; + vec dom_bbs; sbitmap wont_exit; int may_exit_copy; - unsigned n_peel, n_remove_edges; - edge *remove_edges, e; + unsigned n_peel; + vec remove_edges; + edge e; bool extra_zero_check, last_may_exit; unsigned max_unroll = loop->lpt_decision.times; struct niter_desc *desc = get_simple_loop_desc (loop); bool exit_at_end = loop_exit_at_end_p (loop); struct opt_info *opt_info = NULL; bool ok; - + if (flag_split_ivs_in_unroller || flag_variable_expansion_in_unroller) opt_info = analyze_insns_in_loop (loop); - + /* Remember blocks whose dominators will have to be updated. */ - dom_bbs = xcalloc (n_basic_blocks, sizeof (basic_block)); - n_dom_bbs = 0; + dom_bbs.create (0); body = get_loop_body (loop); for (i = 0; i < loop->num_nodes; i++) { - unsigned nldom; - basic_block *ldom; + vec ldom; + basic_block bb; - nldom = get_dominated_by (CDI_DOMINATORS, body[i], &ldom); - for (j = 0; j < nldom; j++) - if (!flow_bb_inside_loop_p (loop, ldom[j])) - dom_bbs[n_dom_bbs++] = ldom[j]; + ldom = get_dominated_by (CDI_DOMINATORS, body[i]); + FOR_EACH_VEC_ELT (ldom, j, bb) + if (!flow_bb_inside_loop_p (loop, bb)) + dom_bbs.safe_push (bb); - free (ldom); + ldom.release (); } free (body); @@ -1019,12 +1054,12 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop) init_code = get_insns (); end_sequence (); + unshare_all_rtl_in_chain (init_code); /* Precondition the loop. 
*/ - loop_split_edge_with (loop_preheader_edge (loop), init_code); + split_edge_and_insert (loop_preheader_edge (loop), init_code); - remove_edges = xcalloc (max_unroll + n_peel + 1, sizeof (edge)); - n_remove_edges = 0; + remove_edges.create (0); wont_exit = sbitmap_alloc (max_unroll + 2); @@ -1032,32 +1067,29 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop) here; the only exception is when we have extra zero check and the number of iterations is reliable. Also record the place of (possible) extra zero check. */ - sbitmap_zero (wont_exit); + bitmap_clear (wont_exit); if (extra_zero_check && !desc->noloop_assumptions) - SET_BIT (wont_exit, 1); + bitmap_set_bit (wont_exit, 1); ezc_swtch = loop_preheader_edge (loop)->src; ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), - loops, 1, - wont_exit, desc->out_edge, - remove_edges, &n_remove_edges, + 1, wont_exit, desc->out_edge, + &remove_edges, DLTHE_FLAG_UPDATE_FREQ); gcc_assert (ok); /* Record the place where switch will be built for preconditioning. */ - swtch = loop_split_edge_with (loop_preheader_edge (loop), - NULL_RTX); + swtch = split_edge (loop_preheader_edge (loop)); for (i = 0; i < n_peel; i++) { /* Peel the copy. */ - sbitmap_zero (wont_exit); + bitmap_clear (wont_exit); if (i != n_peel - 1 || !last_may_exit) - SET_BIT (wont_exit, 1); + bitmap_set_bit (wont_exit, 1); ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), - loops, 1, - wont_exit, desc->out_edge, - remove_edges, &n_remove_edges, + 1, wont_exit, desc->out_edge, + &remove_edges, DLTHE_FLAG_UPDATE_FREQ); gcc_assert (ok); @@ -1065,16 +1097,21 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop) j = n_peel - i - (extra_zero_check ? 0 : 1); p = REG_BR_PROB_BASE / (i + 2); - preheader = loop_split_edge_with (loop_preheader_edge (loop), NULL_RTX); + preheader = split_edge (loop_preheader_edge (loop)); branch_code = compare_and_jump_seq (copy_rtx (niter), GEN_INT (j), EQ, block_label (preheader), p, NULL_RTX); - swtch = loop_split_edge_with (single_pred_edge (swtch), branch_code); + /* We rely on the fact that the compare and jump cannot be optimized out, + and hence the cfg we create is correct. */ + gcc_assert (branch_code != NULL_RTX); + + swtch = split_edge_and_insert (single_pred_edge (swtch), branch_code); set_immediate_dominator (CDI_DOMINATORS, preheader, swtch); single_pred_edge (swtch)->probability = REG_BR_PROB_BASE - p; e = make_edge (swtch, preheader, single_succ_edge (swtch)->flags & EDGE_IRREDUCIBLE_LOOP); + e->count = RDIV (preheader->count * REG_BR_PROB_BASE, p); e->probability = p; } @@ -1083,38 +1120,40 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop) /* Add branch for zero iterations. 
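   When that modulo is zero none of the peeled copies may execute, so
   the test emitted at EZC_SWTCH branches straight to the preheader of
   the unrolled loop, roughly "if (niter == 0) goto unrolled_loop".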
*/ p = REG_BR_PROB_BASE / (max_unroll + 1); swtch = ezc_swtch; - preheader = loop_split_edge_with (loop_preheader_edge (loop), NULL_RTX); + preheader = split_edge (loop_preheader_edge (loop)); branch_code = compare_and_jump_seq (copy_rtx (niter), const0_rtx, EQ, block_label (preheader), p, NULL_RTX); + gcc_assert (branch_code != NULL_RTX); - swtch = loop_split_edge_with (single_succ_edge (swtch), branch_code); + swtch = split_edge_and_insert (single_succ_edge (swtch), branch_code); set_immediate_dominator (CDI_DOMINATORS, preheader, swtch); single_succ_edge (swtch)->probability = REG_BR_PROB_BASE - p; e = make_edge (swtch, preheader, single_succ_edge (swtch)->flags & EDGE_IRREDUCIBLE_LOOP); + e->count = RDIV (preheader->count * REG_BR_PROB_BASE, p); e->probability = p; } /* Recount dominators for outer blocks. */ - iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, n_dom_bbs); + iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false); /* And unroll loop. */ - sbitmap_ones (wont_exit); - RESET_BIT (wont_exit, may_exit_copy); + bitmap_ones (wont_exit); + bitmap_clear_bit (wont_exit, may_exit_copy); opt_info_start_duplication (opt_info); - + ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop), - loops, max_unroll, + max_unroll, wont_exit, desc->out_edge, - remove_edges, &n_remove_edges, + &remove_edges, DLTHE_FLAG_UPDATE_FREQ | (opt_info ? DLTHE_RECORD_COPY_NUMBER : 0)); gcc_assert (ok); - + if (opt_info) { apply_opt_in_copies (opt_info, max_unroll, true, true); @@ -1128,7 +1167,7 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop) basic_block exit_block = get_bb_copy (desc->in_edge->src); /* Find a new in and out edge; they are in the last copy we have made. */ - + if (EDGE_SUCC (exit_block, 0)->dest == desc->out_edge->dest) { desc->out_edge = EDGE_SUCC (exit_block, 0); @@ -1142,9 +1181,9 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop) } /* Remove the edges. */ - for (i = 0; i < n_remove_edges; i++) - remove_path (loops, remove_edges[i]); - free (remove_edges); + FOR_EACH_VEC_ELT (remove_edges, i, e) + remove_path (e); + remove_edges.release (); /* We must be careful when updating the number of iterations due to preconditioning and the fact that the value must be valid at entry @@ -1154,13 +1193,26 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop) desc->niter_expr = simplify_gen_binary (UDIV, desc->mode, old_niter, GEN_INT (max_unroll + 1)); - desc->niter_max /= max_unroll + 1; + loop->nb_iterations_upper_bound + = loop->nb_iterations_upper_bound.udiv (double_int::from_uhwi (max_unroll + + 1), + TRUNC_DIV_EXPR); + if (loop->any_estimate) + loop->nb_iterations_estimate + = loop->nb_iterations_estimate.udiv (double_int::from_uhwi (max_unroll + + 1), + TRUNC_DIV_EXPR); if (exit_at_end) { desc->niter_expr = simplify_gen_binary (MINUS, desc->mode, desc->niter_expr, const1_rtx); desc->noloop_assumptions = NULL_RTX; - desc->niter_max--; + --loop->nb_iterations_upper_bound; + if (loop->any_estimate + && loop->nb_iterations_estimate != double_int_zero) + --loop->nb_iterations_estimate; + else + loop->any_estimate = false; } if (dump_file) @@ -1168,6 +1220,8 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop) ";; Unrolled loop %d times, counting # of iterations " "in runtime, %i insns\n", max_unroll, num_loop_insns (loop)); + + dom_bbs.release (); } /* Decide whether to simply peel LOOP and how much. 
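   Unlike unrolling, simple peeling copies a few iterations in front of
   the loop while the loop itself stays in place, so it pays off mainly
   for loops that are expected to roll only a few times.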
*/ @@ -1175,7 +1229,7 @@ static void decide_peel_simple (struct loop *loop, int flags) { unsigned npeel; - struct niter_desc *desc; + double_int iterations; if (!(flags & UAP_PEEL)) { @@ -1199,43 +1253,45 @@ decide_peel_simple (struct loop *loop, int flags) return; } - /* Check for simple loops. */ - desc = get_simple_loop_desc (loop); - - /* Check number of iterations. */ - if (desc->simple_p && !desc->assumptions && desc->const_iter) - { - if (dump_file) - fprintf (dump_file, ";; Loop iterates constant times\n"); - return; - } - /* Do not simply peel loops with branches inside -- it increases number - of mispredicts. */ - if (num_loop_branches (loop) > 1) + of mispredicts. + Exception is when we do have profile and we however have good chance + to peel proper number of iterations loop will iterate in practice. + TODO: this heuristic needs tunning; while for complette unrolling + the branch inside loop mostly eliminates any improvements, for + peeling it is not the case. Also a function call inside loop is + also branch from branch prediction POV (and probably better reason + to not unroll/peel). */ + if (num_loop_branches (loop) > 1 + && profile_status != PROFILE_READ) { if (dump_file) fprintf (dump_file, ";; Not peeling, contains branches\n"); return; } - if (loop->header->count) + /* If we have realistic estimate on number of iterations, use it. */ + if (estimated_loop_iterations (loop, &iterations)) { - unsigned niter = expected_loop_iterations (loop); - if (niter + 1 > npeel) + if (double_int::from_shwi (npeel).ule (iterations)) { if (dump_file) { fprintf (dump_file, ";; Not peeling loop, rolls too much ("); fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC, - (HOST_WIDEST_INT) (niter + 1)); + (HOST_WIDEST_INT) (iterations.to_shwi () + 1)); fprintf (dump_file, " iterations > %d [maximum peelings])\n", npeel); } return; } - npeel = niter + 1; + npeel = iterations.to_shwi () + 1; } + /* If we have small enough bound on iterations, we can still peel (completely + unroll). */ + else if (max_loop_iterations (loop, &iterations) + && iterations.ult (double_int::from_shwi (npeel))) + npeel = iterations.to_shwi () + 1; else { /* For now we have no good heuristics to decide whether loop peeling @@ -1249,46 +1305,48 @@ decide_peel_simple (struct loop *loop, int flags) /* Success. */ loop->lpt_decision.decision = LPT_PEEL_SIMPLE; loop->lpt_decision.times = npeel; - + if (dump_file) - fprintf (dump_file, ";; Decided to simply peel the loop, %d times.\n", + fprintf (dump_file, ";; Decided to simply peel the loop %d times.\n", loop->lpt_decision.times); } -/* Peel a LOOP LOOP->LPT_DECISION.TIMES times. The transformation: +/* Peel a LOOP LOOP->LPT_DECISION.TIMES times. 
The transformation does this: + while (cond) body; - ==> + ==> (LOOP->LPT_DECISION.TIMES == 3) if (!cond) goto end; body; if (!cond) goto end; body; + if (!cond) goto end; + body; while (cond) body; end: ; */ static void -peel_loop_simple (struct loops *loops, struct loop *loop) +peel_loop_simple (struct loop *loop) { sbitmap wont_exit; unsigned npeel = loop->lpt_decision.times; struct niter_desc *desc = get_simple_loop_desc (loop); struct opt_info *opt_info = NULL; bool ok; - + if (flag_split_ivs_in_unroller && npeel > 1) opt_info = analyze_insns_in_loop (loop); - + wont_exit = sbitmap_alloc (npeel + 1); - sbitmap_zero (wont_exit); - + bitmap_clear (wont_exit); + opt_info_start_duplication (opt_info); - + ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), - loops, npeel, wont_exit, - NULL, NULL, + npeel, wont_exit, NULL, NULL, DLTHE_FLAG_UPDATE_FREQ | (opt_info ? DLTHE_RECORD_COPY_NUMBER @@ -1296,7 +1354,7 @@ peel_loop_simple (struct loops *loops, struct loop *loop) gcc_assert (ok); free (wont_exit); - + if (opt_info) { apply_opt_in_copies (opt_info, npeel, false, false); @@ -1330,6 +1388,7 @@ decide_unroll_stupid (struct loop *loop, int flags) { unsigned nunroll, nunroll_by_av, i; struct niter_desc *desc; + double_int iterations; if (!(flags & UAP_UNROLL_ALL)) { @@ -1350,6 +1409,9 @@ decide_unroll_stupid (struct loop *loop, int flags) if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES)) nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES); + if (targetm.loop_unroll_adjust) + nunroll = targetm.loop_unroll_adjust (nunroll, loop); + /* Skip big loops. */ if (nunroll <= 1) { @@ -1370,7 +1432,9 @@ decide_unroll_stupid (struct loop *loop, int flags) } /* Do not unroll loops with branches inside -- it increases number - of mispredicts. */ + of mispredicts. + TODO: this heuristic needs tunning; call inside the loop body + is also relatively good reason to not unroll. */ if (num_loop_branches (loop) > 1) { if (dump_file) @@ -1378,9 +1442,10 @@ decide_unroll_stupid (struct loop *loop, int flags) return; } - /* If we have profile feedback, check whether the loop rolls. */ - if (loop->header->count - && expected_loop_iterations (loop) < 2 * nunroll) + /* Check whether the loop rolls. */ + if ((estimated_loop_iterations (loop, &iterations) + || max_loop_iterations (loop, &iterations)) + && iterations.ult (double_int::from_shwi (2 * nunroll))) { if (dump_file) fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n"); @@ -1395,18 +1460,18 @@ decide_unroll_stupid (struct loop *loop, int flags) loop->lpt_decision.decision = LPT_UNROLL_STUPID; loop->lpt_decision.times = i - 1; - + if (dump_file) - fprintf (dump_file, - ";; Decided to unroll the loop stupidly, %d times.\n", + fprintf (dump_file, ";; Decided to unroll the loop stupidly %d times.\n", loop->lpt_decision.times); } -/* Unroll a LOOP LOOP->LPT_DECISION.TIMES times. The transformation: +/* Unroll a LOOP LOOP->LPT_DECISION.TIMES times. 
The transformation does this: + while (cond) body; - ==> + ==> (LOOP->LPT_DECISION.TIMES == 3) while (cond) { @@ -1420,32 +1485,32 @@ decide_unroll_stupid (struct loop *loop, int flags) } */ static void -unroll_loop_stupid (struct loops *loops, struct loop *loop) +unroll_loop_stupid (struct loop *loop) { sbitmap wont_exit; unsigned nunroll = loop->lpt_decision.times; struct niter_desc *desc = get_simple_loop_desc (loop); struct opt_info *opt_info = NULL; bool ok; - + if (flag_split_ivs_in_unroller || flag_variable_expansion_in_unroller) opt_info = analyze_insns_in_loop (loop); - - + + wont_exit = sbitmap_alloc (nunroll + 1); - sbitmap_zero (wont_exit); + bitmap_clear (wont_exit); opt_info_start_duplication (opt_info); - + ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop), - loops, nunroll, wont_exit, - NULL, NULL, NULL, + nunroll, wont_exit, + NULL, NULL, DLTHE_FLAG_UPDATE_FREQ | (opt_info ? DLTHE_RECORD_COPY_NUMBER : 0)); gcc_assert (ok); - + if (opt_info) { apply_opt_in_copies (opt_info, nunroll, true, true); @@ -1475,7 +1540,7 @@ unroll_loop_stupid (struct loops *loops, struct loop *loop) static hashval_t si_info_hash (const void *ivts) { - return htab_hash_pointer (((struct iv_to_split *) ivts)->insn); + return (hashval_t) INSN_UID (((const struct iv_to_split *) ivts)->insn); } /* An equality functions for information about insns to split. */ @@ -1483,8 +1548,8 @@ si_info_hash (const void *ivts) static int si_info_eq (const void *ivts1, const void *ivts2) { - const struct iv_to_split *i1 = ivts1; - const struct iv_to_split *i2 = ivts2; + const struct iv_to_split *const i1 = (const struct iv_to_split *) ivts1; + const struct iv_to_split *const i2 = (const struct iv_to_split *) ivts2; return i1->insn == i2->insn; } @@ -1494,54 +1559,88 @@ si_info_eq (const void *ivts1, const void *ivts2) static hashval_t ve_info_hash (const void *ves) { - return htab_hash_pointer (((struct var_to_expand *) ves)->insn); + return (hashval_t) INSN_UID (((const struct var_to_expand *) ves)->insn); } -/* Return true if IVTS1 and IVTS2 (which are really both of type +/* Return true if IVTS1 and IVTS2 (which are really both of type "var_to_expand *") refer to the same instruction. */ static int ve_info_eq (const void *ivts1, const void *ivts2) { - const struct var_to_expand *i1 = ivts1; - const struct var_to_expand *i2 = ivts2; - + const struct var_to_expand *const i1 = (const struct var_to_expand *) ivts1; + const struct var_to_expand *const i2 = (const struct var_to_expand *) ivts2; + return i1->insn == i2->insn; } -/* Returns true if REG is referenced in one insn in LOOP. */ +/* Returns true if REG is referenced in one nondebug insn in LOOP. + Set *DEBUG_USES to the number of debug insns that reference the + variable. */ bool -referenced_in_one_insn_in_loop_p (struct loop *loop, rtx reg) +referenced_in_one_insn_in_loop_p (struct loop *loop, rtx reg, + int *debug_uses) { basic_block *body, bb; unsigned i; int count_ref = 0; rtx insn; - - body = get_loop_body (loop); + + body = get_loop_body (loop); for (i = 0; i < loop->num_nodes; i++) { bb = body[i]; - + FOR_BB_INSNS (bb, insn) - { - if (rtx_referenced_p (reg, insn)) - count_ref++; - } + if (!rtx_referenced_p (reg, insn)) + continue; + else if (DEBUG_INSN_P (insn)) + ++*debug_uses; + else if (++count_ref > 1) + break; } + free (body); return (count_ref == 1); } +/* Reset the DEBUG_USES debug insns in LOOP that reference REG. 
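   Their locations are replaced by gen_rtx_UNKNOWN_VAR_LOC below,
   because once the accumulator is expanded no single register carries
   the variable's value in every copy (see also the comment in
   analyze_insn_to_expand_var).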
*/ + +static void +reset_debug_uses_in_loop (struct loop *loop, rtx reg, int debug_uses) +{ + basic_block *body, bb; + unsigned i; + rtx insn; + + body = get_loop_body (loop); + for (i = 0; debug_uses && i < loop->num_nodes; i++) + { + bb = body[i]; + + FOR_BB_INSNS (bb, insn) + if (!DEBUG_INSN_P (insn) || !rtx_referenced_p (reg, insn)) + continue; + else + { + validate_change (insn, &INSN_VAR_LOCATION_LOC (insn), + gen_rtx_UNKNOWN_VAR_LOC (), 0); + if (!--debug_uses) + break; + } + } + free (body); +} + /* Determine whether INSN contains an accumulator - which can be expanded into separate copies, + which can be expanded into separate copies, one for each copy of the LOOP body. - + for (i = 0 ; i < n; i++) sum += a[i]; - + ==> - + sum += a[i] .... i = i+1; @@ -1551,69 +1650,126 @@ referenced_in_one_insn_in_loop_p (struct loop *loop, rtx reg) sum2 += a[i]; .... - Return NULL if INSN contains no opportunity for expansion of accumulator. - Otherwise, allocate a VAR_TO_EXPAND structure, fill it with the relevant + Return NULL if INSN contains no opportunity for expansion of accumulator. + Otherwise, allocate a VAR_TO_EXPAND structure, fill it with the relevant information and return a pointer to it. */ static struct var_to_expand * analyze_insn_to_expand_var (struct loop *loop, rtx insn) { - rtx set, dest, src, op1; + rtx set, dest, src; struct var_to_expand *ves; - enum machine_mode mode1, mode2; - + unsigned accum_pos; + enum rtx_code code; + int debug_uses = 0; + set = single_set (insn); if (!set) return NULL; - + dest = SET_DEST (set); src = SET_SRC (set); - - if (GET_CODE (src) != PLUS - && GET_CODE (src) != MINUS - && GET_CODE (src) != MULT) + code = GET_CODE (src); + + if (code != PLUS && code != MINUS && code != MULT && code != FMA) return NULL; - - if (!XEXP (src, 0)) + + if (FLOAT_MODE_P (GET_MODE (dest))) + { + if (!flag_associative_math) + return NULL; + /* In the case of FMA, we're also changing the rounding. */ + if (code == FMA && !flag_unsafe_math_optimizations) + return NULL; + } + + /* Hmm, this is a bit paradoxical. We know that INSN is a valid insn + in MD. But if there is no optab to generate the insn, we can not + perform the variable expansion. This can happen if an MD provides + an insn but not a named pattern to generate it, for example to avoid + producing code that needs additional mode switches like for x87/mmx. + + So we check have_insn_for which looks for an optab for the operation + in SRC. If it doesn't exist, we can't perform the expansion even + though INSN is valid. */ + if (!have_insn_for (code, GET_MODE (src))) return NULL; - - op1 = XEXP (src, 0); - + if (!REG_P (dest) && !(GET_CODE (dest) == SUBREG && REG_P (SUBREG_REG (dest)))) return NULL; - - if (!rtx_equal_p (dest, op1)) - return NULL; - - if (!referenced_in_one_insn_in_loop_p (loop, dest)) + + /* Find the accumulator use within the operation. */ + if (code == FMA) + { + /* We only support accumulation via FMA in the ADD position. */ + if (!rtx_equal_p (dest, XEXP (src, 2))) + return NULL; + accum_pos = 2; + } + else if (rtx_equal_p (dest, XEXP (src, 0))) + accum_pos = 0; + else if (rtx_equal_p (dest, XEXP (src, 1))) + { + /* The method of expansion that we are using; which includes the + initialization of the expansions with zero and the summation of + the expansions at the end of the computation will yield wrong + results for (x = something - x) thus avoid using it in that case. 
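   E.g. for x = c - x with x starting at 0 the values alternate
   c, 0, c, ...; two expansions x1 and x2, each initialized to zero,
   would both compute their own alternating sequence, and the sum
   x + x1 + x2 formed at the loop exit generally differs from the true
   final value.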
*/ + if (code == MINUS) + return NULL; + accum_pos = 1; + } + else return NULL; - - if (rtx_referenced_p (dest, XEXP (src, 1))) + + /* It must not otherwise be used. */ + if (code == FMA) + { + if (rtx_referenced_p (dest, XEXP (src, 0)) + || rtx_referenced_p (dest, XEXP (src, 1))) + return NULL; + } + else if (rtx_referenced_p (dest, XEXP (src, 1 - accum_pos))) return NULL; - - mode1 = GET_MODE (dest); - mode2 = GET_MODE (XEXP (src, 1)); - if ((FLOAT_MODE_P (mode1) - || FLOAT_MODE_P (mode2)) - && !flag_unsafe_math_optimizations) + + /* It must be used in exactly one insn. */ + if (!referenced_in_one_insn_in_loop_p (loop, dest, &debug_uses)) return NULL; - + + if (dump_file) + { + fprintf (dump_file, "\n;; Expanding Accumulator "); + print_rtl (dump_file, dest); + fprintf (dump_file, "\n"); + } + + if (debug_uses) + /* Instead of resetting the debug insns, we could replace each + debug use in the loop with the sum or product of all expanded + accummulators. Since we'll only know of all expansions at the + end, we'd have to keep track of which vars_to_expand a debug + insn in the loop references, take note of each copy of the + debug insn during unrolling, and when it's all done, compute + the sum or product of each variable and adjust the original + debug insn and each copy thereof. What a pain! */ + reset_debug_uses_in_loop (loop, dest, debug_uses); + /* Record the accumulator to expand. */ - ves = xmalloc (sizeof (struct var_to_expand)); + ves = XNEW (struct var_to_expand); ves->insn = insn; - ves->var_expansions = VEC_alloc (rtx, heap, 1); ves->reg = copy_rtx (dest); + ves->var_expansions.create (1); + ves->next = NULL; ves->op = GET_CODE (src); ves->expansion_count = 0; ves->reuse_expansion = 0; - return ves; + return ves; } /* Determine whether there is an induction variable in INSN that - we would like to split during unrolling. + we would like to split during unrolling. I.e. replace @@ -1633,7 +1789,7 @@ analyze_insn_to_expand_var (struct loop *loop, rtx insn) i = i0 + 2 ... - Return NULL if INSN contains no interesting IVs. Otherwise, allocate + Return NULL if INSN contains no interesting IVs. Otherwise, allocate an IV_TO_SPLIT structure, fill it with the relevant information and return a pointer to it. */ @@ -1658,21 +1814,33 @@ analyze_iv_to_split_insn (rtx insn) if (!biv_p (insn, dest)) return NULL; - ok = iv_analyze (insn, dest, &iv); - gcc_assert (ok); + ok = iv_analyze_result (insn, dest, &iv); + + /* This used to be an assert under the assumption that if biv_p returns + true that iv_analyze_result must also return true. However, that + assumption is not strictly correct as evidenced by pr25569. + + Returning NULL when iv_analyze_result returns false is safe and + avoids the problems in pr25569 until the iv_analyze_* routines + can be fixed, which is apparently hard and time consuming + according to their author. */ + if (! ok) + return NULL; if (iv.step == const0_rtx || iv.mode != iv.extend_mode) return NULL; /* Record the insn to split. 
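   With N_LOC == 1 and LOC[0] == 1 the recorded position denotes
   XEXP (single_set, 1), i.e. the SET_SRC of the insn, which is the
   expression get_ivts_expr will hand back for rewriting.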
*/ - ivts = xmalloc (sizeof (struct iv_to_split)); + ivts = XNEW (struct iv_to_split); ivts->insn = insn; + ivts->orig_var = dest; ivts->base_var = NULL_RTX; ivts->step = iv.step; + ivts->next = NULL; ivts->n_loc = 1; ivts->loc[0] = 1; - + return ivts; } @@ -1685,45 +1853,52 @@ static struct opt_info * analyze_insns_in_loop (struct loop *loop) { basic_block *body, bb; - unsigned i, num_edges = 0; - struct opt_info *opt_info = xcalloc (1, sizeof (struct opt_info)); + unsigned i; + struct opt_info *opt_info = XCNEW (struct opt_info); rtx insn; struct iv_to_split *ivts = NULL; struct var_to_expand *ves = NULL; PTR *slot1; PTR *slot2; - edge *edges = get_loop_exit_edges (loop, &num_edges); + vec edges = get_loop_exit_edges (loop); + edge exit; bool can_apply = false; - + iv_analysis_loop_init (loop); body = get_loop_body (loop); if (flag_split_ivs_in_unroller) - opt_info->insns_to_split = htab_create (5 * loop->num_nodes, - si_info_hash, si_info_eq, free); - - /* Record the loop exit bb and loop preheader before the unrolling. */ - if (!loop_preheader_edge (loop)->src) { - loop_split_edge_with (loop_preheader_edge (loop), NULL_RTX); - opt_info->loop_preheader = loop_split_edge_with (loop_preheader_edge (loop), NULL_RTX); + opt_info->insns_to_split = htab_create (5 * loop->num_nodes, + si_info_hash, si_info_eq, free); + opt_info->iv_to_split_head = NULL; + opt_info->iv_to_split_tail = &opt_info->iv_to_split_head; } - else - opt_info->loop_preheader = loop_preheader_edge (loop)->src; - - if (num_edges == 1 - && !(edges[0]->flags & EDGE_COMPLEX)) + + /* Record the loop exit bb and loop preheader before the unrolling. */ + opt_info->loop_preheader = loop_preheader_edge (loop)->src; + + if (edges.length () == 1) { - opt_info->loop_exit = loop_split_edge_with (edges[0], NULL_RTX); - can_apply = true; + exit = edges[0]; + if (!(exit->flags & EDGE_COMPLEX)) + { + opt_info->loop_exit = split_edge (exit); + can_apply = true; + } } - + if (flag_variable_expansion_in_unroller && can_apply) - opt_info->insns_with_var_to_expand = htab_create (5 * loop->num_nodes, - ve_info_hash, ve_info_eq, free); - + { + opt_info->insns_with_var_to_expand = htab_create (5 * loop->num_nodes, + ve_info_hash, + ve_info_eq, free); + opt_info->var_to_expand_head = NULL; + opt_info->var_to_expand_tail = &opt_info->var_to_expand_head; + } + for (i = 0; i < loop->num_nodes; i++) { bb = body[i]; @@ -1734,29 +1909,35 @@ analyze_insns_in_loop (struct loop *loop) { if (!INSN_P (insn)) continue; - + if (opt_info->insns_to_split) ivts = analyze_iv_to_split_insn (insn); - + if (ivts) { slot1 = htab_find_slot (opt_info->insns_to_split, ivts, INSERT); + gcc_assert (*slot1 == NULL); *slot1 = ivts; + *opt_info->iv_to_split_tail = ivts; + opt_info->iv_to_split_tail = &ivts->next; continue; } - + if (opt_info->insns_with_var_to_expand) ves = analyze_insn_to_expand_var (loop, insn); - + if (ves) { slot2 = htab_find_slot (opt_info->insns_with_var_to_expand, ves, INSERT); + gcc_assert (*slot2 == NULL); *slot2 = ves; + *opt_info->var_to_expand_tail = ves; + opt_info->var_to_expand_tail = &ves->next; } } } - - free (edges); + + edges.release (); free (body); return opt_info; } @@ -1764,7 +1945,7 @@ analyze_insns_in_loop (struct loop *loop) /* Called just before loop duplication. Records start of duplicated area to OPT_INFO. 
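   Blocks created by the duplication receive indices at or above the
   current LAST_BASIC_BLOCK, so recording it in FIRST_NEW_BLOCK lets
   apply_opt_in_copies walk exactly the newly created copies.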
*/ -static void +static void opt_info_start_duplication (struct opt_info *opt_info) { if (opt_info) @@ -1811,18 +1992,14 @@ get_ivts_expr (rtx expr, struct iv_to_split *ivts) return ret; } -/* Allocate basic variable for the induction variable chain. Callback for - htab_traverse. */ +/* Allocate basic variable for the induction variable chain. */ -static int -allocate_basic_variable (void **slot, void *data ATTRIBUTE_UNUSED) +static void +allocate_basic_variable (struct iv_to_split *ivts) { - struct iv_to_split *ivts = *slot; rtx expr = *get_ivts_expr (single_set (ivts->insn), ivts); ivts->base_var = gen_reg_rtx (GET_MODE (expr)); - - return 1; } /* Insert initialization of basic variable of IVTS before INSN, taking @@ -1881,7 +2058,7 @@ split_iv (struct iv_to_split *ivts, rtx insn, unsigned delta) seq = get_insns (); end_sequence (); emit_insn_before (seq, insn); - + if (validate_change (insn, loc, var, 0)) return; @@ -1899,7 +2076,7 @@ split_iv (struct iv_to_split *ivts, rtx insn, unsigned delta) emit_move_insn (dest, src); seq = get_insns (); end_sequence (); - + emit_insn_before (seq, insn); delete_insn (insn); } @@ -1911,22 +2088,22 @@ static rtx get_expansion (struct var_to_expand *ve) { rtx reg; - + if (ve->reuse_expansion == 0) reg = ve->reg; else - reg = VEC_index (rtx, ve->var_expansions, ve->reuse_expansion - 1); - - if (VEC_length (rtx, ve->var_expansions) == (unsigned) ve->reuse_expansion) + reg = ve->var_expansions[ve->reuse_expansion - 1]; + + if (ve->var_expansions.length () == (unsigned) ve->reuse_expansion) ve->reuse_expansion = 0; - else + else ve->reuse_expansion++; - + return reg; } -/* Given INSN replace the uses of the accumulator recorded in VE +/* Given INSN replace the uses of the accumulator recorded in VE with a new register. */ static void @@ -1934,10 +2111,10 @@ expand_var_during_unrolling (struct var_to_expand *ve, rtx insn) { rtx new_reg, set; bool really_new_expansion = false; - + set = single_set (insn); gcc_assert (set); - + /* Generate a new register only if the expansion limit has not been reached. Else reuse an already existing expansion. */ if (PARAM_VALUE (PARAM_MAX_VARIABLE_EXPANSIONS) > ve->expansion_count) @@ -1948,118 +2125,174 @@ expand_var_during_unrolling (struct var_to_expand *ve, rtx insn) else new_reg = get_expansion (ve); - validate_change (insn, &SET_DEST (set), new_reg, 1); - validate_change (insn, &XEXP (SET_SRC (set), 0), new_reg, 1); - + validate_replace_rtx_group (SET_DEST (set), new_reg, insn); if (apply_change_group ()) if (really_new_expansion) { - VEC_safe_push (rtx, heap, ve->var_expansions, new_reg); + ve->var_expansions.safe_push (new_reg); ve->expansion_count++; } } -/* Initialize the variable expansions in loop preheader. - Callbacks for htab_traverse. PLACE_P is the loop-preheader - basic block where the initialization of the expansions - should take place. */ +/* Initialize the variable expansions in loop preheader. PLACE is the + loop-preheader basic block where the initialization of the + expansions should take place. The expansions are initialized with + (-0) when the operation is plus or minus to honor sign zero. This + way we can prevent cases where the sign of the final result is + effected by the sign of the expansion. Here is an example to + demonstrate this: -static int -insert_var_expansion_initialization (void **slot, void *place_p) + for (i = 0 ; i < n; i++) + sum += something; + + ==> + + sum += something + .... + i = i+1; + sum1 += something + .... + i = i+1 + sum2 += something; + .... 
+ + When SUM is initialized with -zero and SOMETHING is also -zero; the + final result of sum should be -zero thus the expansions sum1 and sum2 + should be initialized with -zero as well (otherwise we will get +zero + as the final result). */ + +static void +insert_var_expansion_initialization (struct var_to_expand *ve, + basic_block place) { - struct var_to_expand *ve = *slot; - basic_block place = (basic_block)place_p; - rtx seq, var, zero_init, insn; + rtx seq, var, zero_init; unsigned i; - - if (VEC_length (rtx, ve->var_expansions) == 0) - return 1; - + enum machine_mode mode = GET_MODE (ve->reg); + bool honor_signed_zero_p = HONOR_SIGNED_ZEROS (mode); + + if (ve->var_expansions.length () == 0) + return; + start_sequence (); - if (ve->op == PLUS || ve->op == MINUS) - for (i = 0; VEC_iterate (rtx, ve->var_expansions, i, var); i++) - { - zero_init = CONST0_RTX (GET_MODE (var)); - emit_move_insn (var, zero_init); - } - else if (ve->op == MULT) - for (i = 0; VEC_iterate (rtx, ve->var_expansions, i, var); i++) - { - zero_init = CONST1_RTX (GET_MODE (var)); - emit_move_insn (var, zero_init); - } - + switch (ve->op) + { + case FMA: + /* Note that we only accumulate FMA via the ADD operand. */ + case PLUS: + case MINUS: + FOR_EACH_VEC_ELT (ve->var_expansions, i, var) + { + if (honor_signed_zero_p) + zero_init = simplify_gen_unary (NEG, mode, CONST0_RTX (mode), mode); + else + zero_init = CONST0_RTX (mode); + emit_move_insn (var, zero_init); + } + break; + + case MULT: + FOR_EACH_VEC_ELT (ve->var_expansions, i, var) + { + zero_init = CONST1_RTX (GET_MODE (var)); + emit_move_insn (var, zero_init); + } + break; + + default: + gcc_unreachable (); + } + seq = get_insns (); end_sequence (); - - insn = BB_HEAD (place); - while (!NOTE_INSN_BASIC_BLOCK_P (insn)) - insn = NEXT_INSN (insn); - - emit_insn_after (seq, insn); - /* Continue traversing the hash table. */ - return 1; + + emit_insn_after (seq, BB_END (place)); } -/* Combine the variable expansions at the loop exit. - Callbacks for htab_traverse. PLACE_P is the loop exit - basic block where the summation of the expansions should - take place. */ +/* Combine the variable expansions at the loop exit. PLACE is the + loop exit basic block where the summation of the expansions should + take place. */ -static int -combine_var_copies_in_loop_exit (void **slot, void *place_p) +static void +combine_var_copies_in_loop_exit (struct var_to_expand *ve, basic_block place) { - struct var_to_expand *ve = *slot; - basic_block place = (basic_block)place_p; rtx sum = ve->reg; rtx expr, seq, var, insn; unsigned i; - if (VEC_length (rtx, ve->var_expansions) == 0) - return 1; - + if (ve->var_expansions.length () == 0) + return; + start_sequence (); - if (ve->op == PLUS || ve->op == MINUS) - for (i = 0; VEC_iterate (rtx, ve->var_expansions, i, var); i++) - { - sum = simplify_gen_binary (PLUS, GET_MODE (ve->reg), - var, sum); - } - else if (ve->op == MULT) - for (i = 0; VEC_iterate (rtx, ve->var_expansions, i, var); i++) - { - sum = simplify_gen_binary (MULT, GET_MODE (ve->reg), - var, sum); - } - + switch (ve->op) + { + case FMA: + /* Note that we only accumulate FMA via the ADD operand. 
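   At the loop exit the partial results of an FMA accumulator are
   therefore combined with PLUS, exactly like those of an ordinary
   additive accumulator; hence the fall-through below.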
 
-/* Combine the variable expansions at the loop exit. 
-   Callbacks for htab_traverse.  PLACE_P is the loop exit 
-   basic block where the summation of the expansions should 
-   take place. */
+/* Combine the variable expansions at the loop exit.  PLACE is the
+   loop exit basic block where the summation of the expansions should
+   take place.  */
 
-static int
-combine_var_copies_in_loop_exit (void **slot, void *place_p)
+static void
+combine_var_copies_in_loop_exit (struct var_to_expand *ve, basic_block place)
 {
-  struct var_to_expand *ve = *slot;
-  basic_block place = (basic_block)place_p;
   rtx sum = ve->reg;
   rtx expr, seq, var, insn;
   unsigned i;
 
-  if (VEC_length (rtx, ve->var_expansions) == 0)
-    return 1;
-  
+  if (ve->var_expansions.length () == 0)
+    return;
+
   start_sequence ();
-  if (ve->op == PLUS || ve->op == MINUS)
-    for (i = 0; VEC_iterate (rtx, ve->var_expansions, i, var); i++)
-      {
-        sum = simplify_gen_binary (PLUS, GET_MODE (ve->reg),
-                                   var, sum);
-      }
-  else if (ve->op == MULT)
-    for (i = 0; VEC_iterate (rtx, ve->var_expansions, i, var); i++)
-      {
-        sum = simplify_gen_binary (MULT, GET_MODE (ve->reg),
-                                   var, sum);
-      }
-  
+  switch (ve->op)
+    {
+    case FMA:
+      /* Note that we only accumulate FMA via the ADD operand.  */
+    case PLUS:
+    case MINUS:
+      FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
+        sum = simplify_gen_binary (PLUS, GET_MODE (ve->reg), var, sum);
+      break;
+
+    case MULT:
+      FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
+        sum = simplify_gen_binary (MULT, GET_MODE (ve->reg), var, sum);
+      break;
+
+    default:
+      gcc_unreachable ();
+    }
+
   expr = force_operand (sum, ve->reg);
   if (expr != ve->reg)
     emit_move_insn (ve->reg, expr);
   seq = get_insns ();
   end_sequence ();
-  
+
   insn = BB_HEAD (place);
   while (!NOTE_INSN_BASIC_BLOCK_P (insn))
     insn = NEXT_INSN (insn);
   emit_insn_after (seq, insn);
-  
-  /* Continue traversing the hash table. */
-  return 1;
 }
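[Editorial note: combine_var_copies_in_loop_exit folds the copies back into the original accumulator, using PLUS for additive reductions and MULT for multiplicative ones; this is also why insert_var_expansion_initialization seeds MULT expansions with CONST1_RTX rather than zero.  A small C analogue of the multiplicative case, illustrative only and not GCC code:]

  #include <stdio.h>

  int main (void)
  {
    int a[] = { 2, 3, 4, 5 };

    /* Original accumulator and one expansion copy; both start at the
       multiplicative identity, so a copy that folds no element at all
       is harmless when the copies are multiplied together.  */
    int p0 = 1, p1 = 1;

    p0 *= a[0];  p0 *= a[2];    /* even elements via the original  */
    p1 *= a[1];  p1 *= a[3];    /* odd elements via the expansion  */

    printf ("%d\n", p0 * p1);   /* 8 * 15 = 120, combined at loop exit */
    return 0;
  }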
 
-/* Apply loop optimizations in loop copies using the 
-   data which gathered during the unrolling.  Structure 
+/* Strip away REG_EQUAL notes for IVs we're splitting.
+
+   Updating REG_EQUAL notes for IVs we split is tricky: We
+   cannot tell until after unrolling, DF-rescanning, and liveness
+   updating, whether an EQ_USE is reached by the split IV while
+   the IV reg is still live.  See PR55006.
+
+   ??? We cannot use remove_reg_equal_equiv_notes_for_regno,
+   because RTL loop-iv requires us to defer rescanning insns and
+   any notes attached to them.  So resort to old techniques...  */
+
+static void
+maybe_strip_eq_note_for_split_iv (struct opt_info *opt_info, rtx insn)
+{
+  struct iv_to_split *ivts;
+  rtx note = find_reg_equal_equiv_note (insn);
+  if (! note)
+    return;
+  for (ivts = opt_info->iv_to_split_head; ivts; ivts = ivts->next)
+    if (reg_mentioned_p (ivts->orig_var, note))
+      {
+        remove_note (insn, note);
+        return;
+      }
+}
+
+/* Apply loop optimizations in loop copies using the
+   data gathered during the unrolling.  Structure
   OPT_INFO records that data.
-   
+
  UNROLLING is true if we unrolled (not peeled) the loop.
  REWRITE_ORIGINAL_BODY is true if we should also rewrite the original
  body of the loop (as it should happen in complete unrolling, but not
  in ordinary peeling of the loop).  */

 static void
-apply_opt_in_copies (struct opt_info *opt_info, 
-                     unsigned n_copies, bool unrolling, 
+apply_opt_in_copies (struct opt_info *opt_info,
+                     unsigned n_copies, bool unrolling,
                      bool rewrite_original_loop)
 {
   unsigned i, delta;
@@ -2067,49 +2300,56 @@ apply_opt_in_copies (struct opt_info *opt_info,
   rtx insn, orig_insn, next;
   struct iv_to_split ivts_templ, *ivts;
   struct var_to_expand ve_templ, *ves;
-  
+
   /* Sanity check -- we need to put initialization in the original loop
      body.  */
   gcc_assert (!unrolling || rewrite_original_loop);
-  
+
   /* Allocate the basic variables (i0).  */
   if (opt_info->insns_to_split)
-    htab_traverse (opt_info->insns_to_split, allocate_basic_variable, NULL);
-  
+    for (ivts = opt_info->iv_to_split_head; ivts; ivts = ivts->next)
+      allocate_basic_variable (ivts);
+
   for (i = opt_info->first_new_block; i < (unsigned) last_basic_block; i++)
     {
      bb = BASIC_BLOCK (i);
      orig_bb = get_bb_original (bb);
-      
+
      /* bb->aux holds position in copy sequence initialized by
        duplicate_loop_to_header_edge.  */
      delta = determine_split_iv_delta ((size_t)bb->aux, n_copies,
                                        unrolling);
      bb->aux = 0;
      orig_insn = BB_HEAD (orig_bb);
-      for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
+      FOR_BB_INSNS_SAFE (bb, insn, next)
        {
-          next = NEXT_INSN (insn);
-         if (!INSN_P (insn))
+          if (!INSN_P (insn)
+             || (DEBUG_INSN_P (insn)
＿                 && TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL))
            continue;
-         
-         while (!INSN_P (orig_insn))
+
+         while (!INSN_P (orig_insn)
+                || (DEBUG_INSN_P (orig_insn)
+                    && (TREE_CODE (INSN_VAR_LOCATION_DECL (orig_insn))
+                        == LABEL_DECL)))
            orig_insn = NEXT_INSN (orig_insn);
-         
+
          ivts_templ.insn = orig_insn;
          ve_templ.insn = orig_insn;
-         
          /* Apply splitting iv optimization.  */
          if (opt_info->insns_to_split)
            {
-              ivts = htab_find (opt_info->insns_to_split, &ivts_templ);
-             
+             maybe_strip_eq_note_for_split_iv (opt_info, insn);
+
+              ivts = (struct iv_to_split *)
               htab_find (opt_info->insns_to_split, &ivts_templ);
+
              if (ivts)
                {
-#ifdef ENABLE_CHECKING
-                 gcc_assert (rtx_equal_p (PATTERN (insn), PATTERN (orig_insn)));
-#endif
-                 
+                 gcc_assert (GET_CODE (PATTERN (insn))
                             == GET_CODE (PATTERN (orig_insn)));
+
                  if (!delta)
                    insert_base_initialization (ivts, insn);
                  split_iv (ivts, insn, delta);
@@ -2118,12 +2358,12 @@ apply_opt_in_copies (struct opt_info *opt_info,
          /* Apply variable expansion optimization.  */
          if (unrolling && opt_info->insns_with_var_to_expand)
            {
-              ves = htab_find (opt_info->insns_with_var_to_expand, &ve_templ);
+              ves = (struct var_to_expand *)
               htab_find (opt_info->insns_with_var_to_expand, &ve_templ);
              if (ves)
-                { 
-#ifdef ENABLE_CHECKING
-                 gcc_assert (rtx_equal_p (PATTERN (insn), PATTERN (orig_insn)));
-#endif
+               {
+                 gcc_assert (GET_CODE (PATTERN (insn))
                             == GET_CODE (PATTERN (orig_insn)));
                  expand_var_during_unrolling (ves, insn);
                }
            }
@@ -2133,19 +2373,17 @@ apply_opt_in_copies (struct opt_info *opt_info,
 
   if (!rewrite_original_loop)
     return;
-  
+
   /* Initialize the variable expansions in the loop preheader
-     and take care of combining them at the loop exit. */  
+     and take care of combining them at the loop exit.  */
   if (opt_info->insns_with_var_to_expand)
     {
-      htab_traverse (opt_info->insns_with_var_to_expand, 
-                     insert_var_expansion_initialization, 
-                     opt_info->loop_preheader);
-      htab_traverse (opt_info->insns_with_var_to_expand, 
-                     combine_var_copies_in_loop_exit, 
-                     opt_info->loop_exit);
+      for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
+       insert_var_expansion_initialization (ves, opt_info->loop_preheader);
+      for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
+       combine_var_copies_in_loop_exit (ves, opt_info->loop_exit);
     }
-  
+
   /* Rewrite also the original loop body.  Find them as originals
      of the blocks in the last copied iteration, i.e. those that have
      get_bb_copy (get_bb_original (bb)) == bb.  */
@@ -2155,21 +2393,24 @@ apply_opt_in_copies (struct opt_info *opt_info,
      orig_bb = get_bb_original (bb);
      if (get_bb_copy (orig_bb) != bb)
        continue;
-      
+
      delta = determine_split_iv_delta (0, n_copies, unrolling);
      for (orig_insn = BB_HEAD (orig_bb);
           orig_insn != NEXT_INSN (BB_END (bb));
           orig_insn = next)
        {
          next = NEXT_INSN (orig_insn);
-          
+
          if (!INSN_P (orig_insn))
            continue;
-          
+
          ivts_templ.insn = orig_insn;
          if (opt_info->insns_to_split)
            {
+             maybe_strip_eq_note_for_split_iv (opt_info, orig_insn);
+
              ivts = (struct iv_to_split *)
               htab_find (opt_info->insns_to_split, &ivts_templ);
              if (ivts)
                {
                  if (!delta)
@@ -2178,25 +2419,11 @@ apply_opt_in_copies (struct opt_info *opt_info,
                  continue;
                }
            }
-          
+
        }
    }
 }
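[Editorial note: at the source level, the combined effect of unrolling plus the variable-expansion rewrite applied by apply_opt_in_copies is roughly the following.  This is an illustrative C rendition of an RTL-level transformation; dot is a made-up example, n % 4 == 0 is assumed, and the -0.0 seeds assume HONOR_SIGNED_ZEROS for the mode:]

  /* Unrolled-by-4 reduction after variable expansion.  */
  double
  dot (const double *a, const double *b, long n)
  {
    double s0 = 0.0, s1 = -0.0, s2 = -0.0, s3 = -0.0;  /* copies seeded -0.0 */
    for (long i = 0; i < n; i += 4)
      {
        s0 += a[i]     * b[i];      /* original accumulator */
        s1 += a[i + 1] * b[i + 1];  /* expansion 1 */
        s2 += a[i + 2] * b[i + 2];  /* expansion 2 */
        s3 += a[i + 3] * b[i + 3];  /* expansion 3 */
      }
    return s0 + s1 + s2 + s3;       /* combined in the loop exit block */
  }

[Breaking the single accumulator dependence chain this way is what lets the unrolled copies execute in parallel on out-of-order hardware.]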
 
-/* Release the data structures used for the variable expansion
-   optimization.  Callbacks for htab_traverse. */
-
-static int
-release_var_copies (void **slot, void *data ATTRIBUTE_UNUSED)
-{
-  struct var_to_expand *ve = *slot;
-
-  VEC_free (rtx, heap, ve->var_expansions);
-
-  /* Continue traversing the hash table. */
-  return 1;
-}
-
 /* Release OPT_INFO.  */
 
 static void
@@ -2206,8 +2433,10 @@ free_opt_info (struct opt_info *opt_info)
   htab_delete (opt_info->insns_to_split);
   if (opt_info->insns_with_var_to_expand)
     {
-      htab_traverse (opt_info->insns_with_var_to_expand,
-                    release_var_copies, NULL);
+      struct var_to_expand *ves;
+
+      for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
+       ves->var_expansions.release ();
      htab_delete (opt_info->insns_with_var_to_expand);
    }
  free (opt_info);
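[Editorial note: the head/tail pointers threaded through struct iv_to_split and struct var_to_expand are what allow the loops above to replace the old htab_traverse callbacks with plain list walks in a deterministic, insertion order.  A minimal standalone model of the append idiom, with invented names and not GCC code:]

  #include <stdio.h>
  #include <stdlib.h>

  struct entry { int id; struct entry *next; };

  struct list { struct entry *head; struct entry **tail; };

  static void
  list_init (struct list *l)
  {
    l->head = NULL;
    l->tail = &l->head;      /* tail points at the slot to fill next */
  }

  static void
  list_append (struct list *l, int id)
  {
    struct entry *e = malloc (sizeof *e);
    e->id = id;
    e->next = NULL;
    *l->tail = e;            /* link in at the end ... */
    l->tail = &e->next;      /* ... and advance the tail pointer */
  }

  int main (void)
  {
    struct list l;
    list_init (&l);
    for (int i = 0; i < 3; i++)
      list_append (&l, i);
    for (struct entry *e = l.head; e; e = e->next)
      printf ("%d ", e->id); /* prints: 0 1 2 */
    return 0;
  }

[Appending through a pointer-to-pointer tail keeps insertion O(1) with no special case for the empty list, which is how opt_info->iv_to_split_tail and opt_info->var_to_expand_tail are used.]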