X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=gcc%2Floop-unroll.c;h=de319c4f1d73cf29296f5284d5703ab423d777a9;hb=2cd45f0e6826ddcc92216a508104b2802eddece3;hp=2398e6d6b1145af2dc51b9dd4f214be6f2732488;hpb=e598c332d81d419ecdfce59051bb4657c685e53f;p=gcc.git diff --git a/gcc/loop-unroll.c b/gcc/loop-unroll.c index 2398e6d6b11..de319c4f1d7 100644 --- a/gcc/loop-unroll.c +++ b/gcc/loop-unroll.c @@ -74,6 +74,7 @@ along with GCC; see the file COPYING3. If not see struct iv_to_split { rtx insn; /* The insn in that the induction variable occurs. */ + rtx orig_var; /* The variable (register) for the IV before split. */ rtx base_var; /* The variable on that the values in the further iterations are based. */ rtx step; /* Step of the induction variable. */ @@ -91,7 +92,7 @@ struct var_to_expand { rtx insn; /* The insn in that the variable expansion occurs. */ rtx reg; /* The accumulator which is expanded. */ - VEC(rtx,heap) *var_expansions; /* The copies of the accumulator which is expanded. */ + vec var_expansions; /* The copies of the accumulator which is expanded. */ struct var_to_expand *next; /* Next entry in walking order. */ enum rtx_code op; /* The type of the accumulation - addition, subtraction or multiplication. */ @@ -215,7 +216,7 @@ loop_exit_at_end_p (struct loop *loop) /* Check that the latch is empty. */ FOR_BB_INSNS (loop->latch, insn) { - if (INSN_P (insn)) + if (NONDEBUG_INSN_P (insn)) return false; } @@ -453,7 +454,7 @@ peel_loop_completely (struct loop *loop) sbitmap wont_exit; unsigned HOST_WIDE_INT npeel; unsigned i; - VEC (edge, heap) *remove_edges; + vec remove_edges; edge ein; struct niter_desc *desc = get_simple_loop_desc (loop); struct opt_info *opt_info = NULL; @@ -465,12 +466,12 @@ peel_loop_completely (struct loop *loop) bool ok; wont_exit = sbitmap_alloc (npeel + 1); - sbitmap_ones (wont_exit); - RESET_BIT (wont_exit, 0); + bitmap_ones (wont_exit); + bitmap_clear_bit (wont_exit, 0); if (desc->noloop_assumptions) - RESET_BIT (wont_exit, 1); + bitmap_clear_bit (wont_exit, 1); - remove_edges = NULL; + remove_edges.create (0); if (flag_split_ivs_in_unroller) opt_info = analyze_insns_in_loop (loop); @@ -495,9 +496,9 @@ peel_loop_completely (struct loop *loop) } /* Remove the exit edges. */ - FOR_EACH_VEC_ELT (edge, remove_edges, i, ein) + FOR_EACH_VEC_ELT (remove_edges, i, ein) remove_path (ein); - VEC_free (edge, heap, remove_edges); + remove_edges.release (); } ein = desc->in_edge; @@ -639,7 +640,7 @@ unroll_loop_constant_iterations (struct loop *loop) unsigned exit_mod; sbitmap wont_exit; unsigned i; - VEC (edge, heap) *remove_edges; + vec remove_edges; edge e; unsigned max_unroll = loop->lpt_decision.times; struct niter_desc *desc = get_simple_loop_desc (loop); @@ -655,9 +656,9 @@ unroll_loop_constant_iterations (struct loop *loop) exit_mod = niter % (max_unroll + 1); wont_exit = sbitmap_alloc (max_unroll + 1); - sbitmap_ones (wont_exit); + bitmap_ones (wont_exit); - remove_edges = NULL; + remove_edges.create (0); if (flag_split_ivs_in_unroller || flag_variable_expansion_in_unroller) opt_info = analyze_insns_in_loop (loop); @@ -672,9 +673,9 @@ unroll_loop_constant_iterations (struct loop *loop) fprintf (dump_file, ";; Condition at beginning of loop.\n"); /* Peel exit_mod iterations. 
*/ - RESET_BIT (wont_exit, 0); + bitmap_clear_bit (wont_exit, 0); if (desc->noloop_assumptions) - RESET_BIT (wont_exit, 1); + bitmap_clear_bit (wont_exit, 1); if (exit_mod) { @@ -703,7 +704,7 @@ unroll_loop_constant_iterations (struct loop *loop) loop->any_estimate = false; } - SET_BIT (wont_exit, 1); + bitmap_set_bit (wont_exit, 1); } else { @@ -719,9 +720,9 @@ unroll_loop_constant_iterations (struct loop *loop) if (exit_mod != max_unroll || desc->noloop_assumptions) { - RESET_BIT (wont_exit, 0); + bitmap_clear_bit (wont_exit, 0); if (desc->noloop_assumptions) - RESET_BIT (wont_exit, 1); + bitmap_clear_bit (wont_exit, 1); opt_info_start_duplication (opt_info); ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), @@ -747,11 +748,11 @@ unroll_loop_constant_iterations (struct loop *loop) loop->any_estimate = false; desc->noloop_assumptions = NULL_RTX; - SET_BIT (wont_exit, 0); - SET_BIT (wont_exit, 1); + bitmap_set_bit (wont_exit, 0); + bitmap_set_bit (wont_exit, 1); } - RESET_BIT (wont_exit, max_unroll); + bitmap_clear_bit (wont_exit, max_unroll); } /* Now unroll the loop. */ @@ -805,9 +806,9 @@ unroll_loop_constant_iterations (struct loop *loop) desc->niter_expr = GEN_INT (desc->niter); /* Remove the edges. */ - FOR_EACH_VEC_ELT (edge, remove_edges, i, e) + FOR_EACH_VEC_ELT (remove_edges, i, e) remove_path (e); - VEC_free (edge, heap, remove_edges); + remove_edges.release (); if (dump_file) fprintf (dump_file, @@ -982,11 +983,11 @@ unroll_loop_runtime_iterations (struct loop *loop) rtx old_niter, niter, init_code, branch_code, tmp; unsigned i, j, p; basic_block preheader, *body, swtch, ezc_swtch; - VEC (basic_block, heap) *dom_bbs; + vec dom_bbs; sbitmap wont_exit; int may_exit_copy; unsigned n_peel; - VEC (edge, heap) *remove_edges; + vec remove_edges; edge e; bool extra_zero_check, last_may_exit; unsigned max_unroll = loop->lpt_decision.times; @@ -1000,20 +1001,20 @@ unroll_loop_runtime_iterations (struct loop *loop) opt_info = analyze_insns_in_loop (loop); /* Remember blocks whose dominators will have to be updated. */ - dom_bbs = NULL; + dom_bbs.create (0); body = get_loop_body (loop); for (i = 0; i < loop->num_nodes; i++) { - VEC (basic_block, heap) *ldom; + vec ldom; basic_block bb; ldom = get_dominated_by (CDI_DOMINATORS, body[i]); - FOR_EACH_VEC_ELT (basic_block, ldom, j, bb) + FOR_EACH_VEC_ELT (ldom, j, bb) if (!flow_bb_inside_loop_p (loop, bb)) - VEC_safe_push (basic_block, heap, dom_bbs, bb); + dom_bbs.safe_push (bb); - VEC_free (basic_block, heap, ldom); + ldom.release (); } free (body); @@ -1058,7 +1059,7 @@ unroll_loop_runtime_iterations (struct loop *loop) /* Precondition the loop. */ split_edge_and_insert (loop_preheader_edge (loop), init_code); - remove_edges = NULL; + remove_edges.create (0); wont_exit = sbitmap_alloc (max_unroll + 2); @@ -1066,10 +1067,10 @@ unroll_loop_runtime_iterations (struct loop *loop) here; the only exception is when we have extra zero check and the number of iterations is reliable. Also record the place of (possible) extra zero check. */ - sbitmap_zero (wont_exit); + bitmap_clear (wont_exit); if (extra_zero_check && !desc->noloop_assumptions) - SET_BIT (wont_exit, 1); + bitmap_set_bit (wont_exit, 1); ezc_swtch = loop_preheader_edge (loop)->src; ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), 1, wont_exit, desc->out_edge, @@ -1083,9 +1084,9 @@ unroll_loop_runtime_iterations (struct loop *loop) for (i = 0; i < n_peel; i++) { /* Peel the copy. 
*/ - sbitmap_zero (wont_exit); + bitmap_clear (wont_exit); if (i != n_peel - 1 || !last_may_exit) - SET_BIT (wont_exit, 1); + bitmap_set_bit (wont_exit, 1); ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), 1, wont_exit, desc->out_edge, &remove_edges, @@ -1139,8 +1140,8 @@ unroll_loop_runtime_iterations (struct loop *loop) /* And unroll loop. */ - sbitmap_ones (wont_exit); - RESET_BIT (wont_exit, may_exit_copy); + bitmap_ones (wont_exit); + bitmap_clear_bit (wont_exit, may_exit_copy); opt_info_start_duplication (opt_info); ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop), @@ -1180,9 +1181,9 @@ unroll_loop_runtime_iterations (struct loop *loop) } /* Remove the edges. */ - FOR_EACH_VEC_ELT (edge, remove_edges, i, e) + FOR_EACH_VEC_ELT (remove_edges, i, e) remove_path (e); - VEC_free (edge, heap, remove_edges); + remove_edges.release (); /* We must be careful when updating the number of iterations due to preconditioning and the fact that the value must be valid at entry @@ -1220,7 +1221,7 @@ unroll_loop_runtime_iterations (struct loop *loop) "in runtime, %i insns\n", max_unroll, num_loop_insns (loop)); - VEC_free (basic_block, heap, dom_bbs); + dom_bbs.release (); } /* Decide whether to simply peel LOOP and how much. */ @@ -1228,7 +1229,6 @@ static void decide_peel_simple (struct loop *loop, int flags) { unsigned npeel; - struct niter_desc *desc; double_int iterations; if (!(flags & UAP_PEEL)) @@ -1253,20 +1253,17 @@ decide_peel_simple (struct loop *loop, int flags) return; } - /* Check for simple loops. */ - desc = get_simple_loop_desc (loop); - - /* Check number of iterations. */ - if (desc->simple_p && !desc->assumptions && desc->const_iter) - { - if (dump_file) - fprintf (dump_file, ";; Loop iterates constant times\n"); - return; - } - /* Do not simply peel loops with branches inside -- it increases number - of mispredicts. */ - if (num_loop_branches (loop) > 1) + of mispredicts. + Exception is when we do have profile and we however have good chance + to peel proper number of iterations loop will iterate in practice. + TODO: this heuristic needs tunning; while for complette unrolling + the branch inside loop mostly eliminates any improvements, for + peeling it is not the case. Also a function call inside loop is + also branch from branch prediction POV (and probably better reason + to not unroll/peel). */ + if (num_loop_branches (loop) > 1 + && profile_status != PROFILE_READ) { if (dump_file) fprintf (dump_file, ";; Not peeling, contains branches\n"); @@ -1344,7 +1341,7 @@ peel_loop_simple (struct loop *loop) opt_info = analyze_insns_in_loop (loop); wont_exit = sbitmap_alloc (npeel + 1); - sbitmap_zero (wont_exit); + bitmap_clear (wont_exit); opt_info_start_duplication (opt_info); @@ -1435,7 +1432,9 @@ decide_unroll_stupid (struct loop *loop, int flags) } /* Do not unroll loops with branches inside -- it increases number - of mispredicts. */ + of mispredicts. + TODO: this heuristic needs tunning; call inside the loop body + is also relatively good reason to not unroll. 
*/ if (num_loop_branches (loop) > 1) { if (dump_file) @@ -1500,7 +1499,7 @@ unroll_loop_stupid (struct loop *loop) wont_exit = sbitmap_alloc (nunroll + 1); - sbitmap_zero (wont_exit); + bitmap_clear (wont_exit); opt_info_start_duplication (opt_info); ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop), @@ -1761,7 +1760,7 @@ analyze_insn_to_expand_var (struct loop *loop, rtx insn) ves = XNEW (struct var_to_expand); ves->insn = insn; ves->reg = copy_rtx (dest); - ves->var_expansions = VEC_alloc (rtx, heap, 1); + ves->var_expansions.create (1); ves->next = NULL; ves->op = GET_CODE (src); ves->expansion_count = 0; @@ -1835,6 +1834,7 @@ analyze_iv_to_split_insn (rtx insn) /* Record the insn to split. */ ivts = XNEW (struct iv_to_split); ivts->insn = insn; + ivts->orig_var = dest; ivts->base_var = NULL_RTX; ivts->step = iv.step; ivts->next = NULL; @@ -1860,7 +1860,7 @@ analyze_insns_in_loop (struct loop *loop) struct var_to_expand *ves = NULL; PTR *slot1; PTR *slot2; - VEC (edge, heap) *edges = get_loop_exit_edges (loop); + vec edges = get_loop_exit_edges (loop); edge exit; bool can_apply = false; @@ -1879,9 +1879,9 @@ analyze_insns_in_loop (struct loop *loop) /* Record the loop exit bb and loop preheader before the unrolling. */ opt_info->loop_preheader = loop_preheader_edge (loop)->src; - if (VEC_length (edge, edges) == 1) + if (edges.length () == 1) { - exit = VEC_index (edge, edges, 0); + exit = edges[0]; if (!(exit->flags & EDGE_COMPLEX)) { opt_info->loop_exit = split_edge (exit); @@ -1937,7 +1937,7 @@ analyze_insns_in_loop (struct loop *loop) } } - VEC_free (edge, heap, edges); + edges.release (); free (body); return opt_info; } @@ -2092,9 +2092,9 @@ get_expansion (struct var_to_expand *ve) if (ve->reuse_expansion == 0) reg = ve->reg; else - reg = VEC_index (rtx, ve->var_expansions, ve->reuse_expansion - 1); + reg = ve->var_expansions[ve->reuse_expansion - 1]; - if (VEC_length (rtx, ve->var_expansions) == (unsigned) ve->reuse_expansion) + if (ve->var_expansions.length () == (unsigned) ve->reuse_expansion) ve->reuse_expansion = 0; else ve->reuse_expansion++; @@ -2129,7 +2129,7 @@ expand_var_during_unrolling (struct var_to_expand *ve, rtx insn) if (apply_change_group ()) if (really_new_expansion) { - VEC_safe_push (rtx, heap, ve->var_expansions, new_reg); + ve->var_expansions.safe_push (new_reg); ve->expansion_count++; } } @@ -2170,7 +2170,7 @@ insert_var_expansion_initialization (struct var_to_expand *ve, enum machine_mode mode = GET_MODE (ve->reg); bool honor_signed_zero_p = HONOR_SIGNED_ZEROS (mode); - if (VEC_length (rtx, ve->var_expansions) == 0) + if (ve->var_expansions.length () == 0) return; start_sequence (); @@ -2180,7 +2180,7 @@ insert_var_expansion_initialization (struct var_to_expand *ve, /* Note that we only accumulate FMA via the ADD operand. 
*/ case PLUS: case MINUS: - FOR_EACH_VEC_ELT (rtx, ve->var_expansions, i, var) + FOR_EACH_VEC_ELT (ve->var_expansions, i, var) { if (honor_signed_zero_p) zero_init = simplify_gen_unary (NEG, mode, CONST0_RTX (mode), mode); @@ -2191,7 +2191,7 @@ insert_var_expansion_initialization (struct var_to_expand *ve, break; case MULT: - FOR_EACH_VEC_ELT (rtx, ve->var_expansions, i, var) + FOR_EACH_VEC_ELT (ve->var_expansions, i, var) { zero_init = CONST1_RTX (GET_MODE (var)); emit_move_insn (var, zero_init); @@ -2219,7 +2219,7 @@ combine_var_copies_in_loop_exit (struct var_to_expand *ve, basic_block place) rtx expr, seq, var, insn; unsigned i; - if (VEC_length (rtx, ve->var_expansions) == 0) + if (ve->var_expansions.length () == 0) return; start_sequence (); @@ -2229,12 +2229,12 @@ combine_var_copies_in_loop_exit (struct var_to_expand *ve, basic_block place) /* Note that we only accumulate FMA via the ADD operand. */ case PLUS: case MINUS: - FOR_EACH_VEC_ELT (rtx, ve->var_expansions, i, var) + FOR_EACH_VEC_ELT (ve->var_expansions, i, var) sum = simplify_gen_binary (PLUS, GET_MODE (ve->reg), var, sum); break; case MULT: - FOR_EACH_VEC_ELT (rtx, ve->var_expansions, i, var) + FOR_EACH_VEC_ELT (ve->var_expansions, i, var) sum = simplify_gen_binary (MULT, GET_MODE (ve->reg), var, sum); break; @@ -2255,6 +2255,32 @@ combine_var_copies_in_loop_exit (struct var_to_expand *ve, basic_block place) emit_insn_after (seq, insn); } +/* Strip away REG_EQUAL notes for IVs we're splitting. + + Updating REG_EQUAL notes for IVs we split is tricky: We + cannot tell until after unrolling, DF-rescanning, and liveness + updating, whether an EQ_USE is reached by the split IV while + the IV reg is still live. See PR55006. + + ??? We cannot use remove_reg_equal_equiv_notes_for_regno, + because RTL loop-iv requires us to defer rescanning insns and + any notes attached to them. So resort to old techniques... */ + +static void +maybe_strip_eq_note_for_split_iv (struct opt_info *opt_info, rtx insn) +{ + struct iv_to_split *ivts; + rtx note = find_reg_equal_equiv_note (insn); + if (! note) + return; + for (ivts = opt_info->iv_to_split_head; ivts; ivts = ivts->next) + if (reg_mentioned_p (ivts->orig_var, note)) + { + remove_note (insn, note); + return; + } +} + /* Apply loop optimizations in loop copies using the data which gathered during the unrolling. Structure OPT_INFO record that data. @@ -2295,9 +2321,8 @@ apply_opt_in_copies (struct opt_info *opt_info, unrolling); bb->aux = 0; orig_insn = BB_HEAD (orig_bb); - for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next) + FOR_BB_INSNS_SAFE (bb, insn, next) { - next = NEXT_INSN (insn); if (!INSN_P (insn) || (DEBUG_INSN_P (insn) && TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL)) @@ -2315,6 +2340,8 @@ apply_opt_in_copies (struct opt_info *opt_info, /* Apply splitting iv optimization. 
*/ if (opt_info->insns_to_split) { + maybe_strip_eq_note_for_split_iv (opt_info, insn); + ivts = (struct iv_to_split *) htab_find (opt_info->insns_to_split, &ivts_templ); @@ -2380,6 +2407,8 @@ apply_opt_in_copies (struct opt_info *opt_info, ivts_templ.insn = orig_insn; if (opt_info->insns_to_split) { + maybe_strip_eq_note_for_split_iv (opt_info, orig_insn); + ivts = (struct iv_to_split *) htab_find (opt_info->insns_to_split, &ivts_templ); if (ivts) @@ -2407,7 +2436,7 @@ free_opt_info (struct opt_info *opt_info) struct var_to_expand *ves; for (ves = opt_info->var_to_expand_head; ves; ves = ves->next) - VEC_free (rtx, heap, ves->var_expansions); + ves->var_expansions.release (); htab_delete (opt_info->insns_with_var_to_expand); } free (opt_info);
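
Note on the container and bitmap changes that make up the bulk of this patch: they are a mechanical conversion from the old heap-allocated VEC macros and the old sbitmap bit macros to the C++-style vec interface and the renamed bitmap_* helpers, so the new declarations are vec<edge> remove_edges, vec<rtx> var_expansions and vec<basic_block> dom_bbs, replacing the corresponding VEC (edge, heap) *, VEC (rtx, heap) * and VEC (basic_block, heap) * pointers. The fragment below is a hedged summary of the mapping as used in this file: it borrows the identifiers remove_edges, wont_exit, e and i from the hunks above, relies on GCC's internal vec.h and sbitmap.h interfaces of this period, and is not compilable outside the GCC tree.

    /* vec: value-semantics replacement for the heap-allocated VEC.  */
    vec<edge> remove_edges;
    edge e;
    unsigned i, n;

    remove_edges.create (0);               /* was: remove_edges = NULL;                          */
    remove_edges.safe_push (e);            /* was: VEC_safe_push (edge, heap, remove_edges, e);  */
    FOR_EACH_VEC_ELT (remove_edges, i, e)  /* was: FOR_EACH_VEC_ELT (edge, remove_edges, i, e)   */
      remove_path (e);
    e = remove_edges[0];                   /* was: e = VEC_index (edge, remove_edges, 0);        */
    n = remove_edges.length ();            /* was: n = VEC_length (edge, remove_edges);          */
    remove_edges.release ();               /* was: VEC_free (edge, heap, remove_edges);          */

    /* sbitmap: the bit operations keep their semantics but take the
       bitmap_* spelling.  */
    bitmap_ones (wont_exit);               /* was: sbitmap_ones (wont_exit);   */
    bitmap_clear (wont_exit);              /* was: sbitmap_zero (wont_exit);   */
    bitmap_set_bit (wont_exit, 1);         /* was: SET_BIT (wont_exit, 1);     */
    bitmap_clear_bit (wont_exit, 0);       /* was: RESET_BIT (wont_exit, 0);   */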
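
Note on the new maybe_strip_eq_note_for_split_iv (PR55006): once an induction variable recorded in iv_to_split is rewritten in the unrolled copies, a REG_EQUAL or REG_EQUIV note that mentions the original IV register can become misleading, and because RTL loop-iv defers DF rescanning the notes cannot be fixed up precisely at this point. The sketch below is purely illustrative, with made-up register numbers and insn UIDs rather than anything from a real dump, and only restates the situation the function's own comment describes.

    /* Suppose (reg 90) is the register recorded in ivts->orig_var.  An insn
       in the original body or in one of the copies may carry a note that
       mentions it, e.g. (simplified RTL):

         (insn 12 11 13 2 (set (reg:SI 100)
                                (mult:SI (reg:SI 90) (const_int 4)))
              (expr_list:REG_EQUAL (mult:SI (reg:SI 90) (const_int 4))
                   (nil)))

       After the definitions of (reg 90) are rewritten to per-copy values
       based on base_var and step, whether the recorded equivalence still
       holds in a given copy depends on liveness information that is only
       available after the deferred DF rescan.  maybe_strip_eq_note_for_split_iv
       therefore drops any such note via remove_note as soon as it finds
       ivts->orig_var mentioned in it, which is the conservative choice.  */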