From: Richard Biener
Date: Tue, 6 Oct 2020 13:47:15 +0000 (+0200)
Subject: SLP vectorize multiple BBs at once
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=181702ef8ab76afbf5d2cd4d7bc0cef613397d6e;p=gcc.git

SLP vectorize multiple BBs at once

This work from Martin Liska was motivated by gcc.dg/vect/bb-slp-22.c
which shows how poorly we currently BB vectorize code like

  a0 = in[0] + 23;
  a1 = in[1] + 142;
  a2 = in[2] + 2;
  a3 = in[3] + 31;

  if (x > y)
    {
      b[0] = a0;
      b[1] = a1;
      b[2] = a2;
      b[3] = a3;
    }
  else
    {
      out[0] = a0 * (x + 1);
      out[1] = a1 * (y + 1);
      out[2] = a2 * (x + 1);
      out[3] = a3 * (y + 1);
    }

namely by vectorizing the stores but not the common loads (and adds)
they are fed with.

Thus with the following patch we change the BB vectorizer from
operating on a single basic block at a time to considering somewhat
larger regions (but not the whole function yet, because of issues
with the vector-size iteration).

I took the opportunity to remove the fancy region iterators again,
now that we operate on BB granularity and in the end need to visit
PHI nodes as well.
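To make the effect concrete, here is a self-contained variant of the
above (purely illustrative with made-up declarations; it is not part
of the patch or of the testsuite):

  /* Hypothetical stand-alone reduction of the bb-slp-22.c situation;
     not part of the patch.  */
  int in[4], b[4], out[4];

  void
  foo (int x, int y)
  {
    int a0 = in[0] + 23;
    int a1 = in[1] + 142;
    int a2 = in[2] + 2;
    int a3 = in[3] + 31;
    if (x > y)
      {
        b[0] = a0;
        b[1] = a1;
        b[2] = a2;
        b[3] = a3;
      }
    else
      {
        out[0] = a0 * (x + 1);
        out[1] = a1 * (y + 1);
        out[2] = a2 * (x + 1);
        out[3] = a3 * (y + 1);
      }
  }

With a compiler containing this patch, building something like the
above with -O3 -fdump-tree-slp-details should show the loads, adds and
both store groups handled as one region in the slp2 dump (vectorizing
the multiplies additionally needs vector integer multiplication,
compare the vect_int_mult guard in bb-slp-22.c below).  That is also
why the "optimized: basic block" scan count in bb-slp-22.c drops from
2 to 1.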
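The splitting strategy of the new vect_slp_function (see the
tree-vect-slp.c hunk below) still ends a region at CFG merges and at
loop exits.  A rough illustration of the merge case, again hypothetical
and not taken from the patch:

  /* Hypothetical illustration of the region splitting; not from the
     patch.  */
  int in2[4], out2[4];

  void
  bar (int x)
  {
    int a0, a1, a2, a3;
    if (x)
      {
        a0 = in2[0] + 1;
        a1 = in2[1] + 1;
        a2 = in2[2] + 1;
        a3 = in2[3] + 1;
      }
    else
      {
        a0 = in2[0] - 1;
        a1 = in2[1] - 1;
        a2 = in2[2] - 1;
        a3 = in2[3] - 1;
      }
    /* This block has two predecessors, so the blocks above are flushed
       as one region and a new region starts here; the PHIs merging
       a0..a3 stay outside it (bbs[0] excludes its PHI nodes), so SLP
       discovery does not cross this merge point.  */
    out2[0] = a0;
    out2[1] = a1;
    out2[2] = a2;
    out2[3] = a3;
  }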
2020-10-08  Martin Liska
	    Richard Biener

	* tree-vectorizer.h (_bb_vec_info::const_iterator): Remove.
	(_bb_vec_info::const_reverse_iterator): Likewise.
	(_bb_vec_info::region_stmts): Likewise.
	(_bb_vec_info::reverse_region_stmts): Likewise.
	(_bb_vec_info::_bb_vec_info): Adjust.
	(_bb_vec_info::bb): Remove.
	(_bb_vec_info::region_begin): Remove.
	(_bb_vec_info::region_end): Remove.
	(_bb_vec_info::bbs): New vector of BBs.
	(vect_slp_function): Declare.
	* tree-vect-patterns.c (vect_determine_precisions): Use regular
	stmt iteration.
	(vect_pattern_recog): Likewise.
	* tree-vect-slp.c: Include cfganal.h, tree-eh.h and tree-cfg.h.
	(vect_build_slp_tree_1): Properly refuse to vectorize volatile
	and throwing stmts.
	(vect_build_slp_tree_2): Pass group-size down to
	get_vectype_for_scalar_type.
	(_bb_vec_info::_bb_vec_info): Use regular stmt iteration, adjust
	for changed region specification.
	(_bb_vec_info::~_bb_vec_info): Likewise.
	(vect_slp_check_for_constructors): Likewise.
	(vect_slp_region): Likewise.
	(vect_slp_bbs): New worker operating on a vector of BBs.
	(vect_slp_bb): Wrap it.
	(vect_slp_function): New function splitting the function
	into multi-BB regions.
	(vect_create_constant_vectors): Handle the case of inserting
	after a throwing def.
	(vect_schedule_slp_instance): Adjust.
	* tree-vectorizer.c (vec_info::remove_stmt): Simplify again.
	(vec_info::insert_seq_on_entry): Adjust.
	(pass_slp_vectorize::execute): Also init PHIs.  Call
	vect_slp_function.

	* gcc.dg/vect/bb-slp-22.c: Adjust.
	* gfortran.dg/pr68627.f: Likewise.
---

diff --git a/gcc/testsuite/gcc.dg/vect/bb-slp-22.c b/gcc/testsuite/gcc.dg/vect/bb-slp-22.c
index 21552300e11..92cc2a51abd 100644
--- a/gcc/testsuite/gcc.dg/vect/bb-slp-22.c
+++ b/gcc/testsuite/gcc.dg/vect/bb-slp-22.c
@@ -63,5 +63,5 @@ int main (void)
   return 0;
 }
 
-/* { dg-final { scan-tree-dump-times "optimized: basic block" 2 "slp2" } } */
+/* { dg-final { scan-tree-dump-times "optimized: basic block" 1 "slp2" } } */
 /* { dg-final { scan-tree-dump "vectorizing SLP node starting from: _\[0-9\]+ = _\[0-9\]+ \\\* a0" "slp2" { target vect_int_mult } } } */

diff --git a/gcc/testsuite/gfortran.dg/pr68627.f b/gcc/testsuite/gfortran.dg/pr68627.f
index 7754b72ff58..2ff1f3c77c8 100644
--- a/gcc/testsuite/gfortran.dg/pr68627.f
+++ b/gcc/testsuite/gfortran.dg/pr68627.f
@@ -1,6 +1,6 @@
 ! { dg-do compile { target { { i?86-*-* x86_64-*-* } && { ! { ia32 } } } } }
-! { dg-options "-Ofast -mavx512f -ffixed-xmm1 -ffixed-xmm2 -ffixed-xmm3 -ffixed-xmm4 -ffixed-xmm5 -ffixed-xmm6 -ffixed-xmm7 -ffixed-xmm8 -ffixed-xmm9 -ffixed-xmm10 -ffixed-xmm11 -ffixed-xmm12 -ffixed-xmm13 -ffixed-xmm14 -ffixed-xmm15" }
+! { dg-options "-Ofast -fno-tree-slp-vectorize -mavx512f -ffixed-xmm1 -ffixed-xmm2 -ffixed-xmm3 -ffixed-xmm4 -ffixed-xmm5 -ffixed-xmm6 -ffixed-xmm7 -ffixed-xmm8 -ffixed-xmm9 -ffixed-xmm10 -ffixed-xmm11 -ffixed-xmm12 -ffixed-xmm13 -ffixed-xmm14 -ffixed-xmm15" }
       IMPLICIT REAL*8(A-H,O-Z)
       ALLOCATABLE DD1(:), DD2(:), WY(:,:)

diff --git a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c
index d626c5f7362..71e4e106202 100644
--- a/gcc/tree-vect-patterns.c
+++ b/gcc/tree-vect-patterns.c
@@ -5123,12 +5123,14 @@ vect_determine_precisions (vec_info *vinfo)
   else
     {
       bb_vec_info bb_vinfo = as_a <bb_vec_info> (vinfo);
-      for (gimple *stmt : bb_vinfo->reverse_region_stmts ())
-	{
-	  stmt_vec_info stmt_info = vinfo->lookup_stmt (stmt);
-	  if (stmt_info && STMT_VINFO_VECTORIZABLE (stmt_info))
-	    vect_determine_stmt_precisions (vinfo, stmt_info);
-	}
+      for (int i = bb_vinfo->bbs.length () - 1; i != -1; --i)
+	for (gimple_stmt_iterator gsi = gsi_last_bb (bb_vinfo->bbs[i]);
+	     !gsi_end_p (gsi); gsi_prev (&gsi))
+	  {
+	    stmt_vec_info stmt_info = vinfo->lookup_stmt (gsi_stmt (gsi));
+	    if (stmt_info && STMT_VINFO_VECTORIZABLE (stmt_info))
+	      vect_determine_stmt_precisions (vinfo, stmt_info);
+	  }
     }
 }
 
@@ -5487,17 +5489,19 @@ vect_pattern_recog (vec_info *vinfo)
   else
     {
       bb_vec_info bb_vinfo = as_a <bb_vec_info> (vinfo);
-      for (gimple *stmt : bb_vinfo->region_stmts ())
-	{
-	  stmt_vec_info stmt_info = bb_vinfo->lookup_stmt (stmt);
-	  if (!stmt_info || !STMT_VINFO_VECTORIZABLE (stmt_info))
-	    continue;
-
-	  /* Scan over all generic vect_recog_xxx_pattern functions.  */
-	  for (j = 0; j < NUM_PATTERNS; j++)
-	    vect_pattern_recog_1 (vinfo,
-				  &vect_vect_recog_func_ptrs[j], stmt_info);
-	}
+      for (unsigned i = 0; i < bb_vinfo->bbs.length (); ++i)
+	for (gimple_stmt_iterator gsi = gsi_start_bb (bb_vinfo->bbs[i]);
+	     !gsi_end_p (gsi); gsi_next (&gsi))
+	  {
+	    stmt_vec_info stmt_info = bb_vinfo->lookup_stmt (gsi_stmt (gsi));
+	    if (!stmt_info || !STMT_VINFO_VECTORIZABLE (stmt_info))
+	      continue;
+
+	    /* Scan over all generic vect_recog_xxx_pattern functions.  */
+	    for (j = 0; j < NUM_PATTERNS; j++)
+	      vect_pattern_recog_1 (vinfo,
+				    &vect_vect_recog_func_ptrs[j], stmt_info);
+	  }
     }
 
   /* After this no more add_stmt calls are allowed.  */

diff --git a/gcc/tree-vect-slp.c b/gcc/tree-vect-slp.c
index 568fc5d3a90..7e22506b49f 100644
--- a/gcc/tree-vect-slp.c
+++ b/gcc/tree-vect-slp.c
@@ -45,7 +45,9 @@ along with GCC; see the file COPYING3.  If not see
 #include "gimple-fold.h"
 #include "internal-fn.h"
 #include "dump-context.h"
-
+#include "cfganal.h"
+#include "tree-eh.h"
+#include "tree-cfg.h"
 static bool vectorizable_slp_permutation (vec_info *, gimple_stmt_iterator *,
 					  slp_tree, stmt_vector_for_cost *);
 
@@ -761,8 +763,11 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
       if (dump_enabled_p ())
 	dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for %G", stmt);
 
-      /* Fail to vectorize statements marked as unvectorizable.  */
-      if (!STMT_VINFO_VECTORIZABLE (stmt_info))
+      /* Fail to vectorize statements marked as unvectorizable, that
+	 throw or are volatile.  */
+      if (!STMT_VINFO_VECTORIZABLE (stmt_info)
+	  || stmt_can_throw_internal (cfun, stmt)
+	  || gimple_has_volatile_ops (stmt))
 	{
 	  if (dump_enabled_p ())
 	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -1239,7 +1244,8 @@ vect_build_slp_tree_2 (vec_info *vinfo,
   if (gphi *stmt = dyn_cast <gphi *> (stmt_info->stmt))
     {
       tree scalar_type = TREE_TYPE (PHI_RESULT (stmt));
-      tree vectype = get_vectype_for_scalar_type (vinfo, scalar_type);
+      tree vectype = get_vectype_for_scalar_type (vinfo, scalar_type,
+						  group_size);
       if (!vect_record_max_nunits (vinfo, stmt_info, group_size, vectype,
 				   max_nunits))
 	return NULL;
@@ -2728,26 +2734,31 @@ vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
 }
 
-/* Initialize a bb_vec_info struct for the statements between
-   REGION_BEGIN_IN (inclusive) and REGION_END_IN (exclusive).  */
+/* Initialize a bb_vec_info struct for the statements in BBS basic blocks.  */
 
-_bb_vec_info::_bb_vec_info (gimple_stmt_iterator region_begin_in,
-			    gimple_stmt_iterator region_end_in,
-			    vec_info_shared *shared)
-  : vec_info (vec_info::bb, init_cost (NULL), shared),
-    bb (gsi_bb (region_begin_in)),
-    region_begin (region_begin_in),
-    region_end (region_end_in)
+_bb_vec_info::_bb_vec_info (vec<basic_block> _bbs, vec_info_shared *shared)
+  : vec_info (vec_info::bb, init_cost (NULL), shared), bbs (_bbs)
 {
-  for (gimple *stmt : this->region_stmts ())
+  for (unsigned i = 0; i < bbs.length (); ++i)
     {
-      gimple_set_uid (stmt, 0);
-      if (is_gimple_debug (stmt))
-	continue;
-      add_stmt (stmt);
+      if (i != 0)
+	for (gphi_iterator si = gsi_start_phis (bbs[i]); !gsi_end_p (si);
+	     gsi_next (&si))
+	  {
+	    gphi *phi = si.phi ();
+	    gimple_set_uid (phi, 0);
+	    add_stmt (phi);
+	  }
+      for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]);
+	   !gsi_end_p (gsi); gsi_next (&gsi))
+	{
+	  gimple *stmt = gsi_stmt (gsi);
+	  gimple_set_uid (stmt, 0);
+	  if (is_gimple_debug (stmt))
+	    continue;
+	  add_stmt (stmt);
+	}
     }
-
-  bb->aux = this;
 }
 
@@ -2756,11 +2767,23 @@ _bb_vec_info::_bb_vec_info (gimple_stmt_iterator region_begin_in,
 
 _bb_vec_info::~_bb_vec_info ()
 {
-  for (gimple *stmt : this->region_stmts ())
-    /* Reset region marker.  */
-    gimple_set_uid (stmt, -1);
-
-  bb->aux = NULL;
+  /* Reset region marker.  */
+  for (unsigned i = 0; i < bbs.length (); ++i)
+    {
+      if (i != 0)
+	for (gphi_iterator si = gsi_start_phis (bbs[i]); !gsi_end_p (si);
+	     gsi_next (&si))
+	  {
+	    gphi *phi = si.phi ();
+	    gimple_set_uid (phi, -1);
+	  }
+      for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]);
+	   !gsi_end_p (gsi); gsi_next (&gsi))
+	{
+	  gimple *stmt = gsi_stmt (gsi);
+	  gimple_set_uid (stmt, -1);
+	}
+    }
 }
 
 /* Subroutine of vect_slp_analyze_node_operations.  Handle the root of NODE,
@@ -3461,9 +3484,11 @@ vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo,
 static void
 vect_slp_check_for_constructors (bb_vec_info bb_vinfo)
 {
-  for (gimple *stmt : bb_vinfo->region_stmts ())
+  for (unsigned i = 0; i < bb_vinfo->bbs.length (); ++i)
+    for (gimple_stmt_iterator gsi = gsi_start_bb (bb_vinfo->bbs[i]);
+	 !gsi_end_p (gsi); gsi_next (&gsi))
     {
-      gassign *assign = dyn_cast <gassign *> (stmt);
+      gassign *assign = dyn_cast <gassign *> (gsi_stmt (gsi));
       if (!assign || gimple_assign_rhs_code (assign) != CONSTRUCTOR)
 	continue;
@@ -3602,17 +3627,13 @@ vect_slp_analyze_bb_1 (bb_vec_info bb_vinfo, int n_stmts, bool &fatal,
   return true;
 }
 
-/* Subroutine of vect_slp_bb.  Try to vectorize the statements between
-   REGION_BEGIN (inclusive) and REGION_END (exclusive), returning true
-   on success.  The region has N_STMTS statements and has the datarefs
-   given by DATAREFS.  */
+/* Subroutine of vect_slp_bb.  Try to vectorize the statements for all
+   basic blocks in BBS, returning true on success.
+   The region has N_STMTS statements and has the datarefs given by DATAREFS.  */
 
 static bool
-vect_slp_region (gimple_stmt_iterator region_begin,
-		 gimple_stmt_iterator region_end,
-		 vec<data_reference_p> datarefs,
-		 vec<int> *dataref_groups,
-		 unsigned int n_stmts)
+vect_slp_region (vec<basic_block> bbs, vec<data_reference_p> datarefs,
+		 vec<int> *dataref_groups, unsigned int n_stmts)
 {
   bb_vec_info bb_vinfo;
   auto_vector_modes vector_modes;
@@ -3629,7 +3650,7 @@ vect_slp_region (gimple_stmt_iterator region_begin,
     {
       bool vectorized = false;
       bool fatal = false;
-      bb_vinfo = new _bb_vec_info (region_begin, region_end, &shared);
+      bb_vinfo = new _bb_vec_info (bbs, &shared);
 
       bool first_time_p = shared.datarefs.is_empty ();
       BB_VINFO_DATAREFS (bb_vinfo) = datarefs;
@@ -3754,50 +3775,113 @@ vect_slp_region (gimple_stmt_iterator region_begin,
     }
 }
 
-/* Main entry for the BB vectorizer.  Analyze and transform BB, returns
+
+/* Main entry for the BB vectorizer.  Analyze and transform BBS, returns
    true if anything in the basic-block was vectorized.  */
 
-bool
-vect_slp_bb (basic_block bb)
+static bool
+vect_slp_bbs (vec<basic_block> bbs)
 {
   vec<data_reference_p> datarefs = vNULL;
   vec<int> dataref_groups = vNULL;
   int insns = 0;
   int current_group = 0;
 
-  gimple_stmt_iterator region_begin = gsi_start_nondebug_after_labels_bb (bb);
-  gimple_stmt_iterator region_end = gsi_last_bb (bb);
-  if (!gsi_end_p (region_end))
-    gsi_next (&region_end);
-
-  for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
-       gsi_next (&gsi))
+  for (unsigned i = 0; i < bbs.length (); i++)
     {
-      gimple *stmt = gsi_stmt (gsi);
-      if (is_gimple_debug (stmt))
-	continue;
+      basic_block bb = bbs[i];
+      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
+	   gsi_next (&gsi))
+	{
+	  gimple *stmt = gsi_stmt (gsi);
+	  if (is_gimple_debug (stmt))
+	    continue;
 
-      insns++;
+	  insns++;
 
-      if (gimple_location (stmt) != UNKNOWN_LOCATION)
-	vect_location = stmt;
+	  if (gimple_location (stmt) != UNKNOWN_LOCATION)
+	    vect_location = stmt;
 
-      if (!vect_find_stmt_data_reference (NULL, stmt, &datarefs,
-					  &dataref_groups, current_group))
-	++current_group;
+	  if (!vect_find_stmt_data_reference (NULL, stmt, &datarefs,
+					      &dataref_groups, current_group))
+	    ++current_group;
 
-      if (insns > param_slp_max_insns_in_bb)
-	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "not vectorized: too many instructions in "
-			     "basic block.\n");
+	  if (insns > param_slp_max_insns_in_bb)
+	    {
+	      if (dump_enabled_p ())
+		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+				 "not vectorized: too many instructions in "
+				 "region.\n");
+	    }
 	}
     }
 
-  return vect_slp_region (region_begin, region_end, datarefs,
-			  &dataref_groups, insns);
+  return vect_slp_region (bbs, datarefs, &dataref_groups, insns);
 }
 
+/* Main entry for the BB vectorizer.  Analyze and transform BB, returns
+   true if anything in the basic-block was vectorized.  */
+
+bool
+vect_slp_bb (basic_block bb)
+{
+  auto_vec<basic_block> bbs;
+  bbs.safe_push (bb);
+  return vect_slp_bbs (bbs);
+}
+
+/* Main entry for the BB vectorizer.  Analyze and transform FUN, returns
+   true if anything in the function was vectorized.  */
+
+bool
+vect_slp_function (function *fun)
+{
+  bool r = false;
+  int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (fun));
+  unsigned n = pre_and_rev_post_order_compute_fn (fun, NULL, rpo, false);
+
+  /* For the moment split the function into pieces to avoid making
+     the iteration on the vector mode moot.  Split at points we know
Split at points we know + to not handle well which is CFG merges (SLP discovery doesn't + handle non-loop-header PHIs) and loop exits. Since pattern + recog requires reverse iteration to visit uses before defs + simply chop RPO into pieces. */ + auto_vec bbs; + for (unsigned i = 0; i < n; i++) + { + basic_block bb = BASIC_BLOCK_FOR_FN (fun, rpo[i]); + + /* Split when a basic block has multiple predecessors or when the + edge into it exits a loop (because of implementation issues with + respect to placement of CTORs for externals). */ + bool split = false; + edge e; + if (!single_pred_p (bb) + || ((e = single_pred_edge (bb)), + loop_exit_edge_p (e->src->loop_father, e))) + split = true; + /* Split when a BB is not dominated by the first block. */ + else if (!bbs.is_empty () + && !dominated_by_p (CDI_DOMINATORS, bb, bbs[0])) + split = true; + + if (split && !bbs.is_empty ()) + { + r |= vect_slp_bbs (bbs); + bbs.truncate (0); + bbs.quick_push (bb); + } + else + bbs.safe_push (bb); + } + + if (!bbs.is_empty ()) + r |= vect_slp_bbs (bbs); + + free (rpo); + + return r; +} /* Build a variable-length vector in which the elements in ELTS are repeated to a fill NRESULTS vectors of type VECTOR_TYPE. Store the vectors in @@ -4059,8 +4143,19 @@ vect_create_constant_vectors (vec_info *vinfo, slp_tree op_node) { if (insert_after) { - gimple_stmt_iterator gsi - = gsi_for_stmt (insert_after->stmt); + gimple_stmt_iterator gsi; + if (!stmt_ends_bb_p (insert_after->stmt)) + gsi = gsi_for_stmt (insert_after->stmt); + else + { + /* When we want to insert after a def where the + defining stmt throws then insert on the fallthru + edge. */ + edge e = find_fallthru_edge + (gimple_bb (insert_after->stmt)->succs); + gcc_assert (single_pred_p (e->dest)); + gsi = gsi_after_labels (e->dest); + } gsi_insert_seq_after (&gsi, ctor_seq, GSI_CONTINUE_LINKING); } @@ -4674,7 +4769,8 @@ vect_schedule_slp_instance (vec_info *vinfo, we do not insert before the region boundary. */ if (SLP_TREE_SCALAR_OPS (child).is_empty () && !vinfo->lookup_def (SLP_TREE_VEC_DEFS (child)[0])) - last_stmt = gsi_stmt (as_a (vinfo)->region_begin); + last_stmt = gsi_stmt (gsi_after_labels + (as_a (vinfo)->bbs[0])); else { unsigned j; diff --git a/gcc/tree-vectorizer.c b/gcc/tree-vectorizer.c index 626f0ce3146..02da755f064 100644 --- a/gcc/tree-vectorizer.c +++ b/gcc/tree-vectorizer.c @@ -605,11 +605,7 @@ vec_info::remove_stmt (stmt_vec_info stmt_info) set_vinfo_for_stmt (stmt_info->stmt, NULL); unlink_stmt_vdef (stmt_info->stmt); gimple_stmt_iterator si = gsi_for_stmt (stmt_info->stmt); - gimple_stmt_iterator *psi = &si; - if (bb_vec_info bb_vinfo = dyn_cast (this)) - if (gsi_stmt (bb_vinfo->region_begin) == stmt_info->stmt) - psi = &bb_vinfo->region_begin; - gsi_remove (psi, true); + gsi_remove (&si, true); release_defs (stmt_info->stmt); free_stmt_vec_info (stmt_info); } @@ -653,7 +649,8 @@ vec_info::insert_seq_on_entry (stmt_vec_info context, gimple_seq seq) else { bb_vec_info bb_vinfo = as_a (this); - gimple_stmt_iterator gsi_region_begin = bb_vinfo->region_begin; + gimple_stmt_iterator gsi_region_begin + = gsi_after_labels (bb_vinfo->bbs[0]); gsi_insert_seq_before (&gsi_region_begin, seq, GSI_SAME_STMT); } } @@ -1416,6 +1413,13 @@ pass_slp_vectorize::execute (function *fun) /* Mark all stmts as not belonging to the current region and unvisited. 
   FOR_EACH_BB_FN (bb, fun)
     {
+      for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
+	   gsi_next (&gsi))
+	{
+	  gphi *stmt = gsi.phi ();
+	  gimple_set_uid (stmt, -1);
+	  gimple_set_visited (stmt, false);
+	}
       for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
 	   gsi_next (&gsi))
 	{
@@ -1425,8 +1429,7 @@ pass_slp_vectorize::execute (function *fun)
 	}
     }
 
-  FOR_EACH_BB_FN (bb, fun)
-    vect_slp_bb (bb);
+  vect_slp_function (fun);
 
   if (!in_loop_pipeline)
     {

diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h
index 37b091558fd..38daa05aebb 100644
--- a/gcc/tree-vectorizer.h
+++ b/gcc/tree-vectorizer.h
@@ -827,94 +827,14 @@ loop_vec_info_for_loop (class loop *loop)
 typedef class _bb_vec_info : public vec_info
 {
 public:
-
-  /* GIMPLE statement iterator going from region_begin to region_end.  */
-
-  struct const_iterator
-  {
-    const_iterator (gimple_stmt_iterator _gsi) : gsi (_gsi) {}
-
-    const const_iterator &
-    operator++ ()
-    {
-      gsi_next (&gsi); return *this;
-    }
-
-    gimple *operator* () const { return gsi_stmt (gsi); }
-
-    bool
-    operator== (const const_iterator &other) const
-    {
-      return gsi_stmt (gsi) == gsi_stmt (other.gsi);
-    }
-
-    bool
-    operator!= (const const_iterator &other) const
-    {
-      return !(*this == other);
-    }
-
-    gimple_stmt_iterator gsi;
-  };
-
-  /* GIMPLE statement iterator going from region_end to region_begin.  */
-
-  struct const_reverse_iterator
-  {
-    const_reverse_iterator (gimple_stmt_iterator _gsi) : gsi (_gsi) {}
-
-    const const_reverse_iterator &
-    operator++ ()
-    {
-      gsi_prev (&gsi); return *this;
-    }
-
-    gimple *operator* () const { return gsi_stmt (gsi); }
-
-    bool
-    operator== (const const_reverse_iterator &other) const
-    {
-      return gsi_stmt (gsi) == gsi_stmt (other.gsi);
-    }
-
-    bool
-    operator!= (const const_reverse_iterator &other) const
-    {
-      return !(*this == other);
-    }
-
-    gimple_stmt_iterator gsi;
-  };
-
-  _bb_vec_info (gimple_stmt_iterator, gimple_stmt_iterator, vec_info_shared *);
+  _bb_vec_info (vec<basic_block> bbs, vec_info_shared *);
   ~_bb_vec_info ();
 
-  /* Returns iterator_range for range-based loop.  */
-
-  iterator_range<const_iterator> region_stmts ()
-  {
-    return iterator_range<const_iterator> (region_begin, region_end);
-  }
-
-  /* Returns iterator_range for range-based loop in a reverse order.  */
-
-  iterator_range<const_reverse_iterator> reverse_region_stmts ()
-  {
-    const_reverse_iterator begin = region_end;
-    if (*begin == NULL)
-      begin = const_reverse_iterator (gsi_last_bb (gsi_bb (region_end)));
-    else
-      ++begin;
-
-    const_reverse_iterator end = region_begin;
-    return iterator_range<const_reverse_iterator> (begin, ++end);
-  }
-
-  basic_block bb;
-  gimple_stmt_iterator region_begin;
-  gimple_stmt_iterator region_end;
+  /* The region we are operating on.  bbs[0] is the entry, excluding
+     its PHI nodes.  In the future we might want to track an explicit
+     entry edge to cover bbs[0] PHI nodes and have a region entry
+     insert location.  */
+  vec<basic_block> bbs;
 } *bb_vec_info;
 
 #define BB_VINFO_BB(B) (B)->bb
@@ -2035,6 +1955,7 @@ extern void vect_get_slp_defs (slp_tree, vec<tree> *);
 extern void vect_get_slp_defs (vec_info *, slp_tree, vec<vec<tree> > *,
 			       unsigned n = -1U);
 extern bool vect_slp_bb (basic_block);
+extern bool vect_slp_function (function *);
 extern stmt_vec_info vect_find_last_scalar_stmt_in_slp (slp_tree);
 extern stmt_vec_info vect_find_first_scalar_stmt_in_slp (slp_tree);
 extern bool is_simple_and_all_uses_invariant (stmt_vec_info, loop_vec_info);