From 308bc496884706af4b3077171cbac684c7a6f7c6 Mon Sep 17 00:00:00 2001 From: Richard Biener <rguenther@suse.de> Date: Mon, 16 Mar 2020 11:47:00 +0100 Subject: [PATCH] add vec_info * parameters where needed Soonish we'll get SLP nodes which have no corresponding scalar stmt and thus no stmt_vec_info and thus no way to get back to the associated vec_info. This patch makes the vec_info available as part of the APIs instead of putting that back-pointer into the leaf data structures. 2020-05-05 Richard Biener <rguenther@suse.de> * tree-vectorizer.h (_stmt_vec_info::vinfo): Remove. (STMT_VINFO_LOOP_VINFO): Likewise. (STMT_VINFO_BB_VINFO): Likewise. * tree-vect-data-refs.c: Adjust for the above, adding vec_info * parameters and adjusting calls. * tree-vect-loop-manip.c: Likewise. * tree-vect-loop.c: Likewise. * tree-vect-patterns.c: Likewise. * tree-vect-slp.c: Likewise. * tree-vect-stmts.c: Likewise. * tree-vectorizer.c: Likewise. * target.def (add_stmt_cost): Add vec_info * parameter. * target.h (stmt_in_inner_loop_p): Likewise. * targhooks.c (default_add_stmt_cost): Adjust. * doc/tm.texi: Re-generate. * config/aarch64/aarch64.c (aarch64_extending_load_p): Add vec_info * parameter and adjust. (aarch64_sve_adjust_stmt_cost): Likewise. (aarch64_add_stmt_cost): Likewise. * config/arm/arm.c (arm_add_stmt_cost): Likewise. * config/i386/i386.c (ix86_add_stmt_cost): Likewise. * config/rs6000/rs6000.c (rs6000_add_stmt_cost): Likewise. ---
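(Illustration only, not part of the change: a minimal sketch of the refactoring pattern, using hypothetical stand-in types -- analysis_ctx plays the role of vec_info and stmt_rec the role of _stmt_vec_info; only the shape of the signatures mirrors the patch.)

/* Old style: every leaf record carries a back-pointer to its context,
   so the context is only reachable when a stmt_rec exists.  */
struct analysis_ctx;
struct stmt_rec
{
  analysis_ctx *ctx;		/* the back-pointer being removed */
  int inner_loop_depth;
};

static bool
in_inner_loop_p_old (stmt_rec *s)
{
  analysis_ctx *ctx = s->ctx;	/* no stmt_rec, no way to the ctx */
  (void) ctx;
  return s->inner_loop_depth > 0;
}

/* New style: the context travels as an explicit first parameter, so
   callers that only have the analysis context -- e.g. for SLP nodes
   without a corresponding scalar stmt -- can still use the API.  */
static bool
in_inner_loop_p_new (analysis_ctx *ctx, stmt_rec *s)
{
  (void) ctx;
  return s && s->inner_loop_depth > 0;
}

Passing the context explicitly costs one extra argument at each call site, but it removes a pointer from every per-stmt record and keeps the APIs usable for objects that never had a scalar stmt. Every hunk below follows this shape: a vec_info * (or the more specific loop_vec_info) becomes the first parameter, and uses of stmt_info->vinfo, STMT_VINFO_LOOP_VINFO and STMT_VINFO_BB_VINFO are replaced by that parameter or by dyn_cast <loop_vec_info>/<bb_vec_info> of it.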
gcc/config/aarch64/aarch64.c | 18 +- gcc/config/arm/arm.c | 6 +- gcc/config/i386/i386.c | 6 +- gcc/config/rs6000/rs6000.c | 6 +- gcc/doc/tm.texi | 2 +- gcc/target.def | 2 +- gcc/target.h | 2 +- gcc/targhooks.c | 6 +- gcc/tree-vect-data-refs.c | 199 +++--- gcc/tree-vect-loop-manip.c | 6 +- gcc/tree-vect-loop.c | 221 ++++--- gcc/tree-vect-patterns.c | 474 +++++++++-------- gcc/tree-vect-slp.c | 132 ++--- gcc/tree-vect-stmts.c | 982 +++++++++++++++++++---------- gcc/tree-vectorizer.c | 1 - gcc/tree-vectorizer.h | 123 ++--- 16 files changed, 1169 insertions(+), 1017 deletions(-) diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c index 409ca8d9519..e92c7e69fcb 100644 --- a/gcc/config/aarch64/aarch64.c +++ b/gcc/config/aarch64/aarch64.c @@ -13639,7 +13639,7 @@ aarch64_advsimd_ldp_stp_p (enum vect_cost_for_stmt kind, /* Return true if STMT_INFO extends the result of a load. */ static bool -aarch64_extending_load_p (stmt_vec_info stmt_info) +aarch64_extending_load_p (class vec_info *vinfo, stmt_vec_info stmt_info) { gassign *assign = dyn_cast <gassign *> (stmt_info->stmt); if (!assign || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (assign))) return false; @@ -13653,7 +13653,7 @@ aarch64_extending_load_p (stmt_vec_info stmt_info) || TYPE_PRECISION (lhs_type) <= TYPE_PRECISION (rhs_type)) return false; - stmt_vec_info def_stmt_info = stmt_info->vinfo->lookup_def (rhs); + stmt_vec_info def_stmt_info = vinfo->lookup_def (rhs); return (def_stmt_info && STMT_VINFO_DATA_REF (def_stmt_info) && DR_IS_READ (STMT_VINFO_DATA_REF (def_stmt_info))); @@ -13679,7 +13679,7 @@ aarch64_integer_truncation_p (stmt_vec_info stmt_info) operate on vector type VECTYPE. Adjust the cost as necessary for SVE targets. */ static unsigned int -aarch64_sve_adjust_stmt_cost (vect_cost_for_stmt kind, +aarch64_sve_adjust_stmt_cost (class vec_info *vinfo, vect_cost_for_stmt kind, stmt_vec_info stmt_info, tree vectype, unsigned int stmt_cost) { @@ -13691,7 +13691,7 @@ aarch64_sve_adjust_stmt_cost (vect_cost_for_stmt kind, on the fly. Optimistically assume that a load followed by an extension will fold to this form during combine, and that the extension therefore comes for free. */ - if (kind == vector_stmt && aarch64_extending_load_p (stmt_info)) + if (kind == vector_stmt && aarch64_extending_load_p (vinfo, stmt_info)) stmt_cost = 0; /* For similar reasons, vector_stmt integer truncations are a no-op, @@ -13744,7 +13744,8 @@ aarch64_sve_adjust_stmt_cost (vect_cost_for_stmt kind, /* Implement targetm.vectorize.add_stmt_cost. */ static unsigned -aarch64_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind, +aarch64_add_stmt_cost (class vec_info *vinfo, void *data, int count, + enum vect_cost_for_stmt kind, struct _stmt_vec_info *stmt_info, int misalign, enum vect_cost_model_location where) { @@ -13758,13 +13759,14 @@ aarch64_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind, aarch64_builtin_vectorization_cost (kind, vectype, misalign); if (stmt_info && vectype && aarch64_sve_mode_p (TYPE_MODE (vectype))) - stmt_cost = aarch64_sve_adjust_stmt_cost (kind, stmt_info, vectype, - stmt_cost); + stmt_cost = aarch64_sve_adjust_stmt_cost (vinfo, kind, stmt_info, + vectype, stmt_cost); /* Statements in an inner loop relative to the loop being vectorized are weighted more heavily. The value here is arbitrary and could potentially be improved with analysis. */ - if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info)) + if (where == vect_body && stmt_info + && stmt_in_inner_loop_p (vinfo, stmt_info)) count *= 50; /* FIXME */ retval = (unsigned) (count * stmt_cost); diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c index b169250918c..bbd7dc5316c 100644 --- a/gcc/config/arm/arm.c +++ b/gcc/config/arm/arm.c @@ -12131,7 +12131,8 @@ arm_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost, /* Implement targetm.vectorize.add_stmt_cost. */ static unsigned -arm_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind, +arm_add_stmt_cost (class vec_info *vinfo, void *data, int count, + enum vect_cost_for_stmt kind, struct _stmt_vec_info *stmt_info, int misalign, enum vect_cost_model_location where) { @@ -12146,7 +12147,8 @@ arm_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind, /* Statements in an inner loop relative to the loop being vectorized are weighted more heavily. The value here is arbitrary and could potentially be improved with analysis. */ - if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info)) + if (where == vect_body && stmt_info + && stmt_in_inner_loop_p (vinfo, stmt_info)) count *= 50; /* FIXME. */ retval = (unsigned) (count * stmt_cost); diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c index b4ecc3ce832..b40f443ba8a 100644 --- a/gcc/config/i386/i386.c +++ b/gcc/config/i386/i386.c @@ -21878,7 +21878,8 @@ ix86_init_cost (class loop *) /* Implement targetm.vectorize.add_stmt_cost. */ static unsigned -ix86_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind, +ix86_add_stmt_cost (class vec_info *vinfo, void *data, int count, + enum vect_cost_for_stmt kind, class _stmt_vec_info *stmt_info, int misalign, enum vect_cost_model_location where) { @@ -22039,7 +22040,8 @@ ix86_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind, /* Statements in an inner loop relative to the loop being vectorized are weighted more heavily. The value here is arbitrary and could potentially be improved with analysis.
*/ - if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info)) + if (where == vect_body && stmt_info + && stmt_in_inner_loop_p (vinfo, stmt_info)) count *= 50; /* FIXME. */ retval = (unsigned) (count * stmt_cost); diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c index 017e7704366..355aea8628d 100644 --- a/gcc/config/rs6000/rs6000.c +++ b/gcc/config/rs6000/rs6000.c @@ -5046,7 +5046,8 @@ adjust_vectorization_cost (enum vect_cost_for_stmt kind, /* Implement targetm.vectorize.add_stmt_cost. */ static unsigned -rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind, +rs6000_add_stmt_cost (class vec_info *vinfo, void *data, int count, + enum vect_cost_for_stmt kind, struct _stmt_vec_info *stmt_info, int misalign, enum vect_cost_model_location where) { @@ -5062,7 +5063,8 @@ rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind, /* Statements in an inner loop relative to the loop being vectorized are weighted more heavily. The value here is arbitrary and could potentially be improved with analysis. */ - if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info)) + if (where == vect_body && stmt_info + && stmt_in_inner_loop_p (vinfo, stmt_info)) count *= 50; /* FIXME. */ retval = (unsigned) (count * stmt_cost); diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi index 64e7b003a48..710f674860a 100644 --- a/gcc/doc/tm.texi +++ b/gcc/doc/tm.texi @@ -6094,7 +6094,7 @@ all zeros. GCC can then try to branch around the instruction instead. This hook should initialize target-specific data structures in preparation for modeling the costs of vectorizing a loop or basic block. The default allocates three unsigned integers for accumulating costs for the prologue, body, and epilogue of the loop or basic block. If @var{loop_info} is non-NULL, it identifies the loop being vectorized; otherwise a single block is being vectorized. @end deftypefn -@deftypefn {Target Hook} unsigned TARGET_VECTORIZE_ADD_STMT_COST (void *@var{data}, int @var{count}, enum vect_cost_for_stmt @var{kind}, class _stmt_vec_info *@var{stmt_info}, int @var{misalign}, enum vect_cost_model_location @var{where}) +@deftypefn {Target Hook} unsigned TARGET_VECTORIZE_ADD_STMT_COST (class vec_info *@var{}, void *@var{data}, int @var{count}, enum vect_cost_for_stmt @var{kind}, class _stmt_vec_info *@var{stmt_info}, int @var{misalign}, enum vect_cost_model_location @var{where}) This hook should update the target-specific @var{data} in response to adding @var{count} copies of the given @var{kind} of statement to a loop or basic block. The default adds the builtin vectorizer cost for the copies of the statement to the accumulator specified by @var{where}, (the prologue, body, or epilogue) and returns the amount added. The return value should be viewed as a tentative cost that may later be revised. @end deftypefn
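(Illustration only, not part of the patch: a hypothetical target implementation of the hook as now specified, modeled on default_add_stmt_cost in the targhooks.c hunk below; example_builtin_vectorization_cost is a stand-in for the target's per-statement cost estimate.)

static unsigned
example_add_stmt_cost (class vec_info *vinfo, void *data, int count,
		       enum vect_cost_for_stmt kind,
		       class _stmt_vec_info *stmt_info, int misalign,
		       enum vect_cost_model_location where)
{
  /* DATA is the accumulator set up by the init_cost hook: one
     unsigned per cost location (prologue, body, epilogue).  */
  unsigned *cost = (unsigned *) data;
  tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
  int stmt_cost = example_builtin_vectorization_cost (kind, vectype,
						      misalign);
  /* Statements in an inner loop relative to the loop being vectorized
     are weighted more heavily; the query now goes through the
     explicit vec_info parameter instead of a back-pointer.  */
  if (where == vect_body && stmt_info
      && stmt_in_inner_loop_p (vinfo, stmt_info))
    count *= 50;
  unsigned retval = (unsigned) (count * stmt_cost);
  cost[where] += retval;
  return retval;
}

The aarch64, arm, i386 and rs6000 hunks in this patch differ from this shape only in their per-target cost adjustments.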
diff --git a/gcc/target.def b/gcc/target.def index 62e3d625a7f..f8d26e63021 100644 --- a/gcc/target.def +++ b/gcc/target.def @@ -2030,7 +2030,7 @@ DEFHOOK "return value should be viewed as a tentative cost that may later be " "revised.", unsigned, - (void *data, int count, enum vect_cost_for_stmt kind, + (class vec_info *, void *data, int count, enum vect_cost_for_stmt kind, class _stmt_vec_info *stmt_info, int misalign, enum vect_cost_model_location where), default_add_stmt_cost) diff --git a/gcc/target.h b/gcc/target.h index 2f47c577d00..440cd25f297 100644 --- a/gcc/target.h +++ b/gcc/target.h @@ -157,7 +157,7 @@ class predefined_function_abi; /* These are defined in tree-vect-stmts.c. */ extern tree stmt_vectype (class _stmt_vec_info *); -extern bool stmt_in_inner_loop_p (class _stmt_vec_info *); +extern bool stmt_in_inner_loop_p (class vec_info *, class _stmt_vec_info *); /* Assembler instructions for creating various kinds of integer object. */ diff --git a/gcc/targhooks.c b/gcc/targhooks.c index 7cb04f30bdb..4caab8cfbfa 100644 --- a/gcc/targhooks.c +++ b/gcc/targhooks.c @@ -1348,7 +1348,8 @@ default_init_cost (class loop *loop_info ATTRIBUTE_UNUSED) it into the cost specified by WHERE, and returns the cost added. */ unsigned -default_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind, +default_add_stmt_cost (class vec_info *vinfo, void *data, int count, + enum vect_cost_for_stmt kind, class _stmt_vec_info *stmt_info, int misalign, enum vect_cost_model_location where) { @@ -1361,7 +1362,8 @@ default_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind, /* Statements in an inner loop relative to the loop being vectorized are weighted more heavily. The value here is arbitrary and could potentially be improved with analysis. */ - if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info)) + if (where == vect_body && stmt_info + && stmt_in_inner_loop_p (vinfo, stmt_info)) count *= 50; /* FIXME. */ retval = (unsigned) (count * stmt_cost); diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c index 0192aa64636..7e9ab3ec333 100644 --- a/gcc/tree-vect-data-refs.c +++ b/gcc/tree-vect-data-refs.c @@ -695,7 +695,8 @@ vect_slp_analyze_data_ref_dependence (vec_info *vinfo, disambiguating the loads. */ static bool -vect_slp_analyze_node_dependences (slp_instance instance, slp_tree node, +vect_slp_analyze_node_dependences (vec_info *vinfo, + slp_instance instance, slp_tree node, vec<stmt_vec_info> stores, stmt_vec_info last_store_info) { @@ -703,7 +704,6 @@ vect_slp_analyze_node_dependences (slp_instance instance, slp_tree node, in NODE verifying we can sink them up to the last stmt in the group. */ stmt_vec_info last_access_info = vect_find_last_scalar_stmt_in_slp (node); - vec_info *vinfo = last_access_info->vinfo; for (unsigned k = 0; k < SLP_INSTANCE_GROUP_SIZE (instance); ++k) { stmt_vec_info access_info = SLP_TREE_SCALAR_STMTS (node)[k]; @@ -781,7 +781,7 @@ vect_slp_analyze_node_dependences (slp_instance instance, slp_tree node, the maximum vectorization factor the data dependences allow. */ bool -vect_slp_analyze_instance_dependence (slp_instance instance) +vect_slp_analyze_instance_dependence (vec_info *vinfo, slp_instance instance) { DUMP_VECT_SCOPE ("vect_slp_analyze_instance_dependence"); @@ -794,7 +794,8 @@ vect_slp_analyze_instance_dependence (slp_instance instance) stmt_vec_info last_store_info = NULL; if (store) { - if (! vect_slp_analyze_node_dependences (instance, store, vNULL, NULL)) + if (!
vect_slp_analyze_node_dependences (vinfo, instance, store, + vNULL, NULL)) return false; /* Mark stores in this instance and remember the last one. */ @@ -810,7 +811,7 @@ vect_slp_analyze_instance_dependence (slp_instance instance) slp_tree load; unsigned int i; FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, load) - if (! vect_slp_analyze_node_dependences (instance, load, + if (! vect_slp_analyze_node_dependences (vinfo, instance, load, store ? SLP_TREE_SCALAR_STMTS (store) : vNULL, last_store_info)) @@ -831,10 +832,9 @@ vect_slp_analyze_instance_dependence (slp_instance instance) in STMT_INFO. */ static void -vect_record_base_alignment (stmt_vec_info stmt_info, +vect_record_base_alignment (vec_info *vinfo, stmt_vec_info stmt_info, innermost_loop_behavior *drb) { - vec_info *vinfo = stmt_info->vinfo; bool existed; innermost_loop_behavior *&entry = vinfo->base_alignments.get_or_insert (drb->base_address, &existed); @@ -877,13 +877,13 @@ vect_record_base_alignments (vec_info *vinfo) && STMT_VINFO_VECTORIZABLE (stmt_info) && !STMT_VINFO_GATHER_SCATTER_P (stmt_info)) { - vect_record_base_alignment (stmt_info, &DR_INNERMOST (dr)); + vect_record_base_alignment (vinfo, stmt_info, &DR_INNERMOST (dr)); /* If DR is nested in the loop that is being vectorized, we can also record the alignment of the base wrt the outer loop. */ if (loop && nested_in_vect_loop_p (loop, stmt_info)) vect_record_base_alignment - (stmt_info, &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info)); + (vinfo, stmt_info, &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info)); } } } @@ -908,11 +908,11 @@ vect_calculate_target_alignment (dr_vec_info *dr_info) only for trivial cases. TODO. */ static void -vect_compute_data_ref_alignment (dr_vec_info *dr_info) +vect_compute_data_ref_alignment (vec_info *vinfo, dr_vec_info *dr_info) { stmt_vec_info stmt_info = dr_info->stmt; - vec_base_alignments *base_alignments = &stmt_info->vinfo->base_alignments; - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + vec_base_alignments *base_alignments = &vinfo->base_alignments; + loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo); class loop *loop = NULL; tree ref = DR_REF (dr_info->dr); tree vectype = STMT_VINFO_VECTYPE (stmt_info); @@ -930,7 +930,7 @@ vect_compute_data_ref_alignment (dr_vec_info *dr_info) if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) return; - innermost_loop_behavior *drb = vect_dr_behavior (dr_info); + innermost_loop_behavior *drb = vect_dr_behavior (vinfo, dr_info); bool step_preserves_misalignment_p; poly_uint64 vector_alignment @@ -1137,10 +1137,10 @@ vect_update_misalignment_for_peel (dr_vec_info *dr_info, Return TRUE if DR_INFO can be handled with respect to alignment. */ static opt_result -verify_data_ref_alignment (dr_vec_info *dr_info) +verify_data_ref_alignment (vec_info *vinfo, dr_vec_info *dr_info) { enum dr_alignment_support supportable_dr_alignment - = vect_supportable_dr_alignment (dr_info, false); + = vect_supportable_dr_alignment (vinfo, dr_info, false); if (!supportable_dr_alignment) return opt_result::failure_at (dr_info->stmt->stmt, @@ -1187,7 +1187,7 @@ vect_verify_datarefs_alignment (loop_vec_info vinfo) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)) continue; - opt_result res = verify_data_ref_alignment (dr_info); + opt_result res = verify_data_ref_alignment (vinfo, dr_info); if (!res) return res; } @@ -1278,14 +1278,14 @@ vector_alignment_reachable_p (dr_vec_info *dr_info) /* Calculate the cost of the memory access represented by DR_INFO.
*/ static void -vect_get_data_access_cost (dr_vec_info *dr_info, +vect_get_data_access_cost (vec_info *vinfo, dr_vec_info *dr_info, unsigned int *inside_cost, unsigned int *outside_cost, stmt_vector_for_cost *body_cost_vec, stmt_vector_for_cost *prologue_cost_vec) { stmt_vec_info stmt_info = dr_info->stmt; - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo); int ncopies; if (PURE_SLP_STMT (stmt_info)) @@ -1294,10 +1294,10 @@ vect_get_data_access_cost (dr_vec_info *dr_info, ncopies = vect_get_num_copies (loop_vinfo, STMT_VINFO_VECTYPE (stmt_info)); if (DR_IS_READ (dr_info->dr)) - vect_get_load_cost (stmt_info, ncopies, true, inside_cost, outside_cost, - prologue_cost_vec, body_cost_vec, false); + vect_get_load_cost (vinfo, stmt_info, ncopies, true, inside_cost, + outside_cost, prologue_cost_vec, body_cost_vec, false); else - vect_get_store_cost (stmt_info, ncopies, inside_cost, body_cost_vec); + vect_get_store_cost (vinfo, stmt_info, ncopies, inside_cost, body_cost_vec); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, @@ -1315,6 +1315,7 @@ typedef struct _vect_peel_info typedef struct _vect_peel_extended_info { + vec_info *vinfo; struct _vect_peel_info peel_info; unsigned int inside_cost; unsigned int outside_cost; @@ -1352,7 +1353,7 @@ vect_peeling_hash_insert (hash_table<peel_info_hasher> *peeling_htab, struct _vect_peel_info elem, *slot; _vect_peel_info **new_slot; bool supportable_dr_alignment - = vect_supportable_dr_alignment (dr_info, true); + = vect_supportable_dr_alignment (loop_vinfo, dr_info, true); elem.npeel = npeel; slot = peeling_htab->find (&elem); @@ -1440,7 +1441,7 @@ vect_get_peeling_costs_all_drs (loop_vec_info loop_vinfo, SET_DR_MISALIGNMENT (dr_info, 0); else vect_update_misalignment_for_peel (dr_info, dr0_info, npeel); - vect_get_data_access_cost (dr_info, inside_cost, outside_cost, + vect_get_data_access_cost (loop_vinfo, dr_info, inside_cost, outside_cost, body_cost_vec, prologue_cost_vec); SET_DR_MISALIGNMENT (dr_info, save_misalignment); } @@ -1456,8 +1457,7 @@ vect_peeling_hash_get_lowest_cost (_vect_peel_info **slot, vect_peel_info elem = *slot; int dummy; unsigned int inside_cost = 0, outside_cost = 0; - stmt_vec_info stmt_info = elem->dr_info->stmt; - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (min->vinfo); stmt_vector_for_cost prologue_cost_vec, body_cost_vec, epilogue_cost_vec; @@ -1509,6 +1509,7 @@ vect_peeling_hash_choose_best_peeling (hash_table<peel_info_hasher> *peeling_hta struct _vect_peel_extended_info res; res.peel_info.dr_info = NULL; + res.vinfo = loop_vinfo; if (!unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo))) { @@ -1565,7 +1566,7 @@ vect_peeling_supportable (loop_vec_info loop_vinfo, dr_vec_info *dr0_info, save_misalignment = DR_MISALIGNMENT (dr_info); vect_update_misalignment_for_peel (dr_info, dr0_info, npeel); supportable_dr_alignment - = vect_supportable_dr_alignment (dr_info, false); + = vect_supportable_dr_alignment (loop_vinfo, dr_info, false); SET_DR_MISALIGNMENT (dr_info, save_misalignment); if (!supportable_dr_alignment) @@ -1753,7 +1754,8 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)) continue; - supportable_dr_alignment = vect_supportable_dr_alignment (dr_info, true); + supportable_dr_alignment + = vect_supportable_dr_alignment (loop_vinfo, dr_info, true); do_peeling = vector_alignment_reachable_p (dr_info); if (do_peeling) { @@ -2217,7 +2219,7 @@
vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo) } supportable_dr_alignment - = vect_supportable_dr_alignment (dr_info, false); + = vect_supportable_dr_alignment (loop_vinfo, dr_info, false); if (!supportable_dr_alignment) { @@ -2415,7 +2417,7 @@ vect_analyze_data_refs_alignment (loop_vec_info vinfo) { dr_vec_info *dr_info = vinfo->lookup_dr (dr); if (STMT_VINFO_VECTORIZABLE (dr_info->stmt)) - vect_compute_data_ref_alignment (dr_info); + vect_compute_data_ref_alignment (vinfo, dr_info); } return opt_result::success (); @@ -2425,7 +2427,7 @@ /* Analyze alignment of DRs of stmts in NODE. */ static bool -vect_slp_analyze_and_verify_node_alignment (slp_tree node) +vect_slp_analyze_and_verify_node_alignment (vec_info *vinfo, slp_tree node) { /* We vectorize from the first scalar stmt in the node unless the node is permuted in which case we start from the first @@ -2436,12 +2438,12 @@ vect_slp_analyze_and_verify_node_alignment (slp_tree node) first_stmt_info = DR_GROUP_FIRST_ELEMENT (first_stmt_info); dr_vec_info *dr_info = STMT_VINFO_DR_INFO (first_stmt_info); - vect_compute_data_ref_alignment (dr_info); + vect_compute_data_ref_alignment (vinfo, dr_info); /* For creating the data-ref pointer we need alignment of the first element anyway. */ if (dr_info != first_dr_info) - vect_compute_data_ref_alignment (first_dr_info); - if (! verify_data_ref_alignment (dr_info)) + vect_compute_data_ref_alignment (vinfo, first_dr_info); + if (! verify_data_ref_alignment (vinfo, dr_info)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, @@ -2459,20 +2461,21 @@ vect_slp_analyze_and_verify_node_alignment (slp_tree node) Return FALSE if a data reference is found that cannot be vectorized. */ bool -vect_slp_analyze_and_verify_instance_alignment (slp_instance instance) +vect_slp_analyze_and_verify_instance_alignment (vec_info *vinfo, + slp_instance instance) { DUMP_VECT_SCOPE ("vect_slp_analyze_and_verify_instance_alignment"); slp_tree node; unsigned i; FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, node) - if (! vect_slp_analyze_and_verify_node_alignment (node)) + if (! vect_slp_analyze_and_verify_node_alignment (vinfo, node)) return false; node = SLP_INSTANCE_TREE (instance); if (STMT_VINFO_DATA_REF (SLP_TREE_SCALAR_STMTS (node)[0]) && ! vect_slp_analyze_and_verify_node_alignment - (SLP_INSTANCE_TREE (instance))) + (vinfo, SLP_INSTANCE_TREE (instance))) return false; return true; @@ -2486,15 +2489,15 @@ vect_slp_analyze_and_verify_instance_alignment (slp_instance instance) Worker for vect_analyze_group_access. */ static bool -vect_analyze_group_access_1 (dr_vec_info *dr_info) +vect_analyze_group_access_1 (vec_info *vinfo, dr_vec_info *dr_info) { data_reference *dr = dr_info->dr; tree step = DR_STEP (dr); tree scalar_type = TREE_TYPE (DR_REF (dr)); HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type)); stmt_vec_info stmt_info = dr_info->stmt; - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); - bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); + loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo); + bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo); HOST_WIDE_INT dr_step = -1; HOST_WIDE_INT groupsize, last_accessed_element = 1; bool slp_impossible = false; @@ -2696,9 +2699,9 @@ vect_analyze_group_access_1 (dr_vec_info *dr_info) Collect groups of strided stores for further use in SLP analysis.
*/ static bool -vect_analyze_group_access (dr_vec_info *dr_info) +vect_analyze_group_access (vec_info *vinfo, dr_vec_info *dr_info) { - if (!vect_analyze_group_access_1 (dr_info)) + if (!vect_analyze_group_access_1 (vinfo, dr_info)) { /* Dissolve the group if present. */ stmt_vec_info stmt_info = DR_GROUP_FIRST_ELEMENT (dr_info->stmt); @@ -2719,13 +2722,13 @@ vect_analyze_group_access (dr_vec_info *dr_info) analyze groups of accesses. */ static bool -vect_analyze_data_ref_access (dr_vec_info *dr_info) +vect_analyze_data_ref_access (vec_info *vinfo, dr_vec_info *dr_info) { data_reference *dr = dr_info->dr; tree step = DR_STEP (dr); tree scalar_type = TREE_TYPE (DR_REF (dr)); stmt_vec_info stmt_info = dr_info->stmt; - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo); class loop *loop = NULL; if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) @@ -2804,10 +2807,10 @@ vect_analyze_data_ref_access (dr_vec_info *dr_info) if (TREE_CODE (step) != INTEGER_CST) return (STMT_VINFO_STRIDED_P (stmt_info) && (!STMT_VINFO_GROUPED_ACCESS (stmt_info) - || vect_analyze_group_access (dr_info))); + || vect_analyze_group_access (vinfo, dr_info))); /* Not consecutive access - check if it's a part of interleaving group. */ - return vect_analyze_group_access (dr_info); + return vect_analyze_group_access (vinfo, dr_info); } /* Compare two data-references DRA and DRB to group them into chunks @@ -3153,7 +3156,7 @@ vect_analyze_data_ref_accesses (vec_info *vinfo) { dr_vec_info *dr_info = vinfo->lookup_dr (dr); if (STMT_VINFO_VECTORIZABLE (dr_info->stmt) - && !vect_analyze_data_ref_access (dr_info)) + && !vect_analyze_data_ref_access (vinfo, dr_info)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, @@ -3204,7 +3207,7 @@ vect_vfa_segment_size (dr_vec_info *dr_info, tree length_factor) gives the worst-case number of bytes covered by the segment. */ static unsigned HOST_WIDE_INT -vect_vfa_access_size (dr_vec_info *dr_info) +vect_vfa_access_size (vec_info *vinfo, dr_vec_info *dr_info) { stmt_vec_info stmt_vinfo = dr_info->stmt; tree ref_type = TREE_TYPE (DR_REF (dr_info->dr)); @@ -3216,7 +3219,7 @@ vect_vfa_access_size (dr_vec_info *dr_info) access_size *= DR_GROUP_SIZE (stmt_vinfo) - DR_GROUP_GAP (stmt_vinfo); } if (STMT_VINFO_VEC_STMT (stmt_vinfo) && (vect_supportable_dr_alignment (dr_info, false) - && (vect_supportable_dr_alignment (dr_info, false) + && (vect_supportable_dr_alignment (vinfo, dr_info, false) == dr_explicit_realign_optimized)) { /* We might access a full vector's worth. */ @@ -3592,8 +3595,8 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo) segment_length_a = vect_vfa_segment_size (dr_info_a, length_factor); segment_length_b = vect_vfa_segment_size (dr_info_b, length_factor); } - access_size_a = vect_vfa_access_size (dr_info_a); - access_size_b = vect_vfa_access_size (dr_info_b); + access_size_a = vect_vfa_access_size (loop_vinfo, dr_info_a); + access_size_b = vect_vfa_access_size (loop_vinfo, dr_info_b); align_a = vect_vfa_align (dr_info_a); align_b = vect_vfa_align (dr_info_b); @@ -4580,7 +4583,7 @@ vect_duplicate_ssa_name_ptr_info (tree name, dr_vec_info *dr_info) FORNOW: We are only handling array accesses with step 1.
*/ tree -vect_create_addr_base_for_vector_ref (stmt_vec_info stmt_info, +vect_create_addr_base_for_vector_ref (vec_info *vinfo, stmt_vec_info stmt_info, gimple_seq *new_stmt_list, tree offset, tree byte_offset) @@ -4593,11 +4596,11 @@ vect_create_addr_base_for_vector_ref (stmt_vec_info stmt_info, gimple_seq seq = NULL; tree vect_ptr_type; tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr))); - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); - innermost_loop_behavior *drb = vect_dr_behavior (dr_info); + loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo); + innermost_loop_behavior *drb = vect_dr_behavior (vinfo, dr_info); tree data_ref_base = unshare_expr (drb->base_address); - tree base_offset = unshare_expr (get_dr_vinfo_offset (dr_info, true)); + tree base_offset = unshare_expr (get_dr_vinfo_offset (vinfo, dr_info, true)); tree init = unshare_expr (drb->init); if (loop_vinfo) @@ -4714,14 +4717,14 @@ vect_create_addr_base_for_vector_ref (stmt_vec_info stmt_info, 3. Return the pointer. */ tree -vect_create_data_ref_ptr (stmt_vec_info stmt_info, tree aggr_type, - class loop *at_loop, tree offset, +vect_create_data_ref_ptr (vec_info *vinfo, stmt_vec_info stmt_info, + tree aggr_type, class loop *at_loop, tree offset, tree *initial_address, gimple_stmt_iterator *gsi, gimple **ptr_incr, bool only_init, tree byte_offset, tree iv_step) { const char *base_name; - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo); class loop *loop = NULL; bool nested_in_vect_loop = false; class loop *containing_loop = NULL; @@ -4739,7 +4742,7 @@ vect_create_data_ref_ptr (stmt_vec_info stmt_info, tree aggr_type, bool insert_after; tree indx_before_incr, indx_after_incr; gimple *incr; - bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); + bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo); gcc_assert (iv_step != NULL_TREE || TREE_CODE (aggr_type) == ARRAY_TYPE @@ -4848,7 +4851,8 @@ vect_create_data_ref_ptr (stmt_vec_info stmt_info, tree aggr_type, /* Create: (&(base[init_val+offset]+byte_offset) in the loop preheader. */ - new_temp = vect_create_addr_base_for_vector_ref (stmt_info, &new_stmt_list, + new_temp = vect_create_addr_base_for_vector_ref (vinfo, + stmt_info, &new_stmt_list, offset, byte_offset); if (new_stmt_list) { @@ -4875,7 +4879,7 @@ vect_create_data_ref_ptr (stmt_vec_info stmt_info, tree aggr_type, { /* Accesses to invariant addresses should be handled specially by the caller. */ - tree step = vect_dr_behavior (dr_info)->step; + tree step = vect_dr_behavior (vinfo, dr_info)->step; gcc_assert (!integer_zerop (step)); if (iv_step == NULL_TREE) @@ -4977,7 +4981,8 @@ vect_create_data_ref_ptr (stmt_vec_info stmt_info, tree aggr_type, */ tree -bump_vector_ptr (tree dataref_ptr, gimple *ptr_incr, gimple_stmt_iterator *gsi, +bump_vector_ptr (vec_info *vinfo, + tree dataref_ptr, gimple *ptr_incr, gimple_stmt_iterator *gsi, stmt_vec_info stmt_info, tree bump) { struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); @@ -4997,7 +5002,7 @@ bump_vector_ptr (tree dataref_ptr, gimple *ptr_incr, gimple_stmt_iterator *gsi, new_dataref_ptr = make_ssa_name (TREE_TYPE (dataref_ptr)); incr_stmt = gimple_build_assign (new_dataref_ptr, POINTER_PLUS_EXPR, dataref_ptr, update); - vect_finish_stmt_generation (stmt_info, incr_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, incr_stmt, gsi); /* Copy the points-to information if it exists.
*/ if (DR_PTR_INFO (dr)) @@ -5277,7 +5282,7 @@ vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count, I4: 6 14 22 30 7 15 23 31. */ void -vect_permute_store_chain (vec<tree> dr_chain, +vect_permute_store_chain (vec_info *vinfo, vec<tree> dr_chain, unsigned int length, stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, @@ -5344,7 +5349,7 @@ vect_permute_store_chain (vec<tree> dr_chain, data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low"); perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1, vect2, perm3_mask_low); - vect_finish_stmt_generation (stmt_info, perm_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi); vect1 = data_ref; vect2 = dr_chain[2]; @@ -5355,7 +5360,7 @@ vect_permute_store_chain (vec<tree> dr_chain, data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high"); perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1, vect2, perm3_mask_high); - vect_finish_stmt_generation (stmt_info, perm_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi); (*result_chain)[j] = data_ref; } } @@ -5394,7 +5399,7 @@ vect_permute_store_chain (vec<tree> dr_chain, high = make_temp_ssa_name (vectype, NULL, "vect_inter_high"); perm_stmt = gimple_build_assign (high, VEC_PERM_EXPR, vect1, vect2, perm_mask_high); - vect_finish_stmt_generation (stmt_info, perm_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi); (*result_chain)[2*j] = high; /* Create interleaving stmt: low = VEC_PERM_EXPR <vect1, vect2, @@ -5404,7 +5409,7 @@ vect_permute_store_chain (vec<tree> dr_chain, low = make_temp_ssa_name (vectype, NULL, "vect_inter_low"); perm_stmt = gimple_build_assign (low, VEC_PERM_EXPR, vect1, vect2, perm_mask_low); - vect_finish_stmt_generation (stmt_info, perm_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi); (*result_chain)[2*j+1] = low; } memcpy (dr_chain.address (), result_chain->address (), @@ -5465,14 +5470,14 @@ vect_permute_store_chain (vec<tree> dr_chain, Return value - the result of the loop-header phi node. */ tree -vect_setup_realignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, - tree *realignment_token, +vect_setup_realignment (vec_info *vinfo, stmt_vec_info stmt_info, + gimple_stmt_iterator *gsi, tree *realignment_token, enum dr_alignment_support alignment_support_scheme, tree init_addr, class loop **at_loop) { tree vectype = STMT_VINFO_VECTYPE (stmt_info); - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo); dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info); struct data_reference *dr = dr_info->dr; class loop *loop = NULL; @@ -5579,7 +5584,7 @@ vect_setup_realignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, gcc_assert (!compute_in_loop); vec_dest = vect_create_destination_var (scalar_dest, vectype); - ptr = vect_create_data_ref_ptr (stmt_info, vectype, + ptr = vect_create_data_ref_ptr (vinfo, stmt_info, vectype, loop_for_initial_load, NULL_TREE, &init_addr, NULL, &inc, true); if (TREE_CODE (ptr) == SSA_NAME) @@ -5626,7 +5631,8 @@ vect_setup_realignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, if (!init_addr) { /* Generate the INIT_ADDR computation outside LOOP. */ - init_addr = vect_create_addr_base_for_vector_ref (stmt_info, &stmts, + init_addr = vect_create_addr_base_for_vector_ref (vinfo, + stmt_info, &stmts, NULL_TREE); if (loop) { @@ -5900,7 +5906,7 @@ vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count, 4th vec (E4): 3 7 11 15 19 23 27 31.
*/ static void -vect_permute_load_chain (vec<tree> dr_chain, +vect_permute_load_chain (vec_info *vinfo, vec<tree> dr_chain, unsigned int length, stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, @@ -5953,7 +5959,7 @@ vect_permute_load_chain (vec<tree> dr_chain, data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low"); perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect, second_vect, perm3_mask_low); - vect_finish_stmt_generation (stmt_info, perm_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi); /* Create interleaving stmt (high part of): high = VEC_PERM_EXPR <first_vect, second_vect2, {k, 3 + k, 6 + k, ...}> */ first_vect = data_ref; second_vect = dr_chain[2]; data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high"); perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect, second_vect, perm3_mask_high); - vect_finish_stmt_generation (stmt_info, perm_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi); (*result_chain)[k] = data_ref; } } @@ -5998,7 +6004,7 @@ vect_permute_load_chain (vec<tree> dr_chain, perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect, second_vect, perm_mask_even); - vect_finish_stmt_generation (stmt_info, perm_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi); (*result_chain)[j/2] = data_ref; /* data_ref = permute_odd (first_data_ref, second_data_ref); */ @@ -6006,7 +6012,7 @@ vect_permute_load_chain (vec<tree> dr_chain, perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect, second_vect, perm_mask_odd); - vect_finish_stmt_generation (stmt_info, perm_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi); (*result_chain)[j/2+length/2] = data_ref; } memcpy (dr_chain.address (), result_chain->address (), @@ -6103,7 +6109,7 @@ vect_permute_load_chain (vec<tree> dr_chain, */ static bool -vect_shift_permute_load_chain (vec<tree> dr_chain, +vect_shift_permute_load_chain (vec_info *vinfo, vec<tree> dr_chain, unsigned int length, stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, @@ -6116,7 +6122,7 @@ vect_shift_permute_load_chain (vec<tree> dr_chain, tree vectype = STMT_VINFO_VECTYPE (stmt_info); unsigned int i; - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo); unsigned HOST_WIDE_INT nelt, vf; if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nelt) @@ -6205,26 +6211,26 @@ vect_shift_permute_load_chain (vec<tree> dr_chain, perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect, first_vect, perm2_mask1); - vect_finish_stmt_generation (stmt_info, perm_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi); vect[0] = data_ref; data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2"); perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, second_vect, second_vect, perm2_mask2); - vect_finish_stmt_generation (stmt_info, perm_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi); vect[1] = data_ref; data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift"); perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[0], vect[1], shift1_mask); - vect_finish_stmt_generation (stmt_info, perm_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi); (*result_chain)[j/2 + length/2] = data_ref; data_ref = make_temp_ssa_name (vectype, NULL, "vect_select"); perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[0], vect[1], select_mask); - vect_finish_stmt_generation (stmt_info, perm_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi); (*result_chain)[j/2] = data_ref; } memcpy
(dr_chain.address (), result_chain->address (), @@ -6321,7 +6327,7 @@ vect_shift_permute_load_chain (vec<tree> dr_chain, perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, dr_chain[k], dr_chain[k], perm3_mask); - vect_finish_stmt_generation (stmt_info, perm_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi); vect[k] = data_ref; } @@ -6331,7 +6337,7 @@ vect_shift_permute_load_chain (vec<tree> dr_chain, perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[k % 3], vect[(k + 1) % 3], shift1_mask); - vect_finish_stmt_generation (stmt_info, perm_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi); vect_shift[k] = data_ref; } @@ -6342,7 +6348,7 @@ vect_shift_permute_load_chain (vec<tree> dr_chain, vect_shift[(4 - k) % 3], vect_shift[(3 - k) % 3], shift2_mask); - vect_finish_stmt_generation (stmt_info, perm_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi); vect[k] = data_ref; } @@ -6351,13 +6357,13 @@ vect_shift_permute_load_chain (vec<tree> dr_chain, data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift3"); perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[0], vect[0], shift3_mask); - vect_finish_stmt_generation (stmt_info, perm_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi); (*result_chain)[nelt % 3] = data_ref; data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift4"); perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[1], vect[1], shift4_mask); - vect_finish_stmt_generation (stmt_info, perm_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi); (*result_chain)[0] = data_ref; return true; } @@ -6372,7 +6378,8 @@ vect_shift_permute_load_chain (vec<tree> dr_chain, */ void -vect_transform_grouped_load (stmt_vec_info stmt_info, vec<tree> dr_chain, +vect_transform_grouped_load (vec_info *vinfo, stmt_vec_info stmt_info, + vec<tree> dr_chain, int size, gimple_stmt_iterator *gsi) { machine_mode mode; @@ -6389,10 +6396,11 @@ vect_transform_grouped_load (stmt_vec_info stmt_info, vec<tree> dr_chain, mode = TYPE_MODE (STMT_VINFO_VECTYPE (stmt_info)); if (targetm.sched.reassociation_width (VEC_PERM_EXPR, mode) > 1 || pow2p_hwi (size) - || !vect_shift_permute_load_chain (dr_chain, size, stmt_info, + || !vect_shift_permute_load_chain (vinfo, dr_chain, size, stmt_info, gsi, &result_chain)) - vect_permute_load_chain (dr_chain, size, stmt_info, gsi, &result_chain); - vect_record_grouped_load_vectors (stmt_info, result_chain); + vect_permute_load_chain (vinfo, dr_chain, + size, stmt_info, gsi, &result_chain); + vect_record_grouped_load_vectors (vinfo, stmt_info, result_chain); result_chain.release (); } @@ -6401,10 +6409,9 @@ vect_transform_grouped_load (stmt_vec_info stmt_info, vec<tree> dr_chain, for each vector to the associated scalar statement. */ void -vect_record_grouped_load_vectors (stmt_vec_info stmt_info, +vect_record_grouped_load_vectors (vec_info *vinfo, stmt_vec_info stmt_info, vec<tree> result_chain) { - vec_info *vinfo = stmt_info->vinfo; stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info); unsigned int i, gap_count; tree tmp_data_ref; @@ -6493,14 +6500,14 @@ vect_can_force_dr_alignment_p (const_tree decl, poly_uint64 alignment) alignment.
enum dr_alignment_support -vect_supportable_dr_alignment (dr_vec_info *dr_info, +vect_supportable_dr_alignment (vec_info *vinfo, dr_vec_info *dr_info, bool check_aligned_accesses) { data_reference *dr = dr_info->dr; stmt_vec_info stmt_info = dr_info->stmt; tree vectype = STMT_VINFO_VECTYPE (stmt_info); machine_mode mode = TYPE_MODE (vectype); - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo); class loop *vect_loop = NULL; bool nested_in_vect_loop = false; diff --git a/gcc/tree-vect-loop-manip.c b/gcc/tree-vect-loop-manip.c index 0ee1ab45c07..8c5e696b995 100644 --- a/gcc/tree-vect-loop-manip.c +++ b/gcc/tree-vect-loop-manip.c @@ -1568,7 +1568,8 @@ get_misalign_in_elems (gimple **seq, loop_vec_info loop_vinfo) tree offset = (negative ? size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1) : size_zero_node); - tree start_addr = vect_create_addr_base_for_vector_ref (stmt_info, seq, + tree start_addr = vect_create_addr_base_for_vector_ref (loop_vinfo, + stmt_info, seq, offset); tree type = unsigned_type_for (TREE_TYPE (start_addr)); if (target_align.is_constant (&target_align_c)) @@ -3057,7 +3058,8 @@ vect_create_cond_for_align_checks (loop_vec_info loop_vinfo, /* create: addr_tmp = (int)(address_of_first_vector) */ addr_base = - vect_create_addr_base_for_vector_ref (stmt_info, &new_stmt_list, + vect_create_addr_base_for_vector_ref (loop_vinfo, + stmt_info, &new_stmt_list, offset); if (new_stmt_list != NULL) gimple_seq_add_seq (cond_expr_stmt_list, new_stmt_list); diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c index b6c3faeae51..c4c3cc9ecaa 100644 --- a/gcc/tree-vect-loop.c +++ b/gcc/tree-vect-loop.c @@ -161,7 +161,7 @@ static stmt_vec_info vect_is_simple_reduction (loop_vec_info, stmt_vec_info, may already be set for general statements (not just data refs). */ static opt_result -vect_determine_vf_for_stmt_1 (stmt_vec_info stmt_info, +vect_determine_vf_for_stmt_1 (vec_info *vinfo, stmt_vec_info stmt_info, bool vectype_maybe_set_p, poly_uint64 *vf) { @@ -177,7 +177,8 @@ vect_determine_vf_for_stmt_1 (stmt_vec_info stmt_info, } tree stmt_vectype, nunits_vectype; - opt_result res = vect_get_vector_types_for_stmt (stmt_info, &stmt_vectype, + opt_result res = vect_get_vector_types_for_stmt (vinfo, stmt_info, + &stmt_vectype, &nunits_vectype); if (!res) return res; @@ -207,13 +208,13 @@ vect_determine_vf_for_stmt_1 (stmt_vec_info stmt_info, or false if something prevented vectorization.
*/ static opt_result -vect_determine_vf_for_stmt (stmt_vec_info stmt_info, poly_uint64 *vf) +vect_determine_vf_for_stmt (vec_info *vinfo, + stmt_vec_info stmt_info, poly_uint64 *vf) { - vec_info *vinfo = stmt_info->vinfo; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: %G", stmt_info->stmt); - opt_result res = vect_determine_vf_for_stmt_1 (stmt_info, false, vf); + opt_result res = vect_determine_vf_for_stmt_1 (vinfo, stmt_info, false, vf); if (!res) return res; @@ -232,7 +233,7 @@ vect_determine_vf_for_stmt (stmt_vec_info stmt_info, poly_uint64 *vf) dump_printf_loc (MSG_NOTE, vect_location, "==> examining pattern def stmt: %G", def_stmt_info->stmt); - res = vect_determine_vf_for_stmt_1 (def_stmt_info, true, vf); + res = vect_determine_vf_for_stmt_1 (vinfo, def_stmt_info, true, vf); if (!res) return res; } @@ -241,7 +242,7 @@ vect_determine_vf_for_stmt (stmt_vec_info stmt_info, poly_uint64 *vf) dump_printf_loc (MSG_NOTE, vect_location, "==> examining pattern statement: %G", stmt_info->stmt); - res = vect_determine_vf_for_stmt_1 (stmt_info, true, vf); + res = vect_determine_vf_for_stmt_1 (vinfo, stmt_info, true, vf); if (!res) return res; } @@ -343,7 +344,8 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo) { stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si)); opt_result res - = vect_determine_vf_for_stmt (stmt_info, &vectorization_factor); + = vect_determine_vf_for_stmt (loop_vinfo, + stmt_info, &vectorization_factor); if (!res) return res; } @@ -440,9 +442,8 @@ vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init, this function would then return true for x_2. */ static bool -vect_inner_phi_in_double_reduction_p (stmt_vec_info stmt_info, gphi *phi) +vect_inner_phi_in_double_reduction_p (loop_vec_info loop_vinfo, gphi *phi) { - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); use_operand_p use_p; ssa_op_iter op_iter; FOR_EACH_PHI_ARG (use_p, phi, op_iter, SSA_OP_USE) @@ -505,7 +506,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, class loop *loop) } if (!access_fn - || vect_inner_phi_in_double_reduction_p (stmt_vinfo, phi) + || vect_inner_phi_in_double_reduction_p (loop_vinfo, phi) || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step) || (LOOP_VINFO_LOOP (loop_vinfo) != loop && TREE_CODE (step) != INTEGER_CST)) @@ -1122,7 +1123,7 @@ vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo) int j; FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si) - (void) add_stmt_cost (target_cost_data, si->count, + (void) add_stmt_cost (loop_vinfo, target_cost_data, si->count, si->kind, si->stmt_info, si->misalign, vect_body); unsigned dummy, body_cost = 0; @@ -1529,7 +1530,8 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo) if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_internal_def || (STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def)) - && !vectorizable_lc_phi (stmt_info, NULL, NULL)) + && !vectorizable_lc_phi (loop_vinfo, + stmt_info, NULL, NULL)) return opt_result::failure_at (phi, "unsupported phi\n"); } @@ -1551,21 +1553,24 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo) need_to_vectorize = true; if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def && ! 
PURE_SLP_STMT (stmt_info)) - ok = vectorizable_induction (stmt_info, NULL, NULL, NULL, + ok = vectorizable_induction (loop_vinfo, + stmt_info, NULL, NULL, NULL, &cost_vec); else if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def || (STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def) || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle) && ! PURE_SLP_STMT (stmt_info)) - ok = vectorizable_reduction (stmt_info, NULL, NULL, &cost_vec); + ok = vectorizable_reduction (loop_vinfo, + stmt_info, NULL, NULL, &cost_vec); } /* SLP PHIs are tested by vect_slp_analyze_node_operations. */ if (ok && STMT_VINFO_LIVE_P (stmt_info) && !PURE_SLP_STMT (stmt_info)) - ok = vectorizable_live_operation (stmt_info, NULL, NULL, NULL, + ok = vectorizable_live_operation (loop_vinfo, + stmt_info, NULL, NULL, NULL, -1, false, &cost_vec); if (!ok) @@ -1582,7 +1587,8 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo) if (!gimple_clobber_p (stmt)) { opt_result res - = vect_analyze_stmt (loop_vinfo->lookup_stmt (stmt), + = vect_analyze_stmt (loop_vinfo, + loop_vinfo->lookup_stmt (stmt), &need_to_vectorize, NULL, NULL, &cost_vec); if (!res) @@ -1591,7 +1597,7 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo) } } /* bbs */ - add_stmt_costs (loop_vinfo->target_cost_data, &cost_vec); + add_stmt_costs (loop_vinfo, loop_vinfo->target_cost_data, &cost_vec); /* All operations in the loop are either irrelevant (deal with loop control, or dead), or only used outside the loop and can be moved @@ -3397,8 +3403,8 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo, { /* FIXME: Make cost depend on complexity of individual check. */ unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length (); - (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0, - vect_prologue); + (void) add_stmt_cost (loop_vinfo, target_cost_data, len, vector_stmt, + NULL, 0, vect_prologue); if (dump_enabled_p ()) dump_printf (MSG_NOTE, "cost model: Adding cost of checks for loop " @@ -3410,13 +3416,13 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo, { /* FIXME: Make cost depend on complexity of individual check. */ unsigned len = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).length (); - (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0, - vect_prologue); + (void) add_stmt_cost (loop_vinfo, target_cost_data, len, vector_stmt, + NULL, 0, vect_prologue); len = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).length (); if (len) /* Count LEN - 1 ANDs and LEN comparisons. */ - (void) add_stmt_cost (target_cost_data, len * 2 - 1, scalar_stmt, - NULL, 0, vect_prologue); + (void) add_stmt_cost (loop_vinfo, target_cost_data, len * 2 - 1, + scalar_stmt, NULL, 0, vect_prologue); len = LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).length (); if (len) { @@ -3426,8 +3432,8 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo, for (unsigned int i = 0; i < len; ++i) if (!LOOP_VINFO_LOWER_BOUNDS (loop_vinfo)[i].unsigned_p) nstmts += 1; - (void) add_stmt_cost (target_cost_data, nstmts, scalar_stmt, - NULL, 0, vect_prologue); + (void) add_stmt_cost (loop_vinfo, target_cost_data, nstmts, + scalar_stmt, NULL, 0, vect_prologue); } if (dump_enabled_p ()) dump_printf (MSG_NOTE, @@ -3439,8 +3445,8 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo, if (LOOP_REQUIRES_VERSIONING_FOR_NITERS (loop_vinfo)) { /* FIXME: Make cost depend on complexity of individual check. 
*/ - (void) add_stmt_cost (target_cost_data, 1, vector_stmt, NULL, 0, - vect_prologue); + (void) add_stmt_cost (loop_vinfo, target_cost_data, 1, vector_stmt, + NULL, 0, vect_prologue); if (dump_enabled_p ()) dump_printf (MSG_NOTE, "cost model: Adding cost of checks for loop " @@ -3448,8 +3454,8 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo, } if (LOOP_REQUIRES_VERSIONING (loop_vinfo)) - (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0, - vect_prologue); + (void) add_stmt_cost (loop_vinfo, target_cost_data, 1, cond_branch_taken, + NULL, 0, vect_prologue); /* Count statements in scalar loop. Using this as scalar cost for a single iteration for now. @@ -3484,7 +3490,7 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo, int j; FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si) - (void) add_stmt_cost (target_cost_data, si->count, + (void) add_stmt_cost (loop_vinfo, target_cost_data, si->count, si->kind, si->stmt_info, si->misalign, vect_epilogue); } @@ -3510,9 +3516,11 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo, simpler and safer to use the worst-case cost; if this ends up being the tie-breaker between vectorizing or not, then it's probably better not to vectorize. */ - (void) add_stmt_cost (target_cost_data, num_masks, vector_stmt, + (void) add_stmt_cost (loop_vinfo, + target_cost_data, num_masks, vector_stmt, NULL, 0, vect_prologue); - (void) add_stmt_cost (target_cost_data, num_masks - 1, vector_stmt, + (void) add_stmt_cost (loop_vinfo, + target_cost_data, num_masks - 1, vector_stmt, NULL, 0, vect_body); } else if (npeel < 0) @@ -3534,23 +3542,25 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo, branch per peeled loop. Even if scalar loop iterations are known, vector iterations are not known since peeled prologue iterations are not known. Hence guards remain the same. 
*/ - (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, + (void) add_stmt_cost (loop_vinfo, target_cost_data, 1, cond_branch_taken, NULL, 0, vect_prologue); - (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken, + (void) add_stmt_cost (loop_vinfo, + target_cost_data, 1, cond_branch_not_taken, NULL, 0, vect_prologue); - (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, + (void) add_stmt_cost (loop_vinfo, target_cost_data, 1, cond_branch_taken, NULL, 0, vect_epilogue); - (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken, + (void) add_stmt_cost (loop_vinfo, + target_cost_data, 1, cond_branch_not_taken, NULL, 0, vect_epilogue); stmt_info_for_cost *si; int j; FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si) { - (void) add_stmt_cost (target_cost_data, + (void) add_stmt_cost (loop_vinfo, target_cost_data, si->count * peel_iters_prologue, si->kind, si->stmt_info, si->misalign, vect_prologue); - (void) add_stmt_cost (target_cost_data, + (void) add_stmt_cost (loop_vinfo, target_cost_data, si->count * peel_iters_epilogue, si->kind, si->stmt_info, si->misalign, vect_epilogue); @@ -3575,11 +3585,13 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo, &epilogue_cost_vec); FOR_EACH_VEC_ELT (prologue_cost_vec, j, si) - (void) add_stmt_cost (data, si->count, si->kind, si->stmt_info, + (void) add_stmt_cost (loop_vinfo, + data, si->count, si->kind, si->stmt_info, si->misalign, vect_prologue); FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si) - (void) add_stmt_cost (data, si->count, si->kind, si->stmt_info, + (void) add_stmt_cost (loop_vinfo, + data, si->count, si->kind, si->stmt_info, si->misalign, vect_epilogue); prologue_cost_vec.release (); @@ -3910,7 +3922,8 @@ have_whole_vector_shift (machine_mode mode) the loop, and the epilogue code that must be generated. */ static void -vect_model_reduction_cost (stmt_vec_info stmt_info, internal_fn reduc_fn, +vect_model_reduction_cost (loop_vec_info loop_vinfo, + stmt_vec_info stmt_info, internal_fn reduc_fn, vect_reduction_type reduction_type, int ncopies, stmt_vector_for_cost *cost_vec) { @@ -3919,7 +3932,6 @@ vect_model_reduction_cost (stmt_vec_info stmt_info, internal_fn reduc_fn, optab optab; tree vectype; machine_mode mode; - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); class loop *loop = NULL; if (loop_vinfo) @@ -4148,11 +4160,11 @@ vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies, A cost model should help decide between these two schemes. */ static tree -get_initial_def_for_reduction (stmt_vec_info stmt_vinfo, +get_initial_def_for_reduction (loop_vec_info loop_vinfo, + stmt_vec_info stmt_vinfo, enum tree_code code, tree init_val, tree *adjustment_def) { - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo); class loop *loop = LOOP_VINFO_LOOP (loop_vinfo); tree scalar_type = TREE_TYPE (init_val); tree vectype = get_vectype_for_scalar_type (loop_vinfo, scalar_type); @@ -4252,14 +4264,14 @@ get_initial_def_for_reduction (stmt_vec_info stmt_vinfo, value will not change the result. 
*/ static void -get_initial_defs_for_reduction (slp_tree slp_node, +get_initial_defs_for_reduction (vec_info *vinfo, + slp_tree slp_node, vec<tree> *vec_oprnds, unsigned int number_of_vectors, bool reduc_chain, tree neutral_op) { vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node); stmt_vec_info stmt_vinfo = stmts[0]; - vec_info *vinfo = stmt_vinfo->vinfo; unsigned HOST_WIDE_INT nunits; unsigned j, number_of_places_left_in_vector; tree vector_type; @@ -4372,7 +4384,7 @@ get_initial_defs_for_reduction (slp_tree slp_node, the stmt_vec_info the meta information is stored on. */ stmt_vec_info -info_for_reduction (stmt_vec_info stmt_info) +info_for_reduction (vec_info *vinfo, stmt_vec_info stmt_info) { stmt_info = vect_orig_stmt (stmt_info); gcc_assert (STMT_VINFO_REDUC_DEF (stmt_info)); @@ -4388,7 +4400,7 @@ info_for_reduction (stmt_vec_info stmt_info) { edge pe = loop_preheader_edge (gimple_bb (phi)->loop_father); stmt_vec_info info - = stmt_info->vinfo->lookup_def (PHI_ARG_DEF_FROM_EDGE (phi, pe)); + = vinfo->lookup_def (PHI_ARG_DEF_FROM_EDGE (phi, pe)); if (info && STMT_VINFO_DEF_TYPE (info) == vect_double_reduction_def) stmt_info = info; } @@ -4443,13 +4455,13 @@ info_for_reduction (stmt_vec_info stmt_info) */ static void -vect_create_epilog_for_reduction (stmt_vec_info stmt_info, +vect_create_epilog_for_reduction (loop_vec_info loop_vinfo, + stmt_vec_info stmt_info, slp_tree slp_node, slp_instance slp_node_instance) { - stmt_vec_info reduc_info = info_for_reduction (stmt_info); + stmt_vec_info reduc_info = info_for_reduction (loop_vinfo, stmt_info); gcc_assert (reduc_info->is_reduc_info); - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); /* For double reductions we need to get at the inner loop reduction stmt which has the meta info attached. Our stmt_info is that of the loop-closed PHI of the inner loop which we remember as @@ -5659,7 +5671,8 @@ get_masked_reduction_fn (internal_fn reduc_fn, tree vectype_in) that should be used to control the operation in a fully-masked loop.
*/ static bool -vectorize_fold_left_reduction (stmt_vec_info stmt_info, +vectorize_fold_left_reduction (loop_vec_info loop_vinfo, + stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, slp_tree slp_node, gimple *reduc_def_stmt, @@ -5667,7 +5680,6 @@ vectorize_fold_left_reduction (stmt_vec_info stmt_info, tree ops[3], tree vectype_in, int reduc_index, vec_loop_masks *masks) { - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); class loop *loop = LOOP_VINFO_LOOP (loop_vinfo); tree vectype_out = STMT_VINFO_VECTYPE (stmt_info); stmt_vec_info new_stmt_info = NULL; @@ -5695,7 +5707,7 @@ vectorize_fold_left_reduction (stmt_vec_info stmt_info, if (slp_node) { auto_vec > vec_defs (2); - vect_get_slp_defs (slp_node, &vec_defs); + vect_get_slp_defs (loop_vinfo, slp_node, &vec_defs); vec_oprnds0.safe_splice (vec_defs[1 - reduc_index]); vec_defs[0].release (); vec_defs[1].release (); @@ -5704,7 +5716,8 @@ vectorize_fold_left_reduction (stmt_vec_info stmt_info, } else { - tree loop_vec_def0 = vect_get_vec_def_for_operand (op0, stmt_info); + tree loop_vec_def0 = vect_get_vec_def_for_operand (loop_vinfo, + op0, stmt_info); vec_oprnds0.create (1); vec_oprnds0.quick_push (loop_vec_def0); scalar_dest_def_info = stmt_info; @@ -5782,11 +5795,13 @@ vectorize_fold_left_reduction (stmt_vec_info stmt_info, if (i == vec_num - 1) { gimple_set_lhs (new_stmt, scalar_dest); - new_stmt_info = vect_finish_replace_stmt (scalar_dest_def_info, + new_stmt_info = vect_finish_replace_stmt (loop_vinfo, + scalar_dest_def_info, new_stmt); } else - new_stmt_info = vect_finish_stmt_generation (scalar_dest_def_info, + new_stmt_info = vect_finish_stmt_generation (loop_vinfo, + scalar_dest_def_info, new_stmt, gsi); if (slp_node) @@ -5953,13 +5968,13 @@ build_vect_cond_expr (enum tree_code code, tree vop[3], tree mask, does *NOT* necessarily hold for reduction patterns. */ bool -vectorizable_reduction (stmt_vec_info stmt_info, slp_tree slp_node, +vectorizable_reduction (loop_vec_info loop_vinfo, + stmt_vec_info stmt_info, slp_tree slp_node, slp_instance slp_node_instance, stmt_vector_for_cost *cost_vec) { tree scalar_dest; tree vectype_in = NULL_TREE; - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); class loop *loop = LOOP_VINFO_LOOP (loop_vinfo); enum vect_def_type cond_reduc_dt = vect_unknown_def_type; stmt_vec_info cond_stmt_vinfo = NULL; @@ -5981,7 +5996,7 @@ vectorizable_reduction (stmt_vec_info stmt_info, slp_tree slp_node, return false; /* The stmt we store reduction analysis meta on. */ - stmt_vec_info reduc_info = info_for_reduction (stmt_info); + stmt_vec_info reduc_info = info_for_reduction (loop_vinfo, stmt_info); reduc_info->is_reduc_info = true; if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle) @@ -6714,8 +6729,8 @@ vectorizable_reduction (stmt_vec_info stmt_info, slp_tree slp_node, else vec_num = 1; - vect_model_reduction_cost (stmt_info, reduc_fn, reduction_type, ncopies, - cost_vec); + vect_model_reduction_cost (loop_vinfo, stmt_info, reduc_fn, + reduction_type, ncopies, cost_vec); if (dump_enabled_p () && reduction_type == FOLD_LEFT_REDUCTION) dump_printf_loc (MSG_NOTE, vect_location, @@ -6779,18 +6794,18 @@ vectorizable_reduction (stmt_vec_info stmt_info, slp_tree slp_node, value. 
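   For example (sketch only): given

     for (i = 0; i < n; ++i)
       sum = sum + a[i];

   this transforms the non-PHI statement into vsum_1 = vsum_0 + va,
   while the PHI itself is handled by vect_transform_cycle_phi and the
   final combination of vsum's lanes by
   vect_create_epilog_for_reduction.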
*/ bool -vect_transform_reduction (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, +vect_transform_reduction (loop_vec_info loop_vinfo, + stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, slp_tree slp_node) { tree vectype_out = STMT_VINFO_VECTYPE (stmt_info); - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); class loop *loop = LOOP_VINFO_LOOP (loop_vinfo); int i; int ncopies; int j; int vec_num; - stmt_vec_info reduc_info = info_for_reduction (stmt_info); + stmt_vec_info reduc_info = info_for_reduction (loop_vinfo, stmt_info); gcc_assert (reduc_info->is_reduc_info); if (nested_in_vect_loop_p (loop, stmt_info)) @@ -6865,7 +6880,7 @@ vect_transform_reduction (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, { internal_fn reduc_fn = STMT_VINFO_REDUC_FN (reduc_info); return vectorize_fold_left_reduction - (stmt_info, gsi, vec_stmt, slp_node, reduc_def_phi, code, + (loop_vinfo, stmt_info, gsi, vec_stmt, slp_node, reduc_def_phi, code, reduc_fn, ops, vectype_in, reduc_index, masks); } @@ -6898,7 +6913,7 @@ vect_transform_reduction (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, /* Get vec defs for all the operands except the reduction index, ensuring the ordering of the ops in the vector is kept. */ auto_vec, 3> vec_defs; - vect_get_slp_defs (slp_node, &vec_defs); + vect_get_slp_defs (loop_vinfo, slp_node, &vec_defs); vec_oprnds0.safe_splice (vec_defs[0]); vec_defs[0].release (); vec_oprnds1.safe_splice (vec_defs[1]); @@ -6912,12 +6927,12 @@ vect_transform_reduction (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, else { vec_oprnds0.quick_push - (vect_get_vec_def_for_operand (ops[0], stmt_info)); + (vect_get_vec_def_for_operand (loop_vinfo, ops[0], stmt_info)); vec_oprnds1.quick_push - (vect_get_vec_def_for_operand (ops[1], stmt_info)); + (vect_get_vec_def_for_operand (loop_vinfo, ops[1], stmt_info)); if (op_type == ternary_op) vec_oprnds2.quick_push - (vect_get_vec_def_for_operand (ops[2], stmt_info)); + (vect_get_vec_def_for_operand (loop_vinfo, ops[2], stmt_info)); } } else @@ -6970,7 +6985,8 @@ vect_transform_reduction (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, gimple_call_set_lhs (call, new_temp); gimple_call_set_nothrow (call, true); new_stmt_info - = vect_finish_stmt_generation (stmt_info, call, gsi); + = vect_finish_stmt_generation (loop_vinfo, + stmt_info, call, gsi); } else { @@ -6990,7 +7006,8 @@ vect_transform_reduction (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, new_temp = make_ssa_name (vec_dest, new_stmt); gimple_assign_set_lhs (new_stmt, new_temp); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + = vect_finish_stmt_generation (loop_vinfo, + stmt_info, new_stmt, gsi); } if (slp_node) @@ -7017,11 +7034,11 @@ vect_transform_reduction (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, /* Transform phase of a cycle PHI. 
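   Illustrative sketch (not from the patch): for the scalar reduction
   PHI

     sum_1 = PHI <init (preheader), sum_2 (latch)>

   this creates the corresponding vector PHI

     vsum_1 = PHI <vec_init (preheader), vsum_2 (latch)>

   where vec_init comes from get_initial_def_for_reduction, or from
   get_initial_defs_for_reduction in the SLP case.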
*/ bool -vect_transform_cycle_phi (stmt_vec_info stmt_info, stmt_vec_info *vec_stmt, +vect_transform_cycle_phi (loop_vec_info loop_vinfo, + stmt_vec_info stmt_info, stmt_vec_info *vec_stmt, slp_tree slp_node, slp_instance slp_node_instance) { tree vectype_out = STMT_VINFO_VECTYPE (stmt_info); - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); class loop *loop = LOOP_VINFO_LOOP (loop_vinfo); int i; int ncopies; @@ -7038,7 +7055,7 @@ vect_transform_cycle_phi (stmt_vec_info stmt_info, stmt_vec_info *vec_stmt, stmt_vec_info reduc_stmt_info = STMT_VINFO_REDUC_DEF (stmt_info); reduc_stmt_info = vect_stmt_to_vectorize (reduc_stmt_info); - stmt_vec_info reduc_info = info_for_reduction (stmt_info); + stmt_vec_info reduc_info = info_for_reduction (loop_vinfo, stmt_info); gcc_assert (reduc_info->is_reduc_info); if (STMT_VINFO_REDUC_TYPE (reduc_info) == EXTRACT_LAST_REDUCTION @@ -7088,7 +7105,7 @@ vect_transform_cycle_phi (stmt_vec_info stmt_info, stmt_vec_info *vec_stmt, = neutral_op_for_slp_reduction (slp_node, vectype_out, STMT_VINFO_REDUC_CODE (reduc_info), first != NULL); - get_initial_defs_for_reduction (slp_node_instance->reduc_phis, + get_initial_defs_for_reduction (loop_vinfo, slp_node_instance->reduc_phis, &vec_initial_defs, vec_num, first != NULL, neutral_op); } @@ -7122,7 +7139,8 @@ vect_transform_cycle_phi (stmt_vec_info stmt_info, stmt_vec_info *vec_stmt, { /* Do not use an adjustment def as that case is not supported correctly if ncopies is not one. */ - vec_initial_def = vect_get_vec_def_for_operand (initial_def, + vec_initial_def = vect_get_vec_def_for_operand (loop_vinfo, + initial_def, reduc_stmt_info); } else @@ -7133,7 +7151,7 @@ vect_transform_cycle_phi (stmt_vec_info stmt_info, stmt_vec_info *vec_stmt, if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def) adjustment_defp = NULL; vec_initial_def - = get_initial_def_for_reduction (reduc_stmt_info, code, + = get_initial_def_for_reduction (loop_vinfo, reduc_stmt_info, code, initial_def, adjustment_defp); STMT_VINFO_REDUC_EPILOGUE_ADJUSTMENT (reduc_info) = adjustment_def; } @@ -7181,10 +7199,10 @@ vect_transform_cycle_phi (stmt_vec_info stmt_info, stmt_vec_info *vec_stmt, /* Vectorizes LC PHIs. */ bool -vectorizable_lc_phi (stmt_vec_info stmt_info, stmt_vec_info *vec_stmt, +vectorizable_lc_phi (loop_vec_info loop_vinfo, + stmt_vec_info stmt_info, stmt_vec_info *vec_stmt, slp_tree slp_node) { - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); if (!loop_vinfo || !is_a (stmt_info->stmt) || gimple_phi_num_args (stmt_info->stmt) != 1) @@ -7206,7 +7224,8 @@ vectorizable_lc_phi (stmt_vec_info stmt_info, stmt_vec_info *vec_stmt, edge e = single_pred_edge (bb); tree vec_dest = vect_create_destination_var (scalar_dest, vectype); vec vec_oprnds = vNULL; - vect_get_vec_defs (gimple_phi_arg_def (stmt_info->stmt, 0), NULL_TREE, + vect_get_vec_defs (loop_vinfo, + gimple_phi_arg_def (stmt_info->stmt, 0), NULL_TREE, stmt_info, &vec_oprnds, NULL, slp_node); if (slp_node) { @@ -7294,12 +7313,12 @@ vect_worthwhile_without_simd_p (vec_info *vinfo, tree_code code) Return true if STMT_INFO is vectorizable in this way. 
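   As a concrete illustration (VF = 4, not taken from the patch): the
   induction

     for (i = 0; i < n; ++i)
       ... = i;

   uses the initial vector { 0, 1, 2, 3 } and the step vector
   { 4, 4, 4, 4 }, with each vector-loop iteration adding the step
   vector to the previous vector IV.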
*/ bool -vectorizable_induction (stmt_vec_info stmt_info, +vectorizable_induction (loop_vec_info loop_vinfo, + stmt_vec_info stmt_info, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED, stmt_vec_info *vec_stmt, slp_tree slp_node, stmt_vector_for_cost *cost_vec) { - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); class loop *loop = LOOP_VINFO_LOOP (loop_vinfo); unsigned ncopies; bool nested_in_vect_loop = false; @@ -7494,10 +7513,11 @@ vectorizable_induction (stmt_vec_info stmt_info, new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr), expr, step_expr); if (! CONSTANT_CLASS_P (new_name)) - new_name = vect_init_vector (stmt_info, new_name, + new_name = vect_init_vector (loop_vinfo, stmt_info, new_name, TREE_TYPE (step_expr), NULL); new_vec = build_vector_from_val (step_vectype, new_name); - vec_step = vect_init_vector (stmt_info, new_vec, step_vectype, NULL); + vec_step = vect_init_vector (loop_vinfo, stmt_info, + new_vec, step_vectype, NULL); /* Now generate the IVs. */ unsigned group_size = SLP_TREE_SCALAR_STMTS (slp_node).length (); @@ -7568,10 +7588,11 @@ vectorizable_induction (stmt_vec_info stmt_info, new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr), expr, step_expr); if (! CONSTANT_CLASS_P (new_name)) - new_name = vect_init_vector (stmt_info, new_name, + new_name = vect_init_vector (loop_vinfo, stmt_info, new_name, TREE_TYPE (step_expr), NULL); new_vec = build_vector_from_val (step_vectype, new_name); - vec_step = vect_init_vector (stmt_info, new_vec, step_vectype, NULL); + vec_step = vect_init_vector (loop_vinfo, stmt_info, new_vec, + step_vectype, NULL); for (; ivn < nvects; ++ivn) { gimple *iv = SLP_TREE_VEC_STMTS (slp_node)[ivn - nivs]->stmt; @@ -7606,7 +7627,8 @@ vectorizable_induction (stmt_vec_info stmt_info, /* iv_loop is nested in the loop to be vectorized. init_expr had already been created during vectorization of previous stmts. We obtain it from the STMT_VINFO_VEC_STMT of the defining stmt. */ - vec_init = vect_get_vec_def_for_operand (init_expr, stmt_info); + vec_init = vect_get_vec_def_for_operand (loop_vinfo, + init_expr, stmt_info); /* If the initial value is not of proper type, convert it. */ if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init))) { @@ -7709,7 +7731,8 @@ vectorizable_induction (stmt_vec_info stmt_info, gcc_assert (CONSTANT_CLASS_P (new_name) || TREE_CODE (new_name) == SSA_NAME); new_vec = build_vector_from_val (step_vectype, t); - vec_step = vect_init_vector (stmt_info, new_vec, step_vectype, NULL); + vec_step = vect_init_vector (loop_vinfo, stmt_info, + new_vec, step_vectype, NULL); /* Create the following def-use cycle: @@ -7778,7 +7801,8 @@ vectorizable_induction (stmt_vec_info stmt_info, gcc_assert (CONSTANT_CLASS_P (new_name) || TREE_CODE (new_name) == SSA_NAME); new_vec = build_vector_from_val (step_vectype, t); - vec_step = vect_init_vector (stmt_info, new_vec, step_vectype, NULL); + vec_step = vect_init_vector (loop_vinfo, stmt_info, + new_vec, step_vectype, NULL); vec_def = induc_def; prev_stmt_vinfo = induction_phi_info; @@ -7847,13 +7871,13 @@ vectorizable_induction (stmt_vec_info stmt_info, it can be supported. 
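   Sketch of the transform (illustrative): for a scalar result that is
   live after the loop, the value is recovered from the last vector
   copy by extracting its final lane, roughly

     lhs' = BIT_FIELD_REF <vec_lhs, bitsize, (nunits - 1) * bitsize>

   and the out-of-loop uses of the original scalar are redirected to
   lhs'.  For reductions the epilogue code is generated by
   vect_create_epilog_for_reduction instead, as the call below shows.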
*/ bool -vectorizable_live_operation (stmt_vec_info stmt_info, +vectorizable_live_operation (loop_vec_info loop_vinfo, + stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, slp_tree slp_node, slp_instance slp_node_instance, int slp_index, bool vec_stmt_p, stmt_vector_for_cost *) { - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); class loop *loop = LOOP_VINFO_LOOP (loop_vinfo); imm_use_iterator imm_iter; tree lhs, lhs_type, bitsize, vec_bitsize; @@ -7885,12 +7909,12 @@ vectorizable_live_operation (stmt_vec_info stmt_info, else if (slp_index != 0) return true; } - stmt_vec_info reduc_info = info_for_reduction (stmt_info); + stmt_vec_info reduc_info = info_for_reduction (loop_vinfo, stmt_info); gcc_assert (reduc_info->is_reduc_info); if (STMT_VINFO_REDUC_TYPE (reduc_info) == FOLD_LEFT_REDUCTION || STMT_VINFO_REDUC_TYPE (reduc_info) == EXTRACT_LAST_REDUCTION) return true; - vect_create_epilog_for_reduction (stmt_info, slp_node, + vect_create_epilog_for_reduction (loop_vinfo, stmt_info, slp_node, slp_node_instance); return true; } @@ -8371,7 +8395,7 @@ vect_transform_loop_stmt (loop_vec_info loop_vinfo, stmt_vec_info stmt_info, if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n"); - if (vect_transform_stmt (stmt_info, gsi, NULL, NULL)) + if (vect_transform_stmt (loop_vinfo, stmt_info, gsi, NULL, NULL)) *seen_store = stmt_info; } @@ -8730,7 +8754,7 @@ vect_transform_loop (loop_vec_info loop_vinfo, gimple *loop_vectorized_call) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n"); - vect_transform_stmt (stmt_info, NULL, NULL, NULL); + vect_transform_stmt (loop_vinfo, stmt_info, NULL, NULL, NULL); } } @@ -8781,7 +8805,8 @@ vect_transform_loop (loop_vec_info loop_vinfo, gimple *loop_vectorized_call) /* Interleaving. If IS_STORE is TRUE, the vectorization of the interleaving chain was completed - free all the stores in the chain. */ - vect_remove_stores (DR_GROUP_FIRST_ELEMENT (seen_store)); + vect_remove_stores (loop_vinfo, + DR_GROUP_FIRST_ELEMENT (seen_store)); else /* Free the attached stmt_vec_info and remove the stmt. */ loop_vinfo->remove_stmt (stmt_info); diff --git a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c index dd0c19dc5e5..1f148a0e620 100644 --- a/gcc/tree-vect-patterns.c +++ b/gcc/tree-vect-patterns.c @@ -98,13 +98,12 @@ vect_pattern_detected (const char *name, gimple *stmt) VECTYPE if it doesn't have one already. */ static stmt_vec_info -vect_init_pattern_stmt (gimple *pattern_stmt, stmt_vec_info orig_stmt_info, - tree vectype) +vect_init_pattern_stmt (vec_info *vinfo, gimple *pattern_stmt, + stmt_vec_info orig_stmt_info, tree vectype) { - vec_info *vinfo = orig_stmt_info->vinfo; stmt_vec_info pattern_stmt_info = vinfo->lookup_stmt (pattern_stmt); if (pattern_stmt_info == NULL) - pattern_stmt_info = orig_stmt_info->vinfo->add_stmt (pattern_stmt); + pattern_stmt_info = vinfo->add_stmt (pattern_stmt); gimple_set_bb (pattern_stmt, gimple_bb (orig_stmt_info->stmt)); pattern_stmt_info->pattern_stmt_p = true; @@ -126,12 +125,12 @@ vect_init_pattern_stmt (gimple *pattern_stmt, stmt_vec_info orig_stmt_info, have one already. 
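   For instance (illustrative only): if the original statement

     S:  x = a * b;

   is replaced by the pattern statement

     S': x' = WIDEN_MULT_EXPR <a, b>;

   then STMT_VINFO_IN_PATTERN_P (S) becomes true and
   STMT_VINFO_RELATED_STMT (S) points to S', which is exactly what the
   two assignments in this function implement.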
*/ static void -vect_set_pattern_stmt (gimple *pattern_stmt, stmt_vec_info orig_stmt_info, - tree vectype) +vect_set_pattern_stmt (vec_info *vinfo, gimple *pattern_stmt, + stmt_vec_info orig_stmt_info, tree vectype) { STMT_VINFO_IN_PATTERN_P (orig_stmt_info) = true; STMT_VINFO_RELATED_STMT (orig_stmt_info) - = vect_init_pattern_stmt (pattern_stmt, orig_stmt_info, vectype); + = vect_init_pattern_stmt (vinfo, pattern_stmt, orig_stmt_info, vectype); } /* Add NEW_STMT to STMT_INFO's pattern definition statements. If VECTYPE @@ -141,13 +140,13 @@ vect_set_pattern_stmt (gimple *pattern_stmt, stmt_vec_info orig_stmt_info, from which it was derived. */ static inline void -append_pattern_def_seq (stmt_vec_info stmt_info, gimple *new_stmt, +append_pattern_def_seq (vec_info *vinfo, + stmt_vec_info stmt_info, gimple *new_stmt, tree vectype = NULL_TREE, tree scalar_type_for_mask = NULL_TREE) { gcc_assert (!scalar_type_for_mask == (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))); - vec_info *vinfo = stmt_info->vinfo; if (vectype) { stmt_vec_info new_stmt_info = vinfo->add_stmt (new_stmt); @@ -256,7 +255,7 @@ vect_get_internal_def (vec_info *vinfo, tree op) unsigned. */ static bool -type_conversion_p (tree name, stmt_vec_info stmt_vinfo, bool check_sign, +type_conversion_p (vec_info *vinfo, tree name, bool check_sign, tree *orig_type, gimple **def_stmt, bool *promotion) { tree type = TREE_TYPE (name); @@ -264,8 +263,7 @@ type_conversion_p (tree name, stmt_vec_info stmt_vinfo, bool check_sign, enum vect_def_type dt; stmt_vec_info def_stmt_info; - if (!vect_is_simple_use (name, stmt_vinfo->vinfo, &dt, &def_stmt_info, - def_stmt)) + if (!vect_is_simple_use (name, vinfo, &dt, &def_stmt_info, def_stmt)) return false; if (dt != vect_internal_def @@ -293,7 +291,7 @@ type_conversion_p (tree name, stmt_vec_info stmt_vinfo, bool check_sign, else *promotion = false; - if (!vect_is_simple_use (oprnd0, stmt_vinfo->vinfo, &dt)) + if (!vect_is_simple_use (oprnd0, vinfo, &dt)) return false; return true; @@ -538,13 +536,12 @@ vect_joust_widened_type (tree type, tree new_type, tree *common_type) exists. */ static unsigned int -vect_widened_op_tree (stmt_vec_info stmt_info, tree_code code, +vect_widened_op_tree (vec_info *vinfo, stmt_vec_info stmt_info, tree_code code, tree_code widened_code, bool shift_p, unsigned int max_nops, vect_unpromoted_value *unprom, tree *common_type) { /* Check for an integer operation with the right code. */ - vec_info *vinfo = stmt_info->vinfo; gassign *assign = dyn_cast (stmt_info->stmt); if (!assign) return 0; @@ -581,8 +578,7 @@ vect_widened_op_tree (stmt_vec_info stmt_info, tree_code code, if (shift_p && i == 1) return 0; - if (!vect_look_through_possible_promotion (stmt_info->vinfo, op, - this_unprom)) + if (!vect_look_through_possible_promotion (vinfo, op, this_unprom)) return 0; if (TYPE_PRECISION (this_unprom->type) == TYPE_PRECISION (type)) @@ -602,9 +598,9 @@ vect_widened_op_tree (stmt_vec_info stmt_info, tree_code code, /* Recursively process the definition of the operand. */ stmt_vec_info def_stmt_info = vinfo->lookup_def (this_unprom->op); - nops = vect_widened_op_tree (def_stmt_info, code, widened_code, - shift_p, max_nops, this_unprom, - common_type); + nops = vect_widened_op_tree (vinfo, def_stmt_info, code, + widened_code, shift_p, max_nops, + this_unprom, common_type); if (nops == 0) return 0; @@ -645,16 +641,15 @@ vect_recog_temp_ssa_var (tree type, gimple *stmt) success. 
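   An illustrative use (not from the patch): the single conversion

     S2: x = (unsigned int) c;      where c has type char

   can be split into

     S1:  tmp = (unsigned short) c;
     S2': x = (unsigned int) tmp;

   so that later patterns can do the bulk of the computation in the
   narrower intermediate type.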
*/ static bool -vect_split_statement (stmt_vec_info stmt2_info, tree new_rhs, +vect_split_statement (vec_info *vinfo, stmt_vec_info stmt2_info, tree new_rhs, gimple *stmt1, tree vectype) { - vec_info *vinfo = stmt2_info->vinfo; if (is_pattern_stmt_p (stmt2_info)) { /* STMT2_INFO is part of a pattern. Get the statement to which the pattern is attached. */ stmt_vec_info orig_stmt2_info = STMT_VINFO_RELATED_STMT (stmt2_info); - vect_init_pattern_stmt (stmt1, orig_stmt2_info, vectype); + vect_init_pattern_stmt (vinfo, stmt1, orig_stmt2_info, vectype); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, @@ -702,13 +697,13 @@ vect_split_statement (stmt_vec_info stmt2_info, tree new_rhs, /* Add STMT1 as a singleton pattern definition sequence. */ gimple_seq *def_seq = &STMT_VINFO_PATTERN_DEF_SEQ (stmt2_info); - vect_init_pattern_stmt (stmt1, stmt2_info, vectype); + vect_init_pattern_stmt (vinfo, stmt1, stmt2_info, vectype); gimple_seq_add_stmt_without_update (def_seq, stmt1); /* Build the second of the two pattern statements. */ tree new_lhs = vect_recog_temp_ssa_var (lhs_type, NULL); gassign *new_stmt2 = gimple_build_assign (new_lhs, NOP_EXPR, new_rhs); - vect_set_pattern_stmt (new_stmt2, stmt2_info, lhs_vectype); + vect_set_pattern_stmt (vinfo, new_stmt2, stmt2_info, lhs_vectype); if (dump_enabled_p ()) { @@ -726,11 +721,9 @@ vect_split_statement (stmt_vec_info stmt2_info, tree new_rhs, available. VECTYPE is the vector form of TYPE. */ static tree -vect_convert_input (stmt_vec_info stmt_info, tree type, +vect_convert_input (vec_info *vinfo, stmt_vec_info stmt_info, tree type, vect_unpromoted_value *unprom, tree vectype) { - vec_info *vinfo = stmt_info->vinfo; - /* Check for a no-op conversion. */ if (types_compatible_p (type, TREE_TYPE (unprom->op))) return unprom->op; @@ -774,9 +767,10 @@ vect_convert_input (stmt_vec_info stmt_info, tree type, input = vect_recog_temp_ssa_var (midtype, NULL); gassign *new_stmt = gimple_build_assign (input, NOP_EXPR, unprom->op); - if (!vect_split_statement (unprom->caster, input, new_stmt, + if (!vect_split_statement (vinfo, unprom->caster, input, new_stmt, vec_midtype)) - append_pattern_def_seq (stmt_info, new_stmt, vec_midtype); + append_pattern_def_seq (vinfo, stmt_info, + new_stmt, vec_midtype); } } @@ -792,7 +786,7 @@ vect_convert_input (stmt_vec_info stmt_info, tree type, /* If OP is an external value, see if we can insert the new statement on an incoming edge. */ if (input == unprom->op && unprom->dt == vect_external_def) - if (edge e = vect_get_external_def_edge (stmt_info->vinfo, input)) + if (edge e = vect_get_external_def_edge (vinfo, input)) { basic_block new_bb = gsi_insert_on_edge_immediate (e, new_stmt); gcc_assert (!new_bb); @@ -800,7 +794,7 @@ vect_convert_input (stmt_vec_info stmt_info, tree type, } /* As a (common) last resort, add the statement to the pattern itself. */ - append_pattern_def_seq (stmt_info, new_stmt, vectype); + append_pattern_def_seq (vinfo, stmt_info, new_stmt, vectype); return new_op; } @@ -808,7 +802,7 @@ vect_convert_input (stmt_vec_info stmt_info, tree type, result in the corresponding elements of RESULT. 
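   For example (sketch): with N = 2, TYPE = short and two unpromoted
   char operands a and b, this fills

     result[0] = (short) a;
     result[1] = (short) b;

   reusing result[J] for result[I] whenever UNPROM[I] and UNPROM[J]
   name the same operand, as the J < I test in the loop shows.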
*/ static void -vect_convert_inputs (stmt_vec_info stmt_info, unsigned int n, +vect_convert_inputs (vec_info *vinfo, stmt_vec_info stmt_info, unsigned int n, tree *result, tree type, vect_unpromoted_value *unprom, tree vectype) { @@ -821,7 +815,8 @@ vect_convert_inputs (stmt_vec_info stmt_info, unsigned int n, if (j < i) result[i] = result[j]; else - result[i] = vect_convert_input (stmt_info, type, &unprom[i], vectype); + result[i] = vect_convert_input (vinfo, stmt_info, + type, &unprom[i], vectype); } } @@ -833,13 +828,13 @@ vect_convert_inputs (stmt_vec_info stmt_info, unsigned int n, VECITYPE is the vector form of PATTERN_STMT's result type. */ static gimple * -vect_convert_output (stmt_vec_info stmt_info, tree type, gimple *pattern_stmt, - tree vecitype) +vect_convert_output (vec_info *vinfo, stmt_vec_info stmt_info, tree type, + gimple *pattern_stmt, tree vecitype) { tree lhs = gimple_get_lhs (pattern_stmt); if (!types_compatible_p (type, TREE_TYPE (lhs))) { - append_pattern_def_seq (stmt_info, pattern_stmt, vecitype); + append_pattern_def_seq (vinfo, stmt_info, pattern_stmt, vecitype); tree cast_var = vect_recog_temp_ssa_var (type, NULL); pattern_stmt = gimple_build_assign (cast_var, NOP_EXPR, lhs); } @@ -855,10 +850,11 @@ vect_convert_output (stmt_vec_info stmt_info, tree type, gimple *pattern_stmt, *OP0_OUT and *OP1_OUT. */ static bool -vect_reassociating_reduction_p (stmt_vec_info stmt_info, tree_code code, +vect_reassociating_reduction_p (vec_info *vinfo, + stmt_vec_info stmt_info, tree_code code, tree *op0_out, tree *op1_out) { - loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vec_info loop_info = dyn_cast (vinfo); if (!loop_info) return false; @@ -932,11 +928,11 @@ vect_reassociating_reduction_p (stmt_vec_info stmt_info, tree_code code, inner-loop nested in an outer-loop that us being vectorized). */ static gimple * -vect_recog_dot_prod_pattern (stmt_vec_info stmt_vinfo, tree *type_out) +vect_recog_dot_prod_pattern (vec_info *vinfo, + stmt_vec_info stmt_vinfo, tree *type_out) { tree oprnd0, oprnd1; gimple *last_stmt = stmt_vinfo->stmt; - vec_info *vinfo = stmt_vinfo->vinfo; tree type, half_type; gimple *pattern_stmt; tree var; @@ -965,7 +961,7 @@ vect_recog_dot_prod_pattern (stmt_vec_info stmt_vinfo, tree *type_out) /* Starting from LAST_STMT, follow the defs of its uses in search of the above pattern. */ - if (!vect_reassociating_reduction_p (stmt_vinfo, PLUS_EXPR, + if (!vect_reassociating_reduction_p (vinfo, stmt_vinfo, PLUS_EXPR, &oprnd0, &oprnd1)) return NULL; @@ -988,7 +984,7 @@ vect_recog_dot_prod_pattern (stmt_vec_info stmt_vinfo, tree *type_out) /* FORNOW. Can continue analyzing the def-use chain when this stmt in a phi inside the loop (in case we are analyzing an outer-loop). */ vect_unpromoted_value unprom0[2]; - if (!vect_widened_op_tree (mult_vinfo, MULT_EXPR, WIDEN_MULT_EXPR, + if (!vect_widened_op_tree (vinfo, mult_vinfo, MULT_EXPR, WIDEN_MULT_EXPR, false, 2, unprom0, &half_type)) return NULL; @@ -1007,7 +1003,7 @@ vect_recog_dot_prod_pattern (stmt_vec_info stmt_vinfo, tree *type_out) /* Get the inputs in the appropriate types. 
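   (Illustratively: for the canonical dot product

      s += (int) a[i] * (int) b[i]

   with a and b of type signed char, mult_oprnd[] receives the two
   unpromoted char values in half_type, ready to feed the
   DOT_PROD_EXPR built below.)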
*/ tree mult_oprnd[2]; - vect_convert_inputs (stmt_vinfo, 2, mult_oprnd, half_type, + vect_convert_inputs (vinfo, stmt_vinfo, 2, mult_oprnd, half_type, unprom0, half_vectype); var = vect_recog_temp_ssa_var (type, NULL); @@ -1056,10 +1052,10 @@ vect_recog_dot_prod_pattern (stmt_vec_info stmt_vinfo, tree *type_out) */ static gimple * -vect_recog_sad_pattern (stmt_vec_info stmt_vinfo, tree *type_out) +vect_recog_sad_pattern (vec_info *vinfo, + stmt_vec_info stmt_vinfo, tree *type_out) { gimple *last_stmt = stmt_vinfo->stmt; - vec_info *vinfo = stmt_vinfo->vinfo; tree half_type; /* Look for the following pattern @@ -1090,7 +1086,7 @@ vect_recog_sad_pattern (stmt_vec_info stmt_vinfo, tree *type_out) of the above pattern. */ tree plus_oprnd0, plus_oprnd1; - if (!vect_reassociating_reduction_p (stmt_vinfo, PLUS_EXPR, + if (!vect_reassociating_reduction_p (vinfo, stmt_vinfo, PLUS_EXPR, &plus_oprnd0, &plus_oprnd1)) return NULL; @@ -1152,7 +1148,7 @@ vect_recog_sad_pattern (stmt_vec_info stmt_vinfo, tree *type_out) /* FORNOW. Can continue analyzing the def-use chain when this stmt in a phi inside the loop (in case we are analyzing an outer-loop). */ vect_unpromoted_value unprom[2]; - if (!vect_widened_op_tree (diff_stmt_vinfo, MINUS_EXPR, MINUS_EXPR, + if (!vect_widened_op_tree (vinfo, diff_stmt_vinfo, MINUS_EXPR, MINUS_EXPR, false, 2, unprom, &half_type)) return NULL; @@ -1165,7 +1161,7 @@ vect_recog_sad_pattern (stmt_vec_info stmt_vinfo, tree *type_out) /* Get the inputs to the SAD_EXPR in the appropriate types. */ tree sad_oprnd[2]; - vect_convert_inputs (stmt_vinfo, 2, sad_oprnd, half_type, + vect_convert_inputs (vinfo, stmt_vinfo, 2, sad_oprnd, half_type, unprom, half_vectype); tree var = vect_recog_temp_ssa_var (sum_type, NULL); @@ -1201,16 +1197,16 @@ vect_recog_sad_pattern (stmt_vec_info stmt_vinfo, tree *type_out) name of the pattern being matched, for dump purposes. */ static gimple * -vect_recog_widen_op_pattern (stmt_vec_info last_stmt_info, tree *type_out, +vect_recog_widen_op_pattern (vec_info *vinfo, + stmt_vec_info last_stmt_info, tree *type_out, tree_code orig_code, tree_code wide_code, bool shift_p, const char *name) { - vec_info *vinfo = last_stmt_info->vinfo; gimple *last_stmt = last_stmt_info->stmt; vect_unpromoted_value unprom[2]; tree half_type; - if (!vect_widened_op_tree (last_stmt_info, orig_code, orig_code, + if (!vect_widened_op_tree (vinfo, last_stmt_info, orig_code, orig_code, shift_p, 2, unprom, &half_type)) return NULL; @@ -1232,7 +1228,7 @@ vect_recog_widen_op_pattern (stmt_vec_info last_stmt_info, tree *type_out, auto_vec dummy_vec; if (!vectype || !vecitype - || !supportable_widening_operation (wide_code, last_stmt_info, + || !supportable_widening_operation (vinfo, wide_code, last_stmt_info, vecitype, vectype, &dummy_code, &dummy_code, &dummy_int, &dummy_vec)) @@ -1243,23 +1239,26 @@ vect_recog_widen_op_pattern (stmt_vec_info last_stmt_info, tree *type_out, return NULL; tree oprnd[2]; - vect_convert_inputs (last_stmt_info, 2, oprnd, half_type, unprom, vectype); + vect_convert_inputs (vinfo, last_stmt_info, + 2, oprnd, half_type, unprom, vectype); tree var = vect_recog_temp_ssa_var (itype, NULL); gimple *pattern_stmt = gimple_build_assign (var, wide_code, oprnd[0], oprnd[1]); - return vect_convert_output (last_stmt_info, type, pattern_stmt, vecitype); + return vect_convert_output (vinfo, last_stmt_info, + type, pattern_stmt, vecitype); } /* Try to detect multiplication on widened inputs, converting MULT_EXPR to WIDEN_MULT_EXPR. 
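   For example (illustrative):

     int a, b;
     long c = (long) a * (long) b;

   becomes c = a w* b, a single widening multiplication on the
   narrower inputs.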
See vect_recog_widen_op_pattern for details. */ static gimple * -vect_recog_widen_mult_pattern (stmt_vec_info last_stmt_info, tree *type_out) +vect_recog_widen_mult_pattern (vec_info *vinfo, stmt_vec_info last_stmt_info, + tree *type_out) { - return vect_recog_widen_op_pattern (last_stmt_info, type_out, MULT_EXPR, - WIDEN_MULT_EXPR, false, + return vect_recog_widen_op_pattern (vinfo, last_stmt_info, type_out, + MULT_EXPR, WIDEN_MULT_EXPR, false, "vect_recog_widen_mult_pattern"); } @@ -1288,9 +1287,9 @@ vect_recog_widen_mult_pattern (stmt_vec_info last_stmt_info, tree *type_out) */ static gimple * -vect_recog_pow_pattern (stmt_vec_info stmt_vinfo, tree *type_out) +vect_recog_pow_pattern (vec_info *vinfo, + stmt_vec_info stmt_vinfo, tree *type_out) { - vec_info *vinfo = stmt_vinfo->vinfo; gimple *last_stmt = stmt_vinfo->stmt; tree base, exp; gimple *stmt; @@ -1364,7 +1363,7 @@ vect_recog_pow_pattern (stmt_vec_info stmt_vinfo, tree *type_out) return NULL; tree def = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL); gimple *g = gimple_build_assign (def, MULT_EXPR, exp, logc); - append_pattern_def_seq (stmt_vinfo, g); + append_pattern_def_seq (vinfo, stmt_vinfo, g); tree res = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL); g = gimple_build_call (exp_decl, 1, def); gimple_call_set_lhs (g, res); @@ -1452,11 +1451,11 @@ vect_recog_pow_pattern (stmt_vec_info stmt_vinfo, tree *type_out) inner-loop nested in an outer-loop that us being vectorized). */ static gimple * -vect_recog_widen_sum_pattern (stmt_vec_info stmt_vinfo, tree *type_out) +vect_recog_widen_sum_pattern (vec_info *vinfo, + stmt_vec_info stmt_vinfo, tree *type_out) { gimple *last_stmt = stmt_vinfo->stmt; tree oprnd0, oprnd1; - vec_info *vinfo = stmt_vinfo->vinfo; tree type; gimple *pattern_stmt; tree var; @@ -1471,7 +1470,7 @@ vect_recog_widen_sum_pattern (stmt_vec_info stmt_vinfo, tree *type_out) /* Starting from LAST_STMT, follow the defs of its uses in search of the above pattern. */ - if (!vect_reassociating_reduction_p (stmt_vinfo, PLUS_EXPR, + if (!vect_reassociating_reduction_p (vinfo, stmt_vinfo, PLUS_EXPR, &oprnd0, &oprnd1)) return NULL; @@ -1540,7 +1539,8 @@ vect_recog_widen_sum_pattern (stmt_vec_info stmt_vinfo, tree *type_out) by users of the result. */ static gimple * -vect_recog_over_widening_pattern (stmt_vec_info last_stmt_info, tree *type_out) +vect_recog_over_widening_pattern (vec_info *vinfo, + stmt_vec_info last_stmt_info, tree *type_out) { gassign *last_stmt = dyn_cast (last_stmt_info->stmt); if (!last_stmt) @@ -1552,7 +1552,6 @@ vect_recog_over_widening_pattern (stmt_vec_info last_stmt_info, tree *type_out) if (!new_precision) return NULL; - vec_info *vinfo = last_stmt_info->vinfo; tree lhs = gimple_assign_lhs (last_stmt); tree type = TREE_TYPE (lhs); tree_code code = gimple_assign_rhs_code (last_stmt); @@ -1716,7 +1715,7 @@ vect_recog_over_widening_pattern (stmt_vec_info last_stmt_info, tree *type_out) tree ops[3] = {}; for (unsigned int i = 1; i < first_op; ++i) ops[i - 1] = gimple_op (last_stmt, i); - vect_convert_inputs (last_stmt_info, nops, &ops[first_op - 1], + vect_convert_inputs (vinfo, last_stmt_info, nops, &ops[first_op - 1], op_type, &unprom[0], op_vectype); /* Use the operation to produce a result of type OP_TYPE. */ @@ -1732,11 +1731,11 @@ vect_recog_over_widening_pattern (stmt_vec_info last_stmt_info, tree *type_out) /* Convert back to the original signedness, if OP_TYPE is different from NEW_TYPE. 
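   (Illustrative note: the operation may have been carried out in, say,
   unsigned short so that intermediate overflow stays well defined even
   though NEW_TYPE is short; the extra cast restores the signedness
   before the final promotion to the original type below.)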
*/ if (op_type != new_type) - pattern_stmt = vect_convert_output (last_stmt_info, new_type, + pattern_stmt = vect_convert_output (vinfo, last_stmt_info, new_type, pattern_stmt, op_vectype); /* Promote the result to the original type. */ - pattern_stmt = vect_convert_output (last_stmt_info, type, + pattern_stmt = vect_convert_output (vinfo, last_stmt_info, type, pattern_stmt, new_vectype); return pattern_stmt; @@ -1755,14 +1754,14 @@ vect_recog_over_widening_pattern (stmt_vec_info last_stmt_info, tree *type_out) where only the bottom half of res is used. */ static gimple * -vect_recog_mulhs_pattern (stmt_vec_info last_stmt_info, tree *type_out) +vect_recog_mulhs_pattern (vec_info *vinfo, + stmt_vec_info last_stmt_info, tree *type_out) { /* Check for a right shift. */ gassign *last_stmt = dyn_cast (last_stmt_info->stmt); if (!last_stmt || gimple_assign_rhs_code (last_stmt) != RSHIFT_EXPR) return NULL; - vec_info *vinfo = last_stmt_info->vinfo; /* Check that the shift result is wider than the users of the result need (i.e. that narrowing would be a natural choice). */ @@ -1868,7 +1867,7 @@ vect_recog_mulhs_pattern (stmt_vec_info last_stmt_info, tree *type_out) vect_unpromoted_value unprom_mult[2]; tree new_type; unsigned int nops - = vect_widened_op_tree (mulh_stmt_info, MULT_EXPR, WIDEN_MULT_EXPR, + = vect_widened_op_tree (vinfo, mulh_stmt_info, MULT_EXPR, WIDEN_MULT_EXPR, false, 2, unprom_mult, &new_type); if (nops != 2) return NULL; @@ -1896,7 +1895,7 @@ vect_recog_mulhs_pattern (stmt_vec_info last_stmt_info, tree *type_out) /* Generate the IFN_MULHRS call. */ tree new_var = vect_recog_temp_ssa_var (new_type, NULL); tree new_ops[2]; - vect_convert_inputs (last_stmt_info, 2, new_ops, new_type, + vect_convert_inputs (vinfo, last_stmt_info, 2, new_ops, new_type, unprom_mult, new_vectype); gcall *mulhrs_stmt = gimple_build_call_internal (ifn, 2, new_ops[0], new_ops[1]); @@ -1907,7 +1906,7 @@ vect_recog_mulhs_pattern (stmt_vec_info last_stmt_info, tree *type_out) dump_printf_loc (MSG_NOTE, vect_location, "created pattern stmt: %G", mulhrs_stmt); - return vect_convert_output (last_stmt_info, lhs_type, + return vect_convert_output (vinfo, last_stmt_info, lhs_type, mulhrs_stmt, new_vectype); } @@ -1934,11 +1933,11 @@ vect_recog_mulhs_pattern (stmt_vec_info last_stmt_info, tree *type_out) over plus and add a carry. */ static gimple * -vect_recog_average_pattern (stmt_vec_info last_stmt_info, tree *type_out) +vect_recog_average_pattern (vec_info *vinfo, + stmt_vec_info last_stmt_info, tree *type_out) { /* Check for a shift right by one bit. 
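   (Illustratively, the shape being matched here is

      res = (narrow) (((wide) a + (wide) b + 1) >> 1);

   which maps to IFN_AVG_CEIL, or to IFN_AVG_FLOOR when the + 1 is
   absent.)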
*/ gassign *last_stmt = dyn_cast (last_stmt_info->stmt); - vec_info *vinfo = last_stmt_info->vinfo; if (!last_stmt || gimple_assign_rhs_code (last_stmt) != RSHIFT_EXPR || !integer_onep (gimple_assign_rhs2 (last_stmt))) @@ -1976,7 +1975,7 @@ vect_recog_average_pattern (stmt_vec_info last_stmt_info, tree *type_out) internal_fn ifn = IFN_AVG_FLOOR; vect_unpromoted_value unprom[3]; tree new_type; - unsigned int nops = vect_widened_op_tree (plus_stmt_info, PLUS_EXPR, + unsigned int nops = vect_widened_op_tree (vinfo, plus_stmt_info, PLUS_EXPR, PLUS_EXPR, false, 3, unprom, &new_type); if (nops == 0) @@ -2059,7 +2058,7 @@ vect_recog_average_pattern (stmt_vec_info last_stmt_info, tree *type_out) tree new_var = vect_recog_temp_ssa_var (new_type, NULL); tree new_ops[2]; - vect_convert_inputs (last_stmt_info, 2, new_ops, new_type, + vect_convert_inputs (vinfo, last_stmt_info, 2, new_ops, new_type, unprom, new_vectype); if (fallback_p) @@ -2079,28 +2078,28 @@ vect_recog_average_pattern (stmt_vec_info last_stmt_info, tree *type_out) tree shifted_op0 = vect_recog_temp_ssa_var (new_type, NULL); g = gimple_build_assign (shifted_op0, RSHIFT_EXPR, new_ops[0], one_cst); - append_pattern_def_seq (last_stmt_info, g, new_vectype); + append_pattern_def_seq (vinfo, last_stmt_info, g, new_vectype); tree shifted_op1 = vect_recog_temp_ssa_var (new_type, NULL); g = gimple_build_assign (shifted_op1, RSHIFT_EXPR, new_ops[1], one_cst); - append_pattern_def_seq (last_stmt_info, g, new_vectype); + append_pattern_def_seq (vinfo, last_stmt_info, g, new_vectype); tree sum_of_shifted = vect_recog_temp_ssa_var (new_type, NULL); g = gimple_build_assign (sum_of_shifted, PLUS_EXPR, shifted_op0, shifted_op1); - append_pattern_def_seq (last_stmt_info, g, new_vectype); + append_pattern_def_seq (vinfo, last_stmt_info, g, new_vectype); tree unmasked_carry = vect_recog_temp_ssa_var (new_type, NULL); tree_code c = (ifn == IFN_AVG_CEIL) ? BIT_IOR_EXPR : BIT_AND_EXPR; g = gimple_build_assign (unmasked_carry, c, new_ops[0], new_ops[1]); - append_pattern_def_seq (last_stmt_info, g, new_vectype); + append_pattern_def_seq (vinfo, last_stmt_info, g, new_vectype); tree carry = vect_recog_temp_ssa_var (new_type, NULL); g = gimple_build_assign (carry, BIT_AND_EXPR, unmasked_carry, one_cst); - append_pattern_def_seq (last_stmt_info, g, new_vectype); + append_pattern_def_seq (vinfo, last_stmt_info, g, new_vectype); g = gimple_build_assign (new_var, PLUS_EXPR, sum_of_shifted, carry); - return vect_convert_output (last_stmt_info, type, g, new_vectype); + return vect_convert_output (vinfo, last_stmt_info, type, g, new_vectype); } /* Generate the IFN_AVG* call. */ @@ -2113,7 +2112,8 @@ vect_recog_average_pattern (stmt_vec_info last_stmt_info, tree *type_out) dump_printf_loc (MSG_NOTE, vect_location, "created pattern stmt: %G", average_stmt); - return vect_convert_output (last_stmt_info, type, average_stmt, new_vectype); + return vect_convert_output (vinfo, last_stmt_info, + type, average_stmt, new_vectype); } /* Recognize cases in which the input to a cast is wider than its @@ -2136,7 +2136,8 @@ vect_recog_average_pattern (stmt_vec_info last_stmt_info, tree *type_out) input doesn't. */ static gimple * -vect_recog_cast_forwprop_pattern (stmt_vec_info last_stmt_info, tree *type_out) +vect_recog_cast_forwprop_pattern (vec_info *vinfo, + stmt_vec_info last_stmt_info, tree *type_out) { /* Check for a cast, including an integer-to-float conversion. 
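   (Illustrative example of the whole pattern:

      unsigned char a;
      unsigned int b = (unsigned int) a;
      unsigned short c = (unsigned short) b;

   can drop the intermediate widening and convert a to unsigned short
   directly.)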
*/ gassign *last_stmt = dyn_cast (last_stmt_info->stmt); @@ -2165,7 +2166,6 @@ vect_recog_cast_forwprop_pattern (stmt_vec_info last_stmt_info, tree *type_out) return NULL; /* Try to find an unpromoted input. */ - vec_info *vinfo = last_stmt_info->vinfo; vect_unpromoted_value unprom; if (!vect_look_through_possible_promotion (vinfo, rhs, &unprom) || TYPE_PRECISION (unprom.type) >= TYPE_PRECISION (rhs_type)) @@ -2196,10 +2196,11 @@ vect_recog_cast_forwprop_pattern (stmt_vec_info last_stmt_info, tree *type_out) to WIDEN_LSHIFT_EXPR. See vect_recog_widen_op_pattern for details. */ static gimple * -vect_recog_widen_shift_pattern (stmt_vec_info last_stmt_info, tree *type_out) +vect_recog_widen_shift_pattern (vec_info *vinfo, + stmt_vec_info last_stmt_info, tree *type_out) { - return vect_recog_widen_op_pattern (last_stmt_info, type_out, LSHIFT_EXPR, - WIDEN_LSHIFT_EXPR, true, + return vect_recog_widen_op_pattern (vinfo, last_stmt_info, type_out, + LSHIFT_EXPR, WIDEN_LSHIFT_EXPR, true, "vect_recog_widen_shift_pattern"); } @@ -2231,13 +2232,13 @@ vect_recog_widen_shift_pattern (stmt_vec_info last_stmt_info, tree *type_out) S0 stmt. */ static gimple * -vect_recog_rotate_pattern (stmt_vec_info stmt_vinfo, tree *type_out) +vect_recog_rotate_pattern (vec_info *vinfo, + stmt_vec_info stmt_vinfo, tree *type_out) { gimple *last_stmt = stmt_vinfo->stmt; tree oprnd0, oprnd1, lhs, var, var1, var2, vectype, type, stype, def, def2; gimple *pattern_stmt, *def_stmt; enum tree_code rhs_code; - vec_info *vinfo = stmt_vinfo->vinfo; enum vect_def_type dt; optab optab1, optab2; edge ext_def = NULL; @@ -2315,7 +2316,7 @@ vect_recog_rotate_pattern (stmt_vec_info stmt_vinfo, tree *type_out) { def = vect_recog_temp_ssa_var (type, NULL); def_stmt = gimple_build_assign (def, NOP_EXPR, oprnd0); - append_pattern_def_seq (stmt_vinfo, def_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt); oprnd0 = def; } @@ -2375,7 +2376,7 @@ vect_recog_rotate_pattern (stmt_vec_info stmt_vinfo, tree *type_out) { def = vect_recog_temp_ssa_var (type, NULL); def_stmt = gimple_build_assign (def, NOP_EXPR, oprnd0); - append_pattern_def_seq (stmt_vinfo, def_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt); oprnd0 = def; } @@ -2428,7 +2429,7 @@ vect_recog_rotate_pattern (stmt_vec_info stmt_vinfo, tree *type_out) { def = vect_recog_temp_ssa_var (type, NULL); def_stmt = gimple_build_assign (def, NOP_EXPR, oprnd0); - append_pattern_def_seq (stmt_vinfo, def_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt); oprnd0 = def; } @@ -2452,7 +2453,7 @@ vect_recog_rotate_pattern (stmt_vec_info stmt_vinfo, tree *type_out) { def = vect_recog_temp_ssa_var (type, NULL); def_stmt = gimple_build_assign (def, NOP_EXPR, oprnd1); - append_pattern_def_seq (stmt_vinfo, def_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt); } stype = TREE_TYPE (def); scalar_int_mode smode = SCALAR_INT_TYPE_MODE (stype); @@ -2481,7 +2482,7 @@ vect_recog_rotate_pattern (stmt_vec_info stmt_vinfo, tree *type_out) gcc_assert (!new_bb); } else - append_pattern_def_seq (stmt_vinfo, def_stmt, vecstype); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt, vecstype); def2 = vect_recog_temp_ssa_var (stype, NULL); tree mask = build_int_cst (stype, GET_MODE_PRECISION (smode) - 1); @@ -2494,20 +2495,20 @@ vect_recog_rotate_pattern (stmt_vec_info stmt_vinfo, tree *type_out) gcc_assert (!new_bb); } else - append_pattern_def_seq (stmt_vinfo, def_stmt, vecstype); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt, vecstype); } var1 = 
vect_recog_temp_ssa_var (type, NULL); def_stmt = gimple_build_assign (var1, rhs_code == LROTATE_EXPR ? LSHIFT_EXPR : RSHIFT_EXPR, oprnd0, def); - append_pattern_def_seq (stmt_vinfo, def_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt); var2 = vect_recog_temp_ssa_var (type, NULL); def_stmt = gimple_build_assign (var2, rhs_code == LROTATE_EXPR ? RSHIFT_EXPR : LSHIFT_EXPR, oprnd0, def2); - append_pattern_def_seq (stmt_vinfo, def_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt); /* Pattern detected. */ vect_pattern_detected ("vect_recog_rotate_pattern", last_stmt); @@ -2558,14 +2559,14 @@ vect_recog_rotate_pattern (stmt_vec_info stmt_vinfo, tree *type_out) S3 stmt. */ static gimple * -vect_recog_vector_vector_shift_pattern (stmt_vec_info stmt_vinfo, +vect_recog_vector_vector_shift_pattern (vec_info *vinfo, + stmt_vec_info stmt_vinfo, tree *type_out) { gimple *last_stmt = stmt_vinfo->stmt; tree oprnd0, oprnd1, lhs, var; gimple *pattern_stmt; enum tree_code rhs_code; - vec_info *vinfo = stmt_vinfo->vinfo; if (!is_gimple_assign (last_stmt)) return NULL; @@ -2622,7 +2623,7 @@ vect_recog_vector_vector_shift_pattern (stmt_vec_info stmt_vinfo, def_stmt = gimple_build_assign (def, BIT_AND_EXPR, rhs1, mask); tree vecstype = get_vectype_for_scalar_type (vinfo, TREE_TYPE (rhs1)); - append_pattern_def_seq (stmt_vinfo, def_stmt, vecstype); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt, vecstype); } } } @@ -2631,7 +2632,7 @@ vect_recog_vector_vector_shift_pattern (stmt_vec_info stmt_vinfo, { def = vect_recog_temp_ssa_var (TREE_TYPE (oprnd0), NULL); def_stmt = gimple_build_assign (def, NOP_EXPR, oprnd1); - append_pattern_def_seq (stmt_vinfo, def_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt); } /* Pattern detected. */ @@ -2715,8 +2716,9 @@ target_supports_mult_synth_alg (struct algorithm *alg, mult_variant var, VINFO. Return the last statement. */ static gimple * -synth_lshift_by_additions (tree dest, tree op, HOST_WIDE_INT amnt, - stmt_vec_info vinfo) +synth_lshift_by_additions (vec_info *vinfo, + tree dest, tree op, HOST_WIDE_INT amnt, + stmt_vec_info stmt_info) { HOST_WIDE_INT i; tree itype = TREE_TYPE (op); @@ -2730,7 +2732,7 @@ synth_lshift_by_additions (tree dest, tree op, HOST_WIDE_INT amnt, = gimple_build_assign (tmp_var, PLUS_EXPR, prev_res, prev_res); prev_res = tmp_var; if (i < amnt - 1) - append_pattern_def_seq (vinfo, stmt); + append_pattern_def_seq (vinfo, stmt_info, stmt); else return stmt; } @@ -2746,7 +2748,8 @@ synth_lshift_by_additions (tree dest, tree op, HOST_WIDE_INT amnt, left shifts using additions. */ static tree -apply_binop_and_append_stmt (tree_code code, tree op1, tree op2, +apply_binop_and_append_stmt (vec_info *vinfo, + tree_code code, tree op1, tree op2, stmt_vec_info stmt_vinfo, bool synth_shift_p) { if (integer_zerop (op2) @@ -2764,14 +2767,14 @@ apply_binop_and_append_stmt (tree_code code, tree op1, tree op2, if (code == LSHIFT_EXPR && synth_shift_p) { - stmt = synth_lshift_by_additions (tmp_var, op1, TREE_INT_CST_LOW (op2), - stmt_vinfo); - append_pattern_def_seq (stmt_vinfo, stmt); + stmt = synth_lshift_by_additions (vinfo, tmp_var, op1, + TREE_INT_CST_LOW (op2), stmt_vinfo); + append_pattern_def_seq (vinfo, stmt_vinfo, stmt); return tmp_var; } stmt = gimple_build_assign (tmp_var, code, op1, op2); - append_pattern_def_seq (stmt_vinfo, stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, stmt); return tmp_var; } @@ -2783,10 +2786,9 @@ apply_binop_and_append_stmt (tree_code code, tree op1, tree op2, works on tree-ssa form. 
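   For example (an illustrative sketch): a multiplication by 10 can be
   synthesized as

     t1 = op << 3;
     t2 = op << 1;
     res = t1 + t2;

   using only shifts (or, on targets without vector shifts, the
   shift-by-additions sequence above) and additions.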
*/ static gimple * -vect_synth_mult_by_constant (tree op, tree val, +vect_synth_mult_by_constant (vec_info *vinfo, tree op, tree val, stmt_vec_info stmt_vinfo) { - vec_info *vinfo = stmt_vinfo->vinfo; tree itype = TREE_TYPE (op); machine_mode mode = TYPE_MODE (itype); struct algorithm alg; @@ -2832,7 +2834,7 @@ vect_synth_mult_by_constant (tree op, tree val, { tree tmp_op = vect_recog_temp_ssa_var (multtype, NULL); stmt = gimple_build_assign (tmp_op, CONVERT_EXPR, op); - append_pattern_def_seq (stmt_vinfo, stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, stmt); op = tmp_op; } @@ -2855,23 +2857,23 @@ vect_synth_mult_by_constant (tree op, tree val, case alg_shift: if (synth_shift_p) stmt - = synth_lshift_by_additions (accum_tmp, accumulator, alg.log[i], - stmt_vinfo); + = synth_lshift_by_additions (vinfo, accum_tmp, accumulator, + alg.log[i], stmt_vinfo); else stmt = gimple_build_assign (accum_tmp, LSHIFT_EXPR, accumulator, shft_log); break; case alg_add_t_m2: tmp_var - = apply_binop_and_append_stmt (LSHIFT_EXPR, op, shft_log, - stmt_vinfo, synth_shift_p); + = apply_binop_and_append_stmt (vinfo, LSHIFT_EXPR, op, shft_log, + stmt_vinfo, synth_shift_p); stmt = gimple_build_assign (accum_tmp, PLUS_EXPR, accumulator, tmp_var); break; case alg_sub_t_m2: - tmp_var = apply_binop_and_append_stmt (LSHIFT_EXPR, op, - shft_log, stmt_vinfo, - synth_shift_p); + tmp_var = apply_binop_and_append_stmt (vinfo, LSHIFT_EXPR, op, + shft_log, stmt_vinfo, + synth_shift_p); /* In some algorithms the first step involves zeroing the accumulator. If subtracting from such an accumulator just emit the negation directly. */ @@ -2883,27 +2885,27 @@ vect_synth_mult_by_constant (tree op, tree val, break; case alg_add_t2_m: tmp_var - = apply_binop_and_append_stmt (LSHIFT_EXPR, accumulator, shft_log, - stmt_vinfo, synth_shift_p); + = apply_binop_and_append_stmt (vinfo, LSHIFT_EXPR, accumulator, + shft_log, stmt_vinfo, synth_shift_p); stmt = gimple_build_assign (accum_tmp, PLUS_EXPR, tmp_var, op); break; case alg_sub_t2_m: tmp_var - = apply_binop_and_append_stmt (LSHIFT_EXPR, accumulator, shft_log, - stmt_vinfo, synth_shift_p); + = apply_binop_and_append_stmt (vinfo, LSHIFT_EXPR, accumulator, + shft_log, stmt_vinfo, synth_shift_p); stmt = gimple_build_assign (accum_tmp, MINUS_EXPR, tmp_var, op); break; case alg_add_factor: tmp_var - = apply_binop_and_append_stmt (LSHIFT_EXPR, accumulator, shft_log, - stmt_vinfo, synth_shift_p); + = apply_binop_and_append_stmt (vinfo, LSHIFT_EXPR, accumulator, + shft_log, stmt_vinfo, synth_shift_p); stmt = gimple_build_assign (accum_tmp, PLUS_EXPR, accumulator, tmp_var); break; case alg_sub_factor: tmp_var - = apply_binop_and_append_stmt (LSHIFT_EXPR, accumulator, shft_log, - stmt_vinfo, synth_shift_p); + = apply_binop_and_append_stmt (vinfo, LSHIFT_EXPR, accumulator, + shft_log, stmt_vinfo, synth_shift_p); stmt = gimple_build_assign (accum_tmp, MINUS_EXPR, tmp_var, accumulator); break; @@ -2914,7 +2916,7 @@ vect_synth_mult_by_constant (tree op, tree val, but rather return it directly. 
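   (The statement returned here becomes the root of the pattern in the
   caller, while all intermediate statements were appended to the
   pattern definition sequence with append_pattern_def_seq.)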
*/ if ((i < alg.ops - 1) || needs_fixup || cast_to_unsigned_p) - append_pattern_def_seq (stmt_vinfo, stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, stmt); accumulator = accum_tmp; } if (variant == negate_variant) @@ -2923,7 +2925,7 @@ vect_synth_mult_by_constant (tree op, tree val, stmt = gimple_build_assign (accum_tmp, NEGATE_EXPR, accumulator); accumulator = accum_tmp; if (cast_to_unsigned_p) - append_pattern_def_seq (stmt_vinfo, stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, stmt); } else if (variant == add_variant) { @@ -2931,7 +2933,7 @@ vect_synth_mult_by_constant (tree op, tree val, stmt = gimple_build_assign (accum_tmp, PLUS_EXPR, accumulator, op); accumulator = accum_tmp; if (cast_to_unsigned_p) - append_pattern_def_seq (stmt_vinfo, stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, stmt); } /* Move back to a signed if needed. */ if (cast_to_unsigned_p) @@ -2960,9 +2962,9 @@ vect_synth_mult_by_constant (tree op, tree val, the multiplication. */ static gimple * -vect_recog_mult_pattern (stmt_vec_info stmt_vinfo, tree *type_out) +vect_recog_mult_pattern (vec_info *vinfo, + stmt_vec_info stmt_vinfo, tree *type_out) { - vec_info *vinfo = stmt_vinfo->vinfo; gimple *last_stmt = stmt_vinfo->stmt; tree oprnd0, oprnd1, vectype, itype; gimple *pattern_stmt; @@ -2998,7 +3000,8 @@ vect_recog_mult_pattern (stmt_vec_info stmt_vinfo, tree *type_out) return NULL; } - pattern_stmt = vect_synth_mult_by_constant (oprnd0, oprnd1, stmt_vinfo); + pattern_stmt = vect_synth_mult_by_constant (vinfo, + oprnd0, oprnd1, stmt_vinfo); if (!pattern_stmt) return NULL; @@ -3049,9 +3052,9 @@ vect_recog_mult_pattern (stmt_vec_info stmt_vinfo, tree *type_out) S1 or modulo S4 stmt. */ static gimple * -vect_recog_divmod_pattern (stmt_vec_info stmt_vinfo, tree *type_out) +vect_recog_divmod_pattern (vec_info *vinfo, + stmt_vec_info stmt_vinfo, tree *type_out) { - vec_info *vinfo = stmt_vinfo->vinfo; gimple *last_stmt = stmt_vinfo->stmt; tree oprnd0, oprnd1, vectype, itype, cond; gimple *pattern_stmt, *def_stmt; @@ -3126,11 +3129,11 @@ vect_recog_divmod_pattern (stmt_vec_info stmt_vinfo, tree *type_out) if (rhs_code == TRUNC_MOD_EXPR) { - append_pattern_def_seq (stmt_vinfo, div_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, div_stmt); def_stmt = gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL), LSHIFT_EXPR, var_div, shift); - append_pattern_def_seq (stmt_vinfo, def_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt); pattern_stmt = gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL), MINUS_EXPR, oprnd0, @@ -3155,12 +3158,12 @@ vect_recog_divmod_pattern (stmt_vec_info stmt_vinfo, tree *type_out) fold_build2 (MINUS_EXPR, itype, oprnd1, build_int_cst (itype, 1)), build_int_cst (itype, 0)); - append_pattern_def_seq (stmt_vinfo, def_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt); var = vect_recog_temp_ssa_var (itype, NULL); def_stmt = gimple_build_assign (var, PLUS_EXPR, oprnd0, gimple_assign_lhs (def_stmt)); - append_pattern_def_seq (stmt_vinfo, def_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt); shift = build_int_cst (itype, tree_log2 (oprnd1)); pattern_stmt @@ -3176,7 +3179,7 @@ vect_recog_divmod_pattern (stmt_vec_info stmt_vinfo, tree *type_out) def_stmt = gimple_build_assign (signmask, COND_EXPR, cond, build_int_cst (itype, 1), build_int_cst (itype, 0)); - append_pattern_def_seq (stmt_vinfo, def_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt); } else { @@ -3191,27 +3194,27 @@ vect_recog_divmod_pattern (stmt_vec_info stmt_vinfo, tree 
*type_out) def_stmt = gimple_build_assign (var, COND_EXPR, cond, build_int_cst (utype, -1), build_int_cst (utype, 0)); - append_pattern_def_seq (stmt_vinfo, def_stmt, vecutype); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt, vecutype); var = vect_recog_temp_ssa_var (utype, NULL); def_stmt = gimple_build_assign (var, RSHIFT_EXPR, gimple_assign_lhs (def_stmt), shift); - append_pattern_def_seq (stmt_vinfo, def_stmt, vecutype); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt, vecutype); signmask = vect_recog_temp_ssa_var (itype, NULL); def_stmt = gimple_build_assign (signmask, NOP_EXPR, var); - append_pattern_def_seq (stmt_vinfo, def_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt); } def_stmt = gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL), PLUS_EXPR, oprnd0, signmask); - append_pattern_def_seq (stmt_vinfo, def_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt); def_stmt = gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL), BIT_AND_EXPR, gimple_assign_lhs (def_stmt), fold_build2 (MINUS_EXPR, itype, oprnd1, build_int_cst (itype, 1))); - append_pattern_def_seq (stmt_vinfo, def_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt); pattern_stmt = gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL), @@ -3270,17 +3273,17 @@ vect_recog_divmod_pattern (stmt_vec_info stmt_vinfo, tree *type_out) t1 = vect_recog_temp_ssa_var (itype, NULL); def_stmt = gimple_build_assign (t1, MULT_HIGHPART_EXPR, oprnd0, build_int_cst (itype, ml)); - append_pattern_def_seq (stmt_vinfo, def_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt); t2 = vect_recog_temp_ssa_var (itype, NULL); def_stmt = gimple_build_assign (t2, MINUS_EXPR, oprnd0, t1); - append_pattern_def_seq (stmt_vinfo, def_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt); t3 = vect_recog_temp_ssa_var (itype, NULL); def_stmt = gimple_build_assign (t3, RSHIFT_EXPR, t2, integer_one_node); - append_pattern_def_seq (stmt_vinfo, def_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt); t4 = vect_recog_temp_ssa_var (itype, NULL); def_stmt @@ -3288,7 +3291,7 @@ vect_recog_divmod_pattern (stmt_vec_info stmt_vinfo, tree *type_out) if (post_shift != 1) { - append_pattern_def_seq (stmt_vinfo, def_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt); q = vect_recog_temp_ssa_var (itype, NULL); pattern_stmt @@ -3315,7 +3318,7 @@ vect_recog_divmod_pattern (stmt_vec_info stmt_vinfo, tree *type_out) def_stmt = gimple_build_assign (t1, RSHIFT_EXPR, oprnd0, build_int_cst (NULL, pre_shift)); - append_pattern_def_seq (stmt_vinfo, def_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt); } else t1 = oprnd0; @@ -3326,7 +3329,7 @@ vect_recog_divmod_pattern (stmt_vec_info stmt_vinfo, tree *type_out) if (post_shift) { - append_pattern_def_seq (stmt_vinfo, def_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt); q = vect_recog_temp_ssa_var (itype, NULL); def_stmt @@ -3387,7 +3390,7 @@ vect_recog_divmod_pattern (stmt_vec_info stmt_vinfo, tree *type_out) if (add) { /* t2 = t1 + oprnd0; */ - append_pattern_def_seq (stmt_vinfo, def_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt); t2 = vect_recog_temp_ssa_var (itype, NULL); def_stmt = gimple_build_assign (t2, PLUS_EXPR, t1, oprnd0); } @@ -3397,7 +3400,7 @@ vect_recog_divmod_pattern (stmt_vec_info stmt_vinfo, tree *type_out) if (post_shift) { /* t3 = t2 >> post_shift; */ - append_pattern_def_seq (stmt_vinfo, def_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt); t3 = 
vect_recog_temp_ssa_var (itype, NULL); def_stmt = gimple_build_assign (t3, RSHIFT_EXPR, t2, build_int_cst (itype, post_shift)); @@ -3428,7 +3431,7 @@ vect_recog_divmod_pattern (stmt_vec_info stmt_vinfo, tree *type_out) t4 = 0; or if we know from VRP that oprnd0 < 0 t4 = -1; */ - append_pattern_def_seq (stmt_vinfo, def_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt); t4 = vect_recog_temp_ssa_var (itype, NULL); if (msb != 1) def_stmt = gimple_build_assign (t4, INTEGER_CST, @@ -3436,7 +3439,7 @@ vect_recog_divmod_pattern (stmt_vec_info stmt_vinfo, tree *type_out) else def_stmt = gimple_build_assign (t4, RSHIFT_EXPR, oprnd0, build_int_cst (itype, prec - 1)); - append_pattern_def_seq (stmt_vinfo, def_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt); /* q = t3 - t4; or q = t4 - t3; */ q = vect_recog_temp_ssa_var (itype, NULL); @@ -3452,11 +3455,11 @@ vect_recog_divmod_pattern (stmt_vec_info stmt_vinfo, tree *type_out) /* We divided. Now finish by: t1 = q * oprnd1; r = oprnd0 - t1; */ - append_pattern_def_seq (stmt_vinfo, pattern_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, pattern_stmt); t1 = vect_recog_temp_ssa_var (itype, NULL); def_stmt = gimple_build_assign (t1, MULT_EXPR, q, oprnd1); - append_pattern_def_seq (stmt_vinfo, def_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt); r = vect_recog_temp_ssa_var (itype, NULL); pattern_stmt = gimple_build_assign (r, MINUS_EXPR, oprnd0, t1); @@ -3498,9 +3501,9 @@ vect_recog_divmod_pattern (stmt_vec_info stmt_vinfo, tree *type_out) a_T = (TYPE) a_it; */ static gimple * -vect_recog_mixed_size_cond_pattern (stmt_vec_info stmt_vinfo, tree *type_out) +vect_recog_mixed_size_cond_pattern (vec_info *vinfo, + stmt_vec_info stmt_vinfo, tree *type_out) { - vec_info *vinfo = stmt_vinfo->vinfo; gimple *last_stmt = stmt_vinfo->stmt; tree cond_expr, then_clause, else_clause; tree type, vectype, comp_vectype, itype = NULL_TREE, vecitype; @@ -3536,11 +3539,11 @@ vect_recog_mixed_size_cond_pattern (stmt_vec_info stmt_vinfo, tree *type_out) return NULL; if ((TREE_CODE (then_clause) != INTEGER_CST - && !type_conversion_p (then_clause, stmt_vinfo, false, &orig_type0, - &def_stmt0, &promotion)) + && !type_conversion_p (vinfo, then_clause, false, + &orig_type0, &def_stmt0, &promotion)) || (TREE_CODE (else_clause) != INTEGER_CST - && !type_conversion_p (else_clause, stmt_vinfo, false, &orig_type1, - &def_stmt1, &promotion))) + && !type_conversion_p (vinfo, else_clause, false, + &orig_type1, &def_stmt1, &promotion))) return NULL; if (orig_type0 && orig_type1 @@ -3609,7 +3612,7 @@ vect_recog_mixed_size_cond_pattern (stmt_vec_info stmt_vinfo, tree *type_out) pattern_stmt = gimple_build_assign (vect_recog_temp_ssa_var (type, NULL), NOP_EXPR, gimple_assign_lhs (def_stmt)); - append_pattern_def_seq (stmt_vinfo, def_stmt, vecitype); + append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt, vecitype); *type_out = vectype; vect_pattern_detected ("vect_recog_mixed_size_cond_pattern", last_stmt); @@ -3722,12 +3725,12 @@ check_bool_pattern (tree var, vec_info *vinfo, hash_set &stmts) pattern sequence. 
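   For example (sketch): casting a boolean-like value m to a wider
   integer type T appends the single statement

     tmp = (T) m;

   to the pattern definition sequence and returns tmp, which is what
   the NOP_EXPR assignment in this helper does.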
*/ static tree -adjust_bool_pattern_cast (tree type, tree var, stmt_vec_info stmt_info) +adjust_bool_pattern_cast (vec_info *vinfo, + tree type, tree var, stmt_vec_info stmt_info) { - vec_info *vinfo = stmt_info->vinfo; gimple *cast_stmt = gimple_build_assign (vect_recog_temp_ssa_var (type, NULL), NOP_EXPR, var); - append_pattern_def_seq (stmt_info, cast_stmt, + append_pattern_def_seq (vinfo, stmt_info, cast_stmt, get_vectype_for_scalar_type (vinfo, type)); return gimple_assign_lhs (cast_stmt); } @@ -3739,10 +3742,9 @@ adjust_bool_pattern_cast (tree type, tree var, stmt_vec_info stmt_info) be associated with. DEFS is a map of pattern defs. */ static void -adjust_bool_pattern (tree var, tree out_type, +adjust_bool_pattern (vec_info *vinfo, tree var, tree out_type, stmt_vec_info stmt_info, hash_map &defs) { - vec_info *vinfo = stmt_info->vinfo; gimple *stmt = SSA_NAME_DEF_STMT (var); enum tree_code rhs_code, def_rhs_code; tree itype, cond_expr, rhs1, rhs2, irhs1, irhs2; @@ -3858,15 +3860,17 @@ adjust_bool_pattern (tree var, tree out_type, int prec2 = TYPE_PRECISION (TREE_TYPE (irhs2)); int out_prec = TYPE_PRECISION (out_type); if (absu_hwi (out_prec - prec1) < absu_hwi (out_prec - prec2)) - irhs2 = adjust_bool_pattern_cast (TREE_TYPE (irhs1), irhs2, + irhs2 = adjust_bool_pattern_cast (vinfo, TREE_TYPE (irhs1), irhs2, stmt_info); else if (absu_hwi (out_prec - prec1) > absu_hwi (out_prec - prec2)) - irhs1 = adjust_bool_pattern_cast (TREE_TYPE (irhs2), irhs1, + irhs1 = adjust_bool_pattern_cast (vinfo, TREE_TYPE (irhs2), irhs1, stmt_info); else { - irhs1 = adjust_bool_pattern_cast (out_type, irhs1, stmt_info); - irhs2 = adjust_bool_pattern_cast (out_type, irhs2, stmt_info); + irhs1 = adjust_bool_pattern_cast (vinfo, + out_type, irhs1, stmt_info); + irhs2 = adjust_bool_pattern_cast (vinfo, + out_type, irhs2, stmt_info); } } itype = TREE_TYPE (irhs1); @@ -3903,7 +3907,7 @@ adjust_bool_pattern (tree var, tree out_type, } gimple_set_location (pattern_stmt, loc); - append_pattern_def_seq (stmt_info, pattern_stmt, + append_pattern_def_seq (vinfo, stmt_info, pattern_stmt, get_vectype_for_scalar_type (vinfo, itype)); defs.put (var, gimple_assign_lhs (pattern_stmt)); } @@ -3923,7 +3927,7 @@ sort_after_uid (const void *p1, const void *p2) OUT_TYPE. Return the def of the pattern root. */ static tree -adjust_bool_stmts (hash_set &bool_stmt_set, +adjust_bool_stmts (vec_info *vinfo, hash_set &bool_stmt_set, tree out_type, stmt_vec_info stmt_info) { /* Gather original stmts in the bool pattern in their order of appearance @@ -3937,7 +3941,7 @@ adjust_bool_stmts (hash_set &bool_stmt_set, /* Now process them in that order, producing pattern stmts. */ hash_map defs; for (unsigned i = 0; i < bool_stmts.length (); ++i) - adjust_bool_pattern (gimple_assign_lhs (bool_stmts[i]), + adjust_bool_pattern (vinfo, gimple_assign_lhs (bool_stmts[i]), out_type, stmt_info, defs); /* Pop the last pattern seq stmt and install it as pattern root for STMT. */ @@ -4012,12 +4016,12 @@ integer_type_for_mask (tree var, vec_info *vinfo) but the above is more efficient. 
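
   As a hedged scalar picture of the rewrite this recognizer performs
   (illustrative C only, not part of the patch): the boolean chain is
   re-expressed with integer COND_EXPRs, and the AND is folded into the
   second select exactly as described above.

     int
     f (int x1, int y1, int x2, int y2)
     {
       _Bool a = x1 < y1;            // original bool defs
       _Bool c = (x2 < y2) & a;
       int a_T = x1 < y1 ? 1 : 0;    // pattern form: vectorizable selects
       int c_T = x2 < y2 ? a_T : 0;  // AND folded into the select
       return c + c_T;               // both chains compute the same value
     }
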
*/ static gimple * -vect_recog_bool_pattern (stmt_vec_info stmt_vinfo, tree *type_out) +vect_recog_bool_pattern (vec_info *vinfo, + stmt_vec_info stmt_vinfo, tree *type_out) { gimple *last_stmt = stmt_vinfo->stmt; enum tree_code rhs_code; tree var, lhs, rhs, vectype; - vec_info *vinfo = stmt_vinfo->vinfo; gimple *pattern_stmt; if (!is_gimple_assign (last_stmt)) @@ -4043,7 +4047,8 @@ vect_recog_bool_pattern (stmt_vec_info stmt_vinfo, tree *type_out) if (check_bool_pattern (var, vinfo, bool_stmts)) { - rhs = adjust_bool_stmts (bool_stmts, TREE_TYPE (lhs), stmt_vinfo); + rhs = adjust_bool_stmts (vinfo, bool_stmts, + TREE_TYPE (lhs), stmt_vinfo); lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL); if (useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs))) pattern_stmt = gimple_build_assign (lhs, SSA_NAME, rhs); @@ -4075,7 +4080,8 @@ vect_recog_bool_pattern (stmt_vec_info stmt_vinfo, tree *type_out) if (!useless_type_conversion_p (type, TREE_TYPE (lhs))) { tree new_vectype = get_vectype_for_scalar_type (vinfo, type); - append_pattern_def_seq (stmt_vinfo, pattern_stmt, new_vectype); + append_pattern_def_seq (vinfo, stmt_vinfo, + pattern_stmt, new_vectype); lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL); pattern_stmt = gimple_build_assign (lhs, CONVERT_EXPR, tmp); @@ -4110,7 +4116,7 @@ vect_recog_bool_pattern (stmt_vec_info stmt_vinfo, tree *type_out) if (!check_bool_pattern (var, vinfo, bool_stmts)) return NULL; - rhs = adjust_bool_stmts (bool_stmts, type, stmt_vinfo); + rhs = adjust_bool_stmts (vinfo, bool_stmts, type, stmt_vinfo); lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL); pattern_stmt @@ -4129,13 +4135,14 @@ vect_recog_bool_pattern (stmt_vec_info stmt_vinfo, tree *type_out) { stmt_vec_info pattern_stmt_info; tree nunits_vectype; - if (!vect_get_vector_types_for_stmt (stmt_vinfo, &vectype, + if (!vect_get_vector_types_for_stmt (vinfo, stmt_vinfo, &vectype, &nunits_vectype) || !VECTOR_MODE_P (TYPE_MODE (vectype))) return NULL; if (check_bool_pattern (var, vinfo, bool_stmts)) - rhs = adjust_bool_stmts (bool_stmts, TREE_TYPE (vectype), stmt_vinfo); + rhs = adjust_bool_stmts (vinfo, bool_stmts, + TREE_TYPE (vectype), stmt_vinfo); else { tree type = integer_type_for_mask (var, vinfo); @@ -4153,7 +4160,7 @@ vect_recog_bool_pattern (stmt_vec_info stmt_vinfo, tree *type_out) rhs = vect_recog_temp_ssa_var (type, NULL); pattern_stmt = gimple_build_assign (rhs, COND_EXPR, var, cst1, cst0); - append_pattern_def_seq (stmt_vinfo, pattern_stmt, new_vectype); + append_pattern_def_seq (vinfo, stmt_vinfo, pattern_stmt, new_vectype); } lhs = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vectype), lhs); @@ -4161,7 +4168,7 @@ vect_recog_bool_pattern (stmt_vec_info stmt_vinfo, tree *type_out) { tree rhs2 = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL); gimple *cast_stmt = gimple_build_assign (rhs2, NOP_EXPR, rhs); - append_pattern_def_seq (stmt_vinfo, cast_stmt); + append_pattern_def_seq (vinfo, stmt_vinfo, cast_stmt); rhs = rhs2; } pattern_stmt = gimple_build_assign (lhs, SSA_NAME, rhs); @@ -4185,7 +4192,8 @@ vect_recog_bool_pattern (stmt_vec_info stmt_vinfo, tree *type_out) Return converted mask. 
 */

 static tree
-build_mask_conversion (tree mask, tree vectype, stmt_vec_info stmt_vinfo)
+build_mask_conversion (vec_info *vinfo,
+		       tree mask, tree vectype, stmt_vec_info stmt_vinfo)
 {
   gimple *stmt;
   tree masktype, tmp;
@@ -4193,7 +4201,8 @@ build_mask_conversion (tree mask, tree vectype, stmt_vec_info stmt_vinfo)
   masktype = truth_type_for (vectype);
   tmp = vect_recog_temp_ssa_var (TREE_TYPE (masktype), NULL);
   stmt = gimple_build_assign (tmp, CONVERT_EXPR, mask);
-  append_pattern_def_seq (stmt_vinfo, stmt, masktype, TREE_TYPE (vectype));
+  append_pattern_def_seq (vinfo, stmt_vinfo,
+			  stmt, masktype, TREE_TYPE (vectype));

   return tmp;
 }

@@ -4225,14 +4234,14 @@ build_mask_conversion (tree mask, tree vectype, stmt_vec_info stmt_vinfo)
    S4'  c_1' = m_3'' ? c_2 : c_3;  */

 static gimple *
-vect_recog_mask_conversion_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
+vect_recog_mask_conversion_pattern (vec_info *vinfo,
+				    stmt_vec_info stmt_vinfo, tree *type_out)
 {
   gimple *last_stmt = stmt_vinfo->stmt;
   enum tree_code rhs_code;
   tree lhs = NULL_TREE, rhs1, rhs2, tmp, rhs1_type, rhs2_type;
   tree vectype1, vectype2;
   stmt_vec_info pattern_stmt_info;
-  vec_info *vinfo = stmt_vinfo->vinfo;

   /* Check for MASK_LOAD and MASK_STORE calls requiring mask conversion.  */
   if (is_gimple_call (last_stmt)
@@ -4269,7 +4278,7 @@ vect_recog_mask_conversion_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
			TYPE_VECTOR_SUBPARTS (vectype2)))
	return NULL;

-      tmp = build_mask_conversion (mask_arg, vectype1, stmt_vinfo);
+      tmp = build_mask_conversion (vinfo, mask_arg, vectype1, stmt_vinfo);

       auto_vec<tree> args;
       unsigned int nargs = gimple_call_num_args (last_stmt);
@@ -4388,13 +4397,13 @@ vect_recog_mask_conversion_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
	  tmp = vect_recog_temp_ssa_var (TREE_TYPE (rhs1), NULL);
	  pattern_stmt = gimple_build_assign (tmp, rhs1);
	  rhs1 = tmp;
-	  append_pattern_def_seq (stmt_vinfo, pattern_stmt, vectype2,
+	  append_pattern_def_seq (vinfo, stmt_vinfo, pattern_stmt, vectype2,
				  rhs1_type);
	}

       if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
		     TYPE_VECTOR_SUBPARTS (vectype2)))
-	tmp = build_mask_conversion (rhs1, vectype1, stmt_vinfo);
+	tmp = build_mask_conversion (vinfo, rhs1, vectype1, stmt_vinfo);
       else
	tmp = rhs1;

@@ -4434,14 +4443,14 @@ vect_recog_mask_conversion_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
	  vectype1 = get_mask_type_for_scalar_type (vinfo, rhs1_type);
	  if (!vectype1)
	    return NULL;
-	  rhs2 = build_mask_conversion (rhs2, vectype1, stmt_vinfo);
+	  rhs2 = build_mask_conversion (vinfo, rhs2, vectype1, stmt_vinfo);
	}
       else
	{
	  vectype1 = get_mask_type_for_scalar_type (vinfo, rhs2_type);
	  if (!vectype1)
	    return NULL;
-	  rhs1 = build_mask_conversion (rhs1, vectype1, stmt_vinfo);
+	  rhs1 = build_mask_conversion (vinfo, rhs1, vectype1, stmt_vinfo);
	}

       lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
@@ -4491,7 +4500,7 @@ vect_convert_mask_for_vectype (tree mask, tree vectype,
       if (mask_vectype
	  && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype),
		       TYPE_VECTOR_SUBPARTS (mask_vectype)))
-	mask = build_mask_conversion (mask, vectype, stmt_info);
+	mask = build_mask_conversion (vinfo, mask, vectype, stmt_info);
     }
   return mask;
 }
@@ -4505,15 +4514,15 @@ vect_convert_mask_for_vectype (tree mask, tree vectype,
    to STMT_INFO.
*/ static tree -vect_add_conversion_to_pattern (tree type, tree value, stmt_vec_info stmt_info) +vect_add_conversion_to_pattern (vec_info *vinfo, + tree type, tree value, stmt_vec_info stmt_info) { if (useless_type_conversion_p (type, TREE_TYPE (value))) return value; - vec_info *vinfo = stmt_info->vinfo; tree new_value = vect_recog_temp_ssa_var (type, NULL); gassign *conversion = gimple_build_assign (new_value, CONVERT_EXPR, value); - append_pattern_def_seq (stmt_info, conversion, + append_pattern_def_seq (vinfo, stmt_info, conversion, get_vectype_for_scalar_type (vinfo, type)); return new_value; } @@ -4526,10 +4535,11 @@ vect_add_conversion_to_pattern (tree type, tree value, stmt_vec_info stmt_info) as such from the outset (indicated by STMT_VINFO_GATHER_SCATTER_P). */ static gimple * -vect_recog_gather_scatter_pattern (stmt_vec_info stmt_info, tree *type_out) +vect_recog_gather_scatter_pattern (vec_info *vinfo, + stmt_vec_info stmt_info, tree *type_out) { /* Currently we only support this for loop vectorization. */ - loop_vec_info loop_vinfo = dyn_cast (stmt_info->vinfo); + loop_vec_info loop_vinfo = dyn_cast (vinfo); if (!loop_vinfo) return NULL; @@ -4560,8 +4570,8 @@ vect_recog_gather_scatter_pattern (stmt_vec_info stmt_info, tree *type_out) latter to the same width as the vector elements. */ tree base = gs_info.base; tree offset_type = TREE_TYPE (gs_info.offset_vectype); - tree offset = vect_add_conversion_to_pattern (offset_type, gs_info.offset, - stmt_info); + tree offset = vect_add_conversion_to_pattern (vinfo, offset_type, + gs_info.offset, stmt_info); /* Build the new pattern statement. */ tree scale = size_int (gs_info.scale); @@ -4705,10 +4715,10 @@ vect_set_min_input_precision (stmt_vec_info stmt_info, tree type, whose result is LHS. */ static bool -vect_determine_min_output_precision_1 (stmt_vec_info stmt_info, tree lhs) +vect_determine_min_output_precision_1 (vec_info *vinfo, + stmt_vec_info stmt_info, tree lhs) { /* Take the maximum precision required by users of the result. */ - vec_info *vinfo = stmt_info->vinfo; unsigned int precision = 0; imm_use_iterator iter; use_operand_p use; @@ -4742,7 +4752,7 @@ vect_determine_min_output_precision_1 (stmt_vec_info stmt_info, tree lhs) /* Calculate min_output_precision for STMT_INFO. */ static void -vect_determine_min_output_precision (stmt_vec_info stmt_info) +vect_determine_min_output_precision (vec_info *vinfo, stmt_vec_info stmt_info) { /* We're only interested in statements with a narrowable result. */ tree lhs = gimple_get_lhs (stmt_info->stmt); @@ -4751,7 +4761,7 @@ vect_determine_min_output_precision (stmt_vec_info stmt_info) || !vect_narrowable_type_p (TREE_TYPE (lhs))) return; - if (!vect_determine_min_output_precision_1 (stmt_info, lhs)) + if (!vect_determine_min_output_precision_1 (vinfo, stmt_info, lhs)) stmt_info->min_output_precision = TYPE_PRECISION (TREE_TYPE (lhs)); } @@ -4962,10 +4972,8 @@ possible_vector_mask_operation_p (stmt_vec_info stmt_info) result in STMT_INFO->mask_precision. */ static void -vect_determine_mask_precision (stmt_vec_info stmt_info) +vect_determine_mask_precision (vec_info *vinfo, stmt_vec_info stmt_info) { - vec_info *vinfo = stmt_info->vinfo; - if (!possible_vector_mask_operation_p (stmt_info) || stmt_info->mask_precision) return; @@ -5070,15 +5078,15 @@ vect_determine_mask_precision (stmt_vec_info stmt_info) have already done so for the users of its result. 
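
   A hedged example of the narrowing this analysis enables (not from the
   patch): the addition below is done in int in C terms, but its only user
   is an 8-bit store, so its min_output_precision is 8 and the vectorizer
   may operate on narrower vector elements.

     void
     f (unsigned char *restrict c, const unsigned char *restrict a,
	const unsigned char *restrict b, int n)
     {
       for (int i = 0; i < n; ++i)
	 c[i] = a[i] + b[i];   // stored result needs only 8 bits
     }
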
*/ void -vect_determine_stmt_precisions (stmt_vec_info stmt_info) +vect_determine_stmt_precisions (vec_info *vinfo, stmt_vec_info stmt_info) { - vect_determine_min_output_precision (stmt_info); + vect_determine_min_output_precision (vinfo, stmt_info); if (gassign *stmt = dyn_cast (stmt_info->stmt)) { vect_determine_precisions_from_range (stmt_info, stmt); vect_determine_precisions_from_users (stmt_info, stmt); } - vect_determine_mask_precision (stmt_info); + vect_determine_mask_precision (vinfo, stmt_info); } /* Walk backwards through the vectorizable region to determine the @@ -5106,7 +5114,7 @@ vect_determine_precisions (vec_info *vinfo) for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si); gsi_prev (&si)) vect_determine_stmt_precisions - (vinfo->lookup_stmt (gsi_stmt (si))); + (vinfo, vinfo->lookup_stmt (gsi_stmt (si))); } } else @@ -5123,13 +5131,13 @@ vect_determine_precisions (vec_info *vinfo) stmt = gsi_stmt (si); stmt_vec_info stmt_info = vinfo->lookup_stmt (stmt); if (stmt_info && STMT_VINFO_VECTORIZABLE (stmt_info)) - vect_determine_stmt_precisions (stmt_info); + vect_determine_stmt_precisions (vinfo, stmt_info); } while (stmt != gsi_stmt (bb_vinfo->region_begin)); } } -typedef gimple *(*vect_recog_func_ptr) (stmt_vec_info, tree *); +typedef gimple *(*vect_recog_func_ptr) (vec_info *, stmt_vec_info, tree *); struct vect_recog_func { @@ -5171,7 +5179,8 @@ const unsigned int NUM_PATTERNS = ARRAY_SIZE (vect_vect_recog_func_ptrs); /* Mark statements that are involved in a pattern. */ static inline void -vect_mark_pattern_stmts (stmt_vec_info orig_stmt_info, gimple *pattern_stmt, +vect_mark_pattern_stmts (vec_info *vinfo, + stmt_vec_info orig_stmt_info, gimple *pattern_stmt, tree pattern_vectype) { stmt_vec_info orig_stmt_info_saved = orig_stmt_info; @@ -5213,7 +5222,7 @@ vect_mark_pattern_stmts (stmt_vec_info orig_stmt_info, gimple *pattern_stmt, dump_printf_loc (MSG_NOTE, vect_location, "extra pattern stmt: %G", gsi_stmt (si)); stmt_vec_info pattern_stmt_info - = vect_init_pattern_stmt (gsi_stmt (si), + = vect_init_pattern_stmt (vinfo, gsi_stmt (si), orig_stmt_info, pattern_vectype); /* Stmts in the def sequence are not vectorizable cycle or induction defs, instead they should all be vect_internal_def @@ -5223,7 +5232,8 @@ vect_mark_pattern_stmts (stmt_vec_info orig_stmt_info, gimple *pattern_stmt, if (orig_pattern_stmt) { - vect_init_pattern_stmt (pattern_stmt, orig_stmt_info, pattern_vectype); + vect_init_pattern_stmt (vinfo, pattern_stmt, + orig_stmt_info, pattern_vectype); /* Insert all the new pattern statements before the original one. */ gimple_seq *orig_def_seq = &STMT_VINFO_PATTERN_DEF_SEQ (orig_stmt_info); @@ -5236,12 +5246,12 @@ vect_mark_pattern_stmts (stmt_vec_info orig_stmt_info, gimple *pattern_stmt, gsi_remove (&gsi, false); } else - vect_set_pattern_stmt (pattern_stmt, orig_stmt_info, pattern_vectype); + vect_set_pattern_stmt (vinfo, + pattern_stmt, orig_stmt_info, pattern_vectype); /* Transfer reduction path info to the pattern. */ if (STMT_VINFO_REDUC_IDX (orig_stmt_info_saved) != -1) { - vec_info *vinfo = orig_stmt_info_saved->vinfo; tree lookfor = gimple_op (orig_stmt_info_saved->stmt, 1 + STMT_VINFO_REDUC_IDX (orig_stmt_info)); /* Search the pattern def sequence and the main pattern stmt. Note @@ -5312,9 +5322,9 @@ vect_mark_pattern_stmts (stmt_vec_info orig_stmt_info, gimple *pattern_stmt, for vect_recog_pattern. 
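
   Under the new typedef above, a recognizer receives the vec_info
   explicitly rather than digging it out of the stmt.  A minimal
   hypothetical recognizer, for illustration only (name and body are
   invented; it matches nothing):

     static gimple *
     vect_recog_example_pattern (vec_info *vinfo, stmt_vec_info stmt_vinfo,
				 tree *type_out)
     {
       tree lhs = gimple_get_lhs (stmt_vinfo->stmt);
       if (!lhs)
	 return NULL;
       // The vec_info now arrives as a parameter instead of via the
       // removed stmt_vinfo->vinfo back-pointer.
       *type_out = get_vectype_for_scalar_type (vinfo, TREE_TYPE (lhs));
       return NULL;   // no pattern recognized in this sketch
     }
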
*/ static void -vect_pattern_recog_1 (vect_recog_func *recog_func, stmt_vec_info stmt_info) +vect_pattern_recog_1 (vec_info *vinfo, + vect_recog_func *recog_func, stmt_vec_info stmt_info) { - vec_info *vinfo = stmt_info->vinfo; gimple *pattern_stmt; loop_vec_info loop_vinfo; tree pattern_vectype; @@ -5328,12 +5338,13 @@ vect_pattern_recog_1 (vect_recog_func *recog_func, stmt_vec_info stmt_info) gimple_stmt_iterator gsi; for (gsi = gsi_start (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)); !gsi_end_p (gsi); gsi_next (&gsi)) - vect_pattern_recog_1 (recog_func, vinfo->lookup_stmt (gsi_stmt (gsi))); + vect_pattern_recog_1 (vinfo, recog_func, + vinfo->lookup_stmt (gsi_stmt (gsi))); return; } gcc_assert (!STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)); - pattern_stmt = recog_func->fn (stmt_info, &pattern_vectype); + pattern_stmt = recog_func->fn (vinfo, stmt_info, &pattern_vectype); if (!pattern_stmt) { /* Clear any half-formed pattern definition sequence. */ @@ -5341,7 +5352,7 @@ vect_pattern_recog_1 (vect_recog_func *recog_func, stmt_vec_info stmt_info) return; } - loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vinfo = dyn_cast (vinfo); gcc_assert (pattern_vectype); /* Found a vectorizable pattern. */ @@ -5351,7 +5362,7 @@ vect_pattern_recog_1 (vect_recog_func *recog_func, stmt_vec_info stmt_info) recog_func->name, pattern_stmt); /* Mark the stmts that are involved in the pattern. */ - vect_mark_pattern_stmts (stmt_info, pattern_stmt, pattern_vectype); + vect_mark_pattern_stmts (vinfo, stmt_info, pattern_stmt, pattern_vectype); /* Patterns cannot be vectorized using SLP, because they change the order of computation. */ @@ -5471,7 +5482,7 @@ vect_pattern_recog (vec_info *vinfo) stmt_vec_info stmt_info = vinfo->lookup_stmt (gsi_stmt (si)); /* Scan over all generic vect_recog_xxx_pattern functions. */ for (j = 0; j < NUM_PATTERNS; j++) - vect_pattern_recog_1 (&vect_vect_recog_func_ptrs[j], + vect_pattern_recog_1 (vinfo, &vect_vect_recog_func_ptrs[j], stmt_info); } } @@ -5489,7 +5500,8 @@ vect_pattern_recog (vec_info *vinfo) /* Scan over all generic vect_recog_xxx_pattern functions. */ for (j = 0; j < NUM_PATTERNS; j++) - vect_pattern_recog_1 (&vect_vect_recog_func_ptrs[j], stmt_info); + vect_pattern_recog_1 (vinfo, + &vect_vect_recog_func_ptrs[j], stmt_info); } } } diff --git a/gcc/tree-vect-slp.c b/gcc/tree-vect-slp.c index f6331eeea86..45cf491ddd9 100644 --- a/gcc/tree-vect-slp.c +++ b/gcc/tree-vect-slp.c @@ -627,10 +627,10 @@ vect_update_shared_vectype (stmt_vec_info stmt_info, tree vectype) Used only for BB vectorization. */ static bool -vect_update_all_shared_vectypes (vec stmts) +vect_update_all_shared_vectypes (vec_info *vinfo, vec stmts) { tree vectype, nunits_vectype; - if (!vect_get_vector_types_for_stmt (stmts[0], &vectype, + if (!vect_get_vector_types_for_stmt (vinfo, stmts[0], &vectype, &nunits_vectype, stmts.length ())) return false; @@ -686,7 +686,8 @@ compatible_calls_p (gcall *call1, gcall *call2) vect_build_slp_tree. */ static bool -vect_record_max_nunits (stmt_vec_info stmt_info, unsigned int group_size, +vect_record_max_nunits (vec_info *vinfo, stmt_vec_info stmt_info, + unsigned int group_size, tree vectype, poly_uint64 *max_nunits) { if (!vectype) @@ -703,7 +704,7 @@ vect_record_max_nunits (stmt_vec_info stmt_info, unsigned int group_size, before adjusting *max_nunits for basic-block vectorization. 
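
   The recurring replacement in the hunks above is one and the same idiom;
   spelled out with the template argument that this plain-text rendering
   drops from dyn_cast:

     // Before: reach the loop_vec_info through the stmt's back-pointer.
     loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
     // After: derive it from the vec_info parameter that every caller
     // now passes down.
     loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
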
 */
   poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
   unsigned HOST_WIDE_INT const_nunits;
-  if (STMT_VINFO_BB_VINFO (stmt_info)
+  if (is_a <bb_vec_info> (vinfo)
       && (!nunits.is_constant (&const_nunits)
	   || const_nunits > group_size))
     {
@@ -764,7 +765,7 @@ vect_two_operations_perm_ok_p (vec<stmt_vec_info> stmts,
    to (B1 <= A1 ? X1 : Y1); or be inverted to (A1 < B1) ? Y1 : X1.  */

 static bool
-vect_build_slp_tree_1 (unsigned char *swap,
+vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
		       vec<stmt_vec_info> stmts, unsigned int group_size,
		       poly_uint64 *max_nunits, bool *matches,
		       bool *two_operators)
@@ -789,7 +790,6 @@ vect_build_slp_tree_1 (unsigned char *swap,
   stmt_vec_info stmt_info;
   FOR_EACH_VEC_ELT (stmts, i, stmt_info)
     {
-      vec_info *vinfo = stmt_info->vinfo;
       gimple *stmt = stmt_info->stmt;
       swap[i] = 0;
       matches[i] = false;
@@ -822,10 +822,10 @@ vect_build_slp_tree_1 (unsigned char *swap,
	}

       tree nunits_vectype;
-      if (!vect_get_vector_types_for_stmt (stmt_info, &vectype,
+      if (!vect_get_vector_types_for_stmt (vinfo, stmt_info, &vectype,
					   &nunits_vectype, group_size)
	  || (nunits_vectype
-	      && !vect_record_max_nunits (stmt_info, group_size,
+	      && !vect_record_max_nunits (vinfo, stmt_info, group_size,
					  nunits_vectype, max_nunits)))
	{
	  /* Fatal mismatch.  */
@@ -1256,7 +1256,8 @@ vect_build_slp_tree_2 (vec_info *vinfo,
     {
       tree scalar_type = TREE_TYPE (PHI_RESULT (stmt));
       tree vectype = get_vectype_for_scalar_type (vinfo, scalar_type);
-      if (!vect_record_max_nunits (stmt_info, group_size, vectype, max_nunits))
+      if (!vect_record_max_nunits (vinfo, stmt_info, group_size, vectype,
+				   max_nunits))
	return NULL;

       vect_def_type def_type = STMT_VINFO_DEF_TYPE (stmt_info);
@@ -1288,7 +1289,7 @@ vect_build_slp_tree_2 (vec_info *vinfo,

   bool two_operators = false;
   unsigned char *swap = XALLOCAVEC (unsigned char, group_size);
-  if (!vect_build_slp_tree_1 (swap, stmts, group_size,
+  if (!vect_build_slp_tree_1 (vinfo, swap, stmts, group_size,
			      &this_max_nunits, matches, &two_operators))
     return NULL;

@@ -1398,7 +1399,8 @@ vect_build_slp_tree_2 (vec_info *vinfo,
		  if (SLP_TREE_DEF_TYPE (grandchild) != vect_external_def)
		    break;
	      if (!grandchild
-		  && vect_update_all_shared_vectypes (oprnd_info->def_stmts))
+		  && vect_update_all_shared_vectypes (vinfo,
+						      oprnd_info->def_stmts))
		{
		  /* Roll back.  */
		  this_tree_size = old_tree_size;
@@ -1440,7 +1442,7 @@ vect_build_slp_tree_2 (vec_info *vinfo,
		 scalar version.  */
	      && !is_pattern_stmt_p (stmt_info)
	      && !oprnd_info->any_pattern
-	      && vect_update_all_shared_vectypes (oprnd_info->def_stmts))
+	      && vect_update_all_shared_vectypes (vinfo, oprnd_info->def_stmts))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
@@ -1540,7 +1542,7 @@ vect_build_slp_tree_2 (vec_info *vinfo,
		    break;
	      if (!grandchild
		  && (vect_update_all_shared_vectypes
-			(oprnd_info->def_stmts)))
+			(vinfo, oprnd_info->def_stmts)))
		{
		  /* Roll back.  */
		  this_tree_size = old_tree_size;
@@ -1922,7 +1924,7 @@ vect_gather_slp_loads (slp_instance inst, slp_tree node)
    SLP_INSTN are supported.  */

 static bool
-vect_supported_load_permutation_p (slp_instance slp_instn)
+vect_supported_load_permutation_p (vec_info *vinfo, slp_instance slp_instn)
 {
   unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
   unsigned int i, j, k, next;
@@ -1966,7 +1968,7 @@ vect_supported_load_permutation_p (slp_instance slp_instn)
   /* In basic block vectorization we allow any subchain of an interleaving
      chain.
      FORNOW: not supported in loop SLP because of realignment complications.
*/ - if (STMT_VINFO_BB_VINFO (stmt_info)) + if (is_a (vinfo)) { /* Check whether the loads in an instance form a subchain and thus no permutation is necessary. */ @@ -2015,7 +2017,7 @@ vect_supported_load_permutation_p (slp_instance slp_instn) /* Verify the permutation can be generated. */ vec tem; unsigned n_perms; - if (!vect_transform_slp_perm_load (node, tem, NULL, + if (!vect_transform_slp_perm_load (vinfo, node, tem, NULL, 1, slp_instn, true, &n_perms)) { if (dump_enabled_p ()) @@ -2038,10 +2040,10 @@ vect_supported_load_permutation_p (slp_instance slp_instn) poly_uint64 test_vf = force_common_multiple (SLP_INSTANCE_UNROLLING_FACTOR (slp_instn), LOOP_VINFO_VECT_FACTOR - (STMT_VINFO_LOOP_VINFO (stmt_info))); + (as_a (vinfo))); FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node) if (node->load_permutation.exists () - && !vect_transform_slp_perm_load (node, vNULL, NULL, test_vf, + && !vect_transform_slp_perm_load (vinfo, node, vNULL, NULL, test_vf, slp_instn, true, &n_perms)) return false; @@ -2321,7 +2323,7 @@ vect_analyze_slp_instance (vec_info *vinfo, if (loads_permuted) { - if (!vect_supported_load_permutation_p (new_instance)) + if (!vect_supported_load_permutation_p (vinfo, new_instance)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, @@ -2569,7 +2571,8 @@ vect_make_slp_decision (loop_vec_info loop_vinfo) can't be SLPed) in the tree rooted at NODE. Mark such stmts as HYBRID. */ static void -vect_detect_hybrid_slp_stmts (slp_tree node, unsigned i, slp_vect_type stype, +vect_detect_hybrid_slp_stmts (loop_vec_info loop_vinfo, slp_tree node, + unsigned i, slp_vect_type stype, hash_map &visited) { stmt_vec_info stmt_vinfo = SLP_TREE_SCALAR_STMTS (node)[i]; @@ -2577,7 +2580,6 @@ vect_detect_hybrid_slp_stmts (slp_tree node, unsigned i, slp_vect_type stype, gimple *use_stmt; stmt_vec_info use_vinfo; slp_tree child; - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo); int j; /* We need to union stype over the incoming graph edges but we still @@ -2637,7 +2639,7 @@ vect_detect_hybrid_slp_stmts (slp_tree node, unsigned i, slp_vect_type stype, FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), j, child) if (SLP_TREE_DEF_TYPE (child) != vect_external_def && SLP_TREE_DEF_TYPE (child) != vect_constant_def) - vect_detect_hybrid_slp_stmts (child, i, stype, visited); + vect_detect_hybrid_slp_stmts (loop_vinfo, child, i, stype, visited); } /* Helpers for vect_detect_hybrid_slp walking pattern stmt uses. */ @@ -2730,7 +2732,8 @@ vect_detect_hybrid_slp (loop_vec_info loop_vinfo) if (j < SLP_INSTANCE_GROUP_SIZE (instance)) { any = true; - vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance), + vect_detect_hybrid_slp_stmts (loop_vinfo, + SLP_INSTANCE_TREE (instance), j, pure_slp, visited); } if (!any) @@ -2820,7 +2823,8 @@ vect_slp_analyze_node_operations_1 (vec_info *vinfo, slp_tree node, } bool dummy; - return vect_analyze_stmt (stmt_info, &dummy, node, node_instance, cost_vec); + return vect_analyze_stmt (vinfo, stmt_info, &dummy, + node, node_instance, cost_vec); } /* Try to build NODE from scalars, returning true on success. @@ -2987,7 +2991,7 @@ vect_slp_analyze_operations (vec_info *vinfo) visited.add (*x); i++; - add_stmt_costs (vinfo->target_cost_data, &cost_vec); + add_stmt_costs (vinfo, vinfo->target_cost_data, &cost_vec); cost_vec.release (); } } @@ -3001,7 +3005,7 @@ vect_slp_analyze_operations (vec_info *vinfo) update LIFE according to uses of NODE. 
*/ static void -vect_bb_slp_scalar_cost (basic_block bb, +vect_bb_slp_scalar_cost (vec_info *vinfo, basic_block bb, slp_tree node, vec *life, stmt_vector_for_cost *cost_vec, hash_set &visited) @@ -3016,7 +3020,6 @@ vect_bb_slp_scalar_cost (basic_block bb, FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt_info) { gimple *stmt = stmt_info->stmt; - vec_info *vinfo = stmt_info->vinfo; ssa_op_iter op_iter; def_operand_p def_p; @@ -3074,7 +3077,7 @@ vect_bb_slp_scalar_cost (basic_block bb, /* Do not directly pass LIFE to the recursive call, copy it to confine changes in the callee to the current child/subtree. */ subtree_life.safe_splice (*life); - vect_bb_slp_scalar_cost (bb, child, &subtree_life, cost_vec, + vect_bb_slp_scalar_cost (vinfo, bb, child, &subtree_life, cost_vec, visited); subtree_life.truncate (0); } @@ -3100,12 +3103,12 @@ vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo) { auto_vec life; life.safe_grow_cleared (SLP_INSTANCE_GROUP_SIZE (instance)); - vect_bb_slp_scalar_cost (BB_VINFO_BB (bb_vinfo), + vect_bb_slp_scalar_cost (bb_vinfo, BB_VINFO_BB (bb_vinfo), SLP_INSTANCE_TREE (instance), &life, &scalar_costs, visited); } void *target_cost_data = init_cost (NULL); - add_stmt_costs (target_cost_data, &scalar_costs); + add_stmt_costs (bb_vinfo, target_cost_data, &scalar_costs); scalar_costs.release (); unsigned dummy; finish_cost (target_cost_data, &dummy, &scalar_cost, &dummy); @@ -3258,8 +3261,8 @@ vect_slp_analyze_bb_1 (bb_vec_info bb_vinfo, int n_stmts, bool &fatal) dependence in the SLP instances. */ for (i = 0; BB_VINFO_SLP_INSTANCES (bb_vinfo).iterate (i, &instance); ) { - if (! vect_slp_analyze_and_verify_instance_alignment (instance) - || ! vect_slp_analyze_instance_dependence (instance)) + if (! vect_slp_analyze_and_verify_instance_alignment (bb_vinfo, instance) + || ! vect_slp_analyze_instance_dependence (bb_vinfo, instance)) { slp_tree node = SLP_INSTANCE_TREE (instance); stmt_vec_info stmt_info = SLP_TREE_SCALAR_STMTS (node)[0]; @@ -3497,7 +3500,8 @@ vect_slp_bb (basic_block bb) /* Return 1 if vector type STMT_VINFO is a boolean vector. */ static bool -vect_mask_constant_operand_p (stmt_vec_info stmt_vinfo, unsigned op_num) +vect_mask_constant_operand_p (vec_info *vinfo, + stmt_vec_info stmt_vinfo, unsigned op_num) { enum tree_code code = gimple_expr_code (stmt_vinfo->stmt); tree op, vectype; @@ -3510,7 +3514,7 @@ vect_mask_constant_operand_p (stmt_vec_info stmt_vinfo, unsigned op_num) gassign *stmt = as_a (stmt_vinfo->stmt); op = gimple_assign_rhs1 (stmt); - if (!vect_is_simple_use (op, stmt_vinfo->vinfo, &dt, &vectype)) + if (!vect_is_simple_use (op, vinfo, &dt, &vectype)) gcc_unreachable (); return !vectype || VECTOR_BOOLEAN_TYPE_P (vectype); @@ -3534,7 +3538,7 @@ vect_mask_constant_operand_p (stmt_vec_info stmt_vinfo, unsigned op_num) op = TREE_OPERAND (cond, 0); } - if (!vect_is_simple_use (op, stmt_vinfo->vinfo, &dt, &vectype)) + if (!vect_is_simple_use (op, vinfo, &dt, &vectype)) gcc_unreachable (); return !vectype || VECTOR_BOOLEAN_TYPE_P (vectype); @@ -3663,12 +3667,12 @@ duplicate_and_interleave (vec_info *vinfo, gimple_seq *seq, tree vector_type, operands. 
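
   A hedged example of an SLP operand that vect_get_constant_vectors
   (defined next) must materialize (illustrative C, not from the patch):
   the constants 1 and 2 of the two adds become the invariant vector
   { 1, 2, 1, 2, ... }, built once outside the vectorized code.

     void
     f (int *restrict a, const int *restrict b)
     {
       a[0] = b[0] + 1;   // one SLP group of two adds; the constant
       a[1] = b[1] + 2;   // operands form the vector { 1, 2 }
     }
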
*/ static void -vect_get_constant_vectors (slp_tree slp_node, unsigned op_num, +vect_get_constant_vectors (vec_info *vinfo, + slp_tree slp_node, unsigned op_num, vec *vec_oprnds) { slp_tree op_node = SLP_TREE_CHILDREN (slp_node)[op_num]; stmt_vec_info stmt_vinfo = SLP_TREE_SCALAR_STMTS (slp_node)[0]; - vec_info *vinfo = stmt_vinfo->vinfo; unsigned HOST_WIDE_INT nunits; tree vec_cst; unsigned j, number_of_places_left_in_vector; @@ -3688,7 +3692,7 @@ vect_get_constant_vectors (slp_tree slp_node, unsigned op_num, /* Check if vector type is a boolean vector. */ tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo); if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op)) - && vect_mask_constant_operand_p (stmt_vinfo, op_num)) + && vect_mask_constant_operand_p (vinfo, stmt_vinfo, op_num)) vector_type = truth_type_for (stmt_vectype); else vector_type = get_vectype_for_scalar_type (vinfo, TREE_TYPE (op), op_node); @@ -3797,8 +3801,8 @@ vect_get_constant_vectors (slp_tree slp_node, unsigned op_num, constant_p = false; if (TREE_CODE (orig_op) == SSA_NAME && !SSA_NAME_IS_DEFAULT_DEF (orig_op) - && STMT_VINFO_BB_VINFO (stmt_vinfo) - && (STMT_VINFO_BB_VINFO (stmt_vinfo)->bb + && is_a (vinfo) + && (as_a (vinfo)->bb == gimple_bb (SSA_NAME_DEF_STMT (orig_op)))) place_after_defs = true; @@ -3823,12 +3827,12 @@ vect_get_constant_vectors (slp_tree slp_node, unsigned op_num, stmt_vec_info last_stmt_info = vect_find_last_scalar_stmt_in_slp (slp_node); gsi = gsi_for_stmt (last_stmt_info->stmt); - init = vect_init_vector (stmt_vinfo, vec_cst, vector_type, - &gsi); + init = vect_init_vector (vinfo, stmt_vinfo, vec_cst, + vector_type, &gsi); } else - init = vect_init_vector (stmt_vinfo, vec_cst, vector_type, - NULL); + init = vect_init_vector (vinfo, stmt_vinfo, vec_cst, + vector_type, NULL); if (ctor_seq != NULL) { gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (init)); @@ -3902,7 +3906,8 @@ vect_get_slp_vect_defs (slp_tree slp_node, vec *vec_oprnds) vect_get_slp_vect_defs () to retrieve them. */ void -vect_get_slp_defs (slp_tree slp_node, vec > *vec_oprnds, unsigned n) +vect_get_slp_defs (vec_info *vinfo, + slp_tree slp_node, vec > *vec_oprnds, unsigned n) { if (n == -1U) n = SLP_TREE_CHILDREN (slp_node).length (); @@ -3921,7 +3926,7 @@ vect_get_slp_defs (slp_tree slp_node, vec > *vec_oprnds, unsigned n) vect_get_slp_vect_defs (child, &vec_defs); } else - vect_get_constant_vectors (slp_node, i, &vec_defs); + vect_get_constant_vectors (vinfo, slp_node, i, &vec_defs); vec_oprnds->quick_push (vec_defs); } @@ -3933,13 +3938,13 @@ vect_get_slp_defs (slp_tree slp_node, vec > *vec_oprnds, unsigned n) SLP_NODE_INSTANCE. */ bool -vect_transform_slp_perm_load (slp_tree node, vec dr_chain, +vect_transform_slp_perm_load (vec_info *vinfo, + slp_tree node, vec dr_chain, gimple_stmt_iterator *gsi, poly_uint64 vf, slp_instance slp_node_instance, bool analyze_only, unsigned *n_perms) { stmt_vec_info stmt_info = SLP_TREE_SCALAR_STMTS (node)[0]; - vec_info *vinfo = stmt_info->vinfo; int vec_index = 0; tree vectype = STMT_VINFO_VECTYPE (stmt_info); unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance); @@ -4116,7 +4121,8 @@ vect_transform_slp_perm_load (slp_tree node, vec dr_chain, first_vec, second_vec, mask_vec); perm_stmt_info - = vect_finish_stmt_generation (stmt_info, perm_stmt, + = vect_finish_stmt_generation (vinfo, + stmt_info, perm_stmt, gsi); } else @@ -4143,7 +4149,8 @@ vect_transform_slp_perm_load (slp_tree node, vec dr_chain, /* Vectorize SLP instance tree in postorder. 
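
   The recursion below is a plain postorder walk; as an illustrative
   skeleton only (heavily simplified, not the real function):

     static void
     schedule_sketch (vec_info *vinfo, slp_tree node)
     {
       unsigned i;
       slp_tree child;
       // Children first: their vector defs must exist before the vector
       // stmts for NODE itself are emitted.
       FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
	 schedule_sketch (vinfo, child);
       // ... emit the vector statements for NODE here ...
     }
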
*/ static void -vect_schedule_slp_instance (slp_tree node, slp_instance instance) +vect_schedule_slp_instance (vec_info *vinfo, + slp_tree node, slp_instance instance) { gimple_stmt_iterator si; stmt_vec_info stmt_info; @@ -4161,7 +4168,7 @@ vect_schedule_slp_instance (slp_tree node, slp_instance instance) return; FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child) - vect_schedule_slp_instance (child, instance); + vect_schedule_slp_instance (vinfo, child, instance); /* Push SLP node def-type to stmts. */ FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child) @@ -4219,11 +4226,11 @@ vect_schedule_slp_instance (slp_tree node, slp_instance instance) vec v1; unsigned j; tree tmask = NULL_TREE; - vect_transform_stmt (stmt_info, &si, node, instance); + vect_transform_stmt (vinfo, stmt_info, &si, node, instance); v0 = SLP_TREE_VEC_STMTS (node).copy (); SLP_TREE_VEC_STMTS (node).truncate (0); gimple_assign_set_rhs_code (stmt, ocode); - vect_transform_stmt (stmt_info, &si, node, instance); + vect_transform_stmt (vinfo, stmt_info, &si, node, instance); gimple_assign_set_rhs_code (stmt, code0); v1 = SLP_TREE_VEC_STMTS (node).copy (); SLP_TREE_VEC_STMTS (node).truncate (0); @@ -4261,7 +4268,7 @@ vect_schedule_slp_instance (slp_tree node, slp_instance instance) gimple_assign_lhs (v1[j]->stmt), tmask); SLP_TREE_VEC_STMTS (node).quick_push - (vect_finish_stmt_generation (stmt_info, vstmt, &si)); + (vect_finish_stmt_generation (vinfo, stmt_info, vstmt, &si)); } v0.release (); v1.release (); @@ -4269,7 +4276,7 @@ vect_schedule_slp_instance (slp_tree node, slp_instance instance) } } if (!done_p) - vect_transform_stmt (stmt_info, &si, node, instance); + vect_transform_stmt (vinfo, stmt_info, &si, node, instance); /* Restore stmt def-types. */ FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child) @@ -4287,7 +4294,8 @@ vect_schedule_slp_instance (slp_tree node, slp_instance instance) SLP instances may refer to the same scalar stmt. */ static void -vect_remove_slp_scalar_calls (slp_tree node, hash_set &visited) +vect_remove_slp_scalar_calls (vec_info *vinfo, + slp_tree node, hash_set &visited) { gimple *new_stmt; gimple_stmt_iterator gsi; @@ -4303,7 +4311,7 @@ vect_remove_slp_scalar_calls (slp_tree node, hash_set &visited) return; FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child) - vect_remove_slp_scalar_calls (child, visited); + vect_remove_slp_scalar_calls (vinfo, child, visited); FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt_info) { @@ -4316,16 +4324,16 @@ vect_remove_slp_scalar_calls (slp_tree node, hash_set &visited) lhs = gimple_call_lhs (stmt); new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs))); gsi = gsi_for_stmt (stmt); - stmt_info->vinfo->replace_stmt (&gsi, stmt_info, new_stmt); + vinfo->replace_stmt (&gsi, stmt_info, new_stmt); SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt; } } static void -vect_remove_slp_scalar_calls (slp_tree node) +vect_remove_slp_scalar_calls (vec_info *vinfo, slp_tree node) { hash_set visited; - vect_remove_slp_scalar_calls (node, visited); + vect_remove_slp_scalar_calls (vinfo, node, visited); } /* Vectorize the instance root. */ @@ -4392,7 +4400,7 @@ vect_schedule_slp (vec_info *vinfo) { slp_tree node = SLP_INSTANCE_TREE (instance); /* Schedule the tree of INSTANCE. 
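
   The two-operator handling above (vect_transform_stmt invoked once per
   opcode, then a permute of the two results) covers SLP groups that mix
   two tree codes; a hedged scalar example (not from the patch):

     void
     f (int *restrict a, const int *restrict b, const int *restrict c)
     {
       a[0] = b[0] + c[0];   // one SLP group, two codes: vectorized as a
       a[1] = b[1] - c[1];   // vector add and a vector sub, then blended
     }
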
*/ - vect_schedule_slp_instance (node, instance); + vect_schedule_slp_instance (vinfo, node, instance); if (SLP_INSTANCE_ROOT_STMT (instance)) vectorize_slp_instance_root_stmt (node, instance); @@ -4416,7 +4424,7 @@ vect_schedule_slp (vec_info *vinfo) stmts starting from the SLP tree root if they have no uses. */ if (is_a (vinfo)) - vect_remove_slp_scalar_calls (root); + vect_remove_slp_scalar_calls (vinfo, root); for (j = 0; SLP_TREE_SCALAR_STMTS (root).iterate (j, &store_info) && j < SLP_INSTANCE_GROUP_SIZE (instance); j++) diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c index 1984787bac4..33210e1485b 100644 --- a/gcc/tree-vect-stmts.c +++ b/gcc/tree-vect-stmts.c @@ -70,11 +70,11 @@ stmt_vectype (class _stmt_vec_info *stmt_info) /* Return TRUE iff the given statement is in an inner loop relative to the loop being vectorized. */ bool -stmt_in_inner_loop_p (class _stmt_vec_info *stmt_info) +stmt_in_inner_loop_p (vec_info *vinfo, class _stmt_vec_info *stmt_info) { gimple *stmt = STMT_VINFO_STMT (stmt_info); basic_block bb = gimple_bb (stmt); - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vec_info loop_vinfo = dyn_cast (vinfo); class loop* loop; if (!loop_vinfo) @@ -124,7 +124,8 @@ create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems) with scalar destination SCALAR_DEST. */ static tree -read_vector_array (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, +read_vector_array (vec_info *vinfo, + stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, tree scalar_dest, tree array, unsigned HOST_WIDE_INT n) { tree vect_type, vect, vect_name, array_ref; @@ -140,7 +141,7 @@ read_vector_array (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, new_stmt = gimple_build_assign (vect, array_ref); vect_name = make_ssa_name (vect, new_stmt); gimple_assign_set_lhs (new_stmt, vect_name); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); return vect_name; } @@ -150,7 +151,8 @@ read_vector_array (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, The store is part of the vectorization of STMT_INFO. */ static void -write_vector_array (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, +write_vector_array (vec_info *vinfo, + stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, tree vect, tree array, unsigned HOST_WIDE_INT n) { tree array_ref; @@ -161,7 +163,7 @@ write_vector_array (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, NULL_TREE, NULL_TREE); new_stmt = gimple_build_assign (array_ref, vect); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); } /* PTR is a pointer to an array of type TYPE. Return a representation @@ -183,12 +185,12 @@ create_array_ref (tree type, tree ptr, tree alias_ptr_type) Emit the clobber before *GSI. */ static void -vect_clobber_variable (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, - tree var) +vect_clobber_variable (vec_info *vinfo, stmt_vec_info stmt_info, + gimple_stmt_iterator *gsi, tree var) { tree clobber = build_clobber (TREE_TYPE (var)); gimple *new_stmt = gimple_build_assign (var, clobber); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); } /* Utility functions used by vect_mark_stmts_to_be_vectorized. */ @@ -787,11 +789,11 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo, bool *fatal) /* Compute the prologue cost for invariant or constant operands. 
*/ static unsigned -vect_prologue_cost_for_slp_op (slp_tree node, stmt_vec_info stmt_info, +vect_prologue_cost_for_slp_op (vec_info *vinfo, + slp_tree node, stmt_vec_info stmt_info, unsigned opno, enum vect_def_type dt, stmt_vector_for_cost *cost_vec) { - vec_info *vinfo = stmt_info->vinfo; gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0]->stmt; tree op = gimple_op (stmt, opno); unsigned prologue_cost = 0; @@ -856,7 +858,8 @@ vect_prologue_cost_for_slp_op (slp_tree node, stmt_vec_info stmt_info, be generated for the single vector op. We will handle that shortly. */ static void -vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies, +vect_model_simple_cost (vec_info *vinfo, + stmt_vec_info stmt_info, int ncopies, enum vect_def_type *dt, int ndts, slp_tree node, @@ -884,9 +887,10 @@ vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies, enum vect_def_type dt; if (!op || op == lhs) continue; - if (vect_is_simple_use (op, stmt_info->vinfo, &dt) + if (vect_is_simple_use (op, vinfo, &dt) && (dt == vect_constant_def || dt == vect_external_def)) - prologue_cost += vect_prologue_cost_for_slp_op (node, stmt_info, + prologue_cost += vect_prologue_cost_for_slp_op (vinfo, node, + stmt_info, i, dt, cost_vec); } } @@ -990,7 +994,7 @@ cfun_returns (tree decl) has the overhead of the grouped access attributed to it. */ static void -vect_model_store_cost (stmt_vec_info stmt_info, int ncopies, +vect_model_store_cost (vec_info *vinfo, stmt_vec_info stmt_info, int ncopies, enum vect_def_type dt, vect_memory_access_type memory_access_type, vec_load_store_type vls_type, slp_tree slp_node, @@ -1007,7 +1011,8 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies, if (vls_type == VLS_STORE_INVARIANT) { if (slp_node) - prologue_cost += vect_prologue_cost_for_slp_op (slp_node, stmt_info, + prologue_cost += vect_prologue_cost_for_slp_op (vinfo, slp_node, + stmt_info, 1, dt, cost_vec); else prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec, @@ -1056,7 +1061,7 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies, scalar_store, stmt_info, 0, vect_body); } else - vect_get_store_cost (stmt_info, ncopies, &inside_cost, cost_vec); + vect_get_store_cost (vinfo, stmt_info, ncopies, &inside_cost, cost_vec); if (memory_access_type == VMAT_ELEMENTWISE || memory_access_type == VMAT_STRIDED_SLP) @@ -1108,13 +1113,13 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies, /* Calculate cost of DR's memory access. */ void -vect_get_store_cost (stmt_vec_info stmt_info, int ncopies, +vect_get_store_cost (vec_info *vinfo, stmt_vec_info stmt_info, int ncopies, unsigned int *inside_cost, stmt_vector_for_cost *body_cost_vec) { dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info); int alignment_support_scheme - = vect_supportable_dr_alignment (dr_info, false); + = vect_supportable_dr_alignment (vinfo, dr_info, false); switch (alignment_support_scheme) { @@ -1168,7 +1173,8 @@ vect_get_store_cost (stmt_vec_info stmt_info, int ncopies, access scheme chosen. 
*/ static void -vect_model_load_cost (stmt_vec_info stmt_info, unsigned ncopies, +vect_model_load_cost (vec_info *vinfo, + stmt_vec_info stmt_info, unsigned ncopies, vect_memory_access_type memory_access_type, slp_instance instance, slp_tree slp_node, @@ -1193,7 +1199,7 @@ vect_model_load_cost (stmt_vec_info stmt_info, unsigned ncopies, unsigned assumed_nunits = vect_nunits_for_cost (STMT_VINFO_VECTYPE (first_stmt_info)); unsigned slp_vf = (ncopies * assumed_nunits) / instance->group_size; - vect_transform_slp_perm_load (slp_node, vNULL, NULL, + vect_transform_slp_perm_load (vinfo, slp_node, vNULL, NULL, slp_vf, instance, true, &n_perms); inside_cost += record_stmt_cost (cost_vec, n_perms, vec_perm, @@ -1269,7 +1275,7 @@ vect_model_load_cost (stmt_vec_info stmt_info, unsigned ncopies, scalar_load, stmt_info, 0, vect_body); } else - vect_get_load_cost (stmt_info, ncopies, first_stmt_p, + vect_get_load_cost (vinfo, stmt_info, ncopies, first_stmt_p, &inside_cost, &prologue_cost, cost_vec, cost_vec, true); if (memory_access_type == VMAT_ELEMENTWISE @@ -1286,7 +1292,7 @@ vect_model_load_cost (stmt_vec_info stmt_info, unsigned ncopies, /* Calculate cost of DR's memory access. */ void -vect_get_load_cost (stmt_vec_info stmt_info, int ncopies, +vect_get_load_cost (vec_info *vinfo, stmt_vec_info stmt_info, int ncopies, bool add_realign_cost, unsigned int *inside_cost, unsigned int *prologue_cost, stmt_vector_for_cost *prologue_cost_vec, @@ -1295,7 +1301,7 @@ vect_get_load_cost (stmt_vec_info stmt_info, int ncopies, { dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info); int alignment_support_scheme - = vect_supportable_dr_alignment (dr_info, false); + = vect_supportable_dr_alignment (vinfo, dr_info, false); switch (alignment_support_scheme) { @@ -1402,14 +1408,14 @@ vect_get_load_cost (stmt_vec_info stmt_info, int ncopies, the loop preheader for the vectorized stmt STMT_VINFO. */ static void -vect_init_vector_1 (stmt_vec_info stmt_vinfo, gimple *new_stmt, +vect_init_vector_1 (vec_info *vinfo, stmt_vec_info stmt_vinfo, gimple *new_stmt, gimple_stmt_iterator *gsi) { if (gsi) - vect_finish_stmt_generation (stmt_vinfo, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_vinfo, new_stmt, gsi); else { - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo); + loop_vec_info loop_vinfo = dyn_cast (vinfo); if (loop_vinfo) { @@ -1426,7 +1432,7 @@ vect_init_vector_1 (stmt_vec_info stmt_vinfo, gimple *new_stmt, } else { - bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo); + bb_vec_info bb_vinfo = dyn_cast (vinfo); basic_block bb; gimple_stmt_iterator gsi_bb_start; @@ -1453,7 +1459,7 @@ vect_init_vector_1 (stmt_vec_info stmt_vinfo, gimple *new_stmt, It will be used in the vectorization of STMT_INFO. 
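
   When the caller passes no insertion iterator, the init statement is
   placed on the loop preheader edge, so a loop invariant is materialized
   exactly once; a hedged scalar picture (not from the patch):

     void
     f (int *restrict a, int x, int n)
     {
       // The vector splat { x, x, ... } is built in the preheader;
       // the vectorized loop body only consumes it.
       for (int i = 0; i < n; ++i)
	 a[i] = x;
     }
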
*/ tree -vect_init_vector (stmt_vec_info stmt_info, tree val, tree type, +vect_init_vector (vec_info *vinfo, stmt_vec_info stmt_info, tree val, tree type, gimple_stmt_iterator *gsi) { gimple *init_stmt; @@ -1479,7 +1485,7 @@ vect_init_vector (stmt_vec_info stmt_info, tree val, tree type, new_temp = make_ssa_name (TREE_TYPE (type)); init_stmt = gimple_build_assign (new_temp, COND_EXPR, val, true_val, false_val); - vect_init_vector_1 (stmt_info, init_stmt, gsi); + vect_init_vector_1 (vinfo, stmt_info, init_stmt, gsi); val = new_temp; } } @@ -1498,7 +1504,7 @@ vect_init_vector (stmt_vec_info stmt_info, tree val, tree type, { init_stmt = gsi_stmt (gsi2); gsi_remove (&gsi2, false); - vect_init_vector_1 (stmt_info, init_stmt, gsi); + vect_init_vector_1 (vinfo, stmt_info, init_stmt, gsi); } } } @@ -1507,7 +1513,7 @@ vect_init_vector (stmt_vec_info stmt_info, tree val, tree type, new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_"); init_stmt = gimple_build_assign (new_temp, val); - vect_init_vector_1 (stmt_info, init_stmt, gsi); + vect_init_vector_1 (vinfo, stmt_info, init_stmt, gsi); return new_temp; } @@ -1579,12 +1585,13 @@ vect_get_vec_def_for_operand_1 (stmt_vec_info def_stmt_info, vector invariant. */ tree -vect_get_vec_def_for_operand (tree op, stmt_vec_info stmt_vinfo, tree vectype) +vect_get_vec_def_for_operand (vec_info *vinfo, + tree op, stmt_vec_info stmt_vinfo, tree vectype) { gimple *def_stmt; enum vect_def_type dt; bool is_simple_use; - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo); + loop_vec_info loop_vinfo = dyn_cast (vinfo); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, @@ -1611,7 +1618,7 @@ vect_get_vec_def_for_operand (tree op, stmt_vec_info stmt_vinfo, tree vectype) vector_type = get_vectype_for_scalar_type (loop_vinfo, TREE_TYPE (op)); gcc_assert (vector_type); - return vect_init_vector (stmt_vinfo, op, vector_type, NULL); + return vect_init_vector (vinfo, stmt_vinfo, op, vector_type, NULL); } else return vect_get_vec_def_for_operand_1 (def_stmt_info, dt); @@ -1716,7 +1723,7 @@ vect_get_vec_defs_for_stmt_copy (vec_info *vinfo, /* Get vectorized definitions for OP0 and OP1. */ void -vect_get_vec_defs (tree op0, tree op1, stmt_vec_info stmt_info, +vect_get_vec_defs (vec_info *vinfo, tree op0, tree op1, stmt_vec_info stmt_info, vec *vec_oprnds0, vec *vec_oprnds1, slp_tree slp_node) @@ -1724,7 +1731,7 @@ vect_get_vec_defs (tree op0, tree op1, stmt_vec_info stmt_info, if (slp_node) { auto_vec > vec_defs (SLP_TREE_CHILDREN (slp_node).length ()); - vect_get_slp_defs (slp_node, &vec_defs, op1 ? 2 : 1); + vect_get_slp_defs (vinfo, slp_node, &vec_defs, op1 ? 2 : 1); *vec_oprnds0 = vec_defs[0]; if (op1) *vec_oprnds1 = vec_defs[1]; @@ -1734,13 +1741,13 @@ vect_get_vec_defs (tree op0, tree op1, stmt_vec_info stmt_info, tree vec_oprnd; vec_oprnds0->create (1); - vec_oprnd = vect_get_vec_def_for_operand (op0, stmt_info); + vec_oprnd = vect_get_vec_def_for_operand (vinfo, op0, stmt_info); vec_oprnds0->quick_push (vec_oprnd); if (op1) { vec_oprnds1->create (1); - vec_oprnd = vect_get_vec_def_for_operand (op1, stmt_info); + vec_oprnd = vect_get_vec_def_for_operand (vinfo, op1, stmt_info); vec_oprnds1->quick_push (vec_oprnd); } } @@ -1751,10 +1758,9 @@ vect_get_vec_defs (tree op0, tree op1, stmt_vec_info stmt_info, statement and create and return a stmt_vec_info for it. 
*/ static stmt_vec_info -vect_finish_stmt_generation_1 (stmt_vec_info stmt_info, gimple *vec_stmt) +vect_finish_stmt_generation_1 (vec_info *vinfo, + stmt_vec_info stmt_info, gimple *vec_stmt) { - vec_info *vinfo = stmt_info->vinfo; - stmt_vec_info vec_stmt_info = vinfo->add_stmt (vec_stmt); if (dump_enabled_p ()) @@ -1777,7 +1783,8 @@ vect_finish_stmt_generation_1 (stmt_vec_info stmt_info, gimple *vec_stmt) stmt_vec_info for VEC_STMT. */ stmt_vec_info -vect_finish_replace_stmt (stmt_vec_info stmt_info, gimple *vec_stmt) +vect_finish_replace_stmt (vec_info *vinfo, + stmt_vec_info stmt_info, gimple *vec_stmt) { gimple *scalar_stmt = vect_orig_stmt (stmt_info)->stmt; gcc_assert (gimple_get_lhs (scalar_stmt) == gimple_get_lhs (vec_stmt)); @@ -1785,14 +1792,15 @@ vect_finish_replace_stmt (stmt_vec_info stmt_info, gimple *vec_stmt) gimple_stmt_iterator gsi = gsi_for_stmt (scalar_stmt); gsi_replace (&gsi, vec_stmt, true); - return vect_finish_stmt_generation_1 (stmt_info, vec_stmt); + return vect_finish_stmt_generation_1 (vinfo, stmt_info, vec_stmt); } /* Add VEC_STMT to the vectorized implementation of STMT_INFO and insert it before *GSI. Create and return a stmt_vec_info for VEC_STMT. */ stmt_vec_info -vect_finish_stmt_generation (stmt_vec_info stmt_info, gimple *vec_stmt, +vect_finish_stmt_generation (vec_info *vinfo, + stmt_vec_info stmt_info, gimple *vec_stmt, gimple_stmt_iterator *gsi) { gcc_assert (gimple_code (stmt_info->stmt) != GIMPLE_LABEL); @@ -1824,7 +1832,7 @@ vect_finish_stmt_generation (stmt_vec_info stmt_info, gimple *vec_stmt, } } gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT); - return vect_finish_stmt_generation_1 (stmt_info, vec_stmt); + return vect_finish_stmt_generation_1 (vinfo, stmt_info, vec_stmt); } /* We want to vectorize a call to combined function CFN with function @@ -1857,7 +1865,7 @@ vectorizable_internal_function (combined_fn cfn, tree fndecl, } -static tree permute_vec_elements (tree, tree, tree, stmt_vec_info, +static tree permute_vec_elements (vec_info *, tree, tree, tree, stmt_vec_info, gimple_stmt_iterator *); /* Check whether a load or store statement in the loop described by @@ -2123,10 +2131,10 @@ vect_use_strided_gather_scatters_p (stmt_vec_info stmt_info, is negative, 0 if it is zero, and 1 if it is greater than zero. */ static int -compare_step_with_zero (stmt_vec_info stmt_info) +compare_step_with_zero (vec_info *vinfo, stmt_vec_info stmt_info) { dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info); - return tree_int_cst_compare (vect_dr_behavior (dr_info)->step, + return tree_int_cst_compare (vect_dr_behavior (vinfo, dr_info)->step, size_zero_node); } @@ -2154,7 +2162,8 @@ perm_mask_for_reverse (tree vectype) accesses consecutive elements with a negative step. */ static vect_memory_access_type -get_negative_load_store_type (stmt_vec_info stmt_info, tree vectype, +get_negative_load_store_type (vec_info *vinfo, + stmt_vec_info stmt_info, tree vectype, vec_load_store_type vls_type, unsigned int ncopies) { @@ -2169,7 +2178,8 @@ get_negative_load_store_type (stmt_vec_info stmt_info, tree vectype, return VMAT_ELEMENTWISE; } - alignment_support_scheme = vect_supportable_dr_alignment (dr_info, false); + alignment_support_scheme = vect_supportable_dr_alignment (vinfo, + dr_info, false); if (alignment_support_scheme != dr_aligned && alignment_support_scheme != dr_unaligned_supported) { @@ -2286,13 +2296,13 @@ vector_vector_composition_type (tree vtype, poly_uint64 nelts, tree *ptype) as well as at the end. 
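
   A hedged example of the grouped access with gaps that this
   classification has to reason about (illustrative C, not from the patch):

     void
     f (int *restrict x, const int *restrict s, int n)
     {
       for (int i = 0; i < n; ++i)
	 // A load group of size 3 with one unused member: a full vector
	 // load of the group can read past the last accessed element,
	 // which is the overrun the code below checks for.
	 x[i] = s[3 * i] + s[3 * i + 1];
     }
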
*/ static bool -get_group_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp, +get_group_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info, + tree vectype, bool slp, bool masked_p, vec_load_store_type vls_type, vect_memory_access_type *memory_access_type, gather_scatter_info *gs_info) { - vec_info *vinfo = stmt_info->vinfo; - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vec_info loop_vinfo = dyn_cast (vinfo); class loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL; stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info); dr_vec_info *first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info); @@ -2360,7 +2370,8 @@ get_group_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp, if (overrun_p && !masked_p && (((alignment_support_scheme - = vect_supportable_dr_alignment (first_dr_info, false))) + = vect_supportable_dr_alignment (vinfo, + first_dr_info, false))) == dr_aligned || alignment_support_scheme == dr_unaligned_supported) && known_eq (nunits, (group_size - gap) * 2) @@ -2376,10 +2387,10 @@ get_group_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp, "Peeling for outer loop is not supported\n"); return false; } - int cmp = compare_step_with_zero (stmt_info); + int cmp = compare_step_with_zero (vinfo, stmt_info); if (cmp < 0) *memory_access_type = get_negative_load_store_type - (stmt_info, vectype, vls_type, 1); + (vinfo, stmt_info, vectype, vls_type, 1); else { gcc_assert (!loop_vinfo || cmp > 0); @@ -2408,7 +2419,7 @@ get_group_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp, if (!STMT_VINFO_STRIDED_P (first_stmt_info) && (can_overrun_p || !would_overrun_p) - && compare_step_with_zero (stmt_info) > 0) + && compare_step_with_zero (vinfo, stmt_info) > 0) { /* First cope with the degenerate case of a single-element vector. */ @@ -2497,14 +2508,14 @@ get_group_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp, NCOPIES is the number of vector statements that will be needed. */ static bool -get_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp, +get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info, + tree vectype, bool slp, bool masked_p, vec_load_store_type vls_type, unsigned int ncopies, vect_memory_access_type *memory_access_type, gather_scatter_info *gs_info) { - vec_info *vinfo = stmt_info->vinfo; - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vec_info loop_vinfo = dyn_cast (vinfo); poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype); if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) { @@ -2524,7 +2535,7 @@ get_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp, } else if (STMT_VINFO_GROUPED_ACCESS (stmt_info)) { - if (!get_group_load_store_type (stmt_info, vectype, slp, masked_p, + if (!get_group_load_store_type (vinfo, stmt_info, vectype, slp, masked_p, vls_type, memory_access_type, gs_info)) return false; } @@ -2540,10 +2551,10 @@ get_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp, } else { - int cmp = compare_step_with_zero (stmt_info); + int cmp = compare_step_with_zero (vinfo, stmt_info); if (cmp < 0) *memory_access_type = get_negative_load_store_type - (stmt_info, vectype, vls_type, ncopies); + (vinfo, stmt_info, vectype, vls_type, ncopies); else if (cmp == 0) { gcc_assert (vls_type == VLS_LOAD); @@ -2590,11 +2601,10 @@ get_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp, in *MASK_VECTYPE_OUT. 
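
   The scalar masks validated here typically come from if-converted loops;
   a hedged example of source that leads to a masked internal-function call
   with a boolean mask operand (not from the patch):

     void
     f (int *restrict a, const int *restrict b, int n)
     {
       for (int i = 0; i < n; ++i)
	 if (b[i] > 0)      // the comparison becomes the scalar mask
	   a[i] = b[i];     // if-converted to a .MASK_STORE
     }
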
*/ static bool -vect_check_scalar_mask (stmt_vec_info stmt_info, tree mask, +vect_check_scalar_mask (vec_info *vinfo, stmt_vec_info stmt_info, tree mask, vect_def_type *mask_dt_out, tree *mask_vectype_out) { - vec_info *vinfo = stmt_info->vinfo; if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask))) { if (dump_enabled_p ()) @@ -2613,7 +2623,7 @@ vect_check_scalar_mask (stmt_vec_info stmt_info, tree mask, enum vect_def_type mask_dt; tree mask_vectype; - if (!vect_is_simple_use (mask, stmt_info->vinfo, &mask_dt, &mask_vectype)) + if (!vect_is_simple_use (mask, vinfo, &mask_dt, &mask_vectype)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, @@ -2656,7 +2666,7 @@ vect_check_scalar_mask (stmt_vec_info stmt_info, tree mask, *RHS_VECTYPE_OUT and the type of the store in *VLS_TYPE_OUT. */ static bool -vect_check_store_rhs (stmt_vec_info stmt_info, tree rhs, +vect_check_store_rhs (vec_info *vinfo, stmt_vec_info stmt_info, tree rhs, vect_def_type *rhs_dt_out, tree *rhs_vectype_out, vec_load_store_type *vls_type_out) { @@ -2672,7 +2682,7 @@ vect_check_store_rhs (stmt_vec_info stmt_info, tree rhs, enum vect_def_type rhs_dt; tree rhs_vectype; - if (!vect_is_simple_use (rhs, stmt_info->vinfo, &rhs_dt, &rhs_vectype)) + if (!vect_is_simple_use (rhs, vinfo, &rhs_dt, &rhs_vectype)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, @@ -2703,7 +2713,8 @@ vect_check_store_rhs (stmt_vec_info stmt_info, tree rhs, floats are interpreted as a bitmask. */ static tree -vect_build_all_ones_mask (stmt_vec_info stmt_info, tree masktype) +vect_build_all_ones_mask (vec_info *vinfo, + stmt_vec_info stmt_info, tree masktype) { if (TREE_CODE (masktype) == INTEGER_TYPE) return build_int_cst (masktype, -1); @@ -2711,7 +2722,7 @@ vect_build_all_ones_mask (stmt_vec_info stmt_info, tree masktype) { tree mask = build_int_cst (TREE_TYPE (masktype), -1); mask = build_vector_from_val (masktype, mask); - return vect_init_vector (stmt_info, mask, masktype, NULL); + return vect_init_vector (vinfo, stmt_info, mask, masktype, NULL); } else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype))) { @@ -2722,7 +2733,7 @@ vect_build_all_ones_mask (stmt_vec_info stmt_info, tree masktype) real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype))); tree mask = build_real (TREE_TYPE (masktype), r); mask = build_vector_from_val (masktype, mask); - return vect_init_vector (stmt_info, mask, masktype, NULL); + return vect_init_vector (vinfo, stmt_info, mask, masktype, NULL); } gcc_unreachable (); } @@ -2731,7 +2742,8 @@ vect_build_all_ones_mask (stmt_vec_info stmt_info, tree masktype) STMT_INFO as a gather load. */ static tree -vect_build_zero_merge_argument (stmt_vec_info stmt_info, tree vectype) +vect_build_zero_merge_argument (vec_info *vinfo, + stmt_vec_info stmt_info, tree vectype) { tree merge; if (TREE_CODE (TREE_TYPE (vectype)) == INTEGER_TYPE) @@ -2748,7 +2760,7 @@ vect_build_zero_merge_argument (stmt_vec_info stmt_info, tree vectype) else gcc_unreachable (); merge = build_vector_from_val (vectype, merge); - return vect_init_vector (stmt_info, merge, vectype, NULL); + return vect_init_vector (vinfo, stmt_info, merge, vectype, NULL); } /* Build a gather load call while vectorizing STMT_INFO. Insert new @@ -2758,13 +2770,13 @@ vect_build_zero_merge_argument (stmt_vec_info stmt_info, tree vectype) MASK is null. 
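
   Scalar semantics of the gather that the calls built below implement, as
   a hedged sketch (not from the patch; the element types are arbitrary):

     void
     f (double *restrict dst, const double *base, const int *offset,
	long scale, int n)
     {
       for (int i = 0; i < n; ++i)
	 // Each lane loads from base plus a scaled element offset,
	 // mirroring the builtin's base/offset/scale operands.
	 dst[i] = *(const double *) ((const char *) base
				     + (long) offset[i] * scale);
     }
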
*/ static void -vect_build_gather_load_calls (stmt_vec_info stmt_info, +vect_build_gather_load_calls (vec_info *vinfo, stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, gather_scatter_info *gs_info, tree mask) { - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vec_info loop_vinfo = dyn_cast (vinfo); class loop *loop = LOOP_VINFO_LOOP (loop_vinfo); tree vectype = STMT_VINFO_VECTYPE (stmt_info); poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype); @@ -2861,19 +2873,19 @@ vect_build_gather_load_calls (stmt_vec_info stmt_info, if (!mask) { - src_op = vect_build_zero_merge_argument (stmt_info, rettype); - mask_op = vect_build_all_ones_mask (stmt_info, masktype); + src_op = vect_build_zero_merge_argument (vinfo, stmt_info, rettype); + mask_op = vect_build_all_ones_mask (vinfo, stmt_info, masktype); } for (int j = 0; j < ncopies; ++j) { tree op, var; if (modifier == WIDEN && (j & 1)) - op = permute_vec_elements (vec_oprnd0, vec_oprnd0, + op = permute_vec_elements (vinfo, vec_oprnd0, vec_oprnd0, perm_mask, stmt_info, gsi); else if (j == 0) op = vec_oprnd0 - = vect_get_vec_def_for_operand (gs_info->offset, stmt_info); + = vect_get_vec_def_for_operand (vinfo, gs_info->offset, stmt_info); else op = vec_oprnd0 = vect_get_vec_def_for_stmt_copy (loop_vinfo, vec_oprnd0); @@ -2885,19 +2897,19 @@ vect_build_gather_load_calls (stmt_vec_info stmt_info, var = vect_get_new_ssa_name (idxtype, vect_simple_var); op = build1 (VIEW_CONVERT_EXPR, idxtype, op); gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); op = var; } if (mask) { if (mask_perm_mask && (j & 1)) - mask_op = permute_vec_elements (mask_op, mask_op, + mask_op = permute_vec_elements (vinfo, mask_op, mask_op, mask_perm_mask, stmt_info, gsi); else { if (j == 0) - vec_mask = vect_get_vec_def_for_operand (mask, stmt_info); + vec_mask = vect_get_vec_def_for_operand (vinfo, mask, stmt_info); else if (modifier != NARROW || (j & 1) == 0) vec_mask = vect_get_vec_def_for_stmt_copy (loop_vinfo, vec_mask); @@ -2912,7 +2924,7 @@ vect_build_gather_load_calls (stmt_vec_info stmt_info, mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op); gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); mask_op = var; } } @@ -2923,7 +2935,7 @@ vect_build_gather_load_calls (stmt_vec_info stmt_info, = gimple_build_assign (var, (j & 1) ? 
VEC_UNPACK_HI_EXPR : VEC_UNPACK_LO_EXPR, mask_op); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); mask_op = var; } src_op = mask_op; @@ -2941,7 +2953,7 @@ vect_build_gather_load_calls (stmt_vec_info stmt_info, mask_arg = build1 (VIEW_CONVERT_EXPR, utype, mask_op); gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_arg); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); mask_arg = var; if (!useless_type_conversion_p (real_masktype, utype)) { @@ -2949,7 +2961,7 @@ vect_build_gather_load_calls (stmt_vec_info stmt_info, <= TYPE_PRECISION (real_masktype)); var = vect_get_new_ssa_name (real_masktype, vect_scalar_var); new_stmt = gimple_build_assign (var, NOP_EXPR, mask_arg); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); mask_arg = var; } src_op = build_zero_cst (srctype); @@ -2964,19 +2976,19 @@ vect_build_gather_load_calls (stmt_vec_info stmt_info, TYPE_VECTOR_SUBPARTS (rettype))); op = vect_get_new_ssa_name (rettype, vect_simple_var); gimple_call_set_lhs (new_call, op); - vect_finish_stmt_generation (stmt_info, new_call, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, new_call, gsi); var = make_ssa_name (vec_dest); op = build1 (VIEW_CONVERT_EXPR, vectype, op); gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); } else { var = make_ssa_name (vec_dest, new_call); gimple_call_set_lhs (new_call, var); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_call, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, new_call, gsi); } if (modifier == NARROW) @@ -2986,7 +2998,7 @@ vect_build_gather_load_calls (stmt_vec_info stmt_info, prev_res = var; continue; } - var = permute_vec_elements (prev_res, var, perm_mask, + var = permute_vec_elements (vinfo, prev_res, var, perm_mask, stmt_info, gsi); new_stmt_info = loop_vinfo->lookup_def (var); } @@ -3006,7 +3018,8 @@ vect_build_gather_load_calls (stmt_vec_info stmt_info, containing loop. */ static void -vect_get_gather_scatter_ops (class loop *loop, stmt_vec_info stmt_info, +vect_get_gather_scatter_ops (vec_info *vinfo, + class loop *loop, stmt_vec_info stmt_info, gather_scatter_info *gs_info, tree *dataref_ptr, tree *vec_offset) { @@ -3019,7 +3032,7 @@ vect_get_gather_scatter_ops (class loop *loop, stmt_vec_info stmt_info, new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts); gcc_assert (!new_bb); } - *vec_offset = vect_get_vec_def_for_operand (gs_info->offset, stmt_info, + *vec_offset = vect_get_vec_def_for_operand (vinfo, gs_info->offset, stmt_info, gs_info->offset_vectype); } @@ -3074,14 +3087,15 @@ vect_get_strided_load_store_ops (stmt_vec_info stmt_info, vectorization. 
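(Illustrative aside, not part of the patch.) The gather path above ultimately emits a target builtin taking a merge source, pointer, offset vector, mask and scale. A scalar model of the masked-gather semantics being generated, assuming int elements and a toy function name:

    #include <cstdio>
    #include <cstddef>

    /* Lane i loads base[offset[i] * scale] when mask[i] is set and keeps
       the merge value otherwise (the zero vector built by
       vect_build_zero_merge_argument when no mask is supplied).  */
    static void
    masked_gather (int *dst, const int *base, const int *offset,
                   const bool *mask, const int *merge, int scale, size_t n)
    {
      for (size_t i = 0; i < n; ++i)
        dst[i] = mask[i] ? base[offset[i] * scale] : merge[i];
    }

    int
    main ()
    {
      int base[16];
      for (int i = 0; i < 16; ++i) base[i] = 100 + i;
      int offset[4] = { 3, 0, 2, 1 };
      bool mask[4] = { true, false, true, true };
      int merge[4] = { 0, 0, 0, 0 };
      int dst[4];
      masked_gather (dst, base, offset, mask, merge, 2, 4);
      for (int d : dst) std::printf ("%d ", d);  /* 106 0 104 102 */
      std::printf ("\n");
    }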
*/ static tree -vect_get_data_ptr_increment (dr_vec_info *dr_info, tree aggr_type, +vect_get_data_ptr_increment (vec_info *vinfo, + dr_vec_info *dr_info, tree aggr_type, vect_memory_access_type memory_access_type) { if (memory_access_type == VMAT_INVARIANT) return size_zero_node; tree iv_step = TYPE_SIZE_UNIT (aggr_type); - tree step = vect_dr_behavior (dr_info)->step; + tree step = vect_dr_behavior (vinfo, dr_info)->step; if (tree_int_cst_sgn (step) == -1) iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step); return iv_step; } @@ -3090,14 +3104,14 @@ /* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}. */ static bool -vectorizable_bswap (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, +vectorizable_bswap (vec_info *vinfo, + stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, slp_tree slp_node, tree vectype_in, stmt_vector_for_cost *cost_vec) { tree op, vectype; gcall *stmt = as_a <gcall *> (stmt_info->stmt); - vec_info *vinfo = stmt_info->vinfo; - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo); unsigned ncopies; op = gimple_call_arg (stmt, 0); @@ -3157,7 +3171,8 @@ vectorizable_bswap (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, { /* Handle uses. */ if (j == 0) - vect_get_vec_defs (op, NULL, stmt_info, &vec_oprnds, NULL, slp_node); + vect_get_vec_defs (vinfo, op, NULL, stmt_info, &vec_oprnds, NULL, + slp_node); else vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds, NULL); @@ -3170,16 +3185,16 @@ vectorizable_bswap (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, tree tem = make_ssa_name (char_vectype); new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR, char_vectype, vop)); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); tree tem2 = make_ssa_name (char_vectype); new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR, tem, tem, bswap_vconst); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); tem = make_ssa_name (vectype); new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR, vectype, tem2)); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); if (slp_node) SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); } @@ -3232,7 +3247,8 @@ simple_integer_narrowing (tree vectype_out, tree vectype_in, Return true if STMT_INFO is vectorizable in this way.
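(Illustrative aside, not part of the patch.) vectorizable_bswap, adjusted above, lowers __builtin_bswap* to a view-convert to a char vector, a VEC_PERM_EXPR with a byte-reversing constant, and a view-convert back. A self-contained scalar model of that permutation, assuming 32-bit lanes and indices that reverse bytes within each lane:

    #include <cstdio>
    #include <cstdint>
    #include <cstring>

    int
    main ()
    {
      uint32_t lanes[2] = { 0x11223344u, 0xaabbccddu };
      unsigned char bytes[8], out[8];
      std::memcpy (bytes, lanes, 8);            /* VIEW_CONVERT to chars */
      static const int perm[8] = { 3, 2, 1, 0, 7, 6, 5, 4 };
      for (int i = 0; i < 8; ++i)
        out[i] = bytes[perm[i]];                /* the VEC_PERM_EXPR step */
      std::memcpy (lanes, out, 8);              /* VIEW_CONVERT back */
      std::printf ("0x%08x 0x%08x\n",
                   (unsigned) lanes[0], (unsigned) lanes[1]);
    }

On any host this prints the byte-swapped values 0x44332211 and 0xddccbbaa, since the permutation reverses bytes within each four-byte group.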
*/ static bool -vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, +vectorizable_call (vec_info *vinfo, + stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, slp_tree slp_node, stmt_vector_for_cost *cost_vec) { @@ -3245,9 +3261,8 @@ vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, tree vectype_out, vectype_in; poly_uint64 nunits_in; poly_uint64 nunits_out; - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); - bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); - vec_info *vinfo = stmt_info->vinfo; + loop_vec_info loop_vinfo = dyn_cast (vinfo); + bb_vec_info bb_vinfo = dyn_cast (vinfo); tree fndecl, new_temp, rhs_type; enum vect_def_type dt[4] = { vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type, @@ -3316,7 +3331,8 @@ vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, if ((int) i == mask_opno) { - if (!vect_check_scalar_mask (stmt_info, op, &dt[i], &vectypes[i])) + if (!vect_check_scalar_mask (vinfo, + stmt_info, op, &dt[i], &vectypes[i])) return false; continue; } @@ -3457,7 +3473,7 @@ vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, && (gimple_call_builtin_p (stmt, BUILT_IN_BSWAP16) || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP32) || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP64))) - return vectorizable_bswap (stmt_info, gsi, vec_stmt, slp_node, + return vectorizable_bswap (vinfo, stmt_info, gsi, vec_stmt, slp_node, vectype_in, cost_vec); else { @@ -3484,7 +3500,8 @@ vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, { STMT_VINFO_TYPE (stmt_info) = call_vec_info_type; DUMP_VECT_SCOPE ("vectorizable_call"); - vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec); + vect_model_simple_cost (vinfo, stmt_info, + ncopies, dt, ndts, slp_node, cost_vec); if (ifn != IFN_LAST && modifier == NARROW && !slp_node) record_stmt_cost (cost_vec, ncopies / 2, vec_promote_demote, stmt_info, 0, vect_body); @@ -3527,7 +3544,7 @@ vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, auto_vec > vec_defs (nargs); vec vec_oprnds0; - vect_get_slp_defs (slp_node, &vec_defs); + vect_get_slp_defs (vinfo, slp_node, &vec_defs); vec_oprnds0 = vec_defs[0]; /* Arguments are ready. Create the new vector stmt. 
*/ @@ -3549,7 +3566,7 @@ vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, = gimple_build_call_internal_vec (ifn, vargs); gimple_call_set_lhs (call, half_res); gimple_call_set_nothrow (call, true); - vect_finish_stmt_generation (stmt_info, call, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, call, gsi); if ((i & 1) == 0) { prev_res = half_res; @@ -3560,8 +3577,8 @@ vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, = gimple_build_assign (new_temp, convert_code, prev_res, half_res); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, - gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, + new_stmt, gsi); } else { @@ -3585,7 +3602,8 @@ vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, gimple_call_set_lhs (call, new_temp); gimple_call_set_nothrow (call, true); new_stmt_info - = vect_finish_stmt_generation (stmt_info, call, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, + call, gsi); } SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); } @@ -3603,7 +3621,8 @@ vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, op = gimple_call_arg (stmt, i); if (j == 0) vec_oprnd0 - = vect_get_vec_def_for_operand (op, stmt_info, vectypes[i]); + = vect_get_vec_def_for_operand (vinfo, + op, stmt_info, vectypes[i]); else vec_oprnd0 = vect_get_vec_def_for_stmt_copy (vinfo, orig_vargs[i]); @@ -3626,11 +3645,11 @@ vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, tree new_var = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_"); gimple *init_stmt = gimple_build_assign (new_var, cst); - vect_init_vector_1 (stmt_info, init_stmt, NULL); + vect_init_vector_1 (vinfo, stmt_info, init_stmt, NULL); new_temp = make_ssa_name (vec_dest); gimple *new_stmt = gimple_build_assign (new_temp, new_var); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); } else if (modifier == NARROW) { @@ -3641,7 +3660,7 @@ vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, gcall *call = gimple_build_call_internal_vec (ifn, vargs); gimple_call_set_lhs (call, half_res); gimple_call_set_nothrow (call, true); - vect_finish_stmt_generation (stmt_info, call, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, call, gsi); if ((j & 1) == 0) { prev_res = half_res; @@ -3651,7 +3670,7 @@ vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, gassign *new_stmt = gimple_build_assign (new_temp, convert_code, prev_res, half_res); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); } else { @@ -3664,7 +3683,7 @@ vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, gimple_call_set_lhs (call, new_temp); gimple_call_set_nothrow (call, true); new_stmt_info - = vect_finish_stmt_generation (stmt_info, call, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, call, gsi); } if (j == (modifier == NARROW ? 1 : 0)) @@ -3692,7 +3711,7 @@ vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, auto_vec > vec_defs (nargs); vec vec_oprnds0; - vect_get_slp_defs (slp_node, &vec_defs); + vect_get_slp_defs (vinfo, slp_node, &vec_defs); vec_oprnds0 = vec_defs[0]; /* Arguments are ready. Create the new vector stmt. 
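(Illustrative aside, not part of the patch.) In the NARROW branches above, even-numbered copies only produce and stash a half-width result (prev_res); odd-numbered copies combine the stashed half with their own through convert_code into one full vector. A toy model of that j & 1 pairing, with concatenation plus int16 truncation standing in for the real conversion:

    #include <cstdio>
    #include <vector>
    #include <cstdint>

    int
    main ()
    {
      std::vector<std::vector<int32_t>> halves
        = { { 1, 2 }, { 3, 4 }, { 5, 6 }, { 7, 8 } };
      std::vector<int32_t> prev;
      for (size_t j = 0; j < halves.size (); ++j)
        {
          if ((j & 1) == 0)
            { prev = halves[j]; continue; }  /* stash, emit nothing */
          std::printf ("narrowed copy %zu:", j / 2);
          for (int32_t v : prev) std::printf (" %d", (int16_t) v);
          for (int32_t v : halves[j]) std::printf (" %d", (int16_t) v);
          std::printf ("\n");
        }
    }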
*/ @@ -3715,7 +3734,7 @@ vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, gimple_call_set_lhs (call, new_temp); gimple_call_set_nothrow (call, true); new_stmt_info - = vect_finish_stmt_generation (stmt_info, call, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, call, gsi); SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); } @@ -3733,7 +3752,7 @@ vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, if (j == 0) { vec_oprnd0 - = vect_get_vec_def_for_operand (op, stmt_info, + = vect_get_vec_def_for_operand (vinfo, op, stmt_info, vectypes[i]); vec_oprnd1 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0); @@ -3756,7 +3775,7 @@ vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, new_temp = make_ssa_name (vec_dest, new_stmt); gimple_call_set_lhs (new_stmt, new_temp); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); if (j == 0) STMT_VINFO_VEC_STMT (stmt_info) = new_stmt_info; @@ -3890,7 +3909,7 @@ simd_clone_subparts (tree vectype) Return true if STMT_INFO is vectorizable in this way. */ static bool -vectorizable_simd_clone_call (stmt_vec_info stmt_info, +vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, slp_tree slp_node, stmt_vector_for_cost *) @@ -3902,9 +3921,8 @@ vectorizable_simd_clone_call (stmt_vec_info stmt_info, stmt_vec_info prev_stmt_info; tree vectype; unsigned int nunits; - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); - bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); - vec_info *vinfo = stmt_info->vinfo; + loop_vec_info loop_vinfo = dyn_cast (vinfo); + bb_vec_info bb_vinfo = dyn_cast (vinfo); class loop *loop = loop_vinfo ? 
LOOP_VINFO_LOOP (loop_vinfo) : NULL; tree fndecl, new_temp; int ncopies, j; @@ -4201,7 +4219,8 @@ vectorizable_simd_clone_call (stmt_vec_info stmt_info, } STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type; DUMP_VECT_SCOPE ("vectorizable_simd_clone_call"); -/* vect_model_simple_cost (stmt_info, ncopies, dt, slp_node, cost_vec); */ +/* vect_model_simple_cost (vinfo, stmt_info, ncopies, + dt, slp_node, cost_vec); */ return true; } @@ -4256,7 +4275,7 @@ vectorizable_simd_clone_call (stmt_vec_info stmt_info, gcc_assert ((k & (k - 1)) == 0); if (m == 0) vec_oprnd0 - = vect_get_vec_def_for_operand (op, stmt_info); + = vect_get_vec_def_for_operand (vinfo, op, stmt_info); else { vec_oprnd0 = arginfo[i].op; @@ -4273,7 +4292,8 @@ vectorizable_simd_clone_call (stmt_vec_info stmt_info, gassign *new_stmt = gimple_build_assign (make_ssa_name (atype), vec_oprnd0); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, + new_stmt, gsi); vargs.safe_push (gimple_assign_lhs (new_stmt)); } else @@ -4290,7 +4310,8 @@ vectorizable_simd_clone_call (stmt_vec_info stmt_info, { if (m == 0 && l == 0) vec_oprnd0 - = vect_get_vec_def_for_operand (op, stmt_info); + = vect_get_vec_def_for_operand (vinfo, + op, stmt_info); else vec_oprnd0 = vect_get_vec_def_for_stmt_copy (vinfo, @@ -4309,8 +4330,8 @@ vectorizable_simd_clone_call (stmt_vec_info stmt_info, gassign *new_stmt = gimple_build_assign (make_ssa_name (atype), vec_oprnd0); - vect_finish_stmt_generation (stmt_info, new_stmt, - gsi); + vect_finish_stmt_generation (vinfo, stmt_info, + new_stmt, gsi); vargs.safe_push (gimple_assign_lhs (new_stmt)); } } @@ -4379,7 +4400,7 @@ vectorizable_simd_clone_call (stmt_vec_info stmt_info, gassign *new_stmt = gimple_build_assign (new_temp, code, arginfo[i].op, tcst); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); vargs.safe_push (new_temp); } break; @@ -4408,7 +4429,7 @@ vectorizable_simd_clone_call (stmt_vec_info stmt_info, gimple_call_set_lhs (new_call, new_temp); } stmt_vec_info new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_call, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, new_call, gsi); if (vec_dest) { @@ -4434,7 +4455,8 @@ vectorizable_simd_clone_call (stmt_vec_info stmt_info, gimple *new_stmt = gimple_build_assign (make_ssa_name (vectype), t); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, + new_stmt, gsi); if (j == 0 && l == 0) STMT_VINFO_VEC_STMT (stmt_info) @@ -4446,7 +4468,7 @@ vectorizable_simd_clone_call (stmt_vec_info stmt_info, } if (ratype) - vect_clobber_variable (stmt_info, gsi, new_temp); + vect_clobber_variable (vinfo, stmt_info, gsi, new_temp); continue; } else if (simd_clone_subparts (vectype) > nunits) @@ -4466,12 +4488,12 @@ vectorizable_simd_clone_call (stmt_vec_info stmt_info, gimple *new_stmt = gimple_build_assign (make_ssa_name (rtype), tem); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, - gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, + new_stmt, gsi); CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, gimple_assign_lhs (new_stmt)); } - vect_clobber_variable (stmt_info, gsi, new_temp); + vect_clobber_variable (vinfo, stmt_info, gsi, new_temp); } else CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp); @@ -4481,7 +4503,7 @@ vectorizable_simd_clone_call (stmt_vec_info stmt_info, gimple *new_stmt = gimple_build_assign 
(make_ssa_name (vec_dest), vec_oprnd0); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); if ((unsigned) j == k - 1) STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; @@ -4499,8 +4521,8 @@ vectorizable_simd_clone_call (stmt_vec_info stmt_info, gimple *new_stmt = gimple_build_assign (make_ssa_name (vec_dest), t); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); - vect_clobber_variable (stmt_info, gsi, new_temp); + = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + vect_clobber_variable (vinfo, stmt_info, gsi, new_temp); } } @@ -4548,7 +4570,7 @@ vectorizable_simd_clone_call (stmt_vec_info stmt_info, STMT_INFO is the original scalar stmt that we are vectorizing. */ static gimple * -vect_gen_widened_results_half (enum tree_code code, +vect_gen_widened_results_half (vec_info *vinfo, enum tree_code code, tree vec_oprnd0, tree vec_oprnd1, int op_type, tree vec_dest, gimple_stmt_iterator *gsi, stmt_vec_info stmt_info) @@ -4563,7 +4585,7 @@ vect_gen_widened_results_half (enum tree_code code, new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1); new_temp = make_ssa_name (vec_dest, new_stmt); gimple_assign_set_lhs (new_stmt, new_temp); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); return new_stmt; } @@ -4577,17 +4599,16 @@ vect_gen_widened_results_half (enum tree_code code, The vectors are collected into VEC_OPRNDS. */ static void -vect_get_loop_based_defs (tree *oprnd, stmt_vec_info stmt_info, +vect_get_loop_based_defs (vec_info *vinfo, tree *oprnd, stmt_vec_info stmt_info, vec *vec_oprnds, int multi_step_cvt) { - vec_info *vinfo = stmt_info->vinfo; tree vec_oprnd; /* Get first vector operand. */ /* All the vector operands except the very first one (that is scalar oprnd) are stmt copies. */ if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE) - vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt_info); + vec_oprnd = vect_get_vec_def_for_operand (vinfo, *oprnd, stmt_info); else vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, *oprnd); @@ -4602,7 +4623,7 @@ vect_get_loop_based_defs (tree *oprnd, stmt_vec_info stmt_info, /* For conversion in multiple steps, continue to get operands recursively. */ if (multi_step_cvt) - vect_get_loop_based_defs (oprnd, stmt_info, vec_oprnds, + vect_get_loop_based_defs (vinfo, oprnd, stmt_info, vec_oprnds, multi_step_cvt - 1); } @@ -4612,7 +4633,7 @@ vect_get_loop_based_defs (tree *oprnd, stmt_vec_info stmt_info, recursively. */ static void -vect_create_vectorized_demotion_stmts (vec *vec_oprnds, +vect_create_vectorized_demotion_stmts (vec_info *vinfo, vec *vec_oprnds, int multi_step_cvt, stmt_vec_info stmt_info, vec vec_dsts, @@ -4634,7 +4655,7 @@ vect_create_vectorized_demotion_stmts (vec *vec_oprnds, new_tmp = make_ssa_name (vec_dest, new_stmt); gimple_assign_set_lhs (new_stmt, new_tmp); stmt_vec_info new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); if (multi_step_cvt) /* Store the resulting vector for next recursive call. */ @@ -4667,7 +4688,8 @@ vect_create_vectorized_demotion_stmts (vec *vec_oprnds, /* At each level of recursion we have half of the operands we had at the previous level. 
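(Illustrative aside, not part of the patch.) vect_create_vectorized_demotion_stmts packs adjacent operand pairs with VEC_PACK_TRUNC_EXPR and recurses on the halved list, which is what the truncate ((i+1)/2) call just below expresses. A runnable toy of that level-by-level halving, with packing modeled as plain concatenation and the truncation elided:

    #include <cstdio>
    #include <vector>
    #include <cstdint>

    static std::vector<int64_t>
    pack_pair (const std::vector<int64_t> &a, const std::vector<int64_t> &b)
    {
      std::vector<int64_t> r (a);
      r.insert (r.end (), b.begin (), b.end ());
      return r;
    }

    static void
    demote (std::vector<std::vector<int64_t>> ops, int multi_step_cvt)
    {
      std::vector<std::vector<int64_t>> next;
      for (size_t i = 0; i + 1 < ops.size (); i += 2)
        next.push_back (pack_pair (ops[i], ops[i + 1]));
      std::printf ("level %d: %zu -> %zu vectors\n",
                   multi_step_cvt, ops.size (), next.size ());
      if (multi_step_cvt)
        demote (next, multi_step_cvt - 1);  /* half the operands per level */
    }

    int
    main ()
    {
      demote ({ { 1, 2 }, { 3, 4 }, { 5, 6 }, { 7, 8 } }, 1);
    }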
*/ vec_oprnds->truncate ((i+1)/2); - vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1, + vect_create_vectorized_demotion_stmts (vinfo, vec_oprnds, + multi_step_cvt - 1, stmt_info, vec_dsts, gsi, slp_node, VEC_PACK_TRUNC_EXPR, prev_stmt_info); @@ -4683,7 +4705,8 @@ vect_create_vectorized_demotion_stmts (vec *vec_oprnds, call the function recursively. */ static void -vect_create_vectorized_promotion_stmts (vec *vec_oprnds0, +vect_create_vectorized_promotion_stmts (vec_info *vinfo, + vec *vec_oprnds0, vec *vec_oprnds1, stmt_vec_info stmt_info, tree vec_dest, gimple_stmt_iterator *gsi, @@ -4704,10 +4727,10 @@ vect_create_vectorized_promotion_stmts (vec *vec_oprnds0, vop1 = NULL_TREE; /* Generate the two halves of promotion operation. */ - new_stmt1 = vect_gen_widened_results_half (code1, vop0, vop1, + new_stmt1 = vect_gen_widened_results_half (vinfo, code1, vop0, vop1, op_type, vec_dest, gsi, stmt_info); - new_stmt2 = vect_gen_widened_results_half (code2, vop0, vop1, + new_stmt2 = vect_gen_widened_results_half (vinfo, code2, vop0, vop1, op_type, vec_dest, gsi, stmt_info); if (is_gimple_call (new_stmt1)) @@ -4737,7 +4760,8 @@ vect_create_vectorized_promotion_stmts (vec *vec_oprnds0, Return true if STMT_INFO is vectorizable in this way. */ static bool -vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, +vectorizable_conversion (vec_info *vinfo, + stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, slp_tree slp_node, stmt_vector_for_cost *cost_vec) { @@ -4745,7 +4769,7 @@ vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, tree scalar_dest; tree op0, op1 = NULL_TREE; tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE; - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vec_info loop_vinfo = dyn_cast (vinfo); enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK; enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK; tree new_temp; @@ -4761,8 +4785,7 @@ vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, vec vec_oprnds0 = vNULL; vec vec_oprnds1 = vNULL; tree vop0; - bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); - vec_info *vinfo = stmt_info->vinfo; + bb_vec_info bb_vinfo = dyn_cast (vinfo); int multi_step_cvt = 0; vec interm_types = vNULL; tree last_oprnd, intermediate_type, cvt_type = NULL_TREE; @@ -4928,7 +4951,7 @@ vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, return false; case WIDEN: - if (supportable_widening_operation (code, stmt_info, vectype_out, + if (supportable_widening_operation (vinfo, code, stmt_info, vectype_out, vectype_in, &code1, &code2, &multi_step_cvt, &interm_types)) { @@ -4961,7 +4984,7 @@ vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, cvt_type, &codecvt1)) goto unsupported; } - else if (!supportable_widening_operation (code, stmt_info, + else if (!supportable_widening_operation (vinfo, code, stmt_info, vectype_out, cvt_type, &codecvt1, &codecvt2, &multi_step_cvt, @@ -4970,7 +4993,8 @@ vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, else gcc_assert (multi_step_cvt == 0); - if (supportable_widening_operation (NOP_EXPR, stmt_info, cvt_type, + if (supportable_widening_operation (vinfo, NOP_EXPR, stmt_info, + cvt_type, vectype_in, &code1, &code2, &multi_step_cvt, &interm_types)) { @@ -5027,7 +5051,7 @@ vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, if (modifier == NONE) { STMT_VINFO_TYPE (stmt_info) 
= type_conversion_vec_info_type; - vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, + vect_model_simple_cost (vinfo, stmt_info, ncopies, dt, ndts, slp_node, cost_vec); } else if (modifier == NARROW) @@ -5119,7 +5143,7 @@ vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, for (j = 0; j < ncopies; j++) { if (j == 0) - vect_get_vec_defs (op0, NULL, stmt_info, &vec_oprnds0, + vect_get_vec_defs (vinfo, op0, NULL, stmt_info, &vec_oprnds0, NULL, slp_node); else vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds0, NULL); @@ -5133,7 +5157,7 @@ vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, new_temp = make_ssa_name (vec_dest, new_stmt); gimple_assign_set_lhs (new_stmt, new_temp); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); if (slp_node) SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); @@ -5173,16 +5197,17 @@ vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, for (k = 0; k < slp_node->vec_stmts_size - 1; k++) vec_oprnds1.quick_push (vec_oprnd1); - vect_get_vec_defs (op0, NULL_TREE, stmt_info, + vect_get_vec_defs (vinfo, op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL, slp_node); } else - vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0, + vect_get_vec_defs (vinfo, op0, op1, stmt_info, &vec_oprnds0, &vec_oprnds1, slp_node); } else { - vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt_info); + vec_oprnd0 = vect_get_vec_def_for_operand (vinfo, + op0, stmt_info); vec_oprnds0.quick_push (vec_oprnd0); if (op_type == binary_op) { @@ -5190,7 +5215,8 @@ vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, vec_oprnd1 = op1; else vec_oprnd1 - = vect_get_vec_def_for_operand (op1, stmt_info); + = vect_get_vec_def_for_operand (vinfo, + op1, stmt_info); vec_oprnds1.quick_push (vec_oprnd1); } } @@ -5222,7 +5248,7 @@ vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, c1 = codecvt1; c2 = codecvt2; } - vect_create_vectorized_promotion_stmts (&vec_oprnds0, + vect_create_vectorized_promotion_stmts (vinfo, &vec_oprnds0, &vec_oprnds1, stmt_info, this_dest, gsi, c1, c2, op_type); @@ -5238,7 +5264,8 @@ vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, gassign *new_stmt = gimple_build_assign (new_temp, codecvt1, vop0); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, + new_stmt, gsi); } else new_stmt_info = vinfo->lookup_def (vop0); @@ -5268,12 +5295,13 @@ vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, { /* Handle uses. 
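(Illustrative aside, not part of the patch.) The WIDEN path handled above produces two wide vectors per narrow input via the paired code1/code2 statements of vect_gen_widened_results_half, typically VEC_UNPACK_LO_EXPR and VEC_UNPACK_HI_EXPR. A minimal scalar model of that lo/hi widening, int16 to int32 assumed:

    #include <cstdio>
    #include <cstdint>

    int
    main ()
    {
      int16_t in[8] = { 1, -2, 3, -4, 5, -6, 7, -8 };
      int32_t lo[4], hi[4];
      for (int i = 0; i < 4; ++i)
        {
          lo[i] = in[i];      /* first half widened (sign-extended) */
          hi[i] = in[4 + i];  /* second half widened */
        }
      for (int v : lo) std::printf ("%d ", v);
      std::printf ("| ");
      for (int v : hi) std::printf ("%d ", v);
      std::printf ("\n");  /* 1 -2 3 -4 | 5 -6 7 -8, now 32-bit lanes */
    }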
*/ if (slp_node) - vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL, - slp_node); + vect_get_vec_defs (vinfo, op0, NULL_TREE, stmt_info, &vec_oprnds0, + NULL, slp_node); else { vec_oprnds0.truncate (0); - vect_get_loop_based_defs (&last_oprnd, stmt_info, &vec_oprnds0, + vect_get_loop_based_defs (vinfo, + &last_oprnd, stmt_info, &vec_oprnds0, vect_pow2 (multi_step_cvt) - 1); } @@ -5285,11 +5313,12 @@ vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, new_temp = make_ssa_name (vec_dest); gassign *new_stmt = gimple_build_assign (new_temp, codecvt1, vop0); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); vec_oprnds0[i] = new_temp; } - vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt, + vect_create_vectorized_demotion_stmts (vinfo, &vec_oprnds0, + multi_step_cvt, stmt_info, vec_dsts, gsi, slp_node, code1, &prev_stmt_info); @@ -5338,14 +5367,15 @@ vect_nop_conversion_p (stmt_vec_info stmt_info) Return true if STMT_INFO is vectorizable in this way. */ static bool -vectorizable_assignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, +vectorizable_assignment (vec_info *vinfo, + stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, slp_tree slp_node, stmt_vector_for_cost *cost_vec) { tree vec_dest; tree scalar_dest; tree op; - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vec_info loop_vinfo = dyn_cast (vinfo); tree new_temp; enum vect_def_type dt[1] = {vect_unknown_def_type}; int ndts = 1; @@ -5353,8 +5383,7 @@ vectorizable_assignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, int i, j; vec vec_oprnds = vNULL; tree vop; - bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); - vec_info *vinfo = stmt_info->vinfo; + bb_vec_info bb_vinfo = dyn_cast (vinfo); stmt_vec_info prev_stmt_info = NULL; enum tree_code code; tree vectype_in; @@ -5445,7 +5474,7 @@ vectorizable_assignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type; DUMP_VECT_SCOPE ("vectorizable_assignment"); if (!vect_nop_conversion_p (stmt_info)) - vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, + vect_model_simple_cost (vinfo, stmt_info, ncopies, dt, ndts, slp_node, cost_vec); return true; } @@ -5462,7 +5491,8 @@ vectorizable_assignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, { /* Handle uses. */ if (j == 0) - vect_get_vec_defs (op, NULL, stmt_info, &vec_oprnds, NULL, slp_node); + vect_get_vec_defs (vinfo, op, NULL, stmt_info, &vec_oprnds, NULL, + slp_node); else vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds, NULL); @@ -5477,7 +5507,7 @@ vectorizable_assignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, new_temp = make_ssa_name (vec_dest, new_stmt); gimple_assign_set_lhs (new_stmt, new_temp); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); if (slp_node) SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); } @@ -5542,7 +5572,8 @@ vect_supportable_shift (vec_info *vinfo, enum tree_code code, tree scalar_type) Return true if STMT_INFO is vectorizable in this way. 
*/ static bool -vectorizable_shift (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, +vectorizable_shift (vec_info *vinfo, + stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, slp_tree slp_node, stmt_vector_for_cost *cost_vec) { @@ -5551,7 +5582,7 @@ vectorizable_shift (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, tree op0, op1 = NULL; tree vec_oprnd1 = NULL_TREE; tree vectype; - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vec_info loop_vinfo = dyn_cast (vinfo); enum tree_code code; machine_mode vec_mode; tree new_temp; @@ -5572,8 +5603,7 @@ vectorizable_shift (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, tree vop0, vop1; unsigned int k; bool scalar_shift_arg = true; - bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); - vec_info *vinfo = stmt_info->vinfo; + bb_vec_info bb_vinfo = dyn_cast (vinfo); bool incompatible_op1_vectype_p = false; if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) @@ -5813,7 +5843,7 @@ vectorizable_shift (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, { STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type; DUMP_VECT_SCOPE ("vectorizable_shift"); - vect_model_simple_cost (stmt_info, ncopies, dt, + vect_model_simple_cost (vinfo, stmt_info, ncopies, dt, scalar_shift_arg ? 1 : ndts, slp_node, cost_vec); return true; } @@ -5828,7 +5858,7 @@ vectorizable_shift (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, { op1 = fold_convert (TREE_TYPE (vectype), op1); if (dt[1] != vect_constant_def) - op1 = vect_init_vector (stmt_info, op1, + op1 = vect_init_vector (vinfo, stmt_info, op1, TREE_TYPE (vectype), NULL); } @@ -5875,9 +5905,10 @@ vectorizable_shift (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, /* If the argument was the same in all lanes create the correctly typed vector shift amount directly. */ op1 = fold_convert (TREE_TYPE (vectype), op1); - op1 = vect_init_vector (stmt_info, op1, TREE_TYPE (vectype), + op1 = vect_init_vector (vinfo, stmt_info, + op1, TREE_TYPE (vectype), !loop_vinfo ? gsi : NULL); - vec_oprnd1 = vect_init_vector (stmt_info, op1, vectype, + vec_oprnd1 = vect_init_vector (vinfo, stmt_info, op1, vectype, !loop_vinfo ? gsi : NULL); vec_oprnds1.create (slp_node->vec_stmts_size); for (k = 0; k < slp_node->vec_stmts_size; k++) @@ -5906,11 +5937,11 @@ vectorizable_shift (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, (a special case for certain kind of vector shifts); otherwise, operand 1 should be of a vector type (the usual case). */ if (vec_oprnd1) - vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL, - slp_node); + vect_get_vec_defs (vinfo, op0, NULL_TREE, stmt_info, + &vec_oprnds0, NULL, slp_node); else - vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0, &vec_oprnds1, - slp_node); + vect_get_vec_defs (vinfo, op0, op1, stmt_info, + &vec_oprnds0, &vec_oprnds1, slp_node); } else vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds0, &vec_oprnds1); @@ -5924,7 +5955,7 @@ vectorizable_shift (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, new_temp = make_ssa_name (vec_dest, new_stmt); gimple_assign_set_lhs (new_stmt, new_temp); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); if (slp_node) SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); } @@ -5955,7 +5986,8 @@ vectorizable_shift (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, Return true if STMT_INFO is vectorizable in this way. 
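(Illustrative aside, not part of the patch.) vectorizable_shift above distinguishes a scalar shift amount, splat once into a vector with vect_init_vector, from per-lane vector amounts. Both operand shapes in a self-contained sketch, with toy helper names and uint32 lanes assumed:

    #include <cstdio>
    #include <cstddef>
    #include <cstdint>

    static void
    shift_by_scalar (uint32_t *v, size_t n, unsigned amt)
    {
      for (size_t i = 0; i < n; ++i) v[i] <<= amt;  /* one splat operand */
    }

    static void
    shift_by_vector (uint32_t *v, const unsigned *amt, size_t n)
    {
      for (size_t i = 0; i < n; ++i) v[i] <<= amt[i];  /* lane-wise operand */
    }

    int
    main ()
    {
      uint32_t a[4] = { 1, 1, 1, 1 }, b[4] = { 1, 1, 1, 1 };
      unsigned amts[4] = { 0, 1, 2, 3 };
      shift_by_scalar (a, 4, 2);
      shift_by_vector (b, amts, 4);
      for (int i = 0; i < 4; ++i)
        std::printf ("%u/%u ", a[i], b[i]);  /* 4/1 4/2 4/4 4/8 */
      std::printf ("\n");
    }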
*/ static bool -vectorizable_operation (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, +vectorizable_operation (vec_info *vinfo, + stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, slp_tree slp_node, stmt_vector_for_cost *cost_vec) { @@ -5963,7 +5995,7 @@ vectorizable_operation (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, tree scalar_dest; tree op0, op1 = NULL_TREE, op2 = NULL_TREE; tree vectype; - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vec_info loop_vinfo = dyn_cast (vinfo); enum tree_code code, orig_code; machine_mode vec_mode; tree new_temp; @@ -5983,8 +6015,7 @@ vectorizable_operation (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, vec vec_oprnds1 = vNULL; vec vec_oprnds2 = vNULL; tree vop0, vop1, vop2; - bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); - vec_info *vinfo = stmt_info->vinfo; + bb_vec_info bb_vinfo = dyn_cast (vinfo); if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) return false; @@ -6233,7 +6264,8 @@ vectorizable_operation (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, STMT_VINFO_TYPE (stmt_info) = op_vec_info_type; DUMP_VECT_SCOPE ("vectorizable_operation"); - vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec); + vect_model_simple_cost (vinfo, stmt_info, + ncopies, dt, ndts, slp_node, cost_vec); return true; } @@ -6319,29 +6351,29 @@ vectorizable_operation (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, if (j == 0) { if (op_type == binary_op) - vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0, &vec_oprnds1, - slp_node); + vect_get_vec_defs (vinfo, op0, op1, stmt_info, + &vec_oprnds0, &vec_oprnds1, slp_node); else if (op_type == ternary_op) { if (slp_node) { auto_vec > vec_defs(3); - vect_get_slp_defs (slp_node, &vec_defs); + vect_get_slp_defs (vinfo, slp_node, &vec_defs); vec_oprnds0 = vec_defs[0]; vec_oprnds1 = vec_defs[1]; vec_oprnds2 = vec_defs[2]; } else { - vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0, + vect_get_vec_defs (vinfo, op0, op1, stmt_info, &vec_oprnds0, &vec_oprnds1, NULL); - vect_get_vec_defs (op2, NULL_TREE, stmt_info, &vec_oprnds2, - NULL, NULL); + vect_get_vec_defs (vinfo, op2, NULL_TREE, stmt_info, + &vec_oprnds2, NULL, NULL); } } else - vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL, - slp_node); + vect_get_vec_defs (vinfo, op0, NULL_TREE, stmt_info, &vec_oprnds0, + NULL, slp_node); } else { @@ -6376,7 +6408,7 @@ vectorizable_operation (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, gimple_call_set_lhs (call, new_temp); gimple_call_set_nothrow (call, true); new_stmt_info - = vect_finish_stmt_generation (stmt_info, call, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, call, gsi); } else { @@ -6385,7 +6417,7 @@ vectorizable_operation (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, new_temp = make_ssa_name (vec_dest, new_stmt); gimple_assign_set_lhs (new_stmt, new_temp); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); if (vec_cvt_dest) { new_temp = build1 (VIEW_CONVERT_EXPR, vectype_out, new_temp); @@ -6394,8 +6426,8 @@ vectorizable_operation (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, new_temp); new_temp = make_ssa_name (vec_cvt_dest, new_stmt); gimple_assign_set_lhs (new_stmt, new_temp); - new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + new_stmt_info = vect_finish_stmt_generation (vinfo, stmt_info, + new_stmt, gsi); } } if (slp_node) @@ -6663,11 +6695,11 
@@ scan_store_can_perm_p (tree vectype, tree init, Check magic stores for #pragma omp scan {in,ex}clusive reductions. */ static bool -check_scan_store (stmt_vec_info stmt_info, tree vectype, +check_scan_store (vec_info *vinfo, stmt_vec_info stmt_info, tree vectype, enum vect_def_type rhs_dt, bool slp, tree mask, vect_memory_access_type memory_access_type) { - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vec_info loop_vinfo = dyn_cast (vinfo); dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info); tree ref_type; @@ -6680,7 +6712,7 @@ check_scan_store (stmt_vec_info stmt_info, tree vectype, || loop_vinfo == NULL || LOOP_VINFO_FULLY_MASKED_P (loop_vinfo) || STMT_VINFO_GROUPED_ACCESS (stmt_info) - || !integer_zerop (get_dr_vinfo_offset (dr_info)) + || !integer_zerop (get_dr_vinfo_offset (vinfo, dr_info)) || !integer_zerop (DR_INIT (dr_info->dr)) || !(ref_type = reference_alias_ptr_type (DR_REF (dr_info->dr))) || !alias_sets_conflict_p (get_alias_set (vectype), @@ -7067,13 +7099,13 @@ check_scan_store (stmt_vec_info stmt_info, tree vectype, Handle only the transformation, checking is done in check_scan_store. */ static bool -vectorizable_scan_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, +vectorizable_scan_store (vec_info *vinfo, + stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, int ncopies) { - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vec_info loop_vinfo = dyn_cast (vinfo); dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info); tree ref_type = reference_alias_ptr_type (DR_REF (dr_info->dr)); - vec_info *vinfo = stmt_info->vinfo; tree vectype = STMT_VINFO_VECTYPE (stmt_info); if (dump_enabled_p ()) @@ -7175,7 +7207,8 @@ vectorizable_scan_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, tree vec_oprnd3 = NULL_TREE; tree dataref_ptr = DR_BASE_ADDRESS (dr_info->dr); tree dataref_offset = build_int_cst (ref_type, 0); - tree bump = vect_get_data_ptr_increment (dr_info, vectype, VMAT_CONTIGUOUS); + tree bump = vect_get_data_ptr_increment (vinfo, dr_info, + vectype, VMAT_CONTIGUOUS); tree ldataref_ptr = NULL_TREE; tree orig = NULL_TREE; if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) == 4 && !inscan_var_store) @@ -7185,10 +7218,10 @@ vectorizable_scan_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info new_stmt_info; if (j == 0) { - vec_oprnd1 = vect_get_vec_def_for_operand (*init, stmt_info); + vec_oprnd1 = vect_get_vec_def_for_operand (vinfo, *init, stmt_info); if (ldataref_ptr == NULL) - vec_oprnd2 = vect_get_vec_def_for_operand (rhs1, stmt_info); - vec_oprnd3 = vect_get_vec_def_for_operand (rhs2, stmt_info); + vec_oprnd2 = vect_get_vec_def_for_operand (vinfo, rhs1, stmt_info); + vec_oprnd3 = vect_get_vec_def_for_operand (vinfo, rhs2, stmt_info); orig = vec_oprnd3; } else @@ -7209,7 +7242,7 @@ vectorizable_scan_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, dataref_offset); vect_copy_ref_info (data_ref, DR_REF (load1_dr_info->dr)); gimple *g = gimple_build_assign (vec_oprnd2, data_ref); - new_stmt_info = vect_finish_stmt_generation (stmt_info, g, gsi); + new_stmt_info = vect_finish_stmt_generation (vinfo, stmt_info, g, gsi); if (prev_stmt_info == NULL) STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; else @@ -7227,7 +7260,7 @@ vectorizable_scan_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, != scan_store_kind_perm)) ? 
zero_vec : vec_oprnd1, v, perms[i]); - new_stmt_info = vect_finish_stmt_generation (stmt_info, g, gsi); + new_stmt_info = vect_finish_stmt_generation (vinfo, stmt_info, g, gsi); if (prev_stmt_info == NULL) STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; else @@ -7247,7 +7280,8 @@ vectorizable_scan_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, tree new_temp2 = make_ssa_name (vectype); g = gimple_build_assign (new_temp2, VEC_COND_EXPR, vb.build (), new_temp, vec_oprnd1); - new_stmt_info = vect_finish_stmt_generation (stmt_info, g, gsi); + new_stmt_info = vect_finish_stmt_generation (vinfo, stmt_info, + g, gsi); STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; prev_stmt_info = new_stmt_info; new_temp = new_temp2; @@ -7266,7 +7300,7 @@ vectorizable_scan_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, tree new_temp2 = make_ssa_name (vectype); g = gimple_build_assign (new_temp2, code, v, new_temp); - new_stmt_info = vect_finish_stmt_generation (stmt_info, g, gsi); + new_stmt_info = vect_finish_stmt_generation (vinfo, stmt_info, g, gsi); STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; prev_stmt_info = new_stmt_info; @@ -7275,7 +7309,7 @@ vectorizable_scan_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, tree new_temp = make_ssa_name (vectype); gimple *g = gimple_build_assign (new_temp, code, orig, v); - new_stmt_info = vect_finish_stmt_generation (stmt_info, g, gsi); + new_stmt_info = vect_finish_stmt_generation (vinfo, stmt_info, g, gsi); STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; prev_stmt_info = new_stmt_info; @@ -7287,7 +7321,7 @@ vectorizable_scan_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, { last_perm_arg = make_ssa_name (vectype); g = gimple_build_assign (last_perm_arg, code, new_temp, vec_oprnd2); - new_stmt_info = vect_finish_stmt_generation (stmt_info, g, gsi); + new_stmt_info = vect_finish_stmt_generation (vinfo, stmt_info, g, gsi); STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; prev_stmt_info = new_stmt_info; } @@ -7295,7 +7329,7 @@ vectorizable_scan_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, orig = make_ssa_name (vectype); g = gimple_build_assign (orig, VEC_PERM_EXPR, last_perm_arg, last_perm_arg, perms[units_log2]); - new_stmt_info = vect_finish_stmt_generation (stmt_info, g, gsi); + new_stmt_info = vect_finish_stmt_generation (vinfo, stmt_info, g, gsi); STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; prev_stmt_info = new_stmt_info; @@ -7306,7 +7340,7 @@ vectorizable_scan_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, dataref_offset); vect_copy_ref_info (data_ref, DR_REF (dr_info->dr)); g = gimple_build_assign (data_ref, new_temp); - new_stmt_info = vect_finish_stmt_generation (stmt_info, g, gsi); + new_stmt_info = vect_finish_stmt_generation (vinfo, stmt_info, g, gsi); STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; prev_stmt_info = new_stmt_info; } @@ -7324,7 +7358,7 @@ vectorizable_scan_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, vect_copy_ref_info (data_ref, DR_REF (dr_info->dr)); gimple *g = gimple_build_assign (data_ref, orig); stmt_vec_info new_stmt_info - = vect_finish_stmt_generation (stmt_info, g, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, g, gsi); STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; prev_stmt_info = new_stmt_info; } @@ -7341,7 +7375,8 @@ vectorizable_scan_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, Return true if STMT_INFO is vectorizable in this way. 
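(Illustrative aside, not part of the patch.) The perms[] plus add sequence above implements the scan in units_log2 rounds: each round permutes the running vector by a power-of-two lane count, with zeros (or the init value) shifted in, and adds it back. The same algorithm over a plain array, giving an inclusive prefix sum in log2(n) vector-width steps:

    #include <cstdio>

    int
    main ()
    {
      int v[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
      for (int shift = 1; shift < 8; shift <<= 1)
        {
          int t[8] = { 0 };
          for (int i = shift; i < 8; ++i)
            t[i] = v[i - shift];  /* the VEC_PERM step, zeros shifted in */
          for (int i = 0; i < 8; ++i)
            v[i] += t[i];         /* the PLUS step */
        }
      for (int x : v) std::printf ("%d ", x);  /* 1 3 6 10 15 21 28 36 */
      std::printf ("\n");
    }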
*/ static bool -vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, +vectorizable_store (vec_info *vinfo, + stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, slp_tree slp_node, stmt_vector_for_cost *cost_vec) { @@ -7349,7 +7384,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, tree op; tree vec_oprnd = NULL_TREE; tree elem_type; - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vec_info loop_vinfo = dyn_cast (vinfo); class loop *loop = NULL; machine_mode vec_mode; tree dummy; @@ -7371,8 +7406,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, vec vec_oprnds = vNULL; bool slp = (slp_node != NULL); unsigned int vec_num; - bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); - vec_info *vinfo = stmt_info->vinfo; + bb_vec_info bb_vinfo = dyn_cast (vinfo); tree aggr_type; gather_scatter_info gs_info; poly_uint64 vf; @@ -7426,7 +7460,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, if (mask_index >= 0) { mask = gimple_call_arg (call, mask_index); - if (!vect_check_scalar_mask (stmt_info, mask, &mask_dt, + if (!vect_check_scalar_mask (vinfo, stmt_info, mask, &mask_dt, &mask_vectype)) return false; } @@ -7468,7 +7502,8 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, return false; } - if (!vect_check_store_rhs (stmt_info, op, &rhs_dt, &rhs_vectype, &vls_type)) + if (!vect_check_store_rhs (vinfo, stmt_info, + op, &rhs_dt, &rhs_vectype, &vls_type)) return false; elem_type = TREE_TYPE (vectype); @@ -7478,8 +7513,8 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, return false; vect_memory_access_type memory_access_type; - if (!get_load_store_type (stmt_info, vectype, slp, mask, vls_type, ncopies, - &memory_access_type, &gs_info)) + if (!get_load_store_type (vinfo, stmt_info, vectype, slp, mask, vls_type, + ncopies, &memory_access_type, &gs_info)) return false; if (mask) @@ -7528,7 +7563,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) > 1 && !vec_stmt) { - if (!check_scan_store (stmt_info, vectype, rhs_dt, slp, mask, + if (!check_scan_store (vinfo, stmt_info, vectype, rhs_dt, slp, mask, memory_access_type)) return false; } @@ -7543,8 +7578,8 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, memory_access_type, &gs_info, mask); STMT_VINFO_TYPE (stmt_info) = store_vec_info_type; - vect_model_store_cost (stmt_info, ncopies, rhs_dt, memory_access_type, - vls_type, slp_node, cost_vec); + vect_model_store_cost (vinfo, stmt_info, ncopies, rhs_dt, + memory_access_type, vls_type, slp_node, cost_vec); return true; } gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info)); @@ -7629,7 +7664,8 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, if (mask == NULL_TREE) { mask_arg = build_int_cst (masktype, -1); - mask_arg = vect_init_vector (stmt_info, mask_arg, masktype, NULL); + mask_arg = vect_init_vector (vinfo, stmt_info, + mask_arg, masktype, NULL); } scale = build_int_cst (scaletype, gs_info.scale); @@ -7639,11 +7675,13 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, { if (j == 0) { - src = vec_oprnd1 = vect_get_vec_def_for_operand (op, stmt_info); - op = vec_oprnd0 = vect_get_vec_def_for_operand (gs_info.offset, + src = vec_oprnd1 = vect_get_vec_def_for_operand (vinfo, + op, stmt_info); + op = vec_oprnd0 = vect_get_vec_def_for_operand 
(vinfo, + gs_info.offset, stmt_info); if (mask) - mask_op = vec_mask = vect_get_vec_def_for_operand (mask, + mask_op = vec_mask = vect_get_vec_def_for_operand (vinfo, mask, stmt_info); } else if (modifier != NONE && (j & 1)) @@ -7653,8 +7691,8 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, src = vec_oprnd1 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd1); - op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask, - stmt_info, gsi); + op = permute_vec_elements (vinfo, vec_oprnd0, vec_oprnd0, + perm_mask, stmt_info, gsi); if (mask) mask_op = vec_mask = vect_get_vec_def_for_stmt_copy (vinfo, @@ -7662,8 +7700,8 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, } else if (modifier == NARROW) { - src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask, - stmt_info, gsi); + src = permute_vec_elements (vinfo, vec_oprnd1, vec_oprnd1, + perm_mask, stmt_info, gsi); op = vec_oprnd0 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0); } @@ -7689,7 +7727,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, src = build1 (VIEW_CONVERT_EXPR, srctype, src); gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); src = var; } @@ -7701,7 +7739,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, op = build1 (VIEW_CONVERT_EXPR, idxtype, op); gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); op = var; } @@ -7717,7 +7755,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, = gimple_build_assign (var, (j & 1) ? 
VEC_UNPACK_HI_EXPR : VEC_UNPACK_LO_EXPR, mask_op); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); mask_arg = var; } tree optype = TREE_TYPE (mask_arg); @@ -7729,7 +7767,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, mask_arg = build1 (VIEW_CONVERT_EXPR, utype, mask_arg); gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_arg); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); mask_arg = var; if (!useless_type_conversion_p (masktype, utype)) { @@ -7737,7 +7775,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, <= TYPE_PRECISION (masktype)); var = vect_get_new_ssa_name (masktype, vect_scalar_var); new_stmt = gimple_build_assign (var, NOP_EXPR, mask_arg); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); mask_arg = var; } } @@ -7745,7 +7783,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, gcall *new_stmt = gimple_build_call (gs_info.decl, 5, ptr, mask_arg, op, src, scale); stmt_vec_info new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); if (prev_stmt_info == NULL) STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; @@ -7756,7 +7794,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, return true; } else if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) >= 3) - return vectorizable_scan_store (stmt_info, gsi, vec_stmt, ncopies); + return vectorizable_scan_store (vinfo, stmt_info, gsi, vec_stmt, ncopies); if (STMT_VINFO_GROUPED_ACCESS (stmt_info)) DR_GROUP_STORE_COUNT (DR_GROUP_FIRST_ELEMENT (stmt_info))++; @@ -7821,7 +7859,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)); gcc_assert (!nested_in_vect_loop_p (loop, stmt_info)); - dr_offset = get_dr_vinfo_offset (first_dr_info); + dr_offset = get_dr_vinfo_offset (vinfo, first_dr_info); stride_base = fold_build_pointer_plus (DR_BASE_ADDRESS (first_dr_info->dr), @@ -7946,7 +7984,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, tree newoff = copy_ssa_name (running_off, NULL); incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR, running_off, pos); - vect_finish_stmt_generation (stmt_info, incr, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, incr, gsi); running_off = newoff; } unsigned int group_el = 0; @@ -7960,7 +7998,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, { if (slp) { - vect_get_vec_defs (op, NULL_TREE, stmt_info, + vect_get_vec_defs (vinfo, op, NULL_TREE, stmt_info, &vec_oprnds, NULL, slp_node); vec_oprnd = vec_oprnds[0]; } @@ -7968,7 +8006,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, { op = vect_get_store_rhs (next_stmt_info); vec_oprnd = vect_get_vec_def_for_operand - (op, next_stmt_info); + (vinfo, op, next_stmt_info); } } else @@ -7986,7 +8024,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, gimple *pun = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR, lvectype, vec_oprnd)); - vect_finish_stmt_generation (stmt_info, pun, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, pun, gsi); vec_oprnd = tem; } for (i = 0; i < nstores; i++) @@ -8013,7 +8051,8 @@ vectorizable_store (stmt_vec_info 
stmt_info, gimple_stmt_iterator *gsi, /* And store it to *running_off. */ assign = gimple_build_assign (newref, elem); stmt_vec_info assign_info - = vect_finish_stmt_generation (stmt_info, assign, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, + assign, gsi); group_el += lnel; if (! slp @@ -8022,7 +8061,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, newoff = copy_ssa_name (running_off, NULL); incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR, running_off, stride_step); - vect_finish_stmt_generation (stmt_info, incr, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, incr, gsi); running_off = newoff; group_el = 0; @@ -8057,7 +8096,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, alignment_support_scheme = dr_unaligned_supported; else alignment_support_scheme - = vect_supportable_dr_alignment (first_dr_info, false); + = vect_supportable_dr_alignment (vinfo, first_dr_info, false); gcc_assert (alignment_support_scheme); vec_loop_masks *loop_masks @@ -8096,7 +8135,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, aggr_type = build_array_type_nelts (elem_type, vec_num * nunits); else aggr_type = vectype; - bump = vect_get_data_ptr_increment (dr_info, aggr_type, + bump = vect_get_data_ptr_increment (vinfo, dr_info, aggr_type, memory_access_type); } @@ -8152,7 +8191,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, if (slp) { /* Get vectorized arguments for SLP_NODE. */ - vect_get_vec_defs (op, NULL_TREE, stmt_info, &vec_oprnds, + vect_get_vec_defs (vinfo, op, NULL_TREE, stmt_info, &vec_oprnds, NULL, slp_node); vec_oprnd = vec_oprnds[0]; @@ -8176,13 +8215,13 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, and only one iteration of the loop will be executed. */ op = vect_get_store_rhs (next_stmt_info); vec_oprnd = vect_get_vec_def_for_operand - (op, next_stmt_info); + (vinfo, op, next_stmt_info); dr_chain.quick_push (vec_oprnd); oprnds.quick_push (vec_oprnd); next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info); } if (mask) - vec_mask = vect_get_vec_def_for_operand (mask, stmt_info, + vec_mask = vect_get_vec_def_for_operand (vinfo, mask, stmt_info, mask_vectype); } @@ -8195,7 +8234,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, && !loop_masks && TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0)) - && integer_zerop (get_dr_vinfo_offset (first_dr_info)) + && integer_zerop (get_dr_vinfo_offset (vinfo, first_dr_info)) && integer_zerop (DR_INIT (first_dr_info->dr)) && alias_sets_conflict_p (get_alias_set (aggr_type), get_alias_set (TREE_TYPE (ref_type)))) @@ -8204,11 +8243,11 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, dataref_offset = build_int_cst (ref_type, 0); } else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) - vect_get_gather_scatter_ops (loop, stmt_info, &gs_info, + vect_get_gather_scatter_ops (vinfo, loop, stmt_info, &gs_info, &dataref_ptr, &vec_offset); else dataref_ptr - = vect_create_data_ref_ptr (first_stmt_info, aggr_type, + = vect_create_data_ref_ptr (vinfo, first_stmt_info, aggr_type, simd_lane_access_p ? 
loop : NULL, offset, &dummy, gsi, &ptr_incr, simd_lane_access_p, NULL_TREE, bump); @@ -8237,7 +8276,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) vec_offset = vect_get_vec_def_for_stmt_copy (vinfo, vec_offset); else - dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, + dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi, stmt_info, bump); } @@ -8251,13 +8290,14 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, /* Invalidate the current contents of VEC_ARRAY. This should become an RTL clobber too, which prevents the vector registers from being upward-exposed. */ - vect_clobber_variable (stmt_info, gsi, vec_array); + vect_clobber_variable (vinfo, stmt_info, gsi, vec_array); /* Store the individual vectors into the array. */ for (i = 0; i < vec_num; i++) { vec_oprnd = dr_chain[i]; - write_vector_array (stmt_info, gsi, vec_oprnd, vec_array, i); + write_vector_array (vinfo, stmt_info, + gsi, vec_oprnd, vec_array, i); } tree final_mask = NULL; @@ -8290,10 +8330,11 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, gimple_call_set_lhs (call, data_ref); } gimple_call_set_nothrow (call, true); - new_stmt_info = vect_finish_stmt_generation (stmt_info, call, gsi); + new_stmt_info = vect_finish_stmt_generation (vinfo, stmt_info, + call, gsi); /* Record that VEC_ARRAY is now dead. */ - vect_clobber_variable (stmt_info, gsi, vec_array); + vect_clobber_variable (vinfo, stmt_info, gsi, vec_array); } else { @@ -8303,8 +8344,8 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, if (j == 0) result_chain.create (group_size); /* Permute. */ - vect_permute_store_chain (dr_chain, group_size, stmt_info, gsi, - &result_chain); + vect_permute_store_chain (vinfo, dr_chain, group_size, stmt_info, + gsi, &result_chain); } stmt_vec_info next_stmt_info = first_stmt_info; @@ -8336,14 +8377,14 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, scale, vec_oprnd); gimple_call_set_nothrow (call, true); new_stmt_info - = vect_finish_stmt_generation (stmt_info, call, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, call, gsi); break; } if (i > 0) /* Bump the vector pointer. 
*/ - dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, - stmt_info, bump); + dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, + gsi, stmt_info, bump); if (slp) vec_oprnd = vec_oprnds[i]; @@ -8357,7 +8398,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, misalign = 0; else if (DR_MISALIGNMENT (first_dr_info) == -1) { - align = dr_alignment (vect_dr_behavior (first_dr_info)); + align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info)); misalign = 0; } else @@ -8378,7 +8419,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, gimple *perm_stmt = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd, vec_oprnd, perm_mask); - vect_finish_stmt_generation (stmt_info, perm_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi); perm_stmt = SSA_NAME_DEF_STMT (new_temp); vec_oprnd = new_temp; @@ -8395,7 +8436,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, final_mask, vec_oprnd); gimple_call_set_nothrow (call, true); new_stmt_info - = vect_finish_stmt_generation (stmt_info, call, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, call, gsi); } else { @@ -8418,7 +8459,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, gassign *new_stmt = gimple_build_assign (data_ref, vec_oprnd); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); } if (slp) @@ -8479,7 +8520,8 @@ vect_gen_perm_mask_checked (tree vectype, const vec_perm_indices &sel) permuted vector variable. */ static tree -permute_vec_elements (tree x, tree y, tree mask_vec, stmt_vec_info stmt_info, +permute_vec_elements (vec_info *vinfo, + tree x, tree y, tree mask_vec, stmt_vec_info stmt_info, gimple_stmt_iterator *gsi) { tree vectype = TREE_TYPE (x); @@ -8495,7 +8537,7 @@ permute_vec_elements (tree x, tree y, tree mask_vec, stmt_vec_info stmt_info, /* Generate the permute statement. */ perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec); - vect_finish_stmt_generation (stmt_info, perm_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi); return data_ref; } @@ -8564,7 +8606,8 @@ hoist_defs_of_uses (stmt_vec_info stmt_info, class loop *loop) Return true if STMT_INFO is vectorizable in this way. 
*/ static bool -vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, +vectorizable_load (vec_info *vinfo, + stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, slp_tree slp_node, slp_instance slp_node_instance, stmt_vector_for_cost *cost_vec) @@ -8573,7 +8616,7 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, tree vec_dest = NULL; tree data_ref = NULL; stmt_vec_info prev_stmt_info; - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo); class loop *loop = NULL; class loop *containing_loop = gimple_bb (stmt_info->stmt)->loop_father; bool nested_in_vect_loop = false; @@ -8603,11 +8646,10 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, int vec_num; bool slp = (slp_node != NULL); bool slp_perm = false; - bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); + bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo); poly_uint64 vf; tree aggr_type; gather_scatter_info gs_info; - vec_info *vinfo = stmt_info->vinfo; tree ref_type; enum vect_def_type mask_dt = vect_unknown_def_type; @@ -8654,7 +8696,7 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, if (mask_index >= 0) { mask = gimple_call_arg (call, mask_index); - if (!vect_check_scalar_mask (stmt_info, mask, &mask_dt, + if (!vect_check_scalar_mask (vinfo, stmt_info, mask, &mask_dt, &mask_vectype)) return false; } @@ -8762,8 +8804,8 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, group_size = 1; vect_memory_access_type memory_access_type; - if (!get_load_store_type (stmt_info, vectype, slp, mask, VLS_LOAD, ncopies, - &memory_access_type, &gs_info)) + if (!get_load_store_type (vinfo, stmt_info, vectype, slp, mask, VLS_LOAD, + ncopies, &memory_access_type, &gs_info)) return false; if (mask) @@ -8797,7 +8839,7 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, memory_access_type, &gs_info, mask); STMT_VINFO_TYPE (stmt_info) = load_vec_info_type; - vect_model_load_cost (stmt_info, ncopies, memory_access_type, + vect_model_load_cost (vinfo, stmt_info, ncopies, memory_access_type, slp_node_instance, slp_node, cost_vec); return true; } @@ -8817,7 +8859,8 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl) { - vect_build_gather_load_calls (stmt_info, gsi, vec_stmt, &gs_info, mask); + vect_build_gather_load_calls (vinfo, + stmt_info, gsi, vec_stmt, &gs_info, mask); return true; } @@ -8853,14 +8896,14 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info new_stmt_info; if (hoist_p) { - new_temp = vect_init_vector (stmt_info, scalar_dest, + new_temp = vect_init_vector (vinfo, stmt_info, scalar_dest, vectype, NULL); gimple *new_stmt = SSA_NAME_DEF_STMT (new_temp); new_stmt_info = vinfo->add_stmt (new_stmt); } else { - new_temp = vect_init_vector (stmt_info, scalar_dest, + new_temp = vect_init_vector (vinfo, stmt_info, scalar_dest, vectype, &gsi2); new_stmt_info = vinfo->lookup_def (new_temp); } @@ -8920,7 +8963,7 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, ref_type = reference_alias_ptr_type (DR_REF (dr_info->dr)); } - dr_offset = get_dr_vinfo_offset (first_dr_info); + dr_offset = get_dr_vinfo_offset (vinfo, first_dr_info); stride_base = fold_build_pointer_plus (DR_BASE_ADDRESS (first_dr_info->dr), @@ -9037,7 +9080,7 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, gassign *new_stmt
= gimple_build_assign (make_ssa_name (ltype), data_ref); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); if (nloads > 1) CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, gimple_assign_lhs (new_stmt)); @@ -9049,7 +9092,7 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, tree newoff = copy_ssa_name (running_off); gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR, running_off, stride_step); - vect_finish_stmt_generation (stmt_info, incr, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, incr, gsi); running_off = newoff; group_el = 0; @@ -9058,7 +9101,8 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, if (nloads > 1) { tree vec_inv = build_constructor (lvectype, v); - new_temp = vect_init_vector (stmt_info, vec_inv, lvectype, gsi); + new_temp = vect_init_vector (vinfo, stmt_info, + vec_inv, lvectype, gsi); new_stmt_info = vinfo->lookup_def (new_temp); if (lvectype != vectype) { @@ -9068,7 +9112,8 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, build1 (VIEW_CONVERT_EXPR, vectype, new_temp)); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, + new_stmt, gsi); } } @@ -9091,7 +9136,7 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, if (slp_perm) { unsigned n_perms; - vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf, + vect_transform_slp_perm_load (vinfo, slp_node, dr_chain, gsi, vf, slp_node_instance, false, &n_perms); } return true; @@ -9180,7 +9225,7 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, alignment_support_scheme = dr_unaligned_supported; else alignment_support_scheme - = vect_supportable_dr_alignment (first_dr_info, false); + = vect_supportable_dr_alignment (vinfo, first_dr_info, false); gcc_assert (alignment_support_scheme); vec_loop_masks *loop_masks @@ -9319,7 +9364,8 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, initialized yet, use first_stmt_info_for_drptr DR by bumping the distance from first_stmt_info DR instead as below. 
*/ if (!diff_first_stmt_info) - msq = vect_setup_realignment (first_stmt_info, gsi, &realignment_token, + msq = vect_setup_realignment (loop_vinfo, + first_stmt_info, gsi, &realignment_token, alignment_support_scheme, NULL_TREE, &at_loop); if (alignment_support_scheme == dr_explicit_realign_optimized) @@ -9355,7 +9401,7 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, aggr_type = build_array_type_nelts (elem_type, vec_num * nunits); else aggr_type = vectype; - bump = vect_get_data_ptr_increment (dr_info, aggr_type, + bump = vect_get_data_ptr_increment (vinfo, dr_info, aggr_type, memory_access_type); } @@ -9373,7 +9419,7 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, if (simd_lane_access_p && TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0)) - && integer_zerop (get_dr_vinfo_offset (first_dr_info)) + && integer_zerop (get_dr_vinfo_offset (vinfo, first_dr_info)) && integer_zerop (DR_INIT (first_dr_info->dr)) && alias_sets_conflict_p (get_alias_set (aggr_type), get_alias_set (TREE_TYPE (ref_type))) @@ -9386,7 +9432,7 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, else if (diff_first_stmt_info) { dataref_ptr - = vect_create_data_ref_ptr (first_stmt_info_for_drptr, + = vect_create_data_ref_ptr (vinfo, first_stmt_info_for_drptr, aggr_type, at_loop, offset, &dummy, gsi, &ptr_incr, simd_lane_access_p, byte_offset, bump); @@ -9398,11 +9444,12 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, size_binop (MINUS_EXPR, DR_INIT (first_dr_info->dr), DR_INIT (ptrdr))); - dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, + dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi, stmt_info, diff); if (alignment_support_scheme == dr_explicit_realign) { - msq = vect_setup_realignment (first_stmt_info_for_drptr, gsi, + msq = vect_setup_realignment (vinfo, + first_stmt_info_for_drptr, gsi, &realignment_token, alignment_support_scheme, dataref_ptr, &at_loop); @@ -9410,11 +9457,12 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) - vect_get_gather_scatter_ops (loop, stmt_info, &gs_info, + vect_get_gather_scatter_ops (vinfo, loop, stmt_info, &gs_info, &dataref_ptr, &vec_offset); else dataref_ptr - = vect_create_data_ref_ptr (first_stmt_info, aggr_type, at_loop, + = vect_create_data_ref_ptr (vinfo, first_stmt_info, aggr_type, + at_loop, offset, &dummy, gsi, &ptr_incr, simd_lane_access_p, byte_offset, bump); @@ -9423,11 +9471,11 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, if (slp_node) { auto_vec<vec<tree> > vec_defs (1); - vect_get_slp_defs (slp_node, &vec_defs); + vect_get_slp_defs (vinfo, slp_node, &vec_defs); vec_mask = vec_defs[0][0]; } else - vec_mask = vect_get_vec_def_for_operand (mask, stmt_info, + vec_mask = vect_get_vec_def_for_operand (vinfo, mask, stmt_info, mask_vectype); } } @@ -9439,7 +9487,7 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) vec_offset = vect_get_vec_def_for_stmt_copy (vinfo, vec_offset); else - dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, + dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi, stmt_info, bump); if (mask) vec_mask = vect_get_vec_def_for_stmt_copy (vinfo, vec_mask); @@ -9483,21 +9531,22 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, } gimple_call_set_lhs
(call, vec_array); gimple_call_set_nothrow (call, true); - new_stmt_info = vect_finish_stmt_generation (stmt_info, call, gsi); + new_stmt_info = vect_finish_stmt_generation (vinfo, stmt_info, + call, gsi); /* Extract each vector into an SSA_NAME. */ for (i = 0; i < vec_num; i++) { - new_temp = read_vector_array (stmt_info, gsi, scalar_dest, + new_temp = read_vector_array (vinfo, stmt_info, gsi, scalar_dest, vec_array, i); dr_chain.quick_push (new_temp); } /* Record the mapping between SSA_NAMEs and statements. */ - vect_record_grouped_load_vectors (stmt_info, dr_chain); + vect_record_grouped_load_vectors (vinfo, stmt_info, dr_chain); /* Record that VEC_ARRAY is now dead. */ - vect_clobber_variable (stmt_info, gsi, vec_array); + vect_clobber_variable (vinfo, stmt_info, gsi, vec_array); } else { @@ -9514,8 +9563,8 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, vec_mask, gsi); if (i > 0) - dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, - stmt_info, bump); + dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, + gsi, stmt_info, bump); /* 2. Create the vector-load in the loop. */ gimple *new_stmt = NULL; @@ -9556,7 +9605,7 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, else if (DR_MISALIGNMENT (first_dr_info) == -1) { align = dr_alignment - (vect_dr_behavior (first_dr_info)); + (vect_dr_behavior (vinfo, first_dr_info)); misalign = 0; } else @@ -9632,8 +9681,8 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, DR_REF (first_dr_info->dr)); tree tem = make_ssa_name (ltype); new_stmt = gimple_build_assign (tem, data_ref); - vect_finish_stmt_generation (stmt_info, new_stmt, - gsi); + vect_finish_stmt_generation (vinfo, stmt_info, + new_stmt, gsi); data_ref = NULL; vec<constructor_elt, va_gc> *v; vec_alloc (v, 2); @@ -9658,7 +9707,7 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, tree new_vname = make_ssa_name (new_vtype); new_stmt = gimple_build_assign ( new_vname, build_constructor (new_vtype, v)); - vect_finish_stmt_generation (stmt_info, + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); new_stmt = gimple_build_assign ( vec_dest, build1 (VIEW_CONVERT_EXPR, vectype, @@ -9675,7 +9724,7 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype)); if (compute_in_loop) - msq = vect_setup_realignment (first_stmt_info, gsi, + msq = vect_setup_realignment (vinfo, first_stmt_info, gsi, &realignment_token, dr_explicit_realign, dataref_ptr, NULL); @@ -9693,7 +9742,8 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, build_int_cst (TREE_TYPE (dataref_ptr), -(HOST_WIDE_INT) align)); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, + new_stmt, gsi); data_ref = build2 (MEM_REF, vectype, ptr, build_int_cst (ref_type, 0)); @@ -9704,13 +9754,14 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, new_temp = make_ssa_name (vec_dest, new_stmt); gimple_assign_set_lhs (new_stmt, new_temp); gimple_move_vops (new_stmt, stmt_info->stmt); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, + new_stmt, gsi); msq = new_temp; bump = size_binop (MULT_EXPR, vs, TYPE_SIZE_UNIT (elem_type)); bump = size_binop (MINUS_EXPR, bump, size_one_node); - ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, + ptr = bump_vector_ptr (vinfo, dataref_ptr, NULL, gsi, stmt_info, bump); new_stmt = gimple_build_assign
(NULL_TREE, BIT_AND_EXPR, ptr, @@ -9718,7 +9769,8 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, (TREE_TYPE (ptr), -(HOST_WIDE_INT) align)); ptr = copy_ssa_name (ptr, new_stmt); gimple_assign_set_lhs (new_stmt, ptr); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, + new_stmt, gsi); data_ref = build2 (MEM_REF, vectype, ptr, build_int_cst (ref_type, 0)); @@ -9738,7 +9790,8 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, (new_temp, BIT_AND_EXPR, dataref_ptr, build_int_cst (TREE_TYPE (dataref_ptr), -(HOST_WIDE_INT) align)); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, + new_stmt, gsi); data_ref = build2 (MEM_REF, vectype, new_temp, build_int_cst (ref_type, 0)); @@ -9757,7 +9810,8 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, new_temp = make_ssa_name (vec_dest, new_stmt); gimple_set_lhs (new_stmt, new_temp); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, + new_stmt, gsi); /* 3. Handle explicit realignment if necessary/supported. Create in loop: @@ -9774,7 +9828,8 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, new_temp = make_ssa_name (vec_dest, new_stmt); gimple_assign_set_lhs (new_stmt, new_temp); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, + new_stmt, gsi); if (alignment_support_scheme == dr_explicit_realign_optimized) { @@ -9790,7 +9845,7 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, if (memory_access_type == VMAT_CONTIGUOUS_REVERSE) { tree perm_mask = perm_mask_for_reverse (vectype); - new_temp = permute_vec_elements (new_temp, new_temp, + new_temp = permute_vec_elements (vinfo, new_temp, new_temp, perm_mask, stmt_info, gsi); new_stmt_info = vinfo->lookup_def (new_temp); } @@ -9816,8 +9871,8 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, = (wi::to_wide (TYPE_SIZE_UNIT (elem_type)) * group_gap_adj); tree bump = wide_int_to_tree (sizetype, bump_val); - dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, - stmt_info, bump); + dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, + gsi, stmt_info, bump); group_elt = 0; } } @@ -9829,7 +9884,7 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, = (wi::to_wide (TYPE_SIZE_UNIT (elem_type)) * group_gap_adj); tree bump = wide_int_to_tree (sizetype, bump_val); - dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, + dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi, stmt_info, bump); } } @@ -9840,7 +9895,7 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, if (slp_perm) { unsigned n_perms; - if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf, + if (!vect_transform_slp_perm_load (vinfo, slp_node, dr_chain, gsi, vf, slp_node_instance, false, &n_perms)) { @@ -9853,7 +9908,7 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, if (grouped_load) { if (memory_access_type != VMAT_LOAD_STORE_LANES) - vect_transform_grouped_load (stmt_info, dr_chain, + vect_transform_grouped_load (vinfo, stmt_info, dr_chain, group_size, gsi); *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); } @@ -9974,11 +10029,11 @@ vect_is_simple_cond (tree cond, vec_info *vinfo, slp_tree slp_node, Return true if STMT_INFO is vectorizable in this 
way. */ static bool -vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, +vectorizable_condition (vec_info *vinfo, + stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, slp_tree slp_node, stmt_vector_for_cost *cost_vec) { - vec_info *vinfo = stmt_info->vinfo; tree scalar_dest = NULL_TREE; tree vec_dest = NULL_TREE; tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE; @@ -9988,7 +10043,7 @@ vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE; tree vec_compare; tree new_temp; - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo); enum vect_def_type dts[4] = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type}; @@ -9998,7 +10053,7 @@ vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, enum tree_code code, cond_code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR; stmt_vec_info prev_stmt_info = NULL; int i, j; - bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); + bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo); vec<tree> vec_oprnds0 = vNULL; vec<tree> vec_oprnds1 = vNULL; vec<tree> vec_oprnds2 = vNULL; @@ -10027,7 +10082,7 @@ vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, { if (STMT_SLP_TYPE (stmt_info)) return false; - reduc_info = info_for_reduction (stmt_info); + reduc_info = info_for_reduction (vinfo, stmt_info); reduction_type = STMT_VINFO_REDUC_TYPE (reduc_info); reduc_index = STMT_VINFO_REDUC_IDX (stmt_info); gcc_assert (reduction_type != EXTRACT_LAST_REDUCTION @@ -10070,14 +10125,14 @@ vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, then_clause = gimple_assign_rhs2 (stmt); else_clause = gimple_assign_rhs3 (stmt); - if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo, slp_node, + if (!vect_is_simple_cond (cond_expr, vinfo, slp_node, &comp_vectype, &dts[0], vectype) || !comp_vectype) return false; - if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &dts[2], &vectype1)) + if (!vect_is_simple_use (then_clause, vinfo, &dts[2], &vectype1)) return false; - if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &dts[3], &vectype2)) + if (!vect_is_simple_use (else_clause, vinfo, &dts[3], &vectype2)) return false; if (vectype1 && !useless_type_conversion_p (vectype, vectype1)) @@ -10209,7 +10264,7 @@ vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, return false; STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type; - vect_model_simple_cost (stmt_info, ncopies, dts, ndts, slp_node, + vect_model_simple_cost (vinfo, stmt_info, ncopies, dts, ndts, slp_node, cost_vec, kind); return true; } @@ -10267,7 +10322,7 @@ vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, if (slp_node) { auto_vec<vec<tree>, 4> vec_defs; - vect_get_slp_defs (slp_node, &vec_defs); + vect_get_slp_defs (vinfo, slp_node, &vec_defs); vec_oprnds3 = vec_defs.pop (); vec_oprnds2 = vec_defs.pop (); if (!masked) @@ -10279,22 +10334,24 @@ vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, if (masked) { vec_cond_lhs - = vect_get_vec_def_for_operand (cond_expr, stmt_info, + = vect_get_vec_def_for_operand (vinfo, cond_expr, stmt_info, comp_vectype); } else { vec_cond_lhs - = vect_get_vec_def_for_operand (cond_expr0, + = vect_get_vec_def_for_operand (vinfo, cond_expr0, stmt_info, comp_vectype); vec_cond_rhs - = vect_get_vec_def_for_operand (cond_expr1, + =
vect_get_vec_def_for_operand (vinfo, cond_expr1, stmt_info, comp_vectype); } - vec_then_clause = vect_get_vec_def_for_operand (then_clause, + vec_then_clause = vect_get_vec_def_for_operand (vinfo, + then_clause, stmt_info); if (reduction_type != EXTRACT_LAST_REDUCTION) - vec_else_clause = vect_get_vec_def_for_operand (else_clause, + vec_else_clause = vect_get_vec_def_for_operand (vinfo, + else_clause, stmt_info); } } @@ -10349,7 +10406,7 @@ vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, new_stmt = gimple_build_assign (new_temp, bitop1, vec_cond_lhs, vec_cond_rhs); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); if (bitop2 == NOP_EXPR) vec_compare = new_temp; else if (bitop2 == BIT_NOT_EXPR) @@ -10364,7 +10421,8 @@ vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, new_stmt = gimple_build_assign (vec_compare, bitop2, vec_cond_lhs, new_temp); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, + new_stmt, gsi); } } } @@ -10401,7 +10459,7 @@ vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, tree vec_compare_name = make_ssa_name (vec_cmp_type); gassign *new_stmt = gimple_build_assign (vec_compare_name, vec_compare); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); vec_compare = vec_compare_name; } @@ -10411,7 +10469,7 @@ vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, gassign *new_stmt = gimple_build_assign (vec_compare_name, BIT_NOT_EXPR, vec_compare); - vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); vec_compare = vec_compare_name; } @@ -10425,7 +10483,7 @@ vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, gassign *g = gimple_build_assign (tmp2, BIT_AND_EXPR, vec_compare, loop_mask); - vect_finish_stmt_generation (stmt_info, g, gsi); + vect_finish_stmt_generation (vinfo, stmt_info, g, gsi); vec_compare = tmp2; } } @@ -10440,7 +10498,8 @@ vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, gimple_call_set_lhs (new_stmt, lhs); SSA_NAME_DEF_STMT (lhs) = new_stmt; if (old_stmt == gsi_stmt (*gsi)) - new_stmt_info = vect_finish_replace_stmt (stmt_info, new_stmt); + new_stmt_info = vect_finish_replace_stmt (vinfo, + stmt_info, new_stmt); else { /* In this case we're moving the definition to later in the @@ -10449,7 +10508,8 @@ vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, gimple_stmt_iterator old_gsi = gsi_for_stmt (old_stmt); gsi_remove (&old_gsi, true); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, + new_stmt, gsi); } } else @@ -10459,7 +10519,7 @@ vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, = gimple_build_assign (new_temp, VEC_COND_EXPR, vec_compare, vec_then_clause, vec_else_clause); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); } if (slp_node) SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); @@ -10493,17 +10553,17 @@ vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, Return true if STMT_INFO is vectorizable in this way. 
*/ static bool -vectorizable_comparison (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, +vectorizable_comparison (vec_info *vinfo, + stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, slp_tree slp_node, stmt_vector_for_cost *cost_vec) { - vec_info *vinfo = stmt_info->vinfo; tree lhs, rhs1, rhs2; tree vectype1 = NULL_TREE, vectype2 = NULL_TREE; tree vectype = STMT_VINFO_VECTYPE (stmt_info); tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE; tree new_temp; - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo); enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type}; int ndts = 2; poly_uint64 nunits; @@ -10511,7 +10571,7 @@ vectorizable_comparison (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR; stmt_vec_info prev_stmt_info = NULL; int i, j; - bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); + bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo); vec<tree> vec_oprnds0 = vNULL; vec<tree> vec_oprnds1 = vNULL; tree mask_type; @@ -10555,10 +10615,10 @@ vectorizable_comparison (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, rhs1 = gimple_assign_rhs1 (stmt); rhs2 = gimple_assign_rhs2 (stmt); - if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &dts[0], &vectype1)) + if (!vect_is_simple_use (rhs1, vinfo, &dts[0], &vectype1)) return false; - if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &dts[1], &vectype2)) + if (!vect_is_simple_use (rhs2, vinfo, &dts[1], &vectype2)) return false; if (vectype1 && vectype2 @@ -10652,7 +10712,8 @@ vectorizable_comparison (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, } STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type; - vect_model_simple_cost (stmt_info, ncopies * (1 + (bitop2 != NOP_EXPR)), + vect_model_simple_cost (vinfo, stmt_info, + ncopies * (1 + (bitop2 != NOP_EXPR)), dts, ndts, slp_node, cost_vec); return true; } @@ -10677,7 +10738,7 @@ vectorizable_comparison (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, if (slp_node) { auto_vec<vec<tree>, 2> vec_defs; - vect_get_slp_defs (slp_node, &vec_defs); + vect_get_slp_defs (vinfo, slp_node, &vec_defs); vec_oprnds1 = vec_defs.pop (); vec_oprnds0 = vec_defs.pop (); if (swap_p) @@ -10685,9 +10746,9 @@ vectorizable_comparison (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, } else { - vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt_info, + vec_rhs1 = vect_get_vec_def_for_operand (vinfo, rhs1, stmt_info, vectype); - vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt_info, + vec_rhs2 = vect_get_vec_def_for_operand (vinfo, rhs2, stmt_info, vectype); } } @@ -10718,7 +10779,7 @@ vectorizable_comparison (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, gassign *new_stmt = gimple_build_assign (new_temp, code, vec_rhs1, vec_rhs2); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); } else { @@ -10729,7 +10790,7 @@ vectorizable_comparison (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1, vec_rhs2); new_stmt_info - = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); if (bitop2 != NOP_EXPR) { tree res = make_ssa_name (mask); @@ -10739,7 +10800,8 @@ vectorizable_comparison (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, new_stmt = gimple_build_assign (res, bitop2, vec_rhs1, new_temp); new_stmt_info - = vect_finish_stmt_generation
(stmt_info, new_stmt, gsi); + = vect_finish_stmt_generation (vinfo, stmt_info, + new_stmt, gsi); } } if (slp_node) @@ -10769,7 +10831,8 @@ vectorizable_comparison (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, GSI and VEC_STMT_P are as for vectorizable_live_operation. */ static bool -can_vectorize_live_stmts (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, +can_vectorize_live_stmts (loop_vec_info loop_vinfo, + stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, slp_tree slp_node, slp_instance slp_node_instance, bool vec_stmt_p, stmt_vector_for_cost *cost_vec) @@ -10781,15 +10844,16 @@ can_vectorize_live_stmts (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt_info) { if (STMT_VINFO_LIVE_P (slp_stmt_info) - && !vectorizable_live_operation (slp_stmt_info, gsi, slp_node, + && !vectorizable_live_operation (loop_vinfo, + slp_stmt_info, gsi, slp_node, slp_node_instance, i, vec_stmt_p, cost_vec)) return false; } } else if (STMT_VINFO_LIVE_P (stmt_info) - && !vectorizable_live_operation (stmt_info, gsi, slp_node, - slp_node_instance, -1, + && !vectorizable_live_operation (loop_vinfo, stmt_info, gsi, + slp_node, slp_node_instance, -1, vec_stmt_p, cost_vec)) return false; @@ -10799,12 +10863,12 @@ can_vectorize_live_stmts (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, /* Make sure the statement is vectorizable. */ opt_result -vect_analyze_stmt (stmt_vec_info stmt_info, bool *need_to_vectorize, +vect_analyze_stmt (vec_info *vinfo, + stmt_vec_info stmt_info, bool *need_to_vectorize, slp_tree node, slp_instance node_instance, stmt_vector_for_cost *cost_vec) { - vec_info *vinfo = stmt_info->vinfo; - bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); + bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo); enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info); bool ok; gimple_seq pattern_def_seq; @@ -10839,7 +10903,7 @@ vect_analyze_stmt (stmt_vec_info stmt_info, bool *need_to_vectorize, pattern_def_stmt_info->stmt); opt_result res - = vect_analyze_stmt (pattern_def_stmt_info, + = vect_analyze_stmt (vinfo, pattern_def_stmt_info, need_to_vectorize, node, node_instance, cost_vec); if (!res) @@ -10899,7 +10963,7 @@ vect_analyze_stmt (stmt_vec_info stmt_info, bool *need_to_vectorize, pattern_stmt_info->stmt); opt_result res - = vect_analyze_stmt (pattern_stmt_info, need_to_vectorize, node, + = vect_analyze_stmt (vinfo, pattern_stmt_info, need_to_vectorize, node, node_instance, cost_vec); if (!res) return res; @@ -10956,39 +11020,50 @@ vect_analyze_stmt (stmt_vec_info stmt_info, bool *need_to_vectorize, /* Prefer vectorizable_call over vectorizable_simd_clone_call so -mveclibabi= takes preference over library functions with the simd attribute.
*/ - ok = (vectorizable_call (stmt_info, NULL, NULL, node, cost_vec) - || vectorizable_simd_clone_call (stmt_info, NULL, NULL, node, + ok = (vectorizable_call (vinfo, stmt_info, NULL, NULL, node, cost_vec) + || vectorizable_simd_clone_call (vinfo, stmt_info, NULL, NULL, node, cost_vec) - || vectorizable_conversion (stmt_info, NULL, NULL, node, cost_vec) - || vectorizable_operation (stmt_info, NULL, NULL, node, cost_vec) - || vectorizable_assignment (stmt_info, NULL, NULL, node, cost_vec) - || vectorizable_load (stmt_info, NULL, NULL, node, node_instance, - cost_vec) - || vectorizable_store (stmt_info, NULL, NULL, node, cost_vec) - || vectorizable_reduction (stmt_info, node, node_instance, cost_vec) - || vectorizable_induction (stmt_info, NULL, NULL, node, cost_vec) - || vectorizable_shift (stmt_info, NULL, NULL, node, cost_vec) - || vectorizable_condition (stmt_info, NULL, NULL, node, cost_vec) - || vectorizable_comparison (stmt_info, NULL, NULL, node, + || vectorizable_conversion (vinfo, stmt_info, + NULL, NULL, node, cost_vec) + || vectorizable_operation (vinfo, stmt_info, + NULL, NULL, node, cost_vec) + || vectorizable_assignment (vinfo, stmt_info, + NULL, NULL, node, cost_vec) + || vectorizable_load (vinfo, stmt_info, + NULL, NULL, node, node_instance, cost_vec) + || vectorizable_store (vinfo, stmt_info, NULL, NULL, node, cost_vec) + || vectorizable_reduction (as_a <loop_vec_info> (vinfo), stmt_info, + node, node_instance, cost_vec) + || vectorizable_induction (as_a <loop_vec_info> (vinfo), stmt_info, + NULL, NULL, node, cost_vec) + || vectorizable_shift (vinfo, stmt_info, NULL, NULL, node, cost_vec) + || vectorizable_condition (vinfo, stmt_info, + NULL, NULL, node, cost_vec) + || vectorizable_comparison (vinfo, stmt_info, NULL, NULL, node, cost_vec) - || vectorizable_lc_phi (stmt_info, NULL, node)); + || vectorizable_lc_phi (as_a <loop_vec_info> (vinfo), + stmt_info, NULL, node)); else { if (bb_vinfo) - ok = (vectorizable_call (stmt_info, NULL, NULL, node, cost_vec) - || vectorizable_simd_clone_call (stmt_info, NULL, NULL, node, - cost_vec) - || vectorizable_conversion (stmt_info, NULL, NULL, node, + ok = (vectorizable_call (vinfo, stmt_info, NULL, NULL, node, cost_vec) + || vectorizable_simd_clone_call (vinfo, stmt_info, + NULL, NULL, node, cost_vec) + || vectorizable_conversion (vinfo, stmt_info, NULL, NULL, node, cost_vec) - || vectorizable_shift (stmt_info, NULL, NULL, node, cost_vec) - || vectorizable_operation (stmt_info, NULL, NULL, node, cost_vec) - || vectorizable_assignment (stmt_info, NULL, NULL, node, + || vectorizable_shift (vinfo, stmt_info, + NULL, NULL, node, cost_vec) + || vectorizable_operation (vinfo, stmt_info, + NULL, NULL, node, cost_vec) + || vectorizable_assignment (vinfo, stmt_info, NULL, NULL, node, cost_vec) - || vectorizable_load (stmt_info, NULL, NULL, node, node_instance, - cost_vec) - || vectorizable_store (stmt_info, NULL, NULL, node, cost_vec) - || vectorizable_condition (stmt_info, NULL, NULL, node, cost_vec) - || vectorizable_comparison (stmt_info, NULL, NULL, node, + || vectorizable_load (vinfo, stmt_info, + NULL, NULL, node, node_instance, cost_vec) + || vectorizable_store (vinfo, stmt_info, + NULL, NULL, node, cost_vec) + || vectorizable_condition (vinfo, stmt_info, + NULL, NULL, node, cost_vec) + || vectorizable_comparison (vinfo, stmt_info, NULL, NULL, node, cost_vec)); } @@ -11003,7 +11078,8 @@ vect_analyze_stmt (stmt_vec_info stmt_info, bool *need_to_vectorize, if (!bb_vinfo && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type && STMT_VINFO_TYPE (stmt_info) != lc_phi_info_type - &&
!can_vectorize_live_stmts (stmt_info, NULL, node, node_instance, + && !can_vectorize_live_stmts (as_a <loop_vec_info> (vinfo), + stmt_info, NULL, node, node_instance, false, cost_vec)) return opt_result::failure_at (stmt_info->stmt, "not vectorized:" @@ -11019,10 +11095,10 @@ vect_analyze_stmt (stmt_vec_info stmt_info, bool *need_to_vectorize, Create a vectorized stmt to replace STMT_INFO, and insert it at GSI. */ bool -vect_transform_stmt (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, +vect_transform_stmt (vec_info *vinfo, + stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, slp_tree slp_node, slp_instance slp_node_instance) { - vec_info *vinfo = stmt_info->vinfo; bool is_store = false; stmt_vec_info vec_stmt = NULL; bool done; @@ -11030,10 +11106,10 @@ vect_transform_stmt (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info)); stmt_vec_info old_vec_stmt_info = STMT_VINFO_VEC_STMT (stmt_info); - bool nested_p = (STMT_VINFO_LOOP_VINFO (stmt_info) + loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo); + bool nested_p = (loop_vinfo && nested_in_vect_loop_p - (LOOP_VINFO_LOOP (STMT_VINFO_LOOP_VINFO (stmt_info)), - stmt_info)); + (LOOP_VINFO_LOOP (loop_vinfo), stmt_info)); gimple *stmt = stmt_info->stmt; switch (STMT_VINFO_TYPE (stmt_info)) @@ -11041,42 +11117,45 @@ vect_transform_stmt (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, case type_demotion_vec_info_type: case type_promotion_vec_info_type: case type_conversion_vec_info_type: - done = vectorizable_conversion (stmt_info, gsi, &vec_stmt, slp_node, - NULL); + done = vectorizable_conversion (vinfo, stmt_info, + gsi, &vec_stmt, slp_node, NULL); gcc_assert (done); break; case induc_vec_info_type: - done = vectorizable_induction (stmt_info, gsi, &vec_stmt, slp_node, + done = vectorizable_induction (as_a <loop_vec_info> (vinfo), + stmt_info, gsi, &vec_stmt, slp_node, NULL); gcc_assert (done); break; case shift_vec_info_type: - done = vectorizable_shift (stmt_info, gsi, &vec_stmt, slp_node, NULL); + done = vectorizable_shift (vinfo, stmt_info, + gsi, &vec_stmt, slp_node, NULL); gcc_assert (done); break; case op_vec_info_type: - done = vectorizable_operation (stmt_info, gsi, &vec_stmt, slp_node, + done = vectorizable_operation (vinfo, stmt_info, gsi, &vec_stmt, slp_node, NULL); gcc_assert (done); break; case assignment_vec_info_type: - done = vectorizable_assignment (stmt_info, gsi, &vec_stmt, slp_node, - NULL); + done = vectorizable_assignment (vinfo, stmt_info, + gsi, &vec_stmt, slp_node, NULL); gcc_assert (done); break; case load_vec_info_type: - done = vectorizable_load (stmt_info, gsi, &vec_stmt, slp_node, + done = vectorizable_load (vinfo, stmt_info, gsi, &vec_stmt, slp_node, slp_node_instance, NULL); gcc_assert (done); break; case store_vec_info_type: - done = vectorizable_store (stmt_info, gsi, &vec_stmt, slp_node, NULL); + done = vectorizable_store (vinfo, stmt_info, + gsi, &vec_stmt, slp_node, NULL); gcc_assert (done); if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node) { @@ -11093,40 +11172,44 @@ vect_transform_stmt (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, break; case condition_vec_info_type: - done = vectorizable_condition (stmt_info, gsi, &vec_stmt, slp_node, NULL); + done = vectorizable_condition (vinfo, stmt_info, + gsi, &vec_stmt, slp_node, NULL); gcc_assert (done); break; case comparison_vec_info_type: - done = vectorizable_comparison (stmt_info, gsi, &vec_stmt, + done = vectorizable_comparison (vinfo, stmt_info, gsi, &vec_stmt, slp_node, NULL); gcc_assert (done); break; case
call_vec_info_type: - done = vectorizable_call (stmt_info, gsi, &vec_stmt, slp_node, NULL); + done = vectorizable_call (vinfo, stmt_info, + gsi, &vec_stmt, slp_node, NULL); stmt = gsi_stmt (*gsi); break; case call_simd_clone_vec_info_type: - done = vectorizable_simd_clone_call (stmt_info, gsi, &vec_stmt, + done = vectorizable_simd_clone_call (vinfo, stmt_info, gsi, &vec_stmt, slp_node, NULL); stmt = gsi_stmt (*gsi); break; case reduc_vec_info_type: - done = vect_transform_reduction (stmt_info, gsi, &vec_stmt, slp_node); + done = vect_transform_reduction (as_a <loop_vec_info> (vinfo), stmt_info, + gsi, &vec_stmt, slp_node); gcc_assert (done); break; case cycle_phi_info_type: - done = vect_transform_cycle_phi (stmt_info, &vec_stmt, slp_node, - slp_node_instance); + done = vect_transform_cycle_phi (as_a <loop_vec_info> (vinfo), stmt_info, + &vec_stmt, slp_node, slp_node_instance); gcc_assert (done); break; case lc_phi_info_type: - done = vectorizable_lc_phi (stmt_info, &vec_stmt, slp_node); + done = vectorizable_lc_phi (as_a <loop_vec_info> (vinfo), + stmt_info, &vec_stmt, slp_node); gcc_assert (done); break; @@ -11138,6 +11221,7 @@ vect_transform_stmt (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, "stmt not supported.\n"); gcc_unreachable (); } + done = true; } /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT. @@ -11155,8 +11239,7 @@ vect_transform_stmt (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer_by_reduction)) { - class loop *innerloop = LOOP_VINFO_LOOP ( - STMT_VINFO_LOOP_VINFO (stmt_info))->inner; + class loop *innerloop = LOOP_VINFO_LOOP (loop_vinfo)->inner; imm_use_iterator imm_iter; use_operand_p use_p; tree scalar_dest; @@ -11194,7 +11277,7 @@ vect_transform_stmt (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info reduc_info; if (STMT_VINFO_REDUC_DEF (orig_stmt_info) && vect_stmt_to_vectorize (orig_stmt_info) == stmt_info - && (reduc_info = info_for_reduction (orig_stmt_info)) + && (reduc_info = info_for_reduction (vinfo, orig_stmt_info)) && STMT_VINFO_REDUC_TYPE (reduc_info) != FOLD_LEFT_REDUCTION && STMT_VINFO_REDUC_TYPE (reduc_info) != EXTRACT_LAST_REDUCTION) { @@ -11240,8 +11323,10 @@ vect_transform_stmt (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, /* Handle stmts whose DEF is used outside the loop-nest that is being vectorized. */ - done = can_vectorize_live_stmts (stmt_info, gsi, slp_node, - slp_node_instance, true, NULL); + if (is_a <loop_vec_info> (vinfo)) + done = can_vectorize_live_stmts (as_a <loop_vec_info> (vinfo), + stmt_info, gsi, slp_node, + slp_node_instance, true, NULL); gcc_assert (done); return false; @@ -11252,9 +11337,8 @@ vect_transform_stmt (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info. */ void -vect_remove_stores (stmt_vec_info first_stmt_info) +vect_remove_stores (vec_info *vinfo, stmt_vec_info first_stmt_info) { - vec_info *vinfo = first_stmt_info->vinfo; stmt_vec_info next_stmt_info = first_stmt_info; while (next_stmt_info) @@ -11707,13 +11791,14 @@ vect_is_simple_use (tree operand, vec_info *vinfo, enum vect_def_type *dt, widening operation (short in the above example).
*/ bool -supportable_widening_operation (enum tree_code code, stmt_vec_info stmt_info, +supportable_widening_operation (vec_info *vinfo, + enum tree_code code, stmt_vec_info stmt_info, tree vectype_out, tree vectype_in, enum tree_code *code1, enum tree_code *code2, int *multi_step_cvt, vec<tree> *interm_types) { - loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vec_info loop_info = dyn_cast <loop_vec_info> (vinfo); class loop *vect_loop = NULL; machine_mode vec_mode; enum insn_code icode1, icode2; @@ -11769,7 +11854,7 @@ supportable_widening_operation (enum tree_code code, stmt_vec_info stmt_info, if (vect_loop && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction && !nested_in_vect_loop_p (vect_loop, stmt_info) - && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR, + && supportable_widening_operation (vinfo, VEC_WIDEN_MULT_EVEN_EXPR, stmt_info, vectype_out, vectype_in, code1, code2, multi_step_cvt, interm_types)) @@ -12172,12 +12257,11 @@ vect_gen_while_not (gimple_seq *seq, tree mask_type, tree start_index, statement does not help to determine the overall number of units. */ opt_result -vect_get_vector_types_for_stmt (stmt_vec_info stmt_info, +vect_get_vector_types_for_stmt (vec_info *vinfo, stmt_vec_info stmt_info, tree *stmt_vectype_out, tree *nunits_vectype_out, unsigned int group_size) { - vec_info *vinfo = stmt_info->vinfo; gimple *stmt = stmt_info->stmt; /* For BB vectorization, we should always have a group size once we've diff --git a/gcc/tree-vectorizer.c b/gcc/tree-vectorizer.c index 8f9444d58a3..41ff6791966 100644 --- a/gcc/tree-vectorizer.c +++ b/gcc/tree-vectorizer.c @@ -631,7 +631,6 @@ stmt_vec_info vec_info::new_stmt_vec_info (gimple *stmt) { stmt_vec_info res = XCNEW (class _stmt_vec_info); - res->vinfo = this; res->stmt = stmt; STMT_VINFO_TYPE (res) = undef_vec_info_type; diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h index f7becb34ab4..a47ba1a6742 100644 --- a/gcc/tree-vectorizer.h +++ b/gcc/tree-vectorizer.h @@ -945,9 +945,6 @@ public: /* The stmt to which this info struct refers to. */ gimple *stmt; - /* The vec_info with respect to which STMT is vectorized. */ - vec_info *vinfo; - /* The vector type to be used for the LHS of this statement. */ tree vectype; @@ -1152,20 +1149,6 @@ struct gather_scatter_info { /* Access Functions. */ #define STMT_VINFO_TYPE(S) (S)->type #define STMT_VINFO_STMT(S) (S)->stmt -inline loop_vec_info -STMT_VINFO_LOOP_VINFO (stmt_vec_info stmt_vinfo) -{ - if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (stmt_vinfo->vinfo)) - return loop_vinfo; - return NULL; -} -inline bb_vec_info -STMT_VINFO_BB_VINFO (stmt_vec_info stmt_vinfo) -{ - if (bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (stmt_vinfo->vinfo)) - return bb_vinfo; - return NULL; -} #define STMT_VINFO_RELEVANT(S) (S)->relevant #define STMT_VINFO_LIVE_P(S) (S)->live #define STMT_VINFO_VECTYPE(S) (S)->vectype @@ -1377,11 +1360,12 @@ extern void dump_stmt_cost (FILE *, void *, int, enum vect_cost_for_stmt, /* Alias targetm.vectorize.add_stmt_cost.
*/ static inline unsigned -add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind, +add_stmt_cost (vec_info *vinfo, void *data, int count, + enum vect_cost_for_stmt kind, stmt_vec_info stmt_info, int misalign, enum vect_cost_model_location where) { - unsigned cost = targetm.vectorize.add_stmt_cost (data, count, kind, + unsigned cost = targetm.vectorize.add_stmt_cost (vinfo, data, count, kind, stmt_info, misalign, where); if (dump_file && (dump_flags & TDF_DETAILS)) dump_stmt_cost (dump_file, data, count, kind, stmt_info, misalign, @@ -1407,12 +1391,12 @@ destroy_cost_data (void *data) } inline void -add_stmt_costs (void *data, stmt_vector_for_cost *cost_vec) +add_stmt_costs (vec_info *vinfo, void *data, stmt_vector_for_cost *cost_vec) { stmt_info_for_cost *cost; unsigned i; FOR_EACH_VEC_ELT (*cost_vec, i, cost) - add_stmt_cost (data, cost->count, cost->kind, cost->stmt_info, + add_stmt_cost (vinfo, data, cost->count, cost->kind, cost->stmt_info, cost->misalign, cost->where); } @@ -1480,10 +1464,10 @@ vect_known_alignment_in_bytes (dr_vec_info *dr_info) in DR_INFO itself). */ static inline innermost_loop_behavior * -vect_dr_behavior (dr_vec_info *dr_info) +vect_dr_behavior (vec_info *vinfo, dr_vec_info *dr_info) { stmt_vec_info stmt_info = dr_info->stmt; - loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); + loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo); if (loop_vinfo == NULL || !nested_in_vect_loop_p (LOOP_VINFO_LOOP (loop_vinfo), stmt_info)) return &DR_INNERMOST (dr_info->dr); @@ -1496,11 +1480,12 @@ vect_dr_behavior (dr_vec_info *dr_info) vect_dr_behavior to select the appropriate data_reference to use. */ inline tree -get_dr_vinfo_offset (dr_vec_info *dr_info, bool check_outer = false) +get_dr_vinfo_offset (vec_info *vinfo, + dr_vec_info *dr_info, bool check_outer = false) { innermost_loop_behavior *base; if (check_outer) - base = vect_dr_behavior (dr_info); + base = vect_dr_behavior (vinfo, dr_info); else base = &dr_info->dr->innermost; @@ -1705,7 +1690,8 @@ extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *, extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *, tree *, stmt_vec_info * = NULL, gimple ** = NULL); -extern bool supportable_widening_operation (enum tree_code, stmt_vec_info, +extern bool supportable_widening_operation (vec_info *, + enum tree_code, stmt_vec_info, tree, tree, enum tree_code *, enum tree_code *, int *, vec<tree> *); @@ -1715,31 +1701,36 @@ extern bool supportable_narrowing_operation (enum tree_code, tree, tree, extern unsigned record_stmt_cost (stmt_vector_for_cost *, int, enum vect_cost_for_stmt, stmt_vec_info, int, enum vect_cost_model_location); -extern stmt_vec_info vect_finish_replace_stmt (stmt_vec_info, gimple *); -extern stmt_vec_info vect_finish_stmt_generation (stmt_vec_info, gimple *, +extern stmt_vec_info vect_finish_replace_stmt (vec_info *, + stmt_vec_info, gimple *); +extern stmt_vec_info vect_finish_stmt_generation (vec_info *, + stmt_vec_info, gimple *, gimple_stmt_iterator *); extern opt_result vect_mark_stmts_to_be_vectorized (loop_vec_info, bool *); extern tree vect_get_store_rhs (stmt_vec_info); extern tree vect_get_vec_def_for_operand_1 (stmt_vec_info, enum vect_def_type); -extern tree vect_get_vec_def_for_operand (tree, stmt_vec_info, tree = NULL); -extern void vect_get_vec_defs (tree, tree, stmt_vec_info, vec<tree> *, - vec<tree> *, slp_tree); +extern tree vect_get_vec_def_for_operand (vec_info *, tree, + stmt_vec_info, tree = NULL); +extern void vect_get_vec_defs (vec_info *, tree, tree,
stmt_vec_info, + vec<tree> *, vec<tree> *, slp_tree); extern void vect_get_vec_defs_for_stmt_copy (vec_info *, vec<tree> *, vec<tree> *); -extern tree vect_init_vector (stmt_vec_info, tree, tree, +extern tree vect_init_vector (vec_info *, stmt_vec_info, tree, tree, gimple_stmt_iterator *); extern tree vect_get_vec_def_for_stmt_copy (vec_info *, tree); -extern bool vect_transform_stmt (stmt_vec_info, gimple_stmt_iterator *, +extern bool vect_transform_stmt (vec_info *, stmt_vec_info, + gimple_stmt_iterator *, slp_tree, slp_instance); -extern void vect_remove_stores (stmt_vec_info); +extern void vect_remove_stores (vec_info *, stmt_vec_info); extern bool vect_nop_conversion_p (stmt_vec_info); -extern opt_result vect_analyze_stmt (stmt_vec_info, bool *, slp_tree, +extern opt_result vect_analyze_stmt (vec_info *, stmt_vec_info, bool *, + slp_tree, slp_instance, stmt_vector_for_cost *); -extern void vect_get_load_cost (stmt_vec_info, int, bool, +extern void vect_get_load_cost (vec_info *, stmt_vec_info, int, bool, unsigned int *, unsigned int *, stmt_vector_for_cost *, stmt_vector_for_cost *, bool); -extern void vect_get_store_cost (stmt_vec_info, int, +extern void vect_get_store_cost (vec_info *, stmt_vec_info, int, unsigned int *, stmt_vector_for_cost *); extern bool vect_supportable_shift (vec_info *, enum tree_code, tree); extern tree vect_gen_perm_mask_any (tree, const vec_perm_indices &); @@ -1747,22 +1738,24 @@ extern tree vect_gen_perm_mask_checked (tree, const vec_perm_indices &); extern void optimize_mask_stores (class loop*); extern gcall *vect_gen_while (tree, tree, tree); extern tree vect_gen_while_not (gimple_seq *, tree, tree, tree); -extern opt_result vect_get_vector_types_for_stmt (stmt_vec_info, tree *, +extern opt_result vect_get_vector_types_for_stmt (vec_info *, + stmt_vec_info, tree *, tree *, unsigned int = 0); extern opt_tree vect_get_mask_type_for_stmt (stmt_vec_info, unsigned int = 0); /* In tree-vect-data-refs.c.
*/ extern bool vect_can_force_dr_alignment_p (const_tree, poly_uint64); extern enum dr_alignment_support vect_supportable_dr_alignment - (dr_vec_info *, bool); + (vec_info *, dr_vec_info *, bool); extern tree vect_get_smallest_scalar_type (stmt_vec_info, HOST_WIDE_INT *, HOST_WIDE_INT *); extern opt_result vect_analyze_data_ref_dependences (loop_vec_info, unsigned int *); -extern bool vect_slp_analyze_instance_dependence (slp_instance); +extern bool vect_slp_analyze_instance_dependence (vec_info *, slp_instance); extern opt_result vect_enhance_data_refs_alignment (loop_vec_info); extern opt_result vect_analyze_data_refs_alignment (loop_vec_info); extern opt_result vect_verify_datarefs_alignment (loop_vec_info); -extern bool vect_slp_analyze_and_verify_instance_alignment (slp_instance); +extern bool vect_slp_analyze_and_verify_instance_alignment (vec_info *, + slp_instance); extern opt_result vect_analyze_data_ref_accesses (vec_info *); extern opt_result vect_prune_runtime_alias_test_list (loop_vec_info); extern bool vect_gather_scatter_fn_p (vec_info *, bool, bool, tree, tree, @@ -1773,11 +1766,12 @@ extern opt_result vect_find_stmt_data_reference (loop_p, gimple *, vec<data_reference_p> *); extern opt_result vect_analyze_data_refs (vec_info *, poly_uint64 *, bool *); extern void vect_record_base_alignments (vec_info *); -extern tree vect_create_data_ref_ptr (stmt_vec_info, tree, class loop *, tree, +extern tree vect_create_data_ref_ptr (vec_info *, + stmt_vec_info, tree, class loop *, tree, tree *, gimple_stmt_iterator *, gimple **, bool, tree = NULL_TREE, tree = NULL_TREE); -extern tree bump_vector_ptr (tree, gimple *, gimple_stmt_iterator *, +extern tree bump_vector_ptr (vec_info *, tree, gimple *, gimple_stmt_iterator *, stmt_vec_info, tree); extern void vect_copy_ref_info (tree, tree); extern tree vect_create_destination_var (tree, tree); @@ -1785,18 +1779,22 @@ extern bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT); extern bool vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT, bool); extern bool vect_grouped_load_supported (tree, bool, unsigned HOST_WIDE_INT); extern bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT, bool); -extern void vect_permute_store_chain (vec<tree> ,unsigned int, stmt_vec_info, - gimple_stmt_iterator *, vec<tree> *); +extern void vect_permute_store_chain (vec_info *, + vec<tree> ,unsigned int, stmt_vec_info, + gimple_stmt_iterator *, vec<tree> *); -extern tree vect_setup_realignment (stmt_vec_info, gimple_stmt_iterator *, +extern tree vect_setup_realignment (vec_info *, + stmt_vec_info, gimple_stmt_iterator *, tree *, enum dr_alignment_support, tree, class loop **); -extern void vect_transform_grouped_load (stmt_vec_info, vec<tree> , int, - gimple_stmt_iterator *); -extern void vect_record_grouped_load_vectors (stmt_vec_info, vec<tree>); +extern void vect_transform_grouped_load (vec_info *, stmt_vec_info, vec<tree>, + int, gimple_stmt_iterator *); +extern void vect_record_grouped_load_vectors (vec_info *, + stmt_vec_info, vec<tree>); extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *); extern tree vect_get_new_ssa_name (tree, enum vect_var_kind, const char * = NULL); -extern tree vect_create_addr_base_for_vector_ref (stmt_vec_info, gimple_seq *, +extern tree vect_create_addr_base_for_vector_ref (vec_info *, + stmt_vec_info, gimple_seq *, tree, tree = NULL_TREE); /* In tree-vect-loop.c.
*/ @@ -1818,25 +1816,31 @@ extern void vect_record_loop_mask (loop_vec_info, vec_loop_masks *, unsigned int, tree, tree); extern tree vect_get_loop_mask (gimple_stmt_iterator *, vec_loop_masks *, unsigned int, tree, unsigned int); -extern stmt_vec_info info_for_reduction (stmt_vec_info); +extern stmt_vec_info info_for_reduction (vec_info *, stmt_vec_info); /* Drive for loop transformation stage. */ extern class loop *vect_transform_loop (loop_vec_info, gimple *); extern opt_loop_vec_info vect_analyze_loop_form (class loop *, vec_info_shared *); -extern bool vectorizable_live_operation (stmt_vec_info, gimple_stmt_iterator *, +extern bool vectorizable_live_operation (loop_vec_info, + stmt_vec_info, gimple_stmt_iterator *, slp_tree, slp_instance, int, bool, stmt_vector_for_cost *); -extern bool vectorizable_reduction (stmt_vec_info, slp_tree, slp_instance, +extern bool vectorizable_reduction (loop_vec_info, stmt_vec_info, + slp_tree, slp_instance, stmt_vector_for_cost *); -extern bool vectorizable_induction (stmt_vec_info, gimple_stmt_iterator *, +extern bool vectorizable_induction (loop_vec_info, stmt_vec_info, + gimple_stmt_iterator *, stmt_vec_info *, slp_tree, stmt_vector_for_cost *); -extern bool vect_transform_reduction (stmt_vec_info, gimple_stmt_iterator *, +extern bool vect_transform_reduction (loop_vec_info, stmt_vec_info, + gimple_stmt_iterator *, stmt_vec_info *, slp_tree); -extern bool vect_transform_cycle_phi (stmt_vec_info, stmt_vec_info *, +extern bool vect_transform_cycle_phi (loop_vec_info, stmt_vec_info, + stmt_vec_info *, slp_tree, slp_instance); -extern bool vectorizable_lc_phi (stmt_vec_info, stmt_vec_info *, slp_tree); +extern bool vectorizable_lc_phi (loop_vec_info, stmt_vec_info, + stmt_vec_info *, slp_tree); extern bool vect_worthwhile_without_simd_p (vec_info *, tree_code); extern int vect_get_known_peeling_cost (loop_vec_info, int, int *, stmt_vector_for_cost *, @@ -1846,7 +1850,7 @@ extern tree cse_and_gimplify_to_preheader (loop_vec_info, tree); /* In tree-vect-slp.c. */ extern void vect_free_slp_instance (slp_instance, bool); -extern bool vect_transform_slp_perm_load (slp_tree, vec<tree> , +extern bool vect_transform_slp_perm_load (vec_info *, slp_tree, vec<tree>, gimple_stmt_iterator *, poly_uint64, slp_instance, bool, unsigned *); extern bool vect_slp_analyze_operations (vec_info *); @@ -1854,7 +1858,8 @@ extern void vect_schedule_slp (vec_info *); extern opt_result vect_analyze_slp (vec_info *, unsigned); extern bool vect_make_slp_decision (loop_vec_info); extern void vect_detect_hybrid_slp (loop_vec_info); -extern void vect_get_slp_defs (slp_tree, vec<vec<tree> > *, unsigned n = -1U); +extern void vect_get_slp_defs (vec_info *, slp_tree, vec<vec<tree> > *, + unsigned n = -1U); extern bool vect_slp_bb (basic_block); extern stmt_vec_info vect_find_last_scalar_stmt_in_slp (slp_tree); extern bool is_simple_and_all_uses_invariant (stmt_vec_info, loop_vec_info); -- 2.30.2
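As a minimal sketch of the calling convention this patch establishes (the helper name example_vect_helper and its body are hypothetical, not part of the patch): a function that previously reached the owning vec_info through the removed stmt_info->vinfo back-pointer now takes it as an explicit parameter and narrows it with GCC's dyn_cast/as_a/is_a idiom where a loop_vec_info or bb_vec_info is needed, exactly as the adjusted vectorizable_* routines above do.

static bool
example_vect_helper (vec_info *vinfo, stmt_vec_info stmt_info)
{
  /* Narrow the explicit vec_info; this replaces the deleted
     STMT_VINFO_LOOP_VINFO / STMT_VINFO_BB_VINFO accessors.  */
  if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
    return !nested_in_vect_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
				   stmt_info);
  /* Otherwise this is basic-block vectorization.  */
  gcc_assert (is_a <bb_vec_info> (vinfo));
  return true;
}

Call sites simply forward the vec_info they already hold, e.g. example_vect_helper (vinfo, stmt_info), so SLP nodes without a scalar stmt (and hence without a stmt_vec_info) will still be able to reach the vec_info.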