+2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * tree-vectorizer.h (_slp_instance::unrolling_factor): Change
+ from an unsigned int to a poly_uint64.
+ (_loop_vec_info::slp_unrolling_factor): Likewise.
+ (_loop_vec_info::vectorization_factor): Change from an int
+ to a poly_uint64.
+ (MAX_VECTORIZATION_FACTOR): Bump from 64 to INT_MAX.
+ (vect_get_num_vectors): New function.
+ (vect_update_max_nunits, vect_vf_for_cost): Likewise.
+ (vect_get_num_copies): Use vect_get_num_vectors.
+ (vect_analyze_data_ref_dependences): Change max_vf from an int *
+ to an unsigned int *.
+ (vect_analyze_data_refs): Change min_vf from an int * to a
+ poly_uint64 *.
+ (vect_transform_slp_perm_load): Take the vf as a poly_uint64 rather
+ than an unsigned HOST_WIDE_INT.
+ * tree-vect-data-refs.c (vect_analyze_possibly_independent_ddr)
+ (vect_analyze_data_ref_dependence): Change max_vf from an int *
+ to an unsigned int *.
+ (vect_analyze_data_ref_dependences): Likewise.
+ (vect_compute_data_ref_alignment): Handle polynomial vf.
+ (vect_enhance_data_refs_alignment): Likewise.
+ (vect_prune_runtime_alias_test_list): Likewise.
+ (vect_shift_permute_load_chain): Likewise.
+ (vect_supportable_dr_alignment): Likewise.
+ (dependence_distance_ge_vf): Take the vectorization factor as a
+ poly_uint64 rather than an unsigned HOST_WIDE_INT.
+ (vect_analyze_data_refs): Change min_vf from an int * to a
+ poly_uint64 *.
+ * tree-vect-loop-manip.c (vect_gen_scalar_loop_niters): Take
+ vfm1 as a poly_uint64 rather than an int. Make the same change
+ for the returned bound_scalar.
+ (vect_gen_vector_loop_niters): Handle polynomial vf.
+ (vect_do_peeling): Likewise. Update call to
+ vect_gen_scalar_loop_niters and handle polynomial bound_scalars.
+ (vect_gen_vector_loop_niters_mult_vf): Assert that the vf must
+ be constant.
+ * tree-vect-loop.c (vect_determine_vectorization_factor)
+ (vect_update_vf_for_slp, vect_analyze_loop_2): Handle polynomial vf.
+ (vect_get_known_peeling_cost): Likewise.
+ (vect_estimate_min_profitable_iters, vectorizable_reduction): Likewise.
+ (vect_worthwhile_without_simd_p, vectorizable_induction): Likewise.
+ (vect_transform_loop): Likewise. Use the lowest possible VF when
+ updating the upper bounds of the loop.
+ (vect_min_worthwhile_factor): Make static. Return an unsigned int
+ rather than an int.
+ * tree-vect-slp.c (vect_attempt_slp_rearrange_stmts): Cope with
+ polynomial unroll factors.
+ (vect_analyze_slp_cost_1, vect_analyze_slp_instance): Likewise.
+ (vect_make_slp_decision): Likewise.
+ (vect_supported_load_permutation_p): Likewise, and polynomial
+ vf too.
+ (vect_analyze_slp_cost): Handle polynomial vf.
+ (vect_slp_analyze_node_operations): Likewise.
+ (vect_slp_analyze_bb_1): Likewise.
+ (vect_transform_slp_perm_load): Take the vf as a poly_uint64 rather
+ than an unsigned HOST_WIDE_INT.
+ * tree-vect-stmts.c (vectorizable_simd_clone_call, vectorizable_store)
+ (vectorizable_load): Handle polynomial vf.
+ * tree-vectorizer.c (simduid_to_vf::vf): Change from an int to
+ a poly_uint64.
+ (adjust_simduid_builtins, shrink_simd_arrays): Update accordingly.
+
2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
+2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * gcc.dg/vect-opt-info-1.c: New test.
+
2018-01-02 Michael Meissner <meissner@linux.vnet.ibm.com>
* gcc.target/powerpc/float128-hw2.c: Add tests for ceilf128,
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect-opt-info-1.c
+/* { dg-options "-std=c99 -fopt-info -O3" } */
+
+void
+vadd (int *dst, int *op1, int *op2, int count)
+{
+ for (int i = 0; i < count; ++i)
+ dst[i] = op1[i] + op2[i];
+}
+
+/* { dg-message "loop vectorized" "" { target *-*-* } 6 } */
+/* { dg-message "loop versioned for vectorization because of possible aliasing" "" { target *-*-* } 6 } */
static bool
vect_analyze_possibly_independent_ddr (data_dependence_relation *ddr,
loop_vec_info loop_vinfo,
- int loop_depth, int *max_vf)
+ int loop_depth, unsigned int *max_vf)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
lambda_vector dist_v;
would be a win. */
if (loop->safelen >= 2 && abs_hwi (dist) <= loop->safelen)
{
- if (loop->safelen < *max_vf)
+ if ((unsigned int) loop->safelen < *max_vf)
*max_vf = loop->safelen;
LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
continue;
static bool
vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
- loop_vec_info loop_vinfo, int *max_vf)
+ loop_vec_info loop_vinfo,
+ unsigned int *max_vf)
{
unsigned int i;
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
executed concurrently, assume independence. */
if (loop->safelen >= 2)
{
- if (loop->safelen < *max_vf)
+ if ((unsigned int) loop->safelen < *max_vf)
*max_vf = loop->safelen;
LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
return false;
executed concurrently, assume independence. */
if (loop->safelen >= 2)
{
- if (loop->safelen < *max_vf)
+ if ((unsigned int) loop->safelen < *max_vf)
*max_vf = loop->safelen;
LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
return false;
continue;
}
- if (abs (dist) >= 2
- && abs (dist) < *max_vf)
+ unsigned int abs_dist = abs (dist);
+ if (abs_dist >= 2 && abs_dist < *max_vf)
{
/* The dependence distance requires reduction of the maximal
vectorization factor. */
*max_vf);
}
- if (abs (dist) >= *max_vf)
+ if (abs_dist >= *max_vf)
{
/* Dependence distance does not create dependence, as far as
vectorization is concerned, in this case. */
the maximum vectorization factor the data dependences allow. */
bool
-vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo, int *max_vf)
+vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo,
+ unsigned int *max_vf)
{
unsigned int i;
struct data_dependence_relation *ddr;
the dataref evenly divides by the alignment. */
else
{
- unsigned vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+ poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
step_preserves_misalignment_p
- = ((DR_STEP_ALIGNMENT (dr) * vf) % vector_alignment) == 0;
+ = multiple_p (DR_STEP_ALIGNMENT (dr) * vf, vector_alignment);
if (!step_preserves_misalignment_p && dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
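
step_preserves_misalignment_p now asks whether DR_STEP_ALIGNMENT (dr) * vf is a multiple of the vector alignment for every possible runtime VF.  In the degree-1 analogue sketched earlier, the predicate reduces to a per-coefficient check (sketch, assuming a constant alignment C):

    /* multiple_p: is A + B*X a multiple of the constant C for every
       X >= 0?  True iff each coefficient is a multiple of C.  */
    static bool multiple_p (poly_u64 x, uint64_t c)
    { return x.a % c == 0 && x.b % c == 0; }

For example, a step alignment of 8 with VF = 4 + 4X gives 32 + 32X, which is a multiple of a 16-byte vector alignment whatever X turns out to be.
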
bool one_misalignment_unknown = false;
bool one_dr_unsupportable = false;
struct data_reference *unsupportable_dr = NULL;
- unsigned int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+ poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
unsigned possible_npeel_number = 1;
tree vectype;
- unsigned int nelements, mis, same_align_drs_max = 0;
+ unsigned int mis, same_align_drs_max = 0;
hash_table<peel_info_hasher> peeling_htab (1);
if (dump_enabled_p ())
size_zero_node) < 0;
vectype = STMT_VINFO_VECTYPE (stmt_info);
- nelements = TYPE_VECTOR_SUBPARTS (vectype);
unsigned int target_align = DR_TARGET_ALIGNMENT (dr);
unsigned int dr_size = vect_get_scalar_dr_size (dr);
mis = (negative ? DR_MISALIGNMENT (dr) : -DR_MISALIGNMENT (dr));
cost for every peeling option. */
if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
{
- if (STMT_SLP_TYPE (stmt_info))
- possible_npeel_number
- = (vf * GROUP_SIZE (stmt_info)) / nelements;
- else
- possible_npeel_number = vf / nelements;
+ poly_uint64 nscalars = (STMT_SLP_TYPE (stmt_info)
+ ? vf * GROUP_SIZE (stmt_info) : vf);
+ possible_npeel_number
+ = vect_get_num_vectors (nscalars, vectype);
/* NPEEL_TMP is 0 when there is no misalignment, but also
allow peeling NELEMENTS. */
unsigned int load_outside_cost = 0;
unsigned int store_inside_cost = 0;
unsigned int store_outside_cost = 0;
+ unsigned int estimated_npeels = vect_vf_for_cost (loop_vinfo) / 2;
stmt_vector_for_cost dummy;
dummy.create (2);
vect_get_peeling_costs_all_drs (datarefs, dr0,
&load_inside_cost,
&load_outside_cost,
- &dummy, vf / 2, true);
+ &dummy, estimated_npeels, true);
dummy.release ();
if (first_store)
vect_get_peeling_costs_all_drs (datarefs, first_store,
&store_inside_cost,
&store_outside_cost,
- &dummy, vf / 2, true);
+ &dummy, estimated_npeels, true);
dummy.release ();
}
else
int dummy2;
peel_for_unknown_alignment.outside_cost += vect_get_known_peeling_cost
- (loop_vinfo, vf / 2, &dummy2,
+ (loop_vinfo, estimated_npeels, &dummy2,
&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
&prologue_cost_vec, &epilogue_cost_vec);
}
/* Cost model #2 - if peeling may result in a remaining loop not
- iterating enough to be vectorized then do not peel. */
+ iterating enough to be vectorized then do not peel. Since this
+ is a cost heuristic rather than a correctness decision, use the
+ most likely runtime value for variable vectorization factors. */
if (do_peeling
&& LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
{
- unsigned max_peel
- = npeel == 0 ? LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1 : npeel;
- if (LOOP_VINFO_INT_NITERS (loop_vinfo)
- < LOOP_VINFO_VECT_FACTOR (loop_vinfo) + max_peel)
+ unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
+ unsigned int max_peel = npeel == 0 ? assumed_vf - 1 : npeel;
+ if ((unsigned HOST_WIDE_INT) LOOP_VINFO_INT_NITERS (loop_vinfo)
+ < assumed_vf + max_peel)
do_peeling = false;
}
static bool
dependence_distance_ge_vf (data_dependence_relation *ddr,
- unsigned int loop_depth, unsigned HOST_WIDE_INT vf)
+ unsigned int loop_depth, poly_uint64 vf)
{
if (DDR_ARE_DEPENDENT (ddr) != NULL_TREE
|| DDR_NUM_DIST_VECTS (ddr) == 0)
HOST_WIDE_INT dist = dist_v[loop_depth];
if (dist != 0
&& !(dist > 0 && DDR_REVERSED_P (ddr))
- && (unsigned HOST_WIDE_INT) abs_hwi (dist) < vf)
+ && maybe_lt ((unsigned HOST_WIDE_INT) abs_hwi (dist), vf))
return false;
}
= LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo);
vec<vec_object_pair> &check_unequal_addrs
= LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo);
- int vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+ poly_uint64 vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
ddr_p ddr;
comp_alias_ddrs.safe_push (dr_with_seg_len_pair);
}
- prune_runtime_alias_test_list (&comp_alias_ddrs,
- (unsigned HOST_WIDE_INT) vect_factor);
+ prune_runtime_alias_test_list (&comp_alias_ddrs, vect_factor);
unsigned int count = (comp_alias_ddrs.length ()
+ check_unequal_addrs.length ());
*/
bool
-vect_analyze_data_refs (vec_info *vinfo, int *min_vf)
+vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf)
{
struct loop *loop = NULL;
unsigned int i;
tree base, offset, init;
enum { SG_NONE, GATHER, SCATTER } gatherscatter = SG_NONE;
bool simd_lane_access = false;
- int vf;
+ poly_uint64 vf;
again:
if (!dr || !DR_REF (dr))
/* Adjust the minimal vectorization factor according to the
vector type. */
vf = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
- if (vf > *min_vf)
- *min_vf = vf;
+ *min_vf = upper_bound (*min_vf, vf);
if (gatherscatter != SG_NONE)
{
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ unsigned HOST_WIDE_INT vf;
+ if (!LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
+ /* Not supported for variable-length vectors. */
+ return false;
+
vec_perm_builder sel (nelt, nelt, 1);
sel.quick_grow (nelt);
memcpy (result_chain->address (), dr_chain.address (),
length * sizeof (tree));
- if (pow2p_hwi (length) && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 4)
+ if (pow2p_hwi (length) && vf > 4)
{
unsigned int j, log_length = exact_log2 (length);
for (i = 0; i < nelt / 2; ++i)
}
return true;
}
- if (length == 3 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 2)
+ if (length == 3 && vf > 2)
{
unsigned int k = 0, l = 0;
same alignment, instead it depends on the SLP group size. */
if (loop_vinfo
&& STMT_SLP_TYPE (stmt_info)
- && (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
- * GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)))
- % TYPE_VECTOR_SUBPARTS (vectype) != 0))
+ && !multiple_p (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
+ * GROUP_SIZE (vinfo_for_stmt
+ (GROUP_FIRST_ELEMENT (stmt_info))),
+ TYPE_VECTOR_SUBPARTS (vectype)))
;
else if (!loop_vinfo
|| (nested_in_vect_loop
static tree
vect_gen_scalar_loop_niters (tree niters_prolog, int int_niters_prolog,
- int bound_prolog, int vfm1, int th,
- int *bound_scalar, bool check_profitability)
+ int bound_prolog, poly_int64 vfm1, int th,
+ poly_uint64 *bound_scalar,
+ bool check_profitability)
{
tree type = TREE_TYPE (niters_prolog);
tree niters = fold_build2 (PLUS_EXPR, type, niters_prolog,
/* Peeling for constant times. */
if (int_niters_prolog >= 0)
{
- *bound_scalar = (int_niters_prolog + vfm1 < th
- ? th
- : vfm1 + int_niters_prolog);
+ *bound_scalar = upper_bound (int_niters_prolog + vfm1, th);
return build_int_cst (type, *bound_scalar);
}
/* Peeling for unknown times. Note BOUND_PROLOG is the upper
   bound (included) of niters of prolog loop.  */
- if (th >= vfm1 + bound_prolog)
+ if (known_ge (th, vfm1 + bound_prolog))
{
*bound_scalar = th;
return build_int_cst (type, th);
}
- /* Need to do runtime comparison, but BOUND_SCALAR remains the same. */
- else if (th > vfm1)
- return fold_build2 (MAX_EXPR, type, build_int_cst (type, th), niters);
+ /* Need to do runtime comparison. */
+ else if (maybe_gt (th, vfm1))
+ {
+ *bound_scalar = upper_bound (*bound_scalar, th);
+ return fold_build2 (MAX_EXPR, type,
+ build_int_cst (type, th), niters);
+ }
}
return niters;
}
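
The bound_scalar handling is the subtlest polynomial change here: TH is a constant profitability threshold while VFM1 is polynomial, so known_ge means TH dominates the prolog bound for every runtime VF (no MAX needed), whereas maybe_gt only says TH can dominate, forcing a runtime MAX_EXPR and a widened *bound_scalar.  A hypothetical driver in the analogue notation from above:

    static bool maybe_gt (poly_u64 x, poly_u64 y)
    { return maybe_lt (y, x); }

    /* Return true when vect_gen_scalar_loop_niters-style code must emit
       a runtime MAX against the threshold TH (hypothetical sketch).  */
    static bool need_runtime_max (uint64_t th, poly_u64 vfm1,
                                  uint64_t bound_prolog)
    {
      poly_u64 th_p = { th, 0 };
      poly_u64 limit = { vfm1.a + bound_prolog, vfm1.b };
      if (known_ge (th_p, limit))
        return false;    /* TH always wins; the bound is TH itself.  */
      return maybe_gt (th_p, vfm1);
    }

For a truly variable VF the known_ge test can never succeed (a constant cannot dominate a growing polynomial), so such loops always take the runtime-comparison path whenever the threshold exceeds the minimum VF - 1.
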
{
tree ni_minus_gap, var;
tree niters_vector, step_vector, type = TREE_TYPE (niters);
- int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+ poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
edge pe = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
tree log_vf = NULL_TREE;
else
ni_minus_gap = niters;
- if (1)
+ unsigned HOST_WIDE_INT const_vf;
+ if (vf.is_constant (&const_vf))
{
/* Create: niters >> log2(vf) */
/* If it's known that niters == number of latch executions + 1 doesn't
overflow, we can generate niters >> log2(vf); otherwise we generate
(niters - vf) >> log2(vf) + 1 by using the fact that we know ratio
will be at least one. */
- log_vf = build_int_cst (type, exact_log2 (vf));
+ log_vf = build_int_cst (type, exact_log2 (const_vf));
if (niters_no_overflow)
niters_vector = fold_build2 (RSHIFT_EXPR, type, ni_minus_gap, log_vf);
else
tree niters_vector,
tree *niters_vector_mult_vf_ptr)
{
- int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+ /* We should be using a step_vector of VF if VF is variable. */
+ int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo).to_constant ();
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree type = TREE_TYPE (niters_vector);
tree log_vf = build_int_cst (type, exact_log2 (vf));
tree type = TREE_TYPE (niters), guard_cond;
basic_block guard_bb, guard_to;
profile_probability prob_prolog, prob_vector, prob_epilog;
- int bound_prolog = 0, bound_scalar = 0, bound = 0;
- int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+ int bound_prolog = 0;
+ poly_uint64 bound_scalar = 0;
+ int estimated_vf;
int prolog_peeling = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
bool epilog_peeling = (LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo)
|| LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo));
return NULL;
prob_vector = profile_probability::guessed_always ().apply_scale (9, 10);
- if ((vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo)) == 2)
- vf = 3;
+ estimated_vf = vect_vf_for_cost (loop_vinfo);
+ if (estimated_vf == 2)
+ estimated_vf = 3;
prob_prolog = prob_epilog = profile_probability::guessed_always ()
- .apply_scale (vf - 1, vf);
- vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+ .apply_scale (estimated_vf - 1, estimated_vf);
+ poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
struct loop *prolog, *epilog = NULL, *loop = LOOP_VINFO_LOOP (loop_vinfo);
struct loop *first_loop = loop;
/* Skip to epilog if scalar loop may be preferred. It's only needed
when we peel for epilog loop and when it hasn't been checked with
loop versioning. */
- bool skip_vector = (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
- && !LOOP_REQUIRES_VERSIONING (loop_vinfo));
+ bool skip_vector = ((!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
+ && !LOOP_REQUIRES_VERSIONING (loop_vinfo))
+ || !vf.is_constant ());
/* Epilog loop must be executed if the number of iterations for epilog
loop is known at compile time, otherwise we need to add a check at
the end of vector loop and skip to the end of epilog loop. */
bool skip_epilog = (prolog_peeling < 0
- || !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo));
+ || !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
+ || !vf.is_constant ());
/* PEELING_FOR_GAPS is special because epilog loop must be executed. */
if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
skip_epilog = false;
needs to be scaled back later. */
basic_block bb_before_loop = loop_preheader_edge (loop)->src;
if (prob_vector.initialized_p ())
- scale_bbs_frequencies (&bb_before_loop, 1, prob_vector);
- scale_loop_profile (loop, prob_vector, bound);
+ {
+ scale_bbs_frequencies (&bb_before_loop, 1, prob_vector);
+ scale_loop_profile (loop, prob_vector, 0);
+ }
}
tree niters_prolog = build_int_cst (type, 0);
scale_bbs_frequencies (&bb_before_epilog, 1, prob_epilog);
}
- scale_loop_profile (epilog, prob_epilog, bound);
+ scale_loop_profile (epilog, prob_epilog, 0);
}
else
slpeel_update_phi_nodes_for_lcssa (epilog);
- bound = LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) ? vf - 1 : vf - 2;
- /* We share epilog loop with scalar version loop. */
- bound = MAX (bound, bound_scalar - 1);
- record_niter_bound (epilog, bound, false, true);
+ unsigned HOST_WIDE_INT bound1, bound2;
+ if (vf.is_constant (&bound1) && bound_scalar.is_constant (&bound2))
+ {
+ bound1 -= LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) ? 1 : 2;
+ if (bound2)
+ /* We share epilog loop with scalar version loop. */
+ bound1 = MAX (bound1, bound2 - 1);
+ record_niter_bound (epilog, bound1, false, true);
+ }
delete_update_ssa ();
adjust_vec_debug_stmts ();
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
unsigned nbbs = loop->num_nodes;
- unsigned int vectorization_factor = 0;
+ poly_uint64 vectorization_factor = 1;
tree scalar_type = NULL_TREE;
gphi *phi;
tree vectype;
- unsigned int nunits;
stmt_vec_info stmt_info;
unsigned i;
HOST_WIDE_INT dummy;
dump_printf (MSG_NOTE, "\n");
}
- nunits = TYPE_VECTOR_SUBPARTS (vectype);
if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n",
- nunits);
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "nunits = " HOST_WIDE_INT_PRINT_DEC "\n",
+ TYPE_VECTOR_SUBPARTS (vectype));
- if (!vectorization_factor
- || (nunits > vectorization_factor))
- vectorization_factor = nunits;
+ vect_update_max_nunits (&vectorization_factor, vectype);
}
}
dump_printf (MSG_NOTE, "\n");
}
- nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n", nunits);
- if (!vectorization_factor
- || (nunits > vectorization_factor))
- vectorization_factor = nunits;
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "nunits = " HOST_WIDE_INT_PRINT_DEC "\n",
+ TYPE_VECTOR_SUBPARTS (vf_vectype));
+
+ vect_update_max_nunits (&vectorization_factor, vf_vectype);
if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
{
/* TODO: Analyze cost. Decide if worth while to vectorize. */
if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = %d\n",
- vectorization_factor);
- if (vectorization_factor <= 1)
+ {
+ dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = ");
+ dump_dec (MSG_NOTE, vectorization_factor);
+ dump_printf (MSG_NOTE, "\n");
+ }
+
+ if (known_le (vectorization_factor, 1U))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
int nbbs = loop->num_nodes;
- unsigned int vectorization_factor;
+ poly_uint64 vectorization_factor;
int i;
if (dump_enabled_p ())
"=== vect_update_vf_for_slp ===\n");
vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
- gcc_assert (vectorization_factor != 0);
+ gcc_assert (known_ne (vectorization_factor, 0U));
/* If all the stmts in the loop can be SLPed, we perform only SLP, and
vectorization factor of the loop is the unrolling factor required by
{
dump_printf_loc (MSG_NOTE, vect_location,
"Loop contains SLP and non-SLP stmts\n");
+ /* Both the vectorization factor and unroll factor have the form
+ current_vector_size * X for some rational X, so they must have
+ a common multiple. */
vectorization_factor
- = least_common_multiple (vectorization_factor,
+ = force_common_multiple (vectorization_factor,
LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
}
LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "Updating vectorization factor to %d\n",
- vectorization_factor);
+ {
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "Updating vectorization factor to ");
+ dump_dec (MSG_NOTE, vectorization_factor);
+ dump_printf (MSG_NOTE, ".\n");
+ }
}
/* Function vect_analyze_loop_operations.
vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal)
{
bool ok;
- int max_vf = MAX_VECTORIZATION_FACTOR;
- int min_vf = 2;
+ unsigned int max_vf = MAX_VECTORIZATION_FACTOR;
+ poly_uint64 min_vf = 2;
unsigned int n_stmts = 0;
/* The first group of checks is independent of the vector size. */
ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf);
if (!ok
- || max_vf < min_vf)
+ || (max_vf != MAX_VECTORIZATION_FACTOR
+ && maybe_lt (max_vf, min_vf)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't determine vectorization factor.\n");
return false;
}
- if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo))
+ if (max_vf != MAX_VECTORIZATION_FACTOR
+ && maybe_lt (max_vf, LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
/* Compute the scalar iteration cost. */
vect_compute_single_scalar_iteration_cost (loop_vinfo);
- int saved_vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+ poly_uint64 saved_vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
HOST_WIDE_INT estimated_niter;
unsigned th;
int min_scalar_loop_bound;
start_over:
/* Now the vectorization factor is final. */
- unsigned vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
- gcc_assert (vectorization_factor != 0);
+ poly_uint64 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+ gcc_assert (known_ne (vectorization_factor, 0U));
+ unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "vectorization_factor = %d, niters = "
- HOST_WIDE_INT_PRINT_DEC "\n", vectorization_factor,
- LOOP_VINFO_INT_NITERS (loop_vinfo));
+ {
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "vectorization_factor = ");
+ dump_dec (MSG_NOTE, vectorization_factor);
+ dump_printf (MSG_NOTE, ", niters = " HOST_WIDE_INT_PRINT_DEC "\n",
+ LOOP_VINFO_INT_NITERS (loop_vinfo));
+ }
HOST_WIDE_INT max_niter
= likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));
if ((LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
- && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
+ && (LOOP_VINFO_INT_NITERS (loop_vinfo) < assumed_vf))
|| (max_niter != -1
- && (unsigned HOST_WIDE_INT) max_niter < vectorization_factor))
+ && (unsigned HOST_WIDE_INT) max_niter < assumed_vf))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
&& LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
{
- int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+ poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
tree scalar_niters = LOOP_VINFO_NITERSM1 (loop_vinfo);
- if (wi::to_widest (scalar_niters) < vf)
+ if (known_lt (wi::to_widest (scalar_niters), vf))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
}
min_scalar_loop_bound = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
- * vectorization_factor);
+ * assumed_vf);
/* Use the cost model only if it is more conservative than user specified
threshold. */
/* Decide whether we need to create an epilogue loop to handle
remaining scalar iterations. */
- th = ((LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo)
- / LOOP_VINFO_VECT_FACTOR (loop_vinfo))
- * LOOP_VINFO_VECT_FACTOR (loop_vinfo));
+ th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
+ unsigned HOST_WIDE_INT const_vf;
if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
&& LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
{
- if (ctz_hwi (LOOP_VINFO_INT_NITERS (loop_vinfo)
- - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo))
- < exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
+ if (!multiple_p (LOOP_VINFO_INT_NITERS (loop_vinfo)
+ - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo),
+ LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
}
else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
- || (tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
- < (unsigned)exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
- /* In case of versioning, check if the maximum number of
- iterations is greater than th. If they are identical,
- the epilogue is unnecessary. */
+ || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&const_vf)
+ || ((tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
+ < (unsigned) exact_log2 (const_vf))
+ /* In case of versioning, check if the maximum number of
+ iterations is greater than th. If they are identical,
+ the epilogue is unnecessary. */
&& (!LOOP_REQUIRES_VERSIONING (loop_vinfo)
- || (unsigned HOST_WIDE_INT) max_niter > th)))
+ || ((unsigned HOST_WIDE_INT) max_niter
+ > (th / const_vf) * const_vf))))
LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
/* If an epilogue loop is required make sure we can create one. */
LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = niters_th;
}
- gcc_assert (vectorization_factor
- == (unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo));
+ gcc_assert (known_eq (vectorization_factor,
+ LOOP_VINFO_VECT_FACTOR (loop_vinfo)));
/* Ok to vectorize! */
return true;
stmt_vector_for_cost *epilogue_cost_vec)
{
int retval = 0;
- int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+ int assumed_vf = vect_vf_for_cost (loop_vinfo);
if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
{
- *peel_iters_epilogue = vf/2;
+ *peel_iters_epilogue = assumed_vf / 2;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"cost model: epilogue peel iters set to vf/2 "
int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
peel_iters_prologue = niters < peel_iters_prologue ?
niters : peel_iters_prologue;
- *peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
+ *peel_iters_epilogue = (niters - peel_iters_prologue) % assumed_vf;
/* If we need to peel for gaps, but no peeling is required, we have to
peel VF iterations. */
if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
- *peel_iters_epilogue = vf;
+ *peel_iters_epilogue = assumed_vf;
}
stmt_info_for_cost *si;
unsigned vec_epilogue_cost = 0;
int scalar_single_iter_cost = 0;
int scalar_outside_cost = 0;
- int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+ int assumed_vf = vect_vf_for_cost (loop_vinfo);
int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
if (npeel < 0)
{
- peel_iters_prologue = vf/2;
+ peel_iters_prologue = assumed_vf / 2;
dump_printf (MSG_NOTE, "cost model: "
"prologue peel iters set to vf/2.\n");
/* If peeling for alignment is unknown, loop bound of main loop becomes
unknown. */
- peel_iters_epilogue = vf/2;
+ peel_iters_epilogue = assumed_vf / 2;
dump_printf (MSG_NOTE, "cost model: "
"epilogue peel iters set to vf/2 because "
"peeling for alignment is unknown.\n");
PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations
SOC = scalar outside cost for run time cost model check. */
- if ((scalar_single_iter_cost * vf) > (int) vec_inside_cost)
+ if ((scalar_single_iter_cost * assumed_vf) > (int) vec_inside_cost)
{
if (vec_outside_cost <= 0)
min_profitable_iters = 0;
else
{
- min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
+ min_profitable_iters = ((vec_outside_cost - scalar_outside_cost)
+ * assumed_vf
- vec_inside_cost * peel_iters_prologue
- - vec_inside_cost * peel_iters_epilogue)
- / ((scalar_single_iter_cost * vf)
- - vec_inside_cost);
-
- if ((scalar_single_iter_cost * vf * min_profitable_iters)
- <= (((int) vec_inside_cost * min_profitable_iters)
- + (((int) vec_outside_cost - scalar_outside_cost) * vf)))
- min_profitable_iters++;
+ - vec_inside_cost * peel_iters_epilogue)
+ / ((scalar_single_iter_cost * assumed_vf)
+ - vec_inside_cost);
+
+ if ((scalar_single_iter_cost * assumed_vf * min_profitable_iters)
+ <= (((int) vec_inside_cost * min_profitable_iters)
+ + (((int) vec_outside_cost - scalar_outside_cost)
+ * assumed_vf)))
+ min_profitable_iters++;
}
}
/* vector version will never be profitable. */
"divided by the scalar iteration cost = %d "
"is greater or equal to the vectorization factor = %d"
".\n",
- vec_inside_cost, scalar_single_iter_cost, vf);
+ vec_inside_cost, scalar_single_iter_cost, assumed_vf);
*ret_min_profitable_niters = -1;
*ret_min_profitable_estimate = -1;
return;
min_profitable_iters);
/* We want the vectorized loop to execute at least once. */
- if (min_profitable_iters < (vf + peel_iters_prologue))
- min_profitable_iters = vf + peel_iters_prologue;
+ if (min_profitable_iters < (assumed_vf + peel_iters_prologue))
+ min_profitable_iters = assumed_vf + peel_iters_prologue;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
min_profitable_estimate = 0;
else
{
- min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost) * vf
+ min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost)
+ * assumed_vf
- vec_inside_cost * peel_iters_prologue
- vec_inside_cost * peel_iters_epilogue)
- / ((scalar_single_iter_cost * vf)
+ / ((scalar_single_iter_cost * assumed_vf)
- vec_inside_cost);
}
min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
if (slp_node)
/* The size vect_schedule_slp_instance computes is off for us. */
- vec_num = ((LOOP_VINFO_VECT_FACTOR (loop_vinfo)
- * SLP_TREE_SCALAR_STMTS (slp_node).length ())
- / TYPE_VECTOR_SUBPARTS (vectype_in));
+ vec_num = vect_get_num_vectors
+ (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
+ * SLP_TREE_SCALAR_STMTS (slp_node).length (),
+ vectype_in);
else
vec_num = 1;
For a loop where we could vectorize the operation indicated by CODE,
return the minimum vectorization factor that makes it worthwhile
to use generic vectors. */
-int
+static unsigned int
vect_min_worthwhile_factor (enum tree_code code)
{
switch (code)
vect_worthwhile_without_simd_p (vec_info *vinfo, tree_code code)
{
loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
+ unsigned HOST_WIDE_INT value;
return (loop_vinfo
- && (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
- >= vect_min_worthwhile_factor (code)));
+ && LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&value)
+ && value >= vect_min_worthwhile_factor (code));
}
/* Function vectorizable_induction
gphi *induction_phi;
tree induc_def, vec_dest;
tree init_expr, step_expr;
- int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+ poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
unsigned i;
tree expr;
gimple_seq stmts;
tree niters_vector = NULL_TREE;
tree step_vector = NULL_TREE;
tree niters_vector_mult_vf = NULL_TREE;
- int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+ poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+ unsigned int lowest_vf = constant_lower_bound (vf);
bool grouped_store;
bool slp_scheduled = false;
gimple *stmt, *pattern_stmt;
gimple_stmt_iterator pattern_def_si = gsi_none ();
bool transform_pattern_stmt = false;
bool check_profitability = false;
- int th;
+ unsigned int th;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "=== vec_transform_loop ===\n");
/* Use the more conservative vectorization threshold. If the number
of iterations is constant assume the cost check has been performed
by our caller. If the threshold makes all loops profitable that
- run at least the vectorization factor number of times checking
- is pointless, too. */
+ run at least the (estimated) vectorization factor number of times
+ checking is pointless, too. */
th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
- if (th >= LOOP_VINFO_VECT_FACTOR (loop_vinfo)
+ if (th >= vect_vf_for_cost (loop_vinfo)
&& !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
{
if (dump_enabled_p ())
check_profitability, niters_no_overflow);
if (niters_vector == NULL_TREE)
{
- if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
+ if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && known_eq (lowest_vf, vf))
{
niters_vector
= build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
- LOOP_VINFO_INT_NITERS (loop_vinfo) / vf);
+ LOOP_VINFO_INT_NITERS (loop_vinfo) / lowest_vf);
step_vector = build_one_cst (TREE_TYPE (niters));
}
else
continue;
if (STMT_VINFO_VECTYPE (stmt_info)
- && (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
- != (unsigned HOST_WIDE_INT) vf)
+ && (maybe_ne
+ (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)), vf))
&& dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
= (unsigned int)
TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
if (!STMT_SLP_TYPE (stmt_info)
- && nunits != (unsigned int) vf
+ && maybe_ne (nunits, vf)
&& dump_enabled_p ())
/* For SLP VF is set according to unrolling factor, and not
to vector size, hence for SLP this print is not valid. */
niters_vector_mult_vf,
!niters_no_overflow);
- scale_profile_for_vect_loop (loop, vf);
+ unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
+ scale_profile_for_vect_loop (loop, assumed_vf);
/* The minimum number of iterations performed by the epilogue. This
is 1 when peeling for gaps because we always need a final scalar
back to latch counts. */
if (loop->any_upper_bound)
loop->nb_iterations_upper_bound
- = wi::udiv_floor (loop->nb_iterations_upper_bound + bias, vf) - 1;
+ = wi::udiv_floor (loop->nb_iterations_upper_bound + bias,
+ lowest_vf) - 1;
if (loop->any_likely_upper_bound)
loop->nb_iterations_likely_upper_bound
- = wi::udiv_floor (loop->nb_iterations_likely_upper_bound + bias, vf) - 1;
+ = wi::udiv_floor (loop->nb_iterations_likely_upper_bound + bias,
+ lowest_vf) - 1;
if (loop->any_estimate)
loop->nb_iterations_estimate
- = wi::udiv_floor (loop->nb_iterations_estimate + bias, vf) - 1;
+ = wi::udiv_floor (loop->nb_iterations_estimate + bias,
+ assumed_vf) - 1;
if (dump_enabled_p ())
{
else if (!vector_sizes)
epilogue = NULL;
else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
- && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0)
+ && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0
+ && known_eq (vf, lowest_vf))
{
int smallest_vec_size = 1 << ctz_hwi (vector_sizes);
int ratio = current_vector_size / smallest_vec_size;
- int eiters = LOOP_VINFO_INT_NITERS (loop_vinfo)
+ unsigned HOST_WIDE_INT eiters = LOOP_VINFO_INT_NITERS (loop_vinfo)
- LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
- eiters = eiters % vf;
+ eiters = eiters % lowest_vf;
epilogue->nb_iterations_upper_bound = eiters - 1;
- if (eiters < vf / ratio)
+ if (eiters < lowest_vf / ratio)
epilogue = NULL;
}
}
node->load_permutation);
/* We are done, no actual permutations need to be generated. */
- unsigned int unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_instn);
+ poly_uint64 unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_instn);
FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
{
gimple *first_stmt = SLP_TREE_SCALAR_STMTS (node)[0];
first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt));
/* But we have to keep those permutations that are required because
of handling of gaps. */
- if (unrolling_factor == 1
+ if (known_eq (unrolling_factor, 1U)
|| (group_size == GROUP_SIZE (vinfo_for_stmt (first_stmt))
&& GROUP_GAP (vinfo_for_stmt (first_stmt)) == 0))
SLP_TREE_LOAD_PERMUTATION (node).release ();
and the vectorization factor is not yet final.
??? The SLP instance unrolling factor might not be the maximum one. */
unsigned n_perms;
- unsigned test_vf
- = least_common_multiple (SLP_INSTANCE_UNROLLING_FACTOR (slp_instn),
+ poly_uint64 test_vf
+ = force_common_multiple (SLP_INSTANCE_UNROLLING_FACTOR (slp_instn),
LOOP_VINFO_VECT_FACTOR
- (STMT_VINFO_LOOP_VINFO (vinfo_for_stmt (stmt))));
+ (STMT_VINFO_LOOP_VINFO (vinfo_for_stmt (stmt))));
FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
if (node->load_permutation.exists ()
&& !vect_transform_slp_perm_load (node, vNULL, NULL, test_vf,
gcc_assert (ncopies_for_cost
<= (GROUP_SIZE (stmt_info) - GROUP_GAP (stmt_info)
+ nunits - 1) / nunits);
- ncopies_for_cost *= SLP_INSTANCE_UNROLLING_FACTOR (instance);
+ poly_uint64 uf = SLP_INSTANCE_UNROLLING_FACTOR (instance);
+ ncopies_for_cost *= estimated_poly_value (uf);
}
/* Record the cost for the vector loads. */
vect_model_load_cost (stmt_info, ncopies_for_cost,
unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);
slp_tree node = SLP_INSTANCE_TREE (instance);
stmt_vec_info stmt_info = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]);
- /* Adjust the group_size by the vectorization factor which is always one
- for basic-block vectorization. */
+ /* Get the estimated vectorization factor, which is always one for
+ basic-block vectorization. */
+ unsigned int assumed_vf;
if (STMT_VINFO_LOOP_VINFO (stmt_info))
- group_size *= LOOP_VINFO_VECT_FACTOR (STMT_VINFO_LOOP_VINFO (stmt_info));
+ assumed_vf = vect_vf_for_cost (STMT_VINFO_LOOP_VINFO (stmt_info));
+ else
+ assumed_vf = 1;
unsigned nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
/* For reductions look at a reduction operand in case the reduction
operation is widening like DOT_PROD or SAD. */
default:;
}
}
- ncopies_for_cost = least_common_multiple (nunits, group_size) / nunits;
+ ncopies_for_cost = least_common_multiple (nunits,
+ group_size * assumed_vf) / nunits;
prologue_cost_vec.create (10);
body_cost_vec.create (10);
slp_instance new_instance;
slp_tree node;
unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
- unsigned int unrolling_factor = 1, nunits;
+ unsigned int nunits;
tree vectype, scalar_type = NULL_TREE;
gimple *next;
unsigned int i;
if (node != NULL)
{
/* Calculate the unrolling factor based on the smallest type. */
- unrolling_factor
+ poly_uint64 unrolling_factor
= least_common_multiple (max_nunits, group_size) / group_size;
- if (unrolling_factor != 1
+ if (maybe_ne (unrolling_factor, 1U)
&& is_a <bb_vec_info> (vinfo))
{
/* The load requires permutation when unrolling exposes
a gap either because the group is larger than the SLP
group-size or because there is a gap between the groups. */
- && (unrolling_factor == 1
+ && (known_eq (unrolling_factor, 1U)
|| (group_size == GROUP_SIZE (vinfo_for_stmt (first_stmt))
&& GROUP_GAP (vinfo_for_stmt (first_stmt)) == 0)))
{
bool
vect_make_slp_decision (loop_vec_info loop_vinfo)
{
- unsigned int i, unrolling_factor = 1;
+ unsigned int i;
+ poly_uint64 unrolling_factor = 1;
vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
slp_instance instance;
int decided_to_slp = 0;
FOR_EACH_VEC_ELT (slp_instances, i, instance)
{
/* FORNOW: SLP if you can. */
- if (unrolling_factor < SLP_INSTANCE_UNROLLING_FACTOR (instance))
- unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (instance);
+ /* All unroll factors have the form current_vector_size * X for some
+ rational X, so they must have a common multiple. */
+ unrolling_factor
+ = force_common_multiple (unrolling_factor,
+ SLP_INSTANCE_UNROLLING_FACTOR (instance));
/* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts. Later we
call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;
if (decided_to_slp && dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "Decided to SLP %d instances. Unrolling factor %d\n",
- decided_to_slp, unrolling_factor);
+ {
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "Decided to SLP %d instances. Unrolling factor ",
+ decided_to_slp);
+ dump_dec (MSG_NOTE, unrolling_factor);
+ dump_printf (MSG_NOTE, "\n");
+ }
return (decided_to_slp > 0);
}
= SLP_TREE_NUMBER_OF_VEC_STMTS (SLP_TREE_CHILDREN (node)[0]);
else
{
- int vf;
+ poly_uint64 vf;
if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
vf = loop_vinfo->vectorization_factor;
else
unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (node_instance);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
SLP_TREE_NUMBER_OF_VEC_STMTS (node)
- = vf * group_size / TYPE_VECTOR_SUBPARTS (vectype);
+ = vect_get_num_vectors (vf * group_size, vectype);
}
/* Push SLP node def-type to stmt operands. */
bb_vec_info bb_vinfo;
slp_instance instance;
int i;
- int min_vf = 2;
+ poly_uint64 min_vf = 2;
/* The first group of checks is independent of the vector size. */
fatal = true;
bool
vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
- gimple_stmt_iterator *gsi, int vf,
- slp_instance slp_node_instance, bool analyze_only,
+ gimple_stmt_iterator *gsi, poly_uint64 vf,
+ slp_instance slp_node_instance, bool analyze_only,
unsigned *n_perms)
{
gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0];
int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
int mask_element;
machine_mode mode;
+ unsigned HOST_WIDE_INT const_vf;
if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
return false;
mode = TYPE_MODE (vectype);
+ /* At the moment, all permutations are represented using per-element
+ indices, so we can't cope with variable vectorization factors. */
+ if (!vf.is_constant (&const_vf))
+ return false;
+
/* The generic VEC_PERM_EXPR code always uses an integral type of the
same size as the vector element being permuted. */
mask_element_type = lang_hooks.types.type_for_mode
bool noop_p = true;
*n_perms = 0;
- for (int j = 0; j < vf; j++)
+ for (unsigned int j = 0; j < const_vf; j++)
{
for (int k = 0; k < group_size; k++)
{
arginfo.quick_push (thisarginfo);
}
+ unsigned HOST_WIDE_INT vf;
+ if (!LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not considering SIMD clones; not yet supported"
+ " for variable-width vectors.\n");
+ return NULL;
+ }
+
unsigned int badness = 0;
struct cgraph_node *bestn = NULL;
if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
n = n->simdclone->next_clone)
{
unsigned int this_badness = 0;
- if (n->simdclone->simdlen
- > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
+ if (n->simdclone->simdlen > vf
|| n->simdclone->nargs != nargs)
continue;
- if (n->simdclone->simdlen
- < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
- this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
+ if (n->simdclone->simdlen < vf)
+ this_badness += (exact_log2 (vf)
- exact_log2 (n->simdclone->simdlen)) * 1024;
if (n->simdclone->inbranch)
this_badness += 2048;
fndecl = bestn->decl;
nunits = bestn->simdclone->simdlen;
- ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
+ ncopies = vf / nunits;
/* If the function isn't const, only allow it in simd loops where user
has asserted that at least nunits consecutive iterations can be
gather_scatter_info gs_info;
enum vect_def_type scatter_src_dt = vect_unknown_def_type;
gimple *new_stmt;
- int vf;
+ poly_uint64 vf;
vec_load_store_type vls_type;
tree ref_type;
tree dataref_offset = NULL_TREE;
gimple *ptr_incr = NULL;
int ncopies;
- int i, j, group_size, group_gap_adj;
+ int i, j, group_size;
+ poly_int64 group_gap_adj;
tree msq = NULL_TREE, lsq;
tree offset = NULL_TREE;
tree byte_offset = NULL_TREE;
bool slp_perm = false;
enum tree_code code;
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
- int vf;
+ poly_uint64 vf;
tree aggr_type;
gather_scatter_info gs_info;
vec_info *vinfo = stmt_info->vinfo;
on the unrolled body effectively re-orders stmts. */
if (ncopies > 1
&& STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
- && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
- > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
+ && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
+ STMT_VINFO_MIN_NEG_DIST (stmt_info)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
on the unrolled body effectively re-orders stmts. */
if (!PURE_SLP_STMT (stmt_info)
&& STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
- && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
- > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
+ && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
+ STMT_VINFO_MIN_NEG_DIST (stmt_info)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
fits in. */
if (slp_perm)
{
- ncopies = (group_size * vf + nunits - 1) / nunits;
+ /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
+ variable VF. */
+ unsigned int const_vf = vf.to_constant ();
+ ncopies = (group_size * const_vf + nunits - 1) / nunits;
dr_chain.create (ncopies);
}
else
fits in. */
if (slp_perm)
{
- vec_num = (group_size * vf + nunits - 1) / nunits;
+ /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
+ variable VF. */
+ unsigned int const_vf = vf.to_constant ();
+ vec_num = (group_size * const_vf + nunits - 1) / nunits;
group_gap_adj = vf * group_size - nunits * vec_num;
}
else
we need to skip the gaps after we manage to fully load
all elements. group_gap_adj is GROUP_SIZE here. */
group_elt += nunits;
- if (group_gap_adj != 0 && ! slp_perm
- && group_elt == group_size - group_gap_adj)
+ if (maybe_ne (group_gap_adj, 0U)
+ && !slp_perm
+ && known_eq (group_elt, group_size - group_gap_adj))
{
- wide_int bump_val = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
- * group_gap_adj);
+ poly_wide_int bump_val
+ = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
+ * group_gap_adj);
tree bump = wide_int_to_tree (sizetype, bump_val);
dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
stmt, bump);
}
/* Bump the vector pointer to account for a gap or for excess
elements loaded for a permuted SLP load. */
- if (group_gap_adj != 0 && slp_perm)
+ if (maybe_ne (group_gap_adj, 0U) && slp_perm)
{
- wide_int bump_val = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
- * group_gap_adj);
+ poly_wide_int bump_val
+ = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
+ * group_gap_adj);
tree bump = wide_int_to_tree (sizetype, bump_val);
dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
stmt, bump);
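
Because group_gap_adj is now a poly_int64, the pointer bump itself is polynomial: elem_size * (A + B*X).  The patch materializes it as a POLY_INT_CST tree via poly_wide_int and wide_int_to_tree; in the analogue notation it is just coefficient-wise scaling (sketch):

    /* Byte offset for skipping a possibly runtime-variable group gap.  */
    static poly_u64 gap_bump_bytes (uint64_t elem_size, poly_u64 gap)
    { return { elem_size * gap.a, elem_size * gap.b }; }
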
struct simduid_to_vf : free_ptr_hash<simduid_to_vf>
{
unsigned int simduid;
- int vf;
+ poly_uint64 vf;
/* hash_table support. */
static inline hashval_t hash (const simduid_to_vf *);
for (i = gsi_start_bb (bb); !gsi_end_p (i); )
{
- unsigned int vf = 1;
+ poly_uint64 vf = 1;
enum internal_fn ifn;
gimple *stmt = gsi_stmt (i);
tree t;
if ((*iter)->simduid != -1U)
{
tree decl = (*iter)->decl;
- int vf = 1;
+ poly_uint64 vf = 1;
if (simduid_to_vf_htab)
{
simduid_to_vf *p = NULL, data;
unsigned int group_size;
/* The unrolling factor required to vectorized this SLP instance. */
- unsigned int unrolling_factor;
+ poly_uint64 unrolling_factor;
/* The group of nodes that contain loads of this SLP instance. */
vec<slp_tree> loads;
poly_uint64 versioning_threshold;
/* Unrolling factor */
- int vectorization_factor;
+ poly_uint64 vectorization_factor;
/* Maximum runtime vectorization factor, or MAX_VECTORIZATION_FACTOR
if there is no particular limit. */
/* The unrolling factor needed to SLP the loop. In case of that pure SLP is
applied to the loop, i.e., no unrolling is needed, this is 1. */
- unsigned slp_unrolling_factor;
+ poly_uint64 slp_unrolling_factor;
/* Cost of a single scalar iteration. */
int single_scalar_iteration_cost;
conversion. */
#define MAX_INTERM_CVT_STEPS 3
-/* The maximum vectorization factor supported by any target (V64QI).  */
-#define MAX_VECTORIZATION_FACTOR 64
+/* Effectively no limit; callers also compare against
+   MAX_VECTORIZATION_FACTOR as a sentinel for "no limit recorded".  */
+#define MAX_VECTORIZATION_FACTOR INT_MAX
/* Nonzero if TYPE represents a (scalar) boolean type or type
in the middle-end compatible with it (unsigned precision 1 integral
return (flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED);
}
+/* Return the number of vectors of type VECTYPE that are needed to get
+ NUNITS elements. NUNITS should be based on the vectorization factor,
+ so it is always a known multiple of the number of elements in VECTYPE. */
+
+static inline unsigned int
+vect_get_num_vectors (poly_uint64 nunits, tree vectype)
+{
+ return exact_div (nunits, TYPE_VECTOR_SUBPARTS (vectype)).to_constant ();
+}
+
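
vect_get_num_vectors leans on exact_div: NUNITS is derived from the vectorization factor, so it is by construction an exact, compile-time-constant multiple of TYPE_VECTOR_SUBPARTS, and .to_constant () on the quotient cannot fail.  In the analogue notation (sketch; exactness is a precondition, not checked here):

    /* (A + B*X) / (C + D*X) where the quotient is a compile-time
       constant Q, i.e. A == Q*C and B == Q*D.  */
    static uint64_t exact_div_constant (poly_u64 n, poly_u64 d)
    { return d.b ? n.b / d.b : n.a / d.a; }

For example, a VF of 8 + 8X scalars over vectors of 4 + 4X lanes needs exactly two vectors regardless of X; vect_get_num_copies below is the same computation with NUNITS equal to the VF itself.
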
/* Return the number of copies needed for loop vectorization when
a statement operates on vectors of type VECTYPE. This is the
vectorization factor divided by the number of elements in
static inline unsigned int
vect_get_num_copies (loop_vec_info loop_vinfo, tree vectype)
{
- gcc_checking_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
- % TYPE_VECTOR_SUBPARTS (vectype) == 0);
- return (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
- / TYPE_VECTOR_SUBPARTS (vectype));
+ return vect_get_num_vectors (LOOP_VINFO_VECT_FACTOR (loop_vinfo), vectype);
+}
+
+/* Update maximum unit count *MAX_NUNITS so that it accounts for
+ the number of units in vector type VECTYPE. *MAX_NUNITS can be 1
+ if we haven't yet recorded any vector types. */
+
+static inline void
+vect_update_max_nunits (poly_uint64 *max_nunits, tree vectype)
+{
+ /* All unit counts have the form current_vector_size * X for some
+ rational X, so two unit sizes must have a common multiple.
+ Everything is a multiple of the initial value of 1. */
+ poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
+ *max_nunits = force_common_multiple (*max_nunits, nunits);
+}
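
The comment above is what makes force_common_multiple safe: every unit count is current_vector_size times a fixed rational, so with power-of-two lane counts one of any two values always divides the other, and the seed value 1 divides everything.  Under exactly those assumptions the operation degenerates to a poly max (sketch in the analogue notation):

    /* Common multiple of two unit counts, assuming one always divides
       the other (true when every count is current_vector_size * 2^i,
       and for the constant seed value 1).  */
    static poly_u64 common_nunits (poly_u64 x, poly_u64 y)
    { return known_ge (x, y) ? x : y; }
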
+
+/* Return the vectorization factor that should be used for costing
+ purposes while vectorizing the loop described by LOOP_VINFO.
+ Pick a reasonable estimate if the vectorization factor isn't
+ known at compile time. */
+
+static inline unsigned int
+vect_vf_for_cost (loop_vec_info loop_vinfo)
+{
+ return estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
}
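
vect_vf_for_cost is the cost-model escape hatch: heuristics that only need a representative number (peeling estimates, profitability thresholds, profile scaling) evaluate the polynomial VF at a target-provided guess for the runtime parameter rather than demanding a compile-time constant.  Analogue sketch, where the caller-supplied LIKELY_X stands in for whatever the target estimate is:

    /* Evaluate A + B*X at a guessed X; this degenerates to the exact
       value when B == 0 (fixed-width vectors).  */
    static uint64_t estimated_value (poly_u64 v, uint64_t likely_x)
    { return v.a + v.b * likely_x; }

E.g. estimated_value (vf, 2) for VF = 4 + 4X yields 12; the assumed_vf and estimated_npeels values earlier in the patch feed such estimates into integer-only cost code.
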
/* Return the size of the value accessed by unvectorized data reference DR.
(struct data_reference *, bool);
extern tree vect_get_smallest_scalar_type (gimple *, HOST_WIDE_INT *,
HOST_WIDE_INT *);
-extern bool vect_analyze_data_ref_dependences (loop_vec_info, int *);
+extern bool vect_analyze_data_ref_dependences (loop_vec_info, unsigned int *);
extern bool vect_slp_analyze_instance_dependence (slp_instance);
extern bool vect_enhance_data_refs_alignment (loop_vec_info);
extern bool vect_analyze_data_refs_alignment (loop_vec_info);
extern bool vect_prune_runtime_alias_test_list (loop_vec_info);
extern bool vect_check_gather_scatter (gimple *, loop_vec_info,
gather_scatter_info *);
-extern bool vect_analyze_data_refs (vec_info *, int *);
+extern bool vect_analyze_data_refs (vec_info *, poly_uint64 *);
extern void vect_record_base_alignments (vec_info *);
extern tree vect_create_data_ref_ptr (gimple *, tree, struct loop *, tree,
tree *, gimple_stmt_iterator *,
/* In tree-vect-slp.c. */
extern void vect_free_slp_instance (slp_instance);
extern bool vect_transform_slp_perm_load (slp_tree, vec<tree> ,
- gimple_stmt_iterator *, int,
- slp_instance, bool, unsigned *);
+ gimple_stmt_iterator *, poly_uint64,
+ slp_instance, bool, unsigned *);
extern bool vect_slp_analyze_operations (vec_info *);
extern bool vect_schedule_slp (vec_info *);
extern bool vect_analyze_slp (vec_info *, unsigned);