return word_mode;
}
-static HOST_WIDE_INT aarch64_estimated_poly_value (poly_int64);
-
/* Compare an SVE mode SVE_M and an Advanced SIMD mode ASIMD_M
and return whether the SVE mode should be preferred over the
Advanced SIMD one in aarch64_autovectorize_vector_modes. */
return maybe_gt (nunits_sve, nunits_asimd);
/* Otherwise estimate the runtime width of the modes involved. */
- HOST_WIDE_INT est_sve = aarch64_estimated_poly_value (nunits_sve);
- HOST_WIDE_INT est_asimd = aarch64_estimated_poly_value (nunits_asimd);
+ HOST_WIDE_INT est_sve = estimated_poly_value (nunits_sve);
+ HOST_WIDE_INT est_asimd = estimated_poly_value (nunits_asimd);
/* Preferring SVE means picking it first unless the Advanced SIMD mode
is clearly wider. */
/* Implement TARGET_ESTIMATED_POLY_VALUE.
Look into the tuning structure for an estimate.
- VAL.coeffs[1] is multiplied by the number of VQ chunks over the initial
- Advanced SIMD 128 bits. */
+ KIND specifies the type of requested estimate: min, max or likely.
+ For cores with a known SVE width all three estimates are the same.
+ For generic SVE tuning we want to distinguish the maximum estimate from
+ the minimum and likely ones.
+ The likely estimate is the same as the minimum in that case to give a
+ conservative behavior of auto-vectorizing with SVE when it is a win
+ even for 128-bit SVE.
+ When SVE width information is available VAL.coeffs[1] is multiplied by
+ the number of VQ chunks over the initial Advanced SIMD 128 bits. */
static HOST_WIDE_INT
-aarch64_estimated_poly_value (poly_int64 val)
+aarch64_estimated_poly_value (poly_int64 val,
+ poly_value_estimate_kind kind
+ = POLY_VALUE_LIKELY)
{
enum aarch64_sve_vector_bits_enum width_source
= aarch64_tune_params.sve_width;
- /* If we still don't have an estimate, use the default. */
+ /* If there is no core-specific information then the minimum and likely
+ values are based on 128-bit vectors and the maximum is based on
+ the architectural maximum of 2048 bits. */
if (width_source == SVE_SCALABLE)
- return default_estimated_poly_value (val);
+ switch (kind)
+ {
+ case POLY_VALUE_MIN:
+ case POLY_VALUE_LIKELY:
+ return val.coeffs[0];
+ case POLY_VALUE_MAX:
+ return val.coeffs[0] + val.coeffs[1] * 15;
+ }
+ /* If the core provides width information, use that. */
HOST_WIDE_INT over_128 = width_source - 128;
return val.coeffs[0] + val.coeffs[1] * over_128 / 128;
}
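
To make the estimate arithmetic above concrete, here is a small standalone sketch (not part of the patch) that replays the same computation on plain integers. C0 and C1 stand for val.coeffs[0] and val.coeffs[1], a width of 0 plays the role of SVE_SCALABLE, and the sample values are illustrative only.

/* Standalone sketch, not part of the patch: the same arithmetic on plain
   integers.  C0/C1 stand for val.coeffs[0]/val.coeffs[1]; width == 0 plays
   the role of SVE_SCALABLE (no core-specific SVE width).  */
#include <cstdio>

enum kind { EST_MIN, EST_MAX, EST_LIKELY };

static long
estimate (long c0, long c1, long width, kind k)
{
  if (width == 0)
    switch (k)
      {
      case EST_MIN:
      case EST_LIKELY:
        return c0;                      /* assume 128-bit vectors */
      case EST_MAX:
        return c0 + c1 * 15;            /* architectural maximum, 2048 bits */
      }
  return c0 + c1 * (width - 128) / 128;
}

int
main ()
{
  /* A vectorization factor of 4 + 4x (four 32-bit lanes per 128-bit
     quadword): generic tuning estimates min/likely 4 and max 64, while a
     core known to implement 256-bit SVE estimates 8 for every kind.  */
  printf ("%ld %ld %ld\n",
          estimate (4, 4, 0, EST_MIN),
          estimate (4, 4, 0, EST_MAX),
          estimate (4, 4, 256, EST_LIKELY));
  return 0;
}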
as the delay slot can hide a pipeline bubble.
@end deftypefn
-@deftypefn {Target Hook} HOST_WIDE_INT TARGET_ESTIMATED_POLY_VALUE (poly_int64 @var{val})
+@deftypefn {Target Hook} HOST_WIDE_INT TARGET_ESTIMATED_POLY_VALUE (poly_int64 @var{val}, poly_value_estimate_kind @var{kind})
Return an estimate of the runtime value of @var{val}, for use in
-things like cost calculations or profiling frequencies. The default
+things like cost calculations or profiling frequencies. @var{kind} is used
+to ask for the minimum, maximum, and likely estimates of the value through
+the @code{POLY_VALUE_MIN}, @code{POLY_VALUE_MAX} and
+@code{POLY_VALUE_LIKELY} values. The default
implementation returns the lowest possible value of @var{val}.
@end deftypefn
DEFHOOK
(estimated_poly_value,
"Return an estimate of the runtime value of @var{val}, for use in\n\
-things like cost calculations or profiling frequencies. The default\n\
+things like cost calculations or profiling frequencies. @var{kind} is used\n\
+to ask for the minimum, maximum, and likely estimates of the value through\n\
+the @code{POLY_VALUE_MIN}, @code{POLY_VALUE_MAX} and\n\
+@code{POLY_VALUE_LIKELY} values. The default\n\
implementation returns the lowest possible value of @var{val}.",
- HOST_WIDE_INT, (poly_int64 val),
+ HOST_WIDE_INT, (poly_int64 val, poly_value_estimate_kind kind),
default_estimated_poly_value)
/* Permit speculative instructions in delay slots during delayed-branch
TCTX_CAPTURE_BY_COPY
};
+enum poly_value_estimate_kind
+{
+ POLY_VALUE_MIN,
+ POLY_VALUE_MAX,
+ POLY_VALUE_LIKELY
+};
+
extern bool verify_type_context (location_t, type_context_kind, const_tree,
bool = false);
provides a rough guess. */
static inline HOST_WIDE_INT
-estimated_poly_value (poly_int64 x)
+estimated_poly_value (poly_int64 x,
+ poly_value_estimate_kind kind = POLY_VALUE_LIKELY)
{
if (NUM_POLY_INT_COEFFS == 1)
return x.coeffs[0];
else
- return targetm.estimated_poly_value (x);
+ return targetm.estimated_poly_value (x, kind);
}
#ifdef GCC_TM_H
/* The default implementation of TARGET_ESTIMATED_POLY_VALUE. */
HOST_WIDE_INT
-default_estimated_poly_value (poly_int64 x)
+default_estimated_poly_value (poly_int64 x, poly_value_estimate_kind)
{
return x.coeffs[0];
}
/* Check whether the (fractional) cost per scalar iteration is lower
or higher: new_inside_cost / new_vf vs. old_inside_cost / old_vf. */
- poly_widest_int rel_new = (new_loop_vinfo->vec_inside_cost
- * poly_widest_int (old_vf));
- poly_widest_int rel_old = (old_loop_vinfo->vec_inside_cost
- * poly_widest_int (new_vf));
- if (maybe_lt (rel_old, rel_new))
- {
- /* When old_loop_vinfo uses a variable vectorization factor,
- we know that it has a lower cost for at least one runtime VF.
- However, we don't know how likely that VF is.
-
- One option would be to compare the costs for the estimated VFs.
- The problem is that that can put too much pressure on the cost
- model. E.g. if the estimated VF is also the lowest possible VF,
- and if old_loop_vinfo is 1 unit worse than new_loop_vinfo
- for the estimated VF, we'd then choose new_loop_vinfo even
- though (a) new_loop_vinfo might not actually be better than
- old_loop_vinfo for that VF and (b) it would be significantly
- worse at larger VFs.
-
- Here we go for a hacky compromise: pick new_loop_vinfo if it is
- no more expensive than old_loop_vinfo even after doubling the
- estimated old_loop_vinfo VF. For all but trivial loops, this
- ensures that we only pick new_loop_vinfo if it is significantly
- better than old_loop_vinfo at the estimated VF. */
- if (rel_new.is_constant ())
- return false;
-
- HOST_WIDE_INT new_estimated_vf = estimated_poly_value (new_vf);
- HOST_WIDE_INT old_estimated_vf = estimated_poly_value (old_vf);
- widest_int estimated_rel_new = (new_loop_vinfo->vec_inside_cost
- * widest_int (old_estimated_vf));
- widest_int estimated_rel_old = (old_loop_vinfo->vec_inside_cost
- * widest_int (new_estimated_vf));
- return estimated_rel_new * 2 <= estimated_rel_old;
- }
- if (known_lt (rel_new, rel_old))
+ poly_int64 rel_new = new_loop_vinfo->vec_inside_cost * old_vf;
+ poly_int64 rel_old = old_loop_vinfo->vec_inside_cost * new_vf;
+
+ HOST_WIDE_INT est_rel_new_min
+ = estimated_poly_value (rel_new, POLY_VALUE_MIN);
+ HOST_WIDE_INT est_rel_new_max
+ = estimated_poly_value (rel_new, POLY_VALUE_MAX);
+
+ HOST_WIDE_INT est_rel_old_min
+ = estimated_poly_value (rel_old, POLY_VALUE_MIN);
+ HOST_WIDE_INT est_rel_old_max
+ = estimated_poly_value (rel_old, POLY_VALUE_MAX);
+
+ /* Check first if we can make out an unambiguous total order from the minimum
+ and maximum estimates. */
+ if (est_rel_new_min < est_rel_old_min
+ && est_rel_new_max < est_rel_old_max)
return true;
+ else if (est_rel_old_min < est_rel_new_min
+ && est_rel_old_max < est_rel_new_max)
+ return false;
+ /* When old_loop_vinfo uses a variable vectorization factor,
+ we know that it has a lower cost for at least one runtime VF.
+ However, we don't know how likely that VF is.
+
+ One option would be to compare the costs for the estimated VFs.
+ The problem is that that can put too much pressure on the cost
+ model. E.g. if the estimated VF is also the lowest possible VF,
+ and if old_loop_vinfo is 1 unit worse than new_loop_vinfo
+ for the estimated VF, we'd then choose new_loop_vinfo even
+ though (a) new_loop_vinfo might not actually be better than
+ old_loop_vinfo for that VF and (b) it would be significantly
+ worse at larger VFs.
+
+ Here we go for a hacky compromise: pick new_loop_vinfo if it is
+ no more expensive than old_loop_vinfo even after doubling the
+ estimated old_loop_vinfo VF. For all but trivial loops, this
+ ensures that we only pick new_loop_vinfo if it is significantly
+ better than old_loop_vinfo at the estimated VF. */
+
+ if (est_rel_old_min != est_rel_new_min
+ || est_rel_old_max != est_rel_new_max)
+ {
+ HOST_WIDE_INT est_rel_new_likely
+ = estimated_poly_value (rel_new, POLY_VALUE_LIKELY);
+ HOST_WIDE_INT est_rel_old_likely
+ = estimated_poly_value (rel_old, POLY_VALUE_LIKELY);
+
+ return est_rel_new_likely * 2 <= est_rel_old_likely;
+ }
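
As a concrete illustration of the ordering above, the following standalone sketch (not part of the patch) replays the same checks on plain integers. rel_new and rel_old are the cross-multiplied per-iteration costs (new body cost times old VF versus old body cost times new VF), min/max/likely stand for the three estimate kinds, and all numbers are made up for illustration.

/* Standalone sketch, not part of the patch: the ordering above on plain
   integers.  "min"/"max"/"likely" stand for the three estimate kinds of the
   cross-multiplied costs rel_new and rel_old.  */
#include <cstdio>

struct est { long min, max, likely; };

/* Returns 1 to prefer the new loop_vinfo, 0 to keep the old one.  */
static int
better_p (est rel_new, est rel_old)
{
  /* Unambiguous total order from the minimum and maximum estimates.  */
  if (rel_new.min < rel_old.min && rel_new.max < rel_old.max)
    return 1;
  if (rel_old.min < rel_new.min && rel_old.max < rel_new.max)
    return 0;

  /* Otherwise fall back to the likely estimates, requiring the new body
     to be at least twice as cheap before switching.  */
  if (rel_old.min != rel_new.min || rel_old.max != rel_new.max)
    return rel_new.likely * 2 <= rel_old.likely;

  /* Costs tie; the real code goes on to compare prologue and epilogue
     costs at this point.  */
  return 0;
}

int
main ()
{
  /* A variable-VF (SVE) candidate as "new" against a fixed-VF (Advanced
     SIMD) candidate as "old": rel_new = 11 * 4 = 44 for every kind, while
     rel_old = 10 * (4 + 4x) gives min/likely 40 and max 640 under generic
     tuning.  The new candidate wins at large vector lengths but not by
     enough at the likely 128-bit width, so the old one is kept (prints 0).  */
  est rel_new = { 44, 44, 44 };
  est rel_old = { 40, 640, 40 };
  printf ("%d\n", better_p (rel_new, rel_old));
  return 0;
}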
/* If there's nothing to choose between the loop bodies, see whether
there's a difference in the prologue and epilogue costs. */