+2019-11-16 Richard Sandiford <richard.sandiford@arm.com>
+
+ * tree-vectorizer.h (vect_get_vector_types_for_stmt): Take an
+ optional maximum nunits.
+ (get_vectype_for_scalar_type): Likewise. Also declare a form that
+ takes an slp_tree.
+ (get_mask_type_for_scalar_type): Take an optional slp_tree.
+ (vect_get_mask_type_for_stmt): Likewise.
+ * tree-vect-data-refs.c (vect_analyze_data_refs): Don't store
+ the vector type in STMT_VINFO_VECTYPE for BB vectorization.
+ * tree-vect-patterns.c (vect_recog_bool_pattern): Use
+ vect_get_vector_types_for_stmt instead of STMT_VINFO_VECTYPE
+ to get an assumed vector type for data references.
+ * tree-vect-slp.c (vect_update_shared_vectype): New function.
+ (vect_update_all_shared_vectypes): Likewise.
+ (vect_build_slp_tree_1): Pass the group size to
+ vect_get_vector_types_for_stmt. Use vect_update_shared_vectype
+ for BB vectorization.
+ (vect_build_slp_tree_2): Call vect_update_all_shared_vectypes
+ before building the vector from scalars.
+ (vect_analyze_slp_instance): Pass the group size to
+ get_vectype_for_scalar_type.
+ (vect_slp_analyze_node_operations_1): Don't recompute the vector
+ types for BB vectorization here; just handle the case in which
+ we deferred the choice for booleans.
+ (vect_get_constant_vectors): Pass the slp_tree to
+ get_vectype_for_scalar_type.
+ * tree-vect-stmts.c (vect_prologue_cost_for_slp_op): Likewise.
+ (vectorizable_call): Likewise.
+ (vectorizable_simd_clone_call): Likewise.
+ (vectorizable_conversion): Likewise.
+ (vectorizable_shift): Likewise.
+ (vectorizable_operation): Likewise.
+ (vectorizable_comparison): Likewise.
+ (vect_is_simple_cond): Take the slp_tree as argument and
+ pass it to get_vectype_for_scalar_type.
+ (vectorizable_condition): Update call accordingly.
+ (get_vectype_for_scalar_type): Take a group_size argument.
+ For BB vectorization, limit the vector to that number
+ of elements. Also define an overload that takes an slp_tree.
+ (get_mask_type_for_scalar_type): Add an slp_tree argument and
+ pass it to get_vectype_for_scalar_type.
+ (vect_get_vector_types_for_stmt): Add a group_size argument
+ and pass it to get_vectype_for_scalar_type. Don't use the
+ cached vector type for BB vectorization if a group size is given.
+ Handle data references in that case.
+ (vect_get_mask_type_for_stmt): Take an slp_tree argument and
+ pass it to get_mask_type_for_scalar_type.
+
2019-11-15 Jan Hubicka <hubicka@ucw.cz>
* ipa-inline.h (do_estimate_edge_time): Add nonspec_time
+2019-11-16 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.dg/vect/bb-slp-4.c: Expect the block to be vectorized
+ with -fno-vect-cost-model.
+ * gcc.dg/vect/bb-slp-bool-1.c: New test.
+ * gcc.target/aarch64/vect_mixed_sizes_14.c: Likewise.
+ * gcc.target/i386/pr84101.c: XFAIL for -m32.
+
2019-11-15 Joseph Myers <joseph@codesourcery.com>
* gcc.dg/c2x-attr-deprecated-4.c, gcc.dg/c2x-attr-fallthrough-4.c,
return 0;
}
-/* { dg-final { scan-tree-dump-times "basic block vectorized" 0 "slp2" } } */
-
+/* { dg-final { scan-tree-dump-times "basic block vectorized" 1 "slp2" } } */
--- /dev/null
+#include "tree-vect.h"
+
+void __attribute__ ((noipa))
+f1 (_Bool *x, unsigned short *y)
+{
+ x[0] = (y[0] == 1);
+ x[1] = (y[1] == 1);
+}
+
+void __attribute__ ((noipa))
+f2 (_Bool *x, unsigned short *y)
+{
+ x[0] = (y[0] == 1);
+ x[1] = (y[1] == 1);
+ x[2] = (y[2] == 1);
+ x[3] = (y[3] == 1);
+ x[4] = (y[4] == 1);
+ x[5] = (y[5] == 1);
+ x[6] = (y[6] == 1);
+ x[7] = (y[7] == 1);
+}
+
+_Bool x[8];
+unsigned short y[8] = { 11, 1, 9, 5, 1, 44, 1, 1 };
+
+int
+main (void)
+{
+ check_vect ();
+
+ f1 (x, y);
+
+ if (x[0] || !x[1])
+ __builtin_abort ();
+
+ x[1] = 0;
+
+ f2 (x, y);
+
+ if (x[0] || !x[1] || x[2] | x[3] || !x[4] || x[5] || !x[6] || !x[7])
+ __builtin_abort ();
+
+ return 0;
+}
--- /dev/null
+/* { dg-options "-O2 -ftree-vectorize" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+/*
+** foo:
+** (
+** ldr d([0-9]+), \[x1\]
+** ldr q([0-9]+), \[x0\]
+** saddw v([0-9]+)\.4s, v\2\.4s, v\1\.4h
+** str q\3, \[x0\]
+** |
+** ldr q([0-9]+), \[x0\]
+** ldr d([0-9]+), \[x1\]
+** saddw v([0-9]+)\.4s, v\4\.4s, v\5\.4h
+** str q\6, \[x0\]
+** )
+** ret
+*/
+void
+foo (int *x, short *y)
+{
+ x[0] += y[0];
+ x[1] += y[1];
+ x[2] += y[2];
+ x[3] += y[3];
+}
return p ;
}
-/* { dg-final { scan-tree-dump-not "basic block vectorized" "slp2" } } */
+/* See PR92266 for the XFAIL. */
+/* { dg-final { scan-tree-dump-not "basic block vectorized" "slp2" { xfail ilp32 } } } */
/* Set vectype for STMT. */
scalar_type = TREE_TYPE (DR_REF (dr));
- STMT_VINFO_VECTYPE (stmt_info)
- = get_vectype_for_scalar_type (vinfo, scalar_type);
- if (!STMT_VINFO_VECTYPE (stmt_info))
+ tree vectype = get_vectype_for_scalar_type (vinfo, scalar_type);
+ if (!vectype)
{
if (dump_enabled_p ())
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"got vectype for stmt: %G%T\n",
- stmt_info->stmt, STMT_VINFO_VECTYPE (stmt_info));
+ stmt_info->stmt, vectype);
}
/* Adjust the minimal vectorization factor according to the
vector type. */
- vf = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
+ vf = TYPE_VECTOR_SUBPARTS (vectype);
*min_vf = upper_bound (*min_vf, vf);
+ /* Leave the BB vectorizer to pick the vector type later, based on
+ the final dataref group size and SLP node size. */
+ if (is_a <loop_vec_info> (vinfo))
+ STMT_VINFO_VECTYPE (stmt_info) = vectype;
+
if (gatherscatter != SG_NONE)
{
gather_scatter_info gs_info;
&& STMT_VINFO_DATA_REF (stmt_vinfo))
{
stmt_vec_info pattern_stmt_info;
- vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
- gcc_assert (vectype != NULL_TREE);
- if (!VECTOR_MODE_P (TYPE_MODE (vectype)))
+ tree nunits_vectype;
+ if (!vect_get_vector_types_for_stmt (stmt_vinfo, &vectype,
+ &nunits_vectype)
+ || !VECTOR_MODE_P (TYPE_MODE (vectype)))
return NULL;
if (check_bool_pattern (var, vinfo, bool_stmts))
return 0;
}
+/* Try to assign vector type VECTYPE to STMT_INFO for BB vectorization.
+ Return true if we can, meaning that this choice doesn't conflict with
+ existing SLP nodes that use STMT_INFO. */
+
+static bool
+vect_update_shared_vectype (stmt_vec_info stmt_info, tree vectype)
+{
+ tree old_vectype = STMT_VINFO_VECTYPE (stmt_info);
+ if (old_vectype && useless_type_conversion_p (vectype, old_vectype))
+ return true;
+
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
+ && DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
+ {
+ /* We maintain the invariant that if any statement in the group is
+ used, all other members of the group have the same vector type. */
+ stmt_vec_info first_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
+ stmt_vec_info member_info = first_info;
+ for (; member_info; member_info = DR_GROUP_NEXT_ELEMENT (member_info))
+ if (STMT_VINFO_NUM_SLP_USES (member_info) > 0
+ || is_pattern_stmt_p (member_info))
+ break;
+
+ if (!member_info)
+ {
+ for (member_info = first_info; member_info;
+ member_info = DR_GROUP_NEXT_ELEMENT (member_info))
+ STMT_VINFO_VECTYPE (member_info) = vectype;
+ return true;
+ }
+ }
+ else if (STMT_VINFO_NUM_SLP_USES (stmt_info) == 0
+ && !is_pattern_stmt_p (stmt_info))
+ {
+ STMT_VINFO_VECTYPE (stmt_info) = vectype;
+ return true;
+ }
+
+ if (dump_enabled_p ())
+ {
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "Build SLP failed: incompatible vector"
+ " types for: %G", stmt_info->stmt);
+ dump_printf_loc (MSG_NOTE, vect_location,
+ " old vector type: %T\n", old_vectype);
+ dump_printf_loc (MSG_NOTE, vect_location,
+ " new vector type: %T\n", vectype);
+ }
+ return false;
+}
+
+/* Try to infer and assign a vector type to all the statements in STMTS.
+ Used only for BB vectorization. */
+
+static bool
+vect_update_all_shared_vectypes (vec<stmt_vec_info> stmts)
+{
+ tree vectype, nunits_vectype;
+ if (!vect_get_vector_types_for_stmt (stmts[0], &vectype,
+ &nunits_vectype, stmts.length ()))
+ return false;
+
+ stmt_vec_info stmt_info;
+ unsigned int i;
+ FOR_EACH_VEC_ELT (stmts, i, stmt_info)
+ if (!vect_update_shared_vectype (stmt_info, vectype))
+ return false;
+
+ return true;
+}
+
/* Return true if call statements CALL1 and CALL2 are similar enough
to be combined into the same SLP group. */
stmt_vec_info stmt_info;
FOR_EACH_VEC_ELT (stmts, i, stmt_info)
{
+ vec_info *vinfo = stmt_info->vinfo;
gimple *stmt = stmt_info->stmt;
swap[i] = 0;
matches[i] = false;
tree nunits_vectype;
if (!vect_get_vector_types_for_stmt (stmt_info, &vectype,
- &nunits_vectype)
+ &nunits_vectype, group_size)
|| (nunits_vectype
&& !vect_record_max_nunits (stmt_info, group_size,
nunits_vectype, max_nunits)))
gcc_assert (vectype);
+ if (is_a <bb_vec_info> (vinfo)
+ && !vect_update_shared_vectype (stmt_info, vectype))
+ continue;
+
if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
{
rhs_code = CALL_EXPR;
FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
if (SLP_TREE_DEF_TYPE (grandchild) != vect_external_def)
break;
- if (!grandchild)
+ if (!grandchild
+ && vect_update_all_shared_vectypes (oprnd_info->def_stmts))
{
/* Roll back. */
this_tree_size = old_tree_size;
do extra work to cancel the pattern so the uses see the
scalar version. */
&& !is_pattern_stmt_p (stmt_info)
- && !oprnd_info->any_pattern)
+ && !oprnd_info->any_pattern
+ && vect_update_all_shared_vectypes (oprnd_info->def_stmts))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
if (SLP_TREE_DEF_TYPE (grandchild) != vect_external_def)
break;
- if (!grandchild)
+ if (!grandchild
+ && (vect_update_all_shared_vectypes
+ (oprnd_info->def_stmts)))
{
/* Roll back. */
this_tree_size = old_tree_size;
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
{
scalar_type = TREE_TYPE (DR_REF (dr));
- vectype = get_vectype_for_scalar_type (vinfo, scalar_type);
group_size = DR_GROUP_SIZE (stmt_info);
+ vectype = get_vectype_for_scalar_type (vinfo, scalar_type, group_size);
}
else if (!dr && REDUC_GROUP_FIRST_ELEMENT (stmt_info))
{
Memory accesses already got their vector type assigned
in vect_analyze_data_refs. */
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
- if (bb_vinfo
- && ! STMT_VINFO_DATA_REF (stmt_info))
+ if (bb_vinfo && STMT_VINFO_VECTYPE (stmt_info) == boolean_type_node)
{
- tree vectype, nunits_vectype;
- if (!vect_get_vector_types_for_stmt (stmt_info, &vectype,
- &nunits_vectype))
- /* We checked this when building the node. */
- gcc_unreachable ();
- if (vectype == boolean_type_node)
- {
- vectype = vect_get_mask_type_for_stmt (stmt_info);
- if (!vectype)
- /* vect_get_mask_type_for_stmt has already explained the
- failure. */
- return false;
- }
+ tree vectype = vect_get_mask_type_for_stmt (stmt_info, node);
+ if (!vectype)
+ /* vect_get_mask_type_for_stmt has already explained the
+ failure. */
+ return false;
stmt_vec_info sstmt_info;
unsigned int i;
&& vect_mask_constant_operand_p (stmt_vinfo))
vector_type = truth_type_for (stmt_vectype);
else
- vector_type = get_vectype_for_scalar_type (vinfo, TREE_TYPE (op));
+ vector_type = get_vectype_for_scalar_type (vinfo, TREE_TYPE (op), op_node);
/* ??? For lane-reducing ops we should also have the required number
of vector stmts initialized rather than second-guessing here. */
/* Without looking at the actual initializer a vector of
constants can be implemented as load from the constant pool.
When all elements are the same we can use a splat. */
- tree vectype = get_vectype_for_scalar_type (vinfo, TREE_TYPE (op));
+ tree vectype = get_vectype_for_scalar_type (vinfo, TREE_TYPE (op), node);
unsigned group_size = SLP_TREE_SCALAR_STMTS (node).length ();
unsigned num_vects_to_check;
unsigned HOST_WIDE_INT const_nunits;
/* If all arguments are external or constant defs, infer the vector type
from the scalar type. */
if (!vectype_in)
- vectype_in = get_vectype_for_scalar_type (vinfo, rhs_type);
+ vectype_in = get_vectype_for_scalar_type (vinfo, rhs_type, slp_node);
if (vec_stmt)
gcc_assert (vectype_in);
if (!vectype_in)
&& bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
{
tree arg_type = TREE_TYPE (gimple_call_arg (stmt, i));
- arginfo[i].vectype = get_vectype_for_scalar_type (vinfo, arg_type);
+ arginfo[i].vectype = get_vectype_for_scalar_type (vinfo, arg_type,
+ slp_node);
if (arginfo[i].vectype == NULL
|| (simd_clone_subparts (arginfo[i].vectype)
> bestn->simdclone->simdlen))
/* If op0 is an external or constant def, infer the vector type
from the scalar type. */
if (!vectype_in)
- vectype_in = get_vectype_for_scalar_type (vinfo, rhs_type);
+ vectype_in = get_vectype_for_scalar_type (vinfo, rhs_type, slp_node);
if (vec_stmt)
gcc_assert (vectype_in);
if (!vectype_in)
/* If op0 is an external or constant def, infer the vector type
from the scalar type. */
if (!vectype)
- vectype = get_vectype_for_scalar_type (vinfo, TREE_TYPE (op0));
+ vectype = get_vectype_for_scalar_type (vinfo, TREE_TYPE (op0), slp_node);
if (vec_stmt)
gcc_assert (vectype);
if (!vectype)
"vector/vector shift/rotate found.\n");
if (!op1_vectype)
- op1_vectype = get_vectype_for_scalar_type (vinfo, TREE_TYPE (op1));
+ op1_vectype = get_vectype_for_scalar_type (vinfo, TREE_TYPE (op1),
+ slp_node);
incompatible_op1_vectype_p
= (op1_vectype == NULL_TREE
|| maybe_ne (TYPE_VECTOR_SUBPARTS (op1_vectype),
vectype = vectype_out;
}
else
- vectype = get_vectype_for_scalar_type (vinfo, TREE_TYPE (op0));
+ vectype = get_vectype_for_scalar_type (vinfo, TREE_TYPE (op0),
+ slp_node);
}
if (vec_stmt)
gcc_assert (vectype);
condition operands are supportable using vec_is_simple_use. */
static bool
-vect_is_simple_cond (tree cond, vec_info *vinfo,
+vect_is_simple_cond (tree cond, vec_info *vinfo, slp_tree slp_node,
tree *comp_vectype, enum vect_def_type *dts,
tree vectype)
{
scalar_type = build_nonstandard_integer_type
(tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype))),
TYPE_UNSIGNED (scalar_type));
- *comp_vectype = get_vectype_for_scalar_type (vinfo, scalar_type);
+ *comp_vectype = get_vectype_for_scalar_type (vinfo, scalar_type,
+ slp_node);
}
return true;
then_clause = gimple_assign_rhs2 (stmt);
else_clause = gimple_assign_rhs3 (stmt);
- if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo,
+ if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo, slp_node,
&comp_vectype, &dts[0], slp_node ? NULL : vectype)
|| !comp_vectype)
return false;
/* Invariant comparison. */
if (!vectype)
{
- vectype = get_vectype_for_scalar_type (vinfo, TREE_TYPE (rhs1));
+ vectype = get_vectype_for_scalar_type (vinfo, TREE_TYPE (rhs1),
+ slp_node);
if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype), nunits))
return false;
}
/* Function get_vectype_for_scalar_type.
Returns the vector type corresponding to SCALAR_TYPE as supported
- by the target. */
+ by the target. If GROUP_SIZE is nonzero and we're performing BB
+ vectorization, make sure that the number of elements in the vector
+ is no bigger than GROUP_SIZE. */
tree
-get_vectype_for_scalar_type (vec_info *vinfo, tree scalar_type)
+get_vectype_for_scalar_type (vec_info *vinfo, tree scalar_type,
+ unsigned int group_size)
{
+ /* For BB vectorization, we should always have a group size once we've
+ constructed the SLP tree; the only valid uses of zero GROUP_SIZEs
+ are tentative requests during things like early data reference
+ analysis and pattern recognition. */
+ if (is_a <bb_vec_info> (vinfo))
+ gcc_assert (vinfo->slp_instances.is_empty () || group_size != 0);
+ else
+ group_size = 0;
+
tree vectype = get_related_vectype_for_scalar_type (vinfo->vector_mode,
scalar_type);
if (vectype && vinfo->vector_mode == VOIDmode)
vinfo->vector_mode = TYPE_MODE (vectype);
+ /* Register the natural choice of vector type, before the group size
+ has been applied. */
if (vectype)
vinfo->used_vector_modes.add (TYPE_MODE (vectype));
+ /* If the natural choice of vector type doesn't satisfy GROUP_SIZE,
+ try again with an explicit number of elements. */
+ if (vectype
+ && group_size
+ && maybe_ge (TYPE_VECTOR_SUBPARTS (vectype), group_size))
+ {
+ /* Start with the biggest number of units that fits within
+ GROUP_SIZE and halve it until we find a valid vector type.
+ Usually either the first attempt will succeed or all will
+ fail (in the latter case because GROUP_SIZE is too small
+ for the target), but it's possible that a target could have
+ a hole between supported vector types.
+
+ If GROUP_SIZE is not a power of 2, this has the effect of
+ trying the largest power of 2 that fits within the group,
+ even though the group is not a multiple of that vector size.
+ The BB vectorizer will then try to carve up the group into
+ smaller pieces. */
+ unsigned int nunits = 1 << floor_log2 (group_size);
+ do
+ {
+ vectype = get_related_vectype_for_scalar_type (vinfo->vector_mode,
+ scalar_type, nunits);
+ nunits /= 2;
+ }
+ while (nunits > 1 && !vectype);
+ }
+
return vectype;
}
+/* Return the vector type corresponding to SCALAR_TYPE as supported
+ by the target. NODE, if nonnull, is the SLP tree node that will
+ use the returned vector type. */
+
+tree
+get_vectype_for_scalar_type (vec_info *vinfo, tree scalar_type, slp_tree node)
+{
+ unsigned int group_size = 0;
+ if (node)
+ {
+ group_size = SLP_TREE_SCALAR_OPS (node).length ();
+ if (group_size == 0)
+ group_size = SLP_TREE_SCALAR_STMTS (node).length ();
+ }
+ return get_vectype_for_scalar_type (vinfo, scalar_type, group_size);
+}
+
/* Function get_mask_type_for_scalar_type.
Returns the mask type corresponding to a result of comparison
- of vectors of specified SCALAR_TYPE as supported by target. */
+ of vectors of specified SCALAR_TYPE as supported by target.
+ NODE, if nonnull, is the SLP tree node that will use the returned
+ vector type. */
tree
-get_mask_type_for_scalar_type (vec_info *vinfo, tree scalar_type)
+get_mask_type_for_scalar_type (vec_info *vinfo, tree scalar_type,
+ slp_tree node)
{
- tree vectype = get_vectype_for_scalar_type (vinfo, scalar_type);
+ tree vectype = get_vectype_for_scalar_type (vinfo, scalar_type, node);
if (!vectype)
return NULL;
/* Try to compute the vector types required to vectorize STMT_INFO,
returning true on success and false if vectorization isn't possible.
+ If GROUP_SIZE is nonzero and we're performing BB vectorization,
+ make sure that the number of elements in the vectors is no bigger
+ than GROUP_SIZE.
On success:
opt_result
vect_get_vector_types_for_stmt (stmt_vec_info stmt_info,
tree *stmt_vectype_out,
- tree *nunits_vectype_out)
+ tree *nunits_vectype_out,
+ unsigned int group_size)
{
vec_info *vinfo = stmt_info->vinfo;
gimple *stmt = stmt_info->stmt;
+ /* For BB vectorization, we should always have a group size once we've
+ constructed the SLP tree; the only valid uses of zero GROUP_SIZEs
+ are tentative requests during things like early data reference
+ analysis and pattern recognition. */
+ if (is_a <bb_vec_info> (vinfo))
+ gcc_assert (vinfo->slp_instances.is_empty () || group_size != 0);
+ else
+ group_size = 0;
+
*stmt_vectype_out = NULL_TREE;
*nunits_vectype_out = NULL_TREE;
tree vectype;
tree scalar_type = NULL_TREE;
- if (STMT_VINFO_VECTYPE (stmt_info))
+ if (group_size == 0 && STMT_VINFO_VECTYPE (stmt_info))
{
*stmt_vectype_out = vectype = STMT_VINFO_VECTYPE (stmt_info);
if (dump_enabled_p ())
}
else
{
- gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
- if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
+ if (data_reference *dr = STMT_VINFO_DATA_REF (stmt_info))
+ scalar_type = TREE_TYPE (DR_REF (dr));
+ else if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3));
else
scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
/* Pure bool ops don't participate in number-of-units computation.
For comparisons use the types being compared. */
- if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type)
+ if (!STMT_VINFO_DATA_REF (stmt_info)
+ && VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type)
&& is_gimple_assign (stmt)
&& gimple_assign_rhs_code (stmt) != COND_EXPR)
{
}
if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "get vectype for scalar type: %T\n", scalar_type);
- vectype = get_vectype_for_scalar_type (vinfo, scalar_type);
+ {
+ if (group_size)
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "get vectype for scalar type (group size %d):"
+ " %T\n", group_size, scalar_type);
+ else
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "get vectype for scalar type: %T\n", scalar_type);
+ }
+ vectype = get_vectype_for_scalar_type (vinfo, scalar_type, group_size);
if (!vectype)
return opt_result::failure_at (stmt,
"not vectorized:"
dump_printf_loc (MSG_NOTE, vect_location,
"get vectype for smallest scalar type: %T\n",
scalar_type);
- nunits_vectype = get_vectype_for_scalar_type (vinfo, scalar_type);
+ nunits_vectype = get_vectype_for_scalar_type (vinfo, scalar_type,
+ group_size);
if (!nunits_vectype)
return opt_result::failure_at
(stmt, "not vectorized: unsupported data-type %T\n",
/* Try to determine the correct vector type for STMT_INFO, which is a
statement that produces a scalar boolean result. Return the vector
- type on success, otherwise return NULL_TREE. */
+ type on success, otherwise return NULL_TREE. NODE, if nonnull,
+ is the SLP tree node that will use the returned vector type. */
opt_tree
-vect_get_mask_type_for_stmt (stmt_vec_info stmt_info)
+vect_get_mask_type_for_stmt (stmt_vec_info stmt_info, slp_tree node)
{
vec_info *vinfo = stmt_info->vinfo;
gimple *stmt = stmt_info->stmt;
&& !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt))))
{
scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
- mask_type = get_mask_type_for_scalar_type (vinfo, scalar_type);
+ mask_type = get_mask_type_for_scalar_type (vinfo, scalar_type, node);
if (!mask_type)
return opt_tree::failure_at (stmt,
/* In tree-vect-stmts.c. */
extern tree get_related_vectype_for_scalar_type (machine_mode, tree,
poly_uint64 = 0);
-extern tree get_vectype_for_scalar_type (vec_info *, tree);
-extern tree get_mask_type_for_scalar_type (vec_info *, tree);
+extern tree get_vectype_for_scalar_type (vec_info *, tree, unsigned int = 0);
+extern tree get_vectype_for_scalar_type (vec_info *, tree, slp_tree);
+extern tree get_mask_type_for_scalar_type (vec_info *, tree, slp_tree = 0);
extern tree get_same_sized_vectype (tree, tree);
extern bool vect_chooses_same_modes_p (vec_info *, machine_mode);
extern bool vect_get_loop_mask_type (loop_vec_info);
extern gcall *vect_gen_while (tree, tree, tree);
extern tree vect_gen_while_not (gimple_seq *, tree, tree, tree);
extern opt_result vect_get_vector_types_for_stmt (stmt_vec_info, tree *,
- tree *);
-extern opt_tree vect_get_mask_type_for_stmt (stmt_vec_info);
+ tree *, unsigned int = 0);
+extern opt_tree vect_get_mask_type_for_stmt (stmt_vec_info, slp_tree = 0);
/* In tree-vect-data-refs.c. */
extern bool vect_can_force_dr_alignment_p (const_tree, poly_uint64);