static opt_result
vect_determine_vf_for_stmt_1 (stmt_vec_info stmt_info,
bool vectype_maybe_set_p,
- poly_uint64 *vf,
- vec<stmt_vec_info > *mask_producers)
+ poly_uint64 *vf)
{
gimple *stmt = stmt_info->stmt;
gcc_assert ((STMT_VINFO_DATA_REF (stmt_info)
|| vectype_maybe_set_p)
&& STMT_VINFO_VECTYPE (stmt_info) == stmt_vectype);
- else if (stmt_vectype == boolean_type_node)
- mask_producers->safe_push (stmt_info);
else
STMT_VINFO_VECTYPE (stmt_info) = stmt_vectype;
}
/* Subroutine of vect_determine_vectorization_factor. Set the vector
types of STMT_INFO and all attached pattern statements and update
- the vectorization factor VF accordingly. If some of the statements
- produce a mask result whose vector type can only be calculated later,
- add them to MASK_PRODUCERS. Return true on success or false if
- something prevented vectorization. */
+ the vectorization factor VF accordingly. Return true on success
+ or false if something prevented vectorization. */
static opt_result
-vect_determine_vf_for_stmt (stmt_vec_info stmt_info, poly_uint64 *vf,
- vec<stmt_vec_info > *mask_producers)
+vect_determine_vf_for_stmt (stmt_vec_info stmt_info, poly_uint64 *vf)
{
vec_info *vinfo = stmt_info->vinfo;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: %G",
stmt_info->stmt);
- opt_result res
- = vect_determine_vf_for_stmt_1 (stmt_info, false, vf, mask_producers);
+ opt_result res = vect_determine_vf_for_stmt_1 (stmt_info, false, vf);
if (!res)
return res;
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern def stmt: %G",
def_stmt_info->stmt);
- res = vect_determine_vf_for_stmt_1 (def_stmt_info, true,
- vf, mask_producers);
+ res = vect_determine_vf_for_stmt_1 (def_stmt_info, true, vf);
if (!res)
return res;
}
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern statement: %G",
stmt_info->stmt);
- res = vect_determine_vf_for_stmt_1 (stmt_info, true, vf, mask_producers);
+ res = vect_determine_vf_for_stmt_1 (stmt_info, true, vf);
if (!res)
return res;
}
tree vectype;
stmt_vec_info stmt_info;
unsigned i;
- auto_vec<stmt_vec_info> mask_producers;
DUMP_VECT_SCOPE ("vect_determine_vectorization_factor");
{
stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
opt_result res
- = vect_determine_vf_for_stmt (stmt_info, &vectorization_factor,
- &mask_producers);
+ = vect_determine_vf_for_stmt (stmt_info, &vectorization_factor);
if (!res)
return res;
}
return opt_result::failure_at (vect_location,
"not vectorized: unsupported data-type\n");
LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
-
- for (i = 0; i < mask_producers.length (); i++)
- {
- stmt_info = mask_producers[i];
- opt_tree mask_type = vect_get_mask_type_for_stmt (stmt_info);
- if (!mask_type)
- return opt_result::propagate_failure (mask_type);
- STMT_VINFO_VECTYPE (stmt_info) = mask_type;
- }
-
return opt_result::success ();
}
|| rhs_code == LROTATE_EXPR
|| rhs_code == RROTATE_EXPR)
{
- if (vectype == boolean_type_node)
- {
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Build SLP failed: shift of a"
- " boolean.\n");
- /* Fatal mismatch. */
- matches[0] = false;
- return false;
- }
-
vec_mode = TYPE_MODE (vectype);
/* First see if we have a vector/vector shift. */
if (alt_stmt_code != ERROR_MARK
&& TREE_CODE_CLASS (alt_stmt_code) != tcc_reference)
{
- if (vectype == boolean_type_node
- || !vect_two_operations_perm_ok_p (stmts, group_size,
- vectype, alt_stmt_code))
+ if (!vect_two_operations_perm_ok_p (stmts, group_size,
+ vectype, alt_stmt_code))
{
for (i = 0; i < group_size; ++i)
if (gimple_assign_rhs_code (stmts[i]->stmt) == alt_stmt_code)
stmt_vec_info stmt_info = SLP_TREE_SCALAR_STMTS (node)[0];
gcc_assert (STMT_SLP_TYPE (stmt_info) != loop_vect);
- /* For BB vectorization vector types are assigned here.
- Memory accesses already got their vector type assigned
- in vect_analyze_data_refs. */
- bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
- if (bb_vinfo && STMT_VINFO_VECTYPE (stmt_info) == boolean_type_node)
- {
- unsigned int group_size = SLP_TREE_SCALAR_STMTS (node).length ();
- tree vectype = vect_get_mask_type_for_stmt (stmt_info, group_size);
- if (!vectype)
- /* vect_get_mask_type_for_stmt has already explained the
- failure. */
- return false;
-
- stmt_vec_info sstmt_info;
- unsigned int i;
- FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, sstmt_info)
- STMT_VINFO_VECTYPE (sstmt_info) = vectype;
- }
-
/* Calculate the number of vector statements to be created for the
scalar stmts in this node. For SLP reductions it is equal to the
number of vector statements in the children (which has already been
return false;
}
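+ /* A mask vector and a nonmask vector are two different representations
+ of a boolean value, so do not let this statement's input and output
+ use different ones. */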
+ if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
+ != VECTOR_BOOLEAN_TYPE_P (vectype_in))
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "mixed mask and nonmask vector types\n");
+ return false;
+ }
+
/* FORNOW */
nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
/* Most operations cannot handle bit-precision types without extra
truncations. */
- if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
+ bool mask_op_p = VECTOR_BOOLEAN_TYPE_P (vectype_out);
+ if (!mask_op_p
&& !type_has_mode_precision_p (TREE_TYPE (scalar_dest))
/* Exception are bitwise binary operations. */
&& code != BIT_IOR_EXPR
if (maybe_ne (nunits_out, nunits_in))
return false;
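+ /* Vector types of the second and third operands, if present; collected
+ so that the mask/nonmask consistency check below can inspect them. */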
+ tree vectype2 = NULL_TREE, vectype3 = NULL_TREE;
if (op_type == binary_op || op_type == ternary_op)
{
op1 = gimple_assign_rhs2 (stmt);
- if (!vect_is_simple_use (op1, vinfo, &dt[1]))
+ if (!vect_is_simple_use (op1, vinfo, &dt[1], &vectype2))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
if (op_type == ternary_op)
{
op2 = gimple_assign_rhs3 (stmt);
- if (!vect_is_simple_use (op2, vinfo, &dt[2]))
+ if (!vect_is_simple_use (op2, vinfo, &dt[2], &vectype3))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
gcc_assert (ncopies >= 1);
+ /* Reject attempts to combine mask types with nonmask types, e.g. if
+ we have an AND between a (nonmask) boolean loaded from memory and
+ a (mask) boolean result of a comparison.
+
+ TODO: We could easily fix these cases up using pattern statements. */
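+ /* In GIMPLE terms the rejected case looks roughly like this
+ (illustrative names only):
+
+ _1 = b; (nonmask) boolean loaded from memory
+ _2 = x_3 < y_4; (mask) boolean result of a comparison
+ _5 = _1 & _2; operands would need different vector types */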
+ if (VECTOR_BOOLEAN_TYPE_P (vectype) != mask_op_p
+ || (vectype2 && VECTOR_BOOLEAN_TYPE_P (vectype2) != mask_op_p)
+ || (vectype3 && VECTOR_BOOLEAN_TYPE_P (vectype3) != mask_op_p))
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "mixed mask and nonmask vector types\n");
+ return false;
+ }
+
/* Supportable by target? */
vec_mode = TYPE_MODE (vectype);
- Set *STMT_VECTYPE_OUT to:
- - NULL_TREE if the statement doesn't need to be vectorized;
- - boolean_type_node if the statement is a boolean operation whose
- vector type can only be determined once all the other vector types
- are known; and
- - the equivalent of STMT_VINFO_VECTYPE otherwise.
- Set *NUNITS_VECTYPE_OUT to the vector type that contains the maximum
tree scalar_type = NULL_TREE;
if (group_size == 0 && STMT_VINFO_VECTYPE (stmt_info))
{
- *stmt_vectype_out = vectype = STMT_VINFO_VECTYPE (stmt_info);
+ vectype = STMT_VINFO_VECTYPE (stmt_info);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"precomputed vectype: %T\n", vectype);
}
+ else if (vect_use_mask_type_p (stmt_info))
+ {
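+ /* vect_use_mask_type_p tells us the statement produces a mask result;
+ use the mask precision recorded in STMT_INFO to pick the scalar type
+ and ask the target for the corresponding mask vector type. */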
+ unsigned int precision = stmt_info->mask_precision;
+ scalar_type = build_nonstandard_integer_type (precision, 1);
+ vectype = get_mask_type_for_scalar_type (vinfo, scalar_type, group_size);
+ if (!vectype)
+ return opt_result::failure_at (stmt, "not vectorized: unsupported"
+ " data-type %T\n", scalar_type);
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location, "vectype: %T\n", vectype);
+ }
else
{
if (data_reference *dr = STMT_VINFO_DATA_REF (stmt_info))
else
scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
- /* Pure bool ops don't participate in number-of-units computation.
- For comparisons use the types being compared. */
- if (!STMT_VINFO_DATA_REF (stmt_info)
- && VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type)
- && is_gimple_assign (stmt)
- && gimple_assign_rhs_code (stmt) != COND_EXPR)
- {
- *stmt_vectype_out = boolean_type_node;
-
- tree rhs1 = gimple_assign_rhs1 (stmt);
- if (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison
- && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (rhs1)))
- scalar_type = TREE_TYPE (rhs1);
- else
- {
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "pure bool operation.\n");
- return opt_result::success ();
- }
- }
-
if (dump_enabled_p ())
{
if (group_size)
" unsupported data-type %T\n",
scalar_type);
- if (!*stmt_vectype_out)
- *stmt_vectype_out = vectype;
-
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "vectype: %T\n", vectype);
}
+ *stmt_vectype_out = vectype;
/* Don't try to compute scalar types if the stmt produces a boolean
vector; use the existing vector type instead. */
tree nunits_vectype = vectype;
- if (!VECTOR_BOOLEAN_TYPE_P (vectype)
- && *stmt_vectype_out != boolean_type_node)
+ if (!VECTOR_BOOLEAN_TYPE_P (vectype))
{
/* The number of units is set according to the smallest scalar
type (or the largest vector size, but we only support one
}
}
- gcc_assert (*stmt_vectype_out == boolean_type_node
- || multiple_p (TYPE_VECTOR_SUBPARTS (nunits_vectype),
- TYPE_VECTOR_SUBPARTS (*stmt_vectype_out)));
+ gcc_assert (multiple_p (TYPE_VECTOR_SUBPARTS (nunits_vectype),
+ TYPE_VECTOR_SUBPARTS (*stmt_vectype_out)));
if (dump_enabled_p ())
{
*nunits_vectype_out = nunits_vectype;
return opt_result::success ();
}
-
-/* Try to determine the correct vector type for STMT_INFO, which is a
- statement that produces a scalar boolean result. Return the vector
- type on success, otherwise return NULL_TREE. If GROUP_SIZE is nonzero
- and we're performing BB vectorization, make sure that the number of
- elements in the vector is no bigger than GROUP_SIZE. */
-
-opt_tree
-vect_get_mask_type_for_stmt (stmt_vec_info stmt_info, unsigned int group_size)
-{
- vec_info *vinfo = stmt_info->vinfo;
- gimple *stmt = stmt_info->stmt;
- tree mask_type = NULL;
- tree vectype, scalar_type;
-
- if (is_gimple_assign (stmt)
- && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison
- && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt))))
- {
- scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
- mask_type = get_mask_type_for_scalar_type (vinfo, scalar_type,
- group_size);
-
- if (!mask_type)
- return opt_tree::failure_at (stmt,
- "not vectorized: unsupported mask\n");
- }
- else
- {
- tree rhs;
- ssa_op_iter iter;
- enum vect_def_type dt;
-
- FOR_EACH_SSA_TREE_OPERAND (rhs, stmt, iter, SSA_OP_USE)
- {
- if (!vect_is_simple_use (rhs, stmt_info->vinfo, &dt, &vectype))
- return opt_tree::failure_at (stmt,
- "not vectorized:can't compute mask"
- " type for statement, %G", stmt);
-
- /* No vectype probably means external definition.
- Allow it in case there is another operand which
- allows to determine mask type. */
- if (!vectype)
- continue;
-
- if (!mask_type)
- mask_type = vectype;
- else if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
- TYPE_VECTOR_SUBPARTS (vectype)))
- return opt_tree::failure_at (stmt,
- "not vectorized: different sized mask"
- " types in statement, %T and %T\n",
- mask_type, vectype);
- else if (VECTOR_BOOLEAN_TYPE_P (mask_type)
- != VECTOR_BOOLEAN_TYPE_P (vectype))
- return opt_tree::failure_at (stmt,
- "not vectorized: mixed mask and "
- "nonmask vector types in statement, "
- "%T and %T\n",
- mask_type, vectype);
- }
-
- /* We may compare boolean value loaded as vector of integers.
- Fix mask_type in such case. */
- if (mask_type
- && !VECTOR_BOOLEAN_TYPE_P (mask_type)
- && gimple_code (stmt) == GIMPLE_ASSIGN
- && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
- mask_type = truth_type_for (mask_type);
- }
-
- /* No mask_type should mean loop invariant predicate.
- This is probably a subject for optimization in if-conversion. */
- if (!mask_type)
- return opt_tree::failure_at (stmt,
- "not vectorized: can't compute mask type "
- "for statement: %G", stmt);
-
- return opt_tree::success (mask_type);
-}