#include "omp-simd-clone.h"
#include "predict.h"
-/* Pattern recognition functions */
-static gimple *vect_recog_widen_sum_pattern (vec<gimple *> *, tree *,
- tree *);
-static gimple *vect_recog_widen_mult_pattern (vec<gimple *> *, tree *,
- tree *);
-static gimple *vect_recog_dot_prod_pattern (vec<gimple *> *, tree *,
- tree *);
-static gimple *vect_recog_sad_pattern (vec<gimple *> *, tree *,
- tree *);
-static gimple *vect_recog_pow_pattern (vec<gimple *> *, tree *, tree *);
-static gimple *vect_recog_over_widening_pattern (vec<gimple *> *, tree *,
- tree *);
-static gimple *vect_recog_widen_shift_pattern (vec<gimple *> *,
- tree *, tree *);
-static gimple *vect_recog_rotate_pattern (vec<gimple *> *, tree *, tree *);
-static gimple *vect_recog_vector_vector_shift_pattern (vec<gimple *> *,
- tree *, tree *);
-static gimple *vect_recog_divmod_pattern (vec<gimple *> *,
- tree *, tree *);
-
-static gimple *vect_recog_mult_pattern (vec<gimple *> *,
- tree *, tree *);
-
-static gimple *vect_recog_mixed_size_cond_pattern (vec<gimple *> *,
- tree *, tree *);
-static gimple *vect_recog_bool_pattern (vec<gimple *> *, tree *, tree *);
-static gimple *vect_recog_mask_conversion_pattern (vec<gimple *> *, tree *, tree *);
-static gimple *vect_recog_gather_scatter_pattern (vec<gimple *> *, tree *,
- tree *);
-
-struct vect_recog_func
-{
- vect_recog_func_ptr fn;
- const char *name;
-};
-
-/* Note that ordering matters - the first pattern matching on a stmt
- is taken which means usually the more complex one needs to preceed
- the less comples onex (widen_sum only after dot_prod or sad for example). */
-static vect_recog_func vect_vect_recog_func_ptrs[NUM_PATTERNS] = {
- { vect_recog_widen_mult_pattern, "widen_mult" },
- { vect_recog_dot_prod_pattern, "dot_prod" },
- { vect_recog_sad_pattern, "sad" },
- { vect_recog_widen_sum_pattern, "widen_sum" },
- { vect_recog_pow_pattern, "pow" },
- { vect_recog_widen_shift_pattern, "widen_shift" },
- { vect_recog_over_widening_pattern, "over_widening" },
- { vect_recog_rotate_pattern, "rotate" },
- { vect_recog_vector_vector_shift_pattern, "vector_vector_shift" },
- { vect_recog_divmod_pattern, "divmod" },
- { vect_recog_mult_pattern, "mult" },
- { vect_recog_mixed_size_cond_pattern, "mixed_size_cond" },
- { vect_recog_bool_pattern, "bool" },
- /* This must come before mask conversion, and includes the parts
- of mask conversion that are needed for gather and scatter
- internal functions. */
- { vect_recog_gather_scatter_pattern, "gather_scatter" },
- { vect_recog_mask_conversion_pattern, "mask_conversion" }
-};
-
/* Report that we've found an instance of pattern PATTERN in
statement STMT. */
append_pattern_def_seq (stmt_info, stmt);
}
+/* Return true if the target supports a vector version of CODE,
+ where CODE is known to map to a direct optab. ITYPE specifies
+ the type of (some of) the scalar inputs and OTYPE specifies the
+ type of the scalar result.
+
+   If CODE allows the inputs and outputs to have different types
+ (such as for WIDEN_SUM_EXPR), it is the input mode rather
+ than the output mode that determines the appropriate target pattern.
+ Operand 0 of the target pattern then specifies the mode that the output
+ must have.
+
+ When returning true, set *VECOTYPE_OUT to the vector version of OTYPE.
+ Also set *VECITYPE_OUT to the vector version of ITYPE if VECITYPE_OUT
+ is nonnull. */
+
+static bool
+vect_supportable_direct_optab_p (tree otype, tree_code code,
+ tree itype, tree *vecotype_out,
+ tree *vecitype_out = NULL)
+{
+ tree vecitype = get_vectype_for_scalar_type (itype);
+ if (!vecitype)
+ return false;
+
+ tree vecotype = get_vectype_for_scalar_type (otype);
+ if (!vecotype)
+ return false;
+
+ optab optab = optab_for_tree_code (code, vecitype, optab_default);
+ if (!optab)
+ return false;
+
+ insn_code icode = optab_handler (optab, TYPE_MODE (vecitype));
+ if (icode == CODE_FOR_nothing
+ || insn_data[icode].operand[0].mode != TYPE_MODE (vecotype))
+ return false;
+
+ *vecotype_out = vecotype;
+ if (vecitype_out)
+ *vecitype_out = vecitype;
+ return true;
+}
+
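As an illustration only, not part of the patch: a recognizer written against the new helper follows the same shape as the converted patterns below. The sketch assumes the scalar idiom has already been matched (in real recognizers HALF_TYPE comes from looking through the widening conversions); example_recog_pattern and its locals are hypothetical names.

/* Hypothetical sketch: once HALF_TYPE, OPRND0 and OPRND1 have been
   extracted from LAST_STMT, a single call to
   vect_supportable_direct_optab_p both checks target support for the
   widened operation and records the vector result type in *TYPE_OUT.  */
static gimple *
example_recog_pattern (vec<gimple *> *stmts, tree *type_out)
{
  gimple *last_stmt = (*stmts)[0];
  tree type = TREE_TYPE (gimple_assign_lhs (last_stmt));
  tree oprnd0 = gimple_assign_rhs1 (last_stmt);
  tree oprnd1 = gimple_assign_rhs2 (last_stmt);
  tree half_type = TREE_TYPE (oprnd0);

  if (!vect_supportable_direct_optab_p (type, WIDEN_SUM_EXPR, half_type,
					type_out))
    return NULL;

  tree var = vect_recog_temp_ssa_var (type, NULL);
  return gimple_build_assign (var, WIDEN_SUM_EXPR, oprnd0, oprnd1);
}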
/* Check whether STMT2 is in the same loop or basic block as STMT1.
Which of the two applies depends on whether we're currently doing
loop-based or basic-block-based vectorization, as determined by
Output:
- * TYPE_IN: The type of the input arguments to the pattern.
-
* TYPE_OUT: The type of the output of this pattern.
* Return value: A new stmt that will be used to replace the sequence of
inner-loop nested in an outer-loop that is being vectorized). */
static gimple *
-vect_recog_dot_prod_pattern (vec<gimple *> *stmts, tree *type_in,
- tree *type_out)
+vect_recog_dot_prod_pattern (vec<gimple *> *stmts, tree *type_out)
{
gimple *stmt, *last_stmt = (*stmts)[0];
tree oprnd0, oprnd1;
vect_pattern_detected ("vect_recog_dot_prod_pattern", last_stmt);
half_type = TREE_TYPE (oprnd00);
- *type_in = half_type;
- *type_out = type;
+ if (!vect_supportable_direct_optab_p (type, DOT_PROD_EXPR, half_type,
+ type_out))
+ return NULL;
var = vect_recog_temp_ssa_var (type, NULL);
pattern_stmt = gimple_build_assign (var, DOT_PROD_EXPR,
Output:
- * TYPE_IN: The type of the input arguments to the pattern.
-
* TYPE_OUT: The type of the output of this pattern.
* Return value: A new stmt that will be used to replace the sequence of
*/
static gimple *
-vect_recog_sad_pattern (vec<gimple *> *stmts, tree *type_in,
- tree *type_out)
+vect_recog_sad_pattern (vec<gimple *> *stmts, tree *type_out)
{
gimple *last_stmt = (*stmts)[0];
tree sad_oprnd0, sad_oprnd1;
vect_pattern_detected ("vect_recog_sad_pattern", last_stmt);
- *type_in = TREE_TYPE (sad_oprnd0);
- *type_out = sum_type;
+ if (!vect_supportable_direct_optab_p (sum_type, SAD_EXPR,
+ TREE_TYPE (sad_oprnd0), type_out))
+ return NULL;
tree var = vect_recog_temp_ssa_var (sum_type, NULL);
gimple *pattern_stmt = gimple_build_assign (var, SAD_EXPR, sad_oprnd0,
Output:
- * TYPE_IN: The type of the input arguments to the pattern.
-
* TYPE_OUT: The type of the output of this pattern.
* Return value: A new stmt that will be used to replace the sequence of
*/
static gimple *
-vect_recog_widen_mult_pattern (vec<gimple *> *stmts,
- tree *type_in, tree *type_out)
+vect_recog_widen_mult_pattern (vec<gimple *> *stmts, tree *type_out)
{
gimple *last_stmt = stmts->pop ();
gimple *def_stmt0, *def_stmt1;
&dummy_int, &dummy_vec))
return NULL;
- *type_in = vectype;
*type_out = get_vectype_for_scalar_type (type);
+ if (!*type_out)
+ return NULL;
/* Pattern supported. Create a stmt to be used to replace the pattern: */
var = vect_recog_temp_ssa_var (itype, NULL);
Output:
- * TYPE_IN: The type of the input arguments to the pattern.
-
* TYPE_OUT: The type of the output of this pattern.
* Return value: A new stmt that will be used to replace the sequence of
*/
static gimple *
-vect_recog_pow_pattern (vec<gimple *> *stmts, tree *type_in,
- tree *type_out)
+vect_recog_pow_pattern (vec<gimple *> *stmts, tree *type_out)
{
gimple *last_stmt = (*stmts)[0];
tree base, exp;
if (node->simd_clones == NULL)
return NULL;
}
+ *type_out = get_vectype_for_scalar_type (TREE_TYPE (base));
+ if (!*type_out)
+ return NULL;
stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
tree def = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL);
gimple *g = gimple_build_assign (def, MULT_EXPR, exp, logc);
new_pattern_def_seq (stmt_vinfo, g);
- *type_in = TREE_TYPE (base);
- *type_out = NULL_TREE;
tree res = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL);
g = gimple_build_call (exp_decl, 1, def);
gimple_call_set_lhs (g, res);
/* We now have a pow or powi builtin function call with a constant
exponent. */
- *type_out = NULL_TREE;
-
/* Catch squaring. */
if ((tree_fits_shwi_p (exp)
&& tree_to_shwi (exp) == 2)
|| (TREE_CODE (exp) == REAL_CST
&& real_equal (&TREE_REAL_CST (exp), &dconst2)))
{
- *type_in = TREE_TYPE (base);
+ if (!vect_supportable_direct_optab_p (TREE_TYPE (base), MULT_EXPR,
+ TREE_TYPE (base), type_out))
+ return NULL;
var = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL);
stmt = gimple_build_assign (var, MULT_EXPR, base, base);
if (TREE_CODE (exp) == REAL_CST
&& real_equal (&TREE_REAL_CST (exp), &dconsthalf))
{
- *type_in = get_vectype_for_scalar_type (TREE_TYPE (base));
- if (*type_in
- && direct_internal_fn_supported_p (IFN_SQRT, *type_in,
+ *type_out = get_vectype_for_scalar_type (TREE_TYPE (base));
+ if (*type_out
+ && direct_internal_fn_supported_p (IFN_SQRT, *type_out,
OPTIMIZE_FOR_SPEED))
{
gcall *stmt = gimple_build_call_internal (IFN_SQRT, 1, base);
Output:
- * TYPE_IN: The type of the input arguments to the pattern.
-
* TYPE_OUT: The type of the output of this pattern.
* Return value: A new stmt that will be used to replace the sequence of
inner-loop nested in an outer-loop that is being vectorized). */
static gimple *
-vect_recog_widen_sum_pattern (vec<gimple *> *stmts, tree *type_in,
- tree *type_out)
+vect_recog_widen_sum_pattern (vec<gimple *> *stmts, tree *type_out)
{
gimple *stmt, *last_stmt = (*stmts)[0];
tree oprnd0, oprnd1;
if (!type_conversion_p (oprnd0, last_stmt, true, &half_type, &stmt,
&promotion)
|| !promotion)
- return NULL;
+ return NULL;
oprnd0 = gimple_assign_rhs1 (stmt);
vect_pattern_detected ("vect_recog_widen_sum_pattern", last_stmt);
- *type_in = half_type;
- *type_out = type;
+ if (!vect_supportable_direct_optab_p (type, WIDEN_SUM_EXPR, half_type,
+ type_out))
+ return NULL;
var = vect_recog_temp_ssa_var (type, NULL);
pattern_stmt = gimple_build_assign (var, WIDEN_SUM_EXPR, oprnd0, oprnd1);
demotion operation. We also check that S3 and S4 have only one use. */
static gimple *
-vect_recog_over_widening_pattern (vec<gimple *> *stmts,
- tree *type_in, tree *type_out)
+vect_recog_over_widening_pattern (vec<gimple *> *stmts, tree *type_out)
{
gimple *stmt = stmts->pop ();
gimple *pattern_stmt = NULL, *new_def_stmt, *prev_stmt = NULL,
if (TYPE_UNSIGNED (new_type) != TYPE_UNSIGNED (use_type)
|| TYPE_PRECISION (new_type) != TYPE_PRECISION (use_type))
{
+ *type_out = get_vectype_for_scalar_type (use_type);
+ if (!*type_out)
+ return NULL;
+
/* Create NEW_TYPE->USE_TYPE conversion. */
new_oprnd = make_ssa_name (use_type);
pattern_stmt = gimple_build_assign (new_oprnd, NOP_EXPR, var);
STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use_stmt)) = pattern_stmt;
- *type_in = get_vectype_for_scalar_type (new_type);
- *type_out = get_vectype_for_scalar_type (use_type);
-
/* We created a pattern statement for the last statement in the
sequence, so we don't need to associate it with the pattern
statement created for PREV_STMT. Therefore, we add PREV_STMT
STMT_VINFO_PATTERN_DEF_SEQ (vinfo_for_stmt (use_stmt))
= STMT_VINFO_PATTERN_DEF_SEQ (vinfo_for_stmt (prev_stmt));
- *type_in = vectype;
- *type_out = NULL_TREE;
+ *type_out = vectype;
}
stmts->safe_push (use_stmt);
Output:
- * TYPE_IN: The type of the input arguments to the pattern.
-
* TYPE_OUT: The type of the output of this pattern.
* Return value: A new stmt that will be used to replace the sequence of
WIDEN_LSHIFT_EXPR <a_t, CONST>. */
static gimple *
-vect_recog_widen_shift_pattern (vec<gimple *> *stmts,
- tree *type_in, tree *type_out)
+vect_recog_widen_shift_pattern (vec<gimple *> *stmts, tree *type_out)
{
gimple *last_stmt = stmts->pop ();
gimple *def_stmt0;
&dummy_int, &dummy_vec))
return NULL;
- *type_in = vectype;
*type_out = vectype_out;
/* Pattern supported. Create a stmt to be used to replace the pattern. */
Output:
- * TYPE_IN: The type of the input arguments to the pattern.
-
* TYPE_OUT: The type of the output of this pattern.
* Return value: A new stmt that will be used to replace the rotate
S0 stmt. */
static gimple *
-vect_recog_rotate_pattern (vec<gimple *> *stmts, tree *type_in, tree *type_out)
+vect_recog_rotate_pattern (vec<gimple *> *stmts, tree *type_out)
{
gimple *last_stmt = stmts->pop ();
tree oprnd0, oprnd1, lhs, var, var1, var2, vectype, type, stype, def, def2;
return NULL;
}
- *type_in = vectype;
*type_out = vectype;
- if (*type_in == NULL_TREE)
- return NULL;
if (dt == vect_external_def
&& TREE_CODE (oprnd1) == SSA_NAME
Output:
- * TYPE_IN: The type of the input arguments to the pattern.
-
* TYPE_OUT: The type of the output of this pattern.
* Return value: A new stmt that will be used to replace the shift/rotate
S3 stmt. */
static gimple *
-vect_recog_vector_vector_shift_pattern (vec<gimple *> *stmts,
- tree *type_in, tree *type_out)
+vect_recog_vector_vector_shift_pattern (vec<gimple *> *stmts, tree *type_out)
{
gimple *last_stmt = stmts->pop ();
tree oprnd0, oprnd1, lhs, var;
if (!def_vinfo)
return NULL;
- *type_in = get_vectype_for_scalar_type (TREE_TYPE (oprnd0));
- *type_out = *type_in;
- if (*type_in == NULL_TREE)
+ *type_out = get_vectype_for_scalar_type (TREE_TYPE (oprnd0));
+ if (*type_out == NULL_TREE)
return NULL;
tree def = NULL_TREE;
Output:
- * TYPE_IN: The type of the input arguments to the pattern.
-
* TYPE_OUT: The type of the output of this pattern.
* Return value: A new stmt that will be used to replace
the multiplication. */
static gimple *
-vect_recog_mult_pattern (vec<gimple *> *stmts,
- tree *type_in, tree *type_out)
+vect_recog_mult_pattern (vec<gimple *> *stmts, tree *type_out)
{
gimple *last_stmt = stmts->pop ();
tree oprnd0, oprnd1, vectype, itype;
vect_pattern_detected ("vect_recog_mult_pattern", last_stmt);
stmts->safe_push (last_stmt);
- *type_in = vectype;
*type_out = vectype;
return pattern_stmt;
Output:
- * TYPE_IN: The type of the input arguments to the pattern.
-
* TYPE_OUT: The type of the output of this pattern.
* Return value: A new stmt that will be used to replace the division
S1 or modulo S4 stmt. */
static gimple *
-vect_recog_divmod_pattern (vec<gimple *> *stmts,
- tree *type_in, tree *type_out)
+vect_recog_divmod_pattern (vec<gimple *> *stmts, tree *type_out)
{
gimple *last_stmt = stmts->pop ();
tree oprnd0, oprnd1, vectype, itype, cond;
stmts->safe_push (last_stmt);
- *type_in = vectype;
*type_out = vectype;
return pattern_stmt;
}
stmts->safe_push (last_stmt);
- *type_in = vectype;
*type_out = vectype;
return pattern_stmt;
}
Output:
- * TYPE_IN: The type of the input arguments to the pattern.
-
* TYPE_OUT: The type of the output of this pattern.
* Return value: A new stmt that will be used to replace the pattern.
a_T = (TYPE) a_it; */
static gimple *
-vect_recog_mixed_size_cond_pattern (vec<gimple *> *stmts, tree *type_in,
- tree *type_out)
+vect_recog_mixed_size_cond_pattern (vec<gimple *> *stmts, tree *type_out)
{
gimple *last_stmt = (*stmts)[0];
tree cond_expr, then_clause, else_clause;
def_stmt_info = new_stmt_vec_info (def_stmt, vinfo);
set_vinfo_for_stmt (def_stmt, def_stmt_info);
STMT_VINFO_VECTYPE (def_stmt_info) = vecitype;
- *type_in = vecitype;
*type_out = vectype;
vect_pattern_detected ("vect_recog_mixed_size_cond_pattern", last_stmt);
Output:
- * TYPE_IN: The type of the input arguments to the pattern.
-
* TYPE_OUT: The type of the output of this pattern.
* Return value: A new stmt that will be used to replace the pattern.
but the above is more efficient. */
static gimple *
-vect_recog_bool_pattern (vec<gimple *> *stmts, tree *type_in,
- tree *type_out)
+vect_recog_bool_pattern (vec<gimple *> *stmts, tree *type_out)
{
gimple *last_stmt = stmts->pop ();
enum tree_code rhs_code;
}
*type_out = vectype;
- *type_in = vectype;
stmts->safe_push (last_stmt);
vect_pattern_detected ("vect_recog_bool_pattern", last_stmt);
gimple_assign_rhs2 (last_stmt),
gimple_assign_rhs3 (last_stmt));
*type_out = vectype;
- *type_in = vectype;
stmts->safe_push (last_stmt);
vect_pattern_detected ("vect_recog_bool_pattern", last_stmt);
STMT_VINFO_DR_WRT_VEC_LOOP (pattern_stmt_info)
= STMT_VINFO_DR_WRT_VEC_LOOP (stmt_vinfo);
*type_out = vectype;
- *type_in = vectype;
stmts->safe_push (last_stmt);
vect_pattern_detected ("vect_recog_bool_pattern", last_stmt);
S4' c_1' = m_3'' ? c_2 : c_3; */
static gimple *
-vect_recog_mask_conversion_pattern (vec<gimple *> *stmts, tree *type_in,
- tree *type_out)
+vect_recog_mask_conversion_pattern (vec<gimple *> *stmts, tree *type_out)
{
gimple *last_stmt = stmts->pop ();
enum tree_code rhs_code;
= STMT_VINFO_DR_WRT_VEC_LOOP (stmt_vinfo);
*type_out = vectype1;
- *type_in = vectype1;
stmts->safe_push (last_stmt);
vect_pattern_detected ("vect_recog_mask_conversion_pattern", last_stmt);
gimple_assign_rhs3 (last_stmt));
*type_out = vectype1;
- *type_in = vectype1;
stmts->safe_push (last_stmt);
vect_pattern_detected ("vect_recog_mask_conversion_pattern", last_stmt);
pattern_stmt = gimple_build_assign (lhs, rhs_code, rhs1, rhs2);
*type_out = vectype1;
- *type_in = vectype1;
stmts->safe_push (last_stmt);
vect_pattern_detected ("vect_recog_mask_conversion_pattern", last_stmt);
/* Try to convert STMT into a call to a gather load or scatter store
internal function. Return the final statement on success and set
- *TYPE_IN and *TYPE_OUT to the vector type being loaded or stored.
+ *TYPE_OUT to the vector type being loaded or stored.
This function only handles gathers and scatters that were recognized
as such from the outset (indicated by STMT_VINFO_GATHER_SCATTER_P). */
static gimple *
vect_try_gather_scatter_pattern (gimple *stmt, stmt_vec_info last_stmt_info,
- tree *type_in, tree *type_out)
+ tree *type_out)
{
/* Currently we only support this for loop vectorization. */
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
*type_out = vectype;
- *type_in = vectype;
vect_pattern_detected ("gather/scatter pattern", stmt);
return pattern_stmt;
/* Pattern wrapper around vect_try_gather_scatter_pattern. */
static gimple *
-vect_recog_gather_scatter_pattern (vec<gimple *> *stmts, tree *type_in,
- tree *type_out)
+vect_recog_gather_scatter_pattern (vec<gimple *> *stmts, tree *type_out)
{
gimple *last_stmt = stmts->pop ();
stmt_vec_info last_stmt_info = vinfo_for_stmt (last_stmt);
gimple *pattern_stmt = vect_try_gather_scatter_pattern (last_stmt,
last_stmt_info,
- type_in, type_out);
+ type_out);
if (pattern_stmt)
stmts->safe_push (last_stmt);
return pattern_stmt;
}
+typedef gimple *(*vect_recog_func_ptr) (vec<gimple *> *, tree *);
+
+struct vect_recog_func
+{
+ vect_recog_func_ptr fn;
+ const char *name;
+};
+
+/* Note that ordering matters - the first pattern matching on a stmt is
-   taken which means usually the more complex one needs to preceed the
-   less comples onex (widen_sum only after dot_prod or sad for example). */
+static vect_recog_func vect_vect_recog_func_ptrs[] = {
+ { vect_recog_widen_mult_pattern, "widen_mult" },
+ { vect_recog_dot_prod_pattern, "dot_prod" },
+ { vect_recog_sad_pattern, "sad" },
+ { vect_recog_widen_sum_pattern, "widen_sum" },
+ { vect_recog_pow_pattern, "pow" },
+ { vect_recog_widen_shift_pattern, "widen_shift" },
+ { vect_recog_over_widening_pattern, "over_widening" },
+ { vect_recog_rotate_pattern, "rotate" },
+ { vect_recog_vector_vector_shift_pattern, "vector_vector_shift" },
+ { vect_recog_divmod_pattern, "divmod" },
+ { vect_recog_mult_pattern, "mult" },
+ { vect_recog_mixed_size_cond_pattern, "mixed_size_cond" },
+ { vect_recog_bool_pattern, "bool" },
+ /* This must come before mask conversion, and includes the parts
+ of mask conversion that are needed for gather and scatter
+ internal functions. */
+ { vect_recog_gather_scatter_pattern, "gather_scatter" },
+ { vect_recog_mask_conversion_pattern, "mask_conversion" }
+};
+
+const unsigned int NUM_PATTERNS = ARRAY_SIZE (vect_vect_recog_func_ptrs);
+
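Deriving NUM_PATTERNS from the table keeps the driver in sync automatically when entries are added. Purely as a sketch, and assuming the unchanged driver's use of vect_pattern_recog_1, SI and STMTS_TO_REPLACE (not shown in this hunk), the walk over the table looks something like:

  /* Try each recognizer in priority order on the statement at SI,
     stopping at the first one that produces a pattern statement.  */
  for (unsigned int j = 0; j < NUM_PATTERNS; j++)
    if (vect_pattern_recog_1 (&vect_vect_recog_func_ptrs[j], si,
			      &stmts_to_replace))
      break;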
/* Mark statements that are involved in a pattern. */
static inline void
computation pattern.
STMT: A stmt from which the pattern search should start.
- If PATTERN_RECOG_FUNC successfully detected the pattern, it creates an
- expression that computes the same functionality and can be used to
- replace the sequence of stmts that are involved in the pattern.
-
- Output:
- This function checks if the expression returned by PATTERN_RECOG_FUNC is
- supported in vector form by the target. We use 'TYPE_IN' to obtain the
- relevant vector type. If 'TYPE_IN' is already a vector type, then this
- indicates that target support had already been checked by PATTERN_RECOG_FUNC.
- If 'TYPE_OUT' is also returned by PATTERN_RECOG_FUNC, we check that it fits
- to the available target pattern.
+ If PATTERN_RECOG_FUNC successfully detected the pattern, it creates
+ a sequence of statements that has the same functionality and can be
+ used to replace STMT. It returns the last statement in the sequence
+ and adds any earlier statements to STMT's STMT_VINFO_PATTERN_DEF_SEQ.
+ PATTERN_RECOG_FUNC also sets *TYPE_OUT to the vector type of the final
+ statement, having first checked that the target supports the new operation
+ in that type.
This function also does some bookkeeping, as explained in the documentation
for vect_recog_pattern. */
stmt_vec_info stmt_info;
loop_vec_info loop_vinfo;
tree pattern_vectype;
- tree type_in, type_out;
- enum tree_code code;
int i;
stmts_to_replace->truncate (0);
stmts_to_replace->quick_push (stmt);
- pattern_stmt = recog_func->fn (stmts_to_replace, &type_in, &type_out);
+ pattern_stmt = recog_func->fn (stmts_to_replace, &pattern_vectype);
if (!pattern_stmt)
{
/* Clear related stmt info that analysis might have noted for
stmt = stmts_to_replace->last ();
stmt_info = vinfo_for_stmt (stmt);
loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ gcc_assert (pattern_vectype);
- if (VECTOR_BOOLEAN_TYPE_P (type_in)
- || VECTOR_TYPE_P (type_in))
- {
- /* No need to check target support (already checked by the pattern
- recognition function). */
- pattern_vectype = type_out ? type_out : type_in;
- }
- else
- {
- /* Check target support */
- type_in = get_vectype_for_scalar_type (type_in);
- if (!type_in)
- return false;
- if (type_out)
- type_out = get_vectype_for_scalar_type (type_out);
- else
- type_out = type_in;
- if (!type_out)
- return false;
- pattern_vectype = type_out;
-
- if (is_gimple_assign (pattern_stmt))
- {
- enum insn_code icode;
- code = gimple_assign_rhs_code (pattern_stmt);
- optab optab = optab_for_tree_code (code, type_in, optab_default);
- machine_mode vec_mode = TYPE_MODE (type_in);
- if (!optab
- || (icode = optab_handler (optab, vec_mode)) == CODE_FOR_nothing
- || (insn_data[icode].operand[0].mode != TYPE_MODE (type_out)))
- return false;
- }
- else
- gcc_assert (is_gimple_call (pattern_stmt));
- }
-
/* Found a vectorizable pattern. */
if (dump_enabled_p ())
{