/* Statement Analysis and Transformation for Vectorization
- Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
+ Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
Free Software Foundation, Inc.
Contributed by Dorit Naishlos <dorit@il.ibm.com>
and Ira Rosen <irar@il.ibm.com>
vect_name = make_ssa_name (vect, new_stmt);
gimple_assign_set_lhs (new_stmt, vect_name);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
- mark_symbols_for_renaming (new_stmt);
return vect_name;
}
new_stmt = gimple_build_assign (array_ref, vect);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
- mark_symbols_for_renaming (new_stmt);
}
/* PTR is a pointer to an array of type TYPE. Return a representation
static tree
create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
{
- struct ptr_info_def *pi;
tree mem_ref, alias_ptr_type;
alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
/* Arrays have the same alignment as their type. */
- pi = get_ptr_info (ptr);
- pi->align = TYPE_ALIGN_UNIT (type);
- pi->misalign = 0;
+ set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
return mem_ref;
}
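/* Editorial sketch, not part of the patch: the new set_ptr_info_alignment
   call bundles the two field stores the removed lines did by hand.  A
   minimal version of such a helper, assuming it only records the pair:

     static void
     set_ptr_info_alignment_sketch (struct ptr_info_def *pi,
                                    unsigned int align,
                                    unsigned int misalign)
     {
       pi->align = align;        /- alignment in bytes, a power of two -/
       pi->misalign = misalign;  /- known offset from that alignment -/
     }

   Routing all writers through one helper keeps ALIGN and MISALIGN
   consistent instead of letting callers set one without the other.  */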
use_operand_p use_p;
gimple use_stmt;
tree lhs;
+ loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
if (is_gimple_assign (stmt))
lhs = gimple_assign_lhs (stmt);
continue;
use_stmt = USE_STMT (use_p);
+ if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
+ continue;
+
if (vinfo_for_stmt (use_stmt)
&& STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
{
- LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
that defined USE. This is done by calling mark_relevant and passing it
the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
+ - FORCE is true if the exist_non_indexing_operands_for_use_p check
+ shouldn't be performed.
Outputs:
Generally, LIVE_P and RELEVANT are used to define the liveness and
static bool
process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
- enum vect_relevant relevant, VEC(gimple,heap) **worklist)
+ enum vect_relevant relevant, VEC(gimple,heap) **worklist,
+ bool force)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
/* case 1: we are only interested in uses that need to be vectorized. Uses
that are used for address computation are not considered relevant. */
- if (!exist_non_indexing_operands_for_use_p (use, stmt))
+ if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
return true;
- if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
+ if (!vect_is_simple_use (use, stmt, loop_vinfo, NULL, &def_stmt, &def, &dt))
{
if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
break;
}
- if (is_pattern_stmt_p (vinfo_for_stmt (stmt)))
+ if (is_pattern_stmt_p (stmt_vinfo))
{
/* Pattern statements are not inserted into the code, so
FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
{
if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
- live_p, relevant, &worklist)
+ live_p, relevant, &worklist, false)
|| !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
- live_p, relevant, &worklist))
+ live_p, relevant, &worklist, false))
{
VEC_free (gimple, heap, worklist);
return false;
{
op = gimple_op (stmt, i);
if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
- &worklist))
+ &worklist, false))
{
VEC_free (gimple, heap, worklist);
return false;
{
tree arg = gimple_call_arg (stmt, i);
if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
- &worklist))
+ &worklist, false))
{
VEC_free (gimple, heap, worklist);
return false;
{
tree op = USE_FROM_PTR (use_p);
if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
- &worklist))
+ &worklist, false))
{
VEC_free (gimple, heap, worklist);
return false;
}
}
+
+ if (STMT_VINFO_GATHER_P (stmt_vinfo))
+ {
+ tree off;
+ tree decl = vect_check_gather (stmt, loop_vinfo, NULL, &off, NULL);
+ gcc_assert (decl);
+ if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
+ &worklist, true))
+ {
+ VEC_free (gimple, heap, worklist);
+ return false;
+ }
+ }
} /* while worklist */
VEC_free (gimple, heap, worklist);
}
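/* Editorial note, not part of the patch: the STMT_VINFO_GATHER_P hunk
   above passes FORCE = true because a gather offset is exactly the kind
   of operand exist_non_indexing_operands_for_use_p rejects -- it only
   feeds address computation -- yet it still has to be vectorized to feed
   the gather instruction.  Illustrative loop:

     for (i = 0; i < n; i++)
       a[i] = b[idx[i]];        <- idx[i] is the gather offset

   idx[i] never appears outside an index, but the vectorized loop needs a
   whole vector of offsets per gather.  */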
-/* Function vect_cost_strided_group_size
+/* Model cost for type demotion and promotion operations. PWR is normally
+ zero for single-step promotions and demotions. It will be one if
+ two-step promotion/demotion is required, and so on. Each additional
+ step doubles the number of instructions required. */
+
+static void
+vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
+ enum vect_def_type *dt, int pwr)
+{
+ int i, tmp;
+ int inside_cost = 0, outside_cost = 0, single_stmt_cost;
+
+ /* The SLP costs were already calculated during SLP tree build. */
+ if (PURE_SLP_STMT (stmt_info))
+ return;
+
+ single_stmt_cost = vect_get_stmt_cost (vec_promote_demote);
+ for (i = 0; i < pwr + 1; i++)
+ {
+ tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
+ (i + 1) : i;
+ inside_cost += vect_pow2 (tmp) * single_stmt_cost;
+ }
+
+ /* FORNOW: Assuming a maximum of 2 args per stmt. */
+ for (i = 0; i < 2; i++)
+ {
+ if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
+ outside_cost += vect_get_stmt_cost (vector_stmt);
+ }
+
+ if (vect_print_dump_info (REPORT_COST))
+ fprintf (vect_dump, "vect_model_promotion_demotion_cost: inside_cost = %d, "
+ "outside_cost = %d .", inside_cost, outside_cost);
+
+ /* Set the costs in STMT_INFO. */
+ stmt_vinfo_set_inside_of_loop_cost (stmt_info, NULL, inside_cost);
+ stmt_vinfo_set_outside_of_loop_cost (stmt_info, NULL, outside_cost);
+}
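/* Editorial example, not part of the patch: with
   vect_get_stmt_cost (vec_promote_demote) == C, a single-step demotion
   (PWR == 0) is costed at vect_pow2 (0) * C == C, one pack stmt; a
   single-step promotion at vect_pow2 (1) * C == 2 * C, the lo/hi unpack
   pair; and a two-step promotion (PWR == 1) at
   (vect_pow2 (1) + vect_pow2 (2)) * C == 6 * C, since the second step
   widens both halves produced by the first.  */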
- For strided load or store, return the group_size only if it is the first
+/* Function vect_cost_group_size
+
+ For grouped load or store, return the group_size only if it is the first
load or store of a group, else return 1. This ensures that group size is
only returned once per group. */
static int
-vect_cost_strided_group_size (stmt_vec_info stmt_info)
+vect_cost_group_size (stmt_vec_info stmt_info)
{
gimple first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
/* Function vect_model_store_cost
- Models cost for stores. In the case of strided accesses, one access
- has the overhead of the strided access attributed to it. */
+ Models cost for stores. In the case of grouped accesses, one access
+ has the overhead of the grouped access attributed to it. */
void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
if (dt == vect_constant_def || dt == vect_external_def)
outside_cost = vect_get_stmt_cost (scalar_to_vec);
- /* Strided access? */
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
+ /* Grouped access? */
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
{
if (slp_node)
{
else
{
first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
- group_size = vect_cost_strided_group_size (stmt_info);
+ group_size = vect_cost_group_size (stmt_info);
}
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
}
- /* Not a strided access. */
+ /* Not a grouped access. */
else
{
group_size = 1;
}
/* We assume that the cost of a single store-lanes instruction is
- equivalent to the cost of GROUP_SIZE separate stores. If a strided
+ equivalent to the cost of GROUP_SIZE separate stores. If a grouped
access is instead being provided by a permute-and-store operation,
include the cost of the permutes. */
if (!store_lanes_p && group_size > 1)
{
/* Uses a high and low interleave operation for each needed permute. */
inside_cost = ncopies * exact_log2(group_size) * group_size
- * vect_get_stmt_cost (vector_stmt);
+ * vect_get_stmt_cost (vec_perm);
if (vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
group_size);
-
}
/* Costs of the stores. */
/* Function vect_model_load_cost
- Models cost for loads. In the case of strided accesses, the last access
- has the overhead of the strided access attributed to it. Since unaligned
+ Models cost for loads. In the case of grouped accesses, the last access
+ has the overhead of the grouped access attributed to it. Since unaligned
accesses are supported for loads, we also account for the costs of the
access scheme chosen. */
if (PURE_SLP_STMT (stmt_info))
return;
- /* Strided accesses? */
+ /* Grouped accesses? */
first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && first_stmt && !slp_node)
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
{
- group_size = vect_cost_strided_group_size (stmt_info);
+ group_size = vect_cost_group_size (stmt_info);
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
}
- /* Not a strided access. */
+ /* Not a grouped access. */
else
{
group_size = 1;
}
/* We assume that the cost of a single load-lanes instruction is
- equivalent to the cost of GROUP_SIZE separate loads. If a strided
+ equivalent to the cost of GROUP_SIZE separate loads. If a grouped
access is instead being provided by a load-and-permute operation,
include the cost of the permutes. */
if (!load_lanes_p && group_size > 1)
{
/* Uses even and odd extract operations for each needed permute. */
inside_cost = ncopies * exact_log2(group_size) * group_size
- * vect_get_stmt_cost (vector_stmt);
+ * vect_get_stmt_cost (vec_perm);
if (vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
}
/* The loads themselves. */
- vect_get_load_cost (first_dr, ncopies,
- ((!STMT_VINFO_STRIDED_ACCESS (stmt_info)) || group_size > 1
- || slp_node),
- &inside_cost, &outside_cost);
+ if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
+ {
+ /* N scalar loads plus gathering them into a vector.
+ ??? scalar_to_vec isn't the cost for that. */
+ inside_cost += (vect_get_stmt_cost (scalar_load) * ncopies
+ * TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)));
+ inside_cost += ncopies * vect_get_stmt_cost (scalar_to_vec);
+ }
+ else
+ vect_get_load_cost (first_dr, ncopies,
+ ((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
+ || group_size > 1 || slp_node),
+ &inside_cost, &outside_cost);
if (vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
case dr_explicit_realign:
{
*inside_cost += ncopies * (2 * vect_get_stmt_cost (vector_load)
- + vect_get_stmt_cost (vector_stmt));
+ + vect_get_stmt_cost (vec_perm));
/* FIXME: If the misalignment remains fixed across the iterations of
the containing loop, the following cost should be added to the
if (targetm.vectorize.builtin_mask_for_load)
*inside_cost += vect_get_stmt_cost (vector_stmt);
+ if (vect_print_dump_info (REPORT_COST))
+ fprintf (vect_dump, "vect_model_load_cost: explicit realign");
+
break;
}
case dr_explicit_realign_optimized:
/* Unaligned software pipeline has a load of an address, an initial
load, and possibly a mask operation to "prime" the loop. However,
- if this is an access in a group of loads, which provide strided
+ if this is an access in a group of loads, which provide grouped
access, then the above cost should only be considered for one
access in the group. Inside the loop, there is a load op
and a realignment op. */
}
*inside_cost += ncopies * (vect_get_stmt_cost (vector_load)
- + vect_get_stmt_cost (vector_stmt));
+ + vect_get_stmt_cost (vec_perm));
+
+ if (vect_print_dump_info (REPORT_COST))
+ fprintf (vect_dump,
+ "vect_model_load_cost: explicit realign optimized");
+
break;
}
}
}
+/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
+ the loop preheader for the vectorized stmt STMT. */
-/* Function vect_init_vector.
-
- Insert a new stmt (INIT_STMT) that initializes a new vector variable with
- the vector elements of VECTOR_VAR. Place the initialization at BSI if it
- is not NULL. Otherwise, place the initialization at the loop preheader.
- Return the DEF of INIT_STMT.
- It will be used in the vectorization of STMT. */
-
-tree
-vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
- gimple_stmt_iterator *gsi)
+static void
+vect_init_vector_1 (gimple stmt, gimple new_stmt, gimple_stmt_iterator *gsi)
{
- stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
- tree new_var;
- gimple init_stmt;
- tree vec_oprnd;
- edge pe;
- tree new_temp;
- basic_block new_bb;
-
- new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
- add_referenced_var (new_var);
- init_stmt = gimple_build_assign (new_var, vector_var);
- new_temp = make_ssa_name (new_var, init_stmt);
- gimple_assign_set_lhs (init_stmt, new_temp);
-
if (gsi)
- vect_finish_stmt_generation (stmt, init_stmt, gsi);
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
else
{
+ stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
if (loop_vinfo)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ basic_block new_bb;
+ edge pe;
if (nested_in_vect_loop_p (loop, stmt))
loop = loop->inner;
pe = loop_preheader_edge (loop);
- new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
+ new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
gcc_assert (!new_bb);
}
else
gcc_assert (bb_vinfo);
bb = BB_VINFO_BB (bb_vinfo);
gsi_bb_start = gsi_after_labels (bb);
- gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
+ gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
}
}
if (vect_print_dump_info (REPORT_DETAILS))
{
fprintf (vect_dump, "created new init_stmt: ");
- print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
+ print_gimple_stmt (vect_dump, new_stmt, 0, TDF_SLIM);
+ }
+}
+
+/* Function vect_init_vector.
+
+ Insert a new stmt (INIT_STMT) that initializes a new variable of type
+ TYPE with the value VAL. If TYPE is a vector type and VAL does not have
+ a vector type, a vector with all elements equal to VAL is created first.
+ Place the initialization at BSI if it is not NULL. Otherwise, place the
+ initialization at the loop preheader.
+ Return the DEF of INIT_STMT.
+ It will be used in the vectorization of STMT. */
+
+tree
+vect_init_vector (gimple stmt, tree val, tree type, gimple_stmt_iterator *gsi)
+{
+ tree new_var;
+ gimple init_stmt;
+ tree vec_oprnd;
+ tree new_temp;
+
+ if (TREE_CODE (type) == VECTOR_TYPE
+ && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
+ {
+ if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
+ {
+ if (CONSTANT_CLASS_P (val))
+ val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (type), val);
+ else
+ {
+ new_var = create_tmp_reg (TREE_TYPE (type), NULL);
+ add_referenced_var (new_var);
+ init_stmt = gimple_build_assign_with_ops (NOP_EXPR,
+ new_var, val,
+ NULL_TREE);
+ new_temp = make_ssa_name (new_var, init_stmt);
+ gimple_assign_set_lhs (init_stmt, new_temp);
+ vect_init_vector_1 (stmt, init_stmt, gsi);
+ val = new_temp;
+ }
+ }
+ val = build_vector_from_val (type, val);
}
+ new_var = vect_get_new_vect_var (type, vect_simple_var, "cst_");
+ add_referenced_var (new_var);
+ init_stmt = gimple_build_assign (new_var, val);
+ new_temp = make_ssa_name (new_var, init_stmt);
+ gimple_assign_set_lhs (init_stmt, new_temp);
+ vect_init_vector_1 (stmt, init_stmt, gsi);
vec_oprnd = gimple_assign_lhs (init_stmt);
return vec_oprnd;
}
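/* Editorial example, not part of the patch: for a loop-invariant int
   value VAL vectorized as V4SI, vect_init_vector now emits in the
   preheader gimple along the lines of

     val.0_1 = (int) val_2;        <- only if a conversion is needed
     cst_.3 = {val.0_1, val.0_1, val.0_1, val.0_1};

   and returns cst_.3, so callers (cases 1 and 2 below) no longer build
   the VECTOR_CST or CONSTRUCTOR themselves.  */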
stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
unsigned int nunits;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
- tree vec_inv;
- tree vec_cst;
- tree t = NULL_TREE;
tree def;
- int i;
enum vect_def_type dt;
bool is_simple_use;
tree vector_type;
print_generic_expr (vect_dump, op, TDF_SLIM);
}
- is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
- &dt);
+ is_simple_use = vect_is_simple_use (op, stmt, loop_vinfo, NULL,
+ &def_stmt, &def, &dt);
gcc_assert (is_simple_use);
if (vect_print_dump_info (REPORT_DETAILS))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);
- vec_cst = build_vector_from_val (vector_type,
- fold_convert (TREE_TYPE (vector_type),
- op));
- return vect_init_vector (stmt, vec_cst, vector_type, NULL);
+ return vect_init_vector (stmt, op, vector_type, NULL);
}
/* Case 2: operand is defined outside the loop - loop invariant. */
{
vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
gcc_assert (vector_type);
- nunits = TYPE_VECTOR_SUBPARTS (vector_type);
if (scalar_def)
*scalar_def = def;
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "Create vector_inv.");
- for (i = nunits - 1; i >= 0; --i)
- {
- t = tree_cons (NULL_TREE, def, t);
- }
-
- /* FIXME: use build_constructor directly. */
- vec_inv = build_constructor_from_list (vector_type, t);
- return vect_init_vector (stmt, vec_inv, vector_type, NULL);
+ return vect_init_vector (stmt, def, vector_type, NULL);
}
/* Case 3: operand is defined inside the loop. */
Return FALSE if not a vectorizable STMT, TRUE otherwise. */
static bool
-vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
+vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
+ slp_tree slp_node)
{
tree vec_dest;
tree scalar_dest;
int nunits_in;
int nunits_out;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
tree fndecl, new_temp, def, rhs_type;
gimple def_stmt;
enum vect_def_type dt[3]
size_t i, nargs;
tree lhs;
- /* FORNOW: unsupported in basic block SLP. */
- gcc_assert (loop_vinfo);
-
- if (!STMT_VINFO_RELEVANT_P (stmt_info))
+ if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
return false;
if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
return false;
- /* FORNOW: SLP not supported. */
- if (STMT_SLP_TYPE (stmt_info))
- return false;
-
/* Is STMT a vectorizable call? */
if (!is_gimple_call (stmt))
return false;
if (!rhs_type)
rhs_type = TREE_TYPE (op);
- if (!vect_is_simple_use_1 (op, loop_vinfo, NULL,
+ if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
&def_stmt, &def, &dt[i], &opvectype))
{
if (vect_print_dump_info (REPORT_DETAILS))
gcc_assert (!gimple_vuse (stmt));
- if (modifier == NARROW)
+ if (slp_node || PURE_SLP_STMT (stmt_info))
+ ncopies = 1;
+ else if (modifier == NARROW)
ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
else
ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
else
VEC_truncate (tree, vargs, 0);
+ if (slp_node)
+ {
+ VEC (slp_void_p, heap) *vec_defs
+ = VEC_alloc (slp_void_p, heap, nargs);
+ VEC (tree, heap) *vec_oprnds0;
+
+ for (i = 0; i < nargs; i++)
+ VEC_quick_push (tree, vargs, gimple_call_arg (stmt, i));
+ vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
+ vec_oprnds0
+ = (VEC (tree, heap) *) VEC_index (slp_void_p, vec_defs, 0);
+
+ /* Arguments are ready. Create the new vector stmt. */
+ FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vec_oprnd0)
+ {
+ size_t k;
+ for (k = 0; k < nargs; k++)
+ {
+ VEC (tree, heap) *vec_oprndsk
+ = (VEC (tree, heap) *)
+ VEC_index (slp_void_p, vec_defs, k);
+ VEC_replace (tree, vargs, k,
+ VEC_index (tree, vec_oprndsk, i));
+ }
+ new_stmt = gimple_build_call_vec (fndecl, vargs);
+ new_temp = make_ssa_name (vec_dest, new_stmt);
+ gimple_call_set_lhs (new_stmt, new_temp);
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
+ new_stmt);
+ }
+
+ for (i = 0; i < nargs; i++)
+ {
+ VEC (tree, heap) *vec_oprndsi
+ = (VEC (tree, heap) *)
+ VEC_index (slp_void_p, vec_defs, i);
+ VEC_free (tree, heap, vec_oprndsi);
+ }
+ VEC_free (slp_void_p, heap, vec_defs);
+ continue;
+ }
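/* Editorial note, not part of the patch: vect_get_slp_defs fills
   VEC_DEFS with one defs vector per call argument, so entry K lists the
   vectorized defs of argument K across all vector stmts.  The loop above
   transposes that layout: vector stmt I takes VEC_DEFS[K][I] as its K-th
   argument.  The NARROW copy of this block further below consumes the
   defs in pairs (I, I + 1) instead.  */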
+
for (i = 0; i < nargs; i++)
{
op = gimple_call_arg (stmt, i);
new_stmt = gimple_build_call_vec (fndecl, vargs);
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_call_set_lhs (new_stmt, new_temp);
-
vect_finish_stmt_generation (stmt, new_stmt, gsi);
- mark_symbols_for_renaming (new_stmt);
if (j == 0)
STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
else
VEC_truncate (tree, vargs, 0);
+ if (slp_node)
+ {
+ VEC (slp_void_p, heap) *vec_defs
+ = VEC_alloc (slp_void_p, heap, nargs);
+ VEC (tree, heap) *vec_oprnds0;
+
+ for (i = 0; i < nargs; i++)
+ VEC_quick_push (tree, vargs, gimple_call_arg (stmt, i));
+ vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
+ vec_oprnds0
+ = (VEC (tree, heap) *) VEC_index (slp_void_p, vec_defs, 0);
+
+ /* Arguments are ready. Create the new vector stmt. */
+ for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vec_oprnd0);
+ i += 2)
+ {
+ size_t k;
+ VEC_truncate (tree, vargs, 0);
+ for (k = 0; k < nargs; k++)
+ {
+ VEC (tree, heap) *vec_oprndsk
+ = (VEC (tree, heap) *)
+ VEC_index (slp_void_p, vec_defs, k);
+ VEC_quick_push (tree, vargs,
+ VEC_index (tree, vec_oprndsk, i));
+ VEC_quick_push (tree, vargs,
+ VEC_index (tree, vec_oprndsk, i + 1));
+ }
+ new_stmt = gimple_build_call_vec (fndecl, vargs);
+ new_temp = make_ssa_name (vec_dest, new_stmt);
+ gimple_call_set_lhs (new_stmt, new_temp);
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
+ new_stmt);
+ }
+
+ for (i = 0; i < nargs; i++)
+ {
+ VEC (tree, heap) *vec_oprndsi
+ = (VEC (tree, heap) *)
+ VEC_index (slp_void_p, vec_defs, i);
+ VEC_free (tree, heap, vec_oprndsi);
+ }
+ VEC_free (slp_void_p, heap, vec_defs);
+ continue;
+ }
+
for (i = 0; i < nargs; i++)
{
op = gimple_call_arg (stmt, i);
new_stmt = gimple_build_call_vec (fndecl, vargs);
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_call_set_lhs (new_stmt, new_temp);
-
vect_finish_stmt_generation (stmt, new_stmt, gsi);
- mark_symbols_for_renaming (new_stmt);
if (j == 0)
STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
it defines is mapped to the new definition. So just replace
rhs of the statement with something harmless. */
+ if (slp_node)
+ return true;
+
type = TREE_TYPE (scalar_dest);
if (is_pattern_stmt_p (stmt_info))
lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
return new_stmt;
}
+
+/* Get vectorized definitions for loop-based vectorization. For the first
+ operand we call vect_get_vec_def_for_operand() (with OPRND containing
+ scalar operand), and for the rest we get a copy with
+ vect_get_vec_def_for_stmt_copy() using the previous vector definition
+ (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
+ The vectors are collected into VEC_OPRNDS. */
+
+static void
+vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
+ VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
+{
+ tree vec_oprnd;
+
+ /* Get first vector operand. */
+ /* All the vector operands except the very first one (that is the scalar
+ operand) are stmt copies. */
+ if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
+ vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
+ else
+ vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
+
+ VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
+
+ /* Get second vector operand. */
+ vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
+ VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
+
+ *oprnd = vec_oprnd;
+
+ /* For conversion in multiple steps, continue to get operands
+ recursively. */
+ if (multi_step_cvt)
+ vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
+}
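/* Editorial example, not part of the patch: each invocation pushes two
   defs and recurses MULTI_STEP_CVT more times, so the NARROW path below,
   which calls this with vect_pow2 (multi_step_cvt) - 1, collects
   2 * vect_pow2 (multi_step_cvt) input vectors -- e.g. four vectors for
   a two-step demotion, exactly what two rounds of pairwise packing
   reduce to a single result vector.  */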
+
+
+/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
+ For multi-step conversions store the resulting vectors and call the function
+ recursively. */
+
+static void
+vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
+ int multi_step_cvt, gimple stmt,
+ VEC (tree, heap) *vec_dsts,
+ gimple_stmt_iterator *gsi,
+ slp_tree slp_node, enum tree_code code,
+ stmt_vec_info *prev_stmt_info)
+{
+ unsigned int i;
+ tree vop0, vop1, new_tmp, vec_dest;
+ gimple new_stmt;
+ stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+
+ vec_dest = VEC_pop (tree, vec_dsts);
+
+ for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
+ {
+ /* Create demotion operation. */
+ vop0 = VEC_index (tree, *vec_oprnds, i);
+ vop1 = VEC_index (tree, *vec_oprnds, i + 1);
+ new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
+ new_tmp = make_ssa_name (vec_dest, new_stmt);
+ gimple_assign_set_lhs (new_stmt, new_tmp);
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+
+ if (multi_step_cvt)
+ /* Store the resulting vector for next recursive call. */
+ VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
+ else
+ {
+ /* This is the last step of the conversion sequence. Store the
+ vectors in SLP_NODE or in vector info of the scalar statement
+ (or in STMT_VINFO_RELATED_STMT chain). */
+ if (slp_node)
+ VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
+ else
+ {
+ if (!*prev_stmt_info)
+ STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
+ else
+ STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
+
+ *prev_stmt_info = vinfo_for_stmt (new_stmt);
+ }
+ }
+ }
+
+ /* For multi-step demotion operations we first generate demotion operations
+ from the source type to the intermediate types, and then combine the
+ results (stored in VEC_OPRNDS) in a demotion operation to the destination
+ type. */
+ if (multi_step_cvt)
+ {
+ /* At each level of recursion we have half of the operands we had at the
+ previous level. */
+ VEC_truncate (tree, *vec_oprnds, (i+1)/2);
+ vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
+ stmt, vec_dsts, gsi, slp_node,
+ VEC_PACK_TRUNC_EXPR,
+ prev_stmt_info);
+ }
+
+ VEC_quick_push (tree, vec_dsts, vec_dest);
+}
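/* Editorial example, not part of the patch: demoting V4SI to V16QI with
   MULTI_STEP_CVT == 1, the first level packs four int vectors pairwise
   into two V8HI vectors using CODE, and the recursive call packs those
   into the one V16QI result; intermediate levels always pack with
   VEC_PACK_TRUNC_EXPR, which is why the recursion passes that code
   explicitly.  */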
+
+
+/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
+ and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
+ the resulting vectors and call the function recursively. */
+
+static void
+vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
+ VEC (tree, heap) **vec_oprnds1,
+ gimple stmt, tree vec_dest,
+ gimple_stmt_iterator *gsi,
+ enum tree_code code1,
+ enum tree_code code2, tree decl1,
+ tree decl2, int op_type)
+{
+ int i;
+ tree vop0, vop1, new_tmp1, new_tmp2;
+ gimple new_stmt1, new_stmt2;
+ VEC (tree, heap) *vec_tmp = NULL;
+
+ vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
+ FOR_EACH_VEC_ELT (tree, *vec_oprnds0, i, vop0)
+ {
+ if (op_type == binary_op)
+ vop1 = VEC_index (tree, *vec_oprnds1, i);
+ else
+ vop1 = NULL_TREE;
+
+ /* Generate the two halves of promotion operation. */
+ new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
+ op_type, vec_dest, gsi, stmt);
+ new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
+ op_type, vec_dest, gsi, stmt);
+ if (is_gimple_call (new_stmt1))
+ {
+ new_tmp1 = gimple_call_lhs (new_stmt1);
+ new_tmp2 = gimple_call_lhs (new_stmt2);
+ }
+ else
+ {
+ new_tmp1 = gimple_assign_lhs (new_stmt1);
+ new_tmp2 = gimple_assign_lhs (new_stmt2);
+ }
+
+ /* Store the results for the next step. */
+ VEC_quick_push (tree, vec_tmp, new_tmp1);
+ VEC_quick_push (tree, vec_tmp, new_tmp2);
+ }
+
+ VEC_free (tree, heap, *vec_oprnds0);
+ *vec_oprnds0 = vec_tmp;
+}
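/* Editorial example, not part of the patch: each call doubles the vector
   count -- promoting V16QI to V4SI, one char vector becomes two V8HI
   vectors via the CODE1/CODE2 lo/hi halves, and the caller's next
   iteration turns those two into four V4SI vectors.  Unlike the removed
   vect_create_vectorized_promotion_stmts further below, this version
   does not recurse; the multi-step loop in vectorizable_conversion
   drives it once per level.  */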
+
+
/* Check if STMT performs a conversion operation, that can be vectorized.
If VEC_STMT is also passed, vectorize the STMT: create a vectorized
- stmt to replace it, put it in VEC_STMT, and insert it at BSI.
+ stmt to replace it, put it in VEC_STMT, and insert it at GSI.
Return FALSE if not a vectorizable STMT, TRUE otherwise. */
static bool
{
tree vec_dest;
tree scalar_dest;
- tree op0;
+ tree op0, op1 = NULL_TREE;
tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
+ enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
tree decl1 = NULL_TREE, decl2 = NULL_TREE;
tree new_temp;
tree def;
int nunits_in;
int nunits_out;
tree vectype_out, vectype_in;
- int ncopies, j;
- tree rhs_type;
+ int ncopies, i, j;
+ tree lhs_type, rhs_type;
enum { NARROW, NONE, WIDEN } modifier;
- int i;
- VEC(tree,heap) *vec_oprnds0 = NULL;
+ VEC (tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
tree vop0;
- VEC(tree,heap) *dummy = NULL;
- int dummy_int;
+ bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
+ int multi_step_cvt = 0;
+ VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL;
+ tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
+ int op_type;
+ enum machine_mode rhs_mode;
+ unsigned short fltsz;
/* Is STMT a vectorizable conversion? */
- /* FORNOW: unsupported in basic block SLP. */
- gcc_assert (loop_vinfo);
-
- if (!STMT_VINFO_RELEVANT_P (stmt_info))
+ if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
return false;
if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
return false;
code = gimple_assign_rhs_code (stmt);
- if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
+ if (!CONVERT_EXPR_CODE_P (code)
+ && code != FIX_TRUNC_EXPR
+ && code != FLOAT_EXPR
+ && code != WIDEN_MULT_EXPR
+ && code != WIDEN_LSHIFT_EXPR)
return false;
+ op_type = TREE_CODE_LENGTH (code);
+
/* Check types of lhs and rhs. */
scalar_dest = gimple_assign_lhs (stmt);
+ lhs_type = TREE_TYPE (scalar_dest);
vectype_out = STMT_VINFO_VECTYPE (stmt_info);
op0 = gimple_assign_rhs1 (stmt);
rhs_type = TREE_TYPE (op0);
+
+ if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
+ && !((INTEGRAL_TYPE_P (lhs_type)
+ && INTEGRAL_TYPE_P (rhs_type))
+ || (SCALAR_FLOAT_TYPE_P (lhs_type)
+ && SCALAR_FLOAT_TYPE_P (rhs_type))))
+ return false;
+
+ if ((INTEGRAL_TYPE_P (lhs_type)
+ && (TYPE_PRECISION (lhs_type)
+ != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
+ || (INTEGRAL_TYPE_P (rhs_type)
+ && (TYPE_PRECISION (rhs_type)
+ != GET_MODE_PRECISION (TYPE_MODE (rhs_type)))))
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump,
+ "type conversion to/from bit-precision unsupported.");
+ return false;
+ }
+
/* Check the operands of the operation. */
- if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
+ if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
&def_stmt, &def, &dt[0], &vectype_in))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "use not simple.");
return false;
}
+ if (op_type == binary_op)
+ {
+ bool ok;
+
+ op1 = gimple_assign_rhs2 (stmt);
+ gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
+ /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
+ OP1. */
+ if (CONSTANT_CLASS_P (op0))
+ ok = vect_is_simple_use_1 (op1, stmt, loop_vinfo, bb_vinfo,
+ &def_stmt, &def, &dt[1], &vectype_in);
+ else
+ ok = vect_is_simple_use (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
+ &def, &dt[1]);
+
+ if (!ok)
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "use not simple.");
+ return false;
+ }
+ }
+
/* If op0 is an external or constant defs use a vector type of
the same size as the output vector type. */
if (!vectype_in)
if (!vectype_in)
{
if (vect_print_dump_info (REPORT_DETAILS))
- {
- fprintf (vect_dump, "no vectype for scalar type ");
- print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
- }
+ {
+ fprintf (vect_dump, "no vectype for scalar type ");
+ print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
+ }
return false;
}
- /* FORNOW */
nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
- if (nunits_in == nunits_out / 2)
+ if (nunits_in < nunits_out)
modifier = NARROW;
else if (nunits_out == nunits_in)
modifier = NONE;
- else if (nunits_out == nunits_in / 2)
- modifier = WIDEN;
- else
- return false;
-
- if (modifier == NARROW)
- ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
else
- ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
+ modifier = WIDEN;
/* Multiple types in SLP are handled by creating the appropriate number of
vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
case of SLP. */
if (slp_node || PURE_SLP_STMT (stmt_info))
ncopies = 1;
+ else if (modifier == NARROW)
+ ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
+ else
+ ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
/* Sanity check: make sure that at least one copy of the vectorized stmt
needs to be generated. */
gcc_assert (ncopies >= 1);
/* Supportable by target? */
- if ((modifier == NONE
- && !supportable_convert_operation (code, vectype_out, vectype_in, &decl1, &code1))
- || (modifier == WIDEN
- && !supportable_widening_operation (code, stmt,
- vectype_out, vectype_in,
- &decl1, &decl2,
- &code1, &code2,
- &dummy_int, &dummy))
- || (modifier == NARROW
- && !supportable_narrowing_operation (code, vectype_out, vectype_in,
- &code1, &dummy_int, &dummy)))
+ switch (modifier)
{
+ case NONE:
+ if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
+ return false;
+ if (supportable_convert_operation (code, vectype_out, vectype_in,
+ &decl1, &code1))
+ break;
+ /* FALLTHRU */
+ unsupported:
if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "conversion not supported by target.");
+ fprintf (vect_dump, "conversion not supported by target.");
return false;
- }
- if (modifier != NONE)
- {
- /* FORNOW: SLP not supported. */
- if (STMT_SLP_TYPE (stmt_info))
- return false;
+ case WIDEN:
+ if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
+ &decl1, &decl2, &code1, &code2,
+ &multi_step_cvt, &interm_types))
+ {
+ /* A binary widening operation can only be supported directly by the
+ architecture. */
+ gcc_assert (!(multi_step_cvt && op_type == binary_op));
+ break;
+ }
+
+ if (code != FLOAT_EXPR
+ || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
+ <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
+ goto unsupported;
+
+ rhs_mode = TYPE_MODE (rhs_type);
+ fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
+ for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
+ rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
+ rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
+ {
+ cvt_type
+ = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
+ cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
+ if (cvt_type == NULL_TREE)
+ goto unsupported;
+
+ if (GET_MODE_SIZE (rhs_mode) == fltsz)
+ {
+ if (!supportable_convert_operation (code, vectype_out,
+ cvt_type, &decl1, &codecvt1))
+ goto unsupported;
+ }
+ else if (!supportable_widening_operation (code, stmt, vectype_out,
+ cvt_type, &decl1, &decl2,
+ &codecvt1, &codecvt2,
+ &multi_step_cvt,
+ &interm_types))
+ continue;
+ else
+ gcc_assert (multi_step_cvt == 0);
+
+ if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
+ vectype_in, NULL, NULL, &code1,
+ &code2, &multi_step_cvt,
+ &interm_types))
+ break;
+ }
+
+ if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
+ goto unsupported;
+
+ if (GET_MODE_SIZE (rhs_mode) == fltsz)
+ codecvt2 = ERROR_MARK;
+ else
+ {
+ multi_step_cvt++;
+ VEC_safe_push (tree, heap, interm_types, cvt_type);
+ cvt_type = NULL_TREE;
+ }
+ break;
+
+ case NARROW:
+ gcc_assert (op_type == unary_op);
+ if (supportable_narrowing_operation (code, vectype_out, vectype_in,
+ &code1, &multi_step_cvt,
+ &interm_types))
+ break;
+
+ if (code != FIX_TRUNC_EXPR
+ || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
+ >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
+ goto unsupported;
+
+ rhs_mode = TYPE_MODE (rhs_type);
+ cvt_type
+ = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
+ cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
+ if (cvt_type == NULL_TREE)
+ goto unsupported;
+ if (!supportable_convert_operation (code, cvt_type, vectype_in,
+ &decl1, &codecvt1))
+ goto unsupported;
+ if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
+ &code1, &multi_step_cvt,
+ &interm_types))
+ break;
+ goto unsupported;
+
+ default:
+ gcc_unreachable ();
}
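/* Editorial example, not part of the patch: the WIDEN case lets a
   FLOAT_EXPR the target cannot widen directly go through an integral
   CVT_TYPE.  Assuming a target that can only convert ints to doubles,
   (double) short_var is handled by widening V8HI to V4SI with the NOP
   code1/code2 lo/hi pair and then converting each V4SI to a pair of
   V2DF with the codecvt1/codecvt2 FLOAT halves; the loop above probes
   successively wider integer modes until such a chain exists or FLTSZ
   is exceeded.  */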
if (!vec_stmt) /* transformation not required. */
{
- STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "=== vectorizable_conversion ===");
+ if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
+ {
+ STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
+ vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
+ }
+ else if (modifier == NARROW)
+ {
+ STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
+ vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
+ }
+ else
+ {
+ STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
+ vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
+ }
+ VEC_free (tree, heap, interm_types);
return true;
}
/** Transform. **/
if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "transform conversion.");
+ fprintf (vect_dump, "transform conversion. ncopies = %d.", ncopies);
- /* Handle def. */
- vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
+ if (op_type == binary_op)
+ {
+ if (CONSTANT_CLASS_P (op0))
+ op0 = fold_convert (TREE_TYPE (op1), op0);
+ else if (CONSTANT_CLASS_P (op1))
+ op1 = fold_convert (TREE_TYPE (op0), op1);
+ }
+
+ /* In case of multi-step conversion, we first generate conversion operations
+ to the intermediate types, and then from those types to the final one.
+ We create vector destinations for the intermediate type (TYPES) received
+ from supportable_*_operation, and store them in the correct order
+ for future use in vect_create_vectorized_*_stmts (). */
+ vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
+ vec_dest = vect_create_destination_var (scalar_dest,
+ (cvt_type && modifier == WIDEN)
+ ? cvt_type : vectype_out);
+ VEC_quick_push (tree, vec_dsts, vec_dest);
+
+ if (multi_step_cvt)
+ {
+ for (i = VEC_length (tree, interm_types) - 1;
+ VEC_iterate (tree, interm_types, i, intermediate_type); i--)
+ {
+ vec_dest = vect_create_destination_var (scalar_dest,
+ intermediate_type);
+ VEC_quick_push (tree, vec_dsts, vec_dest);
+ }
+ }
- if (modifier == NONE && !slp_node)
- vec_oprnds0 = VEC_alloc (tree, heap, 1);
+ if (cvt_type)
+ vec_dest = vect_create_destination_var (scalar_dest,
+ modifier == WIDEN
+ ? vectype_out : cvt_type);
+
+ if (!slp_node)
+ {
+ if (modifier == NONE)
+ vec_oprnds0 = VEC_alloc (tree, heap, 1);
+ else if (modifier == WIDEN)
+ {
+ vec_oprnds0 = VEC_alloc (tree, heap,
+ (multi_step_cvt
+ ? vect_pow2 (multi_step_cvt) : 1));
+ if (op_type == binary_op)
+ vec_oprnds1 = VEC_alloc (tree, heap, 1);
+ }
+ else
+ vec_oprnds0 = VEC_alloc (tree, heap,
+ 2 * (multi_step_cvt
+ ? vect_pow2 (multi_step_cvt) : 1));
+ }
+ else if (code == WIDEN_LSHIFT_EXPR)
+ vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
+ last_oprnd = op0;
prev_stmt_info = NULL;
switch (modifier)
{
vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
- {
- /* Arguments are ready, create the new vector stmt. */
- if (code1 == CALL_EXPR)
- {
- new_stmt = gimple_build_call (decl1, 1, vop0);
- new_temp = make_ssa_name (vec_dest, new_stmt);
- gimple_call_set_lhs (new_stmt, new_temp);
- }
- else
- {
- gcc_assert (TREE_CODE_LENGTH (code) == unary_op);
- new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0,
- NULL);
- new_temp = make_ssa_name (vec_dest, new_stmt);
- gimple_assign_set_lhs (new_stmt, new_temp);
- }
+ {
+ /* Arguments are ready, create the new vector stmt. */
+ if (code1 == CALL_EXPR)
+ {
+ new_stmt = gimple_build_call (decl1, 1, vop0);
+ new_temp = make_ssa_name (vec_dest, new_stmt);
+ gimple_call_set_lhs (new_stmt, new_temp);
+ }
+ else
+ {
+ gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
+ new_stmt = gimple_build_assign_with_ops (code1, vec_dest,
+ vop0, NULL);
+ new_temp = make_ssa_name (vec_dest, new_stmt);
+ gimple_assign_set_lhs (new_stmt, new_temp);
+ }
- vect_finish_stmt_generation (stmt, new_stmt, gsi);
- if (slp_node)
- VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
- }
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ if (slp_node)
+ VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
+ new_stmt);
+ }
if (j == 0)
STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
the vector stmt by a factor VF/nunits. */
for (j = 0; j < ncopies; j++)
{
+ /* Handle uses. */
if (j == 0)
- vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
- else
- vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
+ {
+ if (slp_node)
+ {
+ if (code == WIDEN_LSHIFT_EXPR)
+ {
+ unsigned int k;
- /* Generate first half of the widened result: */
- new_stmt
- = vect_gen_widened_results_half (code1, decl1,
- vec_oprnd0, vec_oprnd1,
- unary_op, vec_dest, gsi, stmt);
- if (j == 0)
- STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
+ vec_oprnd1 = op1;
+ /* Store vec_oprnd1 for every vector stmt to be created
+ for SLP_NODE. We check during the analysis that all
+ the shift arguments are the same. */
+ for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
+ VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
+
+ vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
+ slp_node, -1);
+ }
+ else
+ vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
+ &vec_oprnds1, slp_node, -1);
+ }
+ else
+ {
+ vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
+ VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
+ if (op_type == binary_op)
+ {
+ if (code == WIDEN_LSHIFT_EXPR)
+ vec_oprnd1 = op1;
+ else
+ vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt,
+ NULL);
+ VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
+ }
+ }
+ }
else
- STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
- prev_stmt_info = vinfo_for_stmt (new_stmt);
+ {
+ vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
+ VEC_truncate (tree, vec_oprnds0, 0);
+ VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
+ if (op_type == binary_op)
+ {
+ if (code == WIDEN_LSHIFT_EXPR)
+ vec_oprnd1 = op1;
+ else
+ vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
+ vec_oprnd1);
+ VEC_truncate (tree, vec_oprnds1, 0);
+ VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
+ }
+ }
- /* Generate second half of the widened result: */
- new_stmt
- = vect_gen_widened_results_half (code2, decl2,
- vec_oprnd0, vec_oprnd1,
- unary_op, vec_dest, gsi, stmt);
- STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
- prev_stmt_info = vinfo_for_stmt (new_stmt);
+ /* Arguments are ready. Create the new vector stmts. */
+ for (i = multi_step_cvt; i >= 0; i--)
+ {
+ tree this_dest = VEC_index (tree, vec_dsts, i);
+ enum tree_code c1 = code1, c2 = code2;
+ if (i == 0 && codecvt2 != ERROR_MARK)
+ {
+ c1 = codecvt1;
+ c2 = codecvt2;
+ }
+ vect_create_vectorized_promotion_stmts (&vec_oprnds0,
+ &vec_oprnds1,
+ stmt, this_dest, gsi,
+ c1, c2, decl1, decl2,
+ op_type);
+ }
+
+ FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
+ {
+ if (cvt_type)
+ {
+ if (codecvt1 == CALL_EXPR)
+ {
+ new_stmt = gimple_build_call (decl1, 1, vop0);
+ new_temp = make_ssa_name (vec_dest, new_stmt);
+ gimple_call_set_lhs (new_stmt, new_temp);
+ }
+ else
+ {
+ gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
+ new_temp = make_ssa_name (vec_dest, NULL);
+ new_stmt = gimple_build_assign_with_ops (codecvt1,
+ new_temp,
+ vop0, NULL);
+ }
+
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ }
+ else
+ new_stmt = SSA_NAME_DEF_STMT (vop0);
+
+ if (slp_node)
+ VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
+ new_stmt);
+ else
+ {
+ if (!prev_stmt_info)
+ STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
+ else
+ STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
+ prev_stmt_info = vinfo_for_stmt (new_stmt);
+ }
+ }
}
+
+ *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
break;
case NARROW:
for (j = 0; j < ncopies; j++)
{
/* Handle uses. */
- if (j == 0)
- {
- vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
- vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
- }
+ if (slp_node)
+ vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
+ slp_node, -1);
else
{
- vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
- vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
+ VEC_truncate (tree, vec_oprnds0, 0);
+ vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
+ vect_pow2 (multi_step_cvt) - 1);
}
- /* Arguments are ready. Create the new vector stmt. */
- new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
- vec_oprnd1);
- new_temp = make_ssa_name (vec_dest, new_stmt);
- gimple_assign_set_lhs (new_stmt, new_temp);
- vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ /* Arguments are ready. Create the new vector stmts. */
+ if (cvt_type)
+ FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
+ {
+ if (codecvt1 == CALL_EXPR)
+ {
+ new_stmt = gimple_build_call (decl1, 1, vop0);
+ new_temp = make_ssa_name (vec_dest, new_stmt);
+ gimple_call_set_lhs (new_stmt, new_temp);
+ }
+ else
+ {
+ gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
+ new_temp = make_ssa_name (vec_dest, NULL);
+ new_stmt = gimple_build_assign_with_ops (codecvt1, new_temp,
+ vop0, NULL);
+ }
- if (j == 0)
- STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
- else
- STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ VEC_replace (tree, vec_oprnds0, i, new_temp);
+ }
- prev_stmt_info = vinfo_for_stmt (new_stmt);
+ vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
+ stmt, vec_dsts, gsi,
+ slp_node, code1,
+ &prev_stmt_info);
}
*vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
+ break;
}
- if (vec_oprnds0)
- VEC_free (tree, heap, vec_oprnds0);
+ VEC_free (tree, heap, vec_oprnds0);
+ VEC_free (tree, heap, vec_oprnds1);
+ VEC_free (tree, heap, vec_dsts);
+ VEC_free (tree, heap, interm_types);
return true;
}
if (code == VIEW_CONVERT_EXPR)
op = TREE_OPERAND (op, 0);
- if (!vect_is_simple_use_1 (op, loop_vinfo, bb_vinfo,
+ if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
&def_stmt, &def, &dt[0], &vectype_in))
{
if (vect_print_dump_info (REPORT_DETAILS))
}
op0 = gimple_assign_rhs1 (stmt);
- if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
+ if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
&def_stmt, &def, &dt[0], &vectype))
{
if (vect_print_dump_info (REPORT_DETAILS))
return false;
op1 = gimple_assign_rhs2 (stmt);
- if (!vect_is_simple_use_1 (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
- &dt[1], &op1_vectype))
+ if (!vect_is_simple_use_1 (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
+ &def, &dt[1], &op1_vectype))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "use not simple.");
}
op0 = gimple_assign_rhs1 (stmt);
- if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
+ if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
&def_stmt, &def, &dt[0], &vectype))
{
if (vect_print_dump_info (REPORT_DETAILS))
if (op_type == binary_op || op_type == ternary_op)
{
op1 = gimple_assign_rhs2 (stmt);
- if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
- &dt[1]))
+ if (!vect_is_simple_use (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
+ &def, &dt[1]))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "use not simple.");
if (op_type == ternary_op)
{
op2 = gimple_assign_rhs3 (stmt);
- if (!vect_is_simple_use (op2, loop_vinfo, bb_vinfo, &def_stmt, &def,
- &dt[2]))
+ if (!vect_is_simple_use (op2, stmt, loop_vinfo, bb_vinfo, &def_stmt,
+ &def, &dt[2]))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "use not simple.");
}
-/* Get vectorized definitions for loop-based vectorization. For the first
- operand we call vect_get_vec_def_for_operand() (with OPRND containing
- scalar operand), and for the rest we get a copy with
- vect_get_vec_def_for_stmt_copy() using the previous vector definition
- (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
- The vectors are collected into VEC_OPRNDS. */
-
-static void
-vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
- VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
-{
- tree vec_oprnd;
-
- /* Get first vector operand. */
- /* All the vector operands except the very first one (that is scalar oprnd)
- are stmt copies. */
- if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
- vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
- else
- vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
-
- VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
-
- /* Get second vector operand. */
- vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
- VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
-
- *oprnd = vec_oprnd;
-
- /* For conversion in multiple steps, continue to get operands
- recursively. */
- if (multi_step_cvt)
- vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
-}
-
+/* Function vectorizable_store.
-/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
- For multi-step conversions store the resulting vectors and call the function
- recursively. */
-
-static void
-vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
- int multi_step_cvt, gimple stmt,
- VEC (tree, heap) *vec_dsts,
- gimple_stmt_iterator *gsi,
- slp_tree slp_node, enum tree_code code,
- stmt_vec_info *prev_stmt_info)
-{
- unsigned int i;
- tree vop0, vop1, new_tmp, vec_dest;
- gimple new_stmt;
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
-
- vec_dest = VEC_pop (tree, vec_dsts);
-
- for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
- {
- /* Create demotion operation. */
- vop0 = VEC_index (tree, *vec_oprnds, i);
- vop1 = VEC_index (tree, *vec_oprnds, i + 1);
- new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
- new_tmp = make_ssa_name (vec_dest, new_stmt);
- gimple_assign_set_lhs (new_stmt, new_tmp);
- vect_finish_stmt_generation (stmt, new_stmt, gsi);
-
- if (multi_step_cvt)
- /* Store the resulting vector for next recursive call. */
- VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
- else
- {
- /* This is the last step of the conversion sequence. Store the
- vectors in SLP_NODE or in vector info of the scalar statement
- (or in STMT_VINFO_RELATED_STMT chain). */
- if (slp_node)
- VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
- else
- {
- if (!*prev_stmt_info)
- STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
- else
- STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
-
- *prev_stmt_info = vinfo_for_stmt (new_stmt);
- }
- }
- }
-
- /* For multi-step demotion operations we first generate demotion operations
- from the source type to the intermediate types, and then combine the
- results (stored in VEC_OPRNDS) in demotion operation to the destination
- type. */
- if (multi_step_cvt)
- {
- /* At each level of recursion we have have of the operands we had at the
- previous level. */
- VEC_truncate (tree, *vec_oprnds, (i+1)/2);
- vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
- stmt, vec_dsts, gsi, slp_node,
- code, prev_stmt_info);
- }
-}
-
-
-/* Function vectorizable_type_demotion
-
- Check if STMT performs a binary or unary operation that involves
- type demotion, and if it can be vectorized.
- If VEC_STMT is also passed, vectorize the STMT: create a vectorized
- stmt to replace it, put it in VEC_STMT, and insert it at BSI.
- Return FALSE if not a vectorizable STMT, TRUE otherwise. */
-
-static bool
-vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
- gimple *vec_stmt, slp_tree slp_node)
-{
- tree vec_dest;
- tree scalar_dest;
- tree op0;
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- enum tree_code code, code1 = ERROR_MARK;
- tree def;
- gimple def_stmt;
- enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
- stmt_vec_info prev_stmt_info;
- int nunits_in;
- int nunits_out;
- tree vectype_out;
- int ncopies;
- int j, i;
- tree vectype_in;
- int multi_step_cvt = 0;
- VEC (tree, heap) *vec_oprnds0 = NULL;
- VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
- tree last_oprnd, intermediate_type;
- bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
-
- if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
- return false;
-
- if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
- return false;
-
- /* Is STMT a vectorizable type-demotion operation? */
- if (!is_gimple_assign (stmt))
- return false;
-
- if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
- return false;
-
- code = gimple_assign_rhs_code (stmt);
- if (!CONVERT_EXPR_CODE_P (code))
- return false;
-
- scalar_dest = gimple_assign_lhs (stmt);
- vectype_out = STMT_VINFO_VECTYPE (stmt_info);
-
- /* Check the operands of the operation. */
- op0 = gimple_assign_rhs1 (stmt);
- if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
- && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
- || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
- && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0)))))
- return false;
-
- if (INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
- && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
- != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
- || ((TYPE_PRECISION (TREE_TYPE (op0))
- != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op0)))))))
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "type demotion to/from bit-precision unsupported.");
- return false;
- }
-
- if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
- &def_stmt, &def, &dt[0], &vectype_in))
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "use not simple.");
- return false;
- }
- /* If op0 is an external def use a vector type with the
- same size as the output vector type if possible. */
- if (!vectype_in)
- vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
- if (vec_stmt)
- gcc_assert (vectype_in);
- if (!vectype_in)
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- {
- fprintf (vect_dump, "no vectype for scalar type ");
- print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
- }
-
- return false;
- }
-
- nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
- nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
- if (nunits_in >= nunits_out)
- return false;
-
- /* Multiple types in SLP are handled by creating the appropriate number of
- vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
- case of SLP. */
- if (slp_node || PURE_SLP_STMT (stmt_info))
- ncopies = 1;
- else
- ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
- gcc_assert (ncopies >= 1);
-
- /* Supportable by target? */
- if (!supportable_narrowing_operation (code, vectype_out, vectype_in,
- &code1, &multi_step_cvt, &interm_types))
- return false;
-
- if (!vec_stmt) /* transformation not required. */
- {
- STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "=== vectorizable_demotion ===");
- vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
- return true;
- }
-
- /** Transform. **/
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
- ncopies);
-
- /* In case of multi-step demotion, we first generate demotion operations to
- the intermediate types, and then from that types to the final one.
- We create vector destinations for the intermediate type (TYPES) received
- from supportable_narrowing_operation, and store them in the correct order
- for future use in vect_create_vectorized_demotion_stmts(). */
- if (multi_step_cvt)
- vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
- else
- vec_dsts = VEC_alloc (tree, heap, 1);
-
- vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
- VEC_quick_push (tree, vec_dsts, vec_dest);
-
- if (multi_step_cvt)
- {
- for (i = VEC_length (tree, interm_types) - 1;
- VEC_iterate (tree, interm_types, i, intermediate_type); i--)
- {
- vec_dest = vect_create_destination_var (scalar_dest,
- intermediate_type);
- VEC_quick_push (tree, vec_dsts, vec_dest);
- }
- }
-
- /* In case the vectorization factor (VF) is bigger than the number
- of elements that we can fit in a vectype (nunits), we have to generate
- more than one vector stmt - i.e - we need to "unroll" the
- vector stmt by a factor VF/nunits. */
- last_oprnd = op0;
- prev_stmt_info = NULL;
- for (j = 0; j < ncopies; j++)
- {
- /* Handle uses. */
- if (slp_node)
- vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
- slp_node, -1);
- else
- {
- VEC_free (tree, heap, vec_oprnds0);
- vec_oprnds0 = VEC_alloc (tree, heap,
- (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
- vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
- vect_pow2 (multi_step_cvt) - 1);
- }
-
- /* Arguments are ready. Create the new vector stmts. */
- tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
- vect_create_vectorized_demotion_stmts (&vec_oprnds0,
- multi_step_cvt, stmt, tmp_vec_dsts,
- gsi, slp_node, code1,
- &prev_stmt_info);
- }
-
- VEC_free (tree, heap, vec_oprnds0);
- VEC_free (tree, heap, vec_dsts);
- VEC_free (tree, heap, tmp_vec_dsts);
- VEC_free (tree, heap, interm_types);
-
- *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
- return true;
-}
-
-
-/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
- and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
- the resulting vectors and call the function recursively. */
-
-static void
-vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
- VEC (tree, heap) **vec_oprnds1,
- int multi_step_cvt, gimple stmt,
- VEC (tree, heap) *vec_dsts,
- gimple_stmt_iterator *gsi,
- slp_tree slp_node, enum tree_code code1,
- enum tree_code code2, tree decl1,
- tree decl2, int op_type,
- stmt_vec_info *prev_stmt_info)
-{
- int i;
- tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
- gimple new_stmt1, new_stmt2;
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
- VEC (tree, heap) *vec_tmp;
-
- vec_dest = VEC_pop (tree, vec_dsts);
- vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
-
- FOR_EACH_VEC_ELT (tree, *vec_oprnds0, i, vop0)
- {
- if (op_type == binary_op)
- vop1 = VEC_index (tree, *vec_oprnds1, i);
- else
- vop1 = NULL_TREE;
-
- /* Generate the two halves of promotion operation. */
- new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
- op_type, vec_dest, gsi, stmt);
- new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
- op_type, vec_dest, gsi, stmt);
- if (is_gimple_call (new_stmt1))
- {
- new_tmp1 = gimple_call_lhs (new_stmt1);
- new_tmp2 = gimple_call_lhs (new_stmt2);
- }
- else
- {
- new_tmp1 = gimple_assign_lhs (new_stmt1);
- new_tmp2 = gimple_assign_lhs (new_stmt2);
- }
-
- if (multi_step_cvt)
- {
- /* Store the results for the recursive call. */
- VEC_quick_push (tree, vec_tmp, new_tmp1);
- VEC_quick_push (tree, vec_tmp, new_tmp2);
- }
- else
- {
-	      /* Last step of the promotion sequence - store the results.  */
- if (slp_node)
- {
- VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
- VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
- }
- else
- {
- if (!*prev_stmt_info)
- STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
- else
- STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
-
- *prev_stmt_info = vinfo_for_stmt (new_stmt1);
- STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
- *prev_stmt_info = vinfo_for_stmt (new_stmt2);
- }
- }
- }
-
- if (multi_step_cvt)
- {
-      /* For a multi-step promotion operation we call the function
-         recursively for every stage.  We start from the input type,
-         create promotion operations to the intermediate types, and then
-         create promotions to the output type.  */
- *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
- vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
- multi_step_cvt - 1, stmt,
- vec_dsts, gsi, slp_node, code1,
- code2, decl2, decl2, op_type,
- prev_stmt_info);
- }
-
- VEC_free (tree, heap, vec_tmp);
-}
-
-
-/* Function vectorizable_type_promotion
-
- Check if STMT performs a binary or unary operation that involves
- type promotion, and if it can be vectorized.
- If VEC_STMT is also passed, vectorize the STMT: create a vectorized
- stmt to replace it, put it in VEC_STMT, and insert it at BSI.
- Return FALSE if not a vectorizable STMT, TRUE otherwise. */
-
-static bool
-vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
- gimple *vec_stmt, slp_tree slp_node)
-{
- tree vec_dest;
- tree scalar_dest;
- tree op0, op1 = NULL;
- tree vec_oprnd0=NULL, vec_oprnd1=NULL;
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
- tree decl1 = NULL_TREE, decl2 = NULL_TREE;
- int op_type;
- tree def;
- gimple def_stmt;
- enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
- stmt_vec_info prev_stmt_info;
- int nunits_in;
- int nunits_out;
- tree vectype_out;
- int ncopies;
- int j, i;
- tree vectype_in;
- tree intermediate_type = NULL_TREE;
- int multi_step_cvt = 0;
- VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
- VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
- bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
- unsigned int k;
-
- if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
- return false;
-
- if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
- return false;
-
- /* Is STMT a vectorizable type-promotion operation? */
- if (!is_gimple_assign (stmt))
- return false;
-
- if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
- return false;
-
- code = gimple_assign_rhs_code (stmt);
- if (!CONVERT_EXPR_CODE_P (code)
- && code != WIDEN_MULT_EXPR
- && code != WIDEN_LSHIFT_EXPR)
- return false;
-
- scalar_dest = gimple_assign_lhs (stmt);
- vectype_out = STMT_VINFO_VECTYPE (stmt_info);
-
- /* Check the operands of the operation. */
- op0 = gimple_assign_rhs1 (stmt);
- if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
- && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
- || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
- && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
- && CONVERT_EXPR_CODE_P (code))))
- return false;
-
- if (INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
- && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
- != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
- || ((TYPE_PRECISION (TREE_TYPE (op0))
- != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op0)))))))
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "type promotion to/from bit-precision "
- "unsupported.");
- return false;
- }
-
- if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
- &def_stmt, &def, &dt[0], &vectype_in))
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "use not simple.");
- return false;
- }
-
- op_type = TREE_CODE_LENGTH (code);
- if (op_type == binary_op)
- {
- bool ok;
-
- op1 = gimple_assign_rhs2 (stmt);
- if (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR)
- {
- /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
- OP1. */
- if (CONSTANT_CLASS_P (op0))
- ok = vect_is_simple_use_1 (op1, loop_vinfo, NULL,
- &def_stmt, &def, &dt[1], &vectype_in);
- else
- ok = vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def,
- &dt[1]);
-
- if (!ok)
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "use not simple.");
- return false;
- }
- }
- }
-
- /* If op0 is an external or constant def use a vector type with
- the same size as the output vector type. */
- if (!vectype_in)
- vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
- if (vec_stmt)
- gcc_assert (vectype_in);
- if (!vectype_in)
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- {
- fprintf (vect_dump, "no vectype for scalar type ");
- print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
- }
-
- return false;
- }
-
- nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
- nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
- if (nunits_in <= nunits_out)
- return false;
-
- /* Multiple types in SLP are handled by creating the appropriate number of
- vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
- case of SLP. */
- if (slp_node || PURE_SLP_STMT (stmt_info))
- ncopies = 1;
- else
- ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
-
- gcc_assert (ncopies >= 1);
-
- /* Supportable by target? */
- if (!supportable_widening_operation (code, stmt, vectype_out, vectype_in,
- &decl1, &decl2, &code1, &code2,
- &multi_step_cvt, &interm_types))
- return false;
-
- /* Binary widening operation can only be supported directly by the
- architecture. */
- gcc_assert (!(multi_step_cvt && op_type == binary_op));
-
- if (!vec_stmt) /* transformation not required. */
- {
- STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "=== vectorizable_promotion ===");
- vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
- return true;
- }
-
- /** Transform. **/
-
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
- ncopies);
-
- if (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR)
- {
- if (CONSTANT_CLASS_P (op0))
- op0 = fold_convert (TREE_TYPE (op1), op0);
- else if (CONSTANT_CLASS_P (op1))
- op1 = fold_convert (TREE_TYPE (op0), op1);
- }
-
- /* Handle def. */
-  /* In case of multi-step promotion, we first generate promotion operations
-     to the intermediate types, and then from those types to the final one.
-     We store the vector destinations in VEC_DSTS in the correct order for
-     recursive creation of promotion operations in
-     vect_create_vectorized_promotion_stmts().  Vector destinations are created
-     according to TYPES received from supportable_widening_operation().  */
- if (multi_step_cvt)
- vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
- else
- vec_dsts = VEC_alloc (tree, heap, 1);
-
- vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
- VEC_quick_push (tree, vec_dsts, vec_dest);
-
- if (multi_step_cvt)
- {
- for (i = VEC_length (tree, interm_types) - 1;
- VEC_iterate (tree, interm_types, i, intermediate_type); i--)
- {
- vec_dest = vect_create_destination_var (scalar_dest,
- intermediate_type);
- VEC_quick_push (tree, vec_dsts, vec_dest);
- }
- }
-
- if (!slp_node)
- {
- vec_oprnds0 = VEC_alloc (tree, heap,
- (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
- if (op_type == binary_op)
- vec_oprnds1 = VEC_alloc (tree, heap, 1);
- }
- else if (code == WIDEN_LSHIFT_EXPR)
- vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
-
- /* In case the vectorization factor (VF) is bigger than the number
- of elements that we can fit in a vectype (nunits), we have to generate
-     more than one vector stmt - i.e., we need to "unroll" the
- vector stmt by a factor VF/nunits. */
-
- prev_stmt_info = NULL;
- for (j = 0; j < ncopies; j++)
- {
- /* Handle uses. */
- if (j == 0)
- {
- if (slp_node)
- {
- if (code == WIDEN_LSHIFT_EXPR)
- {
- vec_oprnd1 = op1;
- /* Store vec_oprnd1 for every vector stmt to be created
- for SLP_NODE. We check during the analysis that all
- the shift arguments are the same. */
- for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
- VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
-
- vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
- slp_node, -1);
- }
- else
- vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
- &vec_oprnds1, slp_node, -1);
- }
- else
- {
- vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
- VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
- if (op_type == binary_op)
- {
- if (code == WIDEN_LSHIFT_EXPR)
- vec_oprnd1 = op1;
- else
- vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
- VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
- }
- }
- }
- else
- {
- vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
- VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
- if (op_type == binary_op)
- {
- if (code == WIDEN_LSHIFT_EXPR)
- vec_oprnd1 = op1;
- else
- vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
- VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
- }
- }
-
- /* Arguments are ready. Create the new vector stmts. */
- tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
- vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
- multi_step_cvt, stmt,
- tmp_vec_dsts,
- gsi, slp_node, code1, code2,
- decl1, decl2, op_type,
- &prev_stmt_info);
- }
-
- VEC_free (tree, heap, vec_dsts);
- VEC_free (tree, heap, tmp_vec_dsts);
- VEC_free (tree, heap, interm_types);
- VEC_free (tree, heap, vec_oprnds0);
- VEC_free (tree, heap, vec_oprnds1);
-
- *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
- return true;
-}
-
-
-/* Function vectorizable_store.
-
-   Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
- can be vectorized.
- If VEC_STMT is also passed, vectorize the STMT: create a vectorized
- stmt to replace it, put it in VEC_STMT, and insert it at BSI.
- Return FALSE if not a vectorizable STMT, TRUE otherwise. */
+   Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
+ can be vectorized.
+ If VEC_STMT is also passed, vectorize the STMT: create a vectorized
+ stmt to replace it, put it in VEC_STMT, and insert it at BSI.
+ Return FALSE if not a vectorizable STMT, TRUE otherwise. */
static bool
vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
int ncopies;
int j;
gimple next_stmt, first_stmt = NULL;
- bool strided_store = false;
+ bool grouped_store = false;
bool store_lanes_p = false;
unsigned int group_size, i;
VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
gcc_assert (gimple_assign_single_p (stmt));
op = gimple_assign_rhs1 (stmt);
- if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
+ if (!vect_is_simple_use (op, stmt, loop_vinfo, bb_vinfo, &def_stmt,
+ &def, &dt))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "use not simple.");
if (!STMT_VINFO_DATA_REF (stmt_info))
return false;
- if (tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0)
+ if (tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
+ ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
+ size_zero_node) < 0)
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "negative step for store.");
return false;
}
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
{
- strided_store = true;
+ grouped_store = true;
first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
if (!slp && !PURE_SLP_STMT (stmt_info))
{
group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
if (vect_store_lanes_supported (vectype, group_size))
store_lanes_p = true;
- else if (!vect_strided_store_supported (vectype, group_size))
+ else if (!vect_grouped_store_supported (vectype, group_size))
return false;
}
{
gcc_assert (gimple_assign_single_p (next_stmt));
op = gimple_assign_rhs1 (next_stmt);
- if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
- &def, &dt))
+ if (!vect_is_simple_use (op, next_stmt, loop_vinfo, bb_vinfo,
+ &def_stmt, &def, &dt))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "use not simple.");
/** Transform. **/
- if (strided_store)
+ if (grouped_store)
{
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
if (slp)
{
- strided_store = false;
+ grouped_store = false;
/* VEC_NUM is the number of vect stmts to be created for this
group. */
vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
vector stmt by a factor VF/nunits. For more details see documentation in
vect_get_vec_def_for_copy_stmt. */
- /* In case of interleaving (non-unit strided access):
+ /* In case of interleaving (non-unit grouped access):
S1: &base + 2 = x2
S2: &base = x0
Then permutation statements are generated:
- VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
- VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
+ VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
+ VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
...
And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
used as an input to vect_permute_store_chain(), and OPRNDS as
an input to vect_get_vec_def_for_stmt_copy() for the next copy.
- If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
+ If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
OPRNDS are of size 1. */
next_stmt = first_stmt;
for (i = 0; i < group_size; i++)
DR_CHAIN is then used as an input to vect_permute_store_chain(),
and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
next copy.
- If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
+ If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
OPRNDS are of size 1. */
for (i = 0; i < group_size; i++)
{
op = VEC_index (tree, oprnds, i);
- vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
- &dt);
+ vect_is_simple_use (op, NULL, loop_vinfo, bb_vinfo, &def_stmt,
+ &def, &dt);
vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
VEC_replace(tree, dr_chain, i, vec_oprnd);
VEC_replace(tree, oprnds, i, vec_oprnd);
new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
gimple_call_set_lhs (new_stmt, data_ref);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
- mark_symbols_for_renaming (new_stmt);
}
else
{
new_stmt = NULL;
- if (strided_store)
+ if (grouped_store)
{
result_chain = VEC_alloc (tree, heap, group_size);
/* Permute. */
next_stmt = first_stmt;
for (i = 0; i < vec_num; i++)
{
- struct ptr_info_def *pi;
+ unsigned align, misalign;
if (i > 0)
/* Bump the vector pointer. */
if (slp)
vec_oprnd = VEC_index (tree, vec_oprnds, i);
- else if (strided_store)
- /* For strided stores vectorized defs are interleaved in
+ else if (grouped_store)
+ /* For grouped stores vectorized defs are interleaved in
vect_permute_store_chain(). */
vec_oprnd = VEC_index (tree, result_chain, i);
data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
build_int_cst (reference_alias_ptr_type
(DR_REF (first_dr)), 0));
- pi = get_ptr_info (dataref_ptr);
- pi->align = TYPE_ALIGN_UNIT (vectype);
+ align = TYPE_ALIGN_UNIT (vectype);
if (aligned_access_p (first_dr))
- pi->misalign = 0;
+ misalign = 0;
else if (DR_MISALIGNMENT (first_dr) == -1)
{
TREE_TYPE (data_ref)
= build_aligned_type (TREE_TYPE (data_ref),
TYPE_ALIGN (elem_type));
- pi->align = TYPE_ALIGN_UNIT (elem_type);
- pi->misalign = 0;
+ align = TYPE_ALIGN_UNIT (elem_type);
+ misalign = 0;
}
else
{
TREE_TYPE (data_ref)
= build_aligned_type (TREE_TYPE (data_ref),
TYPE_ALIGN (elem_type));
- pi->misalign = DR_MISALIGNMENT (first_dr);
+ misalign = DR_MISALIGNMENT (first_dr);
}
+ set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
+ misalign);
/* Arguments are ready. Create the new vector stmt. */
new_stmt = gimple_build_assign (data_ref, vec_oprnd);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
- mark_symbols_for_renaming (new_stmt);
if (slp)
continue;
return true;
}
-/* Given a vector type VECTYPE returns a builtin DECL to be used
- for vector permutation and returns the mask that implements
+/* Given a vector type VECTYPE and permutation SEL returns
+ the VECTOR_CST mask that implements the permutation of the
+ vector elements. If that is impossible to do, returns NULL. */
+
+tree
+vect_gen_perm_mask (tree vectype, unsigned char *sel)
+{
+ tree mask_elt_type, mask_type, mask_vec, *mask_elts;
+ int i, nunits;
+
+ nunits = TYPE_VECTOR_SUBPARTS (vectype);
+
+ if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
+ return NULL;
+
+ mask_elt_type = lang_hooks.types.type_for_mode
+ (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
+ mask_type = get_vectype_for_scalar_type (mask_elt_type);
+
+ mask_elts = XALLOCAVEC (tree, nunits);
+ for (i = nunits - 1; i >= 0; i--)
+ mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
+ mask_vec = build_vector (mask_type, mask_elts);
+
+ return mask_vec;
+}
+
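The selection semantics that these VECTOR_CST masks encode can be modeled outside GCC: element i of a VEC_PERM_EXPR result is element sel[i] of the concatenation of the two input operands. A minimal standalone sketch, assuming four-element int vectors (the function name and the fixed width are illustrative, not part of the patch):

/* Editorial model of VEC_PERM_EXPR <x, y, sel> for nunits == 4:
   out[i] = concat (x, y)[sel[i]], i.e. x[sel[i]] when sel[i] < 4
   and y[sel[i] - 4] otherwise.  perm_mask_for_reverse below builds
   sel = {3, 2, 1, 0}, turning {a, b, c, d} into {d, c, b, a}; the
   interleave masks quoted in vectorizable_store work the same way
   with nunits == 8.  */
static void
vec_perm4_model (const int *x, const int *y,
		 const unsigned char *sel, int *out)
{
  int i;
  for (i = 0; i < 4; i++)
    out[i] = sel[i] < 4 ? x[sel[i]] : y[sel[i] - 4];
}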
+/* Given a vector type VECTYPE returns the VECTOR_CST mask that implements
reversal of the vector elements. If that is impossible to do,
returns NULL. */
static tree
perm_mask_for_reverse (tree vectype)
{
- tree mask_elt_type, mask_type, mask_vec;
int i, nunits;
unsigned char *sel;
for (i = 0; i < nunits; ++i)
sel[i] = nunits - 1 - i;
- if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
- return NULL;
-
- mask_elt_type
- = lang_hooks.types.type_for_size
- (TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (vectype))), 1);
- mask_type = get_vectype_for_scalar_type (mask_elt_type);
-
- mask_vec = NULL;
- for (i = 0; i < nunits; i++)
- mask_vec = tree_cons (NULL, build_int_cst (mask_elt_type, i), mask_vec);
- mask_vec = build_vector (mask_type, mask_vec);
-
- return mask_vec;
+ return vect_gen_perm_mask (vectype, sel);
}
-/* Given a vector variable X, that was generated for the scalar LHS of
- STMT, generate instructions to reverse the vector elements of X,
- insert them a *GSI and return the permuted vector variable. */
+/* Given vector variables X and Y, generated for the scalar STMT,
+   generate instructions to permute the vector elements of X and Y
+   using the permutation mask MASK_VEC, insert them at *GSI, and
+   return the permuted vector variable.  */
static tree
-reverse_vec_elements (tree x, gimple stmt, gimple_stmt_iterator *gsi)
+permute_vec_elements (tree x, tree y, tree mask_vec, gimple stmt,
+ gimple_stmt_iterator *gsi)
{
tree vectype = TREE_TYPE (x);
- tree mask_vec, perm_dest, data_ref;
+ tree perm_dest, data_ref;
gimple perm_stmt;
- mask_vec = perm_mask_for_reverse (vectype);
-
perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
+ data_ref = make_ssa_name (perm_dest, NULL);
/* Generate the permute statement. */
- perm_stmt = gimple_build_assign_with_ops3 (VEC_PERM_EXPR, perm_dest,
- x, x, mask_vec);
- data_ref = make_ssa_name (perm_dest, perm_stmt);
- gimple_set_lhs (perm_stmt, data_ref);
+ perm_stmt = gimple_build_assign_with_ops3 (VEC_PERM_EXPR, data_ref,
+ x, y, mask_vec);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
return data_ref;
tree realignment_token = NULL_TREE;
gimple phi = NULL;
VEC(tree,heap) *dr_chain = NULL;
- bool strided_load = false;
+ bool grouped_load = false;
bool load_lanes_p = false;
gimple first_stmt;
bool inv_p;
- bool negative;
+ bool negative = false;
bool compute_in_loop = false;
struct loop *at_loop;
int vec_num;
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
int vf;
tree aggr_type;
+ tree gather_base = NULL_TREE, gather_off = NULL_TREE;
+ tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
+ tree stride_base, stride_step;
+ int gather_scale = 1;
+ enum vect_def_type gather_dt = vect_unknown_def_type;
if (loop_vinfo)
{
else
ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
- gcc_assert (ncopies >= 1);
+ gcc_assert (ncopies >= 1);
+
+ /* FORNOW. This restriction should be relaxed. */
+ if (nested_in_vect_loop && ncopies > 1)
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "multiple types in nested loop.");
+ return false;
+ }
+
+ if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
+ return false;
+
+ if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
+ return false;
+
+ /* Is vectorizable load? */
+ if (!is_gimple_assign (stmt))
+ return false;
+
+ scalar_dest = gimple_assign_lhs (stmt);
+ if (TREE_CODE (scalar_dest) != SSA_NAME)
+ return false;
+
+ code = gimple_assign_rhs_code (stmt);
+ if (code != ARRAY_REF
+ && code != INDIRECT_REF
+ && code != COMPONENT_REF
+ && code != IMAGPART_EXPR
+ && code != REALPART_EXPR
+ && code != MEM_REF
+ && TREE_CODE_CLASS (code) != tcc_declaration)
+ return false;
+
+ if (!STMT_VINFO_DATA_REF (stmt_info))
+ return false;
+
+ elem_type = TREE_TYPE (vectype);
+ mode = TYPE_MODE (vectype);
+
+  /* FORNOW.  In some cases we can vectorize even if the data type is
+     not supported (e.g. data copies).  */
+ if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "Aligned load, but unsupported type.");
+ return false;
+ }
+
+ /* Check if the load is a part of an interleaving chain. */
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
+ {
+ grouped_load = true;
+ /* FORNOW */
+ gcc_assert (! nested_in_vect_loop && !STMT_VINFO_GATHER_P (stmt_info));
+
+ first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
+ if (!slp && !PURE_SLP_STMT (stmt_info))
+ {
+ group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
+ if (vect_load_lanes_supported (vectype, group_size))
+ load_lanes_p = true;
+ else if (!vect_grouped_load_supported (vectype, group_size))
+ return false;
+ }
+ }
+
+ if (STMT_VINFO_GATHER_P (stmt_info))
+ {
+ gimple def_stmt;
+ tree def;
+ gather_decl = vect_check_gather (stmt, loop_vinfo, &gather_base,
+ &gather_off, &gather_scale);
+ gcc_assert (gather_decl);
+ if (!vect_is_simple_use_1 (gather_off, NULL, loop_vinfo, bb_vinfo,
+ &def_stmt, &def, &gather_dt,
+ &gather_off_vectype))
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "gather index use not simple.");
+ return false;
+ }
+ }
+ else if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
+ {
+ if (!vect_check_strided_load (stmt, loop_vinfo,
+ &stride_base, &stride_step))
+ return false;
+ }
+ else
+ {
+ negative = tree_int_cst_compare (nested_in_vect_loop
+ ? STMT_VINFO_DR_STEP (stmt_info)
+ : DR_STEP (dr),
+ size_zero_node) < 0;
+ if (negative && ncopies > 1)
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "multiple types with negative step.");
+ return false;
+ }
+
+ if (negative)
+ {
+ gcc_assert (!grouped_load);
+ alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
+ if (alignment_support_scheme != dr_aligned
+ && alignment_support_scheme != dr_unaligned_supported)
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "negative step but alignment required.");
+ return false;
+ }
+ if (!perm_mask_for_reverse (vectype))
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "negative step and reversing not supported.");
+ return false;
+ }
+ }
+ }
+
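Before the transform code below, it may help to see what a gather computes. A rough scalar model, with all names assumed (the generated code instead calls the target builtin GATHER_DECL with an all-ones mask and a scale operand):

/* Editorial model: a gather load fetches nunits elements from
   base + off[i] * scale (byte addressing), unconditionally.  */
static void
gather_load_model (const char *base, const int *off, int scale,
		   int nunits, int *out)
{
  int i;
  for (i = 0; i < nunits; i++)
    out[i] = *(const int *) (base + (long) off[i] * scale);
}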
+ if (!vec_stmt) /* transformation not required. */
+ {
+ STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
+ vect_model_load_cost (stmt_info, ncopies, load_lanes_p, NULL);
+ return true;
+ }
+
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "transform load. ncopies = %d", ncopies);
+
+ /** Transform. **/
+
+ if (STMT_VINFO_GATHER_P (stmt_info))
+ {
+ tree vec_oprnd0 = NULL_TREE, op;
+ tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
+ tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
+ tree ptr, mask, var, scale, perm_mask = NULL_TREE, prev_res = NULL_TREE;
+ edge pe = loop_preheader_edge (loop);
+ gimple_seq seq;
+ basic_block new_bb;
+ enum { NARROW, NONE, WIDEN } modifier;
+ int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
+
+ if (nunits == gather_off_nunits)
+ modifier = NONE;
+ else if (nunits == gather_off_nunits / 2)
+ {
+ unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
+ modifier = WIDEN;
+
+ for (i = 0; i < gather_off_nunits; ++i)
+ sel[i] = i | nunits;
+
+ perm_mask = vect_gen_perm_mask (gather_off_vectype, sel);
+ gcc_assert (perm_mask != NULL_TREE);
+ }
+ else if (nunits == gather_off_nunits * 2)
+ {
+ unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
+ modifier = NARROW;
+
+ for (i = 0; i < nunits; ++i)
+ sel[i] = i < gather_off_nunits
+ ? i : i + nunits - gather_off_nunits;
+
+ perm_mask = vect_gen_perm_mask (vectype, sel);
+ gcc_assert (perm_mask != NULL_TREE);
+ ncopies *= 2;
+ }
+ else
+ gcc_unreachable ();
+
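Concrete selector values for the two size-mismatch cases above, with vector widths assumed purely for illustration:

/* Editorial sketch of the selectors built above.  WIDEN with
   nunits == 4 and gather_off_nunits == 8: sel[i] = i | 4 gives
   {4, 5, 6, 7, 4, 5, 6, 7}, so odd copies gather through the second
   half of the index vector.  NARROW with nunits == 8 and
   gather_off_nunits == 4: sel = {0, 1, 2, 3, 8, 9, 10, 11}, which
   concatenates the used halves of two consecutive gather results.  */
static void
gather_sel_models (unsigned char widen_sel[8], unsigned char narrow_sel[8])
{
  int i;
  for (i = 0; i < 8; i++)
    widen_sel[i] = i | 4;
  for (i = 0; i < 8; i++)
    narrow_sel[i] = i < 4 ? i : i + 8 - 4;
}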
+ rettype = TREE_TYPE (TREE_TYPE (gather_decl));
+ srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
+ ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
+ idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
+ masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
+ scaletype = TREE_VALUE (arglist);
+ gcc_checking_assert (types_compatible_p (srctype, rettype)
+ && types_compatible_p (srctype, masktype));
+
+ vec_dest = vect_create_destination_var (scalar_dest, vectype);
+
+ ptr = fold_convert (ptrtype, gather_base);
+ if (!is_gimple_min_invariant (ptr))
+ {
+ ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
+ new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
+ gcc_assert (!new_bb);
+ }
+
+ /* Currently we support only unconditional gather loads,
+ so mask should be all ones. */
+ if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
+ mask = build_int_cst (TREE_TYPE (masktype), -1);
+ else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
+ {
+ REAL_VALUE_TYPE r;
+ long tmp[6];
+ for (j = 0; j < 6; ++j)
+ tmp[j] = -1;
+ real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
+ mask = build_real (TREE_TYPE (masktype), r);
+ }
+ else
+ gcc_unreachable ();
+ mask = build_vector_from_val (masktype, mask);
+ mask = vect_init_vector (stmt, mask, masktype, NULL);
+
+ scale = build_int_cst (scaletype, gather_scale);
+
+ prev_stmt_info = NULL;
+ for (j = 0; j < ncopies; ++j)
+ {
+ if (modifier == WIDEN && (j & 1))
+ op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
+ perm_mask, stmt, gsi);
+ else if (j == 0)
+ op = vec_oprnd0
+ = vect_get_vec_def_for_operand (gather_off, stmt, NULL);
+ else
+ op = vec_oprnd0
+ = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
+
+ if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
+ {
+ gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
+ == TYPE_VECTOR_SUBPARTS (idxtype));
+ var = vect_get_new_vect_var (idxtype, vect_simple_var, NULL);
+ add_referenced_var (var);
+ var = make_ssa_name (var, NULL);
+ op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
+ new_stmt
+ = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var,
+ op, NULL_TREE);
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ op = var;
+ }
+
+ new_stmt
+ = gimple_build_call (gather_decl, 5, mask, ptr, op, mask, scale);
+
+ if (!useless_type_conversion_p (vectype, rettype))
+ {
+ gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
+ == TYPE_VECTOR_SUBPARTS (rettype));
+ var = vect_get_new_vect_var (rettype, vect_simple_var, NULL);
+ add_referenced_var (var);
+ op = make_ssa_name (var, new_stmt);
+ gimple_call_set_lhs (new_stmt, op);
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ var = make_ssa_name (vec_dest, NULL);
+ op = build1 (VIEW_CONVERT_EXPR, vectype, op);
+ new_stmt
+ = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var, op,
+ NULL_TREE);
+ }
+ else
+ {
+ var = make_ssa_name (vec_dest, new_stmt);
+ gimple_call_set_lhs (new_stmt, var);
+ }
+
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+
+ if (modifier == NARROW)
+ {
+ if ((j & 1) == 0)
+ {
+ prev_res = var;
+ continue;
+ }
+ var = permute_vec_elements (prev_res, var,
+ perm_mask, stmt, gsi);
+ new_stmt = SSA_NAME_DEF_STMT (var);
+ }
+
+ if (prev_stmt_info == NULL)
+ STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
+ else
+ STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
+ prev_stmt_info = vinfo_for_stmt (new_stmt);
+ }
+ return true;
+ }
+ else if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
+ {
+ gimple_stmt_iterator incr_gsi;
+ bool insert_after;
+ gimple incr;
+ tree offvar;
+ tree ref = DR_REF (dr);
+ tree ivstep;
+ tree running_off;
+ VEC(constructor_elt, gc) *v = NULL;
+ gimple_seq stmts = NULL;
+
+ gcc_assert (stride_base && stride_step);
- /* FORNOW. This restriction should be relaxed. */
- if (nested_in_vect_loop && ncopies > 1)
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "multiple types in nested loop.");
- return false;
- }
+ /* For a load with loop-invariant (but other than power-of-2)
+ stride (i.e. not a grouped access) like so:
- if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
- return false;
+ for (i = 0; i < n; i += stride)
+ ... = array[i];
- if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
- return false;
+ we generate a new induction variable and new accesses to
+ form a new vector (or vectors, depending on ncopies):
- /* Is vectorizable load? */
- if (!is_gimple_assign (stmt))
- return false;
+ for (j = 0; ; j += VF*stride)
+ tmp1 = array[j];
+ tmp2 = array[j + stride];
+ ...
+ vectemp = {tmp1, tmp2, ...}
+ */
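A concrete instance of the shape described above, with the stride and vectorization factor assumed only for illustration:

/* Editorial sketch: the generated access pattern for stride == 3 and
   VF == 4.  Four scalar loads, one stride apart, feed one vector
   constructor per copy; the vector is modeled by a plain array.  */
static void
strided_load_shape (const int *array, int j, int vectemp[4])
{
  vectemp[0] = array[j];
  vectemp[1] = array[j + 3];
  vectemp[2] = array[j + 6];
  vectemp[3] = array[j + 9];
  /* The induction variable then advances by VF * stride == 12.  */
}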
- scalar_dest = gimple_assign_lhs (stmt);
- if (TREE_CODE (scalar_dest) != SSA_NAME)
- return false;
+ ivstep = stride_step;
+ ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
+ build_int_cst (TREE_TYPE (ivstep), vf));
- code = gimple_assign_rhs_code (stmt);
- if (code != ARRAY_REF
- && code != INDIRECT_REF
- && code != COMPONENT_REF
- && code != IMAGPART_EXPR
- && code != REALPART_EXPR
- && code != MEM_REF
- && TREE_CODE_CLASS (code) != tcc_declaration)
- return false;
+ standard_iv_increment_position (loop, &incr_gsi, &insert_after);
- if (!STMT_VINFO_DATA_REF (stmt_info))
- return false;
+ create_iv (stride_base, ivstep, NULL,
+ loop, &incr_gsi, insert_after,
+ &offvar, NULL);
+ incr = gsi_stmt (incr_gsi);
+ set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
- negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
- if (negative && ncopies > 1)
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "multiple types with negative step.");
- return false;
- }
+ stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
+ if (stmts)
+ gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
- elem_type = TREE_TYPE (vectype);
- mode = TYPE_MODE (vectype);
+ prev_stmt_info = NULL;
+ running_off = offvar;
+ for (j = 0; j < ncopies; j++)
+ {
+ tree vec_inv;
- /* FORNOW. In some cases can vectorize even if data-type not supported
- (e.g. - data copies). */
- if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "Aligned load, but unsupported type.");
- return false;
- }
+ v = VEC_alloc (constructor_elt, gc, nunits);
+ for (i = 0; i < nunits; i++)
+ {
+ tree newref, newoff;
+ gimple incr;
+ if (TREE_CODE (ref) == ARRAY_REF)
+ newref = build4 (ARRAY_REF, TREE_TYPE (ref),
+ unshare_expr (TREE_OPERAND (ref, 0)),
+ running_off,
+ NULL_TREE, NULL_TREE);
+ else
+ newref = build2 (MEM_REF, TREE_TYPE (ref),
+ running_off,
+ TREE_OPERAND (ref, 1));
+
+ newref = force_gimple_operand_gsi (gsi, newref, true,
+ NULL_TREE, true,
+ GSI_SAME_STMT);
+ CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
+ newoff = SSA_NAME_VAR (running_off);
+ if (POINTER_TYPE_P (TREE_TYPE (newoff)))
+ incr = gimple_build_assign_with_ops (POINTER_PLUS_EXPR, newoff,
+ running_off, stride_step);
+ else
+ incr = gimple_build_assign_with_ops (PLUS_EXPR, newoff,
+ running_off, stride_step);
+ newoff = make_ssa_name (newoff, incr);
+ gimple_assign_set_lhs (incr, newoff);
+ vect_finish_stmt_generation (stmt, incr, gsi);
- /* Check if the load is a part of an interleaving chain. */
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
- {
- strided_load = true;
- /* FORNOW */
- gcc_assert (! nested_in_vect_loop);
+ running_off = newoff;
+ }
- first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
- if (!slp && !PURE_SLP_STMT (stmt_info))
- {
- group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
- if (vect_load_lanes_supported (vectype, group_size))
- load_lanes_p = true;
- else if (!vect_strided_load_supported (vectype, group_size))
- return false;
- }
- }
+ vec_inv = build_constructor (vectype, v);
+ new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
+ new_stmt = SSA_NAME_DEF_STMT (new_temp);
- if (negative)
- {
- gcc_assert (!strided_load);
- alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
- if (alignment_support_scheme != dr_aligned
- && alignment_support_scheme != dr_unaligned_supported)
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "negative step but alignment required.");
- return false;
- }
- if (!perm_mask_for_reverse (vectype))
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "negative step and reversing not supported.");
- return false;
+ if (j == 0)
+ STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
+ else
+ STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
+ prev_stmt_info = vinfo_for_stmt (new_stmt);
}
- }
-
- if (!vec_stmt) /* transformation not required. */
- {
- STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
- vect_model_load_cost (stmt_info, ncopies, load_lanes_p, NULL);
return true;
}
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "transform load. ncopies = %d", ncopies);
-
- /** Transform. **/
-
- if (strided_load)
+ if (grouped_load)
{
first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
if (slp
/* VEC_NUM is the number of vect stmts to be created for this group. */
if (slp)
{
- strided_load = false;
+ grouped_load = false;
vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
slp_perm = true;
information we recorded in RELATED_STMT field is used to vectorize
stmt S2. */
- /* In case of interleaving (non-unit strided access):
+ /* In case of interleaving (non-unit grouped access):
S1: x2 = &base + 2
S2: x0 = &base
Then permutation statements are generated:
- VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
- VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
+ VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
+ VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
...
And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
corresponds to the order of scalar stmts in the interleaving chain - see
the documentation of vect_permute_load_chain()).
The generation of permutation stmts and recording them in
- STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
+ STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
In case of both multiple types and interleaving, the vector loads and
permutation stmts above are created for every copy. The result vector
This can only occur when vectorizing memory accesses in the inner-loop
nested within an outer-loop that is being vectorized. */
- if (loop && nested_in_vect_loop_p (loop, stmt)
+ if (nested_in_vect_loop
&& (TREE_INT_CST_LOW (DR_STEP (dr))
% GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
{
dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
TYPE_SIZE_UNIT (aggr_type));
- if (strided_load || slp_perm)
+ if (grouped_load || slp_perm)
dr_chain = VEC_alloc (tree, heap, vec_num);
if (load_lanes_p)
new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
gimple_call_set_lhs (new_stmt, vec_array);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
- mark_symbols_for_renaming (new_stmt);
/* Extract each vector into an SSA_NAME. */
for (i = 0; i < vec_num; i++)
}
/* Record the mapping between SSA_NAMEs and statements. */
- vect_record_strided_load_vectors (stmt, dr_chain);
+ vect_record_grouped_load_vectors (stmt, dr_chain);
}
else
{
case dr_aligned:
case dr_unaligned_supported:
{
- struct ptr_info_def *pi;
+ unsigned int align, misalign;
+
data_ref
= build2 (MEM_REF, vectype, dataref_ptr,
build_int_cst (reference_alias_ptr_type
(DR_REF (first_dr)), 0));
- pi = get_ptr_info (dataref_ptr);
- pi->align = TYPE_ALIGN_UNIT (vectype);
+ align = TYPE_ALIGN_UNIT (vectype);
if (alignment_support_scheme == dr_aligned)
{
gcc_assert (aligned_access_p (first_dr));
- pi->misalign = 0;
+ misalign = 0;
}
else if (DR_MISALIGNMENT (first_dr) == -1)
{
TREE_TYPE (data_ref)
= build_aligned_type (TREE_TYPE (data_ref),
TYPE_ALIGN (elem_type));
- pi->align = TYPE_ALIGN_UNIT (elem_type);
- pi->misalign = 0;
+ align = TYPE_ALIGN_UNIT (elem_type);
+ misalign = 0;
}
else
{
TREE_TYPE (data_ref)
= build_aligned_type (TREE_TYPE (data_ref),
TYPE_ALIGN (elem_type));
- pi->misalign = DR_MISALIGNMENT (first_dr);
+ misalign = DR_MISALIGNMENT (first_dr);
}
+ set_ptr_info_alignment (get_ptr_info (dataref_ptr),
+ align, misalign);
break;
}
case dr_explicit_realign:
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_temp);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
- mark_symbols_for_renaming (new_stmt);
/* 3. Handle explicit realignment if necessary/supported.
Create in loop:
/* 4. Handle invariant-load. */
if (inv_p && !bb_vinfo)
{
- tree tem, vec_inv;
gimple_stmt_iterator gsi2 = *gsi;
- gcc_assert (!strided_load);
+ gcc_assert (!grouped_load);
gsi_next (&gsi2);
- tem = scalar_dest;
- if (!useless_type_conversion_p (TREE_TYPE (vectype),
- TREE_TYPE (tem)))
- {
- tem = fold_convert (TREE_TYPE (vectype), tem);
- tem = force_gimple_operand_gsi (&gsi2, tem, true,
- NULL_TREE, true,
- GSI_SAME_STMT);
- }
- vec_inv = build_vector_from_val (vectype, tem);
- new_temp = vect_init_vector (stmt, vec_inv,
+ new_temp = vect_init_vector (stmt, scalar_dest,
vectype, &gsi2);
new_stmt = SSA_NAME_DEF_STMT (new_temp);
}
if (negative)
{
- new_temp = reverse_vec_elements (new_temp, stmt, gsi);
+ tree perm_mask = perm_mask_for_reverse (vectype);
+ new_temp = permute_vec_elements (new_temp, new_temp,
+ perm_mask, stmt, gsi);
new_stmt = SSA_NAME_DEF_STMT (new_temp);
}
/* Collect vector loads and later create their permutation in
- vect_transform_strided_load (). */
- if (strided_load || slp_perm)
+ vect_transform_grouped_load (). */
+ if (grouped_load || slp_perm)
VEC_quick_push (tree, dr_chain, new_temp);
/* Store vector loads in the corresponding SLP_NODE. */
}
else
{
- if (strided_load)
+ if (grouped_load)
{
if (!load_lanes_p)
- vect_transform_strided_load (stmt, dr_chain, group_size, gsi);
+ vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
*vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
}
else
condition operands are supportable using vec_is_simple_use. */
static bool
-vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo, tree *comp_vectype)
+vect_is_simple_cond (tree cond, gimple stmt, loop_vec_info loop_vinfo,
+ bb_vec_info bb_vinfo, tree *comp_vectype)
{
tree lhs, rhs;
tree def;
if (TREE_CODE (lhs) == SSA_NAME)
{
gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
- if (!vect_is_simple_use_1 (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
- &dt, &vectype1))
+ if (!vect_is_simple_use_1 (lhs, stmt, loop_vinfo, bb_vinfo,
+ &lhs_def_stmt, &def, &dt, &vectype1))
return false;
}
else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
if (TREE_CODE (rhs) == SSA_NAME)
{
gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
- if (!vect_is_simple_use_1 (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
- &dt, &vectype2))
+ if (!vect_is_simple_use_1 (rhs, stmt, loop_vinfo, bb_vinfo,
+ &rhs_def_stmt, &def, &dt, &vectype2))
return false;
}
- else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
+ else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
&& TREE_CODE (rhs) != FIXED_CST)
return false;
bool
vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
- gimple *vec_stmt, tree reduc_def, int reduc_index)
+ gimple *vec_stmt, tree reduc_def, int reduc_index,
+ slp_tree slp_node)
{
tree scalar_dest = NULL_TREE;
tree vec_dest = NULL_TREE;
tree cond_expr, then_clause, else_clause;
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
- tree comp_vectype;
+ tree comp_vectype = NULL_TREE;
tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
tree vec_compare, vec_cond_expr;
tree def;
enum vect_def_type dt, dts[4];
int nunits = TYPE_VECTOR_SUBPARTS (vectype);
- int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
+ int ncopies;
enum tree_code code;
stmt_vec_info prev_stmt_info = NULL;
- int j;
-
- /* FORNOW: unsupported in basic block SLP. */
- gcc_assert (loop_vinfo);
+ int i, j;
+ bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
+ VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
+ VEC (tree, heap) *vec_oprnds2 = NULL, *vec_oprnds3 = NULL;
- /* FORNOW: SLP not supported. */
- if (STMT_SLP_TYPE (stmt_info))
- return false;
+ if (slp_node || PURE_SLP_STMT (stmt_info))
+ ncopies = 1;
+ else
+ ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
gcc_assert (ncopies >= 1);
if (reduc_index && ncopies > 1)
return false; /* FORNOW */
- if (!STMT_VINFO_RELEVANT_P (stmt_info))
+ if (reduc_index && STMT_SLP_TYPE (stmt_info))
+ return false;
+
+ if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
return false;
if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
then_clause = gimple_assign_rhs2 (stmt);
else_clause = gimple_assign_rhs3 (stmt);
- if (!vect_is_simple_cond (cond_expr, loop_vinfo, &comp_vectype)
+ if (!vect_is_simple_cond (cond_expr, stmt, loop_vinfo, bb_vinfo,
+ &comp_vectype)
|| !comp_vectype)
return false;
if (TREE_CODE (then_clause) == SSA_NAME)
{
gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
- if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
+ if (!vect_is_simple_use (then_clause, stmt, loop_vinfo, bb_vinfo,
&then_def_stmt, &def, &dt))
return false;
}
if (TREE_CODE (else_clause) == SSA_NAME)
{
gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
- if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
+ if (!vect_is_simple_use (else_clause, stmt, loop_vinfo, bb_vinfo,
&else_def_stmt, &def, &dt))
return false;
}
return expand_vec_cond_expr_p (vectype, comp_vectype);
}
- /* Transform */
+ /* Transform. */
+
+ if (!slp_node)
+ {
+ vec_oprnds0 = VEC_alloc (tree, heap, 1);
+ vec_oprnds1 = VEC_alloc (tree, heap, 1);
+ vec_oprnds2 = VEC_alloc (tree, heap, 1);
+ vec_oprnds3 = VEC_alloc (tree, heap, 1);
+ }
/* Handle def. */
scalar_dest = gimple_assign_lhs (stmt);
/* Handle cond expr. */
for (j = 0; j < ncopies; j++)
{
- gimple new_stmt;
+ gimple new_stmt = NULL;
if (j == 0)
{
- gimple gtemp;
- vec_cond_lhs =
+ if (slp_node)
+ {
+ VEC (tree, heap) *ops = VEC_alloc (tree, heap, 4);
+ VEC (slp_void_p, heap) *vec_defs;
+
+ vec_defs = VEC_alloc (slp_void_p, heap, 4);
+ VEC_safe_push (tree, heap, ops, TREE_OPERAND (cond_expr, 0));
+ VEC_safe_push (tree, heap, ops, TREE_OPERAND (cond_expr, 1));
+ VEC_safe_push (tree, heap, ops, then_clause);
+ VEC_safe_push (tree, heap, ops, else_clause);
+ vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
+ vec_oprnds3 = (VEC (tree, heap) *) VEC_pop (slp_void_p, vec_defs);
+ vec_oprnds2 = (VEC (tree, heap) *) VEC_pop (slp_void_p, vec_defs);
+ vec_oprnds1 = (VEC (tree, heap) *) VEC_pop (slp_void_p, vec_defs);
+ vec_oprnds0 = (VEC (tree, heap) *) VEC_pop (slp_void_p, vec_defs);
+
+ VEC_free (tree, heap, ops);
+ VEC_free (slp_void_p, heap, vec_defs);
+ }
+ else
+ {
+ gimple gtemp;
+ vec_cond_lhs =
vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
stmt, NULL);
- vect_is_simple_use (TREE_OPERAND (cond_expr, 0), loop_vinfo,
- NULL, >emp, &def, &dts[0]);
- vec_cond_rhs =
- vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
- stmt, NULL);
- vect_is_simple_use (TREE_OPERAND (cond_expr, 1), loop_vinfo,
- NULL, >emp, &def, &dts[1]);
- if (reduc_index == 1)
- vec_then_clause = reduc_def;
- else
- {
- vec_then_clause = vect_get_vec_def_for_operand (then_clause,
- stmt, NULL);
- vect_is_simple_use (then_clause, loop_vinfo,
- NULL, >emp, &def, &dts[2]);
- }
- if (reduc_index == 2)
- vec_else_clause = reduc_def;
- else
- {
- vec_else_clause = vect_get_vec_def_for_operand (else_clause,
+ vect_is_simple_use (TREE_OPERAND (cond_expr, 0), stmt,
+ loop_vinfo, NULL, >emp, &def, &dts[0]);
+
+ vec_cond_rhs =
+ vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
+ stmt, NULL);
+ vect_is_simple_use (TREE_OPERAND (cond_expr, 1), stmt,
+ loop_vinfo, NULL, >emp, &def, &dts[1]);
+ if (reduc_index == 1)
+ vec_then_clause = reduc_def;
+ else
+ {
+ vec_then_clause = vect_get_vec_def_for_operand (then_clause,
+ stmt, NULL);
+ vect_is_simple_use (then_clause, stmt, loop_vinfo,
+ NULL, >emp, &def, &dts[2]);
+ }
+ if (reduc_index == 2)
+ vec_else_clause = reduc_def;
+ else
+ {
+ vec_else_clause = vect_get_vec_def_for_operand (else_clause,
stmt, NULL);
- vect_is_simple_use (else_clause, loop_vinfo,
+ vect_is_simple_use (else_clause, stmt, loop_vinfo,
NULL, >emp, &def, &dts[3]);
+ }
}
}
else
{
- vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0], vec_cond_lhs);
- vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1], vec_cond_rhs);
+ vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0],
+ VEC_pop (tree, vec_oprnds0));
+ vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1],
+ VEC_pop (tree, vec_oprnds1));
vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
- vec_then_clause);
+ VEC_pop (tree, vec_oprnds2));
vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
- vec_else_clause);
+ VEC_pop (tree, vec_oprnds3));
+ }
+
+ if (!slp_node)
+ {
+ VEC_quick_push (tree, vec_oprnds0, vec_cond_lhs);
+ VEC_quick_push (tree, vec_oprnds1, vec_cond_rhs);
+ VEC_quick_push (tree, vec_oprnds2, vec_then_clause);
+ VEC_quick_push (tree, vec_oprnds3, vec_else_clause);
}
/* Arguments are ready. Create the new vector stmt. */
- vec_compare = build2 (TREE_CODE (cond_expr), vectype,
- vec_cond_lhs, vec_cond_rhs);
- vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
- vec_compare, vec_then_clause, vec_else_clause);
+ FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vec_cond_lhs)
+ {
+ vec_cond_rhs = VEC_index (tree, vec_oprnds1, i);
+ vec_then_clause = VEC_index (tree, vec_oprnds2, i);
+ vec_else_clause = VEC_index (tree, vec_oprnds3, i);
- new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
- new_temp = make_ssa_name (vec_dest, new_stmt);
- gimple_assign_set_lhs (new_stmt, new_temp);
- vect_finish_stmt_generation (stmt, new_stmt, gsi);
- if (j == 0)
- STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
- else
- STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
+ vec_compare = build2 (TREE_CODE (cond_expr), vectype,
+ vec_cond_lhs, vec_cond_rhs);
+ vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
+ vec_compare, vec_then_clause, vec_else_clause);
- prev_stmt_info = vinfo_for_stmt (new_stmt);
+ new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
+ new_temp = make_ssa_name (vec_dest, new_stmt);
+ gimple_assign_set_lhs (new_stmt, new_temp);
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ if (slp_node)
+ VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
+ }
+
+ if (slp_node)
+ continue;
+
+ if (j == 0)
+ STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
+ else
+ STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
+
+ prev_stmt_info = vinfo_for_stmt (new_stmt);
}
+ VEC_free (tree, heap, vec_oprnds0);
+ VEC_free (tree, heap, vec_oprnds1);
+ VEC_free (tree, heap, vec_oprnds2);
+ VEC_free (tree, heap, vec_oprnds3);
+
return true;
}
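The VEC_COND_EXPR built in the transform above selects between the then- and else-values lane by lane. A standalone model, assuming four-element int vectors and a less-than comparison (both are assumptions of this sketch, not fixed by the code):

/* Editorial model of VEC_COND_EXPR <a < b, t, e>: each lane
   independently picks the THEN or ELSE operand.  */
static void
vec_cond_lt_model (const int *a, const int *b,
		   const int *t, const int *e, int *out)
{
  int i;
  for (i = 0; i < 4; i++)
    out[i] = a[i] < b[i] ? t[i] : e[i];
}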
enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
bool ok;
tree scalar_type, vectype;
- gimple pattern_stmt, pattern_def_stmt;
+ gimple pattern_stmt;
+ gimple_seq pattern_def_seq;
if (vect_print_dump_info (REPORT_DETAILS))
{
}
if (is_pattern_stmt_p (stmt_info)
- && (pattern_def_stmt = STMT_VINFO_PATTERN_DEF_STMT (stmt_info))
- && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
- || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt))))
+ && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
{
- /* Analyze def stmt of STMT if it's a pattern stmt. */
- if (vect_print_dump_info (REPORT_DETAILS))
- {
- fprintf (vect_dump, "==> examining pattern def statement: ");
- print_gimple_stmt (vect_dump, pattern_def_stmt, 0, TDF_SLIM);
- }
+ gimple_stmt_iterator si;
- if (!vect_analyze_stmt (pattern_def_stmt, need_to_vectorize, node))
- return false;
- }
+ for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
+ {
+ gimple pattern_def_stmt = gsi_stmt (si);
+ if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
+ || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
+ {
+ /* Analyze def stmt of STMT if it's a pattern stmt. */
+ if (vect_print_dump_info (REPORT_DETAILS))
+ {
+ fprintf (vect_dump, "==> examining pattern def statement: ");
+ print_gimple_stmt (vect_dump, pattern_def_stmt, 0, TDF_SLIM);
+ }
+ if (!vect_analyze_stmt (pattern_def_stmt,
+ need_to_vectorize, node))
+ return false;
+ }
+ }
+ }
switch (STMT_VINFO_DEF_TYPE (stmt_info))
{
if (!bb_vinfo
&& (STMT_VINFO_RELEVANT_P (stmt_info)
|| STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
- ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
- || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
- || vectorizable_conversion (stmt, NULL, NULL, NULL)
+ ok = (vectorizable_conversion (stmt, NULL, NULL, NULL)
|| vectorizable_shift (stmt, NULL, NULL, NULL)
|| vectorizable_operation (stmt, NULL, NULL, NULL)
|| vectorizable_assignment (stmt, NULL, NULL, NULL)
|| vectorizable_load (stmt, NULL, NULL, NULL, NULL)
- || vectorizable_call (stmt, NULL, NULL)
+ || vectorizable_call (stmt, NULL, NULL, NULL)
|| vectorizable_store (stmt, NULL, NULL, NULL)
|| vectorizable_reduction (stmt, NULL, NULL, NULL)
- || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
+ || vectorizable_condition (stmt, NULL, NULL, NULL, 0, NULL));
else
{
if (bb_vinfo)
- ok = (vectorizable_type_promotion (stmt, NULL, NULL, node)
- || vectorizable_type_demotion (stmt, NULL, NULL, node)
- || vectorizable_shift (stmt, NULL, NULL, node)
+ ok = (vectorizable_conversion (stmt, NULL, NULL, node)
+ || vectorizable_shift (stmt, NULL, NULL, node)
|| vectorizable_operation (stmt, NULL, NULL, node)
|| vectorizable_assignment (stmt, NULL, NULL, node)
|| vectorizable_load (stmt, NULL, NULL, node, NULL)
- || vectorizable_store (stmt, NULL, NULL, node));
+ || vectorizable_call (stmt, NULL, NULL, node)
+ || vectorizable_store (stmt, NULL, NULL, node)
+ || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node));
}
if (!ok)
bool
vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
- bool *strided_store, slp_tree slp_node,
+ bool *grouped_store, slp_tree slp_node,
slp_instance slp_node_instance)
{
bool is_store = false;
switch (STMT_VINFO_TYPE (stmt_info))
{
case type_demotion_vec_info_type:
- done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
- gcc_assert (done);
- break;
-
case type_promotion_vec_info_type:
- done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
- gcc_assert (done);
- break;
-
case type_conversion_vec_info_type:
done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
gcc_assert (done);
case store_vec_info_type:
done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
gcc_assert (done);
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
{
/* In case of interleaving, the whole chain is vectorized when the
last store in the chain is reached. Store stmts before the last
one are skipped, and there vec_stmt_info shouldn't be freed
meanwhile. */
- *strided_store = true;
+ *grouped_store = true;
if (STMT_VINFO_VEC_STMT (stmt_info))
is_store = true;
}
break;
case condition_vec_info_type:
- gcc_assert (!slp_node);
- done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
+ done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
gcc_assert (done);
break;
case call_vec_info_type:
- gcc_assert (!slp_node);
- done = vectorizable_call (stmt, gsi, &vec_stmt);
+ done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
stmt = gsi_stmt (*gsi);
break;
while (next)
{
+ stmt_vec_info stmt_info = vinfo_for_stmt (next);
+
+ tmp = GROUP_NEXT_ELEMENT (stmt_info);
+ if (is_pattern_stmt_p (stmt_info))
+ next = STMT_VINFO_RELATED_STMT (stmt_info);
/* Free the attached stmt_vec_info and remove the stmt. */
next_si = gsi_for_stmt (next);
+ unlink_stmt_vdef (next);
gsi_remove (&next_si, true);
- tmp = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
+ release_defs (next);
free_stmt_vec_info (next);
next = tmp;
}
STMT_VINFO_VECTORIZABLE (res) = true;
STMT_VINFO_IN_PATTERN_P (res) = false;
STMT_VINFO_RELATED_STMT (res) = NULL;
- STMT_VINFO_PATTERN_DEF_STMT (res) = NULL;
+ STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
STMT_VINFO_DATA_REF (res) = NULL;
STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
if (!stmt_info)
return;
+ /* Check if this statement has a related "pattern stmt"
+ (introduced by the vectorizer during the pattern recognition
+ pass). Free pattern's stmt_vec_info and def stmt's stmt_vec_info
+ too. */
+ if (STMT_VINFO_IN_PATTERN_P (stmt_info))
+ {
+ stmt_vec_info patt_info
+ = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
+ if (patt_info)
+ {
+ gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
+ if (seq)
+ {
+ gimple_stmt_iterator si;
+ for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
+ free_stmt_vec_info (gsi_stmt (si));
+ }
+ free_stmt_vec_info (STMT_VINFO_RELATED_STMT (stmt_info));
+ }
+ }
+
VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
set_vinfo_for_stmt (stmt, NULL);
free (stmt_info);
if (nbytes == 0)
return NULL_TREE;
+ if (GET_MODE_CLASS (inner_mode) != MODE_INT
+ && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
+ return NULL_TREE;
+
/* We can't build a vector type of elements with alignment bigger than
their size. */
if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
/* For vector types of elements whose mode precision doesn't
     match their type's precision we use an element type of mode
precision. The vectorization routines will have to make sure
- they support the proper result truncation/extension. */
+ they support the proper result truncation/extension.
+ We also make sure to build vector types with INTEGER_TYPE
+ component type only. */
if (INTEGRAL_TYPE_P (scalar_type)
- && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
+ && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
+ || TREE_CODE (scalar_type) != INTEGER_TYPE))
scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
TYPE_UNSIGNED (scalar_type));
- if (GET_MODE_CLASS (inner_mode) != MODE_INT
- && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
- return NULL_TREE;
-
/* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
When the component mode passes the above test simply use a type
corresponding to that mode. The theory is that any use that
Input:
LOOP_VINFO - the vect info of the loop that is being vectorized.
BB_VINFO - the vect info of the basic block that is being vectorized.
- OPERAND - operand of a stmt in the loop or bb.
+ OPERAND - operand of STMT in the loop or bb.
DEF - the defining stmt in case OPERAND is an SSA_NAME.
Returns whether a stmt with OPERAND can be vectorized.
For now, operands defined outside the basic block are not supported. */
bool
-vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
+vect_is_simple_use (tree operand, gimple stmt, loop_vec_info loop_vinfo,
bb_vec_info bb_vinfo, gimple *def_stmt,
tree *def, enum vect_def_type *dt)
{
print_generic_expr (vect_dump, operand, TDF_SLIM);
}
- if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
+ if (CONSTANT_CLASS_P (operand))
{
*dt = vect_constant_def;
return true;
*dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
}
- if (*dt == vect_unknown_def_type)
+ if (*dt == vect_unknown_def_type
+ || (stmt
+ && *dt == vect_double_reduction_def
+ && gimple_code (stmt) != GIMPLE_PHI))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "Unsupported pattern.");
scalar operand. */
bool
-vect_is_simple_use_1 (tree operand, loop_vec_info loop_vinfo,
+vect_is_simple_use_1 (tree operand, gimple stmt, loop_vec_info loop_vinfo,
bb_vec_info bb_vinfo, gimple *def_stmt,
tree *def, enum vect_def_type *dt, tree *vectype)
{
- if (!vect_is_simple_use (operand, loop_vinfo, bb_vinfo, def_stmt, def, dt))
+ if (!vect_is_simple_use (operand, stmt, loop_vinfo, bb_vinfo, def_stmt,
+ def, dt))
return false;
/* Now get a vector type if the def is internal, otherwise supply
tree vectype = vectype_in;
tree wide_vectype = vectype_out;
enum tree_code c1, c2;
+ int i;
+ tree prev_type, intermediate_type;
+ enum machine_mode intermediate_mode, prev_mode;
+ optab optab3, optab4;
+ *multi_step_cvt = 0;
if (loop_info)
vect_loop = LOOP_VINFO_LOOP (loop_info);
/* The result of a vectorized widening operation usually requires two vectors
- (because the widened results do not fit int one vector). The generated
+ (because the widened results do not fit into one vector). The generated
vector results would normally be expected to be generated in the same
order as in the original scalar computation, i.e. if 8 results are
generated in each vector iteration, they are to be organized as follows:
switch (code)
{
case WIDEN_MULT_EXPR:
- if (BYTES_BIG_ENDIAN)
- {
- c1 = VEC_WIDEN_MULT_HI_EXPR;
- c2 = VEC_WIDEN_MULT_LO_EXPR;
- }
- else
- {
- c2 = VEC_WIDEN_MULT_HI_EXPR;
- c1 = VEC_WIDEN_MULT_LO_EXPR;
- }
+ c1 = VEC_WIDEN_MULT_LO_EXPR;
+ c2 = VEC_WIDEN_MULT_HI_EXPR;
break;
case WIDEN_LSHIFT_EXPR:
- if (BYTES_BIG_ENDIAN)
- {
- c1 = VEC_WIDEN_LSHIFT_HI_EXPR;
- c2 = VEC_WIDEN_LSHIFT_LO_EXPR;
- }
- else
- {
- c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
- c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
- }
+ c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
+ c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
break;
CASE_CONVERT:
- if (BYTES_BIG_ENDIAN)
- {
- c1 = VEC_UNPACK_HI_EXPR;
- c2 = VEC_UNPACK_LO_EXPR;
- }
- else
- {
- c2 = VEC_UNPACK_HI_EXPR;
- c1 = VEC_UNPACK_LO_EXPR;
- }
+ c1 = VEC_UNPACK_LO_EXPR;
+ c2 = VEC_UNPACK_HI_EXPR;
break;
case FLOAT_EXPR:
- if (BYTES_BIG_ENDIAN)
- {
- c1 = VEC_UNPACK_FLOAT_HI_EXPR;
- c2 = VEC_UNPACK_FLOAT_LO_EXPR;
- }
- else
- {
- c2 = VEC_UNPACK_FLOAT_HI_EXPR;
- c1 = VEC_UNPACK_FLOAT_LO_EXPR;
- }
+ c1 = VEC_UNPACK_FLOAT_LO_EXPR;
+ c2 = VEC_UNPACK_FLOAT_HI_EXPR;
break;
case FIX_TRUNC_EXPR:
gcc_unreachable ();
}
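+ /* The _LO/_HI variants select the two halves of the input in memory
+ order, so on big-endian targets the first scalar results come from
+ the _HI half; swap the codes to keep the results in order. */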
+ if (BYTES_BIG_ENDIAN)
+ {
+ enum tree_code ctmp = c1;
+ c1 = c2;
+ c2 = ctmp;
+ }
+
if (code == FIX_TRUNC_EXPR)
{
/* The signedness is determined from output operand. */
|| (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
return false;
+ *code1 = c1;
+ *code2 = c2;
+
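+ /* If a single widening step already yields WIDE_VECTYPE, no
+ intermediate types are needed. */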
+ if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
+ && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
+ return true;
+
/* Check if it's a multi-step conversion that can be done using intermediate
types. */
- if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
- || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
- {
- int i;
- tree prev_type = vectype, intermediate_type;
- enum machine_mode intermediate_mode, prev_mode = vec_mode;
- optab optab3, optab4;
- if (!CONVERT_EXPR_CODE_P (code))
- return false;
+ prev_type = vectype;
+ prev_mode = vec_mode;
- *code1 = c1;
- *code2 = c2;
+ if (!CONVERT_EXPR_CODE_P (code))
+ return false;
- /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
- intermediate steps in promotion sequence. We try
- MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do
- not. */
- *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
- for (i = 0; i < 3; i++)
- {
- intermediate_mode = insn_data[icode1].operand[0].mode;
- intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
- TYPE_UNSIGNED (prev_type));
- optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
- optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
-
- if (!optab3 || !optab4
- || ((icode1 = optab_handler (optab1, prev_mode))
- == CODE_FOR_nothing)
- || insn_data[icode1].operand[0].mode != intermediate_mode
- || ((icode2 = optab_handler (optab2, prev_mode))
- == CODE_FOR_nothing)
- || insn_data[icode2].operand[0].mode != intermediate_mode
- || ((icode1 = optab_handler (optab3, intermediate_mode))
- == CODE_FOR_nothing)
- || ((icode2 = optab_handler (optab4, intermediate_mode))
- == CODE_FOR_nothing))
- return false;
-
- VEC_quick_push (tree, *interm_types, intermediate_type);
- (*multi_step_cvt)++;
-
- if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
- && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
- return true;
-
- prev_type = intermediate_type;
- prev_mode = intermediate_mode;
- }
+ /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
+ intermediate steps in the promotion sequence. We try
+ MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
+ not. */
+ *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
+ for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
+ {
+ intermediate_mode = insn_data[icode1].operand[0].mode;
+ intermediate_type
+ = lang_hooks.types.type_for_mode (intermediate_mode,
+ TYPE_UNSIGNED (prev_type));
+ optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
+ optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
+
+ if (!optab3 || !optab4
+ || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
+ || insn_data[icode1].operand[0].mode != intermediate_mode
+ || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
+ || insn_data[icode2].operand[0].mode != intermediate_mode
+ || ((icode1 = optab_handler (optab3, intermediate_mode))
+ == CODE_FOR_nothing)
+ || ((icode2 = optab_handler (optab4, intermediate_mode))
+ == CODE_FOR_nothing))
+ break;
- return false;
+ VEC_quick_push (tree, *interm_types, intermediate_type);
+ (*multi_step_cvt)++;
+
+ if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
+ && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
+ return true;
+
+ prev_type = intermediate_type;
+ prev_mode = intermediate_mode;
}
- *code1 = c1;
- *code2 = c2;
- return true;
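+ /* We ran out of intermediate steps without reaching WIDE_VECTYPE;
+ release the partially filled vector and fail. */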
+ VEC_free (tree, heap, *interm_types);
+ return false;
}
tree vectype = vectype_in;
tree narrow_vectype = vectype_out;
enum tree_code c1;
- tree intermediate_type, prev_type;
+ tree intermediate_type;
+ enum machine_mode intermediate_mode, prev_mode;
int i;
+ bool uns;
+ *multi_step_cvt = 0;
switch (code)
{
CASE_CONVERT:
if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
return false;
+ *code1 = c1;
+
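+ /* If a single narrowing step already yields NARROW_VECTYPE, we are
+ done. */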
+ if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
+ return true;
+
/* Check if it's a multi-step conversion that can be done using intermediate
types. */
- if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
- {
- enum machine_mode intermediate_mode, prev_mode = vec_mode;
-
- *code1 = c1;
- prev_type = vectype;
- /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
- intermediate steps in promotion sequence. We try
- MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do
- not. */
- *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
- for (i = 0; i < 3; i++)
- {
- intermediate_mode = insn_data[icode1].operand[0].mode;
- intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
- TYPE_UNSIGNED (prev_type));
- interm_optab = optab_for_tree_code (c1, intermediate_type,
- optab_default);
- if (!interm_optab
- || ((icode1 = optab_handler (optab1, prev_mode))
- == CODE_FOR_nothing)
- || insn_data[icode1].operand[0].mode != intermediate_mode
- || ((icode1 = optab_handler (interm_optab, intermediate_mode))
- == CODE_FOR_nothing))
- return false;
-
- VEC_quick_push (tree, *interm_types, intermediate_type);
- (*multi_step_cvt)++;
-
- if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
- return true;
-
- prev_type = intermediate_type;
- prev_mode = intermediate_mode;
- }
+ prev_mode = vec_mode;
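+ /* The signedness of the intermediate types follows the output type
+ for FIX_TRUNC_EXPR and the input type otherwise. */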
+ if (code == FIX_TRUNC_EXPR)
+ uns = TYPE_UNSIGNED (vectype_out);
+ else
+ uns = TYPE_UNSIGNED (vectype);
+
+ /* For a multi-step FIX_TRUNC_EXPR, prefer a signed float-to-integer
+ conversion over an unsigned one, as an unsigned FIX_TRUNC_EXPR is
+ often costlier than a signed one. */
+ if (code == FIX_TRUNC_EXPR && uns)
+ {
+ enum insn_code icode2;
+
+ intermediate_type
+ = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
+ interm_optab
+ = optab_for_tree_code (c1, intermediate_type, optab_default);
+ if (interm_optab != NULL
+ && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
+ && insn_data[icode1].operand[0].mode
+ == insn_data[icode2].operand[0].mode)
+ {
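+ /* Commit to the signed variant for the first conversion step. */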
+ uns = false;
+ optab1 = interm_optab;
+ icode1 = icode2;
+ }
+ }
- return false;
+ /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
+ intermediate steps in the narrowing sequence. We try
+ MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */
+ *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
+ for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
+ {
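+ /* The first step uses OPTAB1 (possibly the signed variant chosen
+ above); every following step is an integer pack, so the
+ intermediate types are checked against VEC_PACK_TRUNC_EXPR. */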
+ intermediate_mode = insn_data[icode1].operand[0].mode;
+ intermediate_type
+ = lang_hooks.types.type_for_mode (intermediate_mode, uns);
+ interm_optab
+ = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
+ optab_default);
+ if (!interm_optab
+ || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
+ || insn_data[icode1].operand[0].mode != intermediate_mode
+ || ((icode1 = optab_handler (interm_optab, intermediate_mode))
+ == CODE_FOR_nothing))
+ break;
+
+ VEC_quick_push (tree, *interm_types, intermediate_type);
+ (*multi_step_cvt)++;
+
+ if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
+ return true;
+
+ prev_mode = intermediate_mode;
+ optab1 = interm_optab;
}
- *code1 = c1;
- return true;
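+ /* No sequence of at most MAX_INTERM_CVT_STEPS steps reaches
+ NARROW_VECTYPE; release the vector and fail. */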
+ VEC_free (tree, heap, *interm_types);
+ return false;
}