vect_name = make_ssa_name (vect, new_stmt);
gimple_assign_set_lhs (new_stmt, vect_name);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
- mark_symbols_for_renaming (new_stmt);
return vect_name;
}
new_stmt = gimple_build_assign (array_ref, vect);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
- mark_symbols_for_renaming (new_stmt);
}
/* PTR is a pointer to an array of type TYPE. Return a representation
of *PTR. The memory reference replaces those in FIRST_DR
(and its group). */
static tree
create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
{
- struct ptr_info_def *pi;
tree mem_ref, alias_ptr_type;
alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
/* Arrays have the same alignment as their type. */
- pi = get_ptr_info (ptr);
- pi->align = TYPE_ALIGN_UNIT (type);
- pi->misalign = 0;
+ set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
return mem_ref;
}
stmt_vinfo_set_outside_of_loop_cost (stmt_info, NULL, outside_cost);
}
-/* Function vect_cost_strided_group_size
+/* Function vect_cost_group_size
- For strided load or store, return the group_size only if it is the first
+ For grouped load or store, return the group_size only if it is the first
load or store of a group, else return 1. This ensures that group size is
only returned once per group. */
static int
-vect_cost_strided_group_size (stmt_vec_info stmt_info)
+vect_cost_group_size (stmt_vec_info stmt_info)
{
gimple first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
/* Function vect_model_store_cost
- Models cost for stores. In the case of strided accesses, one access
- has the overhead of the strided access attributed to it. */
+ Models cost for stores. In the case of grouped accesses, one access
+ has the overhead of the grouped access attributed to it. */
void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
if (dt == vect_constant_def || dt == vect_external_def)
outside_cost = vect_get_stmt_cost (scalar_to_vec);
- /* Strided access? */
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
+ /* Grouped access? */
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
{
if (slp_node)
{
else
{
first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
- group_size = vect_cost_strided_group_size (stmt_info);
+ group_size = vect_cost_group_size (stmt_info);
}
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
}
- /* Not a strided access. */
+ /* Not a grouped access. */
else
{
group_size = 1;
}
/* We assume that the cost of a single store-lanes instruction is
- equivalent to the cost of GROUP_SIZE separate stores. If a strided
+ equivalent to the cost of GROUP_SIZE separate stores. If a grouped
access is instead being provided by a permute-and-store operation,
include the cost of the permutes. */
if (!store_lanes_p && group_size > 1)
/* Function vect_model_load_cost
- Models cost for loads. In the case of strided accesses, the last access
- has the overhead of the strided access attributed to it. Since unaligned
+ Models cost for loads. In the case of grouped accesses, the last access
+ has the overhead of the grouped access attributed to it. Since unaligned
accesses are supported for loads, we also account for the costs of the
access scheme chosen. */
if (PURE_SLP_STMT (stmt_info))
return;
- /* Strided accesses? */
+ /* Grouped accesses? */
first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && first_stmt && !slp_node)
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
{
- group_size = vect_cost_strided_group_size (stmt_info);
+ group_size = vect_cost_group_size (stmt_info);
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
}
- /* Not a strided access. */
+ /* Not a grouped access. */
else
{
group_size = 1;
}
/* We assume that the cost of a single load-lanes instruction is
- equivalent to the cost of GROUP_SIZE separate loads. If a strided
+ equivalent to the cost of GROUP_SIZE separate loads. If a grouped
access is instead being provided by a load-and-permute operation,
include the cost of the permutes. */
if (!load_lanes_p && group_size > 1)
}
/* The loads themselves. */
- vect_get_load_cost (first_dr, ncopies,
- ((!STMT_VINFO_STRIDED_ACCESS (stmt_info)) || group_size > 1
- || slp_node),
- &inside_cost, &outside_cost);
+ if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
+ {
+ /* N scalar loads plus gathering them into a vector.
+ ??? scalar_to_vec isn't the cost for that. */
+ inside_cost += (vect_get_stmt_cost (scalar_load) * ncopies
+ * TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)));
+ inside_cost += ncopies * vect_get_stmt_cost (scalar_to_vec);
+ }
+ else
+ vect_get_load_cost (first_dr, ncopies,
+ ((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
+ || group_size > 1 || slp_node),
+ &inside_cost, &outside_cost);
if (vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
/* Unaligned software pipeline has a load of an address, an initial
load, and possibly a mask operation to "prime" the loop. However,
- if this is an access in a group of loads, which provide strided
+ if this is an access in a group of loads, which provide grouped
access, then the above cost should only be considered for one
access in the group. Inside the loop, there is a load op
and a realignment op. */
static void
vect_init_vector_1 (gimple stmt, gimple new_stmt, gimple_stmt_iterator *gsi)
{
-
if (gsi)
vect_finish_stmt_generation (stmt, new_stmt, gsi);
else
/* Function vect_init_vector.
- Insert a new stmt (INIT_STMT) that initializes a new vector variable with
- the vector elements of VECTOR_VAR. Place the initialization at BSI if it
- is not NULL. Otherwise, place the initialization at the loop preheader.
+ Insert a new stmt (INIT_STMT) that initializes a new variable of type
+ TYPE with the value VAL. If TYPE is a vector type and VAL does not have
+ vector type a vector with all elements equal to VAL is created first.
+ Place the initialization at BSI if it is not NULL. Otherwise, place the
+ initialization at the loop preheader.
Return the DEF of INIT_STMT.
It will be used in the vectorization of STMT. */
tree
-vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
- gimple_stmt_iterator *gsi)
+vect_init_vector (gimple stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
tree new_var;
gimple init_stmt;
tree vec_oprnd;
tree new_temp;
- if (TREE_CODE (TREE_TYPE (vector_var)) != VECTOR_TYPE)
+ if (TREE_CODE (type) == VECTOR_TYPE
+ && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
{
- if (!types_compatible_p (TREE_TYPE (vector_type),
- TREE_TYPE (vector_var)))
+ if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
{
- if (CONSTANT_CLASS_P (vector_var))
- vector_var = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (vector_type),
- vector_var);
+ if (CONSTANT_CLASS_P (val))
+ val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (type), val);
else
{
- new_var = create_tmp_reg (TREE_TYPE (vector_type), NULL);
+ new_var = create_tmp_reg (TREE_TYPE (type), NULL);
add_referenced_var (new_var);
init_stmt = gimple_build_assign_with_ops (NOP_EXPR,
- new_var, vector_var,
+ new_var, val,
NULL_TREE);
new_temp = make_ssa_name (new_var, init_stmt);
gimple_assign_set_lhs (init_stmt, new_temp);
vect_init_vector_1 (stmt, init_stmt, gsi);
- vector_var = new_temp;
+ val = new_temp;
}
}
- vector_var = build_vector_from_val (vector_type, vector_var);
+ val = build_vector_from_val (type, val);
}
- new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
+ new_var = vect_get_new_vect_var (type, vect_simple_var, "cst_");
add_referenced_var (new_var);
- init_stmt = gimple_build_assign (new_var, vector_var);
+ init_stmt = gimple_build_assign (new_var, val);
new_temp = make_ssa_name (new_var, init_stmt);
gimple_assign_set_lhs (init_stmt, new_temp);
vect_init_vector_1 (stmt, init_stmt, gsi);
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_call_set_lhs (new_stmt, new_temp);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
- mark_symbols_for_renaming (new_stmt);
VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
new_stmt);
}
new_stmt = gimple_build_call_vec (fndecl, vargs);
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_call_set_lhs (new_stmt, new_temp);
-
vect_finish_stmt_generation (stmt, new_stmt, gsi);
- mark_symbols_for_renaming (new_stmt);
if (j == 0)
STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_call_set_lhs (new_stmt, new_temp);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
- mark_symbols_for_renaming (new_stmt);
VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
new_stmt);
}
new_stmt = gimple_build_call_vec (fndecl, vargs);
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_call_set_lhs (new_stmt, new_temp);
-
vect_finish_stmt_generation (stmt, new_stmt, gsi);
- mark_symbols_for_renaming (new_stmt);
if (j == 0)
STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
int ncopies;
int j;
gimple next_stmt, first_stmt = NULL;
- bool strided_store = false;
+ bool grouped_store = false;
bool store_lanes_p = false;
unsigned int group_size, i;
VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
return false;
}
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
{
- strided_store = true;
+ grouped_store = true;
first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
if (!slp && !PURE_SLP_STMT (stmt_info))
{
group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
if (vect_store_lanes_supported (vectype, group_size))
store_lanes_p = true;
- else if (!vect_strided_store_supported (vectype, group_size))
+ else if (!vect_grouped_store_supported (vectype, group_size))
return false;
}
/** Transform. **/
- if (strided_store)
+ if (grouped_store)
{
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
if (slp)
{
- strided_store = false;
+ grouped_store = false;
/* VEC_NUM is the number of vect stmts to be created for this
group. */
vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
vector stmt by a factor VF/nunits. For more details see documentation in
vect_get_vec_def_for_copy_stmt. */
- /* In case of interleaving (non-unit strided access):
+ /* In case of interleaving (non-unit grouped access):
S1: &base + 2 = x2
S2: &base = x0
used as an input to vect_permute_store_chain(), and OPRNDS as
an input to vect_get_vec_def_for_stmt_copy() for the next copy.
- If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
+ If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
OPRNDS are of size 1. */
next_stmt = first_stmt;
for (i = 0; i < group_size; i++)
DR_CHAIN is then used as an input to vect_permute_store_chain(),
and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
next copy.
- If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
+ If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
OPRNDS are of size 1. */
for (i = 0; i < group_size; i++)
{
new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
gimple_call_set_lhs (new_stmt, data_ref);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
- mark_symbols_for_renaming (new_stmt);
}
else
{
new_stmt = NULL;
- if (strided_store)
+ if (grouped_store)
{
result_chain = VEC_alloc (tree, heap, group_size);
/* Permute. */
next_stmt = first_stmt;
for (i = 0; i < vec_num; i++)
{
- struct ptr_info_def *pi;
+ unsigned align, misalign;
if (i > 0)
/* Bump the vector pointer. */
if (slp)
vec_oprnd = VEC_index (tree, vec_oprnds, i);
- else if (strided_store)
- /* For strided stores vectorized defs are interleaved in
+ else if (grouped_store)
+ /* For grouped stores vectorized defs are interleaved in
vect_permute_store_chain(). */
vec_oprnd = VEC_index (tree, result_chain, i);
data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
build_int_cst (reference_alias_ptr_type
(DR_REF (first_dr)), 0));
- pi = get_ptr_info (dataref_ptr);
- pi->align = TYPE_ALIGN_UNIT (vectype);
+ align = TYPE_ALIGN_UNIT (vectype);
if (aligned_access_p (first_dr))
- pi->misalign = 0;
+ misalign = 0;
else if (DR_MISALIGNMENT (first_dr) == -1)
{
TREE_TYPE (data_ref)
= build_aligned_type (TREE_TYPE (data_ref),
TYPE_ALIGN (elem_type));
- pi->align = TYPE_ALIGN_UNIT (elem_type);
- pi->misalign = 0;
+ align = TYPE_ALIGN_UNIT (elem_type);
+ misalign = 0;
}
else
{
TREE_TYPE (data_ref)
= build_aligned_type (TREE_TYPE (data_ref),
TYPE_ALIGN (elem_type));
- pi->misalign = DR_MISALIGNMENT (first_dr);
+ misalign = DR_MISALIGNMENT (first_dr);
}
+ set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
+ misalign);
/* Arguments are ready. Create the new vector stmt. */
new_stmt = gimple_build_assign (data_ref, vec_oprnd);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
- mark_symbols_for_renaming (new_stmt);
if (slp)
continue;
tree realignment_token = NULL_TREE;
gimple phi = NULL;
VEC(tree,heap) *dr_chain = NULL;
- bool strided_load = false;
+ bool grouped_load = false;
bool load_lanes_p = false;
gimple first_stmt;
bool inv_p;
- bool negative;
+ bool negative = false;
bool compute_in_loop = false;
struct loop *at_loop;
int vec_num;
tree aggr_type;
tree gather_base = NULL_TREE, gather_off = NULL_TREE;
tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
+ tree stride_base, stride_step;
int gather_scale = 1;
enum vect_def_type gather_dt = vect_unknown_def_type;
if (!STMT_VINFO_DATA_REF (stmt_info))
return false;
- negative = tree_int_cst_compare (nested_in_vect_loop
- ? STMT_VINFO_DR_STEP (stmt_info)
- : DR_STEP (dr),
- size_zero_node) < 0;
- if (negative && ncopies > 1)
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "multiple types with negative step.");
- return false;
- }
-
elem_type = TREE_TYPE (vectype);
mode = TYPE_MODE (vectype);
}
/* Check if the load is a part of an interleaving chain. */
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
{
- strided_load = true;
+ grouped_load = true;
/* FORNOW */
gcc_assert (! nested_in_vect_loop && !STMT_VINFO_GATHER_P (stmt_info));
group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
if (vect_load_lanes_supported (vectype, group_size))
load_lanes_p = true;
- else if (!vect_strided_load_supported (vectype, group_size))
+ else if (!vect_grouped_load_supported (vectype, group_size))
return false;
}
}
- if (negative)
- {
- gcc_assert (!strided_load && !STMT_VINFO_GATHER_P (stmt_info));
- alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
- if (alignment_support_scheme != dr_aligned
- && alignment_support_scheme != dr_unaligned_supported)
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "negative step but alignment required.");
- return false;
- }
- if (!perm_mask_for_reverse (vectype))
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "negative step and reversing not supported.");
- return false;
- }
- }
if (STMT_VINFO_GATHER_P (stmt_info))
{
return false;
}
}
+ else if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
+ {
+ if (!vect_check_strided_load (stmt, loop_vinfo,
+ &stride_base, &stride_step))
+ return false;
+ }
+ else
+ {
+ negative = tree_int_cst_compare (nested_in_vect_loop
+ ? STMT_VINFO_DR_STEP (stmt_info)
+ : DR_STEP (dr),
+ size_zero_node) < 0;
+ if (negative && ncopies > 1)
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "multiple types with negative step.");
+ return false;
+ }
+
+ if (negative)
+ {
+ gcc_assert (!grouped_load);
+ alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
+ if (alignment_support_scheme != dr_aligned
+ && alignment_support_scheme != dr_unaligned_supported)
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "negative step but alignment required.");
+ return false;
+ }
+ if (!perm_mask_for_reverse (vectype))
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "negative step and reversing not supported.");
+ return false;
+ }
+ }
+ }
if (!vec_stmt) /* transformation not required. */
{
}
return true;
}
+ else if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
+ {
+ gimple_stmt_iterator incr_gsi;
+ bool insert_after;
+ gimple incr;
+ tree offvar;
+ tree ref = DR_REF (dr);
+ tree ivstep;
+ tree running_off;
+ VEC(constructor_elt, gc) *v = NULL;
+ gimple_seq stmts = NULL;
+
+ gcc_assert (stride_base && stride_step);
+
+ /* For a load with loop-invariant (but other than power-of-2)
+ stride (i.e. not a grouped access) like so:
+
+ for (i = 0; i < n; i += stride)
+ ... = array[i];
+ we generate a new induction variable and new accesses to
+ form a new vector (or vectors, depending on ncopies):
+
+ for (j = 0; ; j += VF*stride)
+ tmp1 = array[j];
+ tmp2 = array[j + stride];
+ ...
+ vectemp = {tmp1, tmp2, ...}
+ */
+
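+ /* The overall IV step is the scalar stride times the vectorization
+ factor, so one vector iteration covers VF scalar elements. */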
+ ivstep = stride_step;
+ ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
+ build_int_cst (TREE_TYPE (ivstep), vf));
+
+ standard_iv_increment_position (loop, &incr_gsi, &insert_after);
+
+ create_iv (stride_base, ivstep, NULL,
+ loop, &incr_gsi, insert_after,
+ &offvar, NULL);
+ incr = gsi_stmt (incr_gsi);
+ set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
+
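+ /* Gimplify the per-element stride and emit any statements it needs
+ on the loop preheader edge. */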
+ stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
+ if (stmts)
+ gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
+
+ prev_stmt_info = NULL;
+ running_off = offvar;
+ for (j = 0; j < ncopies; j++)
+ {
+ tree vec_inv;
+
+ v = VEC_alloc (constructor_elt, gc, nunits);
+ for (i = 0; i < nunits; i++)
+ {
+ tree newref, newoff;
+ gimple incr;
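+ /* Build a scalar load of the element at the current running offset. */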
+ if (TREE_CODE (ref) == ARRAY_REF)
+ newref = build4 (ARRAY_REF, TREE_TYPE (ref),
+ unshare_expr (TREE_OPERAND (ref, 0)),
+ running_off,
+ NULL_TREE, NULL_TREE);
+ else
+ newref = build2 (MEM_REF, TREE_TYPE (ref),
+ running_off,
+ TREE_OPERAND (ref, 1));
+
+ newref = force_gimple_operand_gsi (gsi, newref, true,
+ NULL_TREE, true,
+ GSI_SAME_STMT);
+ CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
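+ /* Advance the running offset by one scalar stride. */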
+ newoff = SSA_NAME_VAR (running_off);
+ if (POINTER_TYPE_P (TREE_TYPE (newoff)))
+ incr = gimple_build_assign_with_ops (POINTER_PLUS_EXPR, newoff,
+ running_off, stride_step);
+ else
+ incr = gimple_build_assign_with_ops (PLUS_EXPR, newoff,
+ running_off, stride_step);
+ newoff = make_ssa_name (newoff, incr);
+ gimple_assign_set_lhs (incr, newoff);
+ vect_finish_stmt_generation (stmt, incr, gsi);
+
+ running_off = newoff;
+ }
+
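+ /* Collect the scalar loads into a vector constructor and materialize
+ it as the vectorized load. */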
+ vec_inv = build_constructor (vectype, v);
+ new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
+ new_stmt = SSA_NAME_DEF_STMT (new_temp);
+
+ if (j == 0)
+ STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
+ else
+ STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
+ prev_stmt_info = vinfo_for_stmt (new_stmt);
+ }
+ return true;
+ }
+
- if (strided_load)
+ if (grouped_load)
{
first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
if (slp
/* VEC_NUM is the number of vect stmts to be created for this group. */
if (slp)
{
- strided_load = false;
+ grouped_load = false;
vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
slp_perm = true;
information we recorded in RELATED_STMT field is used to vectorize
stmt S2. */
- /* In case of interleaving (non-unit strided access):
+ /* In case of interleaving (non-unit grouped access):
S1: x2 = &base + 2
S2: x0 = &base
corresponds to the order of scalar stmts in the interleaving chain - see
the documentation of vect_permute_load_chain()).
The generation of permutation stmts and recording them in
- STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
+ STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
In case of both multiple types and interleaving, the vector loads and
permutation stmts above are created for every copy. The result vector
dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
TYPE_SIZE_UNIT (aggr_type));
- if (strided_load || slp_perm)
+ if (grouped_load || slp_perm)
dr_chain = VEC_alloc (tree, heap, vec_num);
if (load_lanes_p)
new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
gimple_call_set_lhs (new_stmt, vec_array);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
- mark_symbols_for_renaming (new_stmt);
/* Extract each vector into an SSA_NAME. */
for (i = 0; i < vec_num; i++)
}
/* Record the mapping between SSA_NAMEs and statements. */
- vect_record_strided_load_vectors (stmt, dr_chain);
+ vect_record_grouped_load_vectors (stmt, dr_chain);
}
else
{
case dr_aligned:
case dr_unaligned_supported:
{
- struct ptr_info_def *pi;
+ unsigned int align, misalign;
+
data_ref
= build2 (MEM_REF, vectype, dataref_ptr,
build_int_cst (reference_alias_ptr_type
(DR_REF (first_dr)), 0));
- pi = get_ptr_info (dataref_ptr);
- pi->align = TYPE_ALIGN_UNIT (vectype);
+ align = TYPE_ALIGN_UNIT (vectype);
if (alignment_support_scheme == dr_aligned)
{
gcc_assert (aligned_access_p (first_dr));
- pi->misalign = 0;
+ misalign = 0;
}
else if (DR_MISALIGNMENT (first_dr) == -1)
{
TREE_TYPE (data_ref)
= build_aligned_type (TREE_TYPE (data_ref),
TYPE_ALIGN (elem_type));
- pi->align = TYPE_ALIGN_UNIT (elem_type);
- pi->misalign = 0;
+ align = TYPE_ALIGN_UNIT (elem_type);
+ misalign = 0;
}
else
{
TREE_TYPE (data_ref)
= build_aligned_type (TREE_TYPE (data_ref),
TYPE_ALIGN (elem_type));
- pi->misalign = DR_MISALIGNMENT (first_dr);
+ misalign = DR_MISALIGNMENT (first_dr);
}
+ set_ptr_info_alignment (get_ptr_info (dataref_ptr),
+ align, misalign);
break;
}
case dr_explicit_realign:
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_temp);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
- mark_symbols_for_renaming (new_stmt);
/* 3. Handle explicit realignment if necessary/supported.
Create in loop:
if (inv_p && !bb_vinfo)
{
gimple_stmt_iterator gsi2 = *gsi;
- gcc_assert (!strided_load);
+ gcc_assert (!grouped_load);
gsi_next (&gsi2);
new_temp = vect_init_vector (stmt, scalar_dest,
vectype, &gsi2);
}
/* Collect vector loads and later create their permutation in
- vect_transform_strided_load (). */
- if (strided_load || slp_perm)
+ vect_transform_grouped_load (). */
+ if (grouped_load || slp_perm)
VEC_quick_push (tree, dr_chain, new_temp);
/* Store vector loads in the corresponding SLP_NODE. */
}
else
{
- if (strided_load)
+ if (grouped_load)
{
if (!load_lanes_p)
- vect_transform_strided_load (stmt, dr_chain, group_size, gsi);
+ vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
*vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
}
else
bool
vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
- bool *strided_store, slp_tree slp_node,
+ bool *grouped_store, slp_tree slp_node,
slp_instance slp_node_instance)
{
bool is_store = false;
case store_vec_info_type:
done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
gcc_assert (done);
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
{
/* In case of interleaving, the whole chain is vectorized when the
last store in the chain is reached. Store stmts before the last
one are skipped, and there vec_stmt_info shouldn't be freed
meanwhile. */
- *strided_store = true;
+ *grouped_store = true;
if (STMT_VINFO_VEC_STMT (stmt_info))
is_store = true;
}
next = STMT_VINFO_RELATED_STMT (stmt_info);
/* Free the attached stmt_vec_info and remove the stmt. */
next_si = gsi_for_stmt (next);
+ unlink_stmt_vdef (next);
gsi_remove (&next_si, true);
+ release_defs (next);
free_stmt_vec_info (next);
next = tmp;
}