+2018-07-31 Richard Sandiford <richard.sandiford@arm.com>
+
+ * tree-vectorizer.h (_stmt_vec_info::first_element): Change from
+ a gimple stmt to a stmt_vec_info.
+ (_stmt_vec_info::next_element): Likewise.
+ * tree-vect-data-refs.c (vect_update_misalignment_for_peel)
+ (vect_slp_analyze_and_verify_node_alignment)
+ (vect_analyze_group_access_1, vect_analyze_group_access)
+ (vect_small_gap_p, vect_prune_runtime_alias_test_list)
+ (vect_create_data_ref_ptr, vect_record_grouped_load_vectors)
+ (vect_supportable_dr_alignment): Update accordingly.
+ * tree-vect-loop.c (vect_fixup_reduc_chain): Likewise.
+ (vect_fixup_scalar_cycles_with_patterns, vect_is_slp_reduction)
+ (vect_is_simple_reduction, vectorizable_reduction): Likewise.
+ * tree-vect-patterns.c (vect_reassociating_reduction_p): Likewise.
+ * tree-vect-slp.c (vect_build_slp_tree_1)
+ (vect_attempt_slp_rearrange_stmts, vect_supported_load_permutation_p)
+ (vect_split_slp_store_group, vect_analyze_slp_instance)
+ (vect_analyze_slp, vect_transform_slp_perm_load): Likewise.
+ * tree-vect-stmts.c (vect_model_store_cost, vect_model_load_cost)
+ (get_group_load_store_type, get_load_store_type)
+ (get_group_alias_ptr_type, vectorizable_store, vectorizable_load)
+ (vect_transform_stmt, vect_remove_stores): Likewise.
+
2018-07-31 Richard Sandiford <richard.sandiford@arm.com>
* tree-vectorizer.h (vect_dr_stmt): Return a stmt_vec_info rather
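Before the diff proper, a miniature model of the representational change may
help. This is a hedged sketch with stand-in types (gimple and stmt_vec_info_t
here are hypothetical toys, not GCC's real declarations): the point is only
that once first_element and next_element hold stmt_vec_infos, chain walks
compose directly, without the vinfo_for_stmt lookups removed throughout the
patch below.

#include <cstdio>

struct gimple {};		  /* stand-in for the gimple statement */

struct stmt_vec_info_t
{
  gimple *stmt;			  /* the underlying statement */
  stmt_vec_info_t *first_element; /* was: gimple *first_element */
  stmt_vec_info_t *next_element;  /* was: gimple *next_element */
};
typedef stmt_vec_info_t *stmt_vec_info;

/* After the change, a group walk needs no vinfo_for_stmt lookup:
   the next link is already a stmt_vec_info.  */
static unsigned
group_size (stmt_vec_info first)
{
  unsigned n = 0;
  for (stmt_vec_info s = first; s; s = s->next_element)
    n++;
  return n;
}

int
main ()
{
  gimple g1, g2;
  stmt_vec_info_t b = { &g2, nullptr, nullptr };
  stmt_vec_info_t a = { &g1, nullptr, &b };
  a.first_element = b.first_element = &a;
  printf ("group size: %u\n", group_size (&a));	/* prints 2 */
  return 0;
}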
/* For interleaved data accesses the step in the loop must be multiplied by
the size of the interleaving group. */
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
- dr_size *= DR_GROUP_SIZE (vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (stmt_info)));
+ dr_size *= DR_GROUP_SIZE (DR_GROUP_FIRST_ELEMENT (stmt_info));
if (STMT_VINFO_GROUPED_ACCESS (peel_stmt_info))
dr_peel_size *= DR_GROUP_SIZE (peel_stmt_info);
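The comment above is the entire rationale; as a hypothetical worked example
(all numbers invented, not taken from the function), with 4-byte elements
interleaved in a group of 4, one peeled scalar iteration advances each
data-ref by 16 bytes, so the misalignment bookkeeping must scale by
DR_GROUP_SIZE rather than use the element size alone:

#include <cstdio>

int
main ()
{
  unsigned element_size = 4, group_size = 4, npeel = 3;
  unsigned dr_size = element_size * group_size;	 /* 16 bytes per iteration */
  printf ("bytes advanced by peeling: %u\n", npeel * dr_size);	/* 48 */
  return 0;
}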
the node is permuted in which case we start from the first
element in the group. */
stmt_vec_info first_stmt_info = SLP_TREE_SCALAR_STMTS (node)[0];
- gimple *first_stmt = first_stmt_info->stmt;
data_reference_p first_dr = STMT_VINFO_DATA_REF (first_stmt_info);
if (SLP_TREE_LOAD_PERMUTATION (node).exists ())
- first_stmt = DR_GROUP_FIRST_ELEMENT (first_stmt_info);
+ first_stmt_info = DR_GROUP_FIRST_ELEMENT (first_stmt_info);
- data_reference_p dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
+ data_reference_p dr = STMT_VINFO_DATA_REF (first_stmt_info);
vect_compute_data_ref_alignment (dr);
/* For creating the data-ref pointer we need alignment of the
first element anyway. */
if (DR_GROUP_FIRST_ELEMENT (stmt_info) == stmt_info)
{
/* First stmt in the interleaving chain. Check the chain. */
- gimple *next = DR_GROUP_NEXT_ELEMENT (stmt_info);
+ stmt_vec_info next = DR_GROUP_NEXT_ELEMENT (stmt_info);
struct data_reference *data_ref = dr;
unsigned int count = 1;
tree prev_init = DR_INIT (data_ref);
- gimple *prev = stmt_info;
+ stmt_vec_info prev = stmt_info;
HOST_WIDE_INT diff, gaps = 0;
/* By construction, all group members have INTEGER_CST DR_INITs. */
stmt, and the rest get their vectorized loads from the first
one. */
if (!tree_int_cst_compare (DR_INIT (data_ref),
- DR_INIT (STMT_VINFO_DATA_REF (
- vinfo_for_stmt (next)))))
+ DR_INIT (STMT_VINFO_DATA_REF (next))))
{
if (DR_IS_WRITE (data_ref))
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Two or more load stmts share the same dr.\n");
- /* For load use the same data-ref load. */
- DR_GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;
+ /* For loads, use the same data-ref load. */
+ DR_GROUP_SAME_DR_STMT (next) = prev;
- prev = next;
- next = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
- continue;
+ prev = next;
+ next = DR_GROUP_NEXT_ELEMENT (next);
+ continue;
}
- prev = next;
- data_ref = STMT_VINFO_DATA_REF (vinfo_for_stmt (next));
+ prev = next;
+ data_ref = STMT_VINFO_DATA_REF (next);
/* All group members have the same STEP by construction. */
gcc_checking_assert (operand_equal_p (DR_STEP (data_ref), step, 0));
/* Store the gap from the previous member of the group. If there is no
gap in the access, DR_GROUP_GAP is always 1. */
- DR_GROUP_GAP (vinfo_for_stmt (next)) = diff;
+ DR_GROUP_GAP (next) = diff;
- prev_init = DR_INIT (data_ref);
- next = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
- /* Count the number of data-refs in the chain. */
- count++;
+ prev_init = DR_INIT (data_ref);
+ next = DR_GROUP_NEXT_ELEMENT (next);
+ /* Count the number of data-refs in the chain. */
+ count++;
}
if (groupsize == 0)
if (!vect_analyze_group_access_1 (dr))
{
/* Dissolve the group if present. */
- gimple *next;
- gimple *stmt = DR_GROUP_FIRST_ELEMENT (vect_dr_stmt (dr));
- while (stmt)
+ stmt_vec_info stmt_info = DR_GROUP_FIRST_ELEMENT (vect_dr_stmt (dr));
+ while (stmt_info)
{
- stmt_vec_info vinfo = vinfo_for_stmt (stmt);
- next = DR_GROUP_NEXT_ELEMENT (vinfo);
- DR_GROUP_FIRST_ELEMENT (vinfo) = NULL;
- DR_GROUP_NEXT_ELEMENT (vinfo) = NULL;
- stmt = next;
+ stmt_vec_info next = DR_GROUP_NEXT_ELEMENT (stmt_info);
+ DR_GROUP_FIRST_ELEMENT (stmt_info) = NULL;
+ DR_GROUP_NEXT_ELEMENT (stmt_info) = NULL;
+ stmt_info = next;
}
return false;
}
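The two loops above walk and, on failure, dissolve an intrusive group list. A
self-contained model of both, using a hypothetical member type in place of
stmt_vec_info (gap semantics follow the comment above: a gap of 1 means a
contiguous access):

#include <cstdio>

struct member
{
  long init;	/* stand-in for DR_INIT, in scalar elements */
  long gap;	/* stand-in for DR_GROUP_GAP; 1 means no gap */
  member *first, *next;
};

static unsigned
analyze_group (member *first)
{
  unsigned count = 1;
  long prev_init = first->init;
  for (member *m = first->next; m; m = m->next)
    {
      /* Store the gap from the previous member, as in the hunk above.  */
      m->gap = m->init - prev_init;
      prev_init = m->init;
      count++;
    }
  return count;
}

static void
dissolve_group (member *first)
{
  while (first)
    {
      member *next = first->next;
      first->first = nullptr;
      first->next = nullptr;
      first = next;
    }
}

int
main ()
{
  member c = { 3, 0, nullptr, nullptr };
  member b = { 1, 0, nullptr, &c };
  member a = { 0, 0, nullptr, &b };
  a.first = b.first = c.first = &a;
  printf ("members: %u, gap before last: %ld\n",
	  analyze_group (&a), c.gap);	/* members: 3, gap: 2 */
  dissolve_group (&a);
  return 0;
}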
HOST_WIDE_INT count
= estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
if (DR_GROUP_FIRST_ELEMENT (stmt_info))
- count *= DR_GROUP_SIZE (vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (stmt_info)));
+ count *= DR_GROUP_SIZE (DR_GROUP_FIRST_ELEMENT (stmt_info));
return estimated_poly_value (gap) <= count * vect_get_scalar_dr_size (dr);
}
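A worked instance of the test above, with invented numbers: an estimated VF
of 4 and a group size of 2 give count = 8, so with 4-byte scalar accesses any
gap of at most 32 bytes is treated as small by this heuristic:

#include <cstdio>

int
main ()
{
  long vf = 4, group_size = 2, scalar_dr_size = 4, gap = 24;
  long count = vf * group_size;
  printf ("small gap? %s\n",
	  gap <= count * scalar_dr_size ? "yes" : "no");  /* yes: 24 <= 32 */
  return 0;
}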
int comp_res;
poly_uint64 lower_bound;
struct data_reference *dr_a, *dr_b;
- gimple *dr_group_first_a, *dr_group_first_b;
tree segment_length_a, segment_length_b;
unsigned HOST_WIDE_INT access_size_a, access_size_b;
unsigned int align_a, align_b;
- gimple *stmt_a, *stmt_b;
/* Ignore the alias if the VF we chose ended up being no greater
than the dependence distance. */
}
dr_a = DDR_A (ddr);
- stmt_a = vect_dr_stmt (DDR_A (ddr));
+ stmt_vec_info stmt_info_a = vect_dr_stmt (DDR_A (ddr));
dr_b = DDR_B (ddr);
- stmt_b = vect_dr_stmt (DDR_B (ddr));
+ stmt_vec_info stmt_info_b = vect_dr_stmt (DDR_B (ddr));
/* Skip the pair if inter-iteration dependencies are irrelevant
and intra-iteration dependencies are guaranteed to be honored. */
if (ignore_step_p
- && (vect_preserves_scalar_order_p (stmt_a, stmt_b)
+ && (vect_preserves_scalar_order_p (stmt_info_a, stmt_info_b)
|| vectorizable_with_step_bound_p (dr_a, dr_b, &lower_bound)))
{
if (dump_enabled_p ())
continue;
}
- dr_group_first_a = DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_a));
+ stmt_vec_info dr_group_first_a = DR_GROUP_FIRST_ELEMENT (stmt_info_a);
if (dr_group_first_a)
{
- stmt_a = dr_group_first_a;
- dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a));
+ stmt_info_a = dr_group_first_a;
+ dr_a = STMT_VINFO_DATA_REF (stmt_info_a);
}
- dr_group_first_b = DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_b));
+ stmt_vec_info dr_group_first_b = DR_GROUP_FIRST_ELEMENT (stmt_info_b);
if (dr_group_first_b)
{
- stmt_b = dr_group_first_b;
- dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_b));
+ stmt_info_b = dr_group_first_b;
+ dr_b = STMT_VINFO_DATA_REF (stmt_info_b);
}
if (ignore_step_p)
/* Likewise for any of the data references in the stmt group. */
else if (DR_GROUP_SIZE (stmt_info) > 1)
{
- gimple *orig_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
+ stmt_vec_info sinfo = DR_GROUP_FIRST_ELEMENT (stmt_info);
do
{
- stmt_vec_info sinfo = vinfo_for_stmt (orig_stmt);
struct data_reference *sdr = STMT_VINFO_DATA_REF (sinfo);
if (!alias_sets_conflict_p (get_alias_set (aggr_type),
get_alias_set (DR_REF (sdr))))
need_ref_all = true;
break;
}
- orig_stmt = DR_GROUP_NEXT_ELEMENT (sinfo);
+ sinfo = DR_GROUP_NEXT_ELEMENT (sinfo);
}
- while (orig_stmt);
+ while (sinfo);
}
aggr_ptr_type = build_pointer_type_for_mode (aggr_type, ptr_mode,
need_ref_all);
{
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
vec_info *vinfo = stmt_info->vinfo;
- gimple *first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
- gimple *next_stmt;
+ stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
unsigned int i, gap_count;
tree tmp_data_ref;
/* Put a permuted data-ref in the VECTORIZED_STMT field.
Since we scan the chain starting from its first node, their order
corresponds to the order of data-refs in RESULT_CHAIN. */
- next_stmt = first_stmt;
+ stmt_vec_info next_stmt_info = first_stmt_info;
gap_count = 1;
FOR_EACH_VEC_ELT (result_chain, i, tmp_data_ref)
{
- if (!next_stmt)
+ if (!next_stmt_info)
break;
/* Skip the gaps. Loads created for the gaps will be removed by dead
DR_GROUP_GAP is the number of steps in elements from the previous
access (if there is no gap DR_GROUP_GAP is 1). We skip loads that
correspond to the gaps. */
- if (next_stmt != first_stmt
- && gap_count < DR_GROUP_GAP (vinfo_for_stmt (next_stmt)))
+ if (next_stmt_info != first_stmt_info
+ && gap_count < DR_GROUP_GAP (next_stmt_info))
{
gap_count++;
continue;
}
- while (next_stmt)
+ while (next_stmt_info)
{
stmt_vec_info new_stmt_info = vinfo->lookup_def (tmp_data_ref);
/* We assume that if VEC_STMT is not NULL, this is a case of multiple
copies, and we put the new vector statement in the first available
RELATED_STMT. */
- if (!STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)))
- STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt_info;
+ if (!STMT_VINFO_VEC_STMT (next_stmt_info))
+ STMT_VINFO_VEC_STMT (next_stmt_info) = new_stmt_info;
else
{
- if (!DR_GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
+ if (!DR_GROUP_SAME_DR_STMT (next_stmt_info))
{
stmt_vec_info prev_stmt_info
- = STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
+ = STMT_VINFO_VEC_STMT (next_stmt_info);
stmt_vec_info rel_stmt_info
= STMT_VINFO_RELATED_STMT (prev_stmt_info);
while (rel_stmt_info)
}
}
- next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+ next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
gap_count = 1;
- /* If NEXT_STMT accesses the same DR as the previous statement,
+ /* If NEXT_STMT_INFO accesses the same DR as the previous statement,
put the same TMP_DATA_REF as its vectorized statement; otherwise
get the next data-ref from RESULT_CHAIN. */
- if (!next_stmt || !DR_GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
+ if (!next_stmt_info || !DR_GROUP_SAME_DR_STMT (next_stmt_info))
break;
}
}
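The distribution loop above hands each vectorized load in RESULT_CHAIN to the
next group member, consuming but not recording the loads that correspond to
gaps. A hedged standalone model (hypothetical member type, and omitting the
same-DR sharing case):

#include <cstdio>
#include <vector>

struct member
{
  long gap;	/* stand-in for DR_GROUP_GAP; 1 means no gap */
  int vec_stmt;	/* stand-in for STMT_VINFO_VEC_STMT */
  member *next;
};

static void
record_vectors (member *first, const std::vector<int> &result_chain)
{
  member *m = first;
  long gap_count = 1;
  for (int vec : result_chain)
    {
      if (!m)
	break;
      /* A member whose gap exceeds the running count corresponds to a
	 gap load; that load is consumed but recorded nowhere, and dead
	 code elimination removes it later.  */
      if (m != first && gap_count < m->gap)
	{
	  gap_count++;
	  continue;
	}
      m->vec_stmt = vec;
      m = m->next;
      gap_count = 1;
    }
}

int
main ()
{
  member c = { 2, 0, nullptr };	/* gap of 2: one dead load before it */
  member b = { 1, 0, &c };
  member a = { 1, 0, &b };
  record_vectors (&a, { 10, 20, 30, 40 });
  printf ("%d %d %d\n", a.vec_stmt, b.vec_stmt, c.vec_stmt); /* 10 20 40 */
  return 0;
}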
if (loop_vinfo
&& STMT_SLP_TYPE (stmt_info)
&& !multiple_p (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
- * DR_GROUP_SIZE (vinfo_for_stmt
- (DR_GROUP_FIRST_ELEMENT (stmt_info))),
+ * (DR_GROUP_SIZE
+ (DR_GROUP_FIRST_ELEMENT (stmt_info))),
TYPE_VECTOR_SUBPARTS (vectype)))
;
else if (!loop_vinfo
REDUC_GROUP_SIZE (firstp) = REDUC_GROUP_SIZE (stmt_info);
do
{
- stmtp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
+ stmtp = STMT_VINFO_RELATED_STMT (stmt_info);
REDUC_GROUP_FIRST_ELEMENT (stmtp) = firstp;
- stmt = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
- if (stmt)
+ stmt_info = REDUC_GROUP_NEXT_ELEMENT (stmt_info);
+ if (stmt_info)
REDUC_GROUP_NEXT_ELEMENT (stmtp)
- = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
+ = STMT_VINFO_RELATED_STMT (stmt_info);
}
- while (stmt);
+ while (stmt_info);
STMT_VINFO_DEF_TYPE (stmtp) = vect_reduction_def;
}
FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first)
if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (first)))
{
- gimple *next = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
+ stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
while (next)
{
- if (! STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next)))
+ if (! STMT_VINFO_IN_PATTERN_P (next))
break;
- next = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
+ next = REDUC_GROUP_NEXT_ELEMENT (next);
}
/* If not all stmts in the chain are patterns, try to handle
the chain without patterns. */
vinfo = SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0];
if (! STMT_VINFO_GROUPED_ACCESS (vinfo))
continue;
- vinfo = vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (vinfo));
+ vinfo = DR_GROUP_FIRST_ELEMENT (vinfo);
unsigned int size = DR_GROUP_SIZE (vinfo);
tree vectype = STMT_VINFO_VECTYPE (vinfo);
if (! vect_store_lanes_supported (vectype, size, false)
FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node)
{
vinfo = SLP_TREE_SCALAR_STMTS (node)[0];
- vinfo = vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (vinfo));
+ vinfo = DR_GROUP_FIRST_ELEMENT (vinfo);
bool single_element_p = !DR_GROUP_NEXT_ELEMENT (vinfo);
size = DR_GROUP_SIZE (vinfo);
vectype = STMT_VINFO_VECTYPE (vinfo);
struct loop *loop = (gimple_bb (phi))->loop_father;
struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
enum tree_code code;
- gimple *loop_use_stmt = NULL, *first, *next_stmt;
+ gimple *loop_use_stmt = NULL;
stmt_vec_info use_stmt_info, current_stmt_info = NULL;
tree lhs;
imm_use_iterator imm_iter;
use_stmt_info = loop_info->lookup_stmt (loop_use_stmt);
if (current_stmt_info)
{
- REDUC_GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
+ REDUC_GROUP_NEXT_ELEMENT (current_stmt_info) = use_stmt_info;
REDUC_GROUP_FIRST_ELEMENT (use_stmt_info)
= REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
}
else
- REDUC_GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;
+ REDUC_GROUP_FIRST_ELEMENT (use_stmt_info) = use_stmt_info;
lhs = gimple_assign_lhs (loop_use_stmt);
current_stmt_info = use_stmt_info;
/* Swap the operands, if needed, to make the reduction operand be the second
operand. */
lhs = PHI_RESULT (phi);
- next_stmt = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
- while (next_stmt)
+ stmt_vec_info next_stmt_info = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
+ while (next_stmt_info)
{
+ gassign *next_stmt = as_a <gassign *> (next_stmt_info->stmt);
if (gimple_assign_rhs2 (next_stmt) == lhs)
{
tree op = gimple_assign_rhs1 (next_stmt);
&& vect_valid_reduction_input_p (def_stmt_info))
{
lhs = gimple_assign_lhs (next_stmt);
- next_stmt = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+ next_stmt_info = REDUC_GROUP_NEXT_ELEMENT (next_stmt_info);
continue;
}
}
lhs = gimple_assign_lhs (next_stmt);
- next_stmt = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+ next_stmt_info = REDUC_GROUP_NEXT_ELEMENT (next_stmt_info);
}
/* Save the chain for further analysis in SLP detection. */
- first = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
- LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first);
- REDUC_GROUP_SIZE (vinfo_for_stmt (first)) = size;
+ stmt_vec_info first_stmt_info
+ = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
+ LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first_stmt_info);
+ REDUC_GROUP_SIZE (first_stmt_info) = size;
return true;
}
}
/* Dissolve group eventually half-built by vect_is_slp_reduction. */
- gimple *first = REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (def_stmt));
+ stmt_vec_info first = REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (def_stmt));
while (first)
{
- gimple *next = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
- REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (first)) = NULL;
- REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first)) = NULL;
+ stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (first);
+ REDUC_GROUP_FIRST_ELEMENT (first) = NULL;
+ REDUC_GROUP_NEXT_ELEMENT (first) = NULL;
first = next;
}
}
if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
- gcc_assert (slp_node && REDUC_GROUP_FIRST_ELEMENT (stmt_info) == stmt);
+ gcc_assert (slp_node
+ && REDUC_GROUP_FIRST_ELEMENT (stmt_info) == stmt_info);
if (gimple_code (stmt) == GIMPLE_PHI)
{
tree neutral_op = NULL_TREE;
if (slp_node)
neutral_op = neutral_op_for_slp_reduction
- (slp_node_instance->reduc_phis, code,
- REDUC_GROUP_FIRST_ELEMENT (stmt_info) != NULL);
+ (slp_node_instance->reduc_phis, code,
+ REDUC_GROUP_FIRST_ELEMENT (stmt_info) != NULL_STMT_VEC_INFO);
if (double_reduc && reduction_type == FOLD_LEFT_REDUCTION)
{
{
return (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
? STMT_VINFO_REDUC_TYPE (stmt_vinfo) != FOLD_LEFT_REDUCTION
- : REDUC_GROUP_FIRST_ELEMENT (stmt_vinfo) != NULL);
+ : REDUC_GROUP_FIRST_ELEMENT (stmt_vinfo) != NULL_STMT_VEC_INFO);
}
/* As above, but also require it to have code CODE and to be a reduction
int icode;
machine_mode optab_op2_mode;
machine_mode vec_mode;
- gimple *first_load = NULL, *prev_first_load = NULL;
+ stmt_vec_info first_load = NULL, prev_first_load = NULL;
/* For every stmt in NODE find its def stmt/s. */
stmt_vec_info stmt_info;
FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
{
stmt_vec_info first_stmt_info = SLP_TREE_SCALAR_STMTS (node)[0];
- first_stmt_info
- = vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (first_stmt_info));
+ first_stmt_info = DR_GROUP_FIRST_ELEMENT (first_stmt_info);
/* But we have to keep those permutations that are required because
of handling of gaps. */
if (known_eq (unrolling_factor, 1U)
unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
unsigned int i, j, k, next;
slp_tree node;
- gimple *next_load;
if (dump_enabled_p ())
{
if (!SLP_TREE_LOAD_PERMUTATION (node).exists ())
continue;
bool subchain_p = true;
- next_load = NULL;
+ stmt_vec_info next_load_info = NULL;
stmt_vec_info load_info;
FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, load_info)
{
if (j != 0
- && (next_load != load_info
+ && (next_load_info != load_info
|| DR_GROUP_GAP (load_info) != 1))
{
subchain_p = false;
break;
}
- next_load = DR_GROUP_NEXT_ELEMENT (load_info);
+ next_load_info = DR_GROUP_NEXT_ELEMENT (load_info);
}
if (subchain_p)
SLP_TREE_LOAD_PERMUTATION (node).release ();
else
{
stmt_vec_info group_info = SLP_TREE_SCALAR_STMTS (node)[0];
- group_info
- = vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (group_info));
+ group_info = DR_GROUP_FIRST_ELEMENT (group_info);
unsigned HOST_WIDE_INT nunits;
unsigned k, maxk = 0;
FOR_EACH_VEC_ELT (SLP_TREE_LOAD_PERMUTATION (node), j, k)
vect_split_slp_store_group (gimple *first_stmt, unsigned group1_size)
{
stmt_vec_info first_vinfo = vinfo_for_stmt (first_stmt);
- gcc_assert (DR_GROUP_FIRST_ELEMENT (first_vinfo) == first_stmt);
+ gcc_assert (DR_GROUP_FIRST_ELEMENT (first_vinfo) == first_vinfo);
gcc_assert (group1_size > 0);
int group2_size = DR_GROUP_SIZE (first_vinfo) - group1_size;
gcc_assert (group2_size > 0);
DR_GROUP_SIZE (first_vinfo) = group1_size;
- gimple *stmt = first_stmt;
+ stmt_vec_info stmt_info = first_vinfo;
for (unsigned i = group1_size; i > 1; i--)
{
- stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
- gcc_assert (DR_GROUP_GAP (vinfo_for_stmt (stmt)) == 1);
+ stmt_info = DR_GROUP_NEXT_ELEMENT (stmt_info);
+ gcc_assert (DR_GROUP_GAP (stmt_info) == 1);
}
/* STMT is now the last element of the first group. */
- gimple *group2 = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
- DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)) = 0;
+ stmt_vec_info group2 = DR_GROUP_NEXT_ELEMENT (stmt_info);
+ DR_GROUP_NEXT_ELEMENT (stmt_info) = 0;
- DR_GROUP_SIZE (vinfo_for_stmt (group2)) = group2_size;
- for (stmt = group2; stmt; stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)))
+ DR_GROUP_SIZE (group2) = group2_size;
+ for (stmt_info = group2; stmt_info;
+ stmt_info = DR_GROUP_NEXT_ELEMENT (stmt_info))
{
- DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = group2;
- gcc_assert (DR_GROUP_GAP (vinfo_for_stmt (stmt)) == 1);
+ DR_GROUP_FIRST_ELEMENT (stmt_info) = group2;
+ gcc_assert (DR_GROUP_GAP (stmt_info) == 1);
}
/* For the second group, the DR_GROUP_GAP is that before the original group,
plus skipping over the first vector. */
- DR_GROUP_GAP (vinfo_for_stmt (group2))
- = DR_GROUP_GAP (first_vinfo) + group1_size;
+ DR_GROUP_GAP (group2) = DR_GROUP_GAP (first_vinfo) + group1_size;
/* DR_GROUP_GAP of the first group now has to skip over the second group too. */
DR_GROUP_GAP (first_vinfo) += group2_size;
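The splitting logic above is easiest to check on a toy: the sketch below
(hypothetical member type, not GCC's) splits a group of four stores into two
groups of two and applies the same two gap adjustments as
vect_split_slp_store_group:

#include <cstdio>

struct member
{
  unsigned size;	/* stand-in for DR_GROUP_SIZE (leader only) */
  unsigned gap;		/* stand-in for DR_GROUP_GAP */
  member *first, *next;
};

static member *
split_group (member *first, unsigned group1_size)
{
  unsigned group2_size = first->size - group1_size;
  first->size = group1_size;

  member *m = first;
  for (unsigned i = group1_size; i > 1; i--)
    m = m->next;		/* last member of the first subgroup */
  member *group2 = m->next;
  m->next = nullptr;

  group2->size = group2_size;
  for (member *s = group2; s; s = s->next)
    s->first = group2;
  /* The second group's gap also skips over the first subgroup, and
     the first group's gap grows to skip over the second.  */
  group2->gap = first->gap + group1_size;
  first->gap += group2_size;
  return group2;
}

int
main ()
{
  member d = { 0, 1, nullptr, nullptr };
  member c = { 0, 1, nullptr, &d };
  member b = { 0, 1, nullptr, &c };
  member a = { 4, 0, nullptr, &b };
  a.first = b.first = c.first = d.first = &a;
  member *g2 = split_group (&a, 2);
  printf ("sizes %u/%u, gaps %u/%u\n",
	  a.size, g2->size, a.gap, g2->gap);	/* sizes 2/2, gaps 2/2 */
  return 0;
}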
slp_tree node;
unsigned int group_size;
tree vectype, scalar_type = NULL_TREE;
- gimple *next;
- stmt_vec_info next_info;
unsigned int i;
vec<slp_tree> loads;
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
/* Create a node (a root of the SLP tree) for the packed grouped stores. */
scalar_stmts.create (group_size);
- next = stmt;
+ stmt_vec_info next_info = stmt_info;
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
{
/* Collect the stores and store them in SLP_TREE_SCALAR_STMTS. */
- while (next)
+ while (next_info)
{
- next_info = vinfo_for_stmt (next);
if (STMT_VINFO_IN_PATTERN_P (next_info)
&& STMT_VINFO_RELATED_STMT (next_info))
scalar_stmts.safe_push (STMT_VINFO_RELATED_STMT (next_info));
else
scalar_stmts.safe_push (next_info);
- next = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
+ next_info = DR_GROUP_NEXT_ELEMENT (next_info);
}
}
else if (!dr && REDUC_GROUP_FIRST_ELEMENT (stmt_info))
{
/* Collect the reduction stmts and store them in
SLP_TREE_SCALAR_STMTS. */
- while (next)
+ while (next_info)
{
- next_info = vinfo_for_stmt (next);
if (STMT_VINFO_IN_PATTERN_P (next_info)
&& STMT_VINFO_RELATED_STMT (next_info))
scalar_stmts.safe_push (STMT_VINFO_RELATED_STMT (next_info));
else
scalar_stmts.safe_push (next_info);
- next = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
+ next_info = REDUC_GROUP_NEXT_ELEMENT (next_info);
}
/* Mark the first element of the reduction chain as reduction to properly
transform the node. In the reduction analysis phase only the last
vec<unsigned> load_permutation;
int j;
stmt_vec_info load_info;
- gimple *first_stmt;
bool this_load_permuted = false;
load_permutation.create (group_size);
- first_stmt = DR_GROUP_FIRST_ELEMENT
+ stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT
(SLP_TREE_SCALAR_STMTS (load_node)[0]);
FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load_info)
{
int load_place = vect_get_place_in_interleaving_chain
- (load_info, first_stmt);
+ (load_info, first_stmt_info);
gcc_assert (load_place != -1);
if (load_place != j)
this_load_permuted = true;
a gap either because the group is larger than the SLP
group-size or because there is a gap between the groups. */
&& (known_eq (unrolling_factor, 1U)
- || (group_size == DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
- && DR_GROUP_GAP (vinfo_for_stmt (first_stmt)) == 0)))
+ || (group_size == DR_GROUP_SIZE (first_stmt_info)
+ && DR_GROUP_GAP (first_stmt_info) == 0)))
{
load_permutation.release ();
continue;
slp_tree load_node;
FOR_EACH_VEC_ELT (loads, i, load_node)
{
- gimple *first_stmt = DR_GROUP_FIRST_ELEMENT
+ stmt_vec_info stmt_vinfo = DR_GROUP_FIRST_ELEMENT
(SLP_TREE_SCALAR_STMTS (load_node)[0]);
- stmt_vec_info stmt_vinfo = vinfo_for_stmt (first_stmt);
- /* Use SLP for strided accesses (or if we
- can't load-lanes). */
+ /* Use SLP for strided accesses (or if we can't load-lanes). */
if (STMT_VINFO_STRIDED_P (stmt_vinfo)
|| ! vect_load_lanes_supported
(STMT_VINFO_VECTYPE (stmt_vinfo),
max_tree_size))
{
/* Dissolve reduction chain group. */
- gimple *next, *stmt = first_element;
+ gimple *stmt = first_element;
while (stmt)
{
stmt_vec_info vinfo = vinfo_for_stmt (stmt);
- next = REDUC_GROUP_NEXT_ELEMENT (vinfo);
+ stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (vinfo);
REDUC_GROUP_FIRST_ELEMENT (vinfo) = NULL;
REDUC_GROUP_NEXT_ELEMENT (vinfo) = NULL;
stmt = next;
if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
return false;
- stmt_info = vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (stmt_info));
+ stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
mode = TYPE_MODE (vectype);
stmt_vector_for_cost *cost_vec)
{
unsigned int inside_cost = 0, prologue_cost = 0;
- gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
+ stmt_vec_info first_stmt_info = stmt_info;
bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
/* ??? Somehow we need to fix this at the callers. */
/* Grouped stores update all elements in the group at once,
so we want the DR for the first statement. */
if (!slp_node && grouped_access_p)
- first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
+ first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
/* True if we should include any once-per-group costs as well as
the cost of the statement itself. For SLP we only get called
once per group anyhow. */
- bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));
+ bool first_stmt_p = (first_stmt_info == stmt_info);
/* We assume that the cost of a single store-lanes instruction is
equivalent to the cost of DR_GROUP_SIZE separate stores. If a grouped
{
/* Uses a high and low interleave or shuffle operations for each
needed permute. */
- int group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
+ int group_size = DR_GROUP_SIZE (first_stmt_info);
int nstmts = ncopies * ceil_log2 (group_size) * group_size;
inside_cost = record_stmt_cost (cost_vec, nstmts, vec_perm,
stmt_info, 0, vect_body);
slp_tree slp_node,
stmt_vector_for_cost *cost_vec)
{
- gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
unsigned int inside_cost = 0, prologue_cost = 0;
bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
{
/* If the load is permuted then the alignment is determined by
the first group element not by the first scalar stmt DR. */
- gimple *stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
/* Record the cost for the permutation. */
unsigned n_perms;
unsigned assumed_nunits
- = vect_nunits_for_cost (STMT_VINFO_VECTYPE (stmt_info));
+ = vect_nunits_for_cost (STMT_VINFO_VECTYPE (first_stmt_info));
unsigned slp_vf = (ncopies * assumed_nunits) / instance->group_size;
vect_transform_slp_perm_load (slp_node, vNULL, NULL,
slp_vf, instance, true,
&n_perms);
inside_cost += record_stmt_cost (cost_vec, n_perms, vec_perm,
- stmt_info, 0, vect_body);
+ first_stmt_info, 0, vect_body);
/* And adjust the number of loads performed. This handles
redundancies as well as loads that are later dead. */
- auto_sbitmap perm (DR_GROUP_SIZE (stmt_info));
+ auto_sbitmap perm (DR_GROUP_SIZE (first_stmt_info));
bitmap_clear (perm);
for (unsigned i = 0;
i < SLP_TREE_LOAD_PERMUTATION (slp_node).length (); ++i)
bitmap_set_bit (perm, SLP_TREE_LOAD_PERMUTATION (slp_node)[i]);
ncopies = 0;
bool load_seen = false;
- for (unsigned i = 0; i < DR_GROUP_SIZE (stmt_info); ++i)
+ for (unsigned i = 0; i < DR_GROUP_SIZE (first_stmt_info); ++i)
{
if (i % assumed_nunits == 0)
{
if (load_seen)
ncopies++;
gcc_assert (ncopies
- <= (DR_GROUP_SIZE (stmt_info) - DR_GROUP_GAP (stmt_info)
+ <= (DR_GROUP_SIZE (first_stmt_info)
+ - DR_GROUP_GAP (first_stmt_info)
+ assumed_nunits - 1) / assumed_nunits);
}
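The adjustment above counts one vector load per ASSUMED_NUNITS-wide block of
the group that the permutation actually touches, so redundant and dead loads
drop out of the cost. A standalone rendering of that counting scheme (names
hypothetical, std::vector<bool> standing in for the sbitmap):

#include <cstdio>
#include <vector>

static unsigned
count_vector_loads (const std::vector<unsigned> &perm,
		    unsigned group_size, unsigned assumed_nunits)
{
  /* Mark which group elements the permutation uses.  */
  std::vector<bool> used (group_size, false);
  for (unsigned idx : perm)
    used[idx] = true;

  /* One load per nunits-wide block containing a used element.  */
  unsigned ncopies = 0;
  bool load_seen = false;
  for (unsigned i = 0; i < group_size; ++i)
    {
      if (i % assumed_nunits == 0)
	{
	  if (load_seen)
	    ncopies++;
	  load_seen = false;
	}
      if (used[i])
	load_seen = true;
    }
  if (load_seen)
    ncopies++;
  return ncopies;
}

int
main ()
{
  /* Group of 8, 4 elements per vector: only elements 0..3 are used,
     so one vector load suffices instead of two.  */
  printf ("loads: %u\n", count_vector_loads ({ 0, 2, 1, 3 }, 8, 4)); /* 1 */
  return 0;
}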
/* Grouped loads read all elements in the group at once,
so we want the DR for the first statement. */
+ stmt_vec_info first_stmt_info = stmt_info;
if (!slp_node && grouped_access_p)
- first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
+ first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
/* True if we should include any once-per-group costs as well as
the cost of the statement itself. For SLP we only get called
once per group anyhow. */
- bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));
+ bool first_stmt_p = (first_stmt_info == stmt_info);
/* We assume that the cost of a single load-lanes instruction is
equivalent to the cost of DR_GROUP_SIZE separate loads. If a grouped
{
/* Uses an even and odd extract operations or shuffle operations
for each needed permute. */
- int group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
+ int group_size = DR_GROUP_SIZE (first_stmt_info);
int nstmts = ncopies * ceil_log2 (group_size) * group_size;
inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm,
stmt_info, 0, vect_body);
vec_info *vinfo = stmt_info->vinfo;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
- gimple *first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
- data_reference *first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
- unsigned int group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
- bool single_element_p = (stmt == first_stmt
+ stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
+ data_reference *first_dr = STMT_VINFO_DATA_REF (first_stmt_info);
+ unsigned int group_size = DR_GROUP_SIZE (first_stmt_info);
+ bool single_element_p = (stmt_info == first_stmt_info
&& !DR_GROUP_NEXT_ELEMENT (stmt_info));
- unsigned HOST_WIDE_INT gap = DR_GROUP_GAP (vinfo_for_stmt (first_stmt));
+ unsigned HOST_WIDE_INT gap = DR_GROUP_GAP (first_stmt_info);
poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
/* True if the vectorized statements would access beyond the last
*memory_access_type = VMAT_GATHER_SCATTER;
}
- if (vls_type != VLS_LOAD && first_stmt == stmt)
+ if (vls_type != VLS_LOAD && first_stmt_info == stmt_info)
{
/* STMT is the leader of the group. Check the operands of all the
stmts of the group. */
- gimple *next_stmt = DR_GROUP_NEXT_ELEMENT (stmt_info);
- while (next_stmt)
+ stmt_vec_info next_stmt_info = DR_GROUP_NEXT_ELEMENT (stmt_info);
+ while (next_stmt_info)
{
- tree op = vect_get_store_rhs (next_stmt);
+ tree op = vect_get_store_rhs (next_stmt_info);
enum vect_def_type dt;
if (!vect_is_simple_use (op, vinfo, &dt))
{
"use not simple.\n");
return false;
}
- next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+ next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
}
}
traditional behavior until that can be fixed. */
if (*memory_access_type == VMAT_ELEMENTWISE
&& !STMT_VINFO_STRIDED_P (stmt_info)
- && !(stmt == DR_GROUP_FIRST_ELEMENT (stmt_info)
+ && !(stmt_info == DR_GROUP_FIRST_ELEMENT (stmt_info)
&& !DR_GROUP_NEXT_ELEMENT (stmt_info)
&& !pow2p_hwi (DR_GROUP_SIZE (stmt_info))))
{
get_group_alias_ptr_type (gimple *first_stmt)
{
struct data_reference *first_dr, *next_dr;
- gimple *next_stmt;
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
- next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
- while (next_stmt)
+ stmt_vec_info next_stmt_info
+ = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
+ while (next_stmt_info)
{
- next_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (next_stmt));
+ next_dr = STMT_VINFO_DATA_REF (next_stmt_info);
if (get_alias_set (DR_REF (first_dr))
!= get_alias_set (DR_REF (next_dr)))
{
"conflicting alias set types.\n");
return ptr_type_node;
}
- next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+ next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
}
return reference_alias_ptr_type (DR_REF (first_dr));
}
gimple *ptr_incr = NULL;
int ncopies;
int j;
- gimple *next_stmt, *first_stmt;
+ stmt_vec_info first_stmt_info;
bool grouped_store;
unsigned int group_size, i;
vec<tree> oprnds = vNULL;
&& (slp || memory_access_type != VMAT_CONTIGUOUS));
if (grouped_store)
{
- first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
- first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
- group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
+ first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
+ first_dr = STMT_VINFO_DATA_REF (first_stmt_info);
+ group_size = DR_GROUP_SIZE (first_stmt_info);
}
else
{
- first_stmt = stmt;
+ first_stmt_info = stmt_info;
first_dr = dr;
group_size = vec_num = 1;
}
}
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
- {
- gimple *group_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
- DR_GROUP_STORE_COUNT (vinfo_for_stmt (group_stmt))++;
- }
+ DR_GROUP_STORE_COUNT (DR_GROUP_FIRST_ELEMENT (stmt_info))++;
if (grouped_store)
{
/* We vectorize all the stmts of the interleaving group when we
reach the last stmt in the group. */
- if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
- < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
+ if (DR_GROUP_STORE_COUNT (first_stmt_info)
+ < DR_GROUP_SIZE (first_stmt_info)
&& !slp)
{
*vec_stmt = NULL;
/* VEC_NUM is the number of vect stmts to be created for this
group. */
vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
- first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
- gcc_assert (DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
- first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
- op = vect_get_store_rhs (first_stmt);
+ first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0];
+ gcc_assert (DR_GROUP_FIRST_ELEMENT (first_stmt_info)
+ == first_stmt_info);
+ first_dr = STMT_VINFO_DATA_REF (first_stmt_info);
+ op = vect_get_store_rhs (first_stmt_info);
}
else
/* VEC_NUM is the number of vect stmts to be created for this
group. */
vec_num = group_size;
- ref_type = get_group_alias_ptr_type (first_stmt);
+ ref_type = get_group_alias_ptr_type (first_stmt_info);
}
else
ref_type = reference_alias_ptr_type (DR_REF (first_dr));
prev_stmt_info = NULL;
alias_off = build_int_cst (ref_type, 0);
- next_stmt = first_stmt;
+ stmt_vec_info next_stmt_info = first_stmt_info;
for (g = 0; g < group_size; g++)
{
running_off = offvar;
for (j = 0; j < ncopies; j++)
{
/* We've set op and dt above, from vect_get_store_rhs,
- and first_stmt == stmt. */
+ and first_stmt_info == stmt_info. */
if (j == 0)
{
if (slp)
}
else
{
- op = vect_get_store_rhs (next_stmt);
- vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
+ op = vect_get_store_rhs (next_stmt_info);
+ vec_oprnd = vect_get_vec_def_for_operand
+ (op, next_stmt_info);
}
}
else
}
}
}
- next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+ next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
if (slp)
break;
}
If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN and
OPRNDS are of size 1. */
- next_stmt = first_stmt;
+ stmt_vec_info next_stmt_info = first_stmt_info;
for (i = 0; i < group_size; i++)
{
/* Since gaps are not supported for interleaved stores,
DR_GROUP_SIZE is the exact number of stmts in the chain.
- Therefore, NEXT_STMT can't be NULL_TREE. In case that
- there is no interleaving, DR_GROUP_SIZE is 1, and only one
- iteration of the loop will be executed. */
- op = vect_get_store_rhs (next_stmt);
- vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
+ Therefore, NEXT_STMT_INFO can't be null. In case
+ that there is no interleaving, DR_GROUP_SIZE is 1,
+ and only one iteration of the loop will be executed. */
+ op = vect_get_store_rhs (next_stmt_info);
+ vec_oprnd = vect_get_vec_def_for_operand
+ (op, next_stmt_info);
dr_chain.quick_push (vec_oprnd);
oprnds.quick_push (vec_oprnd);
- next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+ next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
}
if (mask)
vec_mask = vect_get_vec_def_for_operand (mask, stmt,
}
else
dataref_ptr
- = vect_create_data_ref_ptr (first_stmt, aggr_type,
+ = vect_create_data_ref_ptr (first_stmt_info, aggr_type,
simd_lane_access_p ? loop : NULL,
offset, &dummy, gsi, &ptr_incr,
simd_lane_access_p, &inv_p,
&result_chain);
}
- next_stmt = first_stmt;
+ stmt_vec_info next_stmt_info = first_stmt_info;
for (i = 0; i < vec_num; i++)
{
unsigned align, misalign;
if (slp)
continue;
- next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
- if (!next_stmt)
+ next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
+ if (!next_stmt_info)
break;
}
}
gphi *phi = NULL;
vec<tree> dr_chain = vNULL;
bool grouped_load = false;
- gimple *first_stmt;
+ stmt_vec_info first_stmt_info;
stmt_vec_info first_stmt_info_for_drptr = NULL;
bool inv_p;
bool compute_in_loop = false;
gcc_assert (!nested_in_vect_loop);
gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info));
- first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
- group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
+ first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
+ group_size = DR_GROUP_SIZE (first_stmt_info);
if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
slp_perm = true;
if (grouped_load)
{
- first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
- first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
+ first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
+ first_dr = STMT_VINFO_DATA_REF (first_stmt_info);
}
else
{
- first_stmt = stmt;
+ first_stmt_info = stmt_info;
first_dr = dr;
}
if (slp && grouped_load)
{
- group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
- ref_type = get_group_alias_ptr_type (first_stmt);
+ group_size = DR_GROUP_SIZE (first_stmt_info);
+ ref_type = get_group_alias_ptr_type (first_stmt_info);
}
else
{
if (grouped_load)
cst_offset
= (tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)))
- * vect_get_place_in_interleaving_chain (stmt, first_stmt));
+ * vect_get_place_in_interleaving_chain (stmt,
+ first_stmt_info));
group_size = 1;
ref_type = reference_alias_ptr_type (DR_REF (dr));
}
if (grouped_load)
{
- first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
- group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
+ first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
+ group_size = DR_GROUP_SIZE (first_stmt_info);
/* For SLP vectorization we directly vectorize a subchain
without permutation. */
if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
- first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
+ first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0];
/* For BB vectorization always use the first stmt to base
the data ref pointer on. */
if (bb_vinfo)
first_stmt_info_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];
/* Check if the chain of loads is already vectorized. */
- if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
+ if (STMT_VINFO_VEC_STMT (first_stmt_info)
/* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
??? But we can only do so if there is exactly one
as we have no way to get at the rest. Leave the CSE
*vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
return true;
}
- first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
+ first_dr = STMT_VINFO_DATA_REF (first_stmt_info);
group_gap_adj = 0;
/* VEC_NUM is the number of vect stmts to be created for this group. */
else
vec_num = group_size;
- ref_type = get_group_alias_ptr_type (first_stmt);
+ ref_type = get_group_alias_ptr_type (first_stmt_info);
}
else
{
- first_stmt = stmt;
+ first_stmt_info = stmt_info;
first_dr = dr;
group_size = vec_num = 1;
group_gap_adj = 0;
|| alignment_support_scheme == dr_explicit_realign)
&& !compute_in_loop)
{
- msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
+ msq = vect_setup_realignment (first_stmt_info, gsi, &realignment_token,
alignment_support_scheme, NULL_TREE,
&at_loop);
if (alignment_support_scheme == dr_explicit_realign_optimized)
inv_p = false;
}
else if (first_stmt_info_for_drptr
- && first_stmt != first_stmt_info_for_drptr)
+ && first_stmt_info != first_stmt_info_for_drptr)
{
dataref_ptr
= vect_create_data_ref_ptr (first_stmt_info_for_drptr,
}
else
dataref_ptr
- = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
+ = vect_create_data_ref_ptr (first_stmt_info, aggr_type, at_loop,
offset, &dummy, gsi, &ptr_incr,
simd_lane_access_p, &inv_p,
byte_offset, bump);
tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
if (compute_in_loop)
- msq = vect_setup_realignment (first_stmt, gsi,
+ msq = vect_setup_realignment (first_stmt_info, gsi,
&realignment_token,
dr_explicit_realign,
dataref_ptr, NULL);
one are skipped, and their vec_stmt_info shouldn't be freed
meanwhile. */
*grouped_store = true;
- stmt_vec_info group_info
- = vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (stmt_info));
+ stmt_vec_info group_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
if (DR_GROUP_STORE_COUNT (group_info) == DR_GROUP_SIZE (group_info))
is_store = true;
}
vect_remove_stores (gimple *first_stmt)
{
gimple *next = first_stmt;
- gimple *tmp;
gimple_stmt_iterator next_si;
while (next)
{
stmt_vec_info stmt_info = vinfo_for_stmt (next);
- tmp = DR_GROUP_NEXT_ELEMENT (stmt_info);
+ stmt_vec_info tmp = DR_GROUP_NEXT_ELEMENT (stmt_info);
if (is_pattern_stmt_p (stmt_info))
next = STMT_VINFO_RELATED_STMT (stmt_info);
/* Free the attached stmt_vec_info and remove the stmt. */
/* Interleaving and reduction chains info. */
/* First element in the group. */
- gimple *first_element;
+ stmt_vec_info first_element;
/* Pointer to the next element in the group. */
- gimple *next_element;
+ stmt_vec_info next_element;
/* For data-refs, in case that two or more stmts share a data-ref, this is the
pointer to the previously detected stmt with the same dr. */
gimple *same_dr_stmt;