+2018-05-25  Richard Biener  <rguenther@suse.de>
+
+ * tree-vectorizer.h (STMT_VINFO_GROUP_*, GROUP_*): Remove.
+ (DR_GROUP_*): New, assert we have non-NULL ->data_ref_info.
+ (REDUC_GROUP_*): New, assert we have NULL ->data_ref_info.
+ (STMT_VINFO_GROUPED_ACCESS): Adjust.
+ * tree-vect-data-refs.c (everywhere): Adjust users.
+ * tree-vect-loop.c (everywhere): Likewise.
+ * tree-vect-slp.c (everywhere): Likewise.
+ * tree-vect-stmts.c (everywhere): Likewise.
+ * tree-vect-patterns.c (vect_reassociating_reduction_p): Likewise.
+
2018-05-25  Rainer Orth  <ro@CeBiTec.Uni-Bielefeld.DE>
* configure.ac (gcc_cv_as_section_has_e): Move to common section.
/* We do not have to consider dependences between accesses that belong
to the same group, unless the stride could be smaller than the
group size. */
- if (GROUP_FIRST_ELEMENT (stmtinfo_a)
- && GROUP_FIRST_ELEMENT (stmtinfo_a) == GROUP_FIRST_ELEMENT (stmtinfo_b)
+ if (DR_GROUP_FIRST_ELEMENT (stmtinfo_a)
+ && (DR_GROUP_FIRST_ELEMENT (stmtinfo_a)
+ == DR_GROUP_FIRST_ELEMENT (stmtinfo_b))
&& !STMT_VINFO_STRIDED_P (stmtinfo_a))
return false;
/* If dra and drb are part of the same interleaving chain consider
them independent. */
if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (DR_STMT (dra)))
- && (GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dra)))
- == GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (drb)))))
+ && (DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dra)))
+ == DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (drb)))))
return false;
/* Unknown data dependence. */
/* For interleaved data accesses the step in the loop must be multiplied by
the size of the interleaving group. */
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
- dr_size *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
+ dr_size *= DR_GROUP_SIZE (vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (stmt_info)));
if (STMT_VINFO_GROUPED_ACCESS (peel_stmt_info))
- dr_peel_size *= GROUP_SIZE (peel_stmt_info);
+ dr_peel_size *= DR_GROUP_SIZE (peel_stmt_info);
/* It can be assumed that the data refs with the same alignment as dr_peel
are aligned in the vector loop. */
/* For interleaving, only the alignment of the first access matters. */
if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
- && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
+ && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt)
continue;
/* Strided accesses perform only component accesses, alignment is
elem_size = vector_element_size (vector_size, nelements);
mis_in_elements = DR_MISALIGNMENT (dr) / elem_size;
- if (!multiple_p (nelements - mis_in_elements, GROUP_SIZE (stmt_info)))
+ if (!multiple_p (nelements - mis_in_elements, DR_GROUP_SIZE (stmt_info)))
return false;
}
/* For interleaving, only the alignment of the first access
matters. */
if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
- && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
+ && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt)
continue;
/* Strided accesses perform only component accesses, alignment is
/* For interleaving, only the alignment of the first access
matters. */
if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
- && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
+ && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt)
continue;
/* Strided accesses perform only component accesses, alignment is
/* For interleaving, only the alignment of the first access
matters. */
if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
- && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
+ && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt)
continue;
/* For invariant accesses there is nothing to enhance. */
if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
{
poly_uint64 nscalars = (STMT_SLP_TYPE (stmt_info)
- ? vf * GROUP_SIZE (stmt_info) : vf);
+ ? vf * DR_GROUP_SIZE (stmt_info) : vf);
possible_npeel_number
= vect_get_num_vectors (nscalars, vectype);
by the group size. */
stmt_info = vinfo_for_stmt (DR_STMT (dr0));
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
- npeel /= GROUP_SIZE (stmt_info);
+ npeel /= DR_GROUP_SIZE (stmt_info);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
matters. */
if (aligned_access_p (dr)
|| (STMT_VINFO_GROUPED_ACCESS (stmt_info)
- && GROUP_FIRST_ELEMENT (stmt_info) != stmt))
+ && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt))
continue;
if (STMT_VINFO_STRIDED_P (stmt_info))
gimple *first_stmt = SLP_TREE_SCALAR_STMTS (node)[0];
data_reference_p first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
if (SLP_TREE_LOAD_PERMUTATION (node).exists ())
- first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt));
+ first_stmt = DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt));
data_reference_p dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
if (! vect_compute_data_ref_alignment (dr)
dr_step = tree_to_shwi (step);
/* Check that STEP is a multiple of type size. Otherwise there is
a non-element-sized gap at the end of the group which we
- cannot represent in GROUP_GAP or GROUP_SIZE.
+ cannot represent in DR_GROUP_GAP or DR_GROUP_SIZE.
??? As we can handle non-constant step fine here we should
- simply remove uses of GROUP_GAP between the last and first
- element and instead rely on DR_STEP. GROUP_SIZE then would
+ simply remove uses of DR_GROUP_GAP between the last and first
+ element and instead rely on DR_STEP. DR_GROUP_SIZE then would
simply not include that gap. */
if ((dr_step % type_size) != 0)
{
groupsize = 0;
/* Not consecutive access is possible only if it is a part of interleaving. */
- if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
+ if (!DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
{
/* Check if it this DR is a part of interleaving, and is a single
element of the group that is accessed in the loop. */
&& (dr_step % type_size) == 0
&& groupsize > 0)
{
- GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
- GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
- GROUP_GAP (stmt_info) = groupsize - 1;
+ DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
+ DR_GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
+ DR_GROUP_GAP (stmt_info) = groupsize - 1;
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
return true;
}
- if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt)
+ if (DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt)
{
/* First stmt in the interleaving chain. Check the chain. */
- gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
+ gimple *next = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
struct data_reference *data_ref = dr;
unsigned int count = 1;
tree prev_init = DR_INIT (data_ref);
"Two or more load stmts share the same dr.\n");
/* For load use the same data-ref load. */
- GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;
+ DR_GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;
prev = next;
- next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
+ next = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
continue;
}
last_accessed_element += diff;
/* Store the gap from the previous member of the group. If there is no
- gap in the access, GROUP_GAP is always 1. */
- GROUP_GAP (vinfo_for_stmt (next)) = diff;
+ gap in the access, DR_GROUP_GAP is always 1. */
+ DR_GROUP_GAP (vinfo_for_stmt (next)) = diff;
prev_init = DR_INIT (data_ref);
- next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
+ next = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
/* Count the number of data-refs in the chain. */
count++;
}
difference between the groupsize and the last accessed
element.
When there is no gap, this difference should be 0. */
- GROUP_GAP (vinfo_for_stmt (stmt)) = groupsize - last_accessed_element;
+ DR_GROUP_GAP (vinfo_for_stmt (stmt)) = groupsize - last_accessed_element;
- GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
+ DR_GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
dump_printf (MSG_NOTE, "of size %u starting with ",
(unsigned)groupsize);
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
- if (GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
+ if (DR_GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
dump_printf_loc (MSG_NOTE, vect_location,
"There is a gap of %u elements after the group\n",
- GROUP_GAP (vinfo_for_stmt (stmt)));
+ DR_GROUP_GAP (vinfo_for_stmt (stmt)));
}
/* SLP: create an SLP data structure for every interleaving group of
{
/* Dissolve the group if present. */
gimple *next;
- gimple *stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dr)));
+ gimple *stmt = DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dr)));
while (stmt)
{
stmt_vec_info vinfo = vinfo_for_stmt (stmt);
- next = GROUP_NEXT_ELEMENT (vinfo);
- GROUP_FIRST_ELEMENT (vinfo) = NULL;
- GROUP_NEXT_ELEMENT (vinfo) = NULL;
+ next = DR_GROUP_NEXT_ELEMENT (vinfo);
+ DR_GROUP_FIRST_ELEMENT (vinfo) = NULL;
+ DR_GROUP_NEXT_ELEMENT (vinfo) = NULL;
stmt = next;
}
return false;
/* Allow loads with zero step in inner-loop vectorization. */
if (loop_vinfo && integer_zerop (step))
{
- GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
+ DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
if (!nested_in_vect_loop_p (loop, stmt))
return DR_IS_READ (dr);
/* Allow references with zero step for outer loops marked
{
/* Interleaved accesses are not yet supported within outer-loop
vectorization for references in the inner-loop. */
- GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
+ DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
/* For the rest of the analysis we use the outer-loop step. */
step = STMT_VINFO_DR_STEP (stmt_info);
&& !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step)))
{
/* Mark that it is not interleaving. */
- GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
+ DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
return true;
}
}
}
/* Link the found element into the group list. */
- if (!GROUP_FIRST_ELEMENT (stmtinfo_a))
+ if (!DR_GROUP_FIRST_ELEMENT (stmtinfo_a))
{
- GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (dra);
+ DR_GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (dra);
lastinfo = stmtinfo_a;
}
- GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (dra);
- GROUP_NEXT_ELEMENT (lastinfo) = DR_STMT (drb);
+ DR_GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (dra);
+ DR_GROUP_NEXT_ELEMENT (lastinfo) = DR_STMT (drb);
lastinfo = stmtinfo_b;
}
}
tree ref_type = TREE_TYPE (DR_REF (dr));
unsigned HOST_WIDE_INT ref_size = tree_to_uhwi (TYPE_SIZE_UNIT (ref_type));
unsigned HOST_WIDE_INT access_size = ref_size;
- if (GROUP_FIRST_ELEMENT (stmt_vinfo))
+ if (DR_GROUP_FIRST_ELEMENT (stmt_vinfo))
{
- gcc_assert (GROUP_FIRST_ELEMENT (stmt_vinfo) == DR_STMT (dr));
- access_size *= GROUP_SIZE (stmt_vinfo) - GROUP_GAP (stmt_vinfo);
+ gcc_assert (DR_GROUP_FIRST_ELEMENT (stmt_vinfo) == DR_STMT (dr));
+ access_size *= DR_GROUP_SIZE (stmt_vinfo) - DR_GROUP_GAP (stmt_vinfo);
}
if (STMT_VINFO_VEC_STMT (stmt_vinfo)
&& (vect_supportable_dr_alignment (dr, false)
stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
HOST_WIDE_INT count
= estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
- if (GROUP_FIRST_ELEMENT (stmt_info))
- count *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
+ if (DR_GROUP_FIRST_ELEMENT (stmt_info))
+ count *= DR_GROUP_SIZE (vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (stmt_info)));
return estimated_poly_value (gap) <= count * vect_get_scalar_dr_size (dr);
}
continue;
}
- dr_group_first_a = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_a));
+ dr_group_first_a = DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_a));
if (dr_group_first_a)
{
stmt_a = dr_group_first_a;
dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a));
}
- dr_group_first_b = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_b));
+ dr_group_first_b = DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_b));
if (dr_group_first_b)
{
stmt_b = dr_group_first_b;
get_alias_set (DR_REF (dr))))
need_ref_all = true;
/* Likewise for any of the data references in the stmt group. */
- else if (STMT_VINFO_GROUP_SIZE (stmt_info) > 1)
+ else if (DR_GROUP_SIZE (stmt_info) > 1)
{
- gimple *orig_stmt = STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info);
+ gimple *orig_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
do
{
stmt_vec_info sinfo = vinfo_for_stmt (orig_stmt);
need_ref_all = true;
break;
}
- orig_stmt = STMT_VINFO_GROUP_NEXT_ELEMENT (sinfo);
+ orig_stmt = DR_GROUP_NEXT_ELEMENT (sinfo);
}
while (orig_stmt);
}
void
vect_record_grouped_load_vectors (gimple *stmt, vec<tree> result_chain)
{
- gimple *first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
+ gimple *first_stmt = DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
gimple *next_stmt, *new_stmt;
unsigned int i, gap_count;
tree tmp_data_ref;
/* Skip the gaps. Loads created for the gaps will be removed by dead
code elimination pass later. No need to check for the first stmt in
the group, since it always exists.
- GROUP_GAP is the number of steps in elements from the previous
- access (if there is no gap GROUP_GAP is 1). We skip loads that
+ DR_GROUP_GAP is the number of steps in elements from the previous
+ access (if there is no gap DR_GROUP_GAP is 1). We skip loads that
correspond to the gaps. */
if (next_stmt != first_stmt
- && gap_count < GROUP_GAP (vinfo_for_stmt (next_stmt)))
+ && gap_count < DR_GROUP_GAP (vinfo_for_stmt (next_stmt)))
{
gap_count++;
continue;
STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt;
else
{
- if (!GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
+ if (!DR_GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
{
gimple *prev_stmt =
STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
}
}
- next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+ next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
gap_count = 1;
/* If NEXT_STMT accesses the same DR as the previous statement,
put the same TMP_DATA_REF as its vectorized statement; otherwise
get the next data-ref from RESULT_CHAIN. */
- if (!next_stmt || !GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
+ if (!next_stmt || !DR_GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
break;
}
}
if (loop_vinfo
&& STMT_SLP_TYPE (stmt_info)
&& !multiple_p (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
- * GROUP_SIZE (vinfo_for_stmt
- (GROUP_FIRST_ELEMENT (stmt_info))),
+ * DR_GROUP_SIZE (vinfo_for_stmt
+ (DR_GROUP_FIRST_ELEMENT (stmt_info))),
TYPE_VECTOR_SUBPARTS (vectype)))
;
else if (!loop_vinfo
/* Store the reduction cycles for possible vectorization in
loop-aware SLP if it was not detected as reduction
chain. */
- if (! GROUP_FIRST_ELEMENT (vinfo_for_stmt (reduc_stmt)))
+ if (! REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (reduc_stmt)))
LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push (reduc_stmt);
}
}
{
gimple *firstp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
gimple *stmtp;
- gcc_assert (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (firstp))
- && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
- GROUP_SIZE (vinfo_for_stmt (firstp)) = GROUP_SIZE (vinfo_for_stmt (stmt));
+ gcc_assert (!REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (firstp))
+ && REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
+ REDUC_GROUP_SIZE (vinfo_for_stmt (firstp))
+ = REDUC_GROUP_SIZE (vinfo_for_stmt (stmt));
do
{
stmtp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
- GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmtp)) = firstp;
- stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
+ REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmtp)) = firstp;
+ stmt = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
if (stmt)
- GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmtp))
+ REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmtp))
= STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
}
while (stmt);
FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first)
if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (first)))
{
- gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
+ gimple *next = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
while (next)
{
if (! STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next)))
break;
- next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
+ next = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
}
/* If not all stmt in the chain are patterns try to handle
the chain without patterns. */
(SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0]);
if (! STMT_VINFO_GROUPED_ACCESS (vinfo))
continue;
- vinfo = vinfo_for_stmt (STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo));
- unsigned int size = STMT_VINFO_GROUP_SIZE (vinfo);
+ vinfo = vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (vinfo));
+ unsigned int size = DR_GROUP_SIZE (vinfo);
tree vectype = STMT_VINFO_VECTYPE (vinfo);
if (! vect_store_lanes_supported (vectype, size, false)
&& ! known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U)
FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node)
{
vinfo = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]);
- vinfo = vinfo_for_stmt (STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo));
- bool single_element_p = !STMT_VINFO_GROUP_NEXT_ELEMENT (vinfo);
- size = STMT_VINFO_GROUP_SIZE (vinfo);
+ vinfo = vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (vinfo));
+ bool single_element_p = !DR_GROUP_NEXT_ELEMENT (vinfo);
+ size = DR_GROUP_SIZE (vinfo);
vectype = STMT_VINFO_VECTYPE (vinfo);
if (! vect_load_lanes_supported (vectype, size, false)
&& ! vect_grouped_load_supported (vectype, single_element_p,
if (current_stmt)
{
current_stmt_info = vinfo_for_stmt (current_stmt);
- GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
- GROUP_FIRST_ELEMENT (use_stmt_info)
- = GROUP_FIRST_ELEMENT (current_stmt_info);
+ REDUC_GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
+ REDUC_GROUP_FIRST_ELEMENT (use_stmt_info)
+ = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
}
else
- GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;
+ REDUC_GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;
lhs = gimple_assign_lhs (loop_use_stmt);
current_stmt = loop_use_stmt;
/* Swap the operands, if needed, to make the reduction operand be the second
operand. */
lhs = PHI_RESULT (phi);
- next_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
+ next_stmt = REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
while (next_stmt)
{
if (gimple_assign_rhs2 (next_stmt) == lhs)
&& !is_loop_header_bb_p (gimple_bb (def_stmt)))))
{
lhs = gimple_assign_lhs (next_stmt);
- next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+ next_stmt = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
continue;
}
}
lhs = gimple_assign_lhs (next_stmt);
- next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+ next_stmt = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
}
/* Save the chain for further analysis in SLP detection. */
- first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
+ first = REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first);
- GROUP_SIZE (vinfo_for_stmt (first)) = size;
+ REDUC_GROUP_SIZE (vinfo_for_stmt (first)) = size;
return true;
}
}
/* Dissolve group eventually half-built by vect_is_slp_reduction. */
- gimple *first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (def_stmt));
+ gimple *first = REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (def_stmt));
while (first)
{
- gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
- GROUP_FIRST_ELEMENT (vinfo_for_stmt (first)) = NULL;
- GROUP_NEXT_ELEMENT (vinfo_for_stmt (first)) = NULL;
+ gimple *next = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
+ REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (first)) = NULL;
+ REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first)) = NULL;
first = next;
}
two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
will be 2).
- If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
- containing the operands.
+ If REDUC_GROUP_SIZE > NUNITS, the scalars will be split into several
+ vectors containing the operands.
For example, NUNITS is four as before, and the group size is 8
(s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
vec_initial_defs.reserve (vec_num);
get_initial_defs_for_reduction (slp_node_instance->reduc_phis,
&vec_initial_defs, vec_num,
- GROUP_FIRST_ELEMENT (stmt_info),
+ REDUC_GROUP_FIRST_ELEMENT (stmt_info),
neutral_op);
}
else
# b1 = phi <b2, b0>
a2 = operation (a1)
b2 = operation (b1) */
- slp_reduc = (slp_node && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
+ slp_reduc = (slp_node && !REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
/* True if we should implement SLP_REDUC using native reduction operations
instead of scalar operations. */
we may end up with more than one vector result. Here we reduce them to
one vector. */
- if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) || direct_slp_reduc)
+ if (REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) || direct_slp_reduc)
{
tree first_vect = PHI_RESULT (new_phis[0]);
gassign *new_vec_stmt = NULL;
}
else if (direct_slp_reduc)
{
- /* Here we create one vector for each of the GROUP_SIZE results,
+ /* Here we create one vector for each of the REDUC_GROUP_SIZE results,
with the elements for other SLP statements replaced with the
neutral value. We can then do a normal reduction on each vector. */
tree mask_type = build_same_sized_truth_vector_type (index_type);
/* Create a vector that, for each element, identifies which of
- the GROUP_SIZE results should use it. */
+ the REDUC_GROUP_SIZE results should use it. */
tree index_mask = build_int_cst (index_elt_type, group_size - 1);
index = gimple_build (&seq, BIT_AND_EXPR, index_type, index,
build_vector_from_val (index_type, index_mask));
/* The only case where we need to reduce scalar results in SLP, is
unrolling. If the size of SCALAR_RESULTS is greater than
- GROUP_SIZE, we reduce them combining elements modulo
- GROUP_SIZE. */
+ REDUC_GROUP_SIZE, we reduce them combining elements modulo
+ REDUC_GROUP_SIZE. */
if (slp_reduc)
{
tree res, first_res, new_res;
/* In SLP reduction chain we reduce vector results into one vector if
- necessary, hence we set here GROUP_SIZE to 1. SCALAR_DEST is the LHS of
- the last stmt in the reduction chain, since we are looking for the loop
- exit phi node. */
- if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
+ necessary, hence we set here REDUC_GROUP_SIZE to 1. SCALAR_DEST is the
+ LHS of the last stmt in the reduction chain, since we are looking for
+ the loop exit phi node. */
+ if (REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
{
gimple *dest_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
/* Handle reduction patterns. */
group_size = 1;
}
- /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
- case that GROUP_SIZE is greater than vectorization factor). Therefore, we
- need to match SCALAR_RESULTS with corresponding statements. The first
- (GROUP_SIZE / number of new vector stmts) scalar results correspond to
- the first vector stmt, etc.
- (RATIO is equal to (GROUP_SIZE / number of new vector stmts)). */
+ /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
+ case that REDUC_GROUP_SIZE is greater than vectorization factor).
+ Therefore, we need to match SCALAR_RESULTS with corresponding statements.
+ The first (REDUC_GROUP_SIZE / number of new vector stmts) scalar results
+ correspond to the first vector stmt, etc.
+ (RATIO is equal to (REDUC_GROUP_SIZE / number of new vector stmts)). */
if (group_size > new_phis.length ())
{
ratio = group_size / new_phis.length ();
/* In case of reduction chain we switch to the first stmt in the chain, but
we don't update STMT_INFO, since only the last stmt is marked as reduction
and has reduction properties. */
- if (GROUP_FIRST_ELEMENT (stmt_info)
- && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
+ if (REDUC_GROUP_FIRST_ELEMENT (stmt_info)
+ && REDUC_GROUP_FIRST_ELEMENT (stmt_info) != stmt)
{
- stmt = GROUP_FIRST_ELEMENT (stmt_info);
+ stmt = REDUC_GROUP_FIRST_ELEMENT (stmt_info);
first_p = false;
}
/* Not supportable if the reduction variable is used in the loop, unless
it's a reduction chain. */
if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
- && !GROUP_FIRST_ELEMENT (stmt_info))
+ && !REDUC_GROUP_FIRST_ELEMENT (stmt_info))
return false;
/* Reductions that are not used even in an enclosing outer-loop,
if (orig_stmt)
gcc_assert (tmp == orig_stmt
- || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == orig_stmt);
+ || (REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp))
+ == orig_stmt));
else
/* We changed STMT to be the first stmt in reduction chain, hence we
check that in this case the first element in the chain is STMT. */
gcc_assert (stmt == tmp
- || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt);
+ || REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt);
if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
return false;
/* For SLP reductions, see if there is a neutral value we can use. */
tree neutral_op = NULL_TREE;
if (slp_node)
- neutral_op
- = neutral_op_for_slp_reduction (slp_node_instance->reduc_phis, code,
- GROUP_FIRST_ELEMENT (stmt_info) != NULL);
+ neutral_op = neutral_op_for_slp_reduction
+ (slp_node_instance->reduc_phis, code,
+ REDUC_GROUP_FIRST_ELEMENT (stmt_info) != NULL);
if (double_reduc && reduction_type == FOLD_LEFT_REDUCTION)
{
if (reduction_type == FOLD_LEFT_REDUCTION
&& slp_node
- && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
+ && !REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
{
/* We cannot use in-order reductions in this case because there is
an implicit reassociation of the operations involved. */
/* Check extra constraints for variable-length unchained SLP reductions. */
if (STMT_SLP_TYPE (stmt_info)
- && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
+ && !REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
&& !nunits_out.is_constant ())
{
/* We checked above that we could build the initial vector when
interleaving chain was completed - free all the stores in
the chain. */
gsi_next (&si);
- vect_remove_stores (GROUP_FIRST_ELEMENT (stmt_info));
+ vect_remove_stores (DR_GROUP_FIRST_ELEMENT (stmt_info));
}
else
{
{
return (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
? STMT_VINFO_REDUC_TYPE (stmt_vinfo) != FOLD_LEFT_REDUCTION
- : GROUP_FIRST_ELEMENT (stmt_vinfo) != NULL);
+ : REDUC_GROUP_FIRST_ELEMENT (stmt_vinfo) != NULL);
}
/* Function vect_recog_dot_prod_pattern
gimple *next_stmt = first_stmt;
int result = 0;
- if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
+ if (first_stmt != DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
return -1;
do
{
if (next_stmt == stmt)
return result;
- next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+ next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
if (next_stmt)
- result += GROUP_GAP (vinfo_for_stmt (next_stmt));
+ result += DR_GROUP_GAP (vinfo_for_stmt (next_stmt));
}
while (next_stmt);
else
{
/* Load. */
- first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
+ first_load = DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
if (prev_first_load)
{
/* Check that there are no loads from different interleaving
FOR_EACH_VEC_ELT (stmts, i, stmt)
{
/* But for reduction chains only check on the first stmt. */
- if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
- && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) != stmt)
+ if (REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
+ && REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) != stmt)
continue;
if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) != def_type)
return NULL;
FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
{
gimple *first_stmt = SLP_TREE_SCALAR_STMTS (node)[0];
- first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt));
+ first_stmt = DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt));
/* But we have to keep those permutations that are required because
of handling of gaps. */
if (known_eq (unrolling_factor, 1U)
- || (group_size == GROUP_SIZE (vinfo_for_stmt (first_stmt))
- && GROUP_GAP (vinfo_for_stmt (first_stmt)) == 0))
+ || (group_size == DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
+ && DR_GROUP_GAP (vinfo_for_stmt (first_stmt)) == 0))
SLP_TREE_LOAD_PERMUTATION (node).release ();
else
for (j = 0; j < SLP_TREE_LOAD_PERMUTATION (node).length (); ++j)
/* Reduction (there are no data-refs in the root).
In reduction chain the order of the loads is not important. */
if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))
- && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
+ && !REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
vect_attempt_slp_rearrange_stmts (slp_instn);
/* In basic block vectorization we allow any subchain of an interleaving
{
if (j != 0
&& (next_load != load
- || GROUP_GAP (vinfo_for_stmt (load)) != 1))
+ || DR_GROUP_GAP (vinfo_for_stmt (load)) != 1))
{
subchain_p = false;
break;
}
- next_load = GROUP_NEXT_ELEMENT (vinfo_for_stmt (load));
+ next_load = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (load));
}
if (subchain_p)
SLP_TREE_LOAD_PERMUTATION (node).release ();
{
stmt_vec_info group_info
= vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]);
- group_info = vinfo_for_stmt (GROUP_FIRST_ELEMENT (group_info));
+ group_info = vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (group_info));
unsigned HOST_WIDE_INT nunits;
unsigned k, maxk = 0;
FOR_EACH_VEC_ELT (SLP_TREE_LOAD_PERMUTATION (node), j, k)
if (k > maxk)
maxk = k;
/* In BB vectorization we may not actually use a loaded vector
- accessing elements in excess of GROUP_SIZE. */
+ accessing elements in excess of DR_GROUP_SIZE. */
tree vectype = STMT_VINFO_VECTYPE (group_info);
if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits)
- || maxk >= (GROUP_SIZE (group_info) & ~(nunits - 1)))
+ || maxk >= (DR_GROUP_SIZE (group_info) & ~(nunits - 1)))
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"BB vectorization with gaps at the end of "
vect_split_slp_store_group (gimple *first_stmt, unsigned group1_size)
{
stmt_vec_info first_vinfo = vinfo_for_stmt (first_stmt);
- gcc_assert (GROUP_FIRST_ELEMENT (first_vinfo) == first_stmt);
+ gcc_assert (DR_GROUP_FIRST_ELEMENT (first_vinfo) == first_stmt);
gcc_assert (group1_size > 0);
- int group2_size = GROUP_SIZE (first_vinfo) - group1_size;
+ int group2_size = DR_GROUP_SIZE (first_vinfo) - group1_size;
gcc_assert (group2_size > 0);
- GROUP_SIZE (first_vinfo) = group1_size;
+ DR_GROUP_SIZE (first_vinfo) = group1_size;
gimple *stmt = first_stmt;
for (unsigned i = group1_size; i > 1; i--)
{
- stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
- gcc_assert (GROUP_GAP (vinfo_for_stmt (stmt)) == 1);
+ stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
+ gcc_assert (DR_GROUP_GAP (vinfo_for_stmt (stmt)) == 1);
}
/* STMT is now the last element of the first group. */
- gimple *group2 = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
- GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)) = 0;
+ gimple *group2 = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
+ DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)) = 0;
- GROUP_SIZE (vinfo_for_stmt (group2)) = group2_size;
- for (stmt = group2; stmt; stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)))
+ DR_GROUP_SIZE (vinfo_for_stmt (group2)) = group2_size;
+ for (stmt = group2; stmt; stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)))
{
- GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = group2;
- gcc_assert (GROUP_GAP (vinfo_for_stmt (stmt)) == 1);
+ DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = group2;
+ gcc_assert (DR_GROUP_GAP (vinfo_for_stmt (stmt)) == 1);
}
- /* For the second group, the GROUP_GAP is that before the original group,
+ /* For the second group, the DR_GROUP_GAP is that before the original group,
plus skipping over the first vector. */
- GROUP_GAP (vinfo_for_stmt (group2)) =
- GROUP_GAP (first_vinfo) + group1_size;
+ DR_GROUP_GAP (vinfo_for_stmt (group2))
+ = DR_GROUP_GAP (first_vinfo) + group1_size;
- /* GROUP_GAP of the first group now has to skip over the second group too. */
- GROUP_GAP (first_vinfo) += group2_size;
+ /* DR_GROUP_GAP of the first group now has to skip over the second group too. */
+ DR_GROUP_GAP (first_vinfo) += group2_size;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "Split group into %d and %d\n",
{
slp_instance new_instance;
slp_tree node;
- unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
+ unsigned int group_size;
tree vectype, scalar_type = NULL_TREE;
gimple *next;
unsigned int i;
struct data_reference *dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
vec<gimple *> scalar_stmts;
- if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
+ if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
{
- if (dr)
- {
- scalar_type = TREE_TYPE (DR_REF (dr));
- vectype = get_vectype_for_scalar_type (scalar_type);
- }
- else
- {
- gcc_assert (is_a <loop_vec_info> (vinfo));
- vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
- }
-
- group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
+ scalar_type = TREE_TYPE (DR_REF (dr));
+ vectype = get_vectype_for_scalar_type (scalar_type);
+ group_size = DR_GROUP_SIZE (vinfo_for_stmt (stmt));
+ }
+ else if (!dr && REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
+ {
+ gcc_assert (is_a <loop_vec_info> (vinfo));
+ vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
+ group_size = REDUC_GROUP_SIZE (vinfo_for_stmt (stmt));
}
else
{
/* Create a node (a root of the SLP tree) for the packed grouped stores. */
scalar_stmts.create (group_size);
next = stmt;
- if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
+ if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
{
/* Collect the stores and store them in SLP_TREE_SCALAR_STMTS. */
while (next)
STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
else
scalar_stmts.safe_push (next);
- next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
+ next = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
+ }
+ }
+ else if (!dr && REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
+ {
+ /* Collect the reduction stmts and store them in
+ SLP_TREE_SCALAR_STMTS. */
+ while (next)
+ {
+ if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next))
+ && STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)))
+ scalar_stmts.safe_push (
+ STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
+ else
+ scalar_stmts.safe_push (next);
+ next = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
}
/* Mark the first element of the reduction chain as reduction to properly
transform the node. In the reduction analysis phase only the last
element of the chain is marked as reduction. */
- if (!STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
- STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = vect_reduction_def;
+ STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = vect_reduction_def;
}
else
{
gimple *load, *first_stmt;
bool this_load_permuted = false;
load_permutation.create (group_size);
- first_stmt = GROUP_FIRST_ELEMENT
+ first_stmt = DR_GROUP_FIRST_ELEMENT
(vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (load_node)[0]));
FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
{
a gap either because the group is larger than the SLP
group-size or because there is a gap between the groups. */
&& (known_eq (unrolling_factor, 1U)
- || (group_size == GROUP_SIZE (vinfo_for_stmt (first_stmt))
- && GROUP_GAP (vinfo_for_stmt (first_stmt)) == 0)))
+ || (group_size == DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
+ && DR_GROUP_GAP (vinfo_for_stmt (first_stmt)) == 0)))
{
load_permutation.release ();
continue;
slp_tree load_node;
FOR_EACH_VEC_ELT (loads, i, load_node)
{
- gimple *first_stmt = GROUP_FIRST_ELEMENT
+ gimple *first_stmt = DR_GROUP_FIRST_ELEMENT
(vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (load_node)[0]));
stmt_vec_info stmt_vinfo = vinfo_for_stmt (first_stmt);
/* Use SLP for strided accesses (or if we
if (STMT_VINFO_STRIDED_P (stmt_vinfo)
|| ! vect_load_lanes_supported
(STMT_VINFO_VECTYPE (stmt_vinfo),
- GROUP_SIZE (stmt_vinfo), false))
+ DR_GROUP_SIZE (stmt_vinfo), false))
break;
}
if (i == loads.length ())
vector size. */
unsigned HOST_WIDE_INT const_nunits;
if (is_a <bb_vec_info> (vinfo)
- && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
&& STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
+ && DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
&& nunits.is_constant (&const_nunits))
{
/* We consider breaking the group only on VF boundaries from the existing
while (stmt)
{
stmt_vec_info vinfo = vinfo_for_stmt (stmt);
- next = GROUP_NEXT_ELEMENT (vinfo);
- GROUP_FIRST_ELEMENT (vinfo) = NULL;
- GROUP_NEXT_ELEMENT (vinfo) = NULL;
+ next = REDUC_GROUP_NEXT_ELEMENT (vinfo);
+ REDUC_GROUP_FIRST_ELEMENT (vinfo) = NULL;
+ REDUC_GROUP_NEXT_ELEMENT (vinfo) = NULL;
stmt = next;
}
STMT_VINFO_DEF_TYPE (vinfo_for_stmt (first_element))
scalar stmts in this node. For SLP reductions it is equal to the
number of vector statements in the children (which has already been
calculated by the recursive call). Otherwise it is the number of
- scalar elements in one scalar iteration (GROUP_SIZE) multiplied by
+ scalar elements in one scalar iteration (DR_GROUP_SIZE) multiplied by
VF divided by the number of elements in a vector. */
- if (GROUP_FIRST_ELEMENT (stmt_info)
- && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
+ if (!STMT_VINFO_GROUPED_ACCESS (stmt_info)
+ && REDUC_GROUP_FIRST_ELEMENT (stmt_info))
SLP_TREE_NUMBER_OF_VEC_STMTS (node)
= SLP_TREE_NUMBER_OF_VEC_STMTS (SLP_TREE_CHILDREN (node)[0]);
else
if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
return false;
- stmt_info = vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info));
+ stmt_info = vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (stmt_info));
mode = TYPE_MODE (vectype);
for (int k = 0; k < group_size; k++)
{
unsigned int i = (SLP_TREE_LOAD_PERMUTATION (node)[k]
- + j * STMT_VINFO_GROUP_SIZE (stmt_info));
+ + j * DR_GROUP_SIZE (stmt_info));
vec_index = i / nunits;
mask_element = i % nunits;
if (vec_index == first_vec_index
/* Mark the first element of the reduction chain as reduction to properly
transform the node. In the analysis phase only the last element of the
chain is marked as reduction. */
- if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
- && GROUP_FIRST_ELEMENT (stmt_info) == stmt)
+ if (!STMT_VINFO_GROUPED_ACCESS (stmt_info)
+ && REDUC_GROUP_FIRST_ELEMENT (stmt_info)
+ && REDUC_GROUP_FIRST_ELEMENT (stmt_info) == stmt)
{
STMT_VINFO_DEF_TYPE (stmt_info) = vect_reduction_def;
STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
so we want the DR for the first statement. */
if (!slp_node && grouped_access_p)
{
- first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
+ first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
}
bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));
/* We assume that the cost of a single store-lanes instruction is
- equivalent to the cost of GROUP_SIZE separate stores. If a grouped
+ equivalent to the cost of DR_GROUP_SIZE separate stores. If a grouped
access is instead being provided by a permute-and-store operation,
include the cost of the permutes. */
if (first_stmt_p
{
/* Uses a high and low interleave or shuffle operations for each
needed permute. */
- int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
+ int group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
int nstmts = ncopies * ceil_log2 (group_size) * group_size;
inside_cost = record_stmt_cost (cost_vec, nstmts, vec_perm,
stmt_info, 0, vect_body);
{
/* If the load is permuted then the alignment is determined by
the first group element not by the first scalar stmt DR. */
- gimple *stmt = GROUP_FIRST_ELEMENT (stmt_info);
+ gimple *stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
/* Record the cost for the permutation. */
unsigned n_perms;
stmt_info, 0, vect_body);
/* And adjust the number of loads performed. This handles
redundancies as well as loads that are later dead. */
- auto_sbitmap perm (GROUP_SIZE (stmt_info));
+ auto_sbitmap perm (DR_GROUP_SIZE (stmt_info));
bitmap_clear (perm);
for (unsigned i = 0;
i < SLP_TREE_LOAD_PERMUTATION (slp_node).length (); ++i)
bitmap_set_bit (perm, SLP_TREE_LOAD_PERMUTATION (slp_node)[i]);
ncopies = 0;
bool load_seen = false;
- for (unsigned i = 0; i < GROUP_SIZE (stmt_info); ++i)
+ for (unsigned i = 0; i < DR_GROUP_SIZE (stmt_info); ++i)
{
if (i % assumed_nunits == 0)
{
if (load_seen)
ncopies++;
gcc_assert (ncopies
- <= (GROUP_SIZE (stmt_info) - GROUP_GAP (stmt_info)
+ <= (DR_GROUP_SIZE (stmt_info) - DR_GROUP_GAP (stmt_info)
+ assumed_nunits - 1) / assumed_nunits);
}
so we want the DR for the first statement. */
if (!slp_node && grouped_access_p)
{
- first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
+ first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
}
bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));
/* We assume that the cost of a single load-lanes instruction is
- equivalent to the cost of GROUP_SIZE separate loads. If a grouped
+ equivalent to the cost of DR_GROUP_SIZE separate loads. If a grouped
access is instead being provided by a load-and-permute operation,
include the cost of the permutes. */
if (first_stmt_p
{
/* Uses an even and odd extract operations or shuffle operations
for each needed permute. */
- int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
+ int group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
int nstmts = ncopies * ceil_log2 (group_size) * group_size;
inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm,
stmt_info, 0, vect_body);
vec_info *vinfo = stmt_info->vinfo;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
- gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
+ gimple *first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
data_reference *first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
- unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
+ unsigned int group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
bool single_element_p = (stmt == first_stmt
- && !GROUP_NEXT_ELEMENT (stmt_info));
- unsigned HOST_WIDE_INT gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
+ && !DR_GROUP_NEXT_ELEMENT (stmt_info));
+ unsigned HOST_WIDE_INT gap = DR_GROUP_GAP (vinfo_for_stmt (first_stmt));
poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
/* True if the vectorized statements would access beyond the last
{
if (STMT_VINFO_STRIDED_P (stmt_info))
{
- /* Try to use consecutive accesses of GROUP_SIZE elements,
+ /* Try to use consecutive accesses of DR_GROUP_SIZE elements,
separated by the stride, until we have a complete vector.
Fall back to scalar accesses if that isn't possible. */
if (multiple_p (nunits, group_size))
{
/* STMT is the leader of the group. Check the operands of all the
stmts of the group. */
- gimple *next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
+ gimple *next_stmt = DR_GROUP_NEXT_ELEMENT (stmt_info);
while (next_stmt)
{
tree op = vect_get_store_rhs (next_stmt);
"use not simple.\n");
return false;
}
- next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+ next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
}
}
traditional behavior until that can be fixed. */
if (*memory_access_type == VMAT_ELEMENTWISE
&& !STMT_VINFO_STRIDED_P (stmt_info)
- && !(stmt == GROUP_FIRST_ELEMENT (stmt_info)
- && !GROUP_NEXT_ELEMENT (stmt_info)
- && !pow2p_hwi (GROUP_SIZE (stmt_info))))
+ && !(stmt == DR_GROUP_FIRST_ELEMENT (stmt_info)
+ && !DR_GROUP_NEXT_ELEMENT (stmt_info)
+ && !pow2p_hwi (DR_GROUP_SIZE (stmt_info))))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
gimple *next_stmt;
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
- next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
+ next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
while (next_stmt)
{
next_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (next_stmt));
"conflicting alias set types.\n");
return ptr_type_node;
}
- next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+ next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
}
return reference_alias_ptr_type (DR_REF (first_dr));
}
&& (slp || memory_access_type != VMAT_CONTIGUOUS));
if (grouped_store)
{
- first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
+ first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
- group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
+ group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
}
else
{
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
{
- gimple *group_stmt = GROUP_FIRST_ELEMENT (stmt_info);
- GROUP_STORE_COUNT (vinfo_for_stmt (group_stmt))++;
+ gimple *group_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
+ DR_GROUP_STORE_COUNT (vinfo_for_stmt (group_stmt))++;
}
if (grouped_store)
/* We vectorize all the stmts of the interleaving group when we
reach the last stmt in the group. */
- if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
- < GROUP_SIZE (vinfo_for_stmt (first_stmt))
+ if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
+ < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
&& !slp)
{
*vec_stmt = NULL;
group. */
vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
- gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
+ gcc_assert (DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
op = vect_get_store_rhs (first_stmt);
}
}
}
}
- next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+ next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
if (slp)
break;
}
used as an input to vect_permute_store_chain(), and OPRNDS as
an input to vect_get_vec_def_for_stmt_copy() for the next copy.
- If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
+ If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN and
OPRNDS are of size 1. */
next_stmt = first_stmt;
for (i = 0; i < group_size; i++)
{
/* Since gaps are not supported for interleaved stores,
- GROUP_SIZE is the exact number of stmts in the chain.
+ DR_GROUP_SIZE is the exact number of stmts in the chain.
Therefore, NEXT_STMT can't be NULL_TREE. In case that
- there is no interleaving, GROUP_SIZE is 1, and only one
+ there is no interleaving, DR_GROUP_SIZE is 1, and only one
iteration of the loop will be executed. */
op = vect_get_store_rhs (next_stmt);
vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
dr_chain.quick_push (vec_oprnd);
oprnds.quick_push (vec_oprnd);
- next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+ next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
}
if (mask)
vec_mask = vect_get_vec_def_for_operand (mask, stmt,
DR_CHAIN is then used as an input to vect_permute_store_chain(),
and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
next copy.
- If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
+ If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN and
OPRNDS are of size 1. */
for (i = 0; i < group_size; i++)
{
if (slp)
continue;
- next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+ next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
if (!next_stmt)
break;
}
gcc_assert (!nested_in_vect_loop);
gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info));
- first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
- group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
+ first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
+ group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
slp_perm = true;
/* Similarly when the stmt is a load that is both part of a SLP
instance and a loop vectorized stmt via the same-dr mechanism
we have to give up. */
- if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
+ if (DR_GROUP_SAME_DR_STMT (stmt_info)
&& (STMT_SLP_TYPE (stmt_info)
!= STMT_SLP_TYPE (vinfo_for_stmt
- (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
+ (DR_GROUP_SAME_DR_STMT (stmt_info)))))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
if (grouped_load)
{
- first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
+ first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
}
else
}
if (slp && grouped_load)
{
- group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
+ group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
ref_type = get_group_alias_ptr_type (first_stmt);
}
else
if (grouped_load)
{
- first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
- group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
+ first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
+ group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
/* For SLP vectorization we directly vectorize a subchain
without permutation. */
if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
/* With SLP permutation we load the gaps as well, without
we need to skip the gaps after we manage to fully load
- all elements. group_gap_adj is GROUP_SIZE here. */
+ all elements. group_gap_adj is DR_GROUP_SIZE here. */
group_elt += nunits;
if (maybe_ne (group_gap_adj, 0U)
&& !slp_perm
meanwhile. */
*grouped_store = true;
stmt_vec_info group_info
- = vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info));
- if (GROUP_STORE_COUNT (group_info) == GROUP_SIZE (group_info))
+ = vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (stmt_info));
+ if (DR_GROUP_STORE_COUNT (group_info) == DR_GROUP_SIZE (group_info))
is_store = true;
}
else
{
stmt_vec_info stmt_info = vinfo_for_stmt (next);
- tmp = GROUP_NEXT_ELEMENT (stmt_info);
+ tmp = DR_GROUP_NEXT_ELEMENT (stmt_info);
if (is_pattern_stmt_p (stmt_info))
next = STMT_VINFO_RELATED_STMT (stmt_info);
/* Free the attached stmt_vec_info and remove the stmt. */
STMT_SLP_TYPE (res) = loop_vect;
STMT_VINFO_NUM_SLP_USES (res) = 0;
- GROUP_FIRST_ELEMENT (res) = NULL;
- GROUP_NEXT_ELEMENT (res) = NULL;
- GROUP_SIZE (res) = 0;
- GROUP_STORE_COUNT (res) = 0;
- GROUP_GAP (res) = 0;
- GROUP_SAME_DR_STMT (res) = NULL;
+ res->first_element = NULL; /* GROUP_FIRST_ELEMENT */
+ res->next_element = NULL; /* GROUP_NEXT_ELEMENT */
+ res->size = 0; /* GROUP_SIZE */
+ res->store_count = 0; /* GROUP_STORE_COUNT */
+ res->gap = 0; /* GROUP_GAP */
+ res->same_dr_stmt = NULL; /* GROUP_SAME_DR_STMT */
return res;
}
#define STMT_VINFO_SAME_ALIGN_REFS(S) (S)->same_align_refs
#define STMT_VINFO_SIMD_CLONE_INFO(S) (S)->simd_clone_info
#define STMT_VINFO_DEF_TYPE(S) (S)->def_type
-#define STMT_VINFO_GROUP_FIRST_ELEMENT(S) (S)->first_element
-#define STMT_VINFO_GROUP_NEXT_ELEMENT(S) (S)->next_element
-#define STMT_VINFO_GROUP_SIZE(S) (S)->size
-#define STMT_VINFO_GROUP_STORE_COUNT(S) (S)->store_count
-#define STMT_VINFO_GROUP_GAP(S) (S)->gap
-#define STMT_VINFO_GROUP_SAME_DR_STMT(S) (S)->same_dr_stmt
-#define STMT_VINFO_GROUPED_ACCESS(S) ((S)->first_element != NULL && (S)->data_ref_info)
+#define STMT_VINFO_GROUPED_ACCESS(S) \
+  ((S)->data_ref_info && DR_GROUP_FIRST_ELEMENT(S))
#define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S) (S)->loop_phi_evolution_base_unchanged
#define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part
#define STMT_VINFO_MIN_NEG_DIST(S) (S)->min_neg_dist
#define STMT_VINFO_REDUC_TYPE(S) (S)->reduc_type
#define STMT_VINFO_REDUC_DEF(S) (S)->reduc_def
-#define GROUP_FIRST_ELEMENT(S) (S)->first_element
-#define GROUP_NEXT_ELEMENT(S) (S)->next_element
-#define GROUP_SIZE(S) (S)->size
-#define GROUP_STORE_COUNT(S) (S)->store_count
-#define GROUP_GAP(S) (S)->gap
-#define GROUP_SAME_DR_STMT(S) (S)->same_dr_stmt
+#define DR_GROUP_FIRST_ELEMENT(S) \
+  (gcc_checking_assert ((S)->data_ref_info), (S)->first_element)
+#define DR_GROUP_NEXT_ELEMENT(S) \
+  (gcc_checking_assert ((S)->data_ref_info), (S)->next_element)
+#define DR_GROUP_SIZE(S) \
+  (gcc_checking_assert ((S)->data_ref_info), (S)->size)
+#define DR_GROUP_STORE_COUNT(S) \
+  (gcc_checking_assert ((S)->data_ref_info), (S)->store_count)
+#define DR_GROUP_GAP(S) \
+  (gcc_checking_assert ((S)->data_ref_info), (S)->gap)
+#define DR_GROUP_SAME_DR_STMT(S) \
+  (gcc_checking_assert ((S)->data_ref_info), (S)->same_dr_stmt)
+
+#define REDUC_GROUP_FIRST_ELEMENT(S) \
+  (gcc_checking_assert (!(S)->data_ref_info), (S)->first_element)
+#define REDUC_GROUP_NEXT_ELEMENT(S) \
+  (gcc_checking_assert (!(S)->data_ref_info), (S)->next_element)
+#define REDUC_GROUP_SIZE(S) \
+  (gcc_checking_assert (!(S)->data_ref_info), (S)->size)
#define STMT_VINFO_RELEVANT_P(S) ((S)->relevant != vect_unused_in_scope)
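
For reference, here is a minimal standalone sketch of the asserting-accessor
pattern the new macros implement.  This is illustrative only, not the GCC
sources: the struct and plain assert () are simplified stand-ins for
stmt_vec_info and gcc_checking_assert.

/* Sketch: comma-expression accessors over the shared group fields.
   DR_GROUP_* requires a data reference, REDUC_GROUP_* requires none,
   so using the wrong family traps in checking builds instead of
   silently reading the shared fields.  */
#include <assert.h>
#include <stdio.h>

struct stmt_vec_info_s
{
  void *data_ref_info;                   /* non-NULL only for data-ref stmts */
  struct stmt_vec_info_s *first_element; /* fields shared by both group kinds */
  struct stmt_vec_info_s *next_element;
  unsigned size;
};

#define DR_GROUP_SIZE(S) \
  (assert ((S)->data_ref_info), (S)->size)
#define REDUC_GROUP_SIZE(S) \
  (assert (!(S)->data_ref_info), (S)->size)

int
main (void)
{
  int dummy_dr;
  struct stmt_vec_info_s load = { &dummy_dr, &load, 0, 4 };
  struct stmt_vec_info_s reduc = { 0, &reduc, 0, 2 };

  printf ("load group size:  %u\n", DR_GROUP_SIZE (&load));
  printf ("reduc group size: %u\n", REDUC_GROUP_SIZE (&reduc));

  /* DR_GROUP_SIZE (&reduc) or REDUC_GROUP_SIZE (&load) would fail the
     assert, which is the point of splitting the old GROUP_* macros.  */
  return 0;
}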