struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
struct data_reference *dra = DDR_A (ddr);
struct data_reference *drb = DDR_B (ddr);
- stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
- stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
+ stmt_vec_info stmtinfo_a = vinfo_for_stmt (vect_dr_stmt (dra));
+ stmt_vec_info stmtinfo_b = vinfo_for_stmt (vect_dr_stmt (drb));
lambda_vector dist_v;
unsigned int loop_depth;
... = a[i];
a[i+1] = ...;
where loads from the group interleave with the store. */
- if (!vect_preserves_scalar_order_p (DR_STMT (dra), DR_STMT (drb)))
+ if (!vect_preserves_scalar_order_p (vect_dr_stmt (dra),
+ vect_dr_stmt (drb)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
/* If dra and drb are part of the same interleaving chain consider
them independent. */
- if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (DR_STMT (dra)))
- && (DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dra)))
- == DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (drb)))))
+ if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (vect_dr_stmt (dra)))
+ && (DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (vect_dr_stmt (dra)))
+ == DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (vect_dr_stmt (drb)))))
return false;
/* Unknown data dependence. */
unsigned int i;
FOR_EACH_VEC_ELT (vinfo->datarefs, i, dr)
{
- gimple *stmt = DR_STMT (dr);
- if (!DR_IS_CONDITIONAL_IN_STMT (dr)
- && STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
- {
- gimple *stmt = DR_STMT (dr);
- vect_record_base_alignment (vinfo, stmt, &DR_INNERMOST (dr));
+ gimple *stmt = vect_dr_stmt (dr);
+ if (!DR_IS_CONDITIONAL_IN_STMT (dr)
+ && STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
+ {
+ vect_record_base_alignment (vinfo, stmt, &DR_INNERMOST (dr));
- /* If DR is nested in the loop that is being vectorized, we can also
- record the alignment of the base wrt the outer loop. */
- if (loop && nested_in_vect_loop_p (loop, stmt))
- {
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
- vect_record_base_alignment
- (vinfo, stmt, &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info));
- }
- }
+ /* If DR is nested in the loop that is being vectorized, we can also
+ record the alignment of the base wrt the outer loop. */
+ if (loop && nested_in_vect_loop_p (loop, stmt))
+ {
+ stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ vect_record_base_alignment
+ (vinfo, stmt, &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info));
+ }
+ }
}
}
static unsigned int
vect_calculate_target_alignment (struct data_reference *dr)
{
- gimple *stmt = DR_STMT (dr);
+ gimple *stmt = vect_dr_stmt (dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
return targetm.vectorize.preferred_vector_alignment (vectype);
FOR NOW: No analysis is actually performed. Misalignment is calculated
only for trivial cases. TODO. */
-bool
+static bool
vect_compute_data_ref_alignment (struct data_reference *dr)
{
- gimple *stmt = DR_STMT (dr);
+ gimple *stmt = vect_dr_stmt (dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
vec_base_alignments *base_alignments = &stmt_info->vinfo->base_alignments;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct data_reference *current_dr;
int dr_size = vect_get_scalar_dr_size (dr);
int dr_peel_size = vect_get_scalar_dr_size (dr_peel);
- stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
- stmt_vec_info peel_stmt_info = vinfo_for_stmt (DR_STMT (dr_peel));
+ stmt_vec_info stmt_info = vinfo_for_stmt (vect_dr_stmt (dr));
+ stmt_vec_info peel_stmt_info = vinfo_for_stmt (vect_dr_stmt (dr_peel));
/* For interleaved data accesses the step in the loop must be multiplied by
the size of the interleaving group. */
/* It can be assumed that the data refs with the same alignment as dr_peel
are aligned in the vector loop. */
same_aligned_drs
- = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel)));
+ = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (vect_dr_stmt (dr_peel)));
FOR_EACH_VEC_ELT (same_aligned_drs, i, current_dr)
{
if (current_dr != dr)
FOR_EACH_VEC_ELT (datarefs, i, dr)
{
- gimple *stmt = DR_STMT (dr);
+ gimple *stmt = vect_dr_stmt (dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
if (!STMT_VINFO_RELEVANT_P (stmt_info))
static bool
vector_alignment_reachable_p (struct data_reference *dr)
{
- gimple *stmt = DR_STMT (dr);
+ gimple *stmt = vect_dr_stmt (dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
stmt_vector_for_cost *body_cost_vec,
stmt_vector_for_cost *prologue_cost_vec)
{
- gimple *stmt = DR_STMT (dr);
+ gimple *stmt = vect_dr_stmt (dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
int ncopies;
ncopies = vect_get_num_copies (loop_vinfo, STMT_VINFO_VECTYPE (stmt_info));
if (DR_IS_READ (dr))
- vect_get_load_cost (dr, ncopies, true, inside_cost, outside_cost,
+ vect_get_load_cost (stmt_info, ncopies, true, inside_cost, outside_cost,
prologue_cost_vec, body_cost_vec, false);
else
- vect_get_store_cost (dr, ncopies, inside_cost, body_cost_vec);
+ vect_get_store_cost (stmt_info, ncopies, inside_cost, body_cost_vec);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
FOR_EACH_VEC_ELT (datarefs, i, dr)
{
- gimple *stmt = DR_STMT (dr);
+ gimple *stmt = vect_dr_stmt (dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
if (!STMT_VINFO_RELEVANT_P (stmt_info))
continue;
vect_peel_info elem = *slot;
int dummy;
unsigned int inside_cost = 0, outside_cost = 0;
- gimple *stmt = DR_STMT (elem->dr);
+ gimple *stmt = vect_dr_stmt (elem->dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
stmt_vector_for_cost prologue_cost_vec, body_cost_vec,
if (dr == dr0)
continue;
- stmt = DR_STMT (dr);
+ stmt = vect_dr_stmt (dr);
stmt_info = vinfo_for_stmt (stmt);
/* For interleaving, only the alignment of the first access
matters. */
FOR_EACH_VEC_ELT (datarefs, i, dr)
{
- stmt = DR_STMT (dr);
+ stmt = vect_dr_stmt (dr);
stmt_info = vinfo_for_stmt (stmt);
if (!STMT_VINFO_RELEVANT_P (stmt_info))
peel_for_unknown_alignment.peel_info.count = 1
+ STMT_VINFO_SAME_ALIGN_REFS
- (vinfo_for_stmt (DR_STMT (dr0))).length ();
+ (vinfo_for_stmt (vect_dr_stmt (dr0))).length ();
}
peel_for_unknown_alignment.peel_info.npeel = 0;
if (do_peeling)
{
- stmt = DR_STMT (dr0);
+ stmt = vect_dr_stmt (dr0);
stmt_info = vinfo_for_stmt (stmt);
vectype = STMT_VINFO_VECTYPE (stmt_info);
/* For interleaved data access every iteration accesses all the
members of the group, therefore we divide the number of iterations
by the group size. */
- stmt_info = vinfo_for_stmt (DR_STMT (dr0));
+ stmt_info = vinfo_for_stmt (vect_dr_stmt (dr0));
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
npeel /= DR_GROUP_SIZE (stmt_info);
{
/* Strided accesses perform only component accesses, alignment
is irrelevant for them. */
- stmt_info = vinfo_for_stmt (DR_STMT (dr));
+ stmt_info = vinfo_for_stmt (vect_dr_stmt (dr));
if (STMT_VINFO_STRIDED_P (stmt_info)
&& !STMT_VINFO_GROUPED_ACCESS (stmt_info))
continue;
{
FOR_EACH_VEC_ELT (datarefs, i, dr)
{
- stmt = DR_STMT (dr);
+ stmt = vect_dr_stmt (dr);
stmt_info = vinfo_for_stmt (stmt);
/* For interleaving, only the alignment of the first access
break;
}
- stmt = DR_STMT (dr);
+ stmt = vect_dr_stmt (dr);
vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
gcc_assert (vectype);
|| LOOP_VINFO_PTR_MASK (loop_vinfo) == mask);
LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push (
- DR_STMT (dr));
+ vect_dr_stmt (dr));
}
}
{
struct data_reference *dra = DDR_A (ddr);
struct data_reference *drb = DDR_B (ddr);
- stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
- stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
+ stmt_vec_info stmtinfo_a = vinfo_for_stmt (vect_dr_stmt (dra));
+ stmt_vec_info stmtinfo_b = vinfo_for_stmt (vect_dr_stmt (drb));
if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
return;
vect_record_base_alignments (vinfo);
FOR_EACH_VEC_ELT (datarefs, i, dr)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
+ stmt_vec_info stmt_info = vinfo_for_stmt (vect_dr_stmt (dr));
if (STMT_VINFO_VECTORIZABLE (stmt_info)
&& !vect_compute_data_ref_alignment (dr))
{
tree step = DR_STEP (dr);
tree scalar_type = TREE_TYPE (DR_REF (dr));
HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
- gimple *stmt = DR_STMT (dr);
+ gimple *stmt = vect_dr_stmt (dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
if (bb_vinfo)
{
/* Mark the statement as unvectorizable. */
- STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
+ STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (vect_dr_stmt (dr))) = false;
return true;
}
{
/* Dissolve the group if present. */
gimple *next;
- gimple *stmt = DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dr)));
+ gimple *stmt = DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (vect_dr_stmt (dr)));
while (stmt)
{
stmt_vec_info vinfo = vinfo_for_stmt (stmt);
{
tree step = DR_STEP (dr);
tree scalar_type = TREE_TYPE (DR_REF (dr));
- gimple *stmt = DR_STMT (dr);
+ gimple *stmt = vect_dr_stmt (dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = NULL;
for (i = 0; i < datarefs_copy.length () - 1;)
{
data_reference_p dra = datarefs_copy[i];
- stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
+ stmt_vec_info stmtinfo_a = vinfo_for_stmt (vect_dr_stmt (dra));
stmt_vec_info lastinfo = NULL;
if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
|| STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a))
for (i = i + 1; i < datarefs_copy.length (); ++i)
{
data_reference_p drb = datarefs_copy[i];
- stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
+ stmt_vec_info stmtinfo_b = vinfo_for_stmt (vect_dr_stmt (drb));
if (!STMT_VINFO_VECTORIZABLE (stmtinfo_b)
|| STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
break;
|| data_ref_compare_tree (DR_BASE_ADDRESS (dra),
DR_BASE_ADDRESS (drb)) != 0
|| data_ref_compare_tree (DR_OFFSET (dra), DR_OFFSET (drb)) != 0
- || !can_group_stmts_p (DR_STMT (dra), DR_STMT (drb)))
+ || !can_group_stmts_p (vect_dr_stmt (dra), vect_dr_stmt (drb)))
break;
/* Check that the data-refs have the same constant size. */
/* Link the found element into the group list. */
if (!DR_GROUP_FIRST_ELEMENT (stmtinfo_a))
{
- DR_GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (dra);
+ DR_GROUP_FIRST_ELEMENT (stmtinfo_a) = vect_dr_stmt (dra);
lastinfo = stmtinfo_a;
}
- DR_GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (dra);
- DR_GROUP_NEXT_ELEMENT (lastinfo) = DR_STMT (drb);
+ DR_GROUP_FIRST_ELEMENT (stmtinfo_b) = vect_dr_stmt (dra);
+ DR_GROUP_NEXT_ELEMENT (lastinfo) = vect_dr_stmt (drb);
lastinfo = stmtinfo_b;
}
}
FOR_EACH_VEC_ELT (datarefs_copy, i, dr)
- if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
+ if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (vect_dr_stmt (dr)))
&& !vect_analyze_data_ref_access (dr))
{
if (dump_enabled_p ())
if (is_a <bb_vec_info> (vinfo))
{
/* Mark the statement as not vectorizable. */
- STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
+ STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (vect_dr_stmt (dr))) = false;
continue;
}
else
static unsigned HOST_WIDE_INT
vect_vfa_access_size (data_reference *dr)
{
- stmt_vec_info stmt_vinfo = vinfo_for_stmt (DR_STMT (dr));
+ stmt_vec_info stmt_vinfo = vinfo_for_stmt (vect_dr_stmt (dr));
tree ref_type = TREE_TYPE (DR_REF (dr));
unsigned HOST_WIDE_INT ref_size = tree_to_uhwi (TYPE_SIZE_UNIT (ref_type));
unsigned HOST_WIDE_INT access_size = ref_size;
if (DR_GROUP_FIRST_ELEMENT (stmt_vinfo))
{
- gcc_assert (DR_GROUP_FIRST_ELEMENT (stmt_vinfo) == DR_STMT (dr));
+ gcc_assert (DR_GROUP_FIRST_ELEMENT (stmt_vinfo) == vect_dr_stmt (dr));
access_size *= DR_GROUP_SIZE (stmt_vinfo) - DR_GROUP_GAP (stmt_vinfo);
}
if (STMT_VINFO_VEC_STMT (stmt_vinfo)
static bool
vect_small_gap_p (loop_vec_info loop_vinfo, data_reference *dr, poly_int64 gap)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
+ stmt_vec_info stmt_info = vinfo_for_stmt (vect_dr_stmt (dr));
HOST_WIDE_INT count
= estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
if (DR_GROUP_FIRST_ELEMENT (stmt_info))
/* If the two accesses could be dependent within a scalar iteration,
make sure that we'd retain their order. */
if (maybe_gt (init_a + vect_get_scalar_dr_size (dr_a), init_b)
- && !vect_preserves_scalar_order_p (DR_STMT (dr_a), DR_STMT (dr_b)))
+ && !vect_preserves_scalar_order_p (vect_dr_stmt (dr_a),
+ vect_dr_stmt (dr_b)))
return false;
/* There is no alias if abs (DR_STEP) is greater than or equal to
}
dr_a = DDR_A (ddr);
- stmt_a = DR_STMT (DDR_A (ddr));
+ stmt_a = vect_dr_stmt (DDR_A (ddr));
dr_b = DDR_B (ddr);
- stmt_b = DR_STMT (DDR_B (ddr));
+ stmt_b = vect_dr_stmt (DDR_B (ddr));
/* Skip the pair if inter-iteration dependencies are irrelevant
and intra-iteration dependencies are guaranteed to be honored. */
poly_uint64 vf;
gcc_assert (DR_REF (dr));
- stmt = DR_STMT (dr);
+ stmt = vect_dr_stmt (dr);
stmt_info = vinfo_for_stmt (stmt);
/* Check that analysis of the data-ref succeeded. */
vect_supportable_dr_alignment (struct data_reference *dr,
bool check_aligned_accesses)
{
- gimple *stmt = DR_STMT (dr);
+ gimple *stmt = vect_dr_stmt (dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
machine_mode mode = TYPE_MODE (vectype);
stmt_vector_for_cost *cost_vec)
{
unsigned int inside_cost = 0, prologue_cost = 0;
- struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
/* Grouped stores update all elements in the group at once,
so we want the DR for the first statement. */
if (!slp_node && grouped_access_p)
- {
- first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
- dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
- }
+ first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
/* True if we should include any once-per-group costs as well as
the cost of the statement itself. For SLP we only get called
scalar_store, stmt_info, 0, vect_body);
}
else
- vect_get_store_cost (dr, ncopies, &inside_cost, cost_vec);
+ vect_get_store_cost (stmt_info, ncopies, &inside_cost, cost_vec);
if (memory_access_type == VMAT_ELEMENTWISE
|| memory_access_type == VMAT_STRIDED_SLP)
/* Calculate cost of DR's memory access. */
void
-vect_get_store_cost (struct data_reference *dr, int ncopies,
+vect_get_store_cost (stmt_vec_info stmt_info, int ncopies,
unsigned int *inside_cost,
stmt_vector_for_cost *body_cost_vec)
{
+ struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
- gimple *stmt = DR_STMT (dr);
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
switch (alignment_support_scheme)
{
stmt_vector_for_cost *cost_vec)
{
gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
- struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
unsigned int inside_cost = 0, prologue_cost = 0;
bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
+ assumed_nunits - 1) / assumed_nunits);
}
- /* ??? Need to transition load permutation (and load cost) handling
- from vect_analyze_slp_cost_1 to here. */
-
/* Grouped loads read all elements in the group at once,
so we want the DR for the first statement. */
if (!slp_node && grouped_access_p)
- {
- first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
- dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
- }
+ first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
/* True if we should include any once-per-group costs as well as
the cost of the statement itself. For SLP we only get called
scalar_load, stmt_info, 0, vect_body);
}
else
- vect_get_load_cost (dr, ncopies, first_stmt_p,
+ vect_get_load_cost (stmt_info, ncopies, first_stmt_p,
&inside_cost, &prologue_cost,
cost_vec, cost_vec, true);
if (memory_access_type == VMAT_ELEMENTWISE
/* Calculate cost of DR's memory access. */
void
-vect_get_load_cost (struct data_reference *dr, int ncopies,
+vect_get_load_cost (stmt_vec_info stmt_info, int ncopies,
bool add_realign_cost, unsigned int *inside_cost,
unsigned int *prologue_cost,
stmt_vector_for_cost *prologue_cost_vec,
stmt_vector_for_cost *body_cost_vec,
bool record_prologue_costs)
{
+ data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
- gimple *stmt = DR_STMT (dr);
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
switch (alignment_support_scheme)
{
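For context, the vect_dr_stmt accessor that replaces DR_STMT throughout the hunks above is a thin inline wrapper that routes the data reference's statement through the stmt_vec_info machinery. A minimal sketch of the idea follows; the exact definition lives in tree-vectorizer.h and may differ, and the checking assert shown here is an assumption rather than something taken from this patch.

/* Sketch only: return the scalar stmt that DR belongs to.  The real
   helper may differ in details; the assert below is an assumption.  */

static inline gimple *
vect_dr_stmt (data_reference *dr)
{
  gimple *stmt = DR_STMT (dr);
  /* DR should not point at a stmt that has been replaced by a
     pattern stmt.  */
  gcc_checking_assert (!is_pattern_stmt_p (vinfo_for_stmt (stmt)));
  return stmt;
}

The same indirection is what lets vect_get_load_cost and vect_get_store_cost take a stmt_vec_info directly and recover the data reference via STMT_VINFO_DATA_REF, as the signature changes above show.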