struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
struct data_reference *dra = DDR_A (ddr);
struct data_reference *drb = DDR_B (ddr);
- stmt_vec_info stmtinfo_a = vinfo_for_stmt (vect_dr_stmt (dra));
- stmt_vec_info stmtinfo_b = vinfo_for_stmt (vect_dr_stmt (drb));
+ stmt_vec_info stmtinfo_a = vect_dr_stmt (dra);
+ stmt_vec_info stmtinfo_b = vect_dr_stmt (drb);
lambda_vector dist_v;
unsigned int loop_depth;
/* If dra and drb are part of the same interleaving chain consider
them independent. */
- if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (vect_dr_stmt (dra)))
- && (DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (vect_dr_stmt (dra)))
- == DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (vect_dr_stmt (drb)))))
+ if (STMT_VINFO_GROUPED_ACCESS (vect_dr_stmt (dra))
+ && (DR_GROUP_FIRST_ELEMENT (vect_dr_stmt (dra))
+ == DR_GROUP_FIRST_ELEMENT (vect_dr_stmt (drb))))
return false;
/* Unknown data dependence. */
unsigned int i;
FOR_EACH_VEC_ELT (vinfo->shared->datarefs, i, dr)
{
- gimple *stmt = vect_dr_stmt (dr);
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ stmt_vec_info stmt_info = vect_dr_stmt (dr);
if (!DR_IS_CONDITIONAL_IN_STMT (dr)
&& STMT_VINFO_VECTORIZABLE (stmt_info)
&& !STMT_VINFO_GATHER_SCATTER_P (stmt_info))
{
- vect_record_base_alignment (vinfo, stmt, &DR_INNERMOST (dr));
+ vect_record_base_alignment (vinfo, stmt_info, &DR_INNERMOST (dr));
/* If DR is nested in the loop that is being vectorized, we can also
record the alignment of the base wrt the outer loop. */
- if (loop && nested_in_vect_loop_p (loop, stmt))
+ if (loop && nested_in_vect_loop_p (loop, stmt_info))
vect_record_base_alignment
- (vinfo, stmt, &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info));
+ (vinfo, stmt_info, &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info));
}
}
}
static unsigned int
vect_calculate_target_alignment (struct data_reference *dr)
{
- gimple *stmt = vect_dr_stmt (dr);
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ stmt_vec_info stmt_info = vect_dr_stmt (dr);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
return targetm.vectorize.preferred_vector_alignment (vectype);
}
static void
vect_compute_data_ref_alignment (struct data_reference *dr)
{
- gimple *stmt = vect_dr_stmt (dr);
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ stmt_vec_info stmt_info = vect_dr_stmt (dr);
vec_base_alignments *base_alignments = &stmt_info->vinfo->base_alignments;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = NULL;
stays the same throughout the execution of the inner-loop, which is why
we have to check that the stride of the dataref in the inner-loop evenly
divides by the vector alignment. */
- else if (nested_in_vect_loop_p (loop, stmt))
+ else if (nested_in_vect_loop_p (loop, stmt_info))
{
step_preserves_misalignment_p
= (DR_STEP_ALIGNMENT (dr) % vector_alignment) == 0;
struct data_reference *current_dr;
int dr_size = vect_get_scalar_dr_size (dr);
int dr_peel_size = vect_get_scalar_dr_size (dr_peel);
- stmt_vec_info stmt_info = vinfo_for_stmt (vect_dr_stmt (dr));
- stmt_vec_info peel_stmt_info = vinfo_for_stmt (vect_dr_stmt (dr_peel));
+ stmt_vec_info stmt_info = vect_dr_stmt (dr);
+ stmt_vec_info peel_stmt_info = vect_dr_stmt (dr_peel);
/* For interleaved data accesses the step in the loop must be multiplied by
the size of the interleaving group. */
/* It can be assumed that the data refs with the same alignment as dr_peel
are aligned in the vector loop. */
- same_aligned_drs
- = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (vect_dr_stmt (dr_peel)));
+ same_aligned_drs = STMT_VINFO_SAME_ALIGN_REFS (vect_dr_stmt (dr_peel));
FOR_EACH_VEC_ELT (same_aligned_drs, i, current_dr)
{
if (current_dr != dr)
FOR_EACH_VEC_ELT (datarefs, i, dr)
{
- gimple *stmt = vect_dr_stmt (dr);
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ stmt_vec_info stmt_info = vect_dr_stmt (dr);
if (!STMT_VINFO_RELEVANT_P (stmt_info))
continue;
/* For interleaving, only the alignment of the first access matters. */
if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
- && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt)
+ && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt_info)
continue;
/* Strided accesses perform only component accesses, alignment is
static bool
vector_alignment_reachable_p (struct data_reference *dr)
{
- gimple *stmt = vect_dr_stmt (dr);
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ stmt_vec_info stmt_info = vect_dr_stmt (dr);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
stmt_vector_for_cost *body_cost_vec,
stmt_vector_for_cost *prologue_cost_vec)
{
- gimple *stmt = vect_dr_stmt (dr);
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ stmt_vec_info stmt_info = vect_dr_stmt (dr);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
int ncopies;
FOR_EACH_VEC_ELT (datarefs, i, dr)
{
- gimple *stmt = vect_dr_stmt (dr);
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ stmt_vec_info stmt_info = vect_dr_stmt (dr);
if (!STMT_VINFO_RELEVANT_P (stmt_info))
continue;
/* For interleaving, only the alignment of the first access
matters. */
if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
- && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt)
- continue;
+ && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt_info)
+ continue;
/* Strided accesses perform only component accesses, alignment is
irrelevant for them. */
vect_peel_info elem = *slot;
int dummy;
unsigned int inside_cost = 0, outside_cost = 0;
- gimple *stmt = vect_dr_stmt (elem->dr);
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ stmt_vec_info stmt_info = vect_dr_stmt (elem->dr);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
stmt_vector_for_cost prologue_cost_vec, body_cost_vec,
epilogue_cost_vec;
unsigned i;
struct data_reference *dr = NULL;
vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
- gimple *stmt;
- stmt_vec_info stmt_info;
enum dr_alignment_support supportable_dr_alignment;
/* Ensure that all data refs can be vectorized after the peel. */
if (dr == dr0)
continue;
- stmt = vect_dr_stmt (dr);
- stmt_info = vinfo_for_stmt (stmt);
+ stmt_vec_info stmt_info = vect_dr_stmt (dr);
/* For interleaving, only the alignment of the first access
matters. */
if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
- && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt)
+ && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt_info)
continue;
/* Strided accesses perform only component accesses, alignment is
bool do_peeling = false;
bool do_versioning = false;
bool stat;
- gimple *stmt;
- stmt_vec_info stmt_info;
unsigned int npeel = 0;
bool one_misalignment_known = false;
bool one_misalignment_unknown = false;
FOR_EACH_VEC_ELT (datarefs, i, dr)
{
- stmt = vect_dr_stmt (dr);
- stmt_info = vinfo_for_stmt (stmt);
+ stmt_vec_info stmt_info = vect_dr_stmt (dr);
if (!STMT_VINFO_RELEVANT_P (stmt_info))
continue;
/* For interleaving, only the alignment of the first access
matters. */
if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
- && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt)
- continue;
+ && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt_info)
+ continue;
/* For scatter-gather or invariant accesses there is nothing
to enhance. */
epilogue_cost_vec.release ();
peel_for_unknown_alignment.peel_info.count = 1
- + STMT_VINFO_SAME_ALIGN_REFS
- (vinfo_for_stmt (vect_dr_stmt (dr0))).length ();
+ + STMT_VINFO_SAME_ALIGN_REFS (vect_dr_stmt (dr0)).length ();
}
peel_for_unknown_alignment.peel_info.npeel = 0;
if (do_peeling)
{
- stmt = vect_dr_stmt (dr0);
- stmt_info = vinfo_for_stmt (stmt);
+ stmt_vec_info stmt_info = vect_dr_stmt (dr0);
vectype = STMT_VINFO_VECTYPE (stmt_info);
if (known_alignment_for_access_p (dr0))
/* For interleaved data access every iteration accesses all the
members of the group, therefore we divide the number of iterations
by the group size. */
- stmt_info = vinfo_for_stmt (vect_dr_stmt (dr0));
+ stmt_info = vect_dr_stmt (dr0);
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
npeel /= DR_GROUP_SIZE (stmt_info);
{
/* Strided accesses perform only component accesses, alignment
is irrelevant for them. */
- stmt_info = vinfo_for_stmt (vect_dr_stmt (dr));
+ stmt_info = vect_dr_stmt (dr);
if (STMT_VINFO_STRIDED_P (stmt_info)
&& !STMT_VINFO_GROUPED_ACCESS (stmt_info))
continue;
{
FOR_EACH_VEC_ELT (datarefs, i, dr)
{
- stmt = vect_dr_stmt (dr);
- stmt_info = vinfo_for_stmt (stmt);
+ stmt_vec_info stmt_info = vect_dr_stmt (dr);
/* For interleaving, only the alignment of the first access
matters. */
if (aligned_access_p (dr)
|| (STMT_VINFO_GROUPED_ACCESS (stmt_info)
- && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt))
+ && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt_info))
continue;
if (STMT_VINFO_STRIDED_P (stmt_info))
if (!supportable_dr_alignment)
{
- gimple *stmt;
int mask;
tree vectype;
break;
}
- stmt = vect_dr_stmt (dr);
- vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
- gcc_assert (vectype);
+ stmt_info = vect_dr_stmt (dr);
+ vectype = STMT_VINFO_VECTYPE (stmt_info);
+ gcc_assert (vectype);
/* At present we don't support versioning for alignment
with variable VF, since there's no guarantee that the
gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo)
|| LOOP_VINFO_PTR_MASK (loop_vinfo) == mask);
LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
- LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push (
- vect_dr_stmt (dr));
+ LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push (stmt_info);
}
}
{
struct data_reference *dra = DDR_A (ddr);
struct data_reference *drb = DDR_B (ddr);
- stmt_vec_info stmtinfo_a = vinfo_for_stmt (vect_dr_stmt (dra));
- stmt_vec_info stmtinfo_b = vinfo_for_stmt (vect_dr_stmt (drb));
+ stmt_vec_info stmtinfo_a = vect_dr_stmt (dra);
+ stmt_vec_info stmtinfo_b = vect_dr_stmt (drb);
if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
return;
vect_record_base_alignments (vinfo);
FOR_EACH_VEC_ELT (datarefs, i, dr)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (vect_dr_stmt (dr));
+ stmt_vec_info stmt_info = vect_dr_stmt (dr);
if (STMT_VINFO_VECTORIZABLE (stmt_info))
vect_compute_data_ref_alignment (dr);
}
tree step = DR_STEP (dr);
tree scalar_type = TREE_TYPE (DR_REF (dr));
HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
- gimple *stmt = vect_dr_stmt (dr);
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ stmt_vec_info stmt_info = vect_dr_stmt (dr);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
HOST_WIDE_INT dr_step = -1;
groupsize = 0;
/* A non-consecutive access is possible only if it is a part of interleaving. */
- if (!DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
+ if (!DR_GROUP_FIRST_ELEMENT (stmt_info))
{
/* Check if this DR is a part of interleaving, and is a single
element of the group that is accessed in the loop. */
&& (dr_step % type_size) == 0
&& groupsize > 0)
{
- DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
- DR_GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
+ DR_GROUP_FIRST_ELEMENT (stmt_info) = stmt_info;
+ DR_GROUP_SIZE (stmt_info) = groupsize;
DR_GROUP_GAP (stmt_info) = groupsize - 1;
if (dump_enabled_p ())
{
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not consecutive access ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+ dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
+ stmt_info->stmt, 0);
}
if (bb_vinfo)
- {
- /* Mark the statement as unvectorizable. */
- STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (vect_dr_stmt (dr))) = false;
- return true;
- }
+ {
+ /* Mark the statement as unvectorizable. */
+ STMT_VINFO_VECTORIZABLE (vect_dr_stmt (dr)) = false;
+ return true;
+ }
dump_printf_loc (MSG_NOTE, vect_location, "using strided accesses\n");
STMT_VINFO_STRIDED_P (stmt_info) = true;
return true;
}
- if (DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt)
+ if (DR_GROUP_FIRST_ELEMENT (stmt_info) == stmt_info)
{
/* First stmt in the interleaving chain. Check the chain. */
- gimple *next = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
+ gimple *next = DR_GROUP_NEXT_ELEMENT (stmt_info);
struct data_reference *data_ref = dr;
unsigned int count = 1;
tree prev_init = DR_INIT (data_ref);
- gimple *prev = stmt;
+ gimple *prev = stmt_info;
HOST_WIDE_INT diff, gaps = 0;
/* By construction, all group members have INTEGER_CST DR_INITs. */
difference between the groupsize and the last accessed
element.
When there is no gap, this difference should be 0. */
- DR_GROUP_GAP (vinfo_for_stmt (stmt)) = groupsize - last_accessed_element;
+ DR_GROUP_GAP (stmt_info) = groupsize - last_accessed_element;
- DR_GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
+ DR_GROUP_SIZE (stmt_info) = groupsize;
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
dump_printf (MSG_NOTE, "store ");
dump_printf (MSG_NOTE, "of size %u starting with ",
(unsigned)groupsize);
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
- if (DR_GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
+ dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
+ if (DR_GROUP_GAP (stmt_info) != 0)
dump_printf_loc (MSG_NOTE, vect_location,
"There is a gap of %u elements after the group\n",
- DR_GROUP_GAP (vinfo_for_stmt (stmt)));
+ DR_GROUP_GAP (stmt_info));
}
/* SLP: create an SLP data structure for every interleaving group of
stores for further analysis in vect_analyze_slp. */
if (DR_IS_WRITE (dr) && !slp_impossible)
- {
- if (loop_vinfo)
- LOOP_VINFO_GROUPED_STORES (loop_vinfo).safe_push (stmt);
- if (bb_vinfo)
- BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (stmt);
- }
+ {
+ if (loop_vinfo)
+ LOOP_VINFO_GROUPED_STORES (loop_vinfo).safe_push (stmt_info);
+ if (bb_vinfo)
+ BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (stmt_info);
+ }
}
return true;
{
/* Dissolve the group if present. */
gimple *next;
- gimple *stmt = DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (vect_dr_stmt (dr)));
+ gimple *stmt = DR_GROUP_FIRST_ELEMENT (vect_dr_stmt (dr));
while (stmt)
{
stmt_vec_info vinfo = vinfo_for_stmt (stmt);
{
tree step = DR_STEP (dr);
tree scalar_type = TREE_TYPE (DR_REF (dr));
- gimple *stmt = vect_dr_stmt (dr);
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ stmt_vec_info stmt_info = vect_dr_stmt (dr);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = NULL;
/* Allow loads with zero step in inner-loop vectorization. */
if (loop_vinfo && integer_zerop (step))
{
- DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
- if (!nested_in_vect_loop_p (loop, stmt))
+ DR_GROUP_FIRST_ELEMENT (stmt_info) = NULL;
+ if (!nested_in_vect_loop_p (loop, stmt_info))
return DR_IS_READ (dr);
/* Allow references with zero step for outer loops marked
with pragma omp simd only - it guarantees absence of
}
}
- if (loop && nested_in_vect_loop_p (loop, stmt))
+ if (loop && nested_in_vect_loop_p (loop, stmt_info))
{
/* Interleaved accesses are not yet supported within outer-loop
vectorization for references in the inner-loop. */
- DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
+ DR_GROUP_FIRST_ELEMENT (stmt_info) = NULL;
/* For the rest of the analysis we use the outer-loop step. */
step = STMT_VINFO_DR_STEP (stmt_info);
&& !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step)))
{
/* Mark that it is not interleaving. */
- DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
+ DR_GROUP_FIRST_ELEMENT (stmt_info) = NULL;
return true;
}
}
- if (loop && nested_in_vect_loop_p (loop, stmt))
+ if (loop && nested_in_vect_loop_p (loop, stmt_info))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
for (i = 0; i < datarefs_copy.length () - 1;)
{
data_reference_p dra = datarefs_copy[i];
- stmt_vec_info stmtinfo_a = vinfo_for_stmt (vect_dr_stmt (dra));
+ stmt_vec_info stmtinfo_a = vect_dr_stmt (dra);
stmt_vec_info lastinfo = NULL;
if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
|| STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a))
for (i = i + 1; i < datarefs_copy.length (); ++i)
{
data_reference_p drb = datarefs_copy[i];
- stmt_vec_info stmtinfo_b = vinfo_for_stmt (vect_dr_stmt (drb));
+ stmt_vec_info stmtinfo_b = vect_dr_stmt (drb);
if (!STMT_VINFO_VECTORIZABLE (stmtinfo_b)
|| STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
break;
}
FOR_EACH_VEC_ELT (datarefs_copy, i, dr)
- if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (vect_dr_stmt (dr)))
+ if (STMT_VINFO_VECTORIZABLE (vect_dr_stmt (dr))
&& !vect_analyze_data_ref_access (dr))
{
if (dump_enabled_p ())
"not vectorized: complicated access pattern.\n");
if (is_a <bb_vec_info> (vinfo))
- {
- /* Mark the statement as not vectorizable. */
- STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (vect_dr_stmt (dr))) = false;
- continue;
- }
+ {
+ /* Mark the statement as not vectorizable. */
+ STMT_VINFO_VECTORIZABLE (vect_dr_stmt (dr)) = false;
+ continue;
+ }
else
{
datarefs_copy.release ();
static unsigned HOST_WIDE_INT
vect_vfa_access_size (data_reference *dr)
{
- stmt_vec_info stmt_vinfo = vinfo_for_stmt (vect_dr_stmt (dr));
+ stmt_vec_info stmt_vinfo = vect_dr_stmt (dr);
tree ref_type = TREE_TYPE (DR_REF (dr));
unsigned HOST_WIDE_INT ref_size = tree_to_uhwi (TYPE_SIZE_UNIT (ref_type));
unsigned HOST_WIDE_INT access_size = ref_size;
static bool
vect_small_gap_p (loop_vec_info loop_vinfo, data_reference *dr, poly_int64 gap)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (vect_dr_stmt (dr));
+ stmt_vec_info stmt_info = vect_dr_stmt (dr);
HOST_WIDE_INT count
= estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
if (DR_GROUP_FIRST_ELEMENT (stmt_info))
vec<data_reference_p> datarefs = vinfo->shared->datarefs;
FOR_EACH_VEC_ELT (datarefs, i, dr)
{
- gimple *stmt;
- stmt_vec_info stmt_info;
enum { SG_NONE, GATHER, SCATTER } gatherscatter = SG_NONE;
poly_uint64 vf;
gcc_assert (DR_REF (dr));
- stmt = vect_dr_stmt (dr);
- stmt_info = vinfo_for_stmt (stmt);
+ stmt_vec_info stmt_info = vect_dr_stmt (dr);
/* Check that analysis of the data-ref succeeded. */
if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr) || !DR_INIT (dr)
/* If target supports vector gather loads or scatter stores,
see if they can't be used. */
if (is_a <loop_vec_info> (vinfo)
- && !nested_in_vect_loop_p (loop, stmt))
+ && !nested_in_vect_loop_p (loop, stmt_info))
{
if (maybe_gather || maybe_scatter)
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: data ref analysis "
"failed ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+ dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
+ stmt_info->stmt, 0);
}
if (is_a <bb_vec_info> (vinfo))
{
/* See if this was detected as SIMD lane access. */
if (dr->aux == (void *)-1)
{
- if (nested_in_vect_loop_p (loop, stmt))
+ if (nested_in_vect_loop_p (loop, stmt_info))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: data ref analysis "
"failed ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+ dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
+ stmt_info->stmt, 0);
}
return false;
}
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: base object not addressable "
"for stmt: ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+ dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
+ stmt_info->stmt, 0);
}
if (is_a <bb_vec_info> (vinfo))
{
&& DR_STEP (dr)
&& TREE_CODE (DR_STEP (dr)) != INTEGER_CST)
{
- if (nested_in_vect_loop_p (loop, stmt))
+ if (nested_in_vect_loop_p (loop, stmt_info))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: not suitable for strided "
"load ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+ dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
+ stmt_info->stmt, 0);
}
return false;
}
inner-most enclosing loop). We do that by building a reference to the
first location accessed by the inner-loop, and analyze it relative to
the outer-loop. */
- if (loop && nested_in_vect_loop_p (loop, stmt))
+ if (loop && nested_in_vect_loop_p (loop, stmt_info))
{
/* Build a reference to the first location accessed by the
inner loop: *(BASE + INIT + OFFSET). By construction,
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: no vectype for stmt: ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+ dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
+ stmt_info->stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, " scalar_type: ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_DETAILS,
scalar_type);
{
dump_printf_loc (MSG_NOTE, vect_location,
"got vectype for stmt: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
+ dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
dump_generic_expr (MSG_NOTE, TDF_SLIM,
STMT_VINFO_VECTYPE (stmt_info));
dump_printf (MSG_NOTE, "\n");
if (gatherscatter != SG_NONE)
{
gather_scatter_info gs_info;
- if (!vect_check_gather_scatter (stmt, as_a <loop_vec_info> (vinfo),
+ if (!vect_check_gather_scatter (stmt_info,
+ as_a <loop_vec_info> (vinfo),
&gs_info)
|| !get_vectype_for_scalar_type (TREE_TYPE (gs_info.offset)))
{
"load " :
"not vectorized: not suitable for scatter "
"store ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+ dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
+ stmt_info->stmt, 0);
}
return false;
}
vect_supportable_dr_alignment (struct data_reference *dr,
bool check_aligned_accesses)
{
- gimple *stmt = vect_dr_stmt (dr);
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ stmt_vec_info stmt_info = vect_dr_stmt (dr);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
machine_mode mode = TYPE_MODE (vectype);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
/* For now assume all conditional loads/stores support unaligned
access without any special code. */
- if (is_gimple_call (stmt)
- && gimple_call_internal_p (stmt)
- && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
- || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
- return dr_unaligned_supported;
+ if (gcall *stmt = dyn_cast <gcall *> (stmt_info->stmt))
+ if (gimple_call_internal_p (stmt)
+ && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
+ || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
+ return dr_unaligned_supported;
if (loop_vinfo)
{
vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
- nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt);
+ nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt_info);
}
/* Possibly unaligned access. */