/* Return true if STMT_INFO extends the result of a load. */
static bool
-aarch64_extending_load_p (stmt_vec_info stmt_info)
+aarch64_extending_load_p (class vec_info *vinfo, stmt_vec_info stmt_info)
{
gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
if (!assign || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (assign)))
    return false;

  tree rhs = gimple_assign_rhs1 (assign);
  tree lhs_type = TREE_TYPE (gimple_assign_lhs (assign));
  tree rhs_type = TREE_TYPE (rhs);
  if (!INTEGRAL_TYPE_P (lhs_type)
      || !INTEGRAL_TYPE_P (rhs_type)
      || TYPE_PRECISION (lhs_type) <= TYPE_PRECISION (rhs_type))
    return false;
- stmt_vec_info def_stmt_info = stmt_info->vinfo->lookup_def (rhs);
+ stmt_vec_info def_stmt_info = vinfo->lookup_def (rhs);
return (def_stmt_info
&& STMT_VINFO_DATA_REF (def_stmt_info)
&& DR_IS_READ (STMT_VINFO_DATA_REF (def_stmt_info)));
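(For reference, the source-level pattern this predicate matches is a
widening conversion fed directly by a load, which SVE can fold into a
single extending load.  A minimal C illustration, not part of the patch:

  void
  widen (unsigned short *dst, unsigned char *src, int n)
  {
    for (int i = 0; i < n; ++i)
      dst[i] = src[i];	/* the zero extension folds into the load */
  }

The def-chain walk above checks that the conversion's operand is defined
by a data-reference read, i.e. a load.)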
/* STMT_COST is the cost calculated by aarch64_builtin_vectorization_cost
   for STMT_INFO, which has cost kind KIND and which when vectorized would
   operate on vector type VECTYPE.  Adjust the cost as necessary for SVE
   targets.  */
static unsigned int
-aarch64_sve_adjust_stmt_cost (vect_cost_for_stmt kind,
+aarch64_sve_adjust_stmt_cost (class vec_info *vinfo, vect_cost_for_stmt kind,
stmt_vec_info stmt_info, tree vectype,
unsigned int stmt_cost)
{
  /* Most loads have extending forms that can do the sign or zero extension
     on the fly.  Optimistically assume that a load followed by an extension
     will fold to this form during combine, and that the extension therefore
     comes for free.  */
- if (kind == vector_stmt && aarch64_extending_load_p (stmt_info))
+ if (kind == vector_stmt && aarch64_extending_load_p (vinfo, stmt_info))
stmt_cost = 0;
  /* For similar reasons, vector_stmt integer truncations are a no-op,
     because we can just ignore the unused upper bits of the source.  */
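(The truncation case is the mirror image.  Illustrative C, not part of
the patch:

  void
  narrow (unsigned char *dst, unsigned short *src, int n)
  {
    for (int i = 0; i < n; ++i)
      dst[i] = (unsigned char) src[i];	/* upper bits just ignored */
  }

When SVE operates on partial vectors the narrower value can reuse the
same container, with the unused upper bits of each lane ignored, e.g. by
a narrowing ST1B store, hence the zero cost.)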
/* Implement targetm.vectorize.add_stmt_cost. */
static unsigned
-aarch64_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
+aarch64_add_stmt_cost (class vec_info *vinfo, void *data, int count,
+ enum vect_cost_for_stmt kind,
struct _stmt_vec_info *stmt_info, int misalign,
enum vect_cost_model_location where)
{
aarch64_builtin_vectorization_cost (kind, vectype, misalign);
if (stmt_info && vectype && aarch64_sve_mode_p (TYPE_MODE (vectype)))
- stmt_cost = aarch64_sve_adjust_stmt_cost (kind, stmt_info, vectype,
- stmt_cost);
+ stmt_cost = aarch64_sve_adjust_stmt_cost (vinfo, kind, stmt_info,
+ vectype, stmt_cost);
/* Statements in an inner loop relative to the loop being
vectorized are weighted more heavily. The value here is
arbitrary and could potentially be improved with analysis. */
- if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
+ if (where == vect_body && stmt_info
+ && stmt_in_inner_loop_p (vinfo, stmt_info))
count *= 50; /* FIXME */
retval = (unsigned) (count * stmt_cost);
/* Implement targetm.vectorize.add_stmt_cost. */
static unsigned
-arm_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
+arm_add_stmt_cost (class vec_info *vinfo, void *data, int count,
+ enum vect_cost_for_stmt kind,
struct _stmt_vec_info *stmt_info, int misalign,
enum vect_cost_model_location where)
{
/* Statements in an inner loop relative to the loop being
vectorized are weighted more heavily. The value here is
arbitrary and could potentially be improved with analysis. */
- if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
+ if (where == vect_body && stmt_info
+ && stmt_in_inner_loop_p (vinfo, stmt_info))
count *= 50; /* FIXME. */
retval = (unsigned) (count * stmt_cost);
/* Implement targetm.vectorize.add_stmt_cost. */
static unsigned
-ix86_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
+ix86_add_stmt_cost (class vec_info *vinfo, void *data, int count,
+ enum vect_cost_for_stmt kind,
class _stmt_vec_info *stmt_info, int misalign,
enum vect_cost_model_location where)
{
/* Statements in an inner loop relative to the loop being
vectorized are weighted more heavily. The value here is
arbitrary and could potentially be improved with analysis. */
- if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
+ if (where == vect_body && stmt_info
+ && stmt_in_inner_loop_p (vinfo, stmt_info))
count *= 50; /* FIXME. */
retval = (unsigned) (count * stmt_cost);
/* Implement targetm.vectorize.add_stmt_cost. */
static unsigned
-rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
+rs6000_add_stmt_cost (class vec_info *vinfo, void *data, int count,
+ enum vect_cost_for_stmt kind,
struct _stmt_vec_info *stmt_info, int misalign,
enum vect_cost_model_location where)
{
/* Statements in an inner loop relative to the loop being
vectorized are weighted more heavily. The value here is
arbitrary and could potentially be improved with analysis. */
- if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
+ if (where == vect_body && stmt_info
+ && stmt_in_inner_loop_p (vinfo, stmt_info))
count *= 50; /* FIXME. */
retval = (unsigned) (count * stmt_cost);
This hook should initialize target-specific data structures in preparation for modeling the costs of vectorizing a loop or basic block. The default allocates three unsigned integers for accumulating costs for the prologue, body, and epilogue of the loop or basic block. If @var{loop_info} is non-NULL, it identifies the loop being vectorized; otherwise a single block is being vectorized.
@end deftypefn
-@deftypefn {Target Hook} unsigned TARGET_VECTORIZE_ADD_STMT_COST (void *@var{data}, int @var{count}, enum vect_cost_for_stmt @var{kind}, class _stmt_vec_info *@var{stmt_info}, int @var{misalign}, enum vect_cost_model_location @var{where})
+@deftypefn {Target Hook} unsigned TARGET_VECTORIZE_ADD_STMT_COST (class vec_info *@var{}, void *@var{data}, int @var{count}, enum vect_cost_for_stmt @var{kind}, class _stmt_vec_info *@var{stmt_info}, int @var{misalign}, enum vect_cost_model_location @var{where})
This hook should update the target-specific @var{data} in response to adding @var{count} copies of the given @var{kind} of statement to a loop or basic block. The default adds the builtin vectorizer cost for the copies of the statement to the accumulator specified by @var{where} (the prologue, body, or epilogue) and returns the amount added. The return value should be viewed as a tentative cost that may later be revised.
@end deftypefn
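(For illustration: a target's implementation of this hook usually mirrors
default_add_stmt_cost, whose updated body appears further down in this
patch.  The following condensed sketch uses the new signature; the name
example_add_stmt_cost is a placeholder, not a real GCC symbol, and the
three-accumulator layout matches the default described above.

  static unsigned
  example_add_stmt_cost (class vec_info *vinfo, void *data, int count,
			 enum vect_cost_for_stmt kind,
			 class _stmt_vec_info *stmt_info, int misalign,
			 enum vect_cost_model_location where)
  {
    unsigned *cost = (unsigned *) data;  /* prologue/body/epilogue slots */
    tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
    int stmt_cost
      = targetm.vectorize.builtin_vectorization_cost (kind, vectype,
						      misalign);
    /* Weight inner-loop statements more heavily, as the ports below do.  */
    if (where == vect_body && stmt_info
	&& stmt_in_inner_loop_p (vinfo, stmt_info))
      count *= 50;
    unsigned retval = (unsigned) (count * stmt_cost);
    cost[where] += retval;
    return retval;
  }

Note how stmt_in_inner_loop_p now needs the vec_info passed explicitly,
which is the point of this patch.)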
"return value should be viewed as a tentative cost that may later be "
"revised.",
unsigned,
- (void *data, int count, enum vect_cost_for_stmt kind,
+ (class vec_info *, void *data, int count, enum vect_cost_for_stmt kind,
class _stmt_vec_info *stmt_info, int misalign,
enum vect_cost_model_location where),
default_add_stmt_cost)
/* These are defined in tree-vect-stmts.c. */
extern tree stmt_vectype (class _stmt_vec_info *);
-extern bool stmt_in_inner_loop_p (class _stmt_vec_info *);
+extern bool stmt_in_inner_loop_p (class vec_info *, class _stmt_vec_info *);
/* Assembler instructions for creating various kinds of integer object. */
it into the cost specified by WHERE, and returns the cost added. */
unsigned
-default_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
+default_add_stmt_cost (class vec_info *vinfo, void *data, int count,
+ enum vect_cost_for_stmt kind,
class _stmt_vec_info *stmt_info, int misalign,
enum vect_cost_model_location where)
{
/* Statements in an inner loop relative to the loop being
vectorized are weighted more heavily. The value here is
arbitrary and could potentially be improved with analysis. */
- if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
+ if (where == vect_body && stmt_info
+ && stmt_in_inner_loop_p (vinfo, stmt_info))
count *= 50; /* FIXME. */
retval = (unsigned) (count * stmt_cost);
disambiguating the loads. */
static bool
-vect_slp_analyze_node_dependences (slp_instance instance, slp_tree node,
+vect_slp_analyze_node_dependences (vec_info *vinfo,
+ slp_instance instance, slp_tree node,
vec<stmt_vec_info> stores,
stmt_vec_info last_store_info)
{
in NODE verifying we can sink them up to the last stmt in the
group. */
stmt_vec_info last_access_info = vect_find_last_scalar_stmt_in_slp (node);
- vec_info *vinfo = last_access_info->vinfo;
for (unsigned k = 0; k < SLP_INSTANCE_GROUP_SIZE (instance); ++k)
{
stmt_vec_info access_info = SLP_TREE_SCALAR_STMTS (node)[k];
the maximum vectorization factor the data dependences allow. */
bool
-vect_slp_analyze_instance_dependence (slp_instance instance)
+vect_slp_analyze_instance_dependence (vec_info *vinfo, slp_instance instance)
{
DUMP_VECT_SCOPE ("vect_slp_analyze_instance_dependence");
stmt_vec_info last_store_info = NULL;
if (store)
{
- if (! vect_slp_analyze_node_dependences (instance, store, vNULL, NULL))
+ if (! vect_slp_analyze_node_dependences (vinfo, instance, store,
+ vNULL, NULL))
return false;
/* Mark stores in this instance and remember the last one. */
slp_tree load;
unsigned int i;
FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, load)
- if (! vect_slp_analyze_node_dependences (instance, load,
+ if (! vect_slp_analyze_node_dependences (vinfo, instance, load,
store
? SLP_TREE_SCALAR_STMTS (store)
: vNULL, last_store_info))
in STMT_INFO. */
static void
-vect_record_base_alignment (stmt_vec_info stmt_info,
+vect_record_base_alignment (vec_info *vinfo, stmt_vec_info stmt_info,
innermost_loop_behavior *drb)
{
- vec_info *vinfo = stmt_info->vinfo;
bool existed;
innermost_loop_behavior *&entry
= vinfo->base_alignments.get_or_insert (drb->base_address, &existed);
&& STMT_VINFO_VECTORIZABLE (stmt_info)
&& !STMT_VINFO_GATHER_SCATTER_P (stmt_info))
{
- vect_record_base_alignment (stmt_info, &DR_INNERMOST (dr));
+ vect_record_base_alignment (vinfo, stmt_info, &DR_INNERMOST (dr));
/* If DR is nested in the loop that is being vectorized, we can also
record the alignment of the base wrt the outer loop. */
if (loop && nested_in_vect_loop_p (loop, stmt_info))
vect_record_base_alignment
- (stmt_info, &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info));
+ (vinfo, stmt_info, &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info));
}
}
}
only for trivial cases. TODO. */
static void
-vect_compute_data_ref_alignment (dr_vec_info *dr_info)
+vect_compute_data_ref_alignment (vec_info *vinfo, dr_vec_info *dr_info)
{
stmt_vec_info stmt_info = dr_info->stmt;
- vec_base_alignments *base_alignments = &stmt_info->vinfo->base_alignments;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ vec_base_alignments *base_alignments = &vinfo->base_alignments;
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
class loop *loop = NULL;
tree ref = DR_REF (dr_info->dr);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
return;
- innermost_loop_behavior *drb = vect_dr_behavior (dr_info);
+ innermost_loop_behavior *drb = vect_dr_behavior (vinfo, dr_info);
bool step_preserves_misalignment_p;
poly_uint64 vector_alignment
Return TRUE if DR_INFO can be handled with respect to alignment. */
static opt_result
-verify_data_ref_alignment (dr_vec_info *dr_info)
+verify_data_ref_alignment (vec_info *vinfo, dr_vec_info *dr_info)
{
enum dr_alignment_support supportable_dr_alignment
- = vect_supportable_dr_alignment (dr_info, false);
+ = vect_supportable_dr_alignment (vinfo, dr_info, false);
if (!supportable_dr_alignment)
return opt_result::failure_at
(dr_info->stmt->stmt,
&& !STMT_VINFO_GROUPED_ACCESS (stmt_info))
continue;
- opt_result res = verify_data_ref_alignment (dr_info);
+ opt_result res = verify_data_ref_alignment (vinfo, dr_info);
if (!res)
return res;
}
/* Calculate the cost of the memory access represented by DR_INFO. */
static void
-vect_get_data_access_cost (dr_vec_info *dr_info,
+vect_get_data_access_cost (vec_info *vinfo, dr_vec_info *dr_info,
unsigned int *inside_cost,
unsigned int *outside_cost,
stmt_vector_for_cost *body_cost_vec,
stmt_vector_for_cost *prologue_cost_vec)
{
stmt_vec_info stmt_info = dr_info->stmt;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
int ncopies;
if (PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, STMT_VINFO_VECTYPE (stmt_info));
if (DR_IS_READ (dr_info->dr))
- vect_get_load_cost (stmt_info, ncopies, true, inside_cost, outside_cost,
- prologue_cost_vec, body_cost_vec, false);
+ vect_get_load_cost (vinfo, stmt_info, ncopies, true, inside_cost,
+ outside_cost, prologue_cost_vec, body_cost_vec, false);
else
- vect_get_store_cost (stmt_info, ncopies, inside_cost, body_cost_vec);
+    vect_get_store_cost (vinfo, stmt_info, ncopies, inside_cost,
+			 body_cost_vec);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
typedef struct _vect_peel_extended_info
{
+ vec_info *vinfo;
struct _vect_peel_info peel_info;
unsigned int inside_cost;
unsigned int outside_cost;
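(The new vinfo member exists because the hash-table traversal callbacks
that evaluate peeling candidates receive only the hash slot and this
struct; with stmt_info->vinfo gone they have no other way to reach the
vec_info.  The caller seeds it before walking the table, roughly:

  res.vinfo = loop_vinfo;
  peeling_htab->traverse <_vect_peel_extended_info *,
			  vect_peeling_hash_get_lowest_cost> (&res);

as the res.vinfo and min->vinfo hunks below show.)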
struct _vect_peel_info elem, *slot;
_vect_peel_info **new_slot;
bool supportable_dr_alignment
- = vect_supportable_dr_alignment (dr_info, true);
+ = vect_supportable_dr_alignment (loop_vinfo, dr_info, true);
elem.npeel = npeel;
slot = peeling_htab->find (&elem);
SET_DR_MISALIGNMENT (dr_info, 0);
else
vect_update_misalignment_for_peel (dr_info, dr0_info, npeel);
- vect_get_data_access_cost (dr_info, inside_cost, outside_cost,
+ vect_get_data_access_cost (loop_vinfo, dr_info, inside_cost, outside_cost,
body_cost_vec, prologue_cost_vec);
SET_DR_MISALIGNMENT (dr_info, save_misalignment);
}
vect_peel_info elem = *slot;
int dummy;
unsigned int inside_cost = 0, outside_cost = 0;
- stmt_vec_info stmt_info = elem->dr_info->stmt;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (min->vinfo);
stmt_vector_for_cost prologue_cost_vec, body_cost_vec,
epilogue_cost_vec;
struct _vect_peel_extended_info res;
res.peel_info.dr_info = NULL;
+ res.vinfo = loop_vinfo;
if (!unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
{
save_misalignment = DR_MISALIGNMENT (dr_info);
vect_update_misalignment_for_peel (dr_info, dr0_info, npeel);
supportable_dr_alignment
- = vect_supportable_dr_alignment (dr_info, false);
+ = vect_supportable_dr_alignment (loop_vinfo, dr_info, false);
SET_DR_MISALIGNMENT (dr_info, save_misalignment);
if (!supportable_dr_alignment)
&& !STMT_VINFO_GROUPED_ACCESS (stmt_info))
continue;
- supportable_dr_alignment = vect_supportable_dr_alignment (dr_info, true);
+ supportable_dr_alignment
+ = vect_supportable_dr_alignment (loop_vinfo, dr_info, true);
do_peeling = vector_alignment_reachable_p (dr_info);
if (do_peeling)
{
}
supportable_dr_alignment
- = vect_supportable_dr_alignment (dr_info, false);
+ = vect_supportable_dr_alignment (loop_vinfo, dr_info, false);
if (!supportable_dr_alignment)
{
{
dr_vec_info *dr_info = vinfo->lookup_dr (dr);
if (STMT_VINFO_VECTORIZABLE (dr_info->stmt))
- vect_compute_data_ref_alignment (dr_info);
+ vect_compute_data_ref_alignment (vinfo, dr_info);
}
return opt_result::success ();
/* Analyze alignment of DRs of stmts in NODE. */
static bool
-vect_slp_analyze_and_verify_node_alignment (slp_tree node)
+vect_slp_analyze_and_verify_node_alignment (vec_info *vinfo, slp_tree node)
{
/* We vectorize from the first scalar stmt in the node unless
the node is permuted in which case we start from the first
first_stmt_info = DR_GROUP_FIRST_ELEMENT (first_stmt_info);
dr_vec_info *dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
- vect_compute_data_ref_alignment (dr_info);
+ vect_compute_data_ref_alignment (vinfo, dr_info);
/* For creating the data-ref pointer we need alignment of the
first element anyway. */
if (dr_info != first_dr_info)
- vect_compute_data_ref_alignment (first_dr_info);
- if (! verify_data_ref_alignment (dr_info))
+ vect_compute_data_ref_alignment (vinfo, first_dr_info);
+ if (! verify_data_ref_alignment (vinfo, dr_info))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
Return FALSE if a data reference is found that cannot be vectorized. */
bool
-vect_slp_analyze_and_verify_instance_alignment (slp_instance instance)
+vect_slp_analyze_and_verify_instance_alignment (vec_info *vinfo,
+ slp_instance instance)
{
DUMP_VECT_SCOPE ("vect_slp_analyze_and_verify_instance_alignment");
slp_tree node;
unsigned i;
FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, node)
- if (! vect_slp_analyze_and_verify_node_alignment (node))
+ if (! vect_slp_analyze_and_verify_node_alignment (vinfo, node))
return false;
node = SLP_INSTANCE_TREE (instance);
if (STMT_VINFO_DATA_REF (SLP_TREE_SCALAR_STMTS (node)[0])
&& ! vect_slp_analyze_and_verify_node_alignment
- (SLP_INSTANCE_TREE (instance)))
+ (vinfo, SLP_INSTANCE_TREE (instance)))
return false;
return true;
Worker for vect_analyze_group_access. */
static bool
-vect_analyze_group_access_1 (dr_vec_info *dr_info)
+vect_analyze_group_access_1 (vec_info *vinfo, dr_vec_info *dr_info)
{
data_reference *dr = dr_info->dr;
tree step = DR_STEP (dr);
tree scalar_type = TREE_TYPE (DR_REF (dr));
HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
stmt_vec_info stmt_info = dr_info->stmt;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
+ bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
HOST_WIDE_INT dr_step = -1;
HOST_WIDE_INT groupsize, last_accessed_element = 1;
bool slp_impossible = false;
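(As a concrete illustration of the interleaved groups this analysis
builds, consider this C loop, which is not part of the patch:

  for (int i = 0; i < n; i++)
    {
      a[3*i]     = x[i];	/* DR_GROUP_FIRST_ELEMENT of the group */
      a[3*i + 1] = y[i];
      a[3*i + 2] = z[i];
    }

Each store has step 3 but the three references access adjacent elements,
so together they form one interleaved group with groupsize 3.)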
Collect groups of strided stores for further use in SLP analysis. */
static bool
-vect_analyze_group_access (dr_vec_info *dr_info)
+vect_analyze_group_access (vec_info *vinfo, dr_vec_info *dr_info)
{
- if (!vect_analyze_group_access_1 (dr_info))
+ if (!vect_analyze_group_access_1 (vinfo, dr_info))
{
/* Dissolve the group if present. */
stmt_vec_info stmt_info = DR_GROUP_FIRST_ELEMENT (dr_info->stmt);
analyze groups of accesses. */
static bool
-vect_analyze_data_ref_access (dr_vec_info *dr_info)
+vect_analyze_data_ref_access (vec_info *vinfo, dr_vec_info *dr_info)
{
data_reference *dr = dr_info->dr;
tree step = DR_STEP (dr);
tree scalar_type = TREE_TYPE (DR_REF (dr));
stmt_vec_info stmt_info = dr_info->stmt;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
class loop *loop = NULL;
if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
if (TREE_CODE (step) != INTEGER_CST)
return (STMT_VINFO_STRIDED_P (stmt_info)
&& (!STMT_VINFO_GROUPED_ACCESS (stmt_info)
- || vect_analyze_group_access (dr_info)));
+ || vect_analyze_group_access (vinfo, dr_info)));
/* Not consecutive access - check if it's a part of interleaving group. */
- return vect_analyze_group_access (dr_info);
+ return vect_analyze_group_access (vinfo, dr_info);
}
/* Compare two data-references DRA and DRB to group them into chunks
   suitable for grouping.  */
{
dr_vec_info *dr_info = vinfo->lookup_dr (dr);
if (STMT_VINFO_VECTORIZABLE (dr_info->stmt)
- && !vect_analyze_data_ref_access (dr_info))
+ && !vect_analyze_data_ref_access (vinfo, dr_info))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
gives the worst-case number of bytes covered by the segment. */
static unsigned HOST_WIDE_INT
-vect_vfa_access_size (dr_vec_info *dr_info)
+vect_vfa_access_size (vec_info *vinfo, dr_vec_info *dr_info)
{
stmt_vec_info stmt_vinfo = dr_info->stmt;
tree ref_type = TREE_TYPE (DR_REF (dr_info->dr));
access_size *= DR_GROUP_SIZE (stmt_vinfo) - DR_GROUP_GAP (stmt_vinfo);
}
if (STMT_VINFO_VEC_STMT (stmt_vinfo)
- && (vect_supportable_dr_alignment (dr_info, false)
+ && (vect_supportable_dr_alignment (vinfo, dr_info, false)
== dr_explicit_realign_optimized))
{
/* We might access a full vector's worth. */
segment_length_a = vect_vfa_segment_size (dr_info_a, length_factor);
segment_length_b = vect_vfa_segment_size (dr_info_b, length_factor);
}
- access_size_a = vect_vfa_access_size (dr_info_a);
- access_size_b = vect_vfa_access_size (dr_info_b);
+ access_size_a = vect_vfa_access_size (loop_vinfo, dr_info_a);
+ access_size_b = vect_vfa_access_size (loop_vinfo, dr_info_b);
align_a = vect_vfa_align (dr_info_a);
align_b = vect_vfa_align (dr_info_b);
FORNOW: We are only handling array accesses with step 1. */
tree
-vect_create_addr_base_for_vector_ref (stmt_vec_info stmt_info,
+vect_create_addr_base_for_vector_ref (vec_info *vinfo, stmt_vec_info stmt_info,
gimple_seq *new_stmt_list,
tree offset,
tree byte_offset)
gimple_seq seq = NULL;
tree vect_ptr_type;
tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- innermost_loop_behavior *drb = vect_dr_behavior (dr_info);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
+ innermost_loop_behavior *drb = vect_dr_behavior (vinfo, dr_info);
tree data_ref_base = unshare_expr (drb->base_address);
- tree base_offset = unshare_expr (get_dr_vinfo_offset (dr_info, true));
+ tree base_offset = unshare_expr (get_dr_vinfo_offset (vinfo, dr_info, true));
tree init = unshare_expr (drb->init);
if (loop_vinfo)
3. Return the pointer. */
tree
-vect_create_data_ref_ptr (stmt_vec_info stmt_info, tree aggr_type,
- class loop *at_loop, tree offset,
+vect_create_data_ref_ptr (vec_info *vinfo, stmt_vec_info stmt_info,
+ tree aggr_type, class loop *at_loop, tree offset,
tree *initial_address, gimple_stmt_iterator *gsi,
gimple **ptr_incr, bool only_init,
tree byte_offset, tree iv_step)
{
const char *base_name;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
class loop *loop = NULL;
bool nested_in_vect_loop = false;
class loop *containing_loop = NULL;
bool insert_after;
tree indx_before_incr, indx_after_incr;
gimple *incr;
- bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
+ bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
gcc_assert (iv_step != NULL_TREE
|| TREE_CODE (aggr_type) == ARRAY_TYPE
/* Create: (&(base[init_val+offset]+byte_offset) in the loop preheader. */
- new_temp = vect_create_addr_base_for_vector_ref (stmt_info, &new_stmt_list,
+ new_temp = vect_create_addr_base_for_vector_ref (vinfo,
+ stmt_info, &new_stmt_list,
offset, byte_offset);
if (new_stmt_list)
{
{
/* Accesses to invariant addresses should be handled specially
by the caller. */
- tree step = vect_dr_behavior (dr_info)->step;
+ tree step = vect_dr_behavior (vinfo, dr_info)->step;
gcc_assert (!integer_zerop (step));
if (iv_step == NULL_TREE)
*/
tree
-bump_vector_ptr (tree dataref_ptr, gimple *ptr_incr, gimple_stmt_iterator *gsi,
+bump_vector_ptr (vec_info *vinfo,
+ tree dataref_ptr, gimple *ptr_incr, gimple_stmt_iterator *gsi,
stmt_vec_info stmt_info, tree bump)
{
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
new_dataref_ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
incr_stmt = gimple_build_assign (new_dataref_ptr, POINTER_PLUS_EXPR,
dataref_ptr, update);
- vect_finish_stmt_generation (stmt_info, incr_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, incr_stmt, gsi);
/* Copy the points-to information if it exists. */
if (DR_PTR_INFO (dr))
I4: 6 14 22 30 7 15 23 31. */
void
-vect_permute_store_chain (vec<tree> dr_chain,
+vect_permute_store_chain (vec_info *vinfo, vec<tree> dr_chain,
unsigned int length,
stmt_vec_info stmt_info,
gimple_stmt_iterator *gsi,
data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
vect2, perm3_mask_low);
- vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
vect1 = data_ref;
vect2 = dr_chain[2];
data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
vect2, perm3_mask_high);
- vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
(*result_chain)[j] = data_ref;
}
}
high = make_temp_ssa_name (vectype, NULL, "vect_inter_high");
perm_stmt = gimple_build_assign (high, VEC_PERM_EXPR, vect1,
vect2, perm_mask_high);
- vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
(*result_chain)[2*j] = high;
/* Create interleaving stmt:
low = make_temp_ssa_name (vectype, NULL, "vect_inter_low");
perm_stmt = gimple_build_assign (low, VEC_PERM_EXPR, vect1,
vect2, perm_mask_low);
- vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
(*result_chain)[2*j+1] = low;
}
memcpy (dr_chain.address (), result_chain->address (),
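(The interleaving network whose final vector the truncated comment above
gives as "6 14 22 30 7 15 23 31" can be reproduced with a plain scalar
model.  The following self-contained C++ sketch is illustrative only and
independent of GCC; it applies log2(length) rounds of interleave
high/low to a chain of four 8-element vectors, mirroring the
VEC_PERM_EXPR network built above.  Which half "high" denotes is
endianness-dependent in GCC; the model uses the convention that
reproduces the comment's numbers.

  #include <cstdio>
  #include <vector>

  typedef std::vector<int> vec;

  /* Interleave the first halves: a0 b0 a1 b1 ...  */
  static vec
  interleave_high (const vec &a, const vec &b)
  {
    vec r;
    for (size_t i = 0; i < a.size () / 2; i++)
      {
	r.push_back (a[i]);
	r.push_back (b[i]);
      }
    return r;
  }

  /* Interleave the second halves: a4 b4 a5 b5 ...  */
  static vec
  interleave_low (const vec &a, const vec &b)
  {
    vec r;
    for (size_t i = a.size () / 2; i < a.size (); i++)
      {
	r.push_back (a[i]);
	r.push_back (b[i]);
      }
    return r;
  }

  int
  main ()
  {
    /* I1..I4 from the comment: 0..7, 8..15, 16..23, 24..31.  */
    std::vector<vec> chain (4);
    for (int j = 0; j < 4; j++)
      for (int i = 0; i < 8; i++)
	chain[j].push_back (8 * j + i);

    /* log2 (length) == 2 rounds, pairing vector j with vector
       j + length/2, as the indexing in vect_permute_store_chain does.  */
    for (int step = 0; step < 2; step++)
      {
	std::vector<vec> next (4);
	for (int j = 0; j < 2; j++)
	  {
	    next[2 * j] = interleave_high (chain[j], chain[j + 2]);
	    next[2 * j + 1] = interleave_low (chain[j], chain[j + 2]);
	  }
	chain = next;
      }

    /* Prints 0 8 16 24 1 9 17 25, then the remaining rows, ending with
       6 14 22 30 7 15 23 31, matching the comment's output table.  */
    for (int j = 0; j < 4; j++)
      {
	for (int i = 0; i < 8; i++)
	  printf ("%d ", chain[j][i]);
	printf ("\n");
      }
    return 0;
  }
)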
Return value - the result of the loop-header phi node. */
tree
-vect_setup_realignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
- tree *realignment_token,
+vect_setup_realignment (vec_info *vinfo, stmt_vec_info stmt_info,
+ gimple_stmt_iterator *gsi, tree *realignment_token,
enum dr_alignment_support alignment_support_scheme,
tree init_addr,
class loop **at_loop)
{
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
struct data_reference *dr = dr_info->dr;
class loop *loop = NULL;
gcc_assert (!compute_in_loop);
vec_dest = vect_create_destination_var (scalar_dest, vectype);
- ptr = vect_create_data_ref_ptr (stmt_info, vectype,
+ ptr = vect_create_data_ref_ptr (vinfo, stmt_info, vectype,
loop_for_initial_load, NULL_TREE,
&init_addr, NULL, &inc, true);
if (TREE_CODE (ptr) == SSA_NAME)
if (!init_addr)
{
/* Generate the INIT_ADDR computation outside LOOP. */
- init_addr = vect_create_addr_base_for_vector_ref (stmt_info, &stmts,
+ init_addr = vect_create_addr_base_for_vector_ref (vinfo,
+ stmt_info, &stmts,
NULL_TREE);
if (loop)
{
4th vec (E4): 3 7 11 15 19 23 27 31. */
static void
-vect_permute_load_chain (vec<tree> dr_chain,
+vect_permute_load_chain (vec_info *vinfo, vec<tree> dr_chain,
unsigned int length,
stmt_vec_info stmt_info,
gimple_stmt_iterator *gsi,
data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
second_vect, perm3_mask_low);
- vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
/* Create interleaving stmt (high part of):
high = VEC_PERM_EXPR <first_vect, second_vect2, {k, 3 + k, 6 + k,
data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
second_vect, perm3_mask_high);
- vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
(*result_chain)[k] = data_ref;
}
}
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
first_vect, second_vect,
perm_mask_even);
- vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
(*result_chain)[j/2] = data_ref;
/* data_ref = permute_odd (first_data_ref, second_data_ref); */
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
first_vect, second_vect,
perm_mask_odd);
- vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
(*result_chain)[j/2+length/2] = data_ref;
}
memcpy (dr_chain.address (), result_chain->address (),
*/
static bool
-vect_shift_permute_load_chain (vec<tree> dr_chain,
+vect_shift_permute_load_chain (vec_info *vinfo, vec<tree> dr_chain,
unsigned int length,
stmt_vec_info stmt_info,
gimple_stmt_iterator *gsi,
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
unsigned int i;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
unsigned HOST_WIDE_INT nelt, vf;
if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nelt)
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
first_vect, first_vect,
perm2_mask1);
- vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
vect[0] = data_ref;
data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
second_vect, second_vect,
perm2_mask2);
- vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
vect[1] = data_ref;
data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift");
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
vect[0], vect[1], shift1_mask);
- vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
(*result_chain)[j/2 + length/2] = data_ref;
data_ref = make_temp_ssa_name (vectype, NULL, "vect_select");
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
vect[0], vect[1], select_mask);
- vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
(*result_chain)[j/2] = data_ref;
}
memcpy (dr_chain.address (), result_chain->address (),
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
dr_chain[k], dr_chain[k],
perm3_mask);
- vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
vect[k] = data_ref;
}
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
vect[k % 3], vect[(k + 1) % 3],
shift1_mask);
- vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
vect_shift[k] = data_ref;
}
vect_shift[(4 - k) % 3],
vect_shift[(3 - k) % 3],
shift2_mask);
- vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
vect[k] = data_ref;
}
data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift3");
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[0],
vect[0], shift3_mask);
- vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
(*result_chain)[nelt % 3] = data_ref;
data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift4");
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[1],
vect[1], shift4_mask);
- vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
(*result_chain)[0] = data_ref;
return true;
}
*/
void
-vect_transform_grouped_load (stmt_vec_info stmt_info, vec<tree> dr_chain,
+vect_transform_grouped_load (vec_info *vinfo, stmt_vec_info stmt_info,
+ vec<tree> dr_chain,
int size, gimple_stmt_iterator *gsi)
{
machine_mode mode;
mode = TYPE_MODE (STMT_VINFO_VECTYPE (stmt_info));
if (targetm.sched.reassociation_width (VEC_PERM_EXPR, mode) > 1
|| pow2p_hwi (size)
- || !vect_shift_permute_load_chain (dr_chain, size, stmt_info,
+ || !vect_shift_permute_load_chain (vinfo, dr_chain, size, stmt_info,
gsi, &result_chain))
- vect_permute_load_chain (dr_chain, size, stmt_info, gsi, &result_chain);
- vect_record_grouped_load_vectors (stmt_info, result_chain);
+ vect_permute_load_chain (vinfo, dr_chain,
+ size, stmt_info, gsi, &result_chain);
+ vect_record_grouped_load_vectors (vinfo, stmt_info, result_chain);
result_chain.release ();
}
for each vector to the associated scalar statement. */
void
-vect_record_grouped_load_vectors (stmt_vec_info stmt_info,
+vect_record_grouped_load_vectors (vec_info *vinfo, stmt_vec_info stmt_info,
vec<tree> result_chain)
{
- vec_info *vinfo = stmt_info->vinfo;
stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
unsigned int i, gap_count;
tree tmp_data_ref;
alignment. */
enum dr_alignment_support
-vect_supportable_dr_alignment (dr_vec_info *dr_info,
+vect_supportable_dr_alignment (vec_info *vinfo, dr_vec_info *dr_info,
bool check_aligned_accesses)
{
data_reference *dr = dr_info->dr;
stmt_vec_info stmt_info = dr_info->stmt;
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
machine_mode mode = TYPE_MODE (vectype);
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
class loop *vect_loop = NULL;
bool nested_in_vect_loop = false;
tree offset = (negative
? size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1)
: size_zero_node);
- tree start_addr = vect_create_addr_base_for_vector_ref (stmt_info, seq,
+ tree start_addr = vect_create_addr_base_for_vector_ref (loop_vinfo,
+ stmt_info, seq,
offset);
tree type = unsigned_type_for (TREE_TYPE (start_addr));
if (target_align.is_constant (&target_align_c))
/* create: addr_tmp = (int)(address_of_first_vector) */
addr_base =
- vect_create_addr_base_for_vector_ref (stmt_info, &new_stmt_list,
+ vect_create_addr_base_for_vector_ref (loop_vinfo,
+ stmt_info, &new_stmt_list,
offset);
if (new_stmt_list != NULL)
gimple_seq_add_seq (cond_expr_stmt_list, new_stmt_list);
may already be set for general statements (not just data refs). */
static opt_result
-vect_determine_vf_for_stmt_1 (stmt_vec_info stmt_info,
+vect_determine_vf_for_stmt_1 (vec_info *vinfo, stmt_vec_info stmt_info,
bool vectype_maybe_set_p,
poly_uint64 *vf)
{
}
tree stmt_vectype, nunits_vectype;
- opt_result res = vect_get_vector_types_for_stmt (stmt_info, &stmt_vectype,
+ opt_result res = vect_get_vector_types_for_stmt (vinfo, stmt_info,
+ &stmt_vectype,
&nunits_vectype);
if (!res)
return res;
or false if something prevented vectorization. */
static opt_result
-vect_determine_vf_for_stmt (stmt_vec_info stmt_info, poly_uint64 *vf)
+vect_determine_vf_for_stmt (vec_info *vinfo,
+ stmt_vec_info stmt_info, poly_uint64 *vf)
{
- vec_info *vinfo = stmt_info->vinfo;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: %G",
stmt_info->stmt);
- opt_result res = vect_determine_vf_for_stmt_1 (stmt_info, false, vf);
+ opt_result res = vect_determine_vf_for_stmt_1 (vinfo, stmt_info, false, vf);
if (!res)
return res;
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern def stmt: %G",
def_stmt_info->stmt);
- res = vect_determine_vf_for_stmt_1 (def_stmt_info, true, vf);
+ res = vect_determine_vf_for_stmt_1 (vinfo, def_stmt_info, true, vf);
if (!res)
return res;
}
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern statement: %G",
stmt_info->stmt);
- res = vect_determine_vf_for_stmt_1 (stmt_info, true, vf);
+ res = vect_determine_vf_for_stmt_1 (vinfo, stmt_info, true, vf);
if (!res)
return res;
}
{
stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
opt_result res
- = vect_determine_vf_for_stmt (stmt_info, &vectorization_factor);
+ = vect_determine_vf_for_stmt (loop_vinfo,
+ stmt_info, &vectorization_factor);
if (!res)
return res;
}
this function would then return true for x_2. */
static bool
-vect_inner_phi_in_double_reduction_p (stmt_vec_info stmt_info, gphi *phi)
+vect_inner_phi_in_double_reduction_p (loop_vec_info loop_vinfo, gphi *phi)
{
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
use_operand_p use_p;
ssa_op_iter op_iter;
FOR_EACH_PHI_ARG (use_p, phi, op_iter, SSA_OP_USE)
}
if (!access_fn
- || vect_inner_phi_in_double_reduction_p (stmt_vinfo, phi)
+ || vect_inner_phi_in_double_reduction_p (loop_vinfo, phi)
|| !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
|| (LOOP_VINFO_LOOP (loop_vinfo) != loop
&& TREE_CODE (step) != INTEGER_CST))
int j;
FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
j, si)
- (void) add_stmt_cost (target_cost_data, si->count,
+ (void) add_stmt_cost (loop_vinfo, target_cost_data, si->count,
si->kind, si->stmt_info, si->misalign,
vect_body);
unsigned dummy, body_cost = 0;
if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_internal_def
|| (STMT_VINFO_DEF_TYPE (stmt_info)
== vect_double_reduction_def))
- && !vectorizable_lc_phi (stmt_info, NULL, NULL))
+ && !vectorizable_lc_phi (loop_vinfo,
+ stmt_info, NULL, NULL))
return opt_result::failure_at (phi, "unsupported phi\n");
}
need_to_vectorize = true;
if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
&& ! PURE_SLP_STMT (stmt_info))
- ok = vectorizable_induction (stmt_info, NULL, NULL, NULL,
+ ok = vectorizable_induction (loop_vinfo,
+ stmt_info, NULL, NULL, NULL,
&cost_vec);
else if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
|| (STMT_VINFO_DEF_TYPE (stmt_info)
== vect_double_reduction_def)
|| STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
&& ! PURE_SLP_STMT (stmt_info))
- ok = vectorizable_reduction (stmt_info, NULL, NULL, &cost_vec);
+ ok = vectorizable_reduction (loop_vinfo,
+ stmt_info, NULL, NULL, &cost_vec);
}
/* SLP PHIs are tested by vect_slp_analyze_node_operations. */
if (ok
&& STMT_VINFO_LIVE_P (stmt_info)
&& !PURE_SLP_STMT (stmt_info))
- ok = vectorizable_live_operation (stmt_info, NULL, NULL, NULL,
+ ok = vectorizable_live_operation (loop_vinfo,
+ stmt_info, NULL, NULL, NULL,
-1, false, &cost_vec);
if (!ok)
if (!gimple_clobber_p (stmt))
{
opt_result res
- = vect_analyze_stmt (loop_vinfo->lookup_stmt (stmt),
+ = vect_analyze_stmt (loop_vinfo,
+ loop_vinfo->lookup_stmt (stmt),
&need_to_vectorize,
NULL, NULL, &cost_vec);
if (!res)
}
} /* bbs */
- add_stmt_costs (loop_vinfo->target_cost_data, &cost_vec);
+ add_stmt_costs (loop_vinfo, loop_vinfo->target_cost_data, &cost_vec);
/* All operations in the loop are either irrelevant (deal with loop
control, or dead), or only used outside the loop and can be moved
{
/* FIXME: Make cost depend on complexity of individual check. */
unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
- (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
- vect_prologue);
+ (void) add_stmt_cost (loop_vinfo, target_cost_data, len, vector_stmt,
+ NULL, 0, vect_prologue);
if (dump_enabled_p ())
dump_printf (MSG_NOTE,
"cost model: Adding cost of checks for loop "
{
/* FIXME: Make cost depend on complexity of individual check. */
unsigned len = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).length ();
- (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
- vect_prologue);
+ (void) add_stmt_cost (loop_vinfo, target_cost_data, len, vector_stmt,
+ NULL, 0, vect_prologue);
len = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).length ();
if (len)
/* Count LEN - 1 ANDs and LEN comparisons. */
- (void) add_stmt_cost (target_cost_data, len * 2 - 1, scalar_stmt,
- NULL, 0, vect_prologue);
+ (void) add_stmt_cost (loop_vinfo, target_cost_data, len * 2 - 1,
+ scalar_stmt, NULL, 0, vect_prologue);
len = LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).length ();
if (len)
{
for (unsigned int i = 0; i < len; ++i)
if (!LOOP_VINFO_LOWER_BOUNDS (loop_vinfo)[i].unsigned_p)
nstmts += 1;
- (void) add_stmt_cost (target_cost_data, nstmts, scalar_stmt,
- NULL, 0, vect_prologue);
+ (void) add_stmt_cost (loop_vinfo, target_cost_data, nstmts,
+ scalar_stmt, NULL, 0, vect_prologue);
}
if (dump_enabled_p ())
dump_printf (MSG_NOTE,
if (LOOP_REQUIRES_VERSIONING_FOR_NITERS (loop_vinfo))
{
/* FIXME: Make cost depend on complexity of individual check. */
- (void) add_stmt_cost (target_cost_data, 1, vector_stmt, NULL, 0,
- vect_prologue);
+ (void) add_stmt_cost (loop_vinfo, target_cost_data, 1, vector_stmt,
+ NULL, 0, vect_prologue);
if (dump_enabled_p ())
dump_printf (MSG_NOTE,
"cost model: Adding cost of checks for loop "
}
if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
- (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0,
- vect_prologue);
+ (void) add_stmt_cost (loop_vinfo, target_cost_data, 1, cond_branch_taken,
+ NULL, 0, vect_prologue);
/* Count statements in scalar loop. Using this as scalar cost for a single
iteration for now.
int j;
FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
j, si)
- (void) add_stmt_cost (target_cost_data, si->count,
+ (void) add_stmt_cost (loop_vinfo, target_cost_data, si->count,
si->kind, si->stmt_info, si->misalign,
vect_epilogue);
}
simpler and safer to use the worst-case cost; if this ends up
being the tie-breaker between vectorizing or not, then it's
probably better not to vectorize. */
- (void) add_stmt_cost (target_cost_data, num_masks, vector_stmt,
+ (void) add_stmt_cost (loop_vinfo,
+ target_cost_data, num_masks, vector_stmt,
NULL, 0, vect_prologue);
- (void) add_stmt_cost (target_cost_data, num_masks - 1, vector_stmt,
+ (void) add_stmt_cost (loop_vinfo,
+ target_cost_data, num_masks - 1, vector_stmt,
NULL, 0, vect_body);
}
else if (npeel < 0)
branch per peeled loop. Even if scalar loop iterations are known,
vector iterations are not known since peeled prologue iterations are
not known. Hence guards remain the same. */
- (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
+ (void) add_stmt_cost (loop_vinfo, target_cost_data, 1, cond_branch_taken,
NULL, 0, vect_prologue);
- (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
+ (void) add_stmt_cost (loop_vinfo,
+ target_cost_data, 1, cond_branch_not_taken,
NULL, 0, vect_prologue);
- (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
+ (void) add_stmt_cost (loop_vinfo, target_cost_data, 1, cond_branch_taken,
NULL, 0, vect_epilogue);
- (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
+ (void) add_stmt_cost (loop_vinfo,
+ target_cost_data, 1, cond_branch_not_taken,
NULL, 0, vect_epilogue);
stmt_info_for_cost *si;
int j;
FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si)
{
- (void) add_stmt_cost (target_cost_data,
+ (void) add_stmt_cost (loop_vinfo, target_cost_data,
si->count * peel_iters_prologue,
si->kind, si->stmt_info, si->misalign,
vect_prologue);
- (void) add_stmt_cost (target_cost_data,
+ (void) add_stmt_cost (loop_vinfo, target_cost_data,
si->count * peel_iters_epilogue,
si->kind, si->stmt_info, si->misalign,
vect_epilogue);
&epilogue_cost_vec);
FOR_EACH_VEC_ELT (prologue_cost_vec, j, si)
- (void) add_stmt_cost (data, si->count, si->kind, si->stmt_info,
+ (void) add_stmt_cost (loop_vinfo,
+ data, si->count, si->kind, si->stmt_info,
si->misalign, vect_prologue);
FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si)
- (void) add_stmt_cost (data, si->count, si->kind, si->stmt_info,
+ (void) add_stmt_cost (loop_vinfo,
+ data, si->count, si->kind, si->stmt_info,
si->misalign, vect_epilogue);
prologue_cost_vec.release ();
the loop, and the epilogue code that must be generated. */
static void
-vect_model_reduction_cost (stmt_vec_info stmt_info, internal_fn reduc_fn,
+vect_model_reduction_cost (loop_vec_info loop_vinfo,
+ stmt_vec_info stmt_info, internal_fn reduc_fn,
vect_reduction_type reduction_type,
int ncopies, stmt_vector_for_cost *cost_vec)
{
optab optab;
tree vectype;
machine_mode mode;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
class loop *loop = NULL;
if (loop_vinfo)
A cost model should help decide between these two schemes. */
static tree
-get_initial_def_for_reduction (stmt_vec_info stmt_vinfo,
+get_initial_def_for_reduction (loop_vec_info loop_vinfo,
+ stmt_vec_info stmt_vinfo,
enum tree_code code, tree init_val,
tree *adjustment_def)
{
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree scalar_type = TREE_TYPE (init_val);
tree vectype = get_vectype_for_scalar_type (loop_vinfo, scalar_type);
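(For context, the "two schemes" mentioned in the truncated comment above
are: start from a neutral vector and fold the initial value back in via
ADJUSTMENT_DEF during the epilogue, or place the initial value into one
lane up front.  For a sum reduction with init_val = 5 and four lanes:

  Option 1:  [0, 0, 0, 0]	with adjustment_def = 5 applied in the epilog
  Option 2:  [5, 0, 0, 0]	with no adjustment needed

Min/max reductions must splat init_val into every lane under either
scheme, since no neutral element exists for them.)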
value will not change the result. */
static void
-get_initial_defs_for_reduction (slp_tree slp_node,
+get_initial_defs_for_reduction (vec_info *vinfo,
+ slp_tree slp_node,
vec<tree> *vec_oprnds,
unsigned int number_of_vectors,
bool reduc_chain, tree neutral_op)
{
vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
stmt_vec_info stmt_vinfo = stmts[0];
- vec_info *vinfo = stmt_vinfo->vinfo;
unsigned HOST_WIDE_INT nunits;
unsigned j, number_of_places_left_in_vector;
tree vector_type;
the stmt_vec_info the meta information is stored on. */
stmt_vec_info
-info_for_reduction (stmt_vec_info stmt_info)
+info_for_reduction (vec_info *vinfo, stmt_vec_info stmt_info)
{
stmt_info = vect_orig_stmt (stmt_info);
gcc_assert (STMT_VINFO_REDUC_DEF (stmt_info));
{
edge pe = loop_preheader_edge (gimple_bb (phi)->loop_father);
stmt_vec_info info
- = stmt_info->vinfo->lookup_def (PHI_ARG_DEF_FROM_EDGE (phi, pe));
+ = vinfo->lookup_def (PHI_ARG_DEF_FROM_EDGE (phi, pe));
if (info && STMT_VINFO_DEF_TYPE (info) == vect_double_reduction_def)
stmt_info = info;
}
*/
static void
-vect_create_epilog_for_reduction (stmt_vec_info stmt_info,
+vect_create_epilog_for_reduction (loop_vec_info loop_vinfo,
+ stmt_vec_info stmt_info,
slp_tree slp_node,
slp_instance slp_node_instance)
{
- stmt_vec_info reduc_info = info_for_reduction (stmt_info);
+ stmt_vec_info reduc_info = info_for_reduction (loop_vinfo, stmt_info);
gcc_assert (reduc_info->is_reduc_info);
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
/* For double reductions we need to get at the inner loop reduction
stmt which has the meta info attached. Our stmt_info is that of the
loop-closed PHI of the inner loop which we remember as
that should be used to control the operation in a fully-masked loop. */
static bool
-vectorize_fold_left_reduction (stmt_vec_info stmt_info,
+vectorize_fold_left_reduction (loop_vec_info loop_vinfo,
+ stmt_vec_info stmt_info,
gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt, slp_tree slp_node,
gimple *reduc_def_stmt,
tree ops[3], tree vectype_in,
int reduc_index, vec_loop_masks *masks)
{
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
stmt_vec_info new_stmt_info = NULL;
if (slp_node)
{
auto_vec<vec<tree> > vec_defs (2);
- vect_get_slp_defs (slp_node, &vec_defs);
+ vect_get_slp_defs (loop_vinfo, slp_node, &vec_defs);
vec_oprnds0.safe_splice (vec_defs[1 - reduc_index]);
vec_defs[0].release ();
vec_defs[1].release ();
}
else
{
- tree loop_vec_def0 = vect_get_vec_def_for_operand (op0, stmt_info);
+ tree loop_vec_def0 = vect_get_vec_def_for_operand (loop_vinfo,
+ op0, stmt_info);
vec_oprnds0.create (1);
vec_oprnds0.quick_push (loop_vec_def0);
scalar_dest_def_info = stmt_info;
if (i == vec_num - 1)
{
gimple_set_lhs (new_stmt, scalar_dest);
- new_stmt_info = vect_finish_replace_stmt (scalar_dest_def_info,
+ new_stmt_info = vect_finish_replace_stmt (loop_vinfo,
+ scalar_dest_def_info,
new_stmt);
}
else
- new_stmt_info = vect_finish_stmt_generation (scalar_dest_def_info,
+ new_stmt_info = vect_finish_stmt_generation (loop_vinfo,
+ scalar_dest_def_info,
new_stmt, gsi);
if (slp_node)
does *NOT* necessarily hold for reduction patterns. */
bool
-vectorizable_reduction (stmt_vec_info stmt_info, slp_tree slp_node,
+vectorizable_reduction (loop_vec_info loop_vinfo,
+ stmt_vec_info stmt_info, slp_tree slp_node,
slp_instance slp_node_instance,
stmt_vector_for_cost *cost_vec)
{
tree scalar_dest;
tree vectype_in = NULL_TREE;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
enum vect_def_type cond_reduc_dt = vect_unknown_def_type;
stmt_vec_info cond_stmt_vinfo = NULL;
return false;
/* The stmt we store reduction analysis meta on. */
- stmt_vec_info reduc_info = info_for_reduction (stmt_info);
+ stmt_vec_info reduc_info = info_for_reduction (loop_vinfo, stmt_info);
reduc_info->is_reduc_info = true;
if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
else
vec_num = 1;
- vect_model_reduction_cost (stmt_info, reduc_fn, reduction_type, ncopies,
- cost_vec);
+ vect_model_reduction_cost (loop_vinfo, stmt_info, reduc_fn,
+ reduction_type, ncopies, cost_vec);
if (dump_enabled_p ()
&& reduction_type == FOLD_LEFT_REDUCTION)
dump_printf_loc (MSG_NOTE, vect_location,
value. */
bool
-vect_transform_reduction (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
+vect_transform_reduction (loop_vec_info loop_vinfo,
+ stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt, slp_tree slp_node)
{
tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
int i;
int ncopies;
int j;
int vec_num;
- stmt_vec_info reduc_info = info_for_reduction (stmt_info);
+ stmt_vec_info reduc_info = info_for_reduction (loop_vinfo, stmt_info);
gcc_assert (reduc_info->is_reduc_info);
if (nested_in_vect_loop_p (loop, stmt_info))
{
internal_fn reduc_fn = STMT_VINFO_REDUC_FN (reduc_info);
return vectorize_fold_left_reduction
- (stmt_info, gsi, vec_stmt, slp_node, reduc_def_phi, code,
+ (loop_vinfo, stmt_info, gsi, vec_stmt, slp_node, reduc_def_phi, code,
reduc_fn, ops, vectype_in, reduc_index, masks);
}
/* Get vec defs for all the operands except the reduction index,
ensuring the ordering of the ops in the vector is kept. */
auto_vec<vec<tree>, 3> vec_defs;
- vect_get_slp_defs (slp_node, &vec_defs);
+ vect_get_slp_defs (loop_vinfo, slp_node, &vec_defs);
vec_oprnds0.safe_splice (vec_defs[0]);
vec_defs[0].release ();
vec_oprnds1.safe_splice (vec_defs[1]);
else
{
vec_oprnds0.quick_push
- (vect_get_vec_def_for_operand (ops[0], stmt_info));
+ (vect_get_vec_def_for_operand (loop_vinfo, ops[0], stmt_info));
vec_oprnds1.quick_push
- (vect_get_vec_def_for_operand (ops[1], stmt_info));
+ (vect_get_vec_def_for_operand (loop_vinfo, ops[1], stmt_info));
if (op_type == ternary_op)
vec_oprnds2.quick_push
- (vect_get_vec_def_for_operand (ops[2], stmt_info));
+ (vect_get_vec_def_for_operand (loop_vinfo, ops[2], stmt_info));
}
}
else
gimple_call_set_lhs (call, new_temp);
gimple_call_set_nothrow (call, true);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, call, gsi);
+ = vect_finish_stmt_generation (loop_vinfo,
+ stmt_info, call, gsi);
}
else
{
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_temp);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ = vect_finish_stmt_generation (loop_vinfo,
+ stmt_info, new_stmt, gsi);
}
if (slp_node)
/* Transform phase of a cycle PHI. */
bool
-vect_transform_cycle_phi (stmt_vec_info stmt_info, stmt_vec_info *vec_stmt,
+vect_transform_cycle_phi (loop_vec_info loop_vinfo,
+ stmt_vec_info stmt_info, stmt_vec_info *vec_stmt,
slp_tree slp_node, slp_instance slp_node_instance)
{
tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
int i;
int ncopies;
stmt_vec_info reduc_stmt_info = STMT_VINFO_REDUC_DEF (stmt_info);
reduc_stmt_info = vect_stmt_to_vectorize (reduc_stmt_info);
- stmt_vec_info reduc_info = info_for_reduction (stmt_info);
+ stmt_vec_info reduc_info = info_for_reduction (loop_vinfo, stmt_info);
gcc_assert (reduc_info->is_reduc_info);
if (STMT_VINFO_REDUC_TYPE (reduc_info) == EXTRACT_LAST_REDUCTION
= neutral_op_for_slp_reduction (slp_node, vectype_out,
STMT_VINFO_REDUC_CODE (reduc_info),
first != NULL);
- get_initial_defs_for_reduction (slp_node_instance->reduc_phis,
+ get_initial_defs_for_reduction (loop_vinfo, slp_node_instance->reduc_phis,
&vec_initial_defs, vec_num,
first != NULL, neutral_op);
}
{
/* Do not use an adjustment def as that case is not supported
correctly if ncopies is not one. */
- vec_initial_def = vect_get_vec_def_for_operand (initial_def,
+ vec_initial_def = vect_get_vec_def_for_operand (loop_vinfo,
+ initial_def,
reduc_stmt_info);
}
else
if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def)
adjustment_defp = NULL;
vec_initial_def
- = get_initial_def_for_reduction (reduc_stmt_info, code,
+ = get_initial_def_for_reduction (loop_vinfo, reduc_stmt_info, code,
initial_def, adjustment_defp);
STMT_VINFO_REDUC_EPILOGUE_ADJUSTMENT (reduc_info) = adjustment_def;
}
/* Vectorizes LC PHIs. */
bool
-vectorizable_lc_phi (stmt_vec_info stmt_info, stmt_vec_info *vec_stmt,
+vectorizable_lc_phi (loop_vec_info loop_vinfo,
+ stmt_vec_info stmt_info, stmt_vec_info *vec_stmt,
slp_tree slp_node)
{
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
if (!loop_vinfo
|| !is_a <gphi *> (stmt_info->stmt)
|| gimple_phi_num_args (stmt_info->stmt) != 1)
edge e = single_pred_edge (bb);
tree vec_dest = vect_create_destination_var (scalar_dest, vectype);
vec<tree> vec_oprnds = vNULL;
- vect_get_vec_defs (gimple_phi_arg_def (stmt_info->stmt, 0), NULL_TREE,
+ vect_get_vec_defs (loop_vinfo,
+ gimple_phi_arg_def (stmt_info->stmt, 0), NULL_TREE,
stmt_info, &vec_oprnds, NULL, slp_node);
if (slp_node)
{
Return true if STMT_INFO is vectorizable in this way. */
bool
-vectorizable_induction (stmt_vec_info stmt_info,
+vectorizable_induction (loop_vec_info loop_vinfo,
+ stmt_vec_info stmt_info,
gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
stmt_vec_info *vec_stmt, slp_tree slp_node,
stmt_vector_for_cost *cost_vec)
{
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
unsigned ncopies;
bool nested_in_vect_loop = false;
new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
expr, step_expr);
if (! CONSTANT_CLASS_P (new_name))
- new_name = vect_init_vector (stmt_info, new_name,
+ new_name = vect_init_vector (loop_vinfo, stmt_info, new_name,
TREE_TYPE (step_expr), NULL);
new_vec = build_vector_from_val (step_vectype, new_name);
- vec_step = vect_init_vector (stmt_info, new_vec, step_vectype, NULL);
+ vec_step = vect_init_vector (loop_vinfo, stmt_info,
+ new_vec, step_vectype, NULL);
/* Now generate the IVs. */
unsigned group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
expr, step_expr);
if (! CONSTANT_CLASS_P (new_name))
- new_name = vect_init_vector (stmt_info, new_name,
+ new_name = vect_init_vector (loop_vinfo, stmt_info, new_name,
TREE_TYPE (step_expr), NULL);
new_vec = build_vector_from_val (step_vectype, new_name);
- vec_step = vect_init_vector (stmt_info, new_vec, step_vectype, NULL);
+ vec_step = vect_init_vector (loop_vinfo, stmt_info, new_vec,
+ step_vectype, NULL);
for (; ivn < nvects; ++ivn)
{
gimple *iv = SLP_TREE_VEC_STMTS (slp_node)[ivn - nivs]->stmt;
/* iv_loop is nested in the loop to be vectorized. init_expr had already
been created during vectorization of previous stmts. We obtain it
from the STMT_VINFO_VEC_STMT of the defining stmt. */
- vec_init = vect_get_vec_def_for_operand (init_expr, stmt_info);
+ vec_init = vect_get_vec_def_for_operand (loop_vinfo,
+ init_expr, stmt_info);
/* If the initial value is not of proper type, convert it. */
if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
{
gcc_assert (CONSTANT_CLASS_P (new_name)
|| TREE_CODE (new_name) == SSA_NAME);
new_vec = build_vector_from_val (step_vectype, t);
- vec_step = vect_init_vector (stmt_info, new_vec, step_vectype, NULL);
+ vec_step = vect_init_vector (loop_vinfo, stmt_info,
+ new_vec, step_vectype, NULL);
/* Create the following def-use cycle:
gcc_assert (CONSTANT_CLASS_P (new_name)
|| TREE_CODE (new_name) == SSA_NAME);
new_vec = build_vector_from_val (step_vectype, t);
- vec_step = vect_init_vector (stmt_info, new_vec, step_vectype, NULL);
+ vec_step = vect_init_vector (loop_vinfo, stmt_info,
+ new_vec, step_vectype, NULL);
vec_def = induc_def;
prev_stmt_vinfo = induction_phi_info;
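(To make the generated def-use cycle concrete: for a simple induction
i = i + 1 vectorized with VF 4, the code above materializes, with
illustrative values:

  vec_init = { i0, i0 + 1, i0 + 2, i0 + 3 }
  vec_step = { 4, 4, 4, 4 }

  loop:
    vec_iv      = PHI <vec_init (preheader), vec_iv_next (latch)>
    ...
    vec_iv_next = vec_iv + vec_step

so each vector iteration advances every lane by VF * step at once.)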
it can be supported. */
bool
-vectorizable_live_operation (stmt_vec_info stmt_info,
+vectorizable_live_operation (loop_vec_info loop_vinfo,
+ stmt_vec_info stmt_info,
gimple_stmt_iterator *gsi,
slp_tree slp_node, slp_instance slp_node_instance,
int slp_index, bool vec_stmt_p,
stmt_vector_for_cost *)
{
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
imm_use_iterator imm_iter;
tree lhs, lhs_type, bitsize, vec_bitsize;
else if (slp_index != 0)
return true;
}
- stmt_vec_info reduc_info = info_for_reduction (stmt_info);
+ stmt_vec_info reduc_info = info_for_reduction (loop_vinfo, stmt_info);
gcc_assert (reduc_info->is_reduc_info);
if (STMT_VINFO_REDUC_TYPE (reduc_info) == FOLD_LEFT_REDUCTION
|| STMT_VINFO_REDUC_TYPE (reduc_info) == EXTRACT_LAST_REDUCTION)
return true;
- vect_create_epilog_for_reduction (stmt_info, slp_node,
+ vect_create_epilog_for_reduction (loop_vinfo, stmt_info, slp_node,
slp_node_instance);
return true;
}
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");
- if (vect_transform_stmt (stmt_info, gsi, NULL, NULL))
+ if (vect_transform_stmt (loop_vinfo, stmt_info, gsi, NULL, NULL))
*seen_store = stmt_info;
}
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
- vect_transform_stmt (stmt_info, NULL, NULL, NULL);
+ vect_transform_stmt (loop_vinfo, stmt_info, NULL, NULL, NULL);
}
}
/* Interleaving. If IS_STORE is TRUE, the
vectorization of the interleaving chain was
completed - free all the stores in the chain. */
- vect_remove_stores (DR_GROUP_FIRST_ELEMENT (seen_store));
+ vect_remove_stores (loop_vinfo,
+ DR_GROUP_FIRST_ELEMENT (seen_store));
else
/* Free the attached stmt_vec_info and remove the stmt. */
loop_vinfo->remove_stmt (stmt_info);
VECTYPE if it doesn't have one already. */
static stmt_vec_info
-vect_init_pattern_stmt (gimple *pattern_stmt, stmt_vec_info orig_stmt_info,
- tree vectype)
+vect_init_pattern_stmt (vec_info *vinfo, gimple *pattern_stmt,
+ stmt_vec_info orig_stmt_info, tree vectype)
{
- vec_info *vinfo = orig_stmt_info->vinfo;
stmt_vec_info pattern_stmt_info = vinfo->lookup_stmt (pattern_stmt);
if (pattern_stmt_info == NULL)
- pattern_stmt_info = orig_stmt_info->vinfo->add_stmt (pattern_stmt);
+ pattern_stmt_info = vinfo->add_stmt (pattern_stmt);
gimple_set_bb (pattern_stmt, gimple_bb (orig_stmt_info->stmt));
pattern_stmt_info->pattern_stmt_p = true;
have one already. */
static void
-vect_set_pattern_stmt (gimple *pattern_stmt, stmt_vec_info orig_stmt_info,
- tree vectype)
+vect_set_pattern_stmt (vec_info *vinfo, gimple *pattern_stmt,
+ stmt_vec_info orig_stmt_info, tree vectype)
{
STMT_VINFO_IN_PATTERN_P (orig_stmt_info) = true;
STMT_VINFO_RELATED_STMT (orig_stmt_info)
- = vect_init_pattern_stmt (pattern_stmt, orig_stmt_info, vectype);
+ = vect_init_pattern_stmt (vinfo, pattern_stmt, orig_stmt_info, vectype);
}
/* Add NEW_STMT to STMT_INFO's pattern definition statements. If VECTYPE
from which it was derived. */
static inline void
-append_pattern_def_seq (stmt_vec_info stmt_info, gimple *new_stmt,
+append_pattern_def_seq (vec_info *vinfo,
+ stmt_vec_info stmt_info, gimple *new_stmt,
tree vectype = NULL_TREE,
tree scalar_type_for_mask = NULL_TREE)
{
gcc_assert (!scalar_type_for_mask
== (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype)));
- vec_info *vinfo = stmt_info->vinfo;
if (vectype)
{
stmt_vec_info new_stmt_info = vinfo->add_stmt (new_stmt);
unsigned. */
static bool
-type_conversion_p (tree name, stmt_vec_info stmt_vinfo, bool check_sign,
+type_conversion_p (vec_info *vinfo, tree name, bool check_sign,
tree *orig_type, gimple **def_stmt, bool *promotion)
{
tree type = TREE_TYPE (name);
enum vect_def_type dt;
stmt_vec_info def_stmt_info;
- if (!vect_is_simple_use (name, stmt_vinfo->vinfo, &dt, &def_stmt_info,
- def_stmt))
+ if (!vect_is_simple_use (name, vinfo, &dt, &def_stmt_info, def_stmt))
return false;
if (dt != vect_internal_def
else
*promotion = false;
- if (!vect_is_simple_use (oprnd0, stmt_vinfo->vinfo, &dt))
+ if (!vect_is_simple_use (oprnd0, vinfo, &dt))
return false;
return true;
exists. */
static unsigned int
-vect_widened_op_tree (stmt_vec_info stmt_info, tree_code code,
+vect_widened_op_tree (vec_info *vinfo, stmt_vec_info stmt_info, tree_code code,
tree_code widened_code, bool shift_p,
unsigned int max_nops,
vect_unpromoted_value *unprom, tree *common_type)
{
/* Check for an integer operation with the right code. */
- vec_info *vinfo = stmt_info->vinfo;
gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
if (!assign)
return 0;
if (shift_p && i == 1)
return 0;
- if (!vect_look_through_possible_promotion (stmt_info->vinfo, op,
- this_unprom))
+ if (!vect_look_through_possible_promotion (vinfo, op, this_unprom))
return 0;
if (TYPE_PRECISION (this_unprom->type) == TYPE_PRECISION (type))
/* Recursively process the definition of the operand. */
stmt_vec_info def_stmt_info
= vinfo->lookup_def (this_unprom->op);
- nops = vect_widened_op_tree (def_stmt_info, code, widened_code,
- shift_p, max_nops, this_unprom,
- common_type);
+ nops = vect_widened_op_tree (vinfo, def_stmt_info, code,
+ widened_code, shift_p, max_nops,
+ this_unprom, common_type);
if (nops == 0)
return 0;
success. */
static bool
-vect_split_statement (stmt_vec_info stmt2_info, tree new_rhs,
+vect_split_statement (vec_info *vinfo, stmt_vec_info stmt2_info, tree new_rhs,
gimple *stmt1, tree vectype)
{
- vec_info *vinfo = stmt2_info->vinfo;
if (is_pattern_stmt_p (stmt2_info))
{
/* STMT2_INFO is part of a pattern. Get the statement to which
the pattern is attached. */
stmt_vec_info orig_stmt2_info = STMT_VINFO_RELATED_STMT (stmt2_info);
- vect_init_pattern_stmt (stmt1, orig_stmt2_info, vectype);
+ vect_init_pattern_stmt (vinfo, stmt1, orig_stmt2_info, vectype);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
/* Add STMT1 as a singleton pattern definition sequence. */
gimple_seq *def_seq = &STMT_VINFO_PATTERN_DEF_SEQ (stmt2_info);
- vect_init_pattern_stmt (stmt1, stmt2_info, vectype);
+ vect_init_pattern_stmt (vinfo, stmt1, stmt2_info, vectype);
gimple_seq_add_stmt_without_update (def_seq, stmt1);
/* Build the second of the two pattern statements. */
tree new_lhs = vect_recog_temp_ssa_var (lhs_type, NULL);
gassign *new_stmt2 = gimple_build_assign (new_lhs, NOP_EXPR, new_rhs);
- vect_set_pattern_stmt (new_stmt2, stmt2_info, lhs_vectype);
+ vect_set_pattern_stmt (vinfo, new_stmt2, stmt2_info, lhs_vectype);
if (dump_enabled_p ())
{
available. VECTYPE is the vector form of TYPE. */
static tree
-vect_convert_input (stmt_vec_info stmt_info, tree type,
+vect_convert_input (vec_info *vinfo, stmt_vec_info stmt_info, tree type,
vect_unpromoted_value *unprom, tree vectype)
{
- vec_info *vinfo = stmt_info->vinfo;
-
/* Check for a no-op conversion. */
if (types_compatible_p (type, TREE_TYPE (unprom->op)))
return unprom->op;
input = vect_recog_temp_ssa_var (midtype, NULL);
gassign *new_stmt = gimple_build_assign (input, NOP_EXPR,
unprom->op);
- if (!vect_split_statement (unprom->caster, input, new_stmt,
+ if (!vect_split_statement (vinfo, unprom->caster, input, new_stmt,
vec_midtype))
- append_pattern_def_seq (stmt_info, new_stmt, vec_midtype);
+ append_pattern_def_seq (vinfo, stmt_info,
+ new_stmt, vec_midtype);
}
}
/* If OP is an external value, see if we can insert the new statement
on an incoming edge. */
if (input == unprom->op && unprom->dt == vect_external_def)
- if (edge e = vect_get_external_def_edge (stmt_info->vinfo, input))
+ if (edge e = vect_get_external_def_edge (vinfo, input))
{
basic_block new_bb = gsi_insert_on_edge_immediate (e, new_stmt);
gcc_assert (!new_bb);
}
/* As a (common) last resort, add the statement to the pattern itself. */
- append_pattern_def_seq (stmt_info, new_stmt, vectype);
+ append_pattern_def_seq (vinfo, stmt_info, new_stmt, vectype);
return new_op;
}
result in the corresponding elements of RESULT. */
static void
-vect_convert_inputs (stmt_vec_info stmt_info, unsigned int n,
+vect_convert_inputs (vec_info *vinfo, stmt_vec_info stmt_info, unsigned int n,
tree *result, tree type, vect_unpromoted_value *unprom,
tree vectype)
{
if (j < i)
result[i] = result[j];
else
- result[i] = vect_convert_input (stmt_info, type, &unprom[i], vectype);
+ result[i] = vect_convert_input (vinfo, stmt_info,
+ type, &unprom[i], vectype);
}
}
VECITYPE is the vector form of PATTERN_STMT's result type. */
static gimple *
-vect_convert_output (stmt_vec_info stmt_info, tree type, gimple *pattern_stmt,
- tree vecitype)
+vect_convert_output (vec_info *vinfo, stmt_vec_info stmt_info, tree type,
+ gimple *pattern_stmt, tree vecitype)
{
tree lhs = gimple_get_lhs (pattern_stmt);
if (!types_compatible_p (type, TREE_TYPE (lhs)))
{
- append_pattern_def_seq (stmt_info, pattern_stmt, vecitype);
+ append_pattern_def_seq (vinfo, stmt_info, pattern_stmt, vecitype);
tree cast_var = vect_recog_temp_ssa_var (type, NULL);
pattern_stmt = gimple_build_assign (cast_var, NOP_EXPR, lhs);
}
*OP0_OUT and *OP1_OUT. */
static bool
-vect_reassociating_reduction_p (stmt_vec_info stmt_info, tree_code code,
+vect_reassociating_reduction_p (vec_info *vinfo,
+ stmt_vec_info stmt_info, tree_code code,
tree *op0_out, tree *op1_out)
{
- loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vec_info loop_info = dyn_cast <loop_vec_info> (vinfo);
if (!loop_info)
return false;
inner-loop nested in an outer-loop that is being vectorized). */
static gimple *
-vect_recog_dot_prod_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
+vect_recog_dot_prod_pattern (vec_info *vinfo,
+ stmt_vec_info stmt_vinfo, tree *type_out)
{
tree oprnd0, oprnd1;
gimple *last_stmt = stmt_vinfo->stmt;
- vec_info *vinfo = stmt_vinfo->vinfo;
tree type, half_type;
gimple *pattern_stmt;
tree var;
/* Starting from LAST_STMT, follow the defs of its uses in search
of the above pattern. */
- if (!vect_reassociating_reduction_p (stmt_vinfo, PLUS_EXPR,
+ if (!vect_reassociating_reduction_p (vinfo, stmt_vinfo, PLUS_EXPR,
&oprnd0, &oprnd1))
return NULL;
/* FORNOW. Can continue analyzing the def-use chain when this stmt is in a phi
inside the loop (in case we are analyzing an outer-loop). */
vect_unpromoted_value unprom0[2];
- if (!vect_widened_op_tree (mult_vinfo, MULT_EXPR, WIDEN_MULT_EXPR,
+ if (!vect_widened_op_tree (vinfo, mult_vinfo, MULT_EXPR, WIDEN_MULT_EXPR,
false, 2, unprom0, &half_type))
return NULL;
/* Get the inputs in the appropriate types. */
tree mult_oprnd[2];
- vect_convert_inputs (stmt_vinfo, 2, mult_oprnd, half_type,
+ vect_convert_inputs (vinfo, stmt_vinfo, 2, mult_oprnd, half_type,
unprom0, half_vectype);
var = vect_recog_temp_ssa_var (type, NULL);
*/
static gimple *
-vect_recog_sad_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
+vect_recog_sad_pattern (vec_info *vinfo,
+ stmt_vec_info stmt_vinfo, tree *type_out)
{
gimple *last_stmt = stmt_vinfo->stmt;
- vec_info *vinfo = stmt_vinfo->vinfo;
tree half_type;
/* Look for the following pattern
of the above pattern. */
tree plus_oprnd0, plus_oprnd1;
- if (!vect_reassociating_reduction_p (stmt_vinfo, PLUS_EXPR,
+ if (!vect_reassociating_reduction_p (vinfo, stmt_vinfo, PLUS_EXPR,
&plus_oprnd0, &plus_oprnd1))
return NULL;
/* FORNOW. Can continue analyzing the def-use chain when this stmt is in a phi
inside the loop (in case we are analyzing an outer-loop). */
vect_unpromoted_value unprom[2];
- if (!vect_widened_op_tree (diff_stmt_vinfo, MINUS_EXPR, MINUS_EXPR,
+ if (!vect_widened_op_tree (vinfo, diff_stmt_vinfo, MINUS_EXPR, MINUS_EXPR,
false, 2, unprom, &half_type))
return NULL;
/* Get the inputs to the SAD_EXPR in the appropriate types. */
tree sad_oprnd[2];
- vect_convert_inputs (stmt_vinfo, 2, sad_oprnd, half_type,
+ vect_convert_inputs (vinfo, stmt_vinfo, 2, sad_oprnd, half_type,
unprom, half_vectype);
tree var = vect_recog_temp_ssa_var (sum_type, NULL);
name of the pattern being matched, for dump purposes. */
static gimple *
-vect_recog_widen_op_pattern (stmt_vec_info last_stmt_info, tree *type_out,
+vect_recog_widen_op_pattern (vec_info *vinfo,
+ stmt_vec_info last_stmt_info, tree *type_out,
tree_code orig_code, tree_code wide_code,
bool shift_p, const char *name)
{
- vec_info *vinfo = last_stmt_info->vinfo;
gimple *last_stmt = last_stmt_info->stmt;
vect_unpromoted_value unprom[2];
tree half_type;
- if (!vect_widened_op_tree (last_stmt_info, orig_code, orig_code,
+ if (!vect_widened_op_tree (vinfo, last_stmt_info, orig_code, orig_code,
shift_p, 2, unprom, &half_type))
return NULL;
auto_vec<tree> dummy_vec;
if (!vectype
|| !vecitype
- || !supportable_widening_operation (wide_code, last_stmt_info,
+ || !supportable_widening_operation (vinfo, wide_code, last_stmt_info,
vecitype, vectype,
&dummy_code, &dummy_code,
&dummy_int, &dummy_vec))
return NULL;
tree oprnd[2];
- vect_convert_inputs (last_stmt_info, 2, oprnd, half_type, unprom, vectype);
+ vect_convert_inputs (vinfo, last_stmt_info,
+ 2, oprnd, half_type, unprom, vectype);
tree var = vect_recog_temp_ssa_var (itype, NULL);
gimple *pattern_stmt = gimple_build_assign (var, wide_code,
oprnd[0], oprnd[1]);
- return vect_convert_output (last_stmt_info, type, pattern_stmt, vecitype);
+ return vect_convert_output (vinfo, last_stmt_info,
+ type, pattern_stmt, vecitype);
}
/* Try to detect multiplication on widened inputs, converting MULT_EXPR
to WIDEN_MULT_EXPR. See vect_recog_widen_op_pattern for details. */
static gimple *
-vect_recog_widen_mult_pattern (stmt_vec_info last_stmt_info, tree *type_out)
+vect_recog_widen_mult_pattern (vec_info *vinfo, stmt_vec_info last_stmt_info,
+ tree *type_out)
{
- return vect_recog_widen_op_pattern (last_stmt_info, type_out, MULT_EXPR,
- WIDEN_MULT_EXPR, false,
+ return vect_recog_widen_op_pattern (vinfo, last_stmt_info, type_out,
+ MULT_EXPR, WIDEN_MULT_EXPR, false,
"vect_recog_widen_mult_pattern");
}
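The recognizer above reduces to vect_recog_widen_op_pattern; the scalar
shape being matched is, roughly (a hedged sketch, not taken from the
patch itself):

  int
  widen_mult (short a, short b)
  {
    return (int) a * (int) b;   /* becomes WIDEN_MULT_EXPR <a, b> */
  }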
*/
static gimple *
-vect_recog_pow_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
+vect_recog_pow_pattern (vec_info *vinfo,
+ stmt_vec_info stmt_vinfo, tree *type_out)
{
- vec_info *vinfo = stmt_vinfo->vinfo;
gimple *last_stmt = stmt_vinfo->stmt;
tree base, exp;
gimple *stmt;
return NULL;
tree def = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL);
gimple *g = gimple_build_assign (def, MULT_EXPR, exp, logc);
- append_pattern_def_seq (stmt_vinfo, g);
+ append_pattern_def_seq (vinfo, stmt_vinfo, g);
tree res = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL);
g = gimple_build_call (exp_decl, 1, def);
gimple_call_set_lhs (g, res);
inner-loop nested in an outer-loop that is being vectorized). */
static gimple *
-vect_recog_widen_sum_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
+vect_recog_widen_sum_pattern (vec_info *vinfo,
+ stmt_vec_info stmt_vinfo, tree *type_out)
{
gimple *last_stmt = stmt_vinfo->stmt;
tree oprnd0, oprnd1;
- vec_info *vinfo = stmt_vinfo->vinfo;
tree type;
gimple *pattern_stmt;
tree var;
/* Starting from LAST_STMT, follow the defs of its uses in search
of the above pattern. */
- if (!vect_reassociating_reduction_p (stmt_vinfo, PLUS_EXPR,
+ if (!vect_reassociating_reduction_p (vinfo, stmt_vinfo, PLUS_EXPR,
&oprnd0, &oprnd1))
return NULL;
by users of the result. */
static gimple *
-vect_recog_over_widening_pattern (stmt_vec_info last_stmt_info, tree *type_out)
+vect_recog_over_widening_pattern (vec_info *vinfo,
+ stmt_vec_info last_stmt_info, tree *type_out)
{
gassign *last_stmt = dyn_cast <gassign *> (last_stmt_info->stmt);
if (!last_stmt)
if (!new_precision)
return NULL;
- vec_info *vinfo = last_stmt_info->vinfo;
tree lhs = gimple_assign_lhs (last_stmt);
tree type = TREE_TYPE (lhs);
tree_code code = gimple_assign_rhs_code (last_stmt);
tree ops[3] = {};
for (unsigned int i = 1; i < first_op; ++i)
ops[i - 1] = gimple_op (last_stmt, i);
- vect_convert_inputs (last_stmt_info, nops, &ops[first_op - 1],
+ vect_convert_inputs (vinfo, last_stmt_info, nops, &ops[first_op - 1],
op_type, &unprom[0], op_vectype);
/* Use the operation to produce a result of type OP_TYPE. */
/* Convert back to the original signedness, if OP_TYPE is different
from NEW_TYPE. */
if (op_type != new_type)
- pattern_stmt = vect_convert_output (last_stmt_info, new_type,
+ pattern_stmt = vect_convert_output (vinfo, last_stmt_info, new_type,
pattern_stmt, op_vectype);
/* Promote the result to the original type. */
- pattern_stmt = vect_convert_output (last_stmt_info, type,
+ pattern_stmt = vect_convert_output (vinfo, last_stmt_info, type,
pattern_stmt, new_vectype);
return pattern_stmt;
where only the bottom half of res is used. */
static gimple *
-vect_recog_mulhs_pattern (stmt_vec_info last_stmt_info, tree *type_out)
+vect_recog_mulhs_pattern (vec_info *vinfo,
+ stmt_vec_info last_stmt_info, tree *type_out)
{
/* Check for a right shift. */
gassign *last_stmt = dyn_cast <gassign *> (last_stmt_info->stmt);
if (!last_stmt
|| gimple_assign_rhs_code (last_stmt) != RSHIFT_EXPR)
return NULL;
- vec_info *vinfo = last_stmt_info->vinfo;
/* Check that the shift result is wider than the users of the
result need (i.e. that narrowing would be a natural choice). */
vect_unpromoted_value unprom_mult[2];
tree new_type;
unsigned int nops
- = vect_widened_op_tree (mulh_stmt_info, MULT_EXPR, WIDEN_MULT_EXPR,
+ = vect_widened_op_tree (vinfo, mulh_stmt_info, MULT_EXPR, WIDEN_MULT_EXPR,
false, 2, unprom_mult, &new_type);
if (nops != 2)
return NULL;
/* Generate the IFN_MULHRS call. */
tree new_var = vect_recog_temp_ssa_var (new_type, NULL);
tree new_ops[2];
- vect_convert_inputs (last_stmt_info, 2, new_ops, new_type,
+ vect_convert_inputs (vinfo, last_stmt_info, 2, new_ops, new_type,
unprom_mult, new_vectype);
gcall *mulhrs_stmt
= gimple_build_call_internal (ifn, 2, new_ops[0], new_ops[1]);
dump_printf_loc (MSG_NOTE, vect_location,
"created pattern stmt: %G", mulhrs_stmt);
- return vect_convert_output (last_stmt_info, lhs_type,
+ return vect_convert_output (vinfo, last_stmt_info, lhs_type,
mulhrs_stmt, new_vectype);
}
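In scalar terms, the shape vect_recog_mulhs_pattern matches is a
full-width multiply whose result is immediately shifted so that only
the high half survives (a hedged sketch; which IFN is chosen depends
on the rounding term):

  short
  mul_highpart (short a, short b)
  {
    /* High-part multiply; lowered to an IFN_MULH-family call.  */
    return (short) (((int) a * (int) b) >> 16);
  }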
over plus and add a carry. */
static gimple *
-vect_recog_average_pattern (stmt_vec_info last_stmt_info, tree *type_out)
+vect_recog_average_pattern (vec_info *vinfo,
+ stmt_vec_info last_stmt_info, tree *type_out)
{
/* Check for a shift right by one bit. */
gassign *last_stmt = dyn_cast <gassign *> (last_stmt_info->stmt);
- vec_info *vinfo = last_stmt_info->vinfo;
if (!last_stmt
|| gimple_assign_rhs_code (last_stmt) != RSHIFT_EXPR
|| !integer_onep (gimple_assign_rhs2 (last_stmt)))
internal_fn ifn = IFN_AVG_FLOOR;
vect_unpromoted_value unprom[3];
tree new_type;
- unsigned int nops = vect_widened_op_tree (plus_stmt_info, PLUS_EXPR,
+ unsigned int nops = vect_widened_op_tree (vinfo, plus_stmt_info, PLUS_EXPR,
PLUS_EXPR, false, 3,
unprom, &new_type);
if (nops == 0)
tree new_var = vect_recog_temp_ssa_var (new_type, NULL);
tree new_ops[2];
- vect_convert_inputs (last_stmt_info, 2, new_ops, new_type,
+ vect_convert_inputs (vinfo, last_stmt_info, 2, new_ops, new_type,
unprom, new_vectype);
if (fallback_p)
tree shifted_op0 = vect_recog_temp_ssa_var (new_type, NULL);
g = gimple_build_assign (shifted_op0, RSHIFT_EXPR, new_ops[0], one_cst);
- append_pattern_def_seq (last_stmt_info, g, new_vectype);
+ append_pattern_def_seq (vinfo, last_stmt_info, g, new_vectype);
tree shifted_op1 = vect_recog_temp_ssa_var (new_type, NULL);
g = gimple_build_assign (shifted_op1, RSHIFT_EXPR, new_ops[1], one_cst);
- append_pattern_def_seq (last_stmt_info, g, new_vectype);
+ append_pattern_def_seq (vinfo, last_stmt_info, g, new_vectype);
tree sum_of_shifted = vect_recog_temp_ssa_var (new_type, NULL);
g = gimple_build_assign (sum_of_shifted, PLUS_EXPR,
shifted_op0, shifted_op1);
- append_pattern_def_seq (last_stmt_info, g, new_vectype);
+ append_pattern_def_seq (vinfo, last_stmt_info, g, new_vectype);
tree unmasked_carry = vect_recog_temp_ssa_var (new_type, NULL);
tree_code c = (ifn == IFN_AVG_CEIL) ? BIT_IOR_EXPR : BIT_AND_EXPR;
g = gimple_build_assign (unmasked_carry, c, new_ops[0], new_ops[1]);
- append_pattern_def_seq (last_stmt_info, g, new_vectype);
+ append_pattern_def_seq (vinfo, last_stmt_info, g, new_vectype);
tree carry = vect_recog_temp_ssa_var (new_type, NULL);
g = gimple_build_assign (carry, BIT_AND_EXPR, unmasked_carry, one_cst);
- append_pattern_def_seq (last_stmt_info, g, new_vectype);
+ append_pattern_def_seq (vinfo, last_stmt_info, g, new_vectype);
g = gimple_build_assign (new_var, PLUS_EXPR, sum_of_shifted, carry);
- return vect_convert_output (last_stmt_info, type, g, new_vectype);
+ return vect_convert_output (vinfo, last_stmt_info, type, g, new_vectype);
}
/* Generate the IFN_AVG* call. */
dump_printf_loc (MSG_NOTE, vect_location,
"created pattern stmt: %G", average_stmt);
- return vect_convert_output (last_stmt_info, type, average_stmt, new_vectype);
+ return vect_convert_output (vinfo, last_stmt_info,
+ type, average_stmt, new_vectype);
}
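The fallback sequence above avoids the wider type by distributing the
shift over the addition and adding the carry back in.  As self-contained
scalar C (a hedged sketch assuming unsigned operands):

  unsigned int
  avg_floor (unsigned int a, unsigned int b)
  {
    /* (a + b) >> 1 without intermediate overflow; BIT_AND_EXPR
       supplies the carry.  */
    return (a >> 1) + (b >> 1) + ((a & b) & 1);
  }

  unsigned int
  avg_ceil (unsigned int a, unsigned int b)
  {
    /* (a + b + 1) >> 1; BIT_IOR_EXPR supplies the carry, matching
       the IFN_AVG_CEIL branch above.  */
    return (a >> 1) + (b >> 1) + ((a | b) & 1);
  }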
/* Recognize cases in which the input to a cast is wider than its
input doesn't. */
static gimple *
-vect_recog_cast_forwprop_pattern (stmt_vec_info last_stmt_info, tree *type_out)
+vect_recog_cast_forwprop_pattern (vec_info *vinfo,
+ stmt_vec_info last_stmt_info, tree *type_out)
{
/* Check for a cast, including an integer-to-float conversion. */
gassign *last_stmt = dyn_cast <gassign *> (last_stmt_info->stmt);
return NULL;
/* Try to find an unpromoted input. */
- vec_info *vinfo = last_stmt_info->vinfo;
vect_unpromoted_value unprom;
if (!vect_look_through_possible_promotion (vinfo, rhs, &unprom)
|| TYPE_PRECISION (unprom.type) >= TYPE_PRECISION (rhs_type))
to WIDEN_LSHIFT_EXPR. See vect_recog_widen_op_pattern for details. */
static gimple *
-vect_recog_widen_shift_pattern (stmt_vec_info last_stmt_info, tree *type_out)
+vect_recog_widen_shift_pattern (vec_info *vinfo,
+ stmt_vec_info last_stmt_info, tree *type_out)
{
- return vect_recog_widen_op_pattern (last_stmt_info, type_out, LSHIFT_EXPR,
- WIDEN_LSHIFT_EXPR, true,
+ return vect_recog_widen_op_pattern (vinfo, last_stmt_info, type_out,
+ LSHIFT_EXPR, WIDEN_LSHIFT_EXPR, true,
"vect_recog_widen_shift_pattern");
}
S0 stmt. */
static gimple *
-vect_recog_rotate_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
+vect_recog_rotate_pattern (vec_info *vinfo,
+ stmt_vec_info stmt_vinfo, tree *type_out)
{
gimple *last_stmt = stmt_vinfo->stmt;
tree oprnd0, oprnd1, lhs, var, var1, var2, vectype, type, stype, def, def2;
gimple *pattern_stmt, *def_stmt;
enum tree_code rhs_code;
- vec_info *vinfo = stmt_vinfo->vinfo;
enum vect_def_type dt;
optab optab1, optab2;
edge ext_def = NULL;
{
def = vect_recog_temp_ssa_var (type, NULL);
def_stmt = gimple_build_assign (def, NOP_EXPR, oprnd0);
- append_pattern_def_seq (stmt_vinfo, def_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
oprnd0 = def;
}
{
def = vect_recog_temp_ssa_var (type, NULL);
def_stmt = gimple_build_assign (def, NOP_EXPR, oprnd0);
- append_pattern_def_seq (stmt_vinfo, def_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
oprnd0 = def;
}
{
def = vect_recog_temp_ssa_var (type, NULL);
def_stmt = gimple_build_assign (def, NOP_EXPR, oprnd0);
- append_pattern_def_seq (stmt_vinfo, def_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
oprnd0 = def;
}
{
def = vect_recog_temp_ssa_var (type, NULL);
def_stmt = gimple_build_assign (def, NOP_EXPR, oprnd1);
- append_pattern_def_seq (stmt_vinfo, def_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
}
stype = TREE_TYPE (def);
scalar_int_mode smode = SCALAR_INT_TYPE_MODE (stype);
gcc_assert (!new_bb);
}
else
- append_pattern_def_seq (stmt_vinfo, def_stmt, vecstype);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt, vecstype);
def2 = vect_recog_temp_ssa_var (stype, NULL);
tree mask = build_int_cst (stype, GET_MODE_PRECISION (smode) - 1);
gcc_assert (!new_bb);
}
else
- append_pattern_def_seq (stmt_vinfo, def_stmt, vecstype);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt, vecstype);
}
var1 = vect_recog_temp_ssa_var (type, NULL);
def_stmt = gimple_build_assign (var1, rhs_code == LROTATE_EXPR
? LSHIFT_EXPR : RSHIFT_EXPR,
oprnd0, def);
- append_pattern_def_seq (stmt_vinfo, def_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
var2 = vect_recog_temp_ssa_var (type, NULL);
def_stmt = gimple_build_assign (var2, rhs_code == LROTATE_EXPR
? RSHIFT_EXPR : LSHIFT_EXPR,
oprnd0, def2);
- append_pattern_def_seq (stmt_vinfo, def_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
/* Pattern detected. */
vect_pattern_detected ("vect_recog_rotate_pattern", last_stmt);
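When no rotate optab is available, the two shifts and the negated,
masked amount built above implement the usual branch-free rotate.  A
scalar sketch of the lowering (hedged, assuming an unsigned type):

  unsigned int
  rotl (unsigned int x, unsigned int n)
  {
    unsigned int mask = 8 * sizeof (x) - 1;   /* GET_MODE_PRECISION - 1 */
    return (x << (n & mask)) | (x >> (-n & mask));
  }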
S3 stmt. */
static gimple *
-vect_recog_vector_vector_shift_pattern (stmt_vec_info stmt_vinfo,
+vect_recog_vector_vector_shift_pattern (vec_info *vinfo,
+ stmt_vec_info stmt_vinfo,
tree *type_out)
{
gimple *last_stmt = stmt_vinfo->stmt;
tree oprnd0, oprnd1, lhs, var;
gimple *pattern_stmt;
enum tree_code rhs_code;
- vec_info *vinfo = stmt_vinfo->vinfo;
if (!is_gimple_assign (last_stmt))
return NULL;
def_stmt = gimple_build_assign (def, BIT_AND_EXPR, rhs1, mask);
tree vecstype = get_vectype_for_scalar_type (vinfo,
TREE_TYPE (rhs1));
- append_pattern_def_seq (stmt_vinfo, def_stmt, vecstype);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt, vecstype);
}
}
}
{
def = vect_recog_temp_ssa_var (TREE_TYPE (oprnd0), NULL);
def_stmt = gimple_build_assign (def, NOP_EXPR, oprnd1);
- append_pattern_def_seq (stmt_vinfo, def_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
}
/* Pattern detected. */
VINFO. Return the last statement. */
static gimple *
-synth_lshift_by_additions (tree dest, tree op, HOST_WIDE_INT amnt,
- stmt_vec_info vinfo)
+synth_lshift_by_additions (vec_info *vinfo,
+ tree dest, tree op, HOST_WIDE_INT amnt,
+ stmt_vec_info stmt_info)
{
HOST_WIDE_INT i;
tree itype = TREE_TYPE (op);
= gimple_build_assign (tmp_var, PLUS_EXPR, prev_res, prev_res);
prev_res = tmp_var;
if (i < amnt - 1)
- append_pattern_def_seq (vinfo, stmt);
+ append_pattern_def_seq (vinfo, stmt_info, stmt);
else
return stmt;
}
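The expansion is plain repeated doubling: each step emits one PLUS_EXPR
of the previous result with itself.  A scalar sketch of the same idea
(hedged, not from the patch):

  int
  lshift_by_additions (int x, int amnt)
  {
    for (int i = 0; i < amnt; i++)
      x = x + x;   /* one PLUS_EXPR per step in the def sequence */
    return x;
  }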
left shifts using additions. */
static tree
-apply_binop_and_append_stmt (tree_code code, tree op1, tree op2,
+apply_binop_and_append_stmt (vec_info *vinfo,
+ tree_code code, tree op1, tree op2,
stmt_vec_info stmt_vinfo, bool synth_shift_p)
{
if (integer_zerop (op2)
if (code == LSHIFT_EXPR
&& synth_shift_p)
{
- stmt = synth_lshift_by_additions (tmp_var, op1, TREE_INT_CST_LOW (op2),
- stmt_vinfo);
- append_pattern_def_seq (stmt_vinfo, stmt);
+ stmt = synth_lshift_by_additions (vinfo, tmp_var, op1,
+ TREE_INT_CST_LOW (op2), stmt_vinfo);
+ append_pattern_def_seq (vinfo, stmt_vinfo, stmt);
return tmp_var;
}
stmt = gimple_build_assign (tmp_var, code, op1, op2);
- append_pattern_def_seq (stmt_vinfo, stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, stmt);
return tmp_var;
}
works on tree-ssa form. */
static gimple *
-vect_synth_mult_by_constant (tree op, tree val,
+vect_synth_mult_by_constant (vec_info *vinfo, tree op, tree val,
stmt_vec_info stmt_vinfo)
{
- vec_info *vinfo = stmt_vinfo->vinfo;
tree itype = TREE_TYPE (op);
machine_mode mode = TYPE_MODE (itype);
struct algorithm alg;
{
tree tmp_op = vect_recog_temp_ssa_var (multtype, NULL);
stmt = gimple_build_assign (tmp_op, CONVERT_EXPR, op);
- append_pattern_def_seq (stmt_vinfo, stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, stmt);
op = tmp_op;
}
case alg_shift:
if (synth_shift_p)
stmt
- = synth_lshift_by_additions (accum_tmp, accumulator, alg.log[i],
- stmt_vinfo);
+ = synth_lshift_by_additions (vinfo, accum_tmp, accumulator,
+ alg.log[i], stmt_vinfo);
else
stmt = gimple_build_assign (accum_tmp, LSHIFT_EXPR, accumulator,
shft_log);
break;
case alg_add_t_m2:
tmp_var
- = apply_binop_and_append_stmt (LSHIFT_EXPR, op, shft_log,
- stmt_vinfo, synth_shift_p);
+ = apply_binop_and_append_stmt (vinfo, LSHIFT_EXPR, op, shft_log,
+ stmt_vinfo, synth_shift_p);
stmt = gimple_build_assign (accum_tmp, PLUS_EXPR, accumulator,
tmp_var);
break;
case alg_sub_t_m2:
- tmp_var = apply_binop_and_append_stmt (LSHIFT_EXPR, op,
- shft_log, stmt_vinfo,
- synth_shift_p);
+ tmp_var = apply_binop_and_append_stmt (vinfo, LSHIFT_EXPR, op,
+ shft_log, stmt_vinfo,
+ synth_shift_p);
/* In some algorithms the first step involves zeroing the
accumulator. If subtracting from such an accumulator,
just emit the negation directly. */
break;
case alg_add_t2_m:
tmp_var
- = apply_binop_and_append_stmt (LSHIFT_EXPR, accumulator, shft_log,
- stmt_vinfo, synth_shift_p);
+ = apply_binop_and_append_stmt (vinfo, LSHIFT_EXPR, accumulator,
+ shft_log, stmt_vinfo, synth_shift_p);
stmt = gimple_build_assign (accum_tmp, PLUS_EXPR, tmp_var, op);
break;
case alg_sub_t2_m:
tmp_var
- = apply_binop_and_append_stmt (LSHIFT_EXPR, accumulator, shft_log,
- stmt_vinfo, synth_shift_p);
+ = apply_binop_and_append_stmt (vinfo, LSHIFT_EXPR, accumulator,
+ shft_log, stmt_vinfo, synth_shift_p);
stmt = gimple_build_assign (accum_tmp, MINUS_EXPR, tmp_var, op);
break;
case alg_add_factor:
tmp_var
- = apply_binop_and_append_stmt (LSHIFT_EXPR, accumulator, shft_log,
- stmt_vinfo, synth_shift_p);
+ = apply_binop_and_append_stmt (vinfo, LSHIFT_EXPR, accumulator,
+ shft_log, stmt_vinfo, synth_shift_p);
stmt = gimple_build_assign (accum_tmp, PLUS_EXPR, accumulator,
tmp_var);
break;
case alg_sub_factor:
tmp_var
- = apply_binop_and_append_stmt (LSHIFT_EXPR, accumulator, shft_log,
- stmt_vinfo, synth_shift_p);
+ = apply_binop_and_append_stmt (vinfo, LSHIFT_EXPR, accumulator,
+ shft_log, stmt_vinfo, synth_shift_p);
stmt = gimple_build_assign (accum_tmp, MINUS_EXPR, tmp_var,
accumulator);
break;
but rather return it directly. */
if ((i < alg.ops - 1) || needs_fixup || cast_to_unsigned_p)
- append_pattern_def_seq (stmt_vinfo, stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, stmt);
accumulator = accum_tmp;
}
if (variant == negate_variant)
stmt = gimple_build_assign (accum_tmp, NEGATE_EXPR, accumulator);
accumulator = accum_tmp;
if (cast_to_unsigned_p)
- append_pattern_def_seq (stmt_vinfo, stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, stmt);
}
else if (variant == add_variant)
{
stmt = gimple_build_assign (accum_tmp, PLUS_EXPR, accumulator, op);
accumulator = accum_tmp;
if (cast_to_unsigned_p)
- append_pattern_def_seq (stmt_vinfo, stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, stmt);
}
/* Move back to a signed if needed. */
if (cast_to_unsigned_p)
the multiplication. */
static gimple *
-vect_recog_mult_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
+vect_recog_mult_pattern (vec_info *vinfo,
+ stmt_vec_info stmt_vinfo, tree *type_out)
{
- vec_info *vinfo = stmt_vinfo->vinfo;
gimple *last_stmt = stmt_vinfo->stmt;
tree oprnd0, oprnd1, vectype, itype;
gimple *pattern_stmt;
return NULL;
}
- pattern_stmt = vect_synth_mult_by_constant (oprnd0, oprnd1, stmt_vinfo);
+ pattern_stmt = vect_synth_mult_by_constant (vinfo,
+ oprnd0, oprnd1, stmt_vinfo);
if (!pattern_stmt)
return NULL;
S1 or modulo S4 stmt. */
static gimple *
-vect_recog_divmod_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
+vect_recog_divmod_pattern (vec_info *vinfo,
+ stmt_vec_info stmt_vinfo, tree *type_out)
{
- vec_info *vinfo = stmt_vinfo->vinfo;
gimple *last_stmt = stmt_vinfo->stmt;
tree oprnd0, oprnd1, vectype, itype, cond;
gimple *pattern_stmt, *def_stmt;
if (rhs_code == TRUNC_MOD_EXPR)
{
- append_pattern_def_seq (stmt_vinfo, div_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, div_stmt);
def_stmt
= gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
LSHIFT_EXPR, var_div, shift);
- append_pattern_def_seq (stmt_vinfo, def_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
pattern_stmt
= gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
MINUS_EXPR, oprnd0,
fold_build2 (MINUS_EXPR, itype, oprnd1,
build_int_cst (itype, 1)),
build_int_cst (itype, 0));
- append_pattern_def_seq (stmt_vinfo, def_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
var = vect_recog_temp_ssa_var (itype, NULL);
def_stmt
= gimple_build_assign (var, PLUS_EXPR, oprnd0,
gimple_assign_lhs (def_stmt));
- append_pattern_def_seq (stmt_vinfo, def_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
shift = build_int_cst (itype, tree_log2 (oprnd1));
pattern_stmt
def_stmt = gimple_build_assign (signmask, COND_EXPR, cond,
build_int_cst (itype, 1),
build_int_cst (itype, 0));
- append_pattern_def_seq (stmt_vinfo, def_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
}
else
{
def_stmt = gimple_build_assign (var, COND_EXPR, cond,
build_int_cst (utype, -1),
build_int_cst (utype, 0));
- append_pattern_def_seq (stmt_vinfo, def_stmt, vecutype);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt, vecutype);
var = vect_recog_temp_ssa_var (utype, NULL);
def_stmt = gimple_build_assign (var, RSHIFT_EXPR,
gimple_assign_lhs (def_stmt),
shift);
- append_pattern_def_seq (stmt_vinfo, def_stmt, vecutype);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt, vecutype);
signmask = vect_recog_temp_ssa_var (itype, NULL);
def_stmt
= gimple_build_assign (signmask, NOP_EXPR, var);
- append_pattern_def_seq (stmt_vinfo, def_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
}
def_stmt
= gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
PLUS_EXPR, oprnd0, signmask);
- append_pattern_def_seq (stmt_vinfo, def_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
def_stmt
= gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
BIT_AND_EXPR, gimple_assign_lhs (def_stmt),
fold_build2 (MINUS_EXPR, itype, oprnd1,
build_int_cst (itype, 1)));
- append_pattern_def_seq (stmt_vinfo, def_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
pattern_stmt
= gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
t1 = vect_recog_temp_ssa_var (itype, NULL);
def_stmt = gimple_build_assign (t1, MULT_HIGHPART_EXPR, oprnd0,
build_int_cst (itype, ml));
- append_pattern_def_seq (stmt_vinfo, def_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
t2 = vect_recog_temp_ssa_var (itype, NULL);
def_stmt
= gimple_build_assign (t2, MINUS_EXPR, oprnd0, t1);
- append_pattern_def_seq (stmt_vinfo, def_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
t3 = vect_recog_temp_ssa_var (itype, NULL);
def_stmt
= gimple_build_assign (t3, RSHIFT_EXPR, t2, integer_one_node);
- append_pattern_def_seq (stmt_vinfo, def_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
t4 = vect_recog_temp_ssa_var (itype, NULL);
def_stmt
if (post_shift != 1)
{
- append_pattern_def_seq (stmt_vinfo, def_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
q = vect_recog_temp_ssa_var (itype, NULL);
pattern_stmt
def_stmt
= gimple_build_assign (t1, RSHIFT_EXPR, oprnd0,
build_int_cst (NULL, pre_shift));
- append_pattern_def_seq (stmt_vinfo, def_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
}
else
t1 = oprnd0;
if (post_shift)
{
- append_pattern_def_seq (stmt_vinfo, def_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
q = vect_recog_temp_ssa_var (itype, NULL);
def_stmt
if (add)
{
/* t2 = t1 + oprnd0; */
- append_pattern_def_seq (stmt_vinfo, def_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
t2 = vect_recog_temp_ssa_var (itype, NULL);
def_stmt = gimple_build_assign (t2, PLUS_EXPR, t1, oprnd0);
}
if (post_shift)
{
/* t3 = t2 >> post_shift; */
- append_pattern_def_seq (stmt_vinfo, def_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
t3 = vect_recog_temp_ssa_var (itype, NULL);
def_stmt = gimple_build_assign (t3, RSHIFT_EXPR, t2,
build_int_cst (itype, post_shift));
t4 = 0;
or if we know from VRP that oprnd0 < 0
t4 = -1; */
- append_pattern_def_seq (stmt_vinfo, def_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
t4 = vect_recog_temp_ssa_var (itype, NULL);
if (msb != 1)
def_stmt = gimple_build_assign (t4, INTEGER_CST,
else
def_stmt = gimple_build_assign (t4, RSHIFT_EXPR, oprnd0,
build_int_cst (itype, prec - 1));
- append_pattern_def_seq (stmt_vinfo, def_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
/* q = t3 - t4; or q = t4 - t3; */
q = vect_recog_temp_ssa_var (itype, NULL);
/* We divided. Now finish by:
t1 = q * oprnd1;
r = oprnd0 - t1; */
- append_pattern_def_seq (stmt_vinfo, pattern_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, pattern_stmt);
t1 = vect_recog_temp_ssa_var (itype, NULL);
def_stmt = gimple_build_assign (t1, MULT_EXPR, q, oprnd1);
- append_pattern_def_seq (stmt_vinfo, def_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
r = vect_recog_temp_ssa_var (itype, NULL);
pattern_stmt = gimple_build_assign (r, MINUS_EXPR, oprnd0, t1);
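For the signed modulo-by-power-of-two case, the signmask sequence above
computes a branch-free identity.  A scalar sketch (hedged; d is assumed
to be a power of two):

  int
  mod_pow2 (int x, int d)
  {
    int signmask = (x < 0) ? d - 1 : 0;   /* via COND_EXPR or shifts above */
    return ((x + signmask) & (d - 1)) - signmask;
  }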
a_T = (TYPE) a_it; */
static gimple *
-vect_recog_mixed_size_cond_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
+vect_recog_mixed_size_cond_pattern (vec_info *vinfo,
+ stmt_vec_info stmt_vinfo, tree *type_out)
{
- vec_info *vinfo = stmt_vinfo->vinfo;
gimple *last_stmt = stmt_vinfo->stmt;
tree cond_expr, then_clause, else_clause;
tree type, vectype, comp_vectype, itype = NULL_TREE, vecitype;
return NULL;
if ((TREE_CODE (then_clause) != INTEGER_CST
- && !type_conversion_p (then_clause, stmt_vinfo, false, &orig_type0,
- &def_stmt0, &promotion))
+ && !type_conversion_p (vinfo, then_clause, false,
+ &orig_type0, &def_stmt0, &promotion))
|| (TREE_CODE (else_clause) != INTEGER_CST
- && !type_conversion_p (else_clause, stmt_vinfo, false, &orig_type1,
- &def_stmt1, &promotion)))
+ && !type_conversion_p (vinfo, else_clause, false,
+ &orig_type1, &def_stmt1, &promotion)))
return NULL;
if (orig_type0 && orig_type1
pattern_stmt = gimple_build_assign (vect_recog_temp_ssa_var (type, NULL),
NOP_EXPR, gimple_assign_lhs (def_stmt));
- append_pattern_def_seq (stmt_vinfo, def_stmt, vecitype);
+ append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt, vecitype);
*type_out = vectype;
vect_pattern_detected ("vect_recog_mixed_size_cond_pattern", last_stmt);
pattern sequence. */
static tree
-adjust_bool_pattern_cast (tree type, tree var, stmt_vec_info stmt_info)
+adjust_bool_pattern_cast (vec_info *vinfo,
+ tree type, tree var, stmt_vec_info stmt_info)
{
- vec_info *vinfo = stmt_info->vinfo;
gimple *cast_stmt = gimple_build_assign (vect_recog_temp_ssa_var (type, NULL),
NOP_EXPR, var);
- append_pattern_def_seq (stmt_info, cast_stmt,
+ append_pattern_def_seq (vinfo, stmt_info, cast_stmt,
get_vectype_for_scalar_type (vinfo, type));
return gimple_assign_lhs (cast_stmt);
}
be associated with. DEFS is a map of pattern defs. */
static void
-adjust_bool_pattern (tree var, tree out_type,
+adjust_bool_pattern (vec_info *vinfo, tree var, tree out_type,
stmt_vec_info stmt_info, hash_map <tree, tree> &defs)
{
- vec_info *vinfo = stmt_info->vinfo;
gimple *stmt = SSA_NAME_DEF_STMT (var);
enum tree_code rhs_code, def_rhs_code;
tree itype, cond_expr, rhs1, rhs2, irhs1, irhs2;
int prec2 = TYPE_PRECISION (TREE_TYPE (irhs2));
int out_prec = TYPE_PRECISION (out_type);
if (absu_hwi (out_prec - prec1) < absu_hwi (out_prec - prec2))
- irhs2 = adjust_bool_pattern_cast (TREE_TYPE (irhs1), irhs2,
+ irhs2 = adjust_bool_pattern_cast (vinfo, TREE_TYPE (irhs1), irhs2,
stmt_info);
else if (absu_hwi (out_prec - prec1) > absu_hwi (out_prec - prec2))
- irhs1 = adjust_bool_pattern_cast (TREE_TYPE (irhs2), irhs1,
+ irhs1 = adjust_bool_pattern_cast (vinfo, TREE_TYPE (irhs2), irhs1,
stmt_info);
else
{
- irhs1 = adjust_bool_pattern_cast (out_type, irhs1, stmt_info);
- irhs2 = adjust_bool_pattern_cast (out_type, irhs2, stmt_info);
+ irhs1 = adjust_bool_pattern_cast (vinfo,
+ out_type, irhs1, stmt_info);
+ irhs2 = adjust_bool_pattern_cast (vinfo,
+ out_type, irhs2, stmt_info);
}
}
itype = TREE_TYPE (irhs1);
}
gimple_set_location (pattern_stmt, loc);
- append_pattern_def_seq (stmt_info, pattern_stmt,
+ append_pattern_def_seq (vinfo, stmt_info, pattern_stmt,
get_vectype_for_scalar_type (vinfo, itype));
defs.put (var, gimple_assign_lhs (pattern_stmt));
}
OUT_TYPE. Return the def of the pattern root. */
static tree
-adjust_bool_stmts (hash_set <gimple *> &bool_stmt_set,
+adjust_bool_stmts (vec_info *vinfo, hash_set <gimple *> &bool_stmt_set,
tree out_type, stmt_vec_info stmt_info)
{
/* Gather original stmts in the bool pattern in their order of appearance
/* Now process them in that order, producing pattern stmts. */
hash_map <tree, tree> defs;
for (unsigned i = 0; i < bool_stmts.length (); ++i)
- adjust_bool_pattern (gimple_assign_lhs (bool_stmts[i]),
+ adjust_bool_pattern (vinfo, gimple_assign_lhs (bool_stmts[i]),
out_type, stmt_info, defs);
/* Pop the last pattern seq stmt and install it as pattern root for STMT. */
but the above is more efficient. */
static gimple *
-vect_recog_bool_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
+vect_recog_bool_pattern (vec_info *vinfo,
+ stmt_vec_info stmt_vinfo, tree *type_out)
{
gimple *last_stmt = stmt_vinfo->stmt;
enum tree_code rhs_code;
tree var, lhs, rhs, vectype;
- vec_info *vinfo = stmt_vinfo->vinfo;
gimple *pattern_stmt;
if (!is_gimple_assign (last_stmt))
if (check_bool_pattern (var, vinfo, bool_stmts))
{
- rhs = adjust_bool_stmts (bool_stmts, TREE_TYPE (lhs), stmt_vinfo);
+ rhs = adjust_bool_stmts (vinfo, bool_stmts,
+ TREE_TYPE (lhs), stmt_vinfo);
lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
if (useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
pattern_stmt = gimple_build_assign (lhs, SSA_NAME, rhs);
if (!useless_type_conversion_p (type, TREE_TYPE (lhs)))
{
tree new_vectype = get_vectype_for_scalar_type (vinfo, type);
- append_pattern_def_seq (stmt_vinfo, pattern_stmt, new_vectype);
+ append_pattern_def_seq (vinfo, stmt_vinfo,
+ pattern_stmt, new_vectype);
lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
pattern_stmt = gimple_build_assign (lhs, CONVERT_EXPR, tmp);
if (!check_bool_pattern (var, vinfo, bool_stmts))
return NULL;
- rhs = adjust_bool_stmts (bool_stmts, type, stmt_vinfo);
+ rhs = adjust_bool_stmts (vinfo, bool_stmts, type, stmt_vinfo);
lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
pattern_stmt
{
stmt_vec_info pattern_stmt_info;
tree nunits_vectype;
- if (!vect_get_vector_types_for_stmt (stmt_vinfo, &vectype,
+ if (!vect_get_vector_types_for_stmt (vinfo, stmt_vinfo, &vectype,
&nunits_vectype)
|| !VECTOR_MODE_P (TYPE_MODE (vectype)))
return NULL;
if (check_bool_pattern (var, vinfo, bool_stmts))
- rhs = adjust_bool_stmts (bool_stmts, TREE_TYPE (vectype), stmt_vinfo);
+ rhs = adjust_bool_stmts (vinfo, bool_stmts,
+ TREE_TYPE (vectype), stmt_vinfo);
else
{
tree type = integer_type_for_mask (var, vinfo);
rhs = vect_recog_temp_ssa_var (type, NULL);
pattern_stmt = gimple_build_assign (rhs, COND_EXPR, var, cst1, cst0);
- append_pattern_def_seq (stmt_vinfo, pattern_stmt, new_vectype);
+ append_pattern_def_seq (vinfo, stmt_vinfo, pattern_stmt, new_vectype);
}
lhs = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vectype), lhs);
{
tree rhs2 = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
gimple *cast_stmt = gimple_build_assign (rhs2, NOP_EXPR, rhs);
- append_pattern_def_seq (stmt_vinfo, cast_stmt);
+ append_pattern_def_seq (vinfo, stmt_vinfo, cast_stmt);
rhs = rhs2;
}
pattern_stmt = gimple_build_assign (lhs, SSA_NAME, rhs);
Return converted mask. */
static tree
-build_mask_conversion (tree mask, tree vectype, stmt_vec_info stmt_vinfo)
+build_mask_conversion (vec_info *vinfo,
+ tree mask, tree vectype, stmt_vec_info stmt_vinfo)
{
gimple *stmt;
tree masktype, tmp;
masktype = truth_type_for (vectype);
tmp = vect_recog_temp_ssa_var (TREE_TYPE (masktype), NULL);
stmt = gimple_build_assign (tmp, CONVERT_EXPR, mask);
- append_pattern_def_seq (stmt_vinfo, stmt, masktype, TREE_TYPE (vectype));
+ append_pattern_def_seq (vinfo, stmt_vinfo,
+ stmt, masktype, TREE_TYPE (vectype));
return tmp;
}
S4' c_1' = m_3'' ? c_2 : c_3; */
static gimple *
-vect_recog_mask_conversion_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
+vect_recog_mask_conversion_pattern (vec_info *vinfo,
+ stmt_vec_info stmt_vinfo, tree *type_out)
{
gimple *last_stmt = stmt_vinfo->stmt;
enum tree_code rhs_code;
tree lhs = NULL_TREE, rhs1, rhs2, tmp, rhs1_type, rhs2_type;
tree vectype1, vectype2;
stmt_vec_info pattern_stmt_info;
- vec_info *vinfo = stmt_vinfo->vinfo;
/* Check for MASK_LOAD and MASK_STORE calls requiring mask conversion. */
if (is_gimple_call (last_stmt)
TYPE_VECTOR_SUBPARTS (vectype2)))
return NULL;
- tmp = build_mask_conversion (mask_arg, vectype1, stmt_vinfo);
+ tmp = build_mask_conversion (vinfo, mask_arg, vectype1, stmt_vinfo);
auto_vec<tree, 8> args;
unsigned int nargs = gimple_call_num_args (last_stmt);
tmp = vect_recog_temp_ssa_var (TREE_TYPE (rhs1), NULL);
pattern_stmt = gimple_build_assign (tmp, rhs1);
rhs1 = tmp;
- append_pattern_def_seq (stmt_vinfo, pattern_stmt, vectype2,
+ append_pattern_def_seq (vinfo, stmt_vinfo, pattern_stmt, vectype2,
rhs1_type);
}
if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
TYPE_VECTOR_SUBPARTS (vectype2)))
- tmp = build_mask_conversion (rhs1, vectype1, stmt_vinfo);
+ tmp = build_mask_conversion (vinfo, rhs1, vectype1, stmt_vinfo);
else
tmp = rhs1;
vectype1 = get_mask_type_for_scalar_type (vinfo, rhs1_type);
if (!vectype1)
return NULL;
- rhs2 = build_mask_conversion (rhs2, vectype1, stmt_vinfo);
+ rhs2 = build_mask_conversion (vinfo, rhs2, vectype1, stmt_vinfo);
}
else
{
vectype1 = get_mask_type_for_scalar_type (vinfo, rhs2_type);
if (!vectype1)
return NULL;
- rhs1 = build_mask_conversion (rhs1, vectype1, stmt_vinfo);
+ rhs1 = build_mask_conversion (vinfo, rhs1, vectype1, stmt_vinfo);
}
lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
if (mask_vectype
&& maybe_ne (TYPE_VECTOR_SUBPARTS (vectype),
TYPE_VECTOR_SUBPARTS (mask_vectype)))
- mask = build_mask_conversion (mask, vectype, stmt_info);
+ mask = build_mask_conversion (vinfo, mask, vectype, stmt_info);
}
return mask;
}
to STMT_INFO. */
static tree
-vect_add_conversion_to_pattern (tree type, tree value, stmt_vec_info stmt_info)
+vect_add_conversion_to_pattern (vec_info *vinfo,
+ tree type, tree value, stmt_vec_info stmt_info)
{
if (useless_type_conversion_p (type, TREE_TYPE (value)))
return value;
- vec_info *vinfo = stmt_info->vinfo;
tree new_value = vect_recog_temp_ssa_var (type, NULL);
gassign *conversion = gimple_build_assign (new_value, CONVERT_EXPR, value);
- append_pattern_def_seq (stmt_info, conversion,
+ append_pattern_def_seq (vinfo, stmt_info, conversion,
get_vectype_for_scalar_type (vinfo, type));
return new_value;
}
as such from the outset (indicated by STMT_VINFO_GATHER_SCATTER_P). */
static gimple *
-vect_recog_gather_scatter_pattern (stmt_vec_info stmt_info, tree *type_out)
+vect_recog_gather_scatter_pattern (vec_info *vinfo,
+ stmt_vec_info stmt_info, tree *type_out)
{
/* Currently we only support this for loop vectorization. */
- loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (stmt_info->vinfo);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
if (!loop_vinfo)
return NULL;
latter to the same width as the vector elements. */
tree base = gs_info.base;
tree offset_type = TREE_TYPE (gs_info.offset_vectype);
- tree offset = vect_add_conversion_to_pattern (offset_type, gs_info.offset,
- stmt_info);
+ tree offset = vect_add_conversion_to_pattern (vinfo, offset_type,
+ gs_info.offset, stmt_info);
/* Build the new pattern statement. */
tree scale = size_int (gs_info.scale);
whose result is LHS. */
static bool
-vect_determine_min_output_precision_1 (stmt_vec_info stmt_info, tree lhs)
+vect_determine_min_output_precision_1 (vec_info *vinfo,
+ stmt_vec_info stmt_info, tree lhs)
{
/* Take the maximum precision required by users of the result. */
- vec_info *vinfo = stmt_info->vinfo;
unsigned int precision = 0;
imm_use_iterator iter;
use_operand_p use;
/* Calculate min_output_precision for STMT_INFO. */
static void
-vect_determine_min_output_precision (stmt_vec_info stmt_info)
+vect_determine_min_output_precision (vec_info *vinfo, stmt_vec_info stmt_info)
{
/* We're only interested in statements with a narrowable result. */
tree lhs = gimple_get_lhs (stmt_info->stmt);
|| !vect_narrowable_type_p (TREE_TYPE (lhs)))
return;
- if (!vect_determine_min_output_precision_1 (stmt_info, lhs))
+ if (!vect_determine_min_output_precision_1 (vinfo, stmt_info, lhs))
stmt_info->min_output_precision = TYPE_PRECISION (TREE_TYPE (lhs));
}
result in STMT_INFO->mask_precision. */
static void
-vect_determine_mask_precision (stmt_vec_info stmt_info)
+vect_determine_mask_precision (vec_info *vinfo, stmt_vec_info stmt_info)
{
- vec_info *vinfo = stmt_info->vinfo;
-
if (!possible_vector_mask_operation_p (stmt_info)
|| stmt_info->mask_precision)
return;
have already done so for the users of its result. */
void
-vect_determine_stmt_precisions (stmt_vec_info stmt_info)
+vect_determine_stmt_precisions (vec_info *vinfo, stmt_vec_info stmt_info)
{
- vect_determine_min_output_precision (stmt_info);
+ vect_determine_min_output_precision (vinfo, stmt_info);
if (gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt))
{
vect_determine_precisions_from_range (stmt_info, stmt);
vect_determine_precisions_from_users (stmt_info, stmt);
}
- vect_determine_mask_precision (stmt_info);
+ vect_determine_mask_precision (vinfo, stmt_info);
}
/* Walk backwards through the vectorizable region to determine the
for (gimple_stmt_iterator si = gsi_last_bb (bb);
!gsi_end_p (si); gsi_prev (&si))
vect_determine_stmt_precisions
- (vinfo->lookup_stmt (gsi_stmt (si)));
+ (vinfo, vinfo->lookup_stmt (gsi_stmt (si)));
}
}
else
stmt = gsi_stmt (si);
stmt_vec_info stmt_info = vinfo->lookup_stmt (stmt);
if (stmt_info && STMT_VINFO_VECTORIZABLE (stmt_info))
- vect_determine_stmt_precisions (stmt_info);
+ vect_determine_stmt_precisions (vinfo, stmt_info);
}
while (stmt != gsi_stmt (bb_vinfo->region_begin));
}
}
-typedef gimple *(*vect_recog_func_ptr) (stmt_vec_info, tree *);
+typedef gimple *(*vect_recog_func_ptr) (vec_info *, stmt_vec_info, tree *);
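Every entry in the recognizer table now matches this signature.  As a
minimal sketch (hypothetical recognizer name, not part of the patch),
a pattern function under the new convention looks like:

  static gimple *
  vect_recog_example_pattern (vec_info *vinfo, stmt_vec_info stmt_vinfo,
                              tree *type_out)
  {
    gassign *assign = dyn_cast <gassign *> (stmt_vinfo->stmt);
    if (!assign)
      return NULL;
    /* Defs are looked up through the explicit VINFO argument, not the
       removed stmt_vinfo->vinfo field.  */
    stmt_vec_info def_info = vinfo->lookup_def (gimple_assign_rhs1 (assign));
    if (!def_info)
      return NULL;
    *type_out = NULL_TREE;
    return NULL;   /* Sketch only: no pattern stmt is built.  */
  }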
struct vect_recog_func
{
/* Mark statements that are involved in a pattern. */
static inline void
-vect_mark_pattern_stmts (stmt_vec_info orig_stmt_info, gimple *pattern_stmt,
+vect_mark_pattern_stmts (vec_info *vinfo,
+ stmt_vec_info orig_stmt_info, gimple *pattern_stmt,
tree pattern_vectype)
{
stmt_vec_info orig_stmt_info_saved = orig_stmt_info;
dump_printf_loc (MSG_NOTE, vect_location,
"extra pattern stmt: %G", gsi_stmt (si));
stmt_vec_info pattern_stmt_info
- = vect_init_pattern_stmt (gsi_stmt (si),
+ = vect_init_pattern_stmt (vinfo, gsi_stmt (si),
orig_stmt_info, pattern_vectype);
/* Stmts in the def sequence are not vectorizable cycle or
induction defs; instead they should all be vect_internal_def
if (orig_pattern_stmt)
{
- vect_init_pattern_stmt (pattern_stmt, orig_stmt_info, pattern_vectype);
+ vect_init_pattern_stmt (vinfo, pattern_stmt,
+ orig_stmt_info, pattern_vectype);
/* Insert all the new pattern statements before the original one. */
gimple_seq *orig_def_seq = &STMT_VINFO_PATTERN_DEF_SEQ (orig_stmt_info);
gsi_remove (&gsi, false);
}
else
- vect_set_pattern_stmt (pattern_stmt, orig_stmt_info, pattern_vectype);
+ vect_set_pattern_stmt (vinfo,
+ pattern_stmt, orig_stmt_info, pattern_vectype);
/* Transfer reduction path info to the pattern. */
if (STMT_VINFO_REDUC_IDX (orig_stmt_info_saved) != -1)
{
- vec_info *vinfo = orig_stmt_info_saved->vinfo;
tree lookfor = gimple_op (orig_stmt_info_saved->stmt,
1 + STMT_VINFO_REDUC_IDX (orig_stmt_info));
/* Search the pattern def sequence and the main pattern stmt. Note
for vect_recog_pattern. */
static void
-vect_pattern_recog_1 (vect_recog_func *recog_func, stmt_vec_info stmt_info)
+vect_pattern_recog_1 (vec_info *vinfo,
+ vect_recog_func *recog_func, stmt_vec_info stmt_info)
{
- vec_info *vinfo = stmt_info->vinfo;
gimple *pattern_stmt;
loop_vec_info loop_vinfo;
tree pattern_vectype;
gimple_stmt_iterator gsi;
for (gsi = gsi_start (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info));
!gsi_end_p (gsi); gsi_next (&gsi))
- vect_pattern_recog_1 (recog_func, vinfo->lookup_stmt (gsi_stmt (gsi)));
+ vect_pattern_recog_1 (vinfo, recog_func,
+ vinfo->lookup_stmt (gsi_stmt (gsi)));
return;
}
gcc_assert (!STMT_VINFO_PATTERN_DEF_SEQ (stmt_info));
- pattern_stmt = recog_func->fn (stmt_info, &pattern_vectype);
+ pattern_stmt = recog_func->fn (vinfo, stmt_info, &pattern_vectype);
if (!pattern_stmt)
{
/* Clear any half-formed pattern definition sequence. */
return;
}
- loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
gcc_assert (pattern_vectype);
/* Found a vectorizable pattern. */
recog_func->name, pattern_stmt);
/* Mark the stmts that are involved in the pattern. */
- vect_mark_pattern_stmts (stmt_info, pattern_stmt, pattern_vectype);
+ vect_mark_pattern_stmts (vinfo, stmt_info, pattern_stmt, pattern_vectype);
/* Patterns cannot be vectorized using SLP, because they change the order of
computation. */
stmt_vec_info stmt_info = vinfo->lookup_stmt (gsi_stmt (si));
/* Scan over all generic vect_recog_xxx_pattern functions. */
for (j = 0; j < NUM_PATTERNS; j++)
- vect_pattern_recog_1 (&vect_vect_recog_func_ptrs[j],
+ vect_pattern_recog_1 (vinfo, &vect_vect_recog_func_ptrs[j],
stmt_info);
}
}
/* Scan over all generic vect_recog_xxx_pattern functions. */
for (j = 0; j < NUM_PATTERNS; j++)
- vect_pattern_recog_1 (&vect_vect_recog_func_ptrs[j], stmt_info);
+ vect_pattern_recog_1 (vinfo,
+ &vect_vect_recog_func_ptrs[j], stmt_info);
}
}
}
Used only for BB vectorization. */
static bool
-vect_update_all_shared_vectypes (vec<stmt_vec_info> stmts)
+vect_update_all_shared_vectypes (vec_info *vinfo, vec<stmt_vec_info> stmts)
{
tree vectype, nunits_vectype;
- if (!vect_get_vector_types_for_stmt (stmts[0], &vectype,
+ if (!vect_get_vector_types_for_stmt (vinfo, stmts[0], &vectype,
&nunits_vectype, stmts.length ()))
return false;
vect_build_slp_tree. */
static bool
-vect_record_max_nunits (stmt_vec_info stmt_info, unsigned int group_size,
+vect_record_max_nunits (vec_info *vinfo, stmt_vec_info stmt_info,
+ unsigned int group_size,
tree vectype, poly_uint64 *max_nunits)
{
if (!vectype)
before adjusting *max_nunits for basic-block vectorization. */
poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
unsigned HOST_WIDE_INT const_nunits;
- if (STMT_VINFO_BB_VINFO (stmt_info)
+ if (is_a <bb_vec_info> (vinfo)
&& (!nunits.is_constant (&const_nunits)
|| const_nunits > group_size))
{
to (B1 <= A1 ? X1 : Y1); or be inverted to (A1 < B1) ? Y1 : X1. */
static bool
-vect_build_slp_tree_1 (unsigned char *swap,
+vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
vec<stmt_vec_info> stmts, unsigned int group_size,
poly_uint64 *max_nunits, bool *matches,
bool *two_operators)
stmt_vec_info stmt_info;
FOR_EACH_VEC_ELT (stmts, i, stmt_info)
{
- vec_info *vinfo = stmt_info->vinfo;
gimple *stmt = stmt_info->stmt;
swap[i] = 0;
matches[i] = false;
}
tree nunits_vectype;
- if (!vect_get_vector_types_for_stmt (stmt_info, &vectype,
+ if (!vect_get_vector_types_for_stmt (vinfo, stmt_info, &vectype,
&nunits_vectype, group_size)
|| (nunits_vectype
- && !vect_record_max_nunits (stmt_info, group_size,
+ && !vect_record_max_nunits (vinfo, stmt_info, group_size,
nunits_vectype, max_nunits)))
{
/* Fatal mismatch. */
{
tree scalar_type = TREE_TYPE (PHI_RESULT (stmt));
tree vectype = get_vectype_for_scalar_type (vinfo, scalar_type);
- if (!vect_record_max_nunits (stmt_info, group_size, vectype, max_nunits))
+ if (!vect_record_max_nunits (vinfo, stmt_info, group_size, vectype,
+ max_nunits))
return NULL;
vect_def_type def_type = STMT_VINFO_DEF_TYPE (stmt_info);
bool two_operators = false;
unsigned char *swap = XALLOCAVEC (unsigned char, group_size);
- if (!vect_build_slp_tree_1 (swap, stmts, group_size,
+ if (!vect_build_slp_tree_1 (vinfo, swap, stmts, group_size,
&this_max_nunits, matches, &two_operators))
return NULL;
if (SLP_TREE_DEF_TYPE (grandchild) != vect_external_def)
break;
if (!grandchild
- && vect_update_all_shared_vectypes (oprnd_info->def_stmts))
+ && vect_update_all_shared_vectypes (vinfo,
+ oprnd_info->def_stmts))
{
/* Roll back. */
this_tree_size = old_tree_size;
scalar version. */
&& !is_pattern_stmt_p (stmt_info)
&& !oprnd_info->any_pattern
- && vect_update_all_shared_vectypes (oprnd_info->def_stmts))
+ && vect_update_all_shared_vectypes (vinfo, oprnd_info->def_stmts))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
break;
if (!grandchild
&& (vect_update_all_shared_vectypes
- (oprnd_info->def_stmts)))
+ (vinfo, oprnd_info->def_stmts)))
{
/* Roll back. */
this_tree_size = old_tree_size;
SLP_INSTN are supported. */
static bool
-vect_supported_load_permutation_p (slp_instance slp_instn)
+vect_supported_load_permutation_p (vec_info *vinfo, slp_instance slp_instn)
{
unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
unsigned int i, j, k, next;
/* In basic block vectorization we allow any subchain of an interleaving
chain.
FORNOW: not supported in loop SLP because of realignment complications. */
- if (STMT_VINFO_BB_VINFO (stmt_info))
+ if (is_a <bb_vec_info> (vinfo))
{
/* Check whether the loads in an instance form a subchain and thus
no permutation is necessary. */
/* Verify the permutation can be generated. */
vec<tree> tem;
unsigned n_perms;
- if (!vect_transform_slp_perm_load (node, tem, NULL,
+ if (!vect_transform_slp_perm_load (vinfo, node, tem, NULL,
1, slp_instn, true, &n_perms))
{
if (dump_enabled_p ())
poly_uint64 test_vf
= force_common_multiple (SLP_INSTANCE_UNROLLING_FACTOR (slp_instn),
LOOP_VINFO_VECT_FACTOR
- (STMT_VINFO_LOOP_VINFO (stmt_info)));
+ (as_a <loop_vec_info> (vinfo)));
FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
if (node->load_permutation.exists ()
- && !vect_transform_slp_perm_load (node, vNULL, NULL, test_vf,
+ && !vect_transform_slp_perm_load (vinfo, node, vNULL, NULL, test_vf,
slp_instn, true, &n_perms))
return false;
if (loads_permuted)
{
- if (!vect_supported_load_permutation_p (new_instance))
+ if (!vect_supported_load_permutation_p (vinfo, new_instance))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
can't be SLPed) in the tree rooted at NODE. Mark such stmts as HYBRID. */
static void
-vect_detect_hybrid_slp_stmts (slp_tree node, unsigned i, slp_vect_type stype,
+vect_detect_hybrid_slp_stmts (loop_vec_info loop_vinfo, slp_tree node,
+ unsigned i, slp_vect_type stype,
hash_map<slp_tree, unsigned> &visited)
{
stmt_vec_info stmt_vinfo = SLP_TREE_SCALAR_STMTS (node)[i];
gimple *use_stmt;
stmt_vec_info use_vinfo;
slp_tree child;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
int j;
/* We need to union stype over the incoming graph edges but we still
FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), j, child)
if (SLP_TREE_DEF_TYPE (child) != vect_external_def
&& SLP_TREE_DEF_TYPE (child) != vect_constant_def)
- vect_detect_hybrid_slp_stmts (child, i, stype, visited);
+ vect_detect_hybrid_slp_stmts (loop_vinfo, child, i, stype, visited);
}
/* Helpers for vect_detect_hybrid_slp walking pattern stmt uses. */
if (j < SLP_INSTANCE_GROUP_SIZE (instance))
{
any = true;
- vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance),
+ vect_detect_hybrid_slp_stmts (loop_vinfo,
+ SLP_INSTANCE_TREE (instance),
j, pure_slp, visited);
}
if (!any)
}
bool dummy;
- return vect_analyze_stmt (stmt_info, &dummy, node, node_instance, cost_vec);
+ return vect_analyze_stmt (vinfo, stmt_info, &dummy,
+ node, node_instance, cost_vec);
}
/* Try to build NODE from scalars, returning true on success.
visited.add (*x);
i++;
- add_stmt_costs (vinfo->target_cost_data, &cost_vec);
+ add_stmt_costs (vinfo, vinfo->target_cost_data, &cost_vec);
cost_vec.release ();
}
}
update LIFE according to uses of NODE. */
static void
-vect_bb_slp_scalar_cost (basic_block bb,
+vect_bb_slp_scalar_cost (vec_info *vinfo, basic_block bb,
slp_tree node, vec<bool, va_heap> *life,
stmt_vector_for_cost *cost_vec,
hash_set<slp_tree> &visited)
FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt_info)
{
gimple *stmt = stmt_info->stmt;
- vec_info *vinfo = stmt_info->vinfo;
ssa_op_iter op_iter;
def_operand_p def_p;
/* Do not directly pass LIFE to the recursive call, copy it to
confine changes in the callee to the current child/subtree. */
subtree_life.safe_splice (*life);
- vect_bb_slp_scalar_cost (bb, child, &subtree_life, cost_vec,
+ vect_bb_slp_scalar_cost (vinfo, bb, child, &subtree_life, cost_vec,
visited);
subtree_life.truncate (0);
}
{
auto_vec<bool, 20> life;
life.safe_grow_cleared (SLP_INSTANCE_GROUP_SIZE (instance));
- vect_bb_slp_scalar_cost (BB_VINFO_BB (bb_vinfo),
+ vect_bb_slp_scalar_cost (bb_vinfo, BB_VINFO_BB (bb_vinfo),
SLP_INSTANCE_TREE (instance),
&life, &scalar_costs, visited);
}
void *target_cost_data = init_cost (NULL);
- add_stmt_costs (target_cost_data, &scalar_costs);
+ add_stmt_costs (bb_vinfo, target_cost_data, &scalar_costs);
scalar_costs.release ();
unsigned dummy;
finish_cost (target_cost_data, &dummy, &scalar_cost, &dummy);
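
The scalar-cost path follows the same convention: the vec_info travels with the cost data through the whole init/add/finish sequence. A condensed sketch of that sequence under the new interface (illustrative; scalar_costs is the stmt_vector_for_cost filled above, and the destroy_cost_data call is assumed from the usual cost-API lifecycle):

    void *data = init_cost (NULL);
    add_stmt_costs (bb_vinfo, data, &scalar_costs); /* vec_info now explicit */
    unsigned prologue, body, epilogue;
    finish_cost (data, &prologue, &body, &epilogue);
    destroy_cost_data (data);
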
dependence in the SLP instances. */
for (i = 0; BB_VINFO_SLP_INSTANCES (bb_vinfo).iterate (i, &instance); )
{
- if (! vect_slp_analyze_and_verify_instance_alignment (instance)
- || ! vect_slp_analyze_instance_dependence (instance))
+ if (! vect_slp_analyze_and_verify_instance_alignment (bb_vinfo, instance)
+ || ! vect_slp_analyze_instance_dependence (bb_vinfo, instance))
{
slp_tree node = SLP_INSTANCE_TREE (instance);
stmt_vec_info stmt_info = SLP_TREE_SCALAR_STMTS (node)[0];
/* Return true if the vector type of the boolean constant that is operand
   OP_NUM of STMT_VINFO is a boolean vector. */
static bool
-vect_mask_constant_operand_p (stmt_vec_info stmt_vinfo, unsigned op_num)
+vect_mask_constant_operand_p (vec_info *vinfo,
+ stmt_vec_info stmt_vinfo, unsigned op_num)
{
enum tree_code code = gimple_expr_code (stmt_vinfo->stmt);
tree op, vectype;
gassign *stmt = as_a <gassign *> (stmt_vinfo->stmt);
op = gimple_assign_rhs1 (stmt);
- if (!vect_is_simple_use (op, stmt_vinfo->vinfo, &dt, &vectype))
+ if (!vect_is_simple_use (op, vinfo, &dt, &vectype))
gcc_unreachable ();
return !vectype || VECTOR_BOOLEAN_TYPE_P (vectype);
op = TREE_OPERAND (cond, 0);
}
- if (!vect_is_simple_use (op, stmt_vinfo->vinfo, &dt, &vectype))
+ if (!vect_is_simple_use (op, vinfo, &dt, &vectype))
gcc_unreachable ();
return !vectype || VECTOR_BOOLEAN_TYPE_P (vectype);
operands. */
static void
-vect_get_constant_vectors (slp_tree slp_node, unsigned op_num,
+vect_get_constant_vectors (vec_info *vinfo,
+ slp_tree slp_node, unsigned op_num,
vec<tree> *vec_oprnds)
{
slp_tree op_node = SLP_TREE_CHILDREN (slp_node)[op_num];
stmt_vec_info stmt_vinfo = SLP_TREE_SCALAR_STMTS (slp_node)[0];
- vec_info *vinfo = stmt_vinfo->vinfo;
unsigned HOST_WIDE_INT nunits;
tree vec_cst;
unsigned j, number_of_places_left_in_vector;
/* Check if vector type is a boolean vector. */
tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op))
- && vect_mask_constant_operand_p (stmt_vinfo, op_num))
+ && vect_mask_constant_operand_p (vinfo, stmt_vinfo, op_num))
vector_type = truth_type_for (stmt_vectype);
else
vector_type = get_vectype_for_scalar_type (vinfo, TREE_TYPE (op), op_node);
constant_p = false;
if (TREE_CODE (orig_op) == SSA_NAME
&& !SSA_NAME_IS_DEFAULT_DEF (orig_op)
- && STMT_VINFO_BB_VINFO (stmt_vinfo)
- && (STMT_VINFO_BB_VINFO (stmt_vinfo)->bb
+ && is_a <bb_vec_info> (vinfo)
+ && (as_a <bb_vec_info> (vinfo)->bb
== gimple_bb (SSA_NAME_DEF_STMT (orig_op))))
place_after_defs = true;
stmt_vec_info last_stmt_info
= vect_find_last_scalar_stmt_in_slp (slp_node);
gsi = gsi_for_stmt (last_stmt_info->stmt);
- init = vect_init_vector (stmt_vinfo, vec_cst, vector_type,
- &gsi);
+ init = vect_init_vector (vinfo, stmt_vinfo, vec_cst,
+ vector_type, &gsi);
}
else
- init = vect_init_vector (stmt_vinfo, vec_cst, vector_type,
- NULL);
+ init = vect_init_vector (vinfo, stmt_vinfo, vec_cst,
+ vector_type, NULL);
if (ctor_seq != NULL)
{
gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (init));
vect_get_slp_vect_defs () to retrieve them. */
void
-vect_get_slp_defs (slp_tree slp_node, vec<vec<tree> > *vec_oprnds, unsigned n)
+vect_get_slp_defs (vec_info *vinfo,
+ slp_tree slp_node, vec<vec<tree> > *vec_oprnds, unsigned n)
{
if (n == -1U)
n = SLP_TREE_CHILDREN (slp_node).length ();
vect_get_slp_vect_defs (child, &vec_defs);
}
else
- vect_get_constant_vectors (slp_node, i, &vec_defs);
+ vect_get_constant_vectors (vinfo, slp_node, i, &vec_defs);
vec_oprnds->quick_push (vec_defs);
}
SLP_NODE_INSTANCE. */
bool
-vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
+vect_transform_slp_perm_load (vec_info *vinfo,
+ slp_tree node, vec<tree> dr_chain,
gimple_stmt_iterator *gsi, poly_uint64 vf,
slp_instance slp_node_instance, bool analyze_only,
unsigned *n_perms)
{
stmt_vec_info stmt_info = SLP_TREE_SCALAR_STMTS (node)[0];
- vec_info *vinfo = stmt_info->vinfo;
int vec_index = 0;
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
first_vec, second_vec,
mask_vec);
perm_stmt_info
- = vect_finish_stmt_generation (stmt_info, perm_stmt,
+ = vect_finish_stmt_generation (vinfo,
+ stmt_info, perm_stmt,
gsi);
}
else
/* Vectorize SLP instance tree in postorder. */
static void
-vect_schedule_slp_instance (slp_tree node, slp_instance instance)
+vect_schedule_slp_instance (vec_info *vinfo,
+ slp_tree node, slp_instance instance)
{
gimple_stmt_iterator si;
stmt_vec_info stmt_info;
return;
FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
- vect_schedule_slp_instance (child, instance);
+ vect_schedule_slp_instance (vinfo, child, instance);
/* Push SLP node def-type to stmts. */
FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
vec<stmt_vec_info> v1;
unsigned j;
tree tmask = NULL_TREE;
- vect_transform_stmt (stmt_info, &si, node, instance);
+ vect_transform_stmt (vinfo, stmt_info, &si, node, instance);
v0 = SLP_TREE_VEC_STMTS (node).copy ();
SLP_TREE_VEC_STMTS (node).truncate (0);
gimple_assign_set_rhs_code (stmt, ocode);
- vect_transform_stmt (stmt_info, &si, node, instance);
+ vect_transform_stmt (vinfo, stmt_info, &si, node, instance);
gimple_assign_set_rhs_code (stmt, code0);
v1 = SLP_TREE_VEC_STMTS (node).copy ();
SLP_TREE_VEC_STMTS (node).truncate (0);
gimple_assign_lhs (v1[j]->stmt),
tmask);
SLP_TREE_VEC_STMTS (node).quick_push
- (vect_finish_stmt_generation (stmt_info, vstmt, &si));
+ (vect_finish_stmt_generation (vinfo, stmt_info, vstmt, &si));
}
v0.release ();
v1.release ();
}
}
if (!done_p)
- vect_transform_stmt (stmt_info, &si, node, instance);
+ vect_transform_stmt (vinfo, stmt_info, &si, node, instance);
/* Restore stmt def-types. */
FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
SLP instances may refer to the same scalar stmt. */
static void
-vect_remove_slp_scalar_calls (slp_tree node, hash_set<slp_tree> &visited)
+vect_remove_slp_scalar_calls (vec_info *vinfo,
+ slp_tree node, hash_set<slp_tree> &visited)
{
gimple *new_stmt;
gimple_stmt_iterator gsi;
return;
FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
- vect_remove_slp_scalar_calls (child, visited);
+ vect_remove_slp_scalar_calls (vinfo, child, visited);
FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt_info)
{
lhs = gimple_call_lhs (stmt);
new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
gsi = gsi_for_stmt (stmt);
- stmt_info->vinfo->replace_stmt (&gsi, stmt_info, new_stmt);
+ vinfo->replace_stmt (&gsi, stmt_info, new_stmt);
SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
}
}
static void
-vect_remove_slp_scalar_calls (slp_tree node)
+vect_remove_slp_scalar_calls (vec_info *vinfo, slp_tree node)
{
hash_set<slp_tree> visited;
- vect_remove_slp_scalar_calls (node, visited);
+ vect_remove_slp_scalar_calls (vinfo, node, visited);
}
/* Vectorize the instance root. */
{
slp_tree node = SLP_INSTANCE_TREE (instance);
/* Schedule the tree of INSTANCE. */
- vect_schedule_slp_instance (node, instance);
+ vect_schedule_slp_instance (vinfo, node, instance);
if (SLP_INSTANCE_ROOT_STMT (instance))
vectorize_slp_instance_root_stmt (node, instance);
stmts starting from the SLP tree root if they have no
uses. */
if (is_a <loop_vec_info> (vinfo))
- vect_remove_slp_scalar_calls (root);
+ vect_remove_slp_scalar_calls (vinfo, root);
for (j = 0; SLP_TREE_SCALAR_STMTS (root).iterate (j, &store_info)
&& j < SLP_INSTANCE_GROUP_SIZE (instance); j++)
/* Return TRUE iff the given statement is in an inner loop relative to
the loop being vectorized. */
bool
-stmt_in_inner_loop_p (class _stmt_vec_info *stmt_info)
+stmt_in_inner_loop_p (vec_info *vinfo, class _stmt_vec_info *stmt_info)
{
gimple *stmt = STMT_VINFO_STMT (stmt_info);
basic_block bb = gimple_bb (stmt);
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
class loop* loop;
if (!loop_vinfo)
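
Deriving the loop context from the passed vinfo rather than from the statement makes the basic-block case fall out naturally: dyn_cast yields a null pointer for a bb_vec_info, so the query answers "not in an inner loop" without consulting the statement at all. Sketch of the idiom (illustrative):

    /* dyn_cast returns NULL when VINFO is really a bb_vec_info.  */
    if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
      loop = LOOP_VINFO_LOOP (loop_vinfo); /* loop vectorization */
    else
      return false;                        /* BB SLP: no loop nest */
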
with scalar destination SCALAR_DEST. */
static tree
-read_vector_array (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
+read_vector_array (vec_info *vinfo,
+ stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
tree scalar_dest, tree array, unsigned HOST_WIDE_INT n)
{
tree vect_type, vect, vect_name, array_ref;
new_stmt = gimple_build_assign (vect, array_ref);
vect_name = make_ssa_name (vect, new_stmt);
gimple_assign_set_lhs (new_stmt, vect_name);
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
return vect_name;
}
The store is part of the vectorization of STMT_INFO. */
static void
-write_vector_array (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
+write_vector_array (vec_info *vinfo,
+ stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
tree vect, tree array, unsigned HOST_WIDE_INT n)
{
tree array_ref;
NULL_TREE, NULL_TREE);
new_stmt = gimple_build_assign (array_ref, vect);
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
}
/* PTR is a pointer to an array of type TYPE. Return a representation
Emit the clobber before *GSI. */
static void
-vect_clobber_variable (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
- tree var)
+vect_clobber_variable (vec_info *vinfo, stmt_vec_info stmt_info,
+ gimple_stmt_iterator *gsi, tree var)
{
tree clobber = build_clobber (TREE_TYPE (var));
gimple *new_stmt = gimple_build_assign (var, clobber);
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
}
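
vect_clobber_variable emits "var = {CLOBBER}" so later passes know a vectorizer-created temporary (typically an array carrying a load/store-lanes or simd-clone result) is dead. A hypothetical call under the new signature, mirroring the simd-clone code later in this patch:

    /* Mark the temporary dead once its contents have been copied out.
       'tmp_array' is illustrative only.  */
    vect_clobber_variable (vinfo, stmt_info, gsi, tmp_array);
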
/* Utility functions used by vect_mark_stmts_to_be_vectorized. */
/* Compute the prologue cost for invariant or constant operands. */
static unsigned
-vect_prologue_cost_for_slp_op (slp_tree node, stmt_vec_info stmt_info,
+vect_prologue_cost_for_slp_op (vec_info *vinfo,
+ slp_tree node, stmt_vec_info stmt_info,
unsigned opno, enum vect_def_type dt,
stmt_vector_for_cost *cost_vec)
{
- vec_info *vinfo = stmt_info->vinfo;
gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0]->stmt;
tree op = gimple_op (stmt, opno);
unsigned prologue_cost = 0;
be generated for the single vector op. We will handle that shortly. */
static void
-vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
+vect_model_simple_cost (vec_info *vinfo,
+ stmt_vec_info stmt_info, int ncopies,
enum vect_def_type *dt,
int ndts,
slp_tree node,
enum vect_def_type dt;
if (!op || op == lhs)
continue;
- if (vect_is_simple_use (op, stmt_info->vinfo, &dt)
+ if (vect_is_simple_use (op, vinfo, &dt)
&& (dt == vect_constant_def || dt == vect_external_def))
- prologue_cost += vect_prologue_cost_for_slp_op (node, stmt_info,
+ prologue_cost += vect_prologue_cost_for_slp_op (vinfo, node,
+ stmt_info,
i, dt, cost_vec);
}
}
has the overhead of the grouped access attributed to it. */
static void
-vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
+vect_model_store_cost (vec_info *vinfo, stmt_vec_info stmt_info, int ncopies,
enum vect_def_type dt,
vect_memory_access_type memory_access_type,
vec_load_store_type vls_type, slp_tree slp_node,
if (vls_type == VLS_STORE_INVARIANT)
{
if (slp_node)
- prologue_cost += vect_prologue_cost_for_slp_op (slp_node, stmt_info,
+ prologue_cost += vect_prologue_cost_for_slp_op (vinfo, slp_node,
+ stmt_info,
1, dt, cost_vec);
else
prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
scalar_store, stmt_info, 0, vect_body);
}
else
- vect_get_store_cost (stmt_info, ncopies, &inside_cost, cost_vec);
+ vect_get_store_cost (vinfo, stmt_info, ncopies, &inside_cost, cost_vec);
if (memory_access_type == VMAT_ELEMENTWISE
|| memory_access_type == VMAT_STRIDED_SLP)
/* Calculate cost of DR's memory access. */
void
-vect_get_store_cost (stmt_vec_info stmt_info, int ncopies,
+vect_get_store_cost (vec_info *vinfo, stmt_vec_info stmt_info, int ncopies,
unsigned int *inside_cost,
stmt_vector_for_cost *body_cost_vec)
{
dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
int alignment_support_scheme
- = vect_supportable_dr_alignment (dr_info, false);
+ = vect_supportable_dr_alignment (vinfo, dr_info, false);
switch (alignment_support_scheme)
{
access scheme chosen. */
static void
-vect_model_load_cost (stmt_vec_info stmt_info, unsigned ncopies,
+vect_model_load_cost (vec_info *vinfo,
+ stmt_vec_info stmt_info, unsigned ncopies,
vect_memory_access_type memory_access_type,
slp_instance instance,
slp_tree slp_node,
unsigned assumed_nunits
= vect_nunits_for_cost (STMT_VINFO_VECTYPE (first_stmt_info));
unsigned slp_vf = (ncopies * assumed_nunits) / instance->group_size;
- vect_transform_slp_perm_load (slp_node, vNULL, NULL,
+ vect_transform_slp_perm_load (vinfo, slp_node, vNULL, NULL,
slp_vf, instance, true,
&n_perms);
inside_cost += record_stmt_cost (cost_vec, n_perms, vec_perm,
scalar_load, stmt_info, 0, vect_body);
}
else
- vect_get_load_cost (stmt_info, ncopies, first_stmt_p,
+ vect_get_load_cost (vinfo, stmt_info, ncopies, first_stmt_p,
&inside_cost, &prologue_cost,
cost_vec, cost_vec, true);
if (memory_access_type == VMAT_ELEMENTWISE
/* Calculate cost of DR's memory access. */
void
-vect_get_load_cost (stmt_vec_info stmt_info, int ncopies,
+vect_get_load_cost (vec_info *vinfo, stmt_vec_info stmt_info, int ncopies,
bool add_realign_cost, unsigned int *inside_cost,
unsigned int *prologue_cost,
stmt_vector_for_cost *prologue_cost_vec,
{
dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
int alignment_support_scheme
- = vect_supportable_dr_alignment (dr_info, false);
+ = vect_supportable_dr_alignment (vinfo, dr_info, false);
switch (alignment_support_scheme)
{
the loop preheader for the vectorized stmt STMT_VINFO. */
static void
-vect_init_vector_1 (stmt_vec_info stmt_vinfo, gimple *new_stmt,
+vect_init_vector_1 (vec_info *vinfo, stmt_vec_info stmt_vinfo, gimple *new_stmt,
gimple_stmt_iterator *gsi)
{
if (gsi)
- vect_finish_stmt_generation (stmt_vinfo, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_vinfo, new_stmt, gsi);
else
{
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
if (loop_vinfo)
{
}
else
{
- bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
+ bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
basic_block bb;
gimple_stmt_iterator gsi_bb_start;
It will be used in the vectorization of STMT_INFO. */
tree
-vect_init_vector (stmt_vec_info stmt_info, tree val, tree type,
+vect_init_vector (vec_info *vinfo, stmt_vec_info stmt_info, tree val, tree type,
gimple_stmt_iterator *gsi)
{
gimple *init_stmt;
new_temp = make_ssa_name (TREE_TYPE (type));
init_stmt = gimple_build_assign (new_temp, COND_EXPR,
val, true_val, false_val);
- vect_init_vector_1 (stmt_info, init_stmt, gsi);
+ vect_init_vector_1 (vinfo, stmt_info, init_stmt, gsi);
val = new_temp;
}
}
{
init_stmt = gsi_stmt (gsi2);
gsi_remove (&gsi2, false);
- vect_init_vector_1 (stmt_info, init_stmt, gsi);
+ vect_init_vector_1 (vinfo, stmt_info, init_stmt, gsi);
}
}
}
new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
init_stmt = gimple_build_assign (new_temp, val);
- vect_init_vector_1 (stmt_info, init_stmt, gsi);
+ vect_init_vector_1 (vinfo, stmt_info, init_stmt, gsi);
return new_temp;
}
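
Typical use of vect_init_vector under the new signature: splat an invariant scalar into a vector, either hoisted out of the loop (GSI == NULL) or immediately before *GSI. Sketch (illustrative; scalar_op and vectype stand for the operand and its chosen vector type):

    /* A NULL gsi places the init on the loop preheader edge for loop
       vectorization, or at the start of the region for BB SLP.  */
    tree vec_def = vect_init_vector (vinfo, stmt_info, scalar_op,
				     vectype, NULL);
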
vector invariant. */
tree
-vect_get_vec_def_for_operand (tree op, stmt_vec_info stmt_vinfo, tree vectype)
+vect_get_vec_def_for_operand (vec_info *vinfo,
+ tree op, stmt_vec_info stmt_vinfo, tree vectype)
{
gimple *def_stmt;
enum vect_def_type dt;
bool is_simple_use;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
vector_type = get_vectype_for_scalar_type (loop_vinfo, TREE_TYPE (op));
gcc_assert (vector_type);
- return vect_init_vector (stmt_vinfo, op, vector_type, NULL);
+ return vect_init_vector (vinfo, stmt_vinfo, op, vector_type, NULL);
}
else
return vect_get_vec_def_for_operand_1 (def_stmt_info, dt);
/* Get vectorized definitions for OP0 and OP1. */
void
-vect_get_vec_defs (tree op0, tree op1, stmt_vec_info stmt_info,
+vect_get_vec_defs (vec_info *vinfo, tree op0, tree op1, stmt_vec_info stmt_info,
vec<tree> *vec_oprnds0,
vec<tree> *vec_oprnds1,
slp_tree slp_node)
if (slp_node)
{
auto_vec<vec<tree> > vec_defs (SLP_TREE_CHILDREN (slp_node).length ());
- vect_get_slp_defs (slp_node, &vec_defs, op1 ? 2 : 1);
+ vect_get_slp_defs (vinfo, slp_node, &vec_defs, op1 ? 2 : 1);
*vec_oprnds0 = vec_defs[0];
if (op1)
*vec_oprnds1 = vec_defs[1];
tree vec_oprnd;
vec_oprnds0->create (1);
- vec_oprnd = vect_get_vec_def_for_operand (op0, stmt_info);
+ vec_oprnd = vect_get_vec_def_for_operand (vinfo, op0, stmt_info);
vec_oprnds0->quick_push (vec_oprnd);
if (op1)
{
vec_oprnds1->create (1);
- vec_oprnd = vect_get_vec_def_for_operand (op1, stmt_info);
+ vec_oprnd = vect_get_vec_def_for_operand (vinfo, op1, stmt_info);
vec_oprnds1->quick_push (vec_oprnd);
}
}
statement and create and return a stmt_vec_info for it. */
static stmt_vec_info
-vect_finish_stmt_generation_1 (stmt_vec_info stmt_info, gimple *vec_stmt)
+vect_finish_stmt_generation_1 (vec_info *vinfo,
+ stmt_vec_info stmt_info, gimple *vec_stmt)
{
- vec_info *vinfo = stmt_info->vinfo;
-
stmt_vec_info vec_stmt_info = vinfo->add_stmt (vec_stmt);
if (dump_enabled_p ())
stmt_vec_info for VEC_STMT. */
stmt_vec_info
-vect_finish_replace_stmt (stmt_vec_info stmt_info, gimple *vec_stmt)
+vect_finish_replace_stmt (vec_info *vinfo,
+ stmt_vec_info stmt_info, gimple *vec_stmt)
{
gimple *scalar_stmt = vect_orig_stmt (stmt_info)->stmt;
gcc_assert (gimple_get_lhs (scalar_stmt) == gimple_get_lhs (vec_stmt));
gimple_stmt_iterator gsi = gsi_for_stmt (scalar_stmt);
gsi_replace (&gsi, vec_stmt, true);
- return vect_finish_stmt_generation_1 (stmt_info, vec_stmt);
+ return vect_finish_stmt_generation_1 (vinfo, stmt_info, vec_stmt);
}
/* Add VEC_STMT to the vectorized implementation of STMT_INFO and insert it
before *GSI. Create and return a stmt_vec_info for VEC_STMT. */
stmt_vec_info
-vect_finish_stmt_generation (stmt_vec_info stmt_info, gimple *vec_stmt,
+vect_finish_stmt_generation (vec_info *vinfo,
+ stmt_vec_info stmt_info, gimple *vec_stmt,
gimple_stmt_iterator *gsi)
{
gcc_assert (gimple_code (stmt_info->stmt) != GIMPLE_LABEL);
}
}
gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
- return vect_finish_stmt_generation_1 (stmt_info, vec_stmt);
+ return vect_finish_stmt_generation_1 (vinfo, stmt_info, vec_stmt);
}
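
The common emission pattern after this change: build the gimple statement, then hand it to vect_finish_stmt_generation together with the explicit vinfo, which inserts it before *GSI and records a stmt_vec_info for it. Sketch (illustrative; new_rhs is a placeholder):

    gassign *new_stmt
      = gimple_build_assign (make_ssa_name (vectype), new_rhs);
    stmt_vec_info new_stmt_info
      = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
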
/* We want to vectorize a call to combined function CFN with function
}
-static tree permute_vec_elements (tree, tree, tree, stmt_vec_info,
+static tree permute_vec_elements (vec_info *, tree, tree, tree, stmt_vec_info,
gimple_stmt_iterator *);
/* Check whether a load or store statement in the loop described by
is negative, 0 if it is zero, and 1 if it is greater than zero. */
static int
-compare_step_with_zero (stmt_vec_info stmt_info)
+compare_step_with_zero (vec_info *vinfo, stmt_vec_info stmt_info)
{
dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
- return tree_int_cst_compare (vect_dr_behavior (dr_info)->step,
+ return tree_int_cst_compare (vect_dr_behavior (vinfo, dr_info)->step,
size_zero_node);
}
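
compare_step_with_zero reduces to the sign of the data reference's step, and the load/store analysis dispatches on that sign. A condensed sketch of the dispatch (the real code in get_load_store_type below handles more cases):

    int cmp = compare_step_with_zero (vinfo, stmt_info);
    if (cmp < 0)       /* consecutive elements, decreasing addresses */
      *memory_access_type = get_negative_load_store_type (vinfo, stmt_info,
							  vectype, vls_type,
							  ncopies);
    else if (cmp == 0) /* invariant address; only valid for loads */
      *memory_access_type = VMAT_INVARIANT;
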
accesses consecutive elements with a negative step. */
static vect_memory_access_type
-get_negative_load_store_type (stmt_vec_info stmt_info, tree vectype,
+get_negative_load_store_type (vec_info *vinfo,
+ stmt_vec_info stmt_info, tree vectype,
vec_load_store_type vls_type,
unsigned int ncopies)
{
return VMAT_ELEMENTWISE;
}
- alignment_support_scheme = vect_supportable_dr_alignment (dr_info, false);
+ alignment_support_scheme = vect_supportable_dr_alignment (vinfo,
+ dr_info, false);
if (alignment_support_scheme != dr_aligned
&& alignment_support_scheme != dr_unaligned_supported)
{
as well as at the end. */
static bool
-get_group_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp,
+get_group_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
+ tree vectype, bool slp,
bool masked_p, vec_load_store_type vls_type,
vect_memory_access_type *memory_access_type,
gather_scatter_info *gs_info)
{
- vec_info *vinfo = stmt_info->vinfo;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
class loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
dr_vec_info *first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
if (overrun_p
&& !masked_p
&& (((alignment_support_scheme
- = vect_supportable_dr_alignment (first_dr_info, false)))
+ = vect_supportable_dr_alignment (vinfo,
+ first_dr_info, false)))
== dr_aligned
|| alignment_support_scheme == dr_unaligned_supported)
&& known_eq (nunits, (group_size - gap) * 2)
"Peeling for outer loop is not supported\n");
return false;
}
- int cmp = compare_step_with_zero (stmt_info);
+ int cmp = compare_step_with_zero (vinfo, stmt_info);
if (cmp < 0)
*memory_access_type = get_negative_load_store_type
- (stmt_info, vectype, vls_type, 1);
+ (vinfo, stmt_info, vectype, vls_type, 1);
else
{
gcc_assert (!loop_vinfo || cmp > 0);
if (!STMT_VINFO_STRIDED_P (first_stmt_info)
&& (can_overrun_p || !would_overrun_p)
- && compare_step_with_zero (stmt_info) > 0)
+ && compare_step_with_zero (vinfo, stmt_info) > 0)
{
/* First cope with the degenerate case of a single-element
vector. */
NCOPIES is the number of vector statements that will be needed. */
static bool
-get_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp,
+get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
+ tree vectype, bool slp,
bool masked_p, vec_load_store_type vls_type,
unsigned int ncopies,
vect_memory_access_type *memory_access_type,
gather_scatter_info *gs_info)
{
- vec_info *vinfo = stmt_info->vinfo;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
{
}
else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
{
- if (!get_group_load_store_type (stmt_info, vectype, slp, masked_p,
+ if (!get_group_load_store_type (vinfo, stmt_info, vectype, slp, masked_p,
vls_type, memory_access_type, gs_info))
return false;
}
}
else
{
- int cmp = compare_step_with_zero (stmt_info);
+ int cmp = compare_step_with_zero (vinfo, stmt_info);
if (cmp < 0)
*memory_access_type = get_negative_load_store_type
- (stmt_info, vectype, vls_type, ncopies);
+ (vinfo, stmt_info, vectype, vls_type, ncopies);
else if (cmp == 0)
{
gcc_assert (vls_type == VLS_LOAD);
in *MASK_VECTYPE_OUT. */
static bool
-vect_check_scalar_mask (stmt_vec_info stmt_info, tree mask,
+vect_check_scalar_mask (vec_info *vinfo, stmt_vec_info stmt_info, tree mask,
vect_def_type *mask_dt_out,
tree *mask_vectype_out)
{
- vec_info *vinfo = stmt_info->vinfo;
if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
{
if (dump_enabled_p ())
enum vect_def_type mask_dt;
tree mask_vectype;
- if (!vect_is_simple_use (mask, stmt_info->vinfo, &mask_dt, &mask_vectype))
+ if (!vect_is_simple_use (mask, vinfo, &mask_dt, &mask_vectype))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
*RHS_VECTYPE_OUT and the type of the store in *VLS_TYPE_OUT. */
static bool
-vect_check_store_rhs (stmt_vec_info stmt_info, tree rhs,
+vect_check_store_rhs (vec_info *vinfo, stmt_vec_info stmt_info, tree rhs,
vect_def_type *rhs_dt_out, tree *rhs_vectype_out,
vec_load_store_type *vls_type_out)
{
enum vect_def_type rhs_dt;
tree rhs_vectype;
- if (!vect_is_simple_use (rhs, stmt_info->vinfo, &rhs_dt, &rhs_vectype))
+ if (!vect_is_simple_use (rhs, vinfo, &rhs_dt, &rhs_vectype))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
floats are interpreted as a bitmask. */
static tree
-vect_build_all_ones_mask (stmt_vec_info stmt_info, tree masktype)
+vect_build_all_ones_mask (vec_info *vinfo,
+ stmt_vec_info stmt_info, tree masktype)
{
if (TREE_CODE (masktype) == INTEGER_TYPE)
return build_int_cst (masktype, -1);
{
tree mask = build_int_cst (TREE_TYPE (masktype), -1);
mask = build_vector_from_val (masktype, mask);
- return vect_init_vector (stmt_info, mask, masktype, NULL);
+ return vect_init_vector (vinfo, stmt_info, mask, masktype, NULL);
}
else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
{
real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
tree mask = build_real (TREE_TYPE (masktype), r);
mask = build_vector_from_val (masktype, mask);
- return vect_init_vector (stmt_info, mask, masktype, NULL);
+ return vect_init_vector (vinfo, stmt_info, mask, masktype, NULL);
}
gcc_unreachable ();
}
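
For integer mask elements "all ones" is simply -1; for floating-point mask types the all-ones value is produced by pushing an all-ones bit pattern through real_from_target, e.g. 0xffffffff for a 32-bit element (a NaN when read as a float), so the hardware sees a fully set mask. Hypothetical call:

    /* Build the <-1, -1, ...> mask for an unconditional gather.  */
    tree ones = vect_build_all_ones_mask (vinfo, stmt_info, masktype);
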
STMT_INFO as a gather load. */
static tree
-vect_build_zero_merge_argument (stmt_vec_info stmt_info, tree vectype)
+vect_build_zero_merge_argument (vec_info *vinfo,
+ stmt_vec_info stmt_info, tree vectype)
{
tree merge;
if (TREE_CODE (TREE_TYPE (vectype)) == INTEGER_TYPE)
else
gcc_unreachable ();
merge = build_vector_from_val (vectype, merge);
- return vect_init_vector (stmt_info, merge, vectype, NULL);
+ return vect_init_vector (vinfo, stmt_info, merge, vectype, NULL);
}
/* Build a gather load call while vectorizing STMT_INFO. Insert new
MASK is null. */
static void
-vect_build_gather_load_calls (stmt_vec_info stmt_info,
+vect_build_gather_load_calls (vec_info *vinfo, stmt_vec_info stmt_info,
gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt,
gather_scatter_info *gs_info,
tree mask)
{
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
if (!mask)
{
- src_op = vect_build_zero_merge_argument (stmt_info, rettype);
- mask_op = vect_build_all_ones_mask (stmt_info, masktype);
+ src_op = vect_build_zero_merge_argument (vinfo, stmt_info, rettype);
+ mask_op = vect_build_all_ones_mask (vinfo, stmt_info, masktype);
}
for (int j = 0; j < ncopies; ++j)
{
tree op, var;
if (modifier == WIDEN && (j & 1))
- op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
+ op = permute_vec_elements (vinfo, vec_oprnd0, vec_oprnd0,
perm_mask, stmt_info, gsi);
else if (j == 0)
op = vec_oprnd0
- = vect_get_vec_def_for_operand (gs_info->offset, stmt_info);
+ = vect_get_vec_def_for_operand (vinfo, gs_info->offset, stmt_info);
else
op = vec_oprnd0 = vect_get_vec_def_for_stmt_copy (loop_vinfo,
vec_oprnd0);
var = vect_get_new_ssa_name (idxtype, vect_simple_var);
op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
op = var;
}
if (mask)
{
if (mask_perm_mask && (j & 1))
- mask_op = permute_vec_elements (mask_op, mask_op,
+ mask_op = permute_vec_elements (vinfo, mask_op, mask_op,
mask_perm_mask, stmt_info, gsi);
else
{
if (j == 0)
- vec_mask = vect_get_vec_def_for_operand (mask, stmt_info);
+ vec_mask = vect_get_vec_def_for_operand (vinfo, mask, stmt_info);
else if (modifier != NARROW || (j & 1) == 0)
vec_mask = vect_get_vec_def_for_stmt_copy (loop_vinfo,
vec_mask);
mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
gassign *new_stmt
= gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
mask_op = var;
}
}
= gimple_build_assign (var, (j & 1) ? VEC_UNPACK_HI_EXPR
: VEC_UNPACK_LO_EXPR,
mask_op);
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
mask_op = var;
}
src_op = mask_op;
mask_arg = build1 (VIEW_CONVERT_EXPR, utype, mask_op);
gassign *new_stmt
= gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_arg);
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
mask_arg = var;
if (!useless_type_conversion_p (real_masktype, utype))
{
<= TYPE_PRECISION (real_masktype));
var = vect_get_new_ssa_name (real_masktype, vect_scalar_var);
new_stmt = gimple_build_assign (var, NOP_EXPR, mask_arg);
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
mask_arg = var;
}
src_op = build_zero_cst (srctype);
TYPE_VECTOR_SUBPARTS (rettype)));
op = vect_get_new_ssa_name (rettype, vect_simple_var);
gimple_call_set_lhs (new_call, op);
- vect_finish_stmt_generation (stmt_info, new_call, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_call, gsi);
var = make_ssa_name (vec_dest);
op = build1 (VIEW_CONVERT_EXPR, vectype, op);
gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
}
else
{
var = make_ssa_name (vec_dest, new_call);
gimple_call_set_lhs (new_call, var);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_call, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info, new_call, gsi);
}
if (modifier == NARROW)
prev_res = var;
continue;
}
- var = permute_vec_elements (prev_res, var, perm_mask,
+ var = permute_vec_elements (vinfo, prev_res, var, perm_mask,
stmt_info, gsi);
new_stmt_info = loop_vinfo->lookup_def (var);
}
containing loop. */
static void
-vect_get_gather_scatter_ops (class loop *loop, stmt_vec_info stmt_info,
+vect_get_gather_scatter_ops (vec_info *vinfo,
+ class loop *loop, stmt_vec_info stmt_info,
gather_scatter_info *gs_info,
tree *dataref_ptr, tree *vec_offset)
{
new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
gcc_assert (!new_bb);
}
- *vec_offset = vect_get_vec_def_for_operand (gs_info->offset, stmt_info,
+ *vec_offset = vect_get_vec_def_for_operand (vinfo, gs_info->offset, stmt_info,
gs_info->offset_vectype);
}
vectorization. */
static tree
-vect_get_data_ptr_increment (dr_vec_info *dr_info, tree aggr_type,
+vect_get_data_ptr_increment (vec_info *vinfo,
+ dr_vec_info *dr_info, tree aggr_type,
vect_memory_access_type memory_access_type)
{
if (memory_access_type == VMAT_INVARIANT)
return size_zero_node;
tree iv_step = TYPE_SIZE_UNIT (aggr_type);
- tree step = vect_dr_behavior (dr_info)->step;
+ tree step = vect_dr_behavior (vinfo, dr_info)->step;
if (tree_int_cst_sgn (step) == -1)
iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
return iv_step;
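
Worked example for the increment computation: a contiguous V4SI access has TYPE_SIZE_UNIT (aggr_type) equal to 16 bytes, so the data pointer is bumped by 16 per copy; if DR_STEP is negative the same magnitude is negated, giving a -16 bump, and invariant accesses get a zero increment. Sketch of a call site (mirrors the scan-store code later in the patch):

    tree bump = vect_get_data_ptr_increment (vinfo, dr_info, aggr_type,
					     VMAT_CONTIGUOUS);
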
/* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}. */
static bool
-vectorizable_bswap (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
+vectorizable_bswap (vec_info *vinfo,
+ stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt, slp_tree slp_node,
tree vectype_in, stmt_vector_for_cost *cost_vec)
{
tree op, vectype;
gcall *stmt = as_a <gcall *> (stmt_info->stmt);
- vec_info *vinfo = stmt_info->vinfo;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
unsigned ncopies;
op = gimple_call_arg (stmt, 0);
{
/* Handle uses. */
if (j == 0)
- vect_get_vec_defs (op, NULL, stmt_info, &vec_oprnds, NULL, slp_node);
+ vect_get_vec_defs (vinfo, op, NULL, stmt_info, &vec_oprnds, NULL,
+ slp_node);
else
vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds, NULL);
tree tem = make_ssa_name (char_vectype);
new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
char_vectype, vop));
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
tree tem2 = make_ssa_name (char_vectype);
new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR,
tem, tem, bswap_vconst);
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
tem = make_ssa_name (vectype);
new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
vectype, tem2));
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
if (slp_node)
SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
}
Return true if STMT_INFO is vectorizable in this way. */
static bool
-vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
+vectorizable_call (vec_info *vinfo,
+ stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt, slp_tree slp_node,
stmt_vector_for_cost *cost_vec)
{
tree vectype_out, vectype_in;
poly_uint64 nunits_in;
poly_uint64 nunits_out;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
- vec_info *vinfo = stmt_info->vinfo;
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
+ bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
tree fndecl, new_temp, rhs_type;
enum vect_def_type dt[4]
= { vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type,
if ((int) i == mask_opno)
{
- if (!vect_check_scalar_mask (stmt_info, op, &dt[i], &vectypes[i]))
+ if (!vect_check_scalar_mask (vinfo,
+ stmt_info, op, &dt[i], &vectypes[i]))
return false;
continue;
}
&& (gimple_call_builtin_p (stmt, BUILT_IN_BSWAP16)
|| gimple_call_builtin_p (stmt, BUILT_IN_BSWAP32)
|| gimple_call_builtin_p (stmt, BUILT_IN_BSWAP64)))
- return vectorizable_bswap (stmt_info, gsi, vec_stmt, slp_node,
+ return vectorizable_bswap (vinfo, stmt_info, gsi, vec_stmt, slp_node,
vectype_in, cost_vec);
else
{
{
STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
DUMP_VECT_SCOPE ("vectorizable_call");
- vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
+ vect_model_simple_cost (vinfo, stmt_info,
+ ncopies, dt, ndts, slp_node, cost_vec);
if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
record_stmt_cost (cost_vec, ncopies / 2,
vec_promote_demote, stmt_info, 0, vect_body);
auto_vec<vec<tree> > vec_defs (nargs);
vec<tree> vec_oprnds0;
- vect_get_slp_defs (slp_node, &vec_defs);
+ vect_get_slp_defs (vinfo, slp_node, &vec_defs);
vec_oprnds0 = vec_defs[0];
/* Arguments are ready. Create the new vector stmt. */
= gimple_build_call_internal_vec (ifn, vargs);
gimple_call_set_lhs (call, half_res);
gimple_call_set_nothrow (call, true);
- vect_finish_stmt_generation (stmt_info, call, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
if ((i & 1) == 0)
{
prev_res = half_res;
= gimple_build_assign (new_temp, convert_code,
prev_res, half_res);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt,
- gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info,
+ new_stmt, gsi);
}
else
{
gimple_call_set_lhs (call, new_temp);
gimple_call_set_nothrow (call, true);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, call, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info,
+ call, gsi);
}
SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
}
op = gimple_call_arg (stmt, i);
if (j == 0)
vec_oprnd0
- = vect_get_vec_def_for_operand (op, stmt_info, vectypes[i]);
+ = vect_get_vec_def_for_operand (vinfo,
+ op, stmt_info, vectypes[i]);
else
vec_oprnd0
= vect_get_vec_def_for_stmt_copy (vinfo, orig_vargs[i]);
tree new_var
= vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
gimple *init_stmt = gimple_build_assign (new_var, cst);
- vect_init_vector_1 (stmt_info, init_stmt, NULL);
+ vect_init_vector_1 (vinfo, stmt_info, init_stmt, NULL);
new_temp = make_ssa_name (vec_dest);
gimple *new_stmt = gimple_build_assign (new_temp, new_var);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
}
else if (modifier == NARROW)
{
gcall *call = gimple_build_call_internal_vec (ifn, vargs);
gimple_call_set_lhs (call, half_res);
gimple_call_set_nothrow (call, true);
- vect_finish_stmt_generation (stmt_info, call, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
if ((j & 1) == 0)
{
prev_res = half_res;
gassign *new_stmt = gimple_build_assign (new_temp, convert_code,
prev_res, half_res);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
}
else
{
gimple_call_set_lhs (call, new_temp);
gimple_call_set_nothrow (call, true);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, call, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
}
if (j == (modifier == NARROW ? 1 : 0))
auto_vec<vec<tree> > vec_defs (nargs);
vec<tree> vec_oprnds0;
- vect_get_slp_defs (slp_node, &vec_defs);
+ vect_get_slp_defs (vinfo, slp_node, &vec_defs);
vec_oprnds0 = vec_defs[0];
/* Arguments are ready. Create the new vector stmt. */
gimple_call_set_lhs (call, new_temp);
gimple_call_set_nothrow (call, true);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, call, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
}
if (j == 0)
{
vec_oprnd0
- = vect_get_vec_def_for_operand (op, stmt_info,
+ = vect_get_vec_def_for_operand (vinfo, op, stmt_info,
vectypes[i]);
vec_oprnd1
= vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0);
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_call_set_lhs (new_stmt, new_temp);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
if (j == 0)
STMT_VINFO_VEC_STMT (stmt_info) = new_stmt_info;
Return true if STMT_INFO is vectorizable in this way. */
static bool
-vectorizable_simd_clone_call (stmt_vec_info stmt_info,
+vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt, slp_tree slp_node,
stmt_vector_for_cost *)
stmt_vec_info prev_stmt_info;
tree vectype;
unsigned int nunits;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
- vec_info *vinfo = stmt_info->vinfo;
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
+ bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
class loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
tree fndecl, new_temp;
int ncopies, j;
}
STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
DUMP_VECT_SCOPE ("vectorizable_simd_clone_call");
-/* vect_model_simple_cost (stmt_info, ncopies, dt, slp_node, cost_vec); */
+/* vect_model_simple_cost (vinfo, stmt_info, ncopies,
+ dt, slp_node, cost_vec); */
return true;
}
gcc_assert ((k & (k - 1)) == 0);
if (m == 0)
vec_oprnd0
- = vect_get_vec_def_for_operand (op, stmt_info);
+ = vect_get_vec_def_for_operand (vinfo, op, stmt_info);
else
{
vec_oprnd0 = arginfo[i].op;
gassign *new_stmt
= gimple_build_assign (make_ssa_name (atype),
vec_oprnd0);
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info,
+ new_stmt, gsi);
vargs.safe_push (gimple_assign_lhs (new_stmt));
}
else
{
if (m == 0 && l == 0)
vec_oprnd0
- = vect_get_vec_def_for_operand (op, stmt_info);
+ = vect_get_vec_def_for_operand (vinfo,
+ op, stmt_info);
else
vec_oprnd0
= vect_get_vec_def_for_stmt_copy (vinfo,
gassign *new_stmt
= gimple_build_assign (make_ssa_name (atype),
vec_oprnd0);
- vect_finish_stmt_generation (stmt_info, new_stmt,
- gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info,
+ new_stmt, gsi);
vargs.safe_push (gimple_assign_lhs (new_stmt));
}
}
gassign *new_stmt
= gimple_build_assign (new_temp, code,
arginfo[i].op, tcst);
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
vargs.safe_push (new_temp);
}
break;
gimple_call_set_lhs (new_call, new_temp);
}
stmt_vec_info new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_call, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info, new_call, gsi);
if (vec_dest)
{
gimple *new_stmt
= gimple_build_assign (make_ssa_name (vectype), t);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info,
+ new_stmt, gsi);
if (j == 0 && l == 0)
STMT_VINFO_VEC_STMT (stmt_info)
}
if (ratype)
- vect_clobber_variable (stmt_info, gsi, new_temp);
+ vect_clobber_variable (vinfo, stmt_info, gsi, new_temp);
continue;
}
else if (simd_clone_subparts (vectype) > nunits)
gimple *new_stmt
= gimple_build_assign (make_ssa_name (rtype), tem);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt,
- gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info,
+ new_stmt, gsi);
CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
gimple_assign_lhs (new_stmt));
}
- vect_clobber_variable (stmt_info, gsi, new_temp);
+ vect_clobber_variable (vinfo, stmt_info, gsi, new_temp);
}
else
CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
gimple *new_stmt
= gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
if ((unsigned) j == k - 1)
STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
gimple *new_stmt
= gimple_build_assign (make_ssa_name (vec_dest), t);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
- vect_clobber_variable (stmt_info, gsi, new_temp);
+ = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
+ vect_clobber_variable (vinfo, stmt_info, gsi, new_temp);
}
}
STMT_INFO is the original scalar stmt that we are vectorizing. */
static gimple *
-vect_gen_widened_results_half (enum tree_code code,
+vect_gen_widened_results_half (vec_info *vinfo, enum tree_code code,
tree vec_oprnd0, tree vec_oprnd1, int op_type,
tree vec_dest, gimple_stmt_iterator *gsi,
stmt_vec_info stmt_info)
new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_temp);
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
return new_stmt;
}
The vectors are collected into VEC_OPRNDS. */
static void
-vect_get_loop_based_defs (tree *oprnd, stmt_vec_info stmt_info,
+vect_get_loop_based_defs (vec_info *vinfo, tree *oprnd, stmt_vec_info stmt_info,
vec<tree> *vec_oprnds, int multi_step_cvt)
{
- vec_info *vinfo = stmt_info->vinfo;
tree vec_oprnd;
/* Get first vector operand. */
/* All the vector operands except the very first one (that is, the
   scalar operand) are stmt copies. */
if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
- vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt_info);
+ vec_oprnd = vect_get_vec_def_for_operand (vinfo, *oprnd, stmt_info);
else
vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, *oprnd);
/* For conversion in multiple steps, continue to get operands
recursively. */
if (multi_step_cvt)
- vect_get_loop_based_defs (oprnd, stmt_info, vec_oprnds,
+ vect_get_loop_based_defs (vinfo, oprnd, stmt_info, vec_oprnds,
multi_step_cvt - 1);
}
recursively. */
static void
-vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
+vect_create_vectorized_demotion_stmts (vec_info *vinfo, vec<tree> *vec_oprnds,
int multi_step_cvt,
stmt_vec_info stmt_info,
vec<tree> vec_dsts,
new_tmp = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_tmp);
stmt_vec_info new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
if (multi_step_cvt)
/* Store the resulting vector for next recursive call. */
/* At each level of recursion we have half of the operands we had at the
previous level. */
vec_oprnds->truncate ((i+1)/2);
- vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
+ vect_create_vectorized_demotion_stmts (vinfo, vec_oprnds,
+ multi_step_cvt - 1,
stmt_info, vec_dsts, gsi,
slp_node, VEC_PACK_TRUNC_EXPR,
prev_stmt_info);
call the function recursively. */
static void
-vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
+vect_create_vectorized_promotion_stmts (vec_info *vinfo,
+ vec<tree> *vec_oprnds0,
vec<tree> *vec_oprnds1,
stmt_vec_info stmt_info, tree vec_dest,
gimple_stmt_iterator *gsi,
vop1 = NULL_TREE;
/* Generate the two halves of promotion operation. */
- new_stmt1 = vect_gen_widened_results_half (code1, vop0, vop1,
+ new_stmt1 = vect_gen_widened_results_half (vinfo, code1, vop0, vop1,
op_type, vec_dest, gsi,
stmt_info);
- new_stmt2 = vect_gen_widened_results_half (code2, vop0, vop1,
+ new_stmt2 = vect_gen_widened_results_half (vinfo, code2, vop0, vop1,
op_type, vec_dest, gsi,
stmt_info);
if (is_gimple_call (new_stmt1))
Return true if STMT_INFO is vectorizable in this way. */
static bool
-vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
+vectorizable_conversion (vec_info *vinfo,
+ stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt, slp_tree slp_node,
stmt_vector_for_cost *cost_vec)
{
tree scalar_dest;
tree op0, op1 = NULL_TREE;
tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
tree new_temp;
vec<tree> vec_oprnds0 = vNULL;
vec<tree> vec_oprnds1 = vNULL;
tree vop0;
- bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
- vec_info *vinfo = stmt_info->vinfo;
+ bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
int multi_step_cvt = 0;
vec<tree> interm_types = vNULL;
tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
return false;
case WIDEN:
- if (supportable_widening_operation (code, stmt_info, vectype_out,
+ if (supportable_widening_operation (vinfo, code, stmt_info, vectype_out,
vectype_in, &code1, &code2,
&multi_step_cvt, &interm_types))
{
cvt_type, &codecvt1))
goto unsupported;
}
- else if (!supportable_widening_operation (code, stmt_info,
+ else if (!supportable_widening_operation (vinfo, code, stmt_info,
vectype_out, cvt_type,
&codecvt1, &codecvt2,
&multi_step_cvt,
else
gcc_assert (multi_step_cvt == 0);
- if (supportable_widening_operation (NOP_EXPR, stmt_info, cvt_type,
+ if (supportable_widening_operation (vinfo, NOP_EXPR, stmt_info,
+ cvt_type,
vectype_in, &code1, &code2,
&multi_step_cvt, &interm_types))
{
if (modifier == NONE)
{
STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
- vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node,
+ vect_model_simple_cost (vinfo, stmt_info, ncopies, dt, ndts, slp_node,
cost_vec);
}
else if (modifier == NARROW)
for (j = 0; j < ncopies; j++)
{
if (j == 0)
- vect_get_vec_defs (op0, NULL, stmt_info, &vec_oprnds0,
+ vect_get_vec_defs (vinfo, op0, NULL, stmt_info, &vec_oprnds0,
NULL, slp_node);
else
vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds0, NULL);
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_temp);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
if (slp_node)
SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
vec_oprnds1.quick_push (vec_oprnd1);
- vect_get_vec_defs (op0, NULL_TREE, stmt_info,
+ vect_get_vec_defs (vinfo, op0, NULL_TREE, stmt_info,
&vec_oprnds0, NULL, slp_node);
}
else
- vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0,
+ vect_get_vec_defs (vinfo, op0, op1, stmt_info, &vec_oprnds0,
&vec_oprnds1, slp_node);
}
else
{
- vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt_info);
+ vec_oprnd0 = vect_get_vec_def_for_operand (vinfo,
+ op0, stmt_info);
vec_oprnds0.quick_push (vec_oprnd0);
if (op_type == binary_op)
{
vec_oprnd1 = op1;
else
vec_oprnd1
- = vect_get_vec_def_for_operand (op1, stmt_info);
+ = vect_get_vec_def_for_operand (vinfo,
+ op1, stmt_info);
vec_oprnds1.quick_push (vec_oprnd1);
}
}
c1 = codecvt1;
c2 = codecvt2;
}
- vect_create_vectorized_promotion_stmts (&vec_oprnds0,
+ vect_create_vectorized_promotion_stmts (vinfo, &vec_oprnds0,
&vec_oprnds1, stmt_info,
this_dest, gsi,
c1, c2, op_type);
gassign *new_stmt
= gimple_build_assign (new_temp, codecvt1, vop0);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info,
+ new_stmt, gsi);
}
else
new_stmt_info = vinfo->lookup_def (vop0);
{
/* Handle uses. */
if (slp_node)
- vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
- slp_node);
+ vect_get_vec_defs (vinfo, op0, NULL_TREE, stmt_info, &vec_oprnds0,
+ NULL, slp_node);
else
{
vec_oprnds0.truncate (0);
- vect_get_loop_based_defs (&last_oprnd, stmt_info, &vec_oprnds0,
+ vect_get_loop_based_defs (vinfo,
+ &last_oprnd, stmt_info, &vec_oprnds0,
vect_pow2 (multi_step_cvt) - 1);
}
new_temp = make_ssa_name (vec_dest);
gassign *new_stmt
= gimple_build_assign (new_temp, codecvt1, vop0);
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
vec_oprnds0[i] = new_temp;
}
- vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
+ vect_create_vectorized_demotion_stmts (vinfo, &vec_oprnds0,
+ multi_step_cvt,
stmt_info, vec_dsts, gsi,
slp_node, code1,
&prev_stmt_info);
Return true if STMT_INFO is vectorizable in this way. */
static bool
-vectorizable_assignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
+vectorizable_assignment (vec_info *vinfo,
+ stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt, slp_tree slp_node,
stmt_vector_for_cost *cost_vec)
{
tree vec_dest;
tree scalar_dest;
tree op;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
tree new_temp;
enum vect_def_type dt[1] = {vect_unknown_def_type};
int ndts = 1;
int i, j;
vec<tree> vec_oprnds = vNULL;
tree vop;
- bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
- vec_info *vinfo = stmt_info->vinfo;
+ bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
stmt_vec_info prev_stmt_info = NULL;
enum tree_code code;
tree vectype_in;
STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
DUMP_VECT_SCOPE ("vectorizable_assignment");
if (!vect_nop_conversion_p (stmt_info))
- vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node,
+ vect_model_simple_cost (vinfo, stmt_info, ncopies, dt, ndts, slp_node,
cost_vec);
return true;
}
{
/* Handle uses. */
if (j == 0)
- vect_get_vec_defs (op, NULL, stmt_info, &vec_oprnds, NULL, slp_node);
+ vect_get_vec_defs (vinfo, op, NULL, stmt_info, &vec_oprnds, NULL,
+ slp_node);
else
vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds, NULL);
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_temp);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
if (slp_node)
SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
}
Return true if STMT_INFO is vectorizable in this way. */
static bool
-vectorizable_shift (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
+vectorizable_shift (vec_info *vinfo,
+ stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt, slp_tree slp_node,
stmt_vector_for_cost *cost_vec)
{
tree op0, op1 = NULL;
tree vec_oprnd1 = NULL_TREE;
tree vectype;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
enum tree_code code;
machine_mode vec_mode;
tree new_temp;
tree vop0, vop1;
unsigned int k;
bool scalar_shift_arg = true;
- bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
- vec_info *vinfo = stmt_info->vinfo;
+ bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
bool incompatible_op1_vectype_p = false;
if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
{
STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
DUMP_VECT_SCOPE ("vectorizable_shift");
- vect_model_simple_cost (stmt_info, ncopies, dt,
+ vect_model_simple_cost (vinfo, stmt_info, ncopies, dt,
scalar_shift_arg ? 1 : ndts, slp_node, cost_vec);
return true;
}
{
op1 = fold_convert (TREE_TYPE (vectype), op1);
if (dt[1] != vect_constant_def)
- op1 = vect_init_vector (stmt_info, op1,
+ op1 = vect_init_vector (vinfo, stmt_info, op1,
TREE_TYPE (vectype), NULL);
}
/* If the argument was the same in all lanes create
the correctly typed vector shift amount directly. */
op1 = fold_convert (TREE_TYPE (vectype), op1);
- op1 = vect_init_vector (stmt_info, op1, TREE_TYPE (vectype),
+ op1 = vect_init_vector (vinfo, stmt_info,
+ op1, TREE_TYPE (vectype),
!loop_vinfo ? gsi : NULL);
- vec_oprnd1 = vect_init_vector (stmt_info, op1, vectype,
+ vec_oprnd1 = vect_init_vector (vinfo, stmt_info, op1, vectype,
!loop_vinfo ? gsi : NULL);
vec_oprnds1.create (slp_node->vec_stmts_size);
for (k = 0; k < slp_node->vec_stmts_size; k++)
(a special case for certain kind of vector shifts); otherwise,
operand 1 should be of a vector type (the usual case). */
if (vec_oprnd1)
- vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
- slp_node);
+ vect_get_vec_defs (vinfo, op0, NULL_TREE, stmt_info,
+ &vec_oprnds0, NULL, slp_node);
else
- vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0, &vec_oprnds1,
- slp_node);
+ vect_get_vec_defs (vinfo, op0, op1, stmt_info,
+ &vec_oprnds0, &vec_oprnds1, slp_node);
}
else
vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds0, &vec_oprnds1);
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_temp);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
if (slp_node)
SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
}
Return true if STMT_INFO is vectorizable in this way. */
static bool
-vectorizable_operation (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
+vectorizable_operation (vec_info *vinfo,
+ stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt, slp_tree slp_node,
stmt_vector_for_cost *cost_vec)
{
tree scalar_dest;
tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
tree vectype;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
enum tree_code code, orig_code;
machine_mode vec_mode;
tree new_temp;
vec<tree> vec_oprnds1 = vNULL;
vec<tree> vec_oprnds2 = vNULL;
tree vop0, vop1, vop2;
- bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
- vec_info *vinfo = stmt_info->vinfo;
+ bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
return false;
STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
DUMP_VECT_SCOPE ("vectorizable_operation");
- vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
+ vect_model_simple_cost (vinfo, stmt_info,
+ ncopies, dt, ndts, slp_node, cost_vec);
return true;
}
if (j == 0)
{
if (op_type == binary_op)
- vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0, &vec_oprnds1,
- slp_node);
+ vect_get_vec_defs (vinfo, op0, op1, stmt_info,
+ &vec_oprnds0, &vec_oprnds1, slp_node);
else if (op_type == ternary_op)
{
if (slp_node)
{
auto_vec<vec<tree> > vec_defs(3);
- vect_get_slp_defs (slp_node, &vec_defs);
+ vect_get_slp_defs (vinfo, slp_node, &vec_defs);
vec_oprnds0 = vec_defs[0];
vec_oprnds1 = vec_defs[1];
vec_oprnds2 = vec_defs[2];
}
else
{
- vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0,
+ vect_get_vec_defs (vinfo, op0, op1, stmt_info, &vec_oprnds0,
&vec_oprnds1, NULL);
- vect_get_vec_defs (op2, NULL_TREE, stmt_info, &vec_oprnds2,
- NULL, NULL);
+ vect_get_vec_defs (vinfo, op2, NULL_TREE, stmt_info,
+ &vec_oprnds2, NULL, NULL);
}
}
else
- vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
- slp_node);
+ vect_get_vec_defs (vinfo, op0, NULL_TREE, stmt_info, &vec_oprnds0,
+ NULL, slp_node);
}
else
{
gimple_call_set_lhs (call, new_temp);
gimple_call_set_nothrow (call, true);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, call, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
}
else
{
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_temp);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
if (vec_cvt_dest)
{
new_temp = build1 (VIEW_CONVERT_EXPR, vectype_out, new_temp);
new_temp);
new_temp = make_ssa_name (vec_cvt_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_temp);
- new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ new_stmt_info = vect_finish_stmt_generation (vinfo, stmt_info,
+ new_stmt, gsi);
}
}
if (slp_node)
Check magic stores for #pragma omp scan {in,ex}clusive reductions. */
static bool
-check_scan_store (stmt_vec_info stmt_info, tree vectype,
+check_scan_store (vec_info *vinfo, stmt_vec_info stmt_info, tree vectype,
enum vect_def_type rhs_dt, bool slp, tree mask,
vect_memory_access_type memory_access_type)
{
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
tree ref_type;
|| loop_vinfo == NULL
|| LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
|| STMT_VINFO_GROUPED_ACCESS (stmt_info)
- || !integer_zerop (get_dr_vinfo_offset (dr_info))
+ || !integer_zerop (get_dr_vinfo_offset (vinfo, dr_info))
|| !integer_zerop (DR_INIT (dr_info->dr))
|| !(ref_type = reference_alias_ptr_type (DR_REF (dr_info->dr)))
|| !alias_sets_conflict_p (get_alias_set (vectype),
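
For reference, the kind of construct check_scan_store vets; the loads and
stores of the scan variable R are the "magic" accesses mentioned above
(example only):

  int
  inclusive_scan (int *a, int *b, int n)
  {
    int r = 0;
  #pragma omp simd reduction (inscan, +:r)
    for (int i = 0; i < n; i++)
      {
        r += a[i];               /* Input phase.  */
  #pragma omp scan inclusive (r)
        b[i] = r;                /* Scan phase: running sum so far.  */
      }
    return r;
  }
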
Handle only the transformation; checking is done in check_scan_store. */
static bool
-vectorizable_scan_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
+vectorizable_scan_store (vec_info *vinfo,
+ stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt, int ncopies)
{
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
tree ref_type = reference_alias_ptr_type (DR_REF (dr_info->dr));
- vec_info *vinfo = stmt_info->vinfo;
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
if (dump_enabled_p ())
tree vec_oprnd3 = NULL_TREE;
tree dataref_ptr = DR_BASE_ADDRESS (dr_info->dr);
tree dataref_offset = build_int_cst (ref_type, 0);
- tree bump = vect_get_data_ptr_increment (dr_info, vectype, VMAT_CONTIGUOUS);
+ tree bump = vect_get_data_ptr_increment (vinfo, dr_info,
+ vectype, VMAT_CONTIGUOUS);
tree ldataref_ptr = NULL_TREE;
tree orig = NULL_TREE;
if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) == 4 && !inscan_var_store)
stmt_vec_info new_stmt_info;
if (j == 0)
{
- vec_oprnd1 = vect_get_vec_def_for_operand (*init, stmt_info);
+ vec_oprnd1 = vect_get_vec_def_for_operand (vinfo, *init, stmt_info);
if (ldataref_ptr == NULL)
- vec_oprnd2 = vect_get_vec_def_for_operand (rhs1, stmt_info);
- vec_oprnd3 = vect_get_vec_def_for_operand (rhs2, stmt_info);
+ vec_oprnd2 = vect_get_vec_def_for_operand (vinfo, rhs1, stmt_info);
+ vec_oprnd3 = vect_get_vec_def_for_operand (vinfo, rhs2, stmt_info);
orig = vec_oprnd3;
}
else
dataref_offset);
vect_copy_ref_info (data_ref, DR_REF (load1_dr_info->dr));
gimple *g = gimple_build_assign (vec_oprnd2, data_ref);
- new_stmt_info = vect_finish_stmt_generation (stmt_info, g, gsi);
+ new_stmt_info = vect_finish_stmt_generation (vinfo, stmt_info, g, gsi);
if (prev_stmt_info == NULL)
STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
else
!= scan_store_kind_perm))
? zero_vec : vec_oprnd1, v,
perms[i]);
- new_stmt_info = vect_finish_stmt_generation (stmt_info, g, gsi);
+ new_stmt_info = vect_finish_stmt_generation (vinfo, stmt_info, g, gsi);
if (prev_stmt_info == NULL)
STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
else
tree new_temp2 = make_ssa_name (vectype);
g = gimple_build_assign (new_temp2, VEC_COND_EXPR, vb.build (),
new_temp, vec_oprnd1);
- new_stmt_info = vect_finish_stmt_generation (stmt_info, g, gsi);
+ new_stmt_info = vect_finish_stmt_generation (vinfo, stmt_info,
+ g, gsi);
STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
prev_stmt_info = new_stmt_info;
new_temp = new_temp2;
tree new_temp2 = make_ssa_name (vectype);
g = gimple_build_assign (new_temp2, code, v, new_temp);
- new_stmt_info = vect_finish_stmt_generation (stmt_info, g, gsi);
+ new_stmt_info = vect_finish_stmt_generation (vinfo, stmt_info, g, gsi);
STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
prev_stmt_info = new_stmt_info;
tree new_temp = make_ssa_name (vectype);
gimple *g = gimple_build_assign (new_temp, code, orig, v);
- new_stmt_info = vect_finish_stmt_generation (stmt_info, g, gsi);
+ new_stmt_info = vect_finish_stmt_generation (vinfo, stmt_info, g, gsi);
STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
prev_stmt_info = new_stmt_info;
{
last_perm_arg = make_ssa_name (vectype);
g = gimple_build_assign (last_perm_arg, code, new_temp, vec_oprnd2);
- new_stmt_info = vect_finish_stmt_generation (stmt_info, g, gsi);
+ new_stmt_info = vect_finish_stmt_generation (vinfo, stmt_info, g, gsi);
STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
prev_stmt_info = new_stmt_info;
}
orig = make_ssa_name (vectype);
g = gimple_build_assign (orig, VEC_PERM_EXPR, last_perm_arg,
last_perm_arg, perms[units_log2]);
- new_stmt_info = vect_finish_stmt_generation (stmt_info, g, gsi);
+ new_stmt_info = vect_finish_stmt_generation (vinfo, stmt_info, g, gsi);
STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
prev_stmt_info = new_stmt_info;
dataref_offset);
vect_copy_ref_info (data_ref, DR_REF (dr_info->dr));
g = gimple_build_assign (data_ref, new_temp);
- new_stmt_info = vect_finish_stmt_generation (stmt_info, g, gsi);
+ new_stmt_info = vect_finish_stmt_generation (vinfo, stmt_info, g, gsi);
STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
prev_stmt_info = new_stmt_info;
}
vect_copy_ref_info (data_ref, DR_REF (dr_info->dr));
gimple *g = gimple_build_assign (data_ref, orig);
stmt_vec_info new_stmt_info
- = vect_finish_stmt_generation (stmt_info, g, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info, g, gsi);
STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
prev_stmt_info = new_stmt_info;
}
Return true if STMT_INFO is vectorizable in this way. */
static bool
-vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
+vectorizable_store (vec_info *vinfo,
+ stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt, slp_tree slp_node,
stmt_vector_for_cost *cost_vec)
{
tree op;
tree vec_oprnd = NULL_TREE;
tree elem_type;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
class loop *loop = NULL;
machine_mode vec_mode;
tree dummy;
vec<tree> vec_oprnds = vNULL;
bool slp = (slp_node != NULL);
unsigned int vec_num;
- bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
- vec_info *vinfo = stmt_info->vinfo;
+ bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
tree aggr_type;
gather_scatter_info gs_info;
poly_uint64 vf;
if (mask_index >= 0)
{
mask = gimple_call_arg (call, mask_index);
- if (!vect_check_scalar_mask (stmt_info, mask, &mask_dt,
+ if (!vect_check_scalar_mask (vinfo, stmt_info, mask, &mask_dt,
&mask_vectype))
return false;
}
return false;
}
- if (!vect_check_store_rhs (stmt_info, op, &rhs_dt, &rhs_vectype, &vls_type))
+ if (!vect_check_store_rhs (vinfo, stmt_info,
+ op, &rhs_dt, &rhs_vectype, &vls_type))
return false;
elem_type = TREE_TYPE (vectype);
return false;
vect_memory_access_type memory_access_type;
- if (!get_load_store_type (stmt_info, vectype, slp, mask, vls_type, ncopies,
- &memory_access_type, &gs_info))
+ if (!get_load_store_type (vinfo, stmt_info, vectype, slp, mask, vls_type,
+ ncopies, &memory_access_type, &gs_info))
return false;
if (mask)
if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) > 1 && !vec_stmt)
{
- if (!check_scan_store (stmt_info, vectype, rhs_dt, slp, mask,
+ if (!check_scan_store (vinfo, stmt_info, vectype, rhs_dt, slp, mask,
memory_access_type))
return false;
}
memory_access_type, &gs_info, mask);
STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
- vect_model_store_cost (stmt_info, ncopies, rhs_dt, memory_access_type,
- vls_type, slp_node, cost_vec);
+ vect_model_store_cost (vinfo, stmt_info, ncopies, rhs_dt,
+ memory_access_type, vls_type, slp_node, cost_vec);
return true;
}
gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
if (mask == NULL_TREE)
{
mask_arg = build_int_cst (masktype, -1);
- mask_arg = vect_init_vector (stmt_info, mask_arg, masktype, NULL);
+ mask_arg = vect_init_vector (vinfo, stmt_info,
+ mask_arg, masktype, NULL);
}
scale = build_int_cst (scaletype, gs_info.scale);
{
if (j == 0)
{
- src = vec_oprnd1 = vect_get_vec_def_for_operand (op, stmt_info);
- op = vec_oprnd0 = vect_get_vec_def_for_operand (gs_info.offset,
+ src = vec_oprnd1 = vect_get_vec_def_for_operand (vinfo,
+ op, stmt_info);
+ op = vec_oprnd0 = vect_get_vec_def_for_operand (vinfo,
+ gs_info.offset,
stmt_info);
if (mask)
- mask_op = vec_mask = vect_get_vec_def_for_operand (mask,
+ mask_op = vec_mask = vect_get_vec_def_for_operand (vinfo, mask,
stmt_info);
}
else if (modifier != NONE && (j & 1))
src
= vec_oprnd1 = vect_get_vec_def_for_stmt_copy (vinfo,
vec_oprnd1);
- op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
- stmt_info, gsi);
+ op = permute_vec_elements (vinfo, vec_oprnd0, vec_oprnd0,
+ perm_mask, stmt_info, gsi);
if (mask)
mask_op
= vec_mask = vect_get_vec_def_for_stmt_copy (vinfo,
}
else if (modifier == NARROW)
{
- src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
- stmt_info, gsi);
+ src = permute_vec_elements (vinfo, vec_oprnd1, vec_oprnd1,
+ perm_mask, stmt_info, gsi);
op = vec_oprnd0 = vect_get_vec_def_for_stmt_copy (vinfo,
vec_oprnd0);
}
src = build1 (VIEW_CONVERT_EXPR, srctype, src);
gassign *new_stmt
= gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
src = var;
}
op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
gassign *new_stmt
= gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
op = var;
}
= gimple_build_assign (var, (j & 1) ? VEC_UNPACK_HI_EXPR
: VEC_UNPACK_LO_EXPR,
mask_op);
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
mask_arg = var;
}
tree optype = TREE_TYPE (mask_arg);
mask_arg = build1 (VIEW_CONVERT_EXPR, utype, mask_arg);
gassign *new_stmt
= gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_arg);
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
mask_arg = var;
if (!useless_type_conversion_p (masktype, utype))
{
<= TYPE_PRECISION (masktype));
var = vect_get_new_ssa_name (masktype, vect_scalar_var);
new_stmt = gimple_build_assign (var, NOP_EXPR, mask_arg);
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
mask_arg = var;
}
}
gcall *new_stmt
= gimple_build_call (gs_info.decl, 5, ptr, mask_arg, op, src, scale);
stmt_vec_info new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
if (prev_stmt_info == NULL)
STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
return true;
}
else if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) >= 3)
- return vectorizable_scan_store (stmt_info, gsi, vec_stmt, ncopies);
+ return vectorizable_scan_store (vinfo, stmt_info, gsi, vec_stmt, ncopies);
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
DR_GROUP_STORE_COUNT (DR_GROUP_FIRST_ELEMENT (stmt_info))++;
gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
gcc_assert (!nested_in_vect_loop_p (loop, stmt_info));
- dr_offset = get_dr_vinfo_offset (first_dr_info);
+ dr_offset = get_dr_vinfo_offset (vinfo, first_dr_info);
stride_base
= fold_build_pointer_plus
(DR_BASE_ADDRESS (first_dr_info->dr),
tree newoff = copy_ssa_name (running_off, NULL);
incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
running_off, pos);
- vect_finish_stmt_generation (stmt_info, incr, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, incr, gsi);
running_off = newoff;
}
unsigned int group_el = 0;
{
if (slp)
{
- vect_get_vec_defs (op, NULL_TREE, stmt_info,
+ vect_get_vec_defs (vinfo, op, NULL_TREE, stmt_info,
&vec_oprnds, NULL, slp_node);
vec_oprnd = vec_oprnds[0];
}
{
op = vect_get_store_rhs (next_stmt_info);
vec_oprnd = vect_get_vec_def_for_operand
- (op, next_stmt_info);
+ (vinfo, op, next_stmt_info);
}
}
else
gimple *pun
= gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
lvectype, vec_oprnd));
- vect_finish_stmt_generation (stmt_info, pun, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, pun, gsi);
vec_oprnd = tem;
}
for (i = 0; i < nstores; i++)
/* And store it to *running_off. */
assign = gimple_build_assign (newref, elem);
stmt_vec_info assign_info
- = vect_finish_stmt_generation (stmt_info, assign, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info,
+ assign, gsi);
group_el += lnel;
if (! slp
newoff = copy_ssa_name (running_off, NULL);
incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
running_off, stride_step);
- vect_finish_stmt_generation (stmt_info, incr, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, incr, gsi);
running_off = newoff;
group_el = 0;
alignment_support_scheme = dr_unaligned_supported;
else
alignment_support_scheme
- = vect_supportable_dr_alignment (first_dr_info, false);
+ = vect_supportable_dr_alignment (vinfo, first_dr_info, false);
gcc_assert (alignment_support_scheme);
vec_loop_masks *loop_masks
aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
else
aggr_type = vectype;
- bump = vect_get_data_ptr_increment (dr_info, aggr_type,
+ bump = vect_get_data_ptr_increment (vinfo, dr_info, aggr_type,
memory_access_type);
}
if (slp)
{
/* Get vectorized arguments for SLP_NODE. */
- vect_get_vec_defs (op, NULL_TREE, stmt_info, &vec_oprnds,
+ vect_get_vec_defs (vinfo, op, NULL_TREE, stmt_info, &vec_oprnds,
NULL, slp_node);
vec_oprnd = vec_oprnds[0];
and only one iteration of the loop will be executed. */
op = vect_get_store_rhs (next_stmt_info);
vec_oprnd = vect_get_vec_def_for_operand
- (op, next_stmt_info);
+ (vinfo, op, next_stmt_info);
dr_chain.quick_push (vec_oprnd);
oprnds.quick_push (vec_oprnd);
next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
}
if (mask)
- vec_mask = vect_get_vec_def_for_operand (mask, stmt_info,
+ vec_mask = vect_get_vec_def_for_operand (vinfo, mask, stmt_info,
mask_vectype);
}
&& !loop_masks
&& TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR
&& VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0))
- && integer_zerop (get_dr_vinfo_offset (first_dr_info))
+ && integer_zerop (get_dr_vinfo_offset (vinfo, first_dr_info))
&& integer_zerop (DR_INIT (first_dr_info->dr))
&& alias_sets_conflict_p (get_alias_set (aggr_type),
get_alias_set (TREE_TYPE (ref_type))))
dataref_offset = build_int_cst (ref_type, 0);
}
else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
- vect_get_gather_scatter_ops (loop, stmt_info, &gs_info,
+ vect_get_gather_scatter_ops (vinfo, loop, stmt_info, &gs_info,
&dataref_ptr, &vec_offset);
else
dataref_ptr
- = vect_create_data_ref_ptr (first_stmt_info, aggr_type,
+ = vect_create_data_ref_ptr (vinfo, first_stmt_info, aggr_type,
simd_lane_access_p ? loop : NULL,
offset, &dummy, gsi, &ptr_incr,
simd_lane_access_p, NULL_TREE, bump);
else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
vec_offset = vect_get_vec_def_for_stmt_copy (vinfo, vec_offset);
else
- dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
+ dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi,
stmt_info, bump);
}
/* Invalidate the current contents of VEC_ARRAY. This should
become an RTL clobber too, which prevents the vector registers
from being upward-exposed. */
- vect_clobber_variable (stmt_info, gsi, vec_array);
+ vect_clobber_variable (vinfo, stmt_info, gsi, vec_array);
/* Store the individual vectors into the array. */
for (i = 0; i < vec_num; i++)
{
vec_oprnd = dr_chain[i];
- write_vector_array (stmt_info, gsi, vec_oprnd, vec_array, i);
+ write_vector_array (vinfo, stmt_info,
+ gsi, vec_oprnd, vec_array, i);
}
tree final_mask = NULL;
gimple_call_set_lhs (call, data_ref);
}
gimple_call_set_nothrow (call, true);
- new_stmt_info = vect_finish_stmt_generation (stmt_info, call, gsi);
+ new_stmt_info = vect_finish_stmt_generation (vinfo, stmt_info,
+ call, gsi);
/* Record that VEC_ARRAY is now dead. */
- vect_clobber_variable (stmt_info, gsi, vec_array);
+ vect_clobber_variable (vinfo, stmt_info, gsi, vec_array);
}
else
{
if (j == 0)
result_chain.create (group_size);
/* Permute. */
- vect_permute_store_chain (dr_chain, group_size, stmt_info, gsi,
- &result_chain);
+ vect_permute_store_chain (vinfo, dr_chain, group_size, stmt_info,
+ gsi, &result_chain);
}
stmt_vec_info next_stmt_info = first_stmt_info;
scale, vec_oprnd);
gimple_call_set_nothrow (call, true);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, call, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
break;
}
if (i > 0)
/* Bump the vector pointer. */
- dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
- stmt_info, bump);
+ dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr,
+ gsi, stmt_info, bump);
if (slp)
vec_oprnd = vec_oprnds[i];
misalign = 0;
else if (DR_MISALIGNMENT (first_dr_info) == -1)
{
- align = dr_alignment (vect_dr_behavior (first_dr_info));
+ align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info));
misalign = 0;
}
else
gimple *perm_stmt
= gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
vec_oprnd, perm_mask);
- vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
perm_stmt = SSA_NAME_DEF_STMT (new_temp);
vec_oprnd = new_temp;
final_mask, vec_oprnd);
gimple_call_set_nothrow (call, true);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, call, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
}
else
{
gassign *new_stmt
= gimple_build_assign (data_ref, vec_oprnd);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
}
if (slp)
permuted vector variable. */
static tree
-permute_vec_elements (tree x, tree y, tree mask_vec, stmt_vec_info stmt_info,
+permute_vec_elements (vec_info *vinfo,
+ tree x, tree y, tree mask_vec, stmt_vec_info stmt_info,
gimple_stmt_iterator *gsi)
{
tree vectype = TREE_TYPE (x);
/* Generate the permute statement. */
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
- vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
return data_ref;
}
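
For reference, VEC_PERM_EXPR selects lanes from the concatenation of its
first two operands, indexed by the mask; a hypothetical 4-lane example:

  /* With x = { x0, x1, x2, x3 } and y = { y0, y1, y2, y3 },
     mask indices 0..3 select from X and 4..7 select from Y:
       z = VEC_PERM_EXPR <x, y, { 3, 2, 1, 0 }>;   -> { x3, x2, x1, x0 }
       w = VEC_PERM_EXPR <x, y, { 0, 4, 1, 5 }>;   -> { x0, y0, x1, y1 }
     Passing X for both operands, as several callers above do, permutes a
     single vector; the reversal mask is what perm_mask_for_reverse builds
     for VMAT_CONTIGUOUS_REVERSE accesses.  */
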
Return true if STMT_INFO is vectorizable in this way. */
static bool
-vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
+vectorizable_load (vec_info *vinfo,
+ stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt, slp_tree slp_node,
slp_instance slp_node_instance,
stmt_vector_for_cost *cost_vec)
tree vec_dest = NULL;
tree data_ref = NULL;
stmt_vec_info prev_stmt_info;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
class loop *loop = NULL;
class loop *containing_loop = gimple_bb (stmt_info->stmt)->loop_father;
bool nested_in_vect_loop = false;
int vec_num;
bool slp = (slp_node != NULL);
bool slp_perm = false;
- bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
+ bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
poly_uint64 vf;
tree aggr_type;
gather_scatter_info gs_info;
- vec_info *vinfo = stmt_info->vinfo;
tree ref_type;
enum vect_def_type mask_dt = vect_unknown_def_type;
if (mask_index >= 0)
{
mask = gimple_call_arg (call, mask_index);
- if (!vect_check_scalar_mask (stmt_info, mask, &mask_dt,
+ if (!vect_check_scalar_mask (vinfo, stmt_info, mask, &mask_dt,
&mask_vectype))
return false;
}
group_size = 1;
vect_memory_access_type memory_access_type;
- if (!get_load_store_type (stmt_info, vectype, slp, mask, VLS_LOAD, ncopies,
- &memory_access_type, &gs_info))
+ if (!get_load_store_type (vinfo, stmt_info, vectype, slp, mask, VLS_LOAD,
+ ncopies, &memory_access_type, &gs_info))
return false;
if (mask)
memory_access_type, &gs_info, mask);
STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
- vect_model_load_cost (stmt_info, ncopies, memory_access_type,
+ vect_model_load_cost (vinfo, stmt_info, ncopies, memory_access_type,
slp_node_instance, slp_node, cost_vec);
return true;
}
if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
{
- vect_build_gather_load_calls (stmt_info, gsi, vec_stmt, &gs_info, mask);
+ vect_build_gather_load_calls (vinfo,
+ stmt_info, gsi, vec_stmt, &gs_info, mask);
return true;
}
stmt_vec_info new_stmt_info;
if (hoist_p)
{
- new_temp = vect_init_vector (stmt_info, scalar_dest,
+ new_temp = vect_init_vector (vinfo, stmt_info, scalar_dest,
vectype, NULL);
gimple *new_stmt = SSA_NAME_DEF_STMT (new_temp);
new_stmt_info = vinfo->add_stmt (new_stmt);
}
else
{
- new_temp = vect_init_vector (stmt_info, scalar_dest,
+ new_temp = vect_init_vector (vinfo, stmt_info, scalar_dest,
vectype, &gsi2);
new_stmt_info = vinfo->lookup_def (new_temp);
}
ref_type = reference_alias_ptr_type (DR_REF (dr_info->dr));
}
- dr_offset = get_dr_vinfo_offset (first_dr_info);
+ dr_offset = get_dr_vinfo_offset (vinfo, first_dr_info);
stride_base
= fold_build_pointer_plus
(DR_BASE_ADDRESS (first_dr_info->dr),
gassign *new_stmt
= gimple_build_assign (make_ssa_name (ltype), data_ref);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
if (nloads > 1)
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
gimple_assign_lhs (new_stmt));
tree newoff = copy_ssa_name (running_off);
gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
running_off, stride_step);
- vect_finish_stmt_generation (stmt_info, incr, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, incr, gsi);
running_off = newoff;
group_el = 0;
if (nloads > 1)
{
tree vec_inv = build_constructor (lvectype, v);
- new_temp = vect_init_vector (stmt_info, vec_inv, lvectype, gsi);
+ new_temp = vect_init_vector (vinfo, stmt_info,
+ vec_inv, lvectype, gsi);
new_stmt_info = vinfo->lookup_def (new_temp);
if (lvectype != vectype)
{
build1 (VIEW_CONVERT_EXPR,
vectype, new_temp));
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info,
+ new_stmt, gsi);
}
}
if (slp_perm)
{
unsigned n_perms;
- vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
+ vect_transform_slp_perm_load (vinfo, slp_node, dr_chain, gsi, vf,
slp_node_instance, false, &n_perms);
}
return true;
alignment_support_scheme = dr_unaligned_supported;
else
alignment_support_scheme
- = vect_supportable_dr_alignment (first_dr_info, false);
+ = vect_supportable_dr_alignment (vinfo, first_dr_info, false);
gcc_assert (alignment_support_scheme);
vec_loop_masks *loop_masks
initialized yet, use first_stmt_info_for_drptr DR by bumping the
distance from first_stmt_info DR instead as below. */
if (!diff_first_stmt_info)
- msq = vect_setup_realignment (first_stmt_info, gsi, &realignment_token,
+ msq = vect_setup_realignment (loop_vinfo,
+ first_stmt_info, gsi, &realignment_token,
alignment_support_scheme, NULL_TREE,
&at_loop);
if (alignment_support_scheme == dr_explicit_realign_optimized)
aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
else
aggr_type = vectype;
- bump = vect_get_data_ptr_increment (dr_info, aggr_type,
+ bump = vect_get_data_ptr_increment (vinfo, dr_info, aggr_type,
memory_access_type);
}
if (simd_lane_access_p
&& TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR
&& VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0))
- && integer_zerop (get_dr_vinfo_offset (first_dr_info))
+ && integer_zerop (get_dr_vinfo_offset (vinfo, first_dr_info))
&& integer_zerop (DR_INIT (first_dr_info->dr))
&& alias_sets_conflict_p (get_alias_set (aggr_type),
get_alias_set (TREE_TYPE (ref_type)))
else if (diff_first_stmt_info)
{
dataref_ptr
- = vect_create_data_ref_ptr (first_stmt_info_for_drptr,
+ = vect_create_data_ref_ptr (vinfo, first_stmt_info_for_drptr,
aggr_type, at_loop, offset, &dummy,
gsi, &ptr_incr, simd_lane_access_p,
byte_offset, bump);
size_binop (MINUS_EXPR,
DR_INIT (first_dr_info->dr),
DR_INIT (ptrdr)));
- dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
+ dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi,
stmt_info, diff);
if (alignment_support_scheme == dr_explicit_realign)
{
- msq = vect_setup_realignment (first_stmt_info_for_drptr, gsi,
+ msq = vect_setup_realignment (vinfo,
+ first_stmt_info_for_drptr, gsi,
&realignment_token,
alignment_support_scheme,
dataref_ptr, &at_loop);
}
}
else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
- vect_get_gather_scatter_ops (loop, stmt_info, &gs_info,
+ vect_get_gather_scatter_ops (vinfo, loop, stmt_info, &gs_info,
&dataref_ptr, &vec_offset);
else
dataref_ptr
- = vect_create_data_ref_ptr (first_stmt_info, aggr_type, at_loop,
+ = vect_create_data_ref_ptr (vinfo, first_stmt_info, aggr_type,
+ at_loop,
offset, &dummy, gsi, &ptr_incr,
simd_lane_access_p,
byte_offset, bump);
if (slp_node)
{
auto_vec<vec<tree> > vec_defs (1);
- vect_get_slp_defs (slp_node, &vec_defs);
+ vect_get_slp_defs (vinfo, slp_node, &vec_defs);
vec_mask = vec_defs[0][0];
}
else
- vec_mask = vect_get_vec_def_for_operand (mask, stmt_info,
+ vec_mask = vect_get_vec_def_for_operand (vinfo, mask, stmt_info,
mask_vectype);
}
}
else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
vec_offset = vect_get_vec_def_for_stmt_copy (vinfo, vec_offset);
else
- dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
+ dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi,
stmt_info, bump);
if (mask)
vec_mask = vect_get_vec_def_for_stmt_copy (vinfo, vec_mask);
}
gimple_call_set_lhs (call, vec_array);
gimple_call_set_nothrow (call, true);
- new_stmt_info = vect_finish_stmt_generation (stmt_info, call, gsi);
+ new_stmt_info = vect_finish_stmt_generation (vinfo, stmt_info,
+ call, gsi);
/* Extract each vector into an SSA_NAME. */
for (i = 0; i < vec_num; i++)
{
- new_temp = read_vector_array (stmt_info, gsi, scalar_dest,
+ new_temp = read_vector_array (vinfo, stmt_info, gsi, scalar_dest,
vec_array, i);
dr_chain.quick_push (new_temp);
}
/* Record the mapping between SSA_NAMEs and statements. */
- vect_record_grouped_load_vectors (stmt_info, dr_chain);
+ vect_record_grouped_load_vectors (vinfo, stmt_info, dr_chain);
/* Record that VEC_ARRAY is now dead. */
- vect_clobber_variable (stmt_info, gsi, vec_array);
+ vect_clobber_variable (vinfo, stmt_info, gsi, vec_array);
}
else
{
vec_mask, gsi);
if (i > 0)
- dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
- stmt_info, bump);
+ dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr,
+ gsi, stmt_info, bump);
/* 2. Create the vector-load in the loop. */
gimple *new_stmt = NULL;
else if (DR_MISALIGNMENT (first_dr_info) == -1)
{
align = dr_alignment
- (vect_dr_behavior (first_dr_info));
+ (vect_dr_behavior (vinfo, first_dr_info));
misalign = 0;
}
else
DR_REF (first_dr_info->dr));
tree tem = make_ssa_name (ltype);
new_stmt = gimple_build_assign (tem, data_ref);
- vect_finish_stmt_generation (stmt_info, new_stmt,
- gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info,
+ new_stmt, gsi);
data_ref = NULL;
vec<constructor_elt, va_gc> *v;
vec_alloc (v, 2);
tree new_vname = make_ssa_name (new_vtype);
new_stmt = gimple_build_assign (
new_vname, build_constructor (new_vtype, v));
- vect_finish_stmt_generation (stmt_info,
+ vect_finish_stmt_generation (vinfo, stmt_info,
new_stmt, gsi);
new_stmt = gimple_build_assign (
vec_dest, build1 (VIEW_CONVERT_EXPR, vectype,
tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
if (compute_in_loop)
- msq = vect_setup_realignment (first_stmt_info, gsi,
+ msq = vect_setup_realignment (vinfo, first_stmt_info, gsi,
&realignment_token,
dr_explicit_realign,
dataref_ptr, NULL);
build_int_cst
(TREE_TYPE (dataref_ptr),
-(HOST_WIDE_INT) align));
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info,
+ new_stmt, gsi);
data_ref
= build2 (MEM_REF, vectype, ptr,
build_int_cst (ref_type, 0));
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_temp);
gimple_move_vops (new_stmt, stmt_info->stmt);
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info,
+ new_stmt, gsi);
msq = new_temp;
bump = size_binop (MULT_EXPR, vs,
TYPE_SIZE_UNIT (elem_type));
bump = size_binop (MINUS_EXPR, bump, size_one_node);
- ptr = bump_vector_ptr (dataref_ptr, NULL, gsi,
+ ptr = bump_vector_ptr (vinfo, dataref_ptr, NULL, gsi,
stmt_info, bump);
new_stmt = gimple_build_assign
(NULL_TREE, BIT_AND_EXPR, ptr,
(TREE_TYPE (ptr), -(HOST_WIDE_INT) align));
ptr = copy_ssa_name (ptr, new_stmt);
gimple_assign_set_lhs (new_stmt, ptr);
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info,
+ new_stmt, gsi);
data_ref
= build2 (MEM_REF, vectype, ptr,
build_int_cst (ref_type, 0));
(new_temp, BIT_AND_EXPR, dataref_ptr,
build_int_cst (TREE_TYPE (dataref_ptr),
-(HOST_WIDE_INT) align));
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info,
+ new_stmt, gsi);
data_ref
= build2 (MEM_REF, vectype, new_temp,
build_int_cst (ref_type, 0));
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_set_lhs (new_stmt, new_temp);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info,
+ new_stmt, gsi);
/* 3. Handle explicit realignment if necessary/supported.
Create in loop:
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_temp);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info,
+ new_stmt, gsi);
if (alignment_support_scheme == dr_explicit_realign_optimized)
{
if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
{
tree perm_mask = perm_mask_for_reverse (vectype);
- new_temp = permute_vec_elements (new_temp, new_temp,
+ new_temp = permute_vec_elements (vinfo, new_temp, new_temp,
perm_mask, stmt_info, gsi);
new_stmt_info = vinfo->lookup_def (new_temp);
}
= (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
* group_gap_adj);
tree bump = wide_int_to_tree (sizetype, bump_val);
- dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
- stmt_info, bump);
+ dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr,
+ gsi, stmt_info, bump);
group_elt = 0;
}
}
= (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
* group_gap_adj);
tree bump = wide_int_to_tree (sizetype, bump_val);
- dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
+ dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi,
stmt_info, bump);
}
}
if (slp_perm)
{
unsigned n_perms;
- if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
+ if (!vect_transform_slp_perm_load (vinfo, slp_node, dr_chain, gsi, vf,
slp_node_instance, false,
&n_perms))
{
if (grouped_load)
{
if (memory_access_type != VMAT_LOAD_STORE_LANES)
- vect_transform_grouped_load (stmt_info, dr_chain,
+ vect_transform_grouped_load (vinfo, stmt_info, dr_chain,
group_size, gsi);
*vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
}
Return true if STMT_INFO is vectorizable in this way. */
static bool
-vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
+vectorizable_condition (vec_info *vinfo,
+ stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt,
slp_tree slp_node, stmt_vector_for_cost *cost_vec)
{
- vec_info *vinfo = stmt_info->vinfo;
tree scalar_dest = NULL_TREE;
tree vec_dest = NULL_TREE;
tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE;
tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
tree vec_compare;
tree new_temp;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
enum vect_def_type dts[4]
= {vect_unknown_def_type, vect_unknown_def_type,
vect_unknown_def_type, vect_unknown_def_type};
enum tree_code code, cond_code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
stmt_vec_info prev_stmt_info = NULL;
int i, j;
- bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
+ bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
vec<tree> vec_oprnds0 = vNULL;
vec<tree> vec_oprnds1 = vNULL;
vec<tree> vec_oprnds2 = vNULL;
{
if (STMT_SLP_TYPE (stmt_info))
return false;
- reduc_info = info_for_reduction (stmt_info);
+ reduc_info = info_for_reduction (vinfo, stmt_info);
reduction_type = STMT_VINFO_REDUC_TYPE (reduc_info);
reduc_index = STMT_VINFO_REDUC_IDX (stmt_info);
gcc_assert (reduction_type != EXTRACT_LAST_REDUCTION
then_clause = gimple_assign_rhs2 (stmt);
else_clause = gimple_assign_rhs3 (stmt);
- if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo, slp_node,
+ if (!vect_is_simple_cond (cond_expr, vinfo, slp_node,
&comp_vectype, &dts[0], vectype)
|| !comp_vectype)
return false;
- if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &dts[2], &vectype1))
+ if (!vect_is_simple_use (then_clause, vinfo, &dts[2], &vectype1))
return false;
- if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &dts[3], &vectype2))
+ if (!vect_is_simple_use (else_clause, vinfo, &dts[3], &vectype2))
return false;
if (vectype1 && !useless_type_conversion_p (vectype, vectype1))
return false;
STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
- vect_model_simple_cost (stmt_info, ncopies, dts, ndts, slp_node,
+ vect_model_simple_cost (vinfo, stmt_info, ncopies, dts, ndts, slp_node,
cost_vec, kind);
return true;
}
if (slp_node)
{
auto_vec<vec<tree>, 4> vec_defs;
- vect_get_slp_defs (slp_node, &vec_defs);
+ vect_get_slp_defs (vinfo, slp_node, &vec_defs);
vec_oprnds3 = vec_defs.pop ();
vec_oprnds2 = vec_defs.pop ();
if (!masked)
if (masked)
{
vec_cond_lhs
- = vect_get_vec_def_for_operand (cond_expr, stmt_info,
+ = vect_get_vec_def_for_operand (vinfo, cond_expr, stmt_info,
comp_vectype);
}
else
{
vec_cond_lhs
- = vect_get_vec_def_for_operand (cond_expr0,
+ = vect_get_vec_def_for_operand (vinfo, cond_expr0,
stmt_info, comp_vectype);
vec_cond_rhs
- = vect_get_vec_def_for_operand (cond_expr1,
+ = vect_get_vec_def_for_operand (vinfo, cond_expr1,
stmt_info, comp_vectype);
}
- vec_then_clause = vect_get_vec_def_for_operand (then_clause,
+ vec_then_clause = vect_get_vec_def_for_operand (vinfo,
+ then_clause,
stmt_info);
if (reduction_type != EXTRACT_LAST_REDUCTION)
- vec_else_clause = vect_get_vec_def_for_operand (else_clause,
+ vec_else_clause = vect_get_vec_def_for_operand (vinfo,
+ else_clause,
stmt_info);
}
}
new_stmt
= gimple_build_assign (new_temp, bitop1, vec_cond_lhs,
vec_cond_rhs);
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
if (bitop2 == NOP_EXPR)
vec_compare = new_temp;
else if (bitop2 == BIT_NOT_EXPR)
new_stmt
= gimple_build_assign (vec_compare, bitop2,
vec_cond_lhs, new_temp);
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info,
+ new_stmt, gsi);
}
}
}
tree vec_compare_name = make_ssa_name (vec_cmp_type);
gassign *new_stmt = gimple_build_assign (vec_compare_name,
vec_compare);
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
vec_compare = vec_compare_name;
}
gassign *new_stmt = gimple_build_assign (vec_compare_name,
BIT_NOT_EXPR,
vec_compare);
- vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
vec_compare = vec_compare_name;
}
gassign *g
= gimple_build_assign (tmp2, BIT_AND_EXPR, vec_compare,
loop_mask);
- vect_finish_stmt_generation (stmt_info, g, gsi);
+ vect_finish_stmt_generation (vinfo, stmt_info, g, gsi);
vec_compare = tmp2;
}
}
gimple_call_set_lhs (new_stmt, lhs);
SSA_NAME_DEF_STMT (lhs) = new_stmt;
if (old_stmt == gsi_stmt (*gsi))
- new_stmt_info = vect_finish_replace_stmt (stmt_info, new_stmt);
+ new_stmt_info = vect_finish_replace_stmt (vinfo,
+ stmt_info, new_stmt);
else
{
/* In this case we're moving the definition to later in the
gimple_stmt_iterator old_gsi = gsi_for_stmt (old_stmt);
gsi_remove (&old_gsi, true);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info,
+ new_stmt, gsi);
}
}
else
= gimple_build_assign (new_temp, VEC_COND_EXPR, vec_compare,
vec_then_clause, vec_else_clause);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
}
if (slp_node)
SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
Return true if STMT_INFO is vectorizable in this way. */
static bool
-vectorizable_comparison (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
+vectorizable_comparison (vec_info *vinfo,
+ stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt,
slp_tree slp_node, stmt_vector_for_cost *cost_vec)
{
- vec_info *vinfo = stmt_info->vinfo;
tree lhs, rhs1, rhs2;
tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
tree new_temp;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
int ndts = 2;
poly_uint64 nunits;
enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
stmt_vec_info prev_stmt_info = NULL;
int i, j;
- bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
+ bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
vec<tree> vec_oprnds0 = vNULL;
vec<tree> vec_oprnds1 = vNULL;
tree mask_type;
rhs1 = gimple_assign_rhs1 (stmt);
rhs2 = gimple_assign_rhs2 (stmt);
- if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &dts[0], &vectype1))
+ if (!vect_is_simple_use (rhs1, vinfo, &dts[0], &vectype1))
return false;
- if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &dts[1], &vectype2))
+ if (!vect_is_simple_use (rhs2, vinfo, &dts[1], &vectype2))
return false;
if (vectype1 && vectype2
}
STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
- vect_model_simple_cost (stmt_info, ncopies * (1 + (bitop2 != NOP_EXPR)),
+ vect_model_simple_cost (vinfo, stmt_info,
+ ncopies * (1 + (bitop2 != NOP_EXPR)),
dts, ndts, slp_node, cost_vec);
return true;
}
if (slp_node)
{
auto_vec<vec<tree>, 2> vec_defs;
- vect_get_slp_defs (slp_node, &vec_defs);
+ vect_get_slp_defs (vinfo, slp_node, &vec_defs);
vec_oprnds1 = vec_defs.pop ();
vec_oprnds0 = vec_defs.pop ();
if (swap_p)
}
else
{
- vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt_info,
+ vec_rhs1 = vect_get_vec_def_for_operand (vinfo, rhs1, stmt_info,
vectype);
- vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt_info,
+ vec_rhs2 = vect_get_vec_def_for_operand (vinfo, rhs2, stmt_info,
vectype);
}
}
gassign *new_stmt = gimple_build_assign (new_temp, code,
vec_rhs1, vec_rhs2);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
}
else
{
new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1,
vec_rhs2);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
if (bitop2 != NOP_EXPR)
{
tree res = make_ssa_name (mask);
new_stmt = gimple_build_assign (res, bitop2, vec_rhs1,
new_temp);
new_stmt_info
- = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+ = vect_finish_stmt_generation (vinfo, stmt_info,
+ new_stmt, gsi);
}
}
if (slp_node)
GSI and VEC_STMT_P are as for vectorizable_live_operation. */
static bool
-can_vectorize_live_stmts (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
+can_vectorize_live_stmts (loop_vec_info loop_vinfo,
+ stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
slp_tree slp_node, slp_instance slp_node_instance,
bool vec_stmt_p,
stmt_vector_for_cost *cost_vec)
FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt_info)
{
if (STMT_VINFO_LIVE_P (slp_stmt_info)
- && !vectorizable_live_operation (slp_stmt_info, gsi, slp_node,
+ && !vectorizable_live_operation (loop_vinfo,
+ slp_stmt_info, gsi, slp_node,
slp_node_instance, i,
vec_stmt_p, cost_vec))
return false;
}
}
else if (STMT_VINFO_LIVE_P (stmt_info)
- && !vectorizable_live_operation (stmt_info, gsi, slp_node,
- slp_node_instance, -1,
+ && !vectorizable_live_operation (loop_vinfo, stmt_info, gsi,
+ slp_node, slp_node_instance, -1,
vec_stmt_p, cost_vec))
return false;
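
For orientation, a "live" statement is one whose scalar result is used after
the vectorized loop; vectorizable_live_operation arranges to extract the
relevant lane. Example only:

  int
  last (int *a, int n)
  {
    int s = 0;
    for (int i = 0; i < n; i++)
      s = a[i];     /* S is live: its final value...  */
    return s;       /* ...is used here, outside the loop.  */
  }
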
/* Make sure the statement is vectorizable. */
opt_result
-vect_analyze_stmt (stmt_vec_info stmt_info, bool *need_to_vectorize,
+vect_analyze_stmt (vec_info *vinfo,
+ stmt_vec_info stmt_info, bool *need_to_vectorize,
slp_tree node, slp_instance node_instance,
stmt_vector_for_cost *cost_vec)
{
- vec_info *vinfo = stmt_info->vinfo;
- bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
+ bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
bool ok;
gimple_seq pattern_def_seq;
pattern_def_stmt_info->stmt);
opt_result res
- = vect_analyze_stmt (pattern_def_stmt_info,
+ = vect_analyze_stmt (vinfo, pattern_def_stmt_info,
need_to_vectorize, node, node_instance,
cost_vec);
if (!res)
pattern_stmt_info->stmt);
opt_result res
- = vect_analyze_stmt (pattern_stmt_info, need_to_vectorize, node,
+ = vect_analyze_stmt (vinfo, pattern_stmt_info, need_to_vectorize, node,
node_instance, cost_vec);
if (!res)
return res;
/* Prefer vectorizable_call over vectorizable_simd_clone_call so
-mveclibabi= takes preference over library functions with
the simd attribute. */
- ok = (vectorizable_call (stmt_info, NULL, NULL, node, cost_vec)
- || vectorizable_simd_clone_call (stmt_info, NULL, NULL, node,
+ ok = (vectorizable_call (vinfo, stmt_info, NULL, NULL, node, cost_vec)
+ || vectorizable_simd_clone_call (vinfo, stmt_info, NULL, NULL, node,
cost_vec)
- || vectorizable_conversion (stmt_info, NULL, NULL, node, cost_vec)
- || vectorizable_operation (stmt_info, NULL, NULL, node, cost_vec)
- || vectorizable_assignment (stmt_info, NULL, NULL, node, cost_vec)
- || vectorizable_load (stmt_info, NULL, NULL, node, node_instance,
- cost_vec)
- || vectorizable_store (stmt_info, NULL, NULL, node, cost_vec)
- || vectorizable_reduction (stmt_info, node, node_instance, cost_vec)
- || vectorizable_induction (stmt_info, NULL, NULL, node, cost_vec)
- || vectorizable_shift (stmt_info, NULL, NULL, node, cost_vec)
- || vectorizable_condition (stmt_info, NULL, NULL, node, cost_vec)
- || vectorizable_comparison (stmt_info, NULL, NULL, node,
+ || vectorizable_conversion (vinfo, stmt_info,
+ NULL, NULL, node, cost_vec)
+ || vectorizable_operation (vinfo, stmt_info,
+ NULL, NULL, node, cost_vec)
+ || vectorizable_assignment (vinfo, stmt_info,
+ NULL, NULL, node, cost_vec)
+ || vectorizable_load (vinfo, stmt_info,
+ NULL, NULL, node, node_instance, cost_vec)
+ || vectorizable_store (vinfo, stmt_info, NULL, NULL, node, cost_vec)
+ || vectorizable_reduction (as_a <loop_vec_info> (vinfo), stmt_info,
+ node, node_instance, cost_vec)
+ || vectorizable_induction (as_a <loop_vec_info> (vinfo), stmt_info,
+ NULL, NULL, node, cost_vec)
+ || vectorizable_shift (vinfo, stmt_info, NULL, NULL, node, cost_vec)
+ || vectorizable_condition (vinfo, stmt_info,
+ NULL, NULL, node, cost_vec)
+ || vectorizable_comparison (vinfo, stmt_info, NULL, NULL, node,
cost_vec)
- || vectorizable_lc_phi (stmt_info, NULL, node));
+ || vectorizable_lc_phi (as_a <loop_vec_info> (vinfo),
+ stmt_info, NULL, node));
else
{
if (bb_vinfo)
- ok = (vectorizable_call (stmt_info, NULL, NULL, node, cost_vec)
- || vectorizable_simd_clone_call (stmt_info, NULL, NULL, node,
- cost_vec)
- || vectorizable_conversion (stmt_info, NULL, NULL, node,
+ ok = (vectorizable_call (vinfo, stmt_info, NULL, NULL, node, cost_vec)
+ || vectorizable_simd_clone_call (vinfo, stmt_info,
+ NULL, NULL, node, cost_vec)
+ || vectorizable_conversion (vinfo, stmt_info, NULL, NULL, node,
cost_vec)
- || vectorizable_shift (stmt_info, NULL, NULL, node, cost_vec)
- || vectorizable_operation (stmt_info, NULL, NULL, node, cost_vec)
- || vectorizable_assignment (stmt_info, NULL, NULL, node,
+ || vectorizable_shift (vinfo, stmt_info,
+ NULL, NULL, node, cost_vec)
+ || vectorizable_operation (vinfo, stmt_info,
+ NULL, NULL, node, cost_vec)
+ || vectorizable_assignment (vinfo, stmt_info, NULL, NULL, node,
cost_vec)
- || vectorizable_load (stmt_info, NULL, NULL, node, node_instance,
- cost_vec)
- || vectorizable_store (stmt_info, NULL, NULL, node, cost_vec)
- || vectorizable_condition (stmt_info, NULL, NULL, node, cost_vec)
- || vectorizable_comparison (stmt_info, NULL, NULL, node,
+ || vectorizable_load (vinfo, stmt_info,
+ NULL, NULL, node, node_instance, cost_vec)
+ || vectorizable_store (vinfo, stmt_info,
+ NULL, NULL, node, cost_vec)
+ || vectorizable_condition (vinfo, stmt_info,
+ NULL, NULL, node, cost_vec)
+ || vectorizable_comparison (vinfo, stmt_info, NULL, NULL, node,
cost_vec));
}
if (!bb_vinfo
&& STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
&& STMT_VINFO_TYPE (stmt_info) != lc_phi_info_type
- && !can_vectorize_live_stmts (stmt_info, NULL, node, node_instance,
+ && !can_vectorize_live_stmts (as_a <loop_vec_info> (vinfo),
+ stmt_info, NULL, node, node_instance,
false, cost_vec))
return opt_result::failure_at (stmt_info->stmt,
"not vectorized:"
Create a vectorized stmt to replace STMT_INFO, and insert it at GSI. */
bool
-vect_transform_stmt (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
+vect_transform_stmt (vec_info *vinfo,
+ stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
slp_tree slp_node, slp_instance slp_node_instance)
{
- vec_info *vinfo = stmt_info->vinfo;
bool is_store = false;
stmt_vec_info vec_stmt = NULL;
bool done;
gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));
stmt_vec_info old_vec_stmt_info = STMT_VINFO_VEC_STMT (stmt_info);
- bool nested_p = (STMT_VINFO_LOOP_VINFO (stmt_info)
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
+ bool nested_p = (loop_vinfo
&& nested_in_vect_loop_p
- (LOOP_VINFO_LOOP (STMT_VINFO_LOOP_VINFO (stmt_info)),
- stmt_info));
+ (LOOP_VINFO_LOOP (loop_vinfo), stmt_info));
gimple *stmt = stmt_info->stmt;
switch (STMT_VINFO_TYPE (stmt_info))
case type_demotion_vec_info_type:
case type_promotion_vec_info_type:
case type_conversion_vec_info_type:
- done = vectorizable_conversion (stmt_info, gsi, &vec_stmt, slp_node,
- NULL);
+ done = vectorizable_conversion (vinfo, stmt_info,
+ gsi, &vec_stmt, slp_node, NULL);
gcc_assert (done);
break;
case induc_vec_info_type:
- done = vectorizable_induction (stmt_info, gsi, &vec_stmt, slp_node,
+ done = vectorizable_induction (as_a <loop_vec_info> (vinfo),
+ stmt_info, gsi, &vec_stmt, slp_node,
NULL);
gcc_assert (done);
break;
case shift_vec_info_type:
- done = vectorizable_shift (stmt_info, gsi, &vec_stmt, slp_node, NULL);
+ done = vectorizable_shift (vinfo, stmt_info,
+ gsi, &vec_stmt, slp_node, NULL);
gcc_assert (done);
break;
case op_vec_info_type:
- done = vectorizable_operation (stmt_info, gsi, &vec_stmt, slp_node,
+ done = vectorizable_operation (vinfo, stmt_info, gsi, &vec_stmt, slp_node,
NULL);
gcc_assert (done);
break;
case assignment_vec_info_type:
- done = vectorizable_assignment (stmt_info, gsi, &vec_stmt, slp_node,
- NULL);
+ done = vectorizable_assignment (vinfo, stmt_info,
+ gsi, &vec_stmt, slp_node, NULL);
gcc_assert (done);
break;
case load_vec_info_type:
- done = vectorizable_load (stmt_info, gsi, &vec_stmt, slp_node,
+ done = vectorizable_load (vinfo, stmt_info, gsi, &vec_stmt, slp_node,
slp_node_instance, NULL);
gcc_assert (done);
break;
case store_vec_info_type:
- done = vectorizable_store (stmt_info, gsi, &vec_stmt, slp_node, NULL);
+ done = vectorizable_store (vinfo, stmt_info,
+ gsi, &vec_stmt, slp_node, NULL);
gcc_assert (done);
if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
{
break;
case condition_vec_info_type:
- done = vectorizable_condition (stmt_info, gsi, &vec_stmt, slp_node, NULL);
+ done = vectorizable_condition (vinfo, stmt_info,
+ gsi, &vec_stmt, slp_node, NULL);
gcc_assert (done);
break;
case comparison_vec_info_type:
- done = vectorizable_comparison (stmt_info, gsi, &vec_stmt,
+ done = vectorizable_comparison (vinfo, stmt_info, gsi, &vec_stmt,
slp_node, NULL);
gcc_assert (done);
break;
case call_vec_info_type:
- done = vectorizable_call (stmt_info, gsi, &vec_stmt, slp_node, NULL);
+ done = vectorizable_call (vinfo, stmt_info,
+ gsi, &vec_stmt, slp_node, NULL);
stmt = gsi_stmt (*gsi);
break;
case call_simd_clone_vec_info_type:
- done = vectorizable_simd_clone_call (stmt_info, gsi, &vec_stmt,
+ done = vectorizable_simd_clone_call (vinfo, stmt_info, gsi, &vec_stmt,
slp_node, NULL);
stmt = gsi_stmt (*gsi);
break;
case reduc_vec_info_type:
- done = vect_transform_reduction (stmt_info, gsi, &vec_stmt, slp_node);
+ done = vect_transform_reduction (as_a <loop_vec_info> (vinfo), stmt_info,
+ gsi, &vec_stmt, slp_node);
gcc_assert (done);
break;
case cycle_phi_info_type:
- done = vect_transform_cycle_phi (stmt_info, &vec_stmt, slp_node,
- slp_node_instance);
+ done = vect_transform_cycle_phi (as_a <loop_vec_info> (vinfo), stmt_info,
+ &vec_stmt, slp_node, slp_node_instance);
gcc_assert (done);
break;
case lc_phi_info_type:
- done = vectorizable_lc_phi (stmt_info, &vec_stmt, slp_node);
+ done = vectorizable_lc_phi (as_a <loop_vec_info> (vinfo),
+ stmt_info, &vec_stmt, slp_node);
gcc_assert (done);
break;
"stmt not supported.\n");
gcc_unreachable ();
}
+ done = true;
}
/* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
|| STMT_VINFO_RELEVANT (stmt_info) ==
vect_used_in_outer_by_reduction))
{
- class loop *innerloop = LOOP_VINFO_LOOP (
- STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
+ class loop *innerloop = LOOP_VINFO_LOOP (loop_vinfo)->inner;
imm_use_iterator imm_iter;
use_operand_p use_p;
tree scalar_dest;
stmt_vec_info reduc_info;
if (STMT_VINFO_REDUC_DEF (orig_stmt_info)
&& vect_stmt_to_vectorize (orig_stmt_info) == stmt_info
- && (reduc_info = info_for_reduction (orig_stmt_info))
+ && (reduc_info = info_for_reduction (vinfo, orig_stmt_info))
&& STMT_VINFO_REDUC_TYPE (reduc_info) != FOLD_LEFT_REDUCTION
&& STMT_VINFO_REDUC_TYPE (reduc_info) != EXTRACT_LAST_REDUCTION)
{
/* Handle stmts whose DEF is used outside the loop-nest that is
being vectorized. */
- done = can_vectorize_live_stmts (stmt_info, gsi, slp_node,
- slp_node_instance, true, NULL);
+ if (is_a <loop_vec_info> (vinfo))
+ done = can_vectorize_live_stmts (as_a <loop_vec_info> (vinfo),
+ stmt_info, gsi, slp_node,
+ slp_node_instance, true, NULL);
gcc_assert (done);
return false;
stmt_vec_info. */
void
-vect_remove_stores (stmt_vec_info first_stmt_info)
+vect_remove_stores (vec_info *vinfo, stmt_vec_info first_stmt_info)
{
- vec_info *vinfo = first_stmt_info->vinfo;
stmt_vec_info next_stmt_info = first_stmt_info;
while (next_stmt_info)
widening operation (short in the above example). */
bool
-supportable_widening_operation (enum tree_code code, stmt_vec_info stmt_info,
+supportable_widening_operation (vec_info *vinfo,
+ enum tree_code code, stmt_vec_info stmt_info,
tree vectype_out, tree vectype_in,
enum tree_code *code1, enum tree_code *code2,
int *multi_step_cvt,
vec<tree> *interm_types)
{
- loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vec_info loop_info = dyn_cast <loop_vec_info> (vinfo);
class loop *vect_loop = NULL;
machine_mode vec_mode;
enum insn_code icode1, icode2;
if (vect_loop
&& STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
&& !nested_in_vect_loop_p (vect_loop, stmt_info)
- && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
+ && supportable_widening_operation (vinfo, VEC_WIDEN_MULT_EVEN_EXPR,
stmt_info, vectype_out,
vectype_in, code1, code2,
multi_step_cvt, interm_types))
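
A hedged illustration of the widening case this function answers for (the
loop and the code pairs are examples, not part of the patch):

  /* Each vector of chars produces two vectors of shorts.  */
  void
  widen_mul (short *r, signed char *a, signed char *b, int n)
  {
    for (int i = 0; i < n; i++)
      r[i] = (short) a[i] * (short) b[i];
  }

  /* supportable_widening_operation reports the two-code expansion via
     *code1/*code2, e.g. VEC_WIDEN_MULT_LO_EXPR / VEC_WIDEN_MULT_HI_EXPR,
     or the cheaper even/odd pair tried above for reductions, where the
     lane order of the result does not matter.  */
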
statement does not help to determine the overall number of units. */
opt_result
-vect_get_vector_types_for_stmt (stmt_vec_info stmt_info,
+vect_get_vector_types_for_stmt (vec_info *vinfo, stmt_vec_info stmt_info,
tree *stmt_vectype_out,
tree *nunits_vectype_out,
unsigned int group_size)
{
- vec_info *vinfo = stmt_info->vinfo;
gimple *stmt = stmt_info->stmt;
/* For BB vectorization, we should always have a group size once we've
vec_info::new_stmt_vec_info (gimple *stmt)
{
stmt_vec_info res = XCNEW (class _stmt_vec_info);
- res->vinfo = this;
res->stmt = stmt;
STMT_VINFO_TYPE (res) = undef_vec_info_type;
/* The stmt to which this info struct refers to. */
gimple *stmt;
- /* The vec_info with respect to which STMT is vectorized. */
- vec_info *vinfo;
-
/* The vector type to be used for the LHS of this statement. */
tree vectype;
/* Access Functions. */
#define STMT_VINFO_TYPE(S) (S)->type
#define STMT_VINFO_STMT(S) (S)->stmt
-inline loop_vec_info
-STMT_VINFO_LOOP_VINFO (stmt_vec_info stmt_vinfo)
-{
- if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (stmt_vinfo->vinfo))
- return loop_vinfo;
- return NULL;
-}
-inline bb_vec_info
-STMT_VINFO_BB_VINFO (stmt_vec_info stmt_vinfo)
-{
- if (bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (stmt_vinfo->vinfo))
- return bb_vinfo;
- return NULL;
-}
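
With these two accessors gone, code that used them threads the vec_info
explicitly and dispatches on its dynamic type. A minimal sketch of the
replacement idiom (the function is hypothetical):

  static void
  example (vec_info *vinfo)
  {
    /* Each dyn_cast yields NULL when VINFO is of the other kind,
       matching the old STMT_VINFO_{LOOP,BB}_VINFO semantics.  */
    loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
    bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
    if (loop_vinfo)
      ;  /* Loop vectorization.  */
    else if (bb_vinfo)
      ;  /* Basic-block (SLP) vectorization.  */
  }
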
#define STMT_VINFO_RELEVANT(S) (S)->relevant
#define STMT_VINFO_LIVE_P(S) (S)->live
#define STMT_VINFO_VECTYPE(S) (S)->vectype
/* Alias targetm.vectorize.add_stmt_cost. */
static inline unsigned
-add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
+add_stmt_cost (vec_info *vinfo, void *data, int count,
+ enum vect_cost_for_stmt kind,
stmt_vec_info stmt_info, int misalign,
enum vect_cost_model_location where)
{
- unsigned cost = targetm.vectorize.add_stmt_cost (data, count, kind,
+ unsigned cost = targetm.vectorize.add_stmt_cost (vinfo, data, count, kind,
stmt_info, misalign, where);
if (dump_file && (dump_flags & TDF_DETAILS))
dump_stmt_cost (dump_file, data, count, kind, stmt_info, misalign,
}
inline void
-add_stmt_costs (void *data, stmt_vector_for_cost *cost_vec)
+add_stmt_costs (vec_info *vinfo, void *data, stmt_vector_for_cost *cost_vec)
{
stmt_info_for_cost *cost;
unsigned i;
FOR_EACH_VEC_ELT (*cost_vec, i, cost)
- add_stmt_cost (data, cost->count, cost->kind, cost->stmt_info,
+ add_stmt_cost (vinfo, data, cost->count, cost->kind, cost->stmt_info,
cost->misalign, cost->where);
}
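
A sketch of how this pair is used: analysis records candidate costs into a
stmt_vector_for_cost, and the transform phase later replays them into the
target's cost data, now with the vec_info in hand (variable names are
illustrative):

  stmt_vector_for_cost cost_vec = vNULL;
  record_stmt_cost (&cost_vec, 1, vector_stmt, stmt_info, 0, vect_body);
  /* ... more analysis ... */
  add_stmt_costs (vinfo, target_cost_data, &cost_vec);
  cost_vec.release ();
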
in DR_INFO itself). */
static inline innermost_loop_behavior *
-vect_dr_behavior (dr_vec_info *dr_info)
+vect_dr_behavior (vec_info *vinfo, dr_vec_info *dr_info)
{
stmt_vec_info stmt_info = dr_info->stmt;
- loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
if (loop_vinfo == NULL
|| !nested_in_vect_loop_p (LOOP_VINFO_LOOP (loop_vinfo), stmt_info))
return &DR_INNERMOST (dr_info->dr);
vect_dr_behavior to select the appropriate data_reference to use. */
inline tree
-get_dr_vinfo_offset (dr_vec_info *dr_info, bool check_outer = false)
+get_dr_vinfo_offset (vec_info *vinfo,
+ dr_vec_info *dr_info, bool check_outer = false)
{
innermost_loop_behavior *base;
if (check_outer)
- base = vect_dr_behavior (dr_info);
+ base = vect_dr_behavior (vinfo, dr_info);
else
base = &dr_info->dr->innermost;
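
Illustrative use of the distinction above (a sketch, not from the patch):

  /* Outer-loop vectorization may need the outer-loop behavior...  */
  tree off = get_dr_vinfo_offset (vinfo, dr_info, /*check_outer=*/true);
  /* ...while plain contiguous accesses use the data-ref's own.  */
  tree off2 = get_dr_vinfo_offset (vinfo, dr_info);
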
extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *,
tree *, stmt_vec_info * = NULL,
gimple ** = NULL);
-extern bool supportable_widening_operation (enum tree_code, stmt_vec_info,
+extern bool supportable_widening_operation (vec_info *,
+ enum tree_code, stmt_vec_info,
tree, tree, enum tree_code *,
enum tree_code *, int *,
vec<tree> *);
extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
enum vect_cost_for_stmt, stmt_vec_info,
int, enum vect_cost_model_location);
-extern stmt_vec_info vect_finish_replace_stmt (stmt_vec_info, gimple *);
-extern stmt_vec_info vect_finish_stmt_generation (stmt_vec_info, gimple *,
+extern stmt_vec_info vect_finish_replace_stmt (vec_info *,
+ stmt_vec_info, gimple *);
+extern stmt_vec_info vect_finish_stmt_generation (vec_info *,
+ stmt_vec_info, gimple *,
gimple_stmt_iterator *);
extern opt_result vect_mark_stmts_to_be_vectorized (loop_vec_info, bool *);
extern tree vect_get_store_rhs (stmt_vec_info);
extern tree vect_get_vec_def_for_operand_1 (stmt_vec_info, enum vect_def_type);
-extern tree vect_get_vec_def_for_operand (tree, stmt_vec_info, tree = NULL);
-extern void vect_get_vec_defs (tree, tree, stmt_vec_info, vec<tree> *,
- vec<tree> *, slp_tree);
+extern tree vect_get_vec_def_for_operand (vec_info *, tree,
+ stmt_vec_info, tree = NULL);
+extern void vect_get_vec_defs (vec_info *, tree, tree, stmt_vec_info,
+ vec<tree> *, vec<tree> *, slp_tree);
extern void vect_get_vec_defs_for_stmt_copy (vec_info *,
vec<tree> *, vec<tree> *);
-extern tree vect_init_vector (stmt_vec_info, tree, tree,
+extern tree vect_init_vector (vec_info *, stmt_vec_info, tree, tree,
gimple_stmt_iterator *);
extern tree vect_get_vec_def_for_stmt_copy (vec_info *, tree);
-extern bool vect_transform_stmt (stmt_vec_info, gimple_stmt_iterator *,
+extern bool vect_transform_stmt (vec_info *, stmt_vec_info,
+ gimple_stmt_iterator *,
slp_tree, slp_instance);
-extern void vect_remove_stores (stmt_vec_info);
+extern void vect_remove_stores (vec_info *, stmt_vec_info);
extern bool vect_nop_conversion_p (stmt_vec_info);
-extern opt_result vect_analyze_stmt (stmt_vec_info, bool *, slp_tree,
+extern opt_result vect_analyze_stmt (vec_info *, stmt_vec_info, bool *,
+ slp_tree,
slp_instance, stmt_vector_for_cost *);
-extern void vect_get_load_cost (stmt_vec_info, int, bool,
+extern void vect_get_load_cost (vec_info *, stmt_vec_info, int, bool,
unsigned int *, unsigned int *,
stmt_vector_for_cost *,
stmt_vector_for_cost *, bool);
-extern void vect_get_store_cost (stmt_vec_info, int,
+extern void vect_get_store_cost (vec_info *, stmt_vec_info, int,
unsigned int *, stmt_vector_for_cost *);
extern bool vect_supportable_shift (vec_info *, enum tree_code, tree);
extern tree vect_gen_perm_mask_any (tree, const vec_perm_indices &);
extern void optimize_mask_stores (class loop*);
extern gcall *vect_gen_while (tree, tree, tree);
extern tree vect_gen_while_not (gimple_seq *, tree, tree, tree);
-extern opt_result vect_get_vector_types_for_stmt (stmt_vec_info, tree *,
+extern opt_result vect_get_vector_types_for_stmt (vec_info *,
+ stmt_vec_info, tree *,
tree *, unsigned int = 0);
extern opt_tree vect_get_mask_type_for_stmt (stmt_vec_info, unsigned int = 0);
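All of the tree-vect-stmts.c entry points above change mechanically in the same way; for example, a transform-phase caller that emits a replacement vector statement would now read (sketch, with vec_dest, new_rhs and gsi assumed):

  gimple *new_stmt = gimple_build_assign (vec_dest, new_rhs);
  /* The vec_info is threaded through instead of being read from
     stmt_info->vinfo.  */
  vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);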
/* In tree-vect-data-refs.c. */
extern bool vect_can_force_dr_alignment_p (const_tree, poly_uint64);
extern enum dr_alignment_support vect_supportable_dr_alignment
- (dr_vec_info *, bool);
+ (vec_info *, dr_vec_info *, bool);
extern tree vect_get_smallest_scalar_type (stmt_vec_info, HOST_WIDE_INT *,
HOST_WIDE_INT *);
extern opt_result vect_analyze_data_ref_dependences (loop_vec_info, unsigned int *);
-extern bool vect_slp_analyze_instance_dependence (slp_instance);
+extern bool vect_slp_analyze_instance_dependence (vec_info *, slp_instance);
extern opt_result vect_enhance_data_refs_alignment (loop_vec_info);
extern opt_result vect_analyze_data_refs_alignment (loop_vec_info);
extern opt_result vect_verify_datarefs_alignment (loop_vec_info);
-extern bool vect_slp_analyze_and_verify_instance_alignment (slp_instance);
+extern bool vect_slp_analyze_and_verify_instance_alignment (vec_info *,
+ slp_instance);
extern opt_result vect_analyze_data_ref_accesses (vec_info *);
extern opt_result vect_prune_runtime_alias_test_list (loop_vec_info);
extern bool vect_gather_scatter_fn_p (vec_info *, bool, bool, tree, tree,
vec<data_reference_p> *);
extern opt_result vect_analyze_data_refs (vec_info *, poly_uint64 *, bool *);
extern void vect_record_base_alignments (vec_info *);
-extern tree vect_create_data_ref_ptr (stmt_vec_info, tree, class loop *, tree,
+extern tree vect_create_data_ref_ptr (vec_info *,
+ stmt_vec_info, tree, class loop *, tree,
tree *, gimple_stmt_iterator *,
gimple **, bool,
tree = NULL_TREE, tree = NULL_TREE);
-extern tree bump_vector_ptr (tree, gimple *, gimple_stmt_iterator *,
+extern tree bump_vector_ptr (vec_info *, tree, gimple *, gimple_stmt_iterator *,
stmt_vec_info, tree);
extern void vect_copy_ref_info (tree, tree);
extern tree vect_create_destination_var (tree, tree);
extern bool vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT, bool);
extern bool vect_grouped_load_supported (tree, bool, unsigned HOST_WIDE_INT);
extern bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT, bool);
-extern void vect_permute_store_chain (vec<tree> ,unsigned int, stmt_vec_info,
- gimple_stmt_iterator *, vec<tree> *);
-extern tree vect_setup_realignment (stmt_vec_info, gimple_stmt_iterator *,
+extern void vect_permute_store_chain (vec_info *,
+ vec<tree>, unsigned int, stmt_vec_info,
+ gimple_stmt_iterator *, vec<tree> *);
+extern tree vect_setup_realignment (vec_info *,
+ stmt_vec_info, gimple_stmt_iterator *,
tree *, enum dr_alignment_support, tree,
class loop **);
-extern void vect_transform_grouped_load (stmt_vec_info, vec<tree> , int,
- gimple_stmt_iterator *);
-extern void vect_record_grouped_load_vectors (stmt_vec_info, vec<tree>);
+extern void vect_transform_grouped_load (vec_info *, stmt_vec_info, vec<tree>,
+ int, gimple_stmt_iterator *);
+extern void vect_record_grouped_load_vectors (vec_info *,
+ stmt_vec_info, vec<tree>);
extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
extern tree vect_get_new_ssa_name (tree, enum vect_var_kind,
const char * = NULL);
-extern tree vect_create_addr_base_for_vector_ref (stmt_vec_info, gimple_seq *,
+extern tree vect_create_addr_base_for_vector_ref (vec_info *,
+ stmt_vec_info, gimple_seq *,
tree, tree = NULL_TREE);
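A sketch of the corresponding update for a data-reference caller, assuming aggr_type, at_loop, offset, dummy, gsi and ptr_incr are locals matching the prototype above:

  tree dataref_ptr
    = vect_create_data_ref_ptr (vinfo, first_stmt_info, aggr_type,
				at_loop, offset, &dummy, gsi,
				&ptr_incr, false);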
/* In tree-vect-loop.c. */
extern void vect_record_loop_mask (loop_vec_info, vec_loop_masks *,
unsigned int, tree, tree);
extern tree vect_get_loop_mask (gimple_stmt_iterator *, vec_loop_masks *,
unsigned int, tree, unsigned int);
-extern stmt_vec_info info_for_reduction (stmt_vec_info);
+extern stmt_vec_info info_for_reduction (vec_info *, stmt_vec_info);
/* Drive for loop transformation stage. */
extern class loop *vect_transform_loop (loop_vec_info, gimple *);
extern opt_loop_vec_info vect_analyze_loop_form (class loop *,
vec_info_shared *);
-extern bool vectorizable_live_operation (stmt_vec_info, gimple_stmt_iterator *,
+extern bool vectorizable_live_operation (loop_vec_info,
+ stmt_vec_info, gimple_stmt_iterator *,
slp_tree, slp_instance, int,
bool, stmt_vector_for_cost *);
-extern bool vectorizable_reduction (stmt_vec_info, slp_tree, slp_instance,
+extern bool vectorizable_reduction (loop_vec_info, stmt_vec_info,
+ slp_tree, slp_instance,
stmt_vector_for_cost *);
-extern bool vectorizable_induction (stmt_vec_info, gimple_stmt_iterator *,
+extern bool vectorizable_induction (loop_vec_info, stmt_vec_info,
+ gimple_stmt_iterator *,
stmt_vec_info *, slp_tree,
stmt_vector_for_cost *);
-extern bool vect_transform_reduction (stmt_vec_info, gimple_stmt_iterator *,
+extern bool vect_transform_reduction (loop_vec_info, stmt_vec_info,
+ gimple_stmt_iterator *,
stmt_vec_info *, slp_tree);
-extern bool vect_transform_cycle_phi (stmt_vec_info, stmt_vec_info *,
+extern bool vect_transform_cycle_phi (loop_vec_info, stmt_vec_info,
+ stmt_vec_info *,
slp_tree, slp_instance);
-extern bool vectorizable_lc_phi (stmt_vec_info, stmt_vec_info *, slp_tree);
+extern bool vectorizable_lc_phi (loop_vec_info, stmt_vec_info,
+ stmt_vec_info *, slp_tree);
extern bool vect_worthwhile_without_simd_p (vec_info *, tree_code);
extern int vect_get_known_peeling_cost (loop_vec_info, int, int *,
stmt_vector_for_cost *,
stmt_vector_for_cost *);
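Note that the loop-only routines above now take loop_vec_info rather than a plain vec_info, so their callers drop the stmt_info->vinfo indirection without needing a dyn_cast; e.g. a sketch of an analysis-phase call:

  if (!vectorizable_reduction (loop_vinfo, stmt_info, slp_node,
			       slp_node_instance, &cost_vec))
    return false;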
/* In tree-vect-slp.c. */
extern void vect_free_slp_instance (slp_instance, bool);
-extern bool vect_transform_slp_perm_load (slp_tree, vec<tree> ,
+extern bool vect_transform_slp_perm_load (vec_info *, slp_tree, vec<tree>,
gimple_stmt_iterator *, poly_uint64,
slp_instance, bool, unsigned *);
extern bool vect_slp_analyze_operations (vec_info *);
extern opt_result vect_analyze_slp (vec_info *, unsigned);
extern bool vect_make_slp_decision (loop_vec_info);
extern void vect_detect_hybrid_slp (loop_vec_info);
-extern void vect_get_slp_defs (slp_tree, vec<vec<tree> > *, unsigned n = -1U);
+extern void vect_get_slp_defs (vec_info *, slp_tree, vec<vec<tree> > *,
+ unsigned n = -1U);
extern bool vect_slp_bb (basic_block);
extern stmt_vec_info vect_find_last_scalar_stmt_in_slp (slp_tree);
extern bool is_simple_and_all_uses_invariant (stmt_vec_info, loop_vec_info);
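On the SLP side the pattern is identical; a minimal sketch of collecting defs with the updated interface:

  vec<vec<tree> > vec_defs = vNULL;
  vect_get_slp_defs (vinfo, slp_node, &vec_defs);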