+2018-07-31 Richard Sandiford <richard.sandiford@arm.com>
+
+ * tree-vectorizer.h (nested_in_vect_loop_p): Move further down
+ file and take a stmt_vec_info instead of a gimple stmt.
+ (supportable_widening_operation, vect_finish_replace_stmt)
+ (vect_finish_stmt_generation, vect_get_store_rhs)
+ (vect_get_vec_def_for_operand_1, vect_get_vec_def_for_operand)
+ (vect_get_vec_defs, vect_init_vector, vect_transform_stmt)
+ (vect_remove_stores, vect_analyze_stmt, vectorizable_condition)
+ (vect_get_smallest_scalar_type, vect_check_gather_scatter)
+ (vect_create_data_ref_ptr, bump_vector_ptr)
+ (vect_permute_store_chain, vect_setup_realignment)
+ (vect_transform_grouped_load, vect_record_grouped_load_vectors)
+ (vect_create_addr_base_for_vector_ref, vectorizable_live_operation)
+ (vectorizable_reduction, vectorizable_induction)
+ (get_initial_def_for_reduction, is_simple_and_all_uses_invariant)
+ (vect_get_place_in_interleaving_chain): Take stmt_vec_infos rather
+ than gimple stmts as arguments.
+ * tree-vect-data-refs.c (vect_get_smallest_scalar_type)
+ (vect_preserves_scalar_order_p, vect_slp_analyze_node_dependences)
+ (can_group_stmts_p, vect_check_gather_scatter)
+ (vect_create_addr_base_for_vector_ref, vect_create_data_ref_ptr)
+ (bump_vector_ptr, vect_permute_store_chain, vect_setup_realignment)
+ (vect_permute_load_chain, vect_shift_permute_load_chain)
+ (vect_transform_grouped_load)
+ (vect_record_grouped_load_vectors): Likewise.
+ * tree-vect-loop.c (vect_fixup_reduc_chain)
+ (get_initial_def_for_reduction, vect_create_epilog_for_reduction)
+ (vectorize_fold_left_reduction, is_nonwrapping_integer_induction)
+ (vectorizable_reduction, vectorizable_induction)
+ (vectorizable_live_operation, vect_loop_kill_debug_uses): Likewise.
+ * tree-vect-patterns.c (type_conversion_p, adjust_bool_stmts)
+ (vect_get_load_store_mask): Likewise.
+ * tree-vect-slp.c (vect_get_place_in_interleaving_chain)
+ (vect_analyze_slp_instance, vect_mask_constant_operand_p): Likewise.
+ * tree-vect-stmts.c (vect_mark_relevant)
+ (is_simple_and_all_uses_invariant)
+ (exist_non_indexing_operands_for_use_p, process_use)
+ (vect_init_vector_1, vect_init_vector, vect_get_vec_def_for_operand_1)
+ (vect_get_vec_def_for_operand, vect_get_vec_defs)
+ (vect_finish_stmt_generation_1, vect_finish_replace_stmt)
+ (vect_finish_stmt_generation, vect_truncate_gather_scatter_offset)
+ (compare_step_with_zero, vect_get_store_rhs, get_group_load_store_type)
+ (get_negative_load_store_type, get_load_store_type)
+ (vect_check_load_store_mask, vect_check_store_rhs)
+ (vect_build_gather_load_calls, vect_get_strided_load_store_ops)
+ (vectorizable_bswap, vectorizable_call, vectorizable_simd_clone_call)
+ (vect_create_vectorized_demotion_stmts, vectorizable_conversion)
+ (vectorizable_assignment, vectorizable_shift, vectorizable_operation)
+ (get_group_alias_ptr_type, vectorizable_store, hoist_defs_of_uses)
+ (vectorizable_load, vectorizable_condition, vectorizable_comparison)
+ (vect_analyze_stmt, vect_transform_stmt, vect_remove_stores)
+ (supportable_widening_operation): Likewise.
+
2018-07-31 Richard Sandiford <richard.sandiford@arm.com>
* tree-vect-data-refs.c (vect_describe_gather_scatter_call): Take
}
-/* Return the smallest scalar part of STMT.
+/* Return the smallest scalar part of STMT_INFO.
This is used to determine the vectype of the stmt. We generally set the
vectype according to the type of the result (lhs). For stmts whose
result-type is different than the type of the arguments (e.g., demotion,
types. */
tree
-vect_get_smallest_scalar_type (gimple *stmt, HOST_WIDE_INT *lhs_size_unit,
- HOST_WIDE_INT *rhs_size_unit)
+vect_get_smallest_scalar_type (stmt_vec_info stmt_info,
+ HOST_WIDE_INT *lhs_size_unit,
+ HOST_WIDE_INT *rhs_size_unit)
{
- tree scalar_type = gimple_expr_type (stmt);
+ tree scalar_type = gimple_expr_type (stmt_info->stmt);
HOST_WIDE_INT lhs, rhs;
/* During the analysis phase, this function is called on arbitrary
lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
- gassign *assign = dyn_cast <gassign *> (stmt);
+ gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
if (assign
&& (gimple_assign_cast_p (assign)
|| gimple_assign_rhs_code (assign) == DOT_PROD_EXPR
LOOP_VINFO_CHECK_NONZERO (loop_vinfo).safe_push (value);
}
-/* Return true if we know that the order of vectorized STMT_A and
- vectorized STMT_B will be the same as the order of STMT_A and STMT_B.
- At least one of the statements is a write. */
+/* Return true if we know that the order of vectorized STMTINFO_A and
+ vectorized STMTINFO_B will be the same as the order of STMTINFO_A and
+ STMTINFO_B. At least one of the statements is a write. */
static bool
-vect_preserves_scalar_order_p (gimple *stmt_a, gimple *stmt_b)
+vect_preserves_scalar_order_p (stmt_vec_info stmtinfo_a,
+ stmt_vec_info stmtinfo_b)
{
- stmt_vec_info stmtinfo_a = vinfo_for_stmt (stmt_a);
- stmt_vec_info stmtinfo_b = vinfo_for_stmt (stmt_b);
-
/* Single statements are always kept in their original order. */
if (!STMT_VINFO_GROUPED_ACCESS (stmtinfo_a)
&& !STMT_VINFO_GROUPED_ACCESS (stmtinfo_b))
static bool
vect_slp_analyze_node_dependences (slp_instance instance, slp_tree node,
vec<stmt_vec_info> stores,
- gimple *last_store)
+ stmt_vec_info last_store_info)
{
/* This walks over all stmts involved in the SLP load/store done
in NODE verifying we can sink them up to the last stmt in the
been sunk to (and we verify if we can do that as well). */
if (gimple_visited_p (stmt))
{
- if (stmt_info != last_store)
+ if (stmt_info != last_store_info)
continue;
unsigned i;
stmt_vec_info store_info;
return gimple_assign_rhs1 (stmt);
}
-/* Return true if vectorizable_* routines can handle statements STMT1
- and STMT2 being in a single group. */
+/* Return true if vectorizable_* routines can handle statements STMT1_INFO
+ and STMT2_INFO being in a single group. */
static bool
-can_group_stmts_p (gimple *stmt1, gimple *stmt2)
+can_group_stmts_p (stmt_vec_info stmt1_info, stmt_vec_info stmt2_info)
{
- if (gimple_assign_single_p (stmt1))
- return gimple_assign_single_p (stmt2);
+ if (gimple_assign_single_p (stmt1_info->stmt))
+ return gimple_assign_single_p (stmt2_info->stmt);
- gcall *call1 = dyn_cast <gcall *> (stmt1);
+ gcall *call1 = dyn_cast <gcall *> (stmt1_info->stmt);
if (call1 && gimple_call_internal_p (call1))
{
/* Check for two masked loads or two masked stores. */
- gcall *call2 = dyn_cast <gcall *> (stmt2);
+ gcall *call2 = dyn_cast <gcall *> (stmt2_info->stmt);
if (!call2 || !gimple_call_internal_p (call2))
return false;
internal_fn ifn = gimple_call_internal_fn (call1);
info->memory_type = TREE_TYPE (DR_REF (dr));
}
-/* Return true if a non-affine read or write in STMT is suitable for a
+/* Return true if a non-affine read or write in STMT_INFO is suitable for a
gather load or scatter store. Describe the operation in *INFO if so. */
bool
-vect_check_gather_scatter (gimple *stmt, loop_vec_info loop_vinfo,
+vect_check_gather_scatter (stmt_vec_info stmt_info, loop_vec_info loop_vinfo,
gather_scatter_info *info)
{
HOST_WIDE_INT scale = 1;
poly_int64 pbitpos, pbitsize;
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
tree offtype = NULL_TREE;
tree decl = NULL_TREE, base, off;
that will be accessed for a data reference.
Input:
- STMT: The statement containing the data reference.
+ STMT_INFO: The statement containing the data reference.
NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
OFFSET: Optional. If supplied, it is be added to the initial address.
LOOP: Specify relative to which loop-nest should the address be computed.
FORNOW: We are only handling array accesses with step 1. */
tree
-vect_create_addr_base_for_vector_ref (gimple *stmt,
+vect_create_addr_base_for_vector_ref (stmt_vec_info stmt_info,
gimple_seq *new_stmt_list,
tree offset,
tree byte_offset)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
const char *base_name;
tree addr_base;
/* Function vect_create_data_ref_ptr.
Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first
- location accessed in the loop by STMT, along with the def-use update
+ location accessed in the loop by STMT_INFO, along with the def-use update
chain to appropriately advance the pointer through the loop iterations.
Also set aliasing information for the pointer. This pointer is used by
the callers to this function to create a memory reference expression for
vector load/store access.
Input:
- 1. STMT: a stmt that references memory. Expected to be of the form
+ 1. STMT_INFO: a stmt that references memory. Expected to be of the form
GIMPLE_ASSIGN <name, data-ref> or
GIMPLE_ASSIGN <data-ref, name>.
2. AGGR_TYPE: the type of the reference, which should be either a vector
or an array.
3. AT_LOOP: the loop where the vector memref is to be created.
4. OFFSET (optional): an offset to be added to the initial address accessed
- by the data-ref in STMT.
+ by the data-ref in STMT_INFO.
5. BSI: location where the new stmts are to be placed if there is no loop
6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain
pointing to the initial address.
7. BYTE_OFFSET (optional, defaults to NULL): a byte offset to be added
- to the initial address accessed by the data-ref in STMT. This is
+ to the initial address accessed by the data-ref in STMT_INFO. This is
similar to OFFSET, but OFFSET is counted in elements, while BYTE_OFFSET
in bytes.
8. IV_STEP (optional, defaults to NULL): the amount that should be added
4. Return the pointer. */
tree
-vect_create_data_ref_ptr (gimple *stmt, tree aggr_type, struct loop *at_loop,
- tree offset, tree *initial_address,
- gimple_stmt_iterator *gsi, gimple **ptr_incr,
- bool only_init, bool *inv_p, tree byte_offset,
- tree iv_step)
+vect_create_data_ref_ptr (stmt_vec_info stmt_info, tree aggr_type,
+ struct loop *at_loop, tree offset,
+ tree *initial_address, gimple_stmt_iterator *gsi,
+ gimple **ptr_incr, bool only_init, bool *inv_p,
+ tree byte_offset, tree iv_step)
{
const char *base_name;
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = NULL;
bool nested_in_vect_loop = false;
the loop. The increment amount across iterations is expected
to be vector_size.
BSI - location where the new update stmt is to be placed.
- STMT - the original scalar memory-access stmt that is being vectorized.
+ STMT_INFO - the original scalar memory-access stmt that is being vectorized.
BUMP - optional. The offset by which to bump the pointer. If not given,
the offset is assumed to be vector_size.
tree
bump_vector_ptr (tree dataref_ptr, gimple *ptr_incr, gimple_stmt_iterator *gsi,
- gimple *stmt, tree bump)
+ stmt_vec_info stmt_info, tree bump)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
tree update = TYPE_SIZE_UNIT (vectype);
void
vect_permute_store_chain (vec<tree> dr_chain,
unsigned int length,
- gimple *stmt,
+ stmt_vec_info stmt_info,
gimple_stmt_iterator *gsi,
vec<tree> *result_chain)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree vect1, vect2, high, low;
gimple *perm_stmt;
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
dr_explicit_realign_optimized.
The code above sets up a new (vector) pointer, pointing to the first
- location accessed by STMT, and a "floor-aligned" load using that pointer.
- It also generates code to compute the "realignment-token" (if the relevant
- target hook was defined), and creates a phi-node at the loop-header bb
- whose arguments are the result of the prolog-load (created by this
- function) and the result of a load that takes place in the loop (to be
- created by the caller to this function).
+ location accessed by STMT_INFO, and a "floor-aligned" load using that
+ pointer. It also generates code to compute the "realignment-token"
+ (if the relevant target hook was defined), and creates a phi-node at the
+ loop-header bb whose arguments are the result of the prolog-load (created
+ by this function) and the result of a load that takes place in the loop
+ (to be created by the caller to this function).
For the case of dr_explicit_realign_optimized:
The caller to this function uses the phi-result (msq) to create the
result = realign_load (msq, lsq, realignment_token);
Input:
- STMT - (scalar) load stmt to be vectorized. This load accesses
- a memory location that may be unaligned.
+ STMT_INFO - (scalar) load stmt to be vectorized. This load accesses
+ a memory location that may be unaligned.
BSI - place where new code is to be inserted.
ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
is used.
Return value - the result of the loop-header phi node. */
tree
-vect_setup_realignment (gimple *stmt, gimple_stmt_iterator *gsi,
+vect_setup_realignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
tree *realignment_token,
enum dr_alignment_support alignment_support_scheme,
tree init_addr,
struct loop **at_loop)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
static void
vect_permute_load_chain (vec<tree> dr_chain,
unsigned int length,
- gimple *stmt,
+ stmt_vec_info stmt_info,
gimple_stmt_iterator *gsi,
vec<tree> *result_chain)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree data_ref, first_vect, second_vect;
tree perm_mask_even, perm_mask_odd;
tree perm3_mask_low, perm3_mask_high;
static bool
vect_shift_permute_load_chain (vec<tree> dr_chain,
unsigned int length,
- gimple *stmt,
+ stmt_vec_info stmt_info,
gimple_stmt_iterator *gsi,
vec<tree> *result_chain)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree vect[3], vect_shift[3], data_ref, first_vect, second_vect;
tree perm2_mask1, perm2_mask2, perm3_mask;
tree select_mask, shift1_mask, shift2_mask, shift3_mask, shift4_mask;
*/
void
-vect_transform_grouped_load (gimple *stmt, vec<tree> dr_chain, int size,
- gimple_stmt_iterator *gsi)
+vect_transform_grouped_load (stmt_vec_info stmt_info, vec<tree> dr_chain,
+ int size, gimple_stmt_iterator *gsi)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
machine_mode mode;
vec<tree> result_chain = vNULL;
}
/* RESULT_CHAIN contains the output of a group of grouped loads that were
- generated as part of the vectorization of STMT. Assign the statement
+ generated as part of the vectorization of STMT_INFO. Assign the statement
for each vector to the associated scalar statement. */
void
-vect_record_grouped_load_vectors (gimple *stmt, vec<tree> result_chain)
+vect_record_grouped_load_vectors (stmt_vec_info stmt_info,
+ vec<tree> result_chain)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
vec_info *vinfo = stmt_info->vinfo;
stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
unsigned int i, gap_count;
vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
}
-/* Transfer group and reduction information from STMT to its pattern stmt. */
+/* Transfer group and reduction information from STMT_INFO to its
+ pattern stmt. */
static void
-vect_fixup_reduc_chain (gimple *stmt)
+vect_fixup_reduc_chain (stmt_vec_info stmt_info)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
stmt_vec_info firstp = STMT_VINFO_RELATED_STMT (stmt_info);
stmt_vec_info stmtp;
gcc_assert (!REDUC_GROUP_FIRST_ELEMENT (firstp)
/* Function get_initial_def_for_reduction
Input:
- STMT - a stmt that performs a reduction operation in the loop.
+ STMT_VINFO - a stmt that performs a reduction operation in the loop.
INIT_VAL - the initial value of the reduction variable
Output:
ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
of the reduction (used for adjusting the epilog - see below).
- Return a vector variable, initialized according to the operation that STMT
- performs. This vector will be used as the initial value of the
- vector of partial results.
+ Return a vector variable, initialized according to the operation that
+ STMT_VINFO performs. This vector will be used as the initial value
+ of the vector of partial results.
Option1 (adjust in epilog): Initialize the vector as follows:
add/bit or/xor: [0,0,...,0,0]
for (i=0;i<n;i++)
s = s + a[i];
- STMT is 's = s + a[i]', and the reduction variable is 's'.
+ STMT_VINFO is 's = s + a[i]', and the reduction variable is 's'.
For a vector of 4 units, we want to return either [0,0,0,init_val],
or [0,0,0,0] and let the caller know that it needs to adjust
the result at the end by 'init_val'.
A cost model should help decide between these two schemes. */
tree
-get_initial_def_for_reduction (gimple *stmt, tree init_val,
+get_initial_def_for_reduction (stmt_vec_info stmt_vinfo, tree init_val,
tree *adjustment_def)
{
- stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree scalar_type = TREE_TYPE (init_val);
VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
reduction statements.
- STMT is the scalar reduction stmt that is being vectorized.
+ STMT_INFO is the scalar reduction stmt that is being vectorized.
NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
number of elements that we can fit in a vectype (nunits). In this case
we have to generate more than one vector stmt - i.e - we need to "unroll"
statement that is defined by REDUCTION_PHI.
DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
SLP_NODE is an SLP node containing a group of reduction statements. The
- first one in this group is STMT.
+ first one in this group is STMT_INFO.
INDUC_VAL is for INTEGER_INDUC_COND_REDUCTION the value to use for the case
when the COND_EXPR is never true in the loop. For MAX_EXPR, it needs to
be smaller than any value of the IV in the loop, for MIN_EXPR larger than
loop:
vec_def = phi <null, null> # REDUCTION_PHI
- VECT_DEF = vector_stmt # vectorized form of STMT
- s_loop = scalar_stmt # (scalar) STMT
+ VECT_DEF = vector_stmt # vectorized form of STMT_INFO
+ s_loop = scalar_stmt # (scalar) STMT_INFO
loop_exit:
s_out0 = phi <s_loop> # (scalar) EXIT_PHI
use <s_out0>
loop:
vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
- VECT_DEF = vector_stmt # vectorized form of STMT
- s_loop = scalar_stmt # (scalar) STMT
+ VECT_DEF = vector_stmt # vectorized form of STMT_INFO
+ s_loop = scalar_stmt # (scalar) STMT_INFO
loop_exit:
s_out0 = phi <s_loop> # (scalar) EXIT_PHI
v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
*/
static void
-vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
+vect_create_epilog_for_reduction (vec<tree> vect_defs,
+ stmt_vec_info stmt_info,
gimple *reduc_def_stmt,
int ncopies, internal_fn reduc_fn,
vec<stmt_vec_info> reduction_phis,
tree induc_val, enum tree_code induc_code,
tree neutral_op)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
stmt_vec_info prev_phi_info;
tree vectype;
machine_mode mode;
return lhs;
}
-/* Perform an in-order reduction (FOLD_LEFT_REDUCTION). STMT is the
+/* Perform an in-order reduction (FOLD_LEFT_REDUCTION). STMT_INFO is the
statement that sets the live-out value. REDUC_DEF_STMT is the phi
- statement. CODE is the operation performed by STMT and OPS are
+ statement. CODE is the operation performed by STMT_INFO and OPS are
its scalar operands. REDUC_INDEX is the index of the operand in
OPS that is set by REDUC_DEF_STMT. REDUC_FN is the function that
implements in-order reduction, or IFN_LAST if we should open-code it.
that should be used to control the operation in a fully-masked loop. */
static bool
-vectorize_fold_left_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
+vectorize_fold_left_reduction (stmt_vec_info stmt_info,
+ gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt, slp_tree slp_node,
gimple *reduc_def_stmt,
tree_code code, internal_fn reduc_fn,
tree ops[3], tree vectype_in,
int reduc_index, vec_loop_masks *masks)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
/* Function is_nonwrapping_integer_induction.
- Check if STMT (which is part of loop LOOP) both increments and
+ Check if STMT_VINFO (which is part of loop LOOP) both increments and
does not cause overflow. */
static bool
-is_nonwrapping_integer_induction (gimple *stmt, struct loop *loop)
+is_nonwrapping_integer_induction (stmt_vec_info stmt_vinfo, struct loop *loop)
{
- stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
+ gphi *phi = as_a <gphi *> (stmt_vinfo->stmt);
tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo);
tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo);
- tree lhs_type = TREE_TYPE (gimple_phi_result (stmt));
+ tree lhs_type = TREE_TYPE (gimple_phi_result (phi));
widest_int ni, max_loop_value, lhs_max;
wi::overflow_type overflow = wi::OVF_NONE;
/* Function vectorizable_reduction.
- Check if STMT performs a reduction operation that can be vectorized.
- If VEC_STMT is also passed, vectorize the STMT: create a vectorized
+ Check if STMT_INFO performs a reduction operation that can be vectorized.
+ If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
stmt to replace it, put it in VEC_STMT, and insert it at GSI.
- Return FALSE if not a vectorizable STMT, TRUE otherwise.
+ Return true if STMT_INFO is vectorizable in this way.
This function also handles reduction idioms (patterns) that have been
- recognized in advance during vect_pattern_recog. In this case, STMT may be
- of this form:
+ recognized in advance during vect_pattern_recog. In this case, STMT_INFO
+ may be of this form:
X = pattern_expr (arg0, arg1, ..., X)
- and it's STMT_VINFO_RELATED_STMT points to the last stmt in the original
- sequence that had been detected and replaced by the pattern-stmt (STMT).
+ and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
+ sequence that had been detected and replaced by the pattern-stmt
+ (STMT_INFO).
This function also handles reduction of condition expressions, for example:
for (int i = 0; i < N; i++)
index into the vector of results.
In some cases of reduction patterns, the type of the reduction variable X is
- different than the type of the other arguments of STMT.
- In such cases, the vectype that is used when transforming STMT into a vector
- stmt is different than the vectype that is used to determine the
+ different than the type of the other arguments of STMT_INFO.
+ In such cases, the vectype that is used when transforming STMT_INFO into
+ a vector stmt is different than the vectype that is used to determine the
vectorization factor, because it consists of a different number of elements
than the actual number of elements that are being operated upon in parallel.
does *NOT* necessarily hold for reduction patterns. */
bool
-vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
+vectorizable_reduction (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt, slp_tree slp_node,
slp_instance slp_node_instance,
stmt_vector_for_cost *cost_vec)
{
tree vec_dest;
tree scalar_dest;
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
tree vectype_in = NULL_TREE;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
inside the loop body. The last operand is the reduction variable,
which is defined by the loop-header-phi. */
- gcc_assert (is_gimple_assign (stmt));
+ gassign *stmt = as_a <gassign *> (stmt_info->stmt);
/* Flatten RHS. */
switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
/* Function vectorizable_induction
- Check if PHI performs an induction computation that can be vectorized.
+ Check if STMT_INFO performs an induction computation that can be vectorized.
If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
phi to replace it, put it in VEC_STMT, and add it to the same basic block.
- Return FALSE if not a vectorizable STMT, TRUE otherwise. */
+ Return true if STMT_INFO is vectorizable in this way. */
bool
-vectorizable_induction (gimple *phi,
+vectorizable_induction (stmt_vec_info stmt_info,
gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
stmt_vec_info *vec_stmt, slp_tree slp_node,
stmt_vector_for_cost *cost_vec)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (phi);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
unsigned ncopies;
edge latch_e;
tree loop_arg;
gimple_stmt_iterator si;
- basic_block bb = gimple_bb (phi);
- if (gimple_code (phi) != GIMPLE_PHI)
+ gphi *phi = dyn_cast <gphi *> (stmt_info->stmt);
+ if (!phi)
return false;
if (!STMT_VINFO_RELEVANT_P (stmt_info))
}
/* Find the first insertion point in the BB. */
+ basic_block bb = gimple_bb (phi);
si = gsi_after_labels (bb);
/* For SLP induction we have to generate several IVs as for example
/* Function vectorizable_live_operation.
- STMT computes a value that is used outside the loop. Check if
+ STMT_INFO computes a value that is used outside the loop. Check if
it can be supported. */
bool
-vectorizable_live_operation (gimple *stmt,
+vectorizable_live_operation (stmt_vec_info stmt_info,
gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
slp_tree slp_node, int slp_index,
stmt_vec_info *vec_stmt,
stmt_vector_for_cost *)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
imm_use_iterator imm_iter;
}
/* If stmt has a related stmt, then use that for getting the lhs. */
- if (is_pattern_stmt_p (stmt_info))
- stmt = STMT_VINFO_RELATED_STMT (stmt_info);
+ gimple *stmt = (is_pattern_stmt_p (stmt_info)
+ ? STMT_VINFO_RELATED_STMT (stmt_info)->stmt
+ : stmt_info->stmt);
lhs = (is_a <gphi *> (stmt)) ? gimple_phi_result (stmt)
: gimple_get_lhs (stmt);
return true;
}
-/* Kill any debug uses outside LOOP of SSA names defined in STMT. */
+/* Kill any debug uses outside LOOP of SSA names defined in STMT_INFO. */
static void
-vect_loop_kill_debug_uses (struct loop *loop, gimple *stmt)
+vect_loop_kill_debug_uses (struct loop *loop, stmt_vec_info stmt_info)
{
ssa_op_iter op_iter;
imm_use_iterator imm_iter;
def_operand_p def_p;
gimple *ustmt;
- FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
+ FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt_info->stmt, op_iter, SSA_OP_DEF)
{
FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
{
return NULL;
}
-/* Check whether NAME, an ssa-name used in USE_STMT,
+/* Check whether NAME, an ssa-name used in STMT_VINFO,
is a result of a type promotion, such that:
DEF_STMT: NAME = NOP (name0)
If CHECK_SIGN is TRUE, check that either both types are signed or both are
unsigned. */
static bool
-type_conversion_p (tree name, gimple *use_stmt, bool check_sign,
+type_conversion_p (tree name, stmt_vec_info stmt_vinfo, bool check_sign,
tree *orig_type, gimple **def_stmt, bool *promotion)
{
- stmt_vec_info stmt_vinfo;
tree type = TREE_TYPE (name);
tree oprnd0;
enum vect_def_type dt;
- stmt_vinfo = vinfo_for_stmt (use_stmt);
stmt_vec_info def_stmt_info;
if (!vect_is_simple_use (name, stmt_vinfo->vinfo, &dt, &def_stmt_info,
def_stmt))
}
/* Create pattern stmts for all stmts participating in the bool pattern
- specified by BOOL_STMT_SET and its root STMT with the desired type
+ specified by BOOL_STMT_SET and its root STMT_INFO with the desired type
OUT_TYPE. Return the def of the pattern root. */
static tree
adjust_bool_stmts (hash_set <gimple *> &bool_stmt_set,
- tree out_type, gimple *stmt)
+ tree out_type, stmt_vec_info stmt_info)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
-
/* Gather original stmts in the bool pattern in their order of appearance
in the IL. */
auto_vec<gimple *> bool_stmts (bool_stmt_set.elements ());
return pattern_stmt;
}
-/* STMT is a load or store. If the load or store is conditional, return
+/* STMT_INFO is a load or store. If the load or store is conditional, return
the boolean condition under which it occurs, otherwise return null. */
static tree
-vect_get_load_store_mask (gimple *stmt)
+vect_get_load_store_mask (stmt_vec_info stmt_info)
{
- if (gassign *def_assign = dyn_cast <gassign *> (stmt))
+ if (gassign *def_assign = dyn_cast <gassign *> (stmt_info->stmt))
{
gcc_assert (gimple_assign_single_p (def_assign));
return NULL_TREE;
}
- if (gcall *def_call = dyn_cast <gcall *> (stmt))
+ if (gcall *def_call = dyn_cast <gcall *> (stmt_info->stmt))
{
internal_fn ifn = gimple_call_internal_fn (def_call);
int mask_index = internal_fn_mask_index (ifn);
}
-/* Find the place of the data-ref in STMT in the interleaving chain that starts
- from FIRST_STMT. Return -1 if the data-ref is not a part of the chain. */
+/* Find the place of the data-ref in STMT_INFO in the interleaving chain
+ that starts from FIRST_STMT_INFO. Return -1 if the data-ref is not a part
+ of the chain. */
int
-vect_get_place_in_interleaving_chain (gimple *stmt, gimple *first_stmt)
+vect_get_place_in_interleaving_chain (stmt_vec_info stmt_info,
+ stmt_vec_info first_stmt_info)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
- stmt_vec_info first_stmt_info = vinfo_for_stmt (first_stmt);
stmt_vec_info next_stmt_info = first_stmt_info;
int result = 0;
static bool
vect_analyze_slp_instance (vec_info *vinfo,
- gimple *stmt, unsigned max_tree_size)
+ stmt_vec_info stmt_info, unsigned max_tree_size)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
slp_instance new_instance;
slp_tree node;
unsigned int group_size;
/* Return 1 if vector type of boolean constant which is OPNUM
- operand in statement STMT is a boolean vector. */
+ operand in statement STMT_VINFO is a boolean vector. */
static bool
-vect_mask_constant_operand_p (gimple *stmt, int opnum)
+vect_mask_constant_operand_p (stmt_vec_info stmt_vinfo, int opnum)
{
- stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
- enum tree_code code = gimple_expr_code (stmt);
+ enum tree_code code = gimple_expr_code (stmt_vinfo->stmt);
tree op, vectype;
enum vect_def_type dt;
on the other comparison operand. */
if (TREE_CODE_CLASS (code) == tcc_comparison)
{
+ gassign *stmt = as_a <gassign *> (stmt_vinfo->stmt);
if (opnum)
op = gimple_assign_rhs1 (stmt);
else
if (code == COND_EXPR)
{
+ gassign *stmt = as_a <gassign *> (stmt_vinfo->stmt);
tree cond = gimple_assign_rhs1 (stmt);
if (TREE_CODE (cond) == SSA_NAME)
/* Function vect_mark_relevant.
- Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
+ Mark STMT_INFO as "relevant for vectorization" and add it to WORKLIST. */
static void
-vect_mark_relevant (vec<stmt_vec_info> *worklist, gimple *stmt,
+vect_mark_relevant (vec<stmt_vec_info> *worklist, stmt_vec_info stmt_info,
enum vect_relevant relevant, bool live_p)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == old_stmt_info);
save_relevant = STMT_VINFO_RELEVANT (stmt_info);
save_live_p = STMT_VINFO_LIVE_P (stmt_info);
- stmt = stmt_info->stmt;
}
STMT_VINFO_LIVE_P (stmt_info) |= live_p;
/* Function is_simple_and_all_uses_invariant
- Return true if STMT is simple and all uses of it are invariant. */
+ Return true if STMT_INFO is simple and all uses of it are invariant. */
bool
-is_simple_and_all_uses_invariant (gimple *stmt, loop_vec_info loop_vinfo)
+is_simple_and_all_uses_invariant (stmt_vec_info stmt_info,
+ loop_vec_info loop_vinfo)
{
tree op;
ssa_op_iter iter;
- if (!is_gimple_assign (stmt))
+ gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
+ if (!stmt)
return false;
FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
/* Function exist_non_indexing_operands_for_use_p
- USE is one of the uses attached to STMT. Check if USE is
- used in STMT for anything other than indexing an array. */
+ USE is one of the uses attached to STMT_INFO. Check if USE is
+ used in STMT_INFO for anything other than indexing an array. */
static bool
-exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
+exist_non_indexing_operands_for_use_p (tree use, stmt_vec_info stmt_info)
{
tree operand;
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
/* USE corresponds to some operand in STMT. If there is no data
reference in STMT, then any operand that corresponds to USE
Function process_use.
Inputs:
- - a USE in STMT in a loop represented by LOOP_VINFO
+ - a USE in STMT_VINFO in a loop represented by LOOP_VINFO
- RELEVANT - enum value to be set in the STMT_VINFO of the stmt
that defined USE. This is done by calling mark_relevant and passing it
the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
Outputs:
Generally, LIVE_P and RELEVANT are used to define the liveness and
relevance info of the DEF_STMT of this USE:
- STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
- STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
+ STMT_VINFO_LIVE_P (DEF_stmt_vinfo) <-- live_p
+ STMT_VINFO_RELEVANT (DEF_stmt_vinfo) <-- relevant
Exceptions:
- case 1: If USE is used only for address computations (e.g. array indexing),
which does not need to be directly vectorized, then the liveness/relevance
of the respective DEF_STMT is left unchanged.
- - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
- skip DEF_STMT cause it had already been processed.
- - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
- be modified accordingly.
+ - case 2: If STMT_VINFO is a reduction phi and DEF_STMT is a reduction stmt,
+ we skip DEF_STMT cause it had already been processed.
+ - case 3: If DEF_STMT and STMT_VINFO are in different nests, then
+ "relevant" will be modified accordingly.
Return true if everything is as expected. Return false otherwise. */
static bool
-process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
+process_use (stmt_vec_info stmt_vinfo, tree use, loop_vec_info loop_vinfo,
enum vect_relevant relevant, vec<stmt_vec_info> *worklist,
bool force)
{
- stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
stmt_vec_info dstmt_vinfo;
basic_block bb, def_bb;
enum vect_def_type dt;
}
/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
- the loop preheader for the vectorized stmt STMT. */
+ the loop preheader for the vectorized stmt STMT_VINFO. */
static void
-vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
+vect_init_vector_1 (stmt_vec_info stmt_vinfo, gimple *new_stmt,
+ gimple_stmt_iterator *gsi)
{
- stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
if (gsi)
vect_finish_stmt_generation (stmt_vinfo, new_stmt, gsi);
else
Place the initialization at BSI if it is not NULL. Otherwise, place the
initialization at the loop preheader.
Return the DEF of INIT_STMT.
- It will be used in the vectorization of STMT. */
+ It will be used in the vectorization of STMT_INFO. */
tree
-vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
+vect_init_vector (stmt_vec_info stmt_info, tree val, tree type,
+ gimple_stmt_iterator *gsi)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
gimple *init_stmt;
tree new_temp;
/* Function vect_get_vec_def_for_operand_1.
- For a defining stmt DEF_STMT of a scalar stmt, return a vector def with type
- DT that will be used in the vectorized stmt. */
+ For a defining stmt DEF_STMT_INFO of a scalar stmt, return a vector def
+ with type DT that will be used in the vectorized stmt. */
tree
-vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt)
+vect_get_vec_def_for_operand_1 (stmt_vec_info def_stmt_info,
+ enum vect_def_type dt)
{
tree vec_oprnd;
stmt_vec_info vec_stmt_info;
- stmt_vec_info def_stmt_info = NULL;
switch (dt)
{
case vect_internal_def:
{
/* Get the def from the vectorized stmt. */
- def_stmt_info = vinfo_for_stmt (def_stmt);
-
vec_stmt_info = STMT_VINFO_VEC_STMT (def_stmt_info);
/* Get vectorized pattern statement. */
if (!vec_stmt_info
case vect_nested_cycle:
case vect_induction_def:
{
- gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
+ gcc_assert (gimple_code (def_stmt_info->stmt) == GIMPLE_PHI);
/* Get the def from the vectorized stmt. */
- def_stmt_info = vinfo_for_stmt (def_stmt);
vec_stmt_info = STMT_VINFO_VEC_STMT (def_stmt_info);
if (gphi *phi = dyn_cast <gphi *> (vec_stmt_info->stmt))
vec_oprnd = PHI_RESULT (phi);
/* Function vect_get_vec_def_for_operand.
- OP is an operand in STMT. This function returns a (vector) def that will be
- used in the vectorized stmt for STMT.
+ OP is an operand in STMT_VINFO. This function returns a (vector) def
+ that will be used in the vectorized stmt for STMT_VINFO.
In the case that OP is an SSA_NAME which is defined in the loop, then
STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
vector invariant. */
tree
-vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
+vect_get_vec_def_for_operand (tree op, stmt_vec_info stmt_vinfo, tree vectype)
{
gimple *def_stmt;
enum vect_def_type dt;
bool is_simple_use;
- stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
if (dump_enabled_p ())
/* Get vectorized definitions for OP0 and OP1. */
void
-vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
+vect_get_vec_defs (tree op0, tree op1, stmt_vec_info stmt_info,
vec<tree> *vec_oprnds0,
vec<tree> *vec_oprnds1,
slp_tree slp_node)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
if (slp_node)
{
int nops = (op1 == NULL_TREE) ? 1 : 2;
statement and create and return a stmt_vec_info for it. */
static stmt_vec_info
-vect_finish_stmt_generation_1 (gimple *stmt, gimple *vec_stmt)
+vect_finish_stmt_generation_1 (stmt_vec_info stmt_info, gimple *vec_stmt)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
vec_info *vinfo = stmt_info->vinfo;
stmt_vec_info vec_stmt_info = vinfo->add_stmt (vec_stmt);
return vec_stmt_info;
}
-/* Replace the scalar statement STMT with a new vector statement VEC_STMT,
- which sets the same scalar result as STMT did. Create and return a
+/* Replace the scalar statement STMT_INFO with a new vector statement VEC_STMT,
+ which sets the same scalar result as STMT_INFO did. Create and return a
stmt_vec_info for VEC_STMT. */
stmt_vec_info
-vect_finish_replace_stmt (gimple *stmt, gimple *vec_stmt)
+vect_finish_replace_stmt (stmt_vec_info stmt_info, gimple *vec_stmt)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
gcc_assert (gimple_get_lhs (stmt_info->stmt) == gimple_get_lhs (vec_stmt));
gimple_stmt_iterator gsi = gsi_for_stmt (stmt_info->stmt);
return vect_finish_stmt_generation_1 (stmt_info, vec_stmt);
}
-/* Add VEC_STMT to the vectorized implementation of STMT and insert it
+/* Add VEC_STMT to the vectorized implementation of STMT_INFO and insert it
before *GSI. Create and return a stmt_vec_info for VEC_STMT. */
stmt_vec_info
-vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
+vect_finish_stmt_generation (stmt_vec_info stmt_info, gimple *vec_stmt,
gimple_stmt_iterator *gsi)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
gcc_assert (gimple_code (stmt_info->stmt) != GIMPLE_LABEL);
if (!gsi_end_p (*gsi)
}
/* Determine whether we can use a gather load or scatter store to vectorize
- strided load or store STMT by truncating the current offset to a smaller
- width. We need to be able to construct an offset vector:
+ strided load or store STMT_INFO by truncating the current offset to a
+ smaller width. We need to be able to construct an offset vector:
{ 0, X, X*2, X*3, ... }
- without loss of precision, where X is STMT's DR_STEP.
+ without loss of precision, where X is STMT_INFO's DR_STEP.
Return true if this is possible, describing the gather load or scatter
store in GS_INFO. MASKED_P is true if the load or store is conditional. */
static bool
-vect_truncate_gather_scatter_offset (gimple *stmt, loop_vec_info loop_vinfo,
- bool masked_p,
+vect_truncate_gather_scatter_offset (stmt_vec_info stmt_info,
+ loop_vec_info loop_vinfo, bool masked_p,
gather_scatter_info *gs_info)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
tree step = DR_STEP (dr);
if (TREE_CODE (step) != INTEGER_CST)
return true;
}
-/* STMT is a non-strided load or store, meaning that it accesses
+/* STMT_INFO is a non-strided load or store, meaning that it accesses
elements with a known constant step. Return -1 if that step
is negative, 0 if it is zero, and 1 if it is greater than zero. */
static int
-compare_step_with_zero (gimple *stmt)
+compare_step_with_zero (stmt_vec_info stmt_info)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
return tree_int_cst_compare (vect_dr_behavior (dr)->step,
size_zero_node);
return vect_gen_perm_mask_checked (vectype, indices);
}
-/* STMT is either a masked or unconditional store. Return the value
+/* STMT_INFO is either a masked or unconditional store. Return the value
being stored. */
tree
-vect_get_store_rhs (gimple *stmt)
+vect_get_store_rhs (stmt_vec_info stmt_info)
{
- if (gassign *assign = dyn_cast <gassign *> (stmt))
+ if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt))
{
gcc_assert (gimple_assign_single_p (assign));
return gimple_assign_rhs1 (assign);
}
- if (gcall *call = dyn_cast <gcall *> (stmt))
+ if (gcall *call = dyn_cast <gcall *> (stmt_info->stmt))
{
internal_fn ifn = gimple_call_internal_fn (call);
int index = internal_fn_stored_value_index (ifn);
gcc_assert (index >= 0);
- return gimple_call_arg (stmt, index);
+ return gimple_call_arg (call, index);
}
gcc_unreachable ();
}
/* A subroutine of get_load_store_type, with a subset of the same
- arguments. Handle the case where STMT is part of a grouped load
+ arguments. Handle the case where STMT_INFO is part of a grouped load
or store.
For stores, the statements in the group are all consecutive
as well as at the end. */
static bool
-get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
+get_group_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp,
bool masked_p, vec_load_store_type vls_type,
vect_memory_access_type *memory_access_type,
gather_scatter_info *gs_info)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
vec_info *vinfo = stmt_info->vinfo;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
}
/* A subroutine of get_load_store_type, with a subset of the same
- arguments. Handle the case where STMT is a load or store that
+ arguments. Handle the case where STMT_INFO is a load or store that
accesses consecutive elements with a negative step. */
static vect_memory_access_type
-get_negative_load_store_type (gimple *stmt, tree vectype,
+get_negative_load_store_type (stmt_vec_info stmt_info, tree vectype,
vec_load_store_type vls_type,
unsigned int ncopies)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
dr_alignment_support alignment_support_scheme;
return VMAT_CONTIGUOUS_REVERSE;
}
-/* Analyze load or store statement STMT of type VLS_TYPE. Return true
+/* Analyze load or store statement STMT_INFO of type VLS_TYPE. Return true
if there is a memory access type that the vectorized form can use,
storing it in *MEMORY_ACCESS_TYPE if so. If we decide to use gathers
or scatters, fill in GS_INFO accordingly.
NCOPIES is the number of vector statements that will be needed. */
static bool
-get_load_store_type (gimple *stmt, tree vectype, bool slp, bool masked_p,
- vec_load_store_type vls_type, unsigned int ncopies,
+get_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp,
+ bool masked_p, vec_load_store_type vls_type,
+ unsigned int ncopies,
vect_memory_access_type *memory_access_type,
gather_scatter_info *gs_info)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
vec_info *vinfo = stmt_info->vinfo;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
}
/* Return true if boolean argument MASK is suitable for vectorizing
- conditional load or store STMT. When returning true, store the type
+ conditional load or store STMT_INFO. When returning true, store the type
of the definition in *MASK_DT_OUT and the type of the vectorized mask
in *MASK_VECTYPE_OUT. */
static bool
-vect_check_load_store_mask (gimple *stmt, tree mask,
+vect_check_load_store_mask (stmt_vec_info stmt_info, tree mask,
vect_def_type *mask_dt_out,
tree *mask_vectype_out)
{
return false;
}
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
enum vect_def_type mask_dt;
tree mask_vectype;
if (!vect_is_simple_use (mask, stmt_info->vinfo, &mask_dt, &mask_vectype))
}
/* Return true if stored value RHS is suitable for vectorizing store
- statement STMT. When returning true, store the type of the
+ statement STMT_INFO. When returning true, store the type of the
definition in *RHS_DT_OUT, the type of the vectorized store value in
*RHS_VECTYPE_OUT and the type of the store in *VLS_TYPE_OUT. */
static bool
-vect_check_store_rhs (gimple *stmt, tree rhs, vect_def_type *rhs_dt_out,
- tree *rhs_vectype_out, vec_load_store_type *vls_type_out)
+vect_check_store_rhs (stmt_vec_info stmt_info, tree rhs,
+ vect_def_type *rhs_dt_out, tree *rhs_vectype_out,
+ vec_load_store_type *vls_type_out)
{
/* In the case this is a store from a constant make sure
native_encode_expr can handle it. */
return false;
}
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
enum vect_def_type rhs_dt;
tree rhs_vectype;
if (!vect_is_simple_use (rhs, stmt_info->vinfo, &rhs_dt, &rhs_vectype))
return vect_init_vector (stmt_info, merge, vectype, NULL);
}
-/* Build a gather load call while vectorizing STMT. Insert new instructions
- before GSI and add them to VEC_STMT. GS_INFO describes the gather load
- operation. If the load is conditional, MASK is the unvectorized
- condition and MASK_DT is its definition type, otherwise MASK is null. */
+/* Build a gather load call while vectorizing STMT_INFO. Insert new
+ instructions before GSI and add them to VEC_STMT. GS_INFO describes
+ the gather load operation. If the load is conditional, MASK is the
+ unvectorized condition and MASK_DT is its definition type, otherwise
+ MASK is null. */
static void
-vect_build_gather_load_calls (gimple *stmt, gimple_stmt_iterator *gsi,
+vect_build_gather_load_calls (stmt_vec_info stmt_info,
+ gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt,
- gather_scatter_info *gs_info, tree mask,
- vect_def_type mask_dt)
+ gather_scatter_info *gs_info,
+ tree mask, vect_def_type mask_dt)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
/* Prepare to implement a grouped or strided load or store using
the gather load or scatter store operation described by GS_INFO.
- STMT is the load or store statement.
+ STMT_INFO is the load or store statement.
Set *DATAREF_BUMP to the amount that should be added to the base
address after each copy of the vectorized statement. Set *VEC_OFFSET
I * DR_STEP / SCALE. */
static void
-vect_get_strided_load_store_ops (gimple *stmt, loop_vec_info loop_vinfo,
+vect_get_strided_load_store_ops (stmt_vec_info stmt_info,
+ loop_vec_info loop_vinfo,
gather_scatter_info *gs_info,
tree *dataref_bump, tree *vec_offset)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
/* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}. */
static bool
-vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi,
+vectorizable_bswap (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt, slp_tree slp_node,
tree vectype_in, enum vect_def_type *dt,
stmt_vector_for_cost *cost_vec)
{
tree op, vectype;
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ gcall *stmt = as_a <gcall *> (stmt_info->stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
unsigned ncopies;
unsigned HOST_WIDE_INT nunits, num_bytes;
/* Function vectorizable_call.
- Check if GS performs a function call that can be vectorized.
- If VEC_STMT is also passed, vectorize the STMT: create a vectorized
- stmt to replace it, put it in VEC_STMT, and insert it at BSI.
- Return FALSE if not a vectorizable STMT, TRUE otherwise. */
+ Check if STMT_INFO performs a function call that can be vectorized.
+ If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
+ stmt to replace it, put it in VEC_STMT, and insert it at GSI.
+ Return true if STMT_INFO is vectorizable in this way. */
static bool
-vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi,
+vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt, slp_tree slp_node,
stmt_vector_for_cost *cost_vec)
{
tree scalar_dest;
tree op;
tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
- stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
+ stmt_vec_info prev_stmt_info;
tree vectype_out, vectype_in;
poly_uint64 nunits_in;
poly_uint64 nunits_out;
/* Function vectorizable_simd_clone_call.
- Check if STMT performs a function call that can be vectorized
+ Check if STMT_INFO performs a function call that can be vectorized
by calling a simd clone of the function.
- If VEC_STMT is also passed, vectorize the STMT: create a vectorized
- stmt to replace it, put it in VEC_STMT, and insert it at BSI.
- Return FALSE if not a vectorizable STMT, TRUE otherwise. */
+ If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
+ stmt to replace it, put it in VEC_STMT, and insert it at GSI.
+ Return true if STMT_INFO is vectorizable in this way. */
static bool
-vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
+vectorizable_simd_clone_call (stmt_vec_info stmt_info,
+ gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt, slp_tree slp_node,
stmt_vector_for_cost *)
{
tree scalar_dest;
tree op, type;
tree vec_oprnd0 = NULL_TREE;
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
+ stmt_vec_info prev_stmt_info;
tree vectype;
unsigned int nunits;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
vec<constructor_elt, va_gc> *ret_ctor_elts = NULL;
/* Is STMT a vectorizable call? */
- if (!is_gimple_call (stmt))
+ gcall *stmt = dyn_cast <gcall *> (stmt_info->stmt);
+ if (!stmt)
return false;
fndecl = gimple_call_fndecl (stmt);
static void
vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
- int multi_step_cvt, gimple *stmt,
+ int multi_step_cvt,
+ stmt_vec_info stmt_info,
vec<tree> vec_dsts,
gimple_stmt_iterator *gsi,
slp_tree slp_node, enum tree_code code,
{
unsigned int i;
tree vop0, vop1, new_tmp, vec_dest;
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
vec_dest = vec_dsts.pop ();
}
-/* Check if STMT performs a conversion operation, that can be vectorized.
- If VEC_STMT is also passed, vectorize the STMT: create a vectorized
+/* Check if STMT_INFO performs a conversion operation that can be vectorized.
+ If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
stmt to replace it, put it in VEC_STMT, and insert it at GSI.
- Return FALSE if not a vectorizable STMT, TRUE otherwise. */
+ Return true if STMT_INFO is vectorizable in this way. */
static bool
-vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
+vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt, slp_tree slp_node,
stmt_vector_for_cost *cost_vec)
{
tree scalar_dest;
tree op0, op1 = NULL_TREE;
tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
&& ! vec_stmt)
return false;
- if (!is_gimple_assign (stmt))
+ gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
+ if (!stmt)
return false;
if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
/* Function vectorizable_assignment.
- Check if STMT performs an assignment (copy) that can be vectorized.
- If VEC_STMT is also passed, vectorize the STMT: create a vectorized
- stmt to replace it, put it in VEC_STMT, and insert it at BSI.
- Return FALSE if not a vectorizable STMT, TRUE otherwise. */
+ Check if STMT_INFO performs an assignment (copy) that can be vectorized.
+ If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
+ stmt to replace it, put it in VEC_STMT, and insert it at GSI.
+ Return true if STMT_INFO is vectorizable in this way. */
static bool
-vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
+vectorizable_assignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt, slp_tree slp_node,
stmt_vector_for_cost *cost_vec)
{
tree vec_dest;
tree scalar_dest;
tree op;
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
tree new_temp;
enum vect_def_type dt[1] = {vect_unknown_def_type};
return false;
/* Is vectorizable assignment? */
- if (!is_gimple_assign (stmt))
+ gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
+ if (!stmt)
return false;
scalar_dest = gimple_assign_lhs (stmt);
/* Function vectorizable_shift.
- Check if STMT performs a shift operation that can be vectorized.
- If VEC_STMT is also passed, vectorize the STMT: create a vectorized
- stmt to replace it, put it in VEC_STMT, and insert it at BSI.
- Return FALSE if not a vectorizable STMT, TRUE otherwise. */
+ Check if STMT_INFO performs a shift operation that can be vectorized.
+ If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
+ stmt to replace it, put it in VEC_STMT, and insert it at GSI.
+ Return true if STMT_INFO is vectorizable in this way. */
static bool
-vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
+vectorizable_shift (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt, slp_tree slp_node,
stmt_vector_for_cost *cost_vec)
{
tree scalar_dest;
tree op0, op1 = NULL;
tree vec_oprnd1 = NULL_TREE;
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree vectype;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
enum tree_code code;
return false;
/* Is STMT a vectorizable binary/unary operation? */
- if (!is_gimple_assign (stmt))
+ gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
+ if (!stmt)
return false;
if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
/* Function vectorizable_operation.
- Check if STMT performs a binary, unary or ternary operation that can
+ Check if STMT_INFO performs a binary, unary or ternary operation that can
be vectorized.
- If VEC_STMT is also passed, vectorize the STMT: create a vectorized
- stmt to replace it, put it in VEC_STMT, and insert it at BSI.
- Return FALSE if not a vectorizable STMT, TRUE otherwise. */
+ If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
+ stmt to replace it, put it in VEC_STMT, and insert it at GSI.
+ Return true if STMT_INFO is vectorizable in this way. */
static bool
-vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
+vectorizable_operation (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt, slp_tree slp_node,
stmt_vector_for_cost *cost_vec)
{
tree vec_dest;
tree scalar_dest;
tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree vectype;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
enum tree_code code, orig_code;
return false;
/* Is STMT a vectorizable binary/unary operation? */
- if (!is_gimple_assign (stmt))
+ gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
+ if (!stmt)
return false;
if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
/* Function get_group_alias_ptr_type.
- Return the alias type for the group starting at FIRST_STMT. */
+ Return the alias type for the group starting at FIRST_STMT_INFO. */
static tree
-get_group_alias_ptr_type (gimple *first_stmt)
+get_group_alias_ptr_type (stmt_vec_info first_stmt_info)
{
- stmt_vec_info first_stmt_info = vinfo_for_stmt (first_stmt);
struct data_reference *first_dr, *next_dr;
first_dr = STMT_VINFO_DATA_REF (first_stmt_info);
/* Function vectorizable_store.
- Check if STMT defines a non scalar data-ref (array/pointer/structure) that
- can be vectorized.
- If VEC_STMT is also passed, vectorize the STMT: create a vectorized
- stmt to replace it, put it in VEC_STMT, and insert it at BSI.
- Return FALSE if not a vectorizable STMT, TRUE otherwise. */
+ Check if STMT_INFO defines a non scalar data-ref (array/pointer/structure)
+ that can be vectorized.
+ If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
+ stmt to replace it, put it in VEC_STMT, and insert it at GSI.
+ Return true if STMT_INFO is vectorizable in this way. */
static bool
-vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
+vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt, slp_tree slp_node,
stmt_vector_for_cost *cost_vec)
{
tree data_ref;
tree op;
tree vec_oprnd = NULL_TREE;
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
tree elem_type;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
return data_ref;
}
-/* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
+/* Hoist the definitions of all SSA uses on STMT_INFO out of the loop LOOP,
inserting them on the loops preheader edge. Returns true if we
- were successful in doing so (and thus STMT can be moved then),
+ were successful in doing so (and thus STMT_INFO can be moved then),
otherwise returns false. */
static bool
-hoist_defs_of_uses (gimple *stmt, struct loop *loop)
+hoist_defs_of_uses (stmt_vec_info stmt_info, struct loop *loop)
{
ssa_op_iter i;
tree op;
bool any = false;
- FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
+ FOR_EACH_SSA_TREE_OPERAND (op, stmt_info->stmt, i, SSA_OP_USE)
{
gimple *def_stmt = SSA_NAME_DEF_STMT (op);
if (!gimple_nop_p (def_stmt)
if (!any)
return true;
- FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
+ FOR_EACH_SSA_TREE_OPERAND (op, stmt_info->stmt, i, SSA_OP_USE)
{
gimple *def_stmt = SSA_NAME_DEF_STMT (op);
if (!gimple_nop_p (def_stmt)
/* vectorizable_load.
- Check if STMT reads a non scalar data-ref (array/pointer/structure) that
- can be vectorized.
- If VEC_STMT is also passed, vectorize the STMT: create a vectorized
- stmt to replace it, put it in VEC_STMT, and insert it at BSI.
- Return FALSE if not a vectorizable STMT, TRUE otherwise. */
+ Check if STMT_INFO reads a non scalar data-ref (array/pointer/structure)
+ that can be vectorized.
+ If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
+ stmt to replace it, put it in VEC_STMT, and insert it at GSI.
+ Return true if STMT_INFO is vectorizable in this way. */
static bool
-vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
+vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt, slp_tree slp_node,
slp_instance slp_node_instance,
stmt_vector_for_cost *cost_vec)
tree scalar_dest;
tree vec_dest = NULL;
tree data_ref = NULL;
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
stmt_vec_info prev_stmt_info;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = NULL;
- struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
+ struct loop *containing_loop = gimple_bb (stmt_info->stmt)->loop_father;
bool nested_in_vect_loop = false;
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
tree elem_type;
&& !nested_in_vect_loop
&& hoist_defs_of_uses (stmt_info, loop))
{
+ gassign *stmt = as_a <gassign *> (stmt_info->stmt);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
/* vectorizable_condition.
- Check if STMT is conditional modify expression that can be vectorized.
- If VEC_STMT is also passed, vectorize the STMT: create a vectorized
+ Check if STMT_INFO is a conditional modify expression that can be vectorized.
+ If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
at GSI.
- When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
- to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
- else clause if it is 2).
+ When STMT_INFO is vectorized as a nested cycle, REDUC_DEF is the vector
+ variable to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1,
+ and in else clause if it is 2).
- Return FALSE if not a vectorizable STMT, TRUE otherwise. */
+ Return true if STMT_INFO is vectorizable in this way. */
bool
-vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
+vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt, tree reduc_def,
int reduc_index, slp_tree slp_node,
stmt_vector_for_cost *cost_vec)
tree vec_dest = NULL_TREE;
tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE;
tree then_clause, else_clause;
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree comp_vectype = NULL_TREE;
tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
}
/* Is vectorizable conditional operation? */
- if (!is_gimple_assign (stmt))
+ gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
+ if (!stmt)
return false;
code = gimple_assign_rhs_code (stmt);
/* vectorizable_comparison.
- Check if STMT is comparison expression that can be vectorized.
- If VEC_STMT is also passed, vectorize the STMT: create a vectorized
+ Check if STMT_INFO is a comparison expression that can be vectorized.
+ If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
comparison, put it in VEC_STMT, and insert it at GSI.
- Return FALSE if not a vectorizable STMT, TRUE otherwise. */
+ Return true if STMT_INFO is vectorizable in this way. */
static bool
-vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
+vectorizable_comparison (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
stmt_vec_info *vec_stmt, tree reduc_def,
slp_tree slp_node, stmt_vector_for_cost *cost_vec)
{
tree lhs, rhs1, rhs2;
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
return false;
}
- if (!is_gimple_assign (stmt))
+ gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
+ if (!stmt)
return false;
code = gimple_assign_rhs_code (stmt);
/* Make sure the statement is vectorizable. */
bool
-vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
- slp_instance node_instance, stmt_vector_for_cost *cost_vec)
+vect_analyze_stmt (stmt_vec_info stmt_info, bool *need_to_vectorize,
+ slp_tree node, slp_instance node_instance,
+ stmt_vector_for_cost *cost_vec)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
vec_info *vinfo = stmt_info->vinfo;
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
|| STMT_VINFO_LIVE_P (pattern_stmt_info)))
{
/* Analyze PATTERN_STMT instead of the original stmt. */
- stmt = pattern_stmt_info->stmt;
stmt_info = pattern_stmt_info;
if (dump_enabled_p ())
{
/* Function vect_transform_stmt.
- Create a vectorized stmt to replace STMT, and insert it at BSI. */
+ Create a vectorized stmt to replace STMT_INFO, and insert it at BSI. */
bool
-vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
+vect_transform_stmt (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
bool *grouped_store, slp_tree slp_node,
slp_instance slp_node_instance)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
vec_info *vinfo = stmt_info->vinfo;
bool is_store = false;
stmt_vec_info vec_stmt = NULL;
(LOOP_VINFO_LOOP (STMT_VINFO_LOOP_VINFO (stmt_info)),
stmt_info));
+ gimple *stmt = stmt_info->stmt;
switch (STMT_VINFO_TYPE (stmt_info))
{
case type_demotion_vec_info_type:
stmt_vec_info. */
void
-vect_remove_stores (gimple *first_stmt)
+vect_remove_stores (stmt_vec_info first_stmt_info)
{
- stmt_vec_info next_stmt_info = vinfo_for_stmt (first_stmt);
+ stmt_vec_info next_stmt_info = first_stmt_info;
gimple_stmt_iterator next_si;
while (next_stmt_info)
widening operation (short in the above example). */
bool
-supportable_widening_operation (enum tree_code code, gimple *stmt,
+supportable_widening_operation (enum tree_code code, stmt_vec_info stmt_info,
tree vectype_out, tree vectype_in,
enum tree_code *code1, enum tree_code *code2,
int *multi_step_cvt,
vec<tree> *interm_types)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *vect_loop = NULL;
machine_mode vec_mode;
return (loop_vec_info) loop->aux;
}
-static inline bool
-nested_in_vect_loop_p (struct loop *loop, gimple *stmt)
-{
- return (loop->inner
- && (loop->inner == (gimple_bb (stmt))->loop_father));
-}
-
typedef struct _bb_vec_info : public vec_info
{
_bb_vec_info (gimple_stmt_iterator, gimple_stmt_iterator, vec_info_shared *);
}
}
+static inline bool
+nested_in_vect_loop_p (struct loop *loop, stmt_vec_info stmt_info)
+{
+ return (loop->inner
+ && (loop->inner == (gimple_bb (stmt_info->stmt))->loop_father));
+}
+
/* Return the earlier statement between STMT1_INFO and STMT2_INFO. */
static inline stmt_vec_info
extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *,
tree *, stmt_vec_info * = NULL,
gimple ** = NULL);
-extern bool supportable_widening_operation (enum tree_code, gimple *, tree,
- tree, enum tree_code *,
+extern bool supportable_widening_operation (enum tree_code, stmt_vec_info,
+ tree, tree, enum tree_code *,
enum tree_code *, int *,
vec<tree> *);
extern bool supportable_narrowing_operation (enum tree_code, tree, tree,
extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
enum vect_cost_for_stmt, stmt_vec_info,
int, enum vect_cost_model_location);
-extern stmt_vec_info vect_finish_replace_stmt (gimple *, gimple *);
-extern stmt_vec_info vect_finish_stmt_generation (gimple *, gimple *,
+extern stmt_vec_info vect_finish_replace_stmt (stmt_vec_info, gimple *);
+extern stmt_vec_info vect_finish_stmt_generation (stmt_vec_info, gimple *,
gimple_stmt_iterator *);
extern bool vect_mark_stmts_to_be_vectorized (loop_vec_info);
-extern tree vect_get_store_rhs (gimple *);
-extern tree vect_get_vec_def_for_operand_1 (gimple *, enum vect_def_type);
-extern tree vect_get_vec_def_for_operand (tree, gimple *, tree = NULL);
-extern void vect_get_vec_defs (tree, tree, gimple *, vec<tree> *,
+extern tree vect_get_store_rhs (stmt_vec_info);
+extern tree vect_get_vec_def_for_operand_1 (stmt_vec_info, enum vect_def_type);
+extern tree vect_get_vec_def_for_operand (tree, stmt_vec_info, tree = NULL);
+extern void vect_get_vec_defs (tree, tree, stmt_vec_info, vec<tree> *,
vec<tree> *, slp_tree);
extern void vect_get_vec_defs_for_stmt_copy (enum vect_def_type *,
vec<tree> *, vec<tree> *);
-extern tree vect_init_vector (gimple *, tree, tree,
+extern tree vect_init_vector (stmt_vec_info, tree, tree,
gimple_stmt_iterator *);
extern tree vect_get_vec_def_for_stmt_copy (enum vect_def_type, tree);
-extern bool vect_transform_stmt (gimple *, gimple_stmt_iterator *,
+extern bool vect_transform_stmt (stmt_vec_info, gimple_stmt_iterator *,
bool *, slp_tree, slp_instance);
-extern void vect_remove_stores (gimple *);
-extern bool vect_analyze_stmt (gimple *, bool *, slp_tree, slp_instance,
+extern void vect_remove_stores (stmt_vec_info);
+extern bool vect_analyze_stmt (stmt_vec_info, bool *, slp_tree, slp_instance,
stmt_vector_for_cost *);
-extern bool vectorizable_condition (gimple *, gimple_stmt_iterator *,
+extern bool vectorizable_condition (stmt_vec_info, gimple_stmt_iterator *,
stmt_vec_info *, tree, int, slp_tree,
stmt_vector_for_cost *);
extern void vect_get_load_cost (stmt_vec_info, int, bool,
extern bool vect_can_force_dr_alignment_p (const_tree, unsigned int);
extern enum dr_alignment_support vect_supportable_dr_alignment
(struct data_reference *, bool);
-extern tree vect_get_smallest_scalar_type (gimple *, HOST_WIDE_INT *,
+extern tree vect_get_smallest_scalar_type (stmt_vec_info, HOST_WIDE_INT *,
HOST_WIDE_INT *);
extern bool vect_analyze_data_ref_dependences (loop_vec_info, unsigned int *);
extern bool vect_slp_analyze_instance_dependence (slp_instance);
extern bool vect_prune_runtime_alias_test_list (loop_vec_info);
extern bool vect_gather_scatter_fn_p (bool, bool, tree, tree, unsigned int,
signop, int, internal_fn *, tree *);
-extern bool vect_check_gather_scatter (gimple *, loop_vec_info,
+extern bool vect_check_gather_scatter (stmt_vec_info, loop_vec_info,
gather_scatter_info *);
extern bool vect_find_stmt_data_reference (loop_p, gimple *,
vec<data_reference_p> *);
extern bool vect_analyze_data_refs (vec_info *, poly_uint64 *);
extern void vect_record_base_alignments (vec_info *);
-extern tree vect_create_data_ref_ptr (gimple *, tree, struct loop *, tree,
+extern tree vect_create_data_ref_ptr (stmt_vec_info, tree, struct loop *, tree,
tree *, gimple_stmt_iterator *,
gimple **, bool, bool *,
tree = NULL_TREE, tree = NULL_TREE);
-extern tree bump_vector_ptr (tree, gimple *, gimple_stmt_iterator *, gimple *,
- tree);
+extern tree bump_vector_ptr (tree, gimple *, gimple_stmt_iterator *,
+ stmt_vec_info, tree);
extern void vect_copy_ref_info (tree, tree);
extern tree vect_create_destination_var (tree, tree);
extern bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT);
extern bool vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT, bool);
extern bool vect_grouped_load_supported (tree, bool, unsigned HOST_WIDE_INT);
extern bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT, bool);
-extern void vect_permute_store_chain (vec<tree> ,unsigned int, gimple *,
+extern void vect_permute_store_chain (vec<tree> ,unsigned int, stmt_vec_info,
gimple_stmt_iterator *, vec<tree> *);
-extern tree vect_setup_realignment (gimple *, gimple_stmt_iterator *, tree *,
- enum dr_alignment_support, tree,
+extern tree vect_setup_realignment (stmt_vec_info, gimple_stmt_iterator *,
+ tree *, enum dr_alignment_support, tree,
struct loop **);
-extern void vect_transform_grouped_load (gimple *, vec<tree> , int,
+extern void vect_transform_grouped_load (stmt_vec_info, vec<tree> , int,
gimple_stmt_iterator *);
-extern void vect_record_grouped_load_vectors (gimple *, vec<tree> );
+extern void vect_record_grouped_load_vectors (stmt_vec_info, vec<tree>);
extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
extern tree vect_get_new_ssa_name (tree, enum vect_var_kind,
const char * = NULL);
-extern tree vect_create_addr_base_for_vector_ref (gimple *, gimple_seq *,
+extern tree vect_create_addr_base_for_vector_ref (stmt_vec_info, gimple_seq *,
tree, tree = NULL_TREE);
/* In tree-vect-loop.c. */
/* Drive for loop transformation stage. */
extern struct loop *vect_transform_loop (loop_vec_info);
extern loop_vec_info vect_analyze_loop_form (struct loop *, vec_info_shared *);
-extern bool vectorizable_live_operation (gimple *, gimple_stmt_iterator *,
+extern bool vectorizable_live_operation (stmt_vec_info, gimple_stmt_iterator *,
slp_tree, int, stmt_vec_info *,
stmt_vector_for_cost *);
-extern bool vectorizable_reduction (gimple *, gimple_stmt_iterator *,
+extern bool vectorizable_reduction (stmt_vec_info, gimple_stmt_iterator *,
stmt_vec_info *, slp_tree, slp_instance,
stmt_vector_for_cost *);
-extern bool vectorizable_induction (gimple *, gimple_stmt_iterator *,
+extern bool vectorizable_induction (stmt_vec_info, gimple_stmt_iterator *,
stmt_vec_info *, slp_tree,
stmt_vector_for_cost *);
-extern tree get_initial_def_for_reduction (gimple *, tree, tree *);
+extern tree get_initial_def_for_reduction (stmt_vec_info, tree, tree *);
extern bool vect_worthwhile_without_simd_p (vec_info *, tree_code);
extern int vect_get_known_peeling_cost (loop_vec_info, int, int *,
stmt_vector_for_cost *,
extern void vect_get_slp_defs (vec<tree> , slp_tree, vec<vec<tree> > *);
extern bool vect_slp_bb (basic_block);
extern stmt_vec_info vect_find_last_scalar_stmt_in_slp (slp_tree);
-extern bool is_simple_and_all_uses_invariant (gimple *, loop_vec_info);
+extern bool is_simple_and_all_uses_invariant (stmt_vec_info, loop_vec_info);
extern bool can_duplicate_and_interleave_p (unsigned int, machine_mode,
unsigned int * = NULL,
tree * = NULL, tree * = NULL);
extern void duplicate_and_interleave (gimple_seq *, tree, vec<tree>,
unsigned int, vec<tree> &);
-extern int vect_get_place_in_interleaving_chain (gimple *, gimple *);
+extern int vect_get_place_in_interleaving_chain (stmt_vec_info, stmt_vec_info);
/* In tree-vect-patterns.c. */
/* Pattern recognition functions.