From: Richard Sandiford Date: Tue, 31 Jul 2018 14:25:30 +0000 (+0000) Subject: [32/46] Use stmt_vec_info in function interfaces (part 2) X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=32e8e429c685629fc4363138f564f41de47aa7a2;p=gcc.git [32/46] Use stmt_vec_info in function interfaces (part 2) This second part handles the mechanical change from a gimple stmt argument to a stmt_vec_info argument. It updates the function comments if they referred to the argument by name, but it doesn't try to retrofit mentions to other functions. 2018-07-31 Richard Sandiford gcc/ * tree-vectorizer.h (nested_in_vect_loop_p): Move further down file and take a stmt_vec_info instead of a gimple stmt. (supportable_widening_operation, vect_finish_replace_stmt) (vect_finish_stmt_generation, vect_get_store_rhs) (vect_get_vec_def_for_operand_1, vect_get_vec_def_for_operand) (vect_get_vec_defs, vect_init_vector, vect_transform_stmt) (vect_remove_stores, vect_analyze_stmt, vectorizable_condition) (vect_get_smallest_scalar_type, vect_check_gather_scatter) (vect_create_data_ref_ptr, bump_vector_ptr) (vect_permute_store_chain, vect_setup_realignment) (vect_transform_grouped_load, vect_record_grouped_load_vectors) (vect_create_addr_base_for_vector_ref, vectorizable_live_operation) (vectorizable_reduction, vectorizable_induction) (get_initial_def_for_reduction, is_simple_and_all_uses_invariant) (vect_get_place_in_interleaving_chain): Take stmt_vec_infos rather than gimple stmts as arguments. * tree-vect-data-refs.c (vect_get_smallest_scalar_type) (vect_preserves_scalar_order_p, vect_slp_analyze_node_dependences) (can_group_stmts_p, vect_check_gather_scatter) (vect_create_addr_base_for_vector_ref, vect_create_data_ref_ptr) (bump_vector_ptr, vect_permute_store_chain, vect_setup_realignment) (vect_permute_load_chain, vect_shift_permute_load_chain) (vect_transform_grouped_load) (vect_record_grouped_load_vectors): Likewise. 
* tree-vect-loop.c (vect_fixup_reduc_chain) (get_initial_def_for_reduction, vect_create_epilog_for_reduction) (vectorize_fold_left_reduction, is_nonwrapping_integer_induction) (vectorizable_reduction, vectorizable_induction) (vectorizable_live_operation, vect_loop_kill_debug_uses): Likewise. * tree-vect-patterns.c (type_conversion_p, adjust_bool_stmts) (vect_get_load_store_mask): Likewise. * tree-vect-slp.c (vect_get_place_in_interleaving_chain) (vect_analyze_slp_instance, vect_mask_constant_operand_p): Likewise. * tree-vect-stmts.c (vect_mark_relevant) (is_simple_and_all_uses_invariant) (exist_non_indexing_operands_for_use_p, process_use) (vect_init_vector_1, vect_init_vector, vect_get_vec_def_for_operand_1) (vect_get_vec_def_for_operand, vect_get_vec_defs) (vect_finish_stmt_generation_1, vect_finish_replace_stmt) (vect_finish_stmt_generation, vect_truncate_gather_scatter_offset) (compare_step_with_zero, vect_get_store_rhs, get_group_load_store_type) (get_negative_load_store_type, get_load_store_type) (vect_check_load_store_mask, vect_check_store_rhs) (vect_build_gather_load_calls, vect_get_strided_load_store_ops) (vectorizable_bswap, vectorizable_call, vectorizable_simd_clone_call) (vect_create_vectorized_demotion_stmts, vectorizable_conversion) (vectorizable_assignment, vectorizable_shift, vectorizable_operation) (get_group_alias_ptr_type, vectorizable_store, hoist_defs_of_uses) (vectorizable_load, vectorizable_condition, vectorizable_comparison) (vect_analyze_stmt, vect_transform_stmt, vect_remove_stores) (supportable_widening_operation): Likewise. From-SVN: r263147 --- diff --git a/gcc/ChangeLog b/gcc/ChangeLog index 87bdc43d06f..ea675977431 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -1,3 +1,57 @@ +2018-07-31 Richard Sandiford + + * tree-vectorizer.h (nested_in_vect_loop_p): Move further down + file and take a stmt_vec_info instead of a gimple stmt. 
+ (supportable_widening_operation, vect_finish_replace_stmt) + (vect_finish_stmt_generation, vect_get_store_rhs) + (vect_get_vec_def_for_operand_1, vect_get_vec_def_for_operand) + (vect_get_vec_defs, vect_init_vector, vect_transform_stmt) + (vect_remove_stores, vect_analyze_stmt, vectorizable_condition) + (vect_get_smallest_scalar_type, vect_check_gather_scatter) + (vect_create_data_ref_ptr, bump_vector_ptr) + (vect_permute_store_chain, vect_setup_realignment) + (vect_transform_grouped_load, vect_record_grouped_load_vectors) + (vect_create_addr_base_for_vector_ref, vectorizable_live_operation) + (vectorizable_reduction, vectorizable_induction) + (get_initial_def_for_reduction, is_simple_and_all_uses_invariant) + (vect_get_place_in_interleaving_chain): Take stmt_vec_infos rather + than gimple stmts as arguments. + * tree-vect-data-refs.c (vect_get_smallest_scalar_type) + (vect_preserves_scalar_order_p, vect_slp_analyze_node_dependences) + (can_group_stmts_p, vect_check_gather_scatter) + (vect_create_addr_base_for_vector_ref, vect_create_data_ref_ptr) + (bump_vector_ptr, vect_permute_store_chain, vect_setup_realignment) + (vect_permute_load_chain, vect_shift_permute_load_chain) + (vect_transform_grouped_load) + (vect_record_grouped_load_vectors): Likewise. + * tree-vect-loop.c (vect_fixup_reduc_chain) + (get_initial_def_for_reduction, vect_create_epilog_for_reduction) + (vectorize_fold_left_reduction, is_nonwrapping_integer_induction) + (vectorizable_reduction, vectorizable_induction) + (vectorizable_live_operation, vect_loop_kill_debug_uses): Likewise. + * tree-vect-patterns.c (type_conversion_p, adjust_bool_stmts) + (vect_get_load_store_mask): Likewise. + * tree-vect-slp.c (vect_get_place_in_interleaving_chain) + (vect_analyze_slp_instance, vect_mask_constant_operand_p): Likewise. 
+ * tree-vect-stmts.c (vect_mark_relevant) + (is_simple_and_all_uses_invariant) + (exist_non_indexing_operands_for_use_p, process_use) + (vect_init_vector_1, vect_init_vector, vect_get_vec_def_for_operand_1) + (vect_get_vec_def_for_operand, vect_get_vec_defs) + (vect_finish_stmt_generation_1, vect_finish_replace_stmt) + (vect_finish_stmt_generation, vect_truncate_gather_scatter_offset) + (compare_step_with_zero, vect_get_store_rhs, get_group_load_store_type) + (get_negative_load_store_type, get_load_store_type) + (vect_check_load_store_mask, vect_check_store_rhs) + (vect_build_gather_load_calls, vect_get_strided_load_store_ops) + (vectorizable_bswap, vectorizable_call, vectorizable_simd_clone_call) + (vect_create_vectorized_demotion_stmts, vectorizable_conversion) + (vectorizable_assignment, vectorizable_shift, vectorizable_operation) + (get_group_alias_ptr_type, vectorizable_store, hoist_defs_of_uses) + (vectorizable_load, vectorizable_condition, vectorizable_comparison) + (vect_analyze_stmt, vect_transform_stmt, vect_remove_stores) + (supportable_widening_operation): Likewise. + 2018-07-31 Richard Sandiford * tree-vect-data-refs.c (vect_describe_gather_scatter_call): Take diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c index 3d076037504..5a36d098e49 100644 --- a/gcc/tree-vect-data-refs.c +++ b/gcc/tree-vect-data-refs.c @@ -99,7 +99,7 @@ vect_lanes_optab_supported_p (const char *name, convert_optab optab, } -/* Return the smallest scalar part of STMT. +/* Return the smallest scalar part of STMT_INFO. This is used to determine the vectype of the stmt. We generally set the vectype according to the type of the result (lhs). For stmts whose result-type is different than the type of the arguments (e.g., demotion, @@ -117,10 +117,11 @@ vect_lanes_optab_supported_p (const char *name, convert_optab optab, types. 
*/ tree -vect_get_smallest_scalar_type (gimple *stmt, HOST_WIDE_INT *lhs_size_unit, - HOST_WIDE_INT *rhs_size_unit) +vect_get_smallest_scalar_type (stmt_vec_info stmt_info, + HOST_WIDE_INT *lhs_size_unit, + HOST_WIDE_INT *rhs_size_unit) { - tree scalar_type = gimple_expr_type (stmt); + tree scalar_type = gimple_expr_type (stmt_info->stmt); HOST_WIDE_INT lhs, rhs; /* During the analysis phase, this function is called on arbitrary @@ -130,7 +131,7 @@ vect_get_smallest_scalar_type (gimple *stmt, HOST_WIDE_INT *lhs_size_unit, lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type)); - gassign *assign = dyn_cast (stmt); + gassign *assign = dyn_cast (stmt_info->stmt); if (assign && (gimple_assign_cast_p (assign) || gimple_assign_rhs_code (assign) == DOT_PROD_EXPR @@ -191,16 +192,14 @@ vect_check_nonzero_value (loop_vec_info loop_vinfo, tree value) LOOP_VINFO_CHECK_NONZERO (loop_vinfo).safe_push (value); } -/* Return true if we know that the order of vectorized STMT_A and - vectorized STMT_B will be the same as the order of STMT_A and STMT_B. - At least one of the statements is a write. */ +/* Return true if we know that the order of vectorized STMTINFO_A and + vectorized STMTINFO_B will be the same as the order of STMTINFO_A and + STMTINFO_B. At least one of the statements is a write. */ static bool -vect_preserves_scalar_order_p (gimple *stmt_a, gimple *stmt_b) +vect_preserves_scalar_order_p (stmt_vec_info stmtinfo_a, + stmt_vec_info stmtinfo_b) { - stmt_vec_info stmtinfo_a = vinfo_for_stmt (stmt_a); - stmt_vec_info stmtinfo_b = vinfo_for_stmt (stmt_b); - /* Single statements are always kept in their original order. 
*/ if (!STMT_VINFO_GROUPED_ACCESS (stmtinfo_a) && !STMT_VINFO_GROUPED_ACCESS (stmtinfo_b)) @@ -666,7 +665,7 @@ vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr) static bool vect_slp_analyze_node_dependences (slp_instance instance, slp_tree node, vec stores, - gimple *last_store) + stmt_vec_info last_store_info) { /* This walks over all stmts involved in the SLP load/store done in NODE verifying we can sink them up to the last stmt in the @@ -712,7 +711,7 @@ vect_slp_analyze_node_dependences (slp_instance instance, slp_tree node, been sunk to (and we verify if we can do that as well). */ if (gimple_visited_p (stmt)) { - if (stmt_info != last_store) + if (stmt_info != last_store_info) continue; unsigned i; stmt_vec_info store_info; @@ -2843,20 +2842,20 @@ strip_conversion (tree op) return gimple_assign_rhs1 (stmt); } -/* Return true if vectorizable_* routines can handle statements STMT1 - and STMT2 being in a single group. */ +/* Return true if vectorizable_* routines can handle statements STMT1_INFO + and STMT2_INFO being in a single group. */ static bool -can_group_stmts_p (gimple *stmt1, gimple *stmt2) +can_group_stmts_p (stmt_vec_info stmt1_info, stmt_vec_info stmt2_info) { - if (gimple_assign_single_p (stmt1)) - return gimple_assign_single_p (stmt2); + if (gimple_assign_single_p (stmt1_info->stmt)) + return gimple_assign_single_p (stmt2_info->stmt); - gcall *call1 = dyn_cast (stmt1); + gcall *call1 = dyn_cast (stmt1_info->stmt); if (call1 && gimple_call_internal_p (call1)) { /* Check for two masked loads or two masked stores. 
*/ - gcall *call2 = dyn_cast (stmt2); + gcall *call2 = dyn_cast (stmt2_info->stmt); if (!call2 || !gimple_call_internal_p (call2)) return false; internal_fn ifn = gimple_call_internal_fn (call1); @@ -3643,17 +3642,16 @@ vect_describe_gather_scatter_call (stmt_vec_info stmt_info, info->memory_type = TREE_TYPE (DR_REF (dr)); } -/* Return true if a non-affine read or write in STMT is suitable for a +/* Return true if a non-affine read or write in STMT_INFO is suitable for a gather load or scatter store. Describe the operation in *INFO if so. */ bool -vect_check_gather_scatter (gimple *stmt, loop_vec_info loop_vinfo, +vect_check_gather_scatter (stmt_vec_info stmt_info, loop_vec_info loop_vinfo, gather_scatter_info *info) { HOST_WIDE_INT scale = 1; poly_int64 pbitpos, pbitsize; struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); tree offtype = NULL_TREE; tree decl = NULL_TREE, base, off; @@ -4473,7 +4471,7 @@ vect_duplicate_ssa_name_ptr_info (tree name, data_reference *dr) that will be accessed for a data reference. Input: - STMT: The statement containing the data reference. + STMT_INFO: The statement containing the data reference. NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list. OFFSET: Optional. If supplied, it is be added to the initial address. LOOP: Specify relative to which loop-nest should the address be computed. @@ -4502,12 +4500,11 @@ vect_duplicate_ssa_name_ptr_info (tree name, data_reference *dr) FORNOW: We are only handling array accesses with step 1. 
*/ tree -vect_create_addr_base_for_vector_ref (gimple *stmt, +vect_create_addr_base_for_vector_ref (stmt_vec_info stmt_info, gimple_seq *new_stmt_list, tree offset, tree byte_offset) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); const char *base_name; tree addr_base; @@ -4588,26 +4585,26 @@ vect_create_addr_base_for_vector_ref (gimple *stmt, /* Function vect_create_data_ref_ptr. Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first - location accessed in the loop by STMT, along with the def-use update + location accessed in the loop by STMT_INFO, along with the def-use update chain to appropriately advance the pointer through the loop iterations. Also set aliasing information for the pointer. This pointer is used by the callers to this function to create a memory reference expression for vector load/store access. Input: - 1. STMT: a stmt that references memory. Expected to be of the form + 1. STMT_INFO: a stmt that references memory. Expected to be of the form GIMPLE_ASSIGN or GIMPLE_ASSIGN . 2. AGGR_TYPE: the type of the reference, which should be either a vector or an array. 3. AT_LOOP: the loop where the vector memref is to be created. 4. OFFSET (optional): an offset to be added to the initial address accessed - by the data-ref in STMT. + by the data-ref in STMT_INFO. 5. BSI: location where the new stmts are to be placed if there is no loop 6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain pointing to the initial address. 7. BYTE_OFFSET (optional, defaults to NULL): a byte offset to be added - to the initial address accessed by the data-ref in STMT. This is + to the initial address accessed by the data-ref in STMT_INFO. This is similar to OFFSET, but OFFSET is counted in elements, while BYTE_OFFSET in bytes. 8. IV_STEP (optional, defaults to NULL): the amount that should be added @@ -4643,14 +4640,13 @@ vect_create_addr_base_for_vector_ref (gimple *stmt, 4. 
Return the pointer. */ tree -vect_create_data_ref_ptr (gimple *stmt, tree aggr_type, struct loop *at_loop, - tree offset, tree *initial_address, - gimple_stmt_iterator *gsi, gimple **ptr_incr, - bool only_init, bool *inv_p, tree byte_offset, - tree iv_step) +vect_create_data_ref_ptr (stmt_vec_info stmt_info, tree aggr_type, + struct loop *at_loop, tree offset, + tree *initial_address, gimple_stmt_iterator *gsi, + gimple **ptr_incr, bool only_init, bool *inv_p, + tree byte_offset, tree iv_step) { const char *base_name; - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = NULL; bool nested_in_vect_loop = false; @@ -4905,7 +4901,7 @@ vect_create_data_ref_ptr (gimple *stmt, tree aggr_type, struct loop *at_loop, the loop. The increment amount across iterations is expected to be vector_size. BSI - location where the new update stmt is to be placed. - STMT - the original scalar memory-access stmt that is being vectorized. + STMT_INFO - the original scalar memory-access stmt that is being vectorized. BUMP - optional. The offset by which to bump the pointer. If not given, the offset is assumed to be vector_size. 
@@ -4915,9 +4911,8 @@ vect_create_data_ref_ptr (gimple *stmt, tree aggr_type, struct loop *at_loop, tree bump_vector_ptr (tree dataref_ptr, gimple *ptr_incr, gimple_stmt_iterator *gsi, - gimple *stmt, tree bump) + stmt_vec_info stmt_info, tree bump) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); tree vectype = STMT_VINFO_VECTYPE (stmt_info); tree update = TYPE_SIZE_UNIT (vectype); @@ -5217,11 +5212,10 @@ vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count, void vect_permute_store_chain (vec dr_chain, unsigned int length, - gimple *stmt, + stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, vec *result_chain) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); tree vect1, vect2, high, low; gimple *perm_stmt; tree vectype = STMT_VINFO_VECTYPE (stmt_info); @@ -5368,12 +5362,12 @@ vect_permute_store_chain (vec dr_chain, dr_explicit_realign_optimized. The code above sets up a new (vector) pointer, pointing to the first - location accessed by STMT, and a "floor-aligned" load using that pointer. - It also generates code to compute the "realignment-token" (if the relevant - target hook was defined), and creates a phi-node at the loop-header bb - whose arguments are the result of the prolog-load (created by this - function) and the result of a load that takes place in the loop (to be - created by the caller to this function). + location accessed by STMT_INFO, and a "floor-aligned" load using that + pointer. It also generates code to compute the "realignment-token" + (if the relevant target hook was defined), and creates a phi-node at the + loop-header bb whose arguments are the result of the prolog-load (created + by this function) and the result of a load that takes place in the loop + (to be created by the caller to this function). 
For the case of dr_explicit_realign_optimized: The caller to this function uses the phi-result (msq) to create the @@ -5392,8 +5386,8 @@ vect_permute_store_chain (vec dr_chain, result = realign_load (msq, lsq, realignment_token); Input: - STMT - (scalar) load stmt to be vectorized. This load accesses - a memory location that may be unaligned. + STMT_INFO - (scalar) load stmt to be vectorized. This load accesses + a memory location that may be unaligned. BSI - place where new code is to be inserted. ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes is used. @@ -5404,13 +5398,12 @@ vect_permute_store_chain (vec dr_chain, Return value - the result of the loop-header phi node. */ tree -vect_setup_realignment (gimple *stmt, gimple_stmt_iterator *gsi, +vect_setup_realignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, tree *realignment_token, enum dr_alignment_support alignment_support_scheme, tree init_addr, struct loop **at_loop) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); tree vectype = STMT_VINFO_VECTYPE (stmt_info); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); @@ -5839,11 +5832,10 @@ vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count, static void vect_permute_load_chain (vec dr_chain, unsigned int length, - gimple *stmt, + stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, vec *result_chain) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); tree data_ref, first_vect, second_vect; tree perm_mask_even, perm_mask_odd; tree perm3_mask_low, perm3_mask_high; @@ -6043,11 +6035,10 @@ vect_permute_load_chain (vec dr_chain, static bool vect_shift_permute_load_chain (vec dr_chain, unsigned int length, - gimple *stmt, + stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, vec *result_chain) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); tree vect[3], vect_shift[3], data_ref, first_vect, second_vect; tree perm2_mask1, perm2_mask2, 
perm3_mask; tree select_mask, shift1_mask, shift2_mask, shift3_mask, shift4_mask; @@ -6311,10 +6302,9 @@ vect_shift_permute_load_chain (vec dr_chain, */ void -vect_transform_grouped_load (gimple *stmt, vec dr_chain, int size, - gimple_stmt_iterator *gsi) +vect_transform_grouped_load (stmt_vec_info stmt_info, vec dr_chain, + int size, gimple_stmt_iterator *gsi) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); machine_mode mode; vec result_chain = vNULL; @@ -6337,13 +6327,13 @@ vect_transform_grouped_load (gimple *stmt, vec dr_chain, int size, } /* RESULT_CHAIN contains the output of a group of grouped loads that were - generated as part of the vectorization of STMT. Assign the statement + generated as part of the vectorization of STMT_INFO. Assign the statement for each vector to the associated scalar statement. */ void -vect_record_grouped_load_vectors (gimple *stmt, vec result_chain) +vect_record_grouped_load_vectors (stmt_vec_info stmt_info, + vec result_chain) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); vec_info *vinfo = stmt_info->vinfo; stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info); unsigned int i, gap_count; diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c index da2a2eeb2bd..7fe8442e522 100644 --- a/gcc/tree-vect-loop.c +++ b/gcc/tree-vect-loop.c @@ -648,12 +648,12 @@ vect_analyze_scalar_cycles (loop_vec_info loop_vinfo) vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner); } -/* Transfer group and reduction information from STMT to its pattern stmt. */ +/* Transfer group and reduction information from STMT_INFO to its + pattern stmt. 
*/ static void -vect_fixup_reduc_chain (gimple *stmt) +vect_fixup_reduc_chain (stmt_vec_info stmt_info) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); stmt_vec_info firstp = STMT_VINFO_RELATED_STMT (stmt_info); stmt_vec_info stmtp; gcc_assert (!REDUC_GROUP_FIRST_ELEMENT (firstp) @@ -3998,15 +3998,15 @@ vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies, /* Function get_initial_def_for_reduction Input: - STMT - a stmt that performs a reduction operation in the loop. + STMT_VINFO - a stmt that performs a reduction operation in the loop. INIT_VAL - the initial value of the reduction variable Output: ADJUSTMENT_DEF - a tree that holds a value to be added to the final result of the reduction (used for adjusting the epilog - see below). - Return a vector variable, initialized according to the operation that STMT - performs. This vector will be used as the initial value of the - vector of partial results. + Return a vector variable, initialized according to the operation that + STMT_VINFO performs. This vector will be used as the initial value + of the vector of partial results. Option1 (adjust in epilog): Initialize the vector as follows: add/bit or/xor: [0,0,...,0,0] @@ -4027,7 +4027,7 @@ vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies, for (i=0;i 1 in case the vectorization factor (VF) is bigger than the number of elements that we can fit in a vectype (nunits). In this case we have to generate more than one vector stmt - i.e - we need to "unroll" @@ -4334,7 +4333,7 @@ get_initial_defs_for_reduction (slp_tree slp_node, statement that is defined by REDUCTION_PHI. DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled. SLP_NODE is an SLP node containing a group of reduction statements. The - first one in this group is STMT. + first one in this group is STMT_INFO. INDUC_VAL is for INTEGER_INDUC_COND_REDUCTION the value to use for the case when the COND_EXPR is never true in the loop. 
For MAX_EXPR, it needs to be smaller than any value of the IV in the loop, for MIN_EXPR larger than @@ -4359,8 +4358,8 @@ get_initial_defs_for_reduction (slp_tree slp_node, loop: vec_def = phi # REDUCTION_PHI - VECT_DEF = vector_stmt # vectorized form of STMT - s_loop = scalar_stmt # (scalar) STMT + VECT_DEF = vector_stmt # vectorized form of STMT_INFO + s_loop = scalar_stmt # (scalar) STMT_INFO loop_exit: s_out0 = phi # (scalar) EXIT_PHI use @@ -4370,8 +4369,8 @@ get_initial_defs_for_reduction (slp_tree slp_node, loop: vec_def = phi # REDUCTION_PHI - VECT_DEF = vector_stmt # vectorized form of STMT - s_loop = scalar_stmt # (scalar) STMT + VECT_DEF = vector_stmt # vectorized form of STMT_INFO + s_loop = scalar_stmt # (scalar) STMT_INFO loop_exit: s_out0 = phi # (scalar) EXIT_PHI v_out1 = phi # NEW_EXIT_PHI @@ -4383,7 +4382,8 @@ get_initial_defs_for_reduction (slp_tree slp_node, */ static void -vect_create_epilog_for_reduction (vec vect_defs, gimple *stmt, +vect_create_epilog_for_reduction (vec vect_defs, + stmt_vec_info stmt_info, gimple *reduc_def_stmt, int ncopies, internal_fn reduc_fn, vec reduction_phis, @@ -4393,7 +4393,6 @@ vect_create_epilog_for_reduction (vec vect_defs, gimple *stmt, tree induc_val, enum tree_code induc_code, tree neutral_op) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); stmt_vec_info prev_phi_info; tree vectype; machine_mode mode; @@ -5816,9 +5815,9 @@ vect_expand_fold_left (gimple_stmt_iterator *gsi, tree scalar_dest, return lhs; } -/* Perform an in-order reduction (FOLD_LEFT_REDUCTION). STMT is the +/* Perform an in-order reduction (FOLD_LEFT_REDUCTION). STMT_INFO is the statement that sets the live-out value. REDUC_DEF_STMT is the phi - statement. CODE is the operation performed by STMT and OPS are + statement. CODE is the operation performed by STMT_INFO and OPS are its scalar operands. REDUC_INDEX is the index of the operand in OPS that is set by REDUC_DEF_STMT. 
REDUC_FN is the function that implements in-order reduction, or IFN_LAST if we should open-code it. @@ -5826,14 +5825,14 @@ vect_expand_fold_left (gimple_stmt_iterator *gsi, tree scalar_dest, that should be used to control the operation in a fully-masked loop. */ static bool -vectorize_fold_left_reduction (gimple *stmt, gimple_stmt_iterator *gsi, +vectorize_fold_left_reduction (stmt_vec_info stmt_info, + gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, slp_tree slp_node, gimple *reduc_def_stmt, tree_code code, internal_fn reduc_fn, tree ops[3], tree vectype_in, int reduc_index, vec_loop_masks *masks) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); tree vectype_out = STMT_VINFO_VECTYPE (stmt_info); @@ -5962,16 +5961,16 @@ vectorize_fold_left_reduction (gimple *stmt, gimple_stmt_iterator *gsi, /* Function is_nonwrapping_integer_induction. - Check if STMT (which is part of loop LOOP) both increments and + Check if STMT_VINFO (which is part of loop LOOP) both increments and does not cause overflow. */ static bool -is_nonwrapping_integer_induction (gimple *stmt, struct loop *loop) +is_nonwrapping_integer_induction (stmt_vec_info stmt_vinfo, struct loop *loop) { - stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt); + gphi *phi = as_a (stmt_vinfo->stmt); tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo); tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo); - tree lhs_type = TREE_TYPE (gimple_phi_result (stmt)); + tree lhs_type = TREE_TYPE (gimple_phi_result (phi)); widest_int ni, max_loop_value, lhs_max; wi::overflow_type overflow = wi::OVF_NONE; @@ -6004,17 +6003,18 @@ is_nonwrapping_integer_induction (gimple *stmt, struct loop *loop) /* Function vectorizable_reduction. - Check if STMT performs a reduction operation that can be vectorized. 
- If VEC_STMT is also passed, vectorize the STMT: create a vectorized + Check if STMT_INFO performs a reduction operation that can be vectorized. + If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized stmt to replace it, put it in VEC_STMT, and insert it at GSI. - Return FALSE if not a vectorizable STMT, TRUE otherwise. + Return true if STMT_INFO is vectorizable in this way. This function also handles reduction idioms (patterns) that have been - recognized in advance during vect_pattern_recog. In this case, STMT may be - of this form: + recognized in advance during vect_pattern_recog. In this case, STMT_INFO + may be of this form: X = pattern_expr (arg0, arg1, ..., X) - and it's STMT_VINFO_RELATED_STMT points to the last stmt in the original - sequence that had been detected and replaced by the pattern-stmt (STMT). + and its STMT_VINFO_RELATED_STMT points to the last stmt in the original + sequence that had been detected and replaced by the pattern-stmt + (STMT_INFO). This function also handles reduction of condition expressions, for example: for (int i = 0; i < N; i++) @@ -6026,9 +6026,9 @@ is_nonwrapping_integer_induction (gimple *stmt, struct loop *loop) index into the vector of results. In some cases of reduction patterns, the type of the reduction variable X is - different than the type of the other arguments of STMT. - In such cases, the vectype that is used when transforming STMT into a vector - stmt is different than the vectype that is used to determine the + different than the type of the other arguments of STMT_INFO. + In such cases, the vectype that is used when transforming STMT_INFO into + a vector stmt is different than the vectype that is used to determine the vectorization factor, because it consists of a different number of elements than the actual number of elements that are being operated upon in parallel. 
@@ -6052,14 +6052,13 @@ is_nonwrapping_integer_induction (gimple *stmt, struct loop *loop) does *NOT* necessarily hold for reduction patterns. */ bool -vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi, +vectorizable_reduction (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, slp_tree slp_node, slp_instance slp_node_instance, stmt_vector_for_cost *cost_vec) { tree vec_dest; tree scalar_dest; - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); tree vectype_out = STMT_VINFO_VECTYPE (stmt_info); tree vectype_in = NULL_TREE; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); @@ -6247,7 +6246,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi, inside the loop body. The last operand is the reduction variable, which is defined by the loop-header-phi. */ - gcc_assert (is_gimple_assign (stmt)); + gassign *stmt = as_a (stmt_info->stmt); /* Flatten RHS. */ switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))) @@ -7240,18 +7239,17 @@ vect_worthwhile_without_simd_p (vec_info *vinfo, tree_code code) /* Function vectorizable_induction - Check if PHI performs an induction computation that can be vectorized. + Check if STMT_INFO performs an induction computation that can be vectorized. If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized phi to replace it, put it in VEC_STMT, and add it to the same basic block. - Return FALSE if not a vectorizable STMT, TRUE otherwise. */ + Return true if STMT_INFO is vectorizable in this way. 
*/ bool -vectorizable_induction (gimple *phi, +vectorizable_induction (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED, stmt_vec_info *vec_stmt, slp_tree slp_node, stmt_vector_for_cost *cost_vec) { - stmt_vec_info stmt_info = vinfo_for_stmt (phi); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); unsigned ncopies; @@ -7276,9 +7274,9 @@ vectorizable_induction (gimple *phi, edge latch_e; tree loop_arg; gimple_stmt_iterator si; - basic_block bb = gimple_bb (phi); - if (gimple_code (phi) != GIMPLE_PHI) + gphi *phi = dyn_cast <gphi *> (stmt_info->stmt); + if (!phi) return false; if (!STMT_VINFO_RELEVANT_P (stmt_info)) @@ -7426,6 +7424,7 @@ vectorizable_induction (gimple *phi, } /* Find the first insertion point in the BB. */ + basic_block bb = gimple_bb (phi); si = gsi_after_labels (bb); /* For SLP induction we have to generate several IVs as for example @@ -7791,17 +7790,16 @@ vectorizable_induction (gimple *phi, /* Function vectorizable_live_operation. - STMT computes a value that is used outside the loop. Check if + STMT_INFO computes a value that is used outside the loop. Check if it can be supported. */ bool -vectorizable_live_operation (gimple *stmt, +vectorizable_live_operation (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED, slp_tree slp_node, int slp_index, stmt_vec_info *vec_stmt, stmt_vector_for_cost *) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); imm_use_iterator imm_iter; @@ -7908,8 +7906,9 @@ vectorizable_live_operation (gimple *stmt, } /* If stmt has a related stmt, then use that for getting the lhs. */ - if (is_pattern_stmt_p (stmt_info)) - stmt = STMT_VINFO_RELATED_STMT (stmt_info); + gimple *stmt = (is_pattern_stmt_p (stmt_info) ? STMT_VINFO_RELATED_STMT (stmt_info)->stmt + : stmt_info->stmt); lhs = (is_a <gphi *> (stmt)) ?
gimple_phi_result (stmt) : gimple_get_lhs (stmt); @@ -8010,17 +8009,17 @@ vectorizable_live_operation (gimple *stmt, return true; } -/* Kill any debug uses outside LOOP of SSA names defined in STMT. */ +/* Kill any debug uses outside LOOP of SSA names defined in STMT_INFO. */ static void -vect_loop_kill_debug_uses (struct loop *loop, gimple *stmt) +vect_loop_kill_debug_uses (struct loop *loop, stmt_vec_info stmt_info) { ssa_op_iter op_iter; imm_use_iterator imm_iter; def_operand_p def_p; gimple *ustmt; - FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF) + FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt_info->stmt, op_iter, SSA_OP_DEF) { FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p)) { diff --git a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c index fa11c6ec684..72785c882a4 100644 --- a/gcc/tree-vect-patterns.c +++ b/gcc/tree-vect-patterns.c @@ -236,22 +236,20 @@ vect_get_internal_def (vec_info *vinfo, tree op) return NULL; } -/* Check whether NAME, an ssa-name used in USE_STMT, +/* Check whether NAME, an ssa-name used in STMT_VINFO, is a result of a type promotion, such that: DEF_STMT: NAME = NOP (name0) If CHECK_SIGN is TRUE, check that either both types are signed or both are unsigned. */ static bool -type_conversion_p (tree name, gimple *use_stmt, bool check_sign, +type_conversion_p (tree name, stmt_vec_info stmt_vinfo, bool check_sign, tree *orig_type, gimple **def_stmt, bool *promotion) { - stmt_vec_info stmt_vinfo; tree type = TREE_TYPE (name); tree oprnd0; enum vect_def_type dt; - stmt_vinfo = vinfo_for_stmt (use_stmt); stmt_vec_info def_stmt_info; if (!vect_is_simple_use (name, stmt_vinfo->vinfo, &dt, &def_stmt_info, def_stmt)) @@ -3498,15 +3496,13 @@ sort_after_uid (const void *p1, const void *p2) } /* Create pattern stmts for all stmts participating in the bool pattern - specified by BOOL_STMT_SET and its root STMT with the desired type + specified by BOOL_STMT_SET and its root STMT_INFO with the desired type OUT_TYPE. 
Return the def of the pattern root. */ static tree adjust_bool_stmts (hash_set &bool_stmt_set, - tree out_type, gimple *stmt) + tree out_type, stmt_vec_info stmt_info) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); - /* Gather original stmts in the bool pattern in their order of appearance in the IL. */ auto_vec bool_stmts (bool_stmt_set.elements ()); @@ -4126,19 +4122,19 @@ vect_recog_mask_conversion_pattern (stmt_vec_info stmt_vinfo, tree *type_out) return pattern_stmt; } -/* STMT is a load or store. If the load or store is conditional, return +/* STMT_INFO is a load or store. If the load or store is conditional, return the boolean condition under which it occurs, otherwise return null. */ static tree -vect_get_load_store_mask (gimple *stmt) +vect_get_load_store_mask (stmt_vec_info stmt_info) { - if (gassign *def_assign = dyn_cast (stmt)) + if (gassign *def_assign = dyn_cast (stmt_info->stmt)) { gcc_assert (gimple_assign_single_p (def_assign)); return NULL_TREE; } - if (gcall *def_call = dyn_cast (stmt)) + if (gcall *def_call = dyn_cast (stmt_info->stmt)) { internal_fn ifn = gimple_call_internal_fn (def_call); int mask_index = internal_fn_mask_index (ifn); diff --git a/gcc/tree-vect-slp.c b/gcc/tree-vect-slp.c index c36d130804b..d3c44bca406 100644 --- a/gcc/tree-vect-slp.c +++ b/gcc/tree-vect-slp.c @@ -195,14 +195,14 @@ vect_free_oprnd_info (vec &oprnds_info) } -/* Find the place of the data-ref in STMT in the interleaving chain that starts - from FIRST_STMT. Return -1 if the data-ref is not a part of the chain. */ +/* Find the place of the data-ref in STMT_INFO in the interleaving chain + that starts from FIRST_STMT_INFO. Return -1 if the data-ref is not a part + of the chain. 
*/ int -vect_get_place_in_interleaving_chain (gimple *stmt, gimple *first_stmt) +vect_get_place_in_interleaving_chain (stmt_vec_info stmt_info, + stmt_vec_info first_stmt_info) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); - stmt_vec_info first_stmt_info = vinfo_for_stmt (first_stmt); stmt_vec_info next_stmt_info = first_stmt_info; int result = 0; @@ -1918,9 +1918,8 @@ calculate_unrolling_factor (poly_uint64 nunits, unsigned int group_size) static bool vect_analyze_slp_instance (vec_info *vinfo, - gimple *stmt, unsigned max_tree_size) + stmt_vec_info stmt_info, unsigned max_tree_size) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); slp_instance new_instance; slp_tree node; unsigned int group_size; @@ -3118,13 +3117,12 @@ vect_slp_bb (basic_block bb) /* Return 1 if vector type of boolean constant which is OPNUM - operand in statement STMT is a boolean vector. */ + operand in statement STMT_VINFO is a boolean vector. */ static bool -vect_mask_constant_operand_p (gimple *stmt, int opnum) +vect_mask_constant_operand_p (stmt_vec_info stmt_vinfo, int opnum) { - stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt); - enum tree_code code = gimple_expr_code (stmt); + enum tree_code code = gimple_expr_code (stmt_vinfo->stmt); tree op, vectype; enum vect_def_type dt; @@ -3132,6 +3130,7 @@ vect_mask_constant_operand_p (gimple *stmt, int opnum) on the other comparison operand. 
*/ if (TREE_CODE_CLASS (code) == tcc_comparison) { + gassign *stmt = as_a (stmt_vinfo->stmt); if (opnum) op = gimple_assign_rhs1 (stmt); else @@ -3145,6 +3144,7 @@ vect_mask_constant_operand_p (gimple *stmt, int opnum) if (code == COND_EXPR) { + gassign *stmt = as_a (stmt_vinfo->stmt); tree cond = gimple_assign_rhs1 (stmt); if (TREE_CODE (cond) == SSA_NAME) diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c index 5353e721a0e..cc3c36ed429 100644 --- a/gcc/tree-vect-stmts.c +++ b/gcc/tree-vect-stmts.c @@ -192,13 +192,12 @@ vect_clobber_variable (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, /* Function vect_mark_relevant. - Mark STMT as "relevant for vectorization" and add it to WORKLIST. */ + Mark STMT_INFO as "relevant for vectorization" and add it to WORKLIST. */ static void -vect_mark_relevant (vec *worklist, gimple *stmt, +vect_mark_relevant (vec *worklist, stmt_vec_info stmt_info, enum vect_relevant relevant, bool live_p) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info); bool save_live_p = STMT_VINFO_LIVE_P (stmt_info); @@ -229,7 +228,6 @@ vect_mark_relevant (vec *worklist, gimple *stmt, gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == old_stmt_info); save_relevant = STMT_VINFO_RELEVANT (stmt_info); save_live_p = STMT_VINFO_LIVE_P (stmt_info); - stmt = stmt_info->stmt; } STMT_VINFO_LIVE_P (stmt_info) |= live_p; @@ -251,15 +249,17 @@ vect_mark_relevant (vec *worklist, gimple *stmt, /* Function is_simple_and_all_uses_invariant - Return true if STMT is simple and all uses of it are invariant. */ + Return true if STMT_INFO is simple and all uses of it are invariant. 
*/ bool -is_simple_and_all_uses_invariant (gimple *stmt, loop_vec_info loop_vinfo) +is_simple_and_all_uses_invariant (stmt_vec_info stmt_info, + loop_vec_info loop_vinfo) { tree op; ssa_op_iter iter; - if (!is_gimple_assign (stmt)) + gassign *stmt = dyn_cast (stmt_info->stmt); + if (!stmt) return false; FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE) @@ -361,14 +361,13 @@ vect_stmt_relevant_p (stmt_vec_info stmt_info, loop_vec_info loop_vinfo, /* Function exist_non_indexing_operands_for_use_p - USE is one of the uses attached to STMT. Check if USE is - used in STMT for anything other than indexing an array. */ + USE is one of the uses attached to STMT_INFO. Check if USE is + used in STMT_INFO for anything other than indexing an array. */ static bool -exist_non_indexing_operands_for_use_p (tree use, gimple *stmt) +exist_non_indexing_operands_for_use_p (tree use, stmt_vec_info stmt_info) { tree operand; - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); /* USE corresponds to some operand in STMT. If there is no data reference in STMT, then any operand that corresponds to USE @@ -428,7 +427,7 @@ exist_non_indexing_operands_for_use_p (tree use, gimple *stmt) Function process_use. Inputs: - - a USE in STMT in a loop represented by LOOP_VINFO + - a USE in STMT_VINFO in a loop represented by LOOP_VINFO - RELEVANT - enum value to be set in the STMT_VINFO of the stmt that defined USE. This is done by calling mark_relevant and passing it the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant). 
@@ -438,25 +437,24 @@ exist_non_indexing_operands_for_use_p (tree use, gimple *stmt) Outputs: Generally, LIVE_P and RELEVANT are used to define the liveness and relevance info of the DEF_STMT of this USE: - STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p - STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant + STMT_VINFO_LIVE_P (DEF_stmt_vinfo) <-- live_p + STMT_VINFO_RELEVANT (DEF_stmt_vinfo) <-- relevant Exceptions: - case 1: If USE is used only for address computations (e.g. array indexing), which does not need to be directly vectorized, then the liveness/relevance of the respective DEF_STMT is left unchanged. - - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we - skip DEF_STMT cause it had already been processed. - - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will - be modified accordingly. + - case 2: If STMT_VINFO is a reduction phi and DEF_STMT is a reduction stmt, + we skip DEF_STMT cause it had already been processed. + - case 3: If DEF_STMT and STMT_VINFO are in different nests, then + "relevant" will be modified accordingly. Return true if everything is as expected. Return false otherwise. */ static bool -process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo, +process_use (stmt_vec_info stmt_vinfo, tree use, loop_vec_info loop_vinfo, enum vect_relevant relevant, vec *worklist, bool force) { - stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt); stmt_vec_info dstmt_vinfo; basic_block bb, def_bb; enum vect_def_type dt; @@ -1342,12 +1340,12 @@ vect_get_load_cost (stmt_vec_info stmt_info, int ncopies, } /* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in - the loop preheader for the vectorized stmt STMT. */ + the loop preheader for the vectorized stmt STMT_VINFO. 
*/ static void -vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi) +vect_init_vector_1 (stmt_vec_info stmt_vinfo, gimple *new_stmt, + gimple_stmt_iterator *gsi) { - stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt); if (gsi) vect_finish_stmt_generation (stmt_vinfo, new_stmt, gsi); else @@ -1396,12 +1394,12 @@ vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi) Place the initialization at BSI if it is not NULL. Otherwise, place the initialization at the loop preheader. Return the DEF of INIT_STMT. - It will be used in the vectorization of STMT. */ + It will be used in the vectorization of STMT_INFO. */ tree -vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi) +vect_init_vector (stmt_vec_info stmt_info, tree val, tree type, + gimple_stmt_iterator *gsi) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); gimple *init_stmt; tree new_temp; @@ -1456,15 +1454,15 @@ vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi) /* Function vect_get_vec_def_for_operand_1. - For a defining stmt DEF_STMT of a scalar stmt, return a vector def with type - DT that will be used in the vectorized stmt. */ + For a defining stmt DEF_STMT_INFO of a scalar stmt, return a vector def + with type DT that will be used in the vectorized stmt. */ tree -vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt) +vect_get_vec_def_for_operand_1 (stmt_vec_info def_stmt_info, + enum vect_def_type dt) { tree vec_oprnd; stmt_vec_info vec_stmt_info; - stmt_vec_info def_stmt_info = NULL; switch (dt) { @@ -1478,8 +1476,6 @@ vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt) case vect_internal_def: { /* Get the def from the vectorized stmt. */ - def_stmt_info = vinfo_for_stmt (def_stmt); - vec_stmt_info = STMT_VINFO_VEC_STMT (def_stmt_info); /* Get vectorized pattern statement. 
*/ if (!vec_stmt_info @@ -1501,10 +1497,9 @@ vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt) case vect_nested_cycle: case vect_induction_def: { - gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI); + gcc_assert (gimple_code (def_stmt_info->stmt) == GIMPLE_PHI); /* Get the def from the vectorized stmt. */ - def_stmt_info = vinfo_for_stmt (def_stmt); vec_stmt_info = STMT_VINFO_VEC_STMT (def_stmt_info); if (gphi *phi = dyn_cast (vec_stmt_info->stmt)) vec_oprnd = PHI_RESULT (phi); @@ -1521,8 +1516,8 @@ vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt) /* Function vect_get_vec_def_for_operand. - OP is an operand in STMT. This function returns a (vector) def that will be - used in the vectorized stmt for STMT. + OP is an operand in STMT_VINFO. This function returns a (vector) def + that will be used in the vectorized stmt for STMT_VINFO. In the case that OP is an SSA_NAME which is defined in the loop, then STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def. @@ -1532,12 +1527,11 @@ vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt) vector invariant. */ tree -vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype) +vect_get_vec_def_for_operand (tree op, stmt_vec_info stmt_vinfo, tree vectype) { gimple *def_stmt; enum vect_def_type dt; bool is_simple_use; - stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo); if (dump_enabled_p ()) @@ -1683,12 +1677,11 @@ vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt, /* Get vectorized definitions for OP0 and OP1. */ void -vect_get_vec_defs (tree op0, tree op1, gimple *stmt, +vect_get_vec_defs (tree op0, tree op1, stmt_vec_info stmt_info, vec *vec_oprnds0, vec *vec_oprnds1, slp_tree slp_node) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); if (slp_node) { int nops = (op1 == NULL_TREE) ? 
1 : 2; @@ -1727,9 +1720,8 @@ vect_get_vec_defs (tree op0, tree op1, gimple *stmt, statement and create and return a stmt_vec_info for it. */ static stmt_vec_info -vect_finish_stmt_generation_1 (gimple *stmt, gimple *vec_stmt) +vect_finish_stmt_generation_1 (stmt_vec_info stmt_info, gimple *vec_stmt) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); vec_info *vinfo = stmt_info->vinfo; stmt_vec_info vec_stmt_info = vinfo->add_stmt (vec_stmt); @@ -1752,14 +1744,13 @@ vect_finish_stmt_generation_1 (gimple *stmt, gimple *vec_stmt) return vec_stmt_info; } -/* Replace the scalar statement STMT with a new vector statement VEC_STMT, - which sets the same scalar result as STMT did. Create and return a +/* Replace the scalar statement STMT_INFO with a new vector statement VEC_STMT, + which sets the same scalar result as STMT_INFO did. Create and return a stmt_vec_info for VEC_STMT. */ stmt_vec_info -vect_finish_replace_stmt (gimple *stmt, gimple *vec_stmt) +vect_finish_replace_stmt (stmt_vec_info stmt_info, gimple *vec_stmt) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); gcc_assert (gimple_get_lhs (stmt_info->stmt) == gimple_get_lhs (vec_stmt)); gimple_stmt_iterator gsi = gsi_for_stmt (stmt_info->stmt); @@ -1768,14 +1759,13 @@ vect_finish_replace_stmt (gimple *stmt, gimple *vec_stmt) return vect_finish_stmt_generation_1 (stmt_info, vec_stmt); } -/* Add VEC_STMT to the vectorized implementation of STMT and insert it +/* Add VEC_STMT to the vectorized implementation of STMT_INFO and insert it before *GSI. Create and return a stmt_vec_info for VEC_STMT. 
*/ stmt_vec_info -vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt, +vect_finish_stmt_generation (stmt_vec_info stmt_info, gimple *vec_stmt, gimple_stmt_iterator *gsi) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); gcc_assert (gimple_code (stmt_info->stmt) != GIMPLE_LABEL); if (!gsi_end_p (*gsi) @@ -1976,22 +1966,21 @@ prepare_load_store_mask (tree mask_type, tree loop_mask, tree vec_mask, } /* Determine whether we can use a gather load or scatter store to vectorize - strided load or store STMT by truncating the current offset to a smaller - width. We need to be able to construct an offset vector: + strided load or store STMT_INFO by truncating the current offset to a + smaller width. We need to be able to construct an offset vector: { 0, X, X*2, X*3, ... } - without loss of precision, where X is STMT's DR_STEP. + without loss of precision, where X is STMT_INFO's DR_STEP. Return true if this is possible, describing the gather load or scatter store in GS_INFO. MASKED_P is true if the load or store is conditional. */ static bool -vect_truncate_gather_scatter_offset (gimple *stmt, loop_vec_info loop_vinfo, - bool masked_p, +vect_truncate_gather_scatter_offset (stmt_vec_info stmt_info, + loop_vec_info loop_vinfo, bool masked_p, gather_scatter_info *gs_info) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); tree step = DR_STEP (dr); if (TREE_CODE (step) != INTEGER_CST) @@ -2112,14 +2101,13 @@ vect_use_strided_gather_scatters_p (stmt_vec_info stmt_info, return true; } -/* STMT is a non-strided load or store, meaning that it accesses +/* STMT_INFO is a non-strided load or store, meaning that it accesses elements with a known constant step. Return -1 if that step is negative, 0 if it is zero, and 1 if it is greater than zero. 
*/ static int -compare_step_with_zero (gimple *stmt) +compare_step_with_zero (stmt_vec_info stmt_info) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); return tree_int_cst_compare (vect_dr_behavior (dr)->step, size_zero_node); @@ -2144,29 +2132,29 @@ perm_mask_for_reverse (tree vectype) return vect_gen_perm_mask_checked (vectype, indices); } -/* STMT is either a masked or unconditional store. Return the value +/* STMT_INFO is either a masked or unconditional store. Return the value being stored. */ tree -vect_get_store_rhs (gimple *stmt) +vect_get_store_rhs (stmt_vec_info stmt_info) { - if (gassign *assign = dyn_cast (stmt)) + if (gassign *assign = dyn_cast (stmt_info->stmt)) { gcc_assert (gimple_assign_single_p (assign)); return gimple_assign_rhs1 (assign); } - if (gcall *call = dyn_cast (stmt)) + if (gcall *call = dyn_cast (stmt_info->stmt)) { internal_fn ifn = gimple_call_internal_fn (call); int index = internal_fn_stored_value_index (ifn); gcc_assert (index >= 0); - return gimple_call_arg (stmt, index); + return gimple_call_arg (call, index); } gcc_unreachable (); } /* A subroutine of get_load_store_type, with a subset of the same - arguments. Handle the case where STMT is part of a grouped load + arguments. Handle the case where STMT_INFO is part of a grouped load or store. For stores, the statements in the group are all consecutive @@ -2175,12 +2163,11 @@ vect_get_store_rhs (gimple *stmt) as well as at the end. */ static bool -get_group_load_store_type (gimple *stmt, tree vectype, bool slp, +get_group_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp, bool masked_p, vec_load_store_type vls_type, vect_memory_access_type *memory_access_type, gather_scatter_info *gs_info) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); vec_info *vinfo = stmt_info->vinfo; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = loop_vinfo ? 
LOOP_VINFO_LOOP (loop_vinfo) : NULL; @@ -2350,15 +2337,14 @@ get_group_load_store_type (gimple *stmt, tree vectype, bool slp, } /* A subroutine of get_load_store_type, with a subset of the same - arguments. Handle the case where STMT is a load or store that + arguments. Handle the case where STMT_INFO is a load or store that accesses consecutive elements with a negative step. */ static vect_memory_access_type -get_negative_load_store_type (gimple *stmt, tree vectype, +get_negative_load_store_type (stmt_vec_info stmt_info, tree vectype, vec_load_store_type vls_type, unsigned int ncopies) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); dr_alignment_support alignment_support_scheme; @@ -2400,7 +2386,7 @@ get_negative_load_store_type (gimple *stmt, tree vectype, return VMAT_CONTIGUOUS_REVERSE; } -/* Analyze load or store statement STMT of type VLS_TYPE. Return true +/* Analyze load or store statement STMT_INFO of type VLS_TYPE. Return true if there is a memory access type that the vectorized form can use, storing it in *MEMORY_ACCESS_TYPE if so. If we decide to use gathers or scatters, fill in GS_INFO accordingly. @@ -2411,12 +2397,12 @@ get_negative_load_store_type (gimple *stmt, tree vectype, NCOPIES is the number of vector statements that will be needed. 
*/ static bool -get_load_store_type (gimple *stmt, tree vectype, bool slp, bool masked_p, - vec_load_store_type vls_type, unsigned int ncopies, +get_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp, + bool masked_p, vec_load_store_type vls_type, + unsigned int ncopies, vect_memory_access_type *memory_access_type, gather_scatter_info *gs_info) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); vec_info *vinfo = stmt_info->vinfo; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype); @@ -2496,12 +2482,12 @@ get_load_store_type (gimple *stmt, tree vectype, bool slp, bool masked_p, } /* Return true if boolean argument MASK is suitable for vectorizing - conditional load or store STMT. When returning true, store the type + conditional load or store STMT_INFO. When returning true, store the type of the definition in *MASK_DT_OUT and the type of the vectorized mask in *MASK_VECTYPE_OUT. */ static bool -vect_check_load_store_mask (gimple *stmt, tree mask, +vect_check_load_store_mask (stmt_vec_info stmt_info, tree mask, vect_def_type *mask_dt_out, tree *mask_vectype_out) { @@ -2521,7 +2507,6 @@ vect_check_load_store_mask (gimple *stmt, tree mask, return false; } - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); enum vect_def_type mask_dt; tree mask_vectype; if (!vect_is_simple_use (mask, stmt_info->vinfo, &mask_dt, &mask_vectype)) @@ -2566,13 +2551,14 @@ vect_check_load_store_mask (gimple *stmt, tree mask, } /* Return true if stored value RHS is suitable for vectorizing store - statement STMT. When returning true, store the type of the + statement STMT_INFO. When returning true, store the type of the definition in *RHS_DT_OUT, the type of the vectorized store value in *RHS_VECTYPE_OUT and the type of the store in *VLS_TYPE_OUT. 
*/ static bool -vect_check_store_rhs (gimple *stmt, tree rhs, vect_def_type *rhs_dt_out, - tree *rhs_vectype_out, vec_load_store_type *vls_type_out) +vect_check_store_rhs (stmt_vec_info stmt_info, tree rhs, + vect_def_type *rhs_dt_out, tree *rhs_vectype_out, + vec_load_store_type *vls_type_out) { /* In the case this is a store from a constant make sure native_encode_expr can handle it. */ @@ -2584,7 +2570,6 @@ vect_check_store_rhs (gimple *stmt, tree rhs, vect_def_type *rhs_dt_out, return false; } - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); enum vect_def_type rhs_dt; tree rhs_vectype; if (!vect_is_simple_use (rhs, stmt_info->vinfo, &rhs_dt, &rhs_vectype)) @@ -2666,18 +2651,19 @@ vect_build_zero_merge_argument (stmt_vec_info stmt_info, tree vectype) return vect_init_vector (stmt_info, merge, vectype, NULL); } -/* Build a gather load call while vectorizing STMT. Insert new instructions - before GSI and add them to VEC_STMT. GS_INFO describes the gather load - operation. If the load is conditional, MASK is the unvectorized - condition and MASK_DT is its definition type, otherwise MASK is null. */ +/* Build a gather load call while vectorizing STMT_INFO. Insert new + instructions before GSI and add them to VEC_STMT. GS_INFO describes + the gather load operation. If the load is conditional, MASK is the + unvectorized condition and MASK_DT is its definition type, otherwise + MASK is null. 
*/ static void -vect_build_gather_load_calls (gimple *stmt, gimple_stmt_iterator *gsi, +vect_build_gather_load_calls (stmt_vec_info stmt_info, + gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, - gather_scatter_info *gs_info, tree mask, - vect_def_type mask_dt) + gather_scatter_info *gs_info, + tree mask, vect_def_type mask_dt) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); tree vectype = STMT_VINFO_VECTYPE (stmt_info); @@ -2897,7 +2883,7 @@ vect_get_gather_scatter_ops (struct loop *loop, stmt_vec_info stmt_info, /* Prepare to implement a grouped or strided load or store using the gather load or scatter store operation described by GS_INFO. - STMT is the load or store statement. + STMT_INFO is the load or store statement. Set *DATAREF_BUMP to the amount that should be added to the base address after each copy of the vectorized statement. Set *VEC_OFFSET @@ -2905,11 +2891,11 @@ vect_get_gather_scatter_ops (struct loop *loop, stmt_vec_info stmt_info, I * DR_STEP / SCALE. */ static void -vect_get_strided_load_store_ops (gimple *stmt, loop_vec_info loop_vinfo, +vect_get_strided_load_store_ops (stmt_vec_info stmt_info, + loop_vec_info loop_vinfo, gather_scatter_info *gs_info, tree *dataref_bump, tree *vec_offset) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); tree vectype = STMT_VINFO_VECTYPE (stmt_info); @@ -2963,13 +2949,13 @@ vect_get_data_ptr_increment (data_reference *dr, tree aggr_type, /* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}. 
*/ static bool -vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi, +vectorizable_bswap (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, slp_tree slp_node, tree vectype_in, enum vect_def_type *dt, stmt_vector_for_cost *cost_vec) { tree op, vectype; - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); + gcall *stmt = as_a (stmt_info->stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); unsigned ncopies; unsigned HOST_WIDE_INT nunits, num_bytes; @@ -3103,13 +3089,13 @@ simple_integer_narrowing (tree vectype_out, tree vectype_in, /* Function vectorizable_call. - Check if GS performs a function call that can be vectorized. - If VEC_STMT is also passed, vectorize the STMT: create a vectorized - stmt to replace it, put it in VEC_STMT, and insert it at BSI. - Return FALSE if not a vectorizable STMT, TRUE otherwise. */ + Check if STMT_INFO performs a function call that can be vectorized. + If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized + stmt to replace it, put it in VEC_STMT, and insert it at GSI. + Return true if STMT_INFO is vectorizable in this way. */ static bool -vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, +vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, slp_tree slp_node, stmt_vector_for_cost *cost_vec) { @@ -3118,7 +3104,7 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, tree scalar_dest; tree op; tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE; - stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info; + stmt_vec_info prev_stmt_info; tree vectype_out, vectype_in; poly_uint64 nunits_in; poly_uint64 nunits_out; @@ -3747,14 +3733,15 @@ simd_clone_subparts (tree vectype) /* Function vectorizable_simd_clone_call. - Check if STMT performs a function call that can be vectorized + Check if STMT_INFO performs a function call that can be vectorized by calling a simd clone of the function. 
- If VEC_STMT is also passed, vectorize the STMT: create a vectorized - stmt to replace it, put it in VEC_STMT, and insert it at BSI. - Return FALSE if not a vectorizable STMT, TRUE otherwise. */ + If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized + stmt to replace it, put it in VEC_STMT, and insert it at GSI. + Return true if STMT_INFO is vectorizable in this way. */ static bool -vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi, +vectorizable_simd_clone_call (stmt_vec_info stmt_info, + gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, slp_tree slp_node, stmt_vector_for_cost *) { @@ -3762,7 +3749,7 @@ vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest; tree op, type; tree vec_oprnd0 = NULL_TREE; - stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info; + stmt_vec_info prev_stmt_info; tree vectype; unsigned int nunits; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); @@ -3778,7 +3765,8 @@ vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi, vec *ret_ctor_elts = NULL; /* Is STMT a vectorizable call? */ - if (!is_gimple_call (stmt)) + gcall *stmt = dyn_cast (stmt_info->stmt); + if (!stmt) return false; fndecl = gimple_call_fndecl (stmt); @@ -4487,7 +4475,8 @@ vect_get_loop_based_defs (tree *oprnd, stmt_vec_info stmt_info, static void vect_create_vectorized_demotion_stmts (vec *vec_oprnds, - int multi_step_cvt, gimple *stmt, + int multi_step_cvt, + stmt_vec_info stmt_info, vec vec_dsts, gimple_stmt_iterator *gsi, slp_tree slp_node, enum tree_code code, @@ -4495,7 +4484,6 @@ vect_create_vectorized_demotion_stmts (vec *vec_oprnds, { unsigned int i; tree vop0, vop1, new_tmp, vec_dest; - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); vec_dest = vec_dsts.pop (); @@ -4606,13 +4594,13 @@ vect_create_vectorized_promotion_stmts (vec *vec_oprnds0, } -/* Check if STMT performs a conversion operation, that can be vectorized. 
- If VEC_STMT is also passed, vectorize the STMT: create a vectorized +/* Check if STMT_INFO performs a conversion operation that can be vectorized. + If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized stmt to replace it, put it in VEC_STMT, and insert it at GSI. - Return FALSE if not a vectorizable STMT, TRUE otherwise. */ + Return true if STMT_INFO is vectorizable in this way. */ static bool -vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi, +vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, slp_tree slp_node, stmt_vector_for_cost *cost_vec) { @@ -4620,7 +4608,6 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest; tree op0, op1 = NULL_TREE; tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE; - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK; enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK; @@ -4655,7 +4642,8 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi, && ! vec_stmt) return false; - if (!is_gimple_assign (stmt)) + gassign *stmt = dyn_cast (stmt_info->stmt); + if (!stmt) return false; if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME) @@ -5220,20 +5208,19 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi, /* Function vectorizable_assignment. - Check if STMT performs an assignment (copy) that can be vectorized. - If VEC_STMT is also passed, vectorize the STMT: create a vectorized - stmt to replace it, put it in VEC_STMT, and insert it at BSI. - Return FALSE if not a vectorizable STMT, TRUE otherwise. */ + Check if STMT_INFO performs an assignment (copy) that can be vectorized. + If VEC_STMT is also passed, vectorize the STMT_INFO: create a vectorized + stmt to replace it, put it in VEC_STMT, and insert it at GSI. + Return true if STMT_INFO is vectorizable in this way. 
*/ static bool -vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi, +vectorizable_assignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, slp_tree slp_node, stmt_vector_for_cost *cost_vec) { tree vec_dest; tree scalar_dest; tree op; - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); tree new_temp; enum vect_def_type dt[1] = {vect_unknown_def_type}; @@ -5256,7 +5243,8 @@ vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi, return false; /* Is vectorizable assignment? */ - if (!is_gimple_assign (stmt)) + gassign *stmt = dyn_cast (stmt_info->stmt); + if (!stmt) return false; scalar_dest = gimple_assign_lhs (stmt); @@ -5422,13 +5410,13 @@ vect_supportable_shift (enum tree_code code, tree scalar_type) /* Function vectorizable_shift. - Check if STMT performs a shift operation that can be vectorized. - If VEC_STMT is also passed, vectorize the STMT: create a vectorized - stmt to replace it, put it in VEC_STMT, and insert it at BSI. - Return FALSE if not a vectorizable STMT, TRUE otherwise. */ + Check if STMT_INFO performs a shift operation that can be vectorized. + If VEC_STMT is also passed, vectorize the STMT_INFO: create a vectorized + stmt to replace it, put it in VEC_STMT, and insert it at GSI. + Return true if STMT_INFO is vectorizable in this way. 
*/ static bool -vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi, +vectorizable_shift (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, slp_tree slp_node, stmt_vector_for_cost *cost_vec) { @@ -5436,7 +5424,6 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest; tree op0, op1 = NULL; tree vec_oprnd1 = NULL_TREE; - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); tree vectype; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); enum tree_code code; @@ -5470,7 +5457,8 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi, return false; /* Is STMT a vectorizable binary/unary operation? */ - if (!is_gimple_assign (stmt)) + gassign *stmt = dyn_cast (stmt_info->stmt); + if (!stmt) return false; if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME) @@ -5789,21 +5777,20 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi, /* Function vectorizable_operation. - Check if STMT performs a binary, unary or ternary operation that can + Check if STMT_INFO performs a binary, unary or ternary operation that can be vectorized. - If VEC_STMT is also passed, vectorize the STMT: create a vectorized - stmt to replace it, put it in VEC_STMT, and insert it at BSI. - Return FALSE if not a vectorizable STMT, TRUE otherwise. */ + If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized + stmt to replace it, put it in VEC_STMT, and insert it at GSI. + Return true if STMT_INFO is vectorizable in this way. 
*/ static bool -vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi, +vectorizable_operation (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, slp_tree slp_node, stmt_vector_for_cost *cost_vec) { tree vec_dest; tree scalar_dest; tree op0, op1 = NULL_TREE, op2 = NULL_TREE; - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); tree vectype; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); enum tree_code code, orig_code; @@ -5836,7 +5823,8 @@ vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi, return false; /* Is STMT a vectorizable binary/unary operation? */ - if (!is_gimple_assign (stmt)) + gassign *stmt = dyn_cast (stmt_info->stmt); + if (!stmt) return false; if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME) @@ -6215,12 +6203,11 @@ ensure_base_align (struct data_reference *dr) /* Function get_group_alias_ptr_type. - Return the alias type for the group starting at FIRST_STMT. */ + Return the alias type for the group starting at FIRST_STMT_INFO. */ static tree -get_group_alias_ptr_type (gimple *first_stmt) +get_group_alias_ptr_type (stmt_vec_info first_stmt_info) { - stmt_vec_info first_stmt_info = vinfo_for_stmt (first_stmt); struct data_reference *first_dr, *next_dr; first_dr = STMT_VINFO_DATA_REF (first_stmt_info); @@ -6244,21 +6231,20 @@ get_group_alias_ptr_type (gimple *first_stmt) /* Function vectorizable_store. - Check if STMT defines a non scalar data-ref (array/pointer/structure) that - can be vectorized. - If VEC_STMT is also passed, vectorize the STMT: create a vectorized - stmt to replace it, put it in VEC_STMT, and insert it at BSI. - Return FALSE if not a vectorizable STMT, TRUE otherwise. */ + Check if STMT_INFO defines a non scalar data-ref (array/pointer/structure) + that can be vectorized. + If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized + stmt to replace it, put it in VEC_STMT, and insert it at GSI. + Return true if STMT_INFO is vectorizable in this way. 
*/ static bool -vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, +vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, slp_tree slp_node, stmt_vector_for_cost *cost_vec) { tree data_ref; tree op; tree vec_oprnd = NULL_TREE; - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL; tree elem_type; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); @@ -7350,19 +7336,19 @@ permute_vec_elements (tree x, tree y, tree mask_vec, stmt_vec_info stmt_info, return data_ref; } -/* Hoist the definitions of all SSA uses on STMT out of the loop LOOP, +/* Hoist the definitions of all SSA uses on STMT_INFO out of the loop LOOP, inserting them on the loops preheader edge. Returns true if we - were successful in doing so (and thus STMT can be moved then), + were successful in doing so (and thus STMT_INFO can be moved then), otherwise returns false. */ static bool -hoist_defs_of_uses (gimple *stmt, struct loop *loop) +hoist_defs_of_uses (stmt_vec_info stmt_info, struct loop *loop) { ssa_op_iter i; tree op; bool any = false; - FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE) + FOR_EACH_SSA_TREE_OPERAND (op, stmt_info->stmt, i, SSA_OP_USE) { gimple *def_stmt = SSA_NAME_DEF_STMT (op); if (!gimple_nop_p (def_stmt) @@ -7390,7 +7376,7 @@ hoist_defs_of_uses (gimple *stmt, struct loop *loop) if (!any) return true; - FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE) + FOR_EACH_SSA_TREE_OPERAND (op, stmt_info->stmt, i, SSA_OP_USE) { gimple *def_stmt = SSA_NAME_DEF_STMT (op); if (!gimple_nop_p (def_stmt) @@ -7407,14 +7393,14 @@ hoist_defs_of_uses (gimple *stmt, struct loop *loop) /* vectorizable_load. - Check if STMT reads a non scalar data-ref (array/pointer/structure) that - can be vectorized. - If VEC_STMT is also passed, vectorize the STMT: create a vectorized - stmt to replace it, put it in VEC_STMT, and insert it at BSI. 
- Return FALSE if not a vectorizable STMT, TRUE otherwise. */ + Check if STMT_INFO reads a non scalar data-ref (array/pointer/structure) + that can be vectorized. + If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized + stmt to replace it, put it in VEC_STMT, and insert it at GSI. + Return true if STMT_INFO is vectorizable in this way. */ static bool -vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, +vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, slp_tree slp_node, slp_instance slp_node_instance, stmt_vector_for_cost *cost_vec) @@ -7422,11 +7408,10 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest; tree vec_dest = NULL; tree data_ref = NULL; - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); stmt_vec_info prev_stmt_info; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = NULL; - struct loop *containing_loop = (gimple_bb (stmt))->loop_father; + struct loop *containing_loop = gimple_bb (stmt_info->stmt)->loop_father; bool nested_in_vect_loop = false; struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL; tree elem_type; @@ -8532,6 +8517,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, && !nested_in_vect_loop && hoist_defs_of_uses (stmt_info, loop)) { + gassign *stmt = as_a (stmt_info->stmt); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, @@ -8730,19 +8716,19 @@ vect_is_simple_cond (tree cond, vec_info *vinfo, /* vectorizable_condition. - Check if STMT is conditional modify expression that can be vectorized. - If VEC_STMT is also passed, vectorize the STMT: create a vectorized + Check if STMT_INFO is conditional modify expression that can be vectorized. + If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it at GSI. 
- When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable - to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in - else clause if it is 2). + When STMT_INFO is vectorized as a nested cycle, REDUC_DEF is the vector + variable to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, + and in else clause if it is 2). - Return FALSE if not a vectorizable STMT, TRUE otherwise. */ + Return true if STMT_INFO is vectorizable in this way. */ bool -vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi, +vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, tree reduc_def, int reduc_index, slp_tree slp_node, stmt_vector_for_cost *cost_vec) @@ -8751,7 +8737,6 @@ vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi, tree vec_dest = NULL_TREE; tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE; tree then_clause, else_clause; - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); tree comp_vectype = NULL_TREE; tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE; tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE; @@ -8800,7 +8785,8 @@ vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi, } /* Is vectorizable conditional operation? */ - if (!is_gimple_assign (stmt)) + gassign *stmt = dyn_cast (stmt_info->stmt); + if (!stmt) return false; code = gimple_assign_rhs_code (stmt); @@ -9138,19 +9124,18 @@ vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi, /* vectorizable_comparison. - Check if STMT is comparison expression that can be vectorized. - If VEC_STMT is also passed, vectorize the STMT: create a vectorized + Check if STMT_INFO is comparison expression that can be vectorized. + If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized comparison, put it in VEC_STMT, and insert it at GSI. - Return FALSE if not a vectorizable STMT, TRUE otherwise. */ + Return true if STMT_INFO is vectorizable in this way. 
*/ static bool -vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi, +vectorizable_comparison (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, stmt_vec_info *vec_stmt, tree reduc_def, slp_tree slp_node, stmt_vector_for_cost *cost_vec) { tree lhs, rhs1, rhs2; - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); tree vectype1 = NULL_TREE, vectype2 = NULL_TREE; tree vectype = STMT_VINFO_VECTYPE (stmt_info); tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE; @@ -9197,7 +9182,8 @@ vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi, return false; } - if (!is_gimple_assign (stmt)) + gassign *stmt = dyn_cast (stmt_info->stmt); + if (!stmt) return false; code = gimple_assign_rhs_code (stmt); @@ -9446,10 +9432,10 @@ can_vectorize_live_stmts (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, /* Make sure the statement is vectorizable. */ bool -vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node, - slp_instance node_instance, stmt_vector_for_cost *cost_vec) +vect_analyze_stmt (stmt_vec_info stmt_info, bool *need_to_vectorize, + slp_tree node, slp_instance node_instance, + stmt_vector_for_cost *cost_vec) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); vec_info *vinfo = stmt_info->vinfo; bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info); @@ -9525,7 +9511,6 @@ vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node, || STMT_VINFO_LIVE_P (pattern_stmt_info))) { /* Analyze PATTERN_STMT instead of the original stmt. */ - stmt = pattern_stmt_info->stmt; stmt_info = pattern_stmt_info; if (dump_enabled_p ()) { @@ -9682,14 +9667,13 @@ vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node, /* Function vect_transform_stmt. - Create a vectorized stmt to replace STMT, and insert it at BSI. */ + Create a vectorized stmt to replace STMT_INFO, and insert it at BSI. 
*/ bool -vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi, +vect_transform_stmt (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, bool *grouped_store, slp_tree slp_node, slp_instance slp_node_instance) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); vec_info *vinfo = stmt_info->vinfo; bool is_store = false; stmt_vec_info vec_stmt = NULL; @@ -9703,6 +9687,7 @@ vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi, (LOOP_VINFO_LOOP (STMT_VINFO_LOOP_VINFO (stmt_info)), stmt_info)); + gimple *stmt = stmt_info->stmt; switch (STMT_VINFO_TYPE (stmt_info)) { case type_demotion_vec_info_type: @@ -9861,9 +9846,9 @@ vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi, stmt_vec_info. */ void -vect_remove_stores (gimple *first_stmt) +vect_remove_stores (stmt_vec_info first_stmt_info) { - stmt_vec_info next_stmt_info = vinfo_for_stmt (first_stmt); + stmt_vec_info next_stmt_info = first_stmt_info; gimple_stmt_iterator next_si; while (next_stmt_info) @@ -10329,13 +10314,12 @@ vect_is_simple_use (tree operand, vec_info *vinfo, enum vect_def_type *dt, widening operation (short in the above example). 
*/ bool -supportable_widening_operation (enum tree_code code, gimple *stmt, +supportable_widening_operation (enum tree_code code, stmt_vec_info stmt_info, tree vectype_out, tree vectype_in, enum tree_code *code1, enum tree_code *code2, int *multi_step_cvt, vec *interm_types) { - stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *vect_loop = NULL; machine_mode vec_mode; diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h index 69c7f6831b7..8a0ca0a4925 100644 --- a/gcc/tree-vectorizer.h +++ b/gcc/tree-vectorizer.h @@ -627,13 +627,6 @@ loop_vec_info_for_loop (struct loop *loop) return (loop_vec_info) loop->aux; } -static inline bool -nested_in_vect_loop_p (struct loop *loop, gimple *stmt) -{ - return (loop->inner - && (loop->inner == (gimple_bb (stmt))->loop_father)); -} - typedef struct _bb_vec_info : public vec_info { _bb_vec_info (gimple_stmt_iterator, gimple_stmt_iterator, vec_info_shared *); @@ -1119,6 +1112,13 @@ set_vinfo_for_stmt (gimple *stmt, stmt_vec_info info) } } +static inline bool +nested_in_vect_loop_p (struct loop *loop, stmt_vec_info stmt_info) +{ + return (loop->inner + && (loop->inner == (gimple_bb (stmt_info->stmt))->loop_father)); +} + /* Return the earlier statement between STMT1_INFO and STMT2_INFO. 
*/ static inline stmt_vec_info @@ -1493,8 +1493,8 @@ extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *, extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *, tree *, stmt_vec_info * = NULL, gimple ** = NULL); -extern bool supportable_widening_operation (enum tree_code, gimple *, tree, - tree, enum tree_code *, +extern bool supportable_widening_operation (enum tree_code, stmt_vec_info, + tree, tree, enum tree_code *, enum tree_code *, int *, vec *); extern bool supportable_narrowing_operation (enum tree_code, tree, tree, @@ -1505,26 +1505,26 @@ extern void free_stmt_vec_info (gimple *stmt); extern unsigned record_stmt_cost (stmt_vector_for_cost *, int, enum vect_cost_for_stmt, stmt_vec_info, int, enum vect_cost_model_location); -extern stmt_vec_info vect_finish_replace_stmt (gimple *, gimple *); -extern stmt_vec_info vect_finish_stmt_generation (gimple *, gimple *, +extern stmt_vec_info vect_finish_replace_stmt (stmt_vec_info, gimple *); +extern stmt_vec_info vect_finish_stmt_generation (stmt_vec_info, gimple *, gimple_stmt_iterator *); extern bool vect_mark_stmts_to_be_vectorized (loop_vec_info); -extern tree vect_get_store_rhs (gimple *); -extern tree vect_get_vec_def_for_operand_1 (gimple *, enum vect_def_type); -extern tree vect_get_vec_def_for_operand (tree, gimple *, tree = NULL); -extern void vect_get_vec_defs (tree, tree, gimple *, vec *, +extern tree vect_get_store_rhs (stmt_vec_info); +extern tree vect_get_vec_def_for_operand_1 (stmt_vec_info, enum vect_def_type); +extern tree vect_get_vec_def_for_operand (tree, stmt_vec_info, tree = NULL); +extern void vect_get_vec_defs (tree, tree, stmt_vec_info, vec *, vec *, slp_tree); extern void vect_get_vec_defs_for_stmt_copy (enum vect_def_type *, vec *, vec *); -extern tree vect_init_vector (gimple *, tree, tree, +extern tree vect_init_vector (stmt_vec_info, tree, tree, gimple_stmt_iterator *); extern tree vect_get_vec_def_for_stmt_copy (enum vect_def_type, tree); -extern 
bool vect_transform_stmt (gimple *, gimple_stmt_iterator *, +extern bool vect_transform_stmt (stmt_vec_info, gimple_stmt_iterator *, bool *, slp_tree, slp_instance); -extern void vect_remove_stores (gimple *); -extern bool vect_analyze_stmt (gimple *, bool *, slp_tree, slp_instance, +extern void vect_remove_stores (stmt_vec_info); +extern bool vect_analyze_stmt (stmt_vec_info, bool *, slp_tree, slp_instance, stmt_vector_for_cost *); -extern bool vectorizable_condition (gimple *, gimple_stmt_iterator *, +extern bool vectorizable_condition (stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, tree, int, slp_tree, stmt_vector_for_cost *); extern void vect_get_load_cost (stmt_vec_info, int, bool, @@ -1546,7 +1546,7 @@ extern tree vect_get_mask_type_for_stmt (stmt_vec_info); extern bool vect_can_force_dr_alignment_p (const_tree, unsigned int); extern enum dr_alignment_support vect_supportable_dr_alignment (struct data_reference *, bool); -extern tree vect_get_smallest_scalar_type (gimple *, HOST_WIDE_INT *, +extern tree vect_get_smallest_scalar_type (stmt_vec_info, HOST_WIDE_INT *, HOST_WIDE_INT *); extern bool vect_analyze_data_ref_dependences (loop_vec_info, unsigned int *); extern bool vect_slp_analyze_instance_dependence (slp_instance); @@ -1558,36 +1558,36 @@ extern bool vect_analyze_data_ref_accesses (vec_info *); extern bool vect_prune_runtime_alias_test_list (loop_vec_info); extern bool vect_gather_scatter_fn_p (bool, bool, tree, tree, unsigned int, signop, int, internal_fn *, tree *); -extern bool vect_check_gather_scatter (gimple *, loop_vec_info, +extern bool vect_check_gather_scatter (stmt_vec_info, loop_vec_info, gather_scatter_info *); extern bool vect_find_stmt_data_reference (loop_p, gimple *, vec *); extern bool vect_analyze_data_refs (vec_info *, poly_uint64 *); extern void vect_record_base_alignments (vec_info *); -extern tree vect_create_data_ref_ptr (gimple *, tree, struct loop *, tree, +extern tree vect_create_data_ref_ptr (stmt_vec_info, tree, 
struct loop *, tree, tree *, gimple_stmt_iterator *, gimple **, bool, bool *, tree = NULL_TREE, tree = NULL_TREE); -extern tree bump_vector_ptr (tree, gimple *, gimple_stmt_iterator *, gimple *, - tree); +extern tree bump_vector_ptr (tree, gimple *, gimple_stmt_iterator *, + stmt_vec_info, tree); extern void vect_copy_ref_info (tree, tree); extern tree vect_create_destination_var (tree, tree); extern bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT); extern bool vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT, bool); extern bool vect_grouped_load_supported (tree, bool, unsigned HOST_WIDE_INT); extern bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT, bool); -extern void vect_permute_store_chain (vec ,unsigned int, gimple *, +extern void vect_permute_store_chain (vec ,unsigned int, stmt_vec_info, gimple_stmt_iterator *, vec *); -extern tree vect_setup_realignment (gimple *, gimple_stmt_iterator *, tree *, - enum dr_alignment_support, tree, +extern tree vect_setup_realignment (stmt_vec_info, gimple_stmt_iterator *, + tree *, enum dr_alignment_support, tree, struct loop **); -extern void vect_transform_grouped_load (gimple *, vec , int, +extern void vect_transform_grouped_load (stmt_vec_info, vec , int, gimple_stmt_iterator *); -extern void vect_record_grouped_load_vectors (gimple *, vec ); +extern void vect_record_grouped_load_vectors (stmt_vec_info, vec); extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *); extern tree vect_get_new_ssa_name (tree, enum vect_var_kind, const char * = NULL); -extern tree vect_create_addr_base_for_vector_ref (gimple *, gimple_seq *, +extern tree vect_create_addr_base_for_vector_ref (stmt_vec_info, gimple_seq *, tree, tree = NULL_TREE); /* In tree-vect-loop.c. */ @@ -1613,16 +1613,16 @@ extern tree vect_get_loop_mask (gimple_stmt_iterator *, vec_loop_masks *, /* Drive for loop transformation stage. 
*/ extern struct loop *vect_transform_loop (loop_vec_info); extern loop_vec_info vect_analyze_loop_form (struct loop *, vec_info_shared *); -extern bool vectorizable_live_operation (gimple *, gimple_stmt_iterator *, +extern bool vectorizable_live_operation (stmt_vec_info, gimple_stmt_iterator *, slp_tree, int, stmt_vec_info *, stmt_vector_for_cost *); -extern bool vectorizable_reduction (gimple *, gimple_stmt_iterator *, +extern bool vectorizable_reduction (stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, slp_tree, slp_instance, stmt_vector_for_cost *); -extern bool vectorizable_induction (gimple *, gimple_stmt_iterator *, +extern bool vectorizable_induction (stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, slp_tree, stmt_vector_for_cost *); -extern tree get_initial_def_for_reduction (gimple *, tree, tree *); +extern tree get_initial_def_for_reduction (stmt_vec_info, tree, tree *); extern bool vect_worthwhile_without_simd_p (vec_info *, tree_code); extern int vect_get_known_peeling_cost (loop_vec_info, int, int *, stmt_vector_for_cost *, @@ -1643,13 +1643,13 @@ extern void vect_detect_hybrid_slp (loop_vec_info); extern void vect_get_slp_defs (vec , slp_tree, vec > *); extern bool vect_slp_bb (basic_block); extern stmt_vec_info vect_find_last_scalar_stmt_in_slp (slp_tree); -extern bool is_simple_and_all_uses_invariant (gimple *, loop_vec_info); +extern bool is_simple_and_all_uses_invariant (stmt_vec_info, loop_vec_info); extern bool can_duplicate_and_interleave_p (unsigned int, machine_mode, unsigned int * = NULL, tree * = NULL, tree * = NULL); extern void duplicate_and_interleave (gimple_seq *, tree, vec, unsigned int, vec &); -extern int vect_get_place_in_interleaving_chain (gimple *, gimple *); +extern int vect_get_place_in_interleaving_chain (stmt_vec_info, stmt_vec_info); /* In tree-vect-patterns.c. */ /* Pattern recognition functions.