/* Initialize misalignment to unknown. */
SET_DR_MISALIGNMENT (dr, -1);
- /* Strided loads perform only component accesses, misalignment information
+ /* Strided accesses perform only component accesses, misalignment information
is irrelevant for them. */
- if (STMT_VINFO_STRIDE_LOAD_P (stmt_info)
+ if (STMT_VINFO_STRIDED_P (stmt_info)
&& !STMT_VINFO_GROUPED_ACCESS (stmt_info))
return true;
|| !STMT_VINFO_VECTORIZABLE (stmt_info))
continue;
- /* Strided loads perform only component accesses, alignment is
+ /* Strided accesses perform only component accesses, alignment is
irrelevant for them. */
- if (STMT_VINFO_STRIDE_LOAD_P (stmt_info)
+ if (STMT_VINFO_STRIDED_P (stmt_info)
&& !STMT_VINFO_GROUPED_ACCESS (stmt_info))
continue;
if (integer_zerop (DR_STEP (dr)))
continue;
- /* Strided loads perform only component accesses, alignment is
+ /* Strided accesses perform only component accesses, alignment is
irrelevant for them. */
- if (STMT_VINFO_STRIDE_LOAD_P (stmt_info)
+ if (STMT_VINFO_STRIDED_P (stmt_info)
&& !STMT_VINFO_GROUPED_ACCESS (stmt_info))
continue;
&& GROUP_FIRST_ELEMENT (stmt_info) != stmt)
continue;
- /* Strided loads perform only component accesses, alignment is
+ /* Strided accesses perform only component accesses, alignment is
irrelevant for them. */
- if (STMT_VINFO_STRIDE_LOAD_P (stmt_info)
+ if (STMT_VINFO_STRIDED_P (stmt_info)
&& !STMT_VINFO_GROUPED_ACCESS (stmt_info))
continue;
&& GROUP_FIRST_ELEMENT (stmt_info) != stmt))
continue;
- if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
+ if (STMT_VINFO_STRIDED_P (stmt_info))
{
/* Strided loads perform only component accesses, alignment is
irrelevant for them. */
/* Assume this is a DR handled by non-constant strided load case. */
if (TREE_CODE (step) != INTEGER_CST)
- return (STMT_VINFO_STRIDE_LOAD_P (stmt_info)
+ return (STMT_VINFO_STRIDED_P (stmt_info)
&& (!STMT_VINFO_GROUPED_ACCESS (stmt_info)
|| vect_analyze_group_access (dr)));
else if (loop_vinfo
&& TREE_CODE (DR_STEP (dr)) != INTEGER_CST)
{
- if (nested_in_vect_loop_p (loop, stmt)
- || !DR_IS_READ (dr))
+ if (nested_in_vect_loop_p (loop, stmt))
{
if (dump_enabled_p ())
{
}
return false;
}
- STMT_VINFO_STRIDE_LOAD_P (stmt_info) = true;
+ STMT_VINFO_STRIDED_P (stmt_info) = true;
}
}
}
/* Costs of the stores. */
- vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);
+ if (STMT_VINFO_STRIDED_P (stmt_info))
+ {
+ /* N scalar stores plus extracting the elements. */
+ tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+ inside_cost += record_stmt_cost (body_cost_vec,
+ ncopies * TYPE_VECTOR_SUBPARTS (vectype),
+ scalar_store, stmt_info, 0, vect_body);
+ inside_cost += record_stmt_cost (body_cost_vec,
+ ncopies * TYPE_VECTOR_SUBPARTS (vectype),
+ vec_to_scalar, stmt_info, 0, vect_body);
+ }
+ else
+ vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
access is instead being provided by a load-and-permute operation,
include the cost of the permutes. */
if (!load_lanes_p && group_size > 1
- && !STMT_VINFO_STRIDE_LOAD_P (stmt_info))
+ && !STMT_VINFO_STRIDED_P (stmt_info))
{
/* Uses even and odd extract operations or shuffle operations
for each needed permute. */
}
/* The loads themselves. */
- if (STMT_VINFO_STRIDE_LOAD_P (stmt_info)
+ if (STMT_VINFO_STRIDED_P (stmt_info)
&& !STMT_VINFO_GROUPED_ACCESS (stmt_info))
{
/* N scalar loads plus gathering them into a vector. */
|| group_size > 1 || slp_node),
&inside_cost, &prologue_cost,
prologue_cost_vec, body_cost_vec, true);
- if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
+ if (STMT_VINFO_STRIDED_P (stmt_info))
inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
stmt_info, 0, vect_body);
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
return false;
- if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
+ if (STMT_VINFO_STRIDED_P (stmt_info))
return false;
if (STMT_VINFO_GATHER_P (stmt_info))
tree dataref_ptr = NULL_TREE;
tree dataref_offset = NULL_TREE;
gimple ptr_incr = NULL;
- int nunits = TYPE_VECTOR_SUBPARTS (vectype);
+ unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
int ncopies;
int j;
gimple next_stmt, first_stmt = NULL;
if (!STMT_VINFO_DATA_REF (stmt_info))
return false;
- negative =
- tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
- ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
- size_zero_node) < 0;
- if (negative && ncopies > 1)
+ if (!STMT_VINFO_STRIDED_P (stmt_info))
{
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "multiple types with negative step.\n");
- return false;
- }
-
- if (negative)
- {
- gcc_assert (!grouped_store);
- alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
- if (alignment_support_scheme != dr_aligned
- && alignment_support_scheme != dr_unaligned_supported)
+ negative =
+ tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
+ ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
+ size_zero_node) < 0;
+ if (negative && ncopies > 1)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "negative step but alignment required.\n");
+ "multiple types with negative step.\n");
return false;
}
- if (dt != vect_constant_def
- && dt != vect_external_def
- && !perm_mask_for_reverse (vectype))
+ if (negative)
{
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "negative step and reversing not supported.\n");
- return false;
+ gcc_assert (!grouped_store);
+ alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
+ if (alignment_support_scheme != dr_aligned
+ && alignment_support_scheme != dr_unaligned_supported)
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "negative step but alignment required.\n");
+ return false;
+ }
+ if (dt != vect_constant_def
+ && dt != vect_external_def
+ && !perm_mask_for_reverse (vectype))
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "negative step and reversing not supported.\n");
+ return false;
+ }
}
}
dump_printf_loc (MSG_NOTE, vect_location,
"transform store. ncopies = %d\n", ncopies);
+ if (STMT_VINFO_STRIDED_P (stmt_info))
+ {
+ gimple_stmt_iterator incr_gsi;
+ bool insert_after;
+ gimple incr;
+ tree offvar;
+ tree ivstep;
+ tree running_off;
+ gimple_seq stmts = NULL;
+ tree stride_base, stride_step, alias_off;
+ tree vec_oprnd;
+
+ gcc_assert (!nested_in_vect_loop_p (loop, stmt));
+
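+      /* The base address of the first scalar store:
+	 DR base address + DR offset + DR init.  */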
+ stride_base
+ = fold_build_pointer_plus
+ (unshare_expr (DR_BASE_ADDRESS (dr)),
+ size_binop (PLUS_EXPR,
+ convert_to_ptrofftype (unshare_expr (DR_OFFSET (dr))),
+		      convert_to_ptrofftype (DR_INIT (dr))));
+ stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (dr)));
+
+ /* For a store with loop-invariant (but other than power-of-2)
+ stride (i.e. not a grouped access) like so:
+
+ for (i = 0; i < n; i += stride)
+ array[i] = ...;
+
+ we generate a new induction variable and new stores from
+ the components of the (vectorized) rhs:
+
+ for (j = 0; ; j += VF*stride)
+ vectemp = ...;
+ tmp1 = vectemp[0];
+ array[j] = tmp1;
+ tmp2 = vectemp[1];
+ array[j + stride] = tmp2;
+ ...
+ */
+
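+      /* The IV advances by the scalar stride times the number of
+	 scalar iterations covered per vector iteration
+	 (ncopies * nunits).  */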
+ ivstep = stride_step;
+ ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
+ build_int_cst (TREE_TYPE (ivstep),
+ ncopies * nunits));
+
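+      /* Create the pointer IV OFFVAR, starting at STRIDE_BASE and
+	 stepping by IVSTEP, at the loop's standard increment
+	 position.  */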
+ standard_iv_increment_position (loop, &incr_gsi, &insert_after);
+
+ create_iv (stride_base, ivstep, NULL,
+ loop, &incr_gsi, insert_after,
+ &offvar, NULL);
+ incr = gsi_stmt (incr_gsi);
+ set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
+
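+      /* Gimplify the scalar stride and emit any statements this
+	 requires on the loop preheader edge.  */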
+ stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
+ if (stmts)
+ gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
+
+ prev_stmt_info = NULL;
+ running_off = offvar;
+ alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0);
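+      /* Generate NCOPIES vector defs of the rhs; for each one, extract
+	 every element and emit a scalar store, bumping the running
+	 pointer by the stride after each store.  */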
+ for (j = 0; j < ncopies; j++)
+ {
+	  /* We've set op and dt above, from gimple_assign_rhs1 (stmt),
+ and first_stmt == stmt. */
+ if (j == 0)
+ vec_oprnd = vect_get_vec_def_for_operand (op, first_stmt, NULL);
+ else
+ vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
+
+ for (i = 0; i < nunits; i++)
+ {
+ tree newref, newoff;
+ gimple incr, assign;
+ tree size = TYPE_SIZE (elem_type);
+ /* Extract the i'th component. */
+ tree pos = fold_build2 (MULT_EXPR, bitsizetype, bitsize_int (i),
+ size);
+ tree elem = fold_build3 (BIT_FIELD_REF, elem_type, vec_oprnd,
+ size, pos);
+
+ elem = force_gimple_operand_gsi (gsi, elem, true,
+ NULL_TREE, true,
+ GSI_SAME_STMT);
+
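+	      /* Build the scalar lvalue *running_off, reusing the alias
+		 information of the original data reference.  */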
+ newref = build2 (MEM_REF, TREE_TYPE (vectype),
+ running_off, alias_off);
+
+ /* And store it to *running_off. */
+ assign = gimple_build_assign (newref, elem);
+ vect_finish_stmt_generation (stmt, assign, gsi);
+
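+	      /* Bump the running pointer by one scalar stride for the
+		 next element.  */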
+ newoff = copy_ssa_name (running_off, NULL);
+ incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
+ running_off, stride_step);
+ vect_finish_stmt_generation (stmt, incr, gsi);
+
+ running_off = newoff;
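+	      /* The first generated store becomes the vectorized stmt;
+		 the rest are chained via STMT_VINFO_RELATED_STMT.  */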
+	      if (j == 0 && i == 0)
+ STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = assign;
+ else
+ STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
+ prev_stmt_info = vinfo_for_stmt (assign);
+ }
+ }
+ return true;
+ }
+
dr_chain.create (group_size);
oprnds.create (group_size);
group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
if (!slp
&& !PURE_SLP_STMT (stmt_info)
- && !STMT_VINFO_STRIDE_LOAD_P (stmt_info))
+ && !STMT_VINFO_STRIDED_P (stmt_info))
{
if (vect_load_lanes_supported (vectype, group_size))
load_lanes_p = true;
return false;
}
}
- else if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
+ else if (STMT_VINFO_STRIDED_P (stmt_info))
{
if ((grouped_load
&& (slp || PURE_SLP_STMT (stmt_info)))
}
return true;
}
- else if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
+ else if (STMT_VINFO_STRIDED_P (stmt_info))
{
gimple_stmt_iterator incr_gsi;
bool insert_after;