static bool
get_group_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
- tree vectype, bool slp,
+ tree vectype, slp_tree slp_node,
bool masked_p, vec_load_store_type vls_type,
vect_memory_access_type *memory_access_type,
+ dr_alignment_support *alignment_support_scheme,
gather_scatter_info *gs_info)
{
loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
gcc_assert (!STMT_VINFO_STRIDED_P (first_stmt_info) || gap == 0);
/* Stores can't yet have gaps. */
- gcc_assert (slp || vls_type == VLS_LOAD || gap == 0);
+ gcc_assert (slp_node || vls_type == VLS_LOAD || gap == 0);
- if (slp)
+ if (slp_node)
{
+ /* For SLP vectorization we directly vectorize a subchain
+ without permutation. */
+ if (! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
+ first_dr_info
+ = STMT_VINFO_DR_INFO (SLP_TREE_SCALAR_STMTS (slp_node)[0]);
if (STMT_VINFO_STRIDED_P (first_stmt_info))
{
/* Try to use consecutive accesses of DR_GROUP_SIZE elements,
*memory_access_type = VMAT_GATHER_SCATTER;
}
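+  /* Gather-scatter and element-wise accesses perform only component
+     accesses, alignment is irrelevant for them.  */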
+ if (*memory_access_type == VMAT_GATHER_SCATTER
+ || *memory_access_type == VMAT_ELEMENTWISE)
+ *alignment_support_scheme = dr_unaligned_supported;
+ else
+ *alignment_support_scheme
+ = vect_supportable_dr_alignment (vinfo, first_dr_info, false);
+
if (vls_type != VLS_LOAD && first_stmt_info == stmt_info)
{
/* STMT is the leader of the group. Check the operands of all the
/* Analyze load or store statement STMT_INFO of type VLS_TYPE. Return true
if there is a memory access type that the vectorized form can use,
storing it in *MEMORY_ACCESS_TYPE if so. If we decide to use gathers
- or scatters, fill in GS_INFO accordingly.
+ or scatters, fill in GS_INFO accordingly. In addition
+ *ALIGNMENT_SUPPORT_SCHEME is filled out and false is returned if
+   the target does not support the accesses with their alignment.
-   SLP says whether we're performing SLP rather than loop vectorization.
+   SLP_NODE is the SLP node if we're performing SLP rather than loop
+   vectorization, NULL otherwise.
MASKED_P is true if the statement is conditional on a vectorized mask.
static bool
get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
- tree vectype, bool slp,
+ tree vectype, slp_tree slp_node,
bool masked_p, vec_load_store_type vls_type,
unsigned int ncopies,
vect_memory_access_type *memory_access_type,
+ dr_alignment_support *alignment_support_scheme,
gather_scatter_info *gs_info)
{
loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
vls_type == VLS_LOAD ? "gather" : "scatter");
return false;
}
+ /* Gather-scatter accesses perform only component accesses, alignment
+ is irrelevant for them. */
+ *alignment_support_scheme = dr_unaligned_supported;
}
else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
{
- if (!get_group_load_store_type (vinfo, stmt_info, vectype, slp, masked_p,
- vls_type, memory_access_type, gs_info))
+      if (!get_group_load_store_type (vinfo, stmt_info, vectype, slp_node,
+                                      masked_p, vls_type, memory_access_type,
+                                      alignment_support_scheme, gs_info))
return false;
}
else if (STMT_VINFO_STRIDED_P (stmt_info))
{
- gcc_assert (!slp);
+ gcc_assert (!slp_node);
if (loop_vinfo
&& vect_use_strided_gather_scatters_p (stmt_info, loop_vinfo,
masked_p, gs_info))
*memory_access_type = VMAT_GATHER_SCATTER;
else
*memory_access_type = VMAT_ELEMENTWISE;
+ /* Alignment is irrelevant here. */
+ *alignment_support_scheme = dr_unaligned_supported;
}
else
{
}
else
*memory_access_type = VMAT_CONTIGUOUS;
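+      /* Ask the target how, if at all, the access is supported with
+         its alignment.  */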
+ *alignment_support_scheme
+ = vect_supportable_dr_alignment (vinfo,
+ STMT_VINFO_DR_INFO (stmt_info), false);
}
if ((*memory_access_type == VMAT_ELEMENTWISE
return false;
}
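+  /* Fail early if the target cannot perform the access with the
+     alignment determined above.  */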
+ if (*alignment_support_scheme == dr_unaligned_unsupported)
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "unsupported unaligned access\n");
+ return false;
+ }
+
/* FIXME: At the moment the cost model seems to underestimate the
cost of using elementwise accesses. This check preserves the
traditional behavior until that can be fixed. */
class loop *loop = NULL;
machine_mode vec_mode;
tree dummy;
- enum dr_alignment_support alignment_support_scheme;
enum vect_def_type rhs_dt = vect_unknown_def_type;
enum vect_def_type mask_dt = vect_unknown_def_type;
tree dataref_ptr = NULL_TREE;
return false;
vect_memory_access_type memory_access_type;
- if (!get_load_store_type (vinfo, stmt_info, vectype, slp, mask, vls_type,
- ncopies, &memory_access_type, &gs_info))
+ enum dr_alignment_support alignment_support_scheme;
+ if (!get_load_store_type (vinfo, stmt_info, vectype, slp_node, mask, vls_type,
+ ncopies, &memory_access_type,
+ &alignment_support_scheme, &gs_info))
return false;
if (mask)
tree new_temp;
machine_mode mode;
tree dummy;
- enum dr_alignment_support alignment_support_scheme;
tree dataref_ptr = NULL_TREE;
tree dataref_offset = NULL_TREE;
gimple *ptr_incr = NULL;
group_size = 1;
vect_memory_access_type memory_access_type;
- if (!get_load_store_type (vinfo, stmt_info, vectype, slp, mask, VLS_LOAD,
- ncopies, &memory_access_type, &gs_info))
+ enum dr_alignment_support alignment_support_scheme;
+ if (!get_load_store_type (vinfo, stmt_info, vectype, slp_node, mask, VLS_LOAD,
+ ncopies, &memory_access_type,
+ &alignment_support_scheme, &gs_info))
return false;
if (mask)
ref_type = reference_alias_ptr_type (DR_REF (first_dr_info->dr));
}
- /* Gather-scatter accesses perform only component accesses, alignment
- is irrelevant for them. */
- if (memory_access_type == VMAT_GATHER_SCATTER)
- alignment_support_scheme = dr_unaligned_supported;
- else
- alignment_support_scheme
- = vect_supportable_dr_alignment (vinfo, first_dr_info, false);
-
gcc_assert (alignment_support_scheme);
vec_loop_masks *loop_masks
= (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)