}
for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
{
+ if (is_gimple_debug (gsi_stmt (si)))
+ continue;
stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
gimple_stmt_iterator *);
/* Check whether a load or store statement in the loop described by
- LOOP_VINFO is possible in a fully-masked loop. This is testing
- whether the vectorizer pass has the appropriate support, as well as
- whether the target does.
+ LOOP_VINFO is possible in a loop using partial vectors. This is
+ testing whether the vectorizer pass has the appropriate support,
+ as well as whether the target does.
VLS_TYPE says whether the statement is a load or store and VECTYPE
is the type of the vector being loaded or stored. MEMORY_ACCESS_TYPE
says how the load or store is going to be implemented and GROUP_SIZE
is the number of load or store statements in the containing group.
If the access is a gather load or scatter store, GS_INFO describes
its arguments. If the load or store is conditional, SCALAR_MASK is the
condition under which it occurs.
- Clear LOOP_VINFO_CAN_FULLY_MASK_P if a fully-masked loop is not
- supported, otherwise record the required mask types. */
+ Clear LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P if a loop using partial
+ vectors is not supported, otherwise record the required rgroup control
+ types. */
static void
-check_load_store_masking (loop_vec_info loop_vinfo, tree vectype,
- vec_load_store_type vls_type, int group_size,
- vect_memory_access_type memory_access_type,
- gather_scatter_info *gs_info, tree scalar_mask)
+check_load_store_for_partial_vectors (loop_vec_info loop_vinfo, tree vectype,
+ vec_load_store_type vls_type,
+ int group_size,
+ vect_memory_access_type
+ memory_access_type,
+ gather_scatter_info *gs_info,
+ tree scalar_mask)
{
/* Invariant loads need no special support. */
if (memory_access_type == VMAT_INVARIANT)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "can't use a fully-masked loop because the"
- " target doesn't have an appropriate masked"
+ "can't operate on partial vectors because"
+ " the target doesn't have an appropriate"
" load/store-lanes instruction.\n");
- LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
+ LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
return;
}
unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype);
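As background for the rename: the following standalone sketch (plain C, not GCC internals; VF and the helper name are invented for illustration) models what a loop using partial vectors does. The final iteration runs with only the leading lanes active, so masked-off lanes never load or store and no scalar epilogue is needed; this is the behaviour that LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P and the masks recorded by vect_record_loop_mask ultimately enable.

  /* Standalone illustration only -- not part of the patch.  VF is an
     assumed vectorization factor.  */
  #include <stdio.h>

  #define VF 4

  static void
  masked_add_one (int *dst, const int *src, int n)
  {
    for (int base = 0; base < n; base += VF)
      for (int lane = 0; lane < VF; lane++)
        {
          /* Loop mask: the lane is active only while base + lane < n.  */
          if (base + lane < n)
            dst[base + lane] = src[base + lane] + 1;
          /* Inactive lanes neither load nor store.  */
        }
  }

  int
  main (void)
  {
    int src[6] = { 1, 2, 3, 4, 5, 6 }, dst[6] = { 0 };
    masked_add_one (dst, src, 6);   /* 6 % VF != 0: the last vector is partial.  */
    for (int i = 0; i < 6; i++)
      printf ("%d ", dst[i]);
    printf ("\n");
    return 0;
  }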
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "can't use a fully-masked loop because the"
- " target doesn't have an appropriate masked"
+ "can't operate on partial vectors because"
+ " the target doesn't have an appropriate"
" gather load or scatter store instruction.\n");
- LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
+ LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
return;
}
unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype);
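The gather/scatter rejection above keys off whether the target provides masked gather-load and scatter-store operations. A minimal lane-by-lane model of their semantics, in plain C rather than GCC internals (VF and the names are invented; what real masked gathers put in inactive result lanes is not modelled here, those lanes are simply left untouched):

  /* Standalone illustration only -- not part of the patch.  */

  #define VF 4

  /* Gather: active lanes load from base[idx[lane]].  */
  static void
  masked_gather (int out[VF], const int *base, const int idx[VF],
                 const int mask[VF])
  {
    for (int lane = 0; lane < VF; lane++)
      if (mask[lane])
        out[lane] = base[idx[lane]];
  }

  /* Scatter: only active lanes store to base[idx[lane]].  */
  static void
  masked_scatter (int *base, const int idx[VF], const int val[VF],
                  const int mask[VF])
  {
    for (int lane = 0; lane < VF; lane++)
      if (mask[lane])
        base[idx[lane]] = val[lane];
  }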
/* Element X of the data must come from iteration i * VF + X of the
scalar loop. We need more work to support other mappings. */
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "can't use a fully-masked loop because an access"
- " isn't contiguous.\n");
- LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
+ "can't operate on partial vectors because an"
+ " access isn't contiguous.\n");
+ LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
return;
}
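The comment above states that element X of the data must come from scalar iteration i * VF + X; that mapping is what lets the loop mask double as the memory mask. A short sketch in plain C (VF and the names invented) to make the index arithmetic concrete:

  /* Standalone illustration only -- not part of the patch.  */

  #define VF 4

  /* Contiguous access: lane X of vector iteration I holds the element used
     by scalar iteration I * VF + X, so the loop mask "I * VF + X < n" also
     says which lanes may touch memory.  */
  static int
  contiguous_index (int i, int x)
  {
    return i * VF + x;
  }

  /* Stride-2 access: scalar iteration I * VF + X uses element
     2 * (I * VF + X), while a contiguous vector load at the current
     position would put element 2 * I * VF + X in lane X, so the loop mask
     no longer lines up with the lanes that may touch memory.  */
  static int
  strided_index (int i, int x)
  {
    return 2 * (i * VF + x);
  }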
"can't use a fully-masked loop because the target"
" doesn't have the appropriate masked load or"
" store.\n");
- LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
+ LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
return;
}
/* We might load more scalars than we need for permuting SLP loads.
should only change the active lanes of the reduction chain,
keeping the inactive lanes as-is. */
if (loop_vinfo
- && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)
+ && LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)
&& reduc_idx >= 0)
{
if (cond_fn == IFN_LAST
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't use a fully-masked loop because no"
" conditional operation is available.\n");
- LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
+ LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
}
else
vect_record_loop_mask (loop_vinfo, masks, ncopies * vec_num,
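The reasoning behind requiring a conditional internal function here: when a reduction executes under a loop mask, the update must only take effect in the active lanes, with inactive lanes of the accumulator carried through unchanged. A lane-by-lane model in plain C (not GCC internals; VF and the name are invented), roughly what an IFN_COND_ADD-style operation provides:

  /* Standalone illustration only -- not part of the patch.  */

  #define VF 4

  /* acc = mask ? acc + val : acc -- inactive lanes keep their old value,
     so the final reduction over acc is unaffected by masked-off
     iterations.  */
  static void
  cond_add_step (int acc[VF], const int val[VF], const int mask[VF])
  {
    for (int lane = 0; lane < VF; lane++)
      if (mask[lane])
        acc[lane] += val[lane];
  }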
STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
if (loop_vinfo
- && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
- check_load_store_masking (loop_vinfo, vectype, vls_type, group_size,
- memory_access_type, &gs_info, mask);
+ && LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo))
+ check_load_store_for_partial_vectors (loop_vinfo, vectype, vls_type,
+ group_size, memory_access_type,
+ &gs_info, mask);
if (slp_node
&& !vect_maybe_update_slp_op_vectype (SLP_TREE_CHILDREN (slp_node)[0],
STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
if (loop_vinfo
- && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
- check_load_store_masking (loop_vinfo, vectype, VLS_LOAD, group_size,
- memory_access_type, &gs_info, mask);
+ && LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo))
+ check_load_store_for_partial_vectors (loop_vinfo, vectype, VLS_LOAD,
+ group_size, memory_access_type,
+ &gs_info, mask);
STMT_VINFO_TYPE (orig_stmt_info) = load_vec_info_type;
vect_model_load_cost (vinfo, stmt_info, ncopies, vf, memory_access_type,
}
if (loop_vinfo
- && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)
+ && LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)
&& reduction_type == EXTRACT_LAST_REDUCTION)
vect_record_loop_mask (loop_vinfo, &LOOP_VINFO_MASKS (loop_vinfo),
ncopies * vec_num, vectype, NULL);
FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
{
vec_then_clause = vec_oprnds2[i];
- vec_else_clause = vec_oprnds3[i];
+ if (reduction_type != EXTRACT_LAST_REDUCTION)
+ vec_else_clause = vec_oprnds3[i];
if (swap_cond_operands)
std::swap (vec_then_clause, vec_else_clause);
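The guard added above avoids reading vec_oprnds3 for EXTRACT_LAST_REDUCTION, presumably because no vectorized else value is created on that path: the else value stays scalar, and the result is the last active lane of the vector operand, falling back to the previous scalar value when no lane is active (the FOLD_EXTRACT_LAST semantics). A lane-by-lane model in plain C (not GCC internals; VF and the helper name are invented):

  /* Standalone illustration only -- not part of the patch.  */

  #define VF 4

  /* Semantics along the lines of FOLD_EXTRACT_LAST: return the value of
     the last active lane, or FALLBACK if no lane is active.  */
  static int
  fold_extract_last_model (int fallback, const int mask[VF],
                           const int vec[VF])
  {
    int result = fallback;
    for (int lane = 0; lane < VF; lane++)
      if (mask[lane])
        result = vec[lane];   /* later active lanes overwrite earlier ones */
    return result;
  }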