+2018-06-18 David Malcolm <dmalcolm@redhat.com>
+
+ * tree-vect-data-refs.c (vect_analyze_data_ref_dependences):
+ Replace dump_printf_loc call with DUMP_VECT_SCOPE.
+ (vect_slp_analyze_instance_dependence): Likewise.
+ (vect_enhance_data_refs_alignment): Likewise.
+ (vect_analyze_data_refs_alignment): Likewise.
+ (vect_slp_analyze_and_verify_instance_alignment): Likewise.
+ (vect_analyze_data_ref_accesses): Likewise.
+ (vect_prune_runtime_alias_test_list): Likewise.
+ (vect_analyze_data_refs): Likewise.
+ * tree-vect-loop-manip.c (vect_update_inits_of_drs): Likewise.
+ * tree-vect-loop.c (vect_determine_vectorization_factor): Likewise.
+ (vect_analyze_scalar_cycles_1): Likewise.
+ (vect_get_loop_niters): Likewise.
+ (vect_analyze_loop_form_1): Likewise.
+ (vect_update_vf_for_slp): Likewise.
+ (vect_analyze_loop_operations): Likewise.
+ (vect_analyze_loop): Likewise.
+ (vectorizable_induction): Likewise.
+ (vect_transform_loop): Likewise.
+ * tree-vect-patterns.c (vect_pattern_recog): Likewise.
+ * tree-vect-slp.c (vect_analyze_slp): Likewise.
+ (vect_make_slp_decision): Likewise.
+ (vect_detect_hybrid_slp): Likewise.
+ (vect_slp_analyze_operations): Likewise.
+ (vect_slp_bb): Likewise.
+ * tree-vect-stmts.c (vect_mark_stmts_to_be_vectorized): Likewise.
+ (vectorizable_bswap): Likewise.
+ (vectorizable_call): Likewise.
+ (vectorizable_simd_clone_call): Likewise.
+ (vectorizable_conversion): Likewise.
+ (vectorizable_assignment): Likewise.
+ (vectorizable_shift): Likewise.
+ (vectorizable_operation): Likewise.
+ * tree-vectorizer.h (DUMP_VECT_SCOPE): New macro.
+
2018-06-18 Martin Sebor <msebor@redhat.com>
PR tree-optimization/81384
unsigned int i;
struct data_dependence_relation *ddr;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "=== vect_analyze_data_ref_dependences ===\n");
+ DUMP_VECT_SCOPE ("vect_analyze_data_ref_dependences");
LOOP_VINFO_DDRS (loop_vinfo)
.create (LOOP_VINFO_DATAREFS (loop_vinfo).length ()
bool
vect_slp_analyze_instance_dependence (slp_instance instance)
{
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "=== vect_slp_analyze_instance_dependence ===\n");
+ DUMP_VECT_SCOPE ("vect_slp_analyze_instance_dependence");
/* The stores of this instance are at the root of the SLP tree. */
slp_tree store = SLP_INSTANCE_TREE (instance);
unsigned int mis, same_align_drs_max = 0;
hash_table<peel_info_hasher> peeling_htab (1);
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "=== vect_enhance_data_refs_alignment ===\n");
+ DUMP_VECT_SCOPE ("vect_enhance_data_refs_alignment");
/* Reset data so we can safely be called multiple times. */
LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
bool
vect_analyze_data_refs_alignment (loop_vec_info vinfo)
{
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "=== vect_analyze_data_refs_alignment ===\n");
+ DUMP_VECT_SCOPE ("vect_analyze_data_refs_alignment");
/* Mark groups of data references with same alignment using
data dependence information. */
bool
vect_slp_analyze_and_verify_instance_alignment (slp_instance instance)
{
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "=== vect_slp_analyze_and_verify_instance_alignment ===\n");
+ DUMP_VECT_SCOPE ("vect_slp_analyze_and_verify_instance_alignment");
slp_tree node;
unsigned i;
vec<data_reference_p> datarefs = vinfo->datarefs;
struct data_reference *dr;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "=== vect_analyze_data_ref_accesses ===\n");
+ DUMP_VECT_SCOPE ("vect_analyze_data_ref_accesses");
if (datarefs.is_empty ())
return true;
unsigned int i;
tree length_factor;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "=== vect_prune_runtime_alias_test_list ===\n");
+ DUMP_VECT_SCOPE ("vect_prune_runtime_alias_test_list");
/* Step values are irrelevant for aliasing if the number of vector
iterations is equal to the number of scalar iterations (which can
struct data_reference *dr;
tree scalar_type;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "=== vect_analyze_data_refs ===\n");
+ DUMP_VECT_SCOPE ("vect_analyze_data_refs");
if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
loop = LOOP_VINFO_LOOP (loop_vinfo);
vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
struct data_reference *dr;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "=== vect_update_inits_of_dr ===\n");
+ DUMP_VECT_SCOPE ("vect_update_inits_of_dr");
/* Adjust niters to sizetype and insert stmts on loop preheader edge. */
if (!types_compatible_p (sizetype, TREE_TYPE (niters)))
unsigned i;
auto_vec<stmt_vec_info> mask_producers;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "=== vect_determine_vectorization_factor ===\n");
+ DUMP_VECT_SCOPE ("vect_determine_vectorization_factor");
for (i = 0; i < nbbs; i++)
{
gphi_iterator gsi;
bool double_reduc;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "=== vect_analyze_scalar_cycles ===\n");
+ DUMP_VECT_SCOPE ("vect_analyze_scalar_cycles");
/* First - identify all inductions. Reduction detection assumes that all the
inductions have been identified, therefore, this order must not be
*assumptions = boolean_true_node;
*number_of_iterationsm1 = chrec_dont_know;
*number_of_iterations = chrec_dont_know;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "=== get_loop_niters ===\n");
+ DUMP_VECT_SCOPE ("get_loop_niters");
if (!exit)
return cond;
tree *assumptions, tree *number_of_iterationsm1,
tree *number_of_iterations, gcond **inner_loop_cond)
{
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "=== vect_analyze_loop_form ===\n");
+ DUMP_VECT_SCOPE ("vect_analyze_loop_form");
/* Different restrictions apply when we are considering an inner-most loop,
vs. an outer (nested) loop.
poly_uint64 vectorization_factor;
int i;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "=== vect_update_vf_for_slp ===\n");
+ DUMP_VECT_SCOPE ("vect_update_vf_for_slp");
vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
gcc_assert (known_ne (vectorization_factor, 0U));
bool need_to_vectorize = false;
bool ok;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "=== vect_analyze_loop_operations ===\n");
+ DUMP_VECT_SCOPE ("vect_analyze_loop_operations");
stmt_vector_for_cost cost_vec;
cost_vec.create (2);
targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
unsigned int next_size = 0;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "===== analyze_loop_nest =====\n");
+ DUMP_VECT_SCOPE ("analyze_loop_nest");
if (loop_outer (loop)
&& loop_vec_info_for_loop (loop_outer (loop))
if (!vec_stmt) /* transformation not required. */
{
STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "=== vectorizable_induction ===\n");
+ DUMP_VECT_SCOPE ("vectorizable_induction");
vect_model_induction_cost (stmt_info, ncopies, cost_vec);
return true;
}
bool check_profitability = false;
unsigned int th;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location, "=== vec_transform_loop ===\n");
+ DUMP_VECT_SCOPE ("vec_transform_loop");
/* Use the more conservative vectorization threshold. If the number
of iterations is constant assume the cost check has been performed
{
slp_scheduled = true;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "=== scheduling SLP instances ===\n");
+ DUMP_VECT_SCOPE ("scheduling SLP instances");
vect_schedule_slp (loop_vinfo);
}
auto_vec<gimple *, 1> stmts_to_replace;
gimple *stmt;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "=== vect_pattern_recog ===\n");
+ DUMP_VECT_SCOPE ("vect_pattern_recog");
if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
{
unsigned int i;
gimple *first_element;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===\n");
+ DUMP_VECT_SCOPE ("vect_analyze_slp");
/* Find SLP sequences starting from groups of grouped stores. */
FOR_EACH_VEC_ELT (vinfo->grouped_stores, i, first_element)
slp_instance instance;
int decided_to_slp = 0;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ==="
- "\n");
+ DUMP_VECT_SCOPE ("vect_make_slp_decision");
FOR_EACH_VEC_ELT (slp_instances, i, instance)
{
vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
slp_instance instance;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ==="
- "\n");
+ DUMP_VECT_SCOPE ("vect_detect_hybrid_slp");
/* First walk all pattern stmt in the loop and mark defs of uses as
hybrid because immediate uses in them are not recorded. */
slp_instance instance;
int i;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "=== vect_slp_analyze_operations ===\n");
+ DUMP_VECT_SCOPE ("vect_slp_analyze_operations");
scalar_stmts_to_slp_tree_map_t *visited
= new scalar_stmts_to_slp_tree_map_t ();
bool any_vectorized = false;
auto_vector_sizes vector_sizes;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");
+ DUMP_VECT_SCOPE ("vect_slp_analyze_bb");
/* Autodetect first vector size we try. */
current_vector_size = 0;
bool live_p;
enum vect_relevant relevant;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "=== vect_mark_stmts_to_be_vectorized ===\n");
+ DUMP_VECT_SCOPE ("vect_mark_stmts_to_be_vectorized");
auto_vec<gimple *, 64> worklist;
if (! vec_stmt)
{
STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_bswap ==="
- "\n");
+ DUMP_VECT_SCOPE ("vectorizable_bswap");
if (! slp_node)
{
record_stmt_cost (cost_vec,
if (!vec_stmt) /* transformation not required. */
{
STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
- "\n");
+ DUMP_VECT_SCOPE ("vectorizable_call");
vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
record_stmt_cost (cost_vec, ncopies / 2,
STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
}
STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "=== vectorizable_simd_clone_call ===\n");
+ DUMP_VECT_SCOPE ("vectorizable_simd_clone_call");
/* vect_model_simple_cost (stmt_info, ncopies, dt, slp_node, cost_vec); */
return true;
}
if (!vec_stmt) /* transformation not required. */
{
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "=== vectorizable_conversion ===\n");
+ DUMP_VECT_SCOPE ("vectorizable_conversion");
if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
{
STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
if (!vec_stmt) /* transformation not required. */
{
STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "=== vectorizable_assignment ===\n");
+ DUMP_VECT_SCOPE ("vectorizable_assignment");
vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
return true;
}
if (!vec_stmt) /* transformation not required. */
{
STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "=== vectorizable_shift ===\n");
+ DUMP_VECT_SCOPE ("vectorizable_shift");
vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
return true;
}
if (!vec_stmt) /* transformation not required. */
{
STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "=== vectorizable_operation ===\n");
+ DUMP_VECT_SCOPE ("vectorizable_operation");
vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
return true;
}
/* Source location */
extern source_location vect_location;
+/* If dumping is enabled, emit a MSG_NOTE at vect_location about
+ entering MSG within the vectorizer. MSG should be a string literal. */
+
+#define DUMP_VECT_SCOPE(MSG) \
+ do { \
+ if (dump_enabled_p ()) \
+ dump_printf_loc (MSG_NOTE, vect_location, \
+ "=== " MSG " ===\n"); \
+ } while (0)
+
/*-----------------------------------------------------------------*/
/* Function prototypes. */
/*-----------------------------------------------------------------*/
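
For readers new to the macro, here is a minimal standalone sketch of what a
DUMP_VECT_SCOPE call does at a call site.  The dump_enabled_p,
dump_printf_loc, MSG_NOTE and vect_location stubs below are placeholders
invented only so the example compiles on its own; the real definitions live
in GCC's dump infrastructure and are not reproduced here.

    /* Standalone sketch (not part of the patch): a simplified model of a
       DUMP_VECT_SCOPE call site.  The stubs stand in for GCC's real dump
       API solely so this file compiles by itself.  */

    #include <stdio.h>
    #include <stdbool.h>

    static bool dump_enabled_p (void) { return true; }      /* stub */
    #define MSG_NOTE "note"                                  /* stub */
    #define vect_location "example.c:1"                      /* stub */
    #define dump_printf_loc(kind, loc, ...) \
      (printf ("%s %s: ", kind, loc), printf (__VA_ARGS__))  /* stub */

    /* The macro from the patch: the do { } while (0) wrapper makes it
       behave as a single statement, e.g. after an unbraced 'if'.  */
    #define DUMP_VECT_SCOPE(MSG) \
      do { \
        if (dump_enabled_p ()) \
          dump_printf_loc (MSG_NOTE, vect_location, \
                           "=== " MSG " ===\n"); \
      } while (0)

    int
    main (void)
    {
      /* Before the patch each function open-coded the guarded
         dump_printf_loc call; after the patch it is this one-liner.  */
      DUMP_VECT_SCOPE ("vect_analyze_data_refs");
      return 0;
    }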