Introduce DUMP_VECT_SCOPE macro
author     David Malcolm <dmalcolm@redhat.com>    Mon, 18 Jun 2018 18:49:40 +0000
committer  David Malcolm <dmalcolm@gcc.gnu.org>   Mon, 18 Jun 2018 18:49:40 +0000
gcc/ChangeLog:
* tree-vect-data-refs.c (vect_analyze_data_ref_dependences):
Replace dump_printf_loc call with DUMP_VECT_SCOPE.
(vect_slp_analyze_instance_dependence): Likewise.
(vect_enhance_data_refs_alignment): Likewise.
(vect_analyze_data_refs_alignment): Likewise.
(vect_slp_analyze_and_verify_instance_alignment): Likewise.
(vect_analyze_data_ref_accesses): Likewise.
(vect_prune_runtime_alias_test_list): Likewise.
(vect_analyze_data_refs): Likewise.
* tree-vect-loop-manip.c (vect_update_inits_of_drs): Likewise.
* tree-vect-loop.c (vect_determine_vectorization_factor): Likewise.
(vect_analyze_scalar_cycles_1): Likewise.
(vect_get_loop_niters): Likewise.
(vect_analyze_loop_form_1): Likewise.
(vect_update_vf_for_slp): Likewise.
(vect_analyze_loop_operations): Likewise.
(vect_analyze_loop): Likewise.
(vectorizable_induction): Likewise.
(vect_transform_loop): Likewise.
* tree-vect-patterns.c (vect_pattern_recog): Likewise.
* tree-vect-slp.c (vect_analyze_slp): Likewise.
(vect_make_slp_decision): Likewise.
(vect_detect_hybrid_slp): Likewise.
(vect_slp_analyze_operations): Likewise.
(vect_slp_bb): Likewise.
* tree-vect-stmts.c (vect_mark_stmts_to_be_vectorized): Likewise.
(vectorizable_bswap): Likewise.
(vectorizable_call): Likewise.
(vectorizable_simd_clone_call): Likewise.
(vectorizable_conversion): Likewise.
(vectorizable_assignment): Likewise.
(vectorizable_shift): Likewise.
(vectorizable_operation): Likewise.
* tree-vectorizer.h (DUMP_VECT_SCOPE): New macro.

From-SVN: r261710
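
For quick reference, the change boils down to the sketch below (condensed
from the hunks that follow; the macro body is copied from the
tree-vectorizer.h hunk, and vect_analyze_data_refs is just one of the
converted callers):

    /* New macro in gcc/tree-vectorizer.h.  */
    #define DUMP_VECT_SCOPE(MSG) \
      do {                                          \
        if (dump_enabled_p ())                      \
          dump_printf_loc (MSG_NOTE, vect_location, \
                           "=== " MSG " ===\n");    \
      } while (0)

    /* Old idiom, repeated at the top of many vectorizer routines ...  */
    if (dump_enabled_p ())
      dump_printf_loc (MSG_NOTE, vect_location,
                       "=== vect_analyze_data_refs ===\n");

    /* ... and the equivalent single line after this patch.  */
    DUMP_VECT_SCOPE ("vect_analyze_data_refs");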

gcc/ChangeLog
gcc/tree-vect-data-refs.c
gcc/tree-vect-loop-manip.c
gcc/tree-vect-loop.c
gcc/tree-vect-patterns.c
gcc/tree-vect-slp.c
gcc/tree-vect-stmts.c
gcc/tree-vectorizer.h

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index b3449654b1e08c0dca22dfd307be5fa01a7161cf..914e075afce1b136541e028a8b8e9a15d920e7d5 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,40 @@
+2018-06-18  David Malcolm  <dmalcolm@redhat.com>
+
+       * tree-vect-data-refs.c (vect_analyze_data_ref_dependences):
+       Replace dump_printf_loc call with DUMP_VECT_SCOPE.
+       (vect_slp_analyze_instance_dependence): Likewise.
+       (vect_enhance_data_refs_alignment): Likewise.
+       (vect_analyze_data_refs_alignment): Likewise.
+       (vect_slp_analyze_and_verify_instance_alignment): Likewise.
+       (vect_analyze_data_ref_accesses): Likewise.
+       (vect_prune_runtime_alias_test_list): Likewise.
+       (vect_analyze_data_refs): Likewise.
+       * tree-vect-loop-manip.c (vect_update_inits_of_drs): Likewise.
+       * tree-vect-loop.c (vect_determine_vectorization_factor): Likewise.
+       (vect_analyze_scalar_cycles_1): Likewise.
+       (vect_get_loop_niters): Likewise.
+       (vect_analyze_loop_form_1): Likewise.
+       (vect_update_vf_for_slp): Likewise.
+       (vect_analyze_loop_operations): Likewise.
+       (vect_analyze_loop): Likewise.
+       (vectorizable_induction): Likewise.
+       (vect_transform_loop): Likewise.
+       * tree-vect-patterns.c (vect_pattern_recog): Likewise.
+       * tree-vect-slp.c (vect_analyze_slp): Likewise.
+       (vect_make_slp_decision): Likewise.
+       (vect_detect_hybrid_slp): Likewise.
+       (vect_slp_analyze_operations): Likewise.
+       (vect_slp_bb): Likewise.
+       * tree-vect-stmts.c (vect_mark_stmts_to_be_vectorized): Likewise.
+       (vectorizable_bswap): Likewise.
+       (vectorizable_call): Likewise.
+       (vectorizable_simd_clone_call): Likewise.
+       (vectorizable_conversion): Likewise.
+       (vectorizable_assignment): Likewise.
+       (vectorizable_shift): Likewise.
+       (vectorizable_operation): Likewise.
+       * tree-vectorizer.h (DUMP_VECT_SCOPE): New macro.
+
 2018-06-18  Martin Sebor  <msebor@redhat.com>
 
        PR tree-optimization/81384
diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c
index 3eb67c93dff07e18f6ea98805f5a06920385c960..3e66e25e9cf56b42cfbae4f0ff3bcc6da63fd0f3 100644
--- a/gcc/tree-vect-data-refs.c
+++ b/gcc/tree-vect-data-refs.c
@@ -562,9 +562,7 @@ vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo,
   unsigned int i;
   struct data_dependence_relation *ddr;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_analyze_data_ref_dependences ===\n");
+  DUMP_VECT_SCOPE ("vect_analyze_data_ref_dependences");
 
   LOOP_VINFO_DDRS (loop_vinfo)
     .create (LOOP_VINFO_DATAREFS (loop_vinfo).length ()
@@ -741,9 +739,7 @@ vect_slp_analyze_node_dependences (slp_instance instance, slp_tree node,
 bool
 vect_slp_analyze_instance_dependence (slp_instance instance)
 {
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_slp_analyze_instance_dependence ===\n");
+  DUMP_VECT_SCOPE ("vect_slp_analyze_instance_dependence");
 
   /* The stores of this instance are at the root of the SLP tree.  */
   slp_tree store = SLP_INSTANCE_TREE (instance);
@@ -1685,9 +1681,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
   unsigned int mis, same_align_drs_max = 0;
   hash_table<peel_info_hasher> peeling_htab (1);
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_enhance_data_refs_alignment ===\n");
+  DUMP_VECT_SCOPE ("vect_enhance_data_refs_alignment");
 
   /* Reset data so we can safely be called multiple times.  */
   LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
@@ -2345,9 +2339,7 @@ vect_find_same_alignment_drs (struct data_dependence_relation *ddr)
 bool
 vect_analyze_data_refs_alignment (loop_vec_info vinfo)
 {
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_analyze_data_refs_alignment ===\n");
+  DUMP_VECT_SCOPE ("vect_analyze_data_refs_alignment");
 
   /* Mark groups of data references with same alignment using
      data dependence information.  */
@@ -2426,9 +2418,7 @@ vect_slp_analyze_and_verify_node_alignment (slp_tree node)
 bool
 vect_slp_analyze_and_verify_instance_alignment (slp_instance instance)
 {
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_slp_analyze_and_verify_instance_alignment ===\n");
+  DUMP_VECT_SCOPE ("vect_slp_analyze_and_verify_instance_alignment");
 
   slp_tree node;
   unsigned i;
@@ -2931,9 +2921,7 @@ vect_analyze_data_ref_accesses (vec_info *vinfo)
   vec<data_reference_p> datarefs = vinfo->datarefs;
   struct data_reference *dr;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_analyze_data_ref_accesses ===\n");
+  DUMP_VECT_SCOPE ("vect_analyze_data_ref_accesses");
 
   if (datarefs.is_empty ())
     return true;
@@ -3379,9 +3367,7 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
   unsigned int i;
   tree length_factor;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_prune_runtime_alias_test_list ===\n");
+  DUMP_VECT_SCOPE ("vect_prune_runtime_alias_test_list");
 
   /* Step values are irrelevant for aliasing if the number of vector
      iterations is equal to the number of scalar iterations (which can
@@ -4075,9 +4061,7 @@ vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf)
   struct data_reference *dr;
   tree scalar_type;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                    "=== vect_analyze_data_refs ===\n");
+  DUMP_VECT_SCOPE ("vect_analyze_data_refs");
 
   if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
     loop = LOOP_VINFO_LOOP (loop_vinfo);
diff --git a/gcc/tree-vect-loop-manip.c b/gcc/tree-vect-loop-manip.c
index 7443e7f473e339bac2f54c13d8cd7e58c4dde9ef..3eab650c4e6e3c6de173f6081b5db04aa21478b2 100644
--- a/gcc/tree-vect-loop-manip.c
+++ b/gcc/tree-vect-loop-manip.c
@@ -1733,9 +1733,7 @@ vect_update_inits_of_drs (loop_vec_info loop_vinfo, tree niters,
   vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
   struct data_reference *dr;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                    "=== vect_update_inits_of_dr ===\n");
+  DUMP_VECT_SCOPE ("vect_update_inits_of_dr");
 
   /* Adjust niters to sizetype and insert stmts on loop preheader edge.  */
   if (!types_compatible_p (sizetype, TREE_TYPE (niters)))
diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c
index 385d62f4cb3adcaa941a22c34bd4439a499a9da6..8e45aecfc761512d1fe333370b9383442cfb1c76 100644
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -296,9 +296,7 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
   unsigned i;
   auto_vec<stmt_vec_info> mask_producers;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_determine_vectorization_factor ===\n");
+  DUMP_VECT_SCOPE ("vect_determine_vectorization_factor");
 
   for (i = 0; i < nbbs; i++)
     {
@@ -479,9 +477,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
   gphi_iterator gsi;
   bool double_reduc;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_analyze_scalar_cycles ===\n");
+  DUMP_VECT_SCOPE ("vect_analyze_scalar_cycles");
 
   /* First - identify all inductions.  Reduction detection assumes that all the
      inductions have been identified, therefore, this order must not be
@@ -727,9 +723,7 @@ vect_get_loop_niters (struct loop *loop, tree *assumptions,
   *assumptions = boolean_true_node;
   *number_of_iterationsm1 = chrec_dont_know;
   *number_of_iterations = chrec_dont_know;
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                    "=== get_loop_niters ===\n");
+  DUMP_VECT_SCOPE ("get_loop_niters");
 
   if (!exit)
     return cond;
@@ -1170,9 +1164,7 @@ vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
                          tree *assumptions, tree *number_of_iterationsm1,
                          tree *number_of_iterations, gcond **inner_loop_cond)
 {
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                    "=== vect_analyze_loop_form ===\n");
+  DUMP_VECT_SCOPE ("vect_analyze_loop_form");
 
   /* Different restrictions apply when we are considering an inner-most loop,
      vs. an outer (nested) loop.
@@ -1422,9 +1414,7 @@ vect_update_vf_for_slp (loop_vec_info loop_vinfo)
   poly_uint64 vectorization_factor;
   int i;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                    "=== vect_update_vf_for_slp ===\n");
+  DUMP_VECT_SCOPE ("vect_update_vf_for_slp");
 
   vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
   gcc_assert (known_ne (vectorization_factor, 0U));
@@ -1527,9 +1517,7 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo)
   bool need_to_vectorize = false;
   bool ok;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                    "=== vect_analyze_loop_operations ===\n");
+  DUMP_VECT_SCOPE ("vect_analyze_loop_operations");
 
   stmt_vector_for_cost cost_vec;
   cost_vec.create (2);
@@ -2308,9 +2296,7 @@ vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo)
   targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
   unsigned int next_size = 0;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                    "===== analyze_loop_nest =====\n");
+  DUMP_VECT_SCOPE ("analyze_loop_nest");
 
   if (loop_outer (loop)
       && loop_vec_info_for_loop (loop_outer (loop))
@@ -7460,9 +7446,7 @@ vectorizable_induction (gimple *phi,
   if (!vec_stmt) /* transformation not required.  */
     {
       STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_NOTE, vect_location,
-                         "=== vectorizable_induction ===\n");
+      DUMP_VECT_SCOPE ("vectorizable_induction");
       vect_model_induction_cost (stmt_info, ncopies, cost_vec);
       return true;
     }
@@ -8335,8 +8319,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
   bool check_profitability = false;
   unsigned int th;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location, "=== vec_transform_loop ===\n");
+  DUMP_VECT_SCOPE ("vec_transform_loop");
 
   /* Use the more conservative vectorization threshold.  If the number
      of iterations is constant assume the cost check has been performed
@@ -8618,9 +8601,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
                {
                  slp_scheduled = true;
 
-                 if (dump_enabled_p ())
-                   dump_printf_loc (MSG_NOTE, vect_location,
-                                    "=== scheduling SLP instances ===\n");
+                 DUMP_VECT_SCOPE ("scheduling SLP instances");
 
                  vect_schedule_slp (loop_vinfo);
                }
diff --git a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c
index 6621392b7e8088088fee7454d76b4d09006b675e..c530810aa3e5a92c19453c494a2db8d1c15aeefd 100644
--- a/gcc/tree-vect-patterns.c
+++ b/gcc/tree-vect-patterns.c
@@ -4670,9 +4670,7 @@ vect_pattern_recog (vec_info *vinfo)
   auto_vec<gimple *, 1> stmts_to_replace;
   gimple *stmt;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_pattern_recog ===\n");
+  DUMP_VECT_SCOPE ("vect_pattern_recog");
 
   if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
     {
diff --git a/gcc/tree-vect-slp.c b/gcc/tree-vect-slp.c
index 5e8a0d0fc43fa1577e9d2a5d8c68abf8aa7eb1fd..74abf5f552367de3c68f9ab814e67a31e3ef1900 100644
--- a/gcc/tree-vect-slp.c
+++ b/gcc/tree-vect-slp.c
@@ -2177,8 +2177,7 @@ vect_analyze_slp (vec_info *vinfo, unsigned max_tree_size)
   unsigned int i;
   gimple *first_element;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===\n");
+  DUMP_VECT_SCOPE ("vect_analyze_slp");
 
   /* Find SLP sequences starting from groups of grouped stores.  */
   FOR_EACH_VEC_ELT (vinfo->grouped_stores, i, first_element)
@@ -2231,9 +2230,7 @@ vect_make_slp_decision (loop_vec_info loop_vinfo)
   slp_instance instance;
   int decided_to_slp = 0;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ==="
-                     "\n");
+  DUMP_VECT_SCOPE ("vect_make_slp_decision");
 
   FOR_EACH_VEC_ELT (slp_instances, i, instance)
     {
@@ -2399,9 +2396,7 @@ vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
   vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
   slp_instance instance;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ==="
-                     "\n");
+  DUMP_VECT_SCOPE ("vect_detect_hybrid_slp");
 
   /* First walk all pattern stmt in the loop and mark defs of uses as
      hybrid because immediate uses in them are not recorded.  */
@@ -2622,9 +2617,7 @@ vect_slp_analyze_operations (vec_info *vinfo)
   slp_instance instance;
   int i;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                    "=== vect_slp_analyze_operations ===\n");
+  DUMP_VECT_SCOPE ("vect_slp_analyze_operations");
 
   scalar_stmts_to_slp_tree_map_t *visited
     = new scalar_stmts_to_slp_tree_map_t ();
@@ -2981,8 +2974,7 @@ vect_slp_bb (basic_block bb)
   bool any_vectorized = false;
   auto_vector_sizes vector_sizes;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");
+  DUMP_VECT_SCOPE ("vect_slp_analyze_bb");
 
   /* Autodetect first vector size we try.  */
   current_vector_size = 0;
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index 9f365e31e4954cfef25c814220fb0fda1c7e3d11..f2f91df935ce86a95889fc991f42c104f2d1c639 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -640,9 +640,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
   bool live_p;
   enum vect_relevant relevant;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_mark_stmts_to_be_vectorized ===\n");
+  DUMP_VECT_SCOPE ("vect_mark_stmts_to_be_vectorized");
 
   auto_vec<gimple *, 64> worklist;
 
@@ -3027,9 +3025,7 @@ vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi,
   if (! vec_stmt)
     {
       STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_bswap ==="
-                         "\n");
+      DUMP_VECT_SCOPE ("vectorizable_bswap");
       if (! slp_node)
        {
          record_stmt_cost (cost_vec,
@@ -3346,9 +3342,7 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
   if (!vec_stmt) /* transformation not required.  */
     {
       STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
-                         "\n");
+      DUMP_VECT_SCOPE ("vectorizable_call");
       vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
       if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
        record_stmt_cost (cost_vec, ncopies / 2,
@@ -4023,9 +4017,7 @@ vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
            STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
          }
       STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
-      if (dump_enabled_p ())
-       dump_printf_loc (MSG_NOTE, vect_location,
-                        "=== vectorizable_simd_clone_call ===\n");
+      DUMP_VECT_SCOPE ("vectorizable_simd_clone_call");
 /*      vect_model_simple_cost (stmt_info, ncopies, dt, slp_node, cost_vec); */
       return true;
     }
@@ -4865,9 +4857,7 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
 
   if (!vec_stmt)               /* transformation not required.  */
     {
-      if (dump_enabled_p ())
-       dump_printf_loc (MSG_NOTE, vect_location,
-                         "=== vectorizable_conversion ===\n");
+      DUMP_VECT_SCOPE ("vectorizable_conversion");
       if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
         {
          STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
@@ -5279,9 +5269,7 @@ vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
   if (!vec_stmt) /* transformation not required.  */
     {
       STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_NOTE, vect_location,
-                         "=== vectorizable_assignment ===\n");
+      DUMP_VECT_SCOPE ("vectorizable_assignment");
       vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
       return true;
     }
@@ -5644,9 +5632,7 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
   if (!vec_stmt) /* transformation not required.  */
     {
       STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_NOTE, vect_location,
-                         "=== vectorizable_shift ===\n");
+      DUMP_VECT_SCOPE ("vectorizable_shift");
       vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
       return true;
     }
@@ -5968,9 +5954,7 @@ vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
   if (!vec_stmt) /* transformation not required.  */
     {
       STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_NOTE, vect_location,
-                         "=== vectorizable_operation ===\n");
+      DUMP_VECT_SCOPE ("vectorizable_operation");
       vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
       return true;
     }
diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h
index f4b4dec64f0ac2522d92947f9faeafb1912739ec..8bb9e3e6e8896c2538105d443941ab76589d3ffd 100644
--- a/gcc/tree-vectorizer.h
+++ b/gcc/tree-vectorizer.h
@@ -1425,6 +1425,16 @@ vect_get_scalar_dr_size (struct data_reference *dr)
 /* Source location */
 extern source_location vect_location;
 
+/* If dumping is enabled, emit a MSG_NOTE at vect_location about
+   entering MSG within the vectorizer.  MSG should be a string literal. */
+
+#define DUMP_VECT_SCOPE(MSG) \
+  do {                                         \
+    if (dump_enabled_p ())                     \
+      dump_printf_loc (MSG_NOTE, vect_location, \
+                      "=== " MSG " ===\n");    \
+  } while (0)
+
 /*-----------------------------------------------------------------*/
 /* Function prototypes.                                            */
 /*-----------------------------------------------------------------*/
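
One note on the macro's shape: wrapping the body in do { ... } while (0)
makes each use behave like a single statement, while the expansion is the
same guarded dump_printf_loc call the callers open-coded before, so each
converted function still emits its "=== name ===" note.  A minimal sketch
of why the wrapper matters, using hypothetical names (dump_this_pass,
do_something_else) that are not from the patch:

    /* If the macro expanded to a bare if-statement instead of a
       do/while block, the else below would bind to that inner if
       rather than to the outer condition.  */
    if (dump_this_pass)
      DUMP_VECT_SCOPE ("hypothetical_subroutine");
    else
      do_something_else ();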