pass cfun to pass::execute
[gcc.git] / gcc / tree-vectorizer.c
index 0227d08c9f0928bc857952a7f5f34933959ccce1..d7de964fa3776bfe0d96f2e444d43a4ca0554aa5 100644 (file)
@@ -1,5 +1,5 @@
 /* Vectorizer
-   Copyright (C) 2003-2013 Free Software Foundation, Inc.
+   Copyright (C) 2003-2014 Free Software Foundation, Inc.
    Contributed by Dorit Naishlos <dorit@il.ibm.com>
 
 This file is part of GCC.
@@ -59,21 +59,323 @@ along with GCC; see the file COPYING3.  If not see
 #include "coretypes.h"
 #include "dumpfile.h"
 #include "tm.h"
-#include "ggc.h"
 #include "tree.h"
+#include "stor-layout.h"
 #include "tree-pretty-print.h"
-#include "tree-flow.h"
+#include "basic-block.h"
+#include "tree-ssa-alias.h"
+#include "internal-fn.h"
+#include "gimple-expr.h"
+#include "is-a.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "gimple-walk.h"
+#include "gimple-ssa.h"
+#include "cgraph.h"
+#include "tree-phinodes.h"
+#include "ssa-iterators.h"
+#include "tree-ssa-loop-manip.h"
+#include "tree-cfg.h"
 #include "cfgloop.h"
 #include "tree-vectorizer.h"
 #include "tree-pass.h"
+#include "tree-ssa-propagate.h"
+#include "dbgcnt.h"
+#include "gimple-fold.h"
 
 /* Loop or bb location.  */
-LOC vect_location;
+source_location vect_location;
 
 /* Vector mapping GIMPLE stmt to stmt_vec_info. */
 vec<vec_void_p> stmt_vec_info_vec;
+\f
+/* For mapping simduid to vectorization factor.  */
+
+struct simduid_to_vf : typed_free_remove<simduid_to_vf>
+{
+  unsigned int simduid;
+  int vf;
+
+  /* hash_table support.  */
+  typedef simduid_to_vf value_type;
+  typedef simduid_to_vf compare_type;
+  static inline hashval_t hash (const value_type *);
+  static inline int equal (const value_type *, const compare_type *);
+};
 
+inline hashval_t
+simduid_to_vf::hash (const value_type *p)
+{
+  return p->simduid;
+}
+
+inline int
+simduid_to_vf::equal (const value_type *p1, const value_type *p2)
+{
+  return p1->simduid == p2->simduid;
+}
+
+/* This hash maps the OMP simd array to the corresponding simduid used
+   to index into it.  Like thus,
+
+        _7 = GOMP_SIMD_LANE (simduid.0)
+        ...
+        ...
+        D.1737[_7] = stuff;
+
+
+   This hash maps from the OMP simd array (D.1737[]) to DECL_UID of
+   simduid.0.  */
+
+struct simd_array_to_simduid : typed_free_remove<simd_array_to_simduid>
+{
+  tree decl;
+  unsigned int simduid;
+
+  /* hash_table support.  */
+  typedef simd_array_to_simduid value_type;
+  typedef simd_array_to_simduid compare_type;
+  static inline hashval_t hash (const value_type *);
+  static inline int equal (const value_type *, const compare_type *);
+};
+
+inline hashval_t
+simd_array_to_simduid::hash (const value_type *p)
+{
+  return DECL_UID (p->decl);
+}
+
+inline int
+simd_array_to_simduid::equal (const value_type *p1, const value_type *p2)
+{
+  return p1->decl == p2->decl;
+}
+
+/* Fold IFN_GOMP_SIMD_LANE, IFN_GOMP_SIMD_VF and IFN_GOMP_SIMD_LAST_LANE
+   into their corresponding constants.  */
+
+static void
+adjust_simduid_builtins (hash_table <simduid_to_vf> &htab)
+{
+  basic_block bb;
+
+  FOR_EACH_BB_FN (bb, cfun)
+    {
+      gimple_stmt_iterator i;
+
+      for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
+       {
+         unsigned int vf = 1;
+         enum internal_fn ifn;
+         gimple stmt = gsi_stmt (i);
+         tree t;
+         if (!is_gimple_call (stmt)
+             || !gimple_call_internal_p (stmt))
+           continue;
+         ifn = gimple_call_internal_fn (stmt);
+         switch (ifn)
+           {
+           case IFN_GOMP_SIMD_LANE:
+           case IFN_GOMP_SIMD_VF:
+           case IFN_GOMP_SIMD_LAST_LANE:
+             break;
+           default:
+             continue;
+           }
+         tree arg = gimple_call_arg (stmt, 0);
+         gcc_assert (arg != NULL_TREE);
+         gcc_assert (TREE_CODE (arg) == SSA_NAME);
+         simduid_to_vf *p = NULL, data;
+         data.simduid = DECL_UID (SSA_NAME_VAR (arg));
+         if (htab.is_created ())
+           p = htab.find (&data);
+         if (p)
+           vf = p->vf;
+         switch (ifn)
+           {
+           case IFN_GOMP_SIMD_VF:
+             t = build_int_cst (unsigned_type_node, vf);
+             break;
+           case IFN_GOMP_SIMD_LANE:
+             t = build_int_cst (unsigned_type_node, 0);
+             break;
+           case IFN_GOMP_SIMD_LAST_LANE:
+             t = gimple_call_arg (stmt, 1);
+             break;
+           default:
+             gcc_unreachable ();
+           }
+         update_call_from_tree (&i, t);
+       }
+    }
+}
+
+/* Helper structure for note_simd_array_uses.  */
+
+struct note_simd_array_uses_struct
+{
+  hash_table <simd_array_to_simduid> *htab;
+  unsigned int simduid;
+};
+
+/* Callback for note_simd_array_uses, called through walk_gimple_op.  */
+
+static tree
+note_simd_array_uses_cb (tree *tp, int *walk_subtrees, void *data)
+{
+  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
+  struct note_simd_array_uses_struct *ns
+    = (struct note_simd_array_uses_struct *) wi->info;
+
+  if (TYPE_P (*tp))
+    *walk_subtrees = 0;
+  else if (VAR_P (*tp)
+          && lookup_attribute ("omp simd array", DECL_ATTRIBUTES (*tp))
+          && DECL_CONTEXT (*tp) == current_function_decl)
+    {
+      simd_array_to_simduid data;
+      if (!ns->htab->is_created ())
+       ns->htab->create (15);
+      data.decl = *tp;
+      data.simduid = ns->simduid;
+      simd_array_to_simduid **slot = ns->htab->find_slot (&data, INSERT);
+      if (*slot == NULL)
+       {
+         simd_array_to_simduid *p = XNEW (simd_array_to_simduid);
+         *p = data;
+         *slot = p;
+       }
+      else if ((*slot)->simduid != ns->simduid)
+       (*slot)->simduid = -1U;
+      *walk_subtrees = 0;
+    }
+  return NULL_TREE;
+}
+
+/* Find "omp simd array" temporaries and map them to corresponding
+   simduid.  */
+
+static void
+note_simd_array_uses (hash_table <simd_array_to_simduid> *htab)
+{
+  basic_block bb;
+  gimple_stmt_iterator gsi;
+  struct walk_stmt_info wi;
+  struct note_simd_array_uses_struct ns;
+
+  memset (&wi, 0, sizeof (wi));
+  wi.info = &ns;
+  ns.htab = htab;
+
+  FOR_EACH_BB_FN (bb, cfun)
+    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+      {
+       gimple stmt = gsi_stmt (gsi);
+       if (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt))
+         continue;
+       switch (gimple_call_internal_fn (stmt))
+         {
+         case IFN_GOMP_SIMD_LANE:
+         case IFN_GOMP_SIMD_VF:
+         case IFN_GOMP_SIMD_LAST_LANE:
+           break;
+         default:
+           continue;
+         }
+       tree lhs = gimple_call_lhs (stmt);
+       if (lhs == NULL_TREE)
+         continue;
+       imm_use_iterator use_iter;
+       gimple use_stmt;
+       ns.simduid = DECL_UID (SSA_NAME_VAR (gimple_call_arg (stmt, 0)));
+       FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, lhs)
+         if (!is_gimple_debug (use_stmt))
+           walk_gimple_op (use_stmt, note_simd_array_uses_cb, &wi);
+      }
+}
 \f
+/* A helper function to free data refs.  */
+
+void
+vect_destroy_datarefs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
+{
+  vec<data_reference_p> datarefs;
+  struct data_reference *dr;
+  unsigned int i;
+
+ if (loop_vinfo)
+    datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
+  else
+    datarefs = BB_VINFO_DATAREFS (bb_vinfo);
+
+  FOR_EACH_VEC_ELT (datarefs, i, dr)
+    if (dr->aux)
+      {
+        free (dr->aux);
+        dr->aux = NULL;
+      }
+
+  free_data_refs (datarefs);
+}
+
+
+/* If LOOP has been versioned during ifcvt, return the internal call
+   guarding it.  */
+
+static gimple
+vect_loop_vectorized_call (struct loop *loop)
+{
+  basic_block bb = loop_preheader_edge (loop)->src;
+  gimple g;
+  do
+    {
+      g = last_stmt (bb);
+      if (g)
+       break;
+      if (!single_pred_p (bb))
+       break;
+      bb = single_pred (bb);
+    }
+  while (1);
+  if (g && gimple_code (g) == GIMPLE_COND)
+    {
+      gimple_stmt_iterator gsi = gsi_for_stmt (g);
+      gsi_prev (&gsi);
+      if (!gsi_end_p (gsi))
+       {
+         g = gsi_stmt (gsi);
+         if (is_gimple_call (g)
+             && gimple_call_internal_p (g)
+             && gimple_call_internal_fn (g) == IFN_LOOP_VECTORIZED
+             && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->num
+                 || tree_to_shwi (gimple_call_arg (g, 1)) == loop->num))
+           return g;
+       }
+    }
+  return NULL;
+}
+
+/* Fold LOOP_VECTORIZED internal call G to VALUE and
+   update any immediate uses of its LHS.  */
+
+static void
+fold_loop_vectorized_call (gimple g, tree value)
+{
+  tree lhs = gimple_call_lhs (g);
+  use_operand_p use_p;
+  imm_use_iterator iter;
+  gimple use_stmt;
+  gimple_stmt_iterator gsi = gsi_for_stmt (g);
+
+  update_call_from_tree (&gsi, value);
+  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
+    {
+      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
+       SET_USE (use_p, value);
+      update_stmt (use_stmt);
+    }
+}
+
 /* Function vectorize_loops.
 
    Entry point to loop vectorization phase.  */
@@ -84,14 +386,24 @@ vectorize_loops (void)
   unsigned int i;
   unsigned int num_vectorized_loops = 0;
   unsigned int vect_loops_num;
-  loop_iterator li;
   struct loop *loop;
+  hash_table <simduid_to_vf> simduid_to_vf_htab;
+  hash_table <simd_array_to_simduid> simd_array_to_simduid_htab;
+  bool any_ifcvt_loops = false;
+  unsigned ret = 0;
 
-  vect_loops_num = number_of_loops ();
+  vect_loops_num = number_of_loops (cfun);
 
   /* Bail out if there are no loops.  */
   if (vect_loops_num <= 1)
-    return 0;
+    {
+      if (cfun->has_simduid_loops)
+       adjust_simduid_builtins (simduid_to_vf_htab);
+      return 0;
+    }
+
+  if (cfun->has_simduid_loops)
+    note_simd_array_uses (&simd_array_to_simduid_htab);
 
   init_stmt_vec_info_vec ();
 
@@ -100,15 +412,20 @@ vectorize_loops (void)
   /* If some loop was duplicated, it gets bigger number
      than all previously defined loops.  This fact allows us to run
      only over initial loops skipping newly generated ones.  */
-  FOR_EACH_LOOP (li, loop, 0)
-    if (optimize_loop_nest_for_speed_p (loop))
+  FOR_EACH_LOOP (loop, 0)
+    if (loop->dont_vectorize)
+      any_ifcvt_loops = true;
+    else if ((flag_tree_loop_vectorize
+             && optimize_loop_nest_for_speed_p (loop))
+            || loop->force_vectorize)
       {
        loop_vec_info loop_vinfo;
        vect_location = find_loop_location (loop);
-        if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOC
+        if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOCATION
            && dump_enabled_p ())
-         dump_printf (MSG_ALL, "\nAnalyzing loop at %s:%d\n",
-                       LOC_FILE (vect_location), LOC_LINE (vect_location));
+         dump_printf (MSG_NOTE, "\nAnalyzing loop at %s:%d\n",
+                       LOCATION_FILE (vect_location),
+                      LOCATION_LINE (vect_location));
 
        loop_vinfo = vect_analyze_loop (loop);
        loop->aux = loop_vinfo;
@@ -116,30 +433,101 @@ vectorize_loops (void)
        if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
          continue;
 
-        if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOC
+        if (!dbg_cnt (vect_loop))
+         break;
+
+       gimple loop_vectorized_call = vect_loop_vectorized_call (loop);
+       if (loop_vectorized_call)
+         {
+           tree arg = gimple_call_arg (loop_vectorized_call, 1);
+           basic_block *bbs;
+           unsigned int i;
+           struct loop *scalar_loop = get_loop (cfun, tree_to_shwi (arg));
+
+           LOOP_VINFO_SCALAR_LOOP (loop_vinfo) = scalar_loop;
+           gcc_checking_assert (vect_loop_vectorized_call
+                                       (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
+                                == loop_vectorized_call);
+           bbs = get_loop_body (scalar_loop);
+           for (i = 0; i < scalar_loop->num_nodes; i++)
+             {
+               basic_block bb = bbs[i];
+               gimple_stmt_iterator gsi;
+               for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
+                    gsi_next (&gsi))
+                 {
+                   gimple phi = gsi_stmt (gsi);
+                   gimple_set_uid (phi, 0);
+                 }
+               for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
+                    gsi_next (&gsi))
+                 {
+                   gimple stmt = gsi_stmt (gsi);
+                   gimple_set_uid (stmt, 0);
+                 }
+             }
+           free (bbs);
+         }
+
+        if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOCATION
            && dump_enabled_p ())
-          dump_printf (MSG_ALL, "\n\nVectorizing loop at %s:%d\n",
-                       LOC_FILE (vect_location), LOC_LINE (vect_location));
+          dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
+                           "loop vectorized\n");
        vect_transform_loop (loop_vinfo);
        num_vectorized_loops++;
+       /* Now that the loop has been vectorized, allow it to be unrolled
+          etc.  */
+       loop->force_vectorize = false;
+
+       if (loop->simduid)
+         {
+           simduid_to_vf *simduid_to_vf_data = XNEW (simduid_to_vf);
+           if (!simduid_to_vf_htab.is_created ())
+             simduid_to_vf_htab.create (15);
+           simduid_to_vf_data->simduid = DECL_UID (loop->simduid);
+           simduid_to_vf_data->vf = loop_vinfo->vectorization_factor;
+           *simduid_to_vf_htab.find_slot (simduid_to_vf_data, INSERT)
+             = simduid_to_vf_data;
+         }
+
+       if (loop_vectorized_call)
+         {
+           fold_loop_vectorized_call (loop_vectorized_call, boolean_true_node);
+           ret |= TODO_cleanup_cfg;
+         }
       }
 
-  vect_location = UNKNOWN_LOC;
+  vect_location = UNKNOWN_LOCATION;
 
   statistics_counter_event (cfun, "Vectorized loops", num_vectorized_loops);
   if (dump_enabled_p ()
       || (num_vectorized_loops > 0 && dump_enabled_p ()))
-    dump_printf_loc (MSG_ALL, vect_location,
+    dump_printf_loc (MSG_NOTE, vect_location,
                      "vectorized %u loops in function.\n",
                      num_vectorized_loops);
 
   /*  ----------- Finalize. -----------  */
 
+  if (any_ifcvt_loops)
+    for (i = 1; i < vect_loops_num; i++)
+      {
+       loop = get_loop (cfun, i);
+       if (loop && loop->dont_vectorize)
+         {
+           gimple g = vect_loop_vectorized_call (loop);
+           if (g)
+             {
+               fold_loop_vectorized_call (g, boolean_false_node);
+               ret |= TODO_cleanup_cfg;
+             }
+         }
+      }
+
   for (i = 1; i < vect_loops_num; i++)
     {
       loop_vec_info loop_vinfo;
 
-      loop = get_loop (i);
+      loop = get_loop (cfun, i);
       if (!loop)
        continue;
       loop_vinfo = (loop_vec_info) loop->aux;
@@ -149,6 +537,40 @@ vectorize_loops (void)
 
   free_stmt_vec_info_vec ();
 
+  /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE} builtins.  */
+  if (cfun->has_simduid_loops)
+    adjust_simduid_builtins (simduid_to_vf_htab);
+
+  /* Shrink any "omp simd array" temporary arrays to the
+     actual vectorization factors.  */
+  if (simd_array_to_simduid_htab.is_created ())
+    {
+      for (hash_table <simd_array_to_simduid>::iterator iter
+          = simd_array_to_simduid_htab.begin ();
+          iter != simd_array_to_simduid_htab.end (); ++iter)
+       if ((*iter).simduid != -1U)
+         {
+           tree decl = (*iter).decl;
+           int vf = 1;
+           if (simduid_to_vf_htab.is_created ())
+             {
+               simduid_to_vf *p = NULL, data;
+               data.simduid = (*iter).simduid;
+               p = simduid_to_vf_htab.find (&data);
+               if (p)
+                 vf = p->vf;
+             }
+           tree atype
+             = build_array_type_nelts (TREE_TYPE (TREE_TYPE (decl)), vf);
+           TREE_TYPE (decl) = atype;
+           relayout_decl (decl);
+         }
+
+      simd_array_to_simduid_htab.dispose ();
+    }
+  if (simduid_to_vf_htab.is_created ())
+    simduid_to_vf_htab.dispose ();
+
   if (num_vectorized_loops > 0)
     {
       /* If we vectorized any loop only virtual SSA form needs to be updated.
@@ -158,29 +580,62 @@ vectorize_loops (void)
       return TODO_cleanup_cfg;
     }
 
-  return 0;
+  return ret;
 }
 
 
 /*  Entry point to basic block SLP phase.  */
 
-static unsigned int
-execute_vect_slp (void)
+namespace {
+
+const pass_data pass_data_slp_vectorize =
+{
+  GIMPLE_PASS, /* type */
+  "slp", /* name */
+  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
+  true, /* has_execute */
+  TV_TREE_SLP_VECTORIZATION, /* tv_id */
+  ( PROP_ssa | PROP_cfg ), /* properties_required */
+  0, /* properties_provided */
+  0, /* properties_destroyed */
+  0, /* todo_flags_start */
+  ( TODO_verify_ssa | TODO_update_ssa
+    | TODO_verify_stmts ), /* todo_flags_finish */
+};
+
+class pass_slp_vectorize : public gimple_opt_pass
+{
+public:
+  pass_slp_vectorize (gcc::context *ctxt)
+    : gimple_opt_pass (pass_data_slp_vectorize, ctxt)
+  {}
+
+  /* opt_pass methods: */
+  virtual bool gate (function *) { return flag_tree_slp_vectorize != 0; }
+  virtual unsigned int execute (function *);
+
+}; // class pass_slp_vectorize
+
+unsigned int
+pass_slp_vectorize::execute (function *fun)
 {
   basic_block bb;
 
   init_stmt_vec_info_vec ();
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, fun)
     {
       vect_location = find_bb_location (bb);
 
       if (vect_slp_analyze_bb (bb))
         {
+          if (!dbg_cnt (vect_slp))
+            break;
+
           vect_slp_transform_bb (bb);
           if (dump_enabled_p ())
             dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
-                            "basic block vectorized using SLP\n");
+                            "basic block vectorized\n");
         }
     }
 
@@ -188,37 +643,13 @@ execute_vect_slp (void)
   return 0;
 }
 
-static bool
-gate_vect_slp (void)
-{
-  /* Apply SLP either if the vectorizer is on and the user didn't specify
-     whether to run SLP or not, or if the SLP flag was set by the user.  */
-  return ((flag_tree_vectorize != 0 && flag_tree_slp_vectorize != 0)
-          || flag_tree_slp_vectorize == 1);
-}
+} // anon namespace
 
-struct gimple_opt_pass pass_slp_vectorize =
+gimple_opt_pass *
+make_pass_slp_vectorize (gcc::context *ctxt)
 {
- {
-  GIMPLE_PASS,
-  "slp",                                /* name */
-  OPTGROUP_LOOP
-  | OPTGROUP_VEC,                       /* optinfo_flags */
-  gate_vect_slp,                        /* gate */
-  execute_vect_slp,                     /* execute */
-  NULL,                                 /* sub */
-  NULL,                                 /* next */
-  0,                                    /* static_pass_number */
-  TV_TREE_SLP_VECTORIZATION,            /* tv_id */
-  PROP_ssa | PROP_cfg,                  /* properties_required */
-  0,                                    /* properties_provided */
-  0,                                    /* properties_destroyed */
-  0,                                    /* todo_flags_start */
-  TODO_verify_ssa
-    | TODO_update_ssa
-    | TODO_verify_stmts                 /* todo_flags_finish */
- }
-};
+  return new pass_slp_vectorize (ctxt);
+}
 
 
 /* Increase alignment of global arrays to improve vectorization potential.
@@ -231,18 +662,18 @@ struct gimple_opt_pass pass_slp_vectorize =
 static unsigned int
 increase_alignment (void)
 {
-  struct varpool_node *vnode;
+  varpool_node *vnode;
 
-  vect_location = UNKNOWN_LOC;
+  vect_location = UNKNOWN_LOCATION;
 
   /* Increase the alignment of all global arrays for vectorization.  */
   FOR_EACH_DEFINED_VARIABLE (vnode)
     {
-      tree vectype, decl = vnode->symbol.decl;
+      tree vectype, decl = vnode->decl;
       tree t;
       unsigned int alignment;
 
-      t = TREE_TYPE(decl);
+      t = TREE_TYPE (decl);
       if (TREE_CODE (t) != ARRAY_TYPE)
         continue;
       vectype = get_vectype_for_scalar_type (strip_array_types (t));
@@ -265,30 +696,43 @@ increase_alignment (void)
 }
 
 
-static bool
-gate_increase_alignment (void)
+namespace {
+
+const pass_data pass_data_ipa_increase_alignment =
 {
-  return flag_section_anchors && flag_tree_vectorize;
-}
+  SIMPLE_IPA_PASS, /* type */
+  "increase_alignment", /* name */
+  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
+  true, /* has_execute */
+  TV_IPA_OPT, /* tv_id */
+  0, /* properties_required */
+  0, /* properties_provided */
+  0, /* properties_destroyed */
+  0, /* todo_flags_start */
+  0, /* todo_flags_finish */
+};
+
+class pass_ipa_increase_alignment : public simple_ipa_opt_pass
+{
+public:
+  pass_ipa_increase_alignment (gcc::context *ctxt)
+    : simple_ipa_opt_pass (pass_data_ipa_increase_alignment, ctxt)
+  {}
 
+  /* opt_pass methods: */
+  virtual bool gate (function *)
+    {
+      return flag_section_anchors && flag_tree_loop_vectorize;
+    }
+
+  virtual unsigned int execute (function *) { return increase_alignment (); }
+
+}; // class pass_ipa_increase_alignment
 
-struct simple_ipa_opt_pass pass_ipa_increase_alignment =
+} // anon namespace
+
+simple_ipa_opt_pass *
+make_pass_ipa_increase_alignment (gcc::context *ctxt)
 {
- {
-  SIMPLE_IPA_PASS,
-  "increase_alignment",                 /* name */
-  OPTGROUP_LOOP
-  | OPTGROUP_VEC,                       /* optinfo_flags */
-  gate_increase_alignment,              /* gate */
-  increase_alignment,                   /* execute */
-  NULL,                                 /* sub */
-  NULL,                                 /* next */
-  0,                                    /* static_pass_number */
-  TV_IPA_OPT,                           /* tv_id */
-  0,                                    /* properties_required */
-  0,                                    /* properties_provided */
-  0,                                    /* properties_destroyed */
-  0,                                    /* todo_flags_start */
-  0                                     /* todo_flags_finish */
- }
-};
+  return new pass_ipa_increase_alignment (ctxt);
+}