Fix PR c++/70347 (default member initializer not picked up by union)
[gcc.git] / libgomp / loop_ull.c
index de56ae0b7ced9ffb356741c7b902c62a97b5200c..fa902b464d59259c619f400ad0846f351d238383 100644
--- a/libgomp/loop_ull.c
+++ b/libgomp/loop_ull.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2005-2015 Free Software Foundation, Inc.
+/* Copyright (C) 2005-2016 Free Software Foundation, Inc.
    Contributed by Richard Henderson <rth@redhat.com>.
 
    This file is part of the GNU Offloading and Multi Processing Library
@@ -175,15 +175,15 @@ GOMP_loop_ull_runtime_start (bool up, gomp_ull start, gomp_ull end,
     {
     case GFS_STATIC:
       return gomp_loop_ull_static_start (up, start, end, incr,
-                                        icv->run_sched_modifier,
+                                        icv->run_sched_chunk_size,
                                         istart, iend);
     case GFS_DYNAMIC:
       return gomp_loop_ull_dynamic_start (up, start, end, incr,
-                                         icv->run_sched_modifier,
+                                         icv->run_sched_chunk_size,
                                          istart, iend);
     case GFS_GUIDED:
       return gomp_loop_ull_guided_start (up, start, end, incr,
-                                        icv->run_sched_modifier,
+                                        icv->run_sched_chunk_size,
                                         istart, iend);
     case GFS_AUTO:
       /* For now map to schedule(static), later on we could play with feedback
@@ -279,15 +279,15 @@ GOMP_loop_ull_ordered_runtime_start (bool up, gomp_ull start, gomp_ull end,
     {
     case GFS_STATIC:
       return gomp_loop_ull_ordered_static_start (up, start, end, incr,
-                                                icv->run_sched_modifier,
+                                                icv->run_sched_chunk_size,
                                                 istart, iend);
     case GFS_DYNAMIC:
       return gomp_loop_ull_ordered_dynamic_start (up, start, end, incr,
-                                                 icv->run_sched_modifier,
+                                                 icv->run_sched_chunk_size,
                                                  istart, iend);
     case GFS_GUIDED:
       return gomp_loop_ull_ordered_guided_start (up, start, end, incr,
-                                                icv->run_sched_modifier,
+                                                icv->run_sched_chunk_size,
                                                 istart, iend);
     case GFS_AUTO:
       /* For now map to schedule(static), later on we could play with feedback
@@ -299,6 +299,114 @@ GOMP_loop_ull_ordered_runtime_start (bool up, gomp_ull start, gomp_ull end,
     }
 }
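
As a minimal illustration (not part of this patch): the run_sched_modifier field renamed in the hunks above is the chunk-size component of the run-sched-var ICV, which OpenMP 4.5 calls chunk_size. It is what the schedule(runtime) dispatch above reads, and it can be set either through the OMP_SCHEDULE environment variable or from user code:

#include <omp.h>

int
main (void)
{
  /* Select the dynamic schedule (GFS_DYNAMIC) with a run-sched chunk size
     of 4 for subsequent schedule(runtime) loops; equivalent to setting
     OMP_SCHEDULE="dynamic,4" in the environment.  */
  omp_set_schedule (omp_sched_dynamic, 4);
  return 0;
}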
 
+/* The *_doacross_*_start routines are similar.  The only difference is that
+   this work-share construct is initialized to expect an ORDERED(N) - DOACROSS
+   section, and the worksharing loop iterates always from 0 to COUNTS[0] - 1
+   and the other COUNTS array elements tell the library the number of
+   iterations in the ordered inner loops.  */
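
As a minimal sketch of what drives these entry points (illustrative code, not from this patch), consider an ordered(2) doacross nest. For it the compiler would pass ncounts == 2 and counts == {63, 31}, the iteration counts of the two loops, and the work-sharing loop would then run over the normalized range 0 .. counts[0] - 1:

void
sketch (unsigned long long a[64][32])
{
  unsigned long long i, j;
#pragma omp parallel for ordered(2)
  for (i = 1; i < 64; i++)
    for (j = 1; j < 32; j++)
      {
        /* Wait until iterations (i-1, j) and (i, j-1) are done.  */
#pragma omp ordered depend(sink: i - 1, j) depend(sink: i, j - 1)
        a[i][j] = a[i - 1][j] + a[i][j - 1];
        /* Signal completion of iteration (i, j).  */
#pragma omp ordered depend(source)
      }
}

The depend(sink)/depend(source) points themselves map to the doacross wait/post entry points in libgomp's ordered.c (GOMP_doacross_ull_wait / GOMP_doacross_ull_post for this unsigned long long case).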
+
+static bool
+gomp_loop_ull_doacross_static_start (unsigned ncounts, gomp_ull *counts,
+                                    gomp_ull chunk_size, gomp_ull *istart,
+                                    gomp_ull *iend)
+{
+  struct gomp_thread *thr = gomp_thread ();
+
+  thr->ts.static_trip = 0;
+  if (gomp_work_share_start (false))
+    {
+      gomp_loop_ull_init (thr->ts.work_share, true, 0, counts[0], 1,
+                         GFS_STATIC, chunk_size);
+      gomp_doacross_ull_init (ncounts, counts, chunk_size);
+      gomp_work_share_init_done ();
+    }
+
+  return !gomp_iter_ull_static_next (istart, iend);
+}
+
+static bool
+gomp_loop_ull_doacross_dynamic_start (unsigned ncounts, gomp_ull *counts,
+                                     gomp_ull chunk_size, gomp_ull *istart,
+                                     gomp_ull *iend)
+{
+  struct gomp_thread *thr = gomp_thread ();
+  bool ret;
+
+  if (gomp_work_share_start (false))
+    {
+      gomp_loop_ull_init (thr->ts.work_share, true, 0, counts[0], 1,
+                         GFS_DYNAMIC, chunk_size);
+      gomp_doacross_ull_init (ncounts, counts, chunk_size);
+      gomp_work_share_init_done ();
+    }
+
+#if defined HAVE_SYNC_BUILTINS && defined __LP64__
+  ret = gomp_iter_ull_dynamic_next (istart, iend);
+#else
+  gomp_mutex_lock (&thr->ts.work_share->lock);
+  ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
+  gomp_mutex_unlock (&thr->ts.work_share->lock);
+#endif
+
+  return ret;
+}
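
(The HAVE_SYNC_BUILTINS && __LP64__ test here, and again in the guided variant below, selects the lock-free iterator when the target's __sync builtins can operate atomically on the 64-bit iteration counter; otherwise the thread falls back to taking the work-share lock around the *_locked iterator.)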
+
+static bool
+gomp_loop_ull_doacross_guided_start (unsigned ncounts, gomp_ull *counts,
+                                    gomp_ull chunk_size, gomp_ull *istart,
+                                    gomp_ull *iend)
+{
+  struct gomp_thread *thr = gomp_thread ();
+  bool ret;
+
+  if (gomp_work_share_start (false))
+    {
+      gomp_loop_ull_init (thr->ts.work_share, true, 0, counts[0], 1,
+                         GFS_GUIDED, chunk_size);
+      gomp_doacross_ull_init (ncounts, counts, chunk_size);
+      gomp_work_share_init_done ();
+    }
+
+#if defined HAVE_SYNC_BUILTINS && defined __LP64__
+  ret = gomp_iter_ull_guided_next (istart, iend);
+#else
+  gomp_mutex_lock (&thr->ts.work_share->lock);
+  ret = gomp_iter_ull_guided_next_locked (istart, iend);
+  gomp_mutex_unlock (&thr->ts.work_share->lock);
+#endif
+
+  return ret;
+}
+
+bool
+GOMP_loop_ull_doacross_runtime_start (unsigned ncounts, gomp_ull *counts,
+                                     gomp_ull *istart, gomp_ull *iend)
+{
+  struct gomp_task_icv *icv = gomp_icv (false);
+  switch (icv->run_sched_var)
+    {
+    case GFS_STATIC:
+      return gomp_loop_ull_doacross_static_start (ncounts, counts,
+                                                 icv->run_sched_chunk_size,
+                                                 istart, iend);
+    case GFS_DYNAMIC:
+      return gomp_loop_ull_doacross_dynamic_start (ncounts, counts,
+                                                  icv->run_sched_chunk_size,
+                                                  istart, iend);
+    case GFS_GUIDED:
+      return gomp_loop_ull_doacross_guided_start (ncounts, counts,
+                                                 icv->run_sched_chunk_size,
+                                                 istart, iend);
+    case GFS_AUTO:
+      /* For now map to schedule(static), later on we could play with feedback
+        driven choice.  */
+      return gomp_loop_ull_doacross_static_start (ncounts, counts,
+                                                 0, istart, iend);
+    default:
+      abort ();
+    }
+}
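
A rough sketch of the calling sequence compiler-generated code follows around these entry points for the nest shown earlier, under schedule(runtime). This is heavily simplified, assumes libgomp's internal libgomp_g.h for the GOMP_* prototypes, and omits the remapping of the normalized iteration back to the original iteration space:

#include "libgomp_g.h"

static void
doacross_thread_fn (void)
{
  unsigned long long counts[2] = { 63, 31 };
  unsigned long long istart, iend, i;

  if (GOMP_loop_ull_doacross_runtime_start (2, counts, &istart, &iend))
    do
      for (i = istart; i < iend; i++)
        {
          /* Body for normalized outer iteration i, bracketed by
             GOMP_doacross_ull_wait / GOMP_doacross_ull_post calls for the
             depend(sink) / depend(source) points.  */
        }
    while (GOMP_loop_ull_runtime_next (&istart, &iend));

  /* All threads, including those that never got a chunk, synchronize.  */
  GOMP_loop_end ();
}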
+
 /* The *_next routines are called when the thread completes processing of
    the iteration block currently assigned to it.  If the work-share
    construct is bound directly to a parallel construct, then the iteration
@@ -458,6 +566,10 @@ extern __typeof(gomp_loop_ull_dynamic_start) GOMP_loop_ull_dynamic_start
        __attribute__((alias ("gomp_loop_ull_dynamic_start")));
 extern __typeof(gomp_loop_ull_guided_start) GOMP_loop_ull_guided_start
        __attribute__((alias ("gomp_loop_ull_guided_start")));
+extern __typeof(gomp_loop_ull_dynamic_start) GOMP_loop_ull_nonmonotonic_dynamic_start
+       __attribute__((alias ("gomp_loop_ull_dynamic_start")));
+extern __typeof(gomp_loop_ull_guided_start) GOMP_loop_ull_nonmonotonic_guided_start
+       __attribute__((alias ("gomp_loop_ull_guided_start")));
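
The nonmonotonic entry points are new with OpenMP 4.5's schedule modifiers. Since nonmonotonic only relaxes the ordering guarantee on the chunks a thread receives, the existing dynamic and guided implementations remain conforming, and the new start/next symbols can simply alias them (or, when attribute aliases are unavailable, forward to them in the wrappers further down).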
 
 extern __typeof(gomp_loop_ull_ordered_static_start) GOMP_loop_ull_ordered_static_start
        __attribute__((alias ("gomp_loop_ull_ordered_static_start")));
@@ -466,12 +578,23 @@ extern __typeof(gomp_loop_ull_ordered_dynamic_start) GOMP_loop_ull_ordered_dynam
 extern __typeof(gomp_loop_ull_ordered_guided_start) GOMP_loop_ull_ordered_guided_start
        __attribute__((alias ("gomp_loop_ull_ordered_guided_start")));
 
+extern __typeof(gomp_loop_ull_doacross_static_start) GOMP_loop_ull_doacross_static_start
+       __attribute__((alias ("gomp_loop_ull_doacross_static_start")));
+extern __typeof(gomp_loop_ull_doacross_dynamic_start) GOMP_loop_ull_doacross_dynamic_start
+       __attribute__((alias ("gomp_loop_ull_doacross_dynamic_start")));
+extern __typeof(gomp_loop_ull_doacross_guided_start) GOMP_loop_ull_doacross_guided_start
+       __attribute__((alias ("gomp_loop_ull_doacross_guided_start")));
+
 extern __typeof(gomp_loop_ull_static_next) GOMP_loop_ull_static_next
        __attribute__((alias ("gomp_loop_ull_static_next")));
 extern __typeof(gomp_loop_ull_dynamic_next) GOMP_loop_ull_dynamic_next
        __attribute__((alias ("gomp_loop_ull_dynamic_next")));
 extern __typeof(gomp_loop_ull_guided_next) GOMP_loop_ull_guided_next
        __attribute__((alias ("gomp_loop_ull_guided_next")));
+extern __typeof(gomp_loop_ull_dynamic_next) GOMP_loop_ull_nonmonotonic_dynamic_next
+       __attribute__((alias ("gomp_loop_ull_dynamic_next")));
+extern __typeof(gomp_loop_ull_guided_next) GOMP_loop_ull_nonmonotonic_guided_next
+       __attribute__((alias ("gomp_loop_ull_guided_next")));
 
 extern __typeof(gomp_loop_ull_ordered_static_next) GOMP_loop_ull_ordered_static_next
        __attribute__((alias ("gomp_loop_ull_ordered_static_next")));
@@ -507,6 +630,25 @@ GOMP_loop_ull_guided_start (bool up, gomp_ull start, gomp_ull end,
                                     iend);
 }
 
+bool
+GOMP_loop_ull_nonmonotonic_dynamic_start (bool up, gomp_ull start,
+                                         gomp_ull end, gomp_ull incr,
+                                         gomp_ull chunk_size,
+                                         gomp_ull *istart, gomp_ull *iend)
+{
+  return gomp_loop_ull_dynamic_start (up, start, end, incr, chunk_size, istart,
+                                     iend);
+}
+
+bool
+GOMP_loop_ull_nonmonotonic_guided_start (bool up, gomp_ull start, gomp_ull end,
+                                        gomp_ull incr, gomp_ull chunk_size,
+                                        gomp_ull *istart, gomp_ull *iend)
+{
+  return gomp_loop_ull_guided_start (up, start, end, incr, chunk_size, istart,
+                                    iend);
+}
+
 bool
 GOMP_loop_ull_ordered_static_start (bool up, gomp_ull start, gomp_ull end,
                                    gomp_ull incr, gomp_ull chunk_size,
@@ -534,6 +676,33 @@ GOMP_loop_ull_ordered_guided_start (bool up, gomp_ull start, gomp_ull end,
                                             istart, iend);
 }
 
+bool
+GOMP_loop_ull_doacross_static_start (unsigned ncounts, gomp_ull *counts,
+                                    gomp_ull chunk_size, gomp_ull *istart,
+                                    gomp_ull *iend)
+{
+  return gomp_loop_ull_doacross_static_start (ncounts, counts, chunk_size,
+                                             istart, iend);
+}
+
+bool
+GOMP_loop_ull_doacross_dynamic_start (unsigned ncounts, gomp_ull *counts,
+                                     gomp_ull chunk_size, gomp_ull *istart,
+                                     gomp_ull *iend)
+{
+  return gomp_loop_ull_doacross_dynamic_start (ncounts, counts, chunk_size,
+                                              istart, iend);
+}
+
+bool
+GOMP_loop_ull_doacross_guided_start (unsigned ncounts, gomp_ull *counts,
+                                    gomp_ull chunk_size, gomp_ull *istart,
+                                    gomp_ull *iend)
+{
+  return gomp_loop_ull_doacross_guided_start (ncounts, counts, chunk_size,
+                                             istart, iend);
+}
+
 bool
 GOMP_loop_ull_static_next (gomp_ull *istart, gomp_ull *iend)
 {
@@ -552,6 +721,18 @@ GOMP_loop_ull_guided_next (gomp_ull *istart, gomp_ull *iend)
   return gomp_loop_ull_guided_next (istart, iend);
 }
 
+bool
+GOMP_loop_ull_nonmonotonic_dynamic_next (gomp_ull *istart, gomp_ull *iend)
+{
+  return gomp_loop_ull_dynamic_next (istart, iend);
+}
+
+bool
+GOMP_loop_ull_nonmonotonic_guided_next (gomp_ull *istart, gomp_ull *iend)
+{
+  return gomp_loop_ull_guided_next (istart, iend);
+}
+
 bool
 GOMP_loop_ull_ordered_static_next (gomp_ull *istart, gomp_ull *iend)
 {