re PR target/80090 (Incorrect assembler - output_addr_const may generate visibility...
[gcc.git] / libgomp / team.c
index c99413502d5d0d65cf031f5ab9d7ce2b8125cfe5..676614ae5d011a73841ebe0b244ff535d0112bb6 100644 (file)
@@ -1,56 +1,46 @@
-/* Copyright (C) 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2005-2017 Free Software Foundation, Inc.
    Contributed by Richard Henderson <rth@redhat.com>.
 
-   This file is part of the GNU OpenMP Library (libgomp).
+   This file is part of the GNU Offloading and Multi Processing Library
+   (libgomp).
 
    Libgomp is free software; you can redistribute it and/or modify it
-   under the terms of the GNU Lesser General Public License as published by
-   the Free Software Foundation; either version 2.1 of the License, or
-   (at your option) any later version.
+   under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3, or (at your option)
+   any later version.
 
    Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
    WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-   FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for
+   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    more details.
 
-   You should have received a copy of the GNU Lesser General Public License 
-   along with libgomp; see the file COPYING.LIB.  If not, write to the
-   Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
-   MA 02110-1301, USA.  */
+   Under Section 7 of GPL version 3, you are granted additional
+   permissions described in the GCC Runtime Library Exception, version
+   3.1, as published by the Free Software Foundation.
 
-/* As a special exception, if you link this library with other files, some
-   of which are compiled with GCC, to produce an executable, this library
-   does not by itself cause the resulting executable to be covered by the
-   GNU General Public License.  This exception does not however invalidate
-   any other reasons why the executable file might be covered by the GNU
-   General Public License.  */
+   You should have received a copy of the GNU General Public License and
+   a copy of the GCC Runtime Library Exception along with this program;
+   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+   <http://www.gnu.org/licenses/>.  */
 
 /* This file handles the maintenance of threads in response to team
    creation and termination.  */
 
 #include "libgomp.h"
+#include "pool.h"
 #include <stdlib.h>
 #include <string.h>
 
-#ifdef HAVE_ALLOCA_H
-# include <alloca.h>
-#endif
-
-
-/* This array manages threads spawned from the top level, which will
-   return to the idle loop once the current PARALLEL construct ends.  */
-static struct gomp_thread **gomp_threads;
-static unsigned gomp_threads_size;
-static unsigned gomp_threads_used;
-
+#ifdef LIBGOMP_USE_PTHREADS
 /* This attribute contains PTHREAD_CREATE_DETACHED.  */
-static pthread_attr_t gomp_thread_attr;
+pthread_attr_t gomp_thread_attr;
+
+/* This key is for the thread destructor.  */
+pthread_key_t gomp_thread_destructor;
 
-/* This barrier holds and releases threads waiting in gomp_threads.  */
-static gomp_barrier_t gomp_threads_dock;
 
 /* This is the libgomp per-thread data structure.  */
-#ifdef HAVE_TLS
+#if defined HAVE_TLS || defined USE_EMUTLS
 __thread struct gomp_thread gomp_tls_data;
 #else
 pthread_key_t gomp_tls_key;
@@ -61,9 +51,12 @@ pthread_key_t gomp_tls_key;
 
 struct gomp_thread_start_data
 {
-  struct gomp_team_state ts;
   void (*fn) (void *);
   void *fn_data;
+  struct gomp_team_state ts;
+  struct gomp_task *task;
+  struct gomp_thread_pool *thread_pool;
+  unsigned int place;
   bool nested;
 };
 
@@ -76,10 +69,11 @@ gomp_thread_start (void *xdata)
 {
   struct gomp_thread_start_data *data = xdata;
   struct gomp_thread *thr;
+  struct gomp_thread_pool *pool;
   void (*local_fn) (void *);
   void *local_data;
 
-#ifdef HAVE_TLS
+#if defined HAVE_TLS || defined USE_EMUTLS
   thr = &gomp_tls_data;
 #else
   struct gomp_thread local_thr;
@@ -91,75 +85,123 @@ gomp_thread_start (void *xdata)
   /* Extract what we need from data.  */
   local_fn = data->fn;
   local_data = data->fn_data;
+  thr->thread_pool = data->thread_pool;
   thr->ts = data->ts;
+  thr->task = data->task;
+  thr->place = data->place;
 
   thr->ts.team->ordered_release[thr->ts.team_id] = &thr->release;
 
+  /* Make thread pool local.  */
+  pool = thr->thread_pool;
+
   if (data->nested)
     {
-      gomp_barrier_wait (&thr->ts.team->barrier);
+      struct gomp_team *team = thr->ts.team;
+      struct gomp_task *task = thr->task;
+
+      gomp_barrier_wait (&team->barrier);
+
       local_fn (local_data);
-      gomp_barrier_wait (&thr->ts.team->barrier);
+      gomp_team_barrier_wait_final (&team->barrier);
+      gomp_finish_task (task);
+      gomp_barrier_wait_last (&team->barrier);
     }
   else
     {
-      gomp_threads[thr->ts.team_id] = thr;
+      pool->threads[thr->ts.team_id] = thr;
 
-      gomp_barrier_wait (&gomp_threads_dock);
+      gomp_simple_barrier_wait (&pool->threads_dock);
       do
        {
-         struct gomp_team *team;
+         struct gomp_team *team = thr->ts.team;
+         struct gomp_task *task = thr->task;
 
          local_fn (local_data);
+         gomp_team_barrier_wait_final (&team->barrier);
+         gomp_finish_task (task);
 
-         /* Clear out the team and function data.  This is a debugging
-            signal that we're in fact back in the dock.  */
-         team = thr->ts.team;
-         thr->fn = NULL;
-         thr->data = NULL;
-         thr->ts.team = NULL;
-         thr->ts.work_share = NULL;
-         thr->ts.team_id = 0;
-         thr->ts.work_share_generation = 0;
-         thr->ts.static_trip = 0;
-
-         gomp_barrier_wait (&team->barrier);
-         gomp_barrier_wait (&gomp_threads_dock);
+         gomp_simple_barrier_wait (&pool->threads_dock);
 
          local_fn = thr->fn;
          local_data = thr->data;
+         thr->fn = NULL;
        }
       while (local_fn);
     }
 
+  gomp_sem_destroy (&thr->release);
+  thr->thread_pool = NULL;
+  thr->task = NULL;
   return NULL;
 }
+#endif
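
For intuition, here is a minimal, self-contained sketch of the docking protocol the non-nested branch above implements: workers park on a barrier, the master publishes a function and releases the dock, and a NULL function tells a worker to exit, mirroring the while (local_fn) loop. All names (dock, work_fn, worker) are hypothetical, and plain pthread barriers stand in for gomp_simple_barrier; this is not the libgomp API.

#include <pthread.h>
#include <stdio.h>

#define NWORKERS 4

static pthread_barrier_t dock;             /* stands in for pool->threads_dock */
static void (*volatile work_fn) (void *);  /* stands in for thr->fn */
static void *volatile work_data;           /* stands in for thr->data */

static void *
worker (void *arg)
{
  (void) arg;
  for (;;)
    {
      pthread_barrier_wait (&dock);        /* dock: wait to be released */
      void (*fn) (void *) = work_fn;
      if (fn == NULL)                      /* NULL fn means exit, like the */
        return NULL;                       /* while (local_fn) loop above  */
      fn (work_data);
      pthread_barrier_wait (&dock);        /* region done: re-dock */
    }
}

static void
region (void *data)
{
  printf ("running region with %p\n", data);
}

int
main (void)
{
  pthread_t tid[NWORKERS];
  int i;

  pthread_barrier_init (&dock, NULL, NWORKERS + 1);
  for (i = 0; i < NWORKERS; i++)
    pthread_create (&tid[i], NULL, worker, NULL);

  work_fn = region;                        /* publish work, then release */
  work_data = (void *) &dock;
  pthread_barrier_wait (&dock);
  pthread_barrier_wait (&dock);            /* wait until all re-dock */

  work_fn = NULL;                          /* poison: let workers exit */
  pthread_barrier_wait (&dock);
  for (i = 0; i < NWORKERS; i++)
    pthread_join (tid[i], NULL);
  pthread_barrier_destroy (&dock);
  return 0;
}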
 
+static inline struct gomp_team *
+get_last_team (unsigned nthreads)
+{
+  struct gomp_thread *thr = gomp_thread ();
+  if (thr->ts.team == NULL)
+    {
+      struct gomp_thread_pool *pool = gomp_get_thread_pool (thr, nthreads);
+      struct gomp_team *last_team = pool->last_team;
+      if (last_team != NULL && last_team->nthreads == nthreads)
+        {
+          pool->last_team = NULL;
+          return last_team;
+        }
+    }
+  return NULL;
+}
 
 /* Create a new team data structure.  */
 
-static struct gomp_team *
-new_team (unsigned nthreads, struct gomp_work_share *work_share)
+struct gomp_team *
+gomp_new_team (unsigned nthreads)
 {
   struct gomp_team *team;
-  size_t size;
+  int i;
 
-  size = sizeof (*team) + nthreads * sizeof (team->ordered_release[0]);
-  team = gomp_malloc (size);
-  gomp_mutex_init (&team->work_share_lock);
+  team = get_last_team (nthreads);
+  if (team == NULL)
+    {
+      size_t extra = sizeof (team->ordered_release[0])
+                    + sizeof (team->implicit_task[0]);
+      team = gomp_malloc (sizeof (*team) + nthreads * extra);
 
-  team->work_shares = gomp_malloc (4 * sizeof (struct gomp_work_share *));
-  team->generation_mask = 3;
-  team->oldest_live_gen = work_share == NULL;
-  team->num_live_gen = work_share != NULL;
-  team->work_shares[0] = work_share;
+#ifndef HAVE_SYNC_BUILTINS
+      gomp_mutex_init (&team->work_share_list_free_lock);
+#endif
+      gomp_barrier_init (&team->barrier, nthreads);
+      gomp_mutex_init (&team->task_lock);
 
-  team->nthreads = nthreads;
-  gomp_barrier_init (&team->barrier, nthreads);
+      team->nthreads = nthreads;
+    }
+
+  team->work_share_chunk = 8;
+#ifdef HAVE_SYNC_BUILTINS
+  team->single_count = 0;
+#endif
+  team->work_shares_to_free = &team->work_shares[0];
+  gomp_init_work_share (&team->work_shares[0], false, nthreads);
+  team->work_shares[0].next_alloc = NULL;
+  team->work_share_list_free = NULL;
+  team->work_share_list_alloc = &team->work_shares[1];
+  for (i = 1; i < 7; i++)
+    team->work_shares[i].next_free = &team->work_shares[i + 1];
+  team->work_shares[i].next_free = NULL;
 
   gomp_sem_init (&team->master_release, 0);
+  team->ordered_release = (void *) &team->implicit_task[nthreads];
   team->ordered_release[0] = &team->master_release;
 
+  priority_queue_init (&team->task_queue);
+  team->task_count = 0;
+  team->task_queued_count = 0;
+  team->task_running_count = 0;
+  team->work_share_cancelled = 0;
+  team->team_cancelled = 0;
+
   return team;
 }
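
A note on the allocation above: gomp_new_team draws the team, its implicit_task array, and the ordered_release pointer array from one gomp_malloc, then aims ordered_release just past the tasks. A stripped-down sketch of that trailing-storage layout, with hypothetical struct names:

#include <stdlib.h>

struct task { void *fn; };                /* stands in for gomp_task */

struct team                               /* stands in for gomp_team */
{
  unsigned nthreads;
  void **ordered_release;                 /* points into the same block */
  struct task implicit_task[];            /* flexible array member */
};

static struct team *
new_team (unsigned nthreads)
{
  /* One allocation: the team itself, nthreads tasks, nthreads pointers.  */
  size_t extra = sizeof (struct task) + sizeof (void *);
  struct team *t = malloc (sizeof (*t) + nthreads * extra);
  if (t == NULL)
    return NULL;
  t->nthreads = nthreads;
  /* The pointer array starts right after the last implicit task.  */
  t->ordered_release = (void **) &t->implicit_task[nthreads];
  return t;
}

int
main (void)
{
  struct team *t = new_team (8);
  if (t == NULL)
    return 1;
  t->ordered_release[0] = &t->implicit_task[0]; /* like the master slot */
  free (t);
  return 0;
}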
 
@@ -169,30 +211,115 @@ new_team (unsigned nthreads, struct gomp_work_share *work_share)
 static void
 free_team (struct gomp_team *team)
 {
-  free (team->work_shares);
-  gomp_mutex_destroy (&team->work_share_lock);
+#ifndef HAVE_SYNC_BUILTINS
+  gomp_mutex_destroy (&team->work_share_list_free_lock);
+#endif
   gomp_barrier_destroy (&team->barrier);
-  gomp_sem_destroy (&team->master_release);
+  gomp_mutex_destroy (&team->task_lock);
+  priority_queue_free (&team->task_queue);
   free (team);
 }
 
+static void
+gomp_free_pool_helper (void *thread_pool)
+{
+  struct gomp_thread *thr = gomp_thread ();
+  struct gomp_thread_pool *pool
+    = (struct gomp_thread_pool *) thread_pool;
+  gomp_simple_barrier_wait_last (&pool->threads_dock);
+  gomp_sem_destroy (&thr->release);
+  thr->thread_pool = NULL;
+  thr->task = NULL;
+#ifdef LIBGOMP_USE_PTHREADS
+  pthread_exit (NULL);
+#elif defined(__nvptx__)
+  asm ("exit;");
+#else
+#error gomp_free_pool_helper must terminate the thread
+#endif
+}
+
+/* Free a thread pool and release its threads.  */
+
+void
+gomp_free_thread (void *arg __attribute__((unused)))
+{
+  struct gomp_thread *thr = gomp_thread ();
+  struct gomp_thread_pool *pool = thr->thread_pool;
+  if (pool)
+    {
+      if (pool->threads_used > 0)
+       {
+         int i;
+         for (i = 1; i < pool->threads_used; i++)
+           {
+             struct gomp_thread *nthr = pool->threads[i];
+             nthr->fn = gomp_free_pool_helper;
+             nthr->data = pool;
+           }
+         /* This barrier undocks threads docked on pool->threads_dock.  */
+         gomp_simple_barrier_wait (&pool->threads_dock);
+         /* And this waits till all threads have called gomp_barrier_wait_last
+            in gomp_free_pool_helper.  */
+         gomp_simple_barrier_wait (&pool->threads_dock);
+         /* Now it is safe to destroy the barrier and free the pool.  */
+         gomp_simple_barrier_destroy (&pool->threads_dock);
+
+#ifdef HAVE_SYNC_BUILTINS
+         __sync_fetch_and_add (&gomp_managed_threads,
+                               1L - pool->threads_used);
+#else
+         gomp_mutex_lock (&gomp_managed_threads_lock);
+         gomp_managed_threads -= pool->threads_used - 1L;
+         gomp_mutex_unlock (&gomp_managed_threads_lock);
+#endif
+       }
+      if (pool->last_team)
+       free_team (pool->last_team);
+#ifndef __nvptx__
+      free (pool->threads);
+      free (pool);
+#endif
+      thr->thread_pool = NULL;
+    }
+  if (thr->ts.level == 0 && __builtin_expect (thr->ts.team != NULL, 0))
+    gomp_team_end ();
+  if (thr->task != NULL)
+    {
+      struct gomp_task *task = thr->task;
+      gomp_end_task ();
+      free (task);
+    }
+}
 
 /* Launch a team.  */
 
+#ifdef LIBGOMP_USE_PTHREADS
 void
 gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
-                struct gomp_work_share *work_share)
+                unsigned flags, struct gomp_team *team)
 {
   struct gomp_thread_start_data *start_data;
   struct gomp_thread *thr, *nthr;
-  struct gomp_team *team;
+  struct gomp_task *task;
+  struct gomp_task_icv *icv;
   bool nested;
+  struct gomp_thread_pool *pool;
   unsigned i, n, old_threads_used = 0;
+  pthread_attr_t thread_attr, *attr;
+  unsigned long nthreads_var;
+  char bind, bind_var;
+  unsigned int s = 0, rest = 0, p = 0, k = 0;
+  unsigned int affinity_count = 0;
+  struct gomp_thread **affinity_thr = NULL;
 
   thr = gomp_thread ();
-  nested = thr->ts.team != NULL;
-
-  team = new_team (nthreads, work_share);
+  nested = thr->ts.level;
+  pool = thr->thread_pool;
+  task = thr->task;
+  icv = task ? &task->icv : &gomp_global_icv;
+  if (__builtin_expect (gomp_places_list != NULL, 0) && thr->place == 0)
+    gomp_init_affinity ();
 
   /* Always save the previous state, even if this isn't a nested team.
      In particular, we should save any work share state from an outer
@@ -200,16 +327,105 @@ gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
   team->prev_ts = thr->ts;
 
   thr->ts.team = team;
-  thr->ts.work_share = work_share;
   thr->ts.team_id = 0;
-  thr->ts.work_share_generation = 0;
+  ++thr->ts.level;
+  if (nthreads > 1)
+    ++thr->ts.active_level;
+  thr->ts.work_share = &team->work_shares[0];
+  thr->ts.last_work_share = NULL;
+#ifdef HAVE_SYNC_BUILTINS
+  thr->ts.single_count = 0;
+#endif
   thr->ts.static_trip = 0;
+  thr->task = &team->implicit_task[0];
+  nthreads_var = icv->nthreads_var;
+  if (__builtin_expect (gomp_nthreads_var_list != NULL, 0)
+      && thr->ts.level < gomp_nthreads_var_list_len)
+    nthreads_var = gomp_nthreads_var_list[thr->ts.level];
+  bind_var = icv->bind_var;
+  if (bind_var != omp_proc_bind_false && (flags & 7) != omp_proc_bind_false)
+    bind_var = flags & 7;
+  bind = bind_var;
+  if (__builtin_expect (gomp_bind_var_list != NULL, 0)
+      && thr->ts.level < gomp_bind_var_list_len)
+    bind_var = gomp_bind_var_list[thr->ts.level];
+  gomp_init_task (thr->task, task, icv);
+  team->implicit_task[0].icv.nthreads_var = nthreads_var;
+  team->implicit_task[0].icv.bind_var = bind_var;
 
   if (nthreads == 1)
     return;
 
   i = 1;
 
+  if (__builtin_expect (gomp_places_list != NULL, 0))
+    {
+      /* Depending on chosen proc_bind model, set subpartition
+        for the master thread and initialize helper variables
+        P and optionally S, K and/or REST used by later place
+        computation for each additional thread.  */
+      p = thr->place - 1;
+      switch (bind)
+       {
+       case omp_proc_bind_true:
+       case omp_proc_bind_close:
+         if (nthreads > thr->ts.place_partition_len)
+           {
+             /* T > P.  S threads will be placed in each place,
+                and the final REM threads placed one by one
+                into the already occupied places.  */
+             s = nthreads / thr->ts.place_partition_len;
+             rest = nthreads % thr->ts.place_partition_len;
+           }
+         else
+           s = 1;
+         k = 1;
+         break;
+       case omp_proc_bind_master:
+         /* Each thread will be bound to master's place.  */
+         break;
+       case omp_proc_bind_spread:
+         if (nthreads <= thr->ts.place_partition_len)
+           {
+             /* T <= P.  Each subpartition will have in between s
+                and s+1 places (subpartitions starting at or
+                after rest will have s places, earlier s+1 places),
+                each thread will be bound to the first place in
+                its subpartition (except for the master thread
+                that can be bound to another place in its
+                subpartition).  */
+             s = thr->ts.place_partition_len / nthreads;
+             rest = thr->ts.place_partition_len % nthreads;
+             rest = (s + 1) * rest + thr->ts.place_partition_off;
+             if (p < rest)
+               {
+                 p -= (p - thr->ts.place_partition_off) % (s + 1);
+                 thr->ts.place_partition_len = s + 1;
+               }
+             else
+               {
+                 p -= (p - rest) % s;
+                 thr->ts.place_partition_len = s;
+               }
+             thr->ts.place_partition_off = p;
+           }
+         else
+           {
+             /* T > P.  Each subpartition will have just a single
+                place and we'll place between s and s+1
+                threads into each subpartition.  */
+             s = nthreads / thr->ts.place_partition_len;
+             rest = nthreads % thr->ts.place_partition_len;
+             thr->ts.place_partition_off = p;
+             thr->ts.place_partition_len = 1;
+             k = 1;
+           }
+         break;
+       }
+    }
+  else
+    bind = omp_proc_bind_false;
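
The close/true arithmetic above is easier to see in isolation: with T threads over P places, every place receives s = T/P threads and the remaining rest = T%P threads are placed one by one into already occupied places, so rest places end up holding s + 1. A toy model of that distribution (hypothetical helper; the actual loops below compute places incrementally via k and p):

#include <stdio.h>

/* Map thread i (0-based) to a place index, for T threads over P places,
   filling the first T%P places with one extra thread each.  */
static unsigned
place_of_thread (unsigned i, unsigned T, unsigned P)
{
  unsigned s = T / P, rest = T % P;
  if (i < rest * (s + 1))
    return i / (s + 1);                  /* one of the fuller places */
  return rest + (i - rest * (s + 1)) / s;
}

int
main (void)
{
  /* 10 threads over 4 places: 3, 3, 2, 2 threads per place.  */
  unsigned i;
  for (i = 0; i < 10; i++)
    printf ("thread %u -> place %u\n", i, place_of_thread (i, 10, 4));
  return 0;
}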
+
   /* We only allow the reuse of idle threads for non-nested PARALLEL
      regions.  This appears to be implied by the semantics of
      threadprivate variables, but perhaps that's reading too much into
@@ -217,14 +433,14 @@ gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
      only the initial program thread will modify gomp_threads.  */
   if (!nested)
     {
-      old_threads_used = gomp_threads_used;
+      old_threads_used = pool->threads_used;
 
       if (nthreads <= old_threads_used)
        n = nthreads;
       else if (old_threads_used == 0)
        {
          n = 0;
-         gomp_barrier_init (&gomp_threads_dock, nthreads);
+         gomp_simple_barrier_init (&pool->threads_dock, nthreads);
        }
       else
        {
@@ -232,77 +448,417 @@ gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
 
          /* Increase the barrier threshold to make sure all new
             threads arrive before the team is released.  */
-         gomp_barrier_reinit (&gomp_threads_dock, nthreads);
+         gomp_simple_barrier_reinit (&pool->threads_dock, nthreads);
        }
 
       /* Not true yet, but soon will be.  We're going to release all
-        threads from the dock, and those that aren't part of the 
+        threads from the dock, and those that aren't part of the
         team will exit.  */
-      gomp_threads_used = nthreads;
+      pool->threads_used = nthreads;
+
+      /* If necessary, expand the size of the pool->threads array.  It is
+        expected that changes in the number of threads are rare, thus we
+        make no effort to expand pool->threads_size geometrically.  */
+      if (nthreads >= pool->threads_size)
+       {
+         pool->threads_size = nthreads + 1;
+         pool->threads
+           = gomp_realloc (pool->threads,
+                           pool->threads_size
+                           * sizeof (struct gomp_thread_data *));
+       }
 
       /* Release existing idle threads.  */
       for (; i < n; ++i)
        {
-         nthr = gomp_threads[i];
+         unsigned int place_partition_off = thr->ts.place_partition_off;
+         unsigned int place_partition_len = thr->ts.place_partition_len;
+         unsigned int place = 0;
+         if (__builtin_expect (gomp_places_list != NULL, 0))
+           {
+             switch (bind)
+               {
+               case omp_proc_bind_true:
+               case omp_proc_bind_close:
+                 if (k == s)
+                   {
+                     ++p;
+                     if (p == (team->prev_ts.place_partition_off
+                               + team->prev_ts.place_partition_len))
+                       p = team->prev_ts.place_partition_off;
+                     k = 1;
+                     if (i == nthreads - rest)
+                       s = 1;
+                   }
+                 else
+                   ++k;
+                 break;
+               case omp_proc_bind_master:
+                 break;
+               case omp_proc_bind_spread:
+                 if (k == 0)
+                   {
+                     /* T <= P.  */
+                     if (p < rest)
+                       p += s + 1;
+                     else
+                       p += s;
+                     if (p == (team->prev_ts.place_partition_off
+                               + team->prev_ts.place_partition_len))
+                       p = team->prev_ts.place_partition_off;
+                     place_partition_off = p;
+                     if (p < rest)
+                       place_partition_len = s + 1;
+                     else
+                       place_partition_len = s;
+                   }
+                 else
+                   {
+                     /* T > P.  */
+                     if (k == s)
+                       {
+                         ++p;
+                         if (p == (team->prev_ts.place_partition_off
+                                   + team->prev_ts.place_partition_len))
+                           p = team->prev_ts.place_partition_off;
+                         k = 1;
+                         if (i == nthreads - rest)
+                           s = 1;
+                       }
+                     else
+                       ++k;
+                     place_partition_off = p;
+                     place_partition_len = 1;
+                   }
+                 break;
+               }
+             if (affinity_thr != NULL
+                 || (bind != omp_proc_bind_true
+                     && pool->threads[i]->place != p + 1)
+                 || pool->threads[i]->place <= place_partition_off
+                 || pool->threads[i]->place > (place_partition_off
+                                               + place_partition_len))
+               {
+                 unsigned int l;
+                 if (affinity_thr == NULL)
+                   {
+                     unsigned int j;
+
+                     if (team->prev_ts.place_partition_len > 64)
+                       affinity_thr
+                         = gomp_malloc (team->prev_ts.place_partition_len
+                                        * sizeof (struct gomp_thread *));
+                     else
+                       affinity_thr
+                         = gomp_alloca (team->prev_ts.place_partition_len
+                                        * sizeof (struct gomp_thread *));
+                     memset (affinity_thr, '\0',
+                             team->prev_ts.place_partition_len
+                             * sizeof (struct gomp_thread *));
+                     for (j = i; j < old_threads_used; j++)
+                       {
+                         if (pool->threads[j]->place
+                             > team->prev_ts.place_partition_off
+                             && (pool->threads[j]->place
+                                 <= (team->prev_ts.place_partition_off
+                                     + team->prev_ts.place_partition_len)))
+                           {
+                             l = pool->threads[j]->place - 1
+                                 - team->prev_ts.place_partition_off;
+                             pool->threads[j]->data = affinity_thr[l];
+                             affinity_thr[l] = pool->threads[j];
+                           }
+                         pool->threads[j] = NULL;
+                       }
+                     if (nthreads > old_threads_used)
+                       memset (&pool->threads[old_threads_used],
+                               '\0', ((nthreads - old_threads_used)
+                                      * sizeof (struct gomp_thread *)));
+                     n = nthreads;
+                     affinity_count = old_threads_used - i;
+                   }
+                 if (affinity_count == 0)
+                   break;
+                 l = p;
+                 if (affinity_thr[l - team->prev_ts.place_partition_off]
+                     == NULL)
+                   {
+                     if (bind != omp_proc_bind_true)
+                       continue;
+                     for (l = place_partition_off;
+                          l < place_partition_off + place_partition_len;
+                          l++)
+                       if (affinity_thr[l - team->prev_ts.place_partition_off]
+                           != NULL)
+                         break;
+                     if (l == place_partition_off + place_partition_len)
+                       continue;
+                   }
+                 nthr = affinity_thr[l - team->prev_ts.place_partition_off];
+                 affinity_thr[l - team->prev_ts.place_partition_off]
+                   = (struct gomp_thread *) nthr->data;
+                 affinity_count--;
+                 pool->threads[i] = nthr;
+               }
+             else
+               nthr = pool->threads[i];
+             place = p + 1;
+           }
+         else
+           nthr = pool->threads[i];
          nthr->ts.team = team;
-         nthr->ts.work_share = work_share;
+         nthr->ts.work_share = &team->work_shares[0];
+         nthr->ts.last_work_share = NULL;
          nthr->ts.team_id = i;
-         nthr->ts.work_share_generation = 0;
+         nthr->ts.level = team->prev_ts.level + 1;
+         nthr->ts.active_level = thr->ts.active_level;
+         nthr->ts.place_partition_off = place_partition_off;
+         nthr->ts.place_partition_len = place_partition_len;
+#ifdef HAVE_SYNC_BUILTINS
+         nthr->ts.single_count = 0;
+#endif
          nthr->ts.static_trip = 0;
+         nthr->task = &team->implicit_task[i];
+         nthr->place = place;
+         gomp_init_task (nthr->task, task, icv);
+         team->implicit_task[i].icv.nthreads_var = nthreads_var;
+         team->implicit_task[i].icv.bind_var = bind_var;
          nthr->fn = fn;
          nthr->data = data;
          team->ordered_release[i] = &nthr->release;
        }
 
+      if (__builtin_expect (affinity_thr != NULL, 0))
+       {
+         /* If AFFINITY_THR is non-NULL just because we had to
+            permute some threads in the pool, but we've managed
+            to find exactly as many old threads as we'd find
+            without affinity, we don't need to handle this
+            specially anymore.  */
+         if (nthreads <= old_threads_used
+             ? (affinity_count == old_threads_used - nthreads)
+             : (i == old_threads_used))
+           {
+             if (team->prev_ts.place_partition_len > 64)
+               free (affinity_thr);
+             affinity_thr = NULL;
+             affinity_count = 0;
+           }
+         else
+           {
+             i = 1;
+             /* We are going to compute the places/subpartitions
+                again from the beginning.  So, we need to reinitialize
+                vars modified by the switch (bind) above inside
+                of the loop, to the state they had after the initial
+                switch (bind).  */
+             switch (bind)
+               {
+               case omp_proc_bind_true:
+               case omp_proc_bind_close:
+                 if (nthreads > thr->ts.place_partition_len)
+                   /* T > P.  S has been changed, so needs
+                      to be recomputed.  */
+                   s = nthreads / thr->ts.place_partition_len;
+                 k = 1;
+                 p = thr->place - 1;
+                 break;
+               case omp_proc_bind_master:
+                 /* No vars have been changed.  */
+                 break;
+               case omp_proc_bind_spread:
+                 p = thr->ts.place_partition_off;
+                 if (k != 0)
+                   {
+                     /* T > P.  */
+                     s = nthreads / team->prev_ts.place_partition_len;
+                     k = 1;
+                   }
+                 break;
+               }
+
+             /* Increase the barrier threshold to make sure all new
+                threads and all the threads we're going to let die
+                arrive before the team is released.  */
+             if (affinity_count)
+               gomp_simple_barrier_reinit (&pool->threads_dock,
+                                           nthreads + affinity_count);
+           }
+       }
+
       if (i == nthreads)
        goto do_release;
 
-      /* If necessary, expand the size of the gomp_threads array.  It is
-        expected that changes in the number of threads is rare, thus we
-        make no effort to expand gomp_threads_size geometrically.  */
-      if (nthreads >= gomp_threads_size)
-       {
-         gomp_threads_size = nthreads + 1;
-         gomp_threads
-           = gomp_realloc (gomp_threads,
-                           gomp_threads_size
-                           * sizeof (struct gomp_thread_data *));
-       }
     }
 
-  start_data = alloca (sizeof (struct gomp_thread_start_data) * (nthreads-i));
+  if (__builtin_expect (nthreads + affinity_count > old_threads_used, 0))
+    {
+      long diff = (long) (nthreads + affinity_count) - (long) old_threads_used;
+
+      if (old_threads_used == 0)
+       --diff;
+
+#ifdef HAVE_SYNC_BUILTINS
+      __sync_fetch_and_add (&gomp_managed_threads, diff);
+#else
+      gomp_mutex_lock (&gomp_managed_threads_lock);
+      gomp_managed_threads += diff;
+      gomp_mutex_unlock (&gomp_managed_threads_lock);
+#endif
+    }
+
+  attr = &gomp_thread_attr;
+  if (__builtin_expect (gomp_places_list != NULL, 0))
+    {
+      size_t stacksize;
+      pthread_attr_init (&thread_attr);
+      pthread_attr_setdetachstate (&thread_attr, PTHREAD_CREATE_DETACHED);
+      if (! pthread_attr_getstacksize (&gomp_thread_attr, &stacksize))
+       pthread_attr_setstacksize (&thread_attr, stacksize);
+      attr = &thread_attr;
+    }
+
+  start_data = gomp_alloca (sizeof (struct gomp_thread_start_data)
+                           * (nthreads-i));
 
   /* Launch new threads.  */
-  for (; i < nthreads; ++i, ++start_data)
+  for (; i < nthreads; ++i)
     {
       pthread_t pt;
       int err;
 
+      start_data->ts.place_partition_off = thr->ts.place_partition_off;
+      start_data->ts.place_partition_len = thr->ts.place_partition_len;
+      start_data->place = 0;
+      if (__builtin_expect (gomp_places_list != NULL, 0))
+       {
+         switch (bind)
+           {
+           case omp_proc_bind_true:
+           case omp_proc_bind_close:
+             if (k == s)
+               {
+                 ++p;
+                 if (p == (team->prev_ts.place_partition_off
+                           + team->prev_ts.place_partition_len))
+                   p = team->prev_ts.place_partition_off;
+                 k = 1;
+                 if (i == nthreads - rest)
+                   s = 1;
+               }
+             else
+               ++k;
+             break;
+           case omp_proc_bind_master:
+             break;
+           case omp_proc_bind_spread:
+             if (k == 0)
+               {
+                 /* T <= P.  */
+                 if (p < rest)
+                   p += s + 1;
+                 else
+                   p += s;
+                 if (p == (team->prev_ts.place_partition_off
+                           + team->prev_ts.place_partition_len))
+                   p = team->prev_ts.place_partition_off;
+                 start_data->ts.place_partition_off = p;
+                 if (p < rest)
+                   start_data->ts.place_partition_len = s + 1;
+                 else
+                   start_data->ts.place_partition_len = s;
+               }
+             else
+               {
+                 /* T > P.  */
+                 if (k == s)
+                   {
+                     ++p;
+                     if (p == (team->prev_ts.place_partition_off
+                               + team->prev_ts.place_partition_len))
+                       p = team->prev_ts.place_partition_off;
+                     k = 1;
+                     if (i == nthreads - rest)
+                       s = 1;
+                   }
+                 else
+                   ++k;
+                 start_data->ts.place_partition_off = p;
+                 start_data->ts.place_partition_len = 1;
+               }
+             break;
+           }
+         start_data->place = p + 1;
+         if (affinity_thr != NULL && pool->threads[i] != NULL)
+           continue;
+         gomp_init_thread_affinity (attr, p);
+       }
+
+      start_data->fn = fn;
+      start_data->fn_data = data;
       start_data->ts.team = team;
-      start_data->ts.work_share = work_share;
+      start_data->ts.work_share = &team->work_shares[0];
+      start_data->ts.last_work_share = NULL;
       start_data->ts.team_id = i;
-      start_data->ts.work_share_generation = 0;
+      start_data->ts.level = team->prev_ts.level + 1;
+      start_data->ts.active_level = thr->ts.active_level;
+#ifdef HAVE_SYNC_BUILTINS
+      start_data->ts.single_count = 0;
+#endif
       start_data->ts.static_trip = 0;
-      start_data->fn = fn;
-      start_data->fn_data = data;
+      start_data->task = &team->implicit_task[i];
+      gomp_init_task (start_data->task, task, icv);
+      team->implicit_task[i].icv.nthreads_var = nthreads_var;
+      team->implicit_task[i].icv.bind_var = bind_var;
+      start_data->thread_pool = pool;
       start_data->nested = nested;
 
-      err = pthread_create (&pt, &gomp_thread_attr,
-                           gomp_thread_start, start_data);
+      attr = gomp_adjust_thread_attr (attr, &thread_attr);
+      err = pthread_create (&pt, attr, gomp_thread_start, start_data++);
       if (err != 0)
        gomp_fatal ("Thread creation failed: %s", strerror (err));
     }
 
+  if (__builtin_expect (attr == &thread_attr, 0))
+    pthread_attr_destroy (&thread_attr);
+
  do_release:
-  gomp_barrier_wait (nested ? &team->barrier : &gomp_threads_dock);
+  if (nested)
+    gomp_barrier_wait (&team->barrier);
+  else
+    gomp_simple_barrier_wait (&pool->threads_dock);
 
   /* Decrease the barrier threshold to match the number of threads
      that should arrive back at the end of this team.  The extra
      threads should be exiting.  Note that we arrange for this test
-     to never be true for nested teams.  */
-  if (nthreads < old_threads_used)
-    gomp_barrier_reinit (&gomp_threads_dock, nthreads);
+     to never be true for nested teams.  If AFFINITY_COUNT is non-zero,
+     the barrier as well as gomp_managed_threads were temporarily
+     set to NTHREADS + AFFINITY_COUNT.  For NTHREADS < OLD_THREADS_USED,
+     AFFINITY_COUNT, if non-zero, will always be at least
+     OLD_THREADS_USED - NTHREADS.  */
+  if (__builtin_expect (nthreads < old_threads_used, 0)
+      || __builtin_expect (affinity_count, 0))
+    {
+      long diff = (long) nthreads - (long) old_threads_used;
+
+      if (affinity_count)
+       diff = -affinity_count;
+
+      gomp_simple_barrier_reinit (&pool->threads_dock, nthreads);
+
+#ifdef HAVE_SYNC_BUILTINS
+      __sync_fetch_and_add (&gomp_managed_threads, diff);
+#else
+      gomp_mutex_lock (&gomp_managed_threads_lock);
+      gomp_managed_threads += diff;
+      gomp_mutex_unlock (&gomp_managed_threads_lock);
+#endif
+    }
+  if (__builtin_expect (affinity_thr != NULL, 0)
+      && team->prev_ts.place_partition_len > 64)
+    free (affinity_thr);
 }
+#endif
 
 
 /* Terminate the current team.  This is only to be called by the master
@@ -314,35 +870,106 @@ gomp_team_end (void)
   struct gomp_thread *thr = gomp_thread ();
   struct gomp_team *team = thr->ts.team;
 
-  gomp_barrier_wait (&team->barrier);
+  /* This barrier handles all pending explicit threads.
+     As #pragma omp cancel parallel might leave the awaited count in
+     team->barrier in an inconsistent state, we need to use a different
+     counter here.  */
+  gomp_team_barrier_wait_final (&team->barrier);
+  if (__builtin_expect (team->team_cancelled, 0))
+    {
+      struct gomp_work_share *ws = team->work_shares_to_free;
+      do
+       {
+         struct gomp_work_share *next_ws = gomp_ptrlock_get (&ws->next_ws);
+         if (next_ws == NULL)
+           gomp_ptrlock_set (&ws->next_ws, ws);
+         gomp_fini_work_share (ws);
+         ws = next_ws;
+       }
+      while (ws != NULL);
+    }
+  else
+    gomp_fini_work_share (thr->ts.work_share);
 
+  gomp_end_task ();
   thr->ts = team->prev_ts;
 
-  free_team (team);
+  if (__builtin_expect (thr->ts.team != NULL, 0))
+    {
+#ifdef HAVE_SYNC_BUILTINS
+      __sync_fetch_and_add (&gomp_managed_threads, 1L - team->nthreads);
+#else
+      gomp_mutex_lock (&gomp_managed_threads_lock);
+      gomp_managed_threads -= team->nthreads - 1L;
+      gomp_mutex_unlock (&gomp_managed_threads_lock);
+#endif
+      /* This barrier has gomp_barrier_wait_last counterparts
+        and ensures the team can be safely destroyed.  */
+      gomp_barrier_wait (&team->barrier);
+    }
+
+  if (__builtin_expect (team->work_shares[0].next_alloc != NULL, 0))
+    {
+      struct gomp_work_share *ws = team->work_shares[0].next_alloc;
+      do
+       {
+         struct gomp_work_share *next_ws = ws->next_alloc;
+         free (ws);
+         ws = next_ws;
+       }
+      while (ws != NULL);
+    }
+  gomp_sem_destroy (&team->master_release);
+
+  if (__builtin_expect (thr->ts.team != NULL, 0)
+      || __builtin_expect (team->nthreads == 1, 0))
+    free_team (team);
+  else
+    {
+      struct gomp_thread_pool *pool = thr->thread_pool;
+      if (pool->last_team)
+       free_team (pool->last_team);
+      pool->last_team = team;
+      gomp_release_thread_pool (pool);
+    }
 }
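
The else branch above pairs with get_last_team near the top of the file: a finished team is parked on its pool instead of freed, and the next region with a matching thread count takes it back. A stripped-down model of this one-slot cache (hypothetical types, plain malloc/free):

#include <stdlib.h>

struct team { unsigned nthreads; };
struct pool { struct team *last_team; };

/* Park a finished team; the cache holds at most one.  */
static void
park_team (struct pool *p, struct team *t)
{
  if (p->last_team)
    free (p->last_team);
  p->last_team = t;
}

/* Reuse the cached team only if the thread count matches.  */
static struct team *
take_team (struct pool *p, unsigned nthreads)
{
  struct team *t = p->last_team;
  if (t && t->nthreads == nthreads)
    {
      p->last_team = NULL;               /* ownership back to the caller */
      return t;
    }
  return NULL;                           /* caller allocates afresh */
}

int
main (void)
{
  struct pool p = { NULL };
  struct team *t = malloc (sizeof (*t));
  if (t == NULL)
    return 1;
  t->nthreads = 4;
  park_team (&p, t);
  free (take_team (&p, 4));              /* hit: same size is reused */
  return 0;
}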
 
+#ifdef LIBGOMP_USE_PTHREADS
 
 /* Constructors for this file.  */
 
 static void __attribute__((constructor))
 initialize_team (void)
 {
-  struct gomp_thread *thr;
-
-#ifndef HAVE_TLS
+#if !defined HAVE_TLS && !defined USE_EMUTLS
   static struct gomp_thread initial_thread_tls_data;
 
   pthread_key_create (&gomp_tls_key, NULL);
   pthread_setspecific (gomp_tls_key, &initial_thread_tls_data);
 #endif
 
-#ifdef HAVE_TLS
-  thr = &gomp_tls_data;
-#else
-  thr = &initial_thread_tls_data;
+  if (pthread_key_create (&gomp_thread_destructor, gomp_free_thread) != 0)
+    gomp_fatal ("could not create thread pool destructor.");
+}
+
+static void __attribute__((destructor))
+team_destructor (void)
+{
+  /* Without this, dlclose on libgomp could lead to subsequent
+     crashes.  */
+  pthread_key_delete (gomp_thread_destructor);
+}
 #endif
-  gomp_sem_init (&thr->release, 0);
 
-  pthread_attr_init (&gomp_thread_attr);
-  pthread_attr_setdetachstate (&gomp_thread_attr, PTHREAD_CREATE_DETACHED);
+struct gomp_task_icv *
+gomp_new_icv (void)
+{
+  struct gomp_thread *thr = gomp_thread ();
+  struct gomp_task *task = gomp_malloc (sizeof (struct gomp_task));
+  gomp_init_task (task, NULL, &gomp_global_icv);
+  thr->task = task;
+#ifdef LIBGOMP_USE_PTHREADS
+  pthread_setspecific (gomp_thread_destructor, thr);
+#endif
+  return &task->icv;
 }
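
gomp_new_icv also arms the gomp_thread_destructor key created in initialize_team, so gomp_free_thread runs when such a thread exits. The underlying pthread_key_create destructor pattern in isolation (hypothetical names, plain pthreads):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_key_t key;                /* like gomp_thread_destructor */

static void
destructor (void *state)                 /* like gomp_free_thread */
{
  printf ("reclaiming per-thread state %p\n", state);
  free (state);
}

static void *
thread_fn (void *arg)
{
  (void) arg;
  void *state = malloc (64);             /* per-thread resources */
  pthread_setspecific (key, state);      /* arm the destructor */
  return NULL;                           /* destructor fires at thread exit */
}

int
main (void)
{
  pthread_t t;
  if (pthread_key_create (&key, destructor) != 0)
    return 1;
  pthread_create (&t, NULL, thread_fn, NULL);
  pthread_join (t, NULL);
  pthread_key_delete (key);              /* like team_destructor above */
  return 0;
}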