-/* Copyright (C) 2005-2015 Free Software Foundation, Inc.
+/* Copyright (C) 2005-2017 Free Software Foundation, Inc.
Contributed by Richard Henderson <rth@redhat.com>.
This file is part of the GNU Offloading and Multi Processing Library
creation and termination. */
#include "libgomp.h"
+#include "pool.h"
#include <stdlib.h>
#include <string.h>
+#ifdef LIBGOMP_USE_PTHREADS
/* This attribute contains PTHREAD_CREATE_DETACHED. */
pthread_attr_t gomp_thread_attr;
{
pool->threads[thr->ts.team_id] = thr;
- gomp_barrier_wait (&pool->threads_dock);
+ gomp_simple_barrier_wait (&pool->threads_dock);
do
{
struct gomp_team *team = thr->ts.team;
gomp_team_barrier_wait_final (&team->barrier);
gomp_finish_task (task);
- gomp_barrier_wait (&pool->threads_dock);
+ gomp_simple_barrier_wait (&pool->threads_dock);
local_fn = thr->fn;
local_data = thr->data;
thr->task = NULL;
return NULL;
}
+#endif
+/* Return the thread pool's cached, previously freed team if it was built
+   for exactly NTHREADS threads, clearing the cache slot so ownership
+   transfers to the caller; otherwise return NULL.  Reuse is attempted only
+   when the current thread is not already inside a team (thr->ts.team is
+   NULL), i.e. for non-nested team creation.  */
+static inline struct gomp_team *
+get_last_team (unsigned nthreads)
+{
+ struct gomp_thread *thr = gomp_thread ();
+ if (thr->ts.team == NULL)
+ {
+ struct gomp_thread_pool *pool = gomp_get_thread_pool (thr, nthreads);
+ struct gomp_team *last_team = pool->last_team;
+ if (last_team != NULL && last_team->nthreads == nthreads)
+ {
+ /* Detach the cached team from the pool before handing it out.  */
+ pool->last_team = NULL;
+ return last_team;
+ }
+ }
+ return NULL;
+}
/* Create a new team data structure. */
gomp_new_team (unsigned nthreads)
{
struct gomp_team *team;
- size_t size;
int i;
- size = sizeof (*team) + nthreads * (sizeof (team->ordered_release[0])
- + sizeof (team->implicit_task[0]));
- team = gomp_malloc (size);
+ team = get_last_team (nthreads);
+ if (team == NULL)
+ {
+ size_t extra = sizeof (team->ordered_release[0])
+ + sizeof (team->implicit_task[0]);
+ team = gomp_malloc (sizeof (*team) + nthreads * extra);
+
+#ifndef HAVE_SYNC_BUILTINS
+ gomp_mutex_init (&team->work_share_list_free_lock);
+#endif
+ gomp_barrier_init (&team->barrier, nthreads);
+ gomp_mutex_init (&team->task_lock);
+
+ team->nthreads = nthreads;
+ }
team->work_share_chunk = 8;
#ifdef HAVE_SYNC_BUILTINS
team->single_count = 0;
-#else
- gomp_mutex_init (&team->work_share_list_free_lock);
#endif
team->work_shares_to_free = &team->work_shares[0];
gomp_init_work_share (&team->work_shares[0], false, nthreads);
team->work_shares[i].next_free = &team->work_shares[i + 1];
team->work_shares[i].next_free = NULL;
- team->nthreads = nthreads;
- gomp_barrier_init (&team->barrier, nthreads);
-
gomp_sem_init (&team->master_release, 0);
team->ordered_release = (void *) &team->implicit_task[nthreads];
team->ordered_release[0] = &team->master_release;
- gomp_mutex_init (&team->task_lock);
- team->task_queue = NULL;
+ priority_queue_init (&team->task_queue);
team->task_count = 0;
team->task_queued_count = 0;
team->task_running_count = 0;
static void
free_team (struct gomp_team *team)
{
+#ifndef HAVE_SYNC_BUILTINS
+ /* Only allocated when sync builtins are unavailable (see gomp_new_team);
+ must be destroyed symmetrically here.  */
+ gomp_mutex_destroy (&team->work_share_list_free_lock);
+#endif
gomp_barrier_destroy (&team->barrier);
gomp_mutex_destroy (&team->task_lock);
+ /* Release the priority-queue storage that replaced the old plain
+ task_queue pointer.  */
+ priority_queue_free (&team->task_queue);
free (team);
}
-/* Allocate and initialize a thread pool. */
-
-static struct gomp_thread_pool *gomp_new_thread_pool (void)
-{
- struct gomp_thread_pool *pool
- = gomp_malloc (sizeof(struct gomp_thread_pool));
- pool->threads = NULL;
- pool->threads_size = 0;
- pool->threads_used = 0;
- pool->last_team = NULL;
- return pool;
-}
-
static void
gomp_free_pool_helper (void *thread_pool)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_thread_pool *pool
= (struct gomp_thread_pool *) thread_pool;
- gomp_barrier_wait_last (&pool->threads_dock)
+ gomp_simple_barrier_wait_last (&pool->threads_dock);
gomp_sem_destroy (&thr->release);
thr->thread_pool = NULL;
thr->task = NULL;
+/* Terminate this helper thread in a target-specific way: pthreads hosts
+   exit via pthread_exit, the nvptx offload target uses the PTX "exit"
+   instruction, and any other configuration is a build-time error since
+   falling off the end would return into dead stack state.  */
+#ifdef LIBGOMP_USE_PTHREADS
pthread_exit (NULL);
+#elif defined(__nvptx__)
+ asm ("exit;");
+#else
+#error gomp_free_pool_helper must terminate the thread
+#endif
}
/* Free a thread pool and release its threads. */
nthr->data = pool;
}
/* This barrier undocks threads docked on pool->threads_dock. */
- gomp_barrier_wait (&pool->threads_dock);
+ gomp_simple_barrier_wait (&pool->threads_dock);
/* And this waits till all threads have called gomp_barrier_wait_last
in gomp_free_pool_helper. */
- gomp_barrier_wait (&pool->threads_dock);
+ gomp_simple_barrier_wait (&pool->threads_dock);
/* Now it is safe to destroy the barrier and free the pool. */
- gomp_barrier_destroy (&pool->threads_dock);
+ gomp_simple_barrier_destroy (&pool->threads_dock);
#ifdef HAVE_SYNC_BUILTINS
__sync_fetch_and_add (&gomp_managed_threads,
gomp_mutex_unlock (&gomp_managed_threads_lock);
#endif
}
- free (pool->threads);
if (pool->last_team)
free_team (pool->last_team);
+#ifndef __nvptx__
+ free (pool->threads);
free (pool);
+#endif
thr->thread_pool = NULL;
}
+ if (thr->ts.level == 0 && __builtin_expect (thr->ts.team != NULL, 0))
+ gomp_team_end ();
if (thr->task != NULL)
{
struct gomp_task *task = thr->task;
/* Launch a team. */
+#ifdef LIBGOMP_USE_PTHREADS
void
gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
unsigned flags, struct gomp_team *team)
struct gomp_thread **affinity_thr = NULL;
thr = gomp_thread ();
- nested = thr->ts.team != NULL;
- if (__builtin_expect (thr->thread_pool == NULL, 0))
- {
- thr->thread_pool = gomp_new_thread_pool ();
- thr->thread_pool->threads_busy = nthreads;
- pthread_setspecific (gomp_thread_destructor, thr);
- }
+ nested = thr->ts.level;
pool = thr->thread_pool;
task = thr->task;
icv = task ? &task->icv : &gomp_global_icv;
else if (old_threads_used == 0)
{
n = 0;
- gomp_barrier_init (&pool->threads_dock, nthreads);
+ gomp_simple_barrier_init (&pool->threads_dock, nthreads);
}
else
{
/* Increase the barrier threshold to make sure all new
threads arrive before the team is released. */
- gomp_barrier_reinit (&pool->threads_dock, nthreads);
+ gomp_simple_barrier_reinit (&pool->threads_dock, nthreads);
}
/* Not true yet, but soon will be. We're going to release all
threads and all the threads we're going to let die
arrive before the team is released. */
if (affinity_count)
- gomp_barrier_reinit (&pool->threads_dock,
- nthreads + affinity_count);
+ gomp_simple_barrier_reinit (&pool->threads_dock,
+ nthreads + affinity_count);
}
}
start_data->thread_pool = pool;
start_data->nested = nested;
+ attr = gomp_adjust_thread_attr (attr, &thread_attr);
err = pthread_create (&pt, attr, gomp_thread_start, start_data++);
if (err != 0)
gomp_fatal ("Thread creation failed: %s", strerror (err));
}
- if (__builtin_expect (gomp_places_list != NULL, 0))
+ if (__builtin_expect (attr == &thread_attr, 0))
pthread_attr_destroy (&thread_attr);
do_release:
- gomp_barrier_wait (nested ? &team->barrier : &pool->threads_dock);
+ if (nested)
+ gomp_barrier_wait (&team->barrier);
+ else
+ gomp_simple_barrier_wait (&pool->threads_dock);
/* Decrease the barrier threshold to match the number of threads
that should arrive back at the end of this team. The extra
if (affinity_count)
diff = -affinity_count;
- gomp_barrier_reinit (&pool->threads_dock, nthreads);
+ gomp_simple_barrier_reinit (&pool->threads_dock, nthreads);
#ifdef HAVE_SYNC_BUILTINS
__sync_fetch_and_add (&gomp_managed_threads, diff);
&& team->prev_ts.place_partition_len > 64)
free (affinity_thr);
}
+#endif
/* Terminate the current team. This is only to be called by the master
while (ws != NULL);
}
gomp_sem_destroy (&team->master_release);
-#ifndef HAVE_SYNC_BUILTINS
- gomp_mutex_destroy (&team->work_share_list_free_lock);
-#endif
if (__builtin_expect (thr->ts.team != NULL, 0)
|| __builtin_expect (team->nthreads == 1, 0))
if (pool->last_team)
free_team (pool->last_team);
pool->last_team = team;
+ gomp_release_thread_pool (pool);
}
}
+#ifdef LIBGOMP_USE_PTHREADS
/* Constructors for this file. */
crashes. */
pthread_key_delete (gomp_thread_destructor);
}
+#endif
struct gomp_task_icv *
gomp_new_icv (void)
struct gomp_task *task = gomp_malloc (sizeof (struct gomp_task));
gomp_init_task (task, NULL, &gomp_global_icv);
thr->task = task;
+#ifdef LIBGOMP_USE_PTHREADS
pthread_setspecific (gomp_thread_destructor, thr);
+#endif
return &task->icv;
}