X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=libgomp%2Flibgomp.h;h=f5415bb156c6eb24511adbfa293710620043a3ab;hb=cd68edf894d6b72e5bc37ac205deef9d237ab70b;hp=b694356f67edb6ae49818246873096ad1ad69719;hpb=f50eecba1389ff9da5112b2aaeb93c6ebaa38afc;p=gcc.git

diff --git a/libgomp/libgomp.h b/libgomp/libgomp.h
index b694356f67e..f5415bb156c 100644
--- a/libgomp/libgomp.h
+++ b/libgomp/libgomp.h
@@ -1,7 +1,8 @@
-/* Copyright (C) 2005-2014 Free Software Foundation, Inc.
+/* Copyright (C) 2005-2020 Free Software Foundation, Inc.
    Contributed by Richard Henderson <rth@redhat.com>.
 
-   This file is part of the GNU OpenMP Library (libgomp).
+   This file is part of the GNU Offloading and Multi Processing Library
+   (libgomp).
 
    Libgomp is free software; you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
@@ -23,9 +24,10 @@
    <http://www.gnu.org/licenses/>. */
 
 /* This file contains data types and function declarations that are not
-   part of the official OpenMP user interface. There are declarations
-   in here that are part of the GNU OpenMP ABI, in that the compiler is
-   required to know about them and use them.
+   part of the official OpenACC or OpenMP user interfaces. There are
+   declarations in here that are part of the GNU Offloading and Multi
+   Processing ABI, in that the compiler is required to know about them
+   and use them.
 
    The convention is that the all caps prefix "GOMP" is used to group items
    that are part of the external ABI, and the lower case prefix "gomp"
@@ -34,12 +36,38 @@
 #ifndef LIBGOMP_H
 #define LIBGOMP_H 1
 
+#ifndef _LIBGOMP_CHECKING_
+/* Define to 1 to perform internal sanity checks. */
+#define _LIBGOMP_CHECKING_ 0
+#endif
+
 #include "config.h"
-#include "gstdint.h"
+#include <stdint.h>
+#include "libgomp-plugin.h"
+#include "gomp-constants.h"
 
+#ifdef HAVE_PTHREAD_H
 #include <pthread.h>
+#endif
 #include <stdbool.h>
 #include <stdlib.h>
+#include <stdarg.h>
+
+/* Needed for memset in priority_queue.c. */
+#if _LIBGOMP_CHECKING_
+# ifdef STRING_WITH_STRINGS
+#  include <string.h>
+#  include <strings.h>
+# else
+#  ifdef HAVE_STRING_H
+#   include <string.h>
+#  else
+#   ifdef HAVE_STRINGS_H
+#    include <strings.h>
+#   endif
+#  endif
+# endif
+#endif
 
 #ifdef HAVE_ATTRIBUTE_VISIBILITY
 # pragma GCC visibility push(hidden)
@@ -56,9 +84,123 @@ enum memmodel
   MEMMODEL_SEQ_CST = 5
 };
 
+/* alloc.c */
+
+#if defined(HAVE_ALIGNED_ALLOC) \
+    || defined(HAVE__ALIGNED_MALLOC) \
+    || defined(HAVE_POSIX_MEMALIGN) \
+    || defined(HAVE_MEMALIGN)
+/* Defined if gomp_aligned_alloc doesn't use fallback version
+   and free can be used instead of gomp_aligned_free. */
+#define GOMP_HAVE_EFFICIENT_ALIGNED_ALLOC 1
+#endif
+
+extern void *gomp_malloc (size_t) __attribute__((malloc));
+extern void *gomp_malloc_cleared (size_t) __attribute__((malloc));
+extern void *gomp_realloc (void *, size_t);
+extern void *gomp_aligned_alloc (size_t, size_t)
+  __attribute__((malloc, alloc_size (2)));
+extern void gomp_aligned_free (void *);
+
+/* Avoid conflicting prototypes of alloca() in system headers by using
+   GCC's builtin alloca(). */
+#define gomp_alloca(x)  __builtin_alloca(x)
+
+/* Optimized allocators for team-specific data that will die with the team. */
+
+#ifdef __AMDGCN__
+/* The arena is initialized in config/gcn/team.c. */
+#define TEAM_ARENA_SIZE  64*1024  /* Must match the value in plugin-gcn.c. */
+#define TEAM_ARENA_START 16  /* LDS offset of the arena start pointer. */
+#define TEAM_ARENA_FREE  24  /* LDS offset of the arena free pointer. */
+#define TEAM_ARENA_END   32  /* LDS offset of the arena end pointer. */
+
+static inline void * __attribute__((malloc))
+team_malloc (size_t size)
+{
+  /* 4-byte align the size. 
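+     E.g. a request for 13 bytes is rounded up to 16, so the arena free
+     pointer always stays 4-byte aligned. 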
*/ + size = (size + 3) & ~3; + + /* Allocate directly from the arena. + The compiler does not support DS atomics, yet. */ + void *result; + asm ("ds_add_rtn_u64 %0, %1, %2\n\ts_waitcnt 0" + : "=v"(result) : "v"(TEAM_ARENA_FREE), "v"(size), "e"(1L) : "memory"); + + /* Handle OOM. */ + if (result + size > *(void * __lds *)TEAM_ARENA_END) + { + /* While this is experimental, let's make sure we know when OOM + happens. */ + const char msg[] = "GCN team arena exhausted\n"; + write (2, msg, sizeof(msg)-1); + + /* Fall back to using the heap (slowly). */ + result = gomp_malloc (size); + } + return result; +} + +static inline void * __attribute__((malloc)) +team_malloc_cleared (size_t size) +{ + char *result = team_malloc (size); + + /* Clear the allocated memory. */ + __builtin_memset (result, 0, size); + + return result; +} + +static inline void +team_free (void *ptr) +{ + /* The whole arena is freed when the kernel exits. + However, if we fell back to using heap then we should free it. + It would be better if this function could be a no-op, but at least + LDS loads are cheap. */ + if (ptr < *(void * __lds *)TEAM_ARENA_START + || ptr >= *(void * __lds *)TEAM_ARENA_END) + free (ptr); +} +#else +#define team_malloc(...) gomp_malloc (__VA_ARGS__) +#define team_malloc_cleared(...) gomp_malloc_cleared (__VA_ARGS__) +#define team_free(...) free (__VA_ARGS__) +#endif + +/* error.c */ + +extern void gomp_vdebug (int, const char *, va_list); +extern void gomp_debug (int, const char *, ...) + __attribute__ ((format (printf, 2, 3))); +#define gomp_vdebug(KIND, FMT, VALIST) \ + do { \ + if (__builtin_expect (gomp_debug_var, 0)) \ + (gomp_vdebug) ((KIND), (FMT), (VALIST)); \ + } while (0) +#define gomp_debug(KIND, ...) \ + do { \ + if (__builtin_expect (gomp_debug_var, 0)) \ + (gomp_debug) ((KIND), __VA_ARGS__); \ + } while (0) +extern void gomp_verror (const char *, va_list); +extern void gomp_error (const char *, ...) + __attribute__ ((format (printf, 1, 2))); +extern void gomp_vfatal (const char *, va_list) + __attribute__ ((noreturn)); +extern void gomp_fatal (const char *, ...) + __attribute__ ((noreturn, format (printf, 1, 2))); + +struct gomp_task; +struct gomp_taskgroup; +struct htab; + +#include "priority_queue.h" #include "sem.h" #include "mutex.h" #include "bar.h" +#include "simple-bar.h" #include "ptrlock.h" @@ -71,7 +213,48 @@ enum gomp_schedule_type GFS_STATIC, GFS_DYNAMIC, GFS_GUIDED, - GFS_AUTO + GFS_AUTO, + GFS_MONOTONIC = 0x80000000U +}; + +struct gomp_doacross_work_share +{ + union { + /* chunk_size copy, as ws->chunk_size is multiplied by incr for + GFS_DYNAMIC. */ + long chunk_size; + /* Likewise, but for ull implementation. */ + unsigned long long chunk_size_ull; + /* For schedule(static,0) this is the number + of iterations assigned to the last thread, i.e. number of + iterations / number of threads. */ + long q; + /* Likewise, but for ull implementation. */ + unsigned long long q_ull; + }; + /* Size of each array entry (padded to cache line size). */ + unsigned long elt_sz; + /* Number of dimensions in sink vectors. */ + unsigned int ncounts; + /* True if the iterations can be flattened. */ + bool flattened; + /* Actual array (of elt_sz sized units), aligned to cache line size. + This is indexed by team_id for GFS_STATIC and outermost iteration + / chunk_size for other schedules. */ + unsigned char *array; + /* These two are only used for schedule(static,0). */ + /* This one is number of iterations % number of threads. */ + long t; + union { + /* And this one is cached t * (q + 1). 
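+       For example, with 10 iterations and 4 threads, q = 2, t = 2 and
+       boundary = t * (q + 1) = 6: threads 0 and 1 run 3 iterations each
+       ([0,3) and [3,6)), while threads 2 and 3 run 2 each. 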
*/ + long boundary; + /* Likewise, but for the ull implementation. */ + unsigned long long boundary_ull; + }; + /* Pointer to extra memory if needed for lastprivate(conditional). */ + void *extra; + /* Array of shift counts for each dimension if they can be flattened. */ + unsigned int shift_counts[]; }; struct gomp_work_share @@ -105,13 +288,18 @@ struct gomp_work_share }; }; - /* This is a circular queue that details which threads will be allowed - into the ordered region and in which order. When a thread allocates - iterations on which it is going to work, it also registers itself at - the end of the array. When a thread reaches the ordered region, it - checks to see if it is the one at the head of the queue. If not, it - blocks on its RELEASE semaphore. */ - unsigned *ordered_team_ids; + union { + /* This is a circular queue that details which threads will be allowed + into the ordered region and in which order. When a thread allocates + iterations on which it is going to work, it also registers itself at + the end of the array. When a thread reaches the ordered region, it + checks to see if it is the one at the head of the queue. If not, it + blocks on its RELEASE semaphore. */ + unsigned *ordered_team_ids; + + /* This is a pointer to DOACROSS work share data. */ + struct gomp_doacross_work_share *doacross; + }; /* This is the number of threads that have registered themselves in the circular queue ordered_team_ids. */ @@ -166,6 +354,9 @@ struct gomp_work_share struct gomp_work_share *next_free; }; + /* Task reductions for this work-sharing construct. */ + uintptr_t *task_reductions; + /* If only few threads are in the team, ordered_team_ids can point to this array which fills the padding at the end of this struct. */ unsigned inline_ordered_team_ids[0]; @@ -230,7 +421,7 @@ struct gomp_task_icv { unsigned long nthreads_var; enum gomp_schedule_type run_sched_var; - int run_sched_modifier; + int run_sched_chunk_size; int default_device_var; unsigned int thread_limit_var; bool dyn_var; @@ -246,6 +437,7 @@ extern gomp_mutex_t gomp_managed_threads_lock; #endif extern unsigned long gomp_max_active_levels_var; extern bool gomp_cancel_var; +extern int gomp_max_task_priority_var; extern unsigned long long gomp_spin_count_var, gomp_throttled_spin_count_var; extern unsigned long gomp_available_cpus, gomp_managed_threads; extern unsigned long *gomp_nthreads_var_list, gomp_nthreads_var_list_len; @@ -253,25 +445,41 @@ extern char *gomp_bind_var_list; extern unsigned long gomp_bind_var_list_len; extern void **gomp_places_list; extern unsigned long gomp_places_list_len; +extern unsigned int gomp_num_teams_var; +extern int gomp_debug_var; +extern bool gomp_display_affinity_var; +extern char *gomp_affinity_format_var; +extern size_t gomp_affinity_format_len; +extern int goacc_device_num; +extern char *goacc_device_type; +extern int goacc_default_dims[GOMP_DIM_MAX]; enum gomp_task_kind { + /* Implicit task. */ GOMP_TASK_IMPLICIT, - GOMP_TASK_IFFALSE, + /* Undeferred task. */ + GOMP_TASK_UNDEFERRED, + /* Task created by GOMP_task and waiting to be run. */ GOMP_TASK_WAITING, - GOMP_TASK_TIED + /* Task currently executing or scheduled and about to execute. */ + GOMP_TASK_TIED, + /* Used for target tasks that have vars mapped and async run started, + but not yet completed. Once that completes, they will be readded + into the queues as GOMP_TASK_WAITING in order to perform the var + unmapping. 
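+     The device-side progress of such a task is tracked separately, by
+     enum gomp_target_task_state below. 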
*/ + GOMP_TASK_ASYNC_RUNNING }; -struct gomp_task; -struct gomp_taskgroup; -struct htab; - struct gomp_task_depend_entry { + /* Address of dependency. */ void *addr; struct gomp_task_depend_entry *next; struct gomp_task_depend_entry *prev; + /* Task that provides the dependency in ADDR. */ struct gomp_task *task; + /* Depend entry is of type "IN". */ bool is_in; bool redundant; bool redundant_out; @@ -290,8 +498,8 @@ struct gomp_taskwait { bool in_taskwait; bool in_depend_wait; + /* Number of tasks we are waiting for. */ size_t n_depend; - struct gomp_task *last_parent_depends_on; gomp_sem_t taskwait_sem; }; @@ -299,20 +507,31 @@ struct gomp_taskwait struct gomp_task { + /* Parent of this task. */ struct gomp_task *parent; - struct gomp_task *children; - struct gomp_task *next_child; - struct gomp_task *prev_child; - struct gomp_task *next_queue; - struct gomp_task *prev_queue; - struct gomp_task *next_taskgroup; - struct gomp_task *prev_taskgroup; + /* Children of this task. */ + struct priority_queue children_queue; + /* Taskgroup this task belongs in. */ struct gomp_taskgroup *taskgroup; + /* Tasks that depend on this task. */ struct gomp_dependers_vec *dependers; struct htab *depend_hash; struct gomp_taskwait *taskwait; + /* Number of items in DEPEND. */ size_t depend_count; + /* Number of tasks this task depends on. Once this counter reaches + 0, we have no unsatisfied dependencies, and this task can be put + into the various queues to be scheduled. */ size_t num_dependees; + + /* Priority of this task. */ + int priority; + /* The priority node for this task in each of the different queues. + We put this here to avoid allocating space for each priority + node. Then we play offsetof() games to convert between pnode[] + entries and the gomp_task in which they reside. */ + struct priority_node pnode[3]; + struct gomp_task_icv icv; void (*fn) (void *); void *fn_data; @@ -320,20 +539,60 @@ struct gomp_task bool in_tied_task; bool final_task; bool copy_ctors_done; + /* Set for undeferred tasks with unsatisfied dependencies which + block further execution of their parent until the dependencies + are satisfied. */ bool parent_depends_on; + /* Dependencies provided and/or needed for this task. DEPEND_COUNT + is the number of items available. */ struct gomp_task_depend_entry depend[]; }; +/* This structure describes a single #pragma omp taskgroup. */ + struct gomp_taskgroup { struct gomp_taskgroup *prev; - struct gomp_task *children; + /* Queue of tasks that belong in this taskgroup. */ + struct priority_queue taskgroup_queue; + uintptr_t *reductions; bool in_taskgroup_wait; bool cancelled; + bool workshare; gomp_sem_t taskgroup_sem; size_t num_children; }; +/* Various state of OpenMP async offloading tasks. */ +enum gomp_target_task_state +{ + GOMP_TARGET_TASK_DATA, + GOMP_TARGET_TASK_BEFORE_MAP, + GOMP_TARGET_TASK_FALLBACK, + GOMP_TARGET_TASK_READY_TO_RUN, + GOMP_TARGET_TASK_RUNNING, + GOMP_TARGET_TASK_FINISHED +}; + +/* This structure describes a target task. */ + +struct gomp_target_task +{ + struct gomp_device_descr *devicep; + void (*fn) (void *); + size_t mapnum; + size_t *sizes; + unsigned short *kinds; + unsigned int flags; + enum gomp_target_task_state state; + struct target_mem_desc *tgt; + struct gomp_task *task; + struct gomp_team *team; + /* Device-specific target arguments. */ + void **args; + void *hostaddrs[]; +}; + /* This structure describes a "team" of threads. 
These are the threads
    that are spawned by a PARALLEL construct, as well as the work sharing
    constructs that the team encounters. */
@@ -396,7 +655,8 @@ struct gomp_team
   struct gomp_work_share work_shares[8];
 
   gomp_mutex_t task_lock;
-  struct gomp_task *task_queue;
+  /* Scheduled tasks. */
+  struct priority_queue task_queue;
   /* Number of all GOMP_TASK_{WAITING,TIED} tasks in the team. */
   unsigned int task_count;
   /* Number of GOMP_TASK_WAITING tasks currently waiting to be scheduled. */
@@ -441,6 +701,19 @@ struct gomp_thread
   /* User pthread thread pool. */
   struct gomp_thread_pool *thread_pool;
+
+#if defined(LIBGOMP_USE_PTHREADS) \
+    && (!defined(HAVE_TLS) \
+	|| !defined(__GLIBC__) \
+	|| !defined(USING_INITIAL_EXEC_TLS))
+  /* pthread_t of the thread containing this gomp_thread.
+     On Linux when using initial-exec TLS,
+     (typeof (pthread_t)) gomp_thread () - pthread_self ()
+     is constant in all threads, so we can optimize and not
+     store it. */
+#define GOMP_NEEDS_THREAD_HANDLE 1
+  pthread_t handle;
+#endif
 };
@@ -451,12 +724,15 @@ struct gomp_thread_pool
   struct gomp_thread **threads;
   unsigned threads_size;
   unsigned threads_used;
+  /* The last team is used for non-nested teams to delay their destruction to
+     make sure all the threads in the team move on to the pool's barrier before
+     the team's barrier is destroyed. */
   struct gomp_team *last_team;
   /* Number of threads running in this contention group. */
   unsigned long threads_busy;
 
-  /* This barrier holds and releases threads waiting in threads. */
-  gomp_barrier_t threads_dock;
+  /* This barrier holds and releases threads waiting in thread pools. */
+  gomp_simple_barrier_t threads_dock;
 };
 
 enum gomp_cancel_kind
 {
@@ -471,7 +747,33 @@ enum gomp_cancel_kind
 
 /* ... and here is that TLS data. */
 
-#if defined HAVE_TLS || defined USE_EMUTLS
+#if defined __nvptx__
+extern struct gomp_thread *nvptx_thrs __attribute__((shared));
+static inline struct gomp_thread *gomp_thread (void)
+{
+  int tid;
+  asm ("mov.u32 %0, %%tid.y;" : "=r" (tid));
+  return nvptx_thrs + tid;
+}
+#elif defined __AMDGCN__
+static inline struct gomp_thread *gcn_thrs (void)
+{
+  /* The value is at the bottom of LDS. */
+  struct gomp_thread * __lds *thrs = (struct gomp_thread * __lds *)4;
+  return *thrs;
+}
+static inline void set_gcn_thrs (struct gomp_thread *val)
+{
+  /* The value is at the bottom of LDS. */
+  struct gomp_thread * __lds *thrs = (struct gomp_thread * __lds *)4;
+  *thrs = val;
+}
+static inline struct gomp_thread *gomp_thread (void)
+{
+  int tid = __builtin_gcn_dim_pos(1);
+  return gcn_thrs () + tid;
+}
+#elif defined HAVE_TLS || defined USE_EMUTLS
 extern __thread struct gomp_thread gomp_tls_data;
 static inline struct gomp_thread *gomp_thread (void)
 {
@@ -500,15 +802,21 @@ static inline struct gomp_task_icv *gomp_icv (bool write)
   return &gomp_global_icv;
 }
 
+#ifdef LIBGOMP_USE_PTHREADS
 /* The attributes to be used during thread creation. */
 extern pthread_attr_t gomp_thread_attr;
 
+extern pthread_key_t gomp_thread_destructor;
+#endif
+
 /* Function prototypes. 
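   The prototypes below are grouped by the libgomp source file that
   defines them. 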
*/ /* affinity.c */ extern void gomp_init_affinity (void); +#ifdef LIBGOMP_USE_PTHREADS extern void gomp_init_thread_affinity (pthread_attr_t *, unsigned int); +#endif extern void **gomp_affinity_alloc (unsigned long, bool); extern void gomp_affinity_init_place (void *); extern bool gomp_affinity_add_cpus (void *, unsigned long, unsigned long, @@ -519,23 +827,26 @@ extern bool gomp_affinity_same_place (void *, void *); extern bool gomp_affinity_finalize_place_list (bool); extern bool gomp_affinity_init_level (int, unsigned long, bool); extern void gomp_affinity_print_place (void *); +extern void gomp_get_place_proc_ids_8 (int, int64_t *); +extern void gomp_display_affinity_place (char *, size_t, size_t *, int); -/* alloc.c */ - -extern void *gomp_malloc (size_t) __attribute__((malloc)); -extern void *gomp_malloc_cleared (size_t) __attribute__((malloc)); -extern void *gomp_realloc (void *, size_t); - -/* Avoid conflicting prototypes of alloca() in system headers by using - GCC's builtin alloca(). */ -#define gomp_alloca(x) __builtin_alloca(x) - -/* error.c */ +/* affinity-fmt.c */ -extern void gomp_error (const char *, ...) - __attribute__((format (printf, 1, 2))); -extern void gomp_fatal (const char *, ...) - __attribute__((noreturn, format (printf, 1, 2))); +extern bool gomp_print_string (const char *str, size_t len); +extern void gomp_set_affinity_format (const char *, size_t); +extern void gomp_display_string (char *, size_t, size_t *, const char *, + size_t); +#ifdef LIBGOMP_USE_PTHREADS +typedef pthread_t gomp_thread_handle; +#else +typedef struct {} gomp_thread_handle; +#endif +extern size_t gomp_display_affinity (char *, size_t, const char *, + gomp_thread_handle, + struct gomp_team_state *, unsigned int); +extern void gomp_display_affinity_thread (gomp_thread_handle, + struct gomp_team_state *, + unsigned int) __attribute__((cold)); /* iter.c */ @@ -572,6 +883,9 @@ extern void gomp_ordered_next (void); extern void gomp_ordered_static_init (void); extern void gomp_ordered_static_next (void); extern void gomp_ordered_sync (void); +extern void gomp_doacross_init (unsigned, long *, long, size_t); +extern void gomp_doacross_ull_init (unsigned, unsigned long long *, + unsigned long long, size_t); /* parallel.c */ @@ -588,6 +902,16 @@ extern void gomp_init_task (struct gomp_task *, struct gomp_task *, struct gomp_task_icv *); extern void gomp_end_task (void); extern void gomp_barrier_handle_tasks (gomp_barrier_state_t); +extern void gomp_task_maybe_wait_for_dependencies (void **); +extern bool gomp_create_target_task (struct gomp_device_descr *, + void (*) (void *), size_t, void **, + size_t *, unsigned short *, unsigned int, + void **, void **, + enum gomp_target_task_state); +extern struct gomp_taskgroup *gomp_parallel_reduction_register (uintptr_t *, + unsigned); +extern void gomp_workshare_taskgroup_start (void); +extern void gomp_workshare_task_reduction_register (uintptr_t *, uintptr_t *); static void inline gomp_finish_task (struct gomp_task *task) @@ -600,19 +924,279 @@ gomp_finish_task (struct gomp_task *task) extern struct gomp_team *gomp_new_team (unsigned); extern void gomp_team_start (void (*) (void *), void *, unsigned, - unsigned, struct gomp_team *); + unsigned, struct gomp_team *, + struct gomp_taskgroup *); extern void gomp_team_end (void); extern void gomp_free_thread (void *); +extern int gomp_pause_host (void); /* target.c */ +extern void gomp_init_targets_once (void); extern int gomp_get_num_devices (void); +extern bool gomp_target_task_fn (void *); + +/* Splay tree 
definitions. */
+typedef struct splay_tree_node_s *splay_tree_node;
+typedef struct splay_tree_s *splay_tree;
+typedef struct splay_tree_key_s *splay_tree_key;
+
+struct target_var_desc {
+  /* Splay key. */
+  splay_tree_key key;
+  /* True if data should be copied from device to host at the end. */
+  bool copy_from;
+  /* True if data should always be copied from device to host at the end. */
+  bool always_copy_from;
+  /* True if variable should be detached at end of region. */
+  bool do_detach;
+  /* Relative offset against key host_start. */
+  uintptr_t offset;
+  /* Actual length. */
+  uintptr_t length;
+};
+
+struct target_mem_desc {
+  /* Reference count. */
+  uintptr_t refcount;
+  /* All the splay nodes allocated together. */
+  splay_tree_node array;
+  /* Start of the target region. */
+  uintptr_t tgt_start;
+  /* End of the target region. */
+  uintptr_t tgt_end;
+  /* Handle to free. */
+  void *to_free;
+  /* Previous target_mem_desc. */
+  struct target_mem_desc *prev;
+  /* Number of items in following list. */
+  size_t list_count;
+
+  /* Corresponding target device descriptor. */
+  struct gomp_device_descr *device_descr;
+
+  /* List of target items to remove (or decrease refcount)
+     at the end of region. */
+  struct target_var_desc list[];
+};
+
+/* Special value for refcount - infinity. */
+#define REFCOUNT_INFINITY (~(uintptr_t) 0)
+/* Special value for refcount - tgt_offset contains target address of the
+   artificial pointer to "omp declare target link" object. */
+#define REFCOUNT_LINK (~(uintptr_t) 1)
+
+/* Special offset values. */
+#define OFFSET_INLINED (~(uintptr_t) 0)
+#define OFFSET_POINTER (~(uintptr_t) 1)
+#define OFFSET_STRUCT (~(uintptr_t) 2)
+
+/* Auxiliary structure for infrequently-used or API-specific data. */
+
+struct splay_tree_aux {
+  /* Pointer to the original mapping of "omp declare target link" object. */
+  splay_tree_key link_key;
+  /* For a block with attached pointers, the attachment counters for each.
+     Only used for OpenACC. */
+  uintptr_t *attach_count;
+};
+
+struct splay_tree_key_s {
+  /* Address of the host object. */
+  uintptr_t host_start;
+  /* Address immediately after the host object. */
+  uintptr_t host_end;
+  /* Descriptor of the target memory. */
+  struct target_mem_desc *tgt;
+  /* Offset from tgt->tgt_start to the start of the target object. */
+  uintptr_t tgt_offset;
+  /* Reference count. */
+  uintptr_t refcount;
+  /* Reference counts beyond those that represent genuine references in the
+     linked splay tree key/target memory structures, e.g. for multiple OpenACC
+     "present increment" operations (via "acc enter data") referring to the same
+     host-memory block. */
+  uintptr_t virtual_refcount;
+  struct splay_tree_aux *aux;
+};
+
+/* The comparison function. */
+
+static inline int
+splay_compare (splay_tree_key x, splay_tree_key y)
+{
+  if (x->host_start == x->host_end
+      && y->host_start == y->host_end)
+    return 0;
+  if (x->host_end <= y->host_start)
+    return -1;
+  if (x->host_start >= y->host_end)
+    return 1;
+  return 0;
+}
+
+#include "splay-tree.h"
+
+typedef struct acc_dispatch_t
+{
+  /* Execute. */
+  __typeof (GOMP_OFFLOAD_openacc_exec) *exec_func;
+
+  /* Create/destroy TLS data. */
+  __typeof (GOMP_OFFLOAD_openacc_create_thread_data) *create_thread_data_func;
+  __typeof (GOMP_OFFLOAD_openacc_destroy_thread_data)
+    *destroy_thread_data_func;
+
+  struct {
+    /* Once created and put into the "active" list, asyncqueues are then never
+       destructed and removed from the "active" list, other than if the TODO
+       device is shut down. 
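+       The LOCK below presumably guards the queue bookkeeping that follows
+       (nasyncqueue, asyncqueue and active). 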
*/
+    gomp_mutex_t lock;
+    int nasyncqueue;
+    struct goacc_asyncqueue **asyncqueue;
+    struct goacc_asyncqueue_list *active;
+
+    __typeof (GOMP_OFFLOAD_openacc_async_construct) *construct_func;
+    __typeof (GOMP_OFFLOAD_openacc_async_destruct) *destruct_func;
+    __typeof (GOMP_OFFLOAD_openacc_async_test) *test_func;
+    __typeof (GOMP_OFFLOAD_openacc_async_synchronize) *synchronize_func;
+    __typeof (GOMP_OFFLOAD_openacc_async_serialize) *serialize_func;
+    __typeof (GOMP_OFFLOAD_openacc_async_queue_callback) *queue_callback_func;
+
+    __typeof (GOMP_OFFLOAD_openacc_async_exec) *exec_func;
+    __typeof (GOMP_OFFLOAD_openacc_async_dev2host) *dev2host_func;
+    __typeof (GOMP_OFFLOAD_openacc_async_host2dev) *host2dev_func;
+  } async;
+
+  __typeof (GOMP_OFFLOAD_openacc_get_property) *get_property_func;
+
+  /* NVIDIA target specific routines. */
+  struct {
+    __typeof (GOMP_OFFLOAD_openacc_cuda_get_current_device)
+      *get_current_device_func;
+    __typeof (GOMP_OFFLOAD_openacc_cuda_get_current_context)
+      *get_current_context_func;
+    __typeof (GOMP_OFFLOAD_openacc_cuda_get_stream) *get_stream_func;
+    __typeof (GOMP_OFFLOAD_openacc_cuda_set_stream) *set_stream_func;
+  } cuda;
+} acc_dispatch_t;
+
+/* Possible states of the accelerator device. */
+enum gomp_device_state
+{
+  GOMP_DEVICE_UNINITIALIZED,
+  GOMP_DEVICE_INITIALIZED,
+  GOMP_DEVICE_FINALIZED
+};
+
+/* This structure describes an accelerator device.
+   It contains the name of the corresponding libgomp plugin, function handlers
+   for interaction with the device, the ID number of the device, and
+   information about mapped memory. */
+struct gomp_device_descr
+{
+  /* Immutable data, which is only set during initialization, and which is not
+     guarded by the lock. */
+
+  /* The name of the device. */
+  const char *name;
+
+  /* Capabilities of device (supports OpenACC, OpenMP). */
+  unsigned int capabilities;
+
+  /* This is the ID number of the device among devices of the same type. */
+  int target_id;
+
+  /* This is the TYPE of device. */
+  enum offload_target_type type;
+
+  /* Function handlers. */
+  __typeof (GOMP_OFFLOAD_get_name) *get_name_func;
+  __typeof (GOMP_OFFLOAD_get_caps) *get_caps_func;
+  __typeof (GOMP_OFFLOAD_get_type) *get_type_func;
+  __typeof (GOMP_OFFLOAD_get_num_devices) *get_num_devices_func;
+  __typeof (GOMP_OFFLOAD_init_device) *init_device_func;
+  __typeof (GOMP_OFFLOAD_fini_device) *fini_device_func;
+  __typeof (GOMP_OFFLOAD_version) *version_func;
+  __typeof (GOMP_OFFLOAD_load_image) *load_image_func;
+  __typeof (GOMP_OFFLOAD_unload_image) *unload_image_func;
+  __typeof (GOMP_OFFLOAD_alloc) *alloc_func;
+  __typeof (GOMP_OFFLOAD_free) *free_func;
+  __typeof (GOMP_OFFLOAD_dev2host) *dev2host_func;
+  __typeof (GOMP_OFFLOAD_host2dev) *host2dev_func;
+  __typeof (GOMP_OFFLOAD_dev2dev) *dev2dev_func;
+  __typeof (GOMP_OFFLOAD_can_run) *can_run_func;
+  __typeof (GOMP_OFFLOAD_run) *run_func;
+  __typeof (GOMP_OFFLOAD_async_run) *async_run_func;
+
+  /* Splay tree containing information about mapped memory regions. */
+  struct splay_tree_s mem_map;
+
+  /* Mutex for the mutable data. */
+  gomp_mutex_t lock;
+
+  /* Current state of the device. OpenACC allows moving from the INITIALIZED
+     state back to UNINITIALIZED; OpenMP only allows moving from INITIALIZED
+     to FINALIZED (at program shutdown). */
+  enum gomp_device_state state;
+
+  /* OpenACC-specific data and functions. */
+  /* This is mutable because of its mutable target_data member. */
+  acc_dispatch_t openacc;
+};
+
+/* The kind of pragma for which gomp_map_vars () is called. 
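+   It distinguishes OpenACC from OpenMP mappings, and region-scoped mappings
+   from "enter data" style mappings, whose lifetime extends beyond the
+   construct that created them. 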
*/ +enum gomp_map_vars_kind +{ + GOMP_MAP_VARS_OPENACC, + GOMP_MAP_VARS_OPENACC_ENTER_DATA, + GOMP_MAP_VARS_TARGET, + GOMP_MAP_VARS_DATA, + GOMP_MAP_VARS_ENTER_DATA +}; + +extern void gomp_acc_declare_allocate (bool, size_t, void **, size_t *, + unsigned short *); +struct gomp_coalesce_buf; +extern void gomp_copy_host2dev (struct gomp_device_descr *, + struct goacc_asyncqueue *, void *, const void *, + size_t, struct gomp_coalesce_buf *); +extern void gomp_copy_dev2host (struct gomp_device_descr *, + struct goacc_asyncqueue *, void *, const void *, + size_t); +extern uintptr_t gomp_map_val (struct target_mem_desc *, void **, size_t); +extern void gomp_attach_pointer (struct gomp_device_descr *, + struct goacc_asyncqueue *, splay_tree, + splay_tree_key, uintptr_t, size_t, + struct gomp_coalesce_buf *); +extern void gomp_detach_pointer (struct gomp_device_descr *, + struct goacc_asyncqueue *, splay_tree_key, + uintptr_t, bool, struct gomp_coalesce_buf *); + +extern struct target_mem_desc *gomp_map_vars (struct gomp_device_descr *, + size_t, void **, void **, + size_t *, void *, bool, + enum gomp_map_vars_kind); +extern struct target_mem_desc *gomp_map_vars_async (struct gomp_device_descr *, + struct goacc_asyncqueue *, + size_t, void **, void **, + size_t *, void *, bool, + enum gomp_map_vars_kind); +extern void gomp_unmap_vars (struct target_mem_desc *, bool); +extern void gomp_unmap_vars_async (struct target_mem_desc *, bool, + struct goacc_asyncqueue *); +extern void gomp_init_device (struct gomp_device_descr *); +extern bool gomp_fini_device (struct gomp_device_descr *); +extern void gomp_unload_device (struct gomp_device_descr *); +extern bool gomp_remove_var (struct gomp_device_descr *, splay_tree_key); +extern void gomp_remove_var_async (struct gomp_device_descr *, splay_tree_key, + struct goacc_asyncqueue *); /* work.c */ -extern void gomp_init_work_share (struct gomp_work_share *, bool, unsigned); +extern void gomp_init_work_share (struct gomp_work_share *, size_t, unsigned); extern void gomp_fini_work_share (struct gomp_work_share *); -extern bool gomp_work_share_start (bool); +extern bool gomp_work_share_start (size_t); extern void gomp_work_share_end (void); extern bool gomp_work_share_end_cancel (void); extern void gomp_work_share_end_nowait (void); @@ -668,8 +1252,6 @@ extern void gomp_set_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW; extern void gomp_unset_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW; extern int gomp_test_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW; -# define strong_alias(fn, al) \ - extern __typeof (fn) al __attribute__ ((alias (#fn))); # define omp_lock_symver(fn) \ __asm (".symver g" #fn "_30, " #fn "@@OMP_3.0"); \ __asm (".symver g" #fn "_25, " #fn "@OMP_1.0"); @@ -692,13 +1274,26 @@ extern int gomp_test_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW; # define attribute_hidden #endif +#if __GNUC__ >= 9 +# define HAVE_ATTRIBUTE_COPY +#endif + +#ifdef HAVE_ATTRIBUTE_COPY +# define attribute_copy(arg) __attribute__ ((copy (arg))) +#else +# define attribute_copy(arg) +#endif + #ifdef HAVE_ATTRIBUTE_ALIAS +# define strong_alias(fn, al) \ + extern __typeof (fn) al __attribute__ ((alias (#fn))) attribute_copy (fn); + # define ialias_ulp ialias_str1(__USER_LABEL_PREFIX__) # define ialias_str1(x) ialias_str2(x) # define ialias_str2(x) #x # define ialias(fn) \ extern __typeof (fn) gomp_ialias_##fn \ - __attribute__ ((alias (#fn))) attribute_hidden; + __attribute__ ((alias (#fn))) attribute_hidden attribute_copy (fn); # define 
ialias_redirect(fn) \
   extern __typeof (fn) fn __asm__ (ialias_ulp "gomp_ialias_" #fn) attribute_hidden;
 # define ialias_call(fn) gomp_ialias_ ## fn
@@ -708,4 +1303,72 @@
 # define ialias_call(fn) fn
 #endif
 
+/* Helper function for priority_node_to_task() and
+   task_to_priority_node().
+
+   Return the offset from a task to its priority_node entry. The
+   priority_node entry has type TYPE. */
+
+static inline size_t
+priority_queue_offset (enum priority_queue_type type)
+{
+  return offsetof (struct gomp_task, pnode[(int) type]);
+}
+
+/* Return the task associated with a priority NODE of type TYPE. */
+
+static inline struct gomp_task *
+priority_node_to_task (enum priority_queue_type type,
+		       struct priority_node *node)
+{
+  return (struct gomp_task *) ((char *) node - priority_queue_offset (type));
+}
+
+/* Return the priority node of type TYPE for a given TASK. */
+
+static inline struct priority_node *
+task_to_priority_node (enum priority_queue_type type,
+		       struct gomp_task *task)
+{
+  return (struct priority_node *) ((char *) task
+				   + priority_queue_offset (type));
+}
+
+#ifdef LIBGOMP_USE_PTHREADS
+static inline gomp_thread_handle
+gomp_thread_self (void)
+{
+  return pthread_self ();
+}
+
+static inline gomp_thread_handle
+gomp_thread_to_pthread_t (struct gomp_thread *thr)
+{
+  struct gomp_thread *this_thr = gomp_thread ();
+  if (thr == this_thr)
+    return pthread_self ();
+#ifdef GOMP_NEEDS_THREAD_HANDLE
+  return thr->handle;
+#else
+  /* On Linux with initial-exec TLS, the pthread_t of the thread containing
+     thr can be computed from thr, this_thr and pthread_self (),
+     as the distance between this_thr and pthread_self () is constant. */
+  return pthread_self () + ((uintptr_t) thr - (uintptr_t) this_thr);
+#endif
+}
+#else
+static inline gomp_thread_handle
+gomp_thread_self (void)
+{
+  return (gomp_thread_handle) {};
+}
+
+static inline gomp_thread_handle
+gomp_thread_to_pthread_t (struct gomp_thread *thr)
+{
+  (void) thr;
+  return gomp_thread_self ();
+}
+#endif
+
 #endif /* LIBGOMP_H */
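+
+/* Example (exposition only, not part of the upstream header): the two
+   helpers above are exact inverses. For any task T and queue type TYPE,
+
+     struct priority_node *node = task_to_priority_node (TYPE, T);
+     assert (priority_node_to_task (TYPE, node) == T);
+
+   since both merely add or subtract offsetof (struct gomp_task,
+   pnode[TYPE]); this is how the scheduler recovers the enclosing task
+   from a priority_node linked into one of its queues. */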