/* OpenACC Runtime initialization routines
- Copyright (C) 2013-2019 Free Software Foundation, Inc.
+ Copyright (C) 2013-2020 Free Software Foundation, Inc.
Contributed by Mentor Embedded.
void
acc_map_data (void *h, void *d, size_t s)
{
- struct target_mem_desc *tgt = NULL;
size_t mapnum = 1;
void *hostaddrs = h;
void *devaddrs = d;
gomp_mutex_unlock (&acc_dev->lock);
- tgt = gomp_map_vars (acc_dev, mapnum, &hostaddrs, &devaddrs, &sizes,
- &kinds, true, GOMP_MAP_VARS_ENTER_DATA);
+ struct target_mem_desc *tgt
+ = gomp_map_vars (acc_dev, mapnum, &hostaddrs, &devaddrs, &sizes,
+ &kinds, true,
+ GOMP_MAP_VARS_OPENACC | GOMP_MAP_VARS_ENTER_DATA);
assert (tgt);
+ assert (tgt->list_count == 1);
splay_tree_key n = tgt->list[0].key;
+ assert (n);
assert (n->refcount == 1);
- assert (n->virtual_refcount == 0);
+ assert (n->dynamic_refcount == 0);
/* Special reference counting behavior. */
n->refcount = REFCOUNT_INFINITY;
(void *) n->host_start, (int) host_size, (void *) h);
}
/* TODO This currently doesn't catch 'REFCOUNT_INFINITY' usage different from
- 'acc_map_data'. Maybe 'virtual_refcount' can be used for disambiguating
+ 'acc_map_data'. Maybe 'dynamic_refcount' can be used for disambiguating
the different 'REFCOUNT_INFINITY' cases, or simply separate
'REFCOUNT_INFINITY' values per different usage ('REFCOUNT_ACC_MAP_DATA'
etc.)? */
(void *) h, (int) host_size);
}
- splay_tree_remove (&acc_dev->mem_map, n);
-
struct target_mem_desc *tgt = n->tgt;
if (tgt->refcount == REFCOUNT_INFINITY)
gomp_mutex_unlock (&acc_dev->lock);
gomp_fatal ("cannot unmap target block");
}
- else if (tgt->refcount > 1)
- tgt->refcount--;
- else
- {
- free (tgt->array);
- free (tgt);
- }
+
+ /* Above, we've verified that the mapping must have been set up by
+ 'acc_map_data'. */
+ assert (tgt->refcount == 1);
+
+ /* Nullifying these fields prevents 'gomp_unmap_tgt' via 'gomp_remove_var'
+ from freeing the target memory. */
+ tgt->tgt_end = 0;
+ tgt->to_free = NULL;
+
+ bool is_tgt_unmapped = gomp_remove_var (acc_dev, n);
+ assert (is_tgt_unmapped);
gomp_mutex_unlock (&acc_dev->lock);
}
+/* Helper function to map a single dynamic data item, represented by a single
+ mapping. The acc_dev->lock should be held on entry, and remains locked on
+ exit. */
+
+static void *
+goacc_map_var_existing (struct gomp_device_descr *acc_dev, void *hostaddr,
+ size_t size, splay_tree_key n)
+{
+ assert (n);
+
+ /* Present. */
+ void *d = (void *) (n->tgt->tgt_start + n->tgt_offset + hostaddr
+ - n->host_start);
+
+ if (hostaddr + size > (void *) n->host_end)
+ {
+ gomp_mutex_unlock (&acc_dev->lock);
+ gomp_fatal ("[%p,+%d] not mapped", hostaddr, (int) size);
+ }
+
+ assert (n->refcount != REFCOUNT_LINK);
+ if (n->refcount != REFCOUNT_INFINITY)
+ n->refcount++;
+ n->dynamic_refcount++;
+
+ return d;
+}
+
/* Enter dynamic mapping for a single datum. Return the device pointer. */
static void *
n = lookup_host (acc_dev, hostaddrs[0], sizes[0]);
if (n)
{
- void *h = hostaddrs[0];
- size_t s = sizes[0];
-
- /* Present. */
- d = (void *) (n->tgt->tgt_start + n->tgt_offset + h - n->host_start);
-
- if ((h + s) > (void *)n->host_end)
- {
- gomp_mutex_unlock (&acc_dev->lock);
- gomp_fatal ("[%p,+%d] not mapped", (void *)h, (int)s);
- }
-
- assert (n->refcount != REFCOUNT_LINK);
- if (n->refcount != REFCOUNT_INFINITY)
- {
- n->refcount++;
- n->virtual_refcount++;
- }
-
+ d = goacc_map_var_existing (acc_dev, hostaddrs[0], sizes[0], n);
gomp_mutex_unlock (&acc_dev->lock);
}
else
goacc_aq aq = get_goacc_asyncqueue (async);
- gomp_map_vars_async (acc_dev, aq, mapnum, hostaddrs, NULL, sizes, kinds,
- true, GOMP_MAP_VARS_OPENACC_ENTER_DATA);
+ struct target_mem_desc *tgt
+ = gomp_map_vars_async (acc_dev, aq, mapnum, hostaddrs, NULL, sizes,
+ kinds, true, (GOMP_MAP_VARS_OPENACC
+ | GOMP_MAP_VARS_ENTER_DATA));
+ assert (tgt);
+ assert (tgt->list_count == 1);
+ n = tgt->list[0].key;
+ assert (n);
+ assert (n->refcount == 1);
+ assert (n->dynamic_refcount == 0);
+ n->dynamic_refcount++;
- gomp_mutex_lock (&acc_dev->lock);
- n = lookup_host (acc_dev, hostaddrs[0], sizes[0]);
- assert (n != NULL);
- assert (n->tgt_offset == 0);
- assert ((uintptr_t) hostaddrs[0] == n->host_start);
- d = (void *) n->tgt->tgt_start;
- gomp_mutex_unlock (&acc_dev->lock);
+ d = (void *) tgt->tgt_start;
}
if (profiling_p)
#endif
-/* Exit a dynamic mapping for a single variable. */
+/* Helper function to unmap a single data item. Device lock should be held on
+ entry, and remains locked on exit. */
static void
-goacc_exit_datum (void *h, size_t s, unsigned short kind, int async)
+goacc_exit_datum_1 (struct gomp_device_descr *acc_dev, void *h, size_t s,
+ unsigned short kind, splay_tree_key n, goacc_aq aq)
{
- /* No need to call lazy open, as the data must already have been
- mapped. */
-
- kind &= 0xff;
-
- struct goacc_thread *thr = goacc_thread ();
- struct gomp_device_descr *acc_dev = thr->dev;
-
- if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
- return;
-
- acc_prof_info prof_info;
- acc_api_info api_info;
- bool profiling_p = GOACC_PROFILING_SETUP_P (thr, &prof_info, &api_info);
- if (profiling_p)
- {
- prof_info.async = async;
- prof_info.async_queue = prof_info.async;
- }
-
- gomp_mutex_lock (&acc_dev->lock);
-
- splay_tree_key n = lookup_host (acc_dev, h, s);
- if (!n)
- /* PR92726, RP92970, PR92984: no-op. */
- goto out;
+ assert (kind != GOMP_MAP_DETACH
+ && kind != GOMP_MAP_FORCE_DETACH);
if ((uintptr_t) h < n->host_start || (uintptr_t) h + s > n->host_end)
{
(void *) h, (int) s, (void *) n->host_start, (int) host_size);
}
- bool finalize = (kind == GOMP_MAP_DELETE
- || kind == GOMP_MAP_FORCE_FROM);
+ bool finalize = (kind == GOMP_MAP_FORCE_FROM
+ || kind == GOMP_MAP_DELETE);
+
+ assert (n->refcount != REFCOUNT_LINK);
+ if (n->refcount != REFCOUNT_INFINITY
+ && n->refcount < n->dynamic_refcount)
+ {
+ gomp_mutex_unlock (&acc_dev->lock);
+ gomp_fatal ("Dynamic reference counting assert fail\n");
+ }
+
if (finalize)
{
if (n->refcount != REFCOUNT_INFINITY)
- n->refcount -= n->virtual_refcount;
- n->virtual_refcount = 0;
+ n->refcount -= n->dynamic_refcount;
+ n->dynamic_refcount = 0;
}
-
- if (n->virtual_refcount > 0)
+ else if (n->dynamic_refcount)
{
if (n->refcount != REFCOUNT_INFINITY)
n->refcount--;
- n->virtual_refcount--;
+ n->dynamic_refcount--;
}
- else if (n->refcount > 0 && n->refcount != REFCOUNT_INFINITY)
- n->refcount--;
if (n->refcount == 0)
{
- goacc_aq aq = get_goacc_asyncqueue (async);
-
bool copyout = (kind == GOMP_MAP_FROM
|| kind == GOMP_MAP_FORCE_FROM);
if (copyout)
gomp_remove_var_async (acc_dev, n, aq);
else
{
+ size_t num_mappings = 0;
+ /* If the target_mem_desc represents a single data mapping, we can
+ check that it is freed when this splay tree key's refcount reaches
+ zero. Otherwise (e.g. for a 'GOMP_MAP_STRUCT' mapping with
+ multiple members), fall back to skipping the test. */
+ for (size_t l_i = 0; l_i < n->tgt->list_count; ++l_i)
+ if (n->tgt->list[l_i].key
+ && !n->tgt->list[l_i].is_attach)
+ ++num_mappings;
bool is_tgt_unmapped = gomp_remove_var (acc_dev, n);
- assert (is_tgt_unmapped);
+ assert (is_tgt_unmapped || num_mappings > 1);
}
}
+}
+
+
+/* Exit a dynamic mapping for a single variable. */
+
+static void
+goacc_exit_datum (void *h, size_t s, unsigned short kind, int async)
+{
+ /* No need to call lazy open, as the data must already have been
+ mapped. */
+
+ kind &= 0xff;
+
+ struct goacc_thread *thr = goacc_thread ();
+ struct gomp_device_descr *acc_dev = thr->dev;
+
+ if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
+ return;
+
+ acc_prof_info prof_info;
+ acc_api_info api_info;
+ bool profiling_p = GOACC_PROFILING_SETUP_P (thr, &prof_info, &api_info);
+ if (profiling_p)
+ {
+ prof_info.async = async;
+ prof_info.async_queue = prof_info.async;
+ }
+
+ gomp_mutex_lock (&acc_dev->lock);
+
+ splay_tree_key n = lookup_host (acc_dev, h, s);
+ /* Non-present data is a no-op: PR92726, PR92970, PR92984. */
+ if (n)
+ {
+ goacc_aq aq = get_goacc_asyncqueue (async);
+ goacc_exit_datum_1 (acc_dev, h, s, kind, n, aq);
+ }
- out:
gomp_mutex_unlock (&acc_dev->lock);
if (profiling_p)
update_dev_host (0, h, s, async);
}
+void
+acc_attach_async (void **hostaddr, int async)
+{
+ struct goacc_thread *thr = goacc_thread ();
+ struct gomp_device_descr *acc_dev = thr->dev;
+ goacc_aq aq = get_goacc_asyncqueue (async);
+
+ struct splay_tree_key_s cur_node;
+ splay_tree_key n;
+
+ if (thr->dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
+ return;
+
+ gomp_mutex_lock (&acc_dev->lock);
+
+ cur_node.host_start = (uintptr_t) hostaddr;
+ cur_node.host_end = cur_node.host_start + sizeof (void *);
+ n = splay_tree_lookup (&acc_dev->mem_map, &cur_node);
+
+ if (n == NULL)
+ {
+ gomp_mutex_unlock (&acc_dev->lock);
+ gomp_fatal ("struct not mapped for acc_attach");
+ }
+
+ gomp_attach_pointer (acc_dev, aq, &acc_dev->mem_map, n, (uintptr_t) hostaddr,
+ 0, NULL);
+
+ gomp_mutex_unlock (&acc_dev->lock);
+}
+
+void
+acc_attach (void **hostaddr)
+{
+ acc_attach_async (hostaddr, acc_async_sync);
+}
+
+static void
+goacc_detach_internal (void **hostaddr, int async, bool finalize)
+{
+ struct goacc_thread *thr = goacc_thread ();
+ struct gomp_device_descr *acc_dev = thr->dev;
+ struct splay_tree_key_s cur_node;
+ splay_tree_key n;
+ struct goacc_asyncqueue *aq = get_goacc_asyncqueue (async);
+
+ if (thr->dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
+ return;
+
+ gomp_mutex_lock (&acc_dev->lock);
+
+ cur_node.host_start = (uintptr_t) hostaddr;
+ cur_node.host_end = cur_node.host_start + sizeof (void *);
+ n = splay_tree_lookup (&acc_dev->mem_map, &cur_node);
+
+ if (n == NULL)
+ {
+ gomp_mutex_unlock (&acc_dev->lock);
+ gomp_fatal ("struct not mapped for acc_detach");
+ }
+
+ gomp_detach_pointer (acc_dev, aq, n, (uintptr_t) hostaddr, finalize, NULL);
+
+ gomp_mutex_unlock (&acc_dev->lock);
+}
+
+void
+acc_detach (void **hostaddr)
+{
+ goacc_detach_internal (hostaddr, acc_async_sync, false);
+}
+
+void
+acc_detach_async (void **hostaddr, int async)
+{
+ goacc_detach_internal (hostaddr, async, false);
+}
+
+void
+acc_detach_finalize (void **hostaddr)
+{
+ goacc_detach_internal (hostaddr, acc_async_sync, true);
+}
+
+void
+acc_detach_finalize_async (void **hostaddr, int async)
+{
+ goacc_detach_internal (hostaddr, async, true);
+}
+
/* Some types of (pointer) variables use several consecutive mappings, which
must be treated as a group for enter/exit data directives. This function
returns the last mapping in such a group (inclusive), or POS for singleton
mappings. */
static int
-find_group_last (int pos, size_t mapnum, unsigned short *kinds)
+find_group_last (int pos, size_t mapnum, size_t *sizes, unsigned short *kinds)
{
unsigned char kind0 = kinds[pos] & 0xff;
- int first_pos = pos, last_pos = pos;
+ int first_pos = pos;
- if (kind0 == GOMP_MAP_TO_PSET)
- {
- while (pos + 1 < mapnum && (kinds[pos + 1] & 0xff) == GOMP_MAP_POINTER)
- last_pos = ++pos;
- /* We expect at least one GOMP_MAP_POINTER after a GOMP_MAP_TO_PSET. */
- assert (last_pos > first_pos);
- }
- else
+ switch (kind0)
{
+ case GOMP_MAP_TO_PSET:
+ if (pos + 1 < mapnum
+ && (kinds[pos + 1] & 0xff) == GOMP_MAP_ATTACH)
+ return pos + 1;
+
+ while (pos + 1 < mapnum
+ && (kinds[pos + 1] & 0xff) == GOMP_MAP_POINTER)
+ pos++;
+ /* We expect at least one GOMP_MAP_POINTER (unless there is a single
+ GOMP_MAP_ATTACH) after a GOMP_MAP_TO_PSET. */
+ assert (pos > first_pos);
+ break;
+
+ case GOMP_MAP_STRUCT:
+ pos += sizes[pos];
+ break;
+
+ case GOMP_MAP_POINTER:
+ case GOMP_MAP_ALWAYS_POINTER:
+ /* These mappings are only expected after some other mapping. If we
+ see one by itself, something has gone wrong. */
+ gomp_fatal ("unexpected mapping");
+ break;
+
+ case GOMP_MAP_ATTACH:
+ break;
+
+ default:
/* GOMP_MAP_ALWAYS_POINTER can only appear directly after some other
mapping. */
+ if (pos + 1 < mapnum)
+ {
+ unsigned char kind1 = kinds[pos + 1] & 0xff;
+ if (kind1 == GOMP_MAP_ALWAYS_POINTER)
+ return pos + 1;
+ }
+
+ /* We can have a single GOMP_MAP_ATTACH mapping after a to/from
+ mapping. */
if (pos + 1 < mapnum
- && (kinds[pos + 1] & 0xff) == GOMP_MAP_ALWAYS_POINTER)
+ && (kinds[pos + 1] & 0xff) == GOMP_MAP_ATTACH)
return pos + 1;
- /* We can have one or several GOMP_MAP_POINTER mappings after a to/from
+ /* We can have zero or more GOMP_MAP_POINTER mappings after a to/from
(etc.) mapping. */
- while (pos + 1 < mapnum && (kinds[pos + 1] & 0xff) == GOMP_MAP_POINTER)
- last_pos = ++pos;
+ while (pos + 1 < mapnum
+ && (kinds[pos + 1] & 0xff) == GOMP_MAP_POINTER)
+ pos++;
}
- return last_pos;
+ return pos;
}
/* Map variables for OpenACC "enter data". We can't just call
void **hostaddrs, size_t *sizes,
unsigned short *kinds, goacc_aq aq)
{
+ gomp_mutex_lock (&acc_dev->lock);
+
for (size_t i = 0; i < mapnum; i++)
{
- int group_last = find_group_last (i, mapnum, kinds);
+ splay_tree_key n;
+ size_t group_last = find_group_last (i, mapnum, sizes, kinds);
+ bool struct_p = false;
+ size_t size, groupnum = (group_last - i) + 1;
+
+ switch (kinds[i] & 0xff)
+ {
+ case GOMP_MAP_STRUCT:
+ {
+ size = (uintptr_t) hostaddrs[group_last] + sizes[group_last]
+ - (uintptr_t) hostaddrs[i];
+ struct_p = true;
+ }
+ break;
+
+ case GOMP_MAP_ATTACH:
+ size = sizeof (void *);
+ break;
+
+ default:
+ size = sizes[i];
+ }
+
+ n = lookup_host (acc_dev, hostaddrs[i], size);
+
+ if (n && struct_p)
+ {
+ for (size_t j = i + 1; j <= group_last; j++)
+ {
+ struct splay_tree_key_s cur_node;
+ cur_node.host_start = (uintptr_t) hostaddrs[j];
+ cur_node.host_end = cur_node.host_start + sizes[j];
+ splay_tree_key n2
+ = splay_tree_lookup (&acc_dev->mem_map, &cur_node);
+ if (!n2
+ || n2->tgt != n->tgt
+ || n2->host_start - n->host_start
+ != n2->tgt_offset - n->tgt_offset)
+ {
+ gomp_mutex_unlock (&acc_dev->lock);
+ gomp_fatal ("Trying to map into device [%p..%p) structure "
+ "element when other mapped elements from the "
+ "same structure weren't mapped together with "
+ "it", (void *) cur_node.host_start,
+ (void *) cur_node.host_end);
+ }
+ }
+ /* This is a special case because we must increment the refcount by
+ the number of mapped struct elements, rather than by one. */
+ if (n->refcount != REFCOUNT_INFINITY)
+ n->refcount += groupnum - 1;
+ n->dynamic_refcount += groupnum - 1;
+ }
+ else if (n && groupnum == 1)
+ {
+ void *h = hostaddrs[i];
+ size_t s = sizes[i];
+
+ if ((kinds[i] & 0xff) == GOMP_MAP_ATTACH)
+ {
+ gomp_attach_pointer (acc_dev, aq, &acc_dev->mem_map, n,
+ (uintptr_t) h, s, NULL);
+ /* OpenACC 'attach'/'detach' doesn't affect structured/dynamic
+ reference counts ('n->refcount', 'n->dynamic_refcount'). */
+ }
+ else
+ goacc_map_var_existing (acc_dev, h, s, n);
+ }
+ else if (n && groupnum > 1)
+ {
+ assert (n->refcount != REFCOUNT_INFINITY
+ && n->refcount != REFCOUNT_LINK);
+
+ for (size_t j = i + 1; j <= group_last; j++)
+ if ((kinds[j] & 0xff) == GOMP_MAP_ATTACH)
+ {
+ splay_tree_key m
+ = lookup_host (acc_dev, hostaddrs[j], sizeof (void *));
+ gomp_attach_pointer (acc_dev, aq, &acc_dev->mem_map, m,
+ (uintptr_t) hostaddrs[j], sizes[j], NULL);
+ }
+
+ bool processed = false;
+
+ struct target_mem_desc *tgt = n->tgt;
+ for (size_t j = 0; j < tgt->list_count; j++)
+ if (tgt->list[j].key == n)
+ {
+ /* We are processing a group of mappings (e.g.
+ [GOMP_MAP_TO, GOMP_MAP_TO_PSET, GOMP_MAP_POINTER]).
+ Find the right group in the target_mem_desc's variable
+ list, and increment the refcounts for each item in that
+ group. */
+ for (size_t k = 0; k < groupnum; k++)
+ if (j + k < tgt->list_count
+ && tgt->list[j + k].key
+ && !tgt->list[j + k].is_attach)
+ {
+ tgt->list[j + k].key->refcount++;
+ tgt->list[j + k].key->dynamic_refcount++;
+ }
+ processed = true;
+ break;
+ }
- gomp_map_vars_async (acc_dev, aq,
- (group_last - i) + 1,
- &hostaddrs[i], NULL,
- &sizes[i], &kinds[i], true,
- GOMP_MAP_VARS_OPENACC_ENTER_DATA);
+ if (!processed)
+ {
+ gomp_mutex_unlock (&acc_dev->lock);
+ gomp_fatal ("dynamic refcount incrementing failed for "
+ "pointer/pset");
+ }
+ }
+ else if (hostaddrs[i])
+ {
+ /* The data is not mapped already. Map it now, unless the first
+ member in the group has a NULL pointer (e.g. a non-present
+ optional parameter). */
+ gomp_mutex_unlock (&acc_dev->lock);
+
+ struct target_mem_desc *tgt
+ = gomp_map_vars_async (acc_dev, aq, groupnum, &hostaddrs[i], NULL,
+ &sizes[i], &kinds[i], true,
+ (GOMP_MAP_VARS_OPENACC
+ | GOMP_MAP_VARS_ENTER_DATA));
+ assert (tgt);
+
+ gomp_mutex_lock (&acc_dev->lock);
+
+ for (size_t j = 0; j < tgt->list_count; j++)
+ {
+ n = tgt->list[j].key;
+ if (n && !tgt->list[j].is_attach)
+ n->dynamic_refcount++;
+ }
+ }
i = group_last;
}
+
+ gomp_mutex_unlock (&acc_dev->lock);
}
/* Unmap variables for OpenACC "exit data". */
{
gomp_mutex_lock (&acc_dev->lock);
+ /* Handle "detach" before copyback/deletion of mapped data. */
for (size_t i = 0; i < mapnum; ++i)
{
unsigned char kind = kinds[i] & 0xff;
- bool copyfrom = false;
bool finalize = false;
+ switch (kind)
+ {
+ case GOMP_MAP_FORCE_DETACH:
+ finalize = true;
+ /* Fallthrough. */
- if (kind == GOMP_MAP_FORCE_FROM
- || kind == GOMP_MAP_DELETE)
- finalize = true;
+ case GOMP_MAP_DETACH:
+ {
+ struct splay_tree_key_s cur_node;
+ uintptr_t hostaddr = (uintptr_t) hostaddrs[i];
+ cur_node.host_start = hostaddr;
+ cur_node.host_end = cur_node.host_start + sizeof (void *);
+ splay_tree_key n
+ = splay_tree_lookup (&acc_dev->mem_map, &cur_node);
+
+ if (n == NULL)
+ {
+ gomp_mutex_unlock (&acc_dev->lock);
+ gomp_fatal ("struct not mapped for detach operation");
+ }
+
+ gomp_detach_pointer (acc_dev, aq, n, hostaddr, finalize, NULL);
+ }
+ break;
+ default:
+ ;
+ }
+ }
+
+ for (size_t i = 0; i < mapnum; ++i)
+ {
+ unsigned char kind = kinds[i] & 0xff;
switch (kind)
{
case GOMP_MAP_FROM:
case GOMP_MAP_FORCE_FROM:
- case GOMP_MAP_ALWAYS_FROM:
- copyfrom = true;
- /* Fallthrough. */
-
case GOMP_MAP_TO_PSET:
case GOMP_MAP_POINTER:
case GOMP_MAP_DELETE:
if (n == NULL)
continue;
- if (finalize)
- {
- if (n->refcount != REFCOUNT_INFINITY)
- n->refcount -= n->virtual_refcount;
- n->virtual_refcount = 0;
- }
-
- if (n->virtual_refcount > 0)
- {
- if (n->refcount != REFCOUNT_INFINITY)
- n->refcount--;
- n->virtual_refcount--;
- }
- else if (n->refcount > 0 && n->refcount != REFCOUNT_INFINITY)
- n->refcount--;
-
- if (copyfrom
- && (kind != GOMP_MAP_FROM || n->refcount == 0))
- gomp_copy_dev2host (acc_dev, aq, (void *) cur_node.host_start,
- (void *) (n->tgt->tgt_start + n->tgt_offset
- + cur_node.host_start
- - n->host_start),
- cur_node.host_end - cur_node.host_start);
-
- if (n->refcount == 0)
- gomp_remove_var_async (acc_dev, n, aq);
+ goacc_exit_datum_1 (acc_dev, hostaddrs[i], size, kind, n, aq);
}
break;
+
+ case GOMP_MAP_STRUCT:
+ /* Skip the 'GOMP_MAP_STRUCT' itself, and use the regular processing
+ for all its entries. This special handling exists for GCC 10.1
+ compatibility; afterwards, we're not generating these no-op
+ 'GOMP_MAP_STRUCT's anymore. */
+ break;
+
+ case GOMP_MAP_DETACH:
+ case GOMP_MAP_FORCE_DETACH:
+ /* OpenACC 'attach'/'detach' doesn't affect structured/dynamic
+ reference counts ('n->refcount', 'n->dynamic_refcount'). */
+ break;
+
default:
gomp_fatal (">>>> goacc_exit_data_internal UNHANDLED kind 0x%.2x",
kind);
{
unsigned char kind = kinds[i] & 0xff;
- if (kind == GOMP_MAP_POINTER || kind == GOMP_MAP_TO_PSET)
+ if (kind == GOMP_MAP_POINTER
+ || kind == GOMP_MAP_TO_PSET
+ || kind == GOMP_MAP_STRUCT)
continue;
if (kind == GOMP_MAP_FORCE_ALLOC
|| kind == GOMP_MAP_FORCE_PRESENT
+ || kind == GOMP_MAP_ATTACH
|| kind == GOMP_MAP_FORCE_TO
|| kind == GOMP_MAP_TO
|| kind == GOMP_MAP_ALLOC)
if (kind == GOMP_MAP_RELEASE
|| kind == GOMP_MAP_DELETE
+ || kind == GOMP_MAP_DETACH
+ || kind == GOMP_MAP_FORCE_DETACH
|| kind == GOMP_MAP_FROM
|| kind == GOMP_MAP_FORCE_FROM)
break;