Fix potential race condition in OpenACC "exit data" operations
libgomp/oacc-mem.c
/* OpenACC Runtime initialization routines

   Copyright (C) 2013-2019 Free Software Foundation, Inc.

   Contributed by Mentor Embedded.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#include "openacc.h"
#include "libgomp.h"
#include "gomp-constants.h"
#include "oacc-int.h"
#include <string.h>
#include <assert.h>

/* Return block containing [H->S), or NULL if not contained.  The device lock
   for DEV must be locked on entry, and remains locked on exit.  */

static splay_tree_key
lookup_host (struct gomp_device_descr *dev, void *h, size_t s)
{
  struct splay_tree_key_s node;
  splay_tree_key key;

  node.host_start = (uintptr_t) h;
  node.host_end = (uintptr_t) h + s;

  key = splay_tree_lookup (&dev->mem_map, &node);

  return key;
}

/* Helper for lookup_dev.  Iterate over splay tree.  */

static splay_tree_key
lookup_dev_1 (splay_tree_node node, uintptr_t d, size_t s)
{
  splay_tree_key key = &node->key;
  if (d >= key->tgt->tgt_start && d + s <= key->tgt->tgt_end)
    return key;

  key = NULL;
  if (node->left)
    key = lookup_dev_1 (node->left, d, s);
  if (!key && node->right)
    key = lookup_dev_1 (node->right, d, s);

  return key;
}

/* Return block containing [D->S), or NULL if not contained.

   This iterates over the splay tree.  This is not expected to be a common
   operation.

   The device lock associated with MEM_MAP must be locked on entry, and
   remains locked on exit.  */

static splay_tree_key
lookup_dev (splay_tree mem_map, void *d, size_t s)
{
  if (!mem_map || !mem_map->root)
    return NULL;

  return lookup_dev_1 (mem_map->root, (uintptr_t) d, s);
}

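/* Illustration (not part of the library): lookup_dev answers "which mapped
   block contains device range [D,D+S)?".  E.g., for a block with
   tgt_start == 0x1000 and tgt_end == 0x2000, a query with d == 0x1800 and
   s == 8 is contained ([0x1800,0x1808) lies inside [0x1000,0x2000)),
   whereas d == 0x1ffc with s == 8 is not, since it runs past tgt_end.  */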

/* OpenACC is silent on how memory exhaustion is indicated.  We return
   NULL.  */

void *
acc_malloc (size_t s)
{
  if (!s)
    return NULL;

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();

  assert (thr->dev);

  if (thr->dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return malloc (s);

  acc_prof_info prof_info;
  acc_api_info api_info;
  bool profiling_p = GOACC_PROFILING_SETUP_P (thr, &prof_info, &api_info);

  void *res = thr->dev->alloc_func (thr->dev->target_id, s);

  if (profiling_p)
    {
      thr->prof_info = NULL;
      thr->api_info = NULL;
    }

  return res;
}

void
acc_free (void *d)
{
  splay_tree_key k;

  if (!d)
    return;

  struct goacc_thread *thr = goacc_thread ();

  assert (thr && thr->dev);

  struct gomp_device_descr *acc_dev = thr->dev;

  if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return free (d);

  acc_prof_info prof_info;
  acc_api_info api_info;
  bool profiling_p = GOACC_PROFILING_SETUP_P (thr, &prof_info, &api_info);

  gomp_mutex_lock (&acc_dev->lock);

  /* We don't have to call lazy open here, as the ptr value must have
     been returned by acc_malloc.  It's not permitted to pass NULL in
     (unless you got that null from acc_malloc).  */
  if ((k = lookup_dev (&acc_dev->mem_map, d, 1)))
    {
      void *offset = d - k->tgt->tgt_start + k->tgt_offset;
      void *h = k->host_start + offset;
      size_t h_size = k->host_end - k->host_start;
      gomp_mutex_unlock (&acc_dev->lock);
      /* PR92503 "[OpenACC] Behavior of 'acc_free' if the memory space is still
	 used in a mapping".  */
      gomp_fatal ("refusing to free device memory space at %p that is still"
		  " mapped at [%p,+%d]",
		  d, h, (int) h_size);
    }
  else
    gomp_mutex_unlock (&acc_dev->lock);

  if (!acc_dev->free_func (acc_dev->target_id, d))
    gomp_fatal ("error in freeing device memory in %s", __FUNCTION__);

  if (profiling_p)
    {
      thr->prof_info = NULL;
      thr->api_info = NULL;
    }
}

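/* Usage sketch (user code, not part of the library; 'n' is a hypothetical
   size):

     #include <openacc.h>

     void *d = acc_malloc (n);
     if (d == NULL)
       abort ();  // allocation failed (or n was zero)
     ...
     acc_free (d);

   Note that acc_free refuses (fatally) to free memory that still backs a
   live mapping; see the PR92503 reference above.  */
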
static void
memcpy_tofrom_device (bool from, void *d, void *h, size_t s, int async,
		      const char *libfnname)
{
  /* No need to call lazy open here, as the device pointer must have
     been obtained from a routine that did that.  */
  struct goacc_thread *thr = goacc_thread ();

  assert (thr && thr->dev);

  if (thr->dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    {
      if (from)
	memmove (h, d, s);
      else
	memmove (d, h, s);
      return;
    }

  acc_prof_info prof_info;
  acc_api_info api_info;
  bool profiling_p = GOACC_PROFILING_SETUP_P (thr, &prof_info, &api_info);
  if (profiling_p)
    {
      prof_info.async = async;
      prof_info.async_queue = prof_info.async;
    }

  goacc_aq aq = get_goacc_asyncqueue (async);
  if (from)
    gomp_copy_dev2host (thr->dev, aq, h, d, s);
  else
    gomp_copy_host2dev (thr->dev, aq, d, h, s, /* TODO: cbuf? */ NULL);

  if (profiling_p)
    {
      thr->prof_info = NULL;
      thr->api_info = NULL;
    }
}

void
acc_memcpy_to_device (void *d, void *h, size_t s)
{
  memcpy_tofrom_device (false, d, h, s, acc_async_sync, __FUNCTION__);
}

void
acc_memcpy_to_device_async (void *d, void *h, size_t s, int async)
{
  memcpy_tofrom_device (false, d, h, s, async, __FUNCTION__);
}

void
acc_memcpy_from_device (void *h, void *d, size_t s)
{
  memcpy_tofrom_device (true, d, h, s, acc_async_sync, __FUNCTION__);
}

void
acc_memcpy_from_device_async (void *h, void *d, size_t s, int async)
{
  memcpy_tofrom_device (true, d, h, s, async, __FUNCTION__);
}

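/* Usage sketch (user code; 'host_buf' and 'N' are hypothetical): round-trip
   a buffer through device memory.

     #include <openacc.h>

     float host_buf[N];
     void *dev_buf = acc_malloc (sizeof host_buf);
     acc_memcpy_to_device (dev_buf, host_buf, sizeof host_buf);
     ...  // kernels operate on dev_buf
     acc_memcpy_from_device (host_buf, dev_buf, sizeof host_buf);
     acc_free (dev_buf);

   On shared-memory devices these degenerate to memmove, per above.  */
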
/* Return the device pointer that corresponds to host data H.  Or NULL
   if no mapping.  */

void *
acc_deviceptr (void *h)
{
  splay_tree_key n;
  void *d;
  void *offset;

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *dev = thr->dev;

  if (thr->dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return h;

  /* In the following, no OpenACC Profiling Interface events can possibly be
     generated.  */

  gomp_mutex_lock (&dev->lock);

  n = lookup_host (dev, h, 1);

  if (!n)
    {
      gomp_mutex_unlock (&dev->lock);
      return NULL;
    }

  offset = h - n->host_start;

  d = n->tgt->tgt_start + n->tgt_offset + offset;

  gomp_mutex_unlock (&dev->lock);

  return d;
}

/* Return the host pointer that corresponds to device data D.  Or NULL
   if no mapping.  */

void *
acc_hostptr (void *d)
{
  splay_tree_key n;
  void *h;
  void *offset;

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  if (thr->dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return d;

  /* In the following, no OpenACC Profiling Interface events can possibly be
     generated.  */

  gomp_mutex_lock (&acc_dev->lock);

  n = lookup_dev (&acc_dev->mem_map, d, 1);

  if (!n)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      return NULL;
    }

  offset = d - n->tgt->tgt_start + n->tgt_offset;

  h = n->host_start + offset;

  gomp_mutex_unlock (&acc_dev->lock);

  return h;
}

/* Return 1 if host data [H,+S] is present on the device.  */

int
acc_is_present (void *h, size_t s)
{
  splay_tree_key n;

  if (!s || !h)
    return 0;

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  if (thr->dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return h != NULL;

  /* In the following, no OpenACC Profiling Interface events can possibly be
     generated.  */

  gomp_mutex_lock (&acc_dev->lock);

  n = lookup_host (acc_dev, h, s);

  if (n && ((uintptr_t)h < n->host_start
	    || (uintptr_t)h + s > n->host_end
	    || s > n->host_end - n->host_start))
    n = NULL;

  gomp_mutex_unlock (&acc_dev->lock);

  return n != NULL;
}

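/* Usage sketch (user code; 'a' and 'n' are hypothetical): after a mapping
   has been established, translate between the two address spaces.

     #include <openacc.h>

     float *a = ...;
     acc_copyin (a, n * sizeof *a);       // establish a mapping
     if (acc_is_present (a, n * sizeof *a))
       {
         void *d = acc_deviceptr (a);     // device address of 'a'
         void *h = acc_hostptr (d);       // back-translates; h == a
       }
*/
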
/* Create a mapping for host [H,+S] -> device [D,+S] */

void
acc_map_data (void *h, void *d, size_t s)
{
  struct target_mem_desc *tgt = NULL;
  size_t mapnum = 1;
  void *hostaddrs = h;
  void *devaddrs = d;
  size_t sizes = s;
  unsigned short kinds = GOMP_MAP_ALLOC;

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    {
      if (d != h)
	gomp_fatal ("cannot map data on shared-memory system");
    }
  else
    {
      if (!d || !h || !s)
	gomp_fatal ("[%p,+%d]->[%p,+%d] is a bad map",
		    (void *)h, (int)s, (void *)d, (int)s);

      acc_prof_info prof_info;
      acc_api_info api_info;
      bool profiling_p = GOACC_PROFILING_SETUP_P (thr, &prof_info, &api_info);

      gomp_mutex_lock (&acc_dev->lock);

      if (lookup_host (acc_dev, h, s))
	{
	  gomp_mutex_unlock (&acc_dev->lock);
	  gomp_fatal ("host address [%p, +%d] is already mapped", (void *)h,
		      (int)s);
	}

      if (lookup_dev (&thr->dev->mem_map, d, s))
	{
	  gomp_mutex_unlock (&acc_dev->lock);
	  gomp_fatal ("device address [%p, +%d] is already mapped", (void *)d,
		      (int)s);
	}

      gomp_mutex_unlock (&acc_dev->lock);

      tgt = gomp_map_vars (acc_dev, mapnum, &hostaddrs, &devaddrs, &sizes,
			   &kinds, true, GOMP_MAP_VARS_OPENACC);
      splay_tree_key n = tgt->list[0].key;
      assert (n->refcount == 1);
      assert (n->dynamic_refcount == 0);
      /* Special reference counting behavior.  */
      n->refcount = REFCOUNT_INFINITY;

      if (profiling_p)
	{
	  thr->prof_info = NULL;
	  thr->api_info = NULL;
	}
    }
}

void
acc_unmap_data (void *h)
{
  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  /* No need to call lazy open, as the address must have been mapped.  */

  /* This is a no-op on shared-memory targets.  */
  if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return;

  acc_prof_info prof_info;
  acc_api_info api_info;
  bool profiling_p = GOACC_PROFILING_SETUP_P (thr, &prof_info, &api_info);

  size_t host_size;

  gomp_mutex_lock (&acc_dev->lock);

  splay_tree_key n = lookup_host (acc_dev, h, 1);
  struct target_mem_desc *t;

  if (!n)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("%p is not a mapped block", (void *)h);
    }

  host_size = n->host_end - n->host_start;

  if (n->host_start != (uintptr_t) h)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("[%p,%d] surrounds %p",
		  (void *) n->host_start, (int) host_size, (void *) h);
    }
  /* TODO This currently doesn't catch 'REFCOUNT_INFINITY' usage different from
     'acc_map_data'.  Maybe 'dynamic_refcount' can be used for disambiguating
     the different 'REFCOUNT_INFINITY' cases, or simply separate
     'REFCOUNT_INFINITY' values per different usage ('REFCOUNT_ACC_MAP_DATA'
     etc.)?  */
  else if (n->refcount != REFCOUNT_INFINITY)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("refusing to unmap block [%p,+%d] that has not been mapped"
		  " by 'acc_map_data'",
		  (void *) h, (int) host_size);
    }

  /* Mark for removal.  */
  n->refcount = 1;

  t = n->tgt;

  if (t->refcount == 2)
    {
      /* This is the last reference, so pull the descriptor off the
	 chain.  This avoids gomp_unmap_vars via gomp_unmap_tgt from
	 freeing the device memory.  */
      t->tgt_end = 0;
      t->to_free = 0;
    }

  gomp_mutex_unlock (&acc_dev->lock);

  gomp_unmap_vars (t, true);

  if (profiling_p)
    {
      thr->prof_info = NULL;
      thr->api_info = NULL;
    }
}

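/* Usage sketch (user code; 'h' and 's' are hypothetical): wire up device
   memory obtained from acc_malloc as the device copy of a host object.

     void *d = acc_malloc (s);
     acc_map_data (h, d, s);   // refcount pinned to REFCOUNT_INFINITY
     ...                       // data clauses now treat [h,h+s) as present
     acc_unmap_data (h);       // severs the mapping, does not free d
     acc_free (d);

   acc_unmap_data only removes the mapping; the device memory must still be
   released explicitly.  */
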
#define FLAG_PRESENT (1 << 0)
#define FLAG_CREATE (1 << 1)
#define FLAG_COPY (1 << 2)

static void *
present_create_copy (unsigned f, void *h, size_t s, int async)
{
  void *d;
  splay_tree_key n;

  if (!h || !s)
    gomp_fatal ("[%p,+%d] is a bad range", (void *)h, (int)s);

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return h;

  acc_prof_info prof_info;
  acc_api_info api_info;
  bool profiling_p = GOACC_PROFILING_SETUP_P (thr, &prof_info, &api_info);
  if (profiling_p)
    {
      prof_info.async = async;
      prof_info.async_queue = prof_info.async;
    }

  gomp_mutex_lock (&acc_dev->lock);

  n = lookup_host (acc_dev, h, s);
  if (n)
    {
      /* Present.  */
      d = (void *) (n->tgt->tgt_start + n->tgt_offset + h - n->host_start);

      if (!(f & FLAG_PRESENT))
	{
	  gomp_mutex_unlock (&acc_dev->lock);
	  gomp_fatal ("[%p,+%d] already mapped to [%p,+%d]",
		      (void *)h, (int)s, (void *)d, (int)s);
	}
      if ((h + s) > (void *)n->host_end)
	{
	  gomp_mutex_unlock (&acc_dev->lock);
	  gomp_fatal ("[%p,+%d] not mapped", (void *)h, (int)s);
	}

      assert (n->refcount != REFCOUNT_LINK);
      if (n->refcount != REFCOUNT_INFINITY)
	n->refcount++;
      n->dynamic_refcount++;

      gomp_mutex_unlock (&acc_dev->lock);
    }
  else if (!(f & FLAG_CREATE))
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("[%p,+%d] not mapped", (void *)h, (int)s);
    }
  else
    {
      struct target_mem_desc *tgt;
      size_t mapnum = 1;
      unsigned short kinds;
      void *hostaddrs = h;

      if (f & FLAG_COPY)
	kinds = GOMP_MAP_TO;
      else
	kinds = GOMP_MAP_ALLOC;

      gomp_mutex_unlock (&acc_dev->lock);

      goacc_aq aq = get_goacc_asyncqueue (async);

      tgt = gomp_map_vars_async (acc_dev, aq, mapnum, &hostaddrs, NULL, &s,
				 &kinds, true, GOMP_MAP_VARS_OPENACC);
      n = tgt->list[0].key;
      assert (n->refcount == 1);
      assert (n->dynamic_refcount == 0);
      n->dynamic_refcount++;

      d = tgt->to_free;
    }

  if (profiling_p)
    {
      thr->prof_info = NULL;
      thr->api_info = NULL;
    }

  return d;
}

void *
acc_create (void *h, size_t s)
{
  return present_create_copy (FLAG_PRESENT | FLAG_CREATE, h, s, acc_async_sync);
}

void
acc_create_async (void *h, size_t s, int async)
{
  present_create_copy (FLAG_PRESENT | FLAG_CREATE, h, s, async);
}

/* acc_present_or_create used to be what acc_create is now.  */
/* acc_pcreate is acc_present_or_create by a different name.  */
#ifdef HAVE_ATTRIBUTE_ALIAS
strong_alias (acc_create, acc_present_or_create)
strong_alias (acc_create, acc_pcreate)
#else
void *
acc_present_or_create (void *h, size_t s)
{
  return acc_create (h, s);
}

void *
acc_pcreate (void *h, size_t s)
{
  return acc_create (h, s);
}
#endif

void *
acc_copyin (void *h, size_t s)
{
  return present_create_copy (FLAG_PRESENT | FLAG_CREATE | FLAG_COPY, h, s,
			      acc_async_sync);
}

void
acc_copyin_async (void *h, size_t s, int async)
{
  present_create_copy (FLAG_PRESENT | FLAG_CREATE | FLAG_COPY, h, s, async);
}

/* acc_present_or_copyin used to be what acc_copyin is now.  */
/* acc_pcopyin is acc_present_or_copyin by a different name.  */
#ifdef HAVE_ATTRIBUTE_ALIAS
strong_alias (acc_copyin, acc_present_or_copyin)
strong_alias (acc_copyin, acc_pcopyin)
#else
void *
acc_present_or_copyin (void *h, size_t s)
{
  return acc_copyin (h, s);
}

void *
acc_pcopyin (void *h, size_t s)
{
  return acc_copyin (h, s);
}
#endif

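/* Usage sketch (user code; 'a' and 'n' hypothetical): the dynamic reference
   count makes nested enter/exit pairs compose.

     acc_copyin (a, n);   // maps [a,a+n), copies in; refcounts 1/1
     acc_copyin (a, n);   // already present: refcounts 2/2, no copy
     acc_delete (a, n);   // refcounts 1/1, data stays on the device
     acc_delete (a, n);   // refcounts 0/0, device block released
*/
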
#define FLAG_COPYOUT (1 << 0)
#define FLAG_FINALIZE (1 << 1)

static void
delete_copyout (unsigned f, void *h, size_t s, int async, const char *libfnname)
{
  splay_tree_key n;
  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return;

  acc_prof_info prof_info;
  acc_api_info api_info;
  bool profiling_p = GOACC_PROFILING_SETUP_P (thr, &prof_info, &api_info);
  if (profiling_p)
    {
      prof_info.async = async;
      prof_info.async_queue = prof_info.async;
    }

  gomp_mutex_lock (&acc_dev->lock);

  n = lookup_host (acc_dev, h, s);

  /* No need to call lazy open, as the data must already have been
     mapped.  */

  if (!n)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("[%p,%d] is not mapped", (void *)h, (int)s);
    }

  if ((uintptr_t) h < n->host_start || (uintptr_t) h + s > n->host_end)
    {
      size_t host_size = n->host_end - n->host_start;
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("[%p,+%d] outside mapped block [%p,+%d]",
		  (void *) h, (int) s, (void *) n->host_start, (int) host_size);
    }

  assert (n->refcount != REFCOUNT_LINK);
  if (n->refcount != REFCOUNT_INFINITY
      && n->refcount < n->dynamic_refcount)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("Dynamic reference counting assert fail\n");
    }

  if (f & FLAG_FINALIZE)
    {
      if (n->refcount != REFCOUNT_INFINITY)
	n->refcount -= n->dynamic_refcount;
      n->dynamic_refcount = 0;
    }
  else if (n->dynamic_refcount)
    {
      if (n->refcount != REFCOUNT_INFINITY)
	n->refcount--;
      n->dynamic_refcount--;
    }

  if (n->refcount == 0)
    {
      goacc_aq aq = get_goacc_asyncqueue (async);

      if (f & FLAG_COPYOUT)
	{
	  void *d = (void *) (n->tgt->tgt_start + n->tgt_offset
			      + (uintptr_t) h - n->host_start);
	  gomp_copy_dev2host (acc_dev, aq, h, d, s);
	}
      gomp_remove_var_async (acc_dev, n, aq);
    }

  gomp_mutex_unlock (&acc_dev->lock);

  if (profiling_p)
    {
      thr->prof_info = NULL;
      thr->api_info = NULL;
    }
}

void
acc_delete (void *h, size_t s)
{
  delete_copyout (0, h, s, acc_async_sync, __FUNCTION__);
}

void
acc_delete_async (void *h, size_t s, int async)
{
  delete_copyout (0, h, s, async, __FUNCTION__);
}

void
acc_delete_finalize (void *h, size_t s)
{
  delete_copyout (FLAG_FINALIZE, h, s, acc_async_sync, __FUNCTION__);
}

void
acc_delete_finalize_async (void *h, size_t s, int async)
{
  delete_copyout (FLAG_FINALIZE, h, s, async, __FUNCTION__);
}

void
acc_copyout (void *h, size_t s)
{
  delete_copyout (FLAG_COPYOUT, h, s, acc_async_sync, __FUNCTION__);
}

void
acc_copyout_async (void *h, size_t s, int async)
{
  delete_copyout (FLAG_COPYOUT, h, s, async, __FUNCTION__);
}

void
acc_copyout_finalize (void *h, size_t s)
{
  delete_copyout (FLAG_COPYOUT | FLAG_FINALIZE, h, s, acc_async_sync,
		  __FUNCTION__);
}

void
acc_copyout_finalize_async (void *h, size_t s, int async)
{
  delete_copyout (FLAG_COPYOUT | FLAG_FINALIZE, h, s, async, __FUNCTION__);
}

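/* Usage sketch (user code): the _finalize variants zero the dynamic
   reference count in one step instead of decrementing it.

     acc_copyin (a, n);
     acc_copyin (a, n);            // dynamic refcount now 2
     acc_copyout_finalize (a, n);  // drops both references, copies out,
                                   // and removes the mapping
*/
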
static void
update_dev_host (int is_dev, void *h, size_t s, int async)
{
  splay_tree_key n;
  void *d;

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return;

  /* Fortran optional arguments that are non-present result in a
     NULL host address here.  This can safely be ignored as it is
     not possible to 'update' a non-present optional argument.  */
  if (h == NULL)
    return;

  acc_prof_info prof_info;
  acc_api_info api_info;
  bool profiling_p = GOACC_PROFILING_SETUP_P (thr, &prof_info, &api_info);
  if (profiling_p)
    {
      prof_info.async = async;
      prof_info.async_queue = prof_info.async;
    }

  gomp_mutex_lock (&acc_dev->lock);

  n = lookup_host (acc_dev, h, s);

  if (!n)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("[%p,%d] is not mapped", h, (int)s);
    }

  d = (void *) (n->tgt->tgt_start + n->tgt_offset
		+ (uintptr_t) h - n->host_start);

  goacc_aq aq = get_goacc_asyncqueue (async);

  if (is_dev)
    gomp_copy_host2dev (acc_dev, aq, d, h, s, /* TODO: cbuf? */ NULL);
  else
    gomp_copy_dev2host (acc_dev, aq, h, d, s);

  gomp_mutex_unlock (&acc_dev->lock);

  if (profiling_p)
    {
      thr->prof_info = NULL;
      thr->api_info = NULL;
    }
}

void
acc_update_device (void *h, size_t s)
{
  update_dev_host (1, h, s, acc_async_sync);
}

void
acc_update_device_async (void *h, size_t s, int async)
{
  update_dev_host (1, h, s, async);
}

void
acc_update_self (void *h, size_t s)
{
  update_dev_host (0, h, s, acc_async_sync);
}

void
acc_update_self_async (void *h, size_t s, int async)
{
  update_dev_host (0, h, s, async);
}

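/* Usage sketch (user code): refresh one side of an existing mapping
   without changing reference counts.

     acc_copyin (a, n);
     ...                        // host modifies a
     acc_update_device (a, n);  // push new host values to the device copy
     ...                        // device kernels modify the copy
     acc_update_self (a, n);    // pull device values back to the host
*/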

/* OpenACC 'enter data', 'exit data': 'GOACC_enter_exit_data' and its helper
   functions.  */

/* Special handling for 'GOMP_MAP_POINTER', 'GOMP_MAP_TO_PSET'.

   Only the first mapping is considered in reference counting; the following
   ones implicitly follow suit.  */

static void
goacc_insert_pointer (size_t mapnum, void **hostaddrs, size_t *sizes,
		      void *kinds, int async)
{
  struct target_mem_desc *tgt;
  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  if (*hostaddrs == NULL)
    return;

  if (acc_is_present (*hostaddrs, *sizes))
    {
      splay_tree_key n;
      gomp_mutex_lock (&acc_dev->lock);
      n = lookup_host (acc_dev, *hostaddrs, *sizes);
      assert (n->refcount != REFCOUNT_INFINITY
	      && n->refcount != REFCOUNT_LINK);
      gomp_mutex_unlock (&acc_dev->lock);

      tgt = n->tgt;
      for (size_t i = 0; i < tgt->list_count; i++)
	if (tgt->list[i].key == n)
	  {
	    for (size_t j = 0; j < mapnum; j++)
	      if (i + j < tgt->list_count && tgt->list[i + j].key)
		{
		  tgt->list[i + j].key->refcount++;
		  tgt->list[i + j].key->dynamic_refcount++;
		}
	    return;
	  }
      /* Should not reach here.  */
      gomp_fatal ("Dynamic refcount incrementing failed for pointer/pset");
    }

  gomp_debug (0, "  %s: prepare mappings\n", __FUNCTION__);
  goacc_aq aq = get_goacc_asyncqueue (async);
  tgt = gomp_map_vars_async (acc_dev, aq, mapnum, hostaddrs,
			     NULL, sizes, kinds, true, GOMP_MAP_VARS_OPENACC);
  splay_tree_key n = tgt->list[0].key;
  assert (n->refcount == 1);
  assert (n->dynamic_refcount == 0);
  n->dynamic_refcount++;
  gomp_debug (0, "  %s: mappings prepared\n", __FUNCTION__);
}

static void
goacc_remove_pointer (void *h, size_t s, bool force_copyfrom, int async,
		      int finalize, int mapnum)
{
  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;
  splay_tree_key n;
  struct target_mem_desc *t;
  int minrefs = (mapnum == 1) ? 2 : 3;

  if (!acc_is_present (h, s))
    return;

  gomp_mutex_lock (&acc_dev->lock);

  n = lookup_host (acc_dev, h, 1);

  if (!n)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("%p is not a mapped block", (void *)h);
    }

  gomp_debug (0, "  %s: restore mappings\n", __FUNCTION__);

  t = n->tgt;

  assert (n->refcount != REFCOUNT_INFINITY
	  && n->refcount != REFCOUNT_LINK);
  if (n->refcount < n->dynamic_refcount)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("Dynamic reference counting assert fail\n");
    }

  if (finalize)
    {
      n->refcount -= n->dynamic_refcount;
      n->dynamic_refcount = 0;
    }
  else if (n->dynamic_refcount)
    {
      n->refcount--;
      n->dynamic_refcount--;
    }

  /* Release the lock here: gomp_unmap_vars / gomp_unmap_vars_async acquire
     the device lock themselves.  */
  gomp_mutex_unlock (&acc_dev->lock);

  if (n->refcount == 0)
    {
      /* Set refcount to 1 to allow gomp_unmap_vars to unmap it.  */
      n->refcount = 1;
      t->refcount = minrefs;
      for (size_t i = 0; i < t->list_count; i++)
	if (t->list[i].key == n)
	  {
	    t->list[i].copy_from = force_copyfrom ? 1 : 0;
	    break;
	  }

      /* If running synchronously, unmap immediately.  */
      if (async < acc_async_noval)
	gomp_unmap_vars (t, true);
      else
	{
	  goacc_aq aq = get_goacc_asyncqueue (async);
	  gomp_unmap_vars_async (t, true, aq);
	}
    }

  gomp_debug (0, "  %s: mappings restored\n", __FUNCTION__);
}

/* Return the number of mappings associated with 'GOMP_MAP_TO_PSET' or
   'GOMP_MAP_POINTER'.  */

static int
find_pointer (int pos, size_t mapnum, unsigned short *kinds)
{
  if (pos + 1 >= mapnum)
    return 0;

  unsigned char kind = kinds[pos+1] & 0xff;

  if (kind == GOMP_MAP_TO_PSET)
    return 3;
  else if (kind == GOMP_MAP_POINTER)
    return 2;

  return 0;
}

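/* Illustration: for a C subarray such as 'ptr[0:n]' the compiler emits two
   mappings, and for a Fortran allocatable array three, e.g.:

     kinds[i]     = GOMP_MAP_TO        // the data itself
     kinds[i + 1] = GOMP_MAP_TO_PSET   // the array descriptor (Fortran)
     kinds[i + 2] = GOMP_MAP_POINTER   // pointer to the data

   find_pointer (i, ...) returns 3 for the PSET form, 2 for the bare
   GOMP_MAP_POINTER form, and 0 for a single stand-alone mapping.  */
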
void
GOACC_enter_exit_data (int flags_m, size_t mapnum, void **hostaddrs,
		       size_t *sizes, unsigned short *kinds, int async,
		       int num_waits, ...)
{
  int flags = GOACC_FLAGS_UNMARSHAL (flags_m);

  struct goacc_thread *thr;
  struct gomp_device_descr *acc_dev;
  bool data_enter = false;
  size_t i;

  goacc_lazy_initialize ();

  thr = goacc_thread ();
  acc_dev = thr->dev;

  /* Determine whether "finalize" semantics apply to all mappings of this
     OpenACC directive.  */
  bool finalize = false;
  if (mapnum > 0)
    {
      unsigned char kind = kinds[0] & 0xff;
      if (kind == GOMP_MAP_DELETE
	  || kind == GOMP_MAP_FORCE_FROM)
	finalize = true;
    }

  /* Determine if this is an "acc enter data".  */
  for (i = 0; i < mapnum; ++i)
    {
      unsigned char kind = kinds[i] & 0xff;

      if (kind == GOMP_MAP_POINTER || kind == GOMP_MAP_TO_PSET)
	continue;

      if (kind == GOMP_MAP_FORCE_ALLOC
	  || kind == GOMP_MAP_FORCE_PRESENT
	  || kind == GOMP_MAP_FORCE_TO
	  || kind == GOMP_MAP_TO
	  || kind == GOMP_MAP_ALLOC)
	{
	  data_enter = true;
	  break;
	}

      if (kind == GOMP_MAP_RELEASE
	  || kind == GOMP_MAP_DELETE
	  || kind == GOMP_MAP_FROM
	  || kind == GOMP_MAP_FORCE_FROM)
	break;

      gomp_fatal (">>>> GOACC_enter_exit_data UNHANDLED kind 0x%.2x",
		  kind);
    }

  bool profiling_p = GOACC_PROFILING_DISPATCH_P (true);

  acc_prof_info prof_info;
  if (profiling_p)
    {
      thr->prof_info = &prof_info;

      prof_info.event_type
	= data_enter ? acc_ev_enter_data_start : acc_ev_exit_data_start;
      prof_info.valid_bytes = _ACC_PROF_INFO_VALID_BYTES;
      prof_info.version = _ACC_PROF_INFO_VERSION;
      prof_info.device_type = acc_device_type (acc_dev->type);
      prof_info.device_number = acc_dev->target_id;
      prof_info.thread_id = -1;
      prof_info.async = async;
      prof_info.async_queue = prof_info.async;
      prof_info.src_file = NULL;
      prof_info.func_name = NULL;
      prof_info.line_no = -1;
      prof_info.end_line_no = -1;
      prof_info.func_line_no = -1;
      prof_info.func_end_line_no = -1;
    }
  acc_event_info enter_exit_data_event_info;
  if (profiling_p)
    {
      enter_exit_data_event_info.other_event.event_type
	= prof_info.event_type;
      enter_exit_data_event_info.other_event.valid_bytes
	= _ACC_OTHER_EVENT_INFO_VALID_BYTES;
      enter_exit_data_event_info.other_event.parent_construct
	= data_enter ? acc_construct_enter_data : acc_construct_exit_data;
      enter_exit_data_event_info.other_event.implicit = 0;
      enter_exit_data_event_info.other_event.tool_info = NULL;
    }
  acc_api_info api_info;
  if (profiling_p)
    {
      thr->api_info = &api_info;

      api_info.device_api = acc_device_api_none;
      api_info.valid_bytes = _ACC_API_INFO_VALID_BYTES;
      api_info.device_type = prof_info.device_type;
      api_info.vendor = -1;
      api_info.device_handle = NULL;
      api_info.context_handle = NULL;
      api_info.async_handle = NULL;
    }

  if (profiling_p)
    goacc_profiling_dispatch (&prof_info, &enter_exit_data_event_info,
			      &api_info);

  if ((acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
      || (flags & GOACC_FLAG_HOST_FALLBACK))
    {
      prof_info.device_type = acc_device_host;
      api_info.device_type = prof_info.device_type;

      goto out_prof;
    }

  if (num_waits)
    {
      va_list ap;

      va_start (ap, num_waits);
      goacc_wait (async, num_waits, &ap);
      va_end (ap);
    }

  /* In C, non-pointers and arrays are represented by a single data clause.
     Dynamically allocated arrays and subarrays are represented by a data
     clause followed by an internal GOMP_MAP_POINTER.

     In Fortran, scalars and unallocated arrays are represented by a
     single data clause.  Allocated arrays and subarrays have three mappings:
     1) the original data clause, 2) a PSET, 3) a pointer to the array data.
  */

  if (data_enter)
    {
      for (i = 0; i < mapnum; i++)
	{
	  unsigned char kind = kinds[i] & 0xff;

	  /* Scan for pointers and PSETs.  */
	  int pointer = find_pointer (i, mapnum, kinds);

	  if (!pointer)
	    {
	      switch (kind)
		{
		case GOMP_MAP_ALLOC:
		case GOMP_MAP_FORCE_ALLOC:
		  acc_create_async (hostaddrs[i], sizes[i], async);
		  break;
		case GOMP_MAP_TO:
		case GOMP_MAP_FORCE_TO:
		  acc_copyin_async (hostaddrs[i], sizes[i], async);
		  break;
		default:
		  gomp_fatal (">>>> GOACC_enter_exit_data UNHANDLED kind 0x%.2x",
			      kind);
		  break;
		}
	    }
	  else
	    {
	      goacc_insert_pointer (pointer, &hostaddrs[i], &sizes[i], &kinds[i],
				    async);
	      /* Increment 'i' past the pointer mappings: OpenACC requires
		 Fortran arrays to be contiguous, so each PSET is associated
		 with one data mapping (GOMP_MAP_FORCE_ALLOC,
		 GOMP_MAP_FORCE_PRESENT, or GOMP_MAP_FORCE_TO) and one
		 GOMP_MAP_POINTER.  */
	      i += pointer - 1;
	    }
	}
    }
  else
    for (i = 0; i < mapnum; ++i)
      {
	unsigned char kind = kinds[i] & 0xff;

	int pointer = find_pointer (i, mapnum, kinds);

	if (!pointer)
	  {
	    switch (kind)
	      {
	      case GOMP_MAP_RELEASE:
	      case GOMP_MAP_DELETE:
		if (acc_is_present (hostaddrs[i], sizes[i]))
		  {
		    if (finalize)
		      acc_delete_finalize_async (hostaddrs[i], sizes[i], async);
		    else
		      acc_delete_async (hostaddrs[i], sizes[i], async);
		  }
		break;
	      case GOMP_MAP_FROM:
	      case GOMP_MAP_FORCE_FROM:
		if (finalize)
		  acc_copyout_finalize_async (hostaddrs[i], sizes[i], async);
		else
		  acc_copyout_async (hostaddrs[i], sizes[i], async);
		break;
	      default:
		gomp_fatal (">>>> GOACC_enter_exit_data UNHANDLED kind 0x%.2x",
			    kind);
		break;
	      }
	  }
	else
	  {
	    bool copyfrom = (kind == GOMP_MAP_FORCE_FROM
			     || kind == GOMP_MAP_FROM);
	    goacc_remove_pointer (hostaddrs[i], sizes[i], copyfrom, async,
				  finalize, pointer);
	    /* See the above comment.  */
	    i += pointer - 1;
	  }
      }

 out_prof:
  if (profiling_p)
    {
      prof_info.event_type
	= data_enter ? acc_ev_enter_data_end : acc_ev_exit_data_end;
      enter_exit_data_event_info.other_event.event_type = prof_info.event_type;
      goacc_profiling_dispatch (&prof_info, &enter_exit_data_event_info,
				&api_info);

      thr->prof_info = NULL;
      thr->api_info = NULL;
    }
}
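
/* Illustration (hypothetical, simplified): for a directive like

     #pragma acc enter data copyin(a[0:n])

   the compiler emits a call along the lines of

     GOACC_enter_exit_data (flags, 2, hostaddrs, sizes, kinds,
                            acc_async_sync, 0);

   with kinds[0] == GOMP_MAP_TO and kinds[1] == GOMP_MAP_POINTER, which the
   loops above route through goacc_insert_pointer; a matching
   'exit data copyout(...)' routes through goacc_remove_pointer.  */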