/* OpenACC Runtime initialization routines

   Copyright (C) 2013-2018 Free Software Foundation, Inc.

   Contributed by Mentor Embedded.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#include "openacc.h"
#include "config.h"
#include "libgomp.h"
#include "gomp-constants.h"
#include "oacc-int.h"
#include <stdint.h>
#include <string.h>
#include <assert.h>

/* Return the block containing [H,H+S), or NULL if it is not contained.  The
   device lock for DEV must be locked on entry, and remains locked on exit.  */

static splay_tree_key
lookup_host (struct gomp_device_descr *dev, void *h, size_t s)
{
  struct splay_tree_key_s node;
  splay_tree_key key;

  node.host_start = (uintptr_t) h;
  node.host_end = (uintptr_t) h + s;

  key = splay_tree_lookup (&dev->mem_map, &node);

  return key;
}

/* Return the block containing [D,D+S), or NULL if it is not contained.
   The list isn't ordered by device address, so we have to iterate
   over the whole array.  This is not expected to be a common
   operation.  The device lock associated with TGT must be locked on entry,
   and remains locked on exit.  */

static splay_tree_key
lookup_dev (struct target_mem_desc *tgt, void *d, size_t s)
{
  int i;
  struct target_mem_desc *t;

  if (!tgt)
    return NULL;

  for (t = tgt; t != NULL; t = t->prev)
    {
      if (t->tgt_start <= (uintptr_t) d && t->tgt_end >= (uintptr_t) d + s)
        break;
    }

  if (!t)
    return NULL;

  for (i = 0; i < t->list_count; i++)
    {
      void * offset;

      splay_tree_key k = &t->array[i].key;
      offset = d - t->tgt_start + k->tgt_offset;

      if (k->host_start + offset <= (void *) k->host_end)
        return k;
    }

  return NULL;
}
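
/* Editorial illustration (not part of libgomp) of the lookup convention used
   by the two helpers above; the addresses and sizes are hypothetical.

     // After mapping host block h with size 100 on device dev:
     //   lookup_host (dev, h, 100)      -> key for [h, h+100)
     //   lookup_host (dev, h + 10, 4)   -> the same key (a sub-range)
     //   lookup_host (dev, h + 100, 1)  -> NULL (one past the end)
*/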

/* OpenACC is silent on how memory exhaustion is indicated.  We return
   NULL.  */

void *
acc_malloc (size_t s)
{
  if (!s)
    return NULL;

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();

  assert (thr->dev);

  if (thr->dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return malloc (s);

  return thr->dev->alloc_func (thr->dev->target_id, s);
}

/* OpenACC 2.0a (3.2.16) doesn't specify what to do in the event the device
   address is mapped.  We choose to check whether it is mapped, and if so,
   to unmap it.  */

void
acc_free (void *d)
{
  splay_tree_key k;

  if (!d)
    return;

  struct goacc_thread *thr = goacc_thread ();

  assert (thr && thr->dev);

  struct gomp_device_descr *acc_dev = thr->dev;

  if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return free (d);

  gomp_mutex_lock (&acc_dev->lock);

  /* We don't have to call lazy open here, as the ptr value must have
     been returned by acc_malloc.  It's not permitted to pass NULL in
     (unless you got that null from acc_malloc).  */
  if ((k = lookup_dev (acc_dev->openacc.data_environ, d, 1)))
    {
      void *offset;

      offset = d - k->tgt->tgt_start + k->tgt_offset;

      gomp_mutex_unlock (&acc_dev->lock);

      acc_unmap_data ((void *)(k->host_start + offset));
    }
  else
    gomp_mutex_unlock (&acc_dev->lock);

  if (!acc_dev->free_func (acc_dev->target_id, d))
    gomp_fatal ("error in freeing device memory in %s", __FUNCTION__);
}
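
/* Usage sketch (editorial, not part of libgomp; uses only the public
   OpenACC API from openacc.h): allocate raw device memory, then release it.
   acc_malloc returns NULL for a zero size or on exhaustion, as noted above.

     #include <openacc.h>

     void
     device_scratch_example (void)
     {
       void *d = acc_malloc (4096);   // raw device buffer
       if (d)
         acc_free (d);                // unmaps first if still mapped
     }
*/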

void
acc_memcpy_to_device (void *d, void *h, size_t s)
{
  /* No need to call lazy open here, as the device pointer must have
     been obtained from a routine that did that.  */
  struct goacc_thread *thr = goacc_thread ();

  assert (thr && thr->dev);

  if (thr->dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    {
      memmove (d, h, s);
      return;
    }

  if (!thr->dev->host2dev_func (thr->dev->target_id, d, h, s))
    gomp_fatal ("error in %s", __FUNCTION__);
}

void
acc_memcpy_from_device (void *h, void *d, size_t s)
{
  /* No need to call lazy open here, as the device pointer must have
     been obtained from a routine that did that.  */
  struct goacc_thread *thr = goacc_thread ();

  assert (thr && thr->dev);

  if (thr->dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    {
      memmove (h, d, s);
      return;
    }

  if (!thr->dev->dev2host_func (thr->dev->target_id, h, d, s))
    gomp_fatal ("error in %s", __FUNCTION__);
}
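
/* Usage sketch (editorial): a host -> device -> host round trip through the
   two copy routines above.  On shared-memory devices this degenerates to
   memmove, per the code above.

     #include <openacc.h>

     void
     round_trip_example (void)
     {
       char in[64] = "payload", out[64];
       void *d = acc_malloc (sizeof in);
       if (!d)
         return;
       acc_memcpy_to_device (d, in, sizeof in);
       acc_memcpy_from_device (out, d, sizeof out);
       acc_free (d);
     }
*/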

/* Return the device pointer that corresponds to host data H.  Or NULL
   if no mapping.  */

void *
acc_deviceptr (void *h)
{
  splay_tree_key n;
  void *d;
  void *offset;

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *dev = thr->dev;

  if (thr->dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return h;

  gomp_mutex_lock (&dev->lock);

  n = lookup_host (dev, h, 1);

  if (!n)
    {
      gomp_mutex_unlock (&dev->lock);
      return NULL;
    }

  offset = h - n->host_start;

  d = n->tgt->tgt_start + n->tgt_offset + offset;

  gomp_mutex_unlock (&dev->lock);

  return d;
}

/* Return the host pointer that corresponds to device data D.  Or NULL
   if no mapping.  */

void *
acc_hostptr (void *d)
{
  splay_tree_key n;
  void *h;
  void *offset;

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  if (thr->dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return d;

  gomp_mutex_lock (&acc_dev->lock);

  n = lookup_dev (acc_dev->openacc.data_environ, d, 1);

  if (!n)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      return NULL;
    }

  offset = d - n->tgt->tgt_start + n->tgt_offset;

  h = n->host_start + offset;

  gomp_mutex_unlock (&acc_dev->lock);

  return h;
}
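
/* Usage sketch (editorial): acc_deviceptr and acc_hostptr translate between
   the two sides of an existing mapping; both return NULL when there is no
   mapping.  The caller-supplied array here is hypothetical.

     #include <openacc.h>
     #include <assert.h>

     void
     translation_example (float *a, size_t n)   // a: hypothetical host array
     {
       void *d = acc_copyin (a, n * sizeof *a);
       assert (acc_deviceptr (a) == d);
       assert (acc_hostptr (d) == a);
       acc_delete (a, n * sizeof *a);
     }
*/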

/* Return 1 if host data [H,+S] is present on the device.  */

int
acc_is_present (void *h, size_t s)
{
  splay_tree_key n;

  if (!s || !h)
    return 0;

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  if (thr->dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return h != NULL;

  gomp_mutex_lock (&acc_dev->lock);

  n = lookup_host (acc_dev, h, s);

  if (n && ((uintptr_t)h < n->host_start
            || (uintptr_t)h + s > n->host_end
            || s > n->host_end - n->host_start))
    n = NULL;

  gomp_mutex_unlock (&acc_dev->lock);

  return n != NULL;
}
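
/* Usage sketch (editorial): acc_is_present answers containment, so a
   sub-range of a mapped block is present but a range straddling its end is
   not.  The array below is hypothetical.

     #include <openacc.h>

     void
     presence_example (double *a)   // a: hypothetical host array, 100 doubles
     {
       acc_copyin (a, 100 * sizeof *a);
       int whole = acc_is_present (a, 100 * sizeof *a);      // 1
       int part  = acc_is_present (a + 10, 5 * sizeof *a);   // 1
       int past  = acc_is_present (a + 90, 20 * sizeof *a);  // 0
       (void) whole; (void) part; (void) past;
       acc_delete (a, 100 * sizeof *a);
     }
*/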

/* Create a mapping for host [H,+S] -> device [D,+S].  */

void
acc_map_data (void *h, void *d, size_t s)
{
  struct target_mem_desc *tgt = NULL;
  size_t mapnum = 1;
  void *hostaddrs = h;
  void *devaddrs = d;
  size_t sizes = s;
  unsigned short kinds = GOMP_MAP_ALLOC;

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    {
      if (d != h)
        gomp_fatal ("cannot map data on shared-memory system");
    }
  else
    {
      if (!d || !h || !s)
        gomp_fatal ("[%p,+%d]->[%p,+%d] is a bad map",
                    (void *)h, (int)s, (void *)d, (int)s);

      gomp_mutex_lock (&acc_dev->lock);

      if (lookup_host (acc_dev, h, s))
        {
          gomp_mutex_unlock (&acc_dev->lock);
          gomp_fatal ("host address [%p, +%d] is already mapped", (void *)h,
                      (int)s);
        }

      if (lookup_dev (acc_dev->openacc.data_environ, d, s))
        {
          gomp_mutex_unlock (&acc_dev->lock);
          gomp_fatal ("device address [%p, +%d] is already mapped", (void *)d,
                      (int)s);
        }

      gomp_mutex_unlock (&acc_dev->lock);

      tgt = gomp_map_vars (acc_dev, mapnum, &hostaddrs, &devaddrs, &sizes,
                           &kinds, true, GOMP_MAP_VARS_OPENACC);
      tgt->list[0].key->refcount = REFCOUNT_INFINITY;
    }

  /* On shared-memory targets nothing was mapped above, so TGT is still NULL
     and there is no descriptor to thread onto the data environment.  */
  if (tgt)
    {
      gomp_mutex_lock (&acc_dev->lock);
      tgt->prev = acc_dev->openacc.data_environ;
      acc_dev->openacc.data_environ = tgt;
      gomp_mutex_unlock (&acc_dev->lock);
    }
}

void
acc_unmap_data (void *h)
{
  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  /* No need to call lazy open, as the address must have been mapped.  */

  /* This is a no-op on shared-memory targets.  */
  if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return;

  size_t host_size;

  gomp_mutex_lock (&acc_dev->lock);

  splay_tree_key n = lookup_host (acc_dev, h, 1);
  struct target_mem_desc *t;

  if (!n)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("%p is not a mapped block", (void *)h);
    }

  host_size = n->host_end - n->host_start;

  if (n->host_start != (uintptr_t) h)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("[%p,%d] surrounds %p",
                  (void *) n->host_start, (int) host_size, (void *) h);
    }

  /* Mark for removal.  */
  n->refcount = 1;

  t = n->tgt;

  if (t->refcount == 2)
    {
      struct target_mem_desc *tp;

      /* This is the last reference, so pull the descriptor off the
         chain.  This prevents gomp_unmap_vars, via gomp_unmap_tgt, from
         freeing the device memory.  */
      t->tgt_end = 0;
      t->to_free = 0;

      for (tp = NULL, t = acc_dev->openacc.data_environ; t != NULL;
           tp = t, t = t->prev)
        if (n->tgt == t)
          {
            if (tp)
              tp->prev = t->prev;
            else
              acc_dev->openacc.data_environ = t->prev;

            break;
          }
    }

  gomp_mutex_unlock (&acc_dev->lock);

  gomp_unmap_vars (t, true);
}
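
/* Usage sketch (editorial): pair a host buffer with device memory obtained
   from acc_malloc.  acc_map_data pins the mapping (REFCOUNT_INFINITY above),
   so it must be undone with acc_unmap_data, not acc_delete.

     #include <openacc.h>

     void
     manual_map_example (void)
     {
       static char h[256];             // host block to expose on the device
       void *d = acc_malloc (sizeof h);
       if (!d)
         return;
       acc_map_data (h, d, sizeof h);  // host [h,+256] -> device [d,+256]
       // ... kernels may now reference h inside "present" data regions ...
       acc_unmap_data (h);             // sever the mapping
       acc_free (d);                   // then release the device memory
     }
*/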

#define FLAG_PRESENT (1 << 0)
#define FLAG_CREATE (1 << 1)
#define FLAG_COPY (1 << 2)

static void *
present_create_copy (unsigned f, void *h, size_t s)
{
  void *d;
  splay_tree_key n;

  if (!h || !s)
    gomp_fatal ("[%p,+%d] is a bad range", (void *)h, (int)s);

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return h;

  gomp_mutex_lock (&acc_dev->lock);

  n = lookup_host (acc_dev, h, s);
  if (n)
    {
      /* Present.  */
      d = (void *) (n->tgt->tgt_start + n->tgt_offset);

      if (!(f & FLAG_PRESENT))
        {
          gomp_mutex_unlock (&acc_dev->lock);
          gomp_fatal ("[%p,+%d] already mapped to [%p,+%d]",
                      (void *)h, (int)s, (void *)d, (int)s);
        }
      if ((h + s) > (void *)n->host_end)
        {
          gomp_mutex_unlock (&acc_dev->lock);
          gomp_fatal ("[%p,+%d] not mapped", (void *)h, (int)s);
        }

      if (n->refcount != REFCOUNT_INFINITY)
        {
          n->refcount++;
          n->dynamic_refcount++;
        }
      gomp_mutex_unlock (&acc_dev->lock);
    }
  else if (!(f & FLAG_CREATE))
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("[%p,+%d] not mapped", (void *)h, (int)s);
    }
  else
    {
      struct target_mem_desc *tgt;
      size_t mapnum = 1;
      unsigned short kinds;
      void *hostaddrs = h;

      if (f & FLAG_COPY)
        kinds = GOMP_MAP_TO;
      else
        kinds = GOMP_MAP_ALLOC;

      gomp_mutex_unlock (&acc_dev->lock);

      tgt = gomp_map_vars (acc_dev, mapnum, &hostaddrs, NULL, &s, &kinds, true,
                           GOMP_MAP_VARS_OPENACC);
      /* Initialize dynamic refcount.  */
      tgt->list[0].key->dynamic_refcount = 1;

      gomp_mutex_lock (&acc_dev->lock);

      d = tgt->to_free;
      tgt->prev = acc_dev->openacc.data_environ;
      acc_dev->openacc.data_environ = tgt;

      gomp_mutex_unlock (&acc_dev->lock);
    }

  return d;
}

void *
acc_create (void *h, size_t s)
{
  return present_create_copy (FLAG_PRESENT | FLAG_CREATE, h, s);
}

void *
acc_copyin (void *h, size_t s)
{
  return present_create_copy (FLAG_PRESENT | FLAG_CREATE | FLAG_COPY, h, s);
}

void *
acc_present_or_create (void *h, size_t s)
{
  return present_create_copy (FLAG_PRESENT | FLAG_CREATE, h, s);
}

/* acc_pcreate is acc_present_or_create by a different name.  */
#ifdef HAVE_ATTRIBUTE_ALIAS
strong_alias (acc_present_or_create, acc_pcreate)
#else
void *
acc_pcreate (void *h, size_t s)
{
  return acc_present_or_create (h, s);
}
#endif

void *
acc_present_or_copyin (void *h, size_t s)
{
  return present_create_copy (FLAG_PRESENT | FLAG_CREATE | FLAG_COPY, h, s);
}

/* acc_pcopyin is acc_present_or_copyin by a different name.  */
#ifdef HAVE_ATTRIBUTE_ALIAS
strong_alias (acc_present_or_copyin, acc_pcopyin)
#else
void *
acc_pcopyin (void *h, size_t s)
{
  return acc_present_or_copyin (h, s);
}
#endif
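
/* Usage sketch (editorial): because present_create_copy bumps both the
   structural and the dynamic refcount when data are already present, nested
   acc_copyin calls on the same range need matching acc_delete calls before
   the block actually goes away (OpenACC 2.5 semantics).  The array is
   hypothetical; the comments show refcount/dynamic_refcount.

     #include <openacc.h>

     void
     nested_copyin_example (int *v)     // v: hypothetical host array, 8 ints
     {
       acc_copyin (v, 8 * sizeof *v);   // maps and copies; counts 1/1
       acc_copyin (v, 8 * sizeof *v);   // already present;  counts 2/2
       acc_delete (v, 8 * sizeof *v);   // counts 1/1, still mapped
       acc_delete (v, 8 * sizeof *v);   // counts 0/0, unmapped
     }
*/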

#define FLAG_COPYOUT (1 << 0)
#define FLAG_FINALIZE (1 << 1)

static void
delete_copyout (unsigned f, void *h, size_t s, const char *libfnname)
{
  size_t host_size;
  splay_tree_key n;
  void *d;
  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return;

  gomp_mutex_lock (&acc_dev->lock);

  n = lookup_host (acc_dev, h, s);

  /* No need to call lazy open, as the data must already have been
     mapped.  */

  if (!n)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("[%p,%d] is not mapped", (void *)h, (int)s);
    }

  d = (void *) (n->tgt->tgt_start + n->tgt_offset
                + (uintptr_t) h - n->host_start);

  host_size = n->host_end - n->host_start;

  if (n->host_start != (uintptr_t) h || host_size != s)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("[%p,+%d] surrounds [%p,+%d]",
                  (void *) n->host_start, (int) host_size, (void *) h,
                  (int) s);
    }

  if (n->refcount == REFCOUNT_INFINITY)
    {
      n->refcount = 0;
      n->dynamic_refcount = 0;
    }
  if (n->refcount < n->dynamic_refcount)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("dynamic reference counting assertion failed in %s",
                  libfnname);
    }

  if (f & FLAG_FINALIZE)
    {
      n->refcount -= n->dynamic_refcount;
      n->dynamic_refcount = 0;
    }
  else if (n->dynamic_refcount)
    {
      n->dynamic_refcount--;
      n->refcount--;
    }

  if (n->refcount == 0)
    {
      if (n->tgt->refcount == 2)
        {
          struct target_mem_desc *tp, *t;
          for (tp = NULL, t = acc_dev->openacc.data_environ; t != NULL;
               tp = t, t = t->prev)
            if (n->tgt == t)
              {
                if (tp)
                  tp->prev = t->prev;
                else
                  acc_dev->openacc.data_environ = t->prev;
                break;
              }
        }

      if (f & FLAG_COPYOUT)
        acc_dev->dev2host_func (acc_dev->target_id, h, d, s);

      gomp_remove_var (acc_dev, n);
    }

  gomp_mutex_unlock (&acc_dev->lock);
}

void
acc_delete (void *h, size_t s)
{
  delete_copyout (0, h, s, __FUNCTION__);
}

void
acc_delete_finalize (void *h, size_t s)
{
  delete_copyout (FLAG_FINALIZE, h, s, __FUNCTION__);
}

void
acc_delete_finalize_async (void *h, size_t s, int async)
{
  /* TODO: ASYNC is ignored for now; the operation completes synchronously.  */
  delete_copyout (FLAG_FINALIZE, h, s, __FUNCTION__);
}

void
acc_copyout (void *h, size_t s)
{
  delete_copyout (FLAG_COPYOUT, h, s, __FUNCTION__);
}

void
acc_copyout_finalize (void *h, size_t s)
{
  delete_copyout (FLAG_COPYOUT | FLAG_FINALIZE, h, s, __FUNCTION__);
}

void
acc_copyout_finalize_async (void *h, size_t s, int async)
{
  /* TODO: ASYNC is ignored for now; the operation completes synchronously.  */
  delete_copyout (FLAG_COPYOUT | FLAG_FINALIZE, h, s, __FUNCTION__);
}
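
/* Usage sketch (editorial): acc_copyout writes device data back before
   unmapping; the _finalize variants drop the whole dynamic refcount at once,
   as delete_copyout does above under FLAG_FINALIZE.  The array is
   hypothetical.

     #include <openacc.h>

     void
     copyout_example (float *a, size_t n)        // a: hypothetical host data
     {
       acc_copyin (a, n * sizeof *a);
       acc_copyin (a, n * sizeof *a);            // now held twice
       acc_copyout_finalize (a, n * sizeof *a);  // one call releases both
     }
*/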

static void
update_dev_host (int is_dev, void *h, size_t s)
{
  splay_tree_key n;
  void *d;

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return;

  gomp_mutex_lock (&acc_dev->lock);

  n = lookup_host (acc_dev, h, s);

  if (!n)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("[%p,%d] is not mapped", h, (int)s);
    }

  d = (void *) (n->tgt->tgt_start + n->tgt_offset
                + (uintptr_t) h - n->host_start);

  if (is_dev)
    acc_dev->host2dev_func (acc_dev->target_id, d, h, s);
  else
    acc_dev->dev2host_func (acc_dev->target_id, h, d, s);

  gomp_mutex_unlock (&acc_dev->lock);
}

void
acc_update_device (void *h, size_t s)
{
  update_dev_host (1, h, s);
}

void
acc_update_self (void *h, size_t s)
{
  update_dev_host (0, h, s);
}
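
/* Usage sketch (editorial): the update routines refresh one side of an
   existing mapping without changing refcounts; the range must already be
   mapped or the code above aborts.  The scalar is hypothetical.

     #include <openacc.h>

     void
     update_example (int *flag)     // flag: hypothetical host scalar
     {
       acc_copyin (flag, sizeof *flag);
       *flag = 1;
       acc_update_device (flag, sizeof *flag);  // push host -> device
       acc_update_self (flag, sizeof *flag);    // pull device -> host
       acc_delete (flag, sizeof *flag);
     }
*/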

void
gomp_acc_insert_pointer (size_t mapnum, void **hostaddrs, size_t *sizes,
                         void *kinds)
{
  struct target_mem_desc *tgt;
  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  if (acc_is_present (*hostaddrs, *sizes))
    {
      splay_tree_key n;
      gomp_mutex_lock (&acc_dev->lock);
      n = lookup_host (acc_dev, *hostaddrs, *sizes);
      gomp_mutex_unlock (&acc_dev->lock);

      tgt = n->tgt;
      for (size_t i = 0; i < tgt->list_count; i++)
        if (tgt->list[i].key == n)
          {
            for (size_t j = 0; j < mapnum; j++)
              if (i + j < tgt->list_count && tgt->list[i + j].key)
                {
                  tgt->list[i + j].key->refcount++;
                  tgt->list[i + j].key->dynamic_refcount++;
                }
            return;
          }
      /* Should not reach here.  */
      gomp_fatal ("Dynamic refcount incrementing failed for pointer/pset");
    }

  gomp_debug (0, "  %s: prepare mappings\n", __FUNCTION__);
  tgt = gomp_map_vars (acc_dev, mapnum, hostaddrs,
                       NULL, sizes, kinds, true, GOMP_MAP_VARS_OPENACC);
  gomp_debug (0, "  %s: mappings prepared\n", __FUNCTION__);

  /* Initialize dynamic refcount.  */
  tgt->list[0].key->dynamic_refcount = 1;

  gomp_mutex_lock (&acc_dev->lock);
  tgt->prev = acc_dev->openacc.data_environ;
  acc_dev->openacc.data_environ = tgt;
  gomp_mutex_unlock (&acc_dev->lock);
}

void
gomp_acc_remove_pointer (void *h, size_t s, bool force_copyfrom, int async,
                         int finalize, int mapnum)
{
  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;
  splay_tree_key n;
  struct target_mem_desc *t;
  int minrefs = (mapnum == 1) ? 2 : 3;

  if (!acc_is_present (h, s))
    return;

  gomp_mutex_lock (&acc_dev->lock);

  n = lookup_host (acc_dev, h, 1);

  if (!n)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("%p is not a mapped block", (void *)h);
    }

  gomp_debug (0, "  %s: restore mappings\n", __FUNCTION__);

  t = n->tgt;

  if (n->refcount < n->dynamic_refcount)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("dynamic reference counting assertion failed in %s",
                  __FUNCTION__);
    }

  if (finalize)
    {
      n->refcount -= n->dynamic_refcount;
      n->dynamic_refcount = 0;
    }
  else if (n->dynamic_refcount)
    {
      n->dynamic_refcount--;
      n->refcount--;
    }

  if (n->refcount == 0)
    {
      if (t->refcount == minrefs)
        {
          /* This is the last reference, so pull the descriptor off the
             chain.  This prevents gomp_unmap_vars, via gomp_unmap_tgt, from
             freeing the device memory.  */
          struct target_mem_desc *tp;
          for (tp = NULL, t = acc_dev->openacc.data_environ; t != NULL;
               tp = t, t = t->prev)
            {
              if (n->tgt == t)
                {
                  if (tp)
                    tp->prev = t->prev;
                  else
                    acc_dev->openacc.data_environ = t->prev;
                  break;
                }
            }
        }

      /* Set refcount to 1 to allow gomp_unmap_vars to unmap it.  */
      n->refcount = 1;
      t->refcount = minrefs;
      for (size_t i = 0; i < t->list_count; i++)
        if (t->list[i].key == n)
          {
            t->list[i].copy_from = force_copyfrom ? 1 : 0;
            break;
          }

      /* Drop the device lock before unmapping; gomp_unmap_vars takes it
         itself.  */
      gomp_mutex_unlock (&acc_dev->lock);

      /* If running synchronously, unmap immediately.  */
      if (async < acc_async_noval)
        gomp_unmap_vars (t, true);
      else
        t->device_descr->openacc.register_async_cleanup_func (t, async);
    }
  else
    gomp_mutex_unlock (&acc_dev->lock);

  gomp_debug (0, "  %s: mappings restored\n", __FUNCTION__);
}
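
/* Editorial note: gomp_acc_insert_pointer and gomp_acc_remove_pointer are
   not user API; the compiler emits calls to them for dynamic data clauses
   involving pointer/pset mappings.  A sketch of source code that exercises
   this path (assuming the usual GCC OpenACC lowering of enter/exit data):

     void
     enter_exit_example (float *a, int n)
     {
     #pragma acc enter data copyin (a[0:n])
     #pragma acc parallel loop present (a[0:n])
       for (int i = 0; i < n; i++)
         a[i] *= 2.0f;
     #pragma acc exit data copyout (a[0:n])
     }
*/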