/* OpenACC Runtime initialization routines

   Copyright (C) 2013-2019 Free Software Foundation, Inc.

   Contributed by Mentor Embedded.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */
#include "openacc.h"
#include "config.h"
#include "libgomp.h"
#include "gomp-constants.h"
#include "oacc-int.h"
#include <stdint.h>
#include <string.h>
#include <assert.h>
38 /* Return block containing [H->S), or NULL if not contained. The device lock
39 for DEV must be locked on entry, and remains locked on exit. */
42 lookup_host (struct gomp_device_descr
*dev
, void *h
, size_t s
)
44 struct splay_tree_key_s node
;
47 node
.host_start
= (uintptr_t) h
;
48 node
.host_end
= (uintptr_t) h
+ s
;
50 key
= splay_tree_lookup (&dev
->mem_map
, &node
);
55 /* Return block containing [D->S), or NULL if not contained.
56 The list isn't ordered by device address, so we have to iterate
57 over the whole array. This is not expected to be a common
58 operation. The device lock associated with TGT must be locked on entry, and
59 remains locked on exit. */
62 lookup_dev (struct target_mem_desc
*tgt
, void *d
, size_t s
)
65 struct target_mem_desc
*t
;
70 for (t
= tgt
; t
!= NULL
; t
= t
->prev
)
72 if (t
->tgt_start
<= (uintptr_t) d
&& t
->tgt_end
>= (uintptr_t) d
+ s
)
79 for (i
= 0; i
< t
->list_count
; i
++)
83 splay_tree_key k
= &t
->array
[i
].key
;
84 offset
= d
- t
->tgt_start
+ k
->tgt_offset
;
86 if (k
->host_start
+ offset
<= (void *) k
->host_end
)
93 /* OpenACC is silent on how memory exhaustion is indicated. We return
102 goacc_lazy_initialize ();
104 struct goacc_thread
*thr
= goacc_thread ();
108 if (thr
->dev
->capabilities
& GOMP_OFFLOAD_CAP_SHARED_MEM
)
111 return thr
->dev
->alloc_func (thr
->dev
->target_id
, s
);
114 /* OpenACC 2.0a (3.2.16) doesn't specify what to do in the event
115 the device address is mapped. We choose to check if it mapped,
116 and if it is, to unmap it. */
125 struct goacc_thread
*thr
= goacc_thread ();
127 assert (thr
&& thr
->dev
);
129 struct gomp_device_descr
*acc_dev
= thr
->dev
;
131 if (acc_dev
->capabilities
& GOMP_OFFLOAD_CAP_SHARED_MEM
)
134 gomp_mutex_lock (&acc_dev
->lock
);
136 /* We don't have to call lazy open here, as the ptr value must have
137 been returned by acc_malloc. It's not permitted to pass NULL in
138 (unless you got that null from acc_malloc). */
139 if ((k
= lookup_dev (acc_dev
->openacc
.data_environ
, d
, 1)))
143 offset
= d
- k
->tgt
->tgt_start
+ k
->tgt_offset
;
145 gomp_mutex_unlock (&acc_dev
->lock
);
147 acc_unmap_data ((void *)(k
->host_start
+ offset
));
150 gomp_mutex_unlock (&acc_dev
->lock
);
152 if (!acc_dev
->free_func (acc_dev
->target_id
, d
))
153 gomp_fatal ("error in freeing device memory in %s", __FUNCTION__
);
157 memcpy_tofrom_device (bool from
, void *d
, void *h
, size_t s
, int async
,
158 const char *libfnname
)
160 /* No need to call lazy open here, as the device pointer must have
161 been obtained from a routine that did that. */
162 struct goacc_thread
*thr
= goacc_thread ();
164 assert (thr
&& thr
->dev
);
166 if (thr
->dev
->capabilities
& GOMP_OFFLOAD_CAP_SHARED_MEM
)
175 goacc_aq aq
= get_goacc_asyncqueue (async
);
177 gomp_copy_dev2host (thr
->dev
, aq
, h
, d
, s
);
179 gomp_copy_host2dev (thr
->dev
, aq
, d
, h
, s
, /* TODO: cbuf? */ NULL
);
183 acc_memcpy_to_device (void *d
, void *h
, size_t s
)
185 memcpy_tofrom_device (false, d
, h
, s
, acc_async_sync
, __FUNCTION__
);
189 acc_memcpy_to_device_async (void *d
, void *h
, size_t s
, int async
)
191 memcpy_tofrom_device (false, d
, h
, s
, async
, __FUNCTION__
);
195 acc_memcpy_from_device (void *h
, void *d
, size_t s
)
197 memcpy_tofrom_device (true, d
, h
, s
, acc_async_sync
, __FUNCTION__
);
201 acc_memcpy_from_device_async (void *h
, void *d
, size_t s
, int async
)
203 memcpy_tofrom_device (true, d
, h
, s
, async
, __FUNCTION__
);
206 /* Return the device pointer that corresponds to host data H. Or NULL
210 acc_deviceptr (void *h
)
216 goacc_lazy_initialize ();
218 struct goacc_thread
*thr
= goacc_thread ();
219 struct gomp_device_descr
*dev
= thr
->dev
;
221 if (thr
->dev
->capabilities
& GOMP_OFFLOAD_CAP_SHARED_MEM
)
224 gomp_mutex_lock (&dev
->lock
);
226 n
= lookup_host (dev
, h
, 1);
230 gomp_mutex_unlock (&dev
->lock
);
234 offset
= h
- n
->host_start
;
236 d
= n
->tgt
->tgt_start
+ n
->tgt_offset
+ offset
;
238 gomp_mutex_unlock (&dev
->lock
);
243 /* Return the host pointer that corresponds to device data D. Or NULL
247 acc_hostptr (void *d
)
253 goacc_lazy_initialize ();
255 struct goacc_thread
*thr
= goacc_thread ();
256 struct gomp_device_descr
*acc_dev
= thr
->dev
;
258 if (thr
->dev
->capabilities
& GOMP_OFFLOAD_CAP_SHARED_MEM
)
261 gomp_mutex_lock (&acc_dev
->lock
);
263 n
= lookup_dev (acc_dev
->openacc
.data_environ
, d
, 1);
267 gomp_mutex_unlock (&acc_dev
->lock
);
271 offset
= d
- n
->tgt
->tgt_start
+ n
->tgt_offset
;
273 h
= n
->host_start
+ offset
;
275 gomp_mutex_unlock (&acc_dev
->lock
);
280 /* Return 1 if host data [H,+S] is present on the device. */
283 acc_is_present (void *h
, size_t s
)
290 goacc_lazy_initialize ();
292 struct goacc_thread
*thr
= goacc_thread ();
293 struct gomp_device_descr
*acc_dev
= thr
->dev
;
295 if (thr
->dev
->capabilities
& GOMP_OFFLOAD_CAP_SHARED_MEM
)
298 gomp_mutex_lock (&acc_dev
->lock
);
300 n
= lookup_host (acc_dev
, h
, s
);
302 if (n
&& ((uintptr_t)h
< n
->host_start
303 || (uintptr_t)h
+ s
> n
->host_end
304 || s
> n
->host_end
- n
->host_start
))
307 gomp_mutex_unlock (&acc_dev
->lock
);
312 /* Create a mapping for host [H,+S] -> device [D,+S] */
315 acc_map_data (void *h
, void *d
, size_t s
)
317 struct target_mem_desc
*tgt
= NULL
;
322 unsigned short kinds
= GOMP_MAP_ALLOC
;
324 goacc_lazy_initialize ();
326 struct goacc_thread
*thr
= goacc_thread ();
327 struct gomp_device_descr
*acc_dev
= thr
->dev
;
329 if (acc_dev
->capabilities
& GOMP_OFFLOAD_CAP_SHARED_MEM
)
332 gomp_fatal ("cannot map data on shared-memory system");
336 struct goacc_thread
*thr
= goacc_thread ();
339 gomp_fatal ("[%p,+%d]->[%p,+%d] is a bad map",
340 (void *)h
, (int)s
, (void *)d
, (int)s
);
342 gomp_mutex_lock (&acc_dev
->lock
);
344 if (lookup_host (acc_dev
, h
, s
))
346 gomp_mutex_unlock (&acc_dev
->lock
);
347 gomp_fatal ("host address [%p, +%d] is already mapped", (void *)h
,
351 if (lookup_dev (thr
->dev
->openacc
.data_environ
, d
, s
))
353 gomp_mutex_unlock (&acc_dev
->lock
);
354 gomp_fatal ("device address [%p, +%d] is already mapped", (void *)d
,
358 gomp_mutex_unlock (&acc_dev
->lock
);
360 tgt
= gomp_map_vars (acc_dev
, mapnum
, &hostaddrs
, &devaddrs
, &sizes
,
361 &kinds
, true, GOMP_MAP_VARS_OPENACC
);
362 tgt
->list
[0].key
->refcount
= REFCOUNT_INFINITY
;
365 gomp_mutex_lock (&acc_dev
->lock
);
366 tgt
->prev
= acc_dev
->openacc
.data_environ
;
367 acc_dev
->openacc
.data_environ
= tgt
;
368 gomp_mutex_unlock (&acc_dev
->lock
);
372 acc_unmap_data (void *h
)
374 struct goacc_thread
*thr
= goacc_thread ();
375 struct gomp_device_descr
*acc_dev
= thr
->dev
;
377 /* No need to call lazy open, as the address must have been mapped. */
379 /* This is a no-op on shared-memory targets. */
380 if (acc_dev
->capabilities
& GOMP_OFFLOAD_CAP_SHARED_MEM
)
385 gomp_mutex_lock (&acc_dev
->lock
);
387 splay_tree_key n
= lookup_host (acc_dev
, h
, 1);
388 struct target_mem_desc
*t
;
392 gomp_mutex_unlock (&acc_dev
->lock
);
393 gomp_fatal ("%p is not a mapped block", (void *)h
);
396 host_size
= n
->host_end
- n
->host_start
;
398 if (n
->host_start
!= (uintptr_t) h
)
400 gomp_mutex_unlock (&acc_dev
->lock
);
401 gomp_fatal ("[%p,%d] surrounds %p",
402 (void *) n
->host_start
, (int) host_size
, (void *) h
);
405 /* Mark for removal. */
410 if (t
->refcount
== 2)
412 struct target_mem_desc
*tp
;
414 /* This is the last reference, so pull the descriptor off the
415 chain. This avoids gomp_unmap_vars via gomp_unmap_tgt from
416 freeing the device memory. */
420 for (tp
= NULL
, t
= acc_dev
->openacc
.data_environ
; t
!= NULL
;
427 acc_dev
->openacc
.data_environ
= t
->prev
;
433 gomp_mutex_unlock (&acc_dev
->lock
);
435 gomp_unmap_vars (t
, true);
438 #define FLAG_PRESENT (1 << 0)
439 #define FLAG_CREATE (1 << 1)
440 #define FLAG_COPY (1 << 2)
443 present_create_copy (unsigned f
, void *h
, size_t s
, int async
)
449 gomp_fatal ("[%p,+%d] is a bad range", (void *)h
, (int)s
);
451 goacc_lazy_initialize ();
453 struct goacc_thread
*thr
= goacc_thread ();
454 struct gomp_device_descr
*acc_dev
= thr
->dev
;
456 if (acc_dev
->capabilities
& GOMP_OFFLOAD_CAP_SHARED_MEM
)
459 gomp_mutex_lock (&acc_dev
->lock
);
461 n
= lookup_host (acc_dev
, h
, s
);
465 d
= (void *) (n
->tgt
->tgt_start
+ n
->tgt_offset
);
467 if (!(f
& FLAG_PRESENT
))
469 gomp_mutex_unlock (&acc_dev
->lock
);
470 gomp_fatal ("[%p,+%d] already mapped to [%p,+%d]",
471 (void *)h
, (int)s
, (void *)d
, (int)s
);
473 if ((h
+ s
) > (void *)n
->host_end
)
475 gomp_mutex_unlock (&acc_dev
->lock
);
476 gomp_fatal ("[%p,+%d] not mapped", (void *)h
, (int)s
);
479 if (n
->refcount
!= REFCOUNT_INFINITY
)
482 n
->dynamic_refcount
++;
484 gomp_mutex_unlock (&acc_dev
->lock
);
486 else if (!(f
& FLAG_CREATE
))
488 gomp_mutex_unlock (&acc_dev
->lock
);
489 gomp_fatal ("[%p,+%d] not mapped", (void *)h
, (int)s
);
493 struct target_mem_desc
*tgt
;
495 unsigned short kinds
;
501 kinds
= GOMP_MAP_ALLOC
;
503 gomp_mutex_unlock (&acc_dev
->lock
);
505 goacc_aq aq
= get_goacc_asyncqueue (async
);
507 tgt
= gomp_map_vars_async (acc_dev
, aq
, mapnum
, &hostaddrs
, NULL
, &s
,
508 &kinds
, true, GOMP_MAP_VARS_OPENACC
);
509 /* Initialize dynamic refcount. */
510 tgt
->list
[0].key
->dynamic_refcount
= 1;
512 gomp_mutex_lock (&acc_dev
->lock
);
515 tgt
->prev
= acc_dev
->openacc
.data_environ
;
516 acc_dev
->openacc
.data_environ
= tgt
;
518 gomp_mutex_unlock (&acc_dev
->lock
);
525 acc_create (void *h
, size_t s
)
527 return present_create_copy (FLAG_PRESENT
| FLAG_CREATE
, h
, s
, acc_async_sync
);
531 acc_create_async (void *h
, size_t s
, int async
)
533 present_create_copy (FLAG_PRESENT
| FLAG_CREATE
, h
, s
, async
);
/* acc_present_or_create used to be what acc_create is now.  */
/* acc_pcreate is acc_present_or_create by a different name.  */
#ifdef HAVE_ATTRIBUTE_ALIAS
strong_alias (acc_create, acc_present_or_create)
strong_alias (acc_create, acc_pcreate)
#else
void *
acc_present_or_create (void *h, size_t s)
{
  return acc_create (h, s);
}

void *
acc_pcreate (void *h, size_t s)
{
  return acc_create (h, s);
}
#endif
556 acc_copyin (void *h
, size_t s
)
558 return present_create_copy (FLAG_PRESENT
| FLAG_CREATE
| FLAG_COPY
, h
, s
,
563 acc_copyin_async (void *h
, size_t s
, int async
)
565 present_create_copy (FLAG_PRESENT
| FLAG_CREATE
| FLAG_COPY
, h
, s
, async
);
/* acc_present_or_copyin used to be what acc_copyin is now.  */
/* acc_pcopyin is acc_present_or_copyin by a different name.  */
#ifdef HAVE_ATTRIBUTE_ALIAS
strong_alias (acc_copyin, acc_present_or_copyin)
strong_alias (acc_copyin, acc_pcopyin)
#else
void *
acc_present_or_copyin (void *h, size_t s)
{
  return acc_copyin (h, s);
}

void *
acc_pcopyin (void *h, size_t s)
{
  return acc_copyin (h, s);
}
#endif
587 #define FLAG_COPYOUT (1 << 0)
588 #define FLAG_FINALIZE (1 << 1)
591 delete_copyout (unsigned f
, void *h
, size_t s
, int async
, const char *libfnname
)
596 struct goacc_thread
*thr
= goacc_thread ();
597 struct gomp_device_descr
*acc_dev
= thr
->dev
;
599 if (acc_dev
->capabilities
& GOMP_OFFLOAD_CAP_SHARED_MEM
)
602 gomp_mutex_lock (&acc_dev
->lock
);
604 n
= lookup_host (acc_dev
, h
, s
);
606 /* No need to call lazy open, as the data must already have been
611 gomp_mutex_unlock (&acc_dev
->lock
);
612 gomp_fatal ("[%p,%d] is not mapped", (void *)h
, (int)s
);
615 d
= (void *) (n
->tgt
->tgt_start
+ n
->tgt_offset
616 + (uintptr_t) h
- n
->host_start
);
618 host_size
= n
->host_end
- n
->host_start
;
620 if (n
->host_start
!= (uintptr_t) h
|| host_size
!= s
)
622 gomp_mutex_unlock (&acc_dev
->lock
);
623 gomp_fatal ("[%p,%d] surrounds2 [%p,+%d]",
624 (void *) n
->host_start
, (int) host_size
, (void *) h
, (int) s
);
627 if (n
->refcount
== REFCOUNT_INFINITY
)
630 n
->dynamic_refcount
= 0;
632 if (n
->refcount
< n
->dynamic_refcount
)
634 gomp_mutex_unlock (&acc_dev
->lock
);
635 gomp_fatal ("Dynamic reference counting assert fail\n");
638 if (f
& FLAG_FINALIZE
)
640 n
->refcount
-= n
->dynamic_refcount
;
641 n
->dynamic_refcount
= 0;
643 else if (n
->dynamic_refcount
)
645 n
->dynamic_refcount
--;
649 if (n
->refcount
== 0)
651 if (n
->tgt
->refcount
== 2)
653 struct target_mem_desc
*tp
, *t
;
654 for (tp
= NULL
, t
= acc_dev
->openacc
.data_environ
; t
!= NULL
;
661 acc_dev
->openacc
.data_environ
= t
->prev
;
666 if (f
& FLAG_COPYOUT
)
668 goacc_aq aq
= get_goacc_asyncqueue (async
);
669 gomp_copy_dev2host (acc_dev
, aq
, h
, d
, s
);
671 gomp_remove_var (acc_dev
, n
);
674 gomp_mutex_unlock (&acc_dev
->lock
);
678 acc_delete (void *h
, size_t s
)
680 delete_copyout (0, h
, s
, acc_async_sync
, __FUNCTION__
);
/* Release one dynamic reference to [H,+S] on queue ASYNC, no copy-back.  */

void
acc_delete_async (void *h, size_t s, int async)
{
  delete_copyout (0, h, s, async, __FUNCTION__);
}
690 acc_delete_finalize (void *h
, size_t s
)
692 delete_copyout (FLAG_FINALIZE
, h
, s
, acc_async_sync
, __FUNCTION__
);
696 acc_delete_finalize_async (void *h
, size_t s
, int async
)
698 delete_copyout (FLAG_FINALIZE
, h
, s
, async
, __FUNCTION__
);
702 acc_copyout (void *h
, size_t s
)
704 delete_copyout (FLAG_COPYOUT
, h
, s
, acc_async_sync
, __FUNCTION__
);
708 acc_copyout_async (void *h
, size_t s
, int async
)
710 delete_copyout (FLAG_COPYOUT
, h
, s
, async
, __FUNCTION__
);
714 acc_copyout_finalize (void *h
, size_t s
)
716 delete_copyout (FLAG_COPYOUT
| FLAG_FINALIZE
, h
, s
, acc_async_sync
,
721 acc_copyout_finalize_async (void *h
, size_t s
, int async
)
723 delete_copyout (FLAG_COPYOUT
| FLAG_FINALIZE
, h
, s
, async
, __FUNCTION__
);
727 update_dev_host (int is_dev
, void *h
, size_t s
, int async
)
732 goacc_lazy_initialize ();
734 struct goacc_thread
*thr
= goacc_thread ();
735 struct gomp_device_descr
*acc_dev
= thr
->dev
;
737 if (acc_dev
->capabilities
& GOMP_OFFLOAD_CAP_SHARED_MEM
)
740 gomp_mutex_lock (&acc_dev
->lock
);
742 n
= lookup_host (acc_dev
, h
, s
);
746 gomp_mutex_unlock (&acc_dev
->lock
);
747 gomp_fatal ("[%p,%d] is not mapped", h
, (int)s
);
750 d
= (void *) (n
->tgt
->tgt_start
+ n
->tgt_offset
751 + (uintptr_t) h
- n
->host_start
);
753 goacc_aq aq
= get_goacc_asyncqueue (async
);
756 gomp_copy_host2dev (acc_dev
, aq
, d
, h
, s
, /* TODO: cbuf? */ NULL
);
758 gomp_copy_dev2host (acc_dev
, aq
, h
, d
, s
);
760 gomp_mutex_unlock (&acc_dev
->lock
);
764 acc_update_device (void *h
, size_t s
)
766 update_dev_host (1, h
, s
, acc_async_sync
);
/* Refresh the device copy of [H,+S] from the host, on queue ASYNC.  */

void
acc_update_device_async (void *h, size_t s, int async)
{
  update_dev_host (1, h, s, async);
}
776 acc_update_self (void *h
, size_t s
)
778 update_dev_host (0, h
, s
, acc_async_sync
);
/* Refresh the host copy of [H,+S] from the device, on queue ASYNC.  */

void
acc_update_self_async (void *h, size_t s, int async)
{
  update_dev_host (0, h, s, async);
}
788 gomp_acc_insert_pointer (size_t mapnum
, void **hostaddrs
, size_t *sizes
,
789 void *kinds
, int async
)
791 struct target_mem_desc
*tgt
;
792 struct goacc_thread
*thr
= goacc_thread ();
793 struct gomp_device_descr
*acc_dev
= thr
->dev
;
795 if (acc_is_present (*hostaddrs
, *sizes
))
798 gomp_mutex_lock (&acc_dev
->lock
);
799 n
= lookup_host (acc_dev
, *hostaddrs
, *sizes
);
800 gomp_mutex_unlock (&acc_dev
->lock
);
803 for (size_t i
= 0; i
< tgt
->list_count
; i
++)
804 if (tgt
->list
[i
].key
== n
)
806 for (size_t j
= 0; j
< mapnum
; j
++)
807 if (i
+ j
< tgt
->list_count
&& tgt
->list
[i
+ j
].key
)
809 tgt
->list
[i
+ j
].key
->refcount
++;
810 tgt
->list
[i
+ j
].key
->dynamic_refcount
++;
814 /* Should not reach here. */
815 gomp_fatal ("Dynamic refcount incrementing failed for pointer/pset");
818 gomp_debug (0, " %s: prepare mappings\n", __FUNCTION__
);
819 goacc_aq aq
= get_goacc_asyncqueue (async
);
820 tgt
= gomp_map_vars_async (acc_dev
, aq
, mapnum
, hostaddrs
,
821 NULL
, sizes
, kinds
, true, GOMP_MAP_VARS_OPENACC
);
822 gomp_debug (0, " %s: mappings prepared\n", __FUNCTION__
);
824 /* Initialize dynamic refcount. */
825 tgt
->list
[0].key
->dynamic_refcount
= 1;
827 gomp_mutex_lock (&acc_dev
->lock
);
828 tgt
->prev
= acc_dev
->openacc
.data_environ
;
829 acc_dev
->openacc
.data_environ
= tgt
;
830 gomp_mutex_unlock (&acc_dev
->lock
);
834 gomp_acc_remove_pointer (void *h
, size_t s
, bool force_copyfrom
, int async
,
835 int finalize
, int mapnum
)
837 struct goacc_thread
*thr
= goacc_thread ();
838 struct gomp_device_descr
*acc_dev
= thr
->dev
;
840 struct target_mem_desc
*t
;
841 int minrefs
= (mapnum
== 1) ? 2 : 3;
843 if (!acc_is_present (h
, s
))
846 gomp_mutex_lock (&acc_dev
->lock
);
848 n
= lookup_host (acc_dev
, h
, 1);
852 gomp_mutex_unlock (&acc_dev
->lock
);
853 gomp_fatal ("%p is not a mapped block", (void *)h
);
856 gomp_debug (0, " %s: restore mappings\n", __FUNCTION__
);
860 if (n
->refcount
< n
->dynamic_refcount
)
862 gomp_mutex_unlock (&acc_dev
->lock
);
863 gomp_fatal ("Dynamic reference counting assert fail\n");
868 n
->refcount
-= n
->dynamic_refcount
;
869 n
->dynamic_refcount
= 0;
871 else if (n
->dynamic_refcount
)
873 n
->dynamic_refcount
--;
877 gomp_mutex_unlock (&acc_dev
->lock
);
879 if (n
->refcount
== 0)
881 if (t
->refcount
== minrefs
)
883 /* This is the last reference, so pull the descriptor off the
884 chain. This prevents gomp_unmap_vars via gomp_unmap_tgt from
885 freeing the device memory. */
886 struct target_mem_desc
*tp
;
887 for (tp
= NULL
, t
= acc_dev
->openacc
.data_environ
; t
!= NULL
;
895 acc_dev
->openacc
.data_environ
= t
->prev
;
901 /* Set refcount to 1 to allow gomp_unmap_vars to unmap it. */
903 t
->refcount
= minrefs
;
904 for (size_t i
= 0; i
< t
->list_count
; i
++)
905 if (t
->list
[i
].key
== n
)
907 t
->list
[i
].copy_from
= force_copyfrom
? 1 : 0;
911 /* If running synchronously, unmap immediately. */
912 if (async
< acc_async_noval
)
913 gomp_unmap_vars (t
, true);
916 goacc_aq aq
= get_goacc_asyncqueue (async
);
917 gomp_unmap_vars_async (t
, true, aq
);
921 gomp_mutex_unlock (&acc_dev
->lock
);
923 gomp_debug (0, " %s: mappings restored\n", __FUNCTION__
);