-/* Copyright (C) 2013-2020 Free Software Foundation, Inc.
+/* Copyright (C) 2013-2022 Free Software Foundation, Inc.
Contributed by Jakub Jelinek <jakub@redhat.com>.
This file is part of the GNU Offloading and Multi Processing Library
# include <inttypes.h> /* For PRIu64. */
#endif
#include <string.h>
+#include <stdio.h> /* For snprintf. */
#include <assert.h>
#include <errno.h>
#include "plugin-suffix.h"
#endif
+typedef uintptr_t *hash_entry_type;
/* Allocation hook required by the hashtab.h template; delegates to the
   libgomp allocation wrapper (gomp_malloc — presumably fatal on OOM;
   confirm in alloc.c).  */
static inline void *
htab_alloc (size_t nbytes)
{
  return gomp_malloc (nbytes);
}
/* Deallocation hook required by the hashtab.h template.  */
static inline void
htab_free (void *p)
{
  free (p);
}
+#include "hashtab.h"
+
+ialias_redirect (GOMP_task)
+
+static inline hashval_t
+htab_hash (hash_entry_type element)
+{
+ return hash_pointer ((void *) element);
+}
+
+static inline bool
+htab_eq (hash_entry_type x, hash_entry_type y)
+{
+ return x == y;
+}
+
#define FIELD_TGT_EMPTY (~(size_t) 0)
static void gomp_target_init (void);
/* Number of GOMP_OFFLOAD_CAP_OPENMP_400 devices. */
static int num_devices_openmp;
+/* OpenMP requires mask. */
+static int omp_requires_mask;
+
/* Similar to gomp_realloc, but release register_lock before gomp_fatal. */
static void *
}
static struct gomp_device_descr *
-resolve_device (int device_id)
+resolve_device (int device_id, bool remapped)
{
- if (device_id == GOMP_DEVICE_ICV)
+ if (remapped && device_id == GOMP_DEVICE_ICV)
{
struct gomp_task_icv *icv = gomp_icv (false);
device_id = icv->default_device_var;
+ remapped = false;
}
- if (device_id < 0 || device_id >= gomp_get_num_devices ())
- return NULL;
+ if (device_id < 0)
+ {
+ if (device_id == (remapped ? GOMP_DEVICE_HOST_FALLBACK
+ : omp_initial_device))
+ return NULL;
+ if (device_id == omp_invalid_device)
+ gomp_fatal ("omp_invalid_device encountered");
+ else if (gomp_target_offload_var == GOMP_TARGET_OFFLOAD_MANDATORY)
+ gomp_fatal ("OMP_TARGET_OFFLOAD is set to MANDATORY, "
+ "but device not found");
+
+ return NULL;
+ }
+ else if (device_id >= gomp_get_num_devices ())
+ {
+ if (gomp_target_offload_var == GOMP_TARGET_OFFLOAD_MANDATORY
+ && device_id != num_devices_openmp)
+ gomp_fatal ("OMP_TARGET_OFFLOAD is set to MANDATORY, "
+ "but device not found");
+
+ return NULL;
+ }
gomp_mutex_lock (&devices[device_id].lock);
if (devices[device_id].state == GOMP_DEVICE_UNINITIALIZED)
else if (devices[device_id].state == GOMP_DEVICE_FINALIZED)
{
gomp_mutex_unlock (&devices[device_id].lock);
+
+ if (gomp_target_offload_var == GOMP_TARGET_OFFLOAD_MANDATORY)
+ gomp_fatal ("OMP_TARGET_OFFLOAD is set to MANDATORY, "
+ "but device is finalized");
+
return NULL;
}
gomp_mutex_unlock (&devices[device_id].lock);
struct goacc_asyncqueue *),
const char *dst, void *dstaddr,
const char *src, const void *srcaddr,
+ const void *srcaddr_orig,
size_t size, struct goacc_asyncqueue *aq)
{
if (!copy_func (devicep->target_id, dstaddr, srcaddr, size, aq))
{
gomp_mutex_unlock (&devicep->lock);
- gomp_fatal ("Copying of %s object [%p..%p) to %s object [%p..%p) failed",
- src, srcaddr, srcaddr + size, dst, dstaddr, dstaddr + size);
+ if (srcaddr_orig && srcaddr_orig != srcaddr)
+ gomp_fatal ("Copying of %s object [%p..%p)"
+ " via buffer %s object [%p..%p)"
+ " to %s object [%p..%p) failed",
+ src, srcaddr_orig, srcaddr_orig + size,
+ src, srcaddr, srcaddr + size,
+ dst, dstaddr, dstaddr + size);
+ else
+ gomp_fatal ("Copying of %s object [%p..%p)"
+ " to %s object [%p..%p) failed",
+ src, srcaddr, srcaddr + size,
+ dst, dstaddr, dstaddr + size);
}
}
host to device (e.g. map(alloc:), map(from:) etc.). */
#define MAX_COALESCE_BUF_GAP (4 * 1024)
-/* Add region with device tgt_start relative offset and length to CBUF. */
/* Add region with device tgt_start relative offset and length to CBUF.

   This must not be used for asynchronous copies, because the host data might
   not be computed yet (by an earlier asynchronous compute region, for
   example).
   TODO ... but we could allow CBUF usage for EPHEMERAL data?  (Open question:
   is it more performant to use libgomp CBUF buffering or individual device
   asynchronous copying?)  */
static inline void
gomp_coalesce_buf_add (struct gomp_coalesce_buf *cbuf, size_t start, size_t len)
}
}
+/* Copy host memory to an offload device. In asynchronous mode (if AQ is
+ non-NULL), when the source data is stack or may otherwise be deallocated
+ before the asynchronous copy takes place, EPHEMERAL must be passed as
+ TRUE. */
+
attribute_hidden void
gomp_copy_host2dev (struct gomp_device_descr *devicep,
struct goacc_asyncqueue *aq,
void *d, const void *h, size_t sz,
- struct gomp_coalesce_buf *cbuf)
+ bool ephemeral, struct gomp_coalesce_buf *cbuf)
{
+ if (__builtin_expect (aq != NULL, 0))
+ {
+ /* See 'gomp_coalesce_buf_add'. */
+ assert (!cbuf);
+
+ void *h_buf = (void *) h;
+ if (ephemeral)
+ {
+ /* We're queueing up an asynchronous copy from data that may
+ disappear before the transfer takes place (i.e. because it is a
+ stack local in a function that is no longer executing). Make a
+ copy of the data into a temporary buffer in those cases. */
+ h_buf = gomp_malloc (sz);
+ memcpy (h_buf, h, sz);
+ }
+ goacc_device_copy_async (devicep, devicep->openacc.async.host2dev_func,
+ "dev", d, "host", h_buf, h, sz, aq);
+ if (ephemeral)
+ /* Free temporary buffer once the transfer has completed. */
+ devicep->openacc.async.queue_callback_func (aq, free, h_buf);
+
+ return;
+ }
+
if (cbuf)
{
uintptr_t doff = (uintptr_t) d - cbuf->tgt->tgt_start;
else if (cbuf->chunks[middle].start <= doff)
{
if (doff + sz > cbuf->chunks[middle].end)
- gomp_fatal ("internal libgomp cbuf error");
+ {
+ gomp_mutex_unlock (&devicep->lock);
+ gomp_fatal ("internal libgomp cbuf error");
+ }
memcpy ((char *) cbuf->buf + (doff - cbuf->chunks[0].start),
h, sz);
return;
}
}
}
- if (__builtin_expect (aq != NULL, 0))
- goacc_device_copy_async (devicep, devicep->openacc.async.host2dev_func,
- "dev", d, "host", h, sz, aq);
- else
- gomp_device_copy (devicep, devicep->host2dev_func, "dev", d, "host", h, sz);
+
+ gomp_device_copy (devicep, devicep->host2dev_func, "dev", d, "host", h, sz);
}
attribute_hidden void
{
if (__builtin_expect (aq != NULL, 0))
goacc_device_copy_async (devicep, devicep->openacc.async.dev2host_func,
- "host", h, "dev", d, sz, aq);
+ "host", h, "dev", d, NULL, sz, aq);
else
gomp_device_copy (devicep, devicep->dev2host_func, "host", h, "dev", d, sz);
}
}
}
+/* Increment reference count of a splay_tree_key region K by 1.
+ If REFCOUNT_SET != NULL, use it to track already seen refcounts, and only
+ increment the value if refcount is not yet contained in the set (used for
+ OpenMP 5.0, which specifies that a region's refcount is adjusted at most
+ once for each construct). */
+
+static inline void
+gomp_increment_refcount (splay_tree_key k, htab_t *refcount_set)
+{
+ if (k == NULL || k->refcount == REFCOUNT_INFINITY)
+ return;
+
+ uintptr_t *refcount_ptr = &k->refcount;
+
+ if (REFCOUNT_STRUCTELEM_FIRST_P (k->refcount))
+ refcount_ptr = &k->structelem_refcount;
+ else if (REFCOUNT_STRUCTELEM_P (k->refcount))
+ refcount_ptr = k->structelem_refcount_ptr;
+
+ if (refcount_set)
+ {
+ if (htab_find (*refcount_set, refcount_ptr))
+ return;
+ uintptr_t **slot = htab_find_slot (refcount_set, refcount_ptr, INSERT);
+ *slot = refcount_ptr;
+ }
+
+ *refcount_ptr += 1;
+ return;
+}
+
+/* Decrement reference count of a splay_tree_key region K by 1, or if DELETE_P
+ is true, set reference count to zero. If REFCOUNT_SET != NULL, use it to
+ track already seen refcounts, and only adjust the value if refcount is not
+ yet contained in the set (like gomp_increment_refcount).
+
+ Return out-values: set *DO_COPY to true if we set the refcount to zero, or
+ it is already zero and we know we decremented it earlier. This signals that
+ associated maps should be copied back to host.
+
+ *DO_REMOVE is set to true when we this is the first handling of this refcount
+ and we are setting it to zero. This signals a removal of this key from the
+ splay-tree map.
+
+ Copy and removal are separated due to cases like handling of structure
+ elements, e.g. each map of a structure element representing a possible copy
+ out of a structure field has to be handled individually, but we only signal
+ removal for one (the first encountered) sibing map. */
+
+static inline void
+gomp_decrement_refcount (splay_tree_key k, htab_t *refcount_set, bool delete_p,
+ bool *do_copy, bool *do_remove)
+{
+ if (k == NULL || k->refcount == REFCOUNT_INFINITY)
+ {
+ *do_copy = *do_remove = false;
+ return;
+ }
+
+ uintptr_t *refcount_ptr = &k->refcount;
+
+ if (REFCOUNT_STRUCTELEM_FIRST_P (k->refcount))
+ refcount_ptr = &k->structelem_refcount;
+ else if (REFCOUNT_STRUCTELEM_P (k->refcount))
+ refcount_ptr = k->structelem_refcount_ptr;
+
+ bool new_encountered_refcount;
+ bool set_to_zero = false;
+ bool is_zero = false;
+
+ uintptr_t orig_refcount = *refcount_ptr;
+
+ if (refcount_set)
+ {
+ if (htab_find (*refcount_set, refcount_ptr))
+ {
+ new_encountered_refcount = false;
+ goto end;
+ }
+
+ uintptr_t **slot = htab_find_slot (refcount_set, refcount_ptr, INSERT);
+ *slot = refcount_ptr;
+ new_encountered_refcount = true;
+ }
+ else
+ /* If no refcount_set being used, assume all keys are being decremented
+ for the first time. */
+ new_encountered_refcount = true;
+
+ if (delete_p)
+ *refcount_ptr = 0;
+ else if (*refcount_ptr > 0)
+ *refcount_ptr -= 1;
+
+ end:
+ if (*refcount_ptr == 0)
+ {
+ if (orig_refcount > 0)
+ set_to_zero = true;
+
+ is_zero = true;
+ }
+
+ *do_copy = (set_to_zero || (!new_encountered_refcount && is_zero));
+ *do_remove = (new_encountered_refcount && set_to_zero);
+}
+
/* Handle the case where gomp_map_lookup, splay_tree_lookup or
gomp_map_0len_lookup found oldn for newn.
Helper function of gomp_map_vars. */
gomp_map_vars_existing (struct gomp_device_descr *devicep,
struct goacc_asyncqueue *aq, splay_tree_key oldn,
splay_tree_key newn, struct target_var_desc *tgt_var,
- unsigned char kind, struct gomp_coalesce_buf *cbuf)
+ unsigned char kind, bool always_to_flag, bool implicit,
+ struct gomp_coalesce_buf *cbuf,
+ htab_t *refcount_set)
{
+ assert (kind != GOMP_MAP_ATTACH
+ || kind != GOMP_MAP_ATTACH_ZERO_LENGTH_ARRAY_SECTION);
+
tgt_var->key = oldn;
tgt_var->copy_from = GOMP_MAP_COPY_FROM_P (kind);
tgt_var->always_copy_from = GOMP_MAP_ALWAYS_FROM_P (kind);
- tgt_var->do_detach = kind == GOMP_MAP_ATTACH;
+ tgt_var->is_attach = false;
tgt_var->offset = newn->host_start - oldn->host_start;
- tgt_var->length = newn->host_end - newn->host_start;
+
+ /* For implicit maps, old contained in new is valid. */
+ bool implicit_subset = (implicit
+ && newn->host_start <= oldn->host_start
+ && oldn->host_end <= newn->host_end);
+ if (implicit_subset)
+ tgt_var->length = oldn->host_end - oldn->host_start;
+ else
+ tgt_var->length = newn->host_end - newn->host_start;
if ((kind & GOMP_MAP_FLAG_FORCE)
- || oldn->host_start > newn->host_start
- || oldn->host_end < newn->host_end)
+ /* For implicit maps, old contained in new is valid. */
+ || !(implicit_subset
+ /* Otherwise, new contained inside old is considered valid. */
+ || (oldn->host_start <= newn->host_start
+ && newn->host_end <= oldn->host_end)))
{
gomp_mutex_unlock (&devicep->lock);
gomp_fatal ("Trying to map into device [%p..%p) object when "
(void *) oldn->host_start, (void *) oldn->host_end);
}
- if (GOMP_MAP_ALWAYS_TO_P (kind))
- gomp_copy_host2dev (devicep, aq,
- (void *) (oldn->tgt->tgt_start + oldn->tgt_offset
- + newn->host_start - oldn->host_start),
- (void *) newn->host_start,
- newn->host_end - newn->host_start, cbuf);
+ if (GOMP_MAP_ALWAYS_TO_P (kind) || always_to_flag)
+ {
+ /* Implicit + always should not happen. If this does occur, below
+ address/length adjustment is a TODO. */
+ assert (!implicit_subset);
- if (oldn->refcount != REFCOUNT_INFINITY)
- oldn->refcount++;
+ if (oldn->aux && oldn->aux->attach_count)
+ {
+ /* We have to be careful not to overwrite still attached pointers
+ during the copyback to host. */
+ uintptr_t addr = newn->host_start;
+ while (addr < newn->host_end)
+ {
+ size_t i = (addr - oldn->host_start) / sizeof (void *);
+ if (oldn->aux->attach_count[i] == 0)
+ gomp_copy_host2dev (devicep, aq,
+ (void *) (oldn->tgt->tgt_start
+ + oldn->tgt_offset
+ + addr - oldn->host_start),
+ (void *) addr,
+ sizeof (void *), false, cbuf);
+ addr += sizeof (void *);
+ }
+ }
+ else
+ gomp_copy_host2dev (devicep, aq,
+ (void *) (oldn->tgt->tgt_start + oldn->tgt_offset
+ + newn->host_start - oldn->host_start),
+ (void *) newn->host_start,
+ newn->host_end - newn->host_start, false, cbuf);
+ }
+
+ gomp_increment_refcount (oldn, refcount_set);
}
static int
get_kind (bool short_mapkind, void *kinds, int idx)
{
- return short_mapkind ? ((unsigned short *) kinds)[idx]
- : ((unsigned char *) kinds)[idx];
+ if (!short_mapkind)
+ return ((unsigned char *) kinds)[idx];
+
+ int val = ((unsigned short *) kinds)[idx];
+ if (GOMP_MAP_IMPLICIT_P (val))
+ val &= ~GOMP_MAP_IMPLICIT;
+ return val;
+}
+
+
/* Return whether map entry IDX in KINDS carries the implicit-map flag.
   Only the short (unsigned short) kind encoding can carry it.  */
static bool
get_implicit (bool short_mapkind, void *kinds, int idx)
{
  return short_mapkind
	 && GOMP_MAP_IMPLICIT_P (((unsigned short *) kinds)[idx]);
}
static void
gomp_map_pointer (struct target_mem_desc *tgt, struct goacc_asyncqueue *aq,
uintptr_t host_ptr, uintptr_t target_offset, uintptr_t bias,
- struct gomp_coalesce_buf *cbuf)
+ struct gomp_coalesce_buf *cbuf,
+ bool allow_zero_length_array_sections)
{
struct gomp_device_descr *devicep = tgt->device_descr;
struct splay_tree_s *mem_map = &devicep->mem_map;
cur_node.tgt_offset = (uintptr_t) NULL;
gomp_copy_host2dev (devicep, aq,
(void *) (tgt->tgt_start + target_offset),
- (void *) &cur_node.tgt_offset,
- sizeof (void *), cbuf);
+ (void *) &cur_node.tgt_offset, sizeof (void *),
+ true, cbuf);
return;
}
/* Add bias to the pointer value. */
splay_tree_key n = gomp_map_lookup (mem_map, &cur_node);
if (n == NULL)
{
- gomp_mutex_unlock (&devicep->lock);
- gomp_fatal ("Pointer target of array section wasn't mapped");
- }
- cur_node.host_start -= n->host_start;
- cur_node.tgt_offset
- = n->tgt->tgt_start + n->tgt_offset + cur_node.host_start;
- /* At this point tgt_offset is target address of the
- array section. Now subtract bias to get what we want
- to initialize the pointer with. */
- cur_node.tgt_offset -= bias;
+ if (allow_zero_length_array_sections)
+ cur_node.tgt_offset = 0;
+ else
+ {
+ gomp_mutex_unlock (&devicep->lock);
+ gomp_fatal ("Pointer target of array section wasn't mapped");
+ }
+ }
+ else
+ {
+ cur_node.host_start -= n->host_start;
+ cur_node.tgt_offset
+ = n->tgt->tgt_start + n->tgt_offset + cur_node.host_start;
+ /* At this point tgt_offset is target address of the
+ array section. Now subtract bias to get what we want
+ to initialize the pointer with. */
+ cur_node.tgt_offset -= bias;
+ }
gomp_copy_host2dev (devicep, aq, (void *) (tgt->tgt_start + target_offset),
- (void *) &cur_node.tgt_offset, sizeof (void *), cbuf);
+ (void *) &cur_node.tgt_offset, sizeof (void *),
+ true, cbuf);
}
static void
struct goacc_asyncqueue *aq, splay_tree_key n,
size_t first, size_t i, void **hostaddrs,
size_t *sizes, void *kinds,
- struct gomp_coalesce_buf *cbuf)
+ struct gomp_coalesce_buf *cbuf, htab_t *refcount_set)
{
struct gomp_device_descr *devicep = tgt->device_descr;
struct splay_tree_s *mem_map = &devicep->mem_map;
struct splay_tree_key_s cur_node;
int kind;
+ bool implicit;
const bool short_mapkind = true;
const int typemask = short_mapkind ? 0xff : 0x7;
cur_node.host_end = cur_node.host_start + sizes[i];
splay_tree_key n2 = splay_tree_lookup (mem_map, &cur_node);
kind = get_kind (short_mapkind, kinds, i);
+ implicit = get_implicit (short_mapkind, kinds, i);
if (n2
&& n2->tgt == n->tgt
&& n2->host_start - n->host_start == n2->tgt_offset - n->tgt_offset)
{
- gomp_map_vars_existing (devicep, aq, n2, &cur_node,
- &tgt->list[i], kind & typemask, cbuf);
+ gomp_map_vars_existing (devicep, aq, n2, &cur_node, &tgt->list[i],
+ kind & typemask, false, implicit, cbuf,
+ refcount_set);
return;
}
if (sizes[i] == 0)
&& n2->host_start - n->host_start
== n2->tgt_offset - n->tgt_offset)
{
- gomp_map_vars_existing (devicep, aq, n2, &cur_node,
- &tgt->list[i], kind & typemask, cbuf);
+ gomp_map_vars_existing (devicep, aq, n2, &cur_node, &tgt->list[i],
+ kind & typemask, false, implicit, cbuf,
+ refcount_set);
return;
}
}
&& n2->host_start - n->host_start == n2->tgt_offset - n->tgt_offset)
{
gomp_map_vars_existing (devicep, aq, n2, &cur_node, &tgt->list[i],
- kind & typemask, cbuf);
+ kind & typemask, false, implicit, cbuf,
+ refcount_set);
return;
}
}
gomp_attach_pointer (struct gomp_device_descr *devicep,
struct goacc_asyncqueue *aq, splay_tree mem_map,
splay_tree_key n, uintptr_t attach_to, size_t bias,
- struct gomp_coalesce_buf *cbufp)
+ struct gomp_coalesce_buf *cbufp,
+ bool allow_zero_length_array_sections)
{
struct splay_tree_key_s s;
size_t size, idx;
if (!tn)
{
- gomp_mutex_unlock (&devicep->lock);
- gomp_fatal ("pointer target not mapped for attach");
+ if (allow_zero_length_array_sections)
+ /* When allowing attachment to zero-length array sections, we
+ allow attaching to NULL pointers when the target region is not
+ mapped. */
+ data = 0;
+ else
+ {
+ gomp_mutex_unlock (&devicep->lock);
+ gomp_fatal ("pointer target not mapped for attach");
+ }
}
-
- data = tn->tgt->tgt_start + tn->tgt_offset + target - tn->host_start;
+ else
+ data = tn->tgt->tgt_start + tn->tgt_offset + target - tn->host_start;
gomp_debug (1,
"%s: attaching host %p, target %p (struct base %p) to %p\n",
(void *) (n->tgt->tgt_start + n->tgt_offset), (void *) data);
gomp_copy_host2dev (devicep, aq, (void *) devptr, (void *) &data,
- sizeof (void *), cbufp);
+ sizeof (void *), true, cbufp);
}
else
gomp_debug (1, "%s: attach count for %p -> %u\n", __FUNCTION__,
(void *) target);
gomp_copy_host2dev (devicep, aq, (void *) devptr, (void *) &target,
- sizeof (void *), cbufp);
+ sizeof (void *), true, cbufp);
}
else
gomp_debug (1, "%s: attach count for %p -> %u\n", __FUNCTION__,
struct goacc_asyncqueue *aq, size_t mapnum,
void **hostaddrs, void **devaddrs, size_t *sizes,
void *kinds, bool short_mapkind,
+ htab_t *refcount_set,
enum gomp_map_vars_kind pragma_kind)
{
size_t i, tgt_align, tgt_size, not_found_cnt = 0;
bool has_firstprivate = false;
+ bool has_always_ptrset = false;
+ bool openmp_p = (pragma_kind & GOMP_MAP_VARS_OPENACC) == 0;
const int rshift = short_mapkind ? 8 : 3;
const int typemask = short_mapkind ? 0xff : 0x7;
struct splay_tree_s *mem_map = &devicep->mem_map;
struct target_mem_desc *tgt
= gomp_malloc (sizeof (*tgt) + sizeof (tgt->list[0]) * mapnum);
tgt->list_count = mapnum;
- tgt->refcount = (pragma_kind == GOMP_MAP_VARS_ENTER_DATA
- || pragma_kind == GOMP_MAP_VARS_OPENACC_ENTER_DATA) ? 0 : 1;
+ tgt->refcount = (pragma_kind & GOMP_MAP_VARS_ENTER_DATA) ? 0 : 1;
tgt->device_descr = devicep;
tgt->prev = NULL;
struct gomp_coalesce_buf cbuf, *cbufp = NULL;
for (i = 0; i < mapnum; i++)
{
int kind = get_kind (short_mapkind, kinds, i);
+ bool implicit = get_implicit (short_mapkind, kinds, i);
if (hostaddrs[i] == NULL
|| (kind & typemask) == GOMP_MAP_FIRSTPRIVATE_INT)
{
tgt->list[i].offset = OFFSET_INLINED;
continue;
}
- else if ((kind & typemask) == GOMP_MAP_USE_DEVICE_PTR)
+ else if ((kind & typemask) == GOMP_MAP_USE_DEVICE_PTR
+ || (kind & typemask) == GOMP_MAP_USE_DEVICE_PTR_IF_PRESENT)
{
tgt->list[i].key = NULL;
if (!not_found_cnt)
cur_node.host_start = (uintptr_t) hostaddrs[i];
cur_node.host_end = cur_node.host_start;
splay_tree_key n = gomp_map_lookup (mem_map, &cur_node);
- if (n == NULL)
+ if (n != NULL)
+ {
+ cur_node.host_start -= n->host_start;
+ hostaddrs[i]
+ = (void *) (n->tgt->tgt_start + n->tgt_offset
+ + cur_node.host_start);
+ }
+ else if ((kind & typemask) == GOMP_MAP_USE_DEVICE_PTR)
{
gomp_mutex_unlock (&devicep->lock);
gomp_fatal ("use_device_ptr pointer wasn't mapped");
}
- cur_node.host_start -= n->host_start;
- hostaddrs[i]
- = (void *) (n->tgt->tgt_start + n->tgt_offset
- + cur_node.host_start);
- tgt->list[i].offset = ~(uintptr_t) 0;
+ else if ((kind & typemask) == GOMP_MAP_USE_DEVICE_PTR_IF_PRESENT)
+ /* If not present, continue using the host address. */
+ ;
+ else
+ __builtin_unreachable ();
+ tgt->list[i].offset = OFFSET_INLINED;
}
else
tgt->list[i].offset = 0;
for (i = first; i <= last; i++)
{
tgt->list[i].key = NULL;
- if (gomp_to_device_kind_p (get_kind (short_mapkind, kinds, i)
- & typemask))
+ if (!aq
+ && gomp_to_device_kind_p (get_kind (short_mapkind, kinds, i)
+ & typemask))
gomp_coalesce_buf_add (&cbuf,
tgt_size - cur_node.host_end
+ (uintptr_t) hostaddrs[i],
}
for (i = first; i <= last; i++)
gomp_map_fields_existing (tgt, aq, n, first, i, hostaddrs,
- sizes, kinds, NULL);
+ sizes, kinds, NULL, refcount_set);
i--;
continue;
}
has_firstprivate = true;
continue;
}
- else if ((kind & typemask) == GOMP_MAP_ATTACH)
+ else if ((kind & typemask) == GOMP_MAP_ATTACH
+ || ((kind & typemask)
+ == GOMP_MAP_ATTACH_ZERO_LENGTH_ARRAY_SECTION))
{
tgt->list[i].key = NULL;
has_firstprivate = true;
continue;
}
cur_node.host_start = (uintptr_t) hostaddrs[i];
- if (!GOMP_MAP_POINTER_P (kind & typemask)
- && (kind & typemask) != GOMP_MAP_ATTACH)
+ if (!GOMP_MAP_POINTER_P (kind & typemask))
cur_node.host_end = cur_node.host_start + sizes[i];
else
cur_node.host_end = cur_node.host_start + sizeof (void *);
if (tgt_align < align)
tgt_align = align;
tgt_size = (tgt_size + align - 1) & ~(align - 1);
- gomp_coalesce_buf_add (&cbuf, tgt_size,
- cur_node.host_end - cur_node.host_start);
+ if (!aq)
+ gomp_coalesce_buf_add (&cbuf, tgt_size,
+ cur_node.host_end - cur_node.host_start);
tgt_size += cur_node.host_end - cur_node.host_start;
has_firstprivate = true;
continue;
else
n = splay_tree_lookup (mem_map, &cur_node);
if (n && n->refcount != REFCOUNT_LINK)
- gomp_map_vars_existing (devicep, aq, n, &cur_node, &tgt->list[i],
- kind & typemask, NULL);
+ {
+ int always_to_cnt = 0;
+ if ((kind & typemask) == GOMP_MAP_TO_PSET)
+ {
+ bool has_nullptr = false;
+ size_t j;
+ for (j = 0; j < n->tgt->list_count; j++)
+ if (n->tgt->list[j].key == n)
+ {
+ has_nullptr = n->tgt->list[j].has_null_ptr_assoc;
+ break;
+ }
+ if (n->tgt->list_count == 0)
+ {
+ /* 'declare target'; assume has_nullptr; it could also be
+ statically assigned pointer, but that it should be to
+ the equivalent variable on the host. */
+ assert (n->refcount == REFCOUNT_INFINITY);
+ has_nullptr = true;
+ }
+ else
+ assert (j < n->tgt->list_count);
		  /* Re-map the data if there is an 'always' modifier, or if a
		     null pointer was there and now a non-null one has been
		     found; that permits transparent re-mapping for Fortran
		     array descriptors which were previously mapped
		     unallocated.  */
+ for (j = i + 1; j < mapnum; j++)
+ {
+ int ptr_kind = get_kind (short_mapkind, kinds, j) & typemask;
+ if (!GOMP_MAP_ALWAYS_POINTER_P (ptr_kind)
+ && (!has_nullptr
+ || !GOMP_MAP_POINTER_P (ptr_kind)
+ || *(void **) hostaddrs[j] == NULL))
+ break;
+ else if ((uintptr_t) hostaddrs[j] < cur_node.host_start
+ || ((uintptr_t) hostaddrs[j] + sizeof (void *)
+ > cur_node.host_end))
+ break;
+ else
+ {
+ has_always_ptrset = true;
+ ++always_to_cnt;
+ }
+ }
+ }
+ gomp_map_vars_existing (devicep, aq, n, &cur_node, &tgt->list[i],
+ kind & typemask, always_to_cnt > 0, implicit,
+ NULL, refcount_set);
+ i += always_to_cnt;
+ }
else
{
tgt->list[i].key = NULL;
if (tgt_align < align)
tgt_align = align;
tgt_size = (tgt_size + align - 1) & ~(align - 1);
- if (gomp_to_device_kind_p (kind & typemask))
+ if (!aq
+ && gomp_to_device_kind_p (kind & typemask))
gomp_coalesce_buf_add (&cbuf, tgt_size,
cur_node.host_end - cur_node.host_start);
tgt_size += cur_node.host_end - cur_node.host_start;
if ((kind & typemask) == GOMP_MAP_TO_PSET)
{
size_t j;
+ int kind;
for (j = i + 1; j < mapnum; j++)
- if (!GOMP_MAP_POINTER_P (get_kind (short_mapkind, kinds, j)
- & typemask))
+ if (!GOMP_MAP_POINTER_P ((kind = (get_kind (short_mapkind,
+ kinds, j)) & typemask))
+ && !GOMP_MAP_ALWAYS_POINTER_P (kind))
break;
else if ((uintptr_t) hostaddrs[j] < cur_node.host_start
|| ((uintptr_t) hostaddrs[j] + sizeof (void *)
tgt_size = mapnum * sizeof (void *);
tgt->array = NULL;
- if (not_found_cnt || has_firstprivate)
+ if (not_found_cnt || has_firstprivate || has_always_ptrset)
{
if (not_found_cnt)
tgt->array = gomp_malloc (not_found_cnt * sizeof (*tgt->array));
splay_tree_node array = tgt->array;
- size_t j, field_tgt_offset = 0, field_tgt_clear = ~(size_t) 0;
+ size_t j, field_tgt_offset = 0, field_tgt_clear = FIELD_TGT_EMPTY;
uintptr_t field_tgt_base = 0;
+ splay_tree_key field_tgt_structelem_first = NULL;
for (i = 0; i < mapnum; i++)
- if (tgt->list[i].key == NULL)
+ if (has_always_ptrset
+ && tgt->list[i].key
+ && (get_kind (short_mapkind, kinds, i) & typemask)
+ == GOMP_MAP_TO_PSET)
+ {
+ splay_tree_key k = tgt->list[i].key;
+ bool has_nullptr = false;
+ size_t j;
+ for (j = 0; j < k->tgt->list_count; j++)
+ if (k->tgt->list[j].key == k)
+ {
+ has_nullptr = k->tgt->list[j].has_null_ptr_assoc;
+ break;
+ }
+ if (k->tgt->list_count == 0)
+ has_nullptr = true;
+ else
+ assert (j < k->tgt->list_count);
+
+ tgt->list[i].has_null_ptr_assoc = false;
+ for (j = i + 1; j < mapnum; j++)
+ {
+ int ptr_kind = get_kind (short_mapkind, kinds, j) & typemask;
+ if (!GOMP_MAP_ALWAYS_POINTER_P (ptr_kind)
+ && (!has_nullptr
+ || !GOMP_MAP_POINTER_P (ptr_kind)
+ || *(void **) hostaddrs[j] == NULL))
+ break;
+ else if ((uintptr_t) hostaddrs[j] < k->host_start
+ || ((uintptr_t) hostaddrs[j] + sizeof (void *)
+ > k->host_end))
+ break;
+ else
+ {
+ if (*(void **) hostaddrs[j] == NULL)
+ tgt->list[i].has_null_ptr_assoc = true;
+ tgt->list[j].key = k;
+ tgt->list[j].copy_from = false;
+ tgt->list[j].always_copy_from = false;
+ tgt->list[j].is_attach = false;
+ gomp_increment_refcount (k, refcount_set);
+ gomp_map_pointer (k->tgt, aq,
+ (uintptr_t) *(void **) hostaddrs[j],
+ k->tgt_offset + ((uintptr_t) hostaddrs[j]
+ - k->host_start),
+ sizes[j], cbufp, false);
+ }
+ }
+ i = j - 1;
+ }
+ else if (tgt->list[i].key == NULL)
{
int kind = get_kind (short_mapkind, kinds, i);
+ bool implicit = get_implicit (short_mapkind, kinds, i);
if (hostaddrs[i] == NULL)
continue;
switch (kind & typemask)
len = sizes[i];
gomp_copy_host2dev (devicep, aq,
(void *) (tgt->tgt_start + tgt_size),
- (void *) hostaddrs[i], len, cbufp);
+ (void *) hostaddrs[i], len, false, cbufp);
	      /* Save the device address in hostaddrs to permit later
		 availability when doing a deep firstprivate with pointer
		 attach.  */
+ hostaddrs[i] = (void *) (tgt->tgt_start + tgt_size);
tgt_size += len;
+
+ /* If followed by GOMP_MAP_ATTACH, pointer assign this
+ firstprivate to hostaddrs[i+1], which is assumed to contain a
+ device address. */
+ if (i + 1 < mapnum
+ && (GOMP_MAP_ATTACH
+ == (typemask & get_kind (short_mapkind, kinds, i+1))))
+ {
+ uintptr_t target = (uintptr_t) hostaddrs[i];
+ void *devptr = *(void**) hostaddrs[i+1] + sizes[i+1];
+ gomp_copy_host2dev (devicep, aq, devptr, &target,
+ sizeof (void *), false, cbufp);
+ ++i;
+ }
continue;
case GOMP_MAP_FIRSTPRIVATE_INT:
case GOMP_MAP_ZERO_LEN_ARRAY_SECTION:
continue;
+ case GOMP_MAP_USE_DEVICE_PTR_IF_PRESENT:
+ /* The OpenACC 'host_data' construct only allows 'use_device'
+ "mapping" clauses, so in the first loop, 'not_found_cnt'
+ must always have been zero, so all OpenACC 'use_device'
+ clauses have already been handled. (We can only easily test
+ 'use_device' with 'if_present' clause here.) */
+ assert (tgt->list[i].offset == OFFSET_INLINED);
+ /* Nevertheless, FALLTHRU to the normal handling, to keep the
+ code conceptually simple, similar to the first loop. */
case GOMP_MAP_USE_DEVICE_PTR:
if (tgt->list[i].offset == 0)
{
cur_node.host_start = (uintptr_t) hostaddrs[i];
cur_node.host_end = cur_node.host_start;
n = gomp_map_lookup (mem_map, &cur_node);
- if (n == NULL)
+ if (n != NULL)
+ {
+ cur_node.host_start -= n->host_start;
+ hostaddrs[i]
+ = (void *) (n->tgt->tgt_start + n->tgt_offset
+ + cur_node.host_start);
+ }
+ else if ((kind & typemask) == GOMP_MAP_USE_DEVICE_PTR)
{
gomp_mutex_unlock (&devicep->lock);
gomp_fatal ("use_device_ptr pointer wasn't mapped");
}
- cur_node.host_start -= n->host_start;
- hostaddrs[i]
- = (void *) (n->tgt->tgt_start + n->tgt_offset
- + cur_node.host_start);
- tgt->list[i].offset = ~(uintptr_t) 0;
+ else if ((kind & typemask)
+ == GOMP_MAP_USE_DEVICE_PTR_IF_PRESENT)
+ /* If not present, continue using the host address. */
+ ;
+ else
+ __builtin_unreachable ();
+ tgt->list[i].offset = OFFSET_INLINED;
}
continue;
case GOMP_MAP_STRUCT:
field_tgt_base = (uintptr_t) hostaddrs[first];
field_tgt_offset = tgt_size;
field_tgt_clear = last;
+ field_tgt_structelem_first = NULL;
tgt_size += cur_node.host_end
- (uintptr_t) hostaddrs[first];
continue;
}
for (i = first; i <= last; i++)
gomp_map_fields_existing (tgt, aq, n, first, i, hostaddrs,
- sizes, kinds, cbufp);
+ sizes, kinds, cbufp, refcount_set);
i--;
continue;
case GOMP_MAP_ALWAYS_POINTER:
+ cur_node.host_start
- n->host_start),
(void *) &cur_node.tgt_offset,
- sizeof (void *), cbufp);
+ sizeof (void *), true, cbufp);
cur_node.tgt_offset = n->tgt->tgt_start + n->tgt_offset
+ cur_node.host_start - n->host_start;
continue;
++i;
continue;
case GOMP_MAP_ATTACH:
+ case GOMP_MAP_ATTACH_ZERO_LENGTH_ARRAY_SECTION:
{
cur_node.host_start = (uintptr_t) hostaddrs[i];
cur_node.host_end = cur_node.host_start + sizeof (void *);
tgt->list[i].length = n->host_end - n->host_start;
tgt->list[i].copy_from = false;
tgt->list[i].always_copy_from = false;
- tgt->list[i].do_detach
- = (pragma_kind != GOMP_MAP_VARS_OPENACC_ENTER_DATA);
- n->refcount++;
+ tgt->list[i].is_attach = true;
+ /* OpenACC 'attach'/'detach' doesn't affect
+ structured/dynamic reference counts ('n->refcount',
+ 'n->dynamic_refcount'). */
+
+ bool zlas
+ = ((kind & typemask)
+ == GOMP_MAP_ATTACH_ZERO_LENGTH_ARRAY_SECTION);
+ gomp_attach_pointer (devicep, aq, mem_map, n,
+ (uintptr_t) hostaddrs[i], sizes[i],
+ cbufp, zlas);
}
- else
+ else if ((pragma_kind & GOMP_MAP_VARS_OPENACC) != 0)
{
gomp_mutex_unlock (&devicep->lock);
gomp_fatal ("outer struct not mapped for attach");
}
- gomp_attach_pointer (devicep, aq, mem_map, n,
- (uintptr_t) hostaddrs[i], sizes[i],
- cbufp);
continue;
}
default:
splay_tree_key n = splay_tree_lookup (mem_map, k);
if (n && n->refcount != REFCOUNT_LINK)
gomp_map_vars_existing (devicep, aq, n, k, &tgt->list[i],
- kind & typemask, cbufp);
+ kind & typemask, false, implicit, cbufp,
+ refcount_set);
else
{
k->aux = NULL;
size_t align = (size_t) 1 << (kind >> rshift);
tgt->list[i].key = k;
k->tgt = tgt;
+ k->refcount = 0;
+ k->dynamic_refcount = 0;
if (field_tgt_clear != FIELD_TGT_EMPTY)
{
k->tgt_offset = k->host_start - field_tgt_base
+ field_tgt_offset;
+ if (openmp_p)
+ {
+ k->refcount = REFCOUNT_STRUCTELEM;
+ if (field_tgt_structelem_first == NULL)
+ {
+ /* Set to first structure element of sequence. */
+ k->refcount |= REFCOUNT_STRUCTELEM_FLAG_FIRST;
+ field_tgt_structelem_first = k;
+ }
+ else
+ /* Point to refcount of leading element, but do not
+ increment again. */
+ k->structelem_refcount_ptr
+ = &field_tgt_structelem_first->structelem_refcount;
+
+ if (i == field_tgt_clear)
+ {
+ k->refcount |= REFCOUNT_STRUCTELEM_FLAG_LAST;
+ field_tgt_structelem_first = NULL;
+ }
+ }
if (i == field_tgt_clear)
field_tgt_clear = FIELD_TGT_EMPTY;
}
k->tgt_offset = tgt_size;
tgt_size += k->host_end - k->host_start;
}
+ /* First increment, from 0 to 1. gomp_increment_refcount
+ encapsulates the different increment cases, so use this
+ instead of directly setting 1 during initialization. */
+ gomp_increment_refcount (k, refcount_set);
+
tgt->list[i].copy_from = GOMP_MAP_COPY_FROM_P (kind & typemask);
tgt->list[i].always_copy_from
= GOMP_MAP_ALWAYS_FROM_P (kind & typemask);
- tgt->list[i].do_detach = false;
+ tgt->list[i].is_attach = false;
tgt->list[i].offset = 0;
tgt->list[i].length = k->host_end - k->host_start;
- k->refcount = 1;
- k->virtual_refcount = 0;
tgt->refcount++;
array->left = NULL;
array->right = NULL;
(void *) (tgt->tgt_start
+ k->tgt_offset),
(void *) k->host_start,
- k->host_end - k->host_start, cbufp);
+ k->host_end - k->host_start,
+ false, cbufp);
break;
case GOMP_MAP_POINTER:
- gomp_map_pointer (tgt, aq,
- (uintptr_t) *(void **) k->host_start,
- k->tgt_offset, sizes[i], cbufp);
+ case GOMP_MAP_POINTER_TO_ZERO_LENGTH_ARRAY_SECTION:
+ gomp_map_pointer
+ (tgt, aq, (uintptr_t) *(void **) k->host_start,
+ k->tgt_offset, sizes[i], cbufp,
+ ((kind & typemask)
+ == GOMP_MAP_POINTER_TO_ZERO_LENGTH_ARRAY_SECTION));
break;
case GOMP_MAP_TO_PSET:
gomp_copy_host2dev (devicep, aq,
(void *) (tgt->tgt_start
+ k->tgt_offset),
(void *) k->host_start,
- k->host_end - k->host_start, cbufp);
+ k->host_end - k->host_start,
+ false, cbufp);
+ tgt->list[i].has_null_ptr_assoc = false;
for (j = i + 1; j < mapnum; j++)
- if (!GOMP_MAP_POINTER_P (get_kind (short_mapkind, kinds,
- j)
- & typemask))
- break;
- else if ((uintptr_t) hostaddrs[j] < k->host_start
- || ((uintptr_t) hostaddrs[j] + sizeof (void *)
- > k->host_end))
- break;
- else
- {
- tgt->list[j].key = k;
- tgt->list[j].copy_from = false;
- tgt->list[j].always_copy_from = false;
- tgt->list[j].do_detach = false;
- if (k->refcount != REFCOUNT_INFINITY)
- k->refcount++;
- gomp_map_pointer (tgt, aq,
- (uintptr_t) *(void **) hostaddrs[j],
- k->tgt_offset
- + ((uintptr_t) hostaddrs[j]
- - k->host_start),
- sizes[j], cbufp);
- i++;
+ {
+ int ptr_kind = (get_kind (short_mapkind, kinds, j)
+ & typemask);
+ if (!GOMP_MAP_POINTER_P (ptr_kind)
+ && !GOMP_MAP_ALWAYS_POINTER_P (ptr_kind))
+ break;
+ else if ((uintptr_t) hostaddrs[j] < k->host_start
+ || ((uintptr_t) hostaddrs[j] + sizeof (void *)
+ > k->host_end))
+ break;
+ else
+ {
+ tgt->list[j].key = k;
+ tgt->list[j].copy_from = false;
+ tgt->list[j].always_copy_from = false;
+ tgt->list[j].is_attach = false;
+ tgt->list[i].has_null_ptr_assoc |= !(*(void **) hostaddrs[j]);
+ /* For OpenMP, the use of refcount_sets causes
+ errors if we set k->refcount = 1 above but also
+ increment it again here, for decrementing will
+ not properly match, since we decrement only once
+ for each key's refcount. Therefore avoid this
+ increment for OpenMP constructs. */
+ if (!openmp_p)
+ gomp_increment_refcount (k, refcount_set);
+ gomp_map_pointer (tgt, aq,
+ (uintptr_t) *(void **) hostaddrs[j],
+ k->tgt_offset
+ + ((uintptr_t) hostaddrs[j]
+ - k->host_start),
+ sizes[j], cbufp, false);
+ }
}
+ i = j - 1;
break;
case GOMP_MAP_FORCE_PRESENT:
{
(void *) (tgt->tgt_start
+ k->tgt_offset),
(void *) k->host_start,
- sizeof (void *), cbufp);
+ sizeof (void *), false, cbufp);
break;
default:
gomp_mutex_unlock (&devicep->lock);
/* We intentionally do not use coalescing here, as it's not
data allocated by the current call to this function. */
gomp_copy_host2dev (devicep, aq, (void *) n->tgt_offset,
- &tgt_addr, sizeof (void *), NULL);
+ &tgt_addr, sizeof (void *), true, NULL);
}
array++;
}
gomp_copy_host2dev (devicep, aq,
(void *) (tgt->tgt_start + i * sizeof (void *)),
(void *) &cur_node.tgt_offset, sizeof (void *),
- cbufp);
+ true, cbufp);
}
}
if (cbufp)
{
+ /* See 'gomp_coalesce_buf_add'. */
+ assert (!aq);
+
long c = 0;
for (c = 0; c < cbuf.chunk_cnt; ++c)
gomp_copy_host2dev (devicep, aq,
(void *) (tgt->tgt_start + cbuf.chunks[c].start),
(char *) cbuf.buf + (cbuf.chunks[c].start
- cbuf.chunks[0].start),
- cbuf.chunks[c].end - cbuf.chunks[c].start, NULL);
+ cbuf.chunks[c].end - cbuf.chunks[c].start,
+ true, NULL);
free (cbuf.buf);
cbuf.buf = NULL;
cbufp = NULL;
/* If the variable from "omp target enter data" map-list was already mapped,
tgt is not needed. Otherwise tgt will be freed by gomp_unmap_vars or
gomp_exit_data. */
- if ((pragma_kind == GOMP_MAP_VARS_ENTER_DATA
- || pragma_kind == GOMP_MAP_VARS_OPENACC_ENTER_DATA)
- && tgt->refcount == 0)
- {
- /* If we're about to discard a target_mem_desc with no "structural"
- references (tgt->refcount == 0), any splay keys linked in the tgt's
- list must have their virtual refcount incremented to represent that
- "lost" reference in order to implement the semantics of the OpenACC
- "present increment" operation properly. */
- if (pragma_kind == GOMP_MAP_VARS_OPENACC_ENTER_DATA)
- for (i = 0; i < tgt->list_count; i++)
- if (tgt->list[i].key)
- tgt->list[i].key->virtual_refcount++;
-
+ if ((pragma_kind & GOMP_MAP_VARS_ENTER_DATA) && tgt->refcount == 0)
+ {
free (tgt);
tgt = NULL;
}
return tgt;
}
-attribute_hidden struct target_mem_desc *
+/* Synchronous OpenMP variant: map MAPNUM variables by delegating to
+   gomp_map_vars_internal with no async queue.  REFCOUNT_SET may be NULL,
+   in which case a hash set local to this single call is created and
+   freed before returning.  */
+static struct target_mem_desc *
gomp_map_vars (struct gomp_device_descr *devicep, size_t mapnum,
void **hostaddrs, void **devaddrs, size_t *sizes, void *kinds,
- bool short_mapkind, enum gomp_map_vars_kind pragma_kind)
+ bool short_mapkind, htab_t *refcount_set,
+ enum gomp_map_vars_kind pragma_kind)
{
- return gomp_map_vars_internal (devicep, NULL, mapnum, hostaddrs, devaddrs,
- sizes, kinds, short_mapkind, pragma_kind);
+ /* This management of a local refcount_set is for convenience of callers
+ who do not share a refcount_set over multiple map/unmap uses. */
+ htab_t local_refcount_set = NULL;
+ if (refcount_set == NULL)
+ {
+ local_refcount_set = htab_create (mapnum);
+ refcount_set = &local_refcount_set;
+ }
+
+ struct target_mem_desc *tgt;
+ tgt = gomp_map_vars_internal (devicep, NULL, mapnum, hostaddrs, devaddrs,
+ sizes, kinds, short_mapkind, refcount_set,
+ pragma_kind);
+ if (local_refcount_set)
+ htab_free (local_refcount_set);
+
+ return tgt;
}
+/* OpenACC variant: map variables, possibly asynchronously via AQ.  No
+   reference-count set is used (NULL), and PRAGMA_KIND is tagged with
+   GOMP_MAP_VARS_OPENACC before delegating to gomp_map_vars_internal.  */
attribute_hidden struct target_mem_desc *
-gomp_map_vars_async (struct gomp_device_descr *devicep,
- struct goacc_asyncqueue *aq, size_t mapnum,
- void **hostaddrs, void **devaddrs, size_t *sizes,
- void *kinds, bool short_mapkind,
- enum gomp_map_vars_kind pragma_kind)
+goacc_map_vars (struct gomp_device_descr *devicep,
+ struct goacc_asyncqueue *aq, size_t mapnum,
+ void **hostaddrs, void **devaddrs, size_t *sizes,
+ void *kinds, bool short_mapkind,
+ enum gomp_map_vars_kind pragma_kind)
{
return gomp_map_vars_internal (devicep, aq, mapnum, hostaddrs, devaddrs,
- sizes, kinds, short_mapkind, pragma_kind);
+ sizes, kinds, short_mapkind, NULL,
+ GOMP_MAP_VARS_OPENACC | pragma_kind);
}
static void
(void) gomp_unref_tgt (ptr);
}
-static inline __attribute__((always_inline)) bool
-gomp_remove_var_internal (struct gomp_device_descr *devicep, splay_tree_key k,
- struct goacc_asyncqueue *aq)
+/* Remove splay-tree key K from tree SP and free its auxiliary data;
+   if K carried a 'link' key, re-insert that key into SP in its place.  */
+static void
+gomp_remove_splay_tree_key (splay_tree sp, splay_tree_key k)
{
- bool is_tgt_unmapped = false;
- splay_tree_remove (&devicep->mem_map, k);
+ splay_tree_remove (sp, k);
if (k->aux)
{
if (k->aux->link_key)
- splay_tree_insert (&devicep->mem_map,
- (splay_tree_node) k->aux->link_key);
+ splay_tree_insert (sp, (splay_tree_node) k->aux->link_key);
if (k->aux->attach_count)
free (k->aux->attach_count);
free (k->aux);
k->aux = NULL;
}
+}
+
+static inline __attribute__((always_inline)) bool
+gomp_remove_var_internal (struct gomp_device_descr *devicep, splay_tree_key k,
+ struct goacc_asyncqueue *aq)
+{
+ bool is_tgt_unmapped = false;
+
+ if (REFCOUNT_STRUCTELEM_P (k->refcount))
+ {
+ if (REFCOUNT_STRUCTELEM_FIRST_P (k->refcount) == false)
+ /* Infer the splay_tree_key of the first structelem key using the
+ pointer to the first structelem_refcount. */
+ k = (splay_tree_key) ((char *) k->structelem_refcount_ptr
+ - offsetof (struct splay_tree_key_s,
+ structelem_refcount));
+ assert (REFCOUNT_STRUCTELEM_FIRST_P (k->refcount));
+
+ /* The array created by gomp_map_vars is an array of splay_tree_nodes,
+ with the splay_tree_keys embedded inside. */
+ splay_tree_node node =
+ (splay_tree_node) ((char *) k
+ - offsetof (struct splay_tree_node_s, key));
+ while (true)
+ {
+ /* Starting from the _FIRST key, and continue for all following
+ sibling keys. */
+ gomp_remove_splay_tree_key (&devicep->mem_map, k);
+ if (REFCOUNT_STRUCTELEM_LAST_P (k->refcount))
+ break;
+ else
+ k = &(++node)->key;
+ }
+ }
+ else
+ gomp_remove_splay_tree_key (&devicep->mem_map, k);
+
if (aq)
devicep->openacc.async.queue_callback_func (aq, gomp_unref_tgt_void,
(void *) k->tgt);
static inline __attribute__((always_inline)) void
gomp_unmap_vars_internal (struct target_mem_desc *tgt, bool do_copyfrom,
- struct goacc_asyncqueue *aq)
+ htab_t *refcount_set, struct goacc_asyncqueue *aq)
{
struct gomp_device_descr *devicep = tgt->device_descr;
{
splay_tree_key k = tgt->list[i].key;
- if (k != NULL && tgt->list[i].do_detach)
+ if (k != NULL && tgt->list[i].is_attach)
gomp_detach_pointer (devicep, aq, k, tgt->list[i].key->host_start
+ tgt->list[i].offset,
- k->refcount == 1, NULL);
+ false, NULL);
}
for (i = 0; i < tgt->list_count; i++)
if (k == NULL)
continue;
- bool do_unmap = false;
- if (k->tgt == tgt
- && k->virtual_refcount > 0
- && k->refcount != REFCOUNT_INFINITY)
- {
- k->virtual_refcount--;
- k->refcount--;
- }
- else if (k->refcount > 1 && k->refcount != REFCOUNT_INFINITY)
- k->refcount--;
- else if (k->refcount == 1)
- {
- k->refcount--;
- do_unmap = true;
- }
+ /* OpenACC 'attach'/'detach' doesn't affect structured/dynamic reference
+ counts ('n->refcount', 'n->dynamic_refcount'). */
+ if (tgt->list[i].is_attach)
+ continue;
+
+ bool do_copy, do_remove;
+ gomp_decrement_refcount (k, refcount_set, false, &do_copy, &do_remove);
- if ((do_unmap && do_copyfrom && tgt->list[i].copy_from)
+ if ((do_copy && do_copyfrom && tgt->list[i].copy_from)
|| tgt->list[i].always_copy_from)
gomp_copy_dev2host (devicep, aq,
(void *) (k->host_start + tgt->list[i].offset),
(void *) (k->tgt->tgt_start + k->tgt_offset
+ tgt->list[i].offset),
tgt->list[i].length);
- if (do_unmap)
+ if (do_remove)
{
struct target_mem_desc *k_tgt = k->tgt;
bool is_tgt_unmapped = gomp_remove_var (devicep, k);
gomp_mutex_unlock (&devicep->lock);
}
-attribute_hidden void
-gomp_unmap_vars (struct target_mem_desc *tgt, bool do_copyfrom)
+/* Synchronous OpenMP variant: unmap the variables of TGT with no async
+   queue.  REFCOUNT_SET may be NULL, in which case a hash set local to
+   this single call is created and freed before returning.  */
+static void
+gomp_unmap_vars (struct target_mem_desc *tgt, bool do_copyfrom,
+ htab_t *refcount_set)
{
- gomp_unmap_vars_internal (tgt, do_copyfrom, NULL);
+ /* This management of a local refcount_set is for convenience of callers
+ who do not share a refcount_set over multiple map/unmap uses. */
+ htab_t local_refcount_set = NULL;
+ if (refcount_set == NULL)
+ {
+ local_refcount_set = htab_create (tgt->list_count);
+ refcount_set = &local_refcount_set;
+ }
+
+ gomp_unmap_vars_internal (tgt, do_copyfrom, refcount_set, NULL);
+
+ if (local_refcount_set)
+ htab_free (local_refcount_set);
}
+/* OpenACC variant: unmap the variables of TGT, possibly asynchronously
+   via AQ, with no reference-count set (NULL).  */
attribute_hidden void
-gomp_unmap_vars_async (struct target_mem_desc *tgt, bool do_copyfrom,
- struct goacc_asyncqueue *aq)
+goacc_unmap_vars (struct target_mem_desc *tgt, bool do_copyfrom,
+ struct goacc_asyncqueue *aq)
{
- gomp_unmap_vars_internal (tgt, do_copyfrom, aq);
+ gomp_unmap_vars_internal (tgt, do_copyfrom, NULL, aq);
}
static void
(void *) n->host_end);
}
-
- void *hostaddr = (void *) cur_node.host_start;
- void *devaddr = (void *) (n->tgt->tgt_start + n->tgt_offset
- + cur_node.host_start - n->host_start);
- size_t size = cur_node.host_end - cur_node.host_start;
-
- if (GOMP_MAP_COPY_TO_P (kind & typemask))
- gomp_copy_host2dev (devicep, NULL, devaddr, hostaddr, size,
- NULL);
- if (GOMP_MAP_COPY_FROM_P (kind & typemask))
- gomp_copy_dev2host (devicep, NULL, hostaddr, devaddr, size);
+ if (n->aux && n->aux->attach_count)
+ {
+ uintptr_t addr = cur_node.host_start;
+ while (addr < cur_node.host_end)
+ {
+ /* We have to be careful not to overwrite still attached
+ pointers during host<->device updates. */
+ size_t i = (addr - cur_node.host_start) / sizeof (void *);
+ if (n->aux->attach_count[i] == 0)
+ {
+ void *devaddr = (void *) (n->tgt->tgt_start
+ + n->tgt_offset
+ + addr - n->host_start);
+ if (GOMP_MAP_COPY_TO_P (kind & typemask))
+ gomp_copy_host2dev (devicep, NULL,
+ devaddr, (void *) addr,
+ sizeof (void *), false, NULL);
+ if (GOMP_MAP_COPY_FROM_P (kind & typemask))
+ gomp_copy_dev2host (devicep, NULL,
+ (void *) addr, devaddr,
+ sizeof (void *));
+ }
+ addr += sizeof (void *);
+ }
+ }
+ else
+ {
+ void *hostaddr = (void *) cur_node.host_start;
+ void *devaddr = (void *) (n->tgt->tgt_start + n->tgt_offset
+ + cur_node.host_start
+ - n->host_start);
+ size_t size = cur_node.host_end - cur_node.host_start;
+
+ if (GOMP_MAP_COPY_TO_P (kind & typemask))
+ gomp_copy_host2dev (devicep, NULL, devaddr, hostaddr, size,
+ false, NULL);
+ if (GOMP_MAP_COPY_FROM_P (kind & typemask))
+ gomp_copy_dev2host (devicep, NULL, hostaddr, devaddr, size);
+ }
}
}
gomp_mutex_unlock (&devicep->lock);
int num_funcs = host_funcs_end - host_func_table;
int num_vars = (host_vars_end - host_var_table) / 2;
+ /* Currently, 'device_num' is the only "other" entry. */
+ int num_others = 1;
+
/* Load image to device and get target addresses for the image. */
struct addr_pair *target_table = NULL;
int i, num_target_entries;
= devicep->load_image_func (devicep->target_id, version,
target_data, &target_table);
- if (num_target_entries != num_funcs + num_vars)
+ if (num_target_entries != num_funcs + num_vars
+ /* Others (device_num) are included as trailing entries in the pair list. */
+ && num_target_entries != num_funcs + num_vars + num_others)
{
gomp_mutex_unlock (&devicep->lock);
if (is_register_lock)
k->tgt = tgt;
k->tgt_offset = target_table[i].start;
k->refcount = REFCOUNT_INFINITY;
- k->virtual_refcount = 0;
+ k->dynamic_refcount = 0;
k->aux = NULL;
array->left = NULL;
array->right = NULL;
{
struct addr_pair *target_var = &target_table[num_funcs + i];
uintptr_t target_size = target_var->end - target_var->start;
+ bool is_link_var = link_bit & (uintptr_t) host_var_table[i * 2 + 1];
- if ((uintptr_t) host_var_table[i * 2 + 1] != target_size)
+ if (!is_link_var && (uintptr_t) host_var_table[i * 2 + 1] != target_size)
{
gomp_mutex_unlock (&devicep->lock);
if (is_register_lock)
= k->host_start + (size_mask & (uintptr_t) host_var_table[i * 2 + 1]);
k->tgt = tgt;
k->tgt_offset = target_var->start;
- k->refcount = target_size & link_bit ? REFCOUNT_LINK : REFCOUNT_INFINITY;
- k->virtual_refcount = 0;
+ k->refcount = is_link_var ? REFCOUNT_LINK : REFCOUNT_INFINITY;
+ k->dynamic_refcount = 0;
k->aux = NULL;
array->left = NULL;
array->right = NULL;
array++;
}
+ /* Last entry is for the on-device 'device_num' variable. Tolerate case
+ where plugin does not return this entry. */
+ if (num_funcs + num_vars < num_target_entries)
+ {
+ struct addr_pair *device_num_var = &target_table[num_funcs + num_vars];
+ /* Start address will be non-zero for last entry if GOMP_DEVICE_NUM_VAR
+ was found in this image. */
+ if (device_num_var->start != 0)
+ {
+ /* The index of the devicep within devices[] is regarded as its
+ 'device number', which is different from the per-device type
+ devicep->target_id. */
+ int device_num_val = (int) (devicep - &devices[0]);
+ if (device_num_var->end - device_num_var->start != sizeof (int))
+ {
+ gomp_mutex_unlock (&devicep->lock);
+ if (is_register_lock)
+ gomp_mutex_unlock (&register_lock);
+ gomp_fatal ("offload plugin managed 'device_num' not of expected "
+ "format");
+ }
+
+ /* Copy device_num value to place on device memory, hereby actually
+ designating its device number into effect. */
+ gomp_copy_host2dev (devicep, NULL, (void *) device_num_var->start,
+ &device_num_val, sizeof (int), false, NULL);
+ }
+ }
+
free (target_table);
}
}
}
+/* Format the OpenMP 'requires' clauses set in REQUIRES_MASK as a
+   comma-separated list into BUF of SIZE bytes.
+   NOTE(review): snprintf returns the would-be length, so on truncation P
+   can advance past END and the following END - P underflows when passed
+   as size_t; callers must size BUF for the worst case ("unified_address,
+   unified_shared_memory, reverse_offload") -- confirm all call sites.  */
+static void
+gomp_requires_to_name (char *buf, size_t size, int requires_mask)
+{
+ char *end = buf + size, *p = buf;
+ if (requires_mask & GOMP_REQUIRES_UNIFIED_ADDRESS)
+ p += snprintf (p, end - p, "unified_address");
+ if (requires_mask & GOMP_REQUIRES_UNIFIED_SHARED_MEMORY)
+ p += snprintf (p, end - p, "%sunified_shared_memory",
+ (p == buf ? "" : ", "));
+ if (requires_mask & GOMP_REQUIRES_REVERSE_OFFLOAD)
+ p += snprintf (p, end - p, "%sreverse_offload",
+ (p == buf ? "" : ", "));
+}
+
/* This function should be called from every offload image while loading.
It gets the descriptor of the host func and var tables HOST_TABLE, TYPE of
the target, and TARGET_DATA needed by target plugin. */
int target_type, const void *target_data)
{
int i;
+ int omp_req = 0;
if (GOMP_VERSION_LIB (version) > GOMP_VERSION)
gomp_fatal ("Library too old for offload (version %u < %u)",
GOMP_VERSION, GOMP_VERSION_LIB (version));
-
+
+ if (GOMP_VERSION_LIB (version) > 1)
+ {
+ omp_req = (int) (size_t) ((void **) target_data)[0];
+ target_data = &((void **) target_data)[1];
+ }
+
gomp_mutex_lock (&register_lock);
+ if (omp_req && omp_requires_mask && omp_requires_mask != omp_req)
+ {
+ char buf1[sizeof ("unified_address, unified_shared_memory, "
+ "reverse_offload")];
+ char buf2[sizeof ("unified_address, unified_shared_memory, "
+ "reverse_offload")];
+ gomp_requires_to_name (buf2, sizeof (buf2),
+ omp_req != GOMP_REQUIRES_TARGET_USED
+ ? omp_req : omp_requires_mask);
+ if (omp_req != GOMP_REQUIRES_TARGET_USED
+ && omp_requires_mask != GOMP_REQUIRES_TARGET_USED)
+ {
+ gomp_requires_to_name (buf1, sizeof (buf1), omp_requires_mask);
+ gomp_fatal ("OpenMP 'requires' directive with non-identical clauses "
+ "in multiple compilation units: '%s' vs. '%s'",
+ buf1, buf2);
+ }
+ else
+ gomp_fatal ("OpenMP 'requires' directive with '%s' specified only in "
+ "some compilation units", buf2);
+ }
+ omp_requires_mask = omp_req;
+
/* Load image to all initialized devices. */
for (i = 0; i < num_devices; i++)
{
/* Host fallback for GOMP_target{,_ext} routines. */
static void
-gomp_target_fallback (void (*fn) (void *), void **hostaddrs)
+gomp_target_fallback (void (*fn) (void *), void **hostaddrs,
+ struct gomp_device_descr *devicep, void **args)
{
struct gomp_thread old_thr, *thr = gomp_thread ();
+
+ if (gomp_target_offload_var == GOMP_TARGET_OFFLOAD_MANDATORY
+ && devicep != NULL)
+ gomp_fatal ("OMP_TARGET_OFFLOAD is set to MANDATORY, but device cannot "
+ "be used for offloading");
+
old_thr = *thr;
memset (thr, '\0', sizeof (*thr));
if (gomp_places_list)
thr->place = old_thr.place;
thr->ts.place_partition_len = gomp_places_list_len;
}
+ if (args)
+ while (*args)
+ {
+ intptr_t id = (intptr_t) *args++, val;
+ if (id & GOMP_TARGET_ARG_SUBSEQUENT_PARAM)
+ val = (intptr_t) *args++;
+ else
+ val = id >> GOMP_TARGET_ARG_VALUE_SHIFT;
+ if ((id & GOMP_TARGET_ARG_DEVICE_MASK) != GOMP_TARGET_ARG_DEVICE_ALL)
+ continue;
+ id &= GOMP_TARGET_ARG_ID_MASK;
+ if (id != GOMP_TARGET_ARG_THREAD_LIMIT)
+ continue;
+ val = val > INT_MAX ? INT_MAX : val;
+ if (val)
+ gomp_icv (true)->thread_limit_var = val;
+ break;
+ }
+
fn (hostaddrs);
gomp_free_thread (thr);
*thr = old_thr;
tgt_size = 0;
size_t i;
for (i = 0; i < mapnum; i++)
- if ((kinds[i] & 0xff) == GOMP_MAP_FIRSTPRIVATE)
+ if ((kinds[i] & 0xff) == GOMP_MAP_FIRSTPRIVATE && hostaddrs[i] != NULL)
{
size_t align = (size_t) 1 << (kinds[i] >> 8);
tgt_size = (tgt_size + align - 1) & ~(align - 1);
memcpy (tgt + tgt_size, hostaddrs[i], sizes[i]);
hostaddrs[i] = tgt + tgt_size;
tgt_size = tgt_size + sizes[i];
+ if (i + 1 < mapnum && (kinds[i+1] & 0xff) == GOMP_MAP_ATTACH)
+ {
+ *(*(uintptr_t**) hostaddrs[i+1] + sizes[i+1]) = (uintptr_t) hostaddrs[i];
+ ++i;
+ }
}
}
size_t mapnum, void **hostaddrs, size_t *sizes,
unsigned char *kinds)
{
- struct gomp_device_descr *devicep = resolve_device (device);
+ struct gomp_device_descr *devicep = resolve_device (device, true);
void *fn_addr;
if (devicep == NULL
/* All shared memory devices should use the GOMP_target_ext function. */
|| devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM
|| !(fn_addr = gomp_get_target_fn_addr (devicep, fn)))
- return gomp_target_fallback (fn, hostaddrs);
+ return gomp_target_fallback (fn, hostaddrs, devicep, NULL);
+ htab_t refcount_set = htab_create (mapnum);
struct target_mem_desc *tgt_vars
= gomp_map_vars (devicep, mapnum, hostaddrs, NULL, sizes, kinds, false,
- GOMP_MAP_VARS_TARGET);
+ &refcount_set, GOMP_MAP_VARS_TARGET);
devicep->run_func (devicep->target_id, fn_addr, (void *) tgt_vars->tgt_start,
NULL);
- gomp_unmap_vars (tgt_vars, true);
+ htab_clear (refcount_set);
+ gomp_unmap_vars (tgt_vars, true, &refcount_set);
+ htab_free (refcount_set);
+}
+
+/* Return FLAGS with any GOMP_target_ext flag bits cleared that DEVICEP
+   (which may be NULL for host fallback) cannot support.  */
+static inline unsigned int
+clear_unsupported_flags (struct gomp_device_descr *devicep, unsigned int flags)
+{
+ /* If we cannot run asynchronously, simply ignore nowait. */
+ if (devicep != NULL && devicep->async_run_func == NULL)
+ flags &= ~GOMP_TARGET_FLAG_NOWAIT;
+
+ return flags;
+}
/* Like GOMP_target, but KINDS is 16-bit, UNUSED is no longer present,
void **hostaddrs, size_t *sizes, unsigned short *kinds,
unsigned int flags, void **depend, void **args)
{
- struct gomp_device_descr *devicep = resolve_device (device);
+ struct gomp_device_descr *devicep = resolve_device (device, true);
size_t tgt_align = 0, tgt_size = 0;
bool fpc_done = false;
+ flags = clear_unsupported_flags (devicep, flags);
+
if (flags & GOMP_TARGET_FLAG_NOWAIT)
{
struct gomp_thread *thr = gomp_thread ();
tgt_align, tgt_size);
}
}
- gomp_target_fallback (fn, hostaddrs);
+ gomp_target_fallback (fn, hostaddrs, devicep, args);
return;
}
struct target_mem_desc *tgt_vars;
+ htab_t refcount_set = NULL;
+
if (devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
{
if (!fpc_done)
tgt_vars = NULL;
}
else
- tgt_vars = gomp_map_vars (devicep, mapnum, hostaddrs, NULL, sizes, kinds,
- true, GOMP_MAP_VARS_TARGET);
+ {
+ refcount_set = htab_create (mapnum);
+ tgt_vars = gomp_map_vars (devicep, mapnum, hostaddrs, NULL, sizes, kinds,
+ true, &refcount_set, GOMP_MAP_VARS_TARGET);
+ }
devicep->run_func (devicep->target_id, fn_addr,
tgt_vars ? (void *) tgt_vars->tgt_start : hostaddrs,
args);
if (tgt_vars)
- gomp_unmap_vars (tgt_vars, true);
+ {
+ htab_clear (refcount_set);
+ gomp_unmap_vars (tgt_vars, true, &refcount_set);
+ }
+ if (refcount_set)
+ htab_free (refcount_set);
}
/* Host fallback for GOMP_target_data{,_ext} routines. */
static void
-gomp_target_data_fallback (void)
+gomp_target_data_fallback (struct gomp_device_descr *devicep)
{
struct gomp_task_icv *icv = gomp_icv (false);
+
+ if (gomp_target_offload_var == GOMP_TARGET_OFFLOAD_MANDATORY
+ && devicep != NULL)
+ gomp_fatal ("OMP_TARGET_OFFLOAD is set to MANDATORY, but device cannot "
+ "be used for offloading");
+
if (icv->target_data)
{
/* Even when doing a host fallback, if there are any active
would get out of sync. */
struct target_mem_desc *tgt
= gomp_map_vars (NULL, 0, NULL, NULL, NULL, NULL, false,
- GOMP_MAP_VARS_DATA);
+ NULL, GOMP_MAP_VARS_DATA);
tgt->prev = icv->target_data;
icv->target_data = tgt;
}
GOMP_target_data (int device, const void *unused, size_t mapnum,
void **hostaddrs, size_t *sizes, unsigned char *kinds)
{
- struct gomp_device_descr *devicep = resolve_device (device);
+ struct gomp_device_descr *devicep = resolve_device (device, true);
if (devicep == NULL
|| !(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
|| (devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM))
- return gomp_target_data_fallback ();
+ return gomp_target_data_fallback (devicep);
struct target_mem_desc *tgt
= gomp_map_vars (devicep, mapnum, hostaddrs, NULL, sizes, kinds, false,
- GOMP_MAP_VARS_DATA);
+ NULL, GOMP_MAP_VARS_DATA);
struct gomp_task_icv *icv = gomp_icv (true);
tgt->prev = icv->target_data;
icv->target_data = tgt;
GOMP_target_data_ext (int device, size_t mapnum, void **hostaddrs,
size_t *sizes, unsigned short *kinds)
{
- struct gomp_device_descr *devicep = resolve_device (device);
+ struct gomp_device_descr *devicep = resolve_device (device, true);
if (devicep == NULL
|| !(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
|| devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
- return gomp_target_data_fallback ();
+ return gomp_target_data_fallback (devicep);
struct target_mem_desc *tgt
= gomp_map_vars (devicep, mapnum, hostaddrs, NULL, sizes, kinds, true,
- GOMP_MAP_VARS_DATA);
+ NULL, GOMP_MAP_VARS_DATA);
struct gomp_task_icv *icv = gomp_icv (true);
tgt->prev = icv->target_data;
icv->target_data = tgt;
{
struct target_mem_desc *tgt = icv->target_data;
icv->target_data = tgt->prev;
- gomp_unmap_vars (tgt, true);
+ gomp_unmap_vars (tgt, true, NULL);
}
}
GOMP_target_update (int device, const void *unused, size_t mapnum,
void **hostaddrs, size_t *sizes, unsigned char *kinds)
{
- struct gomp_device_descr *devicep = resolve_device (device);
+ struct gomp_device_descr *devicep = resolve_device (device, true);
if (devicep == NULL
|| !(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
size_t *sizes, unsigned short *kinds,
unsigned int flags, void **depend)
{
- struct gomp_device_descr *devicep = resolve_device (device);
+ struct gomp_device_descr *devicep = resolve_device (device, true);
/* If there are depend clauses, but nowait is not present,
block the parent task until the dependencies are resolved
static void
gomp_exit_data (struct gomp_device_descr *devicep, size_t mapnum,
- void **hostaddrs, size_t *sizes, unsigned short *kinds)
+ void **hostaddrs, size_t *sizes, unsigned short *kinds,
+ htab_t *refcount_set)
{
const int typemask = 0xff;
size_t i;
return;
}
+ for (i = 0; i < mapnum; i++)
+ if ((kinds[i] & typemask) == GOMP_MAP_DETACH)
+ {
+ struct splay_tree_key_s cur_node;
+ cur_node.host_start = (uintptr_t) hostaddrs[i];
+ cur_node.host_end = cur_node.host_start + sizeof (void *);
+ splay_tree_key n = splay_tree_lookup (&devicep->mem_map, &cur_node);
+
+ if (n)
+ gomp_detach_pointer (devicep, NULL, n, (uintptr_t) hostaddrs[i],
+ false, NULL);
+ }
+
+ int nrmvars = 0;
+ splay_tree_key remove_vars[mapnum];
+
for (i = 0; i < mapnum; i++)
{
struct splay_tree_key_s cur_node;
if (!k)
continue;
- if (k->refcount > 0 && k->refcount != REFCOUNT_INFINITY)
- k->refcount--;
- if ((kind == GOMP_MAP_DELETE
- || kind == GOMP_MAP_DELETE_ZERO_LEN_ARRAY_SECTION)
- && k->refcount != REFCOUNT_INFINITY)
- k->refcount = 0;
+ bool delete_p = (kind == GOMP_MAP_DELETE
+ || kind == GOMP_MAP_DELETE_ZERO_LEN_ARRAY_SECTION);
+ bool do_copy, do_remove;
+ gomp_decrement_refcount (k, refcount_set, delete_p, &do_copy,
+ &do_remove);
- if ((kind == GOMP_MAP_FROM && k->refcount == 0)
+ if ((kind == GOMP_MAP_FROM && do_copy)
|| kind == GOMP_MAP_ALWAYS_FROM)
- gomp_copy_dev2host (devicep, NULL, (void *) cur_node.host_start,
- (void *) (k->tgt->tgt_start + k->tgt_offset
- + cur_node.host_start
- - k->host_start),
- cur_node.host_end - cur_node.host_start);
- if (k->refcount == 0)
- gomp_remove_var (devicep, k);
+ {
+ if (k->aux && k->aux->attach_count)
+ {
+ /* We have to be careful not to overwrite still attached
+ pointers during the copyback to host. */
+ uintptr_t addr = k->host_start;
+ while (addr < k->host_end)
+ {
+ size_t i = (addr - k->host_start) / sizeof (void *);
+ if (k->aux->attach_count[i] == 0)
+ gomp_copy_dev2host (devicep, NULL, (void *) addr,
+ (void *) (k->tgt->tgt_start
+ + k->tgt_offset
+ + addr - k->host_start),
+ sizeof (void *));
+ addr += sizeof (void *);
+ }
+ }
+ else
+ gomp_copy_dev2host (devicep, NULL, (void *) cur_node.host_start,
+ (void *) (k->tgt->tgt_start + k->tgt_offset
+ + cur_node.host_start
+ - k->host_start),
+ cur_node.host_end - cur_node.host_start);
+ }
+
+ /* Structure element lists are removed altogether at once, which
+ may cause immediate deallocation of the target_mem_desc, causing
+ errors if we still have following element siblings to copy back.
+ While we're at it, it also seems more disciplined to simply
+ queue all removals together for processing below.
+
+ Structured block unmapping (i.e. gomp_unmap_vars_internal) should
+ not have this problem, since they maintain an additional
+ tgt->refcount = 1 reference to the target_mem_desc to start with.
+ */
+ if (do_remove)
+ remove_vars[nrmvars++] = k;
+ break;
+ case GOMP_MAP_DETACH:
break;
default:
gomp_mutex_unlock (&devicep->lock);
}
}
+ for (int i = 0; i < nrmvars; i++)
+ gomp_remove_var (devicep, remove_vars[i]);
+
gomp_mutex_unlock (&devicep->lock);
}
size_t *sizes, unsigned short *kinds,
unsigned int flags, void **depend)
{
- struct gomp_device_descr *devicep = resolve_device (device);
+ struct gomp_device_descr *devicep = resolve_device (device, true);
/* If there are depend clauses, but nowait is not present,
block the parent task until the dependencies are resolved
}
}
- size_t i;
+ htab_t refcount_set = htab_create (mapnum);
+
+ /* The variables are mapped separately such that they can be released
+ independently. */
+ size_t i, j;
if ((flags & GOMP_TARGET_FLAG_EXIT_DATA) == 0)
for (i = 0; i < mapnum; i++)
if ((kinds[i] & 0xff) == GOMP_MAP_STRUCT)
{
gomp_map_vars (devicep, sizes[i] + 1, &hostaddrs[i], NULL, &sizes[i],
- &kinds[i], true, GOMP_MAP_VARS_ENTER_DATA);
+ &kinds[i], true, &refcount_set,
+ GOMP_MAP_VARS_ENTER_DATA);
i += sizes[i];
}
+ else if ((kinds[i] & 0xff) == GOMP_MAP_TO_PSET)
+ {
+ for (j = i + 1; j < mapnum; j++)
+ if (!GOMP_MAP_POINTER_P (get_kind (true, kinds, j) & 0xff)
+ && !GOMP_MAP_ALWAYS_POINTER_P (get_kind (true, kinds, j) & 0xff))
+ break;
+ gomp_map_vars (devicep, j-i, &hostaddrs[i], NULL, &sizes[i],
+ &kinds[i], true, &refcount_set,
+ GOMP_MAP_VARS_ENTER_DATA);
+ i += j - i - 1;
+ }
+ else if (i + 1 < mapnum && (kinds[i + 1] & 0xff) == GOMP_MAP_ATTACH)
+ {
+ /* An attach operation must be processed together with the mapped
+ base-pointer list item. */
+ gomp_map_vars (devicep, 2, &hostaddrs[i], NULL, &sizes[i], &kinds[i],
+ true, &refcount_set, GOMP_MAP_VARS_ENTER_DATA);
+ i += 1;
+ }
else
gomp_map_vars (devicep, 1, &hostaddrs[i], NULL, &sizes[i], &kinds[i],
- true, GOMP_MAP_VARS_ENTER_DATA);
+ true, &refcount_set, GOMP_MAP_VARS_ENTER_DATA);
else
- gomp_exit_data (devicep, mapnum, hostaddrs, sizes, kinds);
+ gomp_exit_data (devicep, mapnum, hostaddrs, sizes, kinds, &refcount_set);
+ htab_free (refcount_set);
}
bool
|| (devicep->can_run_func && !devicep->can_run_func (fn_addr)))
{
ttask->state = GOMP_TARGET_TASK_FALLBACK;
- gomp_target_fallback (ttask->fn, ttask->hostaddrs);
+ gomp_target_fallback (ttask->fn, ttask->hostaddrs, devicep,
+ ttask->args);
return false;
}
if (ttask->state == GOMP_TARGET_TASK_FINISHED)
{
if (ttask->tgt)
- gomp_unmap_vars (ttask->tgt, true);
+ gomp_unmap_vars (ttask->tgt, true, NULL);
return false;
}
{
ttask->tgt = gomp_map_vars (devicep, ttask->mapnum, ttask->hostaddrs,
NULL, ttask->sizes, ttask->kinds, true,
- GOMP_MAP_VARS_TARGET);
+ NULL, GOMP_MAP_VARS_TARGET);
actual_arguments = (void *) ttask->tgt->tgt_start;
}
ttask->state = GOMP_TARGET_TASK_READY_TO_RUN;
+ assert (devicep->async_run_func);
devicep->async_run_func (devicep->target_id, fn_addr, actual_arguments,
ttask->args, (void *) ttask);
return true;
if (ttask->flags & GOMP_TARGET_FLAG_UPDATE)
gomp_update (devicep, ttask->mapnum, ttask->hostaddrs, ttask->sizes,
ttask->kinds, true);
- else if ((ttask->flags & GOMP_TARGET_FLAG_EXIT_DATA) == 0)
- for (i = 0; i < ttask->mapnum; i++)
- if ((ttask->kinds[i] & 0xff) == GOMP_MAP_STRUCT)
- {
- gomp_map_vars (devicep, ttask->sizes[i] + 1, &ttask->hostaddrs[i],
- NULL, &ttask->sizes[i], &ttask->kinds[i], true,
- GOMP_MAP_VARS_ENTER_DATA);
- i += ttask->sizes[i];
- }
- else
- gomp_map_vars (devicep, 1, &ttask->hostaddrs[i], NULL, &ttask->sizes[i],
- &ttask->kinds[i], true, GOMP_MAP_VARS_ENTER_DATA);
else
- gomp_exit_data (devicep, ttask->mapnum, ttask->hostaddrs, ttask->sizes,
- ttask->kinds);
+ {
+ htab_t refcount_set = htab_create (ttask->mapnum);
+ if ((ttask->flags & GOMP_TARGET_FLAG_EXIT_DATA) == 0)
+ for (i = 0; i < ttask->mapnum; i++)
+ if ((ttask->kinds[i] & 0xff) == GOMP_MAP_STRUCT)
+ {
+ gomp_map_vars (devicep, ttask->sizes[i] + 1, &ttask->hostaddrs[i],
+ NULL, &ttask->sizes[i], &ttask->kinds[i], true,
+ &refcount_set, GOMP_MAP_VARS_ENTER_DATA);
+ i += ttask->sizes[i];
+ }
+ else
+ gomp_map_vars (devicep, 1, &ttask->hostaddrs[i], NULL, &ttask->sizes[i],
+ &ttask->kinds[i], true, &refcount_set,
+ GOMP_MAP_VARS_ENTER_DATA);
+ else
+ gomp_exit_data (devicep, ttask->mapnum, ttask->hostaddrs, ttask->sizes,
+ ttask->kinds, &refcount_set);
+ htab_free (refcount_set);
+ }
return false;
}
(void) num_teams;
}
+/* Handle the 'teams' construct entry point (GOMP_teams4).  When FIRST is
+   true, record the THREAD_LIMIT ICV (if non-zero) and initialize the
+   team iteration for NUM_TEAMS_LOW teams; on each subsequent call
+   advance to the next team.  Returns true while another team body
+   should be executed, false once all teams have run.  */
+bool
+GOMP_teams4 (unsigned int num_teams_low, unsigned int num_teams_high,
+ unsigned int thread_limit, bool first)
+{
+ struct gomp_thread *thr = gomp_thread ();
+ if (first)
+ {
+ if (thread_limit)
+ {
+ struct gomp_task_icv *icv = gomp_icv (true);
+ /* NOTE(review): values above INT_MAX saturate to UINT_MAX --
+ presumably meaning "unlimited"; confirm against the
+ thread_limit_var ICV semantics.  */
+ icv->thread_limit_var
+ = thread_limit > INT_MAX ? UINT_MAX : thread_limit;
+ }
+ /* Only the lower bound is used here; the upper bound is
+ intentionally ignored.  */
+ (void) num_teams_high;
+ if (num_teams_low == 0)
+ num_teams_low = 1;
+ thr->num_teams = num_teams_low - 1;
+ thr->team_num = 0;
+ }
+ else if (thr->team_num == thr->num_teams)
+ return false;
+ else
+ ++thr->team_num;
+ return true;
+}
+
void *
omp_target_alloc (size_t size, int device_num)
{
- if (device_num == GOMP_DEVICE_HOST_FALLBACK)
+ if (device_num == omp_initial_device
+ || device_num == gomp_get_num_devices ())
return malloc (size);
- if (device_num < 0)
- return NULL;
-
- struct gomp_device_descr *devicep = resolve_device (device_num);
+ struct gomp_device_descr *devicep = resolve_device (device_num, false);
if (devicep == NULL)
return NULL;
void
omp_target_free (void *device_ptr, int device_num)
{
- if (device_ptr == NULL)
- return;
-
- if (device_num == GOMP_DEVICE_HOST_FALLBACK)
+ if (device_num == omp_initial_device
+ || device_num == gomp_get_num_devices ())
{
free (device_ptr);
return;
}
- if (device_num < 0)
- return;
-
- struct gomp_device_descr *devicep = resolve_device (device_num);
- if (devicep == NULL)
+ struct gomp_device_descr *devicep = resolve_device (device_num, false);
+ if (devicep == NULL || device_ptr == NULL)
return;
if (!(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
int
omp_target_is_present (const void *ptr, int device_num)
{
- if (ptr == NULL)
- return 1;
-
- if (device_num == GOMP_DEVICE_HOST_FALLBACK)
+ if (device_num == omp_initial_device
+ || device_num == gomp_get_num_devices ())
return 1;
- if (device_num < 0)
- return 0;
-
- struct gomp_device_descr *devicep = resolve_device (device_num);
+ struct gomp_device_descr *devicep = resolve_device (device_num, false);
if (devicep == NULL)
return 0;
+ if (ptr == NULL)
+ return 1;
+
if (!(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
|| devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
return 1;
return ret;
}
-int
-omp_target_memcpy (void *dst, const void *src, size_t length,
- size_t dst_offset, size_t src_offset, int dst_device_num,
- int src_device_num)
+static int
+omp_target_memcpy_check (int dst_device_num, int src_device_num,
+ struct gomp_device_descr **dst_devicep,
+ struct gomp_device_descr **src_devicep)
{
- struct gomp_device_descr *dst_devicep = NULL, *src_devicep = NULL;
- bool ret;
-
- if (dst_device_num != GOMP_DEVICE_HOST_FALLBACK)
+ if (dst_device_num != gomp_get_num_devices ()
+ /* Above gomp_get_num_devices has to be called unconditionally. */
+ && dst_device_num != omp_initial_device)
{
- if (dst_device_num < 0)
- return EINVAL;
-
- dst_devicep = resolve_device (dst_device_num);
- if (dst_devicep == NULL)
+ *dst_devicep = resolve_device (dst_device_num, false);
+ if (*dst_devicep == NULL)
return EINVAL;
- if (!(dst_devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
- || dst_devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
- dst_devicep = NULL;
+ if (!((*dst_devicep)->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
+ || (*dst_devicep)->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
+ *dst_devicep = NULL;
}
- if (src_device_num != GOMP_DEVICE_HOST_FALLBACK)
- {
- if (src_device_num < 0)
- return EINVAL;
- src_devicep = resolve_device (src_device_num);
- if (src_devicep == NULL)
+ if (src_device_num != num_devices_openmp
+ && src_device_num != omp_initial_device)
+ {
+ *src_devicep = resolve_device (src_device_num, false);
+ if (*src_devicep == NULL)
return EINVAL;
- if (!(src_devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
- || src_devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
- src_devicep = NULL;
+ if (!((*src_devicep)->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
+ || (*src_devicep)->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
+ *src_devicep = NULL;
}
+
+ return 0;
+}
+
+static int
+omp_target_memcpy_copy (void *dst, const void *src, size_t length,
+ size_t dst_offset, size_t src_offset,
+ struct gomp_device_descr *dst_devicep,
+ struct gomp_device_descr *src_devicep)
+{
+ bool ret;
if (src_devicep == NULL && dst_devicep == NULL)
{
memcpy ((char *) dst + dst_offset, (char *) src + src_offset, length);
return EINVAL;
}
+int
+omp_target_memcpy (void *dst, const void *src, size_t length, size_t dst_offset,
+ size_t src_offset, int dst_device_num, int src_device_num)
+{
+ struct gomp_device_descr *dst_devicep = NULL, *src_devicep = NULL;
+ int ret = omp_target_memcpy_check (dst_device_num, src_device_num,
+ &dst_devicep, &src_devicep);
+
+ if (ret)
+ return ret;
+
+ ret = omp_target_memcpy_copy (dst, src, length, dst_offset, src_offset,
+ dst_devicep, src_devicep);
+
+ return ret;
+}
+
+typedef struct
+{
+ void *dst;
+ const void *src;
+ size_t length;
+ size_t dst_offset;
+ size_t src_offset;
+ struct gomp_device_descr *dst_devicep;
+ struct gomp_device_descr *src_devicep;
+} omp_target_memcpy_data;
+
+static void
+omp_target_memcpy_async_helper (void *args)
+{
+ omp_target_memcpy_data *a = args;
+ if (omp_target_memcpy_copy (a->dst, a->src, a->length, a->dst_offset,
+ a->src_offset, a->dst_devicep, a->src_devicep))
+ gomp_fatal ("omp_target_memcpy failed");
+}
+
+int
+omp_target_memcpy_async (void *dst, const void *src, size_t length,
+ size_t dst_offset, size_t src_offset,
+ int dst_device_num, int src_device_num,
+ int depobj_count, omp_depend_t *depobj_list)
+{
+ struct gomp_device_descr *dst_devicep = NULL, *src_devicep = NULL;
+ unsigned int flags = 0;
+ void *depend[depobj_count + 5];
+ int i;
+ int check = omp_target_memcpy_check (dst_device_num, src_device_num,
+ &dst_devicep, &src_devicep);
+
+ omp_target_memcpy_data s = {
+ .dst = dst,
+ .src = src,
+ .length = length,
+ .dst_offset = dst_offset,
+ .src_offset = src_offset,
+ .dst_devicep = dst_devicep,
+ .src_devicep = src_devicep
+ };
+
+ if (check)
+ return check;
+
+ if (depobj_count > 0 && depobj_list != NULL)
+ {
+ flags |= GOMP_TASK_FLAG_DEPEND;
+ depend[0] = 0;
+ depend[1] = (void *) (uintptr_t) depobj_count;
+ depend[2] = depend[3] = depend[4] = 0;
+ for (i = 0; i < depobj_count; ++i)
+ depend[i + 5] = &depobj_list[i];
+ }
+
+ GOMP_task (omp_target_memcpy_async_helper, &s, NULL, sizeof (s),
+ __alignof__ (s), true, flags, depend, 0, NULL);
+
+ return 0;
+}
+
static int
omp_target_memcpy_rect_worker (void *dst, const void *src, size_t element_size,
int num_dims, const size_t *volume,
return 0;
}
-int
-omp_target_memcpy_rect (void *dst, const void *src, size_t element_size,
- int num_dims, const size_t *volume,
- const size_t *dst_offsets,
- const size_t *src_offsets,
- const size_t *dst_dimensions,
- const size_t *src_dimensions,
- int dst_device_num, int src_device_num)
+static int
+omp_target_memcpy_rect_check (void *dst, const void *src, int dst_device_num,
+ int src_device_num,
+ struct gomp_device_descr **dst_devicep,
+ struct gomp_device_descr **src_devicep)
{
- struct gomp_device_descr *dst_devicep = NULL, *src_devicep = NULL;
-
if (!dst && !src)
return INT_MAX;
- if (dst_device_num != GOMP_DEVICE_HOST_FALLBACK)
- {
- if (dst_device_num < 0)
- return EINVAL;
-
- dst_devicep = resolve_device (dst_device_num);
- if (dst_devicep == NULL)
- return EINVAL;
-
- if (!(dst_devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
- || dst_devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
- dst_devicep = NULL;
- }
- if (src_device_num != GOMP_DEVICE_HOST_FALLBACK)
- {
- if (src_device_num < 0)
- return EINVAL;
+ int ret = omp_target_memcpy_check (dst_device_num, src_device_num,
+ dst_devicep, src_devicep);
+ if (ret)
+ return ret;
- src_devicep = resolve_device (src_device_num);
- if (src_devicep == NULL)
- return EINVAL;
-
- if (!(src_devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
- || src_devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
- src_devicep = NULL;
- }
-
- if (src_devicep != NULL && dst_devicep != NULL && src_devicep != dst_devicep)
+ if (*src_devicep != NULL && *dst_devicep != NULL
+ && *src_devicep != *dst_devicep)
return EINVAL;
+ return 0;
+}
+
+static int
+omp_target_memcpy_rect_copy (void *dst, const void *src,
+ size_t element_size, int num_dims,
+ const size_t *volume, const size_t *dst_offsets,
+ const size_t *src_offsets,
+ const size_t *dst_dimensions,
+ const size_t *src_dimensions,
+ struct gomp_device_descr *dst_devicep,
+ struct gomp_device_descr *src_devicep)
+{
if (src_devicep)
gomp_mutex_lock (&src_devicep->lock);
else if (dst_devicep)
gomp_mutex_unlock (&src_devicep->lock);
else if (dst_devicep)
gomp_mutex_unlock (&dst_devicep->lock);
+
return ret;
}
+int
+omp_target_memcpy_rect (void *dst, const void *src, size_t element_size,
+ int num_dims, const size_t *volume,
+ const size_t *dst_offsets,
+ const size_t *src_offsets,
+ const size_t *dst_dimensions,
+ const size_t *src_dimensions,
+ int dst_device_num, int src_device_num)
+{
+ struct gomp_device_descr *dst_devicep = NULL, *src_devicep = NULL;
+
+ int check = omp_target_memcpy_rect_check (dst, src, dst_device_num,
+ src_device_num, &dst_devicep,
+ &src_devicep);
+
+ if (check)
+ return check;
+
+ int ret = omp_target_memcpy_rect_copy (dst, src, element_size, num_dims,
+ volume, dst_offsets, src_offsets,
+ dst_dimensions, src_dimensions,
+ dst_devicep, src_devicep);
+
+ return ret;
+}
+
+typedef struct
+{
+ void *dst;
+ const void *src;
+ size_t element_size;
+ const size_t *volume;
+ const size_t *dst_offsets;
+ const size_t *src_offsets;
+ const size_t *dst_dimensions;
+ const size_t *src_dimensions;
+ struct gomp_device_descr *dst_devicep;
+ struct gomp_device_descr *src_devicep;
+ int num_dims;
+} omp_target_memcpy_rect_data;
+
+static void
+omp_target_memcpy_rect_async_helper (void *args)
+{
+ omp_target_memcpy_rect_data *a = args;
+ int ret = omp_target_memcpy_rect_copy (a->dst, a->src, a->element_size,
+ a->num_dims, a->volume, a->dst_offsets,
+ a->src_offsets, a->dst_dimensions,
+ a->src_dimensions, a->dst_devicep,
+ a->src_devicep);
+ if (ret)
+ gomp_fatal ("omp_target_memcpy_rect failed");
+}
+
+int
+omp_target_memcpy_rect_async (void *dst, const void *src, size_t element_size,
+ int num_dims, const size_t *volume,
+ const size_t *dst_offsets,
+ const size_t *src_offsets,
+ const size_t *dst_dimensions,
+ const size_t *src_dimensions,
+ int dst_device_num, int src_device_num,
+ int depobj_count, omp_depend_t *depobj_list)
+{
+ struct gomp_device_descr *dst_devicep = NULL, *src_devicep = NULL;
+ unsigned flags = 0;
+ int check = omp_target_memcpy_rect_check (dst, src, dst_device_num,
+ src_device_num, &dst_devicep,
+ &src_devicep);
+ void *depend[depobj_count + 5];
+ int i;
+
+ omp_target_memcpy_rect_data s = {
+ .dst = dst,
+ .src = src,
+ .element_size = element_size,
+ .num_dims = num_dims,
+ .volume = volume,
+ .dst_offsets = dst_offsets,
+ .src_offsets = src_offsets,
+ .dst_dimensions = dst_dimensions,
+ .src_dimensions = src_dimensions,
+ .dst_devicep = dst_devicep,
+ .src_devicep = src_devicep
+ };
+
+ if (check)
+ return check;
+
+ if (depobj_count > 0 && depobj_list != NULL)
+ {
+ flags |= GOMP_TASK_FLAG_DEPEND;
+ depend[0] = 0;
+ depend[1] = (void *) (uintptr_t) depobj_count;
+ depend[2] = depend[3] = depend[4] = 0;
+ for (i = 0; i < depobj_count; ++i)
+ depend[i + 5] = &depobj_list[i];
+ }
+
+ GOMP_task (omp_target_memcpy_rect_async_helper, &s, NULL, sizeof (s),
+ __alignof__ (s), true, flags, depend, 0, NULL);
+
+ return 0;
+}
+
int
omp_target_associate_ptr (const void *host_ptr, const void *device_ptr,
size_t size, size_t device_offset, int device_num)
{
- if (device_num == GOMP_DEVICE_HOST_FALLBACK)
- return EINVAL;
-
- if (device_num < 0)
+ if (device_num == omp_initial_device
+ || device_num == gomp_get_num_devices ())
return EINVAL;
- struct gomp_device_descr *devicep = resolve_device (device_num);
+ struct gomp_device_descr *devicep = resolve_device (device_num, false);
if (devicep == NULL)
return EINVAL;
k->tgt = tgt;
k->tgt_offset = (uintptr_t) device_ptr + device_offset;
k->refcount = REFCOUNT_INFINITY;
- k->virtual_refcount = 0;
+ k->dynamic_refcount = 0;
k->aux = NULL;
array->left = NULL;
array->right = NULL;
int
omp_target_disassociate_ptr (const void *ptr, int device_num)
{
- if (device_num == GOMP_DEVICE_HOST_FALLBACK)
- return EINVAL;
-
- if (device_num < 0)
- return EINVAL;
-
- struct gomp_device_descr *devicep = resolve_device (device_num);
+ struct gomp_device_descr *devicep = resolve_device (device_num, false);
if (devicep == NULL)
return EINVAL;
return ret;
}
+void *
+omp_get_mapped_ptr (const void *ptr, int device_num)
+{
+ if (device_num == omp_initial_device
+ || device_num == omp_get_initial_device ())
+ return (void *) ptr;
+
+ struct gomp_device_descr *devicep = resolve_device (device_num, false);
+ if (devicep == NULL)
+ return NULL;
+
+ if (!(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
+ || devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
+ return (void *) ptr;
+
+ gomp_mutex_lock (&devicep->lock);
+
+ struct splay_tree_s *mem_map = &devicep->mem_map;
+ struct splay_tree_key_s cur_node;
+ void *ret = NULL;
+
+ cur_node.host_start = (uintptr_t) ptr;
+ cur_node.host_end = cur_node.host_start;
+ splay_tree_key n = gomp_map_0len_lookup (mem_map, &cur_node);
+
+ if (n)
+ {
+ uintptr_t offset = cur_node.host_start - n->host_start;
+ ret = (void *) (n->tgt->tgt_start + n->tgt_offset + offset);
+ }
+
+ gomp_mutex_unlock (&devicep->lock);
+
+ return ret;
+}
+
+int
+omp_target_is_accessible (const void *ptr, size_t size, int device_num)
+{
+ if (device_num == omp_initial_device
+ || device_num == gomp_get_num_devices ())
+ return true;
+
+ struct gomp_device_descr *devicep = resolve_device (device_num, false);
+ if (devicep == NULL)
+ return false;
+
+ /* TODO: Unified shared memory must be handled when available. */
+
+ return devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM;
+}
+
int
omp_pause_resource (omp_pause_resource_t kind, int device_num)
{
(void) kind;
- if (device_num == GOMP_DEVICE_HOST_FALLBACK)
+ if (device_num == omp_initial_device
+ || device_num == gomp_get_num_devices ())
return gomp_pause_host ();
- if (device_num < 0 || device_num >= gomp_get_num_devices ())
+
+ struct gomp_device_descr *devicep = resolve_device (device_num, false);
+ if (devicep == NULL)
return -1;
+
/* Do nothing for target devices for now. */
return 0;
}
void *plugin_handle = dlopen (plugin_name, RTLD_LAZY);
if (!plugin_handle)
+#if OFFLOAD_DEFAULTED
+ return 0;
+#else
goto dl_fail;
+#endif
/* Check if all required functions are available in the plugin and store
their handlers. None of the symbols can legitimately be NULL,
DLSYM (get_caps);
DLSYM (get_type);
DLSYM (get_num_devices);
- DLSYM (get_property);
DLSYM (init_device);
DLSYM (fini_device);
DLSYM (load_image);
if (device->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
{
DLSYM (run);
- DLSYM (async_run);
+ DLSYM_OPT (async_run, async_run);
DLSYM_OPT (can_run, can_run);
DLSYM (dev2dev);
}
openacc_async_queue_callback)
|| !DLSYM_OPT (openacc.async.exec, openacc_async_exec)
|| !DLSYM_OPT (openacc.async.dev2host, openacc_async_dev2host)
- || !DLSYM_OPT (openacc.async.host2dev, openacc_async_host2dev))
+ || !DLSYM_OPT (openacc.async.host2dev, openacc_async_host2dev)
+ || !DLSYM_OPT (openacc.get_property, openacc_get_property))
{
/* Require all the OpenACC handlers if we have
GOMP_OFFLOAD_CAP_OPENACC_200. */
const char *suffix = SONAME_SUFFIX (1);
const char *cur, *next;
char *plugin_name;
- int i, new_num_devices;
+ int i, new_num_devs;
+ int num_devs = 0, num_devs_openmp;
+ struct gomp_device_descr *devs = NULL;
- num_devices = 0;
- devices = NULL;
+ if (gomp_target_offload_var == GOMP_TARGET_OFFLOAD_DISABLED)
+ return;
cur = OFFLOAD_PLUGINS;
if (*cur)
plugin_name = (char *) malloc (prefix_len + cur_len + suffix_len + 1);
if (!plugin_name)
{
- num_devices = 0;
+ num_devs = 0;
break;
}
if (gomp_load_plugin_for_device (¤t_device, plugin_name))
{
- new_num_devices = current_device.get_num_devices_func ();
- if (new_num_devices >= 1)
+ int omp_req = omp_requires_mask & ~GOMP_REQUIRES_TARGET_USED;
+ new_num_devs = current_device.get_num_devices_func (omp_req);
+ if (gomp_debug_var > 0 && new_num_devs < 0)
+ {
+ bool found = false;
+ int type = current_device.get_type_func ();
+ for (int img = 0; img < num_offload_images; img++)
+ if (type == offload_images[img].type)
+ found = true;
+ if (found)
+ {
+ char buf[sizeof ("unified_address, unified_shared_memory, "
+ "reverse_offload")];
+ gomp_requires_to_name (buf, sizeof (buf), omp_req);
+ char *name = (char *) malloc (cur_len + 1);
+ memcpy (name, cur, cur_len);
+ name[cur_len] = '\0';
+ gomp_debug (1,
+ "%s devices present but 'omp requires %s' "
+ "cannot be fulfilled", name, buf);
+ free (name);
+ }
+ }
+ else if (new_num_devs >= 1)
{
/* Augment DEVICES and NUM_DEVICES. */
- devices = realloc (devices, (num_devices + new_num_devices)
- * sizeof (struct gomp_device_descr));
- if (!devices)
+ devs = realloc (devs, (num_devs + new_num_devs)
+ * sizeof (struct gomp_device_descr));
+ if (!devs)
{
- num_devices = 0;
+ num_devs = 0;
free (plugin_name);
break;
}
current_device.type = current_device.get_type_func ();
current_device.mem_map.root = NULL;
current_device.state = GOMP_DEVICE_UNINITIALIZED;
- for (i = 0; i < new_num_devices; i++)
+ for (i = 0; i < new_num_devs; i++)
{
current_device.target_id = i;
- devices[num_devices] = current_device;
- gomp_mutex_init (&devices[num_devices].lock);
- num_devices++;
+ devs[num_devs] = current_device;
+ gomp_mutex_init (&devs[num_devs].lock);
+ num_devs++;
}
}
}
/* In DEVICES, sort the GOMP_OFFLOAD_CAP_OPENMP_400 ones first, and set
NUM_DEVICES_OPENMP. */
- struct gomp_device_descr *devices_s
- = malloc (num_devices * sizeof (struct gomp_device_descr));
- if (!devices_s)
+ struct gomp_device_descr *devs_s
+ = malloc (num_devs * sizeof (struct gomp_device_descr));
+ if (!devs_s)
{
- num_devices = 0;
- free (devices);
- devices = NULL;
+ num_devs = 0;
+ free (devs);
+ devs = NULL;
}
- num_devices_openmp = 0;
- for (i = 0; i < num_devices; i++)
- if (devices[i].capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
- devices_s[num_devices_openmp++] = devices[i];
- int num_devices_after_openmp = num_devices_openmp;
- for (i = 0; i < num_devices; i++)
- if (!(devices[i].capabilities & GOMP_OFFLOAD_CAP_OPENMP_400))
- devices_s[num_devices_after_openmp++] = devices[i];
- free (devices);
- devices = devices_s;
-
- for (i = 0; i < num_devices; i++)
+ num_devs_openmp = 0;
+ for (i = 0; i < num_devs; i++)
+ if (devs[i].capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
+ devs_s[num_devs_openmp++] = devs[i];
+ int num_devs_after_openmp = num_devs_openmp;
+ for (i = 0; i < num_devs; i++)
+ if (!(devs[i].capabilities & GOMP_OFFLOAD_CAP_OPENMP_400))
+ devs_s[num_devs_after_openmp++] = devs[i];
+ free (devs);
+ devs = devs_s;
+
+ for (i = 0; i < num_devs; i++)
{
/* The 'devices' array can be moved (by the realloc call) until we have
found all the plugins, so registering with the OpenACC runtime (which
takes a copy of the pointer argument) must be delayed until now. */
- if (devices[i].capabilities & GOMP_OFFLOAD_CAP_OPENACC_200)
- goacc_register (&devices[i]);
+ if (devs[i].capabilities & GOMP_OFFLOAD_CAP_OPENACC_200)
+ goacc_register (&devs[i]);
}
+ num_devices = num_devs;
+ num_devices_openmp = num_devs_openmp;
+ devices = devs;
if (atexit (gomp_target_fini) != 0)
gomp_fatal ("atexit failed");
}