-/* Copyright (C) 2013-2022 Free Software Foundation, Inc.
+/* Copyright (C) 2013-2023 Free Software Foundation, Inc.
Contributed by Jakub Jelinek <jakub@redhat.com>.
This file is part of the GNU Offloading and Multi Processing Library
# include <inttypes.h> /* For PRIu64. */
#endif
#include <string.h>
+#include <stdio.h> /* For snprintf. */
#include <assert.h>
#include <errno.h>
#include "plugin-suffix.h"
#endif
+/* Define another splay tree instantiation - for reverse offload. */
+#define splay_tree_prefix reverse
+#define splay_tree_static
+#define splay_tree_c
+#include "splay-tree.h"
+
+
typedef uintptr_t *hash_entry_type;
static inline void * htab_alloc (size_t size) { return gomp_malloc (size); }
static inline void htab_free (void *ptr) { free (ptr); }
#include "hashtab.h"
+ialias_redirect (GOMP_task)
+
static inline hashval_t
htab_hash (hash_entry_type element)
{
/* Number of GOMP_OFFLOAD_CAP_OPENMP_400 devices. */
static int num_devices_openmp;
+/* OpenMP requires mask. */
+static int omp_requires_mask;
+
/* Similar to gomp_realloc, but release register_lock before gomp_fatal. */
static void *
}
static struct gomp_device_descr *
-resolve_device (int device_id)
+resolve_device (int device_id, bool remapped)
{
- if (device_id == GOMP_DEVICE_ICV)
+ /* Get number of devices and thus ensure that 'gomp_init_targets_once' was
+ called, which must be done before using default_device_var. */
+ int num_devices = gomp_get_num_devices ();
+
+ if (remapped && device_id == GOMP_DEVICE_ICV)
{
struct gomp_task_icv *icv = gomp_icv (false);
device_id = icv->default_device_var;
+ remapped = false;
}
- if (device_id < 0 || device_id >= gomp_get_num_devices ())
+ if (device_id < 0)
+ {
+ if (device_id == (remapped ? GOMP_DEVICE_HOST_FALLBACK
+ : omp_initial_device))
+ return NULL;
+ if (gomp_target_offload_var == GOMP_TARGET_OFFLOAD_MANDATORY
+ && num_devices == 0)
+ gomp_fatal ("OMP_TARGET_OFFLOAD is set to MANDATORY, "
+ "but only the host device is available");
+ else if (device_id == omp_invalid_device)
+ gomp_fatal ("omp_invalid_device encountered");
+ else if (gomp_target_offload_var == GOMP_TARGET_OFFLOAD_MANDATORY)
+ gomp_fatal ("OMP_TARGET_OFFLOAD is set to MANDATORY, "
+ "but device not found");
+
+ return NULL;
+ }
+ else if (device_id >= num_devices)
{
if (gomp_target_offload_var == GOMP_TARGET_OFFLOAD_MANDATORY
- && device_id != GOMP_DEVICE_HOST_FALLBACK
- && device_id != num_devices_openmp)
+ && device_id != num_devices)
gomp_fatal ("OMP_TARGET_OFFLOAD is set to MANDATORY, "
"but device not found");
return splay_tree_lookup (mem_map, key);
}
+/* Look up KEY (a device-address key) in the reverse-offload splay tree
+   MEM_MAP_REV and return the matching node's key, or NULL if the device
+   address is not present.  Thin wrapper mirroring gomp_map_lookup for the
+   'reverse' splay-tree instantiation defined near the top of this file.  */
+static inline reverse_splay_tree_key
+gomp_map_lookup_rev (reverse_splay_tree mem_map_rev, reverse_splay_tree_key key)
+{
+  return reverse_splay_tree_lookup (mem_map_rev, key);
+}
+
static inline splay_tree_key
gomp_map_0len_lookup (splay_tree mem_map, splay_tree_key key)
{
This must not be used for asynchronous copies, because the host data might
not be computed yet (by an earlier asynchronous compute region, for
- example).
- TODO ... but we could allow CBUF usage for EPHEMERAL data? (Open question:
- is it more performant to use libgomp CBUF buffering or individual device
- asyncronous copying?) */
+ example). The exception is for EPHEMERAL data, that we know is available
+ already "by construction". */
static inline void
gomp_coalesce_buf_add (struct gomp_coalesce_buf *cbuf, size_t start, size_t len)
case GOMP_MAP_FORCE_ALLOC:
case GOMP_MAP_FORCE_FROM:
case GOMP_MAP_ALWAYS_FROM:
+ case GOMP_MAP_ALWAYS_PRESENT_FROM:
+ case GOMP_MAP_FORCE_PRESENT:
return false;
default:
return true;
void *d, const void *h, size_t sz,
bool ephemeral, struct gomp_coalesce_buf *cbuf)
{
- if (__builtin_expect (aq != NULL, 0))
- {
- /* See 'gomp_coalesce_buf_add'. */
- assert (!cbuf);
-
- void *h_buf = (void *) h;
- if (ephemeral)
- {
- /* We're queueing up an asynchronous copy from data that may
- disappear before the transfer takes place (i.e. because it is a
- stack local in a function that is no longer executing). Make a
- copy of the data into a temporary buffer in those cases. */
- h_buf = gomp_malloc (sz);
- memcpy (h_buf, h, sz);
- }
- goacc_device_copy_async (devicep, devicep->openacc.async.host2dev_func,
- "dev", d, "host", h_buf, h, sz, aq);
- if (ephemeral)
- /* Free temporary buffer once the transfer has completed. */
- devicep->openacc.async.queue_callback_func (aq, free, h_buf);
-
- return;
- }
-
if (cbuf)
{
uintptr_t doff = (uintptr_t) d - cbuf->tgt->tgt_start;
gomp_mutex_unlock (&devicep->lock);
gomp_fatal ("internal libgomp cbuf error");
}
+
+ /* In an asynchronous context, verify that CBUF isn't used
+ with non-EPHEMERAL data; see 'gomp_coalesce_buf_add'. */
+ if (__builtin_expect (aq != NULL, 0))
+ assert (ephemeral);
+
memcpy ((char *) cbuf->buf + (doff - cbuf->chunks[0].start),
h, sz);
return;
}
}
- gomp_device_copy (devicep, devicep->host2dev_func, "dev", d, "host", h, sz);
+ if (__builtin_expect (aq != NULL, 0))
+ {
+ void *h_buf = (void *) h;
+ if (ephemeral)
+ {
+ /* We're queueing up an asynchronous copy from data that may
+ disappear before the transfer takes place (i.e. because it is a
+ stack local in a function that is no longer executing). As we've
+ not been able to use CBUF, make a copy of the data into a
+ temporary buffer. */
+ h_buf = gomp_malloc (sz);
+ memcpy (h_buf, h, sz);
+ }
+ goacc_device_copy_async (devicep, devicep->openacc.async.host2dev_func,
+ "dev", d, "host", h_buf, h, sz, aq);
+ if (ephemeral)
+ /* Free once the transfer has completed. */
+ devicep->openacc.async.queue_callback_func (aq, free, h_buf);
+ }
+ else
+ gomp_device_copy (devicep, devicep->host2dev_func,
+ "dev", d, "host", h, sz);
}
attribute_hidden void
else
tgt_var->length = newn->host_end - newn->host_start;
- if ((kind & GOMP_MAP_FLAG_FORCE)
+ if (GOMP_MAP_FORCE_P (kind)
/* For implicit maps, old contained in new is valid. */
|| !(implicit_subset
/* Otherwise, new contained inside old is considered valid. */
if (n == NULL)
{
if (allow_zero_length_array_sections)
- cur_node.tgt_offset = 0;
+ cur_node.tgt_offset = cur_node.host_start;
else
{
gomp_mutex_unlock (&devicep->lock);
cur_node.host_start = (uintptr_t) hostaddrs[i];
cur_node.host_end = cur_node.host_start + sizes[i];
- splay_tree_key n2 = splay_tree_lookup (mem_map, &cur_node);
+ splay_tree_key n2 = gomp_map_0len_lookup (mem_map, &cur_node);
kind = get_kind (short_mapkind, kinds, i);
implicit = get_implicit (short_mapkind, kinds, i);
if (n2
if ((void *) target == NULL)
{
- gomp_mutex_unlock (&devicep->lock);
- gomp_fatal ("attempt to attach null pointer");
+ /* As a special case, allow attaching NULL host pointers. This
+ allows e.g. unassociated Fortran pointers to be mapped
+ properly. */
+ data = 0;
+
+ gomp_debug (1,
+ "%s: attaching NULL host pointer, target %p "
+ "(struct base %p)\n", __FUNCTION__, (void *) devptr,
+ (void *) (n->tgt->tgt_start + n->tgt_offset));
+
+ gomp_copy_host2dev (devicep, aq, (void *) devptr, (void *) &data,
+ sizeof (void *), true, cbufp);
+
+ return;
}
s.host_start = target + bias;
{
if (allow_zero_length_array_sections)
/* When allowing attachment to zero-length array sections, we
- allow attaching to NULL pointers when the target region is not
- mapped. */
- data = 0;
+ copy the host pointer when the target region is not mapped. */
+ data = target;
else
{
gomp_mutex_unlock (&devicep->lock);
cbuf.chunk_cnt = -1;
cbuf.use_cnt = 0;
cbuf.buf = NULL;
- if (mapnum > 1 || pragma_kind == GOMP_MAP_VARS_TARGET)
+ if (mapnum > 1 || (pragma_kind & GOMP_MAP_VARS_TARGET))
{
size_t chunks_size = (mapnum + 1) * sizeof (struct gomp_coalesce_chunk);
cbuf.chunks = (struct gomp_coalesce_chunk *) gomp_alloca (chunks_size);
cbuf.chunk_cnt = 0;
}
- if (pragma_kind == GOMP_MAP_VARS_TARGET)
+ if (pragma_kind & GOMP_MAP_VARS_TARGET)
{
size_t align = 4 * sizeof (void *);
tgt_align = align;
tgt->list[i].offset = 0;
continue;
}
- else if ((kind & typemask) == GOMP_MAP_STRUCT)
+ else if ((kind & typemask) == GOMP_MAP_STRUCT
+ || (kind & typemask) == GOMP_MAP_STRUCT_UNORD)
{
size_t first = i + 1;
size_t last = i + sizes[i];
tgt->list[i].key = NULL;
if (!aq
&& gomp_to_device_kind_p (get_kind (short_mapkind, kinds, i)
- & typemask))
+ & typemask)
+ && sizes[i] != 0)
gomp_coalesce_buf_add (&cbuf,
tgt_size - cur_node.host_end
+ (uintptr_t) hostaddrs[i],
if (!n)
{
tgt->list[i].key = NULL;
- tgt->list[i].offset = OFFSET_POINTER;
+ tgt->list[i].offset = OFFSET_INLINED;
continue;
}
}
{
/* Not present, hence, skip entry - including its MAP_POINTER,
when existing. */
- tgt->list[i].offset = OFFSET_POINTER;
+ tgt->list[i].offset = OFFSET_INLINED;
if (i + 1 < mapnum
&& ((typemask & get_kind (short_mapkind, kinds, i + 1))
== GOMP_MAP_POINTER))
tgt->tgt_start = (uintptr_t) tgt->to_free;
tgt->tgt_end = tgt->tgt_start + sizes[0];
}
- else if (not_found_cnt || pragma_kind == GOMP_MAP_VARS_TARGET)
+ else if (not_found_cnt || (pragma_kind & GOMP_MAP_VARS_TARGET))
{
/* Allocate tgt_align aligned tgt_size block of memory. */
/* FIXME: Perhaps change interface to allocate properly aligned
}
tgt_size = 0;
- if (pragma_kind == GOMP_MAP_VARS_TARGET)
+ if (pragma_kind & GOMP_MAP_VARS_TARGET)
tgt_size = mapnum * sizeof (void *);
tgt->array = NULL;
gomp_copy_host2dev (devicep, aq,
(void *) (tgt->tgt_start + tgt_size),
(void *) hostaddrs[i], len, false, cbufp);
+	      /* Save the device address in hostaddrs[i] to permit its later
+		 availability when doing a deep firstprivate with pointer
+		 attach.  */
+ hostaddrs[i] = (void *) (tgt->tgt_start + tgt_size);
tgt_size += len;
+
+ /* If followed by GOMP_MAP_ATTACH, pointer assign this
+ firstprivate to hostaddrs[i+1], which is assumed to contain a
+ device address. */
+ if (i + 1 < mapnum
+ && (GOMP_MAP_ATTACH
+ == (typemask & get_kind (short_mapkind, kinds, i+1))))
+ {
+ uintptr_t target = (uintptr_t) hostaddrs[i];
+ void *devptr = *(void**) hostaddrs[i+1] + sizes[i+1];
+ /* Per
+ <https://inbox.sourceware.org/gcc-patches/87o7pe12ke.fsf@euler.schwinge.homeip.net>
+ "OpenMP: Handle descriptors in target's firstprivate [PR104949]"
+ this probably needs revision for 'aq' usage. */
+ assert (!aq);
+ gomp_copy_host2dev (devicep, aq, devptr, &target,
+ sizeof (void *), false, cbufp);
+ ++i;
+ }
continue;
case GOMP_MAP_FIRSTPRIVATE_INT:
case GOMP_MAP_ZERO_LEN_ARRAY_SECTION:
tgt->list[i].offset = OFFSET_INLINED;
}
continue;
+ case GOMP_MAP_STRUCT_UNORD:
+ if (sizes[i] > 1)
+ {
+ void *first = hostaddrs[i + 1];
+ for (size_t j = i + 1; j < i + sizes[i]; j++)
+ if (hostaddrs[j + 1] != first)
+ {
+ gomp_mutex_unlock (&devicep->lock);
+ gomp_fatal ("Mapped array elements must be the "
+ "same (%p vs %p)", first,
+ hostaddrs[j + 1]);
+ }
+ }
+ /* Fallthrough. */
case GOMP_MAP_STRUCT:
first = i + 1;
last = i + sizes[i];
+ sizes[last];
if (tgt->list[first].key != NULL)
continue;
+ if (sizes[last] == 0)
+ cur_node.host_end++;
n = splay_tree_lookup (mem_map, &cur_node);
+ if (sizes[last] == 0)
+ cur_node.host_end--;
+ if (n == NULL && cur_node.host_start == cur_node.host_end)
+ {
+ gomp_mutex_unlock (&devicep->lock);
+ gomp_fatal ("Struct pointer member not mapped (%p)",
+ (void*) hostaddrs[first]);
+ }
if (n == NULL)
{
size_t align = (size_t) 1 << (kind >> rshift);
gomp_mutex_unlock (&devicep->lock);
gomp_fatal ("always pointer not mapped");
}
- if ((get_kind (short_mapkind, kinds, i - 1) & typemask)
- != GOMP_MAP_ALWAYS_POINTER)
+ if (i > 0
+ && ((get_kind (short_mapkind, kinds, i - 1) & typemask)
+ != GOMP_MAP_ALWAYS_POINTER))
cur_node.tgt_offset = gomp_map_val (tgt, hostaddrs, i - 1);
if (cur_node.tgt_offset)
cur_node.tgt_offset -= sizes[i];
k->host_end = k->host_start + sizeof (void *);
splay_tree_key n = splay_tree_lookup (mem_map, k);
if (n && n->refcount != REFCOUNT_LINK)
- gomp_map_vars_existing (devicep, aq, n, k, &tgt->list[i],
- kind & typemask, false, implicit, cbufp,
- refcount_set);
+ {
+ if (field_tgt_clear != FIELD_TGT_EMPTY)
+ {
+ /* For this condition to be true, there must be a
+ duplicate struct element mapping. This can happen with
+ GOMP_MAP_STRUCT_UNORD mappings, for example. */
+ tgt->list[i].key = n;
+ if (openmp_p)
+ {
+ assert ((n->refcount & REFCOUNT_STRUCTELEM) != 0);
+ assert (field_tgt_structelem_first != NULL);
+
+ if (i == field_tgt_clear)
+ {
+ n->refcount |= REFCOUNT_STRUCTELEM_FLAG_LAST;
+ field_tgt_structelem_first = NULL;
+ }
+ }
+ if (i == field_tgt_clear)
+ field_tgt_clear = FIELD_TGT_EMPTY;
+ gomp_increment_refcount (n, refcount_set);
+ tgt->list[i].copy_from
+ = GOMP_MAP_COPY_FROM_P (kind & typemask);
+ tgt->list[i].always_copy_from
+ = GOMP_MAP_ALWAYS_FROM_P (kind & typemask);
+ tgt->list[i].is_attach = false;
+ tgt->list[i].offset = 0;
+ tgt->list[i].length = k->host_end - k->host_start;
+ }
+ else
+ gomp_map_vars_existing (devicep, aq, n, k, &tgt->list[i],
+ kind & typemask, false, implicit,
+ cbufp, refcount_set);
+ }
else
{
k->aux = NULL;
- k->host_start),
sizes[j], cbufp, false);
}
- }
+ }
i = j - 1;
break;
case GOMP_MAP_FORCE_PRESENT:
+ case GOMP_MAP_ALWAYS_PRESENT_TO:
+ case GOMP_MAP_ALWAYS_PRESENT_FROM:
+ case GOMP_MAP_ALWAYS_PRESENT_TOFROM:
{
/* We already looked up the memory region above and it
was missing. */
size_t size = k->host_end - k->host_start;
gomp_mutex_unlock (&devicep->lock);
#ifdef HAVE_INTTYPES_H
- gomp_fatal ("present clause: !acc_is_present (%p, "
- "%"PRIu64" (0x%"PRIx64"))",
- (void *) k->host_start,
- (uint64_t) size, (uint64_t) size);
+ gomp_fatal ("present clause: not present on the device "
+ "(addr: %p, size: %"PRIu64" (0x%"PRIx64"), "
+ "dev: %d)", (void *) k->host_start,
+ (uint64_t) size, (uint64_t) size,
+ devicep->target_id);
#else
- gomp_fatal ("present clause: !acc_is_present (%p, "
- "%lu (0x%lx))", (void *) k->host_start,
- (unsigned long) size, (unsigned long) size);
+ gomp_fatal ("present clause: not present on the device "
+ "(addr: %p, size: %lu (0x%lx), dev: %d)",
+ (void *) k->host_start,
+ (unsigned long) size, (unsigned long) size,
+ devicep->target_id);
#endif
}
break;
}
}
- if (pragma_kind == GOMP_MAP_VARS_TARGET)
+ if (pragma_kind & GOMP_MAP_VARS_TARGET)
{
for (i = 0; i < mapnum; i++)
{
if (cbufp)
{
- /* See 'gomp_coalesce_buf_add'. */
- assert (!aq);
-
long c = 0;
for (c = 0; c < cbuf.chunk_cnt; ++c)
gomp_copy_host2dev (devicep, aq,
(char *) cbuf.buf + (cbuf.chunks[c].start
- cbuf.chunks[0].start),
cbuf.chunks[c].end - cbuf.chunks[c].start,
- true, NULL);
- free (cbuf.buf);
+ false, NULL);
+ if (aq)
+ /* Free once the transfer has completed. */
+ devicep->openacc.async.queue_callback_func (aq, free, cbuf.buf);
+ else
+ free (cbuf.buf);
cbuf.buf = NULL;
cbufp = NULL;
}
gomp_copy_dev2host (devicep, NULL, hostaddr, devaddr, size);
}
}
+ else
+ {
+ int kind = get_kind (short_mapkind, kinds, i);
+
+ if (GOMP_MAP_PRESENT_P (kind))
+ {
+ /* We already looked up the memory region above and it
+ was missing. */
+ gomp_mutex_unlock (&devicep->lock);
+#ifdef HAVE_INTTYPES_H
+ gomp_fatal ("present clause: not present on the device "
+ "(addr: %p, size: %"PRIu64" (0x%"PRIx64"), "
+ "dev: %d)", (void *) hostaddrs[i],
+ (uint64_t) sizes[i], (uint64_t) sizes[i],
+ devicep->target_id);
+#else
+ gomp_fatal ("present clause: not present on the device "
+ "(addr: %p, size: %lu (0x%lx), dev: %d)",
+ (void *) hostaddrs[i], (unsigned long) sizes[i],
+ (unsigned long) sizes[i], devicep->target_id);
+#endif
+ }
+ }
}
gomp_mutex_unlock (&devicep->lock);
}
+/* Walk gomp_offload_icv_list and return the item whose device_num equals
+   DEV_NUM, or NULL if no per-device ICV item has been created yet for that
+   device (callers such as get_gomp_offload_icvs create one on demand).  */
+static struct gomp_offload_icv_list *
+gomp_get_offload_icv_item (int dev_num)
+{
+  struct gomp_offload_icv_list *l = gomp_offload_icv_list;
+  while (l != NULL && l->device_num != dev_num)
+    l = l->next;
+
+  return l;
+}
+
+/* Helper function for 'gomp_load_image_to_device'. Returns the ICV values
+ depending on the device num and the variable hierarchy
+ (_DEV_42, _DEV, _ALL). If no ICV was initially configured for the given
+ device and thus no item with that device number is contained in
+ gomp_offload_icv_list, then a new item is created and added to the list. */
+
+static struct gomp_offload_icvs *
+get_gomp_offload_icvs (int dev_num)
+{
+  /* The three levels of the ICV variable hierarchy, most specific first:
+     _DEV_<dev_num> (dev_x), _DEV (dev), _ALL (all).  */
+  struct gomp_icv_list *dev
+    = gomp_get_initial_icv_item (GOMP_DEVICE_NUM_FOR_DEV);
+  struct gomp_icv_list *all
+    = gomp_get_initial_icv_item (GOMP_DEVICE_NUM_FOR_ALL);
+  struct gomp_icv_list *dev_x = gomp_get_initial_icv_item (dev_num);
+  struct gomp_offload_icv_list *offload_icvs
+    = gomp_get_offload_icv_item (dev_num);
+
+  /* Already created for this device: reuse the cached values.  */
+  if (offload_icvs != NULL)
+    return &offload_icvs->icvs;
+
+  struct gomp_offload_icv_list *new
+    = (struct gomp_offload_icv_list *) gomp_malloc (sizeof (struct gomp_offload_icv_list));
+
+  new->device_num = dev_num;
+  new->icvs.device_num = dev_num;
+  new->next = gomp_offload_icv_list;
+
+  /* For each ICV, pick the first level of the hierarchy at which the flag
+     says the value was explicitly configured; otherwise fall back to the
+     library default.  */
+  if (dev_x != NULL && gomp_get_icv_flag (dev_x->flags, GOMP_ICV_NTEAMS))
+    new->icvs.nteams = dev_x->icvs.nteams_var;
+  else if (dev != NULL && gomp_get_icv_flag (dev->flags, GOMP_ICV_NTEAMS))
+    new->icvs.nteams = dev->icvs.nteams_var;
+  else if (all != NULL && gomp_get_icv_flag (all->flags, GOMP_ICV_NTEAMS))
+    new->icvs.nteams = all->icvs.nteams_var;
+  else
+    new->icvs.nteams = gomp_default_icv_values.nteams_var;
+
+  if (dev_x != NULL
+      && gomp_get_icv_flag (dev_x->flags, GOMP_ICV_TEAMS_THREAD_LIMIT))
+    new->icvs.teams_thread_limit = dev_x->icvs.teams_thread_limit_var;
+  else if (dev != NULL
+	   && gomp_get_icv_flag (dev->flags, GOMP_ICV_TEAMS_THREAD_LIMIT))
+    new->icvs.teams_thread_limit = dev->icvs.teams_thread_limit_var;
+  else if (all != NULL
+	   && gomp_get_icv_flag (all->flags, GOMP_ICV_TEAMS_THREAD_LIMIT))
+    new->icvs.teams_thread_limit = all->icvs.teams_thread_limit_var;
+  else
+    new->icvs.teams_thread_limit
+      = gomp_default_icv_values.teams_thread_limit_var;
+
+  if (dev_x != NULL
+      && gomp_get_icv_flag (dev_x->flags, GOMP_ICV_DEFAULT_DEVICE))
+    new->icvs.default_device = dev_x->icvs.default_device_var;
+  else if (dev != NULL
+	   && gomp_get_icv_flag (dev->flags, GOMP_ICV_DEFAULT_DEVICE))
+    new->icvs.default_device = dev->icvs.default_device_var;
+  else if (all != NULL
+	   && gomp_get_icv_flag (all->flags, GOMP_ICV_DEFAULT_DEVICE))
+    new->icvs.default_device = all->icvs.default_device_var;
+  else
+    new->icvs.default_device = gomp_default_icv_values.default_device_var;
+
+  /* Prepend the new item to the global list and return its ICV block.  */
+  gomp_offload_icv_list = new;
+  return &new->icvs;
+}
+
/* Load image pointed by TARGET_DATA to the device, specified by DEVICEP.
And insert to splay tree the mapping between addresses from HOST_TABLE and
from loaded target image. We rely in the host and device compiler
void **host_funcs_end = ((void ***) host_table)[1];
void **host_var_table = ((void ***) host_table)[2];
void **host_vars_end = ((void ***) host_table)[3];
+ void **host_ind_func_table = NULL;
+ void **host_ind_funcs_end = NULL;
- /* The func table contains only addresses, the var table contains addresses
- and corresponding sizes. */
+ if (GOMP_VERSION_SUPPORTS_INDIRECT_FUNCS (version))
+ {
+ host_ind_func_table = ((void ***) host_table)[4];
+ host_ind_funcs_end = ((void ***) host_table)[5];
+ }
+
+ /* The func and ind_func tables contain only addresses, the var table
+ contains addresses and corresponding sizes. */
int num_funcs = host_funcs_end - host_func_table;
int num_vars = (host_vars_end - host_var_table) / 2;
-
- /* Others currently is only 'device_num' */
- int num_others = 1;
+ int num_ind_funcs = (host_ind_funcs_end - host_ind_func_table);
/* Load image to device and get target addresses for the image. */
struct addr_pair *target_table = NULL;
+ uint64_t *rev_target_fn_table = NULL;
int i, num_target_entries;
+ /* With reverse offload, insert also target-host addresses. */
+ bool rev_lookup = omp_requires_mask & GOMP_REQUIRES_REVERSE_OFFLOAD;
+
num_target_entries
= devicep->load_image_func (devicep->target_id, version,
- target_data, &target_table);
+ target_data, &target_table,
+ rev_lookup ? &rev_target_fn_table : NULL,
+ num_ind_funcs
+ ? (uint64_t *) host_ind_func_table : NULL);
if (num_target_entries != num_funcs + num_vars
- /* Others (device_num) are included as trailing entries in pair list. */
- && num_target_entries != num_funcs + num_vars + num_others)
+ /* "+1" due to the additional ICV struct. */
+ && num_target_entries != num_funcs + num_vars + 1)
{
gomp_mutex_unlock (&devicep->lock);
if (is_register_lock)
/* Insert host-target address mapping into splay tree. */
struct target_mem_desc *tgt = gomp_malloc (sizeof (*tgt));
- tgt->array = gomp_malloc ((num_funcs + num_vars) * sizeof (*tgt->array));
+ /* "+1" due to the additional ICV struct. */
+ tgt->array = gomp_malloc ((num_funcs + num_vars + 1)
+ * sizeof (*tgt->array));
+ if (rev_target_fn_table)
+ tgt->rev_array = gomp_malloc (num_funcs * sizeof (*tgt->rev_array));
+ else
+ tgt->rev_array = NULL;
tgt->refcount = REFCOUNT_INFINITY;
tgt->tgt_start = 0;
tgt->tgt_end = 0;
tgt->list_count = 0;
tgt->device_descr = devicep;
splay_tree_node array = tgt->array;
+ reverse_splay_tree_node rev_array = tgt->rev_array;
for (i = 0; i < num_funcs; i++)
{
array->left = NULL;
array->right = NULL;
splay_tree_insert (&devicep->mem_map, array);
+ if (rev_target_fn_table)
+ {
+ reverse_splay_tree_key k2 = &rev_array->key;
+ k2->dev = rev_target_fn_table[i];
+ k2->k = k;
+ rev_array->left = NULL;
+ rev_array->right = NULL;
+ if (k2->dev != 0)
+ reverse_splay_tree_insert (&devicep->mem_map_rev, rev_array);
+ rev_array++;
+ }
array++;
}
array++;
}
- /* Last entry is for the on-device 'device_num' variable. Tolerate case
- where plugin does not return this entry. */
+ /* Last entry is for a ICVs variable.
+ Tolerate case where plugin does not return those entries. */
if (num_funcs + num_vars < num_target_entries)
{
- struct addr_pair *device_num_var = &target_table[num_funcs + num_vars];
- /* Start address will be non-zero for last entry if GOMP_DEVICE_NUM_VAR
- was found in this image. */
- if (device_num_var->start != 0)
+ struct addr_pair *var = &target_table[num_funcs + num_vars];
+
+ /* Start address will be non-zero for the ICVs variable if
+ the variable was found in this image. */
+ if (var->start != 0)
{
/* The index of the devicep within devices[] is regarded as its
'device number', which is different from the per-device type
devicep->target_id. */
- int device_num_val = (int) (devicep - &devices[0]);
- if (device_num_var->end - device_num_var->start != sizeof (int))
+ int dev_num = (int) (devicep - &devices[0]);
+ struct gomp_offload_icvs *icvs = get_gomp_offload_icvs (dev_num);
+ size_t var_size = var->end - var->start;
+ if (var_size != sizeof (struct gomp_offload_icvs))
{
gomp_mutex_unlock (&devicep->lock);
if (is_register_lock)
gomp_mutex_unlock (®ister_lock);
- gomp_fatal ("offload plugin managed 'device_num' not of expected "
+ gomp_fatal ("offload plugin managed 'icv struct' not of expected "
"format");
}
-
- /* Copy device_num value to place on device memory, hereby actually
- designating its device number into effect. */
- gomp_copy_host2dev (devicep, NULL, (void *) device_num_var->start,
- &device_num_val, sizeof (int), false, NULL);
+ /* Copy the ICVs variable to place on device memory, hereby
+ actually designating its device number into effect. */
+ gomp_copy_host2dev (devicep, NULL, (void *) var->start, icvs,
+ var_size, false, NULL);
+ splay_tree_key k = &array->key;
+ k->host_start = (uintptr_t) icvs;
+ k->host_end =
+ k->host_start + (size_mask & sizeof (struct gomp_offload_icvs));
+ k->tgt = tgt;
+ k->tgt_offset = var->start;
+ k->refcount = REFCOUNT_INFINITY;
+ k->dynamic_refcount = 0;
+ k->aux = NULL;
+ array->left = NULL;
+ array->right = NULL;
+ splay_tree_insert (&devicep->mem_map, array);
+ array++;
}
}
gomp_mutex_unlock (&devicep->lock);
gomp_fatal ("image unload fail");
}
+ if (devicep->mem_map_rev.root)
+ {
+ /* Free reverse offload splay tree + data; 'tgt->rev_array' is the only
+ real allocation. */
+ assert (node && node->tgt && node->tgt->rev_array);
+ assert (devicep->mem_map_rev.root->key.k->tgt == node->tgt);
+ free (node->tgt->rev_array);
+ devicep->mem_map_rev.root = NULL;
+ }
/* Remove mappings from splay tree. */
int i;
}
}
+/* Format the 'omp requires' clauses set in REQUIRES_MASK into BUF (of SIZE
+   bytes) as a comma-separated list, e.g. "unified_address, reverse_offload".
+   Callers size BUF to hold all clause names, so truncation is not expected;
+   if it did occur, snprintf would keep BUF NUL-terminated.  Note p == buf
+   is used to decide whether a separating ", " is needed.  */
+static void
+gomp_requires_to_name (char *buf, size_t size, int requires_mask)
+{
+  char *end = buf + size, *p = buf;
+  if (requires_mask & GOMP_REQUIRES_UNIFIED_ADDRESS)
+    p += snprintf (p, end - p, "unified_address");
+  if (requires_mask & GOMP_REQUIRES_UNIFIED_SHARED_MEMORY)
+    p += snprintf (p, end - p, "%sunified_shared_memory",
+		   (p == buf ? "" : ", "));
+  if (requires_mask & GOMP_REQUIRES_REVERSE_OFFLOAD)
+    p += snprintf (p, end - p, "%sreverse_offload",
+		   (p == buf ? "" : ", "));
+}
+
/* This function should be called from every offload image while loading.
It gets the descriptor of the host func and var tables HOST_TABLE, TYPE of
- the target, and TARGET_DATA needed by target plugin. */
+ the target, and DATA. */
void
GOMP_offload_register_ver (unsigned version, const void *host_table,
- int target_type, const void *target_data)
+ int target_type, const void *data)
{
int i;
if (GOMP_VERSION_LIB (version) > GOMP_VERSION)
gomp_fatal ("Library too old for offload (version %u < %u)",
GOMP_VERSION, GOMP_VERSION_LIB (version));
-
+
+ int omp_req;
+ const void *target_data;
+ if (GOMP_VERSION_LIB (version) > 1)
+ {
+ omp_req = (int) (size_t) ((void **) data)[0];
+ target_data = &((void **) data)[1];
+ }
+ else
+ {
+ omp_req = 0;
+ target_data = data;
+ }
+
gomp_mutex_lock (®ister_lock);
+ if (omp_req && omp_requires_mask && omp_requires_mask != omp_req)
+ {
+ char buf1[sizeof ("unified_address, unified_shared_memory, "
+ "reverse_offload")];
+ char buf2[sizeof ("unified_address, unified_shared_memory, "
+ "reverse_offload")];
+ gomp_requires_to_name (buf2, sizeof (buf2),
+ omp_req != GOMP_REQUIRES_TARGET_USED
+ ? omp_req : omp_requires_mask);
+ if (omp_req != GOMP_REQUIRES_TARGET_USED
+ && omp_requires_mask != GOMP_REQUIRES_TARGET_USED)
+ {
+ gomp_requires_to_name (buf1, sizeof (buf1), omp_requires_mask);
+ gomp_fatal ("OpenMP 'requires' directive with non-identical clauses "
+ "in multiple compilation units: '%s' vs. '%s'",
+ buf1, buf2);
+ }
+ else
+ gomp_fatal ("OpenMP 'requires' directive with '%s' specified only in "
+ "some compilation units", buf2);
+ }
+ omp_requires_mask = omp_req;
+
/* Load image to all initialized devices. */
for (i = 0; i < num_devices; i++)
{
gomp_mutex_unlock (®ister_lock);
}
+/* Legacy entry point. */
+
void
GOMP_offload_register (const void *host_table, int target_type,
const void *target_data)
/* This function should be called from every offload image while unloading.
It gets the descriptor of the host func and var tables HOST_TABLE, TYPE of
- the target, and TARGET_DATA needed by target plugin. */
+ the target, and DATA. */
void
GOMP_offload_unregister_ver (unsigned version, const void *host_table,
- int target_type, const void *target_data)
+ int target_type, const void *data)
{
int i;
+ if (GOMP_VERSION_LIB (version) > GOMP_VERSION)
+ gomp_fatal ("Library too old for offload (version %u < %u)",
+ GOMP_VERSION, GOMP_VERSION_LIB (version));
+
+ const void *target_data;
+ if (GOMP_VERSION_LIB (version) > 1)
+ target_data = &((void **) data)[1];
+ else
+ target_data = data;
+
gomp_mutex_lock (®ister_lock);
/* Unload image from all initialized devices. */
gomp_mutex_unlock (®ister_lock);
}
+/* Legacy entry point. */
+
void
GOMP_offload_unregister (const void *host_table, int target_type,
const void *target_data)
if (devicep->state == GOMP_DEVICE_INITIALIZED)
{
unsigned i;
-
+
/* Unload from device all images registered at the moment. */
for (i = 0; i < num_offload_images; i++)
{
memcpy (tgt + tgt_size, hostaddrs[i], sizes[i]);
hostaddrs[i] = tgt + tgt_size;
tgt_size = tgt_size + sizes[i];
+ if (i + 1 < mapnum && (kinds[i+1] & 0xff) == GOMP_MAP_ATTACH)
+ {
+ *(*(uintptr_t**) hostaddrs[i+1] + sizes[i+1]) = (uintptr_t) hostaddrs[i];
+ ++i;
+ }
}
}
size_t mapnum, void **hostaddrs, size_t *sizes,
unsigned char *kinds)
{
- struct gomp_device_descr *devicep = resolve_device (device);
+ struct gomp_device_descr *devicep = resolve_device (device, true);
void *fn_addr;
if (devicep == NULL
return flags;
}
+/* Copy the device-side ICV struct for device number DEVICE back from
+   DEVICEP into the host-side copy kept in gomp_offload_icv_list, so the
+   host view reflects any updates made by target code (presumably e.g. a
+   default-device change — confirm against the device-side writers).
+   No-op if no ICV item exists for DEVICE or if the ICV struct is not
+   currently mapped on the device.  */
+static void
+gomp_copy_back_icvs (struct gomp_device_descr *devicep, int device)
+{
+  struct gomp_offload_icv_list *item = gomp_get_offload_icv_item (device);
+  if (item == NULL)
+    return;
+
+  void *host_ptr = &item->icvs;
+  void *dev_ptr = omp_get_mapped_ptr (host_ptr, device);
+  if (dev_ptr != NULL)
+    gomp_copy_dev2host (devicep, NULL, host_ptr, dev_ptr,
+			sizeof (struct gomp_offload_icvs));
+}
+
/* Like GOMP_target, but KINDS is 16-bit, UNUSED is no longer present,
and several arguments have been added:
FLAGS is a bitmask, see GOMP_TARGET_FLAG_* in gomp-constants.h.
void **hostaddrs, size_t *sizes, unsigned short *kinds,
unsigned int flags, void **depend, void **args)
{
- struct gomp_device_descr *devicep = resolve_device (device);
+ struct gomp_device_descr *devicep = resolve_device (device, true);
size_t tgt_align = 0, tgt_size = 0;
bool fpc_done = false;
+ /* Obtain the original TEAMS and THREADS values from ARGS. */
+ intptr_t orig_teams = 1, orig_threads = 0;
+ size_t num_args = 0, len = 1, teams_len = 1, threads_len = 1;
+ void **tmpargs = args;
+ while (*tmpargs)
+ {
+ intptr_t id = (intptr_t) *tmpargs++, val;
+ if (id & GOMP_TARGET_ARG_SUBSEQUENT_PARAM)
+ {
+ val = (intptr_t) *tmpargs++;
+ len = 2;
+ }
+ else
+ {
+ val = id >> GOMP_TARGET_ARG_VALUE_SHIFT;
+ len = 1;
+ }
+ num_args += len;
+ if ((id & GOMP_TARGET_ARG_DEVICE_MASK) != GOMP_TARGET_ARG_DEVICE_ALL)
+ continue;
+ val = val > INT_MAX ? INT_MAX : val;
+ if ((id & GOMP_TARGET_ARG_ID_MASK) == GOMP_TARGET_ARG_NUM_TEAMS)
+ {
+ orig_teams = val;
+ teams_len = len;
+ }
+ else if ((id & GOMP_TARGET_ARG_ID_MASK) == GOMP_TARGET_ARG_THREAD_LIMIT)
+ {
+ orig_threads = val;
+ threads_len = len;
+ }
+ }
+
+ intptr_t new_teams = orig_teams, new_threads = orig_threads;
+ /* ORIG_TEAMS == -2: No explicit teams construct specified. Set to 1.
+ ORIG_TEAMS == -1: TEAMS construct with NUM_TEAMS clause specified, but the
+ value could not be determined. No change.
+ ORIG_TEAMS == 0: TEAMS construct without NUM_TEAMS clause.
+ Set device-specific value.
+ ORIG_TEAMS > 0: Value was already set through e.g. NUM_TEAMS clause.
+ No change. */
+ if (orig_teams == -2)
+ new_teams = 1;
+ else if (orig_teams == 0)
+ {
+ struct gomp_offload_icv_list *item = gomp_get_offload_icv_item (device);
+ if (item != NULL)
+ new_teams = item->icvs.nteams;
+ }
+ /* The device-specific teams-thread-limit is only set if (a) an explicit TEAMS
+ region exists, i.e. ORIG_TEAMS > -2, and (b) THREADS was not already set by
+ e.g. a THREAD_LIMIT clause. */
+ if (orig_teams > -2 && orig_threads == 0)
+ {
+ struct gomp_offload_icv_list *item = gomp_get_offload_icv_item (device);
+ if (item != NULL)
+ new_threads = item->icvs.teams_thread_limit;
+ }
+
+ /* Copy and change the arguments list only if TEAMS or THREADS need to be
+ updated. */
+ void **new_args = args;
+ if (orig_teams != new_teams || orig_threads != new_threads)
+ {
+ size_t tms_len = (orig_teams == new_teams
+ ? teams_len
+ : (new_teams > -(1 << 15) && new_teams < (1 << 15)
+ ? 1 : 2));
+ size_t ths_len = (orig_threads == new_threads
+ ? threads_len
+ : (new_threads > -(1 << 15) && new_threads < (1 << 15)
+ ? 1 : 2));
+ /* One additional item after the last arg must be NULL. */
+ size_t new_args_cnt = num_args - teams_len - threads_len + tms_len
+ + ths_len + 1;
+ new_args = (void **) gomp_alloca (new_args_cnt * sizeof (void*));
+
+ tmpargs = args;
+ void **tmp_new_args = new_args;
+ /* Copy all args except TEAMS and THREADS. TEAMS and THREADS are copied
+ too if they have not been changed and skipped otherwise. */
+ while (*tmpargs)
+ {
+ intptr_t id = (intptr_t) *tmpargs;
+ if (((id & GOMP_TARGET_ARG_ID_MASK) == GOMP_TARGET_ARG_NUM_TEAMS
+ && orig_teams != new_teams)
+ || ((id & GOMP_TARGET_ARG_ID_MASK) == GOMP_TARGET_ARG_THREAD_LIMIT
+ && orig_threads != new_threads))
+ {
+ tmpargs++;
+ if (id & GOMP_TARGET_ARG_SUBSEQUENT_PARAM)
+ tmpargs++;
+ }
+ else
+ {
+ *tmp_new_args++ = *tmpargs++;
+ if (id & GOMP_TARGET_ARG_SUBSEQUENT_PARAM)
+ *tmp_new_args++ = *tmpargs++;
+ }
+ }
+
+ /* Add the new TEAMS arg to the new args list if it has been changed. */
+ if (orig_teams != new_teams)
+ {
+ intptr_t new_val = new_teams;
+ if (tms_len == 1)
+ {
+ new_val = (new_val << GOMP_TARGET_ARG_VALUE_SHIFT)
+ | GOMP_TARGET_ARG_NUM_TEAMS;
+ *tmp_new_args++ = (void *) new_val;
+ }
+ else
+ {
+ *tmp_new_args++ = (void *) (GOMP_TARGET_ARG_SUBSEQUENT_PARAM
+ | GOMP_TARGET_ARG_NUM_TEAMS);
+ *tmp_new_args++ = (void *) new_val;
+ }
+ }
+
+ /* Add the new THREADS arg to the new args list if it has been changed. */
+ if (orig_threads != new_threads)
+ {
+ intptr_t new_val = new_threads;
+ if (ths_len == 1)
+ {
+ new_val = (new_val << GOMP_TARGET_ARG_VALUE_SHIFT)
+ | GOMP_TARGET_ARG_THREAD_LIMIT;
+ *tmp_new_args++ = (void *) new_val;
+ }
+ else
+ {
+ *tmp_new_args++ = (void *) (GOMP_TARGET_ARG_SUBSEQUENT_PARAM
+ | GOMP_TARGET_ARG_THREAD_LIMIT);
+ *tmp_new_args++ = (void *) new_val;
+ }
+ }
+
+ *tmp_new_args = NULL;
+ }
+
flags = clear_unsupported_flags (devicep, flags);
if (flags & GOMP_TARGET_FLAG_NOWAIT)
{
struct gomp_team *team = gomp_new_team (1);
struct gomp_task *task = thr->task;
+ struct gomp_task **implicit_task = &task;
struct gomp_task_icv *icv = task ? &task->icv : &gomp_global_icv;
team->prev_ts = thr->ts;
thr->ts.team = team;
thr->ts.static_trip = 0;
thr->task = &team->implicit_task[0];
gomp_init_task (thr->task, NULL, icv);
- if (task)
+ while (*implicit_task
+ && (*implicit_task)->kind != GOMP_TASK_IMPLICIT)
+ implicit_task = &(*implicit_task)->parent;
+ if (*implicit_task)
{
- thr->task = task;
+ thr->task = *implicit_task;
gomp_end_task ();
- free (task);
+ free (*implicit_task);
thr->task = &team->implicit_task[0];
}
else
pthread_setspecific (gomp_thread_destructor, thr);
+ if (implicit_task != &task)
+ {
+ *implicit_task = thr->task;
+ thr->task = task;
+ }
}
if (thr->ts.team
&& !thr->task->final_task)
{
gomp_create_target_task (devicep, fn, mapnum, hostaddrs,
- sizes, kinds, flags, depend, args,
+ sizes, kinds, flags, depend, new_args,
GOMP_TARGET_TASK_BEFORE_MAP);
return;
}
tgt_align, tgt_size);
}
}
- gomp_target_fallback (fn, hostaddrs, devicep, args);
+ gomp_target_fallback (fn, hostaddrs, devicep, new_args);
return;
}
}
devicep->run_func (devicep->target_id, fn_addr,
tgt_vars ? (void *) tgt_vars->tgt_start : hostaddrs,
- args);
+ new_args);
if (tgt_vars)
{
htab_clear (refcount_set);
}
if (refcount_set)
htab_free (refcount_set);
+
+ /* Copy back ICVs from device to host.
+ HOST_PTR is expected to exist since it was added in
+ gomp_load_image_to_device if not already available. */
+ gomp_copy_back_icvs (devicep, device);
+
}
-/* Host fallback for GOMP_target_data{,_ext} routines. */
-static void
-gomp_target_data_fallback (struct gomp_device_descr *devicep)
+/* Reverse lookup (device addr -> host addr) for reverse offload.  We avoid
+   keeping track of all variable handling - assuming that reverse offload
+   occurs only very rarely.  Downside is that the reverse search is slow.  */
+
+struct gomp_splay_tree_rev_lookup_data {
+  /* Device address range [tgt_start, tgt_end) being searched for.  */
+  uintptr_t tgt_start;
+  uintptr_t tgt_end;
+  /* Output: splay-tree key whose device range matches, or NULL if none.  */
+  splay_tree_key key;
+};
+
+static int
+gomp_splay_tree_rev_lookup (splay_tree_key key, void *d)
{
- struct gomp_task_icv *icv = gomp_icv (false);
+ struct gomp_splay_tree_rev_lookup_data *data;
+ data = (struct gomp_splay_tree_rev_lookup_data *)d;
+ uintptr_t tgt_start = key->tgt->tgt_start + key->tgt_offset;
- if (gomp_target_offload_var == GOMP_TARGET_OFFLOAD_MANDATORY
- && devicep != NULL)
- gomp_fatal ("OMP_TARGET_OFFLOAD is set to MANDATORY, but device cannot "
- "be used for offloading");
+ if (tgt_start > data->tgt_start || key->tgt->list_count == 0)
+ return 0;
- if (icv->target_data)
+ size_t j;
+ for (j = 0; j < key->tgt->list_count; j++)
+ if (key->tgt->list[j].key == key)
+ break;
+ assert (j < key->tgt->list_count);
+ uintptr_t tgt_end = tgt_start + key->tgt->list[j].length;
+
+ if ((tgt_start == data->tgt_start && tgt_end == data->tgt_end)
+ || (tgt_end > data->tgt_start && tgt_start < data->tgt_end))
{
- /* Even when doing a host fallback, if there are any active
- #pragma omp target data constructs, need to remember the
- new #pragma omp target data, otherwise GOMP_target_end_data
- would get out of sync. */
- struct target_mem_desc *tgt
- = gomp_map_vars (NULL, 0, NULL, NULL, NULL, NULL, false,
- NULL, GOMP_MAP_VARS_DATA);
- tgt->prev = icv->target_data;
- icv->target_data = tgt;
+ data->key = key;
+ return 1;
}
+ return 0;
}
-void
-GOMP_target_data (int device, const void *unused, size_t mapnum,
- void **hostaddrs, size_t *sizes, unsigned char *kinds)
+static inline splay_tree_key
+gomp_map_rev_lookup (splay_tree mem_map, uint64_t tgt_start, uint64_t tgt_end,
+ bool zero_len)
{
- struct gomp_device_descr *devicep = resolve_device (device);
+ struct gomp_splay_tree_rev_lookup_data data;
+ data.key = NULL;
+ data.tgt_start = tgt_start;
+ data.tgt_end = tgt_end;
- if (devicep == NULL
- || !(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
- || (devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM))
- return gomp_target_data_fallback (devicep);
+ if (tgt_start != tgt_end)
+ {
+ splay_tree_foreach_lazy (mem_map, gomp_splay_tree_rev_lookup, &data);
+ return data.key;
+ }
- struct target_mem_desc *tgt
+ data.tgt_end++;
+ splay_tree_foreach_lazy (mem_map, gomp_splay_tree_rev_lookup, &data);
+ if (data.key != NULL || zero_len)
+ return data.key;
+ data.tgt_end--;
+
+ data.tgt_start--;
+ splay_tree_foreach_lazy (mem_map, gomp_splay_tree_rev_lookup, &data);
+ return data.key;
+}
+
+/* Per-map-entry bookkeeping for reverse offload: the original device
+   address, whether a matching mapping already existed ('present'), and
+   whether a host buffer was obtained via gomp_aligned_alloc ('aligned',
+   so it must be freed with gomp_aligned_free).  */
+struct cpy_data
+{
+  uint64_t devaddr;
+  bool present, aligned;
+};
+
+
+/* Search just mapped reverse-offload data; returns index if found,
+   otherwise >= n.
+
+   D holds the device addresses recorded so far for this reverse-offload
+   call, DEVADDRS/KINDS/SIZES are the mapping arrays, N is the number of
+   entries to search, and [TGT_START, TGT_END) is the device address range
+   looked for.  NOTE(review): the counter is size_t but the return type is
+   int — assumed fine for realistic map counts; confirm upstream.  */
+
+static inline int
+gomp_map_cdata_lookup_int (struct cpy_data *d, uint64_t *devaddrs,
+                           unsigned short *kinds, uint64_t *sizes, size_t n,
+                           uint64_t tgt_start, uint64_t tgt_end)
+{
+  const bool short_mapkind = true;
+  const int typemask = short_mapkind ? 0xff : 0x7;
+  size_t i;
+  for (i = 0; i < n; i++)
+    {
+      bool is_struct = ((get_kind (short_mapkind, kinds, i) & typemask)
+                        == GOMP_MAP_STRUCT);
+      uint64_t dev_end;
+      if (!is_struct)
+        dev_end = d[i].devaddr + sizes[i];
+      else
+        {
+          /* For GOMP_MAP_STRUCT, sizes[i] is the number of trailing member
+             entries; the struct's range ends with the last member's end.  */
+          if (i + sizes[i] < n)
+            dev_end = d[i + sizes[i]].devaddr + sizes[i + sizes[i]];
+          else
+            dev_end = devaddrs[i + sizes[i]] + sizes[i + sizes[i]];
+        }
+      /* Match on an exact range or on any overlap.  */
+      if ((d[i].devaddr == tgt_start && dev_end == tgt_end)
+          || (dev_end > tgt_start && d[i].devaddr < tgt_end))
+        break;
+      if (is_struct)
+        i += sizes[i];
+    }
+  return i;
+}
+
+/* Wrapper around gomp_map_cdata_lookup_int that additionally handles the
+   TGT_START == TGT_END case (zero-sized lookups): first retry with the
+   range extended one byte past TGT_START; if that fails and ZERO_LEN is
+   false, retry with the byte just before TGT_START instead.  */
+static inline int
+gomp_map_cdata_lookup (struct cpy_data *d, uint64_t *devaddrs,
+                       unsigned short *kinds, uint64_t *sizes,
+                       size_t n, uint64_t tgt_start, uint64_t tgt_end,
+                       bool zero_len)
+{
+  size_t i;
+  if (tgt_start != tgt_end)
+    return gomp_map_cdata_lookup_int (d, devaddrs, kinds, sizes, n,
+                                      tgt_start, tgt_end);
+  tgt_end++;
+  i = gomp_map_cdata_lookup_int (d, devaddrs, kinds, sizes, n,
+                                 tgt_start, tgt_end);
+  if (i < n || zero_len)
+    return i;
+  tgt_end--;
+
+  tgt_start--;
+  return gomp_map_cdata_lookup_int (d, devaddrs, kinds, sizes, n,
+                                    tgt_start, tgt_end);
+}
+
+/* Handle reverse offload.  This is called by the device plugins for a
+   reverse offload; it is not called if the outer target runs on the host.
+   The mapping is simplified: device-affecting constructs (except for target
+   with device(ancestor:1)) must not be encountered; in particular not
+   target (enter/exit) data.  */
+
+void
+gomp_target_rev (uint64_t fn_ptr, uint64_t mapnum, uint64_t devaddrs_ptr,
+                 uint64_t sizes_ptr, uint64_t kinds_ptr, int dev_num,
+                 struct goacc_asyncqueue *aq)
+{
+  /* Return early if there is no offload code.  */
+  if (sizeof (OFFLOAD_PLUGINS) == sizeof (""))
+    return;
+  /* Currently, this fails because of calculate_firstprivate_requirements
+     below; it could be fixed but additional code needs to be updated to
+     handle 32bit hosts - thus, it is not worthwhile.  */
+  if (sizeof (void *) != sizeof (uint64_t))
+    gomp_fatal ("Reverse offload of 32bit hosts not supported.");
+
+  struct cpy_data *cdata = NULL;
+  uint64_t *devaddrs;
+  uint64_t *sizes;
+  unsigned short *kinds;
+  const bool short_mapkind = true;
+  const int typemask = short_mapkind ? 0xff : 0x7;
+  struct gomp_device_descr *devicep = resolve_device (dev_num, false);
+
+  /* Phase 1: translate the device function pointer back to the host
+     function via the reverse splay tree.  */
+  reverse_splay_tree_key n;
+  struct reverse_splay_tree_key_s k;
+  k.dev = fn_ptr;
+
+  gomp_mutex_lock (&devicep->lock);
+  n = gomp_map_lookup_rev (&devicep->mem_map_rev, &k);
+  gomp_mutex_unlock (&devicep->lock);
+
+  if (n == NULL)
+    gomp_fatal ("Cannot find reverse-offload function");
+  void (*host_fn)() = (void (*)()) n->k->host_start;
+
+  /* Phase 2: obtain the mapping arrays.  With shared memory (or no
+     mappings) the device pointers can be used directly; otherwise copy
+     them from the device.  */
+  if ((devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM) || mapnum == 0)
+    {
+      devaddrs = (uint64_t *) (uintptr_t) devaddrs_ptr;
+      sizes = (uint64_t *) (uintptr_t) sizes_ptr;
+      kinds = (unsigned short *) (uintptr_t) kinds_ptr;
+    }
+  else
+    {
+      devaddrs = (uint64_t *) gomp_malloc (mapnum * sizeof (uint64_t));
+      sizes = (uint64_t *) gomp_malloc (mapnum * sizeof (uint64_t));
+      kinds = (unsigned short *) gomp_malloc (mapnum * sizeof (unsigned short));
+      gomp_copy_dev2host (devicep, aq, devaddrs,
+                          (const void *) (uintptr_t) devaddrs_ptr,
+                          mapnum * sizeof (uint64_t));
+      gomp_copy_dev2host (devicep, aq, sizes,
+                          (const void *) (uintptr_t) sizes_ptr,
+                          mapnum * sizeof (uint64_t));
+      gomp_copy_dev2host (devicep, aq, kinds,
+                          (const void *) (uintptr_t) kinds_ptr,
+                          mapnum * sizeof (unsigned short));
+      if (aq && !devicep->openacc.async.synchronize_func (aq))
+        exit (EXIT_FAILURE);
+    }
+
+  size_t tgt_align = 0, tgt_size = 0;
+
+  /* If actually executed on 32bit systems, the casts lead to wrong code;
+     but 32bit with offloading is not supported; see top of this function.  */
+  calculate_firstprivate_requirements (mapnum, (void *) (uintptr_t) sizes,
+                                       (void *) (uintptr_t) kinds,
+                                       &tgt_align, &tgt_size);
+
+  /* Phase 3: stage firstprivate data into a suitably aligned stack
+     buffer and rewrite the devaddrs entries to point at it.  */
+  if (tgt_align)
+    {
+      char *tgt = gomp_alloca (tgt_size + tgt_align - 1);
+      uintptr_t al = (uintptr_t) tgt & (tgt_align - 1);
+      if (al)
+        tgt += tgt_align - al;
+      tgt_size = 0;
+      for (uint64_t i = 0; i < mapnum; i++)
+        if (get_kind (short_mapkind, kinds, i) == GOMP_MAP_FIRSTPRIVATE
+            && devaddrs[i] != 0)
+          {
+            /* High byte of the short map kind encodes log2 alignment.  */
+            size_t align = (size_t) 1 << (kinds[i] >> 8);
+            tgt_size = (tgt_size + align - 1) & ~(align - 1);
+            if (devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
+              memcpy (tgt + tgt_size, (void *) (uintptr_t) devaddrs[i],
+                      (size_t) sizes[i]);
+            else
+              {
+                gomp_copy_dev2host (devicep, aq, tgt + tgt_size,
+                                    (void *) (uintptr_t) devaddrs[i],
+                                    (size_t) sizes[i]);
+                if (aq && !devicep->openacc.async.synchronize_func (aq))
+                  exit (EXIT_FAILURE);
+              }
+            devaddrs[i] = (uint64_t) (uintptr_t) tgt + tgt_size;
+            tgt_size = tgt_size + sizes[i];
+            if ((devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
+                && i + 1 < mapnum
+                && ((get_kind (short_mapkind, kinds, i) & typemask)
+                    == GOMP_MAP_ATTACH))
+              {
+                *(uint64_t*) (uintptr_t) (devaddrs[i+1] + sizes[i+1])
+                  = (uint64_t) devaddrs[i];
+                ++i;
+              }
+          }
+    }
+
+  /* Phase 4 (non-shared memory only): translate remaining device
+     addresses to host addresses, allocating host buffers and copying
+     data from the device as each map kind requires.  */
+  if (!(devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM) && mapnum > 0)
+    {
+      size_t j, struct_cpy = 0;
+      splay_tree_key n2;
+      cdata = gomp_alloca (sizeof (*cdata) * mapnum);
+      memset (cdata, '\0', sizeof (*cdata) * mapnum);
+      gomp_mutex_lock (&devicep->lock);
+      for (uint64_t i = 0; i < mapnum; i++)
+        {
+          if (devaddrs[i] == 0)
+            continue;
+          n = NULL;
+          int kind = get_kind (short_mapkind, kinds, i) & typemask;
+          switch (kind)
+            {
+            case GOMP_MAP_FIRSTPRIVATE:
+            case GOMP_MAP_FIRSTPRIVATE_INT:
+              continue;
+
+            case GOMP_MAP_DELETE:
+            case GOMP_MAP_RELEASE:
+            case GOMP_MAP_DELETE_ZERO_LEN_ARRAY_SECTION:
+              /* Assume it is present; look it up - but ignore unless the
+                 present clause is there.  */
+            case GOMP_MAP_ALLOC:
+            case GOMP_MAP_FROM:
+            case GOMP_MAP_FORCE_ALLOC:
+            case GOMP_MAP_FORCE_FROM:
+            case GOMP_MAP_ALWAYS_FROM:
+            case GOMP_MAP_TO:
+            case GOMP_MAP_TOFROM:
+            case GOMP_MAP_FORCE_TO:
+            case GOMP_MAP_FORCE_TOFROM:
+            case GOMP_MAP_ALWAYS_TO:
+            case GOMP_MAP_ALWAYS_TOFROM:
+            case GOMP_MAP_FORCE_PRESENT:
+            case GOMP_MAP_ALWAYS_PRESENT_FROM:
+            case GOMP_MAP_ALWAYS_PRESENT_TO:
+            case GOMP_MAP_ALWAYS_PRESENT_TOFROM:
+            case GOMP_MAP_ZERO_LEN_ARRAY_SECTION:
+              cdata[i].devaddr = devaddrs[i];
+              bool zero_len = (kind == GOMP_MAP_DELETE_ZERO_LEN_ARRAY_SECTION
+                               || kind == GOMP_MAP_ZERO_LEN_ARRAY_SECTION);
+              /* First look among the entries already handled in this
+                 reverse-offload call ...  */
+              j = gomp_map_cdata_lookup (cdata, devaddrs, kinds, sizes, i,
+                                         devaddrs[i],
+                                         devaddrs[i] + sizes[i], zero_len);
+              if (j < i)
+                {
+                  n2 = NULL;
+                  cdata[i].present = true;
+                  devaddrs[i] = devaddrs[j] + devaddrs[i] - cdata[j].devaddr;
+                }
+              else
+                {
+                  /* ... then in the device's memory map.  */
+                  n2 = gomp_map_rev_lookup (&devicep->mem_map,
+                                            devaddrs[i],
+                                            devaddrs[i] + sizes[i], zero_len);
+                  cdata[i].present = n2 != NULL;
+                }
+              if (!cdata[i].present && GOMP_MAP_PRESENT_P (kind))
+                {
+                  gomp_mutex_unlock (&devicep->lock);
+#ifdef HAVE_INTTYPES_H
+                  gomp_fatal ("present clause: no corresponding data on "
+                              "parent device at %p with size %"PRIu64,
+                              (void *) (uintptr_t) devaddrs[i],
+                              (uint64_t) sizes[i]);
+#else
+                  gomp_fatal ("present clause: no corresponding data on "
+                              "parent device at %p with size %lu",
+                              (void *) (uintptr_t) devaddrs[i],
+                              (unsigned long) sizes[i]);
+#endif
+                  break;
+                }
+              else if (!cdata[i].present
+                       && kind != GOMP_MAP_DELETE
+                       && kind != GOMP_MAP_RELEASE
+                       && kind != GOMP_MAP_DELETE_ZERO_LEN_ARRAY_SECTION)
+                {
+                  cdata[i].aligned = true;
+                  size_t align = (size_t) 1 << (kinds[i] >> 8);
+                  devaddrs[i]
+                    = (uint64_t) (uintptr_t) gomp_aligned_alloc (align,
+                                                                 sizes[i]);
+                }
+              else if (n2 != NULL)
+                devaddrs[i] = (n2->host_start + cdata[i].devaddr
+                               - (n2->tgt->tgt_start + n2->tgt_offset));
+              if (((!cdata[i].present || struct_cpy)
+                   && (kind == GOMP_MAP_TO || kind == GOMP_MAP_TOFROM))
+                  || kind == GOMP_MAP_FORCE_TO
+                  || kind == GOMP_MAP_FORCE_TOFROM
+                  || GOMP_MAP_ALWAYS_TO_P (kind))
+                {
+                  gomp_copy_dev2host (devicep, aq,
+                                      (void *) (uintptr_t) devaddrs[i],
+                                      (void *) (uintptr_t) cdata[i].devaddr,
+                                      sizes[i]);
+                  if (aq && !devicep->openacc.async.synchronize_func (aq))
+                    {
+                      gomp_mutex_unlock (&devicep->lock);
+                      exit (EXIT_FAILURE);
+                    }
+                }
+              if (struct_cpy)
+                struct_cpy--;
+              break;
+            case GOMP_MAP_ATTACH:
+            case GOMP_MAP_POINTER:
+            case GOMP_MAP_ALWAYS_POINTER:
+              n2 = gomp_map_rev_lookup (&devicep->mem_map,
+                                        devaddrs[i] + sizes[i],
+                                        devaddrs[i] + sizes[i]
+                                        + sizeof (void*), false);
+              cdata[i].present = n2 != NULL;
+              cdata[i].devaddr = devaddrs[i];
+              if (n2)
+                devaddrs[i] = (n2->host_start + cdata[i].devaddr
+                               - (n2->tgt->tgt_start + n2->tgt_offset));
+              else
+                {
+                  j = gomp_map_cdata_lookup (cdata, devaddrs, kinds, sizes, i,
+                                             devaddrs[i] + sizes[i],
+                                             devaddrs[i] + sizes[i]
+                                             + sizeof (void*), false);
+                  if (j < i)
+                    {
+                      cdata[i].present = true;
+                      devaddrs[i] = (devaddrs[j] + devaddrs[i]
+                                     - cdata[j].devaddr);
+                    }
+                }
+              if (!cdata[i].present)
+                devaddrs[i] = (uintptr_t) gomp_malloc (sizeof (void*));
+              /* Assume that when present, the pointer is already correct.  */
+              if (!n2)
+                *(uint64_t *) (uintptr_t) (devaddrs[i] + sizes[i])
+                  = devaddrs[i-1];
+              break;
+            case GOMP_MAP_TO_PSET:
+              /* Assume that when present, the pointers are fine and no 'to:'
+                 is required.  */
+              n2 = gomp_map_rev_lookup (&devicep->mem_map,
+                                        devaddrs[i], devaddrs[i] + sizes[i],
+                                        false);
+              cdata[i].present = n2 != NULL;
+              cdata[i].devaddr = devaddrs[i];
+              if (n2)
+                devaddrs[i] = (n2->host_start + cdata[i].devaddr
+                               - (n2->tgt->tgt_start + n2->tgt_offset));
+              else
+                {
+                  j = gomp_map_cdata_lookup (cdata, devaddrs, kinds, sizes, i,
+                                             devaddrs[i],
+                                             devaddrs[i] + sizes[i], false);
+                  if (j < i)
+                    {
+                      cdata[i].present = true;
+                      devaddrs[i] = (devaddrs[j] + devaddrs[i]
+                                     - cdata[j].devaddr);
+                    }
+                }
+              if (!cdata[i].present)
+                {
+                  cdata[i].aligned = true;
+                  size_t align = (size_t) 1 << (kinds[i] >> 8);
+                  devaddrs[i]
+                    = (uint64_t) (uintptr_t) gomp_aligned_alloc (align,
+                                                                 sizes[i]);
+                  gomp_copy_dev2host (devicep, aq,
+                                      (void *) (uintptr_t) devaddrs[i],
+                                      (void *) (uintptr_t) cdata[i].devaddr,
+                                      sizes[i]);
+                  if (aq && !devicep->openacc.async.synchronize_func (aq))
+                    {
+                      gomp_mutex_unlock (&devicep->lock);
+                      exit (EXIT_FAILURE);
+                    }
+                }
+              /* Translate the pointer members following the pointer set.  */
+              for (j = i + 1; j < mapnum; j++)
+                {
+                  kind = get_kind (short_mapkind, kinds, j) & typemask;
+                  if (!GOMP_MAP_ALWAYS_POINTER_P (kind)
+                      && !GOMP_MAP_POINTER_P (kind))
+                    break;
+                  if (devaddrs[j] < devaddrs[i])
+                    break;
+                  if (cdata[i].present)
+                    continue;
+                  if (devaddrs[j] == 0)
+                    {
+                      *(uint64_t *) (uintptr_t) (devaddrs[i] + sizes[j]) = 0;
+                      continue;
+                    }
+                  int k;
+                  n2 = NULL;
+                  /* Dereference devaddrs[j] to get the device addr.  */
+                  assert (devaddrs[j] - sizes[j] == cdata[i].devaddr);
+                  devaddrs[j] = *(uint64_t *) (uintptr_t) (devaddrs[i]
+                                                           + sizes[j]);
+                  cdata[j].present = true;
+                  cdata[j].devaddr = devaddrs[j];
+                  if (devaddrs[j] == 0)
+                    continue;
+                  k = gomp_map_cdata_lookup (cdata, devaddrs, kinds, sizes, j,
+                                             devaddrs[j],
+                                             devaddrs[j] + sizeof (void*),
+                                             false);
+                  if (k < j)
+                    devaddrs[j] = (devaddrs[k] + devaddrs[j]
+                                   - cdata[k].devaddr);
+                  else
+                    {
+                      n2 = gomp_map_rev_lookup (&devicep->mem_map,
+                                                devaddrs[j],
+                                                devaddrs[j] + sizeof (void*),
+                                                false);
+                      if (n2 == NULL)
+                        {
+                          gomp_mutex_unlock (&devicep->lock);
+                          gomp_fatal ("Pointer target wasn't mapped");
+                        }
+                      devaddrs[j] = (n2->host_start + cdata[j].devaddr
+                                     - (n2->tgt->tgt_start + n2->tgt_offset));
+                    }
+                  *(void **) (uintptr_t) (devaddrs[i] + sizes[j])
+                    = (void *) (uintptr_t) devaddrs[j];
+                }
+              i = j -1;
+              break;
+            case GOMP_MAP_STRUCT:
+              n2 = gomp_map_rev_lookup (&devicep->mem_map, devaddrs[i+1],
+                                        devaddrs[i + sizes[i]]
+                                        + sizes[i + sizes[i]], false);
+              cdata[i].present = n2 != NULL;
+              cdata[i].devaddr = devaddrs[i];
+              struct_cpy = cdata[i].present ? 0 : sizes[i];
+              if (!n2)
+                {
+                  /* Allocate one buffer covering the first to last member,
+                     then bias it so member offsets stay valid.  */
+                  size_t sz = (size_t) (devaddrs[i + sizes[i]]
+                                        - devaddrs[i+1]
+                                        + sizes[i + sizes[i]]);
+                  size_t align = (size_t) 1 << (kinds[i] >> 8);
+                  cdata[i].aligned = true;
+                  devaddrs[i] = (uintptr_t) gomp_aligned_alloc (align, sz);
+                  devaddrs[i] -= devaddrs[i+1] - cdata[i].devaddr;
+                }
+              else
+                devaddrs[i] = (n2->host_start + cdata[i].devaddr
+                               - (n2->tgt->tgt_start + n2->tgt_offset));
+              break;
+            default:
+              gomp_mutex_unlock (&devicep->lock);
+              gomp_fatal ("gomp_target_rev unhandled kind 0x%.4x", kinds[i]);
+            }
+        }
+      gomp_mutex_unlock (&devicep->lock);
+    }
+
+  /* Phase 5: run the target region on the host.  */
+  host_fn (devaddrs);
+
+  /* Phase 6: copy modified data back to the device and free the host
+     buffers allocated in phase 4.  */
+  if (!(devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM) && mapnum > 0)
+    {
+      uint64_t struct_cpy = 0;
+      bool clean_struct = false;
+      for (uint64_t i = 0; i < mapnum; i++)
+        {
+          if (cdata[i].devaddr == 0)
+            continue;
+          int kind = get_kind (short_mapkind, kinds, i) & typemask;
+          bool copy = !cdata[i].present || struct_cpy;
+          switch (kind)
+            {
+            case GOMP_MAP_FORCE_FROM:
+            case GOMP_MAP_FORCE_TOFROM:
+            case GOMP_MAP_ALWAYS_FROM:
+            case GOMP_MAP_ALWAYS_TOFROM:
+            case GOMP_MAP_ALWAYS_PRESENT_FROM:
+            case GOMP_MAP_ALWAYS_PRESENT_TOFROM:
+              copy = true;
+              /* FALLTHRU */
+            case GOMP_MAP_FROM:
+            case GOMP_MAP_TOFROM:
+              if (copy)
+                {
+                  gomp_copy_host2dev (devicep, aq,
+                                      (void *) (uintptr_t) cdata[i].devaddr,
+                                      (void *) (uintptr_t) devaddrs[i],
+                                      sizes[i], false, NULL);
+                  if (aq && !devicep->openacc.async.synchronize_func (aq))
+                    exit (EXIT_FAILURE);
+                }
+            default:
+              break;
+            }
+          if (struct_cpy)
+            {
+              struct_cpy--;
+              continue;
+            }
+          if (kind == GOMP_MAP_STRUCT && !cdata[i].present)
+            {
+              /* Defer freeing structs until after the loop, since their
+                 members above still needed the biased base address.  */
+              clean_struct = true;
+              struct_cpy = sizes[i];
+            }
+          else if (!cdata[i].present && cdata[i].aligned)
+            gomp_aligned_free ((void *) (uintptr_t) devaddrs[i]);
+          else if (!cdata[i].present)
+            free ((void *) (uintptr_t) devaddrs[i]);
+        }
+      if (clean_struct)
+        for (uint64_t i = 0; i < mapnum; i++)
+          if (!cdata[i].present
+              && ((get_kind (short_mapkind, kinds, i) & typemask)
+                  == GOMP_MAP_STRUCT))
+            {
+              /* Undo the base-pointer bias applied at allocation time.  */
+              devaddrs[i] += cdata[i+1].devaddr - cdata[i].devaddr;
+              gomp_aligned_free ((void *) (uintptr_t) devaddrs[i]);
+            }
+
+      free (devaddrs);
+      free (sizes);
+      free (kinds);
+    }
+}
+
+/* Host fallback for GOMP_target_data{,_ext} routines.  DEVICEP is the
+   resolved device (NULL when host fallback was requested explicitly);
+   it is used only for the mandatory-offload diagnostic.  */
+
+static void
+gomp_target_data_fallback (struct gomp_device_descr *devicep)
+{
+  struct gomp_task_icv *icv = gomp_icv (false);
+
+  /* Diagnose only when a real device was requested but cannot be used.  */
+  if (gomp_target_offload_var == GOMP_TARGET_OFFLOAD_MANDATORY
+      && devicep != NULL)
+    gomp_fatal ("OMP_TARGET_OFFLOAD is set to MANDATORY, but device cannot "
+                "be used for offloading");
+
+  if (icv->target_data)
+    {
+      /* Even when doing a host fallback, if there are any active
+         #pragma omp target data constructs, need to remember the
+         new #pragma omp target data, otherwise GOMP_target_end_data
+         would get out of sync.  */
+      struct target_mem_desc *tgt
+        = gomp_map_vars (NULL, 0, NULL, NULL, NULL, NULL, false,
+                         NULL, GOMP_MAP_VARS_DATA);
+      tgt->prev = icv->target_data;
+      icv->target_data = tgt;
+    }
+}
+
+void
+GOMP_target_data (int device, const void *unused, size_t mapnum,
+ void **hostaddrs, size_t *sizes, unsigned char *kinds)
+{
+ struct gomp_device_descr *devicep = resolve_device (device, true);
+
+ if (devicep == NULL
+ || !(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
+ || (devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM))
+ return gomp_target_data_fallback (devicep);
+
+ struct target_mem_desc *tgt
= gomp_map_vars (devicep, mapnum, hostaddrs, NULL, sizes, kinds, false,
NULL, GOMP_MAP_VARS_DATA);
struct gomp_task_icv *icv = gomp_icv (true);
GOMP_target_data_ext (int device, size_t mapnum, void **hostaddrs,
size_t *sizes, unsigned short *kinds)
{
- struct gomp_device_descr *devicep = resolve_device (device);
+ struct gomp_device_descr *devicep = resolve_device (device, true);
if (devicep == NULL
|| !(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
GOMP_target_update (int device, const void *unused, size_t mapnum,
void **hostaddrs, size_t *sizes, unsigned char *kinds)
{
- struct gomp_device_descr *devicep = resolve_device (device);
+ struct gomp_device_descr *devicep = resolve_device (device, true);
if (devicep == NULL
|| !(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
size_t *sizes, unsigned short *kinds,
unsigned int flags, void **depend)
{
- struct gomp_device_descr *devicep = resolve_device (device);
+ struct gomp_device_descr *devicep = resolve_device (device, true);
/* If there are depend clauses, but nowait is not present,
block the parent task until the dependencies are resolved
size_t *sizes, unsigned short *kinds,
unsigned int flags, void **depend)
{
- struct gomp_device_descr *devicep = resolve_device (device);
+ struct gomp_device_descr *devicep = resolve_device (device, true);
/* If there are depend clauses, but nowait is not present,
block the parent task until the dependencies are resolved
size_t i, j;
if ((flags & GOMP_TARGET_FLAG_EXIT_DATA) == 0)
for (i = 0; i < mapnum; i++)
- if ((kinds[i] & 0xff) == GOMP_MAP_STRUCT)
+ if ((kinds[i] & 0xff) == GOMP_MAP_STRUCT
+ || (kinds[i] & 0xff) == GOMP_MAP_STRUCT_UNORD)
{
gomp_map_vars (devicep, sizes[i] + 1, &hostaddrs[i], NULL, &sizes[i],
&kinds[i], true, &refcount_set,
GOMP_MAP_VARS_ENTER_DATA);
i += j - i - 1;
}
- else if (i + 1 < mapnum && (kinds[i + 1] & 0xff) == GOMP_MAP_ATTACH)
+ else if (i + 1 < mapnum
+ && ((kinds[i + 1] & 0xff) == GOMP_MAP_ATTACH
+ || ((kinds[i + 1] & 0xff) == GOMP_MAP_ALWAYS_POINTER
+ && (kinds[i] & 0xff) != GOMP_MAP_ALWAYS_POINTER)))
{
/* An attach operation must be processed together with the mapped
base-pointer list item. */
htab_t refcount_set = htab_create (ttask->mapnum);
if ((ttask->flags & GOMP_TARGET_FLAG_EXIT_DATA) == 0)
for (i = 0; i < ttask->mapnum; i++)
- if ((ttask->kinds[i] & 0xff) == GOMP_MAP_STRUCT)
+ if ((ttask->kinds[i] & 0xff) == GOMP_MAP_STRUCT
+ || (ttask->kinds[i] & 0xff) == GOMP_MAP_STRUCT_UNORD)
{
gomp_map_vars (devicep, ttask->sizes[i] + 1, &ttask->hostaddrs[i],
NULL, &ttask->sizes[i], &ttask->kinds[i], true,
void *
omp_target_alloc (size_t size, int device_num)
{
- if (device_num == gomp_get_num_devices ())
+ if (device_num == omp_initial_device
+ || device_num == gomp_get_num_devices ())
return malloc (size);
- if (device_num < 0)
- return NULL;
-
- struct gomp_device_descr *devicep = resolve_device (device_num);
+ struct gomp_device_descr *devicep = resolve_device (device_num, false);
if (devicep == NULL)
return NULL;
void
omp_target_free (void *device_ptr, int device_num)
{
- if (device_ptr == NULL)
- return;
-
- if (device_num == gomp_get_num_devices ())
+ if (device_num == omp_initial_device
+ || device_num == gomp_get_num_devices ())
{
free (device_ptr);
return;
}
- if (device_num < 0)
- return;
-
- struct gomp_device_descr *devicep = resolve_device (device_num);
- if (devicep == NULL)
+ struct gomp_device_descr *devicep = resolve_device (device_num, false);
+ if (devicep == NULL || device_ptr == NULL)
return;
if (!(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
int
omp_target_is_present (const void *ptr, int device_num)
{
- if (ptr == NULL)
- return 1;
-
- if (device_num == gomp_get_num_devices ())
+ if (device_num == omp_initial_device
+ || device_num == gomp_get_num_devices ())
return 1;
- if (device_num < 0)
- return 0;
-
- struct gomp_device_descr *devicep = resolve_device (device_num);
+ struct gomp_device_descr *devicep = resolve_device (device_num, false);
if (devicep == NULL)
return 0;
+ if (ptr == NULL)
+ return 1;
+
if (!(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
|| devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
return 1;
return ret;
}
-int
-omp_target_memcpy (void *dst, const void *src, size_t length,
- size_t dst_offset, size_t src_offset, int dst_device_num,
- int src_device_num)
+static int
+omp_target_memcpy_check (int dst_device_num, int src_device_num,
+ struct gomp_device_descr **dst_devicep,
+ struct gomp_device_descr **src_devicep)
{
- struct gomp_device_descr *dst_devicep = NULL, *src_devicep = NULL;
- bool ret;
-
- if (dst_device_num != gomp_get_num_devices ())
+ if (dst_device_num != gomp_get_num_devices ()
+ /* Above gomp_get_num_devices has to be called unconditionally. */
+ && dst_device_num != omp_initial_device)
{
- if (dst_device_num < 0)
- return EINVAL;
-
- dst_devicep = resolve_device (dst_device_num);
- if (dst_devicep == NULL)
+ *dst_devicep = resolve_device (dst_device_num, false);
+ if (*dst_devicep == NULL)
return EINVAL;
- if (!(dst_devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
- || dst_devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
- dst_devicep = NULL;
+ if (!((*dst_devicep)->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
+ || (*dst_devicep)->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
+ *dst_devicep = NULL;
}
- if (src_device_num != num_devices_openmp)
- {
- if (src_device_num < 0)
- return EINVAL;
- src_devicep = resolve_device (src_device_num);
- if (src_devicep == NULL)
+ if (src_device_num != num_devices_openmp
+ && src_device_num != omp_initial_device)
+ {
+ *src_devicep = resolve_device (src_device_num, false);
+ if (*src_devicep == NULL)
return EINVAL;
- if (!(src_devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
- || src_devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
- src_devicep = NULL;
+ if (!((*src_devicep)->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
+ || (*src_devicep)->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
+ *src_devicep = NULL;
}
+
+ return 0;
+}
+
+static int
+omp_target_memcpy_copy (void *dst, const void *src, size_t length,
+ size_t dst_offset, size_t src_offset,
+ struct gomp_device_descr *dst_devicep,
+ struct gomp_device_descr *src_devicep)
+{
+ bool ret;
if (src_devicep == NULL && dst_devicep == NULL)
{
memcpy ((char *) dst + dst_offset, (char *) src + src_offset, length);
return EINVAL;
}
+/* OpenMP omp_target_memcpy: copy LENGTH bytes from SRC + SRC_OFFSET on
+   device SRC_DEVICE_NUM to DST + DST_OFFSET on device DST_DEVICE_NUM.
+   Returns 0 on success or an errno-style value (EINVAL) on failure.  */
+int
+omp_target_memcpy (void *dst, const void *src, size_t length, size_t dst_offset,
+                   size_t src_offset, int dst_device_num, int src_device_num)
+{
+  struct gomp_device_descr *dst_devicep = NULL, *src_devicep = NULL;
+  /* Validate the device numbers and resolve them to descriptors.  */
+  int ret = omp_target_memcpy_check (dst_device_num, src_device_num,
+                                     &dst_devicep, &src_devicep);
+
+  if (ret)
+    return ret;
+
+  ret = omp_target_memcpy_copy (dst, src, length, dst_offset, src_offset,
+                                dst_devicep, src_devicep);
+
+  return ret;
+}
+
+/* Argument bundle passed to omp_target_memcpy_async_helper; mirrors the
+   parameters of omp_target_memcpy_copy.  */
+typedef struct
+{
+  void *dst;
+  const void *src;
+  size_t length;
+  size_t dst_offset;
+  size_t src_offset;
+  struct gomp_device_descr *dst_devicep;
+  struct gomp_device_descr *src_devicep;
+} omp_target_memcpy_data;
+
+/* Task body for omp_target_memcpy_async: perform the copy described by
+   ARGS (an omp_target_memcpy_data).  Aborts on failure, as a task has no
+   way to return an error code to the caller.  */
+static void
+omp_target_memcpy_async_helper (void *args)
+{
+  omp_target_memcpy_data *a = args;
+  if (omp_target_memcpy_copy (a->dst, a->src, a->length, a->dst_offset,
+                              a->src_offset, a->dst_devicep, a->src_devicep))
+    gomp_fatal ("omp_target_memcpy failed");
+}
+
+/* OpenMP omp_target_memcpy_async: like omp_target_memcpy, but performs
+   the copy in a task, optionally dependent on the DEPOBJ_COUNT depend
+   objects in DEPOBJ_LIST.  Returns 0, or the error from the device
+   check.  */
+int
+omp_target_memcpy_async (void *dst, const void *src, size_t length,
+                         size_t dst_offset, size_t src_offset,
+                         int dst_device_num, int src_device_num,
+                         int depobj_count, omp_depend_t *depobj_list)
+{
+  struct gomp_device_descr *dst_devicep = NULL, *src_devicep = NULL;
+  unsigned int flags = 0;
+  /* GOMP_task's depend-array layout: five header slots followed by one
+     entry per depend object.  */
+  void *depend[depobj_count + 5];
+  int i;
+  int check = omp_target_memcpy_check (dst_device_num, src_device_num,
+                                       &dst_devicep, &src_devicep);
+
+  omp_target_memcpy_data s = {
+    .dst = dst,
+    .src = src,
+    .length = length,
+    .dst_offset = dst_offset,
+    .src_offset = src_offset,
+    .dst_devicep = dst_devicep,
+    .src_devicep = src_devicep
+  };
+
+  if (check)
+    return check;
+
+  if (depobj_count > 0 && depobj_list != NULL)
+    {
+      flags |= GOMP_TASK_FLAG_DEPEND;
+      depend[0] = 0;
+      depend[1] = (void *) (uintptr_t) depobj_count;
+      depend[2] = depend[3] = depend[4] = 0;
+      for (i = 0; i < depobj_count; ++i)
+        depend[i + 5] = &depobj_list[i];
+    }
+
+  GOMP_task (omp_target_memcpy_async_helper, &s, NULL, sizeof (s),
+             __alignof__ (s), true, flags, depend, 0, NULL);
+
+  return 0;
+}
+
static int
omp_target_memcpy_rect_worker (void *dst, const void *src, size_t element_size,
int num_dims, const size_t *volume,
const size_t *dst_dimensions,
const size_t *src_dimensions,
struct gomp_device_descr *dst_devicep,
- struct gomp_device_descr *src_devicep)
+ struct gomp_device_descr *src_devicep,
+ size_t *tmp_size, void **tmp)
{
size_t dst_slice = element_size;
size_t src_slice = element_size;
(const char *) src + src_off,
length);
else
- ret = 0;
+ {
+ if (*tmp_size == 0)
+ {
+ *tmp_size = length;
+ *tmp = malloc (length);
+ if (*tmp == NULL)
+ return ENOMEM;
+ }
+ else if (*tmp_size < length)
+ {
+ *tmp_size = length;
+ free (*tmp);
+ *tmp = malloc (length);
+ if (*tmp == NULL)
+ return ENOMEM;
+ }
+ ret = src_devicep->dev2host_func (src_devicep->target_id, *tmp,
+ (const char *) src + src_off,
+ length);
+ if (ret == 1)
+ ret = dst_devicep->host2dev_func (dst_devicep->target_id,
+ (char *) dst + dst_off, *tmp,
+ length);
+ }
return ret ? 0 : EINVAL;
}
- /* FIXME: it would be nice to have some plugin function to handle
- num_dims == 2 and num_dims == 3 more efficiently. Larger ones can
- be handled in the generic recursion below, and for host-host it
- should be used even for any num_dims >= 2. */
+ /* host->device, device->host and intra device. */
+ if (num_dims == 2
+ && ((src_devicep
+ && src_devicep == dst_devicep
+ && src_devicep->memcpy2d_func)
+ || (!src_devicep != !dst_devicep
+ && ((src_devicep && src_devicep->memcpy2d_func)
+ || (dst_devicep && dst_devicep->memcpy2d_func)))))
+ {
+ size_t vol_sz1, dst_sz1, src_sz1, dst_off_sz1, src_off_sz1;
+ int dst_id = dst_devicep ? dst_devicep->target_id : -1;
+ int src_id = src_devicep ? src_devicep->target_id : -1;
+ struct gomp_device_descr *devp = dst_devicep ? dst_devicep : src_devicep;
+
+ if (__builtin_mul_overflow (volume[1], element_size, &vol_sz1)
+ || __builtin_mul_overflow (dst_dimensions[1], element_size, &dst_sz1)
+ || __builtin_mul_overflow (src_dimensions[1], element_size, &src_sz1)
+ || __builtin_mul_overflow (dst_offsets[1], element_size, &dst_off_sz1)
+ || __builtin_mul_overflow (src_offsets[1], element_size,
+ &src_off_sz1))
+ return EINVAL;
+ ret = devp->memcpy2d_func (dst_id, src_id, vol_sz1, volume[0],
+ dst, dst_off_sz1, dst_offsets[0], dst_sz1,
+ src, src_off_sz1, src_offsets[0], src_sz1);
+ if (ret != -1)
+ return ret ? 0 : EINVAL;
+ }
+ else if (num_dims == 3
+ && ((src_devicep
+ && src_devicep == dst_devicep
+ && src_devicep->memcpy3d_func)
+ || (!src_devicep != !dst_devicep
+ && ((src_devicep && src_devicep->memcpy3d_func)
+ || (dst_devicep && dst_devicep->memcpy3d_func)))))
+ {
+ size_t vol_sz2, dst_sz2, src_sz2, dst_off_sz2, src_off_sz2;
+ int dst_id = dst_devicep ? dst_devicep->target_id : -1;
+ int src_id = src_devicep ? src_devicep->target_id : -1;
+ struct gomp_device_descr *devp = dst_devicep ? dst_devicep : src_devicep;
+
+ if (__builtin_mul_overflow (volume[2], element_size, &vol_sz2)
+ || __builtin_mul_overflow (dst_dimensions[2], element_size, &dst_sz2)
+ || __builtin_mul_overflow (src_dimensions[2], element_size, &src_sz2)
+ || __builtin_mul_overflow (dst_offsets[2], element_size, &dst_off_sz2)
+ || __builtin_mul_overflow (src_offsets[2], element_size,
+ &src_off_sz2))
+ return EINVAL;
+ ret = devp->memcpy3d_func (dst_id, src_id, vol_sz2, volume[1], volume[0],
+ dst, dst_off_sz2, dst_offsets[1],
+ dst_offsets[0], dst_sz2, dst_dimensions[1],
+ src, src_off_sz2, src_offsets[1],
+ src_offsets[0], src_sz2, src_dimensions[1]);
+ if (ret != -1)
+ return ret ? 0 : EINVAL;
+ }
for (i = 1; i < num_dims; i++)
if (__builtin_mul_overflow (dst_slice, dst_dimensions[i], &dst_slice)
volume + 1, dst_offsets + 1,
src_offsets + 1, dst_dimensions + 1,
src_dimensions + 1, dst_devicep,
- src_devicep);
+ src_devicep, tmp_size, tmp);
if (ret)
return ret;
dst_off += dst_slice;
return 0;
}
+/* Validate arguments for omp_target_memcpy_rect{,_async}.  Returns
+   INT_MAX when both DST and SRC are NULL (the spec-defined query form
+   of omp_target_memcpy_rect — presumably reporting the supported number
+   of dimensions; confirm against the OpenMP spec), EINVAL for an invalid
+   device, or 0 on success with *DST_DEVICEP/*SRC_DEVICEP filled in.  */
+static int
+omp_target_memcpy_rect_check (void *dst, const void *src, int dst_device_num,
+                              int src_device_num,
+                              struct gomp_device_descr **dst_devicep,
+                              struct gomp_device_descr **src_devicep)
+{
+  if (!dst && !src)
+    return INT_MAX;
+
+  int ret = omp_target_memcpy_check (dst_device_num, src_device_num,
+                                     dst_devicep, src_devicep);
+  if (ret)
+    return ret;
+
+  return 0;
+}
+
+/* Perform the rectangular copy under the appropriate device locks.  The
+   worker may lazily allocate a temporary host buffer (for device-to-device
+   copies staged through the host); it is freed here.  */
+static int
+omp_target_memcpy_rect_copy (void *dst, const void *src,
+                             size_t element_size, int num_dims,
+                             const size_t *volume, const size_t *dst_offsets,
+                             const size_t *src_offsets,
+                             const size_t *dst_dimensions,
+                             const size_t *src_dimensions,
+                             struct gomp_device_descr *dst_devicep,
+                             struct gomp_device_descr *src_devicep)
+{
+  size_t tmp_size = 0;
+  void *tmp = NULL;
+  bool lock_src;
+  bool lock_dst;
+
+  /* Avoid locking the same device twice when src and dst coincide.  */
+  lock_src = src_devicep != NULL;
+  lock_dst = dst_devicep != NULL && src_devicep != dst_devicep;
+  if (lock_src)
+    gomp_mutex_lock (&src_devicep->lock);
+  if (lock_dst)
+    gomp_mutex_lock (&dst_devicep->lock);
+  int ret = omp_target_memcpy_rect_worker (dst, src, element_size, num_dims,
+                                           volume, dst_offsets, src_offsets,
+                                           dst_dimensions, src_dimensions,
+                                           dst_devicep, src_devicep,
+                                           &tmp_size, &tmp);
+  if (lock_src)
+    gomp_mutex_unlock (&src_devicep->lock);
+  if (lock_dst)
+    gomp_mutex_unlock (&dst_devicep->lock);
+  if (tmp)
+    free (tmp);
+
+  return ret;
+}
+
int
omp_target_memcpy_rect (void *dst, const void *src, size_t element_size,
int num_dims, const size_t *volume,
{
struct gomp_device_descr *dst_devicep = NULL, *src_devicep = NULL;
-  if (!dst && !src)
-    return INT_MAX;
+ /* Validate arguments and resolve the device numbers; a NULL/NULL
+    dst/src pair returns INT_MAX (max supported dimensions) from the
+    check helper.  */
+ int check = omp_target_memcpy_rect_check (dst, src, dst_device_num,
+ src_device_num, &dst_devicep,
+ &src_devicep);
-  if (dst_device_num != gomp_get_num_devices ())
-    {
-      if (dst_device_num < 0)
-	return EINVAL;
+ if (check)
+ return check;
-      dst_devicep = resolve_device (dst_device_num);
-      if (dst_devicep == NULL)
-	return EINVAL;
+ /* Synchronous path: do the copy directly under the device locks.  */
+ int ret = omp_target_memcpy_rect_copy (dst, src, element_size, num_dims,
+ volume, dst_offsets, src_offsets,
+ dst_dimensions, src_dimensions,
+ dst_devicep, src_devicep);
-      if (!(dst_devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
-	  || dst_devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
-	dst_devicep = NULL;
-    }
-  if (src_device_num != num_devices_openmp)
-    {
-      if (src_device_num < 0)
-	return EINVAL;
+ return ret;
+}
- src_devicep = resolve_device (src_device_num);
- if (src_devicep == NULL)
- return EINVAL;
+/* Argument block for omp_target_memcpy_rect_async_helper.  It is passed
+   to GOMP_task by value (arg_size = sizeof, cpyfn = NULL), so it may
+   safely live on the enqueuing caller's stack.  */
+typedef struct
+{
+ void *dst;
+ const void *src;
+ size_t element_size;
+ const size_t *volume;
+ const size_t *dst_offsets;
+ const size_t *src_offsets;
+ const size_t *dst_dimensions;
+ const size_t *src_dimensions;
+ struct gomp_device_descr *dst_devicep;
+ struct gomp_device_descr *src_devicep;
+ int num_dims;
+} omp_target_memcpy_rect_data;
+
+/* Task function run by GOMP_task on behalf of omp_target_memcpy_rect_async:
+   unpack the omp_target_memcpy_rect_data block and perform the copy.  A
+   task has no way to report an error code back to the enqueuer, so any
+   failure is fatal.  */
+static void
+omp_target_memcpy_rect_async_helper (void *args)
+{
+ omp_target_memcpy_rect_data *a = args;
+ int ret = omp_target_memcpy_rect_copy (a->dst, a->src, a->element_size,
+ a->num_dims, a->volume, a->dst_offsets,
+ a->src_offsets, a->dst_dimensions,
+ a->src_dimensions, a->dst_devicep,
+ a->src_devicep);
+ if (ret)
+ gomp_fatal ("omp_target_memcpy_rect failed");
+}
- if (!(src_devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
- || src_devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
- src_devicep = NULL;
+/* OpenMP 5.1: asynchronous variant of omp_target_memcpy_rect.  The copy
+   is enqueued as an OpenMP task (depending on DEPOBJ_LIST entries, if
+   any) that runs omp_target_memcpy_rect_async_helper; errors inside the
+   task are fatal because they cannot be reported to the caller.  Returns
+   0 on success or the result of argument checking.  */
+int
+omp_target_memcpy_rect_async (void *dst, const void *src, size_t element_size,
+ int num_dims, const size_t *volume,
+ const size_t *dst_offsets,
+ const size_t *src_offsets,
+ const size_t *dst_dimensions,
+ const size_t *src_dimensions,
+ int dst_device_num, int src_device_num,
+ int depobj_count, omp_depend_t *depobj_list)
+{
+ struct gomp_device_descr *dst_devicep = NULL, *src_devicep = NULL;
+ unsigned flags = 0;
+ int check = omp_target_memcpy_rect_check (dst, src, dst_device_num,
+ src_device_num, &dst_devicep,
+ &src_devicep);
+ /* NOTE(review): the depend VLA and S below are set up even when CHECK is
+    nonzero and we return early - harmless, merely wasted work.  */
+ void *depend[depobj_count + 5];
+ int i;
+
+ /* S is copied by value by GOMP_task (see sizeof (s) below), so a stack
+    lifetime is sufficient.  */
+ omp_target_memcpy_rect_data s = {
+ .dst = dst,
+ .src = src,
+ .element_size = element_size,
+ .num_dims = num_dims,
+ .volume = volume,
+ .dst_offsets = dst_offsets,
+ .src_offsets = src_offsets,
+ .dst_dimensions = dst_dimensions,
+ .src_dimensions = src_dimensions,
+ .dst_devicep = dst_devicep,
+ .src_devicep = src_devicep
+ };
+
+ if (check)
+ return check;
+
+ if (depobj_count > 0 && depobj_list != NULL)
+ {
+ /* Encode the depobj list in libgomp's internal GOMP_task depend-array
+    format (leading count/kind slots, entries from index 5 on).  */
+ flags |= GOMP_TASK_FLAG_DEPEND;
+ depend[0] = 0;
+ depend[1] = (void *) (uintptr_t) depobj_count;
+ depend[2] = depend[3] = depend[4] = 0;
+ for (i = 0; i < depobj_count; ++i)
+ depend[i + 5] = &depobj_list[i];
}
-  if (src_devicep != NULL && dst_devicep != NULL && src_devicep != dst_devicep)
-    return EINVAL;
+ GOMP_task (omp_target_memcpy_rect_async_helper, &s, NULL, sizeof (s),
+ __alignof__ (s), true, flags, depend, 0, NULL);
-  if (src_devicep)
-    gomp_mutex_lock (&src_devicep->lock);
-  else if (dst_devicep)
-    gomp_mutex_lock (&dst_devicep->lock);
-  int ret = omp_target_memcpy_rect_worker (dst, src, element_size, num_dims,
-					   volume, dst_offsets, src_offsets,
-					   dst_dimensions, src_dimensions,
-					   dst_devicep, src_devicep);
-  if (src_devicep)
-    gomp_mutex_unlock (&src_devicep->lock);
-  else if (dst_devicep)
-    gomp_mutex_unlock (&dst_devicep->lock);
-  return ret;
+ return 0;
}
int
omp_target_associate_ptr (const void *host_ptr, const void *device_ptr,
size_t size, size_t device_offset, int device_num)
{
- if (device_num == gomp_get_num_devices ())
+ if (device_num == omp_initial_device
+ || device_num == gomp_get_num_devices ())
return EINVAL;
- if (device_num < 0)
- return EINVAL;
-
- struct gomp_device_descr *devicep = resolve_device (device_num);
+ struct gomp_device_descr *devicep = resolve_device (device_num, false);
if (devicep == NULL)
return EINVAL;
int
omp_target_disassociate_ptr (const void *ptr, int device_num)
{
- if (device_num == gomp_get_num_devices ())
- return EINVAL;
-
- if (device_num < 0)
- return EINVAL;
-
- struct gomp_device_descr *devicep = resolve_device (device_num);
+ struct gomp_device_descr *devicep = resolve_device (device_num, false);
if (devicep == NULL)
return EINVAL;
return ret;
}
+/* OpenMP 5.1: return the device address associated with host pointer PTR
+   on device DEVICE_NUM, or NULL if PTR is not mapped there.  For the
+   initial (host) device, and for devices sharing memory with the host or
+   lacking OpenMP 4.0 offload support, PTR itself is returned.  */
+void *
+omp_get_mapped_ptr (const void *ptr, int device_num)
+{
+ if (device_num == omp_initial_device
+ || device_num == omp_get_initial_device ())
+ return (void *) ptr;
+
+ struct gomp_device_descr *devicep = resolve_device (device_num, false);
+ if (devicep == NULL)
+ return NULL;
+
+ if (!(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
+ || devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
+ return (void *) ptr;
+
+ gomp_mutex_lock (&devicep->lock);
+
+ struct splay_tree_s *mem_map = &devicep->mem_map;
+ struct splay_tree_key_s cur_node;
+ void *ret = NULL;
+
+ /* Zero-length key: look PTR up as a single address, including
+    zero-length array section mappings.  */
+ cur_node.host_start = (uintptr_t) ptr;
+ cur_node.host_end = cur_node.host_start;
+ splay_tree_key n = gomp_map_0len_lookup (mem_map, &cur_node);
+
+ if (n)
+ {
+ /* Preserve PTR's offset into the containing mapping when
+    translating to the device address.  */
+ uintptr_t offset = cur_node.host_start - n->host_start;
+ ret = (void *) (n->tgt->tgt_start + n->tgt_offset + offset);
+ }
+
+ gomp_mutex_unlock (&devicep->lock);
+
+ return ret;
+}
+
+/* OpenMP 5.1: return true if the storage indicated by PTR and SIZE is
+   accessible from device DEVICE_NUM.  The host device is always
+   accessible; otherwise only shared-memory devices report accessibility.
+   NOTE(review): PTR and SIZE are currently not consulted at all - the
+   answer is purely capability-based until unified shared memory support
+   lands (see TODO below).  */
+int
+omp_target_is_accessible (const void *ptr, size_t size, int device_num)
+{
+ if (device_num == omp_initial_device
+ || device_num == gomp_get_num_devices ())
+ return true;
+
+ struct gomp_device_descr *devicep = resolve_device (device_num, false);
+ if (devicep == NULL)
+ return false;
+
+ /* TODO: Unified shared memory must be handled when available. */
+
+ return devicep->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM;
+}
+
int
omp_pause_resource (omp_pause_resource_t kind, int device_num)
{
(void) kind;
-  if (device_num == gomp_get_num_devices ())
+ /* Both omp_initial_device and the device number equal to the device
+    count name the host device.  */
+ if (device_num == omp_initial_device
+ || device_num == gomp_get_num_devices ())
return gomp_pause_host ();
-  if (device_num < 0 || device_num >= num_devices_openmp)
+
+ struct gomp_device_descr *devicep = resolve_device (device_num, false);
+ if (devicep == NULL)
return -1;
+
/* Do nothing for target devices for now. */
return 0;
}
DLSYM (free);
DLSYM (dev2host);
DLSYM (host2dev);
+ DLSYM_OPT (memcpy2d, memcpy2d);
+ DLSYM_OPT (memcpy3d, memcpy3d);
device->capabilities = device->get_caps_func ();
if (device->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
{
if (gomp_load_plugin_for_device (¤t_device, plugin_name))
{
- new_num_devs = current_device.get_num_devices_func ();
- if (new_num_devs >= 1)
+ int omp_req = omp_requires_mask & ~GOMP_REQUIRES_TARGET_USED;
+ new_num_devs = current_device.get_num_devices_func (omp_req);
+ if (gomp_debug_var > 0 && new_num_devs < 0)
+ {
+ bool found = false;
+ int type = current_device.get_type_func ();
+ for (int img = 0; img < num_offload_images; img++)
+ if (type == offload_images[img].type)
+ found = true;
+ if (found)
+ {
+ char buf[sizeof ("unified_address, unified_shared_memory, "
+ "reverse_offload")];
+ gomp_requires_to_name (buf, sizeof (buf), omp_req);
+ char *name = (char *) malloc (cur_len + 1);
+ memcpy (name, cur, cur_len);
+ name[cur_len] = '\0';
+ gomp_debug (1,
+ "%s devices present but 'omp requires %s' "
+ "cannot be fulfilled\n", name, buf);
+ free (name);
+ }
+ }
+ else if (new_num_devs >= 1)
{
/* Augment DEVICES and NUM_DEVICES. */
/* current_device.capabilities has already been set. */
current_device.type = current_device.get_type_func ();
current_device.mem_map.root = NULL;
+ current_device.mem_map_rev.root = NULL;
current_device.state = GOMP_DEVICE_UNINITIALIZED;
for (i = 0; i < new_num_devs; i++)
{
if (devs[i].capabilities & GOMP_OFFLOAD_CAP_OPENACC_200)
goacc_register (&devs[i]);
}
+ if (gomp_global_icv.default_device_var == INT_MIN)
+ {
+ /* This implies OMP_TARGET_OFFLOAD=mandatory. */
+ struct gomp_icv_list *none;
+ none = gomp_get_initial_icv_item (GOMP_DEVICE_NUM_FOR_NO_SUFFIX);
+ gomp_global_icv.default_device_var = (num_devs_openmp
+ ? 0 : omp_invalid_device);
+ none->icvs.default_device_var = gomp_global_icv.default_device_var;
+ }
num_devices = num_devs;
num_devices_openmp = num_devs_openmp;