-/* Copyright (C) 2005-2020 Free Software Foundation, Inc.
+/* Copyright (C) 2005-2024 Free Software Foundation, Inc.
Contributed by Richard Henderson <rth@redhat.com>.
This file is part of the GNU Offloading and Multi Processing Library
/* alloc.c */
#if defined(HAVE_ALIGNED_ALLOC) \
- || defined(HAVE__ALIGNED_MALLOC) \
|| defined(HAVE_POSIX_MEMALIGN) \
|| defined(HAVE_MEMALIGN)
/* Defined if gomp_aligned_alloc doesn't use fallback version
#define GOMP_HAVE_EFFICIENT_ALIGNED_ALLOC 1
#endif
+#if defined(GOMP_HAVE_EFFICIENT_ALIGNED_ALLOC) && !defined(__AMDGCN__)
+#define GOMP_USE_ALIGNED_WORK_SHARES 1
+#endif
+
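/* An illustrative sketch, not the libgomp implementation: when none of the
   primitives tested above are available, gomp_aligned_alloc has to fall back
   to over-allocating with plain malloc and remembering the original pointer,
   which is why a matching gomp_aligned_free is required.  Something along
   these lines (assumes <stdlib.h>/<stdint.h> and a power-of-two AL):  */

static inline void *
example_aligned_alloc (size_t al, size_t size)
{
  void *orig = malloc (size + al + sizeof (void *));
  if (orig == NULL)
    return NULL;
  uintptr_t p = ((uintptr_t) orig + sizeof (void *) + al - 1)
		& ~((uintptr_t) al - 1);
  ((void **) p)[-1] = orig;		/* Stash the pointer malloc returned.  */
  return (void *) p;
}

static inline void
example_aligned_free (void *ptr)
{
  if (ptr)
    free (((void **) ptr)[-1]);		/* Free what was actually allocated.  */
}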
extern void *gomp_malloc (size_t) __attribute__((malloc));
extern void *gomp_malloc_cleared (size_t) __attribute__((malloc));
extern void *gomp_realloc (void *, size_t);
/* Optimized allocators for team-specific data that will die with the team. */
#ifdef __AMDGCN__
+#include "libgomp-gcn.h"
/* The arena is initialized in config/gcn/team.c. */
-#define TEAM_ARENA_SIZE 64*1024 /* Must match the value in plugin-gcn.c. */
-#define TEAM_ARENA_START 16 /* LDS offset of free pointer. */
-#define TEAM_ARENA_FREE 24 /* LDS offset of free pointer. */
-#define TEAM_ARENA_END 32 /* LDS offset of end pointer. */
static inline void * __attribute__((malloc))
team_malloc (size_t size)
{
/* While this is experimental, let's make sure we know when OOM
happens. */
- const char msg[] = "GCN team arena exhausted\n";
+ const char msg[] = "GCN team arena exhausted;"
+ " configure with GCN_TEAM_ARENA_SIZE=bytes\n";
write (2, msg, sizeof(msg)-1);
/* Fall back to using the heap (slowly). */
unsigned int shift_counts[];
};
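/* An illustrative sketch of the arena scheme behind team_malloc above
   (simplified; the real GCN code operates on the LDS-resident free and end
   pointers): allocation is a single atomic bump of the free pointer, and the
   caller falls back to the heap once the arena is exhausted.  The parameter
   names here are placeholders, not libgomp symbols.  */
static inline void *
example_arena_alloc (uintptr_t *arena_free, uintptr_t arena_end, size_t size)
{
  size = (size + 7) & ~(size_t) 7;	/* Keep returned pointers aligned.  */
  uintptr_t base = __atomic_fetch_add (arena_free, size, __ATOMIC_RELAXED);
  if (base + size > arena_end)
    return NULL;			/* Exhausted: use the heap instead.  */
  return (void *) base;
}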
+/* Like struct gomp_work_share, but containing only its first cache line
+ plus a flexible array member at the end.
+ Keep in sync with struct gomp_work_share. */
+struct gomp_work_share_1st_cacheline
+{
+ enum gomp_schedule_type sched;
+ int mode;
+ union {
+ struct {
+ long chunk_size, end, incr;
+ };
+ struct {
+ unsigned long long chunk_size_ull, end_ull, incr_ull;
+ };
+ };
+ union {
+ unsigned *ordered_team_ids;
+ struct gomp_doacross_work_share *doacross;
+ };
+ unsigned ordered_num_used, ordered_owner, ordered_cur;
+ struct gomp_work_share *next_alloc;
+ char pad[];
+};
+
struct gomp_work_share
{
/* This member records the SCHEDULE clause to be used for this construct.
are in a different cache line. */
/* This lock protects the update of the following members. */
+#ifdef GOMP_USE_ALIGNED_WORK_SHARES
gomp_mutex_t lock __attribute__((aligned (64)));
+#else
+ char pad[64 - offsetof (struct gomp_work_share_1st_cacheline, pad)];
+ gomp_mutex_t lock;
+#endif
/* This is the count of the number of threads that have exited the work
share construct. If the construct was marked nowait, they have moved on
unsigned inline_ordered_team_ids[0];
};
+extern char gomp_workshare_struct_check1
+ [offsetof (struct gomp_work_share_1st_cacheline, next_alloc)
+ == offsetof (struct gomp_work_share, next_alloc) ? 1 : -1];
+extern char gomp_workshare_struct_check2
+ [offsetof (struct gomp_work_share, lock) == 64 ? 1 : -1];
+
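/* The two extern array declarations above are the pre-C11 idiom for a
   compile-time assertion: the array size evaluates to -1, and so fails to
   compile, whenever the layout drifts.  With C11 the same checks could be
   written as _Static_assert (a sketch, not part of libgomp):

   _Static_assert (offsetof (struct gomp_work_share_1st_cacheline, next_alloc)
		   == offsetof (struct gomp_work_share, next_alloc),
		   "first-cacheline mirror out of sync");
   _Static_assert (offsetof (struct gomp_work_share, lock) == 64,
		   "lock must start at the second cache line");  */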
/* This structure contains all of the thread-local data associated with
a thread team. This is the data that must be saved when a thread
encounters a nested PARALLEL construct. */
struct target_mem_desc;
+enum gomp_icvs
+{
+ GOMP_ICV_NTEAMS = 1,
+ GOMP_ICV_SCHEDULE = 2,
+ GOMP_ICV_SCHEDULE_CHUNK_SIZE = 3,
+ GOMP_ICV_DYNAMIC = 4,
+ GOMP_ICV_TEAMS_THREAD_LIMIT = 5,
+ GOMP_ICV_THREAD_LIMIT = 6,
+ GOMP_ICV_NTHREADS = 7,
+ GOMP_ICV_NTHREADS_LIST = 8,
+ GOMP_ICV_NTHREADS_LIST_LEN = 9,
+ GOMP_ICV_BIND = 10,
+ GOMP_ICV_BIND_LIST = 11,
+ GOMP_ICV_BIND_LIST_LEN = 12,
+ GOMP_ICV_MAX_ACTIVE_LEVELS = 13,
+ GOMP_ICV_WAIT_POLICY = 14,
+ GOMP_ICV_STACKSIZE = 15,
+ GOMP_ICV_DEFAULT_DEVICE = 16,
+ GOMP_ICV_CANCELLATION = 17,
+ GOMP_ICV_DISPLAY_AFFINITY = 18,
+ GOMP_ICV_TARGET_OFFLOAD = 19,
+ GOMP_ICV_MAX_TASK_PRIORITY = 20,
+ GOMP_ICV_ALLOCATOR = 21
+};
+
+enum gomp_device_num
+{
+ GOMP_DEVICE_NUM_FOR_DEV = -1,
+ GOMP_DEVICE_NUM_FOR_ALL = -2,
+ GOMP_DEVICE_NUM_FOR_NO_SUFFIX = -3
+};
+
/* These are the OpenMP 4.0 Internal Control Variables described in
section 2.3.1. Those described as having one copy per task are
stored within the structure; those described as having one copy
int default_device_var;
unsigned int thread_limit_var;
bool dyn_var;
- bool nest_var;
+ unsigned char max_active_levels_var;
char bind_var;
/* Internal ICV. */
struct target_mem_desc *target_data;
};
+enum gomp_env_suffix
+{
+ GOMP_ENV_SUFFIX_UNKNOWN = 0,
+ GOMP_ENV_SUFFIX_NONE = 1,
+ GOMP_ENV_SUFFIX_DEV = 2,
+ GOMP_ENV_SUFFIX_ALL = 4,
+ GOMP_ENV_SUFFIX_DEV_X = 8
+};
+
+/* Struct that contains all ICVs for which we need to store initial values.
+ Keeping the initial values is needed for omp_display_env. Moreover, the
+ initial _DEV and _ALL variants of the environment variables are also used
+ to determine the values actually used for devices and for the host. */
+struct gomp_initial_icvs
+{
+ unsigned long *nthreads_var_list;
+ char *bind_var_list;
+ unsigned long nthreads_var;
+ unsigned long nthreads_var_list_len;
+ unsigned long bind_var_list_len;
+ unsigned long stacksize;
+ int run_sched_chunk_size;
+ int default_device_var;
+ int nteams_var;
+ int teams_thread_limit_var;
+ int wait_policy;
+ unsigned int thread_limit_var;
+ enum gomp_schedule_type run_sched_var;
+ bool dyn_var;
+ unsigned char max_active_levels_var;
+ char bind_var;
+};
+
+struct gomp_default_icv
+{
+ unsigned long nthreads_var;
+ enum gomp_schedule_type run_sched_var;
+ int run_sched_chunk_size;
+ int default_device_var;
+ unsigned int thread_limit_var;
+ int nteams_var;
+ int teams_thread_limit_var;
+ bool dyn_var;
+ unsigned char max_active_levels_var;
+ char bind_var;
+};
+
+/* DEVICE_NUM "-1" is reserved for "_DEV" icvs.
+ DEVICE_NUM "-2" is reserved for "_ALL" icvs.
+ DEVICE_NUM "-3" is reserved for ICVs without suffix.
+ Non-negative DEVICE_NUM is for "_DEV_X" icvs. */
+struct gomp_icv_list
+{
+ int device_num;
+ uint32_t flags;
+ struct gomp_initial_icvs icvs;
+ struct gomp_icv_list *next;
+};
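/* FLAGS presumably records which ICVs were explicitly set through environment
   variables.  A hedged sketch of one plausible encoding, a bit per
   enum gomp_icvs value, purely for illustration; the authoritative encoding
   lives in env.c and is queried through gomp_get_icv_flag below.  */
static inline bool
example_icv_flag_set (uint32_t flags, enum gomp_icvs icv)
{
  return (flags & (1U << (icv - 1))) != 0;
}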
+
+struct gomp_offload_icvs
+{
+ int device_num;
+ int default_device;
+ int nteams;
+ int teams_thread_limit;
+};
+
+struct gomp_offload_icv_list
+{
+ int device_num;
+ struct gomp_offload_icvs icvs;
+ struct gomp_offload_icv_list *next;
+};
+
+enum gomp_target_offload_t
+{
+ GOMP_TARGET_OFFLOAD_DEFAULT,
+ GOMP_TARGET_OFFLOAD_MANDATORY,
+ GOMP_TARGET_OFFLOAD_DISABLED
+};
+
+#define gomp_supported_active_levels UCHAR_MAX
+
extern struct gomp_task_icv gomp_global_icv;
#ifndef HAVE_SYNC_BUILTINS
extern gomp_mutex_t gomp_managed_threads_lock;
#endif
-extern unsigned long gomp_max_active_levels_var;
extern bool gomp_cancel_var;
+extern enum gomp_target_offload_t gomp_target_offload_var;
extern int gomp_max_task_priority_var;
extern unsigned long long gomp_spin_count_var, gomp_throttled_spin_count_var;
extern unsigned long gomp_available_cpus, gomp_managed_threads;
extern void **gomp_places_list;
extern unsigned long gomp_places_list_len;
extern unsigned int gomp_num_teams_var;
+extern int gomp_nteams_var;
+extern int gomp_teams_thread_limit_var;
extern int gomp_debug_var;
extern bool gomp_display_affinity_var;
extern char *gomp_affinity_format_var;
extern size_t gomp_affinity_format_len;
extern uintptr_t gomp_def_allocator;
+extern const struct gomp_default_icv gomp_default_icv_values;
+extern struct gomp_icv_list *gomp_initial_icv_list;
+extern struct gomp_offload_icv_list *gomp_offload_icv_list;
extern int goacc_device_num;
extern char *goacc_device_type;
extern int goacc_default_dims[GOMP_DIM_MAX];
but not yet completed. Once that completes, they will be readded
into the queues as GOMP_TASK_WAITING in order to perform the var
unmapping. */
- GOMP_TASK_ASYNC_RUNNING
+ GOMP_TASK_ASYNC_RUNNING,
+ /* Task that has finished executing but is waiting for its
+ completion event to be fulfilled. */
+ GOMP_TASK_DETACHED
};
struct gomp_task_depend_entry
struct gomp_task_depend_entry *prev;
/* Task that provides the dependency in ADDR. */
struct gomp_task *task;
- /* Depend entry is of type "IN". */
- bool is_in;
+ /* Depend entry is of type "IN" (1) or "INOUTSET" (2). */
+ unsigned char is_in;
bool redundant;
bool redundant_out;
};
struct gomp_dependers_vec *dependers;
struct htab *depend_hash;
struct gomp_taskwait *taskwait;
+ /* Last depend({,in}out:omp_all_memory) child if any. */
+ struct gomp_task *depend_all_memory;
/* Number of items in DEPEND. */
size_t depend_count;
/* Number of tasks this task depends on. Once this counter reaches
into the various queues to be scheduled. */
size_t num_dependees;
+ union {
+ /* Valid only if deferred_p is false. */
+ gomp_sem_t *completion_sem;
+ /* Valid only if deferred_p is true. Set to the team that executes the
+ task if the task is detached and the completion event has yet to be
+ fulfilled. */
+ struct gomp_team *detach_team;
+ };
+ bool deferred_p;
+
/* Priority of this task. */
int priority;
/* The priority node for this task in each of the different queues.
int work_share_cancelled;
int team_cancelled;
+ /* Number of tasks waiting for their completion event to be fulfilled. */
+ unsigned int task_detach_count;
+
/* This array contains structures for implicit tasks. */
struct gomp_task implicit_task[];
};
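/* The detach-related members above (GOMP_TASK_DETACHED, completion_sem /
   detach_team, deferred_p and task_detach_count) implement the OpenMP 5.0
   'detach' clause: the task body may finish, but the task only completes once
   its event is fulfilled.  A minimal user-level illustration, using only the
   public OpenMP API (start_async_operation is a placeholder for user code):

     omp_event_handle_t ev;
     #pragma omp task detach (ev)
     start_async_operation (&ev);

     // Later, e.g. from a completion callback:
     omp_fulfill_event (ev);	// The task leaves GOMP_TASK_DETACHED here.
   */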
/* User pthread thread pool */
struct gomp_thread_pool *thread_pool;
+#ifdef LIBGOMP_USE_PTHREADS
+ /* omp_get_num_teams () - 1. */
+ unsigned int num_teams;
+
+ /* omp_get_team_num (). */
+ unsigned int team_num;
+#endif
+
#if defined(LIBGOMP_USE_PTHREADS) \
&& (!defined(HAVE_TLS) \
|| !defined(__GLIBC__) \
struct gomp_team_state *,
unsigned int) __attribute__((cold));
+/* env.c */
+
+extern struct gomp_icv_list *gomp_get_initial_icv_item (int dev_num);
+extern bool gomp_get_icv_flag (uint32_t value, enum gomp_icvs icv);
+
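/* A hedged usage sketch of the two helpers above (illustration only; the real
   callers live elsewhere in libgomp): fetch the initial ICVs recorded for the
   _ALL suffix and check whether the num-teams ICV was explicitly set, falling
   back to the built-in default otherwise.  */
static inline int
example_initial_nteams_all (void)
{
  struct gomp_icv_list *all
    = gomp_get_initial_icv_item (GOMP_DEVICE_NUM_FOR_ALL);
  if (all != NULL && gomp_get_icv_flag (all->flags, GOMP_ICV_NTEAMS))
    return all->icvs.nteams_var;
  return gomp_default_icv_values.nteams_var;
}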
/* iter.c */
extern int gomp_iter_static_next (long *, long *);
extern void gomp_init_targets_once (void);
extern int gomp_get_num_devices (void);
extern bool gomp_target_task_fn (void *);
+extern void gomp_target_rev (uint64_t, uint64_t, uint64_t, uint64_t, uint64_t,
+ int, struct goacc_asyncqueue *);
/* Splay tree definitions. */
typedef struct splay_tree_node_s *splay_tree_node;
uintptr_t length;
};
-struct target_mem_desc {
- /* Reference count. */
- uintptr_t refcount;
- /* All the splay nodes allocated together. */
- splay_tree_node array;
- /* Start of the target region. */
- uintptr_t tgt_start;
- /* End of the targer region. */
- uintptr_t tgt_end;
- /* Handle to free. */
- void *to_free;
- /* Previous target_mem_desc. */
- struct target_mem_desc *prev;
- /* Number of items in following list. */
- size_t list_count;
-
- /* Corresponding target device descriptor. */
- struct gomp_device_descr *device_descr;
+struct target_mem_desc;
- /* List of target items to remove (or decrease refcount)
- at the end of region. */
- struct target_var_desc list[];
-};
+/* Special value for refcount - mask to indicate existence of special
+ values. Right now we allocate 3 bits. */
+#define REFCOUNT_SPECIAL (~(uintptr_t) 0x7)
/* Special value for refcount - infinity. */
-#define REFCOUNT_INFINITY (~(uintptr_t) 0)
+#define REFCOUNT_INFINITY (REFCOUNT_SPECIAL | 0)
/* Special value for refcount - tgt_offset contains target address of the
artificial pointer to "omp declare target link" object. */
-#define REFCOUNT_LINK (~(uintptr_t) 1)
+#define REFCOUNT_LINK (REFCOUNT_SPECIAL | 1)
+
+/* Special value for refcount - structure element sibling list items.
+ All such key refcounts have REFCOUNT_STRUCTELEM bits set, with _FLAG_FIRST
+ and _FLAG_LAST indicating first and last in the created sibling sequence. */
+#define REFCOUNT_STRUCTELEM (REFCOUNT_SPECIAL | 4)
+#define REFCOUNT_STRUCTELEM_P(V) \
+ (((V) & REFCOUNT_STRUCTELEM) == REFCOUNT_STRUCTELEM)
+/* The leading key, the one with _FLAG_FIRST set, houses the actual reference
+ count in its structelem_refcount field. The other siblings point to this
+ counter through their own structelem_refcount_ptr fields. */
+#define REFCOUNT_STRUCTELEM_FLAG_FIRST (1)
+/* The last key in the sibling sequence has this set. This is required to
+ indicate the sequence boundary, when we remove the structure sibling list
+ from the map. */
+#define REFCOUNT_STRUCTELEM_FLAG_LAST (2)
+
+#define REFCOUNT_STRUCTELEM_FIRST_P(V) \
+ (REFCOUNT_STRUCTELEM_P (V) && ((V) & REFCOUNT_STRUCTELEM_FLAG_FIRST))
+#define REFCOUNT_STRUCTELEM_LAST_P(V) \
+ (REFCOUNT_STRUCTELEM_P (V) && ((V) & REFCOUNT_STRUCTELEM_FLAG_LAST))
/* Special offset values. */
#define OFFSET_INLINED (~(uintptr_t) 0)
uintptr_t tgt_offset;
/* Reference count. */
uintptr_t refcount;
- /* Dynamic reference count. */
- uintptr_t dynamic_refcount;
+ union {
+ /* Dynamic reference count. */
+ uintptr_t dynamic_refcount;
+
+ /* Unified reference count for structure element siblings, this is used
+ when REFCOUNT_STRUCTELEM_FIRST_P(k->refcount) == true, the first sibling
+ in a structure element sibling list item sequence. */
+ uintptr_t structelem_refcount;
+
+ /* When REFCOUNT_STRUCTELEM_P (k->refcount) == true, this field points
+ into the (above) structelem_refcount field of the _FIRST splay_tree_key,
+ the first key in the created sequence. All structure element siblings
+ share a single refcount in this manner. Since these two fields won't be
+ used at the same time, they are stashed in a union. */
+ uintptr_t *structelem_refcount_ptr;
+ };
struct splay_tree_aux *aux;
};
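/* An illustrative helper (assumed, not part of libgomp; the real reference
   counting logic lives in target.c) showing how the REFCOUNT_STRUCTELEM
   encoding and the union above fit together: all siblings of a structure
   element mapping share one counter, stored in the _FLAG_FIRST key and
   reached from the other keys through structelem_refcount_ptr.  */
static inline uintptr_t *
example_refcount_slot (splay_tree_key k)
{
  if (REFCOUNT_STRUCTELEM_FIRST_P (k->refcount))
    return &k->structelem_refcount;	/* First sibling holds the count.  */
  if (REFCOUNT_STRUCTELEM_P (k->refcount))
    return k->structelem_refcount_ptr;	/* Other siblings point at it.  */
  return &k->refcount;			/* Ordinary mapping.  */
}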
#include "splay-tree.h"
+/* Reverse offload splay-tree handling (functions only). */
+
+struct reverse_splay_tree_key_s {
+ /* Address of the device object. */
+ uint64_t dev;
+ splay_tree_key k;
+};
+
+typedef struct reverse_splay_tree_node_s *reverse_splay_tree_node;
+typedef struct reverse_splay_tree_s *reverse_splay_tree;
+typedef struct reverse_splay_tree_key_s *reverse_splay_tree_key;
+
+static inline int
+reverse_splay_compare (reverse_splay_tree_key x, reverse_splay_tree_key y)
+{
+ if (x->dev < y->dev)
+ return -1;
+ if (x->dev > y->dev)
+ return 1;
+ return 0;
+}
+
+#define splay_tree_prefix reverse
+#define splay_tree_static
+#include "splay-tree.h"
+
+/* Indirect target function splay-tree handling. */
+
+struct indirect_splay_tree_key_s {
+ uint64_t host_addr, target_addr;
+};
+
+typedef struct indirect_splay_tree_node_s *indirect_splay_tree_node;
+typedef struct indirect_splay_tree_s *indirect_splay_tree;
+typedef struct indirect_splay_tree_key_s *indirect_splay_tree_key;
+
+static inline int
+indirect_splay_compare (indirect_splay_tree_key x, indirect_splay_tree_key y)
+{
+ if (x->host_addr < y->host_addr)
+ return -1;
+ if (x->host_addr > y->host_addr)
+ return 1;
+ return 0;
+}
+
+#define splay_tree_prefix indirect
+#include "splay-tree.h"
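/* Both trees above use the template-by-prefix scheme of splay-tree.h:
   defining splay_tree_prefix before the #include stamps out a distinct set of
   types named <prefix>_splay_tree_*, keyed by the corresponding
   <prefix>_splay_tree_key_s and ordered by <prefix>_splay_compare (the .c
   instantiation provides the matching functions).  A hedged usage sketch for
   the reverse-offload tree; the generated function name is determined by
   splay-tree.h, and devicep/dev_addr are placeholders:

     struct reverse_splay_tree_key_s probe = { .dev = dev_addr };
     reverse_splay_tree_key found
       = reverse_splay_tree_lookup (&devicep->mem_map_rev, &probe);
   */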
+
+struct target_mem_desc {
+ /* Reference count. */
+ uintptr_t refcount;
+ /* All the splay nodes allocated together. */
+ splay_tree_node array;
+ /* Likewise for the reverse lookup device->host for reverse offload. */
+ reverse_splay_tree_node rev_array;
+ /* Start of the target region. */
+ uintptr_t tgt_start;
+ /* End of the target region. */
+ uintptr_t tgt_end;
+ /* Handle to free. */
+ void *to_free;
+ /* Previous target_mem_desc. */
+ struct target_mem_desc *prev;
+ /* Number of items in following list. */
+ size_t list_count;
+
+ /* Corresponding target device descriptor. */
+ struct gomp_device_descr *device_descr;
+
+ /* List of target items to remove (or decrease refcount)
+ at the end of region. */
+ struct target_var_desc list[];
+};
+
+
typedef struct acc_dispatch_t
{
/* Execute. */
__typeof (GOMP_OFFLOAD_free) *free_func;
__typeof (GOMP_OFFLOAD_dev2host) *dev2host_func;
__typeof (GOMP_OFFLOAD_host2dev) *host2dev_func;
+ __typeof (GOMP_OFFLOAD_memcpy2d) *memcpy2d_func;
+ __typeof (GOMP_OFFLOAD_memcpy3d) *memcpy3d_func;
__typeof (GOMP_OFFLOAD_dev2dev) *dev2dev_func;
__typeof (GOMP_OFFLOAD_can_run) *can_run_func;
__typeof (GOMP_OFFLOAD_run) *run_func;
/* Splay tree containing information about mapped memory regions. */
struct splay_tree_s mem_map;
+ struct reverse_splay_tree_s mem_map_rev;
/* Mutex for the mutable data. */
gomp_mutex_t lock;
/* Kind of the pragma, for which gomp_map_vars () is called. */
enum gomp_map_vars_kind
{
- GOMP_MAP_VARS_OPENACC,
- GOMP_MAP_VARS_TARGET,
- GOMP_MAP_VARS_DATA,
- GOMP_MAP_VARS_ENTER_DATA
+ GOMP_MAP_VARS_OPENACC = 1,
+ GOMP_MAP_VARS_TARGET = 2,
+ GOMP_MAP_VARS_DATA = 4,
+ GOMP_MAP_VARS_ENTER_DATA = 8
};
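/* The enumerators are now distinct bits, so a pragma kind can be tested and
   combined with plain bit operations.  A small illustration (hypothetical
   helper; the real tests are open-coded elsewhere in libgomp):  */
static inline bool
example_kind_is_openacc (enum gomp_map_vars_kind kind)
{
  return (kind & GOMP_MAP_VARS_OPENACC) != 0;
}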
extern void gomp_acc_declare_allocate (bool, size_t, void **, size_t *,
struct gomp_coalesce_buf;
extern void gomp_copy_host2dev (struct gomp_device_descr *,
struct goacc_asyncqueue *, void *, const void *,
- size_t, struct gomp_coalesce_buf *);
+ size_t, bool, struct gomp_coalesce_buf *);
extern void gomp_copy_dev2host (struct gomp_device_descr *,
struct goacc_asyncqueue *, void *, const void *,
size_t);
extern void gomp_attach_pointer (struct gomp_device_descr *,
struct goacc_asyncqueue *, splay_tree,
splay_tree_key, uintptr_t, size_t,
- struct gomp_coalesce_buf *);
+ struct gomp_coalesce_buf *, bool);
extern void gomp_detach_pointer (struct gomp_device_descr *,
struct goacc_asyncqueue *, splay_tree_key,
uintptr_t, bool, struct gomp_coalesce_buf *);
-
-extern struct target_mem_desc *gomp_map_vars (struct gomp_device_descr *,
- size_t, void **, void **,
- size_t *, void *, bool,
- enum gomp_map_vars_kind);
-extern struct target_mem_desc *gomp_map_vars_async (struct gomp_device_descr *,
- struct goacc_asyncqueue *,
- size_t, void **, void **,
- size_t *, void *, bool,
- enum gomp_map_vars_kind);
-extern void gomp_unmap_vars (struct target_mem_desc *, bool);
-extern void gomp_unmap_vars_async (struct target_mem_desc *, bool,
- struct goacc_asyncqueue *);
+extern struct target_mem_desc *goacc_map_vars (struct gomp_device_descr *,
+ struct goacc_asyncqueue *,
+ size_t, void **, void **,
+ size_t *, void *, bool,
+ enum gomp_map_vars_kind);
+extern void goacc_unmap_vars (struct target_mem_desc *, bool,
+ struct goacc_asyncqueue *);
extern void gomp_init_device (struct gomp_device_descr *);
extern bool gomp_fini_device (struct gomp_device_descr *);
extern void gomp_unload_device (struct gomp_device_descr *);