hsa_executable_t executable = { 0 };
hsa_region_t kernargs_region = { 0 };
+hsa_region_t heap_region = { 0 };
uint32_t kernarg_segment_size = 0;
uint32_t group_segment_size = 0;
uint32_t private_segment_size = 0;
hsa_signal_t *signal);
hsa_status_t (*hsa_memory_allocate_fn) (hsa_region_t region, size_t size,
void **ptr);
+ hsa_status_t (*hsa_memory_assign_agent_fn) (void *ptr, hsa_agent_t agent,
+ hsa_access_permission_t access);
hsa_status_t (*hsa_memory_copy_fn) (void *dst, const void *src,
size_t size);
hsa_status_t (*hsa_memory_free_fn) (void *ptr);
DLSYM_FN (hsa_executable_freeze)
DLSYM_FN (hsa_signal_create)
DLSYM_FN (hsa_memory_allocate)
+ DLSYM_FN (hsa_memory_assign_agent)
DLSYM_FN (hsa_memory_copy)
DLSYM_FN (hsa_memory_free)
DLSYM_FN (hsa_signal_destroy)
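+
+/* DLSYM_FN is defined elsewhere in this file; it wraps a dlsym lookup
+   that fills in the matching hsa_fns member, something like this sketch
+   (not the actual definition):
+
+     #define DLSYM_FN(function) \
+       hsa_fns.function##_fn = dlsym (handle, #function);
+
+   so listing hsa_memory_assign_agent here is all that is needed to make
+   the new entry point callable through hsa_fns. */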
suitable one has been found. */
static hsa_status_t
-get_kernarg_region (hsa_region_t region, void *data __attribute__ ((unused)))
+get_memory_region (hsa_region_t region, hsa_region_t *retval,
+ hsa_region_global_flag_t kind)
{
/* Reject non-global regions. */
hsa_region_segment_t segment;
hsa_region_global_flag_t flags;
hsa_fns.hsa_region_get_info_fn (region, HSA_REGION_INFO_GLOBAL_FLAGS,
&flags);
- if (flags & HSA_REGION_GLOBAL_FLAG_KERNARG)
+ if (flags & kind)
{
- kernargs_region = region;
+ *retval = region;
return HSA_STATUS_INFO_BREAK;
}
return HSA_STATUS_SUCCESS;
}
+
+static hsa_status_t
+get_kernarg_region (hsa_region_t region, void *data __attribute__((unused)))
+{
+ return get_memory_region (region, &kernargs_region,
+ HSA_REGION_GLOBAL_FLAG_KERNARG);
+}
+
+static hsa_status_t
+get_heap_region (hsa_region_t region, void *data __attribute__((unused)))
+{
+ return get_memory_region (region, &heap_region,
+ HSA_REGION_GLOBAL_FLAG_COARSE_GRAINED);
+}
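+
+/* A note on the two region kinds selected above:
+   HSA_REGION_GLOBAL_FLAG_KERNARG marks fine-grained memory suitable for
+   kernel arguments, which the host can read and write directly, while
+   HSA_REGION_GLOBAL_FLAG_COARSE_GRAINED marks memory that is device-local
+   on discrete GPUs.  A coarse-grained allocation must be assigned to an
+   agent with hsa_memory_assign_agent before that agent can use it, and
+   the host may only reach it via hsa_memory_copy.  Returning
+   HSA_STATUS_INFO_BREAK from the callback stops hsa_agent_iterate_regions
+   early and propagates that status to the caller, which is why
+   init_device below accepts both HSA_STATUS_SUCCESS and
+   HSA_STATUS_INFO_BREAK. */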
+
/* Initialize the HSA Runtime library and GPU device. */
static void
NULL),
status == HSA_STATUS_SUCCESS || status == HSA_STATUS_INFO_BREAK,
"Locate kernargs memory");
+
+ /* Select a memory region for the kernel heap.
+     The callback function get_heap_region does the selection. */
+ XHSA_CMP (hsa_fns.hsa_agent_iterate_regions_fn (device, get_heap_region,
+ NULL),
+ status == HSA_STATUS_SUCCESS || status == HSA_STATUS_INFO_BREAK,
+ "Locate device memory");
}
__flat_scalar GCN address space). */
static void *
-device_malloc (size_t size)
+device_malloc (size_t size, hsa_region_t region)
{
void *result;
- XHSA (hsa_fns.hsa_memory_allocate_fn (kernargs_region, size, &result),
+ XHSA (hsa_fns.hsa_memory_allocate_fn (region, size, &result),
"Allocate device memory");
return result;
}
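+
+/* With the region a parameter, each caller now chooses the placement of
+   its allocation, as in main below:
+
+     struct kernargs *kernargs = device_malloc (sizeof (*kernargs),
+                                                kernargs_region);
+     struct heap *heap = device_malloc (heap_size, heap_region);
+
+   so the host-written kernel arguments stay fine-grained while the heap
+   moves to device-local memory. */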
} queue[1024];
unsigned int consumed;
} output_data;
-
- struct heap
- {
- int64_t size;
- char data[0];
- } heap;
};
+
+struct heap
+{
+ int64_t size;
+ char data[0];
+};
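+
+/* The heap descriptor is now a file-scope type rather than a member of
+   struct kernargs: the heap itself no longer lives inside the
+   fine-grained kernargs allocation but in a separate coarse-grained
+   allocation, so the host only ever refers to it through a pointer.
+   "char data[0]" is the GNU zero-length array idiom for a trailing,
+   dynamically-sized payload. */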
+
/* Print any console output from the kernel.
We print all entries from "consumed" to the next entry without a "written"
flag, or until "next_output" is reached. The buffer is circular, but the
/* Allocate device memory for both function parameters and the argv
data. */
- size_t heap_size = 10 * 1024 * 1024; /* 10MB. */
- struct kernargs *kernargs = device_malloc (sizeof (*kernargs) + heap_size);
+ struct kernargs *kernargs = device_malloc (sizeof (*kernargs),
+ kernargs_region);
struct argdata
{
int64_t argv_data[kernel_argc];
char strings[args_size];
- } *args = device_malloc (sizeof (struct argdata));
+ } *args = device_malloc (sizeof (struct argdata), kernargs_region);
+
+ size_t heap_size = 10 * 1024 * 1024; /* 10MB. */
+ struct heap *heap = device_malloc (heap_size, heap_region);
+ XHSA (hsa_fns.hsa_memory_assign_agent_fn (heap, device,
+ HSA_ACCESS_PERMISSION_RW),
+ "Assign heap to device agent");
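+
+  /* hsa_memory_assign_agent hands ownership of a coarse-grained
+     allocation to one agent with the given permissions.  Once the heap
+     is assigned to the GPU, the host no longer dereferences it directly;
+     all further host-side access goes through hsa_memory_copy. */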
/* Write the data to the target. */
kernargs->argc = kernel_argc;
memcpy (&args->strings[offset], kernel_argv[i], arg_len + 1);
offset += arg_len;
}
- kernargs->heap_ptr = (int64_t) &kernargs->heap;
- kernargs->heap.size = heap_size;
+ kernargs->heap_ptr = (int64_t) heap;
+ hsa_fns.hsa_memory_copy_fn (&heap->size, &heap_size, sizeof (heap_size));
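+
+  /* The heap now lives in device memory that the host cannot simply
+     store to, so its size field is written with hsa_memory_copy rather
+     than by the direct assignment used before; the device-side address
+     itself is still passed to the kernel as a plain integer. */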
/* Run constructors on the GPU. */
run (init_array_kernel, kernargs);
hsa_signal_t *signal);
hsa_status_t (*hsa_memory_allocate_fn) (hsa_region_t region, size_t size,
void **ptr);
+ hsa_status_t (*hsa_memory_assign_agent_fn) (void *ptr, hsa_agent_t agent,
+ hsa_access_permission_t access);
hsa_status_t (*hsa_memory_copy_fn)(void *dst, const void *src, size_t size);
hsa_status_t (*hsa_memory_free_fn) (void *ptr);
hsa_status_t (*hsa_signal_destroy_fn) (hsa_signal_t signal);
/* The HSA memory region from which to allocate kernel arguments. */
hsa_region_t kernarg_region;
+ /* The HSA memory region from which to allocate device data. */
+ hsa_region_t data_region;
+
/* Read-write lock that protects kernels which are running or about to be run
from interference with loading and unloading of images. Needs to be
locked for reading while a kernel is being run, and for writing if the
DLSYM_FN (hsa_executable_freeze)
DLSYM_FN (hsa_signal_create)
DLSYM_FN (hsa_memory_allocate)
+ DLSYM_FN (hsa_memory_assign_agent)
DLSYM_FN (hsa_memory_copy)
DLSYM_FN (hsa_memory_free)
DLSYM_FN (hsa_signal_destroy)
}
/* Callback of hsa_agent_iterate_regions. Determine if a memory REGION can be
-   used for kernarg allocations and if so write it to the memory pointed to by
-   DATA and break the query. */
+   used for allocations of KIND and if so write it to the memory pointed to by
+   RETVAL and break the query. */
static hsa_status_t
-get_kernarg_memory_region (hsa_region_t region, void *data)
+get_memory_region (hsa_region_t region, hsa_region_t *retval,
+ hsa_region_global_flag_t kind)
{
hsa_status_t status;
hsa_region_segment_t segment;
&flags);
if (status != HSA_STATUS_SUCCESS)
return status;
- if (flags & HSA_REGION_GLOBAL_FLAG_KERNARG)
+ if (flags & kind)
{
- hsa_region_t *ret = (hsa_region_t *) data;
- *ret = region;
+ *retval = region;
return HSA_STATUS_INFO_BREAK;
}
return HSA_STATUS_SUCCESS;
}
+
+static hsa_status_t
+get_kernarg_memory_region (hsa_region_t region, void *data)
+{
+ return get_memory_region (region, (hsa_region_t *)data,
+ HSA_REGION_GLOBAL_FLAG_KERNARG);
+}
+
+static hsa_status_t
+get_data_memory_region (hsa_region_t region, void *data)
+{
+ return get_memory_region (region, (hsa_region_t *)data,
+ HSA_REGION_GLOBAL_FLAG_COARSE_GRAINED);
+}
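+
+/* As on the gcn-run.c side, kernel arguments stay in a fine-grained
+   KERNARG region, because the host writes them in place before each
+   launch, while all other device data now comes from the coarse-grained
+   region, which is device-local on discrete GCN cards and therefore
+   avoids crossing the PCIe bus on every GPU access. */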
+
/* Part of the libgomp plugin interface. Return the number of HSA devices on
the system. */
GOMP_OFFLOAD_get_property (int device, int prop)
{
struct agent_info *agent = get_agent_info (device);
- hsa_region_t region = agent->kernarg_region;
+ hsa_region_t region = agent->data_region;
union gomp_device_property_value propval = { .val = 0 };
HSA_DEBUG ("Selected kernel arguments memory region:\n");
dump_hsa_region (agent->kernarg_region, NULL);
+  agent->data_region.handle = (uint64_t) -1;
+  status = hsa_fns.hsa_agent_iterate_regions_fn (agent->id,
+                                                 get_data_memory_region,
+                                                 &agent->data_region);
+  if (status != HSA_STATUS_SUCCESS && status != HSA_STATUS_INFO_BREAK)
+    hsa_error ("Scanning memory regions failed", status);
+  if (agent->data_region.handle == (uint64_t) -1)
+    {
+      GOMP_PLUGIN_error ("Could not find suitable memory region for device "
+                         "data");
+      return false;
+    }
+ HSA_DEBUG ("Selected device data memory region:\n");
+ dump_hsa_region (agent->data_region, NULL);
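+
+  /* As with the kernarg region above, an invalid handle of (uint64_t) -1
+     serves as a sentinel for "the iteration finished without the callback
+     finding a matching region". */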
+
HSA_DEBUG ("GCN agent %d initialized\n", n);
agent->initialized = true;
if (!module->heap)
{
- status = hsa_fns.hsa_memory_allocate_fn (agent->kernarg_region,
+ status = hsa_fns.hsa_memory_allocate_fn (agent->data_region,
gcn_kernel_heap_size,
(void**)&module->heap);
if (status != HSA_STATUS_SUCCESS)
goto fail;
}
- module->heap->size = gcn_kernel_heap_size;
+ status = hsa_fns.hsa_memory_assign_agent_fn
+ (module->heap, agent->id, HSA_ACCESS_PERMISSION_RW);
+ if (status != HSA_STATUS_SUCCESS)
+ {
+ hsa_error ("Could not assign GCN heap memory to device", status);
+ goto fail;
+ }
+
+ hsa_fns.hsa_memory_copy_fn (&module->heap->size,
+ &gcn_kernel_heap_size,
+ sizeof (gcn_kernel_heap_size));
}
}
size = 4;
void *ptr;
- hsa_status_t status = hsa_fns.hsa_memory_allocate_fn (agent->kernarg_region,
+ hsa_status_t status = hsa_fns.hsa_memory_allocate_fn (agent->data_region,
size, &ptr);
if (status != HSA_STATUS_SUCCESS)
{
return NULL;
}
+ status = hsa_fns.hsa_memory_assign_agent_fn (ptr, agent->id,
+ HSA_ACCESS_PERMISSION_RW);
+ if (status != HSA_STATUS_SUCCESS)
+ {
+ hsa_error ("Could not assign data memory to device", status);
+      hsa_fns.hsa_memory_free_fn (ptr);
+      return NULL;
+ }
+
struct goacc_thread *thr = GOMP_PLUGIN_goacc_thread ();
bool profiling_dispatch_p
= __builtin_expect (thr != NULL && thr->prof_info != NULL, false);
return true;
}
-/* Returns true if PTR falls within the bounds of any loaded kernel image. */
-
-static bool
-image_address_p (struct agent_info *agent, const void *ptr)
-{
- Elf64_Addr addr = (Elf64_Addr)ptr;
- if (agent->module)
- {
- if (addr >= agent->module->phys_address_start
- && addr <= agent->module->phys_address_end)
- return true;
- }
- return false;
-}
-
struct copy_data
{
void *dst;
const void *src;
size_t len;
- bool use_hsa_memory_copy;
bool using_src_copy;
struct goacc_asyncqueue *aq;
};
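+
+/* The use_hsa_memory_copy flag is gone because there is no longer a
+   choice to make: device allocations now come from coarse-grained
+   memory, which the host thread cannot touch with memcpy, so every
+   deferred copy goes through hsa_memory_copy. */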
HSA_DEBUG ("Async thread %d:%d: Copying %zu bytes from (%p) to (%p)\n",
data->aq->agent->device_id, data->aq->id, data->len, data->src,
data->dst);
- if (data->use_hsa_memory_copy)
- hsa_fns.hsa_memory_copy_fn (data->dst, data->src, data->len);
- else
- memcpy (data->dst, data->src, data->len);
+ hsa_fns.hsa_memory_copy_fn (data->dst, data->src, data->len);
if (data->using_src_copy)
free ((void *) data->src);
free (data);
static void
queue_push_copy (struct goacc_asyncqueue *aq, void *dst, const void *src,
- size_t len, bool use_hsa_memory_copy, bool using_src_copy)
+ size_t len, bool using_src_copy)
{
if (DEBUG_QUEUES)
HSA_DEBUG ("queue_push_copy %d:%d: %zu bytes from (%p) to (%p)\n",
data->dst = dst;
data->src = src;
data->len = len;
- data->use_hsa_memory_copy = use_hsa_memory_copy;
data->using_src_copy = using_src_copy;
data->aq = aq;
queue_push_callback (aq, copy_data, data);
{
HSA_DEBUG ("Copying %zu bytes from device %d (%p) to host (%p)\n", n, device,
src, dst);
-
- /* memcpy only works for addresses allocated with hsa_memory_allocate,
- but hsa_memory_copy seems unable to read from .rodata variables. */
- if (image_address_p (get_agent_info (device), src))
- hsa_fns.hsa_memory_copy_fn (dst, src, n);
- else
- memcpy (dst, src, n);
+ hsa_fns.hsa_memory_copy_fn (dst, src, n);
return true;
}
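+
+/* The image_address_p test is no longer needed here: previously only
+   addresses inside a loaded kernel image required hsa_memory_copy while
+   other allocations were host-accessible, but with coarse-grained
+   allocations no device address can be assumed reachable by a host
+   memcpy, so hsa_memory_copy is used for every transfer (including the
+   dev2dev case below). */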
{
HSA_DEBUG ("Copying %zu bytes from host (%p) to device %d (%p)\n", n, src,
device, dst);
- /* memcpy only works for addresses allocated with hsa_memory_allocate,
- but hsa_memory_copy seems unable to read from .rodata variables. */
- if (image_address_p (get_agent_info (device), dst))
- hsa_fns.hsa_memory_copy_fn (dst, src, n);
- else
- memcpy (dst, src, n);
+ hsa_fns.hsa_memory_copy_fn (dst, src, n);
return true;
}
{
struct agent_info *agent = get_agent_info (device);
maybe_init_omp_async (agent);
- queue_push_copy (agent->omp_async_queue, dst, src, n, false, false);
+ queue_push_copy (agent->omp_async_queue, dst, src, n, false);
return true;
}
HSA_DEBUG ("Copying %zu bytes from device %d (%p) to device %d (%p)\n", n,
device, src, device, dst);
- /* We can assume that dev2dev moves are always within allocated memory. */
- memcpy (dst, src, n);
+ hsa_fns.hsa_memory_copy_fn (dst, src, n);
return true;
}
void **ind_da = GOMP_OFFLOAD_alloc_by_agent (kernel->agent,
sizeof (void*) * mapnum);
for (size_t i = 0; i < mapnum; i++)
- ind_da[i] = devaddrs[i] ? devaddrs[i] : hostaddrs[i];
+ hsa_fns.hsa_memory_copy_fn (&ind_da[i],
+ devaddrs[i] ? &devaddrs[i] : &hostaddrs[i],
+ sizeof (void *));
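+
+  /* ind_da is itself device memory, so the table of indirect addresses
+     cannot be filled in with plain stores; each pointer value is copied
+     in with a separate hsa_memory_copy.  Staging the whole table in host
+     memory and issuing a single copy would presumably also work, at the
+     cost of a temporary buffer. */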
struct hsa_kernel_description *hsa_kernel_desc = NULL;
for (unsigned i = 0; i < kernel->module->image_desc->kernel_count; i++)
But, that is probably correct. */
void *src_copy = GOMP_PLUGIN_malloc (n);
memcpy (src_copy, src, n);
- queue_push_copy (aq, dst, src_copy, n, image_address_p (agent, dst), true);
+  queue_push_copy (aq, dst, src_copy, n, true);
return true;
}
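+
+/* The snapshot in src_copy is still required: the copy is deferred until
+   the async queue reaches it, by which time the caller's src buffer may
+   already have been reused or freed.  Passing true for using_src_copy
+   makes the queued callback free the snapshot after the copy. */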
{
struct agent_info *agent = get_agent_info (device);
assert (agent == aq->agent);
- queue_push_copy (aq, dst, src, n, image_address_p (agent, src), false);
+ queue_push_copy (aq, dst, src, n, false);
return true;
}