#define GOMP_DEVICE_NOT_HOST 4
#define GOMP_DEVICE_NVIDIA_PTX 5
#define GOMP_DEVICE_INTEL_MIC 6
-#define GOMP_DEVICE_HSA 7
+/* #define GOMP_DEVICE_HSA 7 removed. */
#define GOMP_DEVICE_GCN 8
/* We have a compatibility issue. OpenMP 5.2 introduced
} ]
}
+# Return 1 if using a GCN offload device.
+#
+# Compiles and runs the enclosed OpenMP program on the default device;
+# the program exits 0 (so the effective target holds) exactly when
+# on_device_arch_gcn () reports a GCN (AMD GPU) device.  The result is
+# deliberately not cached (check_runtime_nocache), matching the other
+# offload_device_* checks, since device availability can vary per run.
+proc check_effective_target_offload_device_gcn { } {
+ return [check_runtime_nocache offload_device_gcn {
+ #include <omp.h>
+ #include "testsuite/libgomp.c-c++-common/on_device_arch.h"
+ int main ()
+ {
+ return !on_device_arch_gcn ();
+ }
+ } ]
+}
+
# Return 1 if at least one Nvidia GPU is accessible.
proc check_effective_target_openacc_nvidia_accel_present { } {
return GOMP_DEVICE_NVIDIA_PTX;
}
+/* Variant of device_arch () selected (via '#pragma omp declare variant',
+   device={arch(gcn)}) when executing on a GCN device: report the GCN
+   device-architecture identifier.  */
+/* static */ int
+device_arch_gcn (void)
+{
+ return GOMP_DEVICE_GCN;
+}
+
/* static */ int
device_arch_intel_mic (void)
{
}
#pragma omp declare variant (device_arch_nvptx) match(construct={target},device={arch(nvptx)})
+#pragma omp declare variant (device_arch_gcn) match(construct={target},device={arch(gcn)})
#pragma omp declare variant (device_arch_intel_mic) match(construct={target},device={arch(intel_mic)})
/* static */ int
device_arch (void)
return on_device_arch (GOMP_DEVICE_NVIDIA_PTX);
}
+/* Convenience wrapper: query on_device_arch () for the GCN (AMD GPU)
+   architecture, mirroring on_device_arch_intel_mic () etc.  */
+int
+on_device_arch_gcn ()
+{
+ return on_device_arch (GOMP_DEVICE_GCN);
+}
+
int
on_device_arch_intel_mic ()
{
--- /dev/null
+/* { dg-additional-options "-flto" } */
+/* { dg-additional-options "-foffload-options=nvptx-none=-misa=sm_35" { target { offload_target_nvptx } } } */
+/* { dg-additional-sources requires-4-aux.c } */
+
+/* Same as requires-4.c, but uses heap memory for 'a'.  */
+
+/* Check that no diagnostic is emitted by the device compiler's or the
+   host compiler's lto1.  The other file uses 'requires reverse_offload',
+   but that clause is inactive there, as that file contains no declare
+   target directives, device constructs, nor device routines.  */
+
+/* Depending on offload device capabilities, it may print something like the
+   following (only) if GOMP_DEBUG=1:
+   "devices present but 'omp requires unified_address, unified_shared_memory, reverse_offload' cannot be fulfilled"
+   and in that case does host-fallback execution.
+
+   No offload devices support USM at present, so we may verify host-fallback
+   execution by presence of separate memory spaces.  */
+
+#pragma omp requires unified_address,unified_shared_memory
+
+int *a;  /* Heap-allocated in main; device access relies on USM (or fallback).  */
+extern void foo (void);  /* Presumably provided by requires-4-aux.c (see dg-additional-sources).  */
+
+int
+main (void)
+{
+ a = (int *) __builtin_calloc (sizeof (int), 10);
+ /* NOTE(review): 'map(to: a)' maps only the pointer variable itself;
+    dereferencing a[i] on the device presumes unified shared memory, or
+    host-fallback execution when USM cannot be fulfilled.  */
+ #pragma omp target map(to: a)
+ for (int i = 0; i < 10; i++)
+ a[i] = i;
+
+ /* Verify on the host that the target region's stores are visible.  */
+ for (int i = 0; i < 10; i++)
+ if (a[i] != i)
+ __builtin_abort ();
+
+ foo ();
+ __builtin_free (a);
+ return 0;
+}