git.ipfire.org Git - thirdparty/gcc.git/blobdiff - libgomp/allocator.c
Daily bump.
[thirdparty/gcc.git] / libgomp / allocator.c
index 90f2dcb60d64b97d68a8280d58cc82587ba5370a..cdedc7d80e9e0aa040a26dfc1b23dbd1375239a2 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2020-2023 Free Software Foundation, Inc.
+/* Copyright (C) 2020-2024 Free Software Foundation, Inc.
    Contributed by Jakub Jelinek <jakub@redhat.com>.
 
    This file is part of the GNU Offloading and Multi Processing Library
 #include <dlfcn.h>
 #endif
 
+/* Track whether a Fortran scalar allocatable/pointer has been
+   allocated via 'omp allocators'/'omp allocate'.  */
+
+/* Splay-tree key: the single datum tracked per allocation is the host
+   pointer that GOMP_alloc returned to the user.  */
+struct fort_alloc_splay_tree_key_s {
+  void *ptr;
+};
+
+/* Opaque handles for the fort_alloc instantiation of splay-tree.h below.  */
+typedef struct fort_alloc_splay_tree_node_s *fort_alloc_splay_tree_node;
+typedef struct fort_alloc_splay_tree_s *fort_alloc_splay_tree;
+typedef struct fort_alloc_splay_tree_key_s *fort_alloc_splay_tree_key;
+
+/* Ordering callback for the fort_alloc splay tree: three-way compare of
+   two keys by raw pointer value, returning -1/0/1 for X below/equal/above Y.
+   (Relational comparison of unrelated pointers is relied upon here, as is
+   common practice for address-keyed trees.)  */
+static inline int
+fort_alloc_splay_compare (fort_alloc_splay_tree_key x, fort_alloc_splay_tree_key y)
+{
+  if (x->ptr < y->ptr)
+    return -1;
+  if (x->ptr > y->ptr)
+    return 1;
+  return 0;
+}
+#define splay_tree_prefix fort_alloc
+#define splay_tree_static
+#include "splay-tree.h"
+
+#define splay_tree_prefix fort_alloc
+#define splay_tree_static
+#define splay_tree_c
+#include "splay-tree.h"
+
+static struct fort_alloc_splay_tree_s fort_alloc_scalars;
+
+/* Register PTR as having been allocated by GOMP_alloc, so that a later
+   GOMP_is_alloc query can distinguish 'omp allocators'/'omp allocate'
+   memory from memory obtained some other way.  NULL is ignored.  */
+void
+GOMP_add_alloc (void *ptr)
+{
+  if (ptr == NULL)
+    return;
+  fort_alloc_splay_tree_node item;
+  /* Size the node of *this* splay-tree instantiation; the previous
+     'sizeof (struct splay_tree_node_s)' named the unrelated generic
+     splay tree pulled in via libgomp.h and hence the wrong size.  */
+  item = gomp_malloc (sizeof (*item));
+  item->key.ptr = ptr;
+  item->left = NULL;
+  item->right = NULL;
+  fort_alloc_splay_tree_insert (&fort_alloc_scalars, item);
+}
+
+/* Query whether PTR was registered via GOMP_add_alloc and, if so,
+   unregister it.  Invoked on behalf of FREE and REALLOC, either of
+   which changes the allocation status.  Returns true iff PTR was found
+   (and has now been removed from) the tracking tree.  */
+bool
+GOMP_is_alloc (void *ptr)
+{
+  struct fort_alloc_splay_tree_key_s needle;
+  fort_alloc_splay_tree_node n;
+  needle.ptr = ptr;
+  n = fort_alloc_splay_tree_lookup_node (&fort_alloc_scalars, &needle);
+  if (n)
+    {
+      /* Detach the node from the tree before releasing its storage.  */
+      fort_alloc_splay_tree_remove (&fort_alloc_scalars, &n->key);
+      free (n);
+    }
+  return n != NULL;
+}
+
+
 #define omp_max_predefined_alloc omp_thread_mem_alloc
 
+/* These macros may be overridden in config/<target>/allocator.c.
+   The defaults (no override) are to return NULL for pinned memory requests
+   and pass through to the regular OS calls otherwise.
+   The following definitions (ab)use comma operators to avoid unused
+   variable errors.  */
+#ifndef MEMSPACE_ALLOC
+#define MEMSPACE_ALLOC(MEMSPACE, SIZE, PIN) \
+  (PIN ? NULL : malloc (((void)(MEMSPACE), (SIZE))))
+#endif
+#ifndef MEMSPACE_CALLOC
+#define MEMSPACE_CALLOC(MEMSPACE, SIZE, PIN) \
+  (PIN ? NULL : calloc (1, (((void)(MEMSPACE), (SIZE)))))
+#endif
+#ifndef MEMSPACE_REALLOC
+#define MEMSPACE_REALLOC(MEMSPACE, ADDR, OLDSIZE, SIZE, OLDPIN, PIN) \
+   ((PIN) || (OLDPIN) ? NULL \
+   : realloc (ADDR, (((void)(MEMSPACE), (void)(OLDSIZE), (SIZE)))))
+#endif
+#ifndef MEMSPACE_FREE
+/* The default MEMSPACE_ALLOC never hands out pinned memory, so ADDR can
+   be released unconditionally; PIN is void-cast away like the other
+   unused arguments.  The former 'if (PIN) free (...)' had the test
+   inverted and thus leaked every non-pinned allocation.  */
+#define MEMSPACE_FREE(MEMSPACE, ADDR, SIZE, PIN) \
+  free (((void)(MEMSPACE), (void)(SIZE), (void)(PIN), (ADDR)))
+#endif
+#ifndef MEMSPACE_VALIDATE
+#define MEMSPACE_VALIDATE(MEMSPACE, ACCESS, PIN) \
+  (PIN ? 0 : ((void)(MEMSPACE), (void)(ACCESS), 1))
+#endif
+
+/* Map the predefined allocators to the correct memory space.
+   The index to this table is the omp_allocator_handle_t enum value;
+   the entries must therefore stay in that enum's declaration order.
+   When the user calls omp_alloc with a predefined allocator this
+   table determines what memory they get.  */
+static const omp_memspace_handle_t predefined_alloc_mapping[] = {
+  omp_default_mem_space,   /* omp_null_allocator doesn't actually use this. */
+  omp_default_mem_space,   /* omp_default_mem_alloc. */
+  omp_large_cap_mem_space, /* omp_large_cap_mem_alloc. */
+  omp_const_mem_space,     /* omp_const_mem_alloc. */
+  omp_high_bw_mem_space,   /* omp_high_bw_mem_alloc. */
+  omp_low_lat_mem_space,   /* omp_low_lat_mem_alloc. */
+  omp_low_lat_mem_space,   /* omp_cgroup_mem_alloc (implementation defined). */
+  omp_low_lat_mem_space,   /* omp_pteam_mem_alloc (implementation defined). */
+  omp_low_lat_mem_space,   /* omp_thread_mem_alloc (implementation defined). */
+};
+
+/* Element count of a true array (not valid on pointers/parameters).  */
+#define ARRAY_SIZE(A) (sizeof (A) / sizeof ((A)[0]))
+/* Compile-time guard: a new predefined allocator enum value without a
+   matching table entry above is caught here rather than at runtime.  */
+_Static_assert (ARRAY_SIZE (predefined_alloc_mapping)
+               == omp_max_predefined_alloc + 1,
+               "predefined_alloc_mapping must match omp_memspace_handle_t");
 enum gomp_numa_memkind_kind
 {
   GOMP_MEMKIND_NONE = 0,
@@ -118,6 +229,17 @@ gomp_init_libnuma (void)
        dlclose (handle);
       return;
     }
+  if (handle)
+    {
+      int (*numa_available) (void);
+      numa_available
+       = (__typeof (numa_available)) dlsym (handle, "numa_available");
+      if (!numa_available || numa_available () != 0)
+       {
+         dlclose (handle);
+         handle = NULL;
+       }
+    }
   if (!handle)
     {
       __atomic_store_n (&libnuma_data, data, MEMMODEL_RELEASE);
@@ -383,8 +505,8 @@ omp_init_allocator (omp_memspace_handle_t memspace, int ntraits,
     }
 #endif
 
-  /* No support for this so far.  */
-  if (data.pinned)
+  /* Reject unsupported memory spaces.  */
+  if (!MEMSPACE_VALIDATE (data.memspace, data.access, data.pinned))
     return omp_null_allocator;
 
   ret = gomp_malloc (sizeof (struct omp_allocator_data));
@@ -470,6 +592,10 @@ retry:
     new_size += new_alignment - sizeof (void *);
   if (__builtin_add_overflow (size, new_size, &new_size))
     goto fail;
+#ifdef OMP_LOW_LAT_MEM_ALLOC_INVALID
+  if (allocator == omp_low_lat_mem_alloc)
+    goto fail;
+#endif
 
   if (__builtin_expect (allocator_data
                        && allocator_data->pool_size < ~(uintptr_t) 0, 0))
@@ -522,7 +648,8 @@ retry:
        }
       else
 #endif
-       ptr = malloc (new_size);
+       ptr = MEMSPACE_ALLOC (allocator_data->memspace, new_size,
+                             allocator_data->pinned);
       if (ptr == NULL)
        {
 #ifdef HAVE_SYNC_BUILTINS
@@ -554,7 +681,14 @@ retry:
        }
       else
 #endif
-       ptr = malloc (new_size);
+       {
+         omp_memspace_handle_t memspace;
+         memspace = (allocator_data
+                     ? allocator_data->memspace
+                     : predefined_alloc_mapping[allocator]);
+         ptr = MEMSPACE_ALLOC (memspace, new_size,
+                               allocator_data && allocator_data->pinned);
+       }
       if (ptr == NULL)
        goto fail;
     }
@@ -571,36 +705,26 @@ retry:
   ((struct omp_mem_header *) ret)[-1].allocator = allocator;
   return ret;
 
-fail:
-  if (allocator_data)
+fail:;
+  int fallback = (allocator_data
+                 ? allocator_data->fallback
+                 : allocator == omp_default_mem_alloc
+                 ? omp_atv_null_fb
+                 : omp_atv_default_mem_fb);
+  switch (fallback)
     {
-      switch (allocator_data->fallback)
-       {
-       case omp_atv_default_mem_fb:
-         if ((new_alignment > sizeof (void *) && new_alignment > alignment)
-#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
-             || memkind
-#endif
-             || (allocator_data
-                 && allocator_data->pool_size < ~(uintptr_t) 0))
-           {
-             allocator = omp_default_mem_alloc;
-             goto retry;
-           }
-         /* Otherwise, we've already performed default mem allocation
-            and if that failed, it won't succeed again (unless it was
-            intermittent.  Return NULL then, as that is the fallback.  */
-         break;
-       case omp_atv_null_fb:
-         break;
-       default:
-       case omp_atv_abort_fb:
-         gomp_fatal ("Out of memory allocating %lu bytes",
-                     (unsigned long) size);
-       case omp_atv_allocator_fb:
-         allocator = allocator_data->fb_data;
-         goto retry;
-       }
+    case omp_atv_default_mem_fb:
+      allocator = omp_default_mem_alloc;
+      goto retry;
+    case omp_atv_null_fb:
+      break;
+    default:
+    case omp_atv_abort_fb:
+      gomp_fatal ("Out of memory allocating %lu bytes",
+                 (unsigned long) size);
+    case omp_atv_allocator_fb:
+      allocator = allocator_data->fb_data;
+      goto retry;
     }
   return NULL;
 }
@@ -633,6 +757,8 @@ void
 omp_free (void *ptr, omp_allocator_handle_t allocator)
 {
   struct omp_mem_header *data;
+  omp_memspace_handle_t memspace = omp_default_mem_space;
+  int pinned = false;
 
   if (ptr == NULL)
     return;
@@ -672,10 +798,13 @@ omp_free (void *ptr, omp_allocator_handle_t allocator)
          return;
        }
 #endif
+
+      memspace = allocator_data->memspace;
+      pinned = allocator_data->pinned;
     }
-#ifdef LIBGOMP_USE_MEMKIND
   else
     {
+#ifdef LIBGOMP_USE_MEMKIND
       enum gomp_numa_memkind_kind memkind = GOMP_MEMKIND_NONE;
       if (data->allocator == omp_high_bw_mem_alloc)
        memkind = GOMP_MEMKIND_HBW_PREFERRED;
@@ -691,9 +820,12 @@ omp_free (void *ptr, omp_allocator_handle_t allocator)
              return;
            }
        }
-    }
 #endif
-  free (data->ptr);
+
+      memspace = predefined_alloc_mapping[data->allocator];
+    }
+
+  MEMSPACE_FREE (memspace, data->ptr, data->size, pinned);
 }
 
 ialias (omp_free)
@@ -766,6 +898,10 @@ retry:
     goto fail;
   if (__builtin_add_overflow (size_temp, new_size, &new_size))
     goto fail;
+#ifdef OMP_LOW_LAT_MEM_ALLOC_INVALID
+  if (allocator == omp_low_lat_mem_alloc)
+    goto fail;
+#endif
 
   if (__builtin_expect (allocator_data
                        && allocator_data->pool_size < ~(uintptr_t) 0, 0))
@@ -820,7 +956,8 @@ retry:
        }
       else
 #endif
-       ptr = calloc (1, new_size);
+       ptr = MEMSPACE_CALLOC (allocator_data->memspace, new_size,
+                              allocator_data->pinned);
       if (ptr == NULL)
        {
 #ifdef HAVE_SYNC_BUILTINS
@@ -854,7 +991,14 @@ retry:
        }
       else
 #endif
-       ptr = calloc (1, new_size);
+       {
+         omp_memspace_handle_t memspace;
+         memspace = (allocator_data
+                     ? allocator_data->memspace
+                     : predefined_alloc_mapping[allocator]);
+         ptr = MEMSPACE_CALLOC (memspace, new_size,
+                                allocator_data && allocator_data->pinned);
+       }
       if (ptr == NULL)
        goto fail;
     }
@@ -871,36 +1015,26 @@ retry:
   ((struct omp_mem_header *) ret)[-1].allocator = allocator;
   return ret;
 
-fail:
-  if (allocator_data)
+fail:;
+  int fallback = (allocator_data
+                 ? allocator_data->fallback
+                 : allocator == omp_default_mem_alloc
+                 ? omp_atv_null_fb
+                 : omp_atv_default_mem_fb);
+  switch (fallback)
     {
-      switch (allocator_data->fallback)
-       {
-       case omp_atv_default_mem_fb:
-         if ((new_alignment > sizeof (void *) && new_alignment > alignment)
-#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
-             || memkind
-#endif
-             || (allocator_data
-                 && allocator_data->pool_size < ~(uintptr_t) 0))
-           {
-             allocator = omp_default_mem_alloc;
-             goto retry;
-           }
-         /* Otherwise, we've already performed default mem allocation
-            and if that failed, it won't succeed again (unless it was
-            intermittent.  Return NULL then, as that is the fallback.  */
-         break;
-       case omp_atv_null_fb:
-         break;
-       default:
-       case omp_atv_abort_fb:
-         gomp_fatal ("Out of memory allocating %lu bytes",
-                     (unsigned long) (size * nmemb));
-       case omp_atv_allocator_fb:
-         allocator = allocator_data->fb_data;
-         goto retry;
-       }
+    case omp_atv_default_mem_fb:
+      allocator = omp_default_mem_alloc;
+      goto retry;
+    case omp_atv_null_fb:
+      break;
+    default:
+    case omp_atv_abort_fb:
+      gomp_fatal ("Out of memory allocating %lu bytes",
+                 (unsigned long) (size * nmemb));
+    case omp_atv_allocator_fb:
+      allocator = allocator_data->fb_data;
+      goto retry;
     }
   return NULL;
 }
@@ -1004,6 +1138,10 @@ retry:
   if (__builtin_add_overflow (size, new_size, &new_size))
     goto fail;
   old_size = data->size;
+#ifdef OMP_LOW_LAT_MEM_ALLOC_INVALID
+  if (allocator == omp_low_lat_mem_alloc)
+    goto fail;
+#endif
 
   if (__builtin_expect (allocator_data
                        && allocator_data->pool_size < ~(uintptr_t) 0, 0))
@@ -1090,9 +1228,14 @@ retry:
       else
 #endif
       if (prev_size)
-       new_ptr = realloc (data->ptr, new_size);
+       new_ptr = MEMSPACE_REALLOC (allocator_data->memspace, data->ptr,
+                                   data->size, new_size,
+                                   (free_allocator_data
+                                    && free_allocator_data->pinned),
+                                   allocator_data->pinned);
       else
-       new_ptr = malloc (new_size);
+       new_ptr = MEMSPACE_ALLOC (allocator_data->memspace, new_size,
+                                 allocator_data->pinned);
       if (new_ptr == NULL)
        {
 #ifdef HAVE_SYNC_BUILTINS
@@ -1140,9 +1283,19 @@ retry:
        }
       else
 #endif
-       new_ptr = realloc (data->ptr, new_size);
+       {
+         omp_memspace_handle_t memspace;
+         memspace = (allocator_data
+                     ? allocator_data->memspace
+                     : predefined_alloc_mapping[allocator]);
+         new_ptr = MEMSPACE_REALLOC (memspace, data->ptr, data->size, new_size,
+                                     (free_allocator_data
+                                      && free_allocator_data->pinned),
+                                     allocator_data && allocator_data->pinned);
+       }
       if (new_ptr == NULL)
        goto fail;
+
       ret = (char *) new_ptr + sizeof (struct omp_mem_header);
       ((struct omp_mem_header *) ret)[-1].ptr = new_ptr;
       ((struct omp_mem_header *) ret)[-1].size = new_size;
@@ -1167,7 +1320,14 @@ retry:
        }
       else
 #endif
-       new_ptr = malloc (new_size);
+       {
+         omp_memspace_handle_t memspace;
+         memspace = (allocator_data
+                     ? allocator_data->memspace
+                     : predefined_alloc_mapping[allocator]);
+         new_ptr = MEMSPACE_ALLOC (memspace, new_size,
+                                   allocator_data && allocator_data->pinned);
+       }
       if (new_ptr == NULL)
        goto fail;
     }
@@ -1216,39 +1376,36 @@ retry:
       return ret;
     }
 #endif
-  free (data->ptr);
+  {
+    omp_memspace_handle_t was_memspace;
+    was_memspace = (free_allocator_data
+                   ? free_allocator_data->memspace
+                   : predefined_alloc_mapping[free_allocator]);
+    int was_pinned = (free_allocator_data && free_allocator_data->pinned);
+    MEMSPACE_FREE (was_memspace, data->ptr, data->size, was_pinned);
+  }
   return ret;
 
-fail:
-  if (allocator_data)
+fail:;
+  int fallback = (allocator_data
+                 ? allocator_data->fallback
+                 : allocator == omp_default_mem_alloc
+                 ? omp_atv_null_fb
+                 : omp_atv_default_mem_fb);
+  switch (fallback)
     {
-      switch (allocator_data->fallback)
-       {
-       case omp_atv_default_mem_fb:
-         if (new_alignment > sizeof (void *)
-#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
-             || memkind
-#endif
-             || (allocator_data
-                 && allocator_data->pool_size < ~(uintptr_t) 0))
-           {
-             allocator = omp_default_mem_alloc;
-             goto retry;
-           }
-         /* Otherwise, we've already performed default mem allocation
-            and if that failed, it won't succeed again (unless it was
-            intermittent.  Return NULL then, as that is the fallback.  */
-         break;
-       case omp_atv_null_fb:
-         break;
-       default:
-       case omp_atv_abort_fb:
-         gomp_fatal ("Out of memory allocating %lu bytes",
-                     (unsigned long) size);
-       case omp_atv_allocator_fb:
-         allocator = allocator_data->fb_data;
-         goto retry;
-       }
+    case omp_atv_default_mem_fb:
+      allocator = omp_default_mem_alloc;
+      goto retry;
+    case omp_atv_null_fb:
+      break;
+    default:
+    case omp_atv_abort_fb:
+      gomp_fatal ("Out of memory allocating %lu bytes",
+                 (unsigned long) size);
+    case omp_atv_allocator_fb:
+      allocator = allocator_data->fb_data;
+      goto retry;
     }
   return NULL;
 }