Keeping un-unmappable pages until they can be reused
author    Maria Matejka <mq@ucw.cz>
          Thu, 11 Nov 2021 15:25:59 +0000 (16:25 +0100)
committer Maria Matejka <mq@ucw.cz>
          Mon, 22 Nov 2021 18:05:44 +0000 (19:05 +0100)
On Linux, munmap() may fail with ENOMEM when virtual memory is too
fragmented. Work around this by keeping such blocks for future use
instead of returning them to the kernel.
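
In miniature, the workaround looks like this: when munmap() reports ENOMEM,
the page is still mapped and perfectly usable, so it can be parked on a free
list and handed out again by the allocator. A standalone sketch of the
pattern, not BIRD's actual code; the list type and the 1/0 return convention
(page really unmapped vs. kept) are assumptions, chosen to be consistent with
the free_sys_page() declaration change below.

#include <errno.h>
#include <stdlib.h>
#include <sys/mman.h>

struct kept_page { struct kept_page *next; };

static struct kept_page *kept_pages;   /* pages the kernel refused to unmap */

/* Returns 1 if the page went back to the kernel, 0 if it was kept. */
static int
put_sys_page(void *ptr, size_t page_size)
{
  if (munmap(ptr, page_size) == 0)
    return 1;

  if (errno != ENOMEM)
    abort();                  /* any other munmap() failure is a real bug */

  /* Address space too fragmented to unmap: keep the page for reuse. */
  struct kept_page *kp = ptr;
  kp->next = kept_pages;
  kept_pages = kp;
  return 0;
}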

lib/locking.h
lib/resource.c
sysdep/unix/alloc.c

diff --git a/lib/locking.h b/lib/locking.h
index 0a69f50f0305221bda134e05b96dd4fdc07549a3..1a8bdcd4562672d0594af18b11289230a8f844a9 100644 (file)
@@ -19,6 +19,7 @@ struct lock_order {
   struct domain_generic *attrs;
   struct domain_generic *cork;
   struct domain_generic *event;
+  struct domain_generic *resource;
 };
 
 extern _Thread_local struct lock_order locking_stack;
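
For context: the field order in struct lock_order is the only legal
acquisition order, and the new resource domain is appended at the end. A
hedged illustration of the idea, using a field's offset as its lock rank;
the trimmed struct and the helper below are hypothetical, not BIRD's actual
locking macros.

#include <assert.h>
#include <stddef.h>

/* Trimmed stand-in for the struct in the hunk above. */
struct lock_order {
  void *attrs, *cork, *event, *resource;
};

#define LOCK_RANK(field) offsetof(struct lock_order, field)

static size_t next_free_rank;   /* first offset not yet held; thread-local in reality */

static void
lock_rank_check(size_t rank)
{
  assert(rank >= next_free_rank);   /* out-of-order locking risks deadlock */
  next_free_rank = rank + sizeof(void *);
}

A caller would run lock_rank_check(LOCK_RANK(resource)) before taking the
resource domain, which is legal only when nothing later in the struct is held.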
diff --git a/lib/resource.c b/lib/resource.c
index e80b315b2c85e276d64eaa86ce13834a64c095d4..2d041ad5148af439d49639f8551c43d2f805b361 100644 (file)
@@ -60,7 +60,7 @@ static struct resclass pool_class = {
 pool root_pool;
 
 void *alloc_sys_page(void);
-void free_sys_page(void *);
+int free_sys_page(void *);
 
 static int indent;
 
@@ -98,8 +98,10 @@ pool_free(resource *P)
   if (p->pages)
     {
       ASSERT_DIE(!p->pages->used);
-      for (uint i=0; i<p->pages->free; i++)
+
+      for (uint i = 0; i < p->pages->free; i++)
        free_sys_page(p->pages->ptr[i]);
+
       free_sys_page(p->pages);
     }
 }
@@ -476,10 +478,19 @@ free_page(pool *p, void *ptr)
   ASSERT_DIE(p->pages);
   p->pages->used--;
 
-  if (p->pages->free >= POOL_PAGES_MAX)
-    return free_sys_page(ptr);
-  else
-    p->pages->ptr[p->pages->free++] = ptr;
+  ASSERT_DIE(p->pages->free <= POOL_PAGES_MAX);
+
+  if (p->pages->free == POOL_PAGES_MAX)
+  {
+    const unsigned long keep = POOL_PAGES_MAX / 4;
+
+    for (uint i = keep; i < p->pages->free; i++)
+      free_sys_page(p->pages->ptr[i]);
+
+    p->pages->free = keep;
+  }
+
+  p->pages->ptr[p->pages->free++] = ptr;
 }
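
The rewritten branch adds hysteresis: instead of unmapping exactly one page
every time the list is full, a full list is trimmed down to a quarter in one
batch, so a workload hovering around the limit stops bouncing between
munmap() and mmap() on every free/alloc pair. The policy in isolation, as a
sketch with a plain array and an illustrative POOL_PAGES_MAX:

#define POOL_PAGES_MAX 16   /* illustrative; BIRD defines its own value */

static void *page_cache[POOL_PAGES_MAX];
static unsigned cached;

static void
cache_page(void *ptr, int (*sys_free)(void *))
{
  if (cached == POOL_PAGES_MAX)
  {
    /* Full: flush three quarters in one batch, keep the rest cached. */
    const unsigned keep = POOL_PAGES_MAX / 4;

    for (unsigned i = keep; i < cached; i++)
      sys_free(page_cache[i]);

    cached = keep;
  }

  page_cache[cached++] = ptr;   /* guaranteed to have room now */
}

With POOL_PAGES_MAX = 16, a full list is cut to 4 pages before the new one
is appended, so eleven further frees are absorbed before the next batch.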
 
 
diff --git a/sysdep/unix/alloc.c b/sysdep/unix/alloc.c
index 4c9d5eb5d14512ac70da0089a66bc9973e4a5fad..4ae1a9db7272ff62e131445735f4047f158c62f4 100644 (file)
@@ -11,6 +11,8 @@
 
 #include <stdlib.h>
 #include <unistd.h>
+#include <stdatomic.h>
+#include <errno.h>
 
 #ifdef HAVE_MMAP
 #include <sys/mman.h>
@@ -19,6 +21,13 @@
 long page_size = 0;
 _Bool alloc_multipage = 0;
 
+static _Atomic int global_page_list_not_empty;
+static list global_page_list;
+static _Atomic int global_page_spinlock;
+
+#define        GLOBAL_PAGE_SPIN_LOCK   for (int v = 0; !atomic_compare_exchange_weak_explicit(&global_page_spinlock, &v, 1, memory_order_acq_rel, memory_order_acquire); v = 0)
+#define GLOBAL_PAGE_SPIN_UNLOCK        do { int v = 1; ASSERT_DIE(atomic_compare_exchange_strong_explicit(&global_page_spinlock, &v, 0, memory_order_acq_rel, memory_order_acquire)); } while (0)
+
 #ifdef HAVE_MMAP
 static _Bool use_fake = 0;
 #else
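
The two macros above implement a minimal test-and-set spinlock over a C11
atomic int: 0 means unlocked, 1 locked, and the unlock asserts that the flag
was actually set. The same construction written out as functions, as a
sketch equivalent in spirit to the macros:

#include <assert.h>
#include <stdatomic.h>

static _Atomic int spinlock;   /* 0 = unlocked, 1 = locked */

static void
spin_lock(void)
{
  int v = 0;

  /* A weak CAS may fail spuriously and overwrites v with the observed
   * value on failure, so v must be reset to 0 before every retry. */
  while (!atomic_compare_exchange_weak_explicit(&spinlock, &v, 1,
             memory_order_acq_rel, memory_order_acquire))
    v = 0;
}

static void
spin_unlock(void)
{
  int v = 1;
  int ok = atomic_compare_exchange_strong_explicit(&spinlock, &v, 0,
               memory_order_acq_rel, memory_order_acquire);

  assert(ok);   /* failing here means we did not hold the lock */
  (void) ok;    /* keep -Wunused quiet when NDEBUG elides the assert */
}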
@@ -28,12 +37,14 @@ static _Bool use_fake = 1;
 void resource_sys_init(void)
 {
 #ifdef HAVE_MMAP
+  init_list(&global_page_list);
+
   if (!(page_size = sysconf(_SC_PAGESIZE)))
     die("System page size must be non-zero");
 
   if ((u64_popcount(page_size) > 1) || (page_size > 16384))
-  {
 #endif
+  {
     /* Too big or strange page, use the aligned allocator instead */
     page_size = 4096;
     use_fake = 1;
@@ -46,6 +57,22 @@ alloc_sys_page(void)
 #ifdef HAVE_MMAP
   if (!use_fake)
   {
+    if (atomic_load_explicit(&global_page_list_not_empty, memory_order_relaxed))
+    {
+      GLOBAL_PAGE_SPIN_LOCK;
+      if (!EMPTY_LIST(global_page_list))
+      {
+       node *ret = HEAD(global_page_list);
+       rem_node(ret);
+       if (EMPTY_LIST(global_page_list))
+         atomic_store_explicit(&global_page_list_not_empty, 0, memory_order_relaxed);
+       GLOBAL_PAGE_SPIN_UNLOCK;
+       memset(ret, 0, sizeof(node));
+       return (void *) ret;
+      }
+      GLOBAL_PAGE_SPIN_UNLOCK;
+    }
+
     if (alloc_multipage)
     {
       void *big = mmap(NULL, page_size * 2, PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
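
The block added to alloc_sys_page() is a double-checked fast path: a relaxed
atomic flag lets allocations skip the spinlock entirely while the shared list
is known to be empty, and the list is re-checked under the lock because the
flag may be stale. The same shape in isolation, using an intrusive
singly-linked list in place of BIRD's node/list and the spin_lock() /
spin_unlock() pair sketched above:

#include <stdatomic.h>
#include <stddef.h>

struct kept_page { struct kept_page *next; };

static struct kept_page *kept_pages;        /* guarded by the spinlock */
static _Atomic int kept_pages_not_empty;    /* lock-free hint for the fast path */

void spin_lock(void);
void spin_unlock(void);

static void *
try_take_kept_page(void)
{
  /* Fast path: no lock taken while the list is believed empty. */
  if (!atomic_load_explicit(&kept_pages_not_empty, memory_order_relaxed))
    return NULL;

  spin_lock();

  struct kept_page *kp = kept_pages;
  if (kp)
  {
    kept_pages = kp->next;
    if (!kept_pages)   /* took the last page: clear the hint */
      atomic_store_explicit(&kept_pages_not_empty, 0, memory_order_relaxed);
  }

  spin_unlock();
  return kp;   /* NULL if another thread drained the list first */
}

Note that the diff also wipes the embedded node header (memset(ret, 0,
sizeof(node))) before returning the page, so no stale list pointers leak
into freshly allocated memory.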
@@ -90,7 +117,19 @@ free_sys_page(void *ptr)
   if (!use_fake)
   {
     if (munmap(ptr, page_size) < 0)
-      bug("munmap(%p) failed: %m", ptr);
+#ifdef ENOMEM
+      if (errno == ENOMEM)
+      {
+       memset(ptr, 0, page_size);
+
+       GLOBAL_PAGE_SPIN_LOCK;
+       add_tail(&global_page_list, (node *) ptr);
+       atomic_store_explicit(&global_page_list_not_empty, 1, memory_order_relaxed);
+       GLOBAL_PAGE_SPIN_UNLOCK;
+      }
+      else
+#endif
+       bug("munmap(%p) failed: %m", ptr);
   }
   else
 #endif