git.ipfire.org Git - thirdparty/haproxy.git/commitdiff
MEDIUM: memory: make pool_gc() run under thread isolation
authorWilly Tarreau <w@1wt.eu>
Fri, 24 Apr 2020 04:15:24 +0000 (06:15 +0200)
committerWilly Tarreau <w@1wt.eu>
Fri, 24 Apr 2020 04:25:25 +0000 (06:25 +0200)
pool_gc() causes quite some stress on the memory allocator because
it makes a lot of free() calls while other threads are using malloc().
In addition, pool_gc() needs to take care of possible locking because
it may be called from pool allocators. One way to avoid all this is to
use thread_isolate() to make sure the gc runs alone. By putting less
pressure on the pools and getting rid of the locks, it may even take
less time to complete.

src/memory.c

index 4a37381e045f82e0c3c2a60fbbebb9256420fb69..551ca83ea09e8e22fae9587addb6abac05c634e9 100644 (file)
@@ -248,22 +248,18 @@ void pool_flush(struct pool_head *pool)
 
 /*
  * This function frees whatever can be freed in all pools, but respecting
- * the minimum thresholds imposed by owners. It takes care of avoiding
- * recursion because it may be called from a signal handler.
- *
- * <pool_ctx> is unused
+ * the minimum thresholds imposed by owners. It makes sure to be alone to
+ * run by using thread_isolate(). <pool_ctx> is unused.
  */
 void pool_gc(struct pool_head *pool_ctx)
 {
-       static int recurse;
-       int cur_recurse = 0;
        struct pool_head *entry;
+       int isolated = thread_isolated();
 
-       if (recurse || !_HA_ATOMIC_CAS(&recurse, &cur_recurse, 1))
-               return;
+       if (!isolated)
+               thread_isolate();
 
        list_for_each_entry(entry, &pools, list) {
-               HA_SPIN_LOCK(POOL_LOCK, &entry->flush_lock);
                while ((int)((volatile int)entry->allocated - (volatile int)entry->used) > (int)entry->minavail) {
                        struct pool_free_list cmp, new;
 
@@ -280,10 +276,10 @@ void pool_gc(struct pool_head *pool_ctx)
                        free(cmp.free_list);
                        _HA_ATOMIC_SUB(&entry->allocated, 1);
                }
-               HA_SPIN_UNLOCK(POOL_LOCK, &entry->flush_lock);
        }
 
-       _HA_ATOMIC_STORE(&recurse, 0);
+       if (!isolated)
+               thread_release();
 }
 
 /* frees an object to the local cache, possibly pushing oldest objects to the
@@ -412,43 +408,31 @@ void pool_flush(struct pool_head *pool)
 
 /*
  * This function frees whatever can be freed in all pools, but respecting
- * the minimum thresholds imposed by owners. It takes care of avoiding
- * recursion because it may be called from a signal handler.
- *
- * <pool_ctx> is used when pool_gc is called to release resources to allocate
- * an element in __pool_refill_alloc. It is important because <pool_ctx> is
- * already locked, so we need to skip the lock here.
+ * the minimum thresholds imposed by owners. It makes sure to be alone to
+ * run by using thread_isolate(). <pool_ctx> is unused.
  */
 void pool_gc(struct pool_head *pool_ctx)
 {
-       static int recurse;
-       int cur_recurse = 0;
        struct pool_head *entry;
+       int isolated = thread_isolated();
 
-       if (recurse || !_HA_ATOMIC_CAS(&recurse, &cur_recurse, 1))
-               return;
+       if (!isolated)
+               thread_isolate();
 
        list_for_each_entry(entry, &pools, list) {
                void *temp;
                //qfprintf(stderr, "Flushing pool %s\n", entry->name);
-               if (entry != pool_ctx)
-                       HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
                while (entry->free_list &&
                       (int)(entry->allocated - entry->used) > (int)entry->minavail) {
                        temp = entry->free_list;
                        entry->free_list = *POOL_LINK(entry, temp);
                        entry->allocated--;
-                       if (entry != pool_ctx)
-                               HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
                        pool_free_area(temp, entry->size + POOL_EXTRA);
-                       if (entry != pool_ctx)
-                               HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
                }
-               if (entry != pool_ctx)
-                       HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
        }
 
-       _HA_ATOMIC_STORE(&recurse, 0);
+       if (!isolated)
+               thread_release();
 }
 #endif