MINOR: memory: move pool-specific path of the locked pool_free() to __pool_free()
author     Willy Tarreau <w@1wt.eu>  Mon, 1 Jun 2020 16:35:24 +0000 (18:35 +0200)
committer  Willy Tarreau <w@1wt.eu>  Thu, 11 Jun 2020 08:18:56 +0000 (10:18 +0200)
pool_free() was not identical between locked and lockless pools. The
difference was the call to __pool_free() in one case versus open-coded
accesses in the other, and the poisoning brought by commit da52035a45
("MINOR: memory: also poison the area on freeing"), which unfortunately
did it only for the lockless path.

Let's now have __pool_free() work on the global pool in the locked case
as well, so that the code is architected similarly.
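
For illustration, here is a minimal sketch of the shape pool_free() takes
after this patch in the locked build (names match the diff below; the
lockless build already looked like this): the generic NULL check and the
poisoning stay in pool_free(), while all pool-specific bookkeeping moves
to __pool_free().

    static inline void pool_free(struct pool_head *pool, void *ptr)
    {
            if (ptr != NULL) {
                    if (mem_poison_byte >= 0)          /* poison freed area */
                            memset(ptr, mem_poison_byte, pool->size);
                    __pool_free(pool, ptr);            /* pool-specific path */
            }
    }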

include/common/memory.h

index e19cd1bbc21f7ba1e6add5838538ae16c5643c07..2aaaaa019b940208f265427e129d42ca97ac0e38 100644
@@ -372,6 +372,35 @@ static inline void *pool_alloc(struct pool_head *pool)
        return p;
 }
 
+/* unconditionally stores the object as-is into the global pool. The object
+ * must not be NULL. Use pool_free() instead.
+ */
+static inline void __pool_free(struct pool_head *pool, void *ptr)
+{
+#ifndef DEBUG_UAF /* normal pool behaviour */
+       HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
+       pool->used--;
+       if (pool_is_crowded(pool)) {
+               pool_free_area(ptr, pool->size + POOL_EXTRA);
+               pool->allocated--;
+       } else {
+               *POOL_LINK(pool, ptr) = (void *)pool->free_list;
+               pool->free_list = (void *)ptr;
+       }
+       swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
+       HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
+#else  /* release the entry for real to detect use after free */
+       /* ensure we crash on double free or free of a const area */
+       *(uint32_t *)ptr = 0xDEADADD4;
+       pool_free_area(ptr, pool->size + POOL_EXTRA);
+       HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
+       pool->allocated--;
+       pool->used--;
+       swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
+       HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
+#endif /* DEBUG_UAF */
+}
+
 /*
  * Puts a memory area back to the corresponding pool.
  * Items are chained directly through a pointer that
@@ -389,29 +418,10 @@ static inline void pool_free(struct pool_head *pool, void *ptr)
                if (*POOL_LINK(pool, ptr) != (void *)pool)
                        *DISGUISE((volatile int *)0) = 0;
 #endif
+               if (mem_poison_byte >= 0)
+                       memset(ptr, mem_poison_byte, pool->size);
 
-#ifndef DEBUG_UAF /* normal pool behaviour */
-               HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
-               pool->used--;
-               if (pool_is_crowded(pool)) {
-                       pool_free_area(ptr, pool->size + POOL_EXTRA);
-                       pool->allocated--;
-               } else {
-                       *POOL_LINK(pool, ptr) = (void *)pool->free_list;
-                       pool->free_list = (void *)ptr;
-               }
-               swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
-               HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
-#else  /* release the entry for real to detect use after free */
-               /* ensure we crash on double free or free of a const area*/
-               *(uint32_t *)ptr = 0xDEADADD4;
-               pool_free_area(ptr, pool->size + POOL_EXTRA);
-               HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
-               pool->allocated--;
-               pool->used--;
-               swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
-               HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
-#endif /* DEBUG_UAF */
+               __pool_free(pool, ptr);
        }
 }
 #endif /* CONFIG_HAP_LOCKLESS_POOLS */
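
For context, a hypothetical caller looks the same before and after this
patch; only the internal split between pool_free() and __pool_free()
changed. "pool_head_task" below stands in for any pool created with
create_pool():

    struct task *t = pool_alloc(pool_head_task);   /* grab an object */
    if (t) {
            /* ... use the object ... */
            pool_free(pool_head_task, t);          /* hand it back to the pool */
    }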