git.ipfire.org Git - thirdparty/haproxy.git/commitdiff
MEDIUM: memory: Use the new _HA_ATOMIC_* macros.
author Olivier Houchard <ohouchard@haproxy.com>
Fri, 8 Mar 2019 17:53:35 +0000 (18:53 +0100)
committer Olivier Houchard <cognet@ci0.org>
Mon, 11 Mar 2019 16:02:38 +0000 (17:02 +0100)
Use the new _HA_ATOMIC_* macros and add barriers where needed.
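
For reference, the split between the two macro families can be sketched with
GCC __atomic builtins. The memory-order mappings below are an illustrative
assumption, not the verbatim hathreads.h definitions:

    /* Old style: every operation carries full ordering. */
    #define HA_ATOMIC_ADD(val, i)   __atomic_add_fetch((val), (i), __ATOMIC_SEQ_CST)

    /* New style: a relaxed operation... */
    #define _HA_ATOMIC_ADD(val, i)  __atomic_add_fetch((val), (i), __ATOMIC_RELAXED)

    /* ...paired with an explicit fence only where ordering matters. */
    #define __ha_barrier_atomic_store() __atomic_thread_fence(__ATOMIC_RELEASE)

    static unsigned int used;

    void example(void)
    {
            _HA_ATOMIC_ADD(&used, 1);    /* cheap on weakly ordered CPUs */
            __ha_barrier_atomic_store(); /* ordering made explicit, once */
    }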

include/common/memory.h
src/memory.c

diff --git a/include/common/memory.h b/include/common/memory.h
index 9c54422a2f61833492f818173e78a343dd7aa3e2..43ab8e90fefedcf5480192d84e9fe7c1f22cf6ae 100644
--- a/include/common/memory.h
+++ b/include/common/memory.h
@@ -229,8 +229,9 @@ static inline void *__pool_get_first(struct pool_head *pool)
                __ha_barrier_load();
                new.free_list = *POOL_LINK(pool, cmp.free_list);
        } while (__ha_cas_dw((void *)&pool->free_list, (void *)&cmp, (void *)&new) == 0);
+       __ha_barrier_atomic_store();
 
-       HA_ATOMIC_ADD(&pool->used, 1);
+       _HA_ATOMIC_ADD(&pool->used, 1);
 #ifdef DEBUG_MEMORY_POOLS
        /* keep track of where the element was allocated from */
        *POOL_LINK(pool, cmp.free_list) = (void *)pool;
@@ -288,8 +289,9 @@ static inline void __pool_free(struct pool_head *pool, void *ptr)
        do {
                *POOL_LINK(pool, ptr) = (void *)free_list;
                __ha_barrier_store();
-       } while (!HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr));
-       HA_ATOMIC_SUB(&pool->used, 1);
+       } while (!_HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr));
+       __ha_barrier_atomic_store();
+       _HA_ATOMIC_SUB(&pool->used, 1);
 }
 
 /* frees an object to the local cache, possibly pushing oldest objects to the
diff --git a/src/memory.c b/src/memory.c
index b200c380482cd2de5663e533b7d5b0b5f0848b72..ef7ec933d5923328a434c15949c7cd05abed0800 100644
--- a/src/memory.c
+++ b/src/memory.c
@@ -159,13 +159,13 @@ void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
 
        while (1) {
                if (limit && allocated >= limit) {
-                       HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
+                       _HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
                        return NULL;
                }
 
                ptr = malloc(size + POOL_EXTRA);
                if (!ptr) {
-                       HA_ATOMIC_ADD(&pool->failed, 1);
+                       _HA_ATOMIC_ADD(&pool->failed, 1);
                        if (failed)
                                return NULL;
                        failed++;
@@ -179,11 +179,12 @@ void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
                do {
                        *POOL_LINK(pool, ptr) = free_list;
                        __ha_barrier_store();
-               } while (HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr) == 0);
+               } while (_HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr) == 0);
        }
+       __ha_barrier_atomic_store();
 
-       HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
-       HA_ATOMIC_ADD(&pool->used, 1);
+       _HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
+       _HA_ATOMIC_ADD(&pool->used, 1);
 
 #ifdef DEBUG_MEMORY_POOLS
        /* keep track of where the element was allocated from */
@@ -210,7 +211,8 @@ void pool_flush(struct pool_head *pool)
                return;
        do {
                next = pool->free_list;
-       } while (!HA_ATOMIC_CAS(&pool->free_list, &next, NULL));
+       } while (!_HA_ATOMIC_CAS(&pool->free_list, &next, NULL));
+       __ha_barrier_atomic_store();
        while (next) {
                temp = next;
                next = *POOL_LINK(pool, temp);
@@ -218,7 +220,7 @@ void pool_flush(struct pool_head *pool)
                free(temp);
        }
        pool->free_list = next;
-       HA_ATOMIC_SUB(&pool->allocated, removed);
+       _HA_ATOMIC_SUB(&pool->allocated, removed);
        /* here, we should have pool->allocated == pool->used */
 }
 
@@ -235,7 +237,7 @@ void pool_gc(struct pool_head *pool_ctx)
        int cur_recurse = 0;
        struct pool_head *entry;
 
-       if (recurse || !HA_ATOMIC_CAS(&recurse, &cur_recurse, 1))
+       if (recurse || !_HA_ATOMIC_CAS(&recurse, &cur_recurse, 1))
                return;
 
        list_for_each_entry(entry, &pools, list) {
@@ -253,11 +255,11 @@ void pool_gc(struct pool_head *pool_ctx)
                        if (__ha_cas_dw(&entry->free_list, &cmp, &new) == 0)
                                continue;
                        free(cmp.free_list);
-                       HA_ATOMIC_SUB(&entry->allocated, 1);
+                       _HA_ATOMIC_SUB(&entry->allocated, 1);
                }
        }
 
-       HA_ATOMIC_STORE(&recurse, 0);
+       _HA_ATOMIC_STORE(&recurse, 0);
 }
 
 /* frees an object to the local cache, possibly pushing oldest objects to the
@@ -386,7 +388,7 @@ void pool_gc(struct pool_head *pool_ctx)
        int cur_recurse = 0;
        struct pool_head *entry;
 
-       if (recurse || !HA_ATOMIC_CAS(&recurse, &cur_recurse, 1))
+       if (recurse || !_HA_ATOMIC_CAS(&recurse, &cur_recurse, 1))
                return;
 
        list_for_each_entry(entry, &pools, list) {
@@ -407,7 +409,7 @@ void pool_gc(struct pool_head *pool_ctx)
                        HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
        }
 
-       HA_ATOMIC_STORE(&recurse, 0);
+       _HA_ATOMIC_STORE(&recurse, 0);
 }
 #endif
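
As a standalone illustration of the pattern this patch touches, here is a
minimal sketch of the lock-free free-list push used in __pool_free(), written
with C11 atomics instead of HAProxy's macros; pool_push() and the node layout
are hypothetical names for illustration only:

    #include <stdatomic.h>

    struct node { struct node *next; };

    static _Atomic(struct node *) free_list;
    static atomic_uint used;

    void pool_push(struct node *item)
    {
            struct node *head = atomic_load_explicit(&free_list, memory_order_relaxed);

            do {
                    item->next = head;  /* link before publishing the node */
            } while (!atomic_compare_exchange_weak_explicit(&free_list, &head, item,
                                                            memory_order_release,
                                                            memory_order_relaxed));

            /* analogue of the __ha_barrier_atomic_store() added by this patch */
            atomic_thread_fence(memory_order_release);

            /* analogue of the relaxed _HA_ATOMIC_SUB(&pool->used, 1) */
            atomic_fetch_sub_explicit(&used, 1, memory_order_relaxed);
    }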