	/* ignored without shared pools */
}
-static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
+static inline void pool_put_to_shared_cache(struct pool_head *pool, void *item)
{
-	pool_free_nocache(pool, ptr);
+	/* ignored without shared pools */
}
#else /* CONFIG_HAP_NO_GLOBAL_POOLS */
void pool_refill_local_from_shared(struct pool_head *pool, struct pool_cache_head *pch);
+void pool_put_to_shared_cache(struct pool_head *pool, void *item);
/* returns true if the pool is considered to have too many free objects */
static inline int pool_is_crowded(const struct pool_head *pool)
{
	return pool->allocated >= swrate_avg(pool->needed_avg + pool->needed_avg / 4, POOL_AVG_SAMPLES) &&
	       (int)(pool->allocated - pool->used) >= pool->minavail;
}
-/* Locklessly add item <ptr> to pool <pool>, then update the pool used count.
- * Both the pool and the pointer must be valid. Use pool_free() for normal
- * operations.
- */
-static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
-{
-	void **free_list;
-
-	if (unlikely(pool_is_crowded(pool))) {
-		pool_free_nocache(pool, ptr);
-		return;
-	}
-
-	_HA_ATOMIC_DEC(&pool->used);
-	free_list = _HA_ATOMIC_LOAD(&pool->free_list);
-	do {
-		while (unlikely(free_list == POOL_BUSY)) {
-			__ha_cpu_relax();
-			free_list = _HA_ATOMIC_LOAD(&pool->free_list);
-		}
-		_HA_ATOMIC_STORE((void **)ptr, (void *)free_list);
-		__ha_barrier_atomic_store();
-	} while (!_HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr));
-	__ha_barrier_atomic_store();
-	swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
-}
#endif /* CONFIG_HAP_NO_GLOBAL_POOLS */
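
An aside for readers tracing the crowding test above: pool_is_crowded() flags a pool once it holds at least 25% more objects than the recently measured need and the free objects already cover the pool's reserve. Below is a minimal standalone sketch of that arithmetic with hypothetical numbers; the swrate_avg() stand-in only mimics the rounding of HAProxy's real sliding-average helper:

#include <stdio.h>

#define POOL_AVG_SAMPLES 1024

/* simplified stand-in: the maintained sliding sum converges to
 * n * avg, so a rounded-up division by n recovers the average */
static unsigned int swrate_avg(unsigned int sum, unsigned int n)
{
	return (sum + n - 1) / n;
}

int main(void)
{
	unsigned int allocated = 1000, used = 700, minavail = 100;
	/* hypothetical sliding sum for an average need of ~600 objects */
	unsigned int needed_avg = 600 * POOL_AVG_SAMPLES;
	int crowded;

	crowded = allocated >= swrate_avg(needed_avg + needed_avg / 4,
	                                  POOL_AVG_SAMPLES) &&
	          (int)(allocated - used) >= (int)minavail;

	printf("crowded=%d\n", crowded); /* 1: 1000 >= 750 and 300 >= 100 */
	return 0;
}

With 1000 objects allocated against an average need of 600 (125% of which is 750) and 300 free objects against a reserve of 100, both conditions hold, so the next release goes back to the OS rather than to the shared cache.
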
		pool_cache_count--;
		LIST_DELETE(&item->by_pool);
		LIST_DELETE(&item->by_lru);
-		pool_put_to_shared_cache(pool, item);
+
+		if (unlikely(pool_is_crowded(pool)))
+			pool_free_nocache(pool, item);
+		else
+			pool_put_to_shared_cache(pool, item);
	}
}
		ph->count--;
		pool_cache_count--;
		pool_cache_bytes -= pool->size;
-		pool_put_to_shared_cache(pool, item);
+		if (unlikely(pool_is_crowded(pool)))
+			pool_free_nocache(pool, item);
+		else
+			pool_put_to_shared_cache(pool, item);
	} while (pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE * 7 / 8);
}
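
The 7/8 factor in the loop condition above gives eviction some hysteresis: once triggered, it keeps releasing objects until the thread-local cache has dropped an extra eighth below its limit, so the very next cached free cannot immediately re-trigger it. A quick check of the arithmetic, assuming the usual 512 kB default for CONFIG_HAP_POOL_CACHE_SIZE (an assumption; builds may override it):

#include <stdio.h>

/* assumed default; the build system may define a different size */
#define CONFIG_HAP_POOL_CACHE_SIZE 524288

int main(void)
{
	/* eviction stops once the cache is below 7/8 of the limit */
	printf("evict from %d down to %d bytes\n",
	       CONFIG_HAP_POOL_CACHE_SIZE,
	       CONFIG_HAP_POOL_CACHE_SIZE * 7 / 8); /* 524288 -> 458752 */
	return 0;
}
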
	pool_cache_bytes += pool->size;
}
+/* Adds cache item entry <item> to the shared cache. The caller is advised to
+ * first check with pool_is_crowded() whether it's wise to add this object there.
+ * Both the pool and the item must be valid. Use pool_free() for normal
+ * operations.
+ */
+void pool_put_to_shared_cache(struct pool_head *pool, void *item)
+{
+	void **free_list;
+
+	_HA_ATOMIC_DEC(&pool->used);
+	free_list = _HA_ATOMIC_LOAD(&pool->free_list);
+	do {
+		while (unlikely(free_list == POOL_BUSY)) {
+			__ha_cpu_relax();
+			free_list = _HA_ATOMIC_LOAD(&pool->free_list);
+		}
+		_HA_ATOMIC_STORE((void **)item, (void *)free_list);
+		__ha_barrier_atomic_store();
+	} while (!_HA_ATOMIC_CAS(&pool->free_list, &free_list, item));
+	__ha_barrier_atomic_store();
+	swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
+}
+
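The CAS loop above is the producer half of a lock-free LIFO. The spin on POOL_BUSY exists because a consumer wanting to refill its local cache detaches the entire chain at once: it swaps the list head to POOL_BUSY, which both stalls concurrent pushers and avoids the ABA hazard of popping elements one at a time. A minimal sketch of that consumer protocol, using the same primitives as above (an illustration, not HAProxy's literal refill code):

/* detach the whole shared free list; repopulating the local cache
 * from the returned chain is left out of this sketch */
static void **pool_detach_sketch(struct pool_head *pool)
{
	void **chain = _HA_ATOMIC_LOAD(&pool->free_list);

	do {
		while (unlikely(chain == POOL_BUSY)) {
			__ha_cpu_relax();
			chain = _HA_ATOMIC_LOAD(&pool->free_list);
		}
	} while (!_HA_ATOMIC_CAS(&pool->free_list, &chain, POOL_BUSY));

	/* the chain is now ours; publish an empty head so pushers
	 * spinning on POOL_BUSY can resume */
	_HA_ATOMIC_STORE(&pool->free_list, NULL);
	__ha_barrier_atomic_store();
	return chain;
}

Because the producer stores the old head into the item before the CAS publishes it, the chain handed to the consumer is always fully linked; the barriers in pool_put_to_shared_cache() guarantee that ordering.
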
/*
* This function frees whatever can be freed in pool <pool>.
*/