// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 *  debugging by David Rientjes, Copyright (C) 2015
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include "slab.h"
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
			 size_t byte)
{
	const int nr = pool->curr_nr;
	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
	int i;

	pr_err("BUG: mempool element poison mismatch\n");
	pr_err("Mempool %p size %zu\n", pool, size);
	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
	for (i = start; i < end; i++)
		pr_cont("%x ", *(u8 *)(element + i));
	pr_cont("%s\n", end < size ? "..." : "");
	dump_stack();
}
static void __check_element(mempool_t *pool, void *element, size_t size)
{
	u8 *obj = element;
	size_t i;

	for (i = 0; i < size; i++) {
		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

		if (obj[i] != exp) {
			poison_error(pool, element, size, i);
			return;
		}
	}
	memset(obj, POISON_INUSE, size);
}
static void check_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->free == mempool_kfree) {
		__check_element(pool, element, (size_t)pool->pool_data);
	} else if (pool->free == mempool_free_slab) {
		__check_element(pool, element, kmem_cache_size(pool->pool_data));
	} else if (pool->free == mempool_free_pages) {
		/* Mempools backed by page allocator */
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}
static void __poison_element(void *element, size_t size)
{
	u8 *obj = element;

	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}
static void poison_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->alloc == mempool_kmalloc) {
		__poison_element(element, (size_t)pool->pool_data);
	} else if (pool->alloc == mempool_alloc_slab) {
		__poison_element(element, kmem_cache_size(pool->pool_data));
	} else if (pool->alloc == mempool_alloc_pages) {
		/* Mempools backed by page allocator */
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}
#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_slab_free_mempool(element);
	else if (pool->alloc == mempool_alloc_pages)
		kasan_poison_pages(element, (unsigned long)pool->pool_data,
				   false);
}
static void kasan_unpoison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_kmalloc)
		kasan_unpoison_range(element, (size_t)pool->pool_data);
	else if (pool->alloc == mempool_alloc_slab)
		kasan_unpoison_range(element, kmem_cache_size(pool->pool_data));
	else if (pool->alloc == mempool_alloc_pages)
		kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
				     false);
}
static __always_inline void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	poison_element(pool, element);
	kasan_poison_element(pool, element);
	pool->elements[pool->curr_nr++] = element;
}
static void *remove_element(mempool_t *pool)
{
	void *element = pool->elements[--pool->curr_nr];

	BUG_ON(pool->curr_nr < 0);
	kasan_unpoison_element(pool, element);
	check_element(pool, element);
	return element;
}
/**
 * mempool_exit - exit a mempool initialized with mempool_init()
 * @pool:      pointer to the memory pool which was initialized with
 *             mempool_init().
 *
 * Free all reserved elements in @pool and @pool itself.  This function
 * only sleeps if the free_fn() function sleeps.
 *
 * May be called on a zeroed but uninitialized mempool (i.e. allocated with
 * kzalloc()).
 */
void mempool_exit(mempool_t *pool)
{
	while (pool->curr_nr) {
		void *element = remove_element(pool);
		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	pool->elements = NULL;
}
EXPORT_SYMBOL(mempool_exit);
/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself.  This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	if (unlikely(!pool))
		return;

	mempool_exit(pool);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);
int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
		      mempool_free_t *free_fn, void *pool_data,
		      gfp_t gfp_mask, int node_id)
{
	spin_lock_init(&pool->lock);
	pool->min_nr	= min_nr;
	pool->pool_data = pool_data;
	pool->alloc	= alloc_fn;
	pool->free	= free_fn;
	init_waitqueue_head(&pool->wait);

	pool->elements = kmalloc_array_node(min_nr, sizeof(void *),
					    gfp_mask, node_id);
	if (!pool->elements)
		return -ENOMEM;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_exit(pool);
			return -ENOMEM;
		}
		add_element(pool, element);
	}

	return 0;
}
EXPORT_SYMBOL(mempool_init_node);
/**
 * mempool_init - initialize a memory pool
 * @pool:      pointer to the memory pool that should be initialized
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * Like mempool_create(), but initializes the pool in place (i.e. embedded in
 * another structure).
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
		 mempool_free_t *free_fn, void *pool_data)
{
	return mempool_init_node(pool, min_nr, alloc_fn, free_fn,
				 pool_data, GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_init);
/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * this function creates and allocates a guaranteed size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
 * functions might sleep - as long as the mempool_alloc() function is not called
 * from IRQ contexts.
 *
 * Return: pointer to the created memory pool object or %NULL on error.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
			  mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
				   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);
mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			       mempool_free_t *free_fn, void *pool_data,
			       gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;

	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
	if (!pool)
		return NULL;

	if (mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data,
			      gfp_mask, node_id)) {
		kfree(pool);
		return NULL;
	}

	return pool;
}
EXPORT_SYMBOL(mempool_create_node);
/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note, the caller must guarantee that no mempool_destroy is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (eg. from IRQ contexts) while this function executes.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
			pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * this function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 *
 * Return: pointer to the allocated element or %NULL on error.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_entry_t wait;
	gfp_t gfp_temp;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_alloc(gfp_mask);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use gfp mask w/o direct reclaim or IO for the first round.  If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if (gfp_temp != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}

	/* We must not sleep if !__GFP_DIRECT_RECLAIM */
	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule().  The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * this function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc().  The preceding read is
	 * for @element and the following @pool->curr_nr.  This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element.  This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);".  This function
	 * may end up using curr_nr value which is from before allocation
	 * of @p without the following rmb.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards.  If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 */
	if (unlikely(READ_ONCE(pool->curr_nr) < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);
/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	VM_BUG_ON(mem->ctor);
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);
/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;
	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;
	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;
	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);