// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/mempool.c
 *
 * memory buffer pool support. Such pools are mostly used
 * for guaranteed, deadlock-free memory allocations during
 * extreme VM load.
 *
 * started by Ingo Molnar, Copyright (C) 2001
 * debugging by David Rientjes, Copyright (C) 2015
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include "slab.h"

#ifdef CONFIG_SLUB_DEBUG_ON
static void poison_error(mempool_t *pool, void *element, size_t size,
			 size_t byte)
{
	const int nr = pool->curr_nr;
	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
	int i;

	pr_err("BUG: mempool element poison mismatch\n");
	pr_err("Mempool %p size %zu\n", pool, size);
	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
	for (i = start; i < end; i++)
		pr_cont("%x ", *(u8 *)(element + i));
	pr_cont("%s\n", end < size ? "..." : "");
	dump_stack();
}

static void __check_element(mempool_t *pool, void *element, size_t size)
{
	u8 *obj = element;
	size_t i;

	for (i = 0; i < size; i++) {
		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

		if (obj[i] != exp) {
			poison_error(pool, element, size, i);
			return;
		}
	}
	memset(obj, POISON_INUSE, size);
}

static void check_element(mempool_t *pool, void *element)
{
	/* Skip checking: KASAN might save its metadata in the element. */
	if (kasan_enabled())
		return;

	/* Mempools backed by slab allocator */
	if (pool->free == mempool_kfree) {
		__check_element(pool, element, (size_t)pool->pool_data);
	} else if (pool->free == mempool_free_slab) {
		__check_element(pool, element, kmem_cache_size(pool->pool_data));
	} else if (pool->free == mempool_free_pages) {
		/* Mempools backed by page allocator */
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_local_page((struct page *)element);

		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
		kunmap_local(addr);
	}
}

static void __poison_element(void *element, size_t size)
{
	u8 *obj = element;

	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}

static void poison_element(mempool_t *pool, void *element)
{
	/* Skip poisoning: KASAN might save its metadata in the element. */
	if (kasan_enabled())
		return;

	/* Mempools backed by slab allocator */
	if (pool->alloc == mempool_kmalloc) {
		__poison_element(element, (size_t)pool->pool_data);
	} else if (pool->alloc == mempool_alloc_slab) {
		__poison_element(element, kmem_cache_size(pool->pool_data));
	} else if (pool->alloc == mempool_alloc_pages) {
		/* Mempools backed by page allocator */
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_local_page((struct page *)element);

		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
		kunmap_local(addr);
	}
}
#else /* CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_SLUB_DEBUG_ON */

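/*
 * Worked example (illustrative, assuming the standard values from
 * <linux/poison.h>): for an 8-byte element, __poison_element() leaves
 * the memory as
 *
 *	offset:  0    1    2    3    4    5    6    7
 *	value:   6b   6b   6b   6b   6b   6b   6b   a5
 *
 * i.e. POISON_FREE (0x6b) in every byte except the last, which holds
 * POISON_END (0xa5). __check_element() verifies exactly this pattern
 * and then fills the element with POISON_INUSE before handing it out.
 */
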
static __always_inline bool kasan_poison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		return kasan_mempool_poison_object(element);
	else if (pool->alloc == mempool_alloc_pages)
		return kasan_mempool_poison_pages(element,
						  (unsigned long)pool->pool_data);
	return true;
}

static void kasan_unpoison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_kmalloc)
		kasan_mempool_unpoison_object(element, (size_t)pool->pool_data);
	else if (pool->alloc == mempool_alloc_slab)
		kasan_mempool_unpoison_object(element,
					      kmem_cache_size(pool->pool_data));
	else if (pool->alloc == mempool_alloc_pages)
		kasan_mempool_unpoison_pages(element,
					     (unsigned long)pool->pool_data);
}

static __always_inline void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	poison_element(pool, element);
	if (kasan_poison_element(pool, element))
		pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool)
{
	void *element = pool->elements[--pool->curr_nr];

	BUG_ON(pool->curr_nr < 0);
	kasan_unpoison_element(pool, element);
	check_element(pool, element);
	return element;
}

/**
 * mempool_exit - exit a mempool initialized with mempool_init()
 * @pool: pointer to the memory pool which was initialized with
 *        mempool_init().
 *
 * Free all reserved elements in @pool and the element array. @pool itself
 * is not freed, so this may be used on a mempool embedded in another
 * structure. This function only sleeps if the free_fn() function sleeps.
 *
 * May be called on a zeroed but uninitialized mempool (i.e. allocated with
 * kzalloc()).
 */
void mempool_exit(mempool_t *pool)
{
	while (pool->curr_nr) {
		void *element = remove_element(pool);
		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	pool->elements = NULL;
}
EXPORT_SYMBOL(mempool_exit);

/**
 * mempool_destroy - deallocate a memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself. This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	if (unlikely(!pool))
		return;

	mempool_exit(pool);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);

int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
		      mempool_free_t *free_fn, void *pool_data,
		      gfp_t gfp_mask, int node_id)
{
	spin_lock_init(&pool->lock);
	pool->min_nr	= min_nr;
	pool->pool_data = pool_data;
	pool->alloc	= alloc_fn;
	pool->free	= free_fn;
	init_waitqueue_head(&pool->wait);

	pool->elements = kmalloc_array_node(min_nr, sizeof(void *),
					    gfp_mask, node_id);
	if (!pool->elements)
		return -ENOMEM;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_exit(pool);
			return -ENOMEM;
		}
		add_element(pool, element);
	}

	return 0;
}
EXPORT_SYMBOL(mempool_init_node);

/**
 * mempool_init - initialize a memory pool
 * @pool:      pointer to the memory pool that should be initialized
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * Like mempool_create(), but initializes the pool in place (i.e. embedded
 * in another structure).
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
		 mempool_free_t *free_fn, void *pool_data)
{
	return mempool_init_node(pool, min_nr, alloc_fn, free_fn,
				 pool_data, GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_init);

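/*
 * Usage sketch (illustrative only; struct my_dev, my_dev_setup() and the
 * "my_bufs" cache are hypothetical): embedding a pool in a driver-private
 * structure with mempool_init()/mempool_exit().
 */
#if 0	/* example, not compiled */
struct my_dev {
	struct kmem_cache *cache;
	mempool_t pool;
};

static int my_dev_setup(struct my_dev *dev)
{
	int ret;

	dev->cache = kmem_cache_create("my_bufs", 256, 0, 0, NULL);
	if (!dev->cache)
		return -ENOMEM;

	/* Guarantee four preallocated elements backed by the cache. */
	ret = mempool_init(&dev->pool, 4, mempool_alloc_slab,
			   mempool_free_slab, dev->cache);
	if (ret)
		kmem_cache_destroy(dev->cache);
	return ret;
}

static void my_dev_teardown(struct my_dev *dev)
{
	mempool_exit(&dev->pool);
	kmem_cache_destroy(dev->cache);
}
#endif
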
/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
 * functions might sleep - as long as the mempool_alloc() function is not called
 * from IRQ contexts.
 *
 * Return: pointer to the created memory pool object or %NULL on error.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
			  mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
				   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);

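/*
 * Usage sketch (illustrative only; 'cache' is a hypothetical kmem_cache):
 * a slab-backed pool created with the generic constructor. The
 * mempool_create_slab_pool() helper from <linux/mempool.h> wraps exactly
 * this call.
 */
#if 0	/* example, not compiled */
	mempool_t *pool;

	pool = mempool_create(4, mempool_alloc_slab, mempool_free_slab, cache);
	if (!pool)
		return -ENOMEM;
	/* ... */
	mempool_destroy(pool);
#endif
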
mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			       mempool_free_t *free_fn, void *pool_data,
			       gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;

	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
	if (!pool)
		return NULL;

	if (mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data,
			      gfp_mask, node_id)) {
		kfree(pool);
		return NULL;
	}

	return pool;
}
EXPORT_SYMBOL(mempool_create_node);

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note, the caller must guarantee that no mempool_destroy is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (e.g. from IRQ contexts) while this function executes.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
	       pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);

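/*
 * Usage sketch (illustrative only; 'pool' and 'new_depth' are
 * hypothetical): growing the guaranteed reserve, e.g. after a device
 * reports a deeper queue than anticipated.
 */
#if 0	/* example, not compiled */
	if (mempool_resize(pool, new_depth))
		pr_warn("could not grow mempool reserve to %d elements\n",
			new_depth);
#endif
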
/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:     pointer to the memory pool which was allocated via
 *            mempool_create().
 * @gfp_mask: the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (It might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 *
 * Return: pointer to the allocated element or %NULL on error.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_entry_t wait;
	gfp_t gfp_temp;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_alloc(gfp_mask);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use gfp mask w/o direct reclaim or IO for the first round. If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if (gfp_temp != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}

	/* We must not sleep if !__GFP_DIRECT_RECLAIM */
	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule(). The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);

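/*
 * Usage sketch (illustrative only; 'pool' is hypothetical): the typical
 * round trip. With __GFP_DIRECT_RECLAIM set in the mask (as in GFP_NOIO
 * or GFP_KERNEL), the call sleeps until an element is available rather
 * than returning NULL.
 */
#if 0	/* example, not compiled */
	void *buf;

	buf = mempool_alloc(pool, GFP_NOIO);	/* cannot fail, may sleep */
	/* ... use buf, typically across an I/O operation ... */
	mempool_free(buf, pool);
#endif
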
/**
 * mempool_alloc_preallocated - allocate an element from preallocated elements
 *                              belonging to a specific memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 *
 * This function is similar to mempool_alloc(), but it only attempts to
 * allocate an element from the preallocated elements. It does not sleep and
 * immediately returns if no preallocated elements are available.
 *
 * Return: pointer to the allocated element or %NULL if no elements are
 * available.
 */
void *mempool_alloc_preallocated(mempool_t *pool)
{
	void *element;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return NULL;
}
EXPORT_SYMBOL(mempool_alloc_preallocated);

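/*
 * Usage sketch (illustrative only; 'pool' and the fallback are
 * hypothetical): taking an element in a context that must not sleep.
 */
#if 0	/* example, not compiled */
	void *elem = mempool_alloc_preallocated(pool);

	if (!elem)
		return -EBUSY;	/* hypothetical caller-defined fallback */
#endif
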
/**
 * mempool_free - return an element to the pool.
 * @element: pool element pointer.
 * @pool:    pointer to the memory pool which was allocated via
 *           mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc(). The preceding read is
	 * for @element and the following @pool->curr_nr. This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element. This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);". This function
	 * may end up using curr_nr value which is from before allocation
	 * of @p without the following rmb.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr. Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards. If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 */
	if (unlikely(READ_ONCE(pool->curr_nr) < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	VM_BUG_ON(mem->ctor);
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);

/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;
	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);

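/*
 * Usage sketch (illustrative only): a pool of 128-byte kmalloc buffers.
 * The mempool_create_kmalloc_pool() helper from <linux/mempool.h> wraps
 * this pattern; the buffer size travels in pool_data as an integer cast
 * to a pointer.
 */
#if 0	/* example, not compiled */
	mempool_t *pool = mempool_create(8, mempool_kmalloc, mempool_kfree,
					 (void *)(size_t)128);
#endif
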
void *mempool_kvmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;
	return kvmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kvmalloc);

void mempool_kvfree(void *element, void *pool_data)
{
	kvfree(element);
}
EXPORT_SYMBOL(mempool_kvfree);

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;
	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;
	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
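/*
 * Usage sketch (illustrative only): a pool of order-1 (two-page) blocks.
 * The mempool_create_page_pool() helper from <linux/mempool.h> wraps this
 * pattern; the page order travels in pool_data.
 */
#if 0	/* example, not compiled */
	mempool_t *pool = mempool_create(4, mempool_alloc_pages,
					 mempool_free_pages, (void *)(long)1);
	struct page *page = mempool_alloc(pool, GFP_NOIO);
	/* ... use the pages ... */
	mempool_free(page, pool);
#endif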