/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks.  This
 * is implemented by using atomic operations and retries on any
 * conflicts.  The disadvantage is that there may be livelocks in
 * extreme cases.  For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available.  If new memory is added to the pool, a lock still has
 * to be taken.  So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers.  So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
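/*
 * Example (illustrative sketch, not part of the kernel sources): a
 * typical pool lifecycle.  The sram_vaddr/sram_size values below are
 * hypothetical.
 *
 *	struct gen_pool *pool;
 *
 *	pool = gen_pool_create(5, -1);		// 32-byte granularity
 *	if (!pool)
 *		return -ENOMEM;
 *	// publish a chunk of special memory to the pool
 *	if (gen_pool_add(pool, sram_vaddr, sram_size, -1)) {
 *		gen_pool_destroy(pool);
 *		return -ENOMEM;
 *	}
 *	addr = gen_pool_alloc(pool, 256);	// lockless fast path
 *	...
 *	gen_pool_free(pool, addr, 256);
 *	gen_pool_destroy(pool);	// all allocations must be freed first
 */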
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>
#include <linux/vmalloc.h>
static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
	return chunk->end_addr - chunk->start_addr + 1;
}
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}
static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}
/**
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users set the same bit, one user will return the number of remaining
 * bits, otherwise return 0.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_set >= 0) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}
/**
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users clear the same bit, one user will return the number of remaining
 * bits, otherwise return 0.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_clear >= 0) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}
/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
		pool->name = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);
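/*
 * Example (illustrative, hypothetical values): @min_alloc_order trades
 * bitmap size against granularity.  With order 6, one bitmap bit covers
 * 64 bytes, so every allocation is rounded up to a multiple of 64 bytes.
 *
 *	struct gen_pool *pool = gen_pool_create(6, NUMA_NO_NODE);
 *
 *	if (!pool)
 *		return -ENOMEM;
 */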
/**
 * gen_pool_add_owner - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 * @owner: private data the publisher would like to recall at alloc time
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		 size_t size, int nid, void *owner)
{
	struct gen_pool_chunk *chunk;
	int nbits = size >> pool->min_alloc_order;
	int nbytes = sizeof(struct gen_pool_chunk) +
				BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = vzalloc_node(nbytes, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size - 1;
	chunk->owner = owner;
	atomic_long_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_owner);
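/*
 * Example (illustrative sketch): publishing a chunk of device SRAM whose
 * physical and virtual addresses differ.  The gen_pool_add_virt() wrapper
 * from <linux/genalloc.h> forwards here with a NULL owner; the resource
 * below is hypothetical.
 *
 *	vaddr = devm_ioremap(dev, res->start, resource_size(res));
 *	rc = gen_pool_add_virt(pool, (unsigned long)vaddr, res->start,
 *			       resource_size(res), NUMA_NO_NODE);
 *	if (rc)
 *		return rc;
 */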
/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);
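/*
 * Example (illustrative): translating an allocation for a device that
 * needs the physical view of the buffer.  Assumes the chunk was added
 * with gen_pool_add_virt() so the physical address is known.
 *
 *	unsigned long vaddr = gen_pool_alloc(pool, len);
 *	phys_addr_t paddr = gen_pool_virt_to_phys(pool, vaddr);
 *
 *	if (paddr == (phys_addr_t)-1)
 *		return -EINVAL;		// not from any chunk of this pool
 */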
/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = chunk_size(chunk) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		vfree(chunk);
	}
	kfree_const(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);
/**
 * gen_pool_alloc_algo_owner - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 * @owner: optionally retrieve the chunk owner
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
		genpool_algo_t algo, void *data, void **owner)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	int nbits, start_bit, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (owner)
		*owner = NULL;

	if (size == 0)
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_long_read(&chunk->avail))
			continue;

		start_bit = 0;
		end_bit = chunk_size(chunk) >> order;
retry:
		start_bit = algo(chunk->bits, end_bit, start_bit,
				 nbits, data, pool, chunk->start_addr);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_long_sub(size, &chunk->avail);
		if (owner)
			*owner = chunk->owner;
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc_algo_owner);
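/*
 * Example (illustrative sketch): most callers use the gen_pool_alloc()
 * wrapper from <linux/genalloc.h>, which forwards here with the pool's
 * default algorithm and data:
 *
 *	unsigned long addr = gen_pool_alloc(pool, 512);
 *
 *	if (!addr)
 *		return -ENOMEM;		// pool exhausted or too fragmented
 */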
/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value.  Use NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	unsigned long vaddr;

	if (!pool)
		return NULL;

	vaddr = gen_pool_alloc(pool, size);
	if (!vaddr)
		return NULL;

	if (dma)
		*dma = gen_pool_virt_to_phys(pool, vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc);
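/*
 * Example (illustrative): carving a descriptor ring out of on-chip
 * memory and handing the DMA view to the hardware.  RING_BYTES and
 * RING_BASE_LO are hypothetical names.
 *
 *	dma_addr_t dma;
 *	void *ring = gen_pool_dma_alloc(pool, RING_BYTES, &dma);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	writel(lower_32_bits(dma), base + RING_BASE_LO);
 */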
/**
 * gen_pool_free_owner - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 * @owner: private data stashed at gen_pool_add() time
 *
 * Free previously allocated special memory back to the specified
 * pool.  Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
		void **owner)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (owner)
		*owner = NULL;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			BUG_ON(addr + size - 1 > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_long_add(size, &chunk->avail);
			if (owner)
				*owner = chunk->owner;
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free_owner);
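/*
 * Example (illustrative): the common gen_pool_free() wrapper from
 * <linux/genalloc.h> forwards here with a NULL owner.  The size must
 * match what was allocated:
 *
 *	addr = gen_pool_alloc(pool, 128);
 *	...
 *	gen_pool_free(pool, addr, 128);
 */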
/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: function to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of generic memory pool.  The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);
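/*
 * Example (illustrative sketch): a callback that logs every chunk.
 * It runs under rcu_read_lock(), so it must not sleep.
 *
 *	static void show_chunk(struct gen_pool *pool,
 *			       struct gen_pool_chunk *chunk, void *data)
 *	{
 *		pr_info("chunk %lx-%lx\n",
 *			chunk->start_addr, chunk->end_addr);
 *	}
 *
 *	gen_pool_for_each_chunk(pool, show_chunk, NULL);
 */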
/**
 * addr_in_gen_pool - checks if an address falls within the range of a pool
 * @pool: the generic memory pool
 * @start: start address
 * @size: size of the region
 *
 * Check if the range of addresses falls within the specified pool.  Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
			size_t size)
{
	bool found = false;
	unsigned long end = start + size - 1;
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
		if (start >= chunk->start_addr && start <= chunk->end_addr) {
			if (end <= chunk->end_addr) {
				found = true;
				break;
			}
		}
	}
	rcu_read_unlock();
	return found;
}
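/*
 * Example (illustrative): validating a caller-supplied region before
 * freeing it back to the pool:
 *
 *	if (!addr_in_gen_pool(pool, addr, len))
 *		return -EINVAL;
 */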
/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_long_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);
/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk_size(chunk);
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);
/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL use gen_pool_first_fit as default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
	rcu_read_lock();

	pool->algo = algo;
	if (!pool->algo)
		pool->algo = gen_pool_first_fit;

	pool->data = data;

	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);
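/*
 * Example (illustrative): switching a pool to best-fit to reduce
 * fragmentation when allocation sizes vary widely:
 *
 *	gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
 */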
/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);
/**
 * gen_pool_first_fit_align - find the first available region
 * of memory matching the size requirement (alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for alignment
 * @pool: pool to get order from
 * @start_addr: start address of the allocation chunk
 */
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	struct genpool_data_align *alignment;
	unsigned long align_mask, align_off;
	int order;

	alignment = data;
	order = pool->min_alloc_order;
	align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
	align_off = (start_addr & (alignment->align - 1)) >> order;

	return bitmap_find_next_zero_area_off(map, size, start, nr,
					      align_mask, align_off);
}
EXPORT_SYMBOL(gen_pool_first_fit_align);
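/*
 * Example (illustrative): requesting a 4 KiB-aligned region through the
 * algorithm-selecting allocator:
 *
 *	struct genpool_data_align align = { .align = SZ_4K };
 *
 *	addr = gen_pool_alloc_algo(pool, size,
 *				   gen_pool_first_fit_align, &align);
 */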
/**
 * gen_pool_fixed_alloc - reserve a specific region
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data specifying the fixed offset (struct genpool_data_fixed)
 * @pool: pool to get order from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	struct genpool_data_fixed *fixed_data;
	int order;
	unsigned long offset_bit;
	unsigned long start_bit;

	fixed_data = data;
	order = pool->min_alloc_order;
	offset_bit = fixed_data->offset >> order;
	if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
		return size;

	start_bit = bitmap_find_next_zero_area(map, size,
			start + offset_bit, nr, 0);
	if (start_bit != offset_bit)
		start_bit = size;
	return start_bit;
}
EXPORT_SYMBOL(gen_pool_fixed_alloc);
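/*
 * Example (illustrative): reserving bytes at a fixed offset inside the
 * pool, e.g. a region the hardware insists on.  The offset must be a
 * multiple of the pool's minimum allocation size:
 *
 *	struct genpool_data_fixed fixed = { .offset = 0x1000 };
 *
 *	addr = gen_pool_alloc_algo(pool, size,
 *				   gen_pool_fixed_alloc, &fixed);
 */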
/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement.  The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
		unsigned long size, unsigned long start,
		unsigned int nr, void *data, struct gen_pool *pool,
		unsigned long start_addr)
{
	unsigned long align_mask = roundup_pow_of_two(nr) - 1;

	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);
/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 *
 * Iterate over the bitmap to find the smallest free region
 * from which we can allocate the memory.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		unsigned long next_bit = find_next_bit(map, size, index + nr);
		if ((next_bit - index) < len) {
			len = next_bit - index;
			start_bit = index;
			if (len == nr)
				return start_bit;
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);
static void devm_gen_pool_release(struct device *dev, void *res)
{
	gen_pool_destroy(*(struct gen_pool **)res);
}
static int devm_gen_pool_match(struct device *dev, void *res, void *data)
{
	struct gen_pool **p = res;

	/* NULL data matches only a pool without an assigned name */
	if (!data && !(*p)->name)
		return 1;

	if (!data || !(*p)->name)
		return 0;

	return !strcmp((*p)->name, data);
}
/**
 * gen_pool_get - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *gen_pool_get(struct device *dev, const char *name)
{
	struct gen_pool **p;

	p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
			(void *)name);
	if (!p)
		return NULL;
	return *p;
}
EXPORT_SYMBOL_GPL(gen_pool_get);
/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.  The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
				      int nid, const char *name)
{
	struct gen_pool **ptr, *pool;
	const char *pool_name = NULL;

	/* Check that genpool to be created is uniquely addressed on device */
	if (gen_pool_get(dev, name))
		return ERR_PTR(-EINVAL);

	if (name) {
		pool_name = kstrdup_const(name, GFP_KERNEL);
		if (!pool_name)
			return ERR_PTR(-ENOMEM);
	}

	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		goto free_pool_name;

	pool = gen_pool_create(min_alloc_order, nid);
	if (!pool)
		goto free_devres;

	*ptr = pool;
	pool->name = pool_name;
	devres_add(dev, ptr);

	return pool;

free_devres:
	devres_free(ptr);
free_pool_name:
	kfree_const(pool_name);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(devm_gen_pool_create);
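/*
 * Example (illustrative sketch): a driver probe creating a managed pool;
 * no explicit cleanup is needed since devres destroys the pool on driver
 * detach.  The "sram" name and foo_probe() are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct gen_pool *pool;
 *
 *		pool = devm_gen_pool_create(&pdev->dev, ilog2(64),
 *					    NUMA_NO_NODE, "sram");
 *		if (IS_ERR(pool))
 *			return PTR_ERR(pool);
 *		...
 *	}
 */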
#ifdef CONFIG_OF
/**
 * of_gen_pool_get - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_gen_pool_get(struct device_node *np,
	const char *propname, int index)
{
	struct platform_device *pdev;
	struct device_node *np_pool, *parent;
	const char *name = NULL;
	struct gen_pool *pool = NULL;

	np_pool = of_parse_phandle(np, propname, index);
	if (!np_pool)
		return NULL;

	pdev = of_find_device_by_node(np_pool);
	if (!pdev) {
		/* Check if named gen_pool is created by parent node device */
		parent = of_get_parent(np_pool);
		pdev = of_find_device_by_node(parent);
		of_node_put(parent);

		of_property_read_string(np_pool, "label", &name);
		if (!name)
			name = np_pool->name;
	}
	if (pdev)
		pool = gen_pool_get(&pdev->dev, name);
	of_node_put(np_pool);

	return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
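/*
 * Example (illustrative): looking up a pool referenced from the device
 * tree.  The node layout and property name below are hypothetical:
 *
 *	// sram: sram@40000000 { ... };
 *	// foo { sram = <&sram>; };
 *
 *	struct gen_pool *pool = of_gen_pool_get(dev->of_node, "sram", 0);
 *
 *	if (!pool)
 *		return -EPROBE_DEFER;
 */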
#endif /* CONFIG_OF */