// SPDX-License-Identifier: GPL-2.0-only
/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface. Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks. This
 * is implemented by using atomic operations and retries on any
 * conflicts. The disadvantage is that there may be livelocks in
 * extreme cases. For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available. If new memory is added to the pool, a lock still has
 * to be taken. So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers. So code that uses the
 * allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>
#include <linux/vmalloc.h>

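/*
 * Illustrative usage sketch (not part of the kernel-doc below): a typical
 * lifecycle of a pool built on top of a driver-owned memory region.  The
 * identifiers "sram_virt", "SRAM_SIZE" and "mydev_pool" are hypothetical
 * placeholders, not symbols defined by this file.
 *
 *      struct gen_pool *mydev_pool;
 *      unsigned long buf;
 *
 *      mydev_pool = gen_pool_create(ilog2(32), -1);    // 32-byte granules
 *      if (!mydev_pool)
 *              return -ENOMEM;
 *      if (gen_pool_add(mydev_pool, sram_virt, SRAM_SIZE, -1))
 *              return -ENOMEM;
 *
 *      buf = gen_pool_alloc(mydev_pool, 256);          // returns 0 on failure
 *      ...
 *      gen_pool_free(mydev_pool, buf, 256);
 *      gen_pool_destroy(mydev_pool);   // all allocations must be freed first
 */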
static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
        return chunk->end_addr - chunk->start_addr + 1;
}

static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
        unsigned long val, nval;

        nval = *addr;
        do {
                val = nval;
                if (val & mask_to_set)
                        return -EBUSY;
                cpu_relax();
        } while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

        return 0;
}

static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
        unsigned long val, nval;

        nval = *addr;
        do {
                val = nval;
                if ((val & mask_to_clear) != mask_to_clear)
                        return -EBUSY;
                cpu_relax();
        } while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

        return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users set the same bit, one of them will get back the number of bits
 * remaining to be set; otherwise 0 is returned.
 */
static unsigned long
bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
        unsigned long *p = map + BIT_WORD(start);
        const unsigned long size = start + nr;
        int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

        while (nr >= bits_to_set) {
                if (set_bits_ll(p, mask_to_set))
                        return nr;
                nr -= bits_to_set;
                bits_to_set = BITS_PER_LONG;
                mask_to_set = ~0UL;
                p++;
        }
        if (nr) {
                mask_to_set &= BITMAP_LAST_WORD_MASK(size);
                if (set_bits_ll(p, mask_to_set))
                        return nr;
        }

        return 0;
}

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users clear the same bit, one of them will get back the number of bits
 * remaining to be cleared; otherwise 0 is returned.
 */
static unsigned long
bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
        unsigned long *p = map + BIT_WORD(start);
        const unsigned long size = start + nr;
        int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

        while (nr >= bits_to_clear) {
                if (clear_bits_ll(p, mask_to_clear))
                        return nr;
                nr -= bits_to_clear;
                bits_to_clear = BITS_PER_LONG;
                mask_to_clear = ~0UL;
                p++;
        }
        if (nr) {
                mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
                if (clear_bits_ll(p, mask_to_clear))
                        return nr;
        }

        return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
        struct gen_pool *pool;

        pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
        if (pool != NULL) {
                spin_lock_init(&pool->lock);
                INIT_LIST_HEAD(&pool->chunks);
                pool->min_alloc_order = min_alloc_order;
                pool->algo = gen_pool_first_fit;
                pool->data = NULL;
                pool->name = NULL;
        }
        return pool;
}
EXPORT_SYMBOL(gen_pool_create);

/**
 * gen_pool_add_owner - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 * @owner: private data the publisher would like to recall at alloc time
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
                size_t size, int nid, void *owner)
{
        struct gen_pool_chunk *chunk;
        unsigned long nbits = size >> pool->min_alloc_order;
        unsigned long nbytes = sizeof(struct gen_pool_chunk) +
                                BITS_TO_LONGS(nbits) * sizeof(long);

        chunk = vzalloc_node(nbytes, nid);
        if (unlikely(chunk == NULL))
                return -ENOMEM;

        chunk->phys_addr = phys;
        chunk->start_addr = virt;
        chunk->end_addr = virt + size - 1;
        chunk->owner = owner;
        atomic_long_set(&chunk->avail, size);

        spin_lock(&pool->lock);
        list_add_rcu(&chunk->next_chunk, &pool->chunks);
        spin_unlock(&pool->lock);

        return 0;
}
EXPORT_SYMBOL(gen_pool_add_owner);
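/*
 * Illustrative sketch: registering a chunk that also has a known physical
 * address, so that gen_pool_virt_to_phys() can be used later.  "sram_virt",
 * "sram_phys" and "SRAM_SIZE" are hypothetical placeholders supplied by the
 * caller, typically from ioremap()/resource data.
 *
 *      ret = gen_pool_add_virt(pool, (unsigned long)sram_virt, sram_phys,
 *                              SRAM_SIZE, -1);
 *      if (ret)
 *              return ret;
 */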

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
        struct gen_pool_chunk *chunk;
        phys_addr_t paddr = -1;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
                        paddr = chunk->phys_addr + (addr - chunk->start_addr);
                        break;
                }
        }
        rcu_read_unlock();

        return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);
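/*
 * Illustrative sketch, continuing the gen_pool_add_virt() example above:
 * translating an allocation back to its physical address (for example to
 * program a device register).  "vaddr" is a hypothetical address returned
 * by gen_pool_alloc().
 *
 *      phys_addr_t pa = gen_pool_virt_to_phys(pool, vaddr);
 *
 *      if (pa == (phys_addr_t)-1)
 *              return -EINVAL;         // address was not from this pool
 */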

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
        struct list_head *_chunk, *_next_chunk;
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        unsigned long bit, end_bit;

        list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
                list_del(&chunk->next_chunk);

                end_bit = chunk_size(chunk) >> order;
                bit = find_next_bit(chunk->bits, end_bit, 0);
                BUG_ON(bit < end_bit);

                vfree(chunk);
        }
        kfree_const(pool->name);
        kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc_algo_owner - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 * @owner: optionally retrieve the chunk owner
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the given pool allocation function (@algo).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 *
 * Return: starting address of the allocated memory, or 0 on failure.
 */
unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
                genpool_algo_t algo, void *data, void **owner)
{
        struct gen_pool_chunk *chunk;
        unsigned long addr = 0;
        int order = pool->min_alloc_order;
        unsigned long nbits, start_bit, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
#endif

        if (owner)
                *owner = NULL;

        if (size == 0)
                return 0;

        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (size > atomic_long_read(&chunk->avail))
                        continue;

                start_bit = 0;
                end_bit = chunk_size(chunk) >> order;
retry:
                start_bit = algo(chunk->bits, end_bit, start_bit,
                                 nbits, data, pool, chunk->start_addr);
                if (start_bit >= end_bit)
                        continue;
                remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
                if (remain) {
                        remain = bitmap_clear_ll(chunk->bits, start_bit,
                                                 nbits - remain);
                        BUG_ON(remain);
                        goto retry;
                }

                addr = chunk->start_addr + ((unsigned long)start_bit << order);
                size = nbits << order;
                atomic_long_sub(size, &chunk->avail);
                if (owner)
                        *owner = chunk->owner;
                break;
        }
        rcu_read_unlock();
        return addr;
}
EXPORT_SYMBOL(gen_pool_alloc_algo_owner);
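/*
 * Illustrative sketch: allocating through the owner-aware interface to get
 * back the private pointer that was stashed with gen_pool_add_owner().  The
 * call below simply reuses the pool's configured algorithm and data; "owner"
 * would typically point at per-chunk driver data.
 *
 *      void *owner;
 *      unsigned long addr;
 *
 *      addr = gen_pool_alloc_algo_owner(pool, 128, pool->algo, pool->data,
 *                                       &owner);
 *      if (!addr)
 *              return -ENOMEM;
 */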

/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
        return gen_pool_dma_alloc_algo(pool, size, dma, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_dma_alloc);
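/*
 * Illustrative sketch: carving a DMA buffer out of the pool and handing the
 * bus address to hardware.  This assumes the chunk was registered with a
 * valid physical address (e.g. via gen_pool_add_virt()); "desc_ring",
 * "RING_BYTES", "base" and "RING_BASE_REG" are hypothetical.
 *
 *      dma_addr_t ring_dma;
 *      void *desc_ring;
 *
 *      desc_ring = gen_pool_dma_alloc(pool, RING_BYTES, &ring_dma);
 *      if (!desc_ring)
 *              return -ENOMEM;
 *      writel(lower_32_bits(ring_dma), base + RING_BASE_REG);
 */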

/**
 * gen_pool_dma_alloc_algo - allocate special memory from the pool for DMA
 * usage with the given pool algorithm
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of bytes from the specified pool. Uses the
 * given pool allocation function. Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
                dma_addr_t *dma, genpool_algo_t algo, void *data)
{
        unsigned long vaddr;

        if (!pool)
                return NULL;

        vaddr = gen_pool_alloc_algo(pool, size, algo, data);
        if (!vaddr)
                return NULL;

        if (dma)
                *dma = gen_pool_virt_to_phys(pool, vaddr);

        return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc_algo);

/**
 * gen_pool_dma_alloc_align - allocate special memory from the pool for DMA
 * usage with the given alignment
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @align: alignment in bytes for starting address
 *
 * Allocate the requested number of bytes from the specified pool, with the
 * given alignment restriction. Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
                dma_addr_t *dma, int align)
{
        struct genpool_data_align data = { .align = align };

        return gen_pool_dma_alloc_algo(pool, size, dma,
                        gen_pool_first_fit_align, &data);
}
EXPORT_SYMBOL(gen_pool_dma_alloc_align);
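/*
 * Illustrative sketch: some devices require naturally aligned buffers; the
 * 4 KiB size and alignment below are example values, not requirements of
 * this API.
 *
 *      dma_addr_t fifo_dma;
 *      void *fifo;
 *
 *      fifo = gen_pool_dma_alloc_align(pool, SZ_4K, &fifo_dma, SZ_4K);
 *      if (!fifo)
 *              return -ENOMEM;
 */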

/**
 * gen_pool_dma_zalloc - allocate special zeroed memory from the pool for
 * DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 *
 * Allocate the requested number of zeroed bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
        return gen_pool_dma_zalloc_algo(pool, size, dma, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_dma_zalloc);

/**
 * gen_pool_dma_zalloc_algo - allocate special zeroed memory from the pool for
 * DMA usage with the given pool algorithm
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of zeroed bytes from the specified pool. Uses
 * the given pool allocation function. Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
                dma_addr_t *dma, genpool_algo_t algo, void *data)
{
        void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data);

        if (vaddr)
                memset(vaddr, 0, size);

        return vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_zalloc_algo);

/**
 * gen_pool_dma_zalloc_align - allocate special zeroed memory from the pool for
 * DMA usage with the given alignment
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @align: alignment in bytes for starting address
 *
 * Allocate the requested number of zeroed bytes from the specified pool,
 * with the given alignment restriction. Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
                dma_addr_t *dma, int align)
{
        struct genpool_data_align data = { .align = align };

        return gen_pool_dma_zalloc_algo(pool, size, dma,
                        gen_pool_first_fit_align, &data);
}
EXPORT_SYMBOL(gen_pool_dma_zalloc_align);

/**
 * gen_pool_free_owner - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 * @owner: private data stashed at gen_pool_add() time
 *
 * Free previously allocated special memory back to the specified
 * pool. Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
                void **owner)
{
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        unsigned long start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
#endif

        if (owner)
                *owner = NULL;

        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
                        BUG_ON(addr + size - 1 > chunk->end_addr);
                        start_bit = (addr - chunk->start_addr) >> order;
                        remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
                        BUG_ON(remain);
                        size = nbits << order;
                        atomic_long_add(size, &chunk->avail);
                        if (owner)
                                *owner = chunk->owner;
                        rcu_read_unlock();
                        return;
                }
        }
        rcu_read_unlock();
        BUG();
}
EXPORT_SYMBOL(gen_pool_free_owner);
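/*
 * Illustrative sketch: freeing must pass the same size that was allocated,
 * because the pool does not remember allocation sizes.  "buf" and
 * "BUF_BYTES" are hypothetical values from an earlier gen_pool_alloc() call.
 *
 *      gen_pool_free(pool, buf, BUF_BYTES);
 */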

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of the generic memory pool. @func is
 * called with rcu_read_lock() held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
        void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
        void *data)
{
        struct gen_pool_chunk *chunk;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
                func(pool, chunk, data);
        rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);
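/*
 * Illustrative sketch: summing the size of every chunk with a callback.  The
 * "count_chunk" callback and the "total" accumulator are hypothetical; note
 * that the callback runs under rcu_read_lock() and therefore must not sleep.
 *
 *      static void count_chunk(struct gen_pool *pool,
 *                              struct gen_pool_chunk *chunk, void *data)
 *      {
 *              *(size_t *)data += chunk->end_addr - chunk->start_addr + 1;
 *      }
 *
 *      size_t total = 0;
 *      gen_pool_for_each_chunk(pool, count_chunk, &total);
 */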

/**
 * gen_pool_has_addr - checks if an address falls within the range of a pool
 * @pool: the generic memory pool
 * @start: start address
 * @size: size of the region
 *
 * Check if the range of addresses falls within the specified pool. Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool gen_pool_has_addr(struct gen_pool *pool, unsigned long start,
                        size_t size)
{
        bool found = false;
        unsigned long end = start + size - 1;
        struct gen_pool_chunk *chunk;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
                if (start >= chunk->start_addr && start <= chunk->end_addr) {
                        if (end <= chunk->end_addr) {
                                found = true;
                                break;
                        }
                }
        }
        rcu_read_unlock();
        return found;
}
EXPORT_SYMBOL(gen_pool_has_addr);

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        size_t avail = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                avail += atomic_long_read(&chunk->avail);
        rcu_read_unlock();
        return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        size_t size = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                size += chunk_size(chunk);
        rcu_read_unlock();
        return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);
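/*
 * Illustrative sketch: reporting pool occupancy, for example from a debugfs
 * show routine (the printing context here is hypothetical).
 *
 *      pr_debug("pool: %zu of %zu bytes free\n",
 *               gen_pool_avail(pool), gen_pool_size(pool));
 */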

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL, gen_pool_first_fit is used as the default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
        rcu_read_lock();

        pool->algo = algo;
        if (!pool->algo)
                pool->algo = gen_pool_first_fit;

        pool->data = data;

        rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);
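/*
 * Illustrative sketch: switching a pool to the best-fit strategy right after
 * creating it, so subsequent gen_pool_alloc() calls prefer the smallest
 * suitable free region.
 *
 *      gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
 */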

/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data,
                struct gen_pool *pool, unsigned long start_addr)
{
        return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_first_fit_align - find the first available region
 * of memory matching the size requirement (alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for alignment
 * @pool: pool to get order from
 * @start_addr: start address of the allocation chunk
 */
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data,
                struct gen_pool *pool, unsigned long start_addr)
{
        struct genpool_data_align *alignment;
        unsigned long align_mask, align_off;
        int order;

        alignment = data;
        order = pool->min_alloc_order;
        align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
        align_off = (start_addr & (alignment->align - 1)) >> order;

        return bitmap_find_next_zero_area_off(map, size, start, nr,
                                              align_mask, align_off);
}
EXPORT_SYMBOL(gen_pool_first_fit_align);

/**
 * gen_pool_fixed_alloc - reserve a specific region
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for the fixed region request (struct genpool_data_fixed)
 * @pool: pool to get order from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data,
                struct gen_pool *pool, unsigned long start_addr)
{
        struct genpool_data_fixed *fixed_data;
        int order;
        unsigned long offset_bit;
        unsigned long start_bit;

        fixed_data = data;
        order = pool->min_alloc_order;
        offset_bit = fixed_data->offset >> order;
        if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
                return size;

        start_bit = bitmap_find_next_zero_area(map, size,
                        start + offset_bit, nr, 0);
        if (start_bit != offset_bit)
                start_bit = size;
        return start_bit;
}
EXPORT_SYMBOL(gen_pool_fixed_alloc);
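/*
 * Illustrative sketch: claiming a region at a fixed offset within a pool
 * chunk, for instance a firmware-mandated location.  The offset value is a
 * hypothetical example and must be a multiple of the pool's minimum
 * allocation granule.
 *
 *      struct genpool_data_fixed fixed = { .offset = 0x100 };
 *      unsigned long addr;
 *
 *      addr = gen_pool_alloc_algo(pool, 64, gen_pool_fixed_alloc, &fixed);
 *      if (!addr)
 *              return -ENOMEM;
 */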

/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement. The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
                unsigned long size, unsigned long start,
                unsigned int nr, void *data, struct gen_pool *pool,
                unsigned long start_addr)
{
        unsigned long align_mask = roundup_pow_of_two(nr) - 1;

        return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 *
 * Iterate over the bitmap to find the smallest free region
 * in which the requested memory can be allocated.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data,
                struct gen_pool *pool, unsigned long start_addr)
{
        unsigned long start_bit = size;
        unsigned long len = size + 1;
        unsigned long index;

        index = bitmap_find_next_zero_area(map, size, start, nr, 0);

        while (index < size) {
                unsigned long next_bit = find_next_bit(map, size, index + nr);
                if ((next_bit - index) < len) {
                        len = next_bit - index;
                        start_bit = index;
                        if (len == nr)
                                return start_bit;
                }
                index = bitmap_find_next_zero_area(map, size,
                                                   next_bit + 1, nr, 0);
        }

        return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);

static void devm_gen_pool_release(struct device *dev, void *res)
{
        gen_pool_destroy(*(struct gen_pool **)res);
}

static int devm_gen_pool_match(struct device *dev, void *res, void *data)
{
        struct gen_pool **p = res;

        /* NULL data matches only a pool without an assigned name */
        if (!data && !(*p)->name)
                return 1;

        if (!data || !(*p)->name)
                return 0;

        return !strcmp((*p)->name, data);
}

/**
 * gen_pool_get - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *gen_pool_get(struct device *dev, const char *name)
{
        struct gen_pool **p;

        p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
                        (void *)name);
        if (!p)
                return NULL;
        return *p;
}
EXPORT_SYMBOL_GPL(gen_pool_get);

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface. The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
                                      int nid, const char *name)
{
        struct gen_pool **ptr, *pool;
        const char *pool_name = NULL;

        /* Check that genpool to be created is uniquely addressed on device */
        if (gen_pool_get(dev, name))
                return ERR_PTR(-EINVAL);

        if (name) {
                pool_name = kstrdup_const(name, GFP_KERNEL);
                if (!pool_name)
                        return ERR_PTR(-ENOMEM);
        }

        ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                goto free_pool_name;

        pool = gen_pool_create(min_alloc_order, nid);
        if (!pool)
                goto free_devres;

        *ptr = pool;
        pool->name = pool_name;
        devres_add(dev, ptr);

        return pool;

free_devres:
        devres_free(ptr);
free_pool_name:
        kfree_const(pool_name);

        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(devm_gen_pool_create);
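/*
 * Illustrative sketch, e.g. from a platform driver's probe() routine: the
 * pool is torn down automatically when the device is unbound, so no explicit
 * gen_pool_destroy() is needed.  "sram_virt" and "SRAM_SIZE" are hypothetical.
 *
 *      struct gen_pool *pool;
 *
 *      pool = devm_gen_pool_create(&pdev->dev, ilog2(64), NUMA_NO_NODE, "sram");
 *      if (IS_ERR(pool))
 *              return PTR_ERR(pool);
 *      if (gen_pool_add(pool, (unsigned long)sram_virt, SRAM_SIZE, NUMA_NO_NODE))
 *              return -ENOMEM;
 */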

#ifdef CONFIG_OF
/**
 * of_gen_pool_get - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_gen_pool_get(struct device_node *np,
        const char *propname, int index)
{
        struct platform_device *pdev;
        struct device_node *np_pool, *parent;
        const char *name = NULL;
        struct gen_pool *pool = NULL;

        np_pool = of_parse_phandle(np, propname, index);
        if (!np_pool)
                return NULL;

        pdev = of_find_device_by_node(np_pool);
        if (!pdev) {
                /* Check if named gen_pool is created by parent node device */
                parent = of_get_parent(np_pool);
                pdev = of_find_device_by_node(parent);
                of_node_put(parent);

                of_property_read_string(np_pool, "label", &name);
                if (!name)
                        name = np_pool->name;
        }
        if (pdev)
                pool = gen_pool_get(&pdev->dev, name);
        of_node_put(np_pool);

        return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
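/*
 * Illustrative sketch: looking up a pool that another node advertises via a
 * phandle.  The property name "sram" is a hypothetical binding, not one
 * defined here.
 *
 *      struct gen_pool *pool;
 *
 *      pool = of_gen_pool_get(dev->of_node, "sram", 0);
 *      if (!pool)
 *              return -EPROBE_DEFER;
 */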
#endif /* CONFIG_OF */