// SPDX-License-Identifier: GPL-2.0-only
/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface. Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks. This
 * is implemented by using atomic operations and retries on any
 * conflicts. The disadvantage is that there may be livelocks in
 * extreme cases. For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available. If new memory is added to the pool, a lock still has to
 * be taken. So any user relying on locklessness has to ensure that
 * sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers. So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>
#include <linux/vmalloc.h>
static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
	return chunk->end_addr - chunk->start_addr + 1;
}
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}
static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}
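/*
 * Usage sketch (illustrative, not part of the original file): the two
 * helpers above implement a lock-free test-and-set/test-and-clear under
 * a mask on a single bitmap word. A hypothetical caller claiming bits
 * 0-3 of a word could look like this:
 *
 *	unsigned long word = 0;
 *
 *	if (set_bits_ll(&word, 0xfUL))
 *		;	// -EBUSY: some of bits 0-3 were already set
 *	clear_bits_ll(&word, 0xfUL);	// release the same bits again
 */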
/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map lock-lessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users set the same bit, one user will return the number of remaining
 * bits, otherwise return 0.
 */
static int bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const unsigned long size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr >= bits_to_set) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}
/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map lock-lessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users clear the same bit, one user will return the number of remaining
 * bits, otherwise return 0.
 */
static int bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const unsigned long size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr >= bits_to_clear) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}
/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
		pool->name = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);
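/*
 * Usage sketch (illustrative only; the granularity below is
 * hypothetical): a driver typically creates a pool once and keeps it
 * for the lifetime of the device.
 *
 *	struct gen_pool *pool;
 *
 *	pool = gen_pool_create(5, -1);	// 2^5 = 32-byte granularity, any node
 *	if (!pool)
 *		return -ENOMEM;
 */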
/**
 * gen_pool_add_owner - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 * @owner: private data the publisher would like to recall at alloc time
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		 size_t size, int nid, void *owner)
{
	struct gen_pool_chunk *chunk;
	unsigned long nbits = size >> pool->min_alloc_order;
	unsigned long nbytes = sizeof(struct gen_pool_chunk) +
				BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = vzalloc_node(nbytes, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size - 1;
	chunk->owner = owner;
	atomic_long_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_owner);
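/*
 * Usage sketch (illustrative; assumes a pool created as above and a
 * hypothetical on-chip SRAM region): callers that don't need the owner
 * cookie normally go through the gen_pool_add() or gen_pool_add_virt()
 * wrappers from <linux/genalloc.h>.
 *
 *	// publish 64 KiB of SRAM; vaddr is its ioremapped address
 *	ret = gen_pool_add_virt(pool, (unsigned long)vaddr, sram_phys,
 *				SZ_64K, -1);
 */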
/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);
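/*
 * Usage sketch (illustrative): translate an address handed out by
 * gen_pool_alloc() into the physical address a device register expects;
 * "vaddr" is assumed to come from a prior allocation from @pool.
 *
 *	phys_addr_t paddr = gen_pool_virt_to_phys(pool, vaddr);
 */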
/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	unsigned long bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = chunk_size(chunk) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		vfree(chunk);
	}
	kfree_const(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);
/**
 * gen_pool_alloc_algo_owner - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 * @owner: optionally retrieve the chunk owner
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
		genpool_algo_t algo, void *data, void **owner)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	unsigned long nbits, start_bit, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (owner)
		*owner = NULL;

	if (size == 0)
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_long_read(&chunk->avail))
			continue;

		start_bit = 0;
		end_bit = chunk_size(chunk) >> order;
retry:
		start_bit = algo(chunk->bits, end_bit, start_bit,
				 nbits, data, pool, chunk->start_addr);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_long_sub(size, &chunk->avail);
		if (owner)
			*owner = chunk->owner;
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc_algo_owner);
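/*
 * Usage sketch (illustrative): most callers use the gen_pool_alloc()
 * wrapper from <linux/genalloc.h>, which passes the pool's default
 * algorithm, and release memory with gen_pool_free().
 *
 *	unsigned long vaddr;
 *
 *	vaddr = gen_pool_alloc(pool, 256);
 *	if (!vaddr)
 *		return -ENOMEM;
 *	// ... use the 256 bytes at vaddr ...
 *	gen_pool_free(pool, vaddr, 256);
 */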
/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value. Use %NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	return gen_pool_dma_alloc_algo(pool, size, dma, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_dma_alloc);
/**
 * gen_pool_dma_alloc_algo - allocate special memory from the pool for DMA
 * usage with the given pool algorithm
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of bytes from the specified pool. Uses the
 * given pool allocation function. Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, genpool_algo_t algo, void *data)
{
	unsigned long vaddr;

	if (!pool)
		return NULL;

	vaddr = gen_pool_alloc_algo(pool, size, algo, data);
	if (!vaddr)
		return NULL;

	if (dma)
		*dma = gen_pool_virt_to_phys(pool, vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc_algo);
/**
 * gen_pool_dma_alloc_align - allocate special memory from the pool for DMA
 * usage with the given alignment
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @align: alignment in bytes for starting address
 *
 * Allocate the requested number of bytes from the specified pool, with the
 * given alignment restriction. Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, int align)
{
	struct genpool_data_align data = { .align = align };

	return gen_pool_dma_alloc_algo(pool, size, dma,
			gen_pool_first_fit_align, &data);
}
EXPORT_SYMBOL(gen_pool_dma_alloc_align);
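/*
 * Usage sketch (illustrative; the sizes and 64-byte alignment are
 * hypothetical): allocate a buffer whose DMA-view address is returned
 * alongside the virtual address.
 *
 *	dma_addr_t dma;
 *	void *buf;
 *
 *	buf = gen_pool_dma_alloc_align(pool, 512, &dma, 64);
 *	if (!buf)
 *		return -ENOMEM;
 */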
/**
 * gen_pool_dma_zalloc - allocate special zeroed memory from the pool for
 * DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value. Use %NULL if unneeded.
 *
 * Allocate the requested number of zeroed bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	return gen_pool_dma_zalloc_algo(pool, size, dma, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_dma_zalloc);
/**
 * gen_pool_dma_zalloc_algo - allocate special zeroed memory from the pool for
 * DMA usage with the given pool algorithm
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of zeroed bytes from the specified pool. Uses
 * the given pool allocation function. Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, genpool_algo_t algo, void *data)
{
	void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data);

	if (vaddr)
		memset(vaddr, 0, size);

	return vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_zalloc_algo);
/**
 * gen_pool_dma_zalloc_align - allocate special zeroed memory from the pool for
 * DMA usage with the given alignment
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @align: alignment in bytes for starting address
 *
 * Allocate the requested number of zeroed bytes from the specified pool,
 * with the given alignment restriction. Cannot be used in an NMI handler
 * on architectures without an NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, int align)
{
	struct genpool_data_align data = { .align = align };

	return gen_pool_dma_zalloc_algo(pool, size, dma,
			gen_pool_first_fit_align, &data);
}
EXPORT_SYMBOL(gen_pool_dma_zalloc_align);
/**
 * gen_pool_free_owner - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 * @owner: private data stashed at gen_pool_add() time
 *
 * Free previously allocated special memory back to the specified
 * pool. Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
		void **owner)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	unsigned long start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (owner)
		*owner = NULL;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			BUG_ON(addr + size - 1 > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_long_add(size, &chunk->avail);
			if (owner)
				*owner = chunk->owner;
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free_owner);
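/*
 * Usage sketch (illustrative): the @owner cookie stashed at
 * gen_pool_add_owner() time can be recovered when freeing, e.g. to drop
 * a reference taken when the chunk was published. The owner protocol
 * below is hypothetical.
 *
 *	void *owner;
 *
 *	gen_pool_free_owner(pool, vaddr, 256, &owner);
 *	if (owner)
 *		put_device(owner);	// hypothetical owner convention
 */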
/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of generic memory pool. The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);
/**
 * gen_pool_has_addr - checks if an address falls within the range of a pool
 * @pool: the generic memory pool
 * @start: start address
 * @size: size of the region
 *
 * Check if the range of addresses falls within the specified pool. Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool gen_pool_has_addr(struct gen_pool *pool, unsigned long start,
			size_t size)
{
	bool found = false;
	unsigned long end = start + size - 1;
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
		if (start >= chunk->start_addr && start <= chunk->end_addr) {
			if (end <= chunk->end_addr) {
				found = true;
				break;
			}
		}
	}
	rcu_read_unlock();
	return found;
}
EXPORT_SYMBOL(gen_pool_has_addr);
/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_long_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);
/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk_size(chunk);
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);
/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL use gen_pool_first_fit as default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
	rcu_read_lock();

	pool->algo = algo;
	if (!pool->algo)
		pool->algo = gen_pool_first_fit;

	pool->data = data;

	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);
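/*
 * Usage sketch (illustrative): switch an existing pool to the best-fit
 * search, preferably before any allocations are made from it.
 *
 *	gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
 */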
/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);
/**
 * gen_pool_first_fit_align - find the first available region
 * of memory matching the size requirement (alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for alignment
 * @pool: pool to get order from
 */
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	struct genpool_data_align *alignment;
	unsigned long align_mask, align_off;
	int order;

	alignment = data;
	order = pool->min_alloc_order;
	align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
	align_off = (start_addr & (alignment->align - 1)) >> order;

	return bitmap_find_next_zero_area_off(map, size, start, nr,
					      align_mask, align_off);
}
EXPORT_SYMBOL(gen_pool_first_fit_align);
/**
 * gen_pool_fixed_alloc - reserve a specific region
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for alignment
 * @pool: pool to get order from
 */
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	struct genpool_data_fixed *fixed_data;
	int order;
	unsigned long offset_bit;
	unsigned long start_bit;

	fixed_data = data;
	order = pool->min_alloc_order;
	offset_bit = fixed_data->offset >> order;
	if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
		return size;

	start_bit = bitmap_find_next_zero_area(map, size,
			start + offset_bit, nr, 0);
	if (start_bit != offset_bit)
		start_bit = size;
	return start_bit;
}
EXPORT_SYMBOL(gen_pool_fixed_alloc);
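/*
 * Usage sketch (illustrative; the 0x100 offset is hypothetical and must
 * be a multiple of the pool's minimum allocation granularity): reserve
 * a region at a fixed offset from the start of the chunk.
 *
 *	struct genpool_data_fixed fixed = { .offset = 0x100 };
 *	unsigned long vaddr;
 *
 *	vaddr = gen_pool_alloc_algo(pool, 64, gen_pool_fixed_alloc, &fixed);
 */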
/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement. The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
		unsigned long size, unsigned long start,
		unsigned int nr, void *data, struct gen_pool *pool,
		unsigned long start_addr)
{
	unsigned long align_mask = roundup_pow_of_two(nr) - 1;

	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);
/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 *
 * Iterate over the bitmap to find the smallest free region
 * from which we can allocate the memory.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		unsigned long next_bit = find_next_bit(map, size, index + nr);
		if ((next_bit - index) < len) {
			len = next_bit - index;
			start_bit = index;
			if (len == nr)
				return start_bit;
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);
static void devm_gen_pool_release(struct device *dev, void *res)
{
	gen_pool_destroy(*(struct gen_pool **)res);
}
static int devm_gen_pool_match(struct device *dev, void *res, void *data)
{
	struct gen_pool **p = res;

	/* NULL data matches only a pool without an assigned name */
	if (!data && !(*p)->name)
		return 1;

	if (!data || !(*p)->name)
		return 0;

	return !strcmp((*p)->name, data);
}
/**
 * gen_pool_get - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *gen_pool_get(struct device *dev, const char *name)
{
	struct gen_pool **p;

	p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
			(void *)name);
	if (!p)
		return NULL;
	return *p;
}
EXPORT_SYMBOL_GPL(gen_pool_get);
/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface. The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
				      int nid, const char *name)
{
	struct gen_pool **ptr, *pool;
	const char *pool_name = NULL;

	/* Check that genpool to be created is uniquely addressed on device */
	if (gen_pool_get(dev, name))
		return ERR_PTR(-EINVAL);

	if (name) {
		pool_name = kstrdup_const(name, GFP_KERNEL);
		if (!pool_name)
			return ERR_PTR(-ENOMEM);
	}

	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		goto free_pool_name;

	pool = gen_pool_create(min_alloc_order, nid);
	if (!pool)
		goto free_devres;

	*ptr = pool;
	pool->name = pool_name;
	devres_add(dev, ptr);

	return pool;

free_devres:
	devres_free(ptr);
free_pool_name:
	kfree_const(pool_name);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(devm_gen_pool_create);
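/*
 * Usage sketch (illustrative; the "sram" pool name is hypothetical): a
 * probe() routine can let devres tear the pool down automatically on
 * driver detach.
 *
 *	pool = devm_gen_pool_create(&pdev->dev, ilog2(64), NUMA_NO_NODE,
 *				    "sram");
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */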
#ifdef CONFIG_OF
/**
 * of_gen_pool_get - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_gen_pool_get(struct device_node *np,
	const char *propname, int index)
{
	struct platform_device *pdev;
	struct device_node *np_pool, *parent;
	const char *name = NULL;
	struct gen_pool *pool = NULL;

	np_pool = of_parse_phandle(np, propname, index);
	if (!np_pool)
		return NULL;

	pdev = of_find_device_by_node(np_pool);
	if (!pdev) {
		/* Check if named gen_pool is created by parent node device */
		parent = of_get_parent(np_pool);
		pdev = of_find_device_by_node(parent);
		of_node_put(parent);

		of_property_read_string(np_pool, "label", &name);
		if (!name)
			name = np_pool->name;
	}
	if (pdev)
		pool = gen_pool_get(&pdev->dev, name);
	of_node_put(np_pool);

	return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
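/*
 * Usage sketch (illustrative; the "mem-region" property name is
 * hypothetical): a consumer driver resolves its pool from a phandle in
 * its device tree node.
 *
 *	pool = of_gen_pool_get(pdev->dev.of_node, "mem-region", 0);
 *	if (!pool)
 *		return -ENODEV;
 */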
#endif /* CONFIG_OF */