/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this include on-device special
 * memory, uncached memory, etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks.  This
 * is implemented by using atomic operations and retrying on any
 * conflicts.  The disadvantage is that there may be livelocks in
 * extreme cases.  For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available.  If new memory is added to the pool, a lock still has
 * to be taken.  So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers.  So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>
#include <linux/vmalloc.h>

static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
	return chunk->end_addr - chunk->start_addr + 1;
}

static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}

static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users set the same bit, one of them gets back the number of bits that
 * remain to be set; otherwise 0 is returned.
 */
static unsigned long
bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const unsigned long size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr >= bits_to_set) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users clear the same bit, one of them gets back the number of bits that
 * remain to be cleared; otherwise 0 is returned.
 */
static unsigned long
bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const unsigned long size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr >= bits_to_clear) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
		pool->name = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);

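/*
 * Usage sketch (illustrative, not part of the original file): create a
 * pool with 16-byte granularity (min_alloc_order = 4) and no NUMA node
 * preference.  Every allocation is rounded up to a multiple of
 * 1 << min_alloc_order bytes, and each bitmap bit tracks one granule.
 *
 *	struct gen_pool *pool = gen_pool_create(4, -1);
 *
 *	if (!pool)
 *		return -ENOMEM;
 */
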
/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		      size_t size, int nid)
{
	struct gen_pool_chunk *chunk;
	unsigned long nbits = size >> pool->min_alloc_order;
	unsigned long nbytes = sizeof(struct gen_pool_chunk) +
				BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = vzalloc_node(nbytes, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size - 1;
	atomic_long_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);

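/*
 * Usage sketch (illustrative; "sram_virt" and "sram_phys" are hypothetical
 * values obtained e.g. from ioremap() and the device resource): back the
 * pool with a chunk of on-device SRAM so allocations can later be
 * translated to bus addresses.  @size is expected to be a multiple of the
 * pool granule; the chunk memory itself is never touched by the allocator,
 * only the bitmap bookkeeping appended to struct gen_pool_chunk is.
 *
 *	ret = gen_pool_add_virt(pool, (unsigned long)sram_virt,
 *				sram_phys, SZ_64K, -1);
 *	if (ret)
 *		goto err_destroy_pool;
 */
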
/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	unsigned long bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = chunk_size(chunk) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);	/* an allocation is outstanding */

		vfree(chunk);
	}
	kfree_const(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
	return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_alloc);

/**
 * gen_pool_alloc_algo - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
		genpool_algo_t algo, void *data)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	unsigned long nbits, start_bit, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (size == 0)
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_long_read(&chunk->avail))
			continue;

		start_bit = 0;
		end_bit = chunk_size(chunk) >> order;
retry:
		start_bit = algo(chunk->bits, end_bit, start_bit,
				 nbits, data, pool, chunk->start_addr);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_long_sub(size, &chunk->avail);
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc_algo);

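/*
 * Usage sketch (illustrative): make a one-off allocation with an algorithm
 * other than the pool default, here best-fit with no callback data:
 *
 *	unsigned long addr;
 *
 *	addr = gen_pool_alloc_algo(pool, 512, gen_pool_best_fit, NULL);
 *	if (!addr)
 *		return -ENOMEM;
 */
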
/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value.  Use NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	unsigned long vaddr;

	if (!pool)
		return NULL;

	vaddr = gen_pool_alloc(pool, size);
	if (!vaddr)
		return NULL;

	if (dma)
		*dma = gen_pool_virt_to_phys(pool, vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc);

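/*
 * Usage sketch (illustrative; "base" and REG_BUF_ADDR are hypothetical
 * device-specific names): carve a buffer out of device-local memory and
 * hand its bus address to the hardware.  The pool must have been populated
 * with gen_pool_add_virt() so the physical/DMA view is known:
 *
 *	dma_addr_t dma;
 *	void *buf = gen_pool_dma_alloc(pool, 1024, &dma);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	writel(lower_32_bits(dma), base + REG_BUF_ADDR);
 */
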
/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool.  Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	unsigned long start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			BUG_ON(addr + size - 1 > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_long_add(size, &chunk->avail);
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free);

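/*
 * Usage sketch (illustrative): a typical allocate/use/free round trip.
 * Internally the size is rounded up to whole granules, so passing the
 * original @size back to gen_pool_free() frees exactly what was allocated:
 *
 *	unsigned long addr = gen_pool_alloc(pool, 256);
 *
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	gen_pool_free(pool, addr, 256);
 */
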
/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of generic memory pool.  The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);

/**
 * addr_in_gen_pool - checks if an address falls within the range of a pool
 * @pool: the generic memory pool
 * @start: start address
 * @size: size of the region
 *
 * Check if the range of addresses falls within the specified pool. Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
			size_t size)
{
	bool found = false;
	unsigned long end = start + size - 1;
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
		if (start >= chunk->start_addr && start <= chunk->end_addr) {
			if (end <= chunk->end_addr) {
				found = true;
				break;
			}
		}
	}
	rcu_read_unlock();
	return found;
}

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_long_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk_size(chunk);
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL use gen_pool_first_fit as default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
	rcu_read_lock();

	pool->algo = algo;
	if (!pool->algo)
		pool->algo = gen_pool_first_fit;

	pool->data = data;

	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);

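/*
 * Usage sketch (illustrative): switch the pool to the aligned first-fit
 * algorithm.  The genpool_data_align structure is dereferenced on every
 * allocation, so it must stay alive as long as the pool uses it; static
 * or per-device storage is typical:
 *
 *	static struct genpool_data_align align_data = { .align = 64 };
 *
 *	gen_pool_set_algo(pool, gen_pool_first_fit_align, &align_data);
 */
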
/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_first_fit_align - find the first available region
 * of memory matching the size requirement (alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for alignment
 * @pool: pool to get order from
 * @start_addr: start address of the chunk being searched
 */
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	struct genpool_data_align *alignment;
	unsigned long align_mask, align_off;
	int order;

	alignment = data;
	order = pool->min_alloc_order;
	align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
	align_off = (start_addr & (alignment->align - 1)) >> order;

	return bitmap_find_next_zero_area_off(map, size, start, nr,
					      align_mask, align_off);
}
EXPORT_SYMBOL(gen_pool_first_fit_align);

/**
 * gen_pool_fixed_alloc - reserve a specific region
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: pointer to a struct genpool_data_fixed holding the requested offset
 * @pool: pool to get order from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	struct genpool_data_fixed *fixed_data;
	int order;
	unsigned long offset_bit;
	unsigned long start_bit;

	fixed_data = data;
	order = pool->min_alloc_order;
	offset_bit = fixed_data->offset >> order;
	if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
		return size;

	start_bit = bitmap_find_next_zero_area(map, size,
			start + offset_bit, nr, 0);
	if (start_bit != offset_bit)
		start_bit = size;
	return start_bit;
}
EXPORT_SYMBOL(gen_pool_fixed_alloc);

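/*
 * Usage sketch (illustrative): reserve a region at a fixed offset from the
 * start of a chunk, e.g. a hardware-mandated control block at offset 0.
 * The offset must be aligned to the pool granule, or the request fails
 * with a WARN_ON():
 *
 *	struct genpool_data_fixed fixed_data = { .offset = 0 };
 *	unsigned long addr;
 *
 *	addr = gen_pool_alloc_algo(pool, 128, gen_pool_fixed_alloc,
 *				   &fixed_data);
 */
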
/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement.  The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
		unsigned long size, unsigned long start,
		unsigned int nr, void *data, struct gen_pool *pool,
		unsigned long start_addr)
{
	unsigned long align_mask = roundup_pow_of_two(nr) - 1;

	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 *
 * Iterate over the bitmap to find the smallest free region
 * from which we can allocate the memory.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		unsigned long next_bit = find_next_bit(map, size, index + nr);
		if ((next_bit - index) < len) {
			len = next_bit - index;
			start_bit = index;
			if (len == nr)
				return start_bit;
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);

static void devm_gen_pool_release(struct device *dev, void *res)
{
	gen_pool_destroy(*(struct gen_pool **)res);
}

static int devm_gen_pool_match(struct device *dev, void *res, void *data)
{
	struct gen_pool **p = res;

	/* NULL data matches only a pool without an assigned name */
	if (!data && !(*p)->name)
		return 1;

	if (!data || !(*p)->name)
		return 0;

	return !strcmp((*p)->name, data);
}

/**
 * gen_pool_get - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *gen_pool_get(struct device *dev, const char *name)
{
	struct gen_pool **p;

	p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
			(void *)name);
	if (!p)
		return NULL;
	return *p;
}
EXPORT_SYMBOL_GPL(gen_pool_get);

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.  The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
				      int nid, const char *name)
{
	struct gen_pool **ptr, *pool;
	const char *pool_name = NULL;

	/* Check that genpool to be created is uniquely addressed on device */
	if (gen_pool_get(dev, name))
		return ERR_PTR(-EINVAL);

	if (name) {
		pool_name = kstrdup_const(name, GFP_KERNEL);
		if (!pool_name)
			return ERR_PTR(-ENOMEM);
	}

	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		goto free_pool_name;

	pool = gen_pool_create(min_alloc_order, nid);
	if (!pool)
		goto free_devres;

	*ptr = pool;
	pool->name = pool_name;
	devres_add(dev, ptr);

	return pool;

free_devres:
	devres_free(ptr);
free_pool_name:
	kfree_const(pool_name);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(devm_gen_pool_create);

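/*
 * Usage sketch (illustrative, inside a hypothetical driver probe routine):
 * the pool is destroyed automatically when the device is unbound, so no
 * explicit gen_pool_destroy() call is needed:
 *
 *	pool = devm_gen_pool_create(&pdev->dev, ilog2(64), NUMA_NO_NODE,
 *				    "sram");
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */
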
#ifdef CONFIG_OF
/**
 * of_gen_pool_get - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_gen_pool_get(struct device_node *np,
	const char *propname, int index)
{
	struct platform_device *pdev;
	struct device_node *np_pool, *parent;
	const char *name = NULL;
	struct gen_pool *pool = NULL;

	np_pool = of_parse_phandle(np, propname, index);
	if (!np_pool)
		return NULL;

	pdev = of_find_device_by_node(np_pool);
	if (!pdev) {
		/* Check if named gen_pool is created by parent node device */
		parent = of_get_parent(np_pool);
		pdev = of_find_device_by_node(parent);
		of_node_put(parent);
	}

	of_property_read_string(np_pool, "label", &name);
	if (!name)
		name = np_pool->name;

	if (pdev)
		pool = gen_pool_get(&pdev->dev, name);
	of_node_put(np_pool);

	return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);

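/*
 * Usage sketch (illustrative; node and property names are made up): given
 * a device tree fragment such as
 *
 *	sram: sram@10000000 { ... };
 *	foo {
 *		sram = <&sram>;
 *	};
 *
 * a driver bound to "foo" can look up the pool registered for the sram
 * node's device with:
 *
 *	pool = of_gen_pool_get(dev->of_node, "sram", 0);
 */
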
#endif /* CONFIG_OF */