/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this include on-device special
 * memory, uncached memory, etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks.  This
 * is implemented by using atomic operations and retries on any
 * conflicts.  The disadvantage is that there may be livelocks in
 * extreme cases.  For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available.  If new memory is added to the pool, a lock still has
 * to be taken.  So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers.  So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
/*
 * Atomically set the bits in @mask_to_set at *@addr.  Fails with
 * -EBUSY if any of those bits is already set, so callers can detect
 * races with concurrent setters; otherwise retries the cmpxchg until
 * it wins.
 */
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
        unsigned long val, nval;

        nval = *addr;
        do {
                val = nval;
                if (val & mask_to_set)
                        return -EBUSY;
                cpu_relax();
        } while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

        return 0;
}
/*
 * Atomically clear the bits in @mask_to_clear at *@addr.  Fails with
 * -EBUSY if any of those bits is already clear; otherwise retries the
 * cmpxchg until it wins.
 */
static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
        unsigned long val, nval;

        nval = *addr;
        do {
                val = nval;
                if ((val & mask_to_clear) != mask_to_clear)
                        return -EBUSY;
                cpu_relax();
        } while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

        return 0;
}
/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map lock-lessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users race to set the same bit, one of them returns the number of
 * bits still to be set; otherwise returns 0.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
        unsigned long *p = map + BIT_WORD(start);
        const int size = start + nr;
        int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

        /* Set whole words first, then the partial trailing word. */
        while (nr - bits_to_set >= 0) {
                if (set_bits_ll(p, mask_to_set))
                        return nr;
                nr -= bits_to_set;
                bits_to_set = BITS_PER_LONG;
                mask_to_set = ~0UL;
                p++;
        }
        if (nr) {
                mask_to_set &= BITMAP_LAST_WORD_MASK(size);
                if (set_bits_ll(p, mask_to_set))
                        return nr;
        }

        return 0;
}
/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map lock-lessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users race to clear the same bit, one of them returns the number of
 * bits still to be cleared; otherwise returns 0.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
        unsigned long *p = map + BIT_WORD(start);
        const int size = start + nr;
        int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

        while (nr - bits_to_clear >= 0) {
                if (clear_bits_ll(p, mask_to_clear))
                        return nr;
                nr -= bits_to_clear;
                bits_to_clear = BITS_PER_LONG;
                mask_to_clear = ~0UL;
                p++;
        }
        if (nr) {
                mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
                if (clear_bits_ll(p, mask_to_clear))
                        return nr;
        }

        return 0;
}
/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 *
 * Returns the new pool, or NULL if allocation fails.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
        struct gen_pool *pool;

        pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
        if (pool != NULL) {
                spin_lock_init(&pool->lock);
                INIT_LIST_HEAD(&pool->chunks);
                pool->min_alloc_order = min_alloc_order;
        }
        return pool;
}
EXPORT_SYMBOL(gen_pool_create);
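
/*
 * Example (illustrative, not part of this file's code): @min_alloc_order
 * is the log2 of the allocation granularity, so a pool created with
 * order 5 hands out memory in 32-byte units, and one created with
 * PAGE_SHIFT hands out whole pages.
 *
 *      struct gen_pool *pool = gen_pool_create(5, -1);
 */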
/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
                 size_t size, int nid)
{
        struct gen_pool_chunk *chunk;
        int nbits = size >> pool->min_alloc_order;
        int nbytes = sizeof(struct gen_pool_chunk) +
                                BITS_TO_LONGS(nbits) * sizeof(long);

        chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
        if (unlikely(chunk == NULL))
                return -ENOMEM;

        chunk->phys_addr = phys;
        chunk->start_addr = virt;
        chunk->end_addr = virt + size;
        atomic_set(&chunk->avail, size);

        spin_lock(&pool->lock);
        list_add_rcu(&chunk->next_chunk, &pool->chunks);
        spin_unlock(&pool->lock);

        return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);
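
/*
 * Example (illustrative sketch; sram_virt, sram_phys and SRAM_SIZE are
 * hypothetical): a driver managing a small on-device SRAM region could
 * set up a pool like this.
 *
 *      struct gen_pool *sram_pool;
 *
 *      sram_pool = gen_pool_create(5, -1);
 *      if (!sram_pool)
 *              return -ENOMEM;
 *      if (gen_pool_add_virt(sram_pool, (unsigned long)sram_virt,
 *                            sram_phys, SRAM_SIZE, -1)) {
 *              gen_pool_destroy(sram_pool);
 *              return -ENOMEM;
 *      }
 */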
/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool the address belongs to
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
        struct gen_pool_chunk *chunk;
        phys_addr_t paddr = -1;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr < chunk->end_addr) {
                        paddr = chunk->phys_addr + (addr - chunk->start_addr);
                        break;
                }
        }
        rcu_read_unlock();

        return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);
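
/*
 * Example (illustrative; sram_pool is the hypothetical pool from the
 * sketch above): translating an allocated virtual address into a
 * physical address, e.g. for a DMA descriptor.
 *
 *      unsigned long vaddr = gen_pool_alloc(sram_pool, 256);
 *
 *      if (vaddr) {
 *              phys_addr_t paddr = gen_pool_virt_to_phys(sram_pool, vaddr);
 *              if (paddr == (phys_addr_t)-1)
 *                      return -EINVAL;
 *      }
 */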
/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool.  Verifies that there are
 * no outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
        struct list_head *_chunk, *_next_chunk;
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        int bit, end_bit;

        list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
                list_del(&chunk->next_chunk);

                end_bit = (chunk->end_addr - chunk->start_addr) >> order;
                bit = find_next_bit(chunk->bits, end_bit, 0);
                /* Any set bit means an outstanding allocation. */
                BUG_ON(bit < end_bit);

                kfree(chunk);
        }
        kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);
/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses a first-fit algorithm.  Cannot be used in NMI handlers on
 * architectures without an NMI-safe cmpxchg implementation.
 *
 * Returns the allocated address, or 0 on failure.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
        struct gen_pool_chunk *chunk;
        unsigned long addr = 0;
        int order = pool->min_alloc_order;
        int nbits, start_bit, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
#endif

        if (size == 0)
                return 0;

        /* Round the request up to a whole number of allocation units. */
        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (size > atomic_read(&chunk->avail))
                        continue;

                start_bit = 0;
                end_bit = (chunk->end_addr - chunk->start_addr) >> order;
retry:
                start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit,
                                                       start_bit, nbits, 0);
                if (start_bit >= end_bit)
                        continue;
                remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
                if (remain) {
                        /*
                         * Lost a race with a concurrent allocator: roll
                         * back the bits we did set and search again.
                         */
                        remain = bitmap_clear_ll(chunk->bits, start_bit,
                                                 nbits - remain);
                        BUG_ON(remain);
                        goto retry;
                }

                addr = chunk->start_addr + ((unsigned long)start_bit << order);
                size = nbits << order;
                atomic_sub(size, &chunk->avail);
                break;
        }
        rcu_read_unlock();
        return addr;
}
EXPORT_SYMBOL(gen_pool_alloc);
/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool.  Cannot be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
#endif

        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr < chunk->end_addr) {
                        BUG_ON(addr + size > chunk->end_addr);
                        start_bit = (addr - chunk->start_addr) >> order;
                        remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
                        BUG_ON(remain);
                        size = nbits << order;
                        atomic_add(size, &chunk->avail);
                        rcu_read_unlock();
                        return;
                }
        }
        rcu_read_unlock();
        /* The address was not found in any chunk: caller bug. */
        BUG();
}
EXPORT_SYMBOL(gen_pool_free);
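
/*
 * Example (illustrative; sram_pool as in the earlier sketches): an
 * allocation/free round trip.  The caller must remember the size, as
 * the pool does not record per-allocation lengths.
 *
 *      unsigned long buf = gen_pool_alloc(sram_pool, 128);
 *
 *      if (!buf)
 *              return -ENOMEM;
 *      ...
 *      gen_pool_free(sram_pool, buf, 128);
 */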
/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of generic memory pool.  The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
        void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
        void *data)
{
        struct gen_pool_chunk *chunk;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                func(pool, chunk, data);
        rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);
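
/*
 * Example (illustrative; the callback is hypothetical): counting the
 * chunks in a pool with gen_pool_for_each_chunk().
 *
 *      static void count_chunk(struct gen_pool *pool,
 *                              struct gen_pool_chunk *chunk, void *data)
 *      {
 *              (*(unsigned int *)data)++;
 *      }
 *
 *      unsigned int nchunks = 0;
 *
 *      gen_pool_for_each_chunk(pool, count_chunk, &nchunks);
 */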
/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space of
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        size_t avail = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                avail += atomic_read(&chunk->avail);
        rcu_read_unlock();
        return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);
/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size of
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        size_t size = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                size += chunk->end_addr - chunk->start_addr;
        rcu_read_unlock();
        return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);
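
/*
 * Example (illustrative): the two accessors above combine naturally
 * for occupancy reporting, e.g. from a debugfs handler.
 *
 *      pr_info("pool: %zu of %zu bytes available\n",
 *              gen_pool_avail(pool), gen_pool_size(pool));
 */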