/*
 * Basic general purpose allocator for managing special purpose memory
 * not managed by the regular kmalloc/kfree interface.
 * Uses for this include on-device special memory, uncached memory,
 * etc.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/module.h>
#include <linux/genalloc.h>

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
        struct gen_pool *pool;

        pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
        if (pool != NULL) {
                rwlock_init(&pool->lock);
                INIT_LIST_HEAD(&pool->chunks);
                pool->min_alloc_order = min_alloc_order;
        }
        return pool;
}
EXPORT_SYMBOL(gen_pool_create);
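
/*
 * Example usage (illustrative sketch; the identifier sram_pool and the
 * choice of order 8, i.e. 256-byte minimum allocation units, are
 * hypothetical): a driver managing a dedicated on-chip SRAM region first
 * creates the pool, passing -1 to allow the pool structure to be
 * allocated on any node.
 *
 *	struct gen_pool *sram_pool;
 *
 *	sram_pool = gen_pool_create(8, -1);
 *	if (sram_pool == NULL)
 *		return -ENOMEM;
 */
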
/**
 * gen_pool_add - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @addr: starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 */
int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
                 int nid)
{
        struct gen_pool_chunk *chunk;
        int nbits = size >> pool->min_alloc_order;
        int nbytes = sizeof(struct gen_pool_chunk) +
                        (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;

        chunk = kmalloc_node(nbytes, GFP_KERNEL, nid);
        if (unlikely(chunk == NULL))
                return -1;

        memset(chunk, 0, nbytes);
        spin_lock_init(&chunk->lock);
        chunk->start_addr = addr;
        chunk->end_addr = addr + size;

        write_lock(&pool->lock);
        list_add(&chunk->next_chunk, &pool->chunks);
        write_unlock(&pool->lock);

        return 0;
}
EXPORT_SYMBOL(gen_pool_add);
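
/*
 * Example usage (sketch, continuing the hypothetical sram_pool above):
 * the backing memory is handed to the pool as one or more chunks.  Here
 * a 64 KiB region whose kernel virtual address is sram_virt (hypothetical)
 * is added; a non-zero return indicates the chunk bookkeeping could not
 * be allocated.
 *
 *	if (gen_pool_add(sram_pool, (unsigned long)sram_virt,
 *			 64 * 1024, -1) != 0) {
 *		gen_pool_destroy(sram_pool);
 *		return -ENOMEM;
 *	}
 */
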
/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool.  Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
        struct list_head *_chunk, *_next_chunk;
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        int bit, end_bit;

        write_lock(&pool->lock);
        list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
                list_del(&chunk->next_chunk);

                end_bit = (chunk->end_addr - chunk->start_addr) >> order;
                bit = find_next_bit(chunk->bits, end_bit, 0);
                BUG_ON(bit < end_bit);

                kfree(chunk);
        }
        kfree(pool);
        return;
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses a first-fit algorithm.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
        struct list_head *_chunk;
        struct gen_pool_chunk *chunk;
        unsigned long addr, flags;
        int order = pool->min_alloc_order;
        int nbits, bit, start_bit, end_bit;

        if (size == 0)
                return 0;

        nbits = (size + (1UL << order) - 1) >> order;

        read_lock(&pool->lock);
        list_for_each(_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);

                end_bit = (chunk->end_addr - chunk->start_addr) >> order;
                end_bit -= nbits + 1;

                spin_lock_irqsave(&chunk->lock, flags);
                bit = -1;
                while (bit + 1 < end_bit) {
                        bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1);
                        if (bit >= end_bit)
                                break;

                        start_bit = bit;
                        if (nbits > 1) {
                                bit = find_next_bit(chunk->bits, bit + nbits,
                                                        bit + 1);
                                if (bit - start_bit < nbits)
                                        continue;
                        }

                        addr = chunk->start_addr +
                                        ((unsigned long)start_bit << order);
                        while (nbits--)
                                __set_bit(start_bit++, chunk->bits);
                        spin_unlock_irqrestore(&chunk->lock, flags);
                        read_unlock(&pool->lock);
                        return addr;
                }
                spin_unlock_irqrestore(&chunk->lock, flags);
        }
        read_unlock(&pool->lock);
        return 0;
}
EXPORT_SYMBOL(gen_pool_alloc);
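
/*
 * Example usage (sketch, continuing the hypothetical sram_pool above):
 * requests are rounded up to whole minimum-order units and satisfied
 * first-fit.  A return value of 0 means no space was found, so it must
 * be checked before the address is used.
 *
 *	unsigned long buf;
 *
 *	buf = gen_pool_alloc(sram_pool, 512);
 *	if (buf == 0)
 *		return -ENOMEM;
 */
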
/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified pool.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
        struct list_head *_chunk;
        struct gen_pool_chunk *chunk;
        unsigned long flags;
        int order = pool->min_alloc_order;
        int bit, nbits;

        nbits = (size + (1UL << order) - 1) >> order;

        read_lock(&pool->lock);
        list_for_each(_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);

                if (addr >= chunk->start_addr && addr < chunk->end_addr) {
                        BUG_ON(addr + size > chunk->end_addr);
                        spin_lock_irqsave(&chunk->lock, flags);
                        bit = (addr - chunk->start_addr) >> order;
                        while (nbits--)
                                __clear_bit(bit++, chunk->bits);
                        spin_unlock_irqrestore(&chunk->lock, flags);
                        break;
                }
        }
        BUG_ON(nbits > 0);
        read_unlock(&pool->lock);
}
EXPORT_SYMBOL(gen_pool_free);
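
/*
 * Example usage (sketch, continuing the hypothetical names above): the
 * caller must pass back the size it allocated, since the pool keeps no
 * per-allocation record.  Once every allocation has been returned, the
 * pool itself can be torn down; gen_pool_destroy() BUGs if anything is
 * still outstanding.
 *
 *	gen_pool_free(sram_pool, buf, 512);
 *	gen_pool_destroy(sram_pool);
 */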