/*
 * Basic general purpose allocator for managing special purpose memory
 * not managed by the regular kmalloc/kfree interface.
 * Uses for this include on-device special memory, uncached memory
 * etc.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/module.h>
#include <linux/genalloc.h>

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
        struct gen_pool *pool;

        pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
        if (pool != NULL) {
                rwlock_init(&pool->lock);
                INIT_LIST_HEAD(&pool->chunks);
                pool->min_alloc_order = min_alloc_order;
        }
        return pool;
}
EXPORT_SYMBOL(gen_pool_create);
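
/*
 * Illustrative usage sketch (not part of this file): a driver wanting
 * 32-byte allocation granularity (order 5) on any NUMA node could create
 * its pool as shown below.  The variable name sram_pool is hypothetical.
 *
 *      struct gen_pool *sram_pool = gen_pool_create(5, -1);
 *      if (sram_pool == NULL)
 *              return -ENOMEM;
 */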

/**
 * gen_pool_add - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @addr: starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 */
int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
                 int nid)
{
        struct gen_pool_chunk *chunk;
        int nbits = size >> pool->min_alloc_order;
        int nbytes = sizeof(struct gen_pool_chunk) +
                                (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;

        chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
        if (unlikely(chunk == NULL))
                return -1;

        spin_lock_init(&chunk->lock);
        chunk->start_addr = addr;
        chunk->end_addr = addr + size;

        write_lock(&pool->lock);
        list_add(&chunk->next_chunk, &pool->chunks);
        write_unlock(&pool->lock);

        return 0;
}
EXPORT_SYMBOL(gen_pool_add);
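
/*
 * Illustrative usage sketch (not part of this file): handing a region of
 * on-device SRAM to the pool created above.  SRAM_VIRT_BASE and SRAM_SIZE
 * are hypothetical driver-provided values, not defined anywhere here.
 *
 *      if (gen_pool_add(sram_pool, SRAM_VIRT_BASE, SRAM_SIZE, -1) < 0) {
 *              gen_pool_destroy(sram_pool);
 *              return -ENOMEM;
 *      }
 */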

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
        struct list_head *_chunk, *_next_chunk;
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        int bit, end_bit;

        write_lock(&pool->lock);
        list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
                list_del(&chunk->next_chunk);

                end_bit = (chunk->end_addr - chunk->start_addr) >> order;
                bit = find_next_bit(chunk->bits, end_bit, 0);
                BUG_ON(bit < end_bit);

                kfree(chunk);
        }
        kfree(pool);
        return;
}
EXPORT_SYMBOL(gen_pool_destroy);
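
/*
 * Illustrative usage sketch (not part of this file): tearing the pool down,
 * typically at driver removal.  Every allocation must already have been
 * returned with gen_pool_free(), otherwise the BUG_ON() above fires.
 * sram_pool is the hypothetical pool from the earlier sketches.
 *
 *      gen_pool_destroy(sram_pool);
 */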

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses a first-fit algorithm.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
        struct list_head *_chunk;
        struct gen_pool_chunk *chunk;
        unsigned long addr, flags;
        int order = pool->min_alloc_order;
        int nbits, bit, start_bit, end_bit;

        if (size == 0)
                return 0;

        nbits = (size + (1UL << order) - 1) >> order;

        read_lock(&pool->lock);
        list_for_each(_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);

                end_bit = (chunk->end_addr - chunk->start_addr) >> order;
                end_bit -= nbits + 1;

                spin_lock_irqsave(&chunk->lock, flags);
                bit = -1;
                while (bit + 1 < end_bit) {
                        bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1);
                        if (bit >= end_bit)
                                break;

                        start_bit = bit;
                        if (nbits > 1) {
                                bit = find_next_bit(chunk->bits, bit + nbits,
                                                        bit + 1);
                                if (bit - start_bit < nbits)
                                        continue;
                        }

                        addr = chunk->start_addr +
                                ((unsigned long)start_bit << order);
                        while (nbits--)
                                __set_bit(start_bit++, chunk->bits);
                        spin_unlock_irqrestore(&chunk->lock, flags);
                        read_unlock(&pool->lock);
                        return addr;
                }
                spin_unlock_irqrestore(&chunk->lock, flags);
        }
        read_unlock(&pool->lock);
        return 0;
}
EXPORT_SYMBOL(gen_pool_alloc);
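
/*
 * Illustrative usage sketch (not part of this file): carving a 256-byte
 * buffer out of the hypothetical sram_pool.  The return value is an address
 * inside one of the chunks added earlier, or 0 if no chunk has a large
 * enough run of free bits.  The variable name buf is made up for the example.
 *
 *      unsigned long buf = gen_pool_alloc(sram_pool, 256);
 *      if (!buf)
 *              return -ENOMEM;
 */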

/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified pool.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
        struct list_head *_chunk;
        struct gen_pool_chunk *chunk;
        unsigned long flags;
        int order = pool->min_alloc_order;
        int bit, nbits;

        nbits = (size + (1UL << order) - 1) >> order;

        read_lock(&pool->lock);
        list_for_each(_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);

                if (addr >= chunk->start_addr && addr < chunk->end_addr) {
                        BUG_ON(addr + size > chunk->end_addr);
                        spin_lock_irqsave(&chunk->lock, flags);
                        bit = (addr - chunk->start_addr) >> order;
                        while (nbits--)
                                __clear_bit(bit++, chunk->bits);
                        spin_unlock_irqrestore(&chunk->lock, flags);
                        break;
                }
        }
        BUG_ON(nbits > 0);
        read_unlock(&pool->lock);
}
EXPORT_SYMBOL(gen_pool_free);
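
/*
 * Illustrative usage sketch (not part of this file): returning the buffer
 * allocated in the previous sketch.  The caller must pass the same size it
 * requested from gen_pool_alloc(); sram_pool and buf are the hypothetical
 * names used above.
 *
 *      gen_pool_free(sram_pool, buf, 256);
 */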