/*
 * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved.
 *
 * Module Author: Heinz Mauelshagen <heinzm@redhat.com>
 *
 * Device-mapper memory object handling:
 *
 * o allocate/free total_pages in a per-client page pool.
 *
 * o allocate/free memory objects with chunks (1..n) of
 *   pages_per_chunk pages hanging off.
 *
 * This file is released under the GPL.
 */
#define	DM_MEM_CACHE_VERSION	"0.2"

#include "dm.h"
#include "dm-memcache.h"
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/module.h>
struct dm_mem_cache_client {
	spinlock_t lock;		/* Protects free_list and page counters */
	mempool_t *objs_pool;		/* Pool of dm_mem_cache_object arrays */
	struct page_list *free_list;	/* LIFO list of free pool pages */
	unsigned objects;		/* Max concurrently allocated objects */
	unsigned chunks;		/* Chunks per object */
	unsigned pages_per_chunk;	/* Pages per chunk */
	unsigned free_pages;		/* Pages not attached to an object */
	unsigned total_pages;		/* objects * chunks * pages_per_chunk */
};
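
/*
 * Example sizing (illustrative, not from the driver): a client created
 * with objects = 32, chunks = 8 and pages_per_chunk = 2 pre-allocates
 * 32 * 8 * 2 = 512 pages up front; free_pages then tracks how many of
 * those are not currently attached to a live memory object.
 */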
/*
 * Free pages and page_list elements of client.
 */
static void free_cache_pages(struct page_list *list)
{
	while (list) {
		struct page_list *pl = list;

		list = pl->next;
		BUG_ON(!pl->page);
		__free_page(pl->page);
		kfree(pl);
	}
}
/*
 * Allocate the number of pages and page_list elements the client requires.
 */
static struct page_list *alloc_cache_pages(unsigned pages)
{
	struct page_list *pl, *ret = NULL;
	struct page *page;

	while (pages--) {
		page = alloc_page(GFP_NOIO);
		if (!page)
			goto err;

		pl = kmalloc(sizeof(*pl), GFP_NOIO);
		if (!pl) {
			__free_page(page);
			goto err;
		}

		pl->page = page;
		pl->next = ret;
		ret = pl;
	}

	return ret;

err:
	free_cache_pages(ret);
	return NULL;
}
/*
 * Allocate page_list elements from the pool to chunks of the memory object.
 */
static void alloc_chunks(struct dm_mem_cache_client *cl,
			 struct dm_mem_cache_object *obj)
{
	unsigned chunks = cl->chunks;
	unsigned long flags;

	/*
	 * Disable interrupts once for the whole transfer, so that the
	 * per-element spin_lock()/spin_unlock() below don't need the
	 * more expensive _irqsave variants on every iteration.
	 */
	local_irq_save(flags);
	while (chunks--) {
		unsigned p = cl->pages_per_chunk;

		obj[chunks].pl = NULL;

		while (p--) {
			struct page_list *pl;

			/* Take next element from free list */
			spin_lock(&cl->lock);
			pl = cl->free_list;
			BUG_ON(!pl);
			cl->free_list = pl->next;
			spin_unlock(&cl->lock);

			pl->next = obj[chunks].pl;
			obj[chunks].pl = pl;
		}
	}

	local_irq_restore(flags);
}
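
/*
 * Illustrative only (not part of the driver): a hypothetical helper
 * showing how an object's pages hang off obj[chunk].pl as singly-linked
 * page_list runs, one run per chunk.
 */
#if 0
static unsigned example_count_object_pages(struct dm_mem_cache_client *cl,
					   struct dm_mem_cache_object *obj)
{
	unsigned chunk, count = 0;
	struct page_list *pl;

	for (chunk = 0; chunk < cl->chunks; chunk++)
		for (pl = obj[chunk].pl; pl; pl = pl->next)
			count++;

	/* Expected result: cl->chunks * cl->pages_per_chunk */
	return count;
}
#endif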
/*
 * Free page_list elements, putting them back onto the free list.
 */
static void free_chunks(struct dm_mem_cache_client *cl,
			struct dm_mem_cache_object *obj)
{
	unsigned chunks = cl->chunks;
	unsigned long flags;
	struct page_list *next, *pl;

	local_irq_save(flags);
	while (chunks--) {
		for (pl = obj[chunks].pl; pl; pl = next) {
			next = pl->next;

			spin_lock(&cl->lock);
			pl->next = cl->free_list;
			cl->free_list = pl;
			cl->free_pages++;
			spin_unlock(&cl->lock);
		}
	}

	local_irq_restore(flags);
}
/*
 * Create/destroy dm memory cache client resources.
 */
struct dm_mem_cache_client *
dm_mem_cache_client_create(unsigned objects, unsigned chunks,
			   unsigned pages_per_chunk)
{
	unsigned total_pages = objects * chunks * pages_per_chunk;
	struct dm_mem_cache_client *client;

	BUG_ON(!total_pages);
	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->objs_pool = mempool_create_kmalloc_pool(objects,
				chunks * sizeof(struct dm_mem_cache_object));
	if (!client->objs_pool)
		goto err;

	client->free_list = alloc_cache_pages(total_pages);
	if (!client->free_list)
		goto err1;

	spin_lock_init(&client->lock);
	client->objects = objects;
	client->chunks = chunks;
	client->pages_per_chunk = pages_per_chunk;
	client->free_pages = client->total_pages = total_pages;
	return client;

err1:
	mempool_destroy(client->objs_pool);
err:
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_mem_cache_client_create);
void dm_mem_cache_client_destroy(struct dm_mem_cache_client *cl)
{
	BUG_ON(cl->free_pages != cl->total_pages);
	free_cache_pages(cl->free_list);
	mempool_destroy(cl->objs_pool);
	kfree(cl);
}
EXPORT_SYMBOL(dm_mem_cache_client_destroy);
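
/*
 * Illustrative only (not part of the driver): a hypothetical caller
 * creating and tearing down a client. All objects must have been freed
 * back before destroy, or the BUG_ON() above fires.
 */
#if 0
static int example_client_lifetime(void)
{
	/* 32 objects, each 8 chunks of 2 pages -> 512 pool pages */
	struct dm_mem_cache_client *cl =
		dm_mem_cache_client_create(32, 8, 2);

	if (IS_ERR(cl))
		return PTR_ERR(cl);

	/* ... dm_mem_cache_alloc()/dm_mem_cache_free() cycles here ... */

	dm_mem_cache_client_destroy(cl);
	return 0;
}
#endif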
/*
 * Grow a client's cache by a number of objects (each object adds
 * chunks * pages_per_chunk pages).
 *
 * Don't call from interrupt context!
 */
int dm_mem_cache_grow(struct dm_mem_cache_client *cl, unsigned objects)
{
	unsigned pages = objects * cl->chunks * cl->pages_per_chunk;
	struct page_list *pl, *last;

	BUG_ON(!pages);
	pl = alloc_cache_pages(pages);
	if (!pl)
		return -ENOMEM;

	/* Find the tail of the new chain, then splice it onto the free list */
	last = pl;
	while (last->next)
		last = last->next;

	spin_lock_irq(&cl->lock);
	last->next = cl->free_list;
	cl->free_list = pl;
	cl->free_pages += pages;
	cl->total_pages += pages;
	cl->objects += objects;
	spin_unlock_irq(&cl->lock);

	mempool_resize(cl->objs_pool, cl->objects, GFP_NOIO);
	return 0;
}
EXPORT_SYMBOL(dm_mem_cache_grow);
/* Shrink a client's cache by a number of objects */
int dm_mem_cache_shrink(struct dm_mem_cache_client *cl, unsigned objects)
{
	int r;
	unsigned pages = objects * cl->chunks * cl->pages_per_chunk, p = pages;
	unsigned long flags;
	struct page_list *last = NULL, *pl, *pos;

	BUG_ON(!pages);

	spin_lock_irqsave(&cl->lock, flags);
	pl = pos = cl->free_list;
	/* Walk 'pages' elements down the free list, tracking the predecessor */
	while (p-- && pos->next) {
		last = pos;
		pos = pos->next;
	}

	if (++p)
		r = -ENOMEM;	/* Fewer than 'pages' free elements available */
	else {
		r = 0;
		cl->free_list = pos;
		cl->free_pages -= pages;
		cl->total_pages -= pages;
		cl->objects -= objects;
		last->next = NULL;	/* Detach the chain to be freed */
	}
	spin_unlock_irqrestore(&cl->lock, flags);

	if (!r) {
		free_cache_pages(pl);
		mempool_resize(cl->objs_pool, cl->objects, GFP_NOIO);
	}

	return r;
}
EXPORT_SYMBOL(dm_mem_cache_shrink);
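
/*
 * Illustrative only (not part of the driver): resizing an existing
 * client at runtime. The grow path allocates pages and may sleep, so
 * (per the comment above) it must not run in interrupt context.
 */
#if 0
static int example_resize(struct dm_mem_cache_client *cl)
{
	int r = dm_mem_cache_grow(cl, 4);	/* room for 4 more objects */

	if (r)
		return r;

	/* Give the 4 objects' worth of pages back again */
	return dm_mem_cache_shrink(cl, 4);
}
#endif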
/*
 * Allocate/free a memory object.
 *
 * Can be called from interrupt context.
 */
struct dm_mem_cache_object *dm_mem_cache_alloc(struct dm_mem_cache_client *cl)
{
	int r = 0;
	unsigned pages = cl->chunks * cl->pages_per_chunk;
	unsigned long flags;
	struct dm_mem_cache_object *obj;

	obj = mempool_alloc(cl->objs_pool, GFP_NOIO);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	/* Reserve the object's pages up front; fail if too few are left */
	spin_lock_irqsave(&cl->lock, flags);
	if (pages > cl->free_pages)
		r = -ENOMEM;
	else
		cl->free_pages -= pages;
	spin_unlock_irqrestore(&cl->lock, flags);

	if (r) {
		mempool_free(obj, cl->objs_pool);
		return ERR_PTR(r);
	}

	alloc_chunks(cl, obj);
	return obj;
}
EXPORT_SYMBOL(dm_mem_cache_alloc);
void dm_mem_cache_free(struct dm_mem_cache_client *cl,
		       struct dm_mem_cache_object *obj)
{
	free_chunks(cl, obj);
	mempool_free(obj, cl->objs_pool);
}
EXPORT_SYMBOL(dm_mem_cache_free);
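
/*
 * Illustrative only (not part of the driver): one alloc/use/free cycle.
 * dm_mem_cache_alloc() returns an array of cl->chunks dm_mem_cache_object
 * entries, each carrying a list of pages_per_chunk pages.
 */
#if 0
static int example_alloc_free(struct dm_mem_cache_client *cl)
{
	struct dm_mem_cache_object *obj = dm_mem_cache_alloc(cl);
	struct page *page;

	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* pool or page reservation failed */

	page = obj[0].pl->page;		/* first page of chunk 0 */
	/* ... perform I/O against the object's pages ... */

	dm_mem_cache_free(cl, obj);
	return 0;
}
#endif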
MODULE_DESCRIPTION(DM_NAME " dm memory cache");
MODULE_AUTHOR("Heinz Mauelshagen <heinzm@redhat.com>");
MODULE_LICENSE("GPL");