/*
 * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved.
 *
 * Module Author: Heinz Mauelshagen <heinzm@redhat.com>
 *
 * Device-mapper memory object handling:
 *
 * o allocate/free total_pages in a per-client page pool.
 *
 * o allocate/free memory objects with chunks (1..n) of
 *   pages_per_chunk pages hanging off.
 *
 * This file is released under the GPL.
 */

#define	DM_MEM_CACHE_VERSION	"0.2"

#include "dm.h"
#include "dm-memcache.h"
#include <linux/dm-io.h>
#include <linux/err.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/module.h>
struct dm_mem_cache_client {
	spinlock_t lock;		/* Protects free_list and free_pages */
	mempool_t *objs_pool;		/* Pool of memory object chunk arrays */
	struct page_list *free_list;	/* Pages available for object chunks */
	unsigned objects;		/* Objects the client can back */
	unsigned chunks;		/* Chunks per memory object */
	unsigned pages_per_chunk;	/* Pages per chunk */
	unsigned free_pages;		/* Pages currently on free_list */
	unsigned total_pages;		/* Pages allocated to this client */
};
/*
 * Free pages and page_list elements of client.
 */
static void free_cache_pages(struct page_list *list)
{
	while (list) {
		struct page_list *pl = list;

		list = pl->next;
		__free_page(pl->page);
		kfree(pl);
	}
}
/*
 * Allocate the number of pages and page_list elements required by client.
 */
static struct page_list *alloc_cache_pages(unsigned pages)
{
	struct page_list *pl, *ret = NULL;
	struct page *page;

	while (pages--) {
		page = alloc_page(GFP_NOIO);
		if (!page)
			goto err;

		pl = kmalloc(sizeof(*pl), GFP_NOIO);
		if (!pl) {
			__free_page(page);
			goto err;
		}

		/* Push the new element onto the list being built */
		pl->page = page;
		pl->next = ret;
		ret = pl;
	}

	return ret;

err:
	/* Undo any partial allocation */
	free_cache_pages(ret);
	return NULL;
}
/*
 * Allocate page_list elements from the pool to chunks of the memory object.
 */
static void alloc_chunks(struct dm_mem_cache_client *cl,
			 struct dm_mem_cache_object *obj)
{
	unsigned chunks = cl->chunks;
	unsigned long flags;

	local_irq_save(flags);

	while (chunks--) {
		unsigned p = cl->pages_per_chunk;

		obj[chunks].pl = NULL;

		while (p--) {
			struct page_list *pl;

			/* Take next element from free list */
			spin_lock(&cl->lock);
			pl = cl->free_list;
			cl->free_list = pl->next;
			spin_unlock(&cl->lock);

			/* Link it onto this chunk's page list */
			pl->next = obj[chunks].pl;
			obj[chunks].pl = pl;
		}
	}

	local_irq_restore(flags);
}
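
/*
 * Locking note: the chunk (de)population paths disable local interrupts
 * for the whole walk but take cl->lock only around each list operation,
 * which keeps individual lock hold times short while remaining safe
 * against dm_mem_cache_alloc()/dm_mem_cache_free() calls from interrupt
 * context on the same CPU.
 */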
/*
 * Free page_list elements, putting them back onto the free list.
 */
static void free_chunks(struct dm_mem_cache_client *cl,
			struct dm_mem_cache_object *obj)
{
	unsigned chunks = cl->chunks;
	unsigned long flags;
	struct page_list *next, *pl;

	local_irq_save(flags);

	while (chunks--) {
		for (pl = obj[chunks].pl; pl; pl = next) {
			next = pl->next;

			spin_lock(&cl->lock);
			pl->next = cl->free_list;
			cl->free_list = pl;
			cl->free_pages++;
			spin_unlock(&cl->lock);
		}
	}

	local_irq_restore(flags);
}
/*
 * Create/destroy dm memory cache client resources.
 */
struct dm_mem_cache_client *
dm_mem_cache_client_create(unsigned objects, unsigned chunks,
			   unsigned pages_per_chunk)
{
	unsigned total_pages = objects * chunks * pages_per_chunk;
	struct dm_mem_cache_client *client;

	BUG_ON(!total_pages);

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->objs_pool = mempool_create_kmalloc_pool(objects,
				chunks * sizeof(struct dm_mem_cache_object));
	if (!client->objs_pool)
		goto err;

	client->free_list = alloc_cache_pages(total_pages);
	if (!client->free_list)
		goto err_pool;

	spin_lock_init(&client->lock);
	client->objects = objects;
	client->chunks = chunks;
	client->pages_per_chunk = pages_per_chunk;
	client->free_pages = client->total_pages = total_pages;
	return client;

err_pool:
	mempool_destroy(client->objs_pool);
err:
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_mem_cache_client_create);
void dm_mem_cache_client_destroy(struct dm_mem_cache_client *cl)
{
	/* All memory objects must have been freed back to the client */
	BUG_ON(cl->free_pages != cl->total_pages);
	free_cache_pages(cl->free_list);
	mempool_destroy(cl->objs_pool);
	kfree(cl);
}
EXPORT_SYMBOL(dm_mem_cache_client_destroy);
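
/*
 * Illustrative client lifecycle (editor's sketch, not part of the
 * original module): create a client backing 16 objects of 4 chunks with
 * 2 pages each, then tear it down. The function name and the chosen
 * numbers are hypothetical.
 */
#if 0
static int example_client_lifecycle(void)
{
	struct dm_mem_cache_client *cl;

	cl = dm_mem_cache_client_create(16, 4, 2);
	if (IS_ERR(cl))
		return PTR_ERR(cl);

	/* ... use cl via dm_mem_cache_alloc()/dm_mem_cache_free() ... */

	dm_mem_cache_client_destroy(cl);
	return 0;
}
#endif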
/*
 * Grow a client's cache by a number of objects
 * (each adds chunks * pages_per_chunk pages).
 *
 * Don't call from interrupt context!
 */
int dm_mem_cache_grow(struct dm_mem_cache_client *cl, unsigned objects)
{
	unsigned pages = objects * cl->chunks * cl->pages_per_chunk;
	struct page_list *pl, *last;

	pl = alloc_cache_pages(pages);
	if (!pl)
		return -ENOMEM;

	/* Find the tail of the new list so it can be spliced in */
	last = pl;
	while (last->next)
		last = last->next;

	spin_lock_irq(&cl->lock);
	last->next = cl->free_list;
	cl->free_list = pl;
	cl->free_pages += pages;
	cl->total_pages += pages;
	cl->objects += objects;
	spin_unlock_irq(&cl->lock);

	mempool_resize(cl->objs_pool, cl->objects, GFP_NOIO);
	return 0;
}
EXPORT_SYMBOL(dm_mem_cache_grow);
/* Shrink a client's cache by a number of objects */
int dm_mem_cache_shrink(struct dm_mem_cache_client *cl, unsigned objects)
{
	int r;
	unsigned pages = objects * cl->chunks * cl->pages_per_chunk, p = pages;
	unsigned long flags;
	struct page_list *last = NULL, *pl, *pos;

	spin_lock_irqsave(&cl->lock, flags);

	/* Walk pages entries down the free list */
	pl = pos = cl->free_list;
	while (p-- && pos->next) {
		last = pos;
		pos = pos->next;
	}

	if (++p)
		/* Hit the end of the list early -> not enough free pages */
		r = -ENOMEM;
	else {
		r = 0;
		cl->free_list = pos;
		cl->free_pages -= pages;
		cl->total_pages -= pages;
		cl->objects -= objects;
		last->next = NULL;
	}

	spin_unlock_irqrestore(&cl->lock, flags);

	if (!r) {
		free_cache_pages(pl);
		mempool_resize(cl->objs_pool, cl->objects, GFP_NOIO);
	}

	return r;
}
EXPORT_SYMBOL(dm_mem_cache_shrink);
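
/*
 * Note: despite taking cl->lock with spin_lock_irqsave(), shrinking is
 * not interrupt safe either: mempool_resize() is called with GFP_NOIO,
 * which may sleep, so call dm_mem_cache_shrink(), like
 * dm_mem_cache_grow(), from process context only.
 */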
/*
 * Allocate/free a memory object.
 *
 * Can be called from interrupt context.
 */
struct dm_mem_cache_object *dm_mem_cache_alloc(struct dm_mem_cache_client *cl)
{
	int r = 0;
	unsigned pages = cl->chunks * cl->pages_per_chunk;
	unsigned long flags;
	struct dm_mem_cache_object *obj;

	obj = mempool_alloc(cl->objs_pool, GFP_NOIO);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	/* Reserve the object's pages before detaching them from the pool */
	spin_lock_irqsave(&cl->lock, flags);
	if (pages > cl->free_pages)
		r = -ENOMEM;
	else
		cl->free_pages -= pages;
	spin_unlock_irqrestore(&cl->lock, flags);

	if (r) {
		mempool_free(obj, cl->objs_pool);
		return ERR_PTR(r);
	}

	alloc_chunks(cl, obj);
	return obj;
}
EXPORT_SYMBOL(dm_mem_cache_alloc);
void dm_mem_cache_free(struct dm_mem_cache_client *cl,
		       struct dm_mem_cache_object *obj)
{
	free_chunks(cl, obj);
	mempool_free(obj, cl->objs_pool);
}
EXPORT_SYMBOL(dm_mem_cache_free);
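
/*
 * Illustrative use of the object API (editor's sketch, not part of the
 * original module; the function name and the page count check are
 * hypothetical). A memory object is an array of cl->chunks chunk
 * descriptors, each carrying pages_per_chunk pages on its ->pl list,
 * as populated by alloc_chunks() above.
 */
#if 0
static int example_object_use(struct dm_mem_cache_client *cl,
			      unsigned chunks, unsigned pages_per_chunk)
{
	unsigned c, n = 0;
	struct page_list *pl;
	struct dm_mem_cache_object *obj;

	obj = dm_mem_cache_alloc(cl);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* Count the pages handed out with the object */
	for (c = 0; c < chunks; c++)
		for (pl = obj[c].pl; pl; pl = pl->next)
			n++;

	BUG_ON(n != chunks * pages_per_chunk);

	dm_mem_cache_free(cl, obj);
	return 0;
}
#endif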
MODULE_DESCRIPTION(DM_NAME " dm memory cache");
MODULE_AUTHOR("Heinz Mauelshagen <heinzm@redhat.com>");
MODULE_LICENSE("GPL");