drivers/staging/zram/xvmalloc.c

/*
 * xvmalloc memory allocator
 *
 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>

#include "xvmalloc.h"
#include "xvmalloc_int.h"

static void stat_inc(u64 *value)
{
	*value = *value + 1;
}

static void stat_dec(u64 *value)
{
	*value = *value - 1;
}

static int test_flag(struct block_header *block, enum blockflags flag)
{
	return block->prev & BIT(flag);
}

static void set_flag(struct block_header *block, enum blockflags flag)
{
	block->prev |= BIT(flag);
}

static void clear_flag(struct block_header *block, enum blockflags flag)
{
	block->prev &= ~BIT(flag);
}
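
/*
 * Note: per-block flags share storage with the previous-block offset in
 * block->prev; get_blockprev()/set_blockprev() below separate the two with
 * PREV_MASK and FLAGS_MASK. Since block offsets are XV_ALIGN-aligned, the
 * low-order bits of the stored offset are free to hold these flag bits
 * (the masks themselves are defined in xvmalloc_int.h).
 */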

/*
 * Given <page, offset> pair, provide a dereferenceable pointer.
 * This is called from xv_malloc/xv_free path, so it
 * needs to be fast.
 */
static void *get_ptr_atomic(struct page *page, u16 offset, enum km_type type)
{
	unsigned char *base;

	base = kmap_atomic(page, type);
	return base + offset;
}

static void put_ptr_atomic(void *ptr, enum km_type type)
{
	kunmap_atomic(ptr, type);
}

static u32 get_blockprev(struct block_header *block)
{
	return block->prev & PREV_MASK;
}

static void set_blockprev(struct block_header *block, u16 new_offset)
{
	block->prev = new_offset | (block->prev & FLAGS_MASK);
}

static struct block_header *BLOCK_NEXT(struct block_header *block)
{
	return (struct block_header *)
		((char *)block + block->size + XV_ALIGN);
}

/*
 * Get index of free list containing blocks of maximum size
 * which is less than or equal to given size.
 */
static u32 get_index_for_insert(u32 size)
{
	if (unlikely(size > XV_MAX_ALLOC_SIZE))
		size = XV_MAX_ALLOC_SIZE;
	size &= ~FL_DELTA_MASK;
	return (size - XV_MIN_ALLOC_SIZE) >> FL_DELTA_SHIFT;
}

/*
 * Get index of free list having blocks of size greater than
 * or equal to requested size.
 */
static u32 get_index(u32 size)
{
	if (unlikely(size < XV_MIN_ALLOC_SIZE))
		size = XV_MIN_ALLOC_SIZE;
	size = ALIGN(size, FL_DELTA);
	return (size - XV_MIN_ALLOC_SIZE) >> FL_DELTA_SHIFT;
}
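
/*
 * Illustrative example (the actual constants live in xvmalloc_int.h):
 * assuming XV_MIN_ALLOC_SIZE == 32 and FL_DELTA == 8, a free block of
 * 100 bytes is inserted at index ((100 & ~7) - 32) >> 3 == 8 (size is
 * rounded down, so a freelist never over-promises), while a 100-byte
 * request searches from index (ALIGN(100, 8) - 32) >> 3 == 9 (size is
 * rounded up, so any block found there is guaranteed to be large enough).
 */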

/**
 * find_block - find block of at least given size
 * @pool: memory pool to search from
 * @size: size of block required
 * @page: page containing required block
 * @offset: offset within the page where block is located.
 *
 * Searches two level bitmap to locate block of at least
 * the given size. If such a block is found, it provides
 * <page, offset> to identify this block and returns index
 * in freelist where we found this block.
 * Otherwise, returns 0 and <page, offset> params are not touched.
 */
static u32 find_block(struct xv_pool *pool, u32 size,
			struct page **page, u32 *offset)
{
	ulong flbitmap, slbitmap;
	u32 flindex, slindex, slbitstart;

	/* There are no free blocks in this pool */
	if (!pool->flbitmap)
		return 0;

	/* Get freelist index corresponding to this size */
	slindex = get_index(size);
	slbitmap = pool->slbitmap[slindex / BITS_PER_LONG];
	slbitstart = slindex % BITS_PER_LONG;

	/*
	 * If freelist is not empty at this index, we found the
	 * block - head of this list. This is approximate best-fit match.
	 */
	if (test_bit(slbitstart, &slbitmap)) {
		*page = pool->freelist[slindex].page;
		*offset = pool->freelist[slindex].offset;
		return slindex;
	}

	/*
	 * No best-fit found. Search a bit further in bitmap for a free block.
	 * Second level bitmap consists of a series of BITS_PER_LONG-bit
	 * chunks. Search further in the chunk where we expected a best-fit,
	 * starting from index location found above.
	 */
	slbitstart++;
	slbitmap >>= slbitstart;

	/* Skip this search if we were already at end of this bitmap chunk */
	if ((slbitstart != BITS_PER_LONG) && slbitmap) {
		slindex += __ffs(slbitmap) + 1;
		*page = pool->freelist[slindex].page;
		*offset = pool->freelist[slindex].offset;
		return slindex;
	}

	/* Now do a full two-level bitmap search to find next nearest fit */
	flindex = slindex / BITS_PER_LONG;

	flbitmap = (pool->flbitmap) >> (flindex + 1);
	if (!flbitmap)
		return 0;

	flindex += __ffs(flbitmap) + 1;
	slbitmap = pool->slbitmap[flindex];
	slindex = (flindex * BITS_PER_LONG) + __ffs(slbitmap);
	*page = pool->freelist[slindex].page;
	*offset = pool->freelist[slindex].offset;

	return slindex;
}
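
/*
 * Freelist bookkeeping: bit 'i' of pool->flbitmap is set iff the word
 * pool->slbitmap[i] is non-zero, and bit 'j' of pool->slbitmap[i] is set iff
 * freelist number (i * BITS_PER_LONG + j) is non-empty. find_block() above
 * uses this two-level structure to locate the next non-empty freelist without
 * scanning every bucket; insert_block()/remove_block() below keep it in sync.
 */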

/*
 * Insert block at <page, offset> in freelist of given pool.
 * freelist used depends on block size.
 */
static void insert_block(struct xv_pool *pool, struct page *page, u32 offset,
			struct block_header *block)
{
	u32 flindex, slindex;
	struct block_header *nextblock;

	slindex = get_index_for_insert(block->size);
	flindex = slindex / BITS_PER_LONG;

	block->link.prev_page = NULL;
	block->link.prev_offset = 0;
	block->link.next_page = pool->freelist[slindex].page;
	block->link.next_offset = pool->freelist[slindex].offset;
	pool->freelist[slindex].page = page;
	pool->freelist[slindex].offset = offset;

	if (block->link.next_page) {
		nextblock = get_ptr_atomic(block->link.next_page,
					block->link.next_offset, KM_USER1);
		nextblock->link.prev_page = page;
		nextblock->link.prev_offset = offset;
		put_ptr_atomic(nextblock, KM_USER1);
		/* If there was a next page then the free bits are set. */
		return;
	}

	__set_bit(slindex % BITS_PER_LONG, &pool->slbitmap[flindex]);
	__set_bit(flindex, &pool->flbitmap);
}
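
/*
 * insert_block()/remove_block() map neighbouring blocks with KM_USER1 because
 * their callers may already hold a KM_USER0 atomic mapping of the block being
 * inserted or removed (see grow_pool(), xv_malloc() and xv_free()); using a
 * separate kmap slot keeps the two atomic mappings from clobbering each other.
 */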

/*
 * Remove block from freelist. Index 'slindex' identifies the freelist.
 */
static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
			struct block_header *block, u32 slindex)
{
	u32 flindex = slindex / BITS_PER_LONG;
	struct block_header *tmpblock;

	if (block->link.prev_page) {
		tmpblock = get_ptr_atomic(block->link.prev_page,
				block->link.prev_offset, KM_USER1);
		tmpblock->link.next_page = block->link.next_page;
		tmpblock->link.next_offset = block->link.next_offset;
		put_ptr_atomic(tmpblock, KM_USER1);
	}

	if (block->link.next_page) {
		tmpblock = get_ptr_atomic(block->link.next_page,
				block->link.next_offset, KM_USER1);
		tmpblock->link.prev_page = block->link.prev_page;
		tmpblock->link.prev_offset = block->link.prev_offset;
		put_ptr_atomic(tmpblock, KM_USER1);
	}

	/* Is this block at the head of the freelist? */
	if (pool->freelist[slindex].page == page
	   && pool->freelist[slindex].offset == offset) {

		pool->freelist[slindex].page = block->link.next_page;
		pool->freelist[slindex].offset = block->link.next_offset;

		if (pool->freelist[slindex].page) {
			struct block_header *tmpblock;
			tmpblock = get_ptr_atomic(pool->freelist[slindex].page,
					pool->freelist[slindex].offset,
					KM_USER1);
			tmpblock->link.prev_page = NULL;
			tmpblock->link.prev_offset = 0;
			put_ptr_atomic(tmpblock, KM_USER1);
		} else {
			/* This freelist bucket is empty */
			__clear_bit(slindex % BITS_PER_LONG,
					&pool->slbitmap[flindex]);
			if (!pool->slbitmap[flindex])
				__clear_bit(flindex, &pool->flbitmap);
		}
	}

	block->link.prev_page = NULL;
	block->link.prev_offset = 0;
	block->link.next_page = NULL;
	block->link.next_offset = 0;
}

/*
 * Allocate a page and add it to freelist of given pool.
 */
static int grow_pool(struct xv_pool *pool, gfp_t flags)
{
	struct page *page;
	struct block_header *block;

	page = alloc_page(flags);
	if (unlikely(!page))
		return -ENOMEM;

	stat_inc(&pool->total_pages);

	spin_lock(&pool->lock);
	block = get_ptr_atomic(page, 0, KM_USER0);

	block->size = PAGE_SIZE - XV_ALIGN;
	set_flag(block, BLOCK_FREE);
	clear_flag(block, PREV_FREE);
	set_blockprev(block, 0);

	insert_block(pool, page, 0, block);

	put_ptr_atomic(block, KM_USER0);
	spin_unlock(&pool->lock);

	return 0;
}

/*
 * Create a memory pool. Allocates freelist, bitmaps and other
 * per-pool metadata.
 */
struct xv_pool *xv_create_pool(void)
{
	u32 ovhd_size;
	struct xv_pool *pool;

	ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
	pool = kzalloc(ovhd_size, GFP_KERNEL);
	if (!pool)
		return NULL;

	spin_lock_init(&pool->lock);

	return pool;
}
EXPORT_SYMBOL_GPL(xv_create_pool);

void xv_destroy_pool(struct xv_pool *pool)
{
	kfree(pool);
}
EXPORT_SYMBOL_GPL(xv_destroy_pool);

/**
 * xv_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 * @page: page that holds the object
 * @offset: location of object within page
 *
 * On success, <page, offset> identifies block allocated
 * and 0 is returned. On failure, <page, offset> is set to
 * 0 and -ENOMEM is returned.
 *
 * Allocation requests with size > XV_MAX_ALLOC_SIZE will fail.
 */
int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
		u32 *offset, gfp_t flags)
{
	int error;
	u32 index, tmpsize, origsize, tmpoffset;
	struct block_header *block, *tmpblock;

	*page = NULL;
	*offset = 0;
	origsize = size;

	if (unlikely(!size || size > XV_MAX_ALLOC_SIZE))
		return -ENOMEM;

	size = ALIGN(size, XV_ALIGN);

	spin_lock(&pool->lock);

	index = find_block(pool, size, page, offset);

	if (!*page) {
		spin_unlock(&pool->lock);
		if (flags & GFP_NOWAIT)
			return -ENOMEM;
		error = grow_pool(pool, flags);
		if (unlikely(error))
			return error;

		spin_lock(&pool->lock);
		index = find_block(pool, size, page, offset);
	}

	if (!*page) {
		spin_unlock(&pool->lock);
		return -ENOMEM;
	}

	block = get_ptr_atomic(*page, *offset, KM_USER0);

	remove_block(pool, *page, *offset, block, index);
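
	/*
	 * Layout note: the block header occupies XV_ALIGN bytes at *offset and
	 * the object itself the next 'size' bytes, so 'tmpoffset' below is
	 * where the header of any leftover (split-off) block would begin;
	 * 'tmpsize' is the space left past the object, including room for
	 * that new header.
	 */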
	/* Split the block if required */
	tmpoffset = *offset + size + XV_ALIGN;
	tmpsize = block->size - size;
	tmpblock = (struct block_header *)((char *)block + size + XV_ALIGN);
	if (tmpsize) {
		tmpblock->size = tmpsize - XV_ALIGN;
		set_flag(tmpblock, BLOCK_FREE);
		clear_flag(tmpblock, PREV_FREE);

		set_blockprev(tmpblock, *offset);
		if (tmpblock->size >= XV_MIN_ALLOC_SIZE)
			insert_block(pool, *page, tmpoffset, tmpblock);

		if (tmpoffset + XV_ALIGN + tmpblock->size != PAGE_SIZE) {
			tmpblock = BLOCK_NEXT(tmpblock);
			set_blockprev(tmpblock, tmpoffset);
		}
	} else {
		/* This block is exact fit */
		if (tmpoffset != PAGE_SIZE)
			clear_flag(tmpblock, PREV_FREE);
	}

	block->size = origsize;
	clear_flag(block, BLOCK_FREE);

	put_ptr_atomic(block, KM_USER0);
	spin_unlock(&pool->lock);

	*offset += XV_ALIGN;

	return 0;
}
EXPORT_SYMBOL_GPL(xv_malloc);
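
/*
 * Typical caller pattern for the xv_malloc()/xv_free() pair, sketched here
 * for clarity only (hypothetical snippet; the GFP flags and kmap slot are
 * up to the caller, e.g. the zram driver):
 *
 *	struct page *page;
 *	u32 offset;
 *	void *obj;
 *
 *	if (xv_malloc(pool, len, &page, &offset, GFP_NOIO | __GFP_HIGHMEM))
 *		return -ENOMEM;
 *	obj = (char *)kmap_atomic(page, KM_USER0) + offset;
 *	memcpy(obj, src, len);		// object fits within a single page
 *	kunmap_atomic(obj, KM_USER0);
 *	...
 *	xv_free(pool, page, offset);	// same <page, offset> pair frees it
 */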

/*
 * Free block identified with <page, offset>
 */
void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
{
	void *page_start;
	struct block_header *block, *tmpblock;
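
	/*
	 * Step back over the block header: xv_malloc() returned the offset of
	 * the object, which lies XV_ALIGN bytes past the start of its
	 * struct block_header.
	 */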
	offset -= XV_ALIGN;

	spin_lock(&pool->lock);

	page_start = get_ptr_atomic(page, 0, KM_USER0);
	block = (struct block_header *)((char *)page_start + offset);

	/* Catch double free bugs */
	BUG_ON(test_flag(block, BLOCK_FREE));

	block->size = ALIGN(block->size, XV_ALIGN);

	tmpblock = BLOCK_NEXT(block);
	if (offset + block->size + XV_ALIGN == PAGE_SIZE)
		tmpblock = NULL;

	/* Merge next block if it's free */
	if (tmpblock && test_flag(tmpblock, BLOCK_FREE)) {
		/*
		 * Blocks smaller than XV_MIN_ALLOC_SIZE
		 * are not inserted in any free list.
		 */
		if (tmpblock->size >= XV_MIN_ALLOC_SIZE) {
			remove_block(pool, page,
				    offset + block->size + XV_ALIGN, tmpblock,
				    get_index_for_insert(tmpblock->size));
		}
		block->size += tmpblock->size + XV_ALIGN;
	}

	/* Merge previous block if it's free */
	if (test_flag(block, PREV_FREE)) {
		tmpblock = (struct block_header *)((char *)(page_start) +
						get_blockprev(block));
		offset = offset - tmpblock->size - XV_ALIGN;

		if (tmpblock->size >= XV_MIN_ALLOC_SIZE)
			remove_block(pool, page, offset, tmpblock,
				    get_index_for_insert(tmpblock->size));

		tmpblock->size += block->size + XV_ALIGN;
		block = tmpblock;
	}

	/* No used objects in this page. Free it. */
	if (block->size == PAGE_SIZE - XV_ALIGN) {
		put_ptr_atomic(page_start, KM_USER0);
		spin_unlock(&pool->lock);

		__free_page(page);
		stat_dec(&pool->total_pages);
		return;
	}

	set_flag(block, BLOCK_FREE);
	if (block->size >= XV_MIN_ALLOC_SIZE)
		insert_block(pool, page, offset, block);

	if (offset + block->size + XV_ALIGN != PAGE_SIZE) {
		tmpblock = BLOCK_NEXT(block);
		set_flag(tmpblock, PREV_FREE);
		set_blockprev(tmpblock, offset);
	}

	put_ptr_atomic(page_start, KM_USER0);
	spin_unlock(&pool->lock);
}
EXPORT_SYMBOL_GPL(xv_free);
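
/*
 * Returns the size stored in the block header. Note that xv_malloc() records
 * the caller's original (unaligned) request size in block->size, so this is
 * the exact size that was asked for, not the XV_ALIGN-rounded block size.
 */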
u32 xv_get_object_size(void *obj)
{
	struct block_header *blk;

	blk = (struct block_header *)((char *)(obj) - XV_ALIGN);
	return blk->size;
}
EXPORT_SYMBOL_GPL(xv_get_object_size);

/*
 * Returns total memory used by allocator (userdata + metadata)
 */
u64 xv_get_total_size_bytes(struct xv_pool *pool)
{
	return pool->total_pages << PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(xv_get_total_size_bytes);