/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
struct zram *devices;

/* Module params (documentation at end) */
unsigned int num_devices;

static void zram_stat_inc(u32 *v)
{
	*v = *v + 1;
}

static void zram_stat_dec(u32 *v)
{
	*v = *v - 1;
}

static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
	spin_lock(&zram->stat64_lock);
	*v = *v + inc;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
	spin_lock(&zram->stat64_lock);
	*v = *v - dec;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_inc(struct zram *zram, u64 *v)
{
	zram_stat64_add(zram, v, 1);
}
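
/*
 * Per-page state helpers. Each entry in zram->table carries a small
 * flags word; the helpers below test, set and clear one bit per
 * enum zram_pageflags value (e.g. ZRAM_ZERO, ZRAM_UNCOMPRESSED).
 */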
static int zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}
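
/*
 * Scan a mapped page one word at a time; returns 1 only if every word
 * is zero. The write path uses this to avoid storing zero-filled pages
 * at all (such slots are tracked with the ZRAM_ZERO flag instead).
 */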
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}
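
/*
 * Pick a disk size if the user did not set one. The default is
 * default_disksize_perc_ram percent of total RAM (the constant lives in
 * zram_drv.h and is not shown here). As an illustration only: if that
 * constant were 25 and the machine had 1 GiB of RAM, the device would
 * default to roughly 256 MiB. The result is then rounded down to a
 * page boundary via PAGE_MASK.
 */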
static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
{
	if (!zram->disksize) {
		pr_info(
		"disk size not provided. You can use disksize_kb module "
		"param to specify size.\nUsing default: (%u%% of RAM).\n",
		default_disksize_perc_ram
		);
		zram->disksize = default_disksize_perc_ram *
					(totalram_bytes / 100);
	}

	if (zram->disksize > 2 * (totalram_bytes)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %zu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		totalram_bytes >> 10, zram->disksize >> 10
		);
	}

	zram->disksize &= PAGE_MASK;
}
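
/*
 * Release whatever backs a table slot: nothing for zero pages (just the
 * flag), a whole page for uncompressed pages, or an xvmalloc object for
 * compressed pages. Per-device statistics are adjusted to match.
 */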
static void zram_free_page(struct zram *zram, size_t index)
{
	u32 clen;
	void *obj;

	struct page *page = zram->table[index].page;
	u32 offset = zram->table[index].offset;

	if (unlikely(!page)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			zram_clear_flag(zram, index, ZRAM_ZERO);
			zram_stat_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		clen = PAGE_SIZE;
		__free_page(page);
		zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
		zram_stat_dec(&zram->stats.pages_expand);
		goto out;
	}

	obj = kmap_atomic(page, KM_USER0) + offset;
	clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
	kunmap_atomic(obj, KM_USER0);

	xv_free(zram->mem_pool, page, offset);
	if (clen <= PAGE_SIZE / 2)
		zram_stat_dec(&zram->stats.good_compress);

out:
	zram_stat64_sub(zram, &zram->stats.compr_size, clen);
	zram_stat_dec(&zram->stats.pages_stored);

	zram->table[index].page = NULL;
	zram->table[index].offset = 0;
}
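
/*
 * Read-side helpers: handle_zero_page() satisfies a read of a page that
 * was never written (or is all zeroes) by zeroing the caller's buffer;
 * handle_uncompressed_page() copies out a page that was stored verbatim
 * because it did not compress well.
 */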
static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page, KM_USER0);
	memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	kunmap_atomic(user_mem, KM_USER0);

	flush_dcache_page(page);
}

static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
			u32 index, int offset)
{
	struct page *page = bvec->bv_page;
	unsigned char *user_mem, *cmem;

	user_mem = kmap_atomic(page, KM_USER0);
	cmem = kmap_atomic(zram->table[index].page, KM_USER1);

	memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
	kunmap_atomic(cmem, KM_USER1);
	kunmap_atomic(user_mem, KM_USER0);

	flush_dcache_page(page);
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
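
/*
 * Read one bio_vec worth of data. The slot may be a zero page, a page
 * never written (treated as zeroes), an uncompressed page, or an LZO
 * compressed object; partial reads decompress into a temporary buffer
 * and copy back only the requested range.
 */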
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			u32 index, int offset, struct bio *bio)
{
	int ret;
	size_t clen;
	struct page *page;
	struct zobj_header *zheader;
	unsigned char *user_mem, *cmem, *uncmem = NULL;

	page = bvec->bv_page;

	if (zram_test_flag(zram, index, ZRAM_ZERO)) {
		handle_zero_page(bvec);
		return 0;
	}

	/* Requested page is not present in compressed area */
	if (unlikely(!zram->table[index].page)) {
		pr_debug("Read before write: sector=%lu, size=%u",
			(ulong)(bio->bi_sector), bio->bi_size);
		handle_zero_page(bvec);
		return 0;
	}

	/* Page is stored uncompressed since it's incompressible */
	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		handle_uncompressed_page(zram, bvec, index, offset);
		return 0;
	}

	if (is_partial_io(bvec)) {
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!uncmem) {
			pr_info("Error allocating temp memory!\n");
			return -ENOMEM;
		}
	}

	user_mem = kmap_atomic(page, KM_USER0);
	if (!is_partial_io(bvec))
		uncmem = user_mem;
	clen = PAGE_SIZE;

	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
		zram->table[index].offset;

	ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
			xv_get_object_size(cmem) - sizeof(*zheader),
			uncmem, &clen);

	if (is_partial_io(bvec)) {
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
			bvec->bv_len);
		kfree(uncmem);
	}

	kunmap_atomic(cmem, KM_USER1);
	kunmap_atomic(user_mem, KM_USER0);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
	}

	flush_dcache_page(page);

	return 0;
}
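
/*
 * Decompress a full page into 'mem'. Used by the write path when a
 * partial write arrives: the old contents must be read first so the
 * untouched part of the page is preserved (read-modify-write).
 */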
static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
{
	int ret;
	size_t clen = PAGE_SIZE;
	struct zobj_header *zheader;
	unsigned char *cmem;

	if (zram_test_flag(zram, index, ZRAM_ZERO) ||
	    !zram->table[index].page) {
		memset(mem, 0, PAGE_SIZE);
		return 0;
	}

	cmem = kmap_atomic(zram->table[index].page, KM_USER0) +
		zram->table[index].offset;

	/* Page is stored uncompressed since it's incompressible */
	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		memcpy(mem, cmem, PAGE_SIZE);
		kunmap_atomic(cmem, KM_USER0);
		return 0;
	}

	ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
			xv_get_object_size(cmem) - sizeof(*zheader),
			mem, &clen);
	kunmap_atomic(cmem, KM_USER0);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
	}

	return 0;
}
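
/*
 * Write one bio_vec. The slot is freed first (sectors are simply
 * overwritten), then the page is either recorded as zero-filled,
 * stored uncompressed when LZO cannot shrink it below max_zpage_size,
 * or stored as a compressed object allocated from the xvmalloc pool.
 */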
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset)
{
	int ret;
	u32 store_offset;
	size_t clen;
	struct zobj_header *zheader;
	struct page *page, *page_store;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;

	page = bvec->bv_page;
	src = zram->compress_buffer;

	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!uncmem) {
			pr_info("Error allocating temp memory!\n");
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_read_before_write(zram, uncmem, index);
		if (ret) {
			kfree(uncmem);
			goto out;
		}
	}

	/*
	 * System overwrites unused sectors. Free memory associated
	 * with this sector now.
	 */
	if (zram->table[index].page ||
	    zram_test_flag(zram, index, ZRAM_ZERO))
		zram_free_page(zram, index);

	user_mem = kmap_atomic(page, KM_USER0);

	if (is_partial_io(bvec))
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
			bvec->bv_len);
	else
		uncmem = user_mem;

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem, KM_USER0);
		if (is_partial_io(bvec))
			kfree(uncmem);
		zram_stat_inc(&zram->stats.pages_zero);
		zram_set_flag(zram, index, ZRAM_ZERO);
		ret = 0;
		goto out;
	}

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
				zram->compress_workmem);

	kunmap_atomic(user_mem, KM_USER0);
	if (is_partial_io(bvec))
		kfree(uncmem);

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	/*
	 * Page is incompressible. Store it as-is (uncompressed)
	 * since we do not want to return too many disk write
	 * errors which has side effect of hanging the system.
	 */
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
		if (unlikely(!page_store)) {
			pr_info("Error allocating memory for "
				"incompressible page: %u\n", index);
			ret = -ENOMEM;
			goto out;
		}

		store_offset = 0;
		zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
		zram_stat_inc(&zram->stats.pages_expand);
		zram->table[index].page = page_store;
		src = kmap_atomic(page, KM_USER0);
		goto memstore;
	}

	if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
			&zram->table[index].page, &store_offset,
			GFP_NOIO | __GFP_HIGHMEM)) {
		pr_info("Error allocating memory for compressed "
			"page: %u, size=%zu\n", index, clen);
		ret = -ENOMEM;
		goto out;
	}

memstore:
	zram->table[index].offset = store_offset;

	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
		zram->table[index].offset;

	/* Back-reference needed for memory defragmentation */
	if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
		zheader = (struct zobj_header *)cmem;
		zheader->table_idx = index;
		cmem += sizeof(*zheader);
	}

	memcpy(cmem, src, clen);

	kunmap_atomic(cmem, KM_USER1);
	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
		kunmap_atomic(src, KM_USER0);

	/* Update stats */
	zram_stat64_add(zram, &zram->stats.compr_size, clen);
	zram_stat_inc(&zram->stats.pages_stored);
	if (clen <= PAGE_SIZE / 2)
		zram_stat_inc(&zram->stats.good_compress);

	return 0;

out:
	if (ret)
		zram_stat64_inc(zram, &zram->stats.failed_writes);
	return ret;
}
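
/*
 * Route a bio_vec to the read or write handler. Reads take zram->lock
 * shared so they can run concurrently; writes take it exclusive.
 */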
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio, int rw)
{
	int ret;

	if (rw == READ) {
		down_read(&zram->lock);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
		up_read(&zram->lock);
	} else {
		down_write(&zram->lock);
		ret = zram_bvec_write(zram, bvec, index, offset);
		up_write(&zram->lock);
	}

	return ret;
}
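
/*
 * Walk the (index, offset) cursor forward by one bio_vec. Assuming a
 * 4 KiB PAGE_SIZE and 512-byte sectors (SECTOR_SHIFT == 9), a bio that
 * starts at sector 9 maps to index 1 and offset 512; after a 512-byte
 * segment the offset becomes 1024 and the index stays at 1.
 */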
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
	int i, offset;
	u32 index;
	struct bio_vec *bvec;

	switch (rw) {
	case READ:
		zram_stat64_inc(zram, &zram->stats.num_reads);
		break;
	case WRITE:
		zram_stat64_inc(zram, &zram->stats.num_writes);
		break;
	}

	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec->bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only make operation on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec->bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec->bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
				goto out;

			bv.bv_len = bvec->bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
			    < 0)
				goto out;

		update_position(&index, &offset, bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	if (unlikely(
		(bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
		(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) ||
		(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) {
		return 0;
	}

	/* I/O request is valid */
	return 1;
}

/*
 * Handler function for all zram I/O requests.
 */
static int zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (!valid_io_request(zram, bio)) {
		zram_stat64_inc(zram, &zram->stats.invalid_io);
		bio_io_error(bio);
		return 0;
	}

	if (unlikely(!zram->init_done) && zram_init_device(zram)) {
		bio_io_error(bio);
		return 0;
	}

	__zram_make_request(zram, bio, bio_data_dir(bio));

	return 0;
}

void zram_reset_device(struct zram *zram)
{
	size_t index;

	mutex_lock(&zram->init_lock);
	zram->init_done = 0;

	/* Free various per-device buffers */
	kfree(zram->compress_workmem);
	free_pages((unsigned long)zram->compress_buffer, 1);

	zram->compress_workmem = NULL;
	zram->compress_buffer = NULL;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		struct page *page;
		u16 offset;

		page = zram->table[index].page;
		offset = zram->table[index].offset;

		if (!page)
			continue;

		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
			__free_page(page);
		else
			xv_free(zram->mem_pool, page, offset);
	}

	vfree(zram->table);
	zram->table = NULL;

	xv_destroy_pool(zram->mem_pool);
	zram->mem_pool = NULL;

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	mutex_unlock(&zram->init_lock);
}
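
/*
 * Allocate the per-device resources on first use: LZO work memory, a
 * two-page compression buffer, the page table sized to disksize, and
 * the xvmalloc pool. Called lazily from zram_make_request() when the
 * first bio arrives and init_done is still clear.
 */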
int zram_init_device(struct zram *zram)
{
	int ret;
	size_t num_pages;

	mutex_lock(&zram->init_lock);

	if (zram->init_done) {
		mutex_unlock(&zram->init_lock);
		return 0;
	}

	zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);

	zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!zram->compress_workmem) {
		pr_err("Error allocating compressor working memory!\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
	if (!zram->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		ret = -ENOMEM;
		goto fail;
	}

	num_pages = zram->disksize >> PAGE_SHIFT;
	zram->table = vzalloc(num_pages * sizeof(*zram->table));
	if (!zram->table) {
		pr_err("Error allocating zram address table\n");
		/* To prevent accessing table entries during cleanup */
		zram->disksize = 0;
		ret = -ENOMEM;
		goto fail;
	}

	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

	/* zram devices sort of resembles non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->mem_pool = xv_create_pool();
	if (!zram->mem_pool) {
		pr_err("Error creating memory pool\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->init_done = 1;
	mutex_unlock(&zram->init_lock);

	pr_debug("Initialization done!\n");
	return 0;

fail:
	mutex_unlock(&zram->init_lock);
	zram_reset_device(zram);

	pr_err("Initialization failed: err=%d\n", ret);
	return ret;
}

void zram_slot_free_notify(struct block_device *bdev, unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;
	zram_free_page(zram, index);
	zram_stat64_inc(zram, &zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};
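
/*
 * Per-device block layer setup: allocate the request queue and gendisk,
 * wire up zram_make_request(), and advertise PAGE_SIZE-aligned I/O via
 * the logical/physical block size and io_min/io_opt hints so that the
 * driver only ever sees nicely aligned requests.
 */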
static int create_device(struct zram *zram, int device_id)
{
	int ret = 0;

	init_rwsem(&zram->lock);
	mutex_init(&zram->init_lock);
	spin_lock_init(&zram->stat64_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		blk_cleanup_queue(zram->queue);
		pr_warning("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZED sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warning("Error creating sysfs group");
		goto out;
	}

	zram->init_done = 0;

out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	if (zram->disk) {
		del_gendisk(zram->disk);
		put_disk(zram->disk);
	}

	if (zram->queue)
		blk_cleanup_queue(zram->queue);
}
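
/*
 * Module init: validate num_devices, register the "zram" block major,
 * then allocate and create each device. Devices start with zero
 * capacity; memory is only committed once a device is sized and first
 * used.
 */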
static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warning("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warning("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	if (!num_devices) {
		pr_info("num_devices not specified. Using default: 1\n");
		num_devices = 1;
	}

	/* Allocate the device array and initialize each one */
	pr_info("Creating %u devices ...\n", num_devices);
	devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&devices[--dev_id]);
	kfree(devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &devices[i];

		destroy_device(zram);
		if (zram->init_done)
			zram_reset_device(zram);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(devices);
	pr_debug("Cleanup done!\n");
}

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");