/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

static int zram_major;
struct zram *zram_devices;

/* Module params (documentation at end) */
unsigned int zram_num_devices;

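/*
 * Helpers for the 32-bit counters in zram->stats. The 64-bit counters
 * are updated through the stat64 helpers below, which take stat64_lock
 * so that the update is atomic even on 32-bit machines.
 */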
static void zram_stat_inc(u32 *v)
{
	*v = *v + 1;
}

static void zram_stat_dec(u32 *v)
{
	*v = *v - 1;
}

static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
	spin_lock(&zram->stat64_lock);
	*v = *v + inc;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
	spin_lock(&zram->stat64_lock);
	*v = *v - dec;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_inc(struct zram *zram, u64 *v)
{
	zram_stat64_add(zram, v, 1);
}

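/* Per-page flags are kept as bits in zram->table[index].flags. */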
static int zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}

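/* Returns 1 if the page at ptr contains only zero bytes, 0 otherwise. */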
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

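/*
 * Pick a default disk size (a percentage of total RAM) when none was
 * configured, and warn if the requested size is more than twice the
 * size of RAM.
 */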
static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
{
	if (!zram->disksize) {
		pr_info(
		"disk size not provided. You can use disksize_kb module "
		"param to specify size.\nUsing default: (%u%% of RAM).\n",
		default_disksize_perc_ram
		);
		zram->disksize = default_disksize_perc_ram *
					(totalram_bytes / 100);
	}

	if (zram->disksize > 2 * (totalram_bytes)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %zu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		totalram_bytes >> 10, zram->disksize
		);
	}

	zram->disksize &= PAGE_MASK;
}

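/* Release the memory backing a single page-sized slot and update stats. */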
static void zram_free_page(struct zram *zram, size_t index)
{
	u32 clen;
	void *obj;

	struct page *page = zram->table[index].page;
	u32 offset = zram->table[index].offset;

	if (unlikely(!page)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			zram_clear_flag(zram, index, ZRAM_ZERO);
			zram_stat_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		clen = PAGE_SIZE;
		__free_page(page);
		zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
		zram_stat_dec(&zram->stats.pages_expand);
		goto out;
	}

	obj = kmap_atomic(page, KM_USER0) + offset;
	clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
	kunmap_atomic(obj, KM_USER0);

	xv_free(zram->mem_pool, page, offset);
	if (clen <= PAGE_SIZE / 2)
		zram_stat_dec(&zram->stats.good_compress);

out:
	zram_stat64_sub(zram, &zram->stats.compr_size, clen);
	zram_stat_dec(&zram->stats.pages_stored);

	zram->table[index].page = NULL;
	zram->table[index].offset = 0;
}

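/*
 * Serve a read of a zero-filled (or never written) page by zeroing
 * the requested bio segment.
 */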
static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page, KM_USER0);
	memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	kunmap_atomic(user_mem, KM_USER0);

	flush_dcache_page(page);
}

static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset)
{
	struct page *page = bvec->bv_page;
	unsigned char *user_mem, *cmem;

	user_mem = kmap_atomic(page, KM_USER0);
	cmem = kmap_atomic(zram->table[index].page, KM_USER1);

	memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
	kunmap_atomic(cmem, KM_USER1);
	kunmap_atomic(user_mem, KM_USER0);

	flush_dcache_page(page);
}

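/* A "partial" I/O covers less than one full zram page. */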
static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			u32 index, int offset, struct bio *bio)
{
	int ret;
	size_t clen;
	struct page *page;
	struct zobj_header *zheader;
	unsigned char *user_mem, *cmem, *uncmem = NULL;

	page = bvec->bv_page;

	if (zram_test_flag(zram, index, ZRAM_ZERO)) {
		handle_zero_page(bvec);
		return 0;
	}

	/* Requested page is not present in compressed area */
	if (unlikely(!zram->table[index].page)) {
		pr_debug("Read before write: sector=%lu, size=%u",
			(ulong)(bio->bi_sector), bio->bi_size);
		handle_zero_page(bvec);
		return 0;
	}

	/* Page is stored uncompressed since it's incompressible */
	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		handle_uncompressed_page(zram, bvec, index, offset);
		return 0;
	}

	if (is_partial_io(bvec)) {
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!uncmem) {
			pr_info("Error allocating temp memory!\n");
			return -ENOMEM;
		}
	}

	user_mem = kmap_atomic(page, KM_USER0);
	if (!is_partial_io(bvec))
		uncmem = user_mem;
	clen = PAGE_SIZE;

	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
		zram->table[index].offset;

	ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
			xv_get_object_size(cmem) - sizeof(*zheader),
			uncmem, &clen);

	if (is_partial_io(bvec)) {
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
			bvec->bv_len);
		kfree(uncmem);
	}

	kunmap_atomic(cmem, KM_USER1);
	kunmap_atomic(user_mem, KM_USER0);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
	}

	flush_dcache_page(page);

	return 0;
}

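/*
 * Decompress the current contents of slot 'index' into 'mem' so that a
 * partial write can be merged with the data already stored there.
 */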
static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
{
	int ret;
	size_t clen = PAGE_SIZE;
	struct zobj_header *zheader;
	unsigned char *cmem;

	if (zram_test_flag(zram, index, ZRAM_ZERO) ||
			!zram->table[index].page) {
		memset(mem, 0, PAGE_SIZE);
		return 0;
	}

	cmem = kmap_atomic(zram->table[index].page, KM_USER0) +
		zram->table[index].offset;

	/* Page is stored uncompressed since it's incompressible */
	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		memcpy(mem, cmem, PAGE_SIZE);
		kunmap_atomic(cmem, KM_USER0);
		return 0;
	}

	ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
			xv_get_object_size(cmem) - sizeof(*zheader),
			mem, &clen);
	kunmap_atomic(cmem, KM_USER0);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
	}

	return 0;
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset)
{
	int ret;
	u32 store_offset;
	size_t clen;
	struct zobj_header *zheader;
	struct page *page, *page_store;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;

	page = bvec->bv_page;
	src = zram->compress_buffer;

	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before to write the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!uncmem) {
			pr_info("Error allocating temp memory!\n");
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_read_before_write(zram, uncmem, index);
		if (ret) {
			kfree(uncmem);
			goto out;
		}
	}

	/*
	 * System overwrites unused sectors. Free memory associated
	 * with this sector now.
	 */
	if (zram->table[index].page ||
			zram_test_flag(zram, index, ZRAM_ZERO))
		zram_free_page(zram, index);

	user_mem = kmap_atomic(page, KM_USER0);

	if (is_partial_io(bvec))
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
			bvec->bv_len);
	else
		uncmem = user_mem;

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem, KM_USER0);
		if (is_partial_io(bvec))
			kfree(uncmem);
		zram_stat_inc(&zram->stats.pages_zero);
		zram_set_flag(zram, index, ZRAM_ZERO);
		ret = 0;
		goto out;
	}

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
				zram->compress_workmem);

	kunmap_atomic(user_mem, KM_USER0);
	if (is_partial_io(bvec))
		kfree(uncmem);

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	/*
	 * Page is incompressible. Store it as-is (uncompressed)
	 * since we do not want to return too many disk write
	 * errors which has side effect of hanging the system.
	 */
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
		if (unlikely(!page_store)) {
			pr_info("Error allocating memory for "
				"incompressible page: %u\n", index);
			ret = -ENOMEM;
			goto out;
		}

		store_offset = 0;
		zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
		zram_stat_inc(&zram->stats.pages_expand);
		zram->table[index].page = page_store;
		src = kmap_atomic(page, KM_USER0);
		goto memstore;
	}

	if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
			&zram->table[index].page, &store_offset,
			GFP_NOIO | __GFP_HIGHMEM)) {
		pr_info("Error allocating memory for compressed "
			"page: %u, size=%zu\n", index, clen);
		ret = -ENOMEM;
		goto out;
	}

memstore:
	zram->table[index].offset = store_offset;

	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
		zram->table[index].offset;

	/* Back-reference needed for memory defragmentation */
	if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
		zheader = (struct zobj_header *)cmem;
		zheader->table_idx = index;
		cmem += sizeof(*zheader);
	}

	memcpy(cmem, src, clen);

	kunmap_atomic(cmem, KM_USER1);
	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
		kunmap_atomic(src, KM_USER0);

	/* Update stats */
	zram_stat64_add(zram, &zram->stats.compr_size, clen);
	zram_stat_inc(&zram->stats.pages_stored);
	if (clen <= PAGE_SIZE / 2)
		zram_stat_inc(&zram->stats.good_compress);

	return 0;

out:
	if (ret)
		zram_stat64_inc(zram, &zram->stats.failed_writes);
	return ret;
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio, int rw)
{
	int ret;

	if (rw == READ) {
		down_read(&zram->lock);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
		up_read(&zram->lock);
	} else {
		down_write(&zram->lock);
		ret = zram_bvec_write(zram, bvec, index, offset);
		up_write(&zram->lock);
	}

	return ret;
}

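/* Advance (index, offset) past the bio segment that was just handled. */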
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
	int i;
	u32 index;
	int offset;
	struct bio_vec *bvec;

	switch (rw) {
	case READ:
		zram_stat64_inc(zram, &zram->stats.num_reads);
		break;
	case WRITE:
		zram_stat64_inc(zram, &zram->stats.num_writes);
		break;
	}

	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec->bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only make operation on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec->bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec->bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
				goto out;

			bv.bv_len = bvec->bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
				< 0)
				goto out;

		update_position(&index, &offset, bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	if (unlikely(
		(bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
		(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) ||
		(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) {
		return 0;
	}

	/* I/O request is valid */
	return 1;
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (unlikely(!zram->init_done) && zram_init_device(zram))
		goto error;

	down_read(&zram->init_lock);
	if (unlikely(!zram->init_done))
		goto error_unlock;

	if (!valid_io_request(zram, bio)) {
		zram_stat64_inc(zram, &zram->stats.invalid_io);
		goto error_unlock;
	}

	__zram_make_request(zram, bio, bio_data_dir(bio));
	up_read(&zram->init_lock);

	return;

error_unlock:
	up_read(&zram->init_lock);
error:
	bio_io_error(bio);
}

void __zram_reset_device(struct zram *zram)
{
	size_t index;

	zram->init_done = 0;

	/* Free various per-device buffers */
	kfree(zram->compress_workmem);
	free_pages((unsigned long)zram->compress_buffer, 1);

	zram->compress_workmem = NULL;
	zram->compress_buffer = NULL;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		struct page *page;
		u16 offset;

		page = zram->table[index].page;
		offset = zram->table[index].offset;

		if (!page)
			continue;

		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
			__free_page(page);
		else
			xv_free(zram->mem_pool, page, offset);
	}

	vfree(zram->table);
	zram->table = NULL;

	xv_destroy_pool(zram->mem_pool);
	zram->mem_pool = NULL;

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
}

void zram_reset_device(struct zram *zram)
{
	down_write(&zram->init_lock);
	__zram_reset_device(zram);
	up_write(&zram->init_lock);
}

int zram_init_device(struct zram *zram)
{
	int ret;
	size_t num_pages;

	down_write(&zram->init_lock);

	if (zram->init_done) {
		up_write(&zram->init_lock);
		return 0;
	}

	zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);

	zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!zram->compress_workmem) {
		pr_err("Error allocating compressor working memory!\n");
		ret = -ENOMEM;
		goto fail_no_table;
	}

	zram->compress_buffer =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!zram->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		ret = -ENOMEM;
		goto fail_no_table;
	}

	num_pages = zram->disksize >> PAGE_SHIFT;
	zram->table = vzalloc(num_pages * sizeof(*zram->table));
	if (!zram->table) {
		pr_err("Error allocating zram address table\n");
		ret = -ENOMEM;
		goto fail_no_table;
	}

	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

	/* zram devices sort of resembles non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->mem_pool = xv_create_pool();
	if (!zram->mem_pool) {
		pr_err("Error creating memory pool\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->init_done = 1;
	up_write(&zram->init_lock);

	pr_debug("Initialization done!\n");
	return 0;

fail_no_table:
	/* To prevent accessing table entries during cleanup */
	zram->disksize = 0;
fail:
	__zram_reset_device(zram);
	up_write(&zram->init_lock);
	pr_err("Initialization failed: err=%d\n", ret);
	return ret;
}

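/*
 * Called when swap frees a slot on this device; release the backing
 * memory immediately instead of waiting for the sector to be rewritten.
 */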
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;
	zram_free_page(zram, index);
	zram_stat64_inc(zram, &zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

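/*
 * Set up one zram device: request queue, gendisk and the sysfs
 * attribute group under /sys/block/zram<id>/.
 */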
static int create_device(struct zram *zram, int device_id)
{
	int ret = 0;

	init_rwsem(&zram->lock);
	init_rwsem(&zram->init_lock);
	spin_lock_init(&zram->stat64_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		blk_cleanup_queue(zram->queue);
		pr_warning("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZED sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warning("Error creating sysfs group");
		goto out;
	}

	zram->init_done = 0;

out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	if (zram->disk) {
		del_gendisk(zram->disk);
		put_disk(zram->disk);
	}

	if (zram->queue)
		blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (zram_num_devices > max_num_devices) {
		pr_warning("Invalid value for num_devices: %u\n",
				zram_num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warning("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	if (!zram_num_devices) {
		pr_info("num_devices not specified. Using default: 1\n");
		zram_num_devices = 1;
	}

	/* Allocate the device array and initialize each one */
	pr_info("Creating %u devices ...\n", zram_num_devices);
	zram_devices = kzalloc(zram_num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < zram_num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < zram_num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		zram_reset_device(zram);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}

module_param(zram_num_devices, uint, 0);
MODULE_PARM_DESC(zram_num_devices, "Number of zram devices");

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");