/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%u\n", zram->init_done);
}

static ssize_t num_reads_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.num_reads));
}

static ssize_t num_writes_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.num_writes));
}

static ssize_t invalid_io_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.invalid_io));
}

static ssize_t notify_free_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.notify_free));
}

static ssize_t zero_pages_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%u\n", atomic_read(&zram->stats.pages_zero));
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)(atomic_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t compr_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.compr_size));
}

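/*
 * mem_used_total reports how much memory the zsmalloc pool currently
 * consumes for this device. The init_lock read-side protects the pool
 * query against a concurrent reset or disksize change.
 */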
static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta = zram->meta;

	down_read(&zram->init_lock);
	if (zram->init_done)
		val = zs_get_total_size_bytes(meta->mem_pool);
	up_read(&zram->init_lock);

	return sprintf(buf, "%llu\n", val);
}

/* flag operations need meta->tb_lock */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags &= ~BIT(flag);
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	u64 start, end, bound;

	/* unaligned request */
	if (unlikely(bio->bi_iter.bi_sector &
		(ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	start = bio->bi_iter.bi_sector;
	end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}

static void zram_meta_free(struct zram_meta *meta)
{
	zs_destroy_pool(meta->mem_pool);
	kfree(meta->compress_workmem);
	free_pages((unsigned long)meta->compress_buffer, 1);
	vfree(meta->table);
	kfree(meta);
}

static struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		goto out;

	meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!meta->compress_workmem)
		goto free_meta;

	meta->compress_buffer =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!meta->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		goto free_workmem;
	}

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto free_buffer;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto free_table;
	}

	rwlock_init(&meta->tb_lock);
	mutex_init(&meta->buffer_lock);
	return meta;

free_table:
	vfree(meta->table);
free_buffer:
	free_pages((unsigned long)meta->compress_buffer, 1);
free_workmem:
	kfree(meta->compress_workmem);
free_meta:
	kfree(meta);
	meta = NULL;
out:
	return meta;
}

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

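/*
 * page_zero_filled() scans a page word by word and returns 1 only if every
 * word is zero; such pages are recorded with the ZRAM_ZERO flag instead of
 * being compressed and stored in the zsmalloc pool.
 */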
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

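/*
 * handle_zero_page() satisfies a read of a zero-filled page without touching
 * the zsmalloc pool: it simply clears the relevant part of the destination
 * page.
 */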
static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

/* NOTE: caller should hold meta->tb_lock with write-side */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;
	u16 size = meta->table[index].size;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(size > max_zpage_size))
		atomic_dec(&zram->stats.bad_compress);

	zs_free(meta->mem_pool, handle);

	if (size <= PAGE_SIZE / 2)
		atomic_dec(&zram->stats.good_compress);

	atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
	atomic_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	meta->table[index].size = 0;
}

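/*
 * zram_decompress_page() fills 'mem' with the uncompressed data stored at
 * 'index'. Pages stored as a full PAGE_SIZE object (incompressible data) are
 * copied verbatim; everything else goes through LZO decompression.
 */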
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = LZO_E_OK;
	size_t clen = PAGE_SIZE;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	u16 size;

	read_lock(&meta->tb_lock);
	handle = meta->table[index].handle;
	size = meta->table[index].size;

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		read_unlock(&meta->tb_lock);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = lzo1x_decompress_safe(cmem, size, mem, &clen);
	zs_unmap_object(meta->mem_pool, handle);
	read_unlock(&meta->tb_lock);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		atomic64_inc(&zram->stats.failed_reads);
		return ret;
	}

	return 0;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	read_lock(&meta->tb_lock);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		read_unlock(&meta->tb_lock);
		handle_zero_page(bvec);
		return 0;
	}
	read_unlock(&meta->tb_lock);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

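/*
 * zram_bvec_write() stores one bio_vec. Partial writes first read back the
 * existing page (read-modify-write). Zero-filled pages are recorded with the
 * ZRAM_ZERO flag only; pages that compress poorly (clen > max_zpage_size)
 * are stored uncompressed.
 */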
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	bool locked = false;

	page = bvec->bv_page;
	src = meta->compress_buffer;

	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	mutex_lock(&meta->buffer_lock);
	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
			bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		write_lock(&zram->meta->tb_lock);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		write_unlock(&zram->meta->tb_lock);

		atomic_inc(&zram->stats.pages_zero);
		ret = 0;
		goto out;
	}

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
			meta->compress_workmem);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	if (unlikely(clen > max_zpage_size)) {
		atomic_inc(&zram->stats.bad_compress);
		clen = PAGE_SIZE;
		src = NULL;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}
	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	write_lock(&zram->meta->tb_lock);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	meta->table[index].size = clen;
	write_unlock(&zram->meta->tb_lock);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_size);
	atomic_inc(&zram->stats.pages_stored);
	if (clen <= PAGE_SIZE / 2)
		atomic_inc(&zram->stats.good_compress);

out:
	if (locked)
		mutex_unlock(&meta->buffer_lock);
	if (is_partial_io(bvec))
		kfree(uncmem);

	if (ret)
		atomic64_inc(&zram->stats.failed_writes);
	return ret;
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio, int rw)
{
	int ret;

	if (rw == READ)
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
	else
		ret = zram_bvec_write(zram, bvec, index, offset);

	return ret;
}

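/*
 * zram_reset_device() tears down an initialized device: every remaining
 * zsmalloc handle is freed, the metadata is released and the statistics are
 * cleared. reset_capacity controls whether the gendisk capacity is also set
 * back to zero.
 */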
static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
	size_t index;
	struct zram_meta *meta;

	down_write(&zram->init_lock);
	if (!zram->init_done) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	zram->init_done = 0;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = meta->table[index].handle;
		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zram_meta_free(zram->meta);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	if (reset_capacity)
		set_capacity(zram->disk, 0);
	up_write(&zram->init_lock);
}

static void zram_init_device(struct zram *zram, struct zram_meta *meta)
{
	if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %lu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		(totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
		);
	}

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->meta = meta;
	zram->init_done = 1;

	pr_debug("Initialization done!\n");
}

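/*
 * disksize_store() is the sysfs handler behind /sys/block/zram<id>/disksize.
 * Typical usage from userspace (sizes are parsed by memparse(), so suffixes
 * such as K, M and G are accepted):
 *
 *	echo 512M > /sys/block/zram0/disksize
 *	mkswap /dev/zram0 && swapon /dev/zram0
 *
 * The size can only be set once; changing it requires a reset first.
 */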
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(disksize);
	if (!meta)
		return -ENOMEM;
	down_write(&zram->init_lock);
	if (zram->init_done) {
		up_write(&zram->init_lock);
		zram_meta_free(meta);
		pr_info("Cannot change disksize for initialized device\n");
		return -EBUSY;
	}

	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	zram_init_device(zram, meta);
	up_write(&zram->init_lock);

	return len;
}

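/*
 * reset_store() implements /sys/block/zram<id>/reset: writing a non-zero
 * value releases all stored pages and returns the device to the
 * uninitialized state, provided no one still holds the block device open.
 */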
static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);

	if (!bdev)
		return -ENOMEM;

	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	bdput(bdev);

	zram_reset_device(zram, true);
	return len;

out:
	bdput(bdev);
	return ret;
}

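/*
 * __zram_make_request() walks the bio segment by segment. zram_bvec_rw()
 * operates on a single zram page, so a bio_vec that crosses a page boundary
 * is split into two pieces before being handed down.
 */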
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	switch (rw) {
	case READ:
		atomic64_inc(&zram->stats.num_reads);
		break;
	case WRITE:
		atomic64_inc(&zram->stats.num_writes);
		break;
	}

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		(SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only make operation on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, bio, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw)
			    < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!zram->init_done))
		goto error;

	if (!valid_io_request(zram, bio)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio, bio_data_dir(bio));
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}

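/*
 * zram_slot_free_notify() is called by the swap code when a swap slot is
 * freed, letting zram drop the compressed copy immediately instead of
 * waiting for the slot to be overwritten.
 */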
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	write_lock(&meta->tb_lock);
	zram_free_page(zram, index);
	write_unlock(&meta->tb_lock);
	atomic64_inc(&zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
		disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	NULL,
};

static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};

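/*
 * create_device() allocates the request queue and gendisk for one zram
 * device, sets the block-layer limits so that I/O arrives PAGE_SIZE aligned,
 * and exposes the sysfs attribute group defined above.
 */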
static int create_device(struct zram *zram, int device_id)
{
	int ret = -ENOMEM;

	init_rwsem(&zram->init_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZED sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group");
		goto out_free_disk;
	}

	zram->init_done = 0;
	return 0;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(zram->queue);
out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	del_gendisk(zram->disk);
	put_disk(zram->disk);

	blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	pr_info("Created %u device(s) ...\n", num_devices);

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		/*
		 * Shouldn't access zram->disk after destroy_device
		 * because destroy_device already released zram->disk.
		 */
		zram_reset_device(zram, false);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");