/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

static int zram_major;
static struct zram *zram_devices;

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

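/*
 * Usage note (illustrative, not part of the original sources): num_devices
 * is a plain module parameter (see module_param() at the end of this file),
 * so the number of /dev/zram<N> nodes is chosen at load time, e.g.
 *
 *	modprobe zram num_devices=4
 *
 * creates zram0..zram3; the built-in default of 1 creates only zram0.
 */
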
static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%u\n", zram->init_done);
}

static ssize_t num_reads_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.num_reads));
}

static ssize_t num_writes_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.num_writes));
}

static ssize_t invalid_io_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.invalid_io));
}

static ssize_t notify_free_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.notify_free));
}

static ssize_t zero_pages_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%u\n", atomic_read(&zram->stats.pages_zero));
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)(atomic_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t compr_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.compr_size));
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta = zram->meta;

	down_read(&zram->init_lock);
	if (zram->init_done)
		val = zs_get_total_size_bytes(meta->mem_pool);
	up_read(&zram->init_lock);

	return sprintf(buf, "%llu\n", val);
}

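/*
 * Usage note (illustrative): each *_show() helper above backs a read-only
 * sysfs attribute under /sys/block/zram<id>/ (see the DEVICE_ATTR block near
 * the end of this file), so the statistics can be sampled from userspace,
 * e.g.
 *
 *	cat /sys/block/zram0/mem_used_total
 *	cat /sys/block/zram0/compr_data_size
 *
 * The size-related attributes are reported in bytes, as formatted by the
 * sprintf() calls above.
 */
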
/* flag operations need meta->tb_lock */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags &= ~BIT(flag);
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	u64 start, end, bound;

	/* unaligned request */
	if (unlikely(bio->bi_iter.bi_sector &
		     (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	start = bio->bi_iter.bi_sector;
	end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}

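/*
 * Worked example (illustrative, assuming 4 KiB PAGE_SIZE and a 4 KiB zram
 * logical block): a bio starting at sector 8 with bi_size == 8192 passes
 * both alignment checks above, while one starting at sector 3, or carrying
 * 1024 bytes, is rejected and ends up counted in the invalid_io statistic
 * by zram_make_request().
 */
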
static void zram_meta_free(struct zram_meta *meta)
{
	zs_destroy_pool(meta->mem_pool);
	kfree(meta->compress_workmem);
	free_pages((unsigned long)meta->compress_buffer, 1);
	vfree(meta->table);
	kfree(meta);
}

static struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		goto out;

	meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!meta->compress_workmem)
		goto free_meta;

	meta->compress_buffer =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!meta->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		goto free_workmem;
	}

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto free_buffer;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto free_table;
	}

	rwlock_init(&meta->tb_lock);
	mutex_init(&meta->buffer_lock);
	return meta;

free_table:
	vfree(meta->table);
free_buffer:
	free_pages((unsigned long)meta->compress_buffer, 1);
free_workmem:
	kfree(meta->compress_workmem);
free_meta:
	kfree(meta);
	meta = NULL;
out:
	return meta;
}

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

/* NOTE: caller should hold meta->tb_lock with write-side */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;
	u16 size = meta->table[index].size;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(size > max_zpage_size))
		atomic_dec(&zram->stats.bad_compress);

	zs_free(meta->mem_pool, handle);

	if (size <= PAGE_SIZE / 2)
		atomic_dec(&zram->stats.good_compress);

	atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
	atomic_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	meta->table[index].size = 0;
}

static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = LZO_E_OK;
	size_t clen = PAGE_SIZE;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	u16 size;

	read_lock(&meta->tb_lock);
	handle = meta->table[index].handle;
	size = meta->table[index].size;

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		read_unlock(&meta->tb_lock);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = lzo1x_decompress_safe(cmem, size, mem, &clen);
	zs_unmap_object(meta->mem_pool, handle);
	read_unlock(&meta->tb_lock);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		atomic64_inc(&zram->stats.failed_reads);
		return ret;
	}

	return 0;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	read_lock(&meta->tb_lock);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		read_unlock(&meta->tb_lock);
		handle_zero_page(bvec);
		return 0;
	}
	read_unlock(&meta->tb_lock);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	bool locked = false;

	page = bvec->bv_page;
	src = meta->compress_buffer;

	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	mutex_lock(&meta->buffer_lock);
	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		write_lock(&zram->meta->tb_lock);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		write_unlock(&zram->meta->tb_lock);

		atomic_inc(&zram->stats.pages_zero);
		ret = 0;
		goto out;
	}

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
			       meta->compress_workmem);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	if (unlikely(clen > max_zpage_size)) {
		atomic_inc(&zram->stats.bad_compress);
		clen = PAGE_SIZE;
		src = NULL;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}
	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	write_lock(&zram->meta->tb_lock);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	meta->table[index].size = clen;
	write_unlock(&zram->meta->tb_lock);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_size);
	atomic_inc(&zram->stats.pages_stored);
	if (clen <= PAGE_SIZE / 2)
		atomic_inc(&zram->stats.good_compress);

out:
	if (locked)
		mutex_unlock(&meta->buffer_lock);
	if (is_partial_io(bvec))
		kfree(uncmem);

	if (ret)
		atomic64_inc(&zram->stats.failed_writes);
	return ret;
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio, int rw)
{
	int ret;

	if (rw == READ)
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
	else
		ret = zram_bvec_write(zram, bvec, index, offset);

	return ret;
}

static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
	size_t index;
	struct zram_meta *meta;

	down_write(&zram->init_lock);
	if (!zram->init_done) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	zram->init_done = 0;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = meta->table[index].handle;
		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zram_meta_free(zram->meta);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	if (reset_capacity)
		set_capacity(zram->disk, 0);
	up_write(&zram->init_lock);
}

static void zram_init_device(struct zram *zram, struct zram_meta *meta)
{
	if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %lu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		(totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
		);
	}

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->meta = meta;
	zram->init_done = 1;

	pr_debug("Initialization done!\n");
}

static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(disksize);
	if (!meta)
		return -ENOMEM;
	down_write(&zram->init_lock);
	if (zram->init_done) {
		up_write(&zram->init_lock);
		zram_meta_free(meta);
		pr_info("Cannot change disksize for initialized device\n");
		return -EBUSY;
	}

	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	zram_init_device(zram, meta);
	up_write(&zram->init_lock);

	return len;
}

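/*
 * Usage note (illustrative): a device is configured by writing a size to
 * this attribute before first use, e.g.
 *
 *	echo 512M > /sys/block/zram0/disksize
 *	mkswap /dev/zram0 && swapon /dev/zram0
 *
 * memparse() accepts the usual K/M/G suffixes. Once init_done is set, the
 * size cannot be changed again without resetting the device.
 */
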
static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);

	if (!bdev)
		return -ENOMEM;

	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	bdput(bdev);

	zram_reset_device(zram, true);
	return len;

out:
	bdput(bdev);
	return ret;
}

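/*
 * Usage note (illustrative): writing a non-zero value to the reset attribute
 * drops all stored pages and returns the device to its unconfigured state,
 * provided nothing holds the block device open, e.g.
 *
 *	swapoff /dev/zram0
 *	echo 1 > /sys/block/zram0/reset
 *
 * The bd_holders check above is what rejects resetting an in-use device.
 */
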
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	switch (rw) {
	case READ:
		atomic64_inc(&zram->stats.num_reads);
		break;
	case WRITE:
		atomic64_inc(&zram->stats.num_writes);
		break;
	}

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, bio, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw)
			    < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!zram->init_done))
		goto error;

	if (!valid_io_request(zram, bio)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio, bio_data_dir(bio));
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}

static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	write_lock(&meta->tb_lock);
	zram_free_page(zram, index);
	write_unlock(&meta->tb_lock);
	atomic64_inc(&zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
		disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	NULL,
};

static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};

static int create_device(struct zram *zram, int device_id)
{
	int ret = -ENOMEM;

	init_rwsem(&zram->init_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZED sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group");
		goto out_free_disk;
	}

	zram->init_done = 0;
	return 0;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(zram->queue);
out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	del_gendisk(zram->disk);
	put_disk(zram->disk);

	blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	pr_info("Created %u device(s) ...\n", num_devices);

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		/*
		 * Shouldn't access zram->disk after destroy_device
		 * because destroy_device already released zram->disk.
		 */
		zram_reset_device(zram, false);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");