/*
 * Ram backed block device driver.
 *
 * Copyright (C) 2007 Nick Piggin
 * Copyright (C) 2007 Novell Inc.
 *
 * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright
 * of their respective owners.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/fs.h>
#include <linux/slab.h>
#ifdef CONFIG_BLK_DEV_RAM_DAX
#include <linux/pfn_t.h>
#endif

#include <asm/uaccess.h>
#define SECTOR_SHIFT		9
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
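/*
 * Worked example (illustrative): with 4 KiB pages and 512-byte sectors,
 * PAGE_SECTORS_SHIFT is 12 - 9 = 3 and PAGE_SECTORS is 8, so sector 35
 * lives in the page with ->index 35 >> 3 = 4, at byte offset
 * (35 & 7) << 9 = 1536 within that page.
 */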
/*
 * Each block ramdisk device has a radix_tree brd_pages of pages that stores
 * the pages containing the block device's contents. A brd page's ->index is
 * its offset in PAGE_SIZE units. This is similar to, but in no way connected
 * with, the kernel's pagecache or buffer cache (which sit above our block
 * device).
 */
struct brd_device {
	int		brd_number;

	struct request_queue	*brd_queue;
	struct gendisk		*brd_disk;
	struct list_head	brd_list;

	/*
	 * Backing store of pages and lock to protect it. This is the contents
	 * of the block device.
	 */
	spinlock_t		brd_lock;
	struct radix_tree_root	brd_pages;
};
/*
 * Look up and return a brd's page for a given sector.
 */
static DEFINE_MUTEX(brd_mutex);
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;

	/*
	 * The page lifetime is protected by the fact that we have opened the
	 * device node -- brd pages will never be deleted under us, so we
	 * don't need any further locking or refcounting.
	 *
	 * This is strictly true for the radix-tree nodes as well (ie. we
	 * don't actually need the rcu_read_lock()), however that is not a
	 * documented feature of the radix-tree API so it is better to be
	 * safe here (we don't have total exclusion from radix tree updates
	 * here, only deletes).
	 */
	rcu_read_lock();
	idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
	page = radix_tree_lookup(&brd->brd_pages, idx);
	rcu_read_unlock();

	BUG_ON(page && page->index != idx);

	return page;
}
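/*
 * Note that a NULL return simply means the sector was never written:
 * readers treat such holes as zero-filled (see copy_from_brd() below).
 */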
/*
 * Look up and return a brd's page for a given sector.
 * If one does not exist, allocate an empty page, and insert that. Then
 * return it.
 */
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;
	gfp_t gfp_flags;

	page = brd_lookup_page(brd, sector);
	if (page)
		return page;

	/*
	 * Must use NOIO because we don't want to recurse back into the
	 * block or filesystem layers from page reclaim.
	 *
	 * Cannot support DAX and highmem, because our ->direct_access
	 * routine for DAX must return memory that is always addressable.
	 * If DAX was reworked to use pfns and kmap throughout, this
	 * restriction might be able to be lifted.
	 */
	gfp_flags = GFP_NOIO | __GFP_ZERO;
#ifndef CONFIG_BLK_DEV_RAM_DAX
	gfp_flags |= __GFP_HIGHMEM;
#endif
	page = alloc_page(gfp_flags);
	if (!page)
		return NULL;

	if (radix_tree_preload(GFP_NOIO)) {
		__free_page(page);
		return NULL;
	}

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page->index = idx;
	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
		__free_page(page);
		page = radix_tree_lookup(&brd->brd_pages, idx);
		BUG_ON(!page);
		BUG_ON(page->index != idx);
	}
	spin_unlock(&brd->brd_lock);

	radix_tree_preload_end();

	return page;
}
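/*
 * The insert above uses the usual preload pattern: radix_tree_preload()
 * is called before taking brd_lock so node allocation may sleep, and the
 * insertion itself runs under the spinlock. If radix_tree_insert() fails
 * because another CPU won the race, the freshly allocated page is freed
 * and the winner's page is returned instead.
 */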
static void brd_free_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;
	pgoff_t idx;

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page = radix_tree_delete(&brd->brd_pages, idx);
	spin_unlock(&brd->brd_lock);
	if (page)
		__free_page(page);
}
static void brd_zero_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;

	page = brd_lookup_page(brd, sector);
	if (page)
		clear_highpage(page);
}
/*
 * Free all backing store pages and radix tree. This must only be called when
 * there are no other users of the device.
 */
#define FREE_BATCH 16
static void brd_free_pages(struct brd_device *brd)
{
	unsigned long pos = 0;
	struct page *pages[FREE_BATCH];
	int nr_pages;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
				(void **)pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			void *ret;

			BUG_ON(pages[i]->index < pos);
			pos = pages[i]->index;
			ret = radix_tree_delete(&brd->brd_pages, pos);
			BUG_ON(!ret || ret != pages[i]);
			__free_page(pages[i]);
		}

		pos++;

		/*
		 * This assumes radix_tree_gang_lookup always returns as
		 * many pages as possible. If the radix-tree code changes,
		 * so will this have to.
		 */
	} while (nr_pages == FREE_BATCH);
}
/*
 * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
 */
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
{
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	if (!brd_insert_page(brd, sector))
		return -ENOSPC;
	if (copy < n) {
		sector += copy >> SECTOR_SHIFT;
		if (!brd_insert_page(brd, sector))
			return -ENOSPC;
	}
	return 0;
}
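/*
 * A segment passed to brd_do_bvec() is at most PAGE_SIZE bytes, so it can
 * straddle at most one page boundary; that is why a single extra
 * brd_insert_page() call suffices when copy < n.
 */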
static void discard_from_brd(struct brd_device *brd,
			sector_t sector, size_t n)
{
	while (n >= PAGE_SIZE) {
		/*
		 * Don't want to actually discard pages here because
		 * re-allocating the pages can result in writeback
		 * deadlocks under heavy load.
		 */
		if (0)
			brd_free_page(brd, sector);
		else
			brd_zero_page(brd, sector);
		sector += PAGE_SIZE >> SECTOR_SHIFT;
		n -= PAGE_SIZE;
	}
}
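/*
 * Either branch preserves the discard_zeroes_data = 1 promise made in
 * brd_alloc(): a freed page reads back as a zero-filled hole, and a
 * zeroed page reads back as zeroes directly.
 */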
/*
 * Copy n bytes from src to the brd starting at sector. Does not sleep.
 */
static void copy_to_brd(struct brd_device *brd, const void *src,
			sector_t sector, size_t n)
{
	struct page *page;
	void *dst;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	BUG_ON(!page);

	dst = kmap_atomic(page);
	memcpy(dst + offset, src, copy);
	kunmap_atomic(dst);

	if (copy < n) {
		src += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		BUG_ON(!page);

		dst = kmap_atomic(page);
		memcpy(dst, src, copy);
		kunmap_atomic(dst);
	}
}
/*
 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
 */
static void copy_from_brd(void *dst, struct brd_device *brd,
			sector_t sector, size_t n)
{
	struct page *page;
	void *src;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	if (page) {
		src = kmap_atomic(page);
		memcpy(dst, src + offset, copy);
		kunmap_atomic(src);
	} else
		memset(dst, 0, copy);

	if (copy < n) {
		dst += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		if (page) {
			src = kmap_atomic(page);
			memcpy(dst, src, copy);
			kunmap_atomic(src);
		} else
			memset(dst, 0, copy);
	}
}
/*
 * Process a single bvec of a bio.
 */
static int brd_do_bvec(struct brd_device *brd, struct page *page,
			unsigned int len, unsigned int off, bool is_write,
			sector_t sector)
{
	void *mem;
	int err = 0;

	if (is_write) {
		err = copy_to_brd_setup(brd, sector, len);
		if (err)
			goto out;
	}

	mem = kmap_atomic(page);
	if (!is_write) {
		copy_from_brd(mem + off, brd, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		copy_to_brd(brd, mem + off, sector, len);
	}
	kunmap_atomic(mem);

out:
	return err;
}
static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct brd_device *brd = bdev->bd_disk->private_data;
	struct bio_vec bvec;
	sector_t sector;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;
	if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
		goto io_error;

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
		    bio->bi_iter.bi_size & ~PAGE_MASK)
			goto io_error;
		discard_from_brd(brd, sector, bio->bi_iter.bi_size);
		goto out;
	}

	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;
		int err;

		err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
					op_is_write(bio_op(bio)), sector);
		if (err)
			goto io_error;
		sector += len >> SECTOR_SHIFT;
	}

out:
	bio_endio(bio);
	return BLK_QC_T_NONE;
io_error:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}
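/*
 * brd is a bio-based driver: every bio is completed synchronously in
 * brd_make_request() and nothing is ever queued, so there is no cookie
 * for blk_poll() and we always return BLK_QC_T_NONE.
 */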
static int brd_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, bool is_write)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
	page_endio(page, is_write, err);
	return err;
}
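/*
 * ->rw_page lets callers such as the swap code read or write a single
 * page synchronously without allocating a bio.
 */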
#ifdef CONFIG_BLK_DEV_RAM_DAX
static long brd_direct_access(struct block_device *bdev, sector_t sector,
			void **kaddr, pfn_t *pfn, long size)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	struct page *page;

	if (!brd)
		return -ENODEV;
	page = brd_insert_page(brd, sector);
	if (!page)
		return -ENOSPC;
	*kaddr = page_address(page);
	*pfn = page_to_pfn_t(page);

	return PAGE_SIZE;
}
#else
#define brd_direct_access NULL
#endif
static int brd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	int error;
	struct brd_device *brd = bdev->bd_disk->private_data;

	if (cmd != BLKFLSBUF)
		return -ENOTTY;

	/*
	 * ram device BLKFLSBUF has special semantics, we want to actually
	 * release and destroy the ramdisk data.
	 */
	mutex_lock(&brd_mutex);
	mutex_lock(&bdev->bd_mutex);
	error = -EBUSY;
	if (bdev->bd_openers <= 1) {
		/*
		 * Kill the cache first, so it isn't written back to the
		 * device.
		 *
		 * Another thread might instantiate more buffercache here,
		 * but there is not much we can do to close that race.
		 */
		kill_bdev(bdev);
		brd_free_pages(brd);
		error = 0;
	}
	mutex_unlock(&bdev->bd_mutex);
	mutex_unlock(&brd_mutex);

	return error;
}
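/*
 * Illustrative use (hypothetical device name):
 *	blockdev --flushbufs /dev/ram0
 * issues BLKFLSBUF and, provided no one else holds the device open,
 * releases all of ram0's backing pages.
 */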
static const struct block_device_operations brd_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		brd_rw_page,
	.ioctl =		brd_ioctl,
	.direct_access =	brd_direct_access,
};
/*
 * And now the modules code and kernel interface.
 */
static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
module_param(rd_nr, int, S_IRUGO);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");

int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
module_param(rd_size, int, S_IRUGO);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");

static int max_part = 1;
module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Num Minors to reserve between devices");

MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");
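/*
 * Illustrative module load:
 *	modprobe brd rd_nr=2 rd_size=4096 max_part=4
 * creates /dev/ram0 and /dev/ram1 of 4 MiB each, with minor numbers
 * spaced four apart so partitions can be instantiated on demand.
 */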
#ifndef MODULE
/* Legacy boot options - nonmodular */
static int __init ramdisk_size(char *str)
{
	rd_size = simple_strtol(str, NULL, 0);
	return 1;
}
__setup("ramdisk_size=", ramdisk_size);
#endif
/*
 * The device scheme is derived from loop.c. Keep them in synch where possible
 * (should share code eventually).
 */
static LIST_HEAD(brd_devices);
static DEFINE_MUTEX(brd_devices_mutex);
static struct brd_device *brd_alloc(int i)
{
	struct brd_device *brd;
	struct gendisk *disk;

	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
	if (!brd)
		goto out;
	brd->brd_number		= i;
	spin_lock_init(&brd->brd_lock);
	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);

	brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
	if (!brd->brd_queue)
		goto out_free_dev;

	blk_queue_make_request(brd->brd_queue, brd_make_request);
	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);

	/* This is so fdisk will align partitions on 4k, because of
	 * direct_access API needing 4k alignment, returning a PFN
	 * (This is only a problem on very small devices <= 4M,
	 *  otherwise fdisk will align on 1M. Regardless this call
	 *  is harmless)
	 */
	blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);

	brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
	blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX);
	brd->brd_queue->limits.discard_zeroes_data = 1;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);
#ifdef CONFIG_BLK_DEV_RAM_DAX
	queue_flag_set_unlocked(QUEUE_FLAG_DAX, brd->brd_queue);
#endif
	disk = brd->brd_disk = alloc_disk(max_part);
	if (!disk)
		goto out_free_queue;
	disk->major		= RAMDISK_MAJOR;
	disk->first_minor	= i * max_part;
	disk->fops		= &brd_fops;
	disk->private_data	= brd;
	disk->queue		= brd->brd_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "ram%d", i);
	/* rd_size is in KiB; capacity is counted in 512-byte sectors */
	set_capacity(disk, rd_size * 2);

	return brd;

out_free_queue:
	blk_cleanup_queue(brd->brd_queue);
out_free_dev:
	kfree(brd);
out:
	return NULL;
}
static void brd_free(struct brd_device *brd)
{
	put_disk(brd->brd_disk);
	blk_cleanup_queue(brd->brd_queue);
	brd_free_pages(brd);
	kfree(brd);
}
static struct brd_device *brd_init_one(int i, bool *new)
{
	struct brd_device *brd;

	*new = false;
	list_for_each_entry(brd, &brd_devices, brd_list) {
		if (brd->brd_number == i)
			goto out;
	}

	brd = brd_alloc(i);
	if (brd) {
		add_disk(brd->brd_disk);
		list_add_tail(&brd->brd_list, &brd_devices);
	}
	*new = true;
out:
	return brd;
}
static void brd_del_one(struct brd_device *brd)
{
	list_del(&brd->brd_list);
	del_gendisk(brd->brd_disk);
	brd_free(brd);
}
static struct kobject *brd_probe(dev_t dev, int *part, void *data)
{
	struct brd_device *brd;
	struct kobject *kobj;
	bool new;

	mutex_lock(&brd_devices_mutex);
	brd = brd_init_one(MINOR(dev) / max_part, &new);
	kobj = brd ? get_disk(brd->brd_disk) : NULL;
	mutex_unlock(&brd_devices_mutex);

	if (new)
		*part = 0;

	return kobj;
}
static inline void brd_check_and_reset_par(void)
{
	if (unlikely(!max_part))
		max_part = 1;

	/*
	 * make sure 'max_part' can be divided exactly by (1U << MINORBITS),
	 * otherwise, it is possible to get the same dev_t when adding
	 * partitions.
	 */
	if ((1U << MINORBITS) % max_part != 0)
		max_part = 1UL << fls(max_part);

	if (max_part > DISK_MAX_PARTS) {
		pr_info("brd: max_part can't be larger than %d, reset max_part = %d.\n",
			DISK_MAX_PARTS, DISK_MAX_PARTS);
		max_part = DISK_MAX_PARTS;
	}
}
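/*
 * Example: max_part=3 does not divide 1U << MINORBITS evenly, so it is
 * rounded up to 1UL << fls(3) = 4; anything above DISK_MAX_PARTS is
 * clamped to DISK_MAX_PARTS.
 */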
static int __init brd_init(void)
{
	struct brd_device *brd, *next;
	int i;

	/*
	 * brd module now has a feature to instantiate underlying device
	 * structure on-demand, provided that there is an access dev node.
	 *
	 * (1) if rd_nr is specified, create that many upfront. else
	 *     it defaults to CONFIG_BLK_DEV_RAM_COUNT
	 * (2) User can further extend brd devices by creating dev nodes
	 *     themselves and have the kernel automatically instantiate
	 *     the actual device on-demand. Example:
	 *		mknod /path/devnod_name b 1 X	# 1 is the rd major
	 *		fdisk -l /path/devnod_name
	 *	If (X / max_part) was not already created it will be created
	 *	dynamically.
	 */

	if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
		return -EIO;

	brd_check_and_reset_par();

	for (i = 0; i < rd_nr; i++) {
		brd = brd_alloc(i);
		if (!brd)
			goto out_free;
		list_add_tail(&brd->brd_list, &brd_devices);
	}

	/* point of no return */

	list_for_each_entry(brd, &brd_devices, brd_list)
		add_disk(brd->brd_disk);

	blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS,
				  THIS_MODULE, brd_probe, NULL, NULL);

	pr_info("brd: module loaded\n");
	return 0;

out_free:
	list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
		list_del(&brd->brd_list);
		brd_free(brd);
	}
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module NOT loaded !!!\n");
	return -ENOMEM;
}
static void __exit brd_exit(void)
{
	struct brd_device *brd, *next;

	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
		brd_del_one(brd);

	blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS);
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module unloaded\n");
}
module_init(brd_init);
module_exit(brd_exit);