/*
 * Ram backed block device driver.
 *
 * Copyright (C) 2007 Nick Piggin
 * Copyright (C) 2007 Novell Inc.
 *
 * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright
 * of their respective owners.
 */

#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>

#include <linux/uaccess.h>

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)

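/*
 * For illustration: assuming 4KiB pages and 512-byte sectors,
 * PAGE_SECTORS_SHIFT is 12 - 9 = 3 and PAGE_SECTORS is 8. Sector 1001
 * then lives in page index 1001 >> 3 = 125, at byte offset
 * (1001 & 7) << 9 = 512 within that page.
 */
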
/*
 * Each block ramdisk device has a radix_tree brd_pages of pages that stores
 * the pages containing the block device's contents. A brd page's ->index is
 * its offset in PAGE_SIZE units. This is similar to, but in no way connected
 * with, the kernel's pagecache or buffer cache (which sit above our block
 * device).
 */
struct brd_device {
	int			brd_number;

	struct request_queue	*brd_queue;
	struct gendisk		*brd_disk;
	struct list_head	brd_list;

	/*
	 * Backing store of pages and lock to protect it. This is the contents
	 * of the block device.
	 */
	spinlock_t		brd_lock;
	struct radix_tree_root	brd_pages;
};

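/*
 * Note the backing store is sparse: pages are only allocated on first
 * write. For example (assuming 4KiB pages), writing one 4KiB block at
 * byte offset 1MiB allocates a single page at radix index 256; reading
 * an index that has no page simply returns zeroes.
 */
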
/*
 * Look up and return a brd's page for a given sector.
 */
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;

	/*
	 * The page lifetime is protected by the fact that we have opened the
	 * device node -- brd pages will never be deleted under us, so we
	 * don't need any further locking or refcounting.
	 *
	 * This is strictly true for the radix-tree nodes as well (ie. we
	 * don't actually need the rcu_read_lock()), however that is not a
	 * documented feature of the radix-tree API so it is better to be
	 * safe here (we don't have total exclusion from radix tree updates
	 * here, only deletes).
	 */
	rcu_read_lock();
	idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
	page = radix_tree_lookup(&brd->brd_pages, idx);
	rcu_read_unlock();

	BUG_ON(page && page->index != idx);

	return page;
}

/*
 * Look up and return a brd's page for a given sector.
 * If one does not exist, allocate an empty page, and insert that. Then
 * use it.
 */
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;
	gfp_t gfp_flags;

	page = brd_lookup_page(brd, sector);
	if (page)
		return page;

	/*
	 * Must use NOIO because we don't want to recurse back into the
	 * block or filesystem layers from page reclaim.
	 *
	 * Cannot support DAX and highmem, because our ->direct_access
	 * routine for DAX must return memory that is always addressable.
	 * If DAX was reworked to use pfns and kmap throughout, this
	 * restriction might be able to be lifted.
	 */
	gfp_flags = GFP_NOIO | __GFP_ZERO;
	page = alloc_page(gfp_flags);
	if (!page)
		return NULL;

	if (radix_tree_preload(GFP_NOIO)) {
		__free_page(page);
		return NULL;
	}

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page->index = idx;
	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
		/* Lost a race with another inserter: use the existing page. */
		__free_page(page);
		page = radix_tree_lookup(&brd->brd_pages, idx);
		BUG_ON(!page);
		BUG_ON(page->index != idx);
	}
	spin_unlock(&brd->brd_lock);

	radix_tree_preload_end();

	return page;
}

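/*
 * The preload/insert pattern above is what lets the insertion run under
 * a spinlock: radix_tree_preload() allocates any needed tree nodes up
 * front with GFP_NOIO, so radix_tree_insert() itself never has to
 * allocate (and thus never sleeps) inside the critical section.
 */
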
/*
 * Free all backing store pages and radix tree. This must only be called when
 * there are no other users of the device.
 */
#define FREE_BATCH 16
static void brd_free_pages(struct brd_device *brd)
{
	unsigned long pos = 0;
	struct page *pages[FREE_BATCH];
	int nr_pages;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
				(void **)pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			void *ret;

			BUG_ON(pages[i]->index < pos);
			pos = pages[i]->index;
			ret = radix_tree_delete(&brd->brd_pages, pos);
			BUG_ON(!ret || ret != pages[i]);
			__free_page(pages[i]);
		}

		pos++;

		/*
		 * This assumes radix_tree_gang_lookup always returns as
		 * many pages as possible. If the radix-tree code changes,
		 * so will this have to.
		 */
	} while (nr_pages == FREE_BATCH);
}

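/*
 * Worked example: with 40 allocated pages the loop above runs three
 * times, deleting batches of 16, 16 and 8 pages; the short final batch
 * (8 != FREE_BATCH) terminates the do/while.
 */
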
/*
 * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
 */
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
{
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	if (!brd_insert_page(brd, sector))
		return -ENOSPC;
	if (copy < n) {
		sector += copy >> SECTOR_SHIFT;
		if (!brd_insert_page(brd, sector))
			return -ENOSPC;
	}
	return 0;
}

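/*
 * A single copy is at most PAGE_SIZE bytes, so it can straddle at most
 * two brd pages. For example (4KiB pages), a 4KiB write starting at
 * sector 4 begins at offset 2KiB into one page and spills 2KiB into
 * the next, which is why up to two insertions are attempted above.
 */
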
/*
 * Copy n bytes from src to the brd starting at sector. Does not sleep.
 */
static void copy_to_brd(struct brd_device *brd, const void *src,
			sector_t sector, size_t n)
{
	struct page *page;
	void *dst;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	BUG_ON(!page);

	dst = kmap_atomic(page);
	memcpy(dst + offset, src, copy);
	kunmap_atomic(dst);

	if (copy < n) {
		src += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		BUG_ON(!page);

		dst = kmap_atomic(page);
		memcpy(dst, src, copy);
		kunmap_atomic(dst);
	}
}

/*
 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
 */
static void copy_from_brd(void *dst, struct brd_device *brd,
			sector_t sector, size_t n)
{
	struct page *page;
	void *src;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	if (page) {
		src = kmap_atomic(page);
		memcpy(dst, src + offset, copy);
		kunmap_atomic(src);
	} else
		memset(dst, 0, copy);

	if (copy < n) {
		dst += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		if (page) {
			src = kmap_atomic(page);
			memcpy(dst, src, copy);
			kunmap_atomic(src);
		} else
			memset(dst, 0, copy);
	}
}

/*
 * Process a single bvec of a bio.
 */
static int brd_do_bvec(struct brd_device *brd, struct page *page,
			unsigned int len, unsigned int off, unsigned int op,
			sector_t sector)
{
	void *mem;
	int err = 0;

	if (op_is_write(op)) {
		err = copy_to_brd_setup(brd, sector, len);
		if (err)
			goto out;
	}

	mem = kmap_atomic(page);
	if (!op_is_write(op)) {
		copy_from_brd(mem + off, brd, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		copy_to_brd(brd, mem + off, sector, len);
	}
	kunmap_atomic(mem);

out:
	return err;
}

static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
{
	struct brd_device *brd = bio->bi_disk->private_data;
	struct bio_vec bvec;
	sector_t sector;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;
	if (bio_end_sector(bio) > get_capacity(bio->bi_disk))
		goto io_error;

	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;
		int err;

		err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
				  bio_op(bio), sector);
		if (err)
			goto io_error;
		sector += len >> SECTOR_SHIFT;
	}

	bio_endio(bio);
	return BLK_QC_T_NONE;
io_error:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}

static int brd_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, unsigned int op)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	int err;

	if (PageTransHuge(page))
		return -ENOTSUPP;
	err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
	page_endio(page, op_is_write(op), err);
	return err;
}

static const struct block_device_operations brd_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		brd_rw_page,
};

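/*
 * ->rw_page is an optional synchronous fast path: callers such as the
 * swap code can read or write a single page without building a bio.
 * Together with BDI_CAP_SYNCHRONOUS_IO (set in brd_alloc below) it
 * tells the MM layer that I/O to this device completes immediately.
 */
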
/*
 * And now the modules code and kernel interface.
 */
static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
module_param(rd_nr, int, 0444);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");

unsigned long rd_size = CONFIG_BLK_DEV_RAM_SIZE;
module_param(rd_size, ulong, 0444);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");

static int max_part = 1;
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "Num Minors to reserve between devices");

MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);

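/*
 * Illustrative usage (not part of this file): loading the module with
 * "modprobe brd rd_nr=4 rd_size=1048576" creates /dev/ram0../dev/ram3,
 * each 1 GiB, since rd_size is given in KiB.
 */
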
#ifndef MODULE
/* Legacy boot options - nonmodular */
static int __init ramdisk_size(char *str)
{
	rd_size = simple_strtol(str, NULL, 0);
	return 1;
}
__setup("ramdisk_size=", ramdisk_size);
#endif

/*
 * The device scheme is derived from loop.c. Keep them in synch where possible
 * (should share code eventually).
 */
static LIST_HEAD(brd_devices);
static DEFINE_MUTEX(brd_devices_mutex);

static struct brd_device *brd_alloc(int i)
{
	struct brd_device *brd;
	struct gendisk *disk;

	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
	if (!brd)
		goto out;
	brd->brd_number		= i;
	spin_lock_init(&brd->brd_lock);
	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);

	brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
	if (!brd->brd_queue)
		goto out_free_dev;

	blk_queue_make_request(brd->brd_queue, brd_make_request);
	blk_queue_max_hw_sectors(brd->brd_queue, 1024);

	/* This is so fdisk will align partitions on 4k, because of
	 * direct_access API needing 4k alignment, returning a PFN
	 * (This is only a problem on very small devices <= 4M,
	 *  otherwise fdisk will align on 1M. Regardless this call
	 *  is harmless)
	 */
	blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);
	disk = brd->brd_disk = alloc_disk(max_part);
	if (!disk)
		goto out_free_queue;
	disk->major		= RAMDISK_MAJOR;
	disk->first_minor	= i * max_part;
	disk->fops		= &brd_fops;
	disk->private_data	= brd;
	disk->flags		= GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "ram%d", i);
	set_capacity(disk, rd_size * 2);
	brd->brd_queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;

	/* Tell the block layer that this is not a rotational device */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, brd->brd_queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, brd->brd_queue);

	return brd;

out_free_queue:
	blk_cleanup_queue(brd->brd_queue);
out_free_dev:
	kfree(brd);
out:
	return NULL;
}

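/*
 * Unit note: rd_size is in KiB while set_capacity() takes 512-byte
 * sectors, hence the "* 2" above; e.g. rd_size=4096 (4 MiB) becomes a
 * capacity of 8192 sectors.
 */
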
static void brd_free(struct brd_device *brd)
{
	put_disk(brd->brd_disk);
	blk_cleanup_queue(brd->brd_queue);
	brd_free_pages(brd);
	kfree(brd);
}

static struct brd_device *brd_init_one(int i, bool *new)
{
	struct brd_device *brd;

	*new = false;
	list_for_each_entry(brd, &brd_devices, brd_list) {
		if (brd->brd_number == i)
			goto out;
	}

	brd = brd_alloc(i);
	if (brd) {
		brd->brd_disk->queue = brd->brd_queue;
		add_disk(brd->brd_disk);
		list_add_tail(&brd->brd_list, &brd_devices);
	}
	*new = true;
out:
	return brd;
}

static void brd_del_one(struct brd_device *brd)
{
	list_del(&brd->brd_list);
	del_gendisk(brd->brd_disk);
	brd_free(brd);
}

static struct kobject *brd_probe(dev_t dev, int *part, void *data)
{
	struct brd_device *brd;
	struct kobject *kobj;
	bool new;

	mutex_lock(&brd_devices_mutex);
	brd = brd_init_one(MINOR(dev) / max_part, &new);
	kobj = brd ? get_disk_and_module(brd->brd_disk) : NULL;
	mutex_unlock(&brd_devices_mutex);

	if (new)
		*part = 0;

	return kobj;
}

static int __init brd_init(void)
{
	struct brd_device *brd, *next;
	int i;

	/*
	 * The brd module now has a feature to instantiate the underlying
	 * device structure on-demand, provided that a dev node is accessed.
	 *
	 * (1) If rd_nr is specified, create that many upfront; otherwise
	 *     it defaults to CONFIG_BLK_DEV_RAM_COUNT.
	 * (2) Users can further extend brd devices by creating dev nodes
	 *     themselves and having the kernel automatically instantiate
	 *     the actual device on-demand. Example:
	 *		mknod /path/devnod_name b 1 X	# 1 is the rd major
	 *		fdisk -l /path/devnod_name
	 *	If (X / max_part) was not already created it will be created
	 *	dynamically.
	 */

	if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
		return -EIO;

	if (unlikely(!max_part))
		max_part = 1;

	for (i = 0; i < rd_nr; i++) {
		brd = brd_alloc(i);
		if (!brd)
			goto out_free;
		list_add_tail(&brd->brd_list, &brd_devices);
	}

	/* point of no return */

	list_for_each_entry(brd, &brd_devices, brd_list) {
		/*
		 * Associate with the queue just before adding the disk, so
		 * that the failure path stays simple.
		 */
		brd->brd_disk->queue = brd->brd_queue;
		add_disk(brd->brd_disk);
	}

	blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS,
				  THIS_MODULE, brd_probe, NULL, NULL);

	pr_info("brd: module loaded\n");
	return 0;

out_free:
	list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
		list_del(&brd->brd_list);
		brd_free(brd);
	}
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module NOT loaded !!!\n");
	return -ENOMEM;
}

static void __exit brd_exit(void)
{
	struct brd_device *brd, *next;

	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
		brd_del_one(brd);

	blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS);
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module unloaded\n");
}

module_init(brd_init);
module_exit(brd_exit);
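
/*
 * Quick smoke test from userspace (illustrative, not part of the
 * driver):
 *
 *	# modprobe brd rd_nr=1 rd_size=16384	# one 16 MiB ram disk
 *	# mkfs.ext4 /dev/ram0
 *	# mount /dev/ram0 /mnt
 *
 * All data disappears once the module is unloaded, since brd_exit()
 * frees every backing page via brd_del_one() -> brd_free().
 */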