/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include "dm.h"
#include "dm-bio-list.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>
#include <linux/smp_lock.h>
#define DM_MSG_PREFIX "core"

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
};

/*
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};
union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}
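/*
 * MINOR_ALLOCED is a placeholder stored in the minor IDR while a
 * mapped_device is still being constructed; alloc_dev() swaps it for the
 * real md pointer with idr_replace(), and dm_find_md() refuses to hand it
 * out to callers.
 */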
#define MINOR_ALLOCED ((void *)-1)
/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
/*
 * Work processed by per-device workqueue.
 */
struct dm_wq_req {
	enum {
		DM_WQ_FLUSH_DEFERRED,
		DM_WQ_FLUSH_ALL,
	} type;
	struct work_struct work;
	struct mapped_device *md;
	void *context;
};
struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;
	spinlock_t pushback_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct bio_list deferred;
	struct bio_list pushback;

	/*
	 * Processing queue (flush/barriers)
	 */
	struct workqueue_struct *wq;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support require holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *suspended_bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;
};
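/*
 * Rough locking overview (as used below): io_lock serialises bio
 * submission against suspend, suspend_lock serialises suspend/resume and
 * table swaps, map_lock protects md->map for dm_get_table(),
 * pushback_lock protects the noflush push-back bio list, and the global
 * _minor_lock guards the minor IDR and the open/deleting transitions.
 */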
#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static int __init local_init(void)
{
	int r;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return -ENOMEM;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache) {
		kmem_cache_destroy(_io_cache);
		return -ENOMEM;
	}

	r = dm_uevent_init();
	if (r) {
		kmem_cache_destroy(_tio_cache);
		kmem_cache_destroy(_io_cache);
		return r;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0) {
		kmem_cache_destroy(_tio_cache);
		kmem_cache_destroy(_io_cache);
		dm_uevent_exit();
		return r;
	}

	if (!_major)
		_major = r;

	return 0;
}
static void local_exit(void)
{
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}
static int (*_inits[])(void) __initdata = {
	local_init,
	/* ...the remaining dm subsystem init hooks, in call order... */
};

static void (*_exits[])(void) = {
	local_exit,
	/* ...the matching exit hooks, in the same order... */
};
static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}
static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}
/*
 * Block device functions
 */
static int dm_blk_open(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = inode->i_bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}
static int dm_blk_close(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	atomic_dec(&md->open_count);
	dm_put(md);

	return 0;
}
int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}
/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}
static int dm_blk_ioctl(struct inode *inode, struct file *file,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md;
	struct dm_table *map;
	struct dm_target *tgt;
	int r = -ENOTTY;

	/* We don't really need this lock, but we do need 'inode'. */
	unlock_kernel();

	md = inode->i_bdev->bd_disk->private_data;

	map = dm_get_table(md);

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, inode, file, cmd, arg);

out:
	dm_table_put(map);

	lock_kernel();
	return r;
}
static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static struct dm_target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;

	io->start_time = jiffies;

	preempt_disable();
	disk_round_stats(dm_disk(md));
	preempt_enable();
	dm_disk(md)->in_flight = atomic_inc_return(&md->pending);
}
static int end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending;
	int rw = bio_data_dir(bio);

	preempt_disable();
	disk_round_stats(dm_disk(md));
	preempt_enable();
	dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending);

	disk_stat_add(dm_disk(md), ticks[rw], duration);

	return !pending;
}
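/*
 * start_io_acct()/end_io_acct() keep the gendisk statistics and
 * md->pending in step; end_io_acct() returns true once the last in-flight
 * io for this device completes, which dec_pending() uses to wake a
 * suspend waiter sleeping on md->wait.
 */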
/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_write(&md->io_lock);
		return 1;
	}

	bio_list_add(&md->deferred, bio);

	up_write(&md->io_lock);
	return 0; /* deferred successfully */
}
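/*
 * Deferred bios accumulate on md->deferred while DMF_BLOCK_IO is set
 * (i.e. during suspend); dm_resume() later runs __flush_deferred_io()
 * via the per-device workqueue to resubmit them through __split_bio().
 */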
/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}
/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}
/*-----------------------------------------------------------------
 * A more elegant soln is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this.  So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;

	/* Push-back supersedes any I/O errors */
	if (error && !(io->error > 0 && __noflush_suspending(io->md)))
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 * This must be handled before the sleeper on
			 * suspend queue merges the pushback list.
			 */
			spin_lock_irqsave(&io->md->pushback_lock, flags);
			if (__noflush_suspending(io->md))
				bio_list_add(&io->md->pushback, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&io->md->pushback_lock, flags);
		}

		if (end_io_acct(io))
			/* nudge anyone waiting on suspend queue */
			wake_up(&io->md->wait);

		if (io->error != DM_ENDIO_REQUEUE) {
			blk_add_trace_bio(io->md->queue, io->bio,
					  BLK_TA_COMPLETE);

			bio_endio(io->bio, io->error);
		}

		free_io(io->md, io);
	}
}
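/*
 * DM_ENDIO_REQUEUE handling: while a noflush suspend is in progress the
 * original bio is parked on md->pushback instead of being completed;
 * __merge_pushback_list() later moves it onto md->deferred so it gets
 * reissued after resume rather than failed with an error.
 */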
static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	dec_pending(tio->io, error);

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	free_tio(md, tio);
	bio_put(bio);
}
static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}
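/*
 * Worked example of the split_io rounding above (illustrative values):
 * with ti->split_io = 64 sectors and offset = 100, the next 64-sector
 * boundary is (100 + 64) & ~63 = 128, so boundary = 128 - 100 = 28 and
 * len is clamped to 28 sectors so a clone never crosses a chunk.
 */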
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
				    tio->io->bio->bi_bdev->bd_dev,
				    clone->bi_sector, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}
struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};
static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}
/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;

	return clone;
}
/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	return clone;
}
static int __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max = max_io_len(ci->md, ci->sector, ti);

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count,
				  ci->md->bs);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				if (!dm_target_is_valid(ti))
					return -EIO;

				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci->md);
				tio->io = ci->io;
				tio->ti = ti;
				memset(&tio->info, 0, sizeof(tio->info));
			}

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}

	return 0;
}
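/*
 * __clone_and_map() above handles three cases per iteration: the whole
 * remainder fits in the current target (one clone and done), some whole
 * bvecs fit (clone_bio() takes as many as possible), or a single bvec
 * spans targets (repeated split_bvec() until that bvec is consumed).
 */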
/*
 * Split the bio into several clones.
 */
static int __split_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	ci.map = dm_get_table(md);
	if (unlikely(!ci.map))
		return -EIO;

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count && !error)
		error = __clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);

	return error;
}
/*-----------------------------------------------------------------
 *---------------------------------------------------------------*/
static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *ti;
	sector_t max_sectors;
	int max_size = 0;

	if (unlikely(!map))
		goto out;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))
		goto out_table;

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
	if (max_size < 0)
		max_size = 0;

	/*
	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);

out_table:
	dm_table_put(map);

out:
	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;

	return max_size;
}
/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(struct request_queue *q, struct bio *bio)
{
	int r = -EIO;
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;

	/*
	 * There is no use in forwarding any barrier request since we can't
	 * guarantee it is (or can be) handled by the targets correctly.
	 */
	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	down_read(&md->io_lock);

	disk_stat_inc(dm_disk(md), ios[rw]);
	disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->io_lock);

		if (bio_rw(bio) != READA)
			r = queue_io(md, bio);

		if (r <= 0)
			goto out_req;

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->io_lock);
	}

	r = __split_bio(md, bio);
	up_read(&md->io_lock);

out_req:
	if (r < 0)
		bio_io_error(bio);

	return 0;
}
static void dm_unplug_all(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}
static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r;
	struct mapped_device *md = (struct mapped_device *) congested_data;
	struct dm_table *map = dm_get_table(md);

	if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
		r = bdi_bits;
	else
		r = dm_table_any_congested(map, bdi_bits);

	dm_table_put(map);
	return r;
}
/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}
/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}
static int next_free_minor(int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}
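/*
 * Both minor allocators above follow the two-step IDR pattern of this
 * era: idr_pre_get() preallocates memory outside the spinlock, then
 * idr_get_new()/idr_get_new_above() does the actual insertion under
 * _minor_lock, storing MINOR_ALLOCED until the device is published.
 */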
static struct block_device_operations dm_blk_dops;
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	init_rwsem(&md->io_lock);
	mutex_init(&md->suspend_lock);
	spin_lock_init(&md->pushback_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad_queue;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad_io_pool;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad_tio_pool;

	md->bs = bioset_create(16, 16);
	if (!md->bs)
		goto bad_no_bioset;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad_disk;

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = create_singlethread_workqueue("kdmflush");
	if (!md->wq)
		goto bad_thread;

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad_thread:
	put_disk(md->disk);
bad_disk:
	bioset_free(md->bs);
bad_no_bioset:
	mempool_destroy(md->tio_pool);
bad_tio_pool:
	mempool_destroy(md->io_pool);
bad_io_pool:
	blk_cleanup_queue(md->queue);
bad_queue:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kfree(md);
	return NULL;
}
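/*
 * Ordering in alloc_dev(): the minor is reserved first (as MINOR_ALLOCED),
 * then the queue, mempools, bioset, gendisk and workqueue are built, and
 * only the final idr_replace() makes the md visible to dm_find_md(); on
 * any failure the error path unwinds in reverse order of construction.
 */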
static void unlock_fs(struct mapped_device *md);
static void free_dev(struct mapped_device *md)
{
	int minor = md->disk->first_minor;

	if (md->suspended_bdev) {
		unlock_fs(md);
		bdput(md->suspended_bdev);
	}
	destroy_workqueue(md->wq);
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	bioset_free(md->bs);
	del_gendisk(md->disk);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
	kfree(md);
}
/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &md->disk->dev.kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}
static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
	i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
}
static int __bind(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	if (md->suspended_bdev)
		__set_size(md, size);
	if (size == 0)
		return 0;

	dm_table_get(t);
	dm_table_event_callback(t, event_callback, md);

	write_lock(&md->map_lock);
	md->map = t;
	dm_table_set_restrictions(t, q);
	write_unlock(&md->map_lock);

	return 0;
}
static void __unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	md->map = NULL;
	write_unlock(&md->map_lock);

	dm_table_put(map);
}
/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	*result = md;
	return 0;
}
static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (dm_disk(md)->first_minor != minor) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}
struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}
void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}
void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);
void dm_put(struct mapped_device *md)
{
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED, dm_disk(md)->first_minor);
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		__unbind(md);
		dm_table_put(map);
		free_dev(md);
	}
}
EXPORT_SYMBOL_GPL(dm_put);
static int dm_wait_for_completion(struct mapped_device *md)
{
	int r = 0;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		smp_mb();
		if (!atomic_read(&md->pending))
			break;

		if (signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	return r;
}
/*
 * Process the deferred bios
 */
static void __flush_deferred_io(struct mapped_device *md)
{
	struct bio *c;

	while ((c = bio_list_pop(&md->deferred))) {
		if (__split_bio(md, c))
			bio_io_error(c);
	}

	clear_bit(DMF_BLOCK_IO, &md->flags);
}
static void __merge_pushback_list(struct mapped_device *md)
{
	unsigned long flags;

	spin_lock_irqsave(&md->pushback_lock, flags);
	clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	bio_list_merge_head(&md->deferred, &md->pushback);
	bio_list_init(&md->pushback);
	spin_unlock_irqrestore(&md->pushback_lock, flags);
}
static void dm_wq_work(struct work_struct *work)
{
	struct dm_wq_req *req = container_of(work, struct dm_wq_req, work);
	struct mapped_device *md = req->md;

	down_write(&md->io_lock);
	switch (req->type) {
	case DM_WQ_FLUSH_ALL:
		__merge_pushback_list(md);
		/* pass through */
	case DM_WQ_FLUSH_DEFERRED:
		__flush_deferred_io(md);
		break;
	default:
		DMERR("dm_wq_work: unrecognised work type %d", req->type);
		BUG();
	}
	up_write(&md->io_lock);
}
static void dm_wq_queue(struct mapped_device *md, int type, void *context,
			struct dm_wq_req *req)
{
	req->type = type;
	req->md = md;
	req->context = context;
	INIT_WORK(&req->work, dm_wq_work);
	queue_work(md->wq, &req->work);
}
static void dm_queue_flush(struct mapped_device *md, int type, void *context)
{
	struct dm_wq_req req;

	dm_wq_queue(md, type, context, &req);
	flush_workqueue(md->wq);
}
/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int r = -EINVAL;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended(md))
		goto out;

	/* without bdev, the device size cannot be changed */
	if (!md->suspended_bdev)
		if (get_capacity(md->disk) != dm_table_get_size(table))
			goto out;

	__unbind(md);
	r = __bind(md, table);

out:
	mutex_unlock(&md->suspend_lock);
	return r;
}
/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->suspended_bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	/* don't bdput right now, we don't want the bdev
	 * to go away while it is locked.
	 */
	return 0;
}
static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->suspended_bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	DECLARE_WAITQUEUE(wait, current);
	int r = 0;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	mutex_lock(&md->suspend_lock);

	if (dm_suspended(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	map = dm_get_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/* bdget() can stall if the pending I/Os are not flushed */
	if (!noflush) {
		md->suspended_bdev = bdget_disk(md->disk, 0);
		if (!md->suspended_bdev) {
			DMWARN("bdget failed in dm_suspend");
			r = -ENOMEM;
			goto flush_and_out;
		}

		/*
		 * Flush I/O to the device. noflush supersedes do_lockfs,
		 * because lock_fs() needs to flush I/Os.
		 */
		if (do_lockfs) {
			r = lock_fs(md);
			if (r)
				goto out;
		}
	}

	/*
	 * First we set the BLOCK_IO flag so no more ios will be mapped.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO, &md->flags);

	add_wait_queue(&md->wait, &wait);
	up_write(&md->io_lock);

	/* unplug */
	if (map)
		dm_table_unplug_all(map);

	/*
	 * Wait for the already-mapped ios to complete.
	 */
	r = dm_wait_for_completion(md);

	down_write(&md->io_lock);
	remove_wait_queue(&md->wait, &wait);

	if (noflush)
		__merge_pushback_list(md);
	up_write(&md->io_lock);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL);

		unlock_fs(md);
		goto out; /* pushback list is already flushed, so skip flush */
	}

	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

flush_and_out:
	if (r && noflush)
		/*
		 * Because there may be already I/Os in the pushback list,
		 * flush them before return.
		 */
		dm_queue_flush(md, DM_WQ_FLUSH_ALL, NULL);

out:
	if (r && md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	dm_table_put(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}
int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended(md))
		goto out;

	map = dm_get_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL);

	unlock_fs(md);

	if (md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);

	dm_kobject_uevent(md);

	r = 0;

out:
	dm_table_put(map);
	mutex_unlock(&md->suspend_lock);

	return r;
}
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
void dm_kobject_uevent(struct mapped_device *md)
{
	kobject_uevent(&md->disk->dev.kobj, KOBJ_CHANGE);
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}
/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}
int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}
int dm_noflush_suspending(struct dm_target *ti)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	int r = __noflush_suspending(md);

	dm_put(md);

	return r;
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");