/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include "dm.h"
#include "dm-bio-list.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;
/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	struct bio *bio;
	atomic_t io_count;
};
/*
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};
union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct target_io *)bio->bi_private)->info;

	return NULL;
}
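/*
 * Illustrative sketch only (not part of the original file): how a caller
 * holding one of the cloned bios could recover the per-target map_info
 * that was stashed in bi_private.  'example_inspect_clone' is a
 * hypothetical name.
 */
static void example_inspect_clone(struct bio *clone)
{
	union map_info *info = dm_get_mapinfo(clone);

	/* info->ptr is whatever the owning target stored at map time */
	if (info)
		printk(KERN_DEBUG "dm example: map_info ptr %p\n", info->ptr);
}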
/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FS_LOCKED 2
struct mapped_device {
	struct rw_semaphore lock;
	rwlock_t map_lock;
	atomic_t holders;

	unsigned long flags;

	request_queue_t *queue;
	struct gendisk *disk;

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct bio_list deferred;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	atomic_t event_nr;
	wait_queue_head_t eventq;

	/*
	 * freeze/thaw support require holding onto a super block.
	 */
	struct super_block *frozen_sb;
	struct block_device *frozen_bdev;
};
static kmem_cache_t *_io_cache;
static kmem_cache_t *_tio_cache;

static struct bio_set *dm_set;
static int __init local_init(void)
{
	int r;

	dm_set = bioset_create(16, 16, 4);
	if (!dm_set)
		return -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = kmem_cache_create("dm_io",
				      sizeof(struct dm_io), 0, 0, NULL, NULL);
	if (!_io_cache)
		return -ENOMEM;

	/* allocate a slab for the target ios */
	_tio_cache = kmem_cache_create("dm_tio", sizeof(struct target_io),
				       0, 0, NULL, NULL);
	if (!_tio_cache) {
		kmem_cache_destroy(_io_cache);
		return -ENOMEM;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0) {
		kmem_cache_destroy(_tio_cache);
		kmem_cache_destroy(_io_cache);
		return r;
	}

	if (!_major)
		_major = r;

	return 0;
}
static void local_exit(void)
{
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);

	if (unregister_blkdev(_major, _name) < 0)
		DMERR("devfs_unregister_blkdev failed");

	DMINFO("cleaned up");
}
int (*_inits[])(void) __initdata = {
	local_init,
	/* ... the remaining subsystem init functions ... */
};

void (*_exits[])(void) = {
	local_exit,
	/* ... the matching exit functions, in the same order ... */
};
static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);
	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

 bad:
	while (i--)
		_exits[i]();
	return r;
}
static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}
/*
 * Block device functions
 */
static int dm_blk_open(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	dm_get(md);
	return 0;
}
static int dm_blk_close(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	dm_put(md);
	return 0;
}
static inline struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static inline void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static inline struct target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static inline void free_tio(struct mapped_device *md, struct target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}
/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->lock);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_write(&md->lock);
		return 1;
	}

	bio_list_add(&md->deferred, bio);

	up_write(&md->lock);
	return 0;		/* deferred successfully */
}
/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}
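/*
 * Illustrative sketch only (not in the original file): the reference
 * pattern the comment above asks every caller to follow.  The helper
 * name 'example_table_size' is hypothetical; dm_table_get_size() is the
 * same call used by __bind() below.
 */
static sector_t example_table_size(struct mapped_device *md)
{
	struct dm_table *map = dm_get_table(md);	/* takes a reference */
	sector_t size = 0;

	if (map) {
		size = dm_table_get_size(map);
		dm_table_put(map);			/* always drop it */
	}

	return size;
}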
/*-----------------------------------------------------------------
 * A more elegant soln is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this.  So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static inline void dec_pending(struct dm_io *io, int error)
{
	if (error)
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (atomic_dec_and_test(&io->md->pending))
			/* nudge anyone waiting on suspend queue */
			wake_up(&io->md->wait);

		bio_endio(io->bio, io->bio->bi_size, io->error);
		free_io(io->md, io);
	}
}
static int clone_endio(struct bio *bio, unsigned int done, int error)
{
	int r = 0;
	struct target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (bio->bi_size)
		return 1;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0)
			error = r;
		else if (r > 0)
			/* the target wants another shot at the io */
			return 1;
	}

	free_tio(io->md, tio);
	dec_pending(io, error);
	bio_put(bio);
	return r;
}
static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;

		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}
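/*
 * Worked example (not in the original file): with ti->begin = 0,
 * ti->split_io = 8 and sector = 21, offset = 21 and
 * boundary = ((21 + 8) & ~7) - 21 = 24 - 21 = 3, so io starting at
 * sector 21 is clipped to 3 sectors and the next clone begins on the
 * 8-sector boundary at 24.  The mask arithmetic above assumes split_io
 * is a power of two.
 */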
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct target_io *tio)
{
	int r;

	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	r = ti->type->map(ti, clone, &tio->info);
	if (r > 0)
		/* the bio has been remapped so dispatch it */
		generic_make_request(clone);

	else if (r < 0) {
		/* error the io and bail out */
		struct dm_io *io = tio->io;

		free_tio(tio->io->md, tio);
		dec_pending(io, -EIO);
	}
}
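/*
 * Illustrative sketch only (not part of the original file): a minimal
 * linear-style target map function following the return convention that
 * __map_bio() above relies on (> 0: remapped, dm dispatches the clone;
 * 0: the target has taken ownership; < 0: error).  The context structure
 * and function names are hypothetical.
 */
struct example_linear_ctx {
	struct dm_dev *dev;	/* underlying device */
	sector_t start;		/* offset into that device */
};

static int example_linear_map(struct dm_target *ti, struct bio *bio,
			      union map_info *map_context)
{
	struct example_linear_ctx *lc = ti->private;

	/* redirect the clone to the backing device at the right offset */
	bio->bi_bdev = lc->dev->bdev;
	bio->bi_sector = lc->start + (bio->bi_sector - ti->begin);

	return 1;
}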
struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};
/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, dm_set);
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;

	return clone;
}
/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len)
{
	struct bio *clone;

	clone = bio_clone(bio, GFP_NOIO);
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	return clone;
}
static void __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
	sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
	struct target_io *tio;

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Create two copy bios to deal with io that has
		 * been split across a target.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;

		clone = split_bvec(bio, ci->sector, ci->idx,
				   bv->bv_offset, max);
		__map_bio(ti, clone, tio);

		ci->sector += max;
		ci->sector_count -= max;
		ti = dm_table_find_target(ci->map, ci->sector);

		len = to_sector(bv->bv_len) - max;
		clone = split_bvec(bio, ci->sector, ci->idx,
				   bv->bv_offset + to_bytes(max), len);
		tio = alloc_tio(ci->md);
		tio->io = ci->io;
		tio->ti = ti;
		memset(&tio->info, 0, sizeof(tio->info));
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx++;
	}
}
/*
 * Split the bio into several clones.
 */
static void __split_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;

	ci.map = dm_get_table(md);
	if (!ci.map) {
		bio_io_error(bio, bio->bi_size);
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	atomic_inc(&md->pending);
	while (ci.sector_count)
		__clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, 0);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 *---------------------------------------------------------------*/

/*
 * The request function that just remaps the bio built up by
 * the caller.
 */
static int dm_request(request_queue_t *q, struct bio *bio)
{
	int r;
	struct mapped_device *md = q->queuedata;

	down_read(&md->lock);

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->lock);

		if (bio_rw(bio) == READA) {
			bio_io_error(bio, bio->bi_size);
			return 0;
		}

		r = queue_io(md, bio);
		if (r < 0) {
			bio_io_error(bio, bio->bi_size);
			return 0;

		} else if (r == 0)
			return 0;	/* deferred successfully */

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->lock);
	}

	__split_bio(md, bio);
	up_read(&md->lock);
	return 0;
}
static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
			sector_t *error_sector)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	int ret = -ENXIO;

	if (map) {
		ret = dm_table_flush_all(md->map);
		dm_table_put(map);
	}

	return ret;
}
static void dm_unplug_all(request_queue_t *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}
static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r;
	struct mapped_device *md = (struct mapped_device *) congested_data;
	struct dm_table *map = dm_get_table(md);

	if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
		r = bdi_bits;
	else
		r = dm_table_any_congested(map, bdi_bits);

	dm_table_put(map);
	return r;
}
/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DECLARE_MUTEX(_minor_lock);
static DEFINE_IDR(_minor_idr);

static void free_minor(unsigned int minor)
{
	down(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	up(&_minor_lock);
}
/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(struct mapped_device *md, unsigned int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	down(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r) {
		r = -ENOMEM;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, md, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
	}

 out:
	up(&_minor_lock);
	return r;
}
static int next_free_minor(struct mapped_device *md, unsigned int *minor)
{
	int r, m;

	down(&_minor_lock);

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r) {
		r = -ENOMEM;
		goto out;
	}

	r = idr_get_new(&_minor_idr, md, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

 out:
	up(&_minor_lock);
	return r;
}
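/*
 * Illustrative sketch only (not in the original file): the two-step idr
 * allocation pattern the helpers above use (idr_pre_get() to reserve
 * memory, then idr_get_new() to take an id).  Unlike the real helpers it
 * retries on -EAGAIN and, for brevity, skips the _minor_lock locking.
 * 'example_alloc_minor' is a hypothetical name.
 */
static int example_alloc_minor(struct mapped_device *md, unsigned int *minor)
{
	int r, m;

	do {
		if (!idr_pre_get(&_minor_idr, GFP_KERNEL))
			return -ENOMEM;

		r = idr_get_new(&_minor_idr, md, &m);
	} while (r == -EAGAIN);

	if (!r)
		*minor = m;

	return r;
}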
static struct block_device_operations dm_blk_dops;
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
{
	int r;
	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	/* get a minor number for the dev */
	r = persistent ? specific_minor(md, minor) : next_free_minor(md, &minor);
	if (r < 0) {
		kfree(md);
		return NULL;
	}

	memset(md, 0, sizeof(*md));
	init_rwsem(&md->lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->event_nr, 0);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad1;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	md->queue->unplug_fn = dm_unplug_all;
	md->queue->issue_flush_fn = dm_flush_all;

	md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
				     mempool_free_slab, _io_cache);
	if (!md->io_pool)
		goto bad2;

	md->tio_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
				      mempool_free_slab, _tio_cache);
	if (!md->tio_pool)
		goto bad3;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad4;

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	return md;

 bad4:
	mempool_destroy(md->tio_pool);
 bad3:
	mempool_destroy(md->io_pool);
 bad2:
	blk_put_queue(md->queue);
 bad1:
	free_minor(minor);
	kfree(md);
	return NULL;
}
static void free_dev(struct mapped_device *md)
{
	free_minor(md->disk->first_minor);
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	del_gendisk(md->disk);
	put_disk(md->disk);
	blk_put_queue(md->queue);
	kfree(md);
}
/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	struct mapped_device *md = (struct mapped_device *) context;

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}
static void __set_size(struct gendisk *disk, sector_t size)
{
	struct block_device *bdev;

	set_capacity(disk, size);
	bdev = bdget_disk(disk, 0);
	if (bdev) {
		down(&bdev->bd_inode->i_sem);
		i_size_write(bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
		up(&bdev->bd_inode->i_sem);
		bdput(bdev);
	}
}
static int __bind(struct mapped_device *md, struct dm_table *t)
{
	request_queue_t *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);
	__set_size(md->disk, size);

	write_lock(&md->map_lock);
	md->map = t;
	write_unlock(&md->map_lock);

	dm_table_get(t);
	dm_table_event_callback(md->map, event_callback, md);
	dm_table_set_restrictions(t, q);
	return 0;
}
static void __unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	md->map = NULL;
	write_unlock(&md->map_lock);

	dm_table_put(map);
}
/*
 * Constructor for a new device.
 */
static int create_aux(unsigned int minor, int persistent,
		      struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor, persistent);
	if (!md)
		return -ENXIO;

	*result = md;
	return 0;
}

int dm_create(struct mapped_device **result)
{
	return create_aux(0, 0, result);
}

int dm_create_with_minor(unsigned int minor, struct mapped_device **result)
{
	return create_aux(minor, 1, result);
}
void *dm_get_mdptr(dev_t dev)
{
	struct mapped_device *md;
	void *mdptr = NULL;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	down(&_minor_lock);

	md = idr_find(&_minor_idr, minor);

	if (md && (dm_disk(md)->first_minor == minor))
		mdptr = md->interface_ptr;

	up(&_minor_lock);

	return mdptr;
}
void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}
void dm_put(struct mapped_device *md)
{
	struct dm_table *map = dm_get_table(md);

	if (atomic_dec_and_test(&md->holders)) {
		if (!test_bit(DMF_SUSPENDED, &md->flags) && map) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		__unbind(md);
		free_dev(md);
	}

	dm_table_put(map);
}
/*
 * Process the deferred bios
 */
static void __flush_deferred_io(struct mapped_device *md, struct bio *c)
{
	struct bio *n;

	while (c) {
		n = c->bi_next;
		c->bi_next = NULL;
		__split_bio(md, c);
		c = n;
	}
}
/*
 * Swap in a new table (destroying the old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int r;

	down_write(&md->lock);

	/* device must be suspended */
	if (!test_bit(DMF_SUSPENDED, &md->flags)) {
		up_write(&md->lock);
		return -EPERM;
	}

	__unbind(md);
	r = __bind(md, table);

	up_write(&md->lock);
	return r;
}
/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int __lock_fs(struct mapped_device *md)
{
	int error = -ENOMEM;

	if (test_and_set_bit(DMF_FS_LOCKED, &md->flags))
		return 0;

	md->frozen_bdev = bdget_disk(md->disk, 0);
	if (!md->frozen_bdev) {
		DMWARN("bdget failed in __lock_fs");
		goto out;
	}

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->frozen_bdev);
	if (IS_ERR(md->frozen_sb)) {
		error = PTR_ERR(md->frozen_sb);
		goto out_bdput;
	}

	/* don't bdput right now, we don't want the bdev
	 * to go away while it is locked.  We'll bdput
	 * in __unlock_fs.
	 */
	return 0;

 out_bdput:
	bdput(md->frozen_bdev);
	md->frozen_sb = NULL;
	md->frozen_bdev = NULL;
 out:
	clear_bit(DMF_FS_LOCKED, &md->flags);
	return error;
}
static void __unlock_fs(struct mapped_device *md)
{
	if (!test_and_clear_bit(DMF_FS_LOCKED, &md->flags))
		return;

	thaw_bdev(md->frozen_bdev, md->frozen_sb);
	bdput(md->frozen_bdev);

	md->frozen_sb = NULL;
	md->frozen_bdev = NULL;
}
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md)
{
	struct dm_table *map;
	DECLARE_WAITQUEUE(wait, current);
	int error = -EINVAL;

	/* Flush I/O to the device. */
	down_read(&md->lock);
	if (test_bit(DMF_BLOCK_IO, &md->flags))
		goto out_read_unlock;

	error = __lock_fs(md);
	if (error)
		goto out_read_unlock;

	map = dm_get_table(md);
	if (map)
		dm_table_presuspend_targets(map);
	up_read(&md->lock);

	/*
	 * First we set the BLOCK_IO flag so no more ios will be mapped.
	 *
	 * If the flag is already set we know another thread is trying to
	 * suspend as well, so we leave the fs locked for this thread.
	 */
	down_write(&md->lock);
	if (test_and_set_bit(DMF_BLOCK_IO, &md->flags)) {
		error = -EINVAL;
		if (map)
			dm_table_put(map);
		goto out_write_unlock;
	}

	add_wait_queue(&md->wait, &wait);
	up_write(&md->lock);

	/* unplug */
	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}

	/*
	 * Then we wait for the already mapped ios to
	 * complete.
	 */
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!atomic_read(&md->pending) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	down_write(&md->lock);
	remove_wait_queue(&md->wait, &wait);

	/* were we interrupted ? */
	error = -EINTR;
	if (atomic_read(&md->pending))
		goto out_unfreeze;

	set_bit(DMF_SUSPENDED, &md->flags);

	map = dm_get_table(md);
	if (map)
		dm_table_postsuspend_targets(map);
	dm_table_put(map);
	up_write(&md->lock);

	return 0;

 out_unfreeze:
	/* FIXME Undo dm_table_presuspend_targets */
	__unlock_fs(md);
	clear_bit(DMF_BLOCK_IO, &md->flags);
 out_write_unlock:
	up_write(&md->lock);
	return error;

 out_read_unlock:
	up_read(&md->lock);
	return error;
}
int dm_resume(struct mapped_device *md)
{
	struct bio *def;
	struct dm_table *map = dm_get_table(md);

	down_write(&md->lock);
	if (!map ||
	    !test_bit(DMF_SUSPENDED, &md->flags) ||
	    !dm_table_get_size(map)) {
		up_write(&md->lock);
		dm_table_put(map);
		return -EINVAL;
	}

	dm_table_resume_targets(map);
	clear_bit(DMF_SUSPENDED, &md->flags);
	clear_bit(DMF_BLOCK_IO, &md->flags);

	def = bio_list_get(&md->deferred);
	__flush_deferred_io(md, def);
	up_write(&md->lock);
	__unlock_fs(md);
	dm_table_unplug_all(map);
	dm_table_put(map);

	return 0;
}
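/*
 * Illustrative sketch only (not in the original file): the sequence the
 * comment above dm_suspend() describes for replacing a live table.
 * 'example_replace_table' is hypothetical and its error handling is
 * intentionally minimal (a failed swap leaves the device suspended).
 */
static int example_replace_table(struct mapped_device *md,
				 struct dm_table *new_table)
{
	int r;

	r = dm_suspend(md);		/* block and flush in-flight io */
	if (r)
		return r;

	r = dm_swap_table(md, new_table);	/* bind the new mapping */
	if (r)
		return r;

	return dm_resume(md);		/* replay deferred io on the new map */
}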
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}
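/*
 * Illustrative sketch only (not in the original file): a caller snapshots
 * the event counter, then sleeps until a table event bumps it past the
 * snapshot.  'example_wait_for_table_event' is a hypothetical name.
 */
static int example_wait_for_table_event(struct mapped_device *md)
{
	uint32_t seen = dm_get_event_nr(md);

	/* returns 0 once event_nr moves past 'seen', -ERESTARTSYS on signal */
	return dm_wait_event(md, seen);
}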
/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}
static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");