/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include "dm.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#define DM_MSG_PREFIX "table"

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
/*
 * The table has always exactly one reference from either mapped_device->map
 * or hash_cell->new_map. This reference is not counted in table->holders.
 * A pair of dm_create_table/dm_destroy_table functions is used for table
 * creation/destruction.
 *
 * Temporary references from the other code increase table->holders. A pair
 * of dm_table_get/dm_table_put functions is used to manipulate it.
 *
 * When the table is about to be destroyed, we wait for table->holders to
 * drop to zero.
 */
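
/*
 * Illustrative usage sketch (not part of the original documentation):
 * a temporary holder typically brackets its use of the table, e.g.
 *
 *	struct dm_table *map = dm_get_live_table(md);
 *	if (map) {
 *		... use map ...
 *		dm_table_put(map);
 *	}
 *
 * where dm_get_live_table() takes the temporary reference on the
 * caller's behalf.
 */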
struct dm_table {
	struct mapped_device *md;
	atomic_t holders;
	unsigned type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	unsigned discards_supported:1;
	unsigned integrity_supported:1;

	/*
	 * Indicates the rw permissions for the new logical
	 * device.  This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;

	struct list_head target_callbacks;
};
/*
 * Similar to ceiling(log_size(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}
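
/*
 * Worked example (hypothetical numbers, assuming a 64-byte cache line and
 * 8-byte sector_t, so CHILDREN_PER_NODE == 9): int_log(1000, 9) iterates
 * 1000 -> 112 -> 13 -> 2 -> 1 and returns 4, i.e. ceil(log9(1000)).
 */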
/*
 * Calculate the index of the child node of the n'th node's k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}
/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}
/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) -1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}
/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}
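
/*
 * Illustrative layout (hypothetical numbers, KEYS_PER_NODE == 8,
 * CHILDREN_PER_NODE == 9): with 3 leaf nodes and depth 2, the root's keys
 * 0..2 hold the highest sector covered by each leaf, while keys 3..7 are
 * (sector_t) -1 because high() is asked about children beyond t->counts[l].
 */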
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vmalloc(size);
	if (addr)
		memset(addr, 0, size);

	return addr;
}
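
/*
 * Example (illustrative): dm_vcalloc(num + 1, sizeof(sector_t)) returns
 * zeroed, vmalloc'd storage for num + 1 sector_t entries, or NULL if
 * nmemb * elem_size would overflow an unsigned long.
 */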
/*
 * highs, and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;
	int n = t->num_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 * Append an empty entry to catch sectors beyond the end of
	 * the device.
	 */
	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	if (n) {
		memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
		memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
	}

	memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md)
{
	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	INIT_LIST_HEAD(&t->devices);
	INIT_LIST_HEAD(&t->target_callbacks);
	atomic_set(&t->holders, 0);
	t->discards_supported = 1;

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		t = NULL;
		return -ENOMEM;
	}

	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}
static void free_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct dm_dev_internal *dd =
		    list_entry(tmp, struct dm_dev_internal, list);
		DMWARN("dm_table_destroy: dm_put_device call missing for %s",
		       dd->dm_dev.name);
		kfree(dd);
	}
}
void dm_table_destroy(struct dm_table *t)
{
	unsigned int i;

	while (atomic_read(&t->holders))
		msleep(1);
	smp_mb();

	/* free the indexes */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	if (t->devices.next != &t->devices)
		free_devices(&t->devices);

	dm_free_md_mempools(t->mempools);

	kfree(t);
}
void dm_table_get(struct dm_table *t)
{
	atomic_inc(&t->holders);
}

void dm_table_put(struct dm_table *t)
{
	if (!t)
		return;

	/* dm_table_destroy() waits for holders to drop to zero */
	smp_mb__before_atomic_dec();
	atomic_dec(&t->holders);
}
/*
 * Checks to see if we need to extend highs or targets.
 */
static inline int check_space(struct dm_table *t)
{
	if (t->num_targets >= t->num_allocated)
		return alloc_targets(t, t->num_allocated * 2);

	return 0;
}
/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev_internal *dd;

	list_for_each_entry (dd, l, list)
		if (dd->dm_dev.bdev->bd_dev == dev)
			return dd;

	return NULL;
}
/*
 * Open a device so we can use it as a map destination.
 */
static int open_dev(struct dm_dev_internal *d, dev_t dev,
		    struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;
	int r;

	BUG_ON(d->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, d->dm_dev.mode | FMODE_EXCL, _claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, d->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	d->dm_dev.bdev = bdev;
	return 0;
}
/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
{
	if (!d->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(d->dm_dev.bdev, dm_disk(md));
	blkdev_put(d->dm_dev.bdev, d->dm_dev.mode | FMODE_EXCL);
	d->dm_dev.bdev = NULL;
}
/*
 * If possible, this checks whether an area of a destination device is invalid.
 */
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct request_queue *q;
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	sector_t dev_size =
		i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
	unsigned short logical_block_size_sectors =
		limits->logical_block_size >> SECTOR_SHIFT;
	char b[BDEVNAME_SIZE];

	/*
	 * Some devices exist without request functions,
	 * such as loop devices not yet bound to backing files.
	 * Forbid the use of such devices.
	 */
	q = bdev_get_queue(bdev);
	if (!q || !q->make_request_fn) {
		DMWARN("%s: %s is not yet initialised: "
		       "start=%llu, len=%llu, dev_size=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       (unsigned long long)start,
		       (unsigned long long)len,
		       (unsigned long long)dev_size);
		return 1;
	}

	if (!dev_size)
		return 0;

	if ((start >= dev_size) || (start + len > dev_size)) {
		DMWARN("%s: %s too small for target: "
		       "start=%llu, len=%llu, dev_size=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       (unsigned long long)start,
		       (unsigned long long)len,
		       (unsigned long long)dev_size);
		return 1;
	}

	if (logical_block_size_sectors <= 1)
		return 0;

	if (start & (logical_block_size_sectors - 1)) {
		DMWARN("%s: start=%llu not aligned to h/w "
		       "logical block size %u of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)start,
		       limits->logical_block_size, bdevname(bdev, b));
		return 1;
	}

	if (len & (logical_block_size_sectors - 1)) {
		DMWARN("%s: len=%llu not aligned to h/w "
		       "logical block size %u of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)len,
		       limits->logical_block_size, bdevname(bdev, b));
		return 1;
	}

	return 0;
}
/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently inside dm_table_any_congested().
 */
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
			struct mapped_device *md)
{
	int r;
	struct dm_dev_internal dd_new, dd_old;

	dd_new = dd_old = *dd;

	dd_new.dm_dev.mode |= new_mode;
	dd_new.dm_dev.bdev = NULL;

	r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md);
	if (r)
		return r;

	dd->dm_dev.mode |= new_mode;
	close_dev(&dd_old, md);

	return 0;
}
/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
			      const char *path, fmode_t mode, struct dm_dev **result)
{
	int r;
	dev_t uninitialized_var(dev);
	struct dm_dev_internal *dd;
	unsigned int major, minor;

	BUG_ON(!t);

	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		/* convert the path to a device */
		struct block_device *bdev = lookup_bdev(path);

		if (IS_ERR(bdev))
			return PTR_ERR(bdev);
		dev = bdev->bd_dev;
		bdput(bdev);
	}

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		dd->dm_dev.mode = mode;
		dd->dm_dev.bdev = NULL;

		if ((r = open_dev(dd, dev, t->md))) {
			kfree(dd);
			return r;
		}

		format_dev_t(dd->dm_dev.name, dev);

		atomic_set(&dd->count, 0);
		list_add(&dd->list, &t->devices);

	} else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			return r;
	}
	atomic_inc(&dd->count);

	*result = &dd->dm_dev;
	return 0;
}
int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
			 sector_t start, sector_t len, void *data)
{
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	char b[BDEVNAME_SIZE];

	if (unlikely(!q)) {
		DMWARN("%s: Cannot set limits for nonexistent device %s",
		       dm_device_name(ti->table->md), bdevname(bdev, b));
		return 0;
	}

	if (bdev_stack_limits(limits, bdev, start) < 0)
		DMWARN("%s: adding target device %s caused an alignment inconsistency: "
		       "physical_block_size=%u, logical_block_size=%u, "
		       "alignment_offset=%u, start=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       q->limits.physical_block_size,
		       q->limits.logical_block_size,
		       q->limits.alignment_offset,
		       (unsigned long long) start << SECTOR_SHIFT);

	/*
	 * Check if merge fn is supported.
	 * If not we'll force DM to use PAGE_SIZE or
	 * smaller I/O, just to be safe.
	 */
	if (q->merge_bvec_fn && !ti->type->merge)
		blk_limits_max_hw_sectors(limits,
					  (unsigned int) (PAGE_SIZE >> 9));
	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result)
{
	return __table_get_device(ti->table, ti, path, mode, result);
}
/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
	struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
						  dm_dev);

	if (atomic_dec_and_test(&dd->count)) {
		close_dev(dd, ti->table->md);
		list_del(&dd->list);
		kfree(dd);
	}
}
/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}
/*
 * Used to dynamically allocate the arg array.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;

	new_size = *array_size ? *array_size * 2 : 64;
	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
	if (argv) {
		memcpy(argv, old_argv, *array_size * sizeof(*argv));
		*array_size = new_size;
	}

	kfree(old_argv);
	return argv;
}
/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		/* Skip whitespace */
		start = skip_spaces(end);

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any back-quotes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}
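
/*
 * Example (illustrative): given the writable string
 * "0 409600 linear /dev/sdb 0", dm_split_args() sets *argc to 5 and argv[]
 * to { "0", "409600", "linear", "/dev/sdb", "0" }; a backslash-quoted
 * character such as "\ " survives as a literal space inside one token.
 */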
/*
 * Impose necessary and sufficient conditions on a device's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully.  If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *table,
						     struct queue_limits *limits)
{
	/*
	 * This function uses arithmetic modulo the logical_block_size
	 * (in units of 512-byte sectors).
	 */
	unsigned short device_logical_block_size_sects =
		limits->logical_block_size >> SECTOR_SHIFT;

	/*
	 * Offset of the start of the next table entry, mod logical_block_size.
	 */
	unsigned short next_target_start = 0;

	/*
	 * Given an aligned bio that extends beyond the end of a
	 * target, how many sectors must the next target handle?
	 */
	unsigned short remaining = 0;

	struct dm_target *uninitialized_var(ti);
	struct queue_limits ti_limits;
	unsigned i = 0;

	/*
	 * Check each entry in the table in turn.
	 */
	while (i < dm_table_get_num_targets(table)) {
		ti = dm_table_get_target(table, i++);

		blk_set_default_limits(&ti_limits);

		/* combine all target devices' limits */
		if (ti->type->iterate_devices)
			ti->type->iterate_devices(ti, dm_set_device_limits,
						  &ti_limits);

		/*
		 * If the remaining sectors fall entirely within this
		 * table entry, are they compatible with its logical_block_size?
		 */
		if (remaining < ti->len &&
		    remaining & ((ti_limits.logical_block_size >>
				  SECTOR_SHIFT) - 1))
			break;	/* Error */

		next_target_start =
		    (unsigned short) ((next_target_start + ti->len) &
				      (device_logical_block_size_sects - 1));
		remaining = next_target_start ?
		    device_logical_block_size_sects - next_target_start : 0;
	}

	if (remaining) {
		DMWARN("%s: table line %u (start sect %llu len %llu) "
		       "not aligned to h/w logical block size %u",
		       dm_device_name(table->md), i,
		       (unsigned long long) ti->begin,
		       (unsigned long long) ti->len,
		       limits->logical_block_size);
		return -EINVAL;
	}

	return 0;
}
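
/*
 * Worked example (hypothetical numbers): with a 4096-byte
 * limits->logical_block_size (device_logical_block_size_sects == 8), a
 * first target of 1001 sectors leaves next_target_start == 1 and
 * remaining == 7.  A bio split at that boundary hands a 7-sector piece to
 * the second target, which is only acceptable if that target's own
 * logical_block_size is 512 bytes; if it is also 4096 the loop breaks and
 * the table is rejected.
 */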
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	if ((r = check_space(t)))
		return r;

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
		      type);
		return -EINVAL;
	}

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		r = -EINVAL;
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	if (!tgt->num_discard_requests)
		t->discards_supported = 0;

	return 0;

 bad:
	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}
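
/*
 * Example (hypothetical table line): "0 409600 linear /dev/sdb 0" arrives
 * here as type "linear", start 0, len 409600 and params "/dev/sdb 0";
 * after dm_split_args() the linear target's ctr sees argc == 2 and
 * argv[] = { "/dev/sdb", "0" }.
 */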
static int dm_table_set_type(struct dm_table *t)
{
	unsigned i;
	unsigned bio_based = 0, request_based = 0;
	struct dm_target *tgt;
	struct dm_dev_internal *dd;
	struct list_head *devices;

	for (i = 0; i < t->num_targets; i++) {
		tgt = t->targets + i;
		if (dm_target_request_based(tgt))
			request_based = 1;
		else
			bio_based = 1;

		if (bio_based && request_based) {
			DMWARN("Inconsistent table: different target types"
			       " can't be mixed up");
			return -EINVAL;
		}
	}

	if (bio_based) {
		/* We must use this table as bio-based */
		t->type = DM_TYPE_BIO_BASED;
		return 0;
	}

	BUG_ON(!request_based); /* No targets in this table */

	/* Non-request-stackable devices can't be used for request-based dm */
	devices = dm_table_get_devices(t);
	list_for_each_entry(dd, devices, list) {
		if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev.bdev))) {
			DMWARN("table load rejected: including"
			       " non-request-stackable devices");
			return -EINVAL;
		}
	}

	/*
	 * Request-based dm supports only tables that have a single target now.
	 * To support multiple targets, request splitting support is needed,
	 * and that needs lots of changes in the block-layer.
	 * (e.g. request completion process for partial completion.)
	 */
	if (t->num_targets > 1) {
		DMWARN("Request-based dm doesn't support multiple targets yet");
		return -EINVAL;
	}

	t->type = DM_TYPE_REQUEST_BASED;

	return 0;
}
unsigned dm_table_get_type(struct dm_table *t)
{
	return t->type;
}

bool dm_table_request_based(struct dm_table *t)
{
	return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
}
int dm_table_alloc_md_mempools(struct dm_table *t)
{
	unsigned type = dm_table_get_type(t);

	if (unlikely(type == DM_TYPE_NONE)) {
		DMWARN("no table type is set, can't allocate mempools");
		return -EINVAL;
	}

	t->mempools = dm_alloc_md_mempools(type, t->integrity_supported);
	if (!t->mempools)
		return -ENOMEM;

	return 0;
}

void dm_table_free_md_mempools(struct dm_table *t)
{
	dm_free_md_mempools(t->mempools);
	t->mempools = NULL;
}
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
{
	return t->mempools;
}
static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}
/*
 * Builds the btree to index the map.
 */
static int dm_table_build_index(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	/* how many indexes will the btree have ? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}
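
/*
 * Worked example (hypothetical numbers, KEYS_PER_NODE == 8,
 * CHILDREN_PER_NODE == 9): a table with 20 targets has
 * dm_div_up(20, 8) == 3 leaf nodes, so depth == 1 + int_log(3, 9) == 2;
 * the leaf level reuses t->highs and setup_indexes() allocates the single
 * root node above it.
 */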
/*
 * Get a disk whose integrity profile reflects the table's profile.
 * If %match_all is true, all devices' profiles must match.
 * If %match_all is false, all devices must at least have an
 * allocated integrity profile; but uninitialized is ok.
 * Returns NULL if integrity support was inconsistent or unavailable.
 */
static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
						    bool match_all)
{
	struct list_head *devices = dm_table_get_devices(t);
	struct dm_dev_internal *dd = NULL;
	struct gendisk *prev_disk = NULL, *template_disk = NULL;

	list_for_each_entry(dd, devices, list) {
		template_disk = dd->dm_dev.bdev->bd_disk;
		if (!blk_get_integrity(template_disk))
			goto no_integrity;
		if (!match_all && !blk_integrity_is_initialized(template_disk))
			continue; /* skip uninitialized profiles */
		else if (prev_disk &&
			 blk_integrity_compare(prev_disk, template_disk) < 0)
			goto no_integrity;
		prev_disk = template_disk;
	}

	return template_disk;

no_integrity:
	if (prev_disk)
		DMWARN("%s: integrity not set: %s and %s profile mismatch",
		       dm_device_name(t->md),
		       prev_disk->disk_name,
		       template_disk->disk_name);
	return NULL;
}
/*
 * Register the mapped device for blk_integrity support if
 * the underlying devices have an integrity profile.  But all devices
 * may not have matching profiles (checking all devices isn't reliable
 * during table load because this table may use other DM device(s) which
 * must be resumed before they will have an initialized integrity profile).
 * Stacked DM devices force a 2 stage integrity profile validation:
 * 1 - during load, validate all initialized integrity profiles match
 * 2 - during resume, validate all integrity profiles match
 */
static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
{
	struct gendisk *template_disk = NULL;

	template_disk = dm_table_get_integrity_disk(t, false);
	if (!template_disk)
		return 0;

	if (!blk_integrity_is_initialized(dm_disk(md))) {
		t->integrity_supported = 1;
		return blk_integrity_register(dm_disk(md), NULL);
	}

	/*
	 * If DM device already has an initialized integrity
	 * profile the new profile should not conflict.
	 */
	if (blk_integrity_is_initialized(template_disk) &&
	    blk_integrity_compare(dm_disk(md), template_disk) < 0) {
		DMWARN("%s: conflict with existing integrity profile: "
		       "%s profile mismatch",
		       dm_device_name(t->md),
		       template_disk->disk_name);
		return 1;
	}

	/* Preserve existing initialized integrity profile */
	t->integrity_supported = 1;
	return 0;
}
/*
 * Prepares the table for use by building the indices,
 * setting the type, and allocating mempools.
 */
int dm_table_complete(struct dm_table *t)
{
	int r;

	r = dm_table_set_type(t);
	if (r) {
		DMERR("unable to set table type");
		return r;
	}

	r = dm_table_build_index(t);
	if (r) {
		DMERR("unable to build btrees");
		return r;
	}

	r = dm_table_prealloc_integrity(t, t->md);
	if (r) {
		DMERR("could not register integrity profile.");
		return r;
	}

	r = dm_table_alloc_md_mempools(t);
	if (r)
		DMERR("unable to allocate mempools");

	return r;
}
static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context, use a bottom half instead.
	 */
	BUG_ON(in_interrupt());

	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}
sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	if (index >= t->num_targets)
		return NULL;

	return t->targets + index;
}
/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}
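
/*
 * Illustrative walk (hypothetical numbers, KEYS_PER_NODE == 8,
 * depth == 2): level 0 scans the root node and stops at the first key
 * >= sector, say k == 2; level 1 then descends to leaf
 * n == get_child(0, 2) == 2 and scans its keys the same way, so the
 * result is &t->targets[8 * 2 + k] for whichever leaf key matched.
 */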
/*
 * Establish the new table's queue_limits and validate them.
 */
int dm_calculate_queue_limits(struct dm_table *table,
			      struct queue_limits *limits)
{
	struct dm_target *uninitialized_var(ti);
	struct queue_limits ti_limits;
	unsigned i = 0;

	blk_set_default_limits(limits);

	while (i < dm_table_get_num_targets(table)) {
		blk_set_default_limits(&ti_limits);

		ti = dm_table_get_target(table, i++);

		if (!ti->type->iterate_devices)
			goto combine_limits;

		/*
		 * Combine queue limits of all the devices this target uses.
		 */
		ti->type->iterate_devices(ti, dm_set_device_limits,
					  &ti_limits);

		/* Set I/O hints portion of queue limits */
		if (ti->type->io_hints)
			ti->type->io_hints(ti, &ti_limits);

		/*
		 * Check each device area is consistent with the target's
		 * overall queue limits.
		 */
		if (ti->type->iterate_devices(ti, device_area_is_invalid,
					      &ti_limits))
			return -EINVAL;

combine_limits:
		/*
		 * Merge this target's queue limits into the overall limits
		 * for the table.
		 */
		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
			DMWARN("%s: adding target device "
			       "(start sect %llu len %llu) "
			       "caused an alignment inconsistency",
			       dm_device_name(table->md),
			       (unsigned long long) ti->begin,
			       (unsigned long long) ti->len);
	}

	return validate_hardware_logical_block_alignment(table, limits);
}
/*
 * Set the integrity profile for this device if all devices used have
 * matching profiles.  We're quite deep in the resume path but still
 * don't know if all devices (particularly DM devices this device
 * may be stacked on) have matching profiles.  Even if the profiles
 * don't match we have no way to fail (to resume) at this point.
 */
static void dm_table_set_integrity(struct dm_table *t)
{
	struct gendisk *template_disk = NULL;

	if (!blk_get_integrity(dm_disk(t->md)))
		return;

	template_disk = dm_table_get_integrity_disk(t, true);
	if (template_disk)
		blk_integrity_register(dm_disk(t->md),
				       blk_get_integrity(template_disk));
	else if (blk_integrity_is_initialized(dm_disk(t->md)))
		DMWARN("%s: device no longer has a valid integrity profile",
		       dm_device_name(t->md));
	else
		DMWARN("%s: unable to establish an integrity profile",
		       dm_device_name(t->md));
}
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			       struct queue_limits *limits)
{
	/*
	 * Copy table's limits to the DM device's request_queue
	 */
	q->limits = *limits;

	if (!dm_table_supports_discards(t))
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
	else
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);

	dm_table_set_integrity(t);

	/*
	 * QUEUE_FLAG_STACKABLE must be set after all queue settings are
	 * visible to other CPUs because, once the flag is set, incoming bios
	 * are processed by request-based dm, which refers to the queue
	 * settings.
	 * Until the flag is set, bios are passed to bio-based dm and queued to
	 * md->deferred where queue settings are not needed yet.
	 * Those bios are passed to request-based dm at the resume time.
	 */
	if (dm_table_request_based(t))
		queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
}
unsigned int dm_table_get_num_targets(struct dm_table *t)
{
	return t->num_targets;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}

fmode_t dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}
static void suspend_targets(struct dm_table *t, unsigned postsuspend)
{
	int i = t->num_targets;
	struct dm_target *ti = t->targets;

	while (i--) {
		if (postsuspend) {
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
		} else if (ti->type->presuspend)
			ti->type->presuspend(ti);

		ti++;
	}
}

void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 0);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 1);
}
int dm_table_resume_targets(struct dm_table *t)
{
	int i, r;

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (!ti->type->preresume)
			continue;

		r = ti->type->preresume(ti);
		if (r)
			return r;
	}

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}

	return 0;
}
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
{
	list_add(&cb->list, &t->target_callbacks);
}
EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);
int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
	struct dm_dev_internal *dd;
	struct list_head *devices = dm_table_get_devices(t);
	struct dm_target_callbacks *cb;
	int r = 0;

	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
		char b[BDEVNAME_SIZE];

		if (likely(q))
			r |= bdi_congested(&q->backing_dev_info, bdi_bits);
		else
			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
				     dm_device_name(t->md),
				     bdevname(dd->dm_dev.bdev, b));
	}

	list_for_each_entry(cb, &t->target_callbacks, list)
		if (cb->congested_fn)
			r |= cb->congested_fn(cb, bdi_bits);

	return r;
}
int dm_table_any_busy_target(struct dm_table *t)
{
	unsigned i;
	struct dm_target *ti;

	for (i = 0; i < t->num_targets; i++) {
		ti = t->targets + i;
		if (ti->type->busy && ti->type->busy(ti))
			return 1;
	}

	return 0;
}
struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	return t->md;
}
static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && blk_queue_discard(q);
}
bool dm_table_supports_discards(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i = 0;

	if (!t->discards_supported)
		return 0;

	/*
	 * Unless any target used by the table set discards_supported,
	 * require at least one underlying device to support discards.
	 * t->devices includes internal dm devices such as mirror logs
	 * so we need to use iterate_devices here, which targets
	 * supporting discard must provide.
	 */
	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (ti->discards_supported)
			return 1;

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, device_discard_capable, NULL))
			return 1;
	}

	return 0;
}
EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_get_device);
EXPORT_SYMBOL(dm_put_device);
EXPORT_SYMBOL(dm_table_event);
EXPORT_SYMBOL(dm_table_get_size);
EXPORT_SYMBOL(dm_table_get_mode);
EXPORT_SYMBOL(dm_table_get_md);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);