/*
 * Copyright (C) 2011-2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "persistent-data/dm-btree.h"
#include "persistent-data/dm-space-map.h"
#include "persistent-data/dm-space-map-disk.h"
#include "persistent-data/dm-transaction-manager.h"

#include <linux/list.h>
#include <linux/device-mapper.h>
#include <linux/workqueue.h>
/*--------------------------------------------------------------------------
 * As far as the metadata goes, there is:
 *
 * - A superblock in block zero, taking up fewer than 512 bytes for
 *   atomic writes.
 *
 * - A space map managing the metadata blocks.
 *
 * - A space map managing the data blocks.
 *
 * - A btree mapping our internal thin dev ids onto struct disk_device_details.
 *
 * - A hierarchical btree, with 2 levels, which effectively maps (thin
 *   dev id, virtual block) -> block_time.  Block time is a 64-bit
 *   field holding the time in the low 24 bits, and the block in the top 40
 *   bits.
 *
 * BTrees consist solely of btree_nodes, each of which fills a block.  Some
 * are internal nodes, so their values are a __le64 pointing to other
 * nodes.  Leaf nodes can store data of any reasonable size (i.e. much
 * smaller than the block size).  The nodes consist of the header,
 * followed by an array of keys, followed by an array of values.  We have
 * to binary search on the keys, so they're all held together to help the
 * CPU cache.
 *
 * Space maps have 2 btrees:
 *
 * - One maps a uint64_t onto a struct index_entry, which points to a
 *   bitmap block and has some details about how many free entries there
 *   are, etc.
 *
 * - The bitmap blocks have a header (for the checksum).  The rest
 *   of the block is pairs of bits, with the meaning being:
 *
 *   0 - ref count is 0
 *   1 - ref count is 1
 *   2 - ref count is 2
 *   3 - ref count is higher than 2
 *
 * - If the count is higher than 2 then the ref count is entered in a
 *   second btree that directly maps the block_address to a uint32_t ref
 *   count.
 *
 * The space map metadata variant doesn't have a bitmaps btree.  Instead
 * it has a single block's worth of index_entries.  This avoids
 * recursive issues with the bitmap btree needing to allocate space in
 * order to insert.  With a small data block size such as 64k, the
 * metadata can support data devices that are hundreds of terabytes.
 *
 * The space maps allocate space linearly from front to back.  Space that
 * is freed in a transaction is never recycled within that transaction.
 * To try and avoid fragmenting _free_ space the allocator always goes
 * back and fills in gaps.
 *
 * All metadata io is in THIN_METADATA_BLOCK_SIZE sized/aligned chunks
 * from the block manager.
 *--------------------------------------------------------------------------*/
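/*
 * For illustration, the ref count lookup implied by the layout above: to
 * find the count for a data block B, walk the index btree to B's
 * index_entry, read B's pair of bits from the bitmap block it points to,
 * and only if that pair holds 3 fall through to the overflow btree that
 * maps block_address -> uint32_t.  This is a sketch of the read path
 * only; the authoritative code lives in persistent-data/dm-space-map-*.c.
 */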
#define DM_MSG_PREFIX   "thin metadata"

#define THIN_SUPERBLOCK_MAGIC 27022010
#define THIN_SUPERBLOCK_LOCATION 0
#define THIN_VERSION 2
#define SECTOR_TO_BLOCK_SHIFT 3

/*
 * For btree insert:
 *  3 for btree insert +
 *  2 for btree lookup used within space map
 * For btree remove:
 *  2 for shadow spine +
 *  4 for rebalance 3 child node
 */
#define THIN_MAX_CONCURRENT_LOCKS 6
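/*
 * That is, an insert pins at most 3 + 2 = 5 blocks and a remove pins at
 * most 2 + 4 = 6, so 6 concurrent locks suffice for any single btree
 * operation here.
 */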
/* This should be plenty */
#define SPACE_MAP_ROOT_SIZE 128

/*
 * Little endian on-disk superblock and device details.
 */
struct thin_disk_superblock {
	__le32 csum;	/* Checksum of superblock except for this field. */

	__le32 flags;
	__le64 blocknr;	/* This block number, dm_block_t. */

	__u8 uuid[16];
	__le64 magic;
	__le32 version;
	__le32 time;

	__le64 trans_id;

	/*
	 * Root held by userspace transactions.
	 */
	__le64 held_root;

	__u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];

	/*
	 * 2-level btree mapping (dev_id, (dev block, time)) -> data block
	 */
	__le64 data_mapping_root;

	/*
	 * Device detail root mapping dev_id -> device_details
	 */
	__le64 device_details_root;

	__le32 data_block_size;		/* In 512-byte sectors. */

	__le32 metadata_block_size;	/* In 512-byte sectors. */
	__le64 metadata_nr_blocks;

	__le32 compat_flags;
	__le32 compat_ro_flags;
	__le32 incompat_flags;
} __packed;

struct disk_device_details {
	__le64 mapped_blocks;
	__le64 transaction_id;		/* When created. */
	__le32 creation_time;
	__le32 snapshotted_time;
} __packed;
struct dm_pool_metadata {
	struct hlist_node hash;

	struct block_device *bdev;
	struct dm_block_manager *bm;
	struct dm_space_map *metadata_sm;
	struct dm_space_map *data_sm;
	struct dm_transaction_manager *tm;
	struct dm_transaction_manager *nb_tm;

	/*
	 * Two-level btree.
	 * First level holds thin_dev_t.
	 * Second level holds mappings.
	 */
	struct dm_btree_info info;

	/*
	 * Non-blocking version of the above.
	 */
	struct dm_btree_info nb_info;

	/*
	 * Just the top level for deleting whole devices.
	 */
	struct dm_btree_info tl_info;

	/*
	 * Just the bottom level for creating new devices.
	 */
	struct dm_btree_info bl_info;

	/*
	 * Describes the device details btree.
	 */
	struct dm_btree_info details_info;

	struct rw_semaphore root_lock;
	uint32_t time;
	dm_block_t root;
	dm_block_t details_root;
	struct list_head thin_devices;
	uint64_t trans_id;
	unsigned long flags;
	sector_t data_block_size;

	/*
	 * Pre-commit callback.
	 *
	 * This allows the thin provisioning target to run a callback before
	 * the metadata are committed.
	 */
	dm_pool_pre_commit_fn pre_commit_fn;
	void *pre_commit_context;

	/*
	 * We reserve a section of the metadata for commit overhead.
	 * All reported space does *not* include this.
	 */
	dm_block_t metadata_reserve;

	/*
	 * Set if a transaction has to be aborted but the attempt to roll back
	 * to the previous (good) transaction failed.  The only pool metadata
	 * operation possible in this state is the closing of the device.
	 */
	bool fail_io:1;

	/*
	 * Set once a thin-pool has been accessed through one of the interfaces
	 * that imply the pool is in-service (e.g. thin devices created/deleted,
	 * thin-pool message, metadata snapshots, etc).
	 */
	bool in_service:1;

	/*
	 * Reading the space map roots can fail, so we read it into these
	 * buffers before the superblock is locked and updated.
	 */
	__u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
};
struct dm_thin_device {
	struct list_head list;
	struct dm_pool_metadata *pmd;
	dm_thin_id id;

	int open_count;
	bool changed:1;
	bool aborted_with_changes:1;
	uint64_t mapped_blocks;
	uint64_t transaction_id;
	uint32_t creation_time;
	uint32_t snapshotted_time;
};

/*----------------------------------------------------------------
 * superblock validator
 *--------------------------------------------------------------*/

#define SUPERBLOCK_CSUM_XOR 160774
static void sb_prepare_for_write(struct dm_block_validator *v,
				 struct dm_block *b,
				 size_t block_size)
{
	struct thin_disk_superblock *disk_super = dm_block_data(b);

	disk_super->blocknr = cpu_to_le64(dm_block_location(b));
	disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
						      block_size - sizeof(__le32),
						      SUPERBLOCK_CSUM_XOR));
}
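/*
 * Note the checksum is computed from &disk_super->flags onwards, i.e. over
 * block_size - sizeof(__le32) bytes: everything in the block except the
 * csum field itself, matching that field's comment in
 * struct thin_disk_superblock.
 */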
static int sb_check(struct dm_block_validator *v,
		    struct dm_block *b,
		    size_t block_size)
{
	struct thin_disk_superblock *disk_super = dm_block_data(b);
	__le32 csum_le;

	if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
		DMERR("sb_check failed: blocknr %llu: "
		      "wanted %llu", le64_to_cpu(disk_super->blocknr),
		      (unsigned long long)dm_block_location(b));
		return -ENOTBLK;
	}

	if (le64_to_cpu(disk_super->magic) != THIN_SUPERBLOCK_MAGIC) {
		DMERR("sb_check failed: magic %llu: "
		      "wanted %llu", le64_to_cpu(disk_super->magic),
		      (unsigned long long)THIN_SUPERBLOCK_MAGIC);
		return -EILSEQ;
	}

	csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
					     block_size - sizeof(__le32),
					     SUPERBLOCK_CSUM_XOR));
	if (csum_le != disk_super->csum) {
		DMERR("sb_check failed: csum %u: wanted %u",
		      le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
		return -EILSEQ;
	}

	return 0;
}

static struct dm_block_validator sb_validator = {
	.name = "superblock",
	.prepare_for_write = sb_prepare_for_write,
	.check = sb_check
};
/*----------------------------------------------------------------
 * Methods for the btree value types
 *--------------------------------------------------------------*/

static uint64_t pack_block_time(dm_block_t b, uint32_t t)
{
	return (b << 24) | t;
}

static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
{
	*b = v >> 24;
	*t = v & ((1 << 24) - 1);
}
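/*
 * Example: mapping to data block 0x123 at time 7 packs to
 * (0x123ULL << 24) | 7 == 0x123000007; unpacking recovers b = 0x123 from
 * the top 40 bits and t = 7 from the low 24 bits.
 */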
static void data_block_inc(void *context, const void *value_le)
{
	struct dm_space_map *sm = context;
	__le64 v_le;
	uint64_t b;
	uint32_t t;

	memcpy(&v_le, value_le, sizeof(v_le));
	unpack_block_time(le64_to_cpu(v_le), &b, &t);
	dm_sm_inc_block(sm, b);
}

static void data_block_dec(void *context, const void *value_le)
{
	struct dm_space_map *sm = context;
	__le64 v_le;
	uint64_t b;
	uint32_t t;

	memcpy(&v_le, value_le, sizeof(v_le));
	unpack_block_time(le64_to_cpu(v_le), &b, &t);
	dm_sm_dec_block(sm, b);
}

static int data_block_equal(void *context, const void *value1_le, const void *value2_le)
{
	__le64 v1_le, v2_le;
	uint64_t b1, b2;
	uint32_t t;

	memcpy(&v1_le, value1_le, sizeof(v1_le));
	memcpy(&v2_le, value2_le, sizeof(v2_le));
	unpack_block_time(le64_to_cpu(v1_le), &b1, &t);
	unpack_block_time(le64_to_cpu(v2_le), &b2, &t);

	return b1 == b2;
}

static void subtree_inc(void *context, const void *value)
{
	struct dm_btree_info *info = context;
	__le64 root_le;
	uint64_t root;

	memcpy(&root_le, value, sizeof(root_le));
	root = le64_to_cpu(root_le);
	dm_tm_inc(info->tm, root);
}

static void subtree_dec(void *context, const void *value)
{
	struct dm_btree_info *info = context;
	__le64 root_le;
	uint64_t root;

	memcpy(&root_le, value, sizeof(root_le));
	root = le64_to_cpu(root_le);
	if (dm_btree_del(info, root))
		DMERR("btree delete failed");
}

static int subtree_equal(void *context, const void *value1_le, const void *value2_le)
{
	__le64 v1_le, v2_le;

	memcpy(&v1_le, value1_le, sizeof(v1_le));
	memcpy(&v2_le, value2_le, sizeof(v2_le));

	return v1_le == v2_le;
}
/*----------------------------------------------------------------*/

/*
 * Variant that is used for in-core only changes or code that
 * shouldn't put the pool in service on its own (e.g. commit).
 */
static inline void pmd_write_lock_in_core(struct dm_pool_metadata *pmd)
	__acquires(pmd->root_lock)
{
	down_write(&pmd->root_lock);
}

static inline void pmd_write_lock(struct dm_pool_metadata *pmd)
{
	pmd_write_lock_in_core(pmd);
	if (unlikely(!pmd->in_service))
		pmd->in_service = true;
}

static inline void pmd_write_unlock(struct dm_pool_metadata *pmd)
	__releases(pmd->root_lock)
{
	up_write(&pmd->root_lock);
}

/*----------------------------------------------------------------*/
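/*
 * In short: pmd_write_lock() marks the pool in-service as a side effect,
 * while pmd_write_lock_in_core() does not.  This matters because
 * __commit_transaction() below is a no-op until in_service is set, so
 * purely in-core updates never force a metadata commit.
 */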
static int superblock_lock_zero(struct dm_pool_metadata *pmd,
				struct dm_block **sblock)
{
	return dm_bm_write_lock_zero(pmd->bm, THIN_SUPERBLOCK_LOCATION,
				     &sb_validator, sblock);
}

static int superblock_lock(struct dm_pool_metadata *pmd,
			   struct dm_block **sblock)
{
	return dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
				&sb_validator, sblock);
}

static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
{
	int r;
	unsigned i;
	struct dm_block *b;
	__le64 *data_le, zero = cpu_to_le64(0);
	unsigned block_size = dm_bm_block_size(bm) / sizeof(__le64);

	/*
	 * We can't use a validator here - it may be all zeroes.
	 */
	r = dm_bm_read_lock(bm, THIN_SUPERBLOCK_LOCATION, NULL, &b);
	if (r)
		return r;

	data_le = dm_block_data(b);
	*result = 1;
	for (i = 0; i < block_size; i++) {
		if (data_le[i] != zero) {
			*result = 0;
			break;
		}
	}

	dm_bm_unlock(b);

	return 0;
}
static void __setup_btree_details(struct dm_pool_metadata *pmd)
{
	pmd->info.tm = pmd->tm;
	pmd->info.levels = 2;
	pmd->info.value_type.context = pmd->data_sm;
	pmd->info.value_type.size = sizeof(__le64);
	pmd->info.value_type.inc = data_block_inc;
	pmd->info.value_type.dec = data_block_dec;
	pmd->info.value_type.equal = data_block_equal;

	memcpy(&pmd->nb_info, &pmd->info, sizeof(pmd->nb_info));
	pmd->nb_info.tm = pmd->nb_tm;

	pmd->tl_info.tm = pmd->tm;
	pmd->tl_info.levels = 1;
	pmd->tl_info.value_type.context = &pmd->bl_info;
	pmd->tl_info.value_type.size = sizeof(__le64);
	pmd->tl_info.value_type.inc = subtree_inc;
	pmd->tl_info.value_type.dec = subtree_dec;
	pmd->tl_info.value_type.equal = subtree_equal;

	pmd->bl_info.tm = pmd->tm;
	pmd->bl_info.levels = 1;
	pmd->bl_info.value_type.context = pmd->data_sm;
	pmd->bl_info.value_type.size = sizeof(__le64);
	pmd->bl_info.value_type.inc = data_block_inc;
	pmd->bl_info.value_type.dec = data_block_dec;
	pmd->bl_info.value_type.equal = data_block_equal;

	pmd->details_info.tm = pmd->tm;
	pmd->details_info.levels = 1;
	pmd->details_info.value_type.context = NULL;
	pmd->details_info.value_type.size = sizeof(struct disk_device_details);
	pmd->details_info.value_type.inc = NULL;
	pmd->details_info.value_type.dec = NULL;
	pmd->details_info.value_type.equal = NULL;
}
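/*
 * To recap the five views set up above: info is the full 2-level
 * (dev id, virtual block) -> block_time tree; nb_info is the same tree
 * through the non-blocking transaction manager; tl_info sees only the
 * top level (for deleting whole devices); bl_info only the bottom level
 * (for creating devices); and details_info describes the
 * dev_id -> disk_device_details tree.
 */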
static int save_sm_roots(struct dm_pool_metadata *pmd)
{
	int r;
	size_t len;

	r = dm_sm_root_size(pmd->metadata_sm, &len);
	if (r < 0)
		return r;

	r = dm_sm_copy_root(pmd->metadata_sm, &pmd->metadata_space_map_root, len);
	if (r < 0)
		return r;

	r = dm_sm_root_size(pmd->data_sm, &len);
	if (r < 0)
		return r;

	return dm_sm_copy_root(pmd->data_sm, &pmd->data_space_map_root, len);
}

static void copy_sm_roots(struct dm_pool_metadata *pmd,
			  struct thin_disk_superblock *disk)
{
	memcpy(&disk->metadata_space_map_root,
	       &pmd->metadata_space_map_root,
	       sizeof(pmd->metadata_space_map_root));

	memcpy(&disk->data_space_map_root,
	       &pmd->data_space_map_root,
	       sizeof(pmd->data_space_map_root));
}
static int __write_initial_superblock(struct dm_pool_metadata *pmd)
{
	int r;
	struct dm_block *sblock;
	struct thin_disk_superblock *disk_super;
	sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT;

	if (bdev_size > THIN_METADATA_MAX_SECTORS)
		bdev_size = THIN_METADATA_MAX_SECTORS;

	r = dm_sm_commit(pmd->data_sm);
	if (r < 0)
		return r;

	r = dm_tm_pre_commit(pmd->tm);
	if (r < 0)
		return r;

	r = save_sm_roots(pmd);
	if (r < 0)
		return r;

	r = superblock_lock_zero(pmd, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	disk_super->flags = 0;
	memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
	disk_super->magic = cpu_to_le64(THIN_SUPERBLOCK_MAGIC);
	disk_super->version = cpu_to_le32(THIN_VERSION);
	disk_super->time = 0;
	disk_super->trans_id = 0;
	disk_super->held_root = 0;

	copy_sm_roots(pmd, disk_super);

	disk_super->data_mapping_root = cpu_to_le64(pmd->root);
	disk_super->device_details_root = cpu_to_le64(pmd->details_root);
	disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE);
	disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT);
	disk_super->data_block_size = cpu_to_le32(pmd->data_block_size);

	return dm_tm_commit(pmd->tm, sblock);
}
static int __format_metadata(struct dm_pool_metadata *pmd)
{
	int r;

	r = dm_tm_create_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
				 &pmd->tm, &pmd->metadata_sm);
	if (r < 0) {
		DMERR("tm_create_with_sm failed");
		return r;
	}

	pmd->data_sm = dm_sm_disk_create(pmd->tm, 0);
	if (IS_ERR(pmd->data_sm)) {
		DMERR("sm_disk_create failed");
		r = PTR_ERR(pmd->data_sm);
		goto bad_cleanup_tm;
	}

	pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm);
	if (!pmd->nb_tm) {
		DMERR("could not create non-blocking clone tm");
		r = -ENOMEM;
		goto bad_cleanup_data_sm;
	}

	__setup_btree_details(pmd);

	r = dm_btree_empty(&pmd->info, &pmd->root);
	if (r < 0)
		goto bad_cleanup_nb_tm;

	r = dm_btree_empty(&pmd->details_info, &pmd->details_root);
	if (r < 0) {
		DMERR("couldn't create devices root");
		goto bad_cleanup_nb_tm;
	}

	r = __write_initial_superblock(pmd);
	if (r)
		goto bad_cleanup_nb_tm;

	return 0;

bad_cleanup_nb_tm:
	dm_tm_destroy(pmd->nb_tm);
bad_cleanup_data_sm:
	dm_sm_destroy(pmd->data_sm);
bad_cleanup_tm:
	dm_tm_destroy(pmd->tm);
	dm_sm_destroy(pmd->metadata_sm);

	return r;
}
static int __check_incompat_features(struct thin_disk_superblock *disk_super,
				     struct dm_pool_metadata *pmd)
{
	uint32_t features;

	features = le32_to_cpu(disk_super->incompat_flags) & ~THIN_FEATURE_INCOMPAT_SUPP;
	if (features) {
		DMERR("could not access metadata due to unsupported optional features (%lx).",
		      (unsigned long)features);
		return -EINVAL;
	}

	/*
	 * Check for read-only metadata to skip the following RDWR checks.
	 */
	if (get_disk_ro(pmd->bdev->bd_disk))
		return 0;

	features = le32_to_cpu(disk_super->compat_ro_flags) & ~THIN_FEATURE_COMPAT_RO_SUPP;
	if (features) {
		DMERR("could not access metadata RDWR due to unsupported optional features (%lx).",
		      (unsigned long)features);
		return -EINVAL;
	}

	return 0;
}
static int __open_metadata(struct dm_pool_metadata *pmd)
{
	int r;
	struct dm_block *sblock;
	struct thin_disk_superblock *disk_super;

	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			    &sb_validator, &sblock);
	if (r < 0) {
		DMERR("couldn't read superblock");
		return r;
	}

	disk_super = dm_block_data(sblock);

	/* Verify the data block size hasn't changed */
	if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
		DMERR("changing the data block size (from %u to %llu) is not supported",
		      le32_to_cpu(disk_super->data_block_size),
		      (unsigned long long)pmd->data_block_size);
		r = -EINVAL;
		goto bad_unlock_sblock;
	}

	r = __check_incompat_features(disk_super, pmd);
	if (r < 0)
		goto bad_unlock_sblock;

	r = dm_tm_open_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			       disk_super->metadata_space_map_root,
			       sizeof(disk_super->metadata_space_map_root),
			       &pmd->tm, &pmd->metadata_sm);
	if (r < 0) {
		DMERR("tm_open_with_sm failed");
		goto bad_unlock_sblock;
	}

	pmd->data_sm = dm_sm_disk_open(pmd->tm, disk_super->data_space_map_root,
				       sizeof(disk_super->data_space_map_root));
	if (IS_ERR(pmd->data_sm)) {
		DMERR("sm_disk_open failed");
		r = PTR_ERR(pmd->data_sm);
		goto bad_cleanup_tm;
	}

	pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm);
	if (!pmd->nb_tm) {
		DMERR("could not create non-blocking clone tm");
		r = -ENOMEM;
		goto bad_cleanup_data_sm;
	}

	__setup_btree_details(pmd);
	dm_bm_unlock(sblock);

	return 0;

bad_cleanup_data_sm:
	dm_sm_destroy(pmd->data_sm);
bad_cleanup_tm:
	dm_tm_destroy(pmd->tm);
	dm_sm_destroy(pmd->metadata_sm);
bad_unlock_sblock:
	dm_bm_unlock(sblock);

	return r;
}
static int __open_or_format_metadata(struct dm_pool_metadata *pmd, bool format_device)
{
	int r, unformatted;

	r = __superblock_all_zeroes(pmd->bm, &unformatted);
	if (r)
		return r;

	if (unformatted)
		return format_device ? __format_metadata(pmd) : -EPERM;

	return __open_metadata(pmd);
}

static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool format_device)
{
	int r;

	pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
					  THIN_MAX_CONCURRENT_LOCKS);
	if (IS_ERR(pmd->bm)) {
		DMERR("could not create block manager");
		r = PTR_ERR(pmd->bm);
		return r;
	}

	r = __open_or_format_metadata(pmd, format_device);
	if (r)
		dm_block_manager_destroy(pmd->bm);

	return r;
}
static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd)
{
	dm_sm_destroy(pmd->data_sm);
	dm_sm_destroy(pmd->metadata_sm);
	dm_tm_destroy(pmd->nb_tm);
	dm_tm_destroy(pmd->tm);
	dm_block_manager_destroy(pmd->bm);
}
static int __begin_transaction(struct dm_pool_metadata *pmd)
{
	int r;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock;

	/*
	 * We re-read the superblock every time.  Shouldn't need to do this
	 * really.
	 */
	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			    &sb_validator, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	pmd->time = le32_to_cpu(disk_super->time);
	pmd->root = le64_to_cpu(disk_super->data_mapping_root);
	pmd->details_root = le64_to_cpu(disk_super->device_details_root);
	pmd->trans_id = le64_to_cpu(disk_super->trans_id);
	pmd->flags = le32_to_cpu(disk_super->flags);
	pmd->data_block_size = le32_to_cpu(disk_super->data_block_size);

	dm_bm_unlock(sblock);

	return 0;
}
static int __write_changed_details(struct dm_pool_metadata *pmd)
{
	int r;
	struct dm_thin_device *td, *tmp;
	struct disk_device_details details;
	uint64_t key;

	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
		if (!td->changed)
			continue;

		key = td->id;

		details.mapped_blocks = cpu_to_le64(td->mapped_blocks);
		details.transaction_id = cpu_to_le64(td->transaction_id);
		details.creation_time = cpu_to_le32(td->creation_time);
		details.snapshotted_time = cpu_to_le32(td->snapshotted_time);
		__dm_bless_for_disk(&details);

		r = dm_btree_insert(&pmd->details_info, pmd->details_root,
				    &key, &details, &pmd->details_root);
		if (r)
			return r;

		if (td->open_count)
			td->changed = false;
		else {
			list_del(&td->list);
			kfree(td);
		}
	}

	return 0;
}
static int __commit_transaction(struct dm_pool_metadata *pmd)
{
	int r;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock;

	/*
	 * We need to know if the thin_disk_superblock exceeds a 512-byte sector.
	 */
	BUILD_BUG_ON(sizeof(struct thin_disk_superblock) > 512);
	BUG_ON(!rwsem_is_locked(&pmd->root_lock));

	if (unlikely(!pmd->in_service))
		return 0;

	if (pmd->pre_commit_fn) {
		r = pmd->pre_commit_fn(pmd->pre_commit_context);
		if (r < 0) {
			DMERR("pre-commit callback failed");
			return r;
		}
	}

	r = __write_changed_details(pmd);
	if (r < 0)
		return r;

	r = dm_sm_commit(pmd->data_sm);
	if (r < 0)
		return r;

	r = dm_tm_pre_commit(pmd->tm);
	if (r < 0)
		return r;

	r = save_sm_roots(pmd);
	if (r < 0)
		return r;

	r = superblock_lock(pmd, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	disk_super->time = cpu_to_le32(pmd->time);
	disk_super->data_mapping_root = cpu_to_le64(pmd->root);
	disk_super->device_details_root = cpu_to_le64(pmd->details_root);
	disk_super->trans_id = cpu_to_le64(pmd->trans_id);
	disk_super->flags = cpu_to_le32(pmd->flags);

	copy_sm_roots(pmd, disk_super);

	return dm_tm_commit(pmd->tm, sblock);
}
static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
{
	int r;
	dm_block_t total;
	dm_block_t max_blocks = 4096; /* 16M */

	r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
	if (r) {
		DMERR("could not get size of metadata device");
		pmd->metadata_reserve = max_blocks;
	} else
		pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
}
struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
					       sector_t data_block_size,
					       bool format_device)
{
	int r;
	struct dm_pool_metadata *pmd;

	pmd = kmalloc(sizeof(*pmd), GFP_KERNEL);
	if (!pmd) {
		DMERR("could not allocate metadata struct");
		return ERR_PTR(-ENOMEM);
	}

	init_rwsem(&pmd->root_lock);
	pmd->time = 0;
	INIT_LIST_HEAD(&pmd->thin_devices);
	pmd->fail_io = false;
	pmd->in_service = false;
	pmd->bdev = bdev;
	pmd->data_block_size = data_block_size;
	pmd->pre_commit_fn = NULL;
	pmd->pre_commit_context = NULL;

	r = __create_persistent_data_objects(pmd, format_device);
	if (r) {
		kfree(pmd);
		return ERR_PTR(r);
	}

	r = __begin_transaction(pmd);
	if (r < 0) {
		if (dm_pool_metadata_close(pmd) < 0)
			DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
		return ERR_PTR(r);
	}

	__set_metadata_reserve(pmd);

	return pmd;
}
int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
{
	int r;
	unsigned open_devices = 0;
	struct dm_thin_device *td, *tmp;

	down_read(&pmd->root_lock);
	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
		if (td->open_count)
			open_devices++;
		else {
			list_del(&td->list);
			kfree(td);
		}
	}
	up_read(&pmd->root_lock);

	if (open_devices) {
		DMERR("attempt to close pmd when %u device(s) are still open",
		      open_devices);
		return -EBUSY;
	}

	pmd_write_lock_in_core(pmd);
	if (!pmd->fail_io && !dm_bm_is_read_only(pmd->bm)) {
		r = __commit_transaction(pmd);
		if (r < 0)
			DMWARN("%s: __commit_transaction() failed, error = %d",
			       __func__, r);
	}
	pmd_write_unlock(pmd);
	if (!pmd->fail_io)
		__destroy_persistent_data_objects(pmd);

	kfree(pmd);
	return 0;
}
/*
 * __open_device: Returns @td corresponding to device with id @dev,
 * creating it if @create is set and incrementing @td->open_count.
 * On failure, @td is undefined.
 */
static int __open_device(struct dm_pool_metadata *pmd,
			 dm_thin_id dev, int create,
			 struct dm_thin_device **td)
{
	int r, changed = 0;
	struct dm_thin_device *td2;
	uint64_t key = dev;
	struct disk_device_details details_le;

	/*
	 * If the device is already open, return it.
	 */
	list_for_each_entry(td2, &pmd->thin_devices, list)
		if (td2->id == dev) {
			/*
			 * May not create an already-open device.
			 */
			if (create)
				return -EEXIST;

			td2->open_count++;
			*td = td2;
			return 0;
		}

	/*
	 * Check the device exists.
	 */
	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
			    &key, &details_le);
	if (r) {
		if (r != -ENODATA || !create)
			return r;

		/*
		 * Create new device.
		 */
		changed = 1;
		details_le.mapped_blocks = 0;
		details_le.transaction_id = cpu_to_le64(pmd->trans_id);
		details_le.creation_time = cpu_to_le32(pmd->time);
		details_le.snapshotted_time = cpu_to_le32(pmd->time);
	}

	*td = kmalloc(sizeof(**td), GFP_NOIO);
	if (!*td)
		return -ENOMEM;

	(*td)->pmd = pmd;
	(*td)->id = dev;
	(*td)->open_count = 1;
	(*td)->changed = changed;
	(*td)->aborted_with_changes = false;
	(*td)->mapped_blocks = le64_to_cpu(details_le.mapped_blocks);
	(*td)->transaction_id = le64_to_cpu(details_le.transaction_id);
	(*td)->creation_time = le32_to_cpu(details_le.creation_time);
	(*td)->snapshotted_time = le32_to_cpu(details_le.snapshotted_time);

	list_add(&(*td)->list, &pmd->thin_devices);

	return 0;
}

static void __close_device(struct dm_thin_device *td)
{
	--td->open_count;
}
static int __create_thin(struct dm_pool_metadata *pmd,
			 dm_thin_id dev)
{
	int r;
	dm_block_t dev_root;
	uint64_t key = dev;
	struct disk_device_details details_le;
	struct dm_thin_device *td;
	__le64 value;

	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
			    &key, &details_le);
	if (!r)
		return -EEXIST;

	/*
	 * Create an empty btree for the mappings.
	 */
	r = dm_btree_empty(&pmd->bl_info, &dev_root);
	if (r)
		return r;

	/*
	 * Insert it into the main mapping tree.
	 */
	value = cpu_to_le64(dev_root);
	__dm_bless_for_disk(&value);
	r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
	if (r) {
		dm_btree_del(&pmd->bl_info, dev_root);
		return r;
	}

	r = __open_device(pmd, dev, 1, &td);
	if (r) {
		dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
		dm_btree_del(&pmd->bl_info, dev_root);
		return r;
	}
	__close_device(td);

	return r;
}

int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev)
{
	int r = -EINVAL;

	pmd_write_lock(pmd);
	if (!pmd->fail_io)
		r = __create_thin(pmd, dev);
	pmd_write_unlock(pmd);

	return r;
}
static int __set_snapshot_details(struct dm_pool_metadata *pmd,
				  struct dm_thin_device *snap,
				  dm_thin_id origin, uint32_t time)
{
	int r;
	struct dm_thin_device *td;

	r = __open_device(pmd, origin, 0, &td);
	if (r)
		return r;

	td->changed = true;
	td->snapshotted_time = time;

	snap->mapped_blocks = td->mapped_blocks;
	snap->snapshotted_time = time;
	__close_device(td);

	return 0;
}
static int __create_snap(struct dm_pool_metadata *pmd,
			 dm_thin_id dev, dm_thin_id origin)
{
	int r;
	dm_block_t origin_root;
	uint64_t key = origin, dev_key = dev;
	struct dm_thin_device *td;
	struct disk_device_details details_le;
	__le64 value;

	/* check this device is unused */
	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
			    &dev_key, &details_le);
	if (!r)
		return -EEXIST;

	/* find the mapping tree for the origin */
	r = dm_btree_lookup(&pmd->tl_info, pmd->root, &key, &value);
	if (r)
		return r;
	origin_root = le64_to_cpu(value);

	/* clone the origin, an inc will do */
	dm_tm_inc(pmd->tm, origin_root);

	/* insert into the main mapping tree */
	value = cpu_to_le64(origin_root);
	__dm_bless_for_disk(&value);
	key = dev;
	r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
	if (r) {
		dm_tm_dec(pmd->tm, origin_root);
		return r;
	}

	pmd->time++;

	r = __open_device(pmd, dev, 1, &td);
	if (r)
		goto bad;

	r = __set_snapshot_details(pmd, td, origin, pmd->time);
	__close_device(td);

	if (r)
		goto bad;

	return 0;

bad:
	dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
	dm_btree_remove(&pmd->details_info, pmd->details_root,
			&key, &pmd->details_root);
	return r;
}
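/*
 * Note that __create_snap() never copies mappings: the snapshot simply
 * takes a reference on the origin's bottom-level subtree ("an inc will
 * do"), and blocks are later reported as shared by comparing their
 * creation time against snapshotted_time (see __snapshotted_since()
 * below).
 */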
int dm_pool_create_snap(struct dm_pool_metadata *pmd,
			dm_thin_id dev,
			dm_thin_id origin)
{
	int r = -EINVAL;

	pmd_write_lock(pmd);
	if (!pmd->fail_io)
		r = __create_snap(pmd, dev, origin);
	pmd_write_unlock(pmd);

	return r;
}
static int __delete_device(struct dm_pool_metadata *pmd, dm_thin_id dev)
{
	int r;
	uint64_t key = dev;
	struct dm_thin_device *td;

	/* TODO: failure should mark the transaction invalid */
	r = __open_device(pmd, dev, 0, &td);
	if (r)
		return r;

	if (td->open_count > 1) {
		__close_device(td);
		return -EBUSY;
	}

	list_del(&td->list);
	kfree(td);
	r = dm_btree_remove(&pmd->details_info, pmd->details_root,
			    &key, &pmd->details_root);
	if (r)
		return r;

	r = dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
	if (r)
		return r;

	return 0;
}

int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd,
			       dm_thin_id dev)
{
	int r = -EINVAL;

	pmd_write_lock(pmd);
	if (!pmd->fail_io)
		r = __delete_device(pmd, dev);
	pmd_write_unlock(pmd);

	return r;
}
int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
					uint64_t current_id,
					uint64_t new_id)
{
	int r = -EINVAL;

	pmd_write_lock(pmd);

	if (pmd->fail_io)
		goto out;

	if (pmd->trans_id != current_id) {
		DMERR("mismatched transaction id");
		goto out;
	}

	pmd->trans_id = new_id;
	r = 0;

out:
	pmd_write_unlock(pmd);

	return r;
}

int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
					uint64_t *result)
{
	int r = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io) {
		*result = pmd->trans_id;
		r = 0;
	}
	up_read(&pmd->root_lock);

	return r;
}
static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r, inc;
	struct thin_disk_superblock *disk_super;
	struct dm_block *copy, *sblock;
	dm_block_t held_root;

	/*
	 * We commit to ensure the btree roots which we increment in a
	 * moment are up to date.
	 */
	r = __commit_transaction(pmd);
	if (r < 0) {
		DMWARN("%s: __commit_transaction() failed, error = %d",
		       __func__, r);
		return r;
	}

	/*
	 * Copy the superblock.
	 */
	dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
	r = dm_tm_shadow_block(pmd->tm, THIN_SUPERBLOCK_LOCATION,
			       &sb_validator, &copy, &inc);
	if (r)
		return r;

	BUG_ON(!inc);

	held_root = dm_block_location(copy);
	disk_super = dm_block_data(copy);

	if (le64_to_cpu(disk_super->held_root)) {
		DMWARN("Pool metadata snapshot already exists: release this before taking another.");

		dm_tm_dec(pmd->tm, held_root);
		dm_tm_unlock(pmd->tm, copy);
		return -EBUSY;
	}

	/*
	 * Wipe the spacemap since we're not publishing this.
	 */
	memset(&disk_super->data_space_map_root, 0,
	       sizeof(disk_super->data_space_map_root));
	memset(&disk_super->metadata_space_map_root, 0,
	       sizeof(disk_super->metadata_space_map_root));

	/*
	 * Increment the data structures that need to be preserved.
	 */
	dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->data_mapping_root));
	dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->device_details_root));
	dm_tm_unlock(pmd->tm, copy);

	/*
	 * Write the held root into the superblock.
	 */
	r = superblock_lock(pmd, &sblock);
	if (r) {
		dm_tm_dec(pmd->tm, held_root);
		return r;
	}

	disk_super = dm_block_data(sblock);
	disk_super->held_root = cpu_to_le64(held_root);
	dm_bm_unlock(sblock);
	return 0;
}

int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r = -EINVAL;

	pmd_write_lock(pmd);
	if (!pmd->fail_io)
		r = __reserve_metadata_snap(pmd);
	pmd_write_unlock(pmd);

	return r;
}
static int __release_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock, *copy;
	dm_block_t held_root;

	r = superblock_lock(pmd, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	held_root = le64_to_cpu(disk_super->held_root);
	disk_super->held_root = cpu_to_le64(0);

	dm_bm_unlock(sblock);

	if (!held_root) {
		DMWARN("No pool metadata snapshot found: nothing to release.");
		return -EINVAL;
	}

	r = dm_tm_read_lock(pmd->tm, held_root, &sb_validator, &copy);
	if (r)
		return r;

	disk_super = dm_block_data(copy);
	dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
	dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
	dm_sm_dec_block(pmd->metadata_sm, held_root);

	dm_tm_unlock(pmd->tm, copy);

	return 0;
}

int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r = -EINVAL;

	pmd_write_lock(pmd);
	if (!pmd->fail_io)
		r = __release_metadata_snap(pmd);
	pmd_write_unlock(pmd);

	return r;
}
static int __get_metadata_snap(struct dm_pool_metadata *pmd,
			       dm_block_t *result)
{
	int r;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock;

	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			    &sb_validator, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	*result = le64_to_cpu(disk_super->held_root);

	dm_bm_unlock(sblock);

	return 0;
}

int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
			      dm_block_t *result)
{
	int r = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		r = __get_metadata_snap(pmd, result);
	up_read(&pmd->root_lock);

	return r;
}
int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev,
			     struct dm_thin_device **td)
{
	int r = -EINVAL;

	pmd_write_lock_in_core(pmd);
	if (!pmd->fail_io)
		r = __open_device(pmd, dev, 0, td);
	pmd_write_unlock(pmd);

	return r;
}

int dm_pool_close_thin_device(struct dm_thin_device *td)
{
	pmd_write_lock_in_core(td->pmd);
	__close_device(td);
	pmd_write_unlock(td->pmd);

	return 0;
}

dm_thin_id dm_thin_dev_id(struct dm_thin_device *td)
{
	return td->id;
}
/*
 * Check whether @time (of block creation) is older than @td's last snapshot.
 * If so then the associated block is shared with the last snapshot device.
 * Any block on a device created *after* the device last got snapshotted is
 * necessarily not shared.
 */
static bool __snapshotted_since(struct dm_thin_device *td, uint32_t time)
{
	return td->snapshotted_time > time;
}

static void unpack_lookup_result(struct dm_thin_device *td, __le64 value,
				 struct dm_thin_lookup_result *result)
{
	uint64_t block_time = 0;
	dm_block_t exception_block;
	uint32_t exception_time;

	block_time = le64_to_cpu(value);
	unpack_block_time(block_time, &exception_block, &exception_time);
	result->block = exception_block;
	result->shared = __snapshotted_since(td, exception_time);
}
static int __find_block(struct dm_thin_device *td, dm_block_t block,
			int can_issue_io, struct dm_thin_lookup_result *result)
{
	int r;
	__le64 value;
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };
	struct dm_btree_info *info;

	if (can_issue_io)
		info = &pmd->info;
	else
		info = &pmd->nb_info;

	r = dm_btree_lookup(info, pmd->root, keys, &value);
	if (!r)
		unpack_lookup_result(td, value, result);

	return r;
}

int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
		       int can_issue_io, struct dm_thin_lookup_result *result)
{
	int r;
	struct dm_pool_metadata *pmd = td->pmd;

	down_read(&pmd->root_lock);
	if (pmd->fail_io) {
		up_read(&pmd->root_lock);
		return -EINVAL;
	}

	r = __find_block(td, block, can_issue_io, result);

	up_read(&pmd->root_lock);
	return r;
}
static int __find_next_mapped_block(struct dm_thin_device *td, dm_block_t block,
				    dm_block_t *vblock,
				    struct dm_thin_lookup_result *result)
{
	int r;
	__le64 value;
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };

	r = dm_btree_lookup_next(&pmd->info, pmd->root, keys, vblock, &value);
	if (!r)
		unpack_lookup_result(td, value, result);

	return r;
}
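/*
 * __find_mapped_range() below combines the two lookups above: it skips to
 * the first mapped block at or after @begin, then extends the run one
 * block at a time while the pool blocks stay physically contiguous and
 * the shared flag is unchanged, yielding a maximal [*thin_begin,
 * *thin_end) range backed from *pool_begin with a single shared status.
 */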
static int __find_mapped_range(struct dm_thin_device *td,
			       dm_block_t begin, dm_block_t end,
			       dm_block_t *thin_begin, dm_block_t *thin_end,
			       dm_block_t *pool_begin, bool *maybe_shared)
{
	int r;
	dm_block_t pool_end;
	struct dm_thin_lookup_result lookup;

	if (end < begin)
		return -ENODATA;

	r = __find_next_mapped_block(td, begin, &begin, &lookup);
	if (r)
		return r;

	if (begin >= end)
		return -ENODATA;

	*thin_begin = begin;
	*pool_begin = lookup.block;
	*maybe_shared = lookup.shared;

	begin++;
	pool_end = *pool_begin + 1;
	while (begin != end) {
		r = __find_block(td, begin, true, &lookup);
		if (r) {
			if (r == -ENODATA)
				break;

			return r;
		}

		if ((lookup.block != pool_end) ||
		    (lookup.shared != *maybe_shared))
			break;

		pool_end++;
		begin++;
	}

	*thin_end = begin;
	return 0;
}

int dm_thin_find_mapped_range(struct dm_thin_device *td,
			      dm_block_t begin, dm_block_t end,
			      dm_block_t *thin_begin, dm_block_t *thin_end,
			      dm_block_t *pool_begin, bool *maybe_shared)
{
	int r = -EINVAL;
	struct dm_pool_metadata *pmd = td->pmd;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io) {
		r = __find_mapped_range(td, begin, end, thin_begin, thin_end,
					pool_begin, maybe_shared);
	}
	up_read(&pmd->root_lock);

	return r;
}
static int __insert(struct dm_thin_device *td, dm_block_t block,
		    dm_block_t data_block)
{
	int r, inserted;
	__le64 value;
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };

	value = cpu_to_le64(pack_block_time(data_block, pmd->time));
	__dm_bless_for_disk(&value);

	r = dm_btree_insert_notify(&pmd->info, pmd->root, keys, &value,
				   &pmd->root, &inserted);
	if (r)
		return r;

	td->changed = true;
	if (inserted)
		td->mapped_blocks++;

	return 0;
}

int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
			 dm_block_t data_block)
{
	int r = -EINVAL;

	pmd_write_lock(td->pmd);
	if (!td->pmd->fail_io)
		r = __insert(td, block, data_block);
	pmd_write_unlock(td->pmd);

	return r;
}
static int __remove(struct dm_thin_device *td, dm_block_t block)
{
	int r;
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };

	r = dm_btree_remove(&pmd->info, pmd->root, keys, &pmd->root);
	if (r)
		return r;

	td->mapped_blocks--;
	td->changed = true;

	return 0;
}
static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end)
{
	int r;
	unsigned count, total_count = 0;
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[1] = { td->id };
	__le64 value;
	dm_block_t mapping_root;

	/*
	 * Find the mapping tree
	 */
	r = dm_btree_lookup(&pmd->tl_info, pmd->root, keys, &value);
	if (r)
		return r;

	/*
	 * Remove from the mapping tree, taking care to inc the
	 * ref count so it doesn't get deleted.
	 */
	mapping_root = le64_to_cpu(value);
	dm_tm_inc(pmd->tm, mapping_root);
	r = dm_btree_remove(&pmd->tl_info, pmd->root, keys, &pmd->root);
	if (r)
		return r;

	/*
	 * Remove leaves stops at the first unmapped entry, so we have to
	 * loop round finding mapped ranges.
	 */
	while (begin < end) {
		r = dm_btree_lookup_next(&pmd->bl_info, mapping_root, &begin, &begin, &value);
		if (r == -ENODATA)
			break;

		if (r)
			return r;

		if (begin >= end)
			break;

		r = dm_btree_remove_leaves(&pmd->bl_info, mapping_root, &begin, end, &mapping_root, &count);
		if (r)
			return r;

		total_count += count;
	}

	td->mapped_blocks -= total_count;
	td->changed = true;

	/*
	 * Reinsert the mapping tree.
	 */
	value = cpu_to_le64(mapping_root);
	__dm_bless_for_disk(&value);
	return dm_btree_insert(&pmd->tl_info, pmd->root, keys, &value, &pmd->root);
}
int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
{
	int r = -EINVAL;

	pmd_write_lock(td->pmd);
	if (!td->pmd->fail_io)
		r = __remove(td, block);
	pmd_write_unlock(td->pmd);

	return r;
}

int dm_thin_remove_range(struct dm_thin_device *td,
			 dm_block_t begin, dm_block_t end)
{
	int r = -EINVAL;

	pmd_write_lock(td->pmd);
	if (!td->pmd->fail_io)
		r = __remove_range(td, begin, end);
	pmd_write_unlock(td->pmd);

	return r;
}
int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
{
	int r;
	uint32_t ref_count;

	down_read(&pmd->root_lock);
	r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
	if (!r)
		*result = (ref_count > 1);
	up_read(&pmd->root_lock);

	return r;
}

int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
{
	int r = 0;

	pmd_write_lock(pmd);
	for (; b != e; b++) {
		r = dm_sm_inc_block(pmd->data_sm, b);
		if (r)
			break;
	}
	pmd_write_unlock(pmd);

	return r;
}

int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
{
	int r = 0;

	pmd_write_lock(pmd);
	for (; b != e; b++) {
		r = dm_sm_dec_block(pmd->data_sm, b);
		if (r)
			break;
	}
	pmd_write_unlock(pmd);

	return r;
}
bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
{
	bool r;

	down_read(&td->pmd->root_lock);
	r = td->changed;
	up_read(&td->pmd->root_lock);

	return r;
}

bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd)
{
	bool r = false;
	struct dm_thin_device *td, *tmp;

	down_read(&pmd->root_lock);
	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
		if (td->changed) {
			r = true;
			break;
		}
	}
	up_read(&pmd->root_lock);

	return r;
}

bool dm_thin_aborted_changes(struct dm_thin_device *td)
{
	bool r;

	down_read(&td->pmd->root_lock);
	r = td->aborted_with_changes;
	up_read(&td->pmd->root_lock);

	return r;
}
int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result)
{
	int r = -EINVAL;

	pmd_write_lock(pmd);
	if (!pmd->fail_io)
		r = dm_sm_new_block(pmd->data_sm, result);
	pmd_write_unlock(pmd);

	return r;
}

int dm_pool_commit_metadata(struct dm_pool_metadata *pmd)
{
	int r = -EINVAL;

	/*
	 * Care is taken to not have commit be what
	 * triggers putting the thin-pool in-service.
	 */
	pmd_write_lock_in_core(pmd);
	if (pmd->fail_io)
		goto out;

	r = __commit_transaction(pmd);
	if (r < 0)
		goto out;

	/*
	 * Open the next transaction.
	 */
	r = __begin_transaction(pmd);
out:
	pmd_write_unlock(pmd);

	return r;
}
static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd)
{
	struct dm_thin_device *td;

	list_for_each_entry(td, &pmd->thin_devices, list)
		td->aborted_with_changes = td->changed;
}

int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
{
	int r = -EINVAL;

	pmd_write_lock(pmd);
	if (pmd->fail_io)
		goto out;

	__set_abort_with_changes_flags(pmd);
	__destroy_persistent_data_objects(pmd);
	r = __create_persistent_data_objects(pmd, false);
	if (r)
		pmd->fail_io = true;

out:
	pmd_write_unlock(pmd);

	return r;
}
int dm_pool_get_free_block_count(struct dm_pool_metadata *pmd, dm_block_t *result)
{
	int r = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		r = dm_sm_get_nr_free(pmd->data_sm, result);
	up_read(&pmd->root_lock);

	return r;
}

int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
					  dm_block_t *result)
{
	int r = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		r = dm_sm_get_nr_free(pmd->metadata_sm, result);

	if (!r) {
		if (*result < pmd->metadata_reserve)
			*result = 0;
		else
			*result -= pmd->metadata_reserve;
	}
	up_read(&pmd->root_lock);

	return r;
}
int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
				  dm_block_t *result)
{
	int r = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		r = dm_sm_get_nr_blocks(pmd->metadata_sm, result);
	up_read(&pmd->root_lock);

	return r;
}

int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result)
{
	int r = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		r = dm_sm_get_nr_blocks(pmd->data_sm, result);
	up_read(&pmd->root_lock);

	return r;
}
int dm_thin_get_mapped_count(struct dm_thin_device *td, dm_block_t *result)
{
	int r = -EINVAL;
	struct dm_pool_metadata *pmd = td->pmd;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io) {
		*result = td->mapped_blocks;
		r = 0;
	}
	up_read(&pmd->root_lock);

	return r;
}
static int __highest_block(struct dm_thin_device *td, dm_block_t *result)
{
	int r;
	__le64 value_le;
	dm_block_t thin_root;
	struct dm_pool_metadata *pmd = td->pmd;

	r = dm_btree_lookup(&pmd->tl_info, pmd->root, &td->id, &value_le);
	if (r)
		return r;

	thin_root = le64_to_cpu(value_le);

	return dm_btree_find_highest_key(&pmd->bl_info, thin_root, result);
}

int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
				     dm_block_t *result)
{
	int r = -EINVAL;
	struct dm_pool_metadata *pmd = td->pmd;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		r = __highest_block(td, result);
	up_read(&pmd->root_lock);

	return r;
}
static int __resize_space_map(struct dm_space_map *sm, dm_block_t new_count)
{
	int r;
	dm_block_t old_count;

	r = dm_sm_get_nr_blocks(sm, &old_count);
	if (r)
		return r;

	if (new_count == old_count)
		return 0;

	if (new_count < old_count) {
		DMERR("cannot reduce size of space map");
		return -EINVAL;
	}

	return dm_sm_extend(sm, new_count - old_count);
}

int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
{
	int r = -EINVAL;

	pmd_write_lock(pmd);
	if (!pmd->fail_io)
		r = __resize_space_map(pmd->data_sm, new_count);
	pmd_write_unlock(pmd);

	return r;
}
int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
{
	int r = -EINVAL;

	pmd_write_lock(pmd);
	if (!pmd->fail_io) {
		r = __resize_space_map(pmd->metadata_sm, new_count);
		if (!r)
			__set_metadata_reserve(pmd);
	}
	pmd_write_unlock(pmd);

	return r;
}
void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
{
	pmd_write_lock_in_core(pmd);
	dm_bm_set_read_only(pmd->bm);
	pmd_write_unlock(pmd);
}

void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd)
{
	pmd_write_lock_in_core(pmd);
	dm_bm_set_read_write(pmd->bm);
	pmd_write_unlock(pmd);
}
int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
					dm_block_t threshold,
					dm_sm_threshold_fn fn,
					void *context)
{
	int r;

	pmd_write_lock_in_core(pmd);
	r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context);
	pmd_write_unlock(pmd);

	return r;
}

void dm_pool_register_pre_commit_callback(struct dm_pool_metadata *pmd,
					  dm_pool_pre_commit_fn fn,
					  void *context)
{
	pmd_write_lock_in_core(pmd);
	pmd->pre_commit_fn = fn;
	pmd->pre_commit_context = context;
	pmd_write_unlock(pmd);
}
int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
{
	int r = -EINVAL;
	struct dm_block *sblock;
	struct thin_disk_superblock *disk_super;

	pmd_write_lock(pmd);
	if (pmd->fail_io)
		goto out;

	pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG;

	r = superblock_lock(pmd, &sblock);
	if (r) {
		DMERR("couldn't lock superblock");
		goto out;
	}

	disk_super = dm_block_data(sblock);
	disk_super->flags = cpu_to_le32(pmd->flags);

	dm_bm_unlock(sblock);
out:
	pmd_write_unlock(pmd);

	return r;
}

bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
{
	bool needs_check;

	down_read(&pmd->root_lock);
	needs_check = pmd->flags & THIN_METADATA_NEEDS_CHECK_FLAG;
	up_read(&pmd->root_lock);

	return needs_check;
}

void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd)
{
	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		dm_tm_issue_prefetches(pmd->tm);
	up_read(&pmd->root_lock);
}