/*
 * Copyright (C) 2011-2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */
#include "dm-thin-metadata.h"
#include "persistent-data/dm-btree.h"
#include "persistent-data/dm-space-map.h"
#include "persistent-data/dm-space-map-disk.h"
#include "persistent-data/dm-transaction-manager.h"

#include <linux/list.h>
#include <linux/device-mapper.h>
#include <linux/workqueue.h>
/*--------------------------------------------------------------------------
 * As far as the metadata goes, there is:
 *
 * - A superblock in block zero, taking up fewer than 512 bytes for
 *   atomic writes.
 *
 * - A space map managing the metadata blocks.
 *
 * - A space map managing the data blocks.
 *
 * - A btree mapping our internal thin dev ids onto struct disk_device_details.
 *
 * - A hierarchical btree, with 2 levels which effectively maps (thin
 *   dev id, virtual block) -> block_time.  Block time is a 64-bit
 *   field holding the time in the low 24 bits, and the block in the top
 *   40 bits.
 *
 * BTrees consist solely of btree_nodes, each of which fills a block.  Some
 * are internal nodes, as such their values are a __le64 pointing to other
 * nodes.  Leaf nodes can store data of any reasonable size (i.e. much
 * smaller than the block size).  The nodes consist of the header,
 * followed by an array of keys, followed by an array of values.  We have
 * to binary search on the keys so they're all held together to help the
 * cpu cache.
 *
 * Space maps have 2 btrees:
 *
 * - One maps a uint64_t onto a struct index_entry, which points to a
 *   bitmap block and has some details about how many free entries there
 *   are, etc.
 *
 * - The bitmap blocks have a header (for the checksum).  Then the rest
 *   of the block is pairs of bits.  With the meaning being:
 *
 *   0 - ref count is 0
 *   1 - ref count is 1
 *   2 - ref count is 2
 *   3 - ref count is higher than 2
 *
 * - If the count is higher than 2 then the ref count is entered in a
 *   second btree that directly maps the block_address to a uint32_t ref
 *   count.
 *
 * The space map metadata variant doesn't have a bitmaps btree.  Instead
 * it has a single block's worth of index_entries.  This avoids
 * recursive issues with the bitmap btree needing to allocate space in
 * order to insert.  With a small data block size such as 64k the
 * metadata can support data devices that are hundreds of terabytes.
 *
 * The space maps allocate space linearly from front to back.  Space that
 * is freed in a transaction is never recycled within that transaction.
 * To try and avoid fragmenting _free_ space the allocator always goes
 * back and fills in gaps.
 *
 * All metadata io is in THIN_METADATA_BLOCK_SIZE sized/aligned chunks
 * from the block manager.
 *--------------------------------------------------------------------------*/
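/*
 * Illustrative example of the block_time packing described above (the real
 * helpers are pack_block_time()/unpack_block_time() below): mapping data
 * block 0x1234 at time 5 is stored as (0x1234 << 24) | 5 = 0x1234000005;
 * unpacking shifts right by 24 bits to recover the block and masks the low
 * 24 bits to recover the time.
 */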
#define DM_MSG_PREFIX   "thin metadata"

#define THIN_SUPERBLOCK_MAGIC 27022010
#define THIN_SUPERBLOCK_LOCATION 0
#define THIN_VERSION 2
#define SECTOR_TO_BLOCK_SHIFT 3

/*
 * For btree insert:
 *  3 for btree insert +
 *  2 for btree lookup used within space map
 * For btree remove:
 *  2 for shadow spine +
 *  4 for rebalance 3 child node
 */
#define THIN_MAX_CONCURRENT_LOCKS 6

/* This should be plenty */
#define SPACE_MAP_ROOT_SIZE 128
/*
 * Little endian on-disk superblock and device details.
 */
struct thin_disk_superblock {
	__le32 csum;	/* Checksum of superblock except for this field. */
	__le32 flags;
	__le64 blocknr;	/* This block number, dm_block_t. */

	__u8 uuid[16];
	__le64 magic;
	__le32 version;
	__le32 time;

	__le64 trans_id;

	/*
	 * Root held by userspace transactions.
	 */
	__le64 held_root;

	__u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];

	/*
	 * 2-level btree mapping (dev_id, (dev block, time)) -> data block
	 */
	__le64 data_mapping_root;

	/*
	 * Device detail root mapping dev_id -> device_details
	 */
	__le64 device_details_root;

	__le32 data_block_size;		/* In 512-byte sectors. */

	__le32 metadata_block_size;	/* In 512-byte sectors. */
	__le64 metadata_nr_blocks;

	__le32 compat_flags;
	__le32 compat_ro_flags;
	__le32 incompat_flags;
} __packed;
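/*
 * Note that the csum field above covers the rest of the superblock: the
 * validator below checksums from &disk_super->flags onwards for
 * block_size - sizeof(__le32) bytes, so every field except csum itself is
 * protected.
 */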
struct disk_device_details {
	__le64 mapped_blocks;
	__le64 transaction_id;	/* When created. */
	__le32 creation_time;
	__le32 snapshotted_time;
} __packed;
struct dm_pool_metadata {
	struct hlist_node hash;

	struct block_device *bdev;
	struct dm_block_manager *bm;
	struct dm_space_map *metadata_sm;
	struct dm_space_map *data_sm;
	struct dm_transaction_manager *tm;
	struct dm_transaction_manager *nb_tm;

	/*
	 * Two-level btree.
	 * First level holds thin_dev_t.
	 * Second level holds mappings.
	 */
	struct dm_btree_info info;

	/*
	 * Non-blocking version of the above.
	 */
	struct dm_btree_info nb_info;

	/*
	 * Just the top level for deleting whole devices.
	 */
	struct dm_btree_info tl_info;

	/*
	 * Just the bottom level for creating new devices.
	 */
	struct dm_btree_info bl_info;

	/*
	 * Describes the device details btree.
	 */
	struct dm_btree_info details_info;

	struct rw_semaphore root_lock;
	uint32_t time;
	dm_block_t root;
	dm_block_t details_root;
	struct list_head thin_devices;
	uint64_t trans_id;
	unsigned long flags;
	sector_t data_block_size;

	/*
	 * We reserve a section of the metadata for commit overhead.
	 * All reported space does *not* include this.
	 */
	dm_block_t metadata_reserve;

	/*
	 * Set if a transaction has to be aborted but the attempt to roll back
	 * to the previous (good) transaction failed.  The only pool metadata
	 * operation possible in this state is the closing of the device.
	 */
	bool fail_io:1;

	/*
	 * Reading the space map roots can fail, so we read it into these
	 * buffers before the superblock is locked and updated.
	 */
	__u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
};
struct dm_thin_device {
	struct list_head list;
	struct dm_pool_metadata *pmd;
	dm_thin_id id;

	int open_count;
	bool changed:1;
	bool aborted_with_changes:1;
	uint64_t mapped_blocks;
	uint64_t transaction_id;
	uint32_t creation_time;
	uint32_t snapshotted_time;
};
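/*
 * Locking convention used throughout this file: the public dm_pool_*() and
 * dm_thin_*() entry points take pmd->root_lock (for read or write as
 * appropriate), while the double-underscore helpers assume the caller
 * already holds it.
 */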
/*----------------------------------------------------------------
 * superblock validator
 *--------------------------------------------------------------*/

#define SUPERBLOCK_CSUM_XOR 160774

static void sb_prepare_for_write(struct dm_block_validator *v,
				 struct dm_block *b,
				 size_t block_size)
{
	struct thin_disk_superblock *disk_super = dm_block_data(b);

	disk_super->blocknr = cpu_to_le64(dm_block_location(b));
	disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
						      block_size - sizeof(__le32),
						      SUPERBLOCK_CSUM_XOR));
}
static int sb_check(struct dm_block_validator *v,
		    struct dm_block *b,
		    size_t block_size)
{
	struct thin_disk_superblock *disk_super = dm_block_data(b);
	__le32 csum_le;

	if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
		DMERR("sb_check failed: blocknr %llu: wanted %llu",
		      le64_to_cpu(disk_super->blocknr),
		      (unsigned long long)dm_block_location(b));
		return -ENOTBLK;
	}

	if (le64_to_cpu(disk_super->magic) != THIN_SUPERBLOCK_MAGIC) {
		DMERR("sb_check failed: magic %llu: wanted %llu",
		      le64_to_cpu(disk_super->magic),
		      (unsigned long long)THIN_SUPERBLOCK_MAGIC);
		return -EILSEQ;
	}

	csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
					     block_size - sizeof(__le32),
					     SUPERBLOCK_CSUM_XOR));
	if (csum_le != disk_super->csum) {
		DMERR("sb_check failed: csum %u: wanted %u",
		      le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
		return -EILSEQ;
	}

	return 0;
}

static struct dm_block_validator sb_validator = {
	.name = "superblock",
	.prepare_for_write = sb_prepare_for_write,
	.check = sb_check
};
/*----------------------------------------------------------------
 * Methods for the btree value types
 *--------------------------------------------------------------*/

static uint64_t pack_block_time(dm_block_t b, uint32_t t)
{
	return (b << 24) | t;
}

static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
{
	*b = v >> 24;
	*t = v & ((1 << 24) - 1);
}
static void data_block_inc(void *context, const void *value_le)
{
	struct dm_space_map *sm = context;
	__le64 v_le;
	dm_block_t b;
	uint32_t t;

	memcpy(&v_le, value_le, sizeof(v_le));
	unpack_block_time(le64_to_cpu(v_le), &b, &t);
	dm_sm_inc_block(sm, b);
}

static void data_block_dec(void *context, const void *value_le)
{
	struct dm_space_map *sm = context;
	__le64 v_le;
	dm_block_t b;
	uint32_t t;

	memcpy(&v_le, value_le, sizeof(v_le));
	unpack_block_time(le64_to_cpu(v_le), &b, &t);
	dm_sm_dec_block(sm, b);
}

static int data_block_equal(void *context, const void *value1_le, const void *value2_le)
{
	__le64 v1_le, v2_le;
	dm_block_t b1, b2;
	uint32_t t;

	memcpy(&v1_le, value1_le, sizeof(v1_le));
	memcpy(&v2_le, value2_le, sizeof(v2_le));
	unpack_block_time(le64_to_cpu(v1_le), &b1, &t);
	unpack_block_time(le64_to_cpu(v2_le), &b2, &t);

	return b1 == b2;
}
static void subtree_inc(void *context, const void *value)
{
	struct dm_btree_info *info = context;
	__le64 root_le;
	uint64_t root;

	memcpy(&root_le, value, sizeof(root_le));
	root = le64_to_cpu(root_le);
	dm_tm_inc(info->tm, root);
}

static void subtree_dec(void *context, const void *value)
{
	struct dm_btree_info *info = context;
	__le64 root_le;
	uint64_t root;

	memcpy(&root_le, value, sizeof(root_le));
	root = le64_to_cpu(root_le);
	if (dm_btree_del(info, root))
		DMERR("btree delete failed");
}

static int subtree_equal(void *context, const void *value1_le, const void *value2_le)
{
	__le64 v1_le, v2_le;

	memcpy(&v1_le, value1_le, sizeof(v1_le));
	memcpy(&v2_le, value2_le, sizeof(v2_le));

	return v1_le == v2_le;
}
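/*
 * The two value types above serve the two levels of the mapping btree:
 * data_block_* reference count individual data blocks via the data space
 * map, while subtree_* reference count whole per-device mapping trees
 * (the __le64 values in the top level are btree roots).
 */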
/*----------------------------------------------------------------*/
static int superblock_lock_zero(struct dm_pool_metadata *pmd,
				struct dm_block **sblock)
{
	return dm_bm_write_lock_zero(pmd->bm, THIN_SUPERBLOCK_LOCATION,
				     &sb_validator, sblock);
}

static int superblock_lock(struct dm_pool_metadata *pmd,
			   struct dm_block **sblock)
{
	return dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
				&sb_validator, sblock);
}
static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
{
	int r;
	unsigned i;
	struct dm_block *b;
	__le64 *data_le, zero = cpu_to_le64(0);
	unsigned block_size = dm_bm_block_size(bm) / sizeof(__le64);

	/*
	 * We can't use a validator here - it may be all zeroes.
	 */
	r = dm_bm_read_lock(bm, THIN_SUPERBLOCK_LOCATION, NULL, &b);
	if (r)
		return r;

	data_le = dm_block_data(b);
	*result = 1;
	for (i = 0; i < block_size; i++) {
		if (data_le[i] != zero) {
			*result = 0;
			break;
		}
	}

	dm_bm_unlock(b);

	return 0;
}
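/*
 * A device whose first block is entirely zero is treated as unformatted;
 * __open_or_format_metadata() below uses this to decide between formatting
 * fresh metadata and opening an existing superblock.
 */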
static void __setup_btree_details(struct dm_pool_metadata *pmd)
{
	pmd->info.tm = pmd->tm;
	pmd->info.levels = 2;
	pmd->info.value_type.context = pmd->data_sm;
	pmd->info.value_type.size = sizeof(__le64);
	pmd->info.value_type.inc = data_block_inc;
	pmd->info.value_type.dec = data_block_dec;
	pmd->info.value_type.equal = data_block_equal;

	memcpy(&pmd->nb_info, &pmd->info, sizeof(pmd->nb_info));
	pmd->nb_info.tm = pmd->nb_tm;

	pmd->tl_info.tm = pmd->tm;
	pmd->tl_info.levels = 1;
	pmd->tl_info.value_type.context = &pmd->bl_info;
	pmd->tl_info.value_type.size = sizeof(__le64);
	pmd->tl_info.value_type.inc = subtree_inc;
	pmd->tl_info.value_type.dec = subtree_dec;
	pmd->tl_info.value_type.equal = subtree_equal;

	pmd->bl_info.tm = pmd->tm;
	pmd->bl_info.levels = 1;
	pmd->bl_info.value_type.context = pmd->data_sm;
	pmd->bl_info.value_type.size = sizeof(__le64);
	pmd->bl_info.value_type.inc = data_block_inc;
	pmd->bl_info.value_type.dec = data_block_dec;
	pmd->bl_info.value_type.equal = data_block_equal;

	pmd->details_info.tm = pmd->tm;
	pmd->details_info.levels = 1;
	pmd->details_info.value_type.context = NULL;
	pmd->details_info.value_type.size = sizeof(struct disk_device_details);
	pmd->details_info.value_type.inc = NULL;
	pmd->details_info.value_type.dec = NULL;
	pmd->details_info.value_type.equal = NULL;
}
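/*
 * Summary of the btree configurations set up above: info is the 2-level
 * (dev id, virtual block) -> block_time tree, nb_info is its non-blocking
 * clone, tl_info/bl_info expose just the top and bottom levels of that same
 * tree, and details_info describes the dev_id -> disk_device_details tree.
 */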
static int save_sm_roots(struct dm_pool_metadata *pmd)
{
	int r;
	size_t len;

	r = dm_sm_root_size(pmd->metadata_sm, &len);
	if (r < 0)
		return r;

	r = dm_sm_copy_root(pmd->metadata_sm, &pmd->metadata_space_map_root, len);
	if (r < 0)
		return r;

	r = dm_sm_root_size(pmd->data_sm, &len);
	if (r < 0)
		return r;

	return dm_sm_copy_root(pmd->data_sm, &pmd->data_space_map_root, len);
}

static void copy_sm_roots(struct dm_pool_metadata *pmd,
			  struct thin_disk_superblock *disk)
{
	memcpy(&disk->metadata_space_map_root,
	       &pmd->metadata_space_map_root,
	       sizeof(pmd->metadata_space_map_root));

	memcpy(&disk->data_space_map_root,
	       &pmd->data_space_map_root,
	       sizeof(pmd->data_space_map_root));
}
static int __write_initial_superblock(struct dm_pool_metadata *pmd)
{
	int r;
	struct dm_block *sblock;
	struct thin_disk_superblock *disk_super;
	sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT;

	if (bdev_size > THIN_METADATA_MAX_SECTORS)
		bdev_size = THIN_METADATA_MAX_SECTORS;

	r = dm_sm_commit(pmd->data_sm);
	if (r < 0)
		return r;

	r = dm_tm_pre_commit(pmd->tm);
	if (r < 0)
		return r;

	r = save_sm_roots(pmd);
	if (r < 0)
		return r;

	r = superblock_lock_zero(pmd, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	disk_super->flags = 0;
	memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
	disk_super->magic = cpu_to_le64(THIN_SUPERBLOCK_MAGIC);
	disk_super->version = cpu_to_le32(THIN_VERSION);
	disk_super->time = 0;
	disk_super->trans_id = 0;
	disk_super->held_root = 0;

	copy_sm_roots(pmd, disk_super);

	disk_super->data_mapping_root = cpu_to_le64(pmd->root);
	disk_super->device_details_root = cpu_to_le64(pmd->details_root);
	disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE);
	disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT);
	disk_super->data_block_size = cpu_to_le32(pmd->data_block_size);

	return dm_tm_commit(pmd->tm, sblock);
}
static int __format_metadata(struct dm_pool_metadata *pmd)
{
	int r;

	r = dm_tm_create_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
				 &pmd->tm, &pmd->metadata_sm);
	if (r < 0) {
		DMERR("tm_create_with_sm failed");
		return r;
	}

	pmd->data_sm = dm_sm_disk_create(pmd->tm, 0);
	if (IS_ERR(pmd->data_sm)) {
		DMERR("sm_disk_create failed");
		r = PTR_ERR(pmd->data_sm);
		goto bad_cleanup_tm;
	}

	pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm);
	if (!pmd->nb_tm) {
		DMERR("could not create non-blocking clone tm");
		r = -ENOMEM;
		goto bad_cleanup_data_sm;
	}

	__setup_btree_details(pmd);

	r = dm_btree_empty(&pmd->info, &pmd->root);
	if (r < 0)
		goto bad_cleanup_nb_tm;

	r = dm_btree_empty(&pmd->details_info, &pmd->details_root);
	if (r < 0) {
		DMERR("couldn't create devices root");
		goto bad_cleanup_nb_tm;
	}

	r = __write_initial_superblock(pmd);
	if (r)
		goto bad_cleanup_nb_tm;

	return 0;

bad_cleanup_nb_tm:
	dm_tm_destroy(pmd->nb_tm);
bad_cleanup_data_sm:
	dm_sm_destroy(pmd->data_sm);
bad_cleanup_tm:
	dm_tm_destroy(pmd->tm);
	dm_sm_destroy(pmd->metadata_sm);

	return r;
}
static int __check_incompat_features(struct thin_disk_superblock *disk_super,
				     struct dm_pool_metadata *pmd)
{
	uint32_t features;

	features = le32_to_cpu(disk_super->incompat_flags) & ~THIN_FEATURE_INCOMPAT_SUPP;
	if (features) {
		DMERR("could not access metadata due to unsupported optional features (%lx).",
		      (unsigned long)features);
		return -EINVAL;
	}

	/*
	 * Check for read-only metadata to skip the following RDWR checks.
	 */
	if (get_disk_ro(pmd->bdev->bd_disk))
		return 0;

	features = le32_to_cpu(disk_super->compat_ro_flags) & ~THIN_FEATURE_COMPAT_RO_SUPP;
	if (features) {
		DMERR("could not access metadata RDWR due to unsupported optional features (%lx).",
		      (unsigned long)features);
		return -EINVAL;
	}

	return 0;
}
static int __open_metadata(struct dm_pool_metadata *pmd)
{
	int r;
	struct dm_block *sblock;
	struct thin_disk_superblock *disk_super;

	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			    &sb_validator, &sblock);
	if (r < 0) {
		DMERR("couldn't read superblock");
		return r;
	}

	disk_super = dm_block_data(sblock);

	/* Verify the data block size hasn't changed */
	if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
		DMERR("changing the data block size (from %u to %llu) is not supported",
		      le32_to_cpu(disk_super->data_block_size),
		      (unsigned long long)pmd->data_block_size);
		r = -EINVAL;
		goto bad_unlock_sblock;
	}

	r = __check_incompat_features(disk_super, pmd);
	if (r < 0)
		goto bad_unlock_sblock;

	r = dm_tm_open_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			       disk_super->metadata_space_map_root,
			       sizeof(disk_super->metadata_space_map_root),
			       &pmd->tm, &pmd->metadata_sm);
	if (r < 0) {
		DMERR("tm_open_with_sm failed");
		goto bad_unlock_sblock;
	}

	pmd->data_sm = dm_sm_disk_open(pmd->tm, disk_super->data_space_map_root,
				       sizeof(disk_super->data_space_map_root));
	if (IS_ERR(pmd->data_sm)) {
		DMERR("sm_disk_open failed");
		r = PTR_ERR(pmd->data_sm);
		goto bad_cleanup_tm;
	}

	pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm);
	if (!pmd->nb_tm) {
		DMERR("could not create non-blocking clone tm");
		r = -ENOMEM;
		goto bad_cleanup_data_sm;
	}

	__setup_btree_details(pmd);
	dm_bm_unlock(sblock);

	return 0;

bad_cleanup_data_sm:
	dm_sm_destroy(pmd->data_sm);
bad_cleanup_tm:
	dm_tm_destroy(pmd->tm);
	dm_sm_destroy(pmd->metadata_sm);
bad_unlock_sblock:
	dm_bm_unlock(sblock);

	return r;
}
static int __open_or_format_metadata(struct dm_pool_metadata *pmd, bool format_device)
{
	int r, unformatted;

	r = __superblock_all_zeroes(pmd->bm, &unformatted);
	if (r)
		return r;

	if (unformatted)
		return format_device ? __format_metadata(pmd) : -EPERM;

	return __open_metadata(pmd);
}

static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool format_device)
{
	int r;

	pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
					  THIN_MAX_CONCURRENT_LOCKS);
	if (IS_ERR(pmd->bm)) {
		DMERR("could not create block manager");
		return PTR_ERR(pmd->bm);
	}

	r = __open_or_format_metadata(pmd, format_device);
	if (r)
		dm_block_manager_destroy(pmd->bm);

	return r;
}
static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd)
{
	dm_sm_destroy(pmd->data_sm);
	dm_sm_destroy(pmd->metadata_sm);
	dm_tm_destroy(pmd->nb_tm);
	dm_tm_destroy(pmd->tm);
	dm_block_manager_destroy(pmd->bm);
}
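/*
 * Teardown happens in roughly the reverse order of creation in
 * __create_persistent_data_objects()/__open_or_format_metadata(): space
 * maps first, then the transaction managers, then the block manager.
 */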
static int __begin_transaction(struct dm_pool_metadata *pmd)
{
	int r;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock;

	/*
	 * We re-read the superblock every time.  Shouldn't need to do this
	 * really.
	 */
	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			    &sb_validator, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	pmd->time = le32_to_cpu(disk_super->time);
	pmd->root = le64_to_cpu(disk_super->data_mapping_root);
	pmd->details_root = le64_to_cpu(disk_super->device_details_root);
	pmd->trans_id = le64_to_cpu(disk_super->trans_id);
	pmd->flags = le32_to_cpu(disk_super->flags);
	pmd->data_block_size = le32_to_cpu(disk_super->data_block_size);

	dm_bm_unlock(sblock);

	return 0;
}
static int __write_changed_details(struct dm_pool_metadata *pmd)
{
	int r;
	struct dm_thin_device *td, *tmp;
	struct disk_device_details details;
	uint64_t key;

	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
		if (!td->changed)
			continue;

		key = td->id;

		details.mapped_blocks = cpu_to_le64(td->mapped_blocks);
		details.transaction_id = cpu_to_le64(td->transaction_id);
		details.creation_time = cpu_to_le32(td->creation_time);
		details.snapshotted_time = cpu_to_le32(td->snapshotted_time);
		__dm_bless_for_disk(&details);

		r = dm_btree_insert(&pmd->details_info, pmd->details_root,
				    &key, &details, &pmd->details_root);
		if (r)
			return r;

		if (td->open_count)
			td->changed = false;
		else {
			list_del(&td->list);
			kfree(td);
		}
	}

	return 0;
}
static int __commit_transaction(struct dm_pool_metadata *pmd)
{
	int r;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock;

	/*
	 * We need to know if the thin_disk_superblock exceeds a 512-byte sector.
	 */
	BUILD_BUG_ON(sizeof(struct thin_disk_superblock) > 512);

	r = __write_changed_details(pmd);
	if (r < 0)
		return r;

	r = dm_sm_commit(pmd->data_sm);
	if (r < 0)
		return r;

	r = dm_tm_pre_commit(pmd->tm);
	if (r < 0)
		return r;

	r = save_sm_roots(pmd);
	if (r < 0)
		return r;

	r = superblock_lock(pmd, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	disk_super->time = cpu_to_le32(pmd->time);
	disk_super->data_mapping_root = cpu_to_le64(pmd->root);
	disk_super->device_details_root = cpu_to_le64(pmd->details_root);
	disk_super->trans_id = cpu_to_le64(pmd->trans_id);
	disk_super->flags = cpu_to_le32(pmd->flags);

	copy_sm_roots(pmd, disk_super);

	return dm_tm_commit(pmd->tm, sblock);
}
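/*
 * Note the ordering in __commit_transaction(): the space maps are committed
 * and their roots copied into pmd's buffers before the superblock is
 * write-locked, and the superblock itself (under 512 bytes, see the
 * BUILD_BUG_ON) is written last by dm_tm_commit(), so a transaction becomes
 * visible atomically.
 */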
static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
{
	int r;
	dm_block_t total;
	dm_block_t max_blocks = 4096; /* 16M */

	r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
	if (r) {
		DMERR("could not get size of metadata device");
		pmd->metadata_reserve = max_blocks;
	} else
		pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
}
struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
					       sector_t data_block_size,
					       bool format_device)
{
	int r;
	struct dm_pool_metadata *pmd;

	pmd = kmalloc(sizeof(*pmd), GFP_KERNEL);
	if (!pmd) {
		DMERR("could not allocate metadata struct");
		return ERR_PTR(-ENOMEM);
	}

	init_rwsem(&pmd->root_lock);
	pmd->time = 0;
	INIT_LIST_HEAD(&pmd->thin_devices);
	pmd->fail_io = false;
	pmd->bdev = bdev;
	pmd->data_block_size = data_block_size;

	r = __create_persistent_data_objects(pmd, format_device);
	if (r) {
		kfree(pmd);
		return ERR_PTR(r);
	}

	r = __begin_transaction(pmd);
	if (r < 0) {
		if (dm_pool_metadata_close(pmd) < 0)
			DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
		return ERR_PTR(r);
	}

	__set_metadata_reserve(pmd);

	return pmd;
}
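/*
 * Illustrative call sequence (a sketch, not taken from this file): the pool
 * target opens the metadata with dm_pool_metadata_open(bdev,
 * data_block_size, format_device), creates or opens thin devices with
 * dm_pool_create_thin()/dm_pool_open_thin_device(), maps blocks with
 * dm_thin_insert_block(), persists changes with dm_pool_commit_metadata()
 * and finally calls dm_pool_metadata_close().
 */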
int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
{
	int r;
	unsigned open_devices = 0;
	struct dm_thin_device *td, *tmp;

	down_read(&pmd->root_lock);
	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
		if (td->open_count)
			open_devices++;
		else {
			list_del(&td->list);
			kfree(td);
		}
	}
	up_read(&pmd->root_lock);

	if (open_devices) {
		DMERR("attempt to close pmd when %u device(s) are still open",
		      open_devices);
		return -EBUSY;
	}

	if (!dm_bm_is_read_only(pmd->bm) && !pmd->fail_io) {
		r = __commit_transaction(pmd);
		if (r < 0)
			DMWARN("%s: __commit_transaction() failed, error = %d",
			       __func__, r);
	}

	if (!pmd->fail_io)
		__destroy_persistent_data_objects(pmd);

	kfree(pmd);
	return 0;
}
/*
 * __open_device: Returns @td corresponding to device with id @dev,
 * creating it if @create is set and incrementing @td->open_count.
 * On failure, @td is undefined.
 */
static int __open_device(struct dm_pool_metadata *pmd,
			 dm_thin_id dev, int create,
			 struct dm_thin_device **td)
{
	int r, changed = 0;
	struct dm_thin_device *td2;
	uint64_t key = dev;
	struct disk_device_details details_le;

	/*
	 * If the device is already open, return it.
	 */
	list_for_each_entry(td2, &pmd->thin_devices, list)
		if (td2->id == dev) {
			/*
			 * May not create an already-open device.
			 */
			if (create)
				return -EEXIST;

			td2->open_count++;
			*td = td2;
			return 0;
		}

	/*
	 * Check the device exists.
	 */
	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
			    &key, &details_le);
	if (r) {
		if (r != -ENODATA || !create)
			return r;

		/*
		 * Create new device.
		 */
		changed = 1;
		details_le.mapped_blocks = 0;
		details_le.transaction_id = cpu_to_le64(pmd->trans_id);
		details_le.creation_time = cpu_to_le32(pmd->time);
		details_le.snapshotted_time = cpu_to_le32(pmd->time);
	}

	*td = kmalloc(sizeof(**td), GFP_NOIO);
	if (!*td)
		return -ENOMEM;

	(*td)->pmd = pmd;
	(*td)->id = dev;
	(*td)->open_count = 1;
	(*td)->changed = changed;
	(*td)->aborted_with_changes = false;
	(*td)->mapped_blocks = le64_to_cpu(details_le.mapped_blocks);
	(*td)->transaction_id = le64_to_cpu(details_le.transaction_id);
	(*td)->creation_time = le32_to_cpu(details_le.creation_time);
	(*td)->snapshotted_time = le32_to_cpu(details_le.snapshotted_time);

	list_add(&(*td)->list, &pmd->thin_devices);

	return 0;
}
static void __close_device(struct dm_thin_device *td)
{
	--td->open_count;
}
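/*
 * __open_device()/__close_device() effectively maintain a small
 * reference-counted cache: open dm_thin_device structs live on
 * pmd->thin_devices keyed by id, and are only freed once their open_count
 * has dropped and any changed details have been written back.
 */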
static int __create_thin(struct dm_pool_metadata *pmd,
			 dm_thin_id dev)
{
	int r;
	dm_block_t dev_root;
	uint64_t key = dev;
	struct disk_device_details details_le;
	struct dm_thin_device *td;
	__le64 value;

	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
			    &key, &details_le);
	if (!r)
		return -EEXIST;

	/*
	 * Create an empty btree for the mappings.
	 */
	r = dm_btree_empty(&pmd->bl_info, &dev_root);
	if (r)
		return r;

	/*
	 * Insert it into the main mapping tree.
	 */
	value = cpu_to_le64(dev_root);
	__dm_bless_for_disk(&value);
	r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
	if (r) {
		dm_btree_del(&pmd->bl_info, dev_root);
		return r;
	}

	r = __open_device(pmd, dev, 1, &td);
	if (r) {
		dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
		dm_btree_del(&pmd->bl_info, dev_root);
		return r;
	}
	__close_device(td);

	return 0;
}
int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev)
{
	int r = -EINVAL;

	down_write(&pmd->root_lock);
	if (!pmd->fail_io)
		r = __create_thin(pmd, dev);
	up_write(&pmd->root_lock);

	return r;
}
static int __set_snapshot_details(struct dm_pool_metadata *pmd,
				  struct dm_thin_device *snap,
				  dm_thin_id origin, uint32_t time)
{
	int r;
	struct dm_thin_device *td;

	r = __open_device(pmd, origin, 0, &td);
	if (r)
		return r;

	td->changed = true;
	td->snapshotted_time = time;

	snap->mapped_blocks = td->mapped_blocks;
	snap->snapshotted_time = time;
	__close_device(td);

	return 0;
}
static int __create_snap(struct dm_pool_metadata *pmd,
			 dm_thin_id dev, dm_thin_id origin)
{
	int r;
	dm_block_t origin_root;
	uint64_t key = origin, dev_key = dev;
	struct dm_thin_device *td;
	struct disk_device_details details_le;
	__le64 value;

	/* check this device is unused */
	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
			    &dev_key, &details_le);
	if (!r)
		return -EEXIST;

	/* find the mapping tree for the origin */
	r = dm_btree_lookup(&pmd->tl_info, pmd->root, &key, &value);
	if (r)
		return r;
	origin_root = le64_to_cpu(value);

	/* clone the origin, an inc will do */
	dm_tm_inc(pmd->tm, origin_root);

	/* insert into the main mapping tree */
	value = cpu_to_le64(origin_root);
	__dm_bless_for_disk(&value);
	key = dev;
	r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
	if (r) {
		dm_tm_dec(pmd->tm, origin_root);
		return r;
	}

	pmd->time++;

	r = __open_device(pmd, dev, 1, &td);
	if (r)
		goto bad;

	r = __set_snapshot_details(pmd, td, origin, pmd->time);
	__close_device(td);

	if (r)
		goto bad;

	return 0;

bad:
	dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
	dm_btree_remove(&pmd->details_info, pmd->details_root,
			&key, &pmd->details_root);
	return r;
}
int dm_pool_create_snap(struct dm_pool_metadata *pmd,
			dm_thin_id dev,
			dm_thin_id origin)
{
	int r = -EINVAL;

	down_write(&pmd->root_lock);
	if (!pmd->fail_io)
		r = __create_snap(pmd, dev, origin);
	up_write(&pmd->root_lock);

	return r;
}
static int __delete_device(struct dm_pool_metadata *pmd, dm_thin_id dev)
{
	int r;
	uint64_t key = dev;
	struct dm_thin_device *td;

	/* TODO: failure should mark the transaction invalid */
	r = __open_device(pmd, dev, 0, &td);
	if (r)
		return r;

	if (td->open_count > 1) {
		__close_device(td);
		return -EBUSY;
	}

	list_del(&td->list);
	kfree(td);
	r = dm_btree_remove(&pmd->details_info, pmd->details_root,
			    &key, &pmd->details_root);
	if (r)
		return r;

	r = dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
	if (r)
		return r;

	return 0;
}
int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd,
			       dm_thin_id dev)
{
	int r = -EINVAL;

	down_write(&pmd->root_lock);
	if (!pmd->fail_io)
		r = __delete_device(pmd, dev);
	up_write(&pmd->root_lock);

	return r;
}
int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
					uint64_t current_id,
					uint64_t new_id)
{
	int r = -EINVAL;

	down_write(&pmd->root_lock);

	if (pmd->fail_io)
		goto out;

	if (pmd->trans_id != current_id) {
		DMERR("mismatched transaction id");
		goto out;
	}

	pmd->trans_id = new_id;
	r = 0;

out:
	up_write(&pmd->root_lock);

	return r;
}
int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
					uint64_t *result)
{
	int r = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io) {
		*result = pmd->trans_id;
		r = 0;
	}
	up_read(&pmd->root_lock);

	return r;
}
static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r, inc;
	struct thin_disk_superblock *disk_super;
	struct dm_block *copy, *sblock;
	dm_block_t held_root;

	/*
	 * We commit to ensure the btree roots which we increment in a
	 * moment are up to date.
	 */
	__commit_transaction(pmd);

	/*
	 * Copy the superblock.
	 */
	dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
	r = dm_tm_shadow_block(pmd->tm, THIN_SUPERBLOCK_LOCATION,
			       &sb_validator, &copy, &inc);
	if (r)
		return r;

	held_root = dm_block_location(copy);
	disk_super = dm_block_data(copy);

	if (le64_to_cpu(disk_super->held_root)) {
		DMWARN("Pool metadata snapshot already exists: release this before taking another.");

		dm_tm_dec(pmd->tm, held_root);
		dm_tm_unlock(pmd->tm, copy);
		return -EBUSY;
	}

	/*
	 * Wipe the spacemap since we're not publishing this.
	 */
	memset(&disk_super->data_space_map_root, 0,
	       sizeof(disk_super->data_space_map_root));
	memset(&disk_super->metadata_space_map_root, 0,
	       sizeof(disk_super->metadata_space_map_root));

	/*
	 * Increment the data structures that need to be preserved.
	 */
	dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->data_mapping_root));
	dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->device_details_root));
	dm_tm_unlock(pmd->tm, copy);

	/*
	 * Write the held root into the superblock.
	 */
	r = superblock_lock(pmd, &sblock);
	if (r) {
		dm_tm_dec(pmd->tm, held_root);
		return r;
	}

	disk_super = dm_block_data(sblock);
	disk_super->held_root = cpu_to_le64(held_root);
	dm_bm_unlock(sblock);
	return 0;
}
int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r = -EINVAL;

	down_write(&pmd->root_lock);
	if (!pmd->fail_io)
		r = __reserve_metadata_snap(pmd);
	up_write(&pmd->root_lock);

	return r;
}
static int __release_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock, *copy;
	dm_block_t held_root;

	r = superblock_lock(pmd, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	held_root = le64_to_cpu(disk_super->held_root);
	disk_super->held_root = cpu_to_le64(0);

	dm_bm_unlock(sblock);

	if (!held_root) {
		DMWARN("No pool metadata snapshot found: nothing to release.");
		return -EINVAL;
	}

	r = dm_tm_read_lock(pmd->tm, held_root, &sb_validator, &copy);
	if (r)
		return r;

	disk_super = dm_block_data(copy);
	dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
	dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
	dm_sm_dec_block(pmd->metadata_sm, held_root);

	dm_tm_unlock(pmd->tm, copy);

	return 0;
}
int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r = -EINVAL;

	down_write(&pmd->root_lock);
	if (!pmd->fail_io)
		r = __release_metadata_snap(pmd);
	up_write(&pmd->root_lock);

	return r;
}
static int __get_metadata_snap(struct dm_pool_metadata *pmd,
			       dm_block_t *result)
{
	int r;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock;

	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			    &sb_validator, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	*result = le64_to_cpu(disk_super->held_root);

	dm_bm_unlock(sblock);

	return 0;
}

int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
			      dm_block_t *result)
{
	int r = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		r = __get_metadata_snap(pmd, result);
	up_read(&pmd->root_lock);

	return r;
}
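/*
 * The three *_metadata_snap() helpers above implement the pool's metadata
 * snapshot: reserve shadows the superblock and records its location in
 * held_root so userspace tools can read a stable copy of the mapping and
 * device-details trees while the pool stays live; release drops those
 * references again, and get simply reports the current held_root.
 */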
int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev,
			     struct dm_thin_device **td)
{
	int r = -EINVAL;

	down_write(&pmd->root_lock);
	if (!pmd->fail_io)
		r = __open_device(pmd, dev, 0, td);
	up_write(&pmd->root_lock);

	return r;
}

int dm_pool_close_thin_device(struct dm_thin_device *td)
{
	down_write(&td->pmd->root_lock);
	__close_device(td);
	up_write(&td->pmd->root_lock);

	return 0;
}
dm_thin_id dm_thin_dev_id(struct dm_thin_device *td)
{
	return td->id;
}

/*
 * Check whether @time (of block creation) is older than @td's last snapshot.
 * If so then the associated block is shared with the last snapshot device.
 * Any block on a device created *after* the device last got snapshotted is
 * necessarily not shared.
 */
static bool __snapshotted_since(struct dm_thin_device *td, uint32_t time)
{
	return td->snapshotted_time > time;
}
static void unpack_lookup_result(struct dm_thin_device *td, __le64 value,
				 struct dm_thin_lookup_result *result)
{
	uint64_t block_time = 0;
	dm_block_t exception_block;
	uint32_t exception_time;

	block_time = le64_to_cpu(value);
	unpack_block_time(block_time, &exception_block, &exception_time);
	result->block = exception_block;
	result->shared = __snapshotted_since(td, exception_time);
}
static int __find_block(struct dm_thin_device *td, dm_block_t block,
			int can_issue_io, struct dm_thin_lookup_result *result)
{
	int r;
	__le64 value;
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };
	struct dm_btree_info *info;

	if (can_issue_io)
		info = &pmd->info;
	else
		info = &pmd->nb_info;

	r = dm_btree_lookup(info, pmd->root, keys, &value);
	if (!r)
		unpack_lookup_result(td, value, result);

	return r;
}

int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
		       int can_issue_io, struct dm_thin_lookup_result *result)
{
	int r;
	struct dm_pool_metadata *pmd = td->pmd;

	down_read(&pmd->root_lock);
	if (pmd->fail_io) {
		up_read(&pmd->root_lock);
		return -EINVAL;
	}

	r = __find_block(td, block, can_issue_io, result);

	up_read(&pmd->root_lock);
	return r;
}
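/*
 * can_issue_io selects between the two btree infos in __find_block() above:
 * callers that must not block use pmd->nb_info, which runs on the
 * non-blocking transaction-manager clone created in
 * __create_persistent_data_objects().
 */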
static int __find_next_mapped_block(struct dm_thin_device *td, dm_block_t block,
				    dm_block_t *vblock,
				    struct dm_thin_lookup_result *result)
{
	int r;
	__le64 value;
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };

	r = dm_btree_lookup_next(&pmd->info, pmd->root, keys, vblock, &value);
	if (!r)
		unpack_lookup_result(td, value, result);

	return r;
}
static int __find_mapped_range(struct dm_thin_device *td,
			       dm_block_t begin, dm_block_t end,
			       dm_block_t *thin_begin, dm_block_t *thin_end,
			       dm_block_t *pool_begin, bool *maybe_shared)
{
	int r;
	dm_block_t pool_end;
	struct dm_thin_lookup_result lookup;

	if (end < begin)
		return -ENODATA;

	r = __find_next_mapped_block(td, begin, &begin, &lookup);
	if (r)
		return r;

	if (begin >= end)
		return -ENODATA;

	*thin_begin = begin;
	*pool_begin = lookup.block;
	*maybe_shared = lookup.shared;

	begin++;
	pool_end = *pool_begin + 1;
	while (begin != end) {
		r = __find_block(td, begin, true, &lookup);
		if (r) {
			if (r == -ENODATA)
				break;
			else
				return r;
		}

		if ((lookup.block != pool_end) ||
		    (lookup.shared != *maybe_shared))
			break;

		pool_end++;
		begin++;
	}

	*thin_end = begin;
	return 0;
}
int dm_thin_find_mapped_range(struct dm_thin_device *td,
			      dm_block_t begin, dm_block_t end,
			      dm_block_t *thin_begin, dm_block_t *thin_end,
			      dm_block_t *pool_begin, bool *maybe_shared)
{
	int r = -EINVAL;
	struct dm_pool_metadata *pmd = td->pmd;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io) {
		r = __find_mapped_range(td, begin, end, thin_begin, thin_end,
					pool_begin, maybe_shared);
	}
	up_read(&pmd->root_lock);

	return r;
}
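/*
 * __insert() below stamps each new mapping with the pool's current time via
 * pack_block_time(); __snapshotted_since() compares that stamp with the
 * device's snapshotted_time to decide whether a block may be shared with a
 * snapshot.
 */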
static int __insert(struct dm_thin_device *td, dm_block_t block,
		    dm_block_t data_block)
{
	int r, inserted;
	__le64 value;
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };

	value = cpu_to_le64(pack_block_time(data_block, pmd->time));
	__dm_bless_for_disk(&value);

	r = dm_btree_insert_notify(&pmd->info, pmd->root, keys, &value,
				   &pmd->root, &inserted);
	if (r)
		return r;

	td->changed = true;
	if (inserted)
		td->mapped_blocks++;

	return 0;
}

int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
			 dm_block_t data_block)
{
	int r = -EINVAL;

	down_write(&td->pmd->root_lock);
	if (!td->pmd->fail_io)
		r = __insert(td, block, data_block);
	up_write(&td->pmd->root_lock);

	return r;
}
*td
, dm_block_t block
)
1584 struct dm_pool_metadata
*pmd
= td
->pmd
;
1585 dm_block_t keys
[2] = { td
->id
, block
};
1587 r
= dm_btree_remove(&pmd
->info
, pmd
->root
, keys
, &pmd
->root
);
1591 td
->mapped_blocks
--;
1597 static int __remove_range(struct dm_thin_device
*td
, dm_block_t begin
, dm_block_t end
)
1600 unsigned count
, total_count
= 0;
1601 struct dm_pool_metadata
*pmd
= td
->pmd
;
1602 dm_block_t keys
[1] = { td
->id
};
1604 dm_block_t mapping_root
;
1607 * Find the mapping tree
1609 r
= dm_btree_lookup(&pmd
->tl_info
, pmd
->root
, keys
, &value
);
1614 * Remove from the mapping tree, taking care to inc the
1615 * ref count so it doesn't get deleted.
1617 mapping_root
= le64_to_cpu(value
);
1618 dm_tm_inc(pmd
->tm
, mapping_root
);
1619 r
= dm_btree_remove(&pmd
->tl_info
, pmd
->root
, keys
, &pmd
->root
);
1624 * Remove leaves stops at the first unmapped entry, so we have to
1625 * loop round finding mapped ranges.
1627 while (begin
< end
) {
1628 r
= dm_btree_lookup_next(&pmd
->bl_info
, mapping_root
, &begin
, &begin
, &value
);
1638 r
= dm_btree_remove_leaves(&pmd
->bl_info
, mapping_root
, &begin
, end
, &mapping_root
, &count
);
1642 total_count
+= count
;
1645 td
->mapped_blocks
-= total_count
;
1649 * Reinsert the mapping tree.
1651 value
= cpu_to_le64(mapping_root
);
1652 __dm_bless_for_disk(&value
);
1653 return dm_btree_insert(&pmd
->tl_info
, pmd
->root
, keys
, &value
, &pmd
->root
);
int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
{
	int r = -EINVAL;

	down_write(&td->pmd->root_lock);
	if (!td->pmd->fail_io)
		r = __remove(td, block);
	up_write(&td->pmd->root_lock);

	return r;
}

int dm_thin_remove_range(struct dm_thin_device *td,
			 dm_block_t begin, dm_block_t end)
{
	int r = -EINVAL;

	down_write(&td->pmd->root_lock);
	if (!td->pmd->fail_io)
		r = __remove_range(td, begin, end);
	up_write(&td->pmd->root_lock);

	return r;
}
int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
{
	int r;
	uint32_t ref_count;

	down_read(&pmd->root_lock);
	r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
	if (!r)
		*result = (ref_count > 1);
	up_read(&pmd->root_lock);

	return r;
}
int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
{
	int r = 0;

	down_write(&pmd->root_lock);
	for (; b != e; b++) {
		r = dm_sm_inc_block(pmd->data_sm, b);
		if (r)
			break;
	}
	up_write(&pmd->root_lock);

	return r;
}

int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
{
	int r = 0;

	down_write(&pmd->root_lock);
	for (; b != e; b++) {
		r = dm_sm_dec_block(pmd->data_sm, b);
		if (r)
			break;
	}
	up_write(&pmd->root_lock);

	return r;
}
bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
{
	int r;

	down_read(&td->pmd->root_lock);
	r = td->changed;
	up_read(&td->pmd->root_lock);

	return r;
}

bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd)
{
	bool r = false;
	struct dm_thin_device *td, *tmp;

	down_read(&pmd->root_lock);
	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
		if (td->changed) {
			r = true;
			break;
		}
	}
	up_read(&pmd->root_lock);

	return r;
}
bool dm_thin_aborted_changes(struct dm_thin_device *td)
{
	bool r;

	down_read(&td->pmd->root_lock);
	r = td->aborted_with_changes;
	up_read(&td->pmd->root_lock);

	return r;
}
int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result)
{
	int r = -EINVAL;

	down_write(&pmd->root_lock);
	if (!pmd->fail_io)
		r = dm_sm_new_block(pmd->data_sm, result);
	up_write(&pmd->root_lock);

	return r;
}
int dm_pool_commit_metadata(struct dm_pool_metadata *pmd)
{
	int r = -EINVAL;

	down_write(&pmd->root_lock);
	if (pmd->fail_io)
		goto out;

	r = __commit_transaction(pmd);
	if (r < 0)
		goto out;

	/*
	 * Open the next transaction.
	 */
	r = __begin_transaction(pmd);
out:
	up_write(&pmd->root_lock);
	return r;
}
static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd)
{
	struct dm_thin_device *td;

	list_for_each_entry(td, &pmd->thin_devices, list)
		td->aborted_with_changes = td->changed;
}

int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
{
	int r = -EINVAL;

	down_write(&pmd->root_lock);
	if (pmd->fail_io)
		goto out;

	__set_abort_with_changes_flags(pmd);
	__destroy_persistent_data_objects(pmd);
	r = __create_persistent_data_objects(pmd, false);
	if (r)
		pmd->fail_io = true;

out:
	up_write(&pmd->root_lock);

	return r;
}
int dm_pool_get_free_block_count(struct dm_pool_metadata *pmd, dm_block_t *result)
{
	int r = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		r = dm_sm_get_nr_free(pmd->data_sm, result);
	up_read(&pmd->root_lock);

	return r;
}
int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
					  dm_block_t *result)
{
	int r = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io) {
		r = dm_sm_get_nr_free(pmd->metadata_sm, result);

		if (!r) {
			if (*result < pmd->metadata_reserve)
				/*
				 * You'd never be able to free anything.
				 */
				*result = 0;
			else
				*result -= pmd->metadata_reserve;
		}
	}
	up_read(&pmd->root_lock);

	return r;
}
int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
				  dm_block_t *result)
{
	int r = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		r = dm_sm_get_nr_blocks(pmd->metadata_sm, result);
	up_read(&pmd->root_lock);

	return r;
}

int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result)
{
	int r = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		r = dm_sm_get_nr_blocks(pmd->data_sm, result);
	up_read(&pmd->root_lock);

	return r;
}
int dm_thin_get_mapped_count(struct dm_thin_device *td, dm_block_t *result)
{
	int r = -EINVAL;
	struct dm_pool_metadata *pmd = td->pmd;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io) {
		*result = td->mapped_blocks;
		r = 0;
	}
	up_read(&pmd->root_lock);

	return r;
}
static int __highest_block(struct dm_thin_device *td, dm_block_t *result)
{
	int r;
	__le64 value_le;
	dm_block_t thin_root;
	struct dm_pool_metadata *pmd = td->pmd;

	r = dm_btree_lookup(&pmd->tl_info, pmd->root, &td->id, &value_le);
	if (r)
		return r;

	thin_root = le64_to_cpu(value_le);

	return dm_btree_find_highest_key(&pmd->bl_info, thin_root, result);
}

int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
				     dm_block_t *result)
{
	int r = -EINVAL;
	struct dm_pool_metadata *pmd = td->pmd;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		r = __highest_block(td, result);
	up_read(&pmd->root_lock);

	return r;
}
static int __resize_space_map(struct dm_space_map *sm, dm_block_t new_count)
{
	int r;
	dm_block_t old_count;

	r = dm_sm_get_nr_blocks(sm, &old_count);
	if (r)
		return r;

	if (new_count == old_count)
		return 0;

	if (new_count < old_count) {
		DMERR("cannot reduce size of space map");
		return -EINVAL;
	}

	return dm_sm_extend(sm, new_count - old_count);
}

int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
{
	int r = -EINVAL;

	down_write(&pmd->root_lock);
	if (!pmd->fail_io)
		r = __resize_space_map(pmd->data_sm, new_count);
	up_write(&pmd->root_lock);

	return r;
}

int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
{
	int r = -EINVAL;

	down_write(&pmd->root_lock);
	if (!pmd->fail_io) {
		r = __resize_space_map(pmd->metadata_sm, new_count);
		if (!r)
			__set_metadata_reserve(pmd);
	}
	up_write(&pmd->root_lock);

	return r;
}
void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
{
	down_write(&pmd->root_lock);
	dm_bm_set_read_only(pmd->bm);
	up_write(&pmd->root_lock);
}

void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd)
{
	down_write(&pmd->root_lock);
	dm_bm_set_read_write(pmd->bm);
	up_write(&pmd->root_lock);
}
int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
					dm_block_t threshold,
					dm_sm_threshold_fn fn,
					void *context)
{
	int r;

	down_write(&pmd->root_lock);
	r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context);
	up_write(&pmd->root_lock);

	return r;
}
int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
{
	int r;
	struct dm_block *sblock;
	struct thin_disk_superblock *disk_super;

	down_write(&pmd->root_lock);
	pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG;

	r = superblock_lock(pmd, &sblock);
	if (r) {
		DMERR("couldn't lock superblock");
		goto out;
	}

	disk_super = dm_block_data(sblock);
	disk_super->flags = cpu_to_le32(pmd->flags);

	dm_bm_unlock(sblock);
out:
	up_write(&pmd->root_lock);
	return r;
}
bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
{
	bool needs_check;

	down_read(&pmd->root_lock);
	needs_check = pmd->flags & THIN_METADATA_NEEDS_CHECK_FLAG;
	up_read(&pmd->root_lock);

	return needs_check;
}
void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd)
{
	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		dm_tm_issue_prefetches(pmd->tm);
	up_read(&pmd->root_lock);
}