drivers/md/dm-thin-metadata.c
1 /*
2 * Copyright (C) 2011-2012 Red Hat, Inc.
4 * This file is released under the GPL.
5 */
7 #include "dm-thin-metadata.h"
8 #include "persistent-data/dm-btree.h"
9 #include "persistent-data/dm-space-map.h"
10 #include "persistent-data/dm-space-map-disk.h"
11 #include "persistent-data/dm-transaction-manager.h"
13 #include <linux/list.h>
14 #include <linux/device-mapper.h>
15 #include <linux/workqueue.h>
17 /*--------------------------------------------------------------------------
18 * As far as the metadata goes, there is:
20 * - A superblock in block zero, taking up fewer than 512 bytes for
21 * atomic writes.
23 * - A space map managing the metadata blocks.
25 * - A space map managing the data blocks.
27 * - A btree mapping our internal thin dev ids onto struct disk_device_details.
29 * - A hierarchical btree, with 2 levels which effectively maps (thin
30 * dev id, virtual block) -> block_time. Block time is a 64-bit
31 * field holding the time in the low 24 bits, and block in the top 40
32 * bits.
34 * BTrees consist solely of btree_nodes, each of which fills a block. Some
35 * are internal nodes, whose values are __le64 pointers to other
36 * nodes. Leaf nodes can store data of any reasonable size (i.e. much
37 * smaller than the block size). The nodes consist of the header,
38 * followed by an array of keys, followed by an array of values. We have
39 * to binary search on the keys so they're all held together to help the
40 * cpu cache.
42 * Space maps have 2 btrees:
44 * - One maps a uint64_t onto a struct index_entry, which points to a
45 * bitmap block and holds some details such as how many free entries
46 * there are.
48 * - The bitmap blocks have a header (for the checksum); the rest of
49 * the block is pairs of bits with the following meaning:
51 * 0 - ref count is 0
52 * 1 - ref count is 1
53 * 2 - ref count is 2
54 * 3 - ref count is higher than 2
56 * - If the count is higher than 2 then the ref count is entered in a
57 * second btree that directly maps the block_address to a uint32_t ref
58 * count.
60 * The space map metadata variant doesn't have a bitmap btree. Instead
61 * it has one single block's worth of index_entries. This avoids
62 * recursive issues with the bitmap btree needing to allocate space in
63 * order to insert. With a small data block size such as 64k the
64 * metadata supports data devices that are hundreds of terabytes.
66 * The space maps allocate space linearly from front to back. Space that
67 * is freed in a transaction is never recycled within that transaction.
68 * To try to avoid fragmenting _free_ space, the allocator always goes
69 * back and fills in gaps.
71 * All metadata io is in THIN_METADATA_BLOCK_SIZE sized/aligned chunks
72 * from the block manager.
73 *--------------------------------------------------------------------------*/
75 #define DM_MSG_PREFIX "thin metadata"
77 #define THIN_SUPERBLOCK_MAGIC 27022010
78 #define THIN_SUPERBLOCK_LOCATION 0
79 #define THIN_VERSION 2
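/*
 * SECTOR_TO_BLOCK_SHIFT converts a count of 512-byte sectors into 4KiB
 * metadata blocks: one metadata block is 1 << 3 == 8 sectors.
 */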
80 #define SECTOR_TO_BLOCK_SHIFT 3
83 * For btree insert:
84 *   3 for btree insert +
85 *   2 for btree lookup used within the space map
86 * For btree remove:
87 *   2 for shadow spine +
88 *   4 to rebalance 3 child nodes
90 #define THIN_MAX_CONCURRENT_LOCKS 6
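/*
 * The remove path is the worst case: 2 + 4 = 6 concurrent locks, which is
 * what the value above covers (the insert path needs only 3 + 2 = 5).
 */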
92 /* This should be plenty */
93 #define SPACE_MAP_ROOT_SIZE 128
96 * Little endian on-disk superblock and device details.
98 struct thin_disk_superblock {
99 __le32 csum; /* Checksum of superblock except for this field. */
100 __le32 flags;
101 __le64 blocknr; /* This block number, dm_block_t. */
103 __u8 uuid[16];
104 __le64 magic;
105 __le32 version;
106 __le32 time;
108 __le64 trans_id;
111 * Root held by userspace transactions.
113 __le64 held_root;
115 __u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
116 __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
119 * 2-level btree mapping (dev_id, (dev block, time)) -> data block
121 __le64 data_mapping_root;
124 * Device detail root mapping dev_id -> device_details
126 __le64 device_details_root;
128 __le32 data_block_size; /* In 512-byte sectors. */
130 __le32 metadata_block_size; /* In 512-byte sectors. */
131 __le64 metadata_nr_blocks;
133 __le32 compat_flags;
134 __le32 compat_ro_flags;
135 __le32 incompat_flags;
136 } __packed;
138 struct disk_device_details {
139 __le64 mapped_blocks;
140 __le64 transaction_id; /* When created. */
141 __le32 creation_time;
142 __le32 snapshotted_time;
143 } __packed;
145 struct dm_pool_metadata {
146 struct hlist_node hash;
148 struct block_device *bdev;
149 struct dm_block_manager *bm;
150 struct dm_space_map *metadata_sm;
151 struct dm_space_map *data_sm;
152 struct dm_transaction_manager *tm;
153 struct dm_transaction_manager *nb_tm;
156 * Two-level btree.
157 * First level holds thin_dev_t.
158 * Second level holds mappings.
160 struct dm_btree_info info;
163 * Non-blocking version of the above.
165 struct dm_btree_info nb_info;
168 * Just the top level for deleting whole devices.
170 struct dm_btree_info tl_info;
173 * Just the bottom level for creating new devices.
175 struct dm_btree_info bl_info;
178 * Describes the device details btree.
180 struct dm_btree_info details_info;
182 struct rw_semaphore root_lock;
183 uint32_t time;
184 dm_block_t root;
185 dm_block_t details_root;
186 struct list_head thin_devices;
187 uint64_t trans_id;
188 unsigned long flags;
189 sector_t data_block_size;
192 * Pre-commit callback.
194 * This allows the thin provisioning target to run a callback before
195 * the metadata are committed.
197 dm_pool_pre_commit_fn pre_commit_fn;
198 void *pre_commit_context;
201 * We reserve a section of the metadata for commit overhead.
202 * All reported space does *not* include this.
204 dm_block_t metadata_reserve;
207 * Set if a transaction has to be aborted but the attempt to roll back
208 * to the previous (good) transaction failed. The only pool metadata
209 * operation possible in this state is the closing of the device.
211 bool fail_io:1;
214 * Set once a thin-pool has been accessed through one of the interfaces
215 * that imply the pool is in-service (e.g. thin devices created/deleted,
216 * thin-pool message, metadata snapshots, etc).
218 bool in_service:1;
221 * Reading the space map roots can fail, so we read them into these
222 * buffers before the superblock is locked and updated.
224 __u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
225 __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
228 struct dm_thin_device {
229 struct list_head list;
230 struct dm_pool_metadata *pmd;
231 dm_thin_id id;
233 int open_count;
234 bool changed:1;
235 bool aborted_with_changes:1;
236 uint64_t mapped_blocks;
237 uint64_t transaction_id;
238 uint32_t creation_time;
239 uint32_t snapshotted_time;
242 /*----------------------------------------------------------------
243 * superblock validator
244 *--------------------------------------------------------------*/
246 #define SUPERBLOCK_CSUM_XOR 160774
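/*
 * The superblock validator stamps the block's own location and a checksum
 * (computed over everything after the csum field) before each write, and
 * verifies location, magic and checksum when the block is read back.
 */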
248 static void sb_prepare_for_write(struct dm_block_validator *v,
249 struct dm_block *b,
250 size_t block_size)
252 struct thin_disk_superblock *disk_super = dm_block_data(b);
254 disk_super->blocknr = cpu_to_le64(dm_block_location(b));
255 disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
256 block_size - sizeof(__le32),
257 SUPERBLOCK_CSUM_XOR));
260 static int sb_check(struct dm_block_validator *v,
261 struct dm_block *b,
262 size_t block_size)
264 struct thin_disk_superblock *disk_super = dm_block_data(b);
265 __le32 csum_le;
267 if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
268 DMERR("sb_check failed: blocknr %llu: "
269 "wanted %llu", le64_to_cpu(disk_super->blocknr),
270 (unsigned long long)dm_block_location(b));
271 return -ENOTBLK;
274 if (le64_to_cpu(disk_super->magic) != THIN_SUPERBLOCK_MAGIC) {
275 DMERR("sb_check failed: magic %llu: "
276 "wanted %llu", le64_to_cpu(disk_super->magic),
277 (unsigned long long)THIN_SUPERBLOCK_MAGIC);
278 return -EILSEQ;
281 csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
282 block_size - sizeof(__le32),
283 SUPERBLOCK_CSUM_XOR));
284 if (csum_le != disk_super->csum) {
285 DMERR("sb_check failed: csum %u: wanted %u",
286 le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
287 return -EILSEQ;
290 return 0;
293 static struct dm_block_validator sb_validator = {
294 .name = "superblock",
295 .prepare_for_write = sb_prepare_for_write,
296 .check = sb_check
299 /*----------------------------------------------------------------
300 * Methods for the btree value types
301 *--------------------------------------------------------------*/
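/*
 * A mapped value packs the data block and the creation time into a single
 * 64-bit word: block in the top 40 bits, time in the low 24 bits, e.g.
 * pack_block_time(5, 3) == (5ULL << 24) | 3 == 0x5000003.
 */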
303 static uint64_t pack_block_time(dm_block_t b, uint32_t t)
305 return (b << 24) | t;
308 static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
310 *b = v >> 24;
311 *t = v & ((1 << 24) - 1);
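/*
 * Value type callbacks for the mapping btrees: when a mapping entry is
 * shared or dropped, the reference count of the data block it points to
 * is adjusted in the data space map.
 */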
314 static void data_block_inc(void *context, const void *value_le)
316 struct dm_space_map *sm = context;
317 __le64 v_le;
318 uint64_t b;
319 uint32_t t;
321 memcpy(&v_le, value_le, sizeof(v_le));
322 unpack_block_time(le64_to_cpu(v_le), &b, &t);
323 dm_sm_inc_block(sm, b);
326 static void data_block_dec(void *context, const void *value_le)
328 struct dm_space_map *sm = context;
329 __le64 v_le;
330 uint64_t b;
331 uint32_t t;
333 memcpy(&v_le, value_le, sizeof(v_le));
334 unpack_block_time(le64_to_cpu(v_le), &b, &t);
335 dm_sm_dec_block(sm, b);
338 static int data_block_equal(void *context, const void *value1_le, const void *value2_le)
340 __le64 v1_le, v2_le;
341 uint64_t b1, b2;
342 uint32_t t;
344 memcpy(&v1_le, value1_le, sizeof(v1_le));
345 memcpy(&v2_le, value2_le, sizeof(v2_le));
346 unpack_block_time(le64_to_cpu(v1_le), &b1, &t);
347 unpack_block_time(le64_to_cpu(v2_le), &b2, &t);
349 return b1 == b2;
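/*
 * The top level of the mapping btree stores, per thin device, the root of
 * that device's own mapping btree as a __le64. Incrementing shares the
 * whole subtree; decrementing deletes it.
 */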
352 static void subtree_inc(void *context, const void *value)
354 struct dm_btree_info *info = context;
355 __le64 root_le;
356 uint64_t root;
358 memcpy(&root_le, value, sizeof(root_le));
359 root = le64_to_cpu(root_le);
360 dm_tm_inc(info->tm, root);
363 static void subtree_dec(void *context, const void *value)
365 struct dm_btree_info *info = context;
366 __le64 root_le;
367 uint64_t root;
369 memcpy(&root_le, value, sizeof(root_le));
370 root = le64_to_cpu(root_le);
371 if (dm_btree_del(info, root))
372 DMERR("btree delete failed");
375 static int subtree_equal(void *context, const void *value1_le, const void *value2_le)
377 __le64 v1_le, v2_le;
378 memcpy(&v1_le, value1_le, sizeof(v1_le));
379 memcpy(&v2_le, value2_le, sizeof(v2_le));
381 return v1_le == v2_le;
384 /*----------------------------------------------------------------*/
387 * Variant that is used for in-core only changes or code that
388 * shouldn't put the pool in service on its own (e.g. commit).
390 static inline void pmd_write_lock_in_core(struct dm_pool_metadata *pmd)
391 __acquires(pmd->root_lock)
393 down_write(&pmd->root_lock);
396 static inline void pmd_write_lock(struct dm_pool_metadata *pmd)
398 pmd_write_lock_in_core(pmd);
399 if (unlikely(!pmd->in_service))
400 pmd->in_service = true;
403 static inline void pmd_write_unlock(struct dm_pool_metadata *pmd)
404 __releases(pmd->root_lock)
406 up_write(&pmd->root_lock);
409 /*----------------------------------------------------------------*/
411 static int superblock_lock_zero(struct dm_pool_metadata *pmd,
412 struct dm_block **sblock)
414 return dm_bm_write_lock_zero(pmd->bm, THIN_SUPERBLOCK_LOCATION,
415 &sb_validator, sblock);
418 static int superblock_lock(struct dm_pool_metadata *pmd,
419 struct dm_block **sblock)
421 return dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
422 &sb_validator, sblock);
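/*
 * Scan the superblock location word by word; an all-zeroes block means the
 * device has never been formatted, so the caller must format rather than
 * open existing metadata.
 */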
425 static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
427 int r;
428 unsigned i;
429 struct dm_block *b;
430 __le64 *data_le, zero = cpu_to_le64(0);
431 unsigned block_size = dm_bm_block_size(bm) / sizeof(__le64);
434 * We can't use a validator here - it may be all zeroes.
436 r = dm_bm_read_lock(bm, THIN_SUPERBLOCK_LOCATION, NULL, &b);
437 if (r)
438 return r;
440 data_le = dm_block_data(b);
441 *result = 1;
442 for (i = 0; i < block_size; i++) {
443 if (data_le[i] != zero) {
444 *result = 0;
445 break;
449 dm_bm_unlock(b);
451 return 0;
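/*
 * Set up the five btree_info descriptors: info is the full two-level
 * mapping tree, nb_info its non-blocking clone, tl_info just the top
 * level (for deleting whole devices), bl_info just the bottom level (for
 * creating new devices) and details_info the device details tree.
 */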
454 static void __setup_btree_details(struct dm_pool_metadata *pmd)
456 pmd->info.tm = pmd->tm;
457 pmd->info.levels = 2;
458 pmd->info.value_type.context = pmd->data_sm;
459 pmd->info.value_type.size = sizeof(__le64);
460 pmd->info.value_type.inc = data_block_inc;
461 pmd->info.value_type.dec = data_block_dec;
462 pmd->info.value_type.equal = data_block_equal;
464 memcpy(&pmd->nb_info, &pmd->info, sizeof(pmd->nb_info));
465 pmd->nb_info.tm = pmd->nb_tm;
467 pmd->tl_info.tm = pmd->tm;
468 pmd->tl_info.levels = 1;
469 pmd->tl_info.value_type.context = &pmd->bl_info;
470 pmd->tl_info.value_type.size = sizeof(__le64);
471 pmd->tl_info.value_type.inc = subtree_inc;
472 pmd->tl_info.value_type.dec = subtree_dec;
473 pmd->tl_info.value_type.equal = subtree_equal;
475 pmd->bl_info.tm = pmd->tm;
476 pmd->bl_info.levels = 1;
477 pmd->bl_info.value_type.context = pmd->data_sm;
478 pmd->bl_info.value_type.size = sizeof(__le64);
479 pmd->bl_info.value_type.inc = data_block_inc;
480 pmd->bl_info.value_type.dec = data_block_dec;
481 pmd->bl_info.value_type.equal = data_block_equal;
483 pmd->details_info.tm = pmd->tm;
484 pmd->details_info.levels = 1;
485 pmd->details_info.value_type.context = NULL;
486 pmd->details_info.value_type.size = sizeof(struct disk_device_details);
487 pmd->details_info.value_type.inc = NULL;
488 pmd->details_info.value_type.dec = NULL;
489 pmd->details_info.value_type.equal = NULL;
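/*
 * Snapshot the space map roots into pmd's buffers. copy_sm_roots() later
 * transfers them into the superblock, so the read (which may fail) happens
 * before the superblock is locked for the commit.
 */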
492 static int save_sm_roots(struct dm_pool_metadata *pmd)
494 int r;
495 size_t len;
497 r = dm_sm_root_size(pmd->metadata_sm, &len);
498 if (r < 0)
499 return r;
501 r = dm_sm_copy_root(pmd->metadata_sm, &pmd->metadata_space_map_root, len);
502 if (r < 0)
503 return r;
505 r = dm_sm_root_size(pmd->data_sm, &len);
506 if (r < 0)
507 return r;
509 return dm_sm_copy_root(pmd->data_sm, &pmd->data_space_map_root, len);
512 static void copy_sm_roots(struct dm_pool_metadata *pmd,
513 struct thin_disk_superblock *disk)
515 memcpy(&disk->metadata_space_map_root,
516 &pmd->metadata_space_map_root,
517 sizeof(pmd->metadata_space_map_root));
519 memcpy(&disk->data_space_map_root,
520 &pmd->data_space_map_root,
521 sizeof(pmd->data_space_map_root));
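/*
 * Write the very first superblock for a freshly formatted pool: commit the
 * data space map, pre-commit the transaction manager, save the space map
 * roots, then fill in block zero and commit it. The usable metadata size
 * is capped at THIN_METADATA_MAX_SECTORS.
 */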
524 static int __write_initial_superblock(struct dm_pool_metadata *pmd)
526 int r;
527 struct dm_block *sblock;
528 struct thin_disk_superblock *disk_super;
529 sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT;
531 if (bdev_size > THIN_METADATA_MAX_SECTORS)
532 bdev_size = THIN_METADATA_MAX_SECTORS;
534 r = dm_sm_commit(pmd->data_sm);
535 if (r < 0)
536 return r;
538 r = dm_tm_pre_commit(pmd->tm);
539 if (r < 0)
540 return r;
542 r = save_sm_roots(pmd);
543 if (r < 0)
544 return r;
546 r = superblock_lock_zero(pmd, &sblock);
547 if (r)
548 return r;
550 disk_super = dm_block_data(sblock);
551 disk_super->flags = 0;
552 memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
553 disk_super->magic = cpu_to_le64(THIN_SUPERBLOCK_MAGIC);
554 disk_super->version = cpu_to_le32(THIN_VERSION);
555 disk_super->time = 0;
556 disk_super->trans_id = 0;
557 disk_super->held_root = 0;
559 copy_sm_roots(pmd, disk_super);
561 disk_super->data_mapping_root = cpu_to_le64(pmd->root);
562 disk_super->device_details_root = cpu_to_le64(pmd->details_root);
563 disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE);
564 disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT);
565 disk_super->data_block_size = cpu_to_le32(pmd->data_block_size);
567 return dm_tm_commit(pmd->tm, sblock);
570 static int __format_metadata(struct dm_pool_metadata *pmd)
572 int r;
574 r = dm_tm_create_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
575 &pmd->tm, &pmd->metadata_sm);
576 if (r < 0) {
577 DMERR("tm_create_with_sm failed");
578 return r;
581 pmd->data_sm = dm_sm_disk_create(pmd->tm, 0);
582 if (IS_ERR(pmd->data_sm)) {
583 DMERR("sm_disk_create failed");
584 r = PTR_ERR(pmd->data_sm);
585 goto bad_cleanup_tm;
588 pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm);
589 if (!pmd->nb_tm) {
590 DMERR("could not create non-blocking clone tm");
591 r = -ENOMEM;
592 goto bad_cleanup_data_sm;
595 __setup_btree_details(pmd);
597 r = dm_btree_empty(&pmd->info, &pmd->root);
598 if (r < 0)
599 goto bad_cleanup_nb_tm;
601 r = dm_btree_empty(&pmd->details_info, &pmd->details_root);
602 if (r < 0) {
603 DMERR("couldn't create devices root");
604 goto bad_cleanup_nb_tm;
607 r = __write_initial_superblock(pmd);
608 if (r)
609 goto bad_cleanup_nb_tm;
611 return 0;
613 bad_cleanup_nb_tm:
614 dm_tm_destroy(pmd->nb_tm);
615 bad_cleanup_data_sm:
616 dm_sm_destroy(pmd->data_sm);
617 bad_cleanup_tm:
618 dm_tm_destroy(pmd->tm);
619 dm_sm_destroy(pmd->metadata_sm);
621 return r;
624 static int __check_incompat_features(struct thin_disk_superblock *disk_super,
625 struct dm_pool_metadata *pmd)
627 uint32_t features;
629 features = le32_to_cpu(disk_super->incompat_flags) & ~THIN_FEATURE_INCOMPAT_SUPP;
630 if (features) {
631 DMERR("could not access metadata due to unsupported optional features (%lx).",
632 (unsigned long)features);
633 return -EINVAL;
637 * Check for read-only metadata to skip the following RDWR checks.
639 if (get_disk_ro(pmd->bdev->bd_disk))
640 return 0;
642 features = le32_to_cpu(disk_super->compat_ro_flags) & ~THIN_FEATURE_COMPAT_RO_SUPP;
643 if (features) {
644 DMERR("could not access metadata RDWR due to unsupported optional features (%lx).",
645 (unsigned long)features);
646 return -EINVAL;
649 return 0;
652 static int __open_metadata(struct dm_pool_metadata *pmd)
654 int r;
655 struct dm_block *sblock;
656 struct thin_disk_superblock *disk_super;
658 r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
659 &sb_validator, &sblock);
660 if (r < 0) {
661 DMERR("couldn't read superblock");
662 return r;
665 disk_super = dm_block_data(sblock);
667 /* Verify the data block size hasn't changed */
668 if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
669 DMERR("changing the data block size (from %u to %llu) is not supported",
670 le32_to_cpu(disk_super->data_block_size),
671 (unsigned long long)pmd->data_block_size);
672 r = -EINVAL;
673 goto bad_unlock_sblock;
676 r = __check_incompat_features(disk_super, pmd);
677 if (r < 0)
678 goto bad_unlock_sblock;
680 r = dm_tm_open_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
681 disk_super->metadata_space_map_root,
682 sizeof(disk_super->metadata_space_map_root),
683 &pmd->tm, &pmd->metadata_sm);
684 if (r < 0) {
685 DMERR("tm_open_with_sm failed");
686 goto bad_unlock_sblock;
689 pmd->data_sm = dm_sm_disk_open(pmd->tm, disk_super->data_space_map_root,
690 sizeof(disk_super->data_space_map_root));
691 if (IS_ERR(pmd->data_sm)) {
692 DMERR("sm_disk_open failed");
693 r = PTR_ERR(pmd->data_sm);
694 goto bad_cleanup_tm;
697 pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm);
698 if (!pmd->nb_tm) {
699 DMERR("could not create non-blocking clone tm");
700 r = -ENOMEM;
701 goto bad_cleanup_data_sm;
704 __setup_btree_details(pmd);
705 dm_bm_unlock(sblock);
707 return 0;
709 bad_cleanup_data_sm:
710 dm_sm_destroy(pmd->data_sm);
711 bad_cleanup_tm:
712 dm_tm_destroy(pmd->tm);
713 dm_sm_destroy(pmd->metadata_sm);
714 bad_unlock_sblock:
715 dm_bm_unlock(sblock);
717 return r;
720 static int __open_or_format_metadata(struct dm_pool_metadata *pmd, bool format_device)
722 int r, unformatted;
724 r = __superblock_all_zeroes(pmd->bm, &unformatted);
725 if (r)
726 return r;
728 if (unformatted)
729 return format_device ? __format_metadata(pmd) : -EPERM;
731 return __open_metadata(pmd);
734 static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool format_device)
736 int r;
738 pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
739 THIN_MAX_CONCURRENT_LOCKS);
740 if (IS_ERR(pmd->bm)) {
741 DMERR("could not create block manager");
742 r = PTR_ERR(pmd->bm);
743 pmd->bm = NULL;
744 return r;
747 r = __open_or_format_metadata(pmd, format_device);
748 if (r) {
749 dm_block_manager_destroy(pmd->bm);
750 pmd->bm = NULL;
753 return r;
756 static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd)
758 dm_sm_destroy(pmd->data_sm);
759 dm_sm_destroy(pmd->metadata_sm);
760 dm_tm_destroy(pmd->nb_tm);
761 dm_tm_destroy(pmd->tm);
762 dm_block_manager_destroy(pmd->bm);
765 static int __begin_transaction(struct dm_pool_metadata *pmd)
767 int r;
768 struct thin_disk_superblock *disk_super;
769 struct dm_block *sblock;
772 * We re-read the superblock every time. Shouldn't need to do this
773 * really.
775 r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
776 &sb_validator, &sblock);
777 if (r)
778 return r;
780 disk_super = dm_block_data(sblock);
781 pmd->time = le32_to_cpu(disk_super->time);
782 pmd->root = le64_to_cpu(disk_super->data_mapping_root);
783 pmd->details_root = le64_to_cpu(disk_super->device_details_root);
784 pmd->trans_id = le64_to_cpu(disk_super->trans_id);
785 pmd->flags = le32_to_cpu(disk_super->flags);
786 pmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
788 dm_bm_unlock(sblock);
789 return 0;
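/*
 * Flush the in-core counters of every changed thin device into the device
 * details btree. Devices that are no longer open are freed once written.
 */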
792 static int __write_changed_details(struct dm_pool_metadata *pmd)
794 int r;
795 struct dm_thin_device *td, *tmp;
796 struct disk_device_details details;
797 uint64_t key;
799 list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
800 if (!td->changed)
801 continue;
803 key = td->id;
805 details.mapped_blocks = cpu_to_le64(td->mapped_blocks);
806 details.transaction_id = cpu_to_le64(td->transaction_id);
807 details.creation_time = cpu_to_le32(td->creation_time);
808 details.snapshotted_time = cpu_to_le32(td->snapshotted_time);
809 __dm_bless_for_disk(&details);
811 r = dm_btree_insert(&pmd->details_info, pmd->details_root,
812 &key, &details, &pmd->details_root);
813 if (r)
814 return r;
816 if (td->open_count)
817 td->changed = 0;
818 else {
819 list_del(&td->list);
820 kfree(td);
824 return 0;
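/*
 * Commit sequence: run the pre-commit callback, write changed device
 * details, commit the data space map, pre-commit the transaction manager,
 * save the space map roots, then update and commit the superblock. Pools
 * that have never been put in service skip all of this.
 */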
827 static int __commit_transaction(struct dm_pool_metadata *pmd)
829 int r;
830 struct thin_disk_superblock *disk_super;
831 struct dm_block *sblock;
834 * The on-disk superblock must fit in a single 512-byte sector (checked below) so it can be written atomically.
836 BUILD_BUG_ON(sizeof(struct thin_disk_superblock) > 512);
837 BUG_ON(!rwsem_is_locked(&pmd->root_lock));
839 if (unlikely(!pmd->in_service))
840 return 0;
842 if (pmd->pre_commit_fn) {
843 r = pmd->pre_commit_fn(pmd->pre_commit_context);
844 if (r < 0) {
845 DMERR("pre-commit callback failed");
846 return r;
850 r = __write_changed_details(pmd);
851 if (r < 0)
852 return r;
854 r = dm_sm_commit(pmd->data_sm);
855 if (r < 0)
856 return r;
858 r = dm_tm_pre_commit(pmd->tm);
859 if (r < 0)
860 return r;
862 r = save_sm_roots(pmd);
863 if (r < 0)
864 return r;
866 r = superblock_lock(pmd, &sblock);
867 if (r)
868 return r;
870 disk_super = dm_block_data(sblock);
871 disk_super->time = cpu_to_le32(pmd->time);
872 disk_super->data_mapping_root = cpu_to_le64(pmd->root);
873 disk_super->device_details_root = cpu_to_le64(pmd->details_root);
874 disk_super->trans_id = cpu_to_le64(pmd->trans_id);
875 disk_super->flags = cpu_to_le32(pmd->flags);
877 copy_sm_roots(pmd, disk_super);
879 return dm_tm_commit(pmd->tm, sblock);
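/*
 * Reserve the smaller of 4096 metadata blocks or a tenth of the metadata
 * device for commit overhead; this reserve is hidden from the free block
 * counts reported to the pool target.
 */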
882 static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
884 int r;
885 dm_block_t total;
886 dm_block_t max_blocks = 4096; /* 16M */
888 r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
889 if (r) {
890 DMERR("could not get size of metadata device");
891 pmd->metadata_reserve = max_blocks;
892 } else
893 pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
896 struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
897 sector_t data_block_size,
898 bool format_device)
900 int r;
901 struct dm_pool_metadata *pmd;
903 pmd = kmalloc(sizeof(*pmd), GFP_KERNEL);
904 if (!pmd) {
905 DMERR("could not allocate metadata struct");
906 return ERR_PTR(-ENOMEM);
909 init_rwsem(&pmd->root_lock);
910 pmd->time = 0;
911 INIT_LIST_HEAD(&pmd->thin_devices);
912 pmd->fail_io = false;
913 pmd->in_service = false;
914 pmd->bdev = bdev;
915 pmd->data_block_size = data_block_size;
916 pmd->pre_commit_fn = NULL;
917 pmd->pre_commit_context = NULL;
919 r = __create_persistent_data_objects(pmd, format_device);
920 if (r) {
921 kfree(pmd);
922 return ERR_PTR(r);
925 r = __begin_transaction(pmd);
926 if (r < 0) {
927 if (dm_pool_metadata_close(pmd) < 0)
928 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
929 return ERR_PTR(r);
932 __set_metadata_reserve(pmd);
934 return pmd;
937 int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
939 int r;
940 unsigned open_devices = 0;
941 struct dm_thin_device *td, *tmp;
943 down_read(&pmd->root_lock);
944 list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
945 if (td->open_count)
946 open_devices++;
947 else {
948 list_del(&td->list);
949 kfree(td);
952 up_read(&pmd->root_lock);
954 if (open_devices) {
955 DMERR("attempt to close pmd when %u device(s) are still open",
956 open_devices);
957 return -EBUSY;
960 pmd_write_lock_in_core(pmd);
961 if (!pmd->fail_io && !dm_bm_is_read_only(pmd->bm)) {
962 r = __commit_transaction(pmd);
963 if (r < 0)
964 DMWARN("%s: __commit_transaction() failed, error = %d",
965 __func__, r);
967 pmd_write_unlock(pmd);
968 if (!pmd->fail_io)
969 __destroy_persistent_data_objects(pmd);
971 kfree(pmd);
972 return 0;
976 * __open_device: Returns @td corresponding to device with id @dev,
977 * creating it if @create is set and incrementing @td->open_count.
978 * On failure, @td is undefined.
980 static int __open_device(struct dm_pool_metadata *pmd,
981 dm_thin_id dev, int create,
982 struct dm_thin_device **td)
984 int r, changed = 0;
985 struct dm_thin_device *td2;
986 uint64_t key = dev;
987 struct disk_device_details details_le;
990 * If the device is already open, return it.
992 list_for_each_entry(td2, &pmd->thin_devices, list)
993 if (td2->id == dev) {
995 * May not create an already-open device.
997 if (create)
998 return -EEXIST;
1000 td2->open_count++;
1001 *td = td2;
1002 return 0;
1006 * Check the device exists.
1008 r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
1009 &key, &details_le);
1010 if (r) {
1011 if (r != -ENODATA || !create)
1012 return r;
1015 * Create new device.
1017 changed = 1;
1018 details_le.mapped_blocks = 0;
1019 details_le.transaction_id = cpu_to_le64(pmd->trans_id);
1020 details_le.creation_time = cpu_to_le32(pmd->time);
1021 details_le.snapshotted_time = cpu_to_le32(pmd->time);
1024 *td = kmalloc(sizeof(**td), GFP_NOIO);
1025 if (!*td)
1026 return -ENOMEM;
1028 (*td)->pmd = pmd;
1029 (*td)->id = dev;
1030 (*td)->open_count = 1;
1031 (*td)->changed = changed;
1032 (*td)->aborted_with_changes = false;
1033 (*td)->mapped_blocks = le64_to_cpu(details_le.mapped_blocks);
1034 (*td)->transaction_id = le64_to_cpu(details_le.transaction_id);
1035 (*td)->creation_time = le32_to_cpu(details_le.creation_time);
1036 (*td)->snapshotted_time = le32_to_cpu(details_le.snapshotted_time);
1038 list_add(&(*td)->list, &pmd->thin_devices);
1040 return 0;
1043 static void __close_device(struct dm_thin_device *td)
1045 --td->open_count;
1048 static int __create_thin(struct dm_pool_metadata *pmd,
1049 dm_thin_id dev)
1051 int r;
1052 dm_block_t dev_root;
1053 uint64_t key = dev;
1054 struct disk_device_details details_le;
1055 struct dm_thin_device *td;
1056 __le64 value;
1058 r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
1059 &key, &details_le);
1060 if (!r)
1061 return -EEXIST;
1064 * Create an empty btree for the mappings.
1066 r = dm_btree_empty(&pmd->bl_info, &dev_root);
1067 if (r)
1068 return r;
1071 * Insert it into the main mapping tree.
1073 value = cpu_to_le64(dev_root);
1074 __dm_bless_for_disk(&value);
1075 r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
1076 if (r) {
1077 dm_btree_del(&pmd->bl_info, dev_root);
1078 return r;
1081 r = __open_device(pmd, dev, 1, &td);
1082 if (r) {
1083 dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
1084 dm_btree_del(&pmd->bl_info, dev_root);
1085 return r;
1087 __close_device(td);
1089 return r;
1092 int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev)
1094 int r = -EINVAL;
1096 pmd_write_lock(pmd);
1097 if (!pmd->fail_io)
1098 r = __create_thin(pmd, dev);
1099 pmd_write_unlock(pmd);
1101 return r;
1104 static int __set_snapshot_details(struct dm_pool_metadata *pmd,
1105 struct dm_thin_device *snap,
1106 dm_thin_id origin, uint32_t time)
1108 int r;
1109 struct dm_thin_device *td;
1111 r = __open_device(pmd, origin, 0, &td);
1112 if (r)
1113 return r;
1115 td->changed = 1;
1116 td->snapshotted_time = time;
1118 snap->mapped_blocks = td->mapped_blocks;
1119 snap->snapshotted_time = time;
1120 __close_device(td);
1122 return 0;
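/*
 * Creating a snapshot shares the origin's mapping tree: the origin root's
 * reference count is incremented and reinserted under the new dev id, the
 * global time is bumped, and both devices record the new snapshotted_time.
 */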
1125 static int __create_snap(struct dm_pool_metadata *pmd,
1126 dm_thin_id dev, dm_thin_id origin)
1128 int r;
1129 dm_block_t origin_root;
1130 uint64_t key = origin, dev_key = dev;
1131 struct dm_thin_device *td;
1132 struct disk_device_details details_le;
1133 __le64 value;
1135 /* check this device is unused */
1136 r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
1137 &dev_key, &details_le);
1138 if (!r)
1139 return -EEXIST;
1141 /* find the mapping tree for the origin */
1142 r = dm_btree_lookup(&pmd->tl_info, pmd->root, &key, &value);
1143 if (r)
1144 return r;
1145 origin_root = le64_to_cpu(value);
1147 /* clone the origin, an inc will do */
1148 dm_tm_inc(pmd->tm, origin_root);
1150 /* insert into the main mapping tree */
1151 value = cpu_to_le64(origin_root);
1152 __dm_bless_for_disk(&value);
1153 key = dev;
1154 r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
1155 if (r) {
1156 dm_tm_dec(pmd->tm, origin_root);
1157 return r;
1160 pmd->time++;
1162 r = __open_device(pmd, dev, 1, &td);
1163 if (r)
1164 goto bad;
1166 r = __set_snapshot_details(pmd, td, origin, pmd->time);
1167 __close_device(td);
1169 if (r)
1170 goto bad;
1172 return 0;
1174 bad:
1175 dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
1176 dm_btree_remove(&pmd->details_info, pmd->details_root,
1177 &key, &pmd->details_root);
1178 return r;
1181 int dm_pool_create_snap(struct dm_pool_metadata *pmd,
1182 dm_thin_id dev,
1183 dm_thin_id origin)
1185 int r = -EINVAL;
1187 pmd_write_lock(pmd);
1188 if (!pmd->fail_io)
1189 r = __create_snap(pmd, dev, origin);
1190 pmd_write_unlock(pmd);
1192 return r;
1195 static int __delete_device(struct dm_pool_metadata *pmd, dm_thin_id dev)
1197 int r;
1198 uint64_t key = dev;
1199 struct dm_thin_device *td;
1201 /* TODO: failure should mark the transaction invalid */
1202 r = __open_device(pmd, dev, 0, &td);
1203 if (r)
1204 return r;
1206 if (td->open_count > 1) {
1207 __close_device(td);
1208 return -EBUSY;
1211 list_del(&td->list);
1212 kfree(td);
1213 r = dm_btree_remove(&pmd->details_info, pmd->details_root,
1214 &key, &pmd->details_root);
1215 if (r)
1216 return r;
1218 r = dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
1219 if (r)
1220 return r;
1222 return 0;
1225 int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd,
1226 dm_thin_id dev)
1228 int r = -EINVAL;
1230 pmd_write_lock(pmd);
1231 if (!pmd->fail_io)
1232 r = __delete_device(pmd, dev);
1233 pmd_write_unlock(pmd);
1235 return r;
1238 int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
1239 uint64_t current_id,
1240 uint64_t new_id)
1242 int r = -EINVAL;
1244 pmd_write_lock(pmd);
1246 if (pmd->fail_io)
1247 goto out;
1249 if (pmd->trans_id != current_id) {
1250 DMERR("mismatched transaction id");
1251 goto out;
1254 pmd->trans_id = new_id;
1255 r = 0;
1257 out:
1258 pmd_write_unlock(pmd);
1260 return r;
1263 int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
1264 uint64_t *result)
1266 int r = -EINVAL;
1268 down_read(&pmd->root_lock);
1269 if (!pmd->fail_io) {
1270 *result = pmd->trans_id;
1271 r = 0;
1273 up_read(&pmd->root_lock);
1275 return r;
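/*
 * A metadata snapshot shadows the superblock, takes an extra reference on
 * the mapping and device-details roots it points at, and publishes the
 * copy's location in held_root so userspace tools can read a consistent
 * view of the metadata while the pool stays live.
 */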
1278 static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
1280 int r, inc;
1281 struct thin_disk_superblock *disk_super;
1282 struct dm_block *copy, *sblock;
1283 dm_block_t held_root;
1286 * We commit to ensure the btree roots which we increment in a
1287 * moment are up to date.
1289 r = __commit_transaction(pmd);
1290 if (r < 0) {
1291 DMWARN("%s: __commit_transaction() failed, error = %d",
1292 __func__, r);
1293 return r;
1297 * Copy the superblock.
1299 dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
1300 r = dm_tm_shadow_block(pmd->tm, THIN_SUPERBLOCK_LOCATION,
1301 &sb_validator, &copy, &inc);
1302 if (r)
1303 return r;
1305 BUG_ON(!inc);
1307 held_root = dm_block_location(copy);
1308 disk_super = dm_block_data(copy);
1310 if (le64_to_cpu(disk_super->held_root)) {
1311 DMWARN("Pool metadata snapshot already exists: release this before taking another.");
1313 dm_tm_dec(pmd->tm, held_root);
1314 dm_tm_unlock(pmd->tm, copy);
1315 return -EBUSY;
1319 * Wipe the space map roots since we're not publishing this.
1321 memset(&disk_super->data_space_map_root, 0,
1322 sizeof(disk_super->data_space_map_root));
1323 memset(&disk_super->metadata_space_map_root, 0,
1324 sizeof(disk_super->metadata_space_map_root));
1327 * Increment the data structures that need to be preserved.
1329 dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->data_mapping_root));
1330 dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->device_details_root));
1331 dm_tm_unlock(pmd->tm, copy);
1334 * Write the held root into the superblock.
1336 r = superblock_lock(pmd, &sblock);
1337 if (r) {
1338 dm_tm_dec(pmd->tm, held_root);
1339 return r;
1342 disk_super = dm_block_data(sblock);
1343 disk_super->held_root = cpu_to_le64(held_root);
1344 dm_bm_unlock(sblock);
1345 return 0;
1348 int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd)
1350 int r = -EINVAL;
1352 pmd_write_lock(pmd);
1353 if (!pmd->fail_io)
1354 r = __reserve_metadata_snap(pmd);
1355 pmd_write_unlock(pmd);
1357 return r;
1360 static int __release_metadata_snap(struct dm_pool_metadata *pmd)
1362 int r;
1363 struct thin_disk_superblock *disk_super;
1364 struct dm_block *sblock, *copy;
1365 dm_block_t held_root;
1367 r = superblock_lock(pmd, &sblock);
1368 if (r)
1369 return r;
1371 disk_super = dm_block_data(sblock);
1372 held_root = le64_to_cpu(disk_super->held_root);
1373 disk_super->held_root = cpu_to_le64(0);
1375 dm_bm_unlock(sblock);
1377 if (!held_root) {
1378 DMWARN("No pool metadata snapshot found: nothing to release.");
1379 return -EINVAL;
1382 r = dm_tm_read_lock(pmd->tm, held_root, &sb_validator, &copy);
1383 if (r)
1384 return r;
1386 disk_super = dm_block_data(copy);
1387 dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
1388 dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
1389 dm_sm_dec_block(pmd->metadata_sm, held_root);
1391 dm_tm_unlock(pmd->tm, copy);
1393 return 0;
1396 int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
1398 int r = -EINVAL;
1400 pmd_write_lock(pmd);
1401 if (!pmd->fail_io)
1402 r = __release_metadata_snap(pmd);
1403 pmd_write_unlock(pmd);
1405 return r;
1408 static int __get_metadata_snap(struct dm_pool_metadata *pmd,
1409 dm_block_t *result)
1411 int r;
1412 struct thin_disk_superblock *disk_super;
1413 struct dm_block *sblock;
1415 r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
1416 &sb_validator, &sblock);
1417 if (r)
1418 return r;
1420 disk_super = dm_block_data(sblock);
1421 *result = le64_to_cpu(disk_super->held_root);
1423 dm_bm_unlock(sblock);
1425 return 0;
1428 int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
1429 dm_block_t *result)
1431 int r = -EINVAL;
1433 down_read(&pmd->root_lock);
1434 if (!pmd->fail_io)
1435 r = __get_metadata_snap(pmd, result);
1436 up_read(&pmd->root_lock);
1438 return r;
1441 int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev,
1442 struct dm_thin_device **td)
1444 int r = -EINVAL;
1446 pmd_write_lock_in_core(pmd);
1447 if (!pmd->fail_io)
1448 r = __open_device(pmd, dev, 0, td);
1449 pmd_write_unlock(pmd);
1451 return r;
1454 int dm_pool_close_thin_device(struct dm_thin_device *td)
1456 pmd_write_lock_in_core(td->pmd);
1457 __close_device(td);
1458 pmd_write_unlock(td->pmd);
1460 return 0;
1463 dm_thin_id dm_thin_dev_id(struct dm_thin_device *td)
1465 return td->id;
1469 * Check whether @time (of block creation) is older than @td's last snapshot.
1470 * If so then the associated block is shared with the last snapshot device.
1471 * Any block on a device created *after* the device last got snapshotted is
1472 * necessarily not shared.
1474 static bool __snapshotted_since(struct dm_thin_device *td, uint32_t time)
1476 return td->snapshotted_time > time;
1479 static void unpack_lookup_result(struct dm_thin_device *td, __le64 value,
1480 struct dm_thin_lookup_result *result)
1482 uint64_t block_time = 0;
1483 dm_block_t exception_block;
1484 uint32_t exception_time;
1486 block_time = le64_to_cpu(value);
1487 unpack_block_time(block_time, &exception_block, &exception_time);
1488 result->block = exception_block;
1489 result->shared = __snapshotted_since(td, exception_time);
1492 static int __find_block(struct dm_thin_device *td, dm_block_t block,
1493 int can_issue_io, struct dm_thin_lookup_result *result)
1495 int r;
1496 __le64 value;
1497 struct dm_pool_metadata *pmd = td->pmd;
1498 dm_block_t keys[2] = { td->id, block };
1499 struct dm_btree_info *info;
1501 if (can_issue_io) {
1502 info = &pmd->info;
1503 } else
1504 info = &pmd->nb_info;
1506 r = dm_btree_lookup(info, pmd->root, keys, &value);
1507 if (!r)
1508 unpack_lookup_result(td, value, result);
1510 return r;
1513 int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
1514 int can_issue_io, struct dm_thin_lookup_result *result)
1516 int r;
1517 struct dm_pool_metadata *pmd = td->pmd;
1519 down_read(&pmd->root_lock);
1520 if (pmd->fail_io) {
1521 up_read(&pmd->root_lock);
1522 return -EINVAL;
1525 r = __find_block(td, block, can_issue_io, result);
1527 up_read(&pmd->root_lock);
1528 return r;
1531 static int __find_next_mapped_block(struct dm_thin_device *td, dm_block_t block,
1532 dm_block_t *vblock,
1533 struct dm_thin_lookup_result *result)
1535 int r;
1536 __le64 value;
1537 struct dm_pool_metadata *pmd = td->pmd;
1538 dm_block_t keys[2] = { td->id, block };
1540 r = dm_btree_lookup_next(&pmd->info, pmd->root, keys, vblock, &value);
1541 if (!r)
1542 unpack_lookup_result(td, value, result);
1544 return r;
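/*
 * Starting from 'begin', find the first mapped block and then extend the
 * range for as long as consecutive virtual blocks map to consecutive pool
 * blocks with the same shared status.
 */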
1547 static int __find_mapped_range(struct dm_thin_device *td,
1548 dm_block_t begin, dm_block_t end,
1549 dm_block_t *thin_begin, dm_block_t *thin_end,
1550 dm_block_t *pool_begin, bool *maybe_shared)
1552 int r;
1553 dm_block_t pool_end;
1554 struct dm_thin_lookup_result lookup;
1556 if (end < begin)
1557 return -ENODATA;
1559 r = __find_next_mapped_block(td, begin, &begin, &lookup);
1560 if (r)
1561 return r;
1563 if (begin >= end)
1564 return -ENODATA;
1566 *thin_begin = begin;
1567 *pool_begin = lookup.block;
1568 *maybe_shared = lookup.shared;
1570 begin++;
1571 pool_end = *pool_begin + 1;
1572 while (begin != end) {
1573 r = __find_block(td, begin, true, &lookup);
1574 if (r) {
1575 if (r == -ENODATA)
1576 break;
1577 else
1578 return r;
1581 if ((lookup.block != pool_end) ||
1582 (lookup.shared != *maybe_shared))
1583 break;
1585 pool_end++;
1586 begin++;
1589 *thin_end = begin;
1590 return 0;
1593 int dm_thin_find_mapped_range(struct dm_thin_device *td,
1594 dm_block_t begin, dm_block_t end,
1595 dm_block_t *thin_begin, dm_block_t *thin_end,
1596 dm_block_t *pool_begin, bool *maybe_shared)
1598 int r = -EINVAL;
1599 struct dm_pool_metadata *pmd = td->pmd;
1601 down_read(&pmd->root_lock);
1602 if (!pmd->fail_io) {
1603 r = __find_mapped_range(td, begin, end, thin_begin, thin_end,
1604 pool_begin, maybe_shared);
1606 up_read(&pmd->root_lock);
1608 return r;
1611 static int __insert(struct dm_thin_device *td, dm_block_t block,
1612 dm_block_t data_block)
1614 int r, inserted;
1615 __le64 value;
1616 struct dm_pool_metadata *pmd = td->pmd;
1617 dm_block_t keys[2] = { td->id, block };
1619 value = cpu_to_le64(pack_block_time(data_block, pmd->time));
1620 __dm_bless_for_disk(&value);
1622 r = dm_btree_insert_notify(&pmd->info, pmd->root, keys, &value,
1623 &pmd->root, &inserted);
1624 if (r)
1625 return r;
1627 td->changed = 1;
1628 if (inserted)
1629 td->mapped_blocks++;
1631 return 0;
1634 int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
1635 dm_block_t data_block)
1637 int r = -EINVAL;
1639 pmd_write_lock(td->pmd);
1640 if (!td->pmd->fail_io)
1641 r = __insert(td, block, data_block);
1642 pmd_write_unlock(td->pmd);
1644 return r;
1647 static int __remove(struct dm_thin_device *td, dm_block_t block)
1649 int r;
1650 struct dm_pool_metadata *pmd = td->pmd;
1651 dm_block_t keys[2] = { td->id, block };
1653 r = dm_btree_remove(&pmd->info, pmd->root, keys, &pmd->root);
1654 if (r)
1655 return r;
1657 td->mapped_blocks--;
1658 td->changed = 1;
1660 return 0;
1663 static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end)
1665 int r;
1666 unsigned count, total_count = 0;
1667 struct dm_pool_metadata *pmd = td->pmd;
1668 dm_block_t keys[1] = { td->id };
1669 __le64 value;
1670 dm_block_t mapping_root;
1673 * Find the mapping tree
1675 r = dm_btree_lookup(&pmd->tl_info, pmd->root, keys, &value);
1676 if (r)
1677 return r;
1680 * Remove from the mapping tree, taking care to inc the
1681 * ref count so it doesn't get deleted.
1683 mapping_root = le64_to_cpu(value);
1684 dm_tm_inc(pmd->tm, mapping_root);
1685 r = dm_btree_remove(&pmd->tl_info, pmd->root, keys, &pmd->root);
1686 if (r)
1687 return r;
1690 * dm_btree_remove_leaves() stops at the first unmapped entry, so we have to
1691 * loop round finding mapped ranges.
1693 while (begin < end) {
1694 r = dm_btree_lookup_next(&pmd->bl_info, mapping_root, &begin, &begin, &value);
1695 if (r == -ENODATA)
1696 break;
1698 if (r)
1699 return r;
1701 if (begin >= end)
1702 break;
1704 r = dm_btree_remove_leaves(&pmd->bl_info, mapping_root, &begin, end, &mapping_root, &count);
1705 if (r)
1706 return r;
1708 total_count += count;
1711 td->mapped_blocks -= total_count;
1712 td->changed = 1;
1715 * Reinsert the mapping tree.
1717 value = cpu_to_le64(mapping_root);
1718 __dm_bless_for_disk(&value);
1719 return dm_btree_insert(&pmd->tl_info, pmd->root, keys, &value, &pmd->root);
1722 int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
1724 int r = -EINVAL;
1726 pmd_write_lock(td->pmd);
1727 if (!td->pmd->fail_io)
1728 r = __remove(td, block);
1729 pmd_write_unlock(td->pmd);
1731 return r;
1734 int dm_thin_remove_range(struct dm_thin_device *td,
1735 dm_block_t begin, dm_block_t end)
1737 int r = -EINVAL;
1739 pmd_write_lock(td->pmd);
1740 if (!td->pmd->fail_io)
1741 r = __remove_range(td, begin, end);
1742 pmd_write_unlock(td->pmd);
1744 return r;
1747 int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
1749 int r;
1750 uint32_t ref_count;
1752 down_read(&pmd->root_lock);
1753 r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
1754 if (!r)
1755 *result = (ref_count > 1);
1756 up_read(&pmd->root_lock);
1758 return r;
1761 int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
1763 int r = 0;
1765 pmd_write_lock(pmd);
1766 for (; b != e; b++) {
1767 r = dm_sm_inc_block(pmd->data_sm, b);
1768 if (r)
1769 break;
1771 pmd_write_unlock(pmd);
1773 return r;
1776 int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
1778 int r = 0;
1780 pmd_write_lock(pmd);
1781 for (; b != e; b++) {
1782 r = dm_sm_dec_block(pmd->data_sm, b);
1783 if (r)
1784 break;
1786 pmd_write_unlock(pmd);
1788 return r;
1791 bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
1793 int r;
1795 down_read(&td->pmd->root_lock);
1796 r = td->changed;
1797 up_read(&td->pmd->root_lock);
1799 return r;
1802 bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd)
1804 bool r = false;
1805 struct dm_thin_device *td, *tmp;
1807 down_read(&pmd->root_lock);
1808 list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
1809 if (td->changed) {
1810 r = td->changed;
1811 break;
1814 up_read(&pmd->root_lock);
1816 return r;
1819 bool dm_thin_aborted_changes(struct dm_thin_device *td)
1821 bool r;
1823 down_read(&td->pmd->root_lock);
1824 r = td->aborted_with_changes;
1825 up_read(&td->pmd->root_lock);
1827 return r;
1830 int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result)
1832 int r = -EINVAL;
1834 pmd_write_lock(pmd);
1835 if (!pmd->fail_io)
1836 r = dm_sm_new_block(pmd->data_sm, result);
1837 pmd_write_unlock(pmd);
1839 return r;
1842 int dm_pool_commit_metadata(struct dm_pool_metadata *pmd)
1844 int r = -EINVAL;
1847 * Care is taken so that a commit does not, by itself, put the
1848 * thin-pool in service.
1850 pmd_write_lock_in_core(pmd);
1851 if (pmd->fail_io)
1852 goto out;
1854 r = __commit_transaction(pmd);
1855 if (r < 0)
1856 goto out;
1859 * Open the next transaction.
1861 r = __begin_transaction(pmd);
1862 out:
1863 pmd_write_unlock(pmd);
1864 return r;
1867 static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd)
1869 struct dm_thin_device *td;
1871 list_for_each_entry(td, &pmd->thin_devices, list)
1872 td->aborted_with_changes = td->changed;
1875 int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
1877 int r = -EINVAL;
1879 pmd_write_lock(pmd);
1880 if (pmd->fail_io)
1881 goto out;
1883 __set_abort_with_changes_flags(pmd);
1884 __destroy_persistent_data_objects(pmd);
1885 r = __create_persistent_data_objects(pmd, false);
1886 if (r)
1887 pmd->fail_io = true;
1889 out:
1890 pmd_write_unlock(pmd);
1892 return r;
1895 int dm_pool_get_free_block_count(struct dm_pool_metadata *pmd, dm_block_t *result)
1897 int r = -EINVAL;
1899 down_read(&pmd->root_lock);
1900 if (!pmd->fail_io)
1901 r = dm_sm_get_nr_free(pmd->data_sm, result);
1902 up_read(&pmd->root_lock);
1904 return r;
1907 int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
1908 dm_block_t *result)
1910 int r = -EINVAL;
1912 down_read(&pmd->root_lock);
1913 if (!pmd->fail_io)
1914 r = dm_sm_get_nr_free(pmd->metadata_sm, result);
1916 if (!r) {
1917 if (*result < pmd->metadata_reserve)
1918 *result = 0;
1919 else
1920 *result -= pmd->metadata_reserve;
1922 up_read(&pmd->root_lock);
1924 return r;
1927 int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
1928 dm_block_t *result)
1930 int r = -EINVAL;
1932 down_read(&pmd->root_lock);
1933 if (!pmd->fail_io)
1934 r = dm_sm_get_nr_blocks(pmd->metadata_sm, result);
1935 up_read(&pmd->root_lock);
1937 return r;
1940 int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result)
1942 int r = -EINVAL;
1944 down_read(&pmd->root_lock);
1945 if (!pmd->fail_io)
1946 r = dm_sm_get_nr_blocks(pmd->data_sm, result);
1947 up_read(&pmd->root_lock);
1949 return r;
1952 int dm_thin_get_mapped_count(struct dm_thin_device *td, dm_block_t *result)
1954 int r = -EINVAL;
1955 struct dm_pool_metadata *pmd = td->pmd;
1957 down_read(&pmd->root_lock);
1958 if (!pmd->fail_io) {
1959 *result = td->mapped_blocks;
1960 r = 0;
1962 up_read(&pmd->root_lock);
1964 return r;
1967 static int __highest_block(struct dm_thin_device *td, dm_block_t *result)
1969 int r;
1970 __le64 value_le;
1971 dm_block_t thin_root;
1972 struct dm_pool_metadata *pmd = td->pmd;
1974 r = dm_btree_lookup(&pmd->tl_info, pmd->root, &td->id, &value_le);
1975 if (r)
1976 return r;
1978 thin_root = le64_to_cpu(value_le);
1980 return dm_btree_find_highest_key(&pmd->bl_info, thin_root, result);
1983 int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
1984 dm_block_t *result)
1986 int r = -EINVAL;
1987 struct dm_pool_metadata *pmd = td->pmd;
1989 down_read(&pmd->root_lock);
1990 if (!pmd->fail_io)
1991 r = __highest_block(td, result);
1992 up_read(&pmd->root_lock);
1994 return r;
1997 static int __resize_space_map(struct dm_space_map *sm, dm_block_t new_count)
1999 int r;
2000 dm_block_t old_count;
2002 r = dm_sm_get_nr_blocks(sm, &old_count);
2003 if (r)
2004 return r;
2006 if (new_count == old_count)
2007 return 0;
2009 if (new_count < old_count) {
2010 DMERR("cannot reduce size of space map");
2011 return -EINVAL;
2014 return dm_sm_extend(sm, new_count - old_count);
2017 int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
2019 int r = -EINVAL;
2021 pmd_write_lock(pmd);
2022 if (!pmd->fail_io)
2023 r = __resize_space_map(pmd->data_sm, new_count);
2024 pmd_write_unlock(pmd);
2026 return r;
2029 int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
2031 int r = -EINVAL;
2033 pmd_write_lock(pmd);
2034 if (!pmd->fail_io) {
2035 r = __resize_space_map(pmd->metadata_sm, new_count);
2036 if (!r)
2037 __set_metadata_reserve(pmd);
2039 pmd_write_unlock(pmd);
2041 return r;
2044 void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
2046 pmd_write_lock_in_core(pmd);
2047 dm_bm_set_read_only(pmd->bm);
2048 pmd_write_unlock(pmd);
2051 void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd)
2053 pmd_write_lock_in_core(pmd);
2054 dm_bm_set_read_write(pmd->bm);
2055 pmd_write_unlock(pmd);
2058 int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
2059 dm_block_t threshold,
2060 dm_sm_threshold_fn fn,
2061 void *context)
2063 int r;
2065 pmd_write_lock_in_core(pmd);
2066 r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context);
2067 pmd_write_unlock(pmd);
2069 return r;
2072 void dm_pool_register_pre_commit_callback(struct dm_pool_metadata *pmd,
2073 dm_pool_pre_commit_fn fn,
2074 void *context)
2076 pmd_write_lock_in_core(pmd);
2077 pmd->pre_commit_fn = fn;
2078 pmd->pre_commit_context = context;
2079 pmd_write_unlock(pmd);
2082 int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
2084 int r = -EINVAL;
2085 struct dm_block *sblock;
2086 struct thin_disk_superblock *disk_super;
2088 pmd_write_lock(pmd);
2089 if (pmd->fail_io)
2090 goto out;
2092 pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG;
2094 r = superblock_lock(pmd, &sblock);
2095 if (r) {
2096 DMERR("couldn't lock superblock");
2097 goto out;
2100 disk_super = dm_block_data(sblock);
2101 disk_super->flags = cpu_to_le32(pmd->flags);
2103 dm_bm_unlock(sblock);
2104 out:
2105 pmd_write_unlock(pmd);
2106 return r;
2109 bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
2111 bool needs_check;
2113 down_read(&pmd->root_lock);
2114 needs_check = pmd->flags & THIN_METADATA_NEEDS_CHECK_FLAG;
2115 up_read(&pmd->root_lock);
2117 return needs_check;
2120 void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd)
2122 down_read(&pmd->root_lock);
2123 if (!pmd->fail_io)
2124 dm_tm_issue_prefetches(pmd->tm);
2125 up_read(&pmd->root_lock);