// SPDX-License-Identifier: GPL-2.0-only
#include "dm.h"
#include "persistent-data/dm-transaction-manager.h"
#include "persistent-data/dm-bitset.h"
#include "persistent-data/dm-space-map.h"

#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "era"

#define SUPERBLOCK_LOCATION 0
#define SUPERBLOCK_MAGIC 2126579579
#define SUPERBLOCK_CSUM_XOR 146538381
#define MIN_ERA_VERSION 1
#define MAX_ERA_VERSION 1
#define INVALID_WRITESET_ROOT SUPERBLOCK_LOCATION
#define MIN_BLOCK_SIZE 8
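/*
 * Overview: the era target tracks which blocks of its origin device were
 * written within user-driven periods of time called 'eras'.  Userland can
 * then ask which blocks have changed since a given era, e.g. to partially
 * invalidate a cache after the origin has been restored from a vendor
 * snapshot (see Documentation/admin-guide/device-mapper/era.rst).
 */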
/*
 *--------------------------------------------------------------
 * Writeset
 *--------------------------------------------------------------
 */
struct writeset_metadata {
	uint32_t nr_bits;
	dm_block_t root;
};

struct writeset {
	struct writeset_metadata md;

	/*
	 * An in core copy of the bits to save constantly doing look ups on
	 * disk.
	 */
	unsigned long *bits;
};

/*
 * This does not free off the on disk bitset as this will normally be done
 * after digesting into the era array.
 */
static void writeset_free(struct writeset *ws)
{
	vfree(ws->bits);
	ws->bits = NULL;
}
static int setup_on_disk_bitset(struct dm_disk_bitset *info,
				unsigned int nr_bits, dm_block_t *root)
{
	int r;

	r = dm_bitset_empty(info, root);
	if (r)
		return r;

	return dm_bitset_resize(info, *root, 0, nr_bits, false, root);
}
static size_t bitset_size(unsigned int nr_bits)
{
	return sizeof(unsigned long) * dm_div_up(nr_bits, BITS_PER_LONG);
}
/*
 * Allocates memory for the in core bitset.
 */
static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks)
{
	ws->bits = vzalloc(bitset_size(nr_blocks));
	if (!ws->bits) {
		DMERR("%s: couldn't allocate in memory bitset", __func__);
		return -ENOMEM;
	}

	return 0;
}
/*
 * Wipes the in-core bitset, and creates a new on disk bitset.
 */
static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws,
			 dm_block_t nr_blocks)
{
	int r;

	memset(ws->bits, 0, bitset_size(nr_blocks));

	ws->md.nr_bits = nr_blocks;
	r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root);
	if (r) {
		DMERR("%s: setup_on_disk_bitset failed", __func__);
		return r;
	}

	return 0;
}
static bool writeset_marked(struct writeset *ws, dm_block_t block)
{
	return test_bit(block, ws->bits);
}
static int writeset_marked_on_disk(struct dm_disk_bitset *info,
				   struct writeset_metadata *m, dm_block_t block,
				   bool *result)
{
	int r;
	dm_block_t old = m->root;

	/*
	 * The bitset was flushed when it was archived, so we know there'll
	 * be no change to the root.
	 */
	r = dm_bitset_test_bit(info, m->root, block, &m->root, result);
	if (r) {
		DMERR("%s: dm_bitset_test_bit failed", __func__);
		return r;
	}

	BUG_ON(m->root != old);

	return r;
}
/*
 * Returns < 0 on error, 0 if the bit wasn't previously set, 1 if it was.
 */
static int writeset_test_and_set(struct dm_disk_bitset *info,
				 struct writeset *ws, uint32_t block)
{
	int r;

	if (!test_bit(block, ws->bits)) {
		r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root);
		if (r) {
			/* FIXME: fail mode */
			return r;
		}

		return 0;
	}

	return 1;
}
/*
 *--------------------------------------------------------------
 * On disk metadata layout
 *--------------------------------------------------------------
 */
#define SPACE_MAP_ROOT_SIZE 128
#define UUID_LEN 16

struct writeset_disk {
	__le32 nr_bits;
	__le64 root;
} __packed;

struct superblock_disk {
	__le32 csum;
	__le32 flags;
	__le64 blocknr;

	__u8 uuid[UUID_LEN];
	__le64 magic;
	__le32 version;

	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];

	__le32 data_block_size;
	__le32 metadata_block_size;
	__le32 nr_blocks;

	__le32 current_era;
	struct writeset_disk current_writeset;

	/*
	 * Only these two fields are valid within the metadata snapshot.
	 */
	__le64 writeset_tree_root;
	__le64 era_array_root;

	__le64 metadata_snap;
} __packed;
/*
 *--------------------------------------------------------------
 * Superblock validation
 *--------------------------------------------------------------
 */
static void sb_prepare_for_write(const struct dm_block_validator *v,
				 struct dm_block *b,
				 size_t sb_block_size)
{
	struct superblock_disk *disk = dm_block_data(b);

	disk->blocknr = cpu_to_le64(dm_block_location(b));
	disk->csum = cpu_to_le32(dm_bm_checksum(&disk->flags,
						sb_block_size - sizeof(__le32),
						SUPERBLOCK_CSUM_XOR));
}
static int check_metadata_version(struct superblock_disk *disk)
{
	uint32_t metadata_version = le32_to_cpu(disk->version);

	if (metadata_version < MIN_ERA_VERSION || metadata_version > MAX_ERA_VERSION) {
		DMERR("Era metadata version %u found, but only versions between %u and %u supported.",
		      metadata_version, MIN_ERA_VERSION, MAX_ERA_VERSION);
		return -EINVAL;
	}

	return 0;
}
static int sb_check(const struct dm_block_validator *v,
		    struct dm_block *b,
		    size_t sb_block_size)
{
	struct superblock_disk *disk = dm_block_data(b);
	__le32 csum_le;

	if (dm_block_location(b) != le64_to_cpu(disk->blocknr)) {
		DMERR("%s failed: blocknr %llu: wanted %llu",
		      __func__, le64_to_cpu(disk->blocknr),
		      (unsigned long long)dm_block_location(b));
		return -ENOTBLK;
	}

	if (le64_to_cpu(disk->magic) != SUPERBLOCK_MAGIC) {
		DMERR("%s failed: magic %llu: wanted %llu",
		      __func__, le64_to_cpu(disk->magic),
		      (unsigned long long) SUPERBLOCK_MAGIC);
		return -EILSEQ;
	}

	csum_le = cpu_to_le32(dm_bm_checksum(&disk->flags,
					     sb_block_size - sizeof(__le32),
					     SUPERBLOCK_CSUM_XOR));
	if (csum_le != disk->csum) {
		DMERR("%s failed: csum %u: wanted %u",
		      __func__, le32_to_cpu(csum_le), le32_to_cpu(disk->csum));
		return -EILSEQ;
	}

	return check_metadata_version(disk);
}
static const struct dm_block_validator sb_validator = {
	.name = "superblock",
	.prepare_for_write = sb_prepare_for_write,
	.check = sb_check
};
/*
 *--------------------------------------------------------------
 * Low level metadata handling
 *--------------------------------------------------------------
 */
#define DM_ERA_METADATA_BLOCK_SIZE 4096
#define ERA_MAX_CONCURRENT_LOCKS 5

struct era_metadata {
	struct block_device *bdev;
	struct dm_block_manager *bm;
	struct dm_space_map *sm;
	struct dm_transaction_manager *tm;

	dm_block_t block_size;
	uint32_t nr_blocks;

	uint32_t current_era;

	/*
	 * We preallocate 2 writesets.  When an era rolls over we
	 * switch between them. This means the allocation is done at
	 * preresume time, rather than on the io path.
	 */
	struct writeset writesets[2];
	struct writeset *current_writeset;

	dm_block_t writeset_tree_root;
	dm_block_t era_array_root;

	struct dm_disk_bitset bitset_info;
	struct dm_btree_info writeset_tree_info;
	struct dm_array_info era_array_info;

	dm_block_t metadata_snap;

	/*
	 * A flag that is set whenever a writeset has been archived.
	 */
	bool archived_writesets;

	/*
	 * Reading the space map root can fail, so we read it into this
	 * buffer before the superblock is locked and updated.
	 */
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
};
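/*
 * Convenience wrappers for taking the superblock at SUPERBLOCK_LOCATION
 * through the block manager: a read lock, a write lock that zeroes the
 * block first (used when formatting), and a plain write lock.
 */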
static int superblock_read_lock(struct era_metadata *md,
				struct dm_block **sblock)
{
	return dm_bm_read_lock(md->bm, SUPERBLOCK_LOCATION,
			       &sb_validator, sblock);
}
static int superblock_lock_zero(struct era_metadata *md,
				struct dm_block **sblock)
{
	return dm_bm_write_lock_zero(md->bm, SUPERBLOCK_LOCATION,
				     &sb_validator, sblock);
}
static int superblock_lock(struct era_metadata *md,
			   struct dm_block **sblock)
{
	return dm_bm_write_lock(md->bm, SUPERBLOCK_LOCATION,
				&sb_validator, sblock);
}
/* FIXME: duplication with cache and thin */
static int superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
{
	int r;
	unsigned int i;
	struct dm_block *b;
	__le64 *data_le, zero = cpu_to_le64(0);
	unsigned int sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);

	/*
	 * We can't use a validator here - it may be all zeroes.
	 */
	r = dm_bm_read_lock(bm, SUPERBLOCK_LOCATION, NULL, &b);
	if (r)
		return r;

	data_le = dm_block_data(b);
	*result = true;
	for (i = 0; i < sb_block_size; i++) {
		if (data_le[i] != zero) {
			*result = false;
			break;
		}
	}

	dm_bm_unlock(b);

	return 0;
}

/*----------------------------------------------------------------*/
static void ws_pack(const struct writeset_metadata *core, struct writeset_disk *disk)
{
	disk->nr_bits = cpu_to_le32(core->nr_bits);
	disk->root = cpu_to_le64(core->root);
}
static void ws_unpack(const struct writeset_disk *disk, struct writeset_metadata *core)
{
	core->nr_bits = le32_to_cpu(disk->nr_bits);
	core->root = le64_to_cpu(disk->root);
}
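/*
 * Value type callbacks for the writeset tree.  ws_inc bumps the reference
 * count on the bitset root each writeset_disk entry points at (so the
 * blocks can be shared with the metadata snapshot); ws_dec deletes the
 * on-disk bitset when an entry is dropped; ws_eq is a bytewise compare.
 */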
static void ws_inc(void *context, const void *value, unsigned int count)
{
	struct era_metadata *md = context;
	struct writeset_disk ws_d;
	dm_block_t b;
	unsigned int i;

	for (i = 0; i < count; i++) {
		memcpy(&ws_d, value + (i * sizeof(ws_d)), sizeof(ws_d));
		b = le64_to_cpu(ws_d.root);
		dm_tm_inc(md->tm, b);
	}
}
static void ws_dec(void *context, const void *value, unsigned int count)
{
	struct era_metadata *md = context;
	struct writeset_disk ws_d;
	dm_block_t b;
	unsigned int i;

	for (i = 0; i < count; i++) {
		memcpy(&ws_d, value + (i * sizeof(ws_d)), sizeof(ws_d));
		b = le64_to_cpu(ws_d.root);
		dm_bitset_del(&md->bitset_info, b);
	}
}
static int ws_eq(void *context, const void *value1, const void *value2)
{
	return !memcmp(value1, value2, sizeof(struct writeset_disk));
}

/*----------------------------------------------------------------*/
static void setup_writeset_tree_info(struct era_metadata *md)
{
	struct dm_btree_value_type *vt = &md->writeset_tree_info.value_type;

	md->writeset_tree_info.tm = md->tm;
	md->writeset_tree_info.levels = 1;
	vt->context = md;
	vt->size = sizeof(struct writeset_disk);
	vt->inc = ws_inc;
	vt->dec = ws_dec;
	vt->equal = ws_eq;
}
static void setup_era_array_info(struct era_metadata *md)
{
	struct dm_btree_value_type vt;

	vt.context = NULL;
	vt.size = sizeof(__le32);
	vt.inc = NULL;
	vt.dec = NULL;
	vt.equal = NULL;

	dm_array_info_init(&md->era_array_info, md->tm, &vt);
}
static void setup_infos(struct era_metadata *md)
{
	dm_disk_bitset_init(md->tm, &md->bitset_info);
	setup_writeset_tree_info(md);
	setup_era_array_info(md);
}

/*----------------------------------------------------------------*/
static int create_fresh_metadata(struct era_metadata *md)
{
	int r;

	r = dm_tm_create_with_sm(md->bm, SUPERBLOCK_LOCATION,
				 &md->tm, &md->sm);
	if (r < 0) {
		DMERR("dm_tm_create_with_sm failed");
		return r;
	}

	setup_infos(md);

	r = dm_btree_empty(&md->writeset_tree_info, &md->writeset_tree_root);
	if (r) {
		DMERR("couldn't create new writeset tree");
		goto bad;
	}

	r = dm_array_empty(&md->era_array_info, &md->era_array_root);
	if (r) {
		DMERR("couldn't create era array");
		goto bad;
	}

	return 0;

bad:
	dm_sm_destroy(md->sm);
	dm_tm_destroy(md->tm);

	return r;
}
static int save_sm_root(struct era_metadata *md)
{
	int r;
	size_t metadata_len;

	r = dm_sm_root_size(md->sm, &metadata_len);
	if (r < 0)
		return r;

	return dm_sm_copy_root(md->sm, &md->metadata_space_map_root,
			       metadata_len);
}
static void copy_sm_root(struct era_metadata *md, struct superblock_disk *disk)
{
	memcpy(&disk->metadata_space_map_root,
	       &md->metadata_space_map_root,
	       sizeof(md->metadata_space_map_root));
}
/*
 * Writes a superblock, including the static fields that don't get updated
 * with every commit (possible optimisation here).  'md' should be fully
 * constructed when this is called.
 */
static void prepare_superblock(struct era_metadata *md, struct superblock_disk *disk)
{
	disk->magic = cpu_to_le64(SUPERBLOCK_MAGIC);
	disk->flags = cpu_to_le32(0ul);

	/* FIXME: can't keep blanking the uuid (uuid is currently unused though) */
	memset(disk->uuid, 0, sizeof(disk->uuid));
	disk->version = cpu_to_le32(MAX_ERA_VERSION);

	copy_sm_root(md, disk);

	disk->data_block_size = cpu_to_le32(md->block_size);
	disk->metadata_block_size = cpu_to_le32(DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
	disk->nr_blocks = cpu_to_le32(md->nr_blocks);
	disk->current_era = cpu_to_le32(md->current_era);

	ws_pack(&md->current_writeset->md, &disk->current_writeset);
	disk->writeset_tree_root = cpu_to_le64(md->writeset_tree_root);
	disk->era_array_root = cpu_to_le64(md->era_array_root);
	disk->metadata_snap = cpu_to_le64(md->metadata_snap);
}
static int write_superblock(struct era_metadata *md)
{
	int r;
	struct dm_block *sblock;
	struct superblock_disk *disk;

	r = save_sm_root(md);
	if (r) {
		DMERR("%s: save_sm_root failed", __func__);
		return r;
	}

	r = superblock_lock_zero(md, &sblock);
	if (r)
		return r;

	disk = dm_block_data(sblock);
	prepare_superblock(md, disk);

	return dm_tm_commit(md->tm, sblock);
}
/*
 * Assumes block_size and the infos are set.
 */
static int format_metadata(struct era_metadata *md)
{
	int r;

	r = create_fresh_metadata(md);
	if (r)
		return r;

	r = write_superblock(md);
	if (r) {
		dm_sm_destroy(md->sm);
		dm_tm_destroy(md->tm);
		return r;
	}

	return 0;
}
static int open_metadata(struct era_metadata *md)
{
	int r;
	struct dm_block *sblock;
	struct superblock_disk *disk;

	r = superblock_read_lock(md, &sblock);
	if (r) {
		DMERR("couldn't read_lock superblock");
		return r;
	}

	disk = dm_block_data(sblock);

	/* Verify the data block size hasn't changed */
	if (le32_to_cpu(disk->data_block_size) != md->block_size) {
		DMERR("changing the data block size (from %u to %llu) is not supported",
		      le32_to_cpu(disk->data_block_size), md->block_size);
		r = -EINVAL;
		goto bad;
	}

	r = dm_tm_open_with_sm(md->bm, SUPERBLOCK_LOCATION,
			       disk->metadata_space_map_root,
			       sizeof(disk->metadata_space_map_root),
			       &md->tm, &md->sm);
	if (r) {
		DMERR("dm_tm_open_with_sm failed");
		goto bad;
	}

	setup_infos(md);

	md->nr_blocks = le32_to_cpu(disk->nr_blocks);
	md->current_era = le32_to_cpu(disk->current_era);

	ws_unpack(&disk->current_writeset, &md->current_writeset->md);
	md->writeset_tree_root = le64_to_cpu(disk->writeset_tree_root);
	md->era_array_root = le64_to_cpu(disk->era_array_root);
	md->metadata_snap = le64_to_cpu(disk->metadata_snap);
	md->archived_writesets = true;

	dm_bm_unlock(sblock);

	return 0;

bad:
	dm_bm_unlock(sblock);
	return r;
}
static int open_or_format_metadata(struct era_metadata *md,
				   bool may_format)
{
	int r;
	bool unformatted = false;

	r = superblock_all_zeroes(md->bm, &unformatted);
	if (r)
		return r;

	if (unformatted)
		return may_format ? format_metadata(md) : -EPERM;

	return open_metadata(md);
}
static int create_persistent_data_objects(struct era_metadata *md,
					  bool may_format)
{
	int r;

	md->bm = dm_block_manager_create(md->bdev, DM_ERA_METADATA_BLOCK_SIZE,
					 ERA_MAX_CONCURRENT_LOCKS);
	if (IS_ERR(md->bm)) {
		DMERR("could not create block manager");
		return PTR_ERR(md->bm);
	}

	r = open_or_format_metadata(md, may_format);
	if (r)
		dm_block_manager_destroy(md->bm);

	return r;
}
static void destroy_persistent_data_objects(struct era_metadata *md)
{
	dm_sm_destroy(md->sm);
	dm_tm_destroy(md->tm);
	dm_block_manager_destroy(md->bm);
}
/*
 * This waits until all era_map threads have picked up the new filter.
 */
static void swap_writeset(struct era_metadata *md, struct writeset *new_writeset)
{
	rcu_assign_pointer(md->current_writeset, new_writeset);
	synchronize_rcu();
}
/*
 *------------------------------------------------------------------------
 * Writesets get 'digested' into the main era array.
 *
 * We're using a coroutine here so the worker thread can do the digestion,
 * thus avoiding synchronisation of the metadata.  Digesting a whole
 * writeset in one go would cause too much latency.
 *------------------------------------------------------------------------
 */
struct digest {
	uint32_t era;
	unsigned int nr_bits, current_bit;
	struct writeset_metadata writeset;
	__le32 value;
	struct dm_disk_bitset info;

	int (*step)(struct era_metadata *md, struct digest *d);
};

static int metadata_digest_lookup_writeset(struct era_metadata *md,
					   struct digest *d);
static int metadata_digest_remove_writeset(struct era_metadata *md,
					   struct digest *d)
{
	int r;
	uint64_t key = d->era;

	r = dm_btree_remove(&md->writeset_tree_info, md->writeset_tree_root,
			    &key, &md->writeset_tree_root);
	if (r) {
		DMERR("%s: dm_btree_remove failed", __func__);
		return r;
	}

	d->step = metadata_digest_lookup_writeset;
	return 0;
}
#define INSERTS_PER_STEP 100
static int metadata_digest_transcribe_writeset(struct era_metadata *md,
					       struct digest *d)
{
	int r;
	bool marked;
	unsigned int b, e = min(d->current_bit + INSERTS_PER_STEP, d->nr_bits);

	for (b = d->current_bit; b < e; b++) {
		r = writeset_marked_on_disk(&d->info, &d->writeset, b, &marked);
		if (r) {
			DMERR("%s: writeset_marked_on_disk failed", __func__);
			return r;
		}

		if (!marked)
			continue;

		__dm_bless_for_disk(&d->value);
		r = dm_array_set_value(&md->era_array_info, md->era_array_root,
				       b, &d->value, &md->era_array_root);
		if (r) {
			DMERR("%s: dm_array_set_value failed", __func__);
			return r;
		}
	}

	if (b == d->nr_bits)
		d->step = metadata_digest_remove_writeset;
	else
		d->current_bit = b;

	return 0;
}
static int metadata_digest_lookup_writeset(struct era_metadata *md,
					   struct digest *d)
{
	int r;
	uint64_t key;
	struct writeset_disk disk;

	r = dm_btree_find_lowest_key(&md->writeset_tree_info,
				     md->writeset_tree_root, &key);
	if (r < 0)
		return r;

	d->era = key;

	r = dm_btree_lookup(&md->writeset_tree_info,
			    md->writeset_tree_root, &key, &disk);
	if (r) {
		if (r == -ENODATA) {
			d->step = NULL;
			return 0;
		}

		DMERR("%s: dm_btree_lookup failed", __func__);
		return r;
	}

	ws_unpack(&disk, &d->writeset);
	d->value = cpu_to_le32(key);

	/*
	 * We initialise another bitset info to avoid any caching side effects
	 * with the previous one.
	 */
	dm_disk_bitset_init(md->tm, &d->info);

	d->nr_bits = min(d->writeset.nr_bits, md->nr_blocks);
	d->current_bit = 0;
	d->step = metadata_digest_transcribe_writeset;

	return 0;
}
static int metadata_digest_start(struct era_metadata *md, struct digest *d)
{
	if (d->step)
		return 0;

	memset(d, 0, sizeof(*d));
	d->step = metadata_digest_lookup_writeset;

	return 0;
}
/*
 *-----------------------------------------------------------------
 * High level metadata interface.  Target methods should use these,
 * and not the lower level ones.
 *-----------------------------------------------------------------
 */
static struct era_metadata *metadata_open(struct block_device *bdev,
					  sector_t block_size,
					  bool may_format)
{
	int r;
	struct era_metadata *md = kzalloc(sizeof(*md), GFP_KERNEL);

	if (!md)
		return NULL;

	md->bdev = bdev;
	md->block_size = block_size;

	md->writesets[0].md.root = INVALID_WRITESET_ROOT;
	md->writesets[1].md.root = INVALID_WRITESET_ROOT;
	md->current_writeset = &md->writesets[0];

	r = create_persistent_data_objects(md, may_format);
	if (r) {
		kfree(md);
		return ERR_PTR(r);
	}

	return md;
}
static void metadata_close(struct era_metadata *md)
{
	writeset_free(&md->writesets[0]);
	writeset_free(&md->writesets[1]);
	destroy_persistent_data_objects(md);
	kfree(md);
}
static bool valid_nr_blocks(dm_block_t n)
{
	/*
	 * dm_bitset restricts us to 2^32.  test_bit & co. restrict us
	 * further to 2^31 - 1.
	 */
	return n < (1ull << 31);
}
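/*
 * Throws away both in-core writesets, reallocates them at the new size
 * and grows the era array, filling the fresh entries with era 0.  The
 * void *arg matches the rpc fn1 signature; era_preresume passes in the
 * new block count.
 */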
static int metadata_resize(struct era_metadata *md, void *arg)
{
	int r;
	dm_block_t *new_size = arg;
	__le32 value;

	if (!valid_nr_blocks(*new_size)) {
		DMERR("Invalid number of origin blocks %llu",
		      (unsigned long long) *new_size);
		return -EINVAL;
	}

	writeset_free(&md->writesets[0]);
	writeset_free(&md->writesets[1]);

	r = writeset_alloc(&md->writesets[0], *new_size);
	if (r) {
		DMERR("%s: writeset_alloc failed for writeset 0", __func__);
		return r;
	}

	r = writeset_alloc(&md->writesets[1], *new_size);
	if (r) {
		DMERR("%s: writeset_alloc failed for writeset 1", __func__);
		writeset_free(&md->writesets[0]);
		return r;
	}

	value = cpu_to_le32(0u);
	__dm_bless_for_disk(&value);
	r = dm_array_resize(&md->era_array_info, md->era_array_root,
			    md->nr_blocks, *new_size,
			    &value, &md->era_array_root);
	if (r) {
		DMERR("%s: dm_array_resize failed", __func__);
		writeset_free(&md->writesets[0]);
		writeset_free(&md->writesets[1]);
		return r;
	}

	md->nr_blocks = *new_size;
	return 0;
}
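/*
 * Flushes the current writeset's bitset and inserts it into the writeset
 * tree, keyed by the current era.  The in-core root is then invalidated
 * so the same writeset can't be archived twice.
 */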
static int metadata_era_archive(struct era_metadata *md)
{
	int r;
	uint64_t keys[1];
	struct writeset_disk value;

	r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root,
			    &md->current_writeset->md.root);
	if (r) {
		DMERR("%s: dm_bitset_flush failed", __func__);
		return r;
	}

	ws_pack(&md->current_writeset->md, &value);

	keys[0] = md->current_era;
	__dm_bless_for_disk(&value);
	r = dm_btree_insert(&md->writeset_tree_info, md->writeset_tree_root,
			    keys, &value, &md->writeset_tree_root);
	if (r) {
		DMERR("%s: couldn't insert writeset into btree", __func__);
		/* FIXME: fail mode */
		return r;
	}

	md->current_writeset->md.root = INVALID_WRITESET_ROOT;
	md->archived_writesets = true;

	return 0;
}
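/*
 * The two preallocated writesets are used alternately; a new era
 * initialises the inactive one and publishes it with an RCU pointer
 * swap before bumping current_era.
 */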
static struct writeset *next_writeset(struct era_metadata *md)
{
	return (md->current_writeset == &md->writesets[0]) ?
		&md->writesets[1] : &md->writesets[0];
}
static int metadata_new_era(struct era_metadata *md)
{
	int r;
	struct writeset *new_writeset = next_writeset(md);

	r = writeset_init(&md->bitset_info, new_writeset, md->nr_blocks);
	if (r) {
		DMERR("%s: writeset_init failed", __func__);
		return r;
	}

	swap_writeset(md, new_writeset);
	md->current_era++;

	return 0;
}
static int metadata_era_rollover(struct era_metadata *md)
{
	int r;

	if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) {
		r = metadata_era_archive(md);
		if (r) {
			DMERR("%s: metadata_archive_era failed", __func__);
			/* FIXME: fail mode? */
			return r;
		}
	}

	r = metadata_new_era(md);
	if (r) {
		DMERR("%s: new era failed", __func__);
		/* FIXME: fail mode */
		return r;
	}

	return 0;
}
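/*
 * Reads the in-core bitset only.  The rcu read lock keeps the
 * current_writeset pointer stable while the bit is tested.
 */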
static bool metadata_current_marked(struct era_metadata *md, dm_block_t block)
{
	bool r;
	struct writeset *ws;

	rcu_read_lock();
	ws = rcu_dereference(md->current_writeset);
	r = writeset_marked(ws, block);
	rcu_read_unlock();

	return r;
}
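/*
 * A commit is: flush the current writeset's bitset, pre-commit the
 * transaction, stash the space map root, then rewrite the superblock and
 * commit through the transaction manager.
 */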
static int metadata_commit(struct era_metadata *md)
{
	int r;
	struct dm_block *sblock;

	if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) {
		r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root,
				    &md->current_writeset->md.root);
		if (r) {
			DMERR("%s: bitset flush failed", __func__);
			return r;
		}
	}

	r = dm_tm_pre_commit(md->tm);
	if (r) {
		DMERR("%s: pre commit failed", __func__);
		return r;
	}

	r = save_sm_root(md);
	if (r) {
		DMERR("%s: save_sm_root failed", __func__);
		return r;
	}

	r = superblock_lock(md, &sblock);
	if (r) {
		DMERR("%s: superblock lock failed", __func__);
		return r;
	}

	prepare_superblock(md, dm_block_data(sblock));

	return dm_tm_commit(md->tm, sblock);
}
static int metadata_checkpoint(struct era_metadata *md)
{
	/*
	 * For now we just rollover, but later I want to put a check in to
	 * avoid this if the filter is still pretty fresh.
	 */
	return metadata_era_rollover(md);
}
/*
 * Metadata snapshots allow userland to access era data.
 */
static int metadata_take_snap(struct era_metadata *md)
{
	int r, inc;
	struct dm_block *clone;

	if (md->metadata_snap != SUPERBLOCK_LOCATION) {
		DMERR("%s: metadata snapshot already exists", __func__);
		return -EINVAL;
	}

	r = metadata_era_rollover(md);
	if (r) {
		DMERR("%s: era rollover failed", __func__);
		return r;
	}

	r = metadata_commit(md);
	if (r) {
		DMERR("%s: pre commit failed", __func__);
		return r;
	}

	r = dm_sm_inc_block(md->sm, SUPERBLOCK_LOCATION);
	if (r) {
		DMERR("%s: couldn't increment superblock", __func__);
		return r;
	}

	r = dm_tm_shadow_block(md->tm, SUPERBLOCK_LOCATION,
			       &sb_validator, &clone, &inc);
	if (r) {
		DMERR("%s: couldn't shadow superblock", __func__);
		dm_sm_dec_block(md->sm, SUPERBLOCK_LOCATION);
		return r;
	}
	BUG_ON(!inc);

	r = dm_sm_inc_block(md->sm, md->writeset_tree_root);
	if (r) {
		DMERR("%s: couldn't inc writeset tree root", __func__);
		dm_tm_unlock(md->tm, clone);
		return r;
	}

	r = dm_sm_inc_block(md->sm, md->era_array_root);
	if (r) {
		DMERR("%s: couldn't inc era tree root", __func__);
		dm_sm_dec_block(md->sm, md->writeset_tree_root);
		dm_tm_unlock(md->tm, clone);
		return r;
	}

	md->metadata_snap = dm_block_location(clone);

	dm_tm_unlock(md->tm, clone);

	return 0;
}
static int metadata_drop_snap(struct era_metadata *md)
{
	int r;
	dm_block_t location;
	struct dm_block *clone;
	struct superblock_disk *disk;

	if (md->metadata_snap == SUPERBLOCK_LOCATION) {
		DMERR("%s: no snap to drop", __func__);
		return -EINVAL;
	}

	r = dm_tm_read_lock(md->tm, md->metadata_snap, &sb_validator, &clone);
	if (r) {
		DMERR("%s: couldn't read lock superblock clone", __func__);
		return r;
	}

	/*
	 * Whatever happens now we'll commit with no record of the metadata
	 * snap.
	 */
	md->metadata_snap = SUPERBLOCK_LOCATION;

	disk = dm_block_data(clone);
	r = dm_btree_del(&md->writeset_tree_info,
			 le64_to_cpu(disk->writeset_tree_root));
	if (r) {
		DMERR("%s: error deleting writeset tree clone", __func__);
		dm_tm_unlock(md->tm, clone);
		return r;
	}

	r = dm_array_del(&md->era_array_info, le64_to_cpu(disk->era_array_root));
	if (r) {
		DMERR("%s: error deleting era array clone", __func__);
		dm_tm_unlock(md->tm, clone);
		return r;
	}

	location = dm_block_location(clone);
	dm_tm_unlock(md->tm, clone);

	return dm_sm_dec_block(md->sm, location);
}
struct metadata_stats {
	dm_block_t used;
	dm_block_t total;
	dm_block_t snap;
	uint32_t era;
};
static int metadata_get_stats(struct era_metadata *md, void *ptr)
{
	int r;
	struct metadata_stats *s = ptr;
	dm_block_t nr_free, nr_total;

	r = dm_sm_get_nr_free(md->sm, &nr_free);
	if (r) {
		DMERR("dm_sm_get_nr_free returned %d", r);
		return r;
	}

	r = dm_sm_get_nr_blocks(md->sm, &nr_total);
	if (r) {
		DMERR("dm_pool_get_metadata_dev_size returned %d", r);
		return r;
	}

	s->used = nr_total - nr_free;
	s->total = nr_total;
	s->snap = md->metadata_snap;
	s->era = md->current_era;

	return 0;
}

/*----------------------------------------------------------------*/
struct era {
	struct dm_target *ti;

	struct dm_dev *metadata_dev;
	struct dm_dev *origin_dev;

	dm_block_t nr_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;
	struct era_metadata *md;

	struct workqueue_struct *wq;
	struct work_struct worker;

	spinlock_t deferred_lock;
	struct bio_list deferred_bios;

	spinlock_t rpc_lock;
	struct list_head rpc_calls;

	struct digest digest;
	atomic_t suspended;
};
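/*
 * An rpc hands a metadata operation to the worker thread: exactly one of
 * fn0/fn1 is set, the worker runs it against the metadata, records the
 * result and signals the completion.
 */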
struct rpc {
	struct list_head list;

	int (*fn0)(struct era_metadata *md);
	int (*fn1)(struct era_metadata *md, void *ref);
	void *arg;
	int result;

	struct completion complete;
};
/*
 *---------------------------------------------------------------
 * Remapping
 *---------------------------------------------------------------
 */
static bool block_size_is_power_of_two(struct era *era)
{
	return era->sectors_per_block_shift >= 0;
}
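/*
 * Maps a bio's start sector onto an era block number, dividing by the
 * block size (or shifting when the block size is a power of two).
 */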
static dm_block_t get_block(struct era *era, struct bio *bio)
{
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (!block_size_is_power_of_two(era))
		(void) sector_div(block_nr, era->sectors_per_block);
	else
		block_nr >>= era->sectors_per_block_shift;

	return block_nr;
}
static void remap_to_origin(struct era *era, struct bio *bio)
{
	bio_set_dev(bio, era->origin_dev->bdev);
}
/*
 *--------------------------------------------------------------
 * Worker thread
 *--------------------------------------------------------------
 */
static void wake_worker(struct era *era)
{
	if (!atomic_read(&era->suspended))
		queue_work(era->wq, &era->worker);
}
static void process_old_eras(struct era *era)
{
	int r;

	if (!era->digest.step)
		return;

	r = era->digest.step(era->md, &era->digest);
	if (r < 0) {
		DMERR("%s: digest step failed, stopping digestion", __func__);
		era->digest.step = NULL;

	} else if (era->digest.step)
		wake_worker(era);
}
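/*
 * Write bios for blocks not yet marked in the current era are deferred
 * to this function.  Each block is set in the on-disk writeset, a single
 * metadata commit covers the whole batch, and the bios are only
 * submitted (or errored, if anything failed) afterwards.  The in-core
 * bitset is updated last, so it never claims a block is marked before
 * the metadata does.
 */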
static void process_deferred_bios(struct era *era)
{
	int r;
	struct bio_list deferred_bios, marked_bios;
	struct bio *bio;
	struct blk_plug plug;
	bool commit_needed = false;
	bool failed = false;
	struct writeset *ws = era->md->current_writeset;

	bio_list_init(&deferred_bios);
	bio_list_init(&marked_bios);

	spin_lock(&era->deferred_lock);
	bio_list_merge_init(&deferred_bios, &era->deferred_bios);
	spin_unlock(&era->deferred_lock);

	if (bio_list_empty(&deferred_bios))
		return;

	while ((bio = bio_list_pop(&deferred_bios))) {
		r = writeset_test_and_set(&era->md->bitset_info, ws,
					  get_block(era, bio));
		if (r < 0) {
			/*
			 * This is bad news, we need to rollback.
			 * FIXME: finish.
			 */
			failed = true;
		} else if (r == 0)
			commit_needed = true;

		bio_list_add(&marked_bios, bio);
	}

	if (commit_needed) {
		r = metadata_commit(era->md);
		if (r)
			failed = true;
	}

	if (failed)
		while ((bio = bio_list_pop(&marked_bios)))
			bio_io_error(bio);
	else {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&marked_bios))) {
			/*
			 * Only update the in-core writeset if the on-disk one
			 * was updated too.
			 */
			if (commit_needed)
				set_bit(get_block(era, bio), ws->bits);
			submit_bio_noacct(bio);
		}
		blk_finish_plug(&plug);
	}
}
static void process_rpc_calls(struct era *era)
{
	int r;
	bool need_commit = false;
	struct list_head calls;
	struct rpc *rpc, *tmp;

	INIT_LIST_HEAD(&calls);
	spin_lock(&era->rpc_lock);
	list_splice_init(&era->rpc_calls, &calls);
	spin_unlock(&era->rpc_lock);

	list_for_each_entry_safe(rpc, tmp, &calls, list) {
		rpc->result = rpc->fn0 ? rpc->fn0(era->md) : rpc->fn1(era->md, rpc->arg);
		need_commit = true;
	}

	if (need_commit) {
		r = metadata_commit(era->md);
		if (r)
			list_for_each_entry_safe(rpc, tmp, &calls, list)
				rpc->result = r;
	}

	list_for_each_entry_safe(rpc, tmp, &calls, list)
		complete(&rpc->complete);
}
static void kick_off_digest(struct era *era)
{
	if (era->md->archived_writesets) {
		era->md->archived_writesets = false;
		metadata_digest_start(era->md, &era->digest);
	}
}
static void do_work(struct work_struct *ws)
{
	struct era *era = container_of(ws, struct era, worker);

	kick_off_digest(era);
	process_old_eras(era);
	process_deferred_bios(era);
	process_rpc_calls(era);
}
static void defer_bio(struct era *era, struct bio *bio)
{
	spin_lock(&era->deferred_lock);
	bio_list_add(&era->deferred_bios, bio);
	spin_unlock(&era->deferred_lock);

	wake_worker(era);
}
/*
 * Make an rpc call to the worker to change the metadata.
 */
static int perform_rpc(struct era *era, struct rpc *rpc)
{
	rpc->result = 0;
	init_completion(&rpc->complete);

	spin_lock(&era->rpc_lock);
	list_add(&rpc->list, &era->rpc_calls);
	spin_unlock(&era->rpc_lock);

	wake_worker(era);
	wait_for_completion(&rpc->complete);

	return rpc->result;
}
static int in_worker0(struct era *era, int (*fn)(struct era_metadata *md))
{
	struct rpc rpc;

	rpc.fn0 = fn;
	rpc.fn1 = NULL;

	return perform_rpc(era, &rpc);
}
static int in_worker1(struct era *era,
		      int (*fn)(struct era_metadata *md, void *ref), void *arg)
{
	struct rpc rpc;

	rpc.fn0 = NULL;
	rpc.fn1 = fn;
	rpc.arg = arg;

	return perform_rpc(era, &rpc);
}
static void start_worker(struct era *era)
{
	atomic_set(&era->suspended, 0);
}
static void stop_worker(struct era *era)
{
	atomic_set(&era->suspended, 1);
	drain_workqueue(era->wq);
}
/*
 *--------------------------------------------------------------
 * Target methods
 *--------------------------------------------------------------
 */
static void era_destroy(struct era *era)
{
	if (era->md)
		metadata_close(era->md);

	if (era->wq)
		destroy_workqueue(era->wq);

	if (era->origin_dev)
		dm_put_device(era->ti, era->origin_dev);

	if (era->metadata_dev)
		dm_put_device(era->ti, era->metadata_dev);

	kfree(era);
}
static dm_block_t calc_nr_blocks(struct era *era)
{
	return dm_sector_div_up(era->ti->len, era->sectors_per_block);
}
static bool valid_block_size(dm_block_t block_size)
{
	bool greater_than_zero = block_size > 0;
	bool multiple_of_min_block_size = (block_size & (MIN_BLOCK_SIZE - 1)) == 0;

	return greater_than_zero && multiple_of_min_block_size;
}
/*
 * <metadata dev> <data dev> <data block size (sectors)>
 */
static int era_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	char dummy;
	struct era *era;
	struct era_metadata *md;

	if (argc != 3) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	era = kzalloc(sizeof(*era), GFP_KERNEL);
	if (!era) {
		ti->error = "Error allocating era structure";
		return -ENOMEM;
	}

	era->ti = ti;

	r = dm_get_device(ti, argv[0], BLK_OPEN_READ | BLK_OPEN_WRITE,
			  &era->metadata_dev);
	if (r) {
		ti->error = "Error opening metadata device";
		era_destroy(era);
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[1], BLK_OPEN_READ | BLK_OPEN_WRITE,
			  &era->origin_dev);
	if (r) {
		ti->error = "Error opening data device";
		era_destroy(era);
		return -EINVAL;
	}

	r = sscanf(argv[2], "%u%c", &era->sectors_per_block, &dummy);
	if (r != 1) {
		ti->error = "Error parsing block size";
		era_destroy(era);
		return -EINVAL;
	}

	r = dm_set_target_max_io_len(ti, era->sectors_per_block);
	if (r) {
		ti->error = "could not set max io len";
		era_destroy(era);
		return -EINVAL;
	}

	if (!valid_block_size(era->sectors_per_block)) {
		ti->error = "Invalid block size";
		era_destroy(era);
		return -EINVAL;
	}
	if (era->sectors_per_block & (era->sectors_per_block - 1))
		era->sectors_per_block_shift = -1;
	else
		era->sectors_per_block_shift = __ffs(era->sectors_per_block);

	md = metadata_open(era->metadata_dev->bdev, era->sectors_per_block, true);
	if (IS_ERR(md)) {
		ti->error = "Error reading metadata";
		era_destroy(era);
		return PTR_ERR(md);
	}
	era->md = md;

	era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
	if (!era->wq) {
		ti->error = "could not create workqueue for metadata object";
		era_destroy(era);
		return -ENOMEM;
	}
	INIT_WORK(&era->worker, do_work);

	spin_lock_init(&era->deferred_lock);
	bio_list_init(&era->deferred_bios);

	spin_lock_init(&era->rpc_lock);
	INIT_LIST_HEAD(&era->rpc_calls);

	ti->private = era;
	ti->num_flush_bios = 1;
	ti->flush_supported = true;

	ti->num_discard_bios = 1;

	return 0;
}
static void era_dtr(struct dm_target *ti)
{
	era_destroy(ti->private);
}
static int era_map(struct dm_target *ti, struct bio *bio)
{
	struct era *era = ti->private;
	dm_block_t block = get_block(era, bio);

	/*
	 * All bios get remapped to the origin device.  We do this now, but
	 * it may not get issued until later.  Depending on whether the
	 * block is marked in this era.
	 */
	remap_to_origin(era, bio);

	/*
	 * REQ_PREFLUSH bios carry no data, so we're not interested in them.
	 */
	if (!(bio->bi_opf & REQ_PREFLUSH) &&
	    (bio_data_dir(bio) == WRITE) &&
	    !metadata_current_marked(era->md, block)) {
		defer_bio(era, bio);
		return DM_MAPIO_SUBMITTED;
	}

	return DM_MAPIO_REMAPPED;
}
static void era_postsuspend(struct dm_target *ti)
{
	int r;
	struct era *era = ti->private;

	r = in_worker0(era, metadata_era_archive);
	if (r) {
		DMERR("%s: couldn't archive current era", __func__);
		/* FIXME: fail mode */
	}

	stop_worker(era);

	r = metadata_commit(era->md);
	if (r) {
		DMERR("%s: metadata_commit failed", __func__);
		/* FIXME: fail mode */
	}
}
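/*
 * Any table size change is handled here, before the worker restarts:
 * resize the metadata, commit, then roll over to a fresh era so the
 * newly allocated writesets are in play.
 */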
static int era_preresume(struct dm_target *ti)
{
	int r;
	struct era *era = ti->private;
	dm_block_t new_size = calc_nr_blocks(era);

	if (era->nr_blocks != new_size) {
		r = metadata_resize(era->md, &new_size);
		if (r) {
			DMERR("%s: metadata_resize failed", __func__);
			return r;
		}

		r = metadata_commit(era->md);
		if (r) {
			DMERR("%s: metadata_commit failed", __func__);
			return r;
		}

		era->nr_blocks = new_size;
	}

	start_worker(era);

	r = in_worker0(era, metadata_era_rollover);
	if (r) {
		DMERR("%s: metadata_era_rollover failed", __func__);
		return r;
	}

	return 0;
}
/*
 * Status format:
 *
 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
 * <current era> <held metadata root | '-'>
 */
static void era_status(struct dm_target *ti, status_type_t type,
		       unsigned int status_flags, char *result, unsigned int maxlen)
{
	int r;
	struct era *era = ti->private;
	ssize_t sz = 0;
	struct metadata_stats stats;
	char buf[BDEVNAME_SIZE];

	switch (type) {
	case STATUSTYPE_INFO:
		r = in_worker1(era, metadata_get_stats, &stats);
		if (r)
			goto err;

		DMEMIT("%u %llu/%llu %u",
		       (unsigned int) (DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
		       (unsigned long long) stats.used,
		       (unsigned long long) stats.total,
		       (unsigned int) stats.era);

		if (stats.snap != SUPERBLOCK_LOCATION)
			DMEMIT(" %llu", stats.snap);
		else
			DMEMIT(" -");
		break;

	case STATUSTYPE_TABLE:
		format_dev_t(buf, era->metadata_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);
		format_dev_t(buf, era->origin_dev->bdev->bd_dev);
		DMEMIT("%s %u", buf, era->sectors_per_block);
		break;

	case STATUSTYPE_IMA:
		*result = '\0';
		break;
	}

	return;

err:
	DMEMIT("Error");
}
static int era_message(struct dm_target *ti, unsigned int argc, char **argv,
		       char *result, unsigned int maxlen)
{
	struct era *era = ti->private;

	if (argc != 1) {
		DMERR("incorrect number of message arguments");
		return -EINVAL;
	}

	if (!strcasecmp(argv[0], "checkpoint"))
		return in_worker0(era, metadata_checkpoint);

	if (!strcasecmp(argv[0], "take_metadata_snap"))
		return in_worker0(era, metadata_take_snap);

	if (!strcasecmp(argv[0], "drop_metadata_snap"))
		return in_worker0(era, metadata_drop_snap);

	DMERR("unsupported message '%s'", argv[0]);
	return -EINVAL;
}
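/*
 * For example, assuming a device named 'era0' (the name is hypothetical):
 *
 *   dmsetup message era0 0 checkpoint
 *   dmsetup message era0 0 take_metadata_snap
 *   dmsetup message era0 0 drop_metadata_snap
 */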
static sector_t get_dev_size(struct dm_dev *dev)
{
	return bdev_nr_sectors(dev->bdev);
}
static int era_iterate_devices(struct dm_target *ti,
			       iterate_devices_callout_fn fn, void *data)
{
	struct era *era = ti->private;

	return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data);
}
static void era_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct era *era = ti->private;
	uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * If the system-determined stacked limits are compatible with the
	 * era device's blocksize (io_opt is a factor) do not override them.
	 */
	if (io_opt_sectors < era->sectors_per_block ||
	    do_div(io_opt_sectors, era->sectors_per_block)) {
		limits->io_min = 0;
		limits->io_opt = era->sectors_per_block << SECTOR_SHIFT;
	}
}

/*----------------------------------------------------------------*/
static struct target_type era_target = {
	.name = "era",
	.version = {1, 0, 0},
	.module = THIS_MODULE,
	.ctr = era_ctr,
	.dtr = era_dtr,
	.map = era_map,
	.postsuspend = era_postsuspend,
	.preresume = era_preresume,
	.status = era_status,
	.message = era_message,
	.iterate_devices = era_iterate_devices,
	.io_hints = era_io_hints
};
module_dm(era);

MODULE_DESCRIPTION(DM_NAME " era target");
MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
MODULE_LICENSE("GPL");