// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"

#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dm-io.h>
#include <linux/dm-bufio.h>

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32U	/* 16KB */

#define DM_PREFETCH_CHUNKS		12

/*
 *---------------------------------------------------------------
 * Persistent snapshots, by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------
 */

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device.  The snapshot code
 * requires that we copy exception chunks to chunk aligned areas
 * of the COW store.  It makes sense therefore, to store the
 * metadata in chunk size blocks.
 *
 * There is no backward or forward compatibility implemented,
 * snapshots with different disk versions than the kernel will
 * not be usable.  It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on disk structures are in little-endian format.  The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */

/*
 * Magic for persistent snapshots: "SnAp" - Feeble isn't it.
 */
#define SNAP_MAGIC 0x70416e53

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

#define NUM_SNAPSHOT_HDR_CHUNKS 1

struct disk_header {
	__le32 magic;

	/*
	 * Is this snapshot valid.  There is no way of recovering
	 * an invalid snapshot.
	 */
	__le32 valid;

	/*
	 * Simple, incrementing version. no backward
	 * compatibility.
	 */
	__le32 version;

	/* In sectors */
	__le32 chunk_size;
} __packed;

struct disk_exception {
	__le64 old_chunk;
	__le64 new_chunk;
} __packed;

struct core_exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
};

struct commit_callback {
	void (*callback)(void *ref, int success);
	void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
	struct dm_exception_store *store;
	int version;
	int valid;
	uint32_t exceptions_per_area;

	/*
	 * Now that we have an asynchronous kcopyd there is no
	 * need for large chunk sizes, so it wont hurt to have a
	 * whole chunks worth of metadata in memory at once.
	 */
	void *area;

	/*
	 * An area of zeros used to clear the next area.
	 */
	void *zero_area;

	/*
	 * An area used for header. The header can be written
	 * concurrently with metadata (when invalidating the snapshot),
	 * so it needs a separate buffer.
	 */
	void *header_area;

	/*
	 * Used to keep track of which metadata area the data in
	 * 'chunk' refers to.
	 */
	chunk_t current_area;

	/*
	 * The next free chunk for an exception.
	 *
	 * When creating exceptions, all the chunks here and above are
	 * free.  It holds the next chunk to be allocated.  On rare
	 * occasions (e.g. after a system crash) holes can be left in
	 * the exception store because chunks can be committed out of
	 * order.
	 *
	 * When merging exceptions, it does not necessarily mean all the
	 * chunks here and above are free.  It holds the value it would
	 * have held if all chunks had been committed in order of
	 * allocation.  Consequently the value may occasionally be
	 * slightly too low, but since it's only used for 'status' and
	 * it can never reach its minimum value too early this doesn't
	 * matter.
	 */
	chunk_t next_free;

	/*
	 * The index of next free exception in the current
	 * metadata area.
	 */
	uint32_t current_committed;

	atomic_t pending_count;
	uint32_t callback_count;
	struct commit_callback *callbacks;
	struct dm_io_client *io_client;

	struct workqueue_struct *metadata_wq;
};

static int alloc_area(struct pstore *ps)
{
	int r = -ENOMEM;
	size_t len;

	len = ps->store->chunk_size << SECTOR_SHIFT;

	/*
	 * Allocate the chunk_size block of memory that will hold
	 * a single metadata area.
	 */
	ps->area = vmalloc(len);
	if (!ps->area)
		goto err_area;

	ps->zero_area = vzalloc(len);
	if (!ps->zero_area)
		goto err_zero_area;

	ps->header_area = vmalloc(len);
	if (!ps->header_area)
		goto err_header_area;

	return 0;

err_header_area:
	vfree(ps->zero_area);

err_zero_area:
	vfree(ps->area);

err_area:
	return r;
}

static void free_area(struct pstore *ps)
{
	vfree(ps->area);
	ps->area = NULL;
	vfree(ps->zero_area);
	ps->zero_area = NULL;
	vfree(ps->header_area);
	ps->header_area = NULL;
}
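
/*
 * A metadata I/O request, as handed to the metadata workqueue.  The
 * workqueue exists so that the synchronous dm_io() call runs on a
 * separate thread (see the comment in chunk_io() below).
 */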
struct mdata_req {
	struct dm_io_region *where;
	struct dm_io_request *io_req;
	struct work_struct work;
	int result;
};

static void do_metadata(struct work_struct *work)
{
	struct mdata_req *req = container_of(work, struct mdata_req, work);

	req->result = dm_io(req->io_req, 1, req->where, NULL, IOPRIO_DEFAULT);
}

/*
 * Read or write a chunk aligned and sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, blk_opf_t opf,
		    int metadata)
{
	struct dm_io_region where = {
		.bdev = dm_snap_cow(ps->store->snap)->bdev,
		.sector = ps->store->chunk_size * chunk,
		.count = ps->store->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_opf = opf,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = area,
		.client = ps->io_client,
		.notify.fn = NULL,
	};
	struct mdata_req req;

	if (!metadata)
		return dm_io(&io_req, 1, &where, NULL, IOPRIO_DEFAULT);

	req.where = &where;
	req.io_req = &io_req;

	/*
	 * Issue the synchronous I/O from a different thread
	 * to avoid submit_bio_noacct recursion.
	 */
	INIT_WORK_ONSTACK(&req.work, do_metadata);
	queue_work(ps->metadata_wq, &req.work);
	flush_workqueue(ps->metadata_wq);
	destroy_work_on_stack(&req.work);

	return req.result;
}
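
/*
 * Only header I/O passes metadata=1 (see read_header() and write_header());
 * area_io() and zero_disk_area() pass 0 and issue dm_io() directly.
 */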

/*
 * Convert a metadata area index to a chunk index.
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
	return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
}
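
/*
 * For example, with the default 32-sector (16KiB) chunk and the 16-byte
 * struct disk_exception, exceptions_per_area is 16384 / 16 = 1024, so
 * area N starts at chunk 1 + 1025 * N: chunk 0 holds the header, chunk 1
 * the first metadata area, chunks 2-1025 its data, chunk 1026 the next
 * metadata area, and so on.
 */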

static void skip_metadata(struct pstore *ps)
{
	uint32_t stride = ps->exceptions_per_area + 1;
	chunk_t next_free = ps->next_free;

	if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
		ps->next_free++;
}

/*
 * Read or write a metadata area.  Remembering to skip the first
 * chunk which holds the header.
 */
static int area_io(struct pstore *ps, blk_opf_t opf)
{
	chunk_t chunk = area_location(ps, ps->current_area);

	return chunk_io(ps, ps->area, chunk, opf, 0);
}

static void zero_memory_area(struct pstore *ps)
{
	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
}

static int zero_disk_area(struct pstore *ps, chunk_t area)
{
	return chunk_io(ps, ps->zero_area, area_location(ps, area),
			REQ_OP_WRITE, 0);
}

static int read_header(struct pstore *ps, int *new_snapshot)
{
	int r;
	struct disk_header *dh;
	unsigned int chunk_size;
	int chunk_size_supplied = 1;
	char *chunk_err;

	/*
	 * Use default chunk size (or logical_block_size, if larger)
	 * if none supplied
	 */
	if (!ps->store->chunk_size) {
		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
		    bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
					    bdev) >> 9);
		ps->store->chunk_mask = ps->store->chunk_size - 1;
		ps->store->chunk_shift = __ffs(ps->store->chunk_size);
		chunk_size_supplied = 0;
	}

	ps->io_client = dm_io_client_create();
	if (IS_ERR(ps->io_client))
		return PTR_ERR(ps->io_client);

	r = alloc_area(ps);
	if (r)
		return r;

	r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 1);
	if (r)
		goto bad;

	dh = ps->header_area;

	if (le32_to_cpu(dh->magic) == 0) {
		*new_snapshot = 1;
		return 0;
	}

	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
		DMWARN("Invalid or corrupt snapshot");
		r = -ENXIO;
		goto bad;
	}

	*new_snapshot = 0;
	ps->valid = le32_to_cpu(dh->valid);
	ps->version = le32_to_cpu(dh->version);
	chunk_size = le32_to_cpu(dh->chunk_size);

	if (ps->store->chunk_size == chunk_size)
		return 0;

	if (chunk_size_supplied)
		DMWARN("chunk size %u in device metadata overrides table chunk size of %u.",
		       chunk_size, ps->store->chunk_size);

	/* We had a bogus chunk_size. Fix stuff up. */
	free_area(ps);

	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
					      &chunk_err);
	if (r) {
		DMERR("invalid on-disk chunk size %u: %s.",
		      chunk_size, chunk_err);
		return r;
	}

	r = alloc_area(ps);
	return r;

bad:
	free_area(ps);
	return r;
}

static int write_header(struct pstore *ps)
{
	struct disk_header *dh;

	memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);

	dh = ps->header_area;
	dh->magic = cpu_to_le32(SNAP_MAGIC);
	dh->valid = cpu_to_le32(ps->valid);
	dh->version = cpu_to_le32(ps->version);
	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

	return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 1);
}

/*
 * Access functions for the disk exceptions, these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, void *ps_area,
					    uint32_t index)
{
	BUG_ON(index >= ps->exceptions_per_area);

	return ((struct disk_exception *) ps_area) + index;
}

static void read_exception(struct pstore *ps, void *ps_area,
			   uint32_t index, struct core_exception *result)
{
	struct disk_exception *de = get_exception(ps, ps_area, index);

	/* copy it */
	result->old_chunk = le64_to_cpu(de->old_chunk);
	result->new_chunk = le64_to_cpu(de->new_chunk);
}

static void write_exception(struct pstore *ps,
			    uint32_t index, struct core_exception *e)
{
	struct disk_exception *de = get_exception(ps, ps->area, index);

	/* copy it */
	de->old_chunk = cpu_to_le64(e->old_chunk);
	de->new_chunk = cpu_to_le64(e->new_chunk);
}

static void clear_exception(struct pstore *ps, uint32_t index)
{
	struct disk_exception *de = get_exception(ps, ps->area, index);

	/* clear it */
	de->old_chunk = 0;
	de->new_chunk = 0;
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate if the area has been
 * filled.
 */
static int insert_exceptions(struct pstore *ps, void *ps_area,
			     int (*callback)(void *callback_context,
					     chunk_t old, chunk_t new),
			     void *callback_context,
			     int *full)
{
	int r;
	unsigned int i;
	struct core_exception e;

	/* presume the area is full */
	*full = 1;

	for (i = 0; i < ps->exceptions_per_area; i++) {
		read_exception(ps, ps_area, i, &e);

		/*
		 * If the new_chunk is pointing at the start of
		 * the COW device, where the first metadata area
		 * is we know that we've hit the end of the
		 * exceptions.  Therefore the area is not full.
		 */
		if (e.new_chunk == 0LL) {
			ps->current_committed = i;
			*full = 0;
			break;
		}

		/*
		 * Keep track of the start of the free chunks.
		 */
		if (ps->next_free <= e.new_chunk)
			ps->next_free = e.new_chunk + 1;

		/*
		 * Otherwise we add the exception to the snapshot.
		 */
		r = callback(callback_context, e.old_chunk, e.new_chunk);
		if (r)
			return r;
	}

	return 0;
}
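
/*
 * Read every metadata area through dm-bufio, prefetching up to
 * DM_PREFETCH_CHUNKS areas ahead, and register the exceptions each
 * area contains.  Buffers are dropped with dm_bufio_forget() once
 * processed, since no metadata chunk is read twice here.
 */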
static int read_exceptions(struct pstore *ps,
			   int (*callback)(void *callback_context, chunk_t old,
					   chunk_t new),
			   void *callback_context)
{
	int r, full = 1;
	struct dm_bufio_client *client;
	chunk_t prefetch_area = 0;

	client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev,
					ps->store->chunk_size << SECTOR_SHIFT,
					1, 0, NULL, NULL, 0);

	if (IS_ERR(client))
		return PTR_ERR(client);

	/*
	 * Setup for one current buffer + desired readahead buffers.
	 */
	dm_bufio_set_minimum_buffers(client, 1 + DM_PREFETCH_CHUNKS);

	/*
	 * Keeping reading chunks and inserting exceptions until
	 * we find a partially full area.
	 */
	for (ps->current_area = 0; full; ps->current_area++) {
		struct dm_buffer *bp;
		void *area;
		chunk_t chunk;

		if (unlikely(prefetch_area < ps->current_area))
			prefetch_area = ps->current_area;

		if (DM_PREFETCH_CHUNKS) {
			do {
				chunk_t pf_chunk = area_location(ps, prefetch_area);

				if (unlikely(pf_chunk >= dm_bufio_get_device_size(client)))
					break;
				dm_bufio_prefetch(client, pf_chunk, 1);
				prefetch_area++;
				if (unlikely(!prefetch_area))
					break;
			} while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS);
		}

		chunk = area_location(ps, ps->current_area);

		area = dm_bufio_read(client, chunk, &bp);
		if (IS_ERR(area)) {
			r = PTR_ERR(area);
			goto ret_destroy_bufio;
		}

		r = insert_exceptions(ps, area, callback, callback_context,
				      &full);

		if (!full)
			memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);

		dm_bufio_release(bp);

		dm_bufio_forget(client, chunk);

		if (unlikely(r))
			goto ret_destroy_bufio;
	}

	ps->current_area--;

	skip_metadata(ps);

	r = 0;

ret_destroy_bufio:
	dm_bufio_client_destroy(client);

	return r;
}

static struct pstore *get_info(struct dm_exception_store *store)
{
	return store->context;
}

static void persistent_usage(struct dm_exception_store *store,
			     sector_t *total_sectors,
			     sector_t *sectors_allocated,
			     sector_t *metadata_sectors)
{
	struct pstore *ps = get_info(store);

	*sectors_allocated = ps->next_free * store->chunk_size;
	*total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/*
	 * First chunk is the fixed header.
	 * Then there are (ps->current_area + 1) metadata chunks, each one
	 * separated from the next by ps->exceptions_per_area data chunks.
	 */
	*metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
			    store->chunk_size;
}

static void persistent_dtr(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	destroy_workqueue(ps->metadata_wq);

	/* Created in read_header */
	if (ps->io_client)
		dm_io_client_destroy(ps->io_client);
	free_area(ps);

	/* Allocated in persistent_read_metadata */
	kvfree(ps->callbacks);

	kfree(ps);
}

static int persistent_read_metadata(struct dm_exception_store *store,
				    int (*callback)(void *callback_context,
						    chunk_t old, chunk_t new),
				    void *callback_context)
{
	int r, new_snapshot;
	struct pstore *ps = get_info(store);

	/*
	 * Read the snapshot header.
	 */
	r = read_header(ps, &new_snapshot);
	if (r)
		return r;

	/*
	 * Now we know correct chunk_size, complete the initialisation.
	 */
	ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
				  sizeof(struct disk_exception);
	ps->callbacks = kvcalloc(ps->exceptions_per_area,
				 sizeof(*ps->callbacks), GFP_KERNEL);
	if (!ps->callbacks)
		return -ENOMEM;

	/*
	 * Do we need to setup a new snapshot ?
	 */
	if (new_snapshot) {
		r = write_header(ps);
		if (r) {
			DMWARN("write_header failed");
			return r;
		}

		ps->current_area = 0;
		zero_memory_area(ps);
		r = zero_disk_area(ps, 0);
		if (r)
			DMWARN("zero_disk_area(0) failed");
		return r;
	}

	/*
	 * Sanity checks.
	 */
	if (ps->version != SNAPSHOT_DISK_VERSION) {
		DMWARN("unable to handle snapshot disk version %d",
		       ps->version);
		return -EINVAL;
	}

	/*
	 * Metadata are valid, but snapshot is invalidated
	 */
	if (!ps->valid)
		return 1;

	/*
	 * Read the metadata.
	 */
	r = read_exceptions(ps, callback, callback_context);

	return r;
}

static int persistent_prepare_exception(struct dm_exception_store *store,
					struct dm_exception *e)
{
	struct pstore *ps = get_info(store);
	sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/* Is there enough room ? */
	if (size < ((ps->next_free + 1) * store->chunk_size))
		return -ENOSPC;

	e->new_chunk = ps->next_free;

	/*
	 * Move onto the next free pending, making sure to take
	 * into account the location of the metadata chunks.
	 */
	ps->next_free++;
	skip_metadata(ps);

	atomic_inc(&ps->pending_count);
	return 0;
}
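
/*
 * Exceptions are written in two phases: persistent_prepare_exception()
 * above claims the next free chunk and raises pending_count, then the
 * commit below records the exception in the in-core area and flushes
 * the area to disk once all pending exceptions have drained or the
 * area has filled.
 */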
static void persistent_commit_exception(struct dm_exception_store *store,
					struct dm_exception *e, int valid,
					void (*callback)(void *, int success),
					void *callback_context)
{
	unsigned int i;
	struct pstore *ps = get_info(store);
	struct core_exception ce;
	struct commit_callback *cb;

	if (!valid)
		ps->valid = 0;

	ce.old_chunk = e->old_chunk;
	ce.new_chunk = e->new_chunk;
	write_exception(ps, ps->current_committed++, &ce);

	/*
	 * Add the callback to the back of the array.  This code
	 * is the only place where the callback array is
	 * manipulated, and we know that it will never be called
	 * multiple times concurrently.
	 */
	cb = ps->callbacks + ps->callback_count++;
	cb->callback = callback;
	cb->context = callback_context;

	/*
	 * If there are exceptions in flight and we have not yet
	 * filled this metadata area there's nothing more to do.
	 */
	if (!atomic_dec_and_test(&ps->pending_count) &&
	    (ps->current_committed != ps->exceptions_per_area))
		return;

	/*
	 * If we completely filled the current area, then wipe the next one.
	 */
	if ((ps->current_committed == ps->exceptions_per_area) &&
	    zero_disk_area(ps, ps->current_area + 1))
		ps->valid = 0;

	/*
	 * Commit exceptions to disk.
	 */
	if (ps->valid && area_io(ps, REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA |
				 REQ_SYNC))
		ps->valid = 0;

	/*
	 * Advance to the next area if this one is full.
	 */
	if (ps->current_committed == ps->exceptions_per_area) {
		ps->current_committed = 0;
		ps->current_area++;
		zero_memory_area(ps);
	}

	for (i = 0; i < ps->callback_count; i++) {
		cb = ps->callbacks + i;
		cb->callback(cb->context, ps->valid);
	}

	ps->callback_count = 0;
}
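
/*
 * Merge support: persistent_prepare_merge() reports how many exceptions
 * at the tail of the committed list have consecutive old and new chunks,
 * and persistent_commit_merge() then clears exactly that many.
 */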
static int persistent_prepare_merge(struct dm_exception_store *store,
				    chunk_t *last_old_chunk,
				    chunk_t *last_new_chunk)
{
	struct pstore *ps = get_info(store);
	struct core_exception ce;
	int nr_consecutive;
	int r;

	/*
	 * When current area is empty, move back to preceding area.
	 */
	if (!ps->current_committed) {
		/*
		 * Have we finished?
		 */
		if (!ps->current_area)
			return 0;

		ps->current_area--;
		r = area_io(ps, REQ_OP_READ);
		if (r < 0)
			return r;
		ps->current_committed = ps->exceptions_per_area;
	}

	read_exception(ps, ps->area, ps->current_committed - 1, &ce);
	*last_old_chunk = ce.old_chunk;
	*last_new_chunk = ce.new_chunk;

	/*
	 * Find number of consecutive chunks within the current area,
	 * working backwards.
	 */
	for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
	     nr_consecutive++) {
		read_exception(ps, ps->area,
			       ps->current_committed - 1 - nr_consecutive,
			       &ce);
		if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
		    ce.new_chunk != *last_new_chunk - nr_consecutive)
			break;
	}

	return nr_consecutive;
}

static int persistent_commit_merge(struct dm_exception_store *store,
				   int nr_merged)
{
	int r, i;
	struct pstore *ps = get_info(store);

	BUG_ON(nr_merged > ps->current_committed);

	for (i = 0; i < nr_merged; i++)
		clear_exception(ps, ps->current_committed - 1 - i);

	r = area_io(ps, REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA);
	if (r < 0)
		return r;

	ps->current_committed -= nr_merged;

	/*
	 * At this stage, only persistent_usage() uses ps->next_free, so
	 * we make no attempt to keep ps->next_free strictly accurate
	 * as exceptions may have been committed out-of-order originally.
	 * Once a snapshot has become merging, we set it to the value it
	 * would have held had all the exceptions been committed in order.
	 *
	 * ps->current_area does not get reduced by prepare_merge() until
	 * after commit_merge() has removed the nr_merged previous exceptions.
	 */
	ps->next_free = area_location(ps, ps->current_area) +
			ps->current_committed + 1;

	return 0;
}

static void persistent_drop_snapshot(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	ps->valid = 0;
	if (write_header(ps))
		DMWARN("write header failed");
}

static int persistent_ctr(struct dm_exception_store *store, char *options)
{
	struct pstore *ps;
	int r;

	/* allocate the pstore */
	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return -ENOMEM;

	ps->store = store;
	ps->valid = 1;
	ps->version = SNAPSHOT_DISK_VERSION;
	ps->area = NULL;
	ps->zero_area = NULL;
	ps->header_area = NULL;
	ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
	ps->current_committed = 0;

	ps->callback_count = 0;
	atomic_set(&ps->pending_count, 0);
	ps->callbacks = NULL;

	ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
	if (!ps->metadata_wq) {
		DMERR("couldn't start header metadata update thread");
		r = -ENOMEM;
		goto err_workqueue;
	}

	if (options) {
		char overflow = toupper(options[0]);

		if (overflow == 'O')
			store->userspace_supports_overflow = true;
		else {
			DMERR("Unsupported persistent store option: %s", options);
			r = -EINVAL;
			goto err_options;
		}
	}

	store->context = ps;

	return 0;

err_options:
	destroy_workqueue(ps->metadata_wq);
err_workqueue:
	kfree(ps);

	return r;
}
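
/*
 * persistent_status() emits the table-line suffix for this store,
 * e.g. " P 8" for a plain persistent store with an 8-sector chunk,
 * or " PO 8" when userspace supports overflow.
 */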
static unsigned int persistent_status(struct dm_exception_store *store,
				      status_type_t status, char *result,
				      unsigned int maxlen)
{
	unsigned int sz = 0;

	switch (status) {
	case STATUSTYPE_INFO:
		break;
	case STATUSTYPE_TABLE:
		DMEMIT(" %s %llu", store->userspace_supports_overflow ? "PO" : "P",
		       (unsigned long long)store->chunk_size);
		break;
	case STATUSTYPE_IMA:
		*result = '\0';
		break;
	}

	return sz;
}

static struct dm_exception_store_type _persistent_type = {
	.name = "persistent",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

static struct dm_exception_store_type _persistent_compat_type = {
	.name = "P",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

int dm_persistent_snapshot_init(void)
{
	int r;

	r = dm_exception_store_type_register(&_persistent_type);
	if (r) {
		DMERR("Unable to register persistent exception store type");
		return r;
	}

	r = dm_exception_store_type_register(&_persistent_compat_type);
	if (r) {
		DMERR("Unable to register old-style persistent exception store type");
		dm_exception_store_type_unregister(&_persistent_type);
		return r;
	}

	return r;
}

void dm_persistent_snapshot_exit(void)
{
	dm_exception_store_type_unregister(&_persistent_type);
	dm_exception_store_type_unregister(&_persistent_compat_type);
}