// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"
static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
	((ti)->type->name == dm_snapshot_merge_target_name)
/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))
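/*
 * Illustrative example (added commentary, not from the original source):
 * with DM_TRACKED_CHUNK_HASH_SIZE == 16 only the low four bits of the chunk
 * number select a bucket, so DM_TRACKED_CHUNK_HASH(0x1234) == 0x4, and
 * chunks 0x1234 and 0x1244 land in the same bucket.
 */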
struct dm_exception_table {
	uint32_t hash_mask;
	unsigned int hash_shift;
	struct hlist_bl_head *table;
};
struct dm_snapshot {
	struct rw_semaphore lock;

	struct dm_dev *origin;
	struct dm_dev *cow;

	struct dm_target *ti;

	/* List of snapshots per Origin */
	struct list_head list;

	/*
	 * You can't use a snapshot if this is 0 (e.g. if full).
	 * A snapshot-merge target never clears this.
	 */
	int valid;

	/*
	 * The snapshot overflowed because of a write to the snapshot device.
	 * We don't have to invalidate the snapshot in this case, but we need
	 * to prevent further writes.
	 */
	int snapshot_overflowed;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	atomic_t pending_exceptions_count;

	spinlock_t pe_allocation_lock;

	/* Protected by "pe_allocation_lock" */
	sector_t exception_start_sequence;

	/* Protected by kcopyd single-threaded callback */
	sector_t exception_complete_sequence;

	/*
	 * A list of pending exceptions that completed out of order.
	 * Protected by kcopyd single-threaded callback.
	 */
	struct rb_root out_of_order_tree;

	mempool_t pending_pool;

	struct dm_exception_table pending;
	struct dm_exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* Chunks with outstanding reads */
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

	/* The on disk metadata handler */
	struct dm_exception_store *store;

	unsigned int in_progress;
	struct wait_queue_head in_progress_wait;

	struct dm_kcopyd_client *kcopyd_client;

	/* Wait for events based on state_bits */
	unsigned long state_bits;

	/* Range of chunks currently being merged. */
	chunk_t first_merging_chunk;
	int num_merging_chunks;

	/*
	 * The merge operation failed if this flag is set.
	 * Failure modes are handled as follows:
	 * - I/O error reading the header
	 *    => don't load the target; abort.
	 * - Header does not have "valid" flag set
	 *    => use the origin; forget about the snapshot.
	 * - I/O error when reading exceptions
	 *    => don't load the target; abort.
	 *       (We can't use the intermediate origin state.)
	 * - I/O error while merging
	 *    => stop merging; set merge_failed; process I/O normally.
	 */
	bool merge_failed:1;

	bool discard_zeroes_cow:1;
	bool discard_passdown_origin:1;

	/*
	 * Incoming bios that overlap with chunks being merged must wait
	 * for them to be committed.
	 */
	struct bio_list bios_queued_during_merge;
};

/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE		0
#define SHUTDOWN_MERGE		1
/*
 * Maximum number of chunks being copied on write.
 *
 * The value was decided experimentally as a trade-off between memory
 * consumption, stalling the kernel's workqueues and maintaining a high enough
 * throughput.
 */
#define DEFAULT_COW_THRESHOLD 2048

static unsigned int cow_threshold = DEFAULT_COW_THRESHOLD;
module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");
struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
	return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
	return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);
static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}
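/*
 * Worked example (added commentary, illustrative numbers): if the store uses
 * 64-sector (32 KiB) chunks, chunk_shift is 6, so chunk_to_sector() maps
 * chunk 10 to sector 10 << 6 == 640.
 */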
static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}
struct dm_snap_pending_exception {
	struct dm_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;

	/* There was copying error. */
	int copy_error;

	/* A sequence number, it is used for in-order completion. */
	sector_t exception_sequence;

	struct rb_node out_of_order_node;

	/*
	 * For writing a complete chunk, bypassing the copy.
	 */
	struct bio *full_bio;
	bio_end_io_t *full_bio_end_io;
};
/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;
struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};
static void init_tracked_chunk(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	INIT_HLIST_NODE(&c->node);
}
static bool is_bio_tracked(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	return !hlist_unhashed(&c->node);
}
static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	c->chunk = chunk;

	spin_lock_irq(&s->tracked_chunk_lock);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irq(&s->tracked_chunk_lock);
}
static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
}
static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}
/*
 * This conflicting I/O is extremely improbable in the caller,
 * so fsleep(1000) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
	while (__chunk_is_tracked(s, chunk))
		fsleep(1000);
}
/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};
/*
 * This structure is allocated for each origin target
 */
struct dm_origin {
	struct dm_dev *dev;
	struct dm_target *ti;
	unsigned int split_boundary;
	struct list_head hash_list;
};
/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct list_head *_dm_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;
static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc_array(ORIGIN_HASH_SIZE, sizeof(struct list_head),
				 GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory for _origins");
		return -ENOMEM;
	}
	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);

	_dm_origins = kmalloc_array(ORIGIN_HASH_SIZE,
				    sizeof(struct list_head),
				    GFP_KERNEL);
	if (!_dm_origins) {
		DMERR("unable to allocate memory for _dm_origins");
		kfree(_origins);
		return -ENOMEM;
	}
	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_dm_origins + i);

	init_rwsem(&_origins_lock);

	return 0;
}
static void exit_origin_hash(void)
{
	kfree(_origins);
	kfree(_dm_origins);
}
static unsigned int origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}
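/*
 * Note (added commentary): only the low eight bits of the origin's dev_t are
 * used, matching ORIGIN_MASK, so the result always indexes one of the
 * ORIGIN_HASH_SIZE (256) buckets in _origins/_dm_origins.
 */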
static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry(o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}
static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];

	list_add_tail(&o->hash_list, sl);
}
static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct dm_origin *o;

	ol = &_dm_origins[origin_hash(origin)];
	list_for_each_entry(o, ol, hash_list)
		if (bdev_equal(o->dev->bdev, origin))
			return o;

	return NULL;
}
static void __insert_dm_origin(struct dm_origin *o)
{
	struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];

	list_add_tail(&o->hash_list, sl);
}
static void __remove_dm_origin(struct dm_origin *o)
{
	list_del(&o->hash_list);
}
/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 *   There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
					struct dm_snapshot **snap_src,
					struct dm_snapshot **snap_dest,
					struct dm_snapshot **snap_merge)
{
	struct dm_snapshot *s;
	struct origin *o;
	int count = 0;
	int active;

	o = __lookup_origin(snap->origin->bdev);
	if (!o)
		goto out;

	list_for_each_entry(s, &o->snapshots, list) {
		if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
			*snap_merge = s;
		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
			continue;

		down_read(&s->lock);
		active = s->active;
		up_read(&s->lock);

		if (active) {
			if (snap_src)
				*snap_src = s;
		} else if (snap_dest)
			*snap_dest = s;

		count++;
	}

out:
	return count;
}
/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
	struct dm_snapshot *snap_merge = NULL;

	/* Does snapshot need exceptions handed over to it? */
	if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
					  &snap_merge) == 2) ||
	    snap_dest) {
		snap->ti->error = "Snapshot cow pairing for exception table handover failed";
		return -EINVAL;
	}

	/*
	 * If no snap_src was found, snap cannot become a handover
	 * destination.
	 */
	if (!snap_src)
		return 0;

	/*
	 * Non-snapshot-merge handover?
	 */
	if (!dm_target_is_snapshot_merge(snap->ti))
		return 1;

	/*
	 * Do not allow more than one merging snapshot.
	 */
	if (snap_merge) {
		snap->ti->error = "A snapshot is already merging.";
		return -EINVAL;
	}

	if (!snap_src->store->type->prepare_merge ||
	    !snap_src->store->type->commit_merge) {
		snap->ti->error = "Snapshot exception store does not support snapshot-merge.";
		return -EINVAL;
	}

	return 1;
}
static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
	struct dm_snapshot *l;

	/* Sort the list according to chunk size, largest-first smallest-last */
	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < s->store->chunk_size)
			break;
	list_add_tail(&s->list, &l->list);
}
/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o = NULL;
	struct block_device *bdev = snap->origin->bdev;
	int r = 0;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);

	r = __validate_exception_handover(snap);
	if (r < 0) {
		kfree(new_o);
		goto out;
	}

	o = __lookup_origin(bdev);
	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	__insert_snapshot(o, snap);

out:
	up_write(&_origins_lock);

	return r;
}
/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
	struct block_device *bdev = s->origin->bdev;

	down_write(&_origins_lock);

	list_del(&s->list);
	__insert_snapshot(__lookup_origin(bdev), s);

	up_write(&_origins_lock);
}
static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (o && list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}
/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk);
/* Lock to protect access to the completed and pending exception hash tables. */
struct dm_exception_table_lock {
	struct hlist_bl_head *complete_slot;
	struct hlist_bl_head *pending_slot;
};
static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk,
					 struct dm_exception_table_lock *lock)
{
	struct dm_exception_table *complete = &s->complete;
	struct dm_exception_table *pending = &s->pending;

	lock->complete_slot = &complete->table[exception_hash(complete, chunk)];
	lock->pending_slot = &pending->table[exception_hash(pending, chunk)];
}
static void dm_exception_table_lock(struct dm_exception_table_lock *lock)
{
	hlist_bl_lock(lock->complete_slot);
	hlist_bl_lock(lock->pending_slot);
}

static void dm_exception_table_unlock(struct dm_exception_table_lock *lock)
{
	hlist_bl_unlock(lock->pending_slot);
	hlist_bl_unlock(lock->complete_slot);
}
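/*
 * Note (added commentary): the two bucket locks are always taken in the same
 * order - complete slot first, then pending slot - and released in reverse.
 * Keeping one fixed ordering for the pair is what lets several contexts lock
 * the slots of the same chunk concurrently without lock-ordering problems.
 */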
static int dm_exception_table_init(struct dm_exception_table *et,
				   uint32_t size, unsigned int hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = kvmalloc_array(size, sizeof(struct hlist_bl_head),
				   GFP_KERNEL);
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_HLIST_BL_HEAD(et->table + i);

	return 0;
}
static void dm_exception_table_exit(struct dm_exception_table *et,
				    struct kmem_cache *mem)
{
	struct hlist_bl_head *slot;
	struct dm_exception *ex;
	struct hlist_bl_node *pos, *n;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list) {
			kmem_cache_free(mem, ex);
		}
	}

	kvfree(et->table);
}
static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}
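/*
 * Worked example (added commentary, illustrative numbers): with hash_shift
 * == 4 and a 256-bucket table (hash_mask == 0xff), chunks 0x120..0x12f all
 * hash to bucket 0x12, so a run of consecutive chunks stays in one slot.
 */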
static void dm_remove_exception(struct dm_exception *e)
{
	hlist_bl_del(&e->hash_list);
}
/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
						chunk_t chunk)
{
	struct hlist_bl_head *slot;
	struct hlist_bl_node *pos;
	struct dm_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	hlist_bl_for_each_entry(e, pos, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}
static struct dm_exception *alloc_completed_exception(gfp_t gfp)
{
	struct dm_exception *e;

	e = kmem_cache_alloc(exception_cache, gfp);
	if (!e && gfp == GFP_NOIO)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}
static void free_completed_exception(struct dm_exception *e)
{
	kmem_cache_free(exception_cache, e);
}
static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(&s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}
static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, &s->pending_pool);
	smp_mb__before_atomic();
	atomic_dec(&s->pending_exceptions_count);
}
static void dm_insert_exception(struct dm_exception_table *eh,
				struct dm_exception *new_e)
{
	struct hlist_bl_head *l;
	struct hlist_bl_node *pos;
	struct dm_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	hlist_bl_for_each_entry(e, pos, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_completed_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_completed_exception(new_e);
			return;
		}

		if (new_e->old_chunk < e->old_chunk)
			break;
	}

out:
	if (!e) {
		/*
		 * Either the table doesn't support consecutive chunks or slot
		 * l is empty.
		 */
		hlist_bl_add_head(&new_e->hash_list, l);
	} else if (new_e->old_chunk < e->old_chunk) {
		/* Add before an existing exception */
		hlist_bl_add_before(&new_e->hash_list, &e->hash_list);
	} else {
		/* Add to l's tail: e is the last exception in this slot */
		hlist_bl_add_behind(&new_e->hash_list, &e->hash_list);
	}
}
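/*
 * Illustrative example (added commentary, hypothetical numbers): if the table
 * already holds an exception with old_chunk 100, new_chunk 50 and a
 * consecutive count of 0, inserting a new exception for old_chunk 101 /
 * new_chunk 51 does not add a node; it simply bumps the consecutive count of
 * the existing entry to 1 and frees new_e.
 */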
/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_exception_table_lock lock;
	struct dm_snapshot *s = context;
	struct dm_exception *e;

	e = alloc_completed_exception(GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	/*
	 * Although there is no need to lock access to the exception tables
	 * here, if we don't then hlist_bl_add_head(), called by
	 * dm_insert_exception(), will complain about accessing the
	 * corresponding list without locking it first.
	 */
	dm_exception_table_lock_init(s, old, &lock);

	dm_exception_table_lock(&lock);
	dm_insert_exception(&s->complete, e);
	dm_exception_table_unlock(&lock);

	return 0;
}
/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static uint32_t __minimum_chunk_size(struct origin *o)
{
	struct dm_snapshot *snap;
	unsigned int chunk_size = rounddown_pow_of_two(UINT_MAX);

	if (o)
		list_for_each_entry(snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);

	return (uint32_t) chunk_size;
}
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;

	mem /= sizeof(struct hlist_bl_head);

	return mem;
}
/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	max_buckets = calc_max_buckets();

	hash_size = cow_dev_size >> s->store->chunk_shift;
	hash_size = min(hash_size, max_buckets);
	if (hash_size < 64)
		hash_size = 64;
	hash_size = rounddown_pow_of_two(hash_size);
	if (dm_exception_table_init(&s->complete, hash_size,
				    DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (dm_exception_table_init(&s->pending, hash_size, 0)) {
		dm_exception_table_exit(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}
static void merge_shutdown(struct dm_snapshot *s)
{
	clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
	smp_mb__after_atomic();
	wake_up_bit(&s->state_bits, RUNNING_MERGE);
}
static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;

	return bio_list_get(&s->bios_queued_during_merge);
}
/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
					   chunk_t old_chunk)
{
	struct dm_exception *e;

	e = dm_lookup_exception(&s->complete, old_chunk);
	if (!e) {
		DMERR("Corruption detected: exception for block %llu is on disk but not in memory",
		      (unsigned long long)old_chunk);
		return -EINVAL;
	}

	/*
	 * If this is the only chunk using this exception, remove exception.
	 */
	if (!dm_consecutive_chunk_count(e)) {
		dm_remove_exception(e);
		free_completed_exception(e);
		return 0;
	}

	/*
	 * The chunk may be either at the beginning or the end of a
	 * group of consecutive chunks - never in the middle.  We are
	 * removing chunks in the opposite order to that in which they
	 * were added, so this should always be true.
	 * Decrement the consecutive chunk counter and adjust the
	 * starting point if necessary.
	 */
	if (old_chunk == e->old_chunk) {
		e->old_chunk++;
		e->new_chunk++;
	} else if (old_chunk != e->old_chunk +
		   dm_consecutive_chunk_count(e)) {
		DMERR("Attempt to merge block %llu from the middle of a chunk range [%llu - %llu]",
		      (unsigned long long)old_chunk,
		      (unsigned long long)e->old_chunk,
		      (unsigned long long)
		      e->old_chunk + dm_consecutive_chunk_count(e));
		return -EINVAL;
	}

	dm_consecutive_chunk_count_dec(e);

	return 0;
}
static void flush_bios(struct bio *bio);

static int remove_single_exception_chunk(struct dm_snapshot *s)
{
	struct bio *b = NULL;
	int r;
	chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

	down_write(&s->lock);

	/*
	 * Process chunks (and associated exceptions) in reverse order
	 * so that dm_consecutive_chunk_count_dec() accounting works.
	 */
	do {
		r = __remove_single_exception_chunk(s, old_chunk);
		if (r)
			goto out;
	} while (old_chunk-- > s->first_merging_chunk);

	b = __release_queued_bios_after_merge(s);
out:
	up_write(&s->lock);
	if (b)
		flush_bios(b);

	return r;
}
static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned int chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
			   void *context);
static uint64_t read_pending_exceptions_done_count(void)
{
	uint64_t pending_exceptions_done;

	spin_lock(&_pending_exceptions_done_spinlock);
	pending_exceptions_done = _pending_exceptions_done_count;
	spin_unlock(&_pending_exceptions_done_spinlock);

	return pending_exceptions_done;
}

static void increment_pending_exceptions_done_count(void)
{
	spin_lock(&_pending_exceptions_done_spinlock);
	_pending_exceptions_done_count++;
	spin_unlock(&_pending_exceptions_done_spinlock);

	wake_up_all(&_pending_exceptions_done);
}
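/*
 * Note (added commentary): the merge path samples this counter before
 * reallocating exceptions in other snapshots, then sleeps on
 * _pending_exceptions_done until the count changes and re-checks whether
 * the region it wants to merge is finally free of pending exceptions.
 */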
static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
	int i, linear_chunks;
	chunk_t old_chunk, new_chunk;
	struct dm_io_region src, dest;
	sector_t io_size;
	uint64_t previous_count;

	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
	if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
		goto shut;

	/*
	 * valid flag never changes during merge, so no lock required.
	 */
	if (!s->valid) {
		DMERR("Snapshot is invalid: can't merge");
		goto shut;
	}

	linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
						      &new_chunk);
	if (linear_chunks <= 0) {
		if (linear_chunks < 0) {
			DMERR("Read error in exception store: shutting down merge");
			down_write(&s->lock);
			s->merge_failed = true;
			up_write(&s->lock);
		}
		goto shut;
	}

	/* Adjust old_chunk and new_chunk to reflect start of linear region */
	old_chunk = old_chunk + 1 - linear_chunks;
	new_chunk = new_chunk + 1 - linear_chunks;

	/*
	 * Use one (potentially large) I/O to copy all 'linear_chunks'
	 * from the exception store to the origin
	 */
	io_size = linear_chunks * s->store->chunk_size;

	dest.bdev = s->origin->bdev;
	dest.sector = chunk_to_sector(s->store, old_chunk);
	dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

	src.bdev = s->cow->bdev;
	src.sector = chunk_to_sector(s->store, new_chunk);
	src.count = dest.count;

	/*
	 * Reallocate any exceptions needed in other snapshots then
	 * wait for the pending exceptions to complete.
	 * Each time any pending exception (globally on the system)
	 * completes we are woken and repeat the process to find out
	 * if we can proceed.  While this may not seem a particularly
	 * efficient algorithm, it is not expected to have any
	 * significant impact on performance.
	 */
	previous_count = read_pending_exceptions_done_count();
	while (origin_write_extent(s, dest.sector, io_size)) {
		wait_event(_pending_exceptions_done,
			   (read_pending_exceptions_done_count() !=
			    previous_count));
		/* Retry after the wait, until all exceptions are done. */
		previous_count = read_pending_exceptions_done_count();
	}

	down_write(&s->lock);
	s->first_merging_chunk = old_chunk;
	s->num_merging_chunks = linear_chunks;
	up_write(&s->lock);

	/* Wait until writes to all 'linear_chunks' drain */
	for (i = 0; i < linear_chunks; i++)
		__check_for_conflicting_io(s, old_chunk + i);

	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
	return;

shut:
	merge_shutdown(s);
}
static void error_bios(struct bio *bio);

static void merge_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snapshot *s = context;
	struct bio *b = NULL;

	if (read_err || write_err) {
		if (read_err)
			DMERR("Read error: shutting down merge.");
		else
			DMERR("Write error: shutting down merge.");
		goto shut;
	}

	if (blkdev_issue_flush(s->origin->bdev) < 0) {
		DMERR("Flush after merge failed: shutting down merge");
		goto shut;
	}

	if (s->store->type->commit_merge(s->store,
					 s->num_merging_chunks) < 0) {
		DMERR("Write error in exception store: shutting down merge");
		goto shut;
	}

	if (remove_single_exception_chunk(s) < 0)
		goto shut;

	snapshot_merge_next_chunks(s);

	return;

shut:
	down_write(&s->lock);
	s->merge_failed = true;
	b = __release_queued_bios_after_merge(s);
	up_write(&s->lock);
	error_bios(b);

	merge_shutdown(s);
}
static void start_merge(struct dm_snapshot *s)
{
	if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
		snapshot_merge_next_chunks(s);
}
/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
	set_bit(SHUTDOWN_MERGE, &s->state_bits);
	wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE);
	clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}
static int parse_snapshot_features(struct dm_arg_set *as, struct dm_snapshot *s,
				   struct dm_target *ti)
{
	int r;
	unsigned int argc;
	const char *arg_name;

	static const struct dm_arg _args[] = {
		{0, 2, "Invalid number of feature arguments"},
	};

	/*
	 * No feature arguments supplied.
	 */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	while (argc && !r) {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "discard_zeroes_cow"))
			s->discard_zeroes_cow = true;

		else if (!strcasecmp(arg_name, "discard_passdown_origin"))
			s->discard_passdown_origin = true;

		else {
			ti->error = "Unrecognised feature requested";
			r = -EINVAL;
			break;
		}
	}

	if (!s->discard_zeroes_cow && s->discard_passdown_origin) {
		/*
		 * TODO: really these are disjoint.. but ti->num_discard_bios
		 * and dm_bio_get_target_bio_nr() require rigid constraints.
		 */
		ti->error = "discard_passdown_origin feature depends on discard_zeroes_cow";
		r = -EINVAL;
	}

	return r;
}
/*
 * Construct a snapshot mapping:
 * <origin_dev> <COW-dev> <p|po|n> <chunk-size> [<# feature args> [<arg>]*]
 */
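/*
 * Illustrative example (added commentary, hypothetical devices): a 1 GiB
 * persistent snapshot with 32-sector chunks and the discard_zeroes_cow
 * feature could be loaded with a table line such as:
 *
 *   0 2097152 snapshot /dev/vg0/origin /dev/vg0/cow P 32 1 discard_zeroes_cow
 */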
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	struct dm_arg_set as;
	int i;
	int r = -EINVAL;
	char *origin_path, *cow_path;
	unsigned int args_used, num_flush_bios = 1;
	blk_mode_t origin_mode = BLK_OPEN_READ;

	if (argc < 4) {
		ti->error = "requires 4 or more arguments";
		r = -EINVAL;
		goto bad;
	}

	if (dm_target_is_snapshot_merge(ti)) {
		num_flush_bios = 2;
		origin_mode = BLK_OPEN_WRITE;
	}

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s) {
		ti->error = "Cannot allocate private snapshot structure";
		r = -ENOMEM;
		goto bad;
	}

	as.argc = argc;
	as.argv = argv;
	dm_consume_args(&as, 4);
	r = parse_snapshot_features(&as, s, ti);
	if (r)
		goto bad_features;

	origin_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad_origin;
	}

	cow_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
	if (r) {
		ti->error = "Cannot get COW device";
		goto bad_cow;
	}
	if (s->cow->bdev && s->cow->bdev == s->origin->bdev) {
		ti->error = "COW device cannot be the same as origin device";
		r = -EINVAL;
		goto bad_store;
	}

	r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad_store;
	}

	argv += args_used;
	argc -= args_used;

	s->ti = ti;
	s->valid = 1;
	s->snapshot_overflowed = 0;
	s->active = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	spin_lock_init(&s->pe_allocation_lock);
	s->exception_start_sequence = 0;
	s->exception_complete_sequence = 0;
	s->out_of_order_tree = RB_ROOT;
	init_rwsem(&s->lock);
	INIT_LIST_HEAD(&s->list);
	spin_lock_init(&s->pe_lock);
	s->state_bits = 0;
	s->merge_failed = false;
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;
	bio_list_init(&s->bios_queued_during_merge);

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad_hash_tables;
	}

	init_waitqueue_head(&s->in_progress_wait);

	s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(s->kcopyd_client)) {
		r = PTR_ERR(s->kcopyd_client);
		ti->error = "Could not create kcopyd client";
		goto bad_kcopyd;
	}

	r = mempool_init_slab_pool(&s->pending_pool, MIN_IOS, pending_cache);
	if (r) {
		ti->error = "Could not allocate mempool for pending exceptions";
		goto bad_pending_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	ti->private = s;
	ti->num_flush_bios = num_flush_bios;
	if (s->discard_zeroes_cow)
		ti->num_discard_bios = (s->discard_passdown_origin ? 2 : 1);
	ti->per_io_data_size = sizeof(struct dm_snap_tracked_chunk);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	r = register_snapshot(s);
	if (r == -ENOMEM) {
		ti->error = "Snapshot origin struct allocation failed";
		goto bad_load_and_register;
	} else if (r < 0) {
		/* invalid handover, register_snapshot has set ti->error */
		goto bad_load_and_register;
	}

	/*
	 * Metadata must only be loaded into one table at once, so skip this
	 * if metadata will be handed over during resume.
	 * Chunk size will be set during the handover - set it to zero to
	 * ensure it's ignored.
	 */
	if (r > 0) {
		s->store->chunk_size = 0;
		return 0;
	}

	r = s->store->type->read_metadata(s->store, dm_add_exception,
					  (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_read_metadata;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	if (!s->store->chunk_size) {
		ti->error = "Chunk size not set";
		r = -EINVAL;
		goto bad_read_metadata;
	}

	r = dm_set_target_max_io_len(ti, s->store->chunk_size);
	if (r)
		goto bad_read_metadata;

	return 0;

bad_read_metadata:
	unregister_snapshot(s);
bad_load_and_register:
	mempool_exit(&s->pending_pool);
bad_pending_pool:
	dm_kcopyd_client_destroy(s->kcopyd_client);
bad_kcopyd:
	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);
bad_hash_tables:
	dm_exception_store_destroy(s->store);
bad_store:
	dm_put_device(ti, s->cow);
bad_cow:
	dm_put_device(ti, s->origin);
bad_origin:
bad_features:
	kfree(s);
bad:
	return r;
}
static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);
}
static void __handover_exceptions(struct dm_snapshot *snap_src,
				  struct dm_snapshot *snap_dest)
{
	union {
		struct dm_exception_table table_swap;
		struct dm_exception_store *store_swap;
	} u;

	/*
	 * Swap all snapshot context information between the two instances.
	 */
	u.table_swap = snap_dest->complete;
	snap_dest->complete = snap_src->complete;
	snap_src->complete = u.table_swap;

	u.store_swap = snap_dest->store;
	snap_dest->store = snap_src->store;
	snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow;
	snap_src->store = u.store_swap;

	snap_dest->store->snap = snap_dest;
	snap_src->store->snap = snap_src;

	snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
	snap_dest->valid = snap_src->valid;
	snap_dest->snapshot_overflowed = snap_src->snapshot_overflowed;

	/*
	 * Set source invalid to ensure it receives no further I/O.
	 */
	snap_src->valid = 0;
}
static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	/* Check whether exception handover must be cancelled */
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest && (s == snap_src)) {
		down_write(&snap_dest->lock);
		snap_dest->valid = 0;
		up_write(&snap_dest->lock);
		DMERR("Cancelling snapshot handover.");
	}
	up_read(&_origins_lock);

	if (dm_target_is_snapshot_merge(ti))
		stop_merge(s);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		fsleep(1000);
	/*
	 * Ensure instructions in mempool_exit aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	__free_exceptions(s);

	mempool_exit(&s->pending_pool);

	dm_exception_store_destroy(s->store);

	dm_put_device(ti, s->cow);

	dm_put_device(ti, s->origin);

	WARN_ON(s->in_progress);

	kfree(s);
}
static void account_start_copy(struct dm_snapshot *s)
{
	spin_lock(&s->in_progress_wait.lock);
	s->in_progress++;
	spin_unlock(&s->in_progress_wait.lock);
}

static void account_end_copy(struct dm_snapshot *s)
{
	spin_lock(&s->in_progress_wait.lock);
	BUG_ON(!s->in_progress);
	s->in_progress--;
	if (likely(s->in_progress <= cow_threshold) &&
	    unlikely(waitqueue_active(&s->in_progress_wait)))
		wake_up_locked(&s->in_progress_wait);
	spin_unlock(&s->in_progress_wait.lock);
}
static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
{
	if (unlikely(s->in_progress > cow_threshold)) {
		spin_lock(&s->in_progress_wait.lock);
		if (likely(s->in_progress > cow_threshold)) {
			/*
			 * NOTE: this throttle doesn't account for whether
			 * the caller is servicing an IO that will trigger a COW
			 * so excess throttling may result for chunks not required
			 * to be COW'd.  But if cow_threshold was reached, extra
			 * throttling is unlikely to negatively impact performance.
			 */
			DECLARE_WAITQUEUE(wait, current);

			__add_wait_queue(&s->in_progress_wait, &wait);
			__set_current_state(TASK_UNINTERRUPTIBLE);
			spin_unlock(&s->in_progress_wait.lock);
			if (unlock_origins)
				up_read(&_origins_lock);
			io_schedule();
			remove_wait_queue(&s->in_progress_wait, &wait);
			return false;
		}
		spin_unlock(&s->in_progress_wait.lock);
	}
	return true;
}
/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		submit_bio_noacct(bio);
		bio = n;
	}
}
static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);

/*
 * Flush a list of buffers.
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
	struct bio *n;
	int r;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		r = do_origin(s->origin, bio, false);
		if (r == DM_MAPIO_REMAPPED)
			submit_bio_noacct(bio);
		bio = n;
	}
}
/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}
static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store->type->drop_snapshot)
		s->store->type->drop_snapshot(s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}
static void invalidate_snapshot(struct dm_snapshot *s, int err)
{
	down_write(&s->lock);
	__invalidate_snapshot(s, err);
	up_write(&s->lock);
}
static void pending_complete(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	struct bio *full_bio = NULL;
	struct dm_exception_table_lock lock;
	int error = 0;

	dm_exception_table_lock_init(s, pe->e.old_chunk, &lock);

	if (!success) {
		/* Read/write error - snapshot is unusable */
		invalidate_snapshot(s, -EIO);
		error = 1;

		dm_exception_table_lock(&lock);
		goto out;
	}

	e = alloc_completed_exception(GFP_NOIO);
	if (!e) {
		invalidate_snapshot(s, -ENOMEM);
		error = 1;

		dm_exception_table_lock(&lock);
		goto out;
	}
	*e = pe->e;

	down_read(&s->lock);
	dm_exception_table_lock(&lock);
	if (!s->valid) {
		up_read(&s->lock);
		free_completed_exception(e);
		error = 1;

		goto out;
	}

	/*
	 * Add a proper exception. After inserting the completed exception all
	 * subsequent snapshot reads to this chunk will be redirected to the
	 * COW device.  This ensures that we do not starve. Moreover, as long
	 * as the pending exception exists, neither origin writes nor snapshot
	 * merging can overwrite the chunk in origin.
	 */
	dm_insert_exception(&s->complete, e);
	up_read(&s->lock);

	/* Wait for conflicting reads to drain */
	if (__chunk_is_tracked(s, pe->e.old_chunk)) {
		dm_exception_table_unlock(&lock);
		__check_for_conflicting_io(s, pe->e.old_chunk);
		dm_exception_table_lock(&lock);
	}

out:
	/* Remove the in-flight exception from the list */
	dm_remove_exception(&pe->e);

	dm_exception_table_unlock(&lock);

	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = bio_list_get(&pe->origin_bios);
	full_bio = pe->full_bio;
	if (full_bio)
		full_bio->bi_end_io = pe->full_bio_end_io;
	increment_pending_exceptions_done_count();

	/* Submit any pending write bios */
	if (error) {
		if (full_bio)
			bio_io_error(full_bio);
		error_bios(snapshot_bios);
	} else {
		if (full_bio)
			bio_endio(full_bio);
		flush_bios(snapshot_bios);
	}

	retry_origin_bios(s, origin_bios);

	free_pending_exception(pe);
}
static void complete_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	/* Update the metadata if we are persistent */
	s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
					 pending_complete, pe);
}
/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	pe->copy_error = read_err || write_err;

	if (pe->exception_sequence == s->exception_complete_sequence) {
		struct rb_node *next;

		s->exception_complete_sequence++;
		complete_exception(pe);

		next = rb_first(&s->out_of_order_tree);
		while (next) {
			pe = rb_entry(next, struct dm_snap_pending_exception,
				      out_of_order_node);
			if (pe->exception_sequence != s->exception_complete_sequence)
				break;
			next = rb_next(next);
			s->exception_complete_sequence++;
			rb_erase(&pe->out_of_order_node, &s->out_of_order_tree);
			complete_exception(pe);
		}
	} else {
		struct rb_node *parent = NULL;
		struct rb_node **p = &s->out_of_order_tree.rb_node;
		struct dm_snap_pending_exception *pe2;

		while (*p) {
			pe2 = rb_entry(*p, struct dm_snap_pending_exception, out_of_order_node);
			parent = *p;

			BUG_ON(pe->exception_sequence == pe2->exception_sequence);
			if (pe->exception_sequence < pe2->exception_sequence)
				p = &((*p)->rb_left);
			else
				p = &((*p)->rb_right);
		}

		rb_link_node(&pe->out_of_order_node, parent, p);
		rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
	}
	account_end_copy(s);
}
/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	account_start_copy(s);
	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}
static void full_bio_end_io(struct bio *bio)
{
	void *callback_data = bio->bi_private;

	dm_kcopyd_do_callback(callback_data, 0, bio->bi_status ? 1 : 0);
}
static void start_full_bio(struct dm_snap_pending_exception *pe,
			   struct bio *bio)
{
	struct dm_snapshot *s = pe->snap;
	void *callback_data;

	pe->full_bio = bio;
	pe->full_bio_end_io = bio->bi_end_io;

	account_start_copy(s);
	callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
						   copy_callback, pe);

	bio->bi_end_io = full_bio_end_io;
	bio->bi_private = callback_data;

	submit_bio_noacct(bio);
}
static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}
/*
 * Inserts a pending exception into the pending table.
 *
 * NOTE: a write lock must be held on the chunk's pending exception table slot
 * before calling this.
 */
static struct dm_snap_pending_exception *
__insert_pending_exception(struct dm_snapshot *s,
			   struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->started = 0;
	pe->full_bio = NULL;

	spin_lock(&s->pe_allocation_lock);
	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		spin_unlock(&s->pe_allocation_lock);
		free_pending_exception(pe);
		return NULL;
	}

	pe->exception_sequence = s->exception_start_sequence++;
	spin_unlock(&s->pe_allocation_lock);

	dm_insert_exception(&s->pending, &pe->e);

	return pe;
}
/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on the chunk's pending exception table slot
 * before calling this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	struct dm_snap_pending_exception *pe2;

	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		free_pending_exception(pe);
		return pe2;
	}

	return __insert_pending_exception(s, pe, chunk);
}
static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio_set_dev(bio, s->cow->bdev);
	bio->bi_iter.bi_sector =
		chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
				(chunk - e->old_chunk)) +
		(bio->bi_iter.bi_sector & s->store->chunk_mask);
}
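/*
 * Worked example (added commentary, hypothetical numbers): with 16-sector
 * chunks, an exception mapping old_chunk 100 to new_chunk 7 redirects a bio
 * at sector 1605 (chunk 100, offset 5) to COW sector 7 * 16 + 5 == 117.
 */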
static void zero_callback(int read_err, unsigned long write_err, void *context)
{
	struct bio *bio = context;
	struct dm_snapshot *s = bio->bi_private;

	account_end_copy(s);
	bio->bi_status = write_err ? BLK_STS_IOERR : 0;
	bio_endio(bio);
}
static void zero_exception(struct dm_snapshot *s, struct dm_exception *e,
			   struct bio *bio, chunk_t chunk)
{
	struct dm_io_region dest;

	dest.bdev = s->cow->bdev;
	dest.sector = bio->bi_iter.bi_sector;
	dest.count = s->store->chunk_size;

	account_start_copy(s);
	WARN_ON_ONCE(bio->bi_private);
	bio->bi_private = s;
	dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio);
}
static bool io_overlaps_chunk(struct dm_snapshot *s, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(s->store->chunk_size << SECTOR_SHIFT);
}
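/*
 * Illustrative example (added commentary): with a 16-sector chunk size this
 * is true only for bios of exactly 16 << SECTOR_SHIFT == 8192 bytes, i.e. a
 * bio that covers one whole chunk.
 */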
static int snapshot_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;
	struct dm_exception_table_lock lock;

	init_tracked_chunk(bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		bio_set_dev(bio, s->cow->bdev);
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
	dm_exception_table_lock_init(s, chunk, &lock);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return DM_MAPIO_KILL;

	if (bio_data_dir(bio) == WRITE) {
		while (unlikely(!wait_for_in_progress(s, false)))
			; /* wait_for_in_progress() has slept */
	}

	down_read(&s->lock);
	dm_exception_table_lock(&lock);

	if (!s->valid || (unlikely(s->snapshot_overflowed) &&
	    bio_data_dir(bio) == WRITE)) {
		r = DM_MAPIO_KILL;
		goto out_unlock;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		if (s->discard_passdown_origin && dm_bio_get_target_bio_nr(bio)) {
			/*
			 * passdown discard to origin (without triggering
			 * snapshot exceptions via do_origin; doing so would
			 * defeat the goal of freeing space in origin that is
			 * implied by the "discard_passdown_origin" feature)
			 */
			bio_set_dev(bio, s->origin->bdev);
			track_chunk(s, bio, chunk);
			goto out_unlock;
		}
		/* discard to snapshot (target_bio_nr == 0) zeroes exceptions */
	}

	/* If the block is already remapped - use that, else remap it */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		if (unlikely(bio_op(bio) == REQ_OP_DISCARD) &&
		    io_overlaps_chunk(s, bio)) {
			dm_exception_table_unlock(&lock);
			up_read(&s->lock);
			zero_exception(s, e, bio, chunk);
			r = DM_MAPIO_SUBMITTED; /* discard is not issued */
			goto out;
		}
		goto out_unlock;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		/*
		 * If no exception exists, complete discard immediately
		 * otherwise it'll trigger copy-out.
		 */
		bio_endio(bio);
		r = DM_MAPIO_SUBMITTED;
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writable.
	 */
	if (bio_data_dir(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			dm_exception_table_unlock(&lock);
			pe = alloc_pending_exception(s);
			dm_exception_table_lock(&lock);

			e = dm_lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				dm_exception_table_unlock(&lock);
				up_read(&s->lock);

				down_write(&s->lock);

				if (s->store->userspace_supports_overflow) {
					if (s->valid && !s->snapshot_overflowed) {
						s->snapshot_overflowed = 1;
						DMERR("Snapshot overflowed: Unable to allocate exception.");
					}
				} else
					__invalidate_snapshot(s, -ENOMEM);
				up_write(&s->lock);

				r = DM_MAPIO_KILL;
				goto out;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started && io_overlaps_chunk(s, bio)) {
			pe->started = 1;

			dm_exception_table_unlock(&lock);
			up_read(&s->lock);

			start_full_bio(pe, bio);
			goto out;
		}

		bio_list_add(&pe->snapshot_bios, bio);

		if (!pe->started) {
			/* this is protected by the exception table lock */
			pe->started = 1;

			dm_exception_table_unlock(&lock);
			up_read(&s->lock);

			start_copy(pe);
			goto out;
		}
	} else {
		bio_set_dev(bio, s->origin->bdev);
		track_chunk(s, bio, chunk);
	}

out_unlock:
	dm_exception_table_unlock(&lock);
	up_read(&s->lock);
out:
	return r;
}
/*
 * A snapshot-merge target behaves like a combination of a snapshot
 * target and a snapshot-origin target.  It only generates new
 * exceptions in other snapshots and not in the one that is being
 * merged.
 *
 * For each chunk, if there is an existing exception, it is used to
 * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
 * which in turn might generate exceptions in other snapshots.
 * If merging is currently taking place on the chunk in question, the
 * I/O is deferred by adding it to s->bios_queued_during_merge.
 */
static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;

	init_tracked_chunk(bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		if (!dm_bio_get_target_bio_nr(bio))
			bio_set_dev(bio, s->origin->bdev);
		else
			bio_set_dev(bio, s->cow->bdev);
		return DM_MAPIO_REMAPPED;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		/* Once merging, discards no longer effect change */
		bio_endio(bio);
		return DM_MAPIO_SUBMITTED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);

	down_write(&s->lock);

	/* Full merging snapshots are redirected to the origin */
	if (!s->valid)
		goto redirect_to_origin;

	/* If the block is already remapped - use that */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		/* Queue writes overlapping with chunks being merged */
		if (bio_data_dir(bio) == WRITE &&
		    chunk >= s->first_merging_chunk &&
		    chunk < (s->first_merging_chunk +
			     s->num_merging_chunks)) {
			bio_set_dev(bio, s->origin->bdev);
			bio_list_add(&s->bios_queued_during_merge, bio);
			r = DM_MAPIO_SUBMITTED;
			goto out_unlock;
		}

		remap_exception(s, e, bio, chunk);

		if (bio_data_dir(bio) == WRITE)
			track_chunk(s, bio, chunk);
		goto out_unlock;
	}

redirect_to_origin:
	bio_set_dev(bio, s->origin->bdev);

	if (bio_data_dir(bio) == WRITE) {
		up_write(&s->lock);
		return do_origin(s->origin, bio, false);
	}

out_unlock:
	up_write(&s->lock);

	return r;
}
static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
			   blk_status_t *error)
{
	struct dm_snapshot *s = ti->private;

	if (is_bio_tracked(bio))
		stop_tracking_chunk(s, bio);

	return DM_ENDIO_DONE;
}
static void snapshot_merge_presuspend(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	stop_merge(s);
}
static int snapshot_preresume(struct dm_target *ti)
{
	int r = 0;
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_read(&snap_src->lock);
		if (s == snap_src) {
			DMERR("Unable to resume snapshot source until handover completes.");
			r = -EINVAL;
		} else if (!dm_suspended(snap_src->ti)) {
			DMERR("Unable to perform snapshot handover until source is suspended.");
			r = -EINVAL;
		}
		up_read(&snap_src->lock);
	}
	up_read(&_origins_lock);

	return r;
}
static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
	struct dm_origin *o;
	struct mapped_device *origin_md = NULL;
	bool must_restart_merging = false;

	down_read(&_origins_lock);

	o = __lookup_dm_origin(s->origin->bdev);
	if (o)
		origin_md = dm_table_get_md(o->ti->table);
	if (!origin_md) {
		(void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
		if (snap_merging)
			origin_md = dm_table_get_md(snap_merging->ti->table);
	}
	if (origin_md == dm_table_get_md(ti->table))
		origin_md = NULL;
	if (origin_md) {
		if (dm_hold(origin_md))
			origin_md = NULL;
	}

	up_read(&_origins_lock);

	if (origin_md) {
		dm_internal_suspend_fast(origin_md);
		if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
			must_restart_merging = true;
			stop_merge(snap_merging);
		}
	}

	down_read(&_origins_lock);

	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_write(&snap_src->lock);
		down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
		__handover_exceptions(snap_src, snap_dest);
		up_write(&snap_dest->lock);
		up_write(&snap_src->lock);
	}

	up_read(&_origins_lock);

	if (origin_md) {
		if (must_restart_merging)
			start_merge(snap_merging);
		dm_internal_resume_fast(origin_md);
		dm_put(origin_md);
	}

	/* Now we have correct chunk size, reregister */
	reregister_snapshot(s);

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}
static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
{
	uint32_t min_chunksize;

	down_read(&_origins_lock);
	min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
	up_read(&_origins_lock);

	return min_chunksize;
}
static void snapshot_merge_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	/*
	 * Handover exceptions from existing snapshot.
	 */
	snapshot_resume(ti);

	/*
	 * snapshot-merge acts as an origin, so set ti->max_io_len
	 */
	ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);

	start_merge(s);
}
static void snapshot_status(struct dm_target *ti, status_type_t type,
			    unsigned int status_flags, char *result, unsigned int maxlen)
{
	unsigned int sz = 0;
	struct dm_snapshot *snap = ti->private;
	unsigned int num_features;

	switch (type) {
	case STATUSTYPE_INFO:

		down_write(&snap->lock);

		if (!snap->valid)
			DMEMIT("Invalid");
		else if (snap->merge_failed)
			DMEMIT("Merge failed");
		else if (snap->snapshot_overflowed)
			DMEMIT("Overflow");
		else {
			if (snap->store->type->usage) {
				sector_t total_sectors, sectors_allocated,
					 metadata_sectors;
				snap->store->type->usage(snap->store,
							 &total_sectors,
							 &sectors_allocated,
							 &metadata_sectors);
				DMEMIT("%llu/%llu %llu",
				       (unsigned long long)sectors_allocated,
				       (unsigned long long)total_sectors,
				       (unsigned long long)metadata_sectors);
			} else
				DMEMIT("Unknown");
		}

		up_write(&snap->lock);

		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
		sz += snap->store->type->status(snap->store, type, result + sz,
						maxlen - sz);
		num_features = snap->discard_zeroes_cow + snap->discard_passdown_origin;
		if (num_features) {
			DMEMIT(" %u", num_features);
			if (snap->discard_zeroes_cow)
				DMEMIT(" discard_zeroes_cow");
			if (snap->discard_passdown_origin)
				DMEMIT(" discard_passdown_origin");
		}
		break;

	case STATUSTYPE_IMA:
		DMEMIT_TARGET_NAME_VERSION(ti->type);
		DMEMIT(",snap_origin_name=%s", snap->origin->name);
		DMEMIT(",snap_cow_name=%s", snap->cow->name);
		DMEMIT(",snap_valid=%c", snap->valid ? 'y' : 'n');
		DMEMIT(",snap_merge_failed=%c", snap->merge_failed ? 'y' : 'n');
		DMEMIT(",snapshot_overflowed=%c", snap->snapshot_overflowed ? 'y' : 'n');
		DMEMIT(";");
		break;
	}
}
static int snapshot_iterate_devices(struct dm_target *ti,
				    iterate_devices_callout_fn fn, void *data)
{
	struct dm_snapshot *snap = ti->private;
	int r;

	r = fn(ti, snap->origin, 0, ti->len, data);

	if (!r)
		r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);

	return r;
}
*ti
, struct queue_limits
*limits
)
2400 struct dm_snapshot
*snap
= ti
->private;
2402 if (snap
->discard_zeroes_cow
) {
2403 struct dm_snapshot
*snap_src
= NULL
, *snap_dest
= NULL
;
2405 down_read(&_origins_lock
);
2407 (void) __find_snapshots_sharing_cow(snap
, &snap_src
, &snap_dest
, NULL
);
2408 if (snap_src
&& snap_dest
)
2411 /* All discards are split on chunk_size boundary */
2412 limits
->discard_granularity
= snap
->store
->chunk_size
;
2413 limits
->max_hw_discard_sectors
= snap
->store
->chunk_size
;
2415 up_read(&_origins_lock
);
/*
 *---------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------
 */

/*
 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 * supplied bio was ignored.  The caller may submit it immediately.
 * (No remapping actually occurs as the origin is always a direct linear
 * map.)
 *
 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
 * and any supplied bio is added to a list to be submitted once all
 * the necessary exceptions exist.
 */
static int __origin_write(struct list_head *snapshots, sector_t sector,
			  struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED;
	struct dm_snapshot *snap;
	struct dm_exception *e;
	struct dm_snap_pending_exception *pe, *pe2;
	struct dm_snap_pending_exception *pe_to_start_now = NULL;
	struct dm_snap_pending_exception *pe_to_start_last = NULL;
	struct dm_exception_table_lock lock;
	chunk_t chunk;

	/* Do all the snapshots on this origin */
	list_for_each_entry(snap, snapshots, list) {
		/*
		 * Don't make new exceptions in a merging snapshot
		 * because it has effectively been deleted
		 */
		if (dm_target_is_snapshot_merge(snap->ti))
			continue;

		/* Nothing to do if writing beyond end of snapshot */
		if (sector >= dm_table_get_size(snap->ti->table))
			continue;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, sector);
		dm_exception_table_lock_init(snap, chunk, &lock);

		down_read(&snap->lock);
		dm_exception_table_lock(&lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			/*
			 * Check exception table to see if block is already
			 * remapped in this snapshot and trigger an exception
			 * if not.
			 */
			e = dm_lookup_exception(&snap->complete, chunk);
			if (e)
				goto next_snapshot;

			dm_exception_table_unlock(&lock);
			pe = alloc_pending_exception(snap);
			dm_exception_table_lock(&lock);

			pe2 = __lookup_pending_exception(snap, chunk);

			if (!pe2) {
				e = dm_lookup_exception(&snap->complete, chunk);
				if (e) {
					free_pending_exception(pe);
					goto next_snapshot;
				}

				pe = __insert_pending_exception(snap, pe, chunk);
				if (!pe) {
					dm_exception_table_unlock(&lock);
					up_read(&snap->lock);

					invalidate_snapshot(snap, -ENOMEM);
					continue;
				}
			} else {
				free_pending_exception(pe);
				pe = pe2;
			}
		}

		r = DM_MAPIO_SUBMITTED;

		/*
		 * If an origin bio was supplied, queue it to wait for the
		 * completion of this exception, and start this one last,
		 * at the end of the function.
		 */
		if (bio) {
			bio_list_add(&pe->origin_bios, bio);
			bio = NULL;

			if (!pe->started) {
				pe->started = 1;
				pe_to_start_last = pe;
			}
		}

		if (!pe->started) {
			pe->started = 1;
			pe_to_start_now = pe;
		}

next_snapshot:
		dm_exception_table_unlock(&lock);
		up_read(&snap->lock);

		if (pe_to_start_now) {
			start_copy(pe_to_start_now);
			pe_to_start_now = NULL;
		}
	}

	/*
	 * Submit the exception against which the bio is queued last,
	 * to give the other exceptions a head start.
	 */
	if (pe_to_start_last)
		start_copy(pe_to_start_last);

	return r;
}
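/*
 * Illustrative sketch (not from the original source): a caller that owns
 * "bio" treats the return value roughly as follows, which is what
 * do_origin()/origin_map() below arrange through device-mapper:
 *
 *	r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
 *	if (r == DM_MAPIO_REMAPPED) {
 *		// no snapshot needed a new exception; the bio was not
 *		// queued and may be issued to the origin immediately
 *	} else {
 *		// DM_MAPIO_SUBMITTED: the bio now sits on a pending
 *		// exception's origin_bios list and is reissued after
 *		// the chunk copy-out completes
 *	}
 */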
/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

again:
	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o) {
		if (limit) {
			struct dm_snapshot *s;

			list_for_each_entry(s, &o->snapshots, list)
				if (unlikely(!wait_for_in_progress(s, true)))
					goto again;
		}

		r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
	}
	up_read(&_origins_lock);

	return r;
}
/*
 * Trigger exceptions in all non-merging snapshots.
 *
 * The chunk size of the merging snapshot may be larger than the chunk
 * size of some other snapshot so we may need to reallocate multiple
 * chunks in other snapshots.
 *
 * We scan all the overlapping exceptions in the other snapshots.
 * Returns 1 if anything was reallocated and must be waited for,
 * otherwise returns 0.
 *
 * size must be a multiple of merging_snap's chunk_size.
 */
static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned int size)
{
	int must_wait = 0;
	sector_t n;
	struct origin *o;

	/*
	 * The origin's __minimum_chunk_size() got stored in max_io_len
	 * by snapshot_merge_resume().
	 */
	down_read(&_origins_lock);
	o = __lookup_origin(merging_snap->origin->bdev);
	for (n = 0; n < size; n += merging_snap->ti->max_io_len)
		if (__origin_write(&o->snapshots, sector + n, NULL) ==
		    DM_MAPIO_SUBMITTED)
			must_wait = 1;
	up_read(&_origins_lock);

	return must_wait;
}
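/*
 * Worked example (illustrative numbers, not from the original source):
 * if the merging snapshot uses 32-sector chunks while another snapshot
 * of the same origin uses 8-sector chunks, snapshot_merge_resume() has
 * set ti->max_io_len to 8, so a 32-sector extent is pushed through
 * __origin_write() in four steps at sector offsets 0, 8, 16 and 24,
 * giving every non-merging snapshot a chance to realloc its own chunks.
 */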
/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_origin *o;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	o = kmalloc(sizeof(struct dm_origin), GFP_KERNEL);
	if (!o) {
		ti->error = "Cannot allocate private origin structure";
		r = -ENOMEM;
		goto bad_alloc;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev);
	if (r) {
		ti->error = "Cannot get target device";
		goto bad_open;
	}

	o->ti = ti;
	ti->private = o;
	ti->num_flush_bios = 1;

	return 0;

bad_open:
	kfree(o);
bad_alloc:
	return r;
}
static void origin_dtr(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	dm_put_device(ti, o->dev);
	kfree(o);
}
static int origin_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_origin *o = ti->private;
	unsigned int available_sectors;

	bio_set_dev(bio, o->dev->bdev);

	if (unlikely(bio->bi_opf & REQ_PREFLUSH))
		return DM_MAPIO_REMAPPED;

	if (bio_data_dir(bio) != WRITE)
		return DM_MAPIO_REMAPPED;

	available_sectors = o->split_boundary -
		((unsigned int)bio->bi_iter.bi_sector & (o->split_boundary - 1));

	if (bio_sectors(bio) > available_sectors)
		dm_accept_partial_bio(bio, available_sectors);

	/* Only tell snapshots if this is a write */
	return do_origin(o->dev, bio, true);
}
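/*
 * Illustrative sketch (assumes split_boundary is a power of two, which
 * holds because snapshot chunk sizes are): with split_boundary == 16 and
 * bi_sector == 21, the offset into the current chunk is 21 & 15 == 5, so
 * available_sectors == 11; a 20-sector bio is therefore trimmed to 11
 * sectors here and the remainder is resubmitted by device-mapper.
 */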
/*
 * Set the target "max_io_len" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);

	down_write(&_origins_lock);
	__insert_dm_origin(o);
	up_write(&_origins_lock);
}
static void origin_postsuspend(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	down_write(&_origins_lock);
	__remove_dm_origin(o);
	up_write(&_origins_lock);
}
static void origin_status(struct dm_target *ti, status_type_t type,
			  unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct dm_origin *o = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", o->dev->name);
		break;

	case STATUSTYPE_IMA:
		result[0] = '\0';
		break;
	}
}
static int origin_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_origin *o = ti->private;

	return fn(ti, o->dev, 0, ti->len, data);
}
static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 9, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.postsuspend = origin_postsuspend,
	.status  = origin_status,
	.iterate_devices = origin_iterate_devices,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 16, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.preresume = snapshot_preresume,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
	.io_hints = snapshot_io_hints,
};

static struct target_type merge_target = {
	.name    = dm_snapshot_merge_target_name,
	.version = {1, 5, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_merge_map,
	.end_io  = snapshot_end_io,
	.presuspend = snapshot_merge_presuspend,
	.preresume = snapshot_preresume,
	.resume  = snapshot_merge_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
	.io_hints = snapshot_io_hints,
};
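/*
 * Illustrative usage (not part of the original source; table formats as
 * described in Documentation/admin-guide/device-mapper/snapshot.rst):
 *
 *	snapshot-origin <origin>
 *	snapshot        <origin> <COW device> <persistent?> <chunksize>
 *	snapshot-merge  <origin> <COW device> <persistent?> <chunksize>
 *
 * e.g. for a hypothetical 8 MiB origin with a persistent COW device and
 * 16-sector chunks:
 *
 *	dmsetup create base  --table "0 16384 snapshot-origin /dev/vg/base"
 *	dmsetup create snap1 --table "0 16384 snapshot /dev/vg/base /dev/vg/cow1 P 16"
 */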
static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad_origin_hash;
	}

	exception_cache = KMEM_CACHE(dm_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad_exception_cache;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad_pending_cache;
	}

	r = dm_register_target(&snapshot_target);
	if (r < 0)
		goto bad_register_snapshot_target;

	r = dm_register_target(&origin_target);
	if (r < 0)
		goto bad_register_origin_target;

	r = dm_register_target(&merge_target);
	if (r < 0)
		goto bad_register_merge_target;

	return 0;

bad_register_merge_target:
	dm_unregister_target(&origin_target);
bad_register_origin_target:
	dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
	kmem_cache_destroy(pending_cache);
bad_pending_cache:
	kmem_cache_destroy(exception_cache);
bad_exception_cache:
	exit_origin_hash();
bad_origin_hash:
	dm_exception_store_exit();

	return r;
}
static void __exit dm_snapshot_exit(void)
{
	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);
	dm_unregister_target(&merge_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);

	dm_exception_store_exit();
}
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
MODULE_ALIAS("dm-snapshot-origin");
MODULE_ALIAS("dm-snapshot-merge");