/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */
#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"
static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
	((ti)->type->name == dm_snapshot_merge_target_name)
/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256
#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))
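/*
 * DM_TRACKED_CHUNK_HASH() keeps only the low four bits of the chunk
 * number, i.e. the bucket is the chunk modulo 16, so for example
 * chunks 5, 21 and 37 all share bucket 5.  This is cheap and adequate
 * because only the reads in flight at any instant are hashed here.
 */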
struct dm_exception_table {
	uint32_t hash_mask;
	unsigned hash_shift;
	struct list_head *table;
};
struct dm_snapshot {
	struct rw_semaphore lock;

	struct dm_dev *origin;
	struct dm_dev *cow;

	struct dm_target *ti;

	/* List of snapshots per Origin */
	struct list_head list;

	/*
	 * You can't use a snapshot if this is 0 (e.g. if full).
	 * A snapshot-merge target never clears this.
	 */
	int valid;

	/*
	 * The snapshot overflowed because of a write to the snapshot device.
	 * We don't have to invalidate the snapshot in this case, but we need
	 * to prevent further writes.
	 */
	int snapshot_overflowed;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	atomic_t pending_exceptions_count;

	/* Protected by "lock" */
	sector_t exception_start_sequence;

	/* Protected by kcopyd single-threaded callback */
	sector_t exception_complete_sequence;

	/*
	 * A list of pending exceptions that completed out of order.
	 * Protected by kcopyd single-threaded callback.
	 */
	struct list_head out_of_order_list;

	mempool_t *pending_pool;

	struct dm_exception_table pending;
	struct dm_exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* Chunks with outstanding reads */
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

	/* The on disk metadata handler */
	struct dm_exception_store *store;

	struct dm_kcopyd_client *kcopyd_client;

	/* Wait for events based on state_bits */
	unsigned long state_bits;

	/* Range of chunks currently being merged. */
	chunk_t first_merging_chunk;
	int num_merging_chunks;

	/*
	 * The merge operation failed if this flag is set.
	 * Failure modes are handled as follows:
	 * - I/O error reading the header
	 *	=> don't load the target; abort.
	 * - Header does not have "valid" flag set
	 *	=> use the origin; forget about the snapshot.
	 * - I/O error when reading exceptions
	 *	=> don't load the target; abort.
	 *   (We can't use the intermediate origin state.)
	 * - I/O error while merging
	 *	=> stop merging; set merge_failed; process I/O normally.
	 */
	int merge_failed;

	/*
	 * Incoming bios that overlap with chunks being merged must wait
	 * for them to be committed.
	 */
	struct bio_list bios_queued_during_merge;
};
/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE          0
#define SHUTDOWN_MERGE         1
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");
struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
	return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);
struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
	return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);
static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}
static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}
struct dm_snap_pending_exception {
	struct dm_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;

	/* There was copying error. */
	int copy_error;

	/* A sequence number, it is used for in-order completion. */
	sector_t exception_sequence;

	struct list_head out_of_order_entry;

	/*
	 * For writing a complete chunk, bypassing the copy.
	 */
	struct bio *full_bio;
	bio_end_io_t *full_bio_end_io;
};
/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;
struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};
static void init_tracked_chunk(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	INIT_HLIST_NODE(&c->node);
}
static bool is_bio_tracked(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	return !hlist_unhashed(&c->node);
}
static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	c->chunk = chunk;

	spin_lock_irq(&s->tracked_chunk_lock);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irq(&s->tracked_chunk_lock);
}
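/*
 * Note: reads remapped straight to the origin are recorded in this
 * per-bio hash only while they are in flight, so that the merge path
 * can wait for them (via __check_for_conflicting_io() below) before a
 * chunk is copied over.
 */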
static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
}
static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}
/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
	while (__chunk_is_tracked(s, chunk))
		msleep(1);
}
/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};
/*
 * This structure is allocated for each origin target
 */
struct dm_origin {
	struct dm_dev *dev;
	struct dm_target *ti;
	unsigned split_boundary;
	struct list_head hash_list;
};
/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct list_head *_dm_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;
static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory for _origins");
		return -ENOMEM;
	}
	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);

	_dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			      GFP_KERNEL);
	if (!_dm_origins) {
		DMERR("unable to allocate memory for _dm_origins");
		kfree(_origins);
		return -ENOMEM;
	}
	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_dm_origins + i);

	init_rwsem(&_origins_lock);

	return 0;
}
static void exit_origin_hash(void)
{
	kfree(_origins);
	kfree(_dm_origins);
}
static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}
static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}
static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];

	list_add_tail(&o->hash_list, sl);
}
static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct dm_origin *o;

	ol = &_dm_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->dev->bdev, origin))
			return o;

	return NULL;
}
static void __insert_dm_origin(struct dm_origin *o)
{
	struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];

	list_add_tail(&o->hash_list, sl);
}
static void __remove_dm_origin(struct dm_origin *o)
{
	list_del(&o->hash_list);
}
/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 *   There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
					struct dm_snapshot **snap_src,
					struct dm_snapshot **snap_dest,
					struct dm_snapshot **snap_merge)
{
	struct dm_snapshot *s;
	struct origin *o;
	int count = 0;
	int active;

	o = __lookup_origin(snap->origin->bdev);
	if (!o)
		goto out;

	list_for_each_entry(s, &o->snapshots, list) {
		if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
			*snap_merge = s;
		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
			continue;

		down_read(&s->lock);
		active = s->active;
		up_read(&s->lock);

		if (active) {
			if (snap_src)
				*snap_src = s;
		} else if (snap_dest)
			*snap_dest = s;

		count++;
	}

out:
	return count;
}
/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
	struct dm_snapshot *snap_merge = NULL;

	/* Does snapshot need exceptions handed over to it? */
	if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
					  &snap_merge) == 2) ||
	    snap_dest) {
		snap->ti->error = "Snapshot cow pairing for exception "
				  "table handover failed";
		return -EINVAL;
	}

	/*
	 * If no snap_src was found, snap cannot become a handover
	 * destination.
	 */
	if (!snap_src)
		return 0;

	/*
	 * Non-snapshot-merge handover?
	 */
	if (!dm_target_is_snapshot_merge(snap->ti))
		return 1;

	/*
	 * Do not allow more than one merging snapshot.
	 */
	if (snap_merge) {
		snap->ti->error = "A snapshot is already merging.";
		return -EINVAL;
	}

	if (!snap_src->store->type->prepare_merge ||
	    !snap_src->store->type->commit_merge) {
		snap->ti->error = "Snapshot exception store does not "
				  "support snapshot-merge.";
		return -EINVAL;
	}

	return 1;
}
static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
	struct dm_snapshot *l;

	/* Sort the list according to chunk size, largest-first smallest-last */
	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < s->store->chunk_size)
			break;
	list_add_tail(&s->list, &l->list);
}
/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o = NULL;
	struct block_device *bdev = snap->origin->bdev;
	int r = 0;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);

	r = __validate_exception_handover(snap);
	if (r < 0) {
		kfree(new_o);
		goto out;
	}

	o = __lookup_origin(bdev);
	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;
		__insert_origin(o);
	}

	__insert_snapshot(o, snap);

out:
	up_write(&_origins_lock);

	return r;
}
/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
	struct block_device *bdev = s->origin->bdev;

	down_write(&_origins_lock);

	list_del(&s->list);
	__insert_snapshot(__lookup_origin(bdev), s);

	up_write(&_origins_lock);
}
static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (o && list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}
/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
				   uint32_t size, unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}
static void dm_exception_table_exit(struct dm_exception_table *et,
				    struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}
static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}
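/*
 * Because the lowest hash_shift bits are discarded, chunks that differ
 * only in those bits share a bucket: e.g. with a shift of 8, chunks
 * 0..255 all hash to slot 0.  That locality is what lets
 * dm_insert_exception() below coalesce runs of consecutive chunks into
 * a single dm_exception.
 */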
static void dm_remove_exception(struct dm_exception *e)
{
	list_del(&e->hash_list);
}
/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
						chunk_t chunk)
{
	struct list_head *slot;
	struct dm_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}
static struct dm_exception *alloc_completed_exception(gfp_t gfp)
{
	struct dm_exception *e;

	e = kmem_cache_alloc(exception_cache, gfp);
	if (!e && gfp == GFP_NOIO)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}
static void free_completed_exception(struct dm_exception *e)
{
	kmem_cache_free(exception_cache, e);
}
static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}
static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic();
	atomic_dec(&s->pending_exceptions_count);
}
static void dm_insert_exception(struct dm_exception_table *eh,
				struct dm_exception *new_e)
{
	struct list_head *l;
	struct dm_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_completed_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_completed_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}
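/*
 * Worked example of the coalescing above: if an exception already maps
 * old chunks 10-12 to new chunks 20-22 (consecutive count 2), a new
 * exception 13 -> 23 is absorbed by bumping the count to 3, and a new
 * exception 9 -> 19 shifts the start of the run down instead of
 * allocating a second entry.
 */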
/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_snapshot *s = context;
	struct dm_exception *e;

	e = alloc_completed_exception(GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	dm_insert_exception(&s->complete, e);

	return 0;
}
/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static uint32_t __minimum_chunk_size(struct origin *o)
{
	struct dm_snapshot *snap;
	unsigned chunk_size = 0;

	if (o)
		list_for_each_entry(snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);

	return (uint32_t) chunk_size;
}
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;

	mem /= sizeof(struct list_head);

	return mem;
}
/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	max_buckets = calc_max_buckets();

	hash_size = cow_dev_size >> s->store->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	if (hash_size < 64)
		hash_size = 64;
	hash_size = rounddown_pow_of_two(hash_size);
	if (dm_exception_table_init(&s->complete, hash_size,
				    DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (dm_exception_table_init(&s->pending, hash_size, 0)) {
		dm_exception_table_exit(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}
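/*
 * Sizing example (assuming a 64-bit kernel with 16-byte list_heads): a
 * 1 GiB COW device with 8 KiB chunks yields 131072 chunks, which is
 * exactly the calc_max_buckets() cap of 2 MiB / 16, so the completed
 * table gets 131072 buckets and the pending table an eighth of that.
 */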
static void merge_shutdown(struct dm_snapshot *s)
{
	clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
	smp_mb__after_atomic();
	wake_up_bit(&s->state_bits, RUNNING_MERGE);
}
static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;

	return bio_list_get(&s->bios_queued_during_merge);
}
/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
					   chunk_t old_chunk)
{
	struct dm_exception *e;

	e = dm_lookup_exception(&s->complete, old_chunk);
	if (!e) {
		DMERR("Corruption detected: exception for block %llu is "
		      "on disk but not in memory",
		      (unsigned long long)old_chunk);
		return -EINVAL;
	}

	/*
	 * If this is the only chunk using this exception, remove exception.
	 */
	if (!dm_consecutive_chunk_count(e)) {
		dm_remove_exception(e);
		free_completed_exception(e);
		return 0;
	}

	/*
	 * The chunk may be either at the beginning or the end of a
	 * group of consecutive chunks - never in the middle.  We are
	 * removing chunks in the opposite order to that in which they
	 * were added, so this should always be true.
	 * Decrement the consecutive chunk counter and adjust the
	 * starting point if necessary.
	 */
	if (old_chunk == e->old_chunk) {
		e->old_chunk++;
		e->new_chunk++;
	} else if (old_chunk != e->old_chunk +
		   dm_consecutive_chunk_count(e)) {
		DMERR("Attempt to merge block %llu from the "
		      "middle of a chunk range [%llu - %llu]",
		      (unsigned long long)old_chunk,
		      (unsigned long long)e->old_chunk,
		      (unsigned long long)
		      e->old_chunk + dm_consecutive_chunk_count(e));
		return -EINVAL;
	}

	dm_consecutive_chunk_count_dec(e);

	return 0;
}
static void flush_bios(struct bio *bio);
static int remove_single_exception_chunk(struct dm_snapshot *s)
{
	struct bio *b = NULL;
	int r;
	chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

	down_write(&s->lock);

	/*
	 * Process chunks (and associated exceptions) in reverse order
	 * so that dm_consecutive_chunk_count_dec() accounting works.
	 */
	do {
		r = __remove_single_exception_chunk(s, old_chunk);
		if (r)
			goto out;
	} while (old_chunk-- > s->first_merging_chunk);

	b = __release_queued_bios_after_merge(s);

out:
	up_write(&s->lock);
	if (b)
		flush_bios(b);

	return r;
}
*merging_snap
,
935 sector_t sector
, unsigned chunk_size
);
937 static void merge_callback(int read_err
, unsigned long write_err
,
static uint64_t read_pending_exceptions_done_count(void)
{
	uint64_t pending_exceptions_done;

	spin_lock(&_pending_exceptions_done_spinlock);
	pending_exceptions_done = _pending_exceptions_done_count;
	spin_unlock(&_pending_exceptions_done_spinlock);

	return pending_exceptions_done;
}
static void increment_pending_exceptions_done_count(void)
{
	spin_lock(&_pending_exceptions_done_spinlock);
	_pending_exceptions_done_count++;
	spin_unlock(&_pending_exceptions_done_spinlock);

	wake_up_all(&_pending_exceptions_done);
}
static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
	int i, linear_chunks;
	chunk_t old_chunk, new_chunk;
	struct dm_io_region src, dest;
	sector_t io_size;
	uint64_t previous_count;

	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
	if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
		goto shut;

	/*
	 * valid flag never changes during merge, so no lock required.
	 */
	if (!s->valid) {
		DMERR("Snapshot is invalid: can't merge");
		goto shut;
	}

	linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
						      &new_chunk);
	if (linear_chunks <= 0) {
		if (linear_chunks < 0) {
			DMERR("Read error in exception store: "
			      "shutting down merge");
			down_write(&s->lock);
			s->merge_failed = 1;
			up_write(&s->lock);
		}
		goto shut;
	}

	/* Adjust old_chunk and new_chunk to reflect start of linear region */
	old_chunk = old_chunk + 1 - linear_chunks;
	new_chunk = new_chunk + 1 - linear_chunks;

	/*
	 * Use one (potentially large) I/O to copy all 'linear_chunks'
	 * from the exception store to the origin
	 */
	io_size = linear_chunks * s->store->chunk_size;

	dest.bdev = s->origin->bdev;
	dest.sector = chunk_to_sector(s->store, old_chunk);
	dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

	src.bdev = s->cow->bdev;
	src.sector = chunk_to_sector(s->store, new_chunk);
	src.count = dest.count;

	/*
	 * Reallocate any exceptions needed in other snapshots then
	 * wait for the pending exceptions to complete.
	 * Each time any pending exception (globally on the system)
	 * completes we are woken and repeat the process to find out
	 * if we can proceed.  While this may not seem a particularly
	 * efficient algorithm, it is not expected to have any
	 * significant impact on performance.
	 */
	previous_count = read_pending_exceptions_done_count();
	while (origin_write_extent(s, dest.sector, io_size)) {
		wait_event(_pending_exceptions_done,
			   (read_pending_exceptions_done_count() !=
			    previous_count));
		/* Retry after the wait, until all exceptions are done. */
		previous_count = read_pending_exceptions_done_count();
	}

	down_write(&s->lock);
	s->first_merging_chunk = old_chunk;
	s->num_merging_chunks = linear_chunks;
	up_write(&s->lock);

	/* Wait until writes to all 'linear_chunks' drain */
	for (i = 0; i < linear_chunks; i++)
		__check_for_conflicting_io(s, old_chunk + i);

	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
	return;

shut:
	merge_shutdown(s);
}
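/*
 * One merge pass, in short: ask the exception store for the longest
 * run of chunks that is linear on both devices, reallocate those
 * chunks in every other snapshot of the same origin, wait for
 * in-flight reads on them to drain, then issue a single kcopyd copy
 * back towards the origin.  merge_callback() below commits the result
 * and starts the next pass.
 */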
static void error_bios(struct bio *bio);
static void merge_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snapshot *s = context;
	struct bio *b = NULL;

	if (read_err || write_err) {
		if (read_err)
			DMERR("Read error: shutting down merge.");
		else
			DMERR("Write error: shutting down merge.");
		goto shut;
	}

	if (s->store->type->commit_merge(s->store,
					 s->num_merging_chunks) < 0) {
		DMERR("Write error in exception store: shutting down merge");
		goto shut;
	}

	if (remove_single_exception_chunk(s) < 0)
		goto shut;

	snapshot_merge_next_chunks(s);

	return;

shut:
	down_write(&s->lock);
	s->merge_failed = 1;
	b = __release_queued_bios_after_merge(s);
	up_write(&s->lock);
	error_bios(b);

	merge_shutdown(s);
}
static void start_merge(struct dm_snapshot *s)
{
	if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
		snapshot_merge_next_chunks(s);
}
/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
	set_bit(SHUTDOWN_MERGE, &s->state_bits);
	wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE);
	clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}
/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p|po|n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char *origin_path, *cow_path;
	dev_t origin_dev, cow_dev;
	unsigned args_used, num_flush_bios = 1;
	fmode_t origin_mode = FMODE_READ;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad;
	}

	if (dm_target_is_snapshot_merge(ti)) {
		num_flush_bios = 2;
		origin_mode = FMODE_WRITE;
	}

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s) {
		ti->error = "Cannot allocate private snapshot structure";
		r = -ENOMEM;
		goto bad;
	}

	origin_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad_origin;
	}
	origin_dev = s->origin->bdev->bd_dev;

	cow_path = argv[0];
	argv++;
	argc--;

	cow_dev = dm_get_dev_t(cow_path);
	if (cow_dev && cow_dev == origin_dev) {
		ti->error = "COW device cannot be the same as origin device";
		r = -EINVAL;
		goto bad_cow;
	}

	r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
	if (r) {
		ti->error = "Cannot get COW device";
		goto bad_cow;
	}

	r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad_store;
	}

	argv += args_used;
	argc -= args_used;

	s->ti = ti;
	s->valid = 1;
	s->snapshot_overflowed = 0;
	s->active = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	s->exception_start_sequence = 0;
	s->exception_complete_sequence = 0;
	INIT_LIST_HEAD(&s->out_of_order_list);
	init_rwsem(&s->lock);
	INIT_LIST_HEAD(&s->list);
	spin_lock_init(&s->pe_lock);
	s->state_bits = 0;
	s->merge_failed = 0;
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;
	bio_list_init(&s->bios_queued_during_merge);

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad_hash_tables;
	}

	s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(s->kcopyd_client)) {
		r = PTR_ERR(s->kcopyd_client);
		ti->error = "Could not create kcopyd client";
		goto bad_kcopyd;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	ti->private = s;
	ti->num_flush_bios = num_flush_bios;
	ti->per_io_data_size = sizeof(struct dm_snap_tracked_chunk);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	r = register_snapshot(s);
	if (r == -ENOMEM) {
		ti->error = "Snapshot origin struct allocation failed";
		goto bad_load_and_register;
	} else if (r < 0) {
		/* invalid handover, register_snapshot has set ti->error */
		goto bad_load_and_register;
	}

	/*
	 * Metadata must only be loaded into one table at once, so skip this
	 * if metadata will be handed over during resume.
	 * Chunk size will be set during the handover - set it to zero to
	 * ensure it's ignored.
	 */
	if (r > 0) {
		s->store->chunk_size = 0;
		return 0;
	}

	r = s->store->type->read_metadata(s->store, dm_add_exception,
					  (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_read_metadata;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	if (!s->store->chunk_size) {
		ti->error = "Chunk size not set";
		goto bad_read_metadata;
	}

	r = dm_set_target_max_io_len(ti, s->store->chunk_size);
	if (r)
		goto bad_read_metadata;

	return 0;

bad_read_metadata:
	unregister_snapshot(s);

bad_load_and_register:
	mempool_destroy(s->pending_pool);

bad_pending_pool:
	dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
	dm_exception_store_destroy(s->store);

bad_store:
	dm_put_device(ti, s->cow);

bad_cow:
	dm_put_device(ti, s->origin);

bad_origin:
	kfree(s);

bad:
	return r;
}
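/*
 * For illustration (values are only an example): a dmsetup table line
 * such as "0 2097152 snapshot /dev/vg/base /dev/vg/cow P 8" reaches
 * this constructor with argv = { "/dev/vg/base", "/dev/vg/cow", "P",
 * "8" }; the "P 8" pair is consumed by dm_exception_store_create()
 * and reported back through args_used.
 */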
static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);
}
static void __handover_exceptions(struct dm_snapshot *snap_src,
				  struct dm_snapshot *snap_dest)
{
	union {
		struct dm_exception_table table_swap;
		struct dm_exception_store *store_swap;
	} u;

	/*
	 * Swap all snapshot context information between the two instances.
	 */
	u.table_swap = snap_dest->complete;
	snap_dest->complete = snap_src->complete;
	snap_src->complete = u.table_swap;

	u.store_swap = snap_dest->store;
	snap_dest->store = snap_src->store;
	snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow;
	snap_src->store = u.store_swap;

	snap_dest->store->snap = snap_dest;
	snap_src->store->snap = snap_src;

	snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
	snap_dest->valid = snap_src->valid;
	snap_dest->snapshot_overflowed = snap_src->snapshot_overflowed;

	/*
	 * Set source invalid to ensure it receives no further I/O.
	 */
	snap_src->valid = 0;
}
static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	/* Check whether exception handover must be cancelled */
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest && (s == snap_src)) {
		down_write(&snap_dest->lock);
		snap_dest->valid = 0;
		up_write(&snap_dest->lock);
		DMERR("Cancelling snapshot handover.");
	}
	up_read(&_origins_lock);

	if (dm_target_is_snapshot_merge(ti))
		stop_merge(s);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_exception_store_destroy(s->store);

	dm_put_device(ti, s->cow);

	dm_put_device(ti, s->origin);

	kfree(s);
}
/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}
static int do_origin(struct dm_dev *origin, struct bio *bio);
/*
 * Flush a list of buffers.
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
	struct bio *n;
	int r;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		r = do_origin(s->origin, bio);
		if (r == DM_MAPIO_REMAPPED)
			generic_make_request(bio);
		bio = n;
	}
}
/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}
static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store->type->drop_snapshot)
		s->store->type->drop_snapshot(s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}
static void pending_complete(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	struct bio *full_bio = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_completed_exception(GFP_NOIO);
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_completed_exception(e);
		error = 1;
		goto out;
	}

	/* Check for conflicting reads */
	__check_for_conflicting_io(s, pe->e.old_chunk);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	dm_insert_exception(&s->complete, e);

out:
	dm_remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = bio_list_get(&pe->origin_bios);
	full_bio = pe->full_bio;
	if (full_bio)
		full_bio->bi_end_io = pe->full_bio_end_io;
	increment_pending_exceptions_done_count();

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error) {
		if (full_bio)
			bio_io_error(full_bio);
		error_bios(snapshot_bios);
	} else {
		if (full_bio)
			bio_endio(full_bio);
		flush_bios(snapshot_bios);
	}

	retry_origin_bios(s, origin_bios);

	free_pending_exception(pe);
}
static void complete_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	/* Update the metadata if we are persistent */
	s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
					 pending_complete, pe);
}
/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	pe->copy_error = read_err || write_err;

	if (pe->exception_sequence == s->exception_complete_sequence) {
		s->exception_complete_sequence++;
		complete_exception(pe);

		while (!list_empty(&s->out_of_order_list)) {
			pe = list_entry(s->out_of_order_list.next,
					struct dm_snap_pending_exception, out_of_order_entry);
			if (pe->exception_sequence != s->exception_complete_sequence)
				break;
			s->exception_complete_sequence++;
			list_del(&pe->out_of_order_entry);
			complete_exception(pe);
		}
	} else {
		struct list_head *lh;
		struct dm_snap_pending_exception *pe2;

		list_for_each_prev(lh, &s->out_of_order_list) {
			pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry);
			if (pe2->exception_sequence < pe->exception_sequence)
				break;
		}
		list_add(&pe->out_of_order_entry, lh);
	}
}
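/*
 * Completion ordering in a nutshell: every pending exception is given
 * a sequence number at allocation time (exception_start_sequence) and
 * must be committed in that order.  kcopyd may finish the copies out
 * of order, so early finishers park on out_of_order_list (kept sorted
 * by sequence) until the exception they are waiting on completes.
 */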
/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}
static void full_bio_end_io(struct bio *bio)
{
	void *callback_data = bio->bi_private;

	dm_kcopyd_do_callback(callback_data, 0, bio->bi_error ? 1 : 0);
}
static void start_full_bio(struct dm_snap_pending_exception *pe,
			   struct bio *bio)
{
	struct dm_snapshot *s = pe->snap;
	void *callback_data;

	pe->full_bio = bio;
	pe->full_bio_end_io = bio->bi_end_io;

	callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
						   copy_callback, pe);

	bio->bi_end_io = full_bio_end_io;
	bio->bi_private = callback_data;

	generic_make_request(bio);
}
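/*
 * The full-bio path is an optimisation: when a write covers an entire
 * chunk (checked in snapshot_map() below) the origin data would be
 * completely overwritten anyway, so the bio itself is pointed at the
 * COW device and wired into kcopyd's callback machinery instead of
 * performing a separate chunk copy.
 */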
static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}
/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	struct dm_snap_pending_exception *pe2;

	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		free_pending_exception(pe);
		return pe2;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->started = 0;
	pe->full_bio = NULL;

	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	pe->exception_sequence = s->exception_start_sequence++;

	dm_insert_exception(&s->pending, &pe->e);

	return pe;
}
static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_iter.bi_sector =
		chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
				(chunk - e->old_chunk)) +
		(bio->bi_iter.bi_sector & s->store->chunk_mask);
}
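/*
 * Remap arithmetic, by example: with 8-sector chunks, a bio at sector
 * 35 lies in chunk 4 at offset 3.  If the exception maps old chunk 4
 * to new chunk 9, the bio is redirected to the COW device at sector
 * 9 * 8 + 3 = 75.  The "chunk - e->old_chunk" term handles lookups
 * that land inside a run of consecutive chunks.
 */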
static int snapshot_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	init_tracked_chunk(bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		bio->bi_bdev = s->cow->bdev;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid || (unlikely(s->snapshot_overflowed) &&
	    bio_data_dir(bio) == WRITE)) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_data_dir(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			up_write(&s->lock);
			pe = alloc_pending_exception(s);
			down_write(&s->lock);

			if (!s->valid || s->snapshot_overflowed) {
				free_pending_exception(pe);
				r = -EIO;
				goto out_unlock;
			}

			e = dm_lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				if (s->store->userspace_supports_overflow) {
					s->snapshot_overflowed = 1;
					DMERR("Snapshot overflowed: Unable to allocate exception.");
				} else
					__invalidate_snapshot(s, -ENOMEM);
				r = -EIO;
				goto out_unlock;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started &&
		    bio->bi_iter.bi_size ==
		    (s->store->chunk_size << SECTOR_SHIFT)) {
			pe->started = 1;
			up_write(&s->lock);
			start_full_bio(pe, bio);
			goto out;
		}

		bio_list_add(&pe->snapshot_bios, bio);

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		track_chunk(s, bio, chunk);
	}

out_unlock:
	up_write(&s->lock);
out:
	return r;
}
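/*
 * Note the drop/retake of s->lock around alloc_pending_exception()
 * above: mempool_alloc() may sleep, so the lock is released for the
 * allocation and every condition (validity, overflow, a completed or
 * pending exception appearing meanwhile) is rechecked once it is
 * retaken.
 */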
/*
 * A snapshot-merge target behaves like a combination of a snapshot
 * target and a snapshot-origin target.  It only generates new
 * exceptions in other snapshots and not in the one that is being
 * merged.
 *
 * For each chunk, if there is an existing exception, it is used to
 * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
 * which in turn might generate exceptions in other snapshots.
 * If merging is currently taking place on the chunk in question, the
 * I/O is deferred by adding it to s->bios_queued_during_merge.
 */
static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;

	init_tracked_chunk(bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		if (!dm_bio_get_target_bio_nr(bio))
			bio->bi_bdev = s->origin->bdev;
		else
			bio->bi_bdev = s->cow->bdev;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);

	down_write(&s->lock);

	/* Full merging snapshots are redirected to the origin */
	if (!s->valid)
		goto redirect_to_origin;

	/* If the block is already remapped - use that */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		/* Queue writes overlapping with chunks being merged */
		if (bio_data_dir(bio) == WRITE &&
		    chunk >= s->first_merging_chunk &&
		    chunk < (s->first_merging_chunk +
			     s->num_merging_chunks)) {
			bio->bi_bdev = s->origin->bdev;
			bio_list_add(&s->bios_queued_during_merge, bio);
			r = DM_MAPIO_SUBMITTED;
			goto out_unlock;
		}

		remap_exception(s, e, bio, chunk);

		if (bio_data_dir(bio) == WRITE)
			track_chunk(s, bio, chunk);
		goto out_unlock;
	}

redirect_to_origin:
	bio->bi_bdev = s->origin->bdev;

	if (bio_data_dir(bio) == WRITE) {
		up_write(&s->lock);
		return do_origin(s->origin, bio);
	}

out_unlock:
	up_write(&s->lock);

	return r;
}
static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	struct dm_snapshot *s = ti->private;

	if (is_bio_tracked(bio))
		stop_tracking_chunk(s, bio);

	return 0;
}
static void snapshot_merge_presuspend(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	stop_merge(s);
}
static int snapshot_preresume(struct dm_target *ti)
{
	int r = 0;
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_read(&snap_src->lock);
		if (s == snap_src) {
			DMERR("Unable to resume snapshot source until "
			      "handover completes.");
			r = -EINVAL;
		} else if (!dm_suspended(snap_src->ti)) {
			DMERR("Unable to perform snapshot handover until "
			      "source is suspended.");
			r = -EINVAL;
		}
		up_read(&snap_src->lock);
	}
	up_read(&_origins_lock);

	return r;
}
static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
	struct dm_origin *o;
	struct mapped_device *origin_md = NULL;
	bool must_restart_merging = false;

	down_read(&_origins_lock);

	o = __lookup_dm_origin(s->origin->bdev);
	if (o)
		origin_md = dm_table_get_md(o->ti->table);
	if (!origin_md) {
		(void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
		if (snap_merging)
			origin_md = dm_table_get_md(snap_merging->ti->table);
	}
	if (origin_md == dm_table_get_md(ti->table))
		origin_md = NULL;
	if (origin_md) {
		if (dm_hold(origin_md))
			origin_md = NULL;
	}

	up_read(&_origins_lock);

	if (origin_md) {
		dm_internal_suspend_fast(origin_md);
		if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
			must_restart_merging = true;
			stop_merge(snap_merging);
		}
	}

	down_read(&_origins_lock);

	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_write(&snap_src->lock);
		down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
		__handover_exceptions(snap_src, snap_dest);
		up_write(&snap_dest->lock);
		up_write(&snap_src->lock);
	}

	up_read(&_origins_lock);

	if (origin_md) {
		if (must_restart_merging)
			start_merge(snap_merging);
		dm_internal_resume_fast(origin_md);
		dm_put(origin_md);
	}

	/* Now we have correct chunk size, reregister */
	reregister_snapshot(s);

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}
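/*
 * Resume-time sequencing: any origin (or merging snapshot) that could
 * issue I/O is internally suspended first, the exception table and
 * store are swapped under both snapshot locks, and only then is the
 * origin resumed and this snapshot marked active.
 */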
static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
{
	uint32_t min_chunksize;

	down_read(&_origins_lock);
	min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
	up_read(&_origins_lock);

	return min_chunksize;
}
static void snapshot_merge_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	/*
	 * Handover exceptions from existing snapshot.
	 */
	snapshot_resume(ti);

	/*
	 * snapshot-merge acts as an origin, so set ti->max_io_len
	 */
	ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);

	start_merge(s);
}
static void snapshot_status(struct dm_target *ti, status_type_t type,
			    unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:

		down_write(&snap->lock);

		if (!snap->valid)
			DMEMIT("Invalid");
		else if (snap->merge_failed)
			DMEMIT("Merge failed");
		else if (snap->snapshot_overflowed)
			DMEMIT("Overflow");
		else {
			if (snap->store->type->usage) {
				sector_t total_sectors, sectors_allocated,
					 metadata_sectors;
				snap->store->type->usage(snap->store,
							 &total_sectors,
							 &sectors_allocated,
							 &metadata_sectors);
				DMEMIT("%llu/%llu %llu",
				       (unsigned long long)sectors_allocated,
				       (unsigned long long)total_sectors,
				       (unsigned long long)metadata_sectors);
			}
			else
				DMEMIT("Unknown");
		}

		up_write(&snap->lock);

		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
		snap->store->type->status(snap->store, type, result + sz,
					  maxlen - sz);
		break;
	}
}
static int snapshot_iterate_devices(struct dm_target *ti,
				    iterate_devices_callout_fn fn, void *data)
{
	struct dm_snapshot *snap = ti->private;
	int r;

	r = fn(ti, snap->origin, 0, ti->len, data);

	if (!r)
		r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);

	return r;
}
/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/

/*
 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 * supplied bio was ignored.  The caller may submit it immediately.
 * (No remapping actually occurs as the origin is always a direct linear
 * map.)
 *
 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
 * and any supplied bio is added to a list to be submitted once all
 * the necessary exceptions exist.
 */
static int __origin_write(struct list_head *snapshots, sector_t sector,
			  struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED;
	struct dm_snapshot *snap;
	struct dm_exception *e;
	struct dm_snap_pending_exception *pe;
	struct dm_snap_pending_exception *pe_to_start_now = NULL;
	struct dm_snap_pending_exception *pe_to_start_last = NULL;
	chunk_t chunk;

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {
		/*
		 * Don't make new exceptions in a merging snapshot
		 * because it has effectively been deleted
		 */
		if (dm_target_is_snapshot_merge(snap->ti))
			continue;

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (sector >= dm_table_get_size(snap->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 */
		e = dm_lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			up_write(&snap->lock);
			pe = alloc_pending_exception(snap);
			down_write(&snap->lock);

			if (!snap->valid) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			e = dm_lookup_exception(&snap->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			pe = __find_pending_exception(snap, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(snap, -ENOMEM);
				goto next_snapshot;
			}
		}

		r = DM_MAPIO_SUBMITTED;

		/*
		 * If an origin bio was supplied, queue it to wait for the
		 * completion of this exception, and start this one last,
		 * at the end of the function.
		 */
		if (bio) {
			bio_list_add(&pe->origin_bios, bio);
			bio = NULL;

			if (!pe->started) {
				pe->started = 1;
				pe_to_start_last = pe;
			}
		}

		if (!pe->started) {
			pe->started = 1;
			pe_to_start_now = pe;
		}

next_snapshot:
		up_write(&snap->lock);

		if (pe_to_start_now) {
			start_copy(pe_to_start_now);
			pe_to_start_now = NULL;
		}
	}

	/*
	 * Submit the exception against which the bio is queued last,
	 * to give the other exceptions a head start.
	 */
	if (pe_to_start_last)
		start_copy(pe_to_start_last);

	return r;
}
/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
	up_read(&_origins_lock);

	return r;
}
/*
 * Trigger exceptions in all non-merging snapshots.
 *
 * The chunk size of the merging snapshot may be larger than the chunk
 * size of some other snapshot so we may need to reallocate multiple
 * chunks in other snapshots.
 *
 * We scan all the overlapping exceptions in the other snapshots.
 * Returns 1 if anything was reallocated and must be waited for,
 * otherwise returns 0.
 *
 * size must be a multiple of merging_snap's chunk_size.
 */
static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned size)
{
	int must_wait = 0;
	sector_t n;
	struct origin *o;

	/*
	 * The origin's __minimum_chunk_size() got stored in max_io_len
	 * by snapshot_merge_resume().
	 */
	down_read(&_origins_lock);
	o = __lookup_origin(merging_snap->origin->bdev);
	for (n = 0; n < size; n += merging_snap->ti->max_io_len)
		if (__origin_write(&o->snapshots, sector + n, NULL) ==
		    DM_MAPIO_SUBMITTED)
			must_wait = 1;
	up_read(&_origins_lock);

	return must_wait;
}
/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_origin *o;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	o = kmalloc(sizeof(struct dm_origin), GFP_KERNEL);
	if (!o) {
		ti->error = "Cannot allocate private origin structure";
		r = -ENOMEM;
		goto bad_alloc;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev);
	if (r) {
		ti->error = "Cannot get target device";
		goto bad_open;
	}

	o->ti = ti;
	ti->private = o;
	ti->num_flush_bios = 1;

	return 0;

bad_open:
	kfree(o);
bad_alloc:
	return r;
}
static void origin_dtr(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	dm_put_device(ti, o->dev);
	kfree(o);
}
static int origin_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_origin *o = ti->private;
	unsigned available_sectors;

	bio->bi_bdev = o->dev->bdev;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH))
		return DM_MAPIO_REMAPPED;

	if (bio_data_dir(bio) != WRITE)
		return DM_MAPIO_REMAPPED;

	available_sectors = o->split_boundary -
		((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));

	if (bio_sectors(bio) > available_sectors)
		dm_accept_partial_bio(bio, available_sectors);

	/* Only tell snapshots if this is a write */
	return do_origin(o->dev, bio);
}
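/*
 * split_boundary example: if the smallest snapshot chunk size is 16
 * sectors and a 32-sector write starts at sector 40, only 8 sectors
 * remain before the next chunk boundary (sector 48), so the bio is
 * split there and each piece triggers exceptions for exactly one
 * chunk in every snapshot.
 */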
static long origin_direct_access(struct dm_target *ti, sector_t sector,
				 void **kaddr, pfn_t *pfn, long size)
{
	DMWARN("device does not support dax.");
	return -EIO;
}
/*
 * Set the target "max_io_len" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);

	down_write(&_origins_lock);
	__insert_dm_origin(o);
	up_write(&_origins_lock);
}
static void origin_postsuspend(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	down_write(&_origins_lock);
	__remove_dm_origin(o);
	up_write(&_origins_lock);
}
static void origin_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_origin *o = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", o->dev->name);
		break;
	}
}
static int origin_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_origin *o = ti->private;

	return fn(ti, o->dev, 0, ti->len, data);
}
static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 9, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.postsuspend = origin_postsuspend,
	.status  = origin_status,
	.iterate_devices = origin_iterate_devices,
	.direct_access = origin_direct_access,
};
static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 15, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.preresume  = snapshot_preresume,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};
static struct target_type merge_target = {
	.name    = dm_snapshot_merge_target_name,
	.version = {1, 4, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_merge_map,
	.end_io  = snapshot_end_io,
	.presuspend = snapshot_merge_presuspend,
	.preresume  = snapshot_preresume,
	.resume  = snapshot_merge_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};
static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = dm_register_target(&snapshot_target);
	if (r < 0) {
		DMERR("snapshot target register failed %d", r);
		goto bad_register_snapshot_target;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad_register_origin_target;
	}

	r = dm_register_target(&merge_target);
	if (r < 0) {
		DMERR("Merge target register failed %d", r);
		goto bad_register_merge_target;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad_origin_hash;
	}

	exception_cache = KMEM_CACHE(dm_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad_exception_cache;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad_pending_cache;
	}

	return 0;

bad_pending_cache:
	kmem_cache_destroy(exception_cache);
bad_exception_cache:
	exit_origin_hash();
bad_origin_hash:
	dm_unregister_target(&merge_target);
bad_register_merge_target:
	dm_unregister_target(&origin_target);
bad_register_origin_target:
	dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
	dm_exception_store_exit();

	return r;
}
static void __exit dm_snapshot_exit(void)
{
	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);
	dm_unregister_target(&merge_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);

	dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);
MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
MODULE_ALIAS("dm-snapshot-origin");
MODULE_ALIAS("dm-snapshot-merge");