/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */
#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-exception-store.h"
#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
        ((ti)->type->name == dm_snapshot_merge_target_name)
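/*
 * Note: the macro above intentionally compares the name *pointer*, not the
 * string contents.  This is safe because every snapshot-merge target type
 * shares the single static dm_snapshot_merge_target_name defined above.
 */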
/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))
struct dm_exception_table {
        uint32_t hash_mask;
        unsigned hash_shift;
        struct list_head *table;
};
struct dm_snapshot {
        struct rw_semaphore lock;

        struct dm_dev *origin;
        struct dm_dev *cow;

        struct dm_target *ti;

        /* List of snapshots per Origin */
        struct list_head list;

        /*
         * You can't use a snapshot if this is 0 (e.g. if full).
         * A snapshot-merge target never clears this.
         */
        int valid;

        /* Origin writes don't trigger exceptions until this is set */
        int active;

        atomic_t pending_exceptions_count;

        /* Protected by "lock" */
        sector_t exception_start_sequence;

        /* Protected by kcopyd single-threaded callback */
        sector_t exception_complete_sequence;

        /*
         * A list of pending exceptions that completed out of order.
         * Protected by kcopyd single-threaded callback.
         */
        struct list_head out_of_order_list;

        mempool_t *pending_pool;

        struct dm_exception_table pending;
        struct dm_exception_table complete;

        /*
         * pe_lock protects all pending_exception operations and access
         * as well as the snapshot_bios list.
         */
        spinlock_t pe_lock;

        /* Chunks with outstanding reads */
        spinlock_t tracked_chunk_lock;
        mempool_t *tracked_chunk_pool;
        struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

        /* The on disk metadata handler */
        struct dm_exception_store *store;

        struct dm_kcopyd_client *kcopyd_client;

        /* Wait for events based on state_bits */
        unsigned long state_bits;

        /* Range of chunks currently being merged. */
        chunk_t first_merging_chunk;
        int num_merging_chunks;

        /*
         * The merge operation failed if this flag is set.
         * Failure modes are handled as follows:
         * - I/O error reading the header
         *   => don't load the target; abort.
         * - Header does not have "valid" flag set
         *   => use the origin; forget about the snapshot.
         * - I/O error when reading exceptions
         *   => don't load the target; abort.
         *   (We can't use the intermediate origin state.)
         * - I/O error while merging
         *   => stop merging; set merge_failed; process I/O normally.
         */
        int merge_failed;

        /*
         * Incoming bios that overlap with chunks being merged must wait
         * for them to be committed.
         */
        struct bio_list bios_queued_during_merge;
};
/*
 * RUNNING_MERGE  - Merge operation is in progress.
 * SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                  cleared afterwards.
 */
#define RUNNING_MERGE          0
#define SHUTDOWN_MERGE         1
struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
        return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);
struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
        return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);
static sector_t chunk_to_sector(struct dm_exception_store *store,
                                chunk_t chunk)
{
        return chunk << store->chunk_shift;
}
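/*
 * Worked example (illustrative): with a 16-sector (8KiB) chunk size,
 * store->chunk_shift is 4, so chunk 3 starts at sector 3 << 4 = 48.
 */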
static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
        /*
         * There is only ever one instance of a particular block
         * device so we can compare pointers safely.
         */
        return lhs == rhs;
}
struct dm_snap_pending_exception {
        struct dm_exception e;

        /*
         * Origin buffers waiting for this to complete are held
         * in a bio list
         */
        struct bio_list origin_bios;
        struct bio_list snapshot_bios;

        /* Pointer back to snapshot context */
        struct dm_snapshot *snap;

        /*
         * 1 indicates the exception has already been sent to
         * kcopyd.
         */
        int started;

        /* There was copying error. */
        int copy_error;

        /* A sequence number, it is used for in-order completion. */
        sector_t exception_sequence;

        struct list_head out_of_order_entry;

        /*
         * For writing a complete chunk, bypassing the copy.
         */
        struct bio *full_bio;
        bio_end_io_t *full_bio_end_io;
        void *full_bio_private;
};
/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;
struct dm_snap_tracked_chunk {
        struct hlist_node node;
        chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;
static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
                                                 chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
                                                        GFP_NOIO);
        unsigned long flags;

        c->chunk = chunk;

        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
        hlist_add_head(&c->node,
                       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

        return c;
}
static void stop_tracking_chunk(struct dm_snapshot *s,
                                struct dm_snap_tracked_chunk *c)
{
        unsigned long flags;

        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
        hlist_del(&c->node);
        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

        mempool_free(c, s->tracked_chunk_pool);
}
static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c;
        struct hlist_node *hn;
        int found = 0;

        spin_lock_irq(&s->tracked_chunk_lock);

        hlist_for_each_entry(c, hn,
            &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
                if (c->chunk == chunk) {
                        found = 1;
                        break;
                }
        }

        spin_unlock_irq(&s->tracked_chunk_lock);

        return found;
}
/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
        while (__chunk_is_tracked(s, chunk))
                msleep(1);
}
/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
        /* The origin device */
        struct block_device *bdev;

        struct list_head hash_list;

        /* List of snapshots for this origin */
        struct list_head snapshots;
};
/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;
static int init_origin_hash(void)
{
        int i;

        _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
                           GFP_KERNEL);
        if (!_origins) {
                DMERR("unable to allocate memory");
                return -ENOMEM;
        }

        for (i = 0; i < ORIGIN_HASH_SIZE; i++)
                INIT_LIST_HEAD(_origins + i);
        init_rwsem(&_origins_lock);

        return 0;
}
static void exit_origin_hash(void)
{
        kfree(_origins);
}
static unsigned origin_hash(struct block_device *bdev)
{
        return bdev->bd_dev & ORIGIN_MASK;
}
static struct origin *__lookup_origin(struct block_device *origin)
{
        struct list_head *ol;
        struct origin *o;

        ol = &_origins[origin_hash(origin)];
        list_for_each_entry (o, ol, hash_list)
                if (bdev_equal(o->bdev, origin))
                        return o;

        return NULL;
}
static void __insert_origin(struct origin *o)
{
        struct list_head *sl = &_origins[origin_hash(o->bdev)];
        list_add_tail(&o->hash_list, sl);
}
/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 * There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
                                        struct dm_snapshot **snap_src,
                                        struct dm_snapshot **snap_dest,
                                        struct dm_snapshot **snap_merge)
{
        struct dm_snapshot *s;
        struct origin *o;
        int count = 0;
        int active;

        o = __lookup_origin(snap->origin->bdev);
        if (!o)
                goto out;

        list_for_each_entry(s, &o->snapshots, list) {
                if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
                        *snap_merge = s;
                if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
                        continue;

                down_read(&s->lock);
                active = s->active;
                up_read(&s->lock);

                if (active) {
                        if (snap_src)
                                *snap_src = s;
                } else if (snap_dest)
                        *snap_dest = s;

                count++;
        }

out:
        return count;
}
/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
        struct dm_snapshot *snap_merge = NULL;

        /* Does snapshot need exceptions handed over to it? */
        if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
                                          &snap_merge) == 2) ||
            snap_dest) {
                snap->ti->error = "Snapshot cow pairing for exception "
                                  "table handover failed";
                return -EINVAL;
        }

        /*
         * If no snap_src was found, snap cannot become a handover
         * destination.
         */
        if (!snap_src)
                return 0;

        /*
         * Non-snapshot-merge handover?
         */
        if (!dm_target_is_snapshot_merge(snap->ti))
                return 1;

        /*
         * Do not allow more than one merging snapshot.
         */
        if (snap_merge) {
                snap->ti->error = "A snapshot is already merging.";
                return -EINVAL;
        }

        if (!snap_src->store->type->prepare_merge ||
            !snap_src->store->type->commit_merge) {
                snap->ti->error = "Snapshot exception store does not "
                                  "support snapshot-merge.";
                return -EINVAL;
        }

        return 1;
}
static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
        struct dm_snapshot *l;

        /* Sort the list according to chunk size, largest-first smallest-last */
        list_for_each_entry(l, &o->snapshots, list)
                if (l->store->chunk_size < s->store->chunk_size)
                        break;
        list_add_tail(&s->list, &l->list);
}
/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
        struct origin *o, *new_o = NULL;
        struct block_device *bdev = snap->origin->bdev;
        int r = 0;

        new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
        if (!new_o)
                return -ENOMEM;

        down_write(&_origins_lock);

        r = __validate_exception_handover(snap);
        if (r < 0) {
                kfree(new_o);
                goto out;
        }

        o = __lookup_origin(bdev);
        if (o)
                kfree(new_o);
        else {
                /* New origin */
                o = new_o;

                /* Initialise the struct */
                INIT_LIST_HEAD(&o->snapshots);
                o->bdev = bdev;

                __insert_origin(o);
        }

        __insert_snapshot(o, snap);

out:
        up_write(&_origins_lock);
        return r;
}
/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
        struct block_device *bdev = s->origin->bdev;

        down_write(&_origins_lock);

        list_del(&s->list);
        __insert_snapshot(__lookup_origin(bdev), s);

        up_write(&_origins_lock);
}
static void unregister_snapshot(struct dm_snapshot *s)
{
        struct origin *o;

        down_write(&_origins_lock);
        o = __lookup_origin(s->origin->bdev);

        list_del(&s->list);
        if (o && list_empty(&o->snapshots)) {
                list_del(&o->hash_list);
                kfree(o);
        }

        up_write(&_origins_lock);
}
/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
                                   uint32_t size, unsigned hash_shift)
{
        unsigned int i;

        et->hash_shift = hash_shift;
        et->hash_mask = size - 1;
        et->table = dm_vcalloc(size, sizeof(struct list_head));
        if (!et->table)
                return -ENOMEM;

        for (i = 0; i < size; i++)
                INIT_LIST_HEAD(et->table + i);

        return 0;
}
static void dm_exception_table_exit(struct dm_exception_table *et,
                                    struct kmem_cache *mem)
{
        struct list_head *slot;
        struct dm_exception *ex, *next;
        int i, size;

        size = et->hash_mask + 1;
        for (i = 0; i < size; i++) {
                slot = et->table + i;

                list_for_each_entry_safe (ex, next, slot, hash_list)
                        kmem_cache_free(mem, ex);
        }

        vfree(et->table);
}
static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
        return (chunk >> et->hash_shift) & et->hash_mask;
}
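/*
 * Illustrative consequence: an aligned run of 1 << hash_shift consecutive
 * chunks all hash to the same slot, which is what lets the "complete"
 * table coalesce consecutive-chunk exceptions; the "pending" table is
 * created with hash_shift 0 and never groups chunks.
 */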
static void dm_remove_exception(struct dm_exception *e)
{
        list_del(&e->hash_list);
}
/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
                                                chunk_t chunk)
{
        struct list_head *slot;
        struct dm_exception *e;

        slot = &et->table[exception_hash(et, chunk)];
        list_for_each_entry (e, slot, hash_list)
                if (chunk >= e->old_chunk &&
                    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
                        return e;

        return NULL;
}
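/*
 * Example (illustrative): an entry with old_chunk 7 and a consecutive
 * count of 2 covers old chunks 7, 8 and 9, so a lookup of any of those
 * three chunks returns that single entry.
 */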
static struct dm_exception *alloc_completed_exception(void)
{
        struct dm_exception *e;

        e = kmem_cache_alloc(exception_cache, GFP_NOIO);
        if (!e)
                e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

        return e;
}
static void free_completed_exception(struct dm_exception *e)
{
        kmem_cache_free(exception_cache, e);
}
static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
        struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
                                                             GFP_NOIO);

        atomic_inc(&s->pending_exceptions_count);
        pe->snap = s;

        return pe;
}
static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;

        mempool_free(pe, s->pending_pool);
        smp_mb__before_atomic_dec();
        atomic_dec(&s->pending_exceptions_count);
}
static void dm_insert_exception(struct dm_exception_table *eh,
                                struct dm_exception *new_e)
{
        struct list_head *l;
        struct dm_exception *e = NULL;

        l = &eh->table[exception_hash(eh, new_e->old_chunk)];

        /* Add immediately if this table doesn't support consecutive chunks */
        if (!eh->hash_shift)
                goto out;

        /* List is ordered by old_chunk */
        list_for_each_entry_reverse(e, l, hash_list) {
                /* Insert after an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk +
                                         dm_consecutive_chunk_count(e) + 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
                                         dm_consecutive_chunk_count(e) + 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        free_completed_exception(new_e);
                        return;
                }

                /* Insert before an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk - 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        e->old_chunk--;
                        e->new_chunk--;
                        free_completed_exception(new_e);
                        return;
                }

                if (new_e->old_chunk > e->old_chunk)
                        break;
        }

out:
        list_add(&new_e->hash_list, e ? &e->hash_list : l);
}
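/*
 * Worked example (illustrative): with an existing entry mapping old
 * chunks 7..9 to new chunks 2..4 (count 2), inserting old 10 -> new 5
 * matches the "insert after" test and simply bumps the count to 3;
 * inserting old 6 -> new 1 matches the "insert before" test, bumps the
 * count and slides the entry back to start at old 6 / new 1.
 */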
/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
        struct dm_snapshot *s = context;
        struct dm_exception *e;

        e = alloc_completed_exception();
        if (!e)
                return -ENOMEM;

        e->old_chunk = old;

        /* Consecutive_count is implicitly initialised to zero */
        e->new_chunk = new;

        dm_insert_exception(&s->complete, e);

        return 0;
}
/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static sector_t __minimum_chunk_size(struct origin *o)
{
        struct dm_snapshot *snap;
        unsigned chunk_size = 0;

        if (o)
                list_for_each_entry(snap, &o->snapshots, list)
                        chunk_size = min_not_zero(chunk_size,
                                                  snap->store->chunk_size);

        return (sector_t) chunk_size;
}
static int calc_max_buckets(void)
{
        /* use a fixed size of 2MB */
        unsigned long mem = 2 * 1024 * 1024;
        mem /= sizeof(struct list_head);

        return mem;
}
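/*
 * Illustrative arithmetic: on a 64-bit build sizeof(struct list_head) is
 * 16 bytes, so the 2MB budget above allows 2097152 / 16 = 131072 buckets.
 */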
/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
        sector_t hash_size, cow_dev_size, max_buckets;

        /*
         * Calculate based on the size of the original volume or
         * the COW volume...
         */
        cow_dev_size = get_dev_size(s->cow->bdev);
        max_buckets = calc_max_buckets();

        hash_size = cow_dev_size >> s->store->chunk_shift;
        hash_size = min(hash_size, max_buckets);

        if (hash_size < 64)
                hash_size = 64;
        hash_size = rounddown_pow_of_two(hash_size);
        if (dm_exception_table_init(&s->complete, hash_size,
                                    DM_CHUNK_CONSECUTIVE_BITS))
                return -ENOMEM;

        /*
         * Allocate hash table for in-flight exceptions
         * Make this smaller than the real hash table
         */
        hash_size >>= 3;
        if (hash_size < 64)
                hash_size = 64;

        if (dm_exception_table_init(&s->pending, hash_size, 0)) {
                dm_exception_table_exit(&s->complete, exception_cache);
                return -ENOMEM;
        }

        return 0;
}
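/*
 * Illustrative sizing: a 1GiB (2097152-sector) COW device with 8KiB
 * (16-sector) chunks yields 2097152 >> 4 = 131072 candidate buckets for
 * the completed table, and roughly an eighth of that for the smaller
 * pending table.
 */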
static void merge_shutdown(struct dm_snapshot *s)
{
        clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
        smp_mb__after_clear_bit();
        wake_up_bit(&s->state_bits, RUNNING_MERGE);
}
static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;

        return bio_list_get(&s->bios_queued_during_merge);
}
/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
                                           chunk_t old_chunk)
{
        struct dm_exception *e;

        e = dm_lookup_exception(&s->complete, old_chunk);
        if (!e) {
                DMERR("Corruption detected: exception for block %llu is "
                      "on disk but not in memory",
                      (unsigned long long)old_chunk);
                return -EINVAL;
        }

        /*
         * If this is the only chunk using this exception, remove exception.
         */
        if (!dm_consecutive_chunk_count(e)) {
                dm_remove_exception(e);
                free_completed_exception(e);
                return 0;
        }

        /*
         * The chunk may be either at the beginning or the end of a
         * group of consecutive chunks - never in the middle.  We are
         * removing chunks in the opposite order to that in which they
         * were added, so this should always be true.
         * Decrement the consecutive chunk counter and adjust the
         * starting point if necessary.
         */
        if (old_chunk == e->old_chunk) {
                e->old_chunk++;
                e->new_chunk++;
        } else if (old_chunk != e->old_chunk +
                   dm_consecutive_chunk_count(e)) {
                DMERR("Attempt to merge block %llu from the "
                      "middle of a chunk range [%llu - %llu]",
                      (unsigned long long)old_chunk,
                      (unsigned long long)e->old_chunk,
                      (unsigned long long)
                      e->old_chunk + dm_consecutive_chunk_count(e));
                return -EINVAL;
        }

        dm_consecutive_chunk_count_dec(e);

        return 0;
}
static void flush_bios(struct bio *bio);
static int remove_single_exception_chunk(struct dm_snapshot *s)
{
        struct bio *b = NULL;
        int r;
        chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

        down_write(&s->lock);

        /*
         * Process chunks (and associated exceptions) in reverse order
         * so that dm_consecutive_chunk_count_dec() accounting works.
         */
        do {
                r = __remove_single_exception_chunk(s, old_chunk);
                if (r)
                        goto out;
        } while (old_chunk-- > s->first_merging_chunk);

        b = __release_queued_bios_after_merge(s);

out:
        up_write(&s->lock);
        if (b)
                flush_bios(b);

        return r;
}
static int origin_write_extent(struct dm_snapshot *merging_snap,
                               sector_t sector, unsigned chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
                           void *context);
static uint64_t read_pending_exceptions_done_count(void)
{
        uint64_t pending_exceptions_done;

        spin_lock(&_pending_exceptions_done_spinlock);
        pending_exceptions_done = _pending_exceptions_done_count;
        spin_unlock(&_pending_exceptions_done_spinlock);

        return pending_exceptions_done;
}
static void increment_pending_exceptions_done_count(void)
{
        spin_lock(&_pending_exceptions_done_spinlock);
        _pending_exceptions_done_count++;
        spin_unlock(&_pending_exceptions_done_spinlock);

        wake_up_all(&_pending_exceptions_done);
}
static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
        int i, linear_chunks;
        chunk_t old_chunk, new_chunk;
        struct dm_io_region src, dest;
        sector_t io_size;
        uint64_t previous_count;

        BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
        if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
                goto shut;

        /*
         * valid flag never changes during merge, so no lock required.
         */
        if (!s->valid) {
                DMERR("Snapshot is invalid: can't merge");
                goto shut;
        }

        linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
                                                      &new_chunk);
        if (linear_chunks <= 0) {
                if (linear_chunks < 0) {
                        DMERR("Read error in exception store: "
                              "shutting down merge");
                        down_write(&s->lock);
                        s->merge_failed = 1;
                        up_write(&s->lock);
                }
                goto shut;
        }

        /* Adjust old_chunk and new_chunk to reflect start of linear region */
        old_chunk = old_chunk + 1 - linear_chunks;
        new_chunk = new_chunk + 1 - linear_chunks;

        /*
         * Use one (potentially large) I/O to copy all 'linear_chunks'
         * from the exception store to the origin
         */
        io_size = linear_chunks * s->store->chunk_size;

        dest.bdev = s->origin->bdev;
        dest.sector = chunk_to_sector(s->store, old_chunk);
        dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

        src.bdev = s->cow->bdev;
        src.sector = chunk_to_sector(s->store, new_chunk);
        src.count = dest.count;

        /*
         * Reallocate any exceptions needed in other snapshots then
         * wait for the pending exceptions to complete.
         * Each time any pending exception (globally on the system)
         * completes we are woken and repeat the process to find out
         * if we can proceed.  While this may not seem a particularly
         * efficient algorithm, it is not expected to have any
         * significant impact on performance.
         */
        previous_count = read_pending_exceptions_done_count();
        while (origin_write_extent(s, dest.sector, io_size)) {
                wait_event(_pending_exceptions_done,
                           (read_pending_exceptions_done_count() !=
                            previous_count));
                /* Retry after the wait, until all exceptions are done. */
                previous_count = read_pending_exceptions_done_count();
        }

        down_write(&s->lock);
        s->first_merging_chunk = old_chunk;
        s->num_merging_chunks = linear_chunks;
        up_write(&s->lock);

        /* Wait until writes to all 'linear_chunks' drain */
        for (i = 0; i < linear_chunks; i++)
                __check_for_conflicting_io(s, old_chunk + i);

        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
        return;

shut:
        merge_shutdown(s);
}
static void error_bios(struct bio *bio);
static void merge_callback(int read_err, unsigned long write_err, void *context)
{
        struct dm_snapshot *s = context;
        struct bio *b = NULL;

        if (read_err || write_err) {
                if (read_err)
                        DMERR("Read error: shutting down merge.");
                else
                        DMERR("Write error: shutting down merge.");
                goto shut;
        }

        if (s->store->type->commit_merge(s->store,
                                         s->num_merging_chunks) < 0) {
                DMERR("Write error in exception store: shutting down merge");
                goto shut;
        }

        if (remove_single_exception_chunk(s) < 0)
                goto shut;

        snapshot_merge_next_chunks(s);

        return;

shut:
        down_write(&s->lock);
        s->merge_failed = 1;
        b = __release_queued_bios_after_merge(s);
        up_write(&s->lock);
        error_bios(b);

        merge_shutdown(s);
}
static void start_merge(struct dm_snapshot *s)
{
        if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
                snapshot_merge_next_chunks(s);
}
static int wait_schedule(void *ptr)
{
        schedule();

        return 0;
}
/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
        set_bit(SHUTDOWN_MERGE, &s->state_bits);
        wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
                    TASK_UNINTERRUPTIBLE);
        clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}
/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct dm_snapshot *s;
        int i;
        int r = -EINVAL;
        char *origin_path, *cow_path;
        unsigned args_used, num_flush_requests = 1;
        fmode_t origin_mode = FMODE_READ;

        if (argc != 4) {
                ti->error = "requires exactly 4 arguments";
                r = -EINVAL;
                goto bad;
        }

        if (dm_target_is_snapshot_merge(ti)) {
                num_flush_requests = 2;
                origin_mode = FMODE_WRITE;
        }

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s) {
                ti->error = "Cannot allocate private snapshot structure";
                r = -ENOMEM;
                goto bad;
        }

        origin_path = argv[0];
        argv++;
        argc--;

        r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
        if (r) {
                ti->error = "Cannot get origin device";
                goto bad_origin;
        }

        cow_path = argv[0];
        argv++;
        argc--;

        r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
        if (r) {
                ti->error = "Cannot get COW device";
                goto bad_cow;
        }

        r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
        if (r) {
                ti->error = "Couldn't create exception store";
                r = -EINVAL;
                goto bad_store;
        }

        argv += args_used;
        argc -= args_used;

        s->ti = ti;
        s->valid = 1;
        s->active = 0;
        atomic_set(&s->pending_exceptions_count, 0);
        s->exception_start_sequence = 0;
        s->exception_complete_sequence = 0;
        INIT_LIST_HEAD(&s->out_of_order_list);
        init_rwsem(&s->lock);
        INIT_LIST_HEAD(&s->list);
        spin_lock_init(&s->pe_lock);
        s->state_bits = 0;
        s->merge_failed = 0;
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;
        bio_list_init(&s->bios_queued_during_merge);

        /* Allocate hash table for COW data */
        if (init_hash_tables(s)) {
                ti->error = "Unable to allocate hash table space";
                r = -ENOMEM;
                goto bad_hash_tables;
        }

        s->kcopyd_client = dm_kcopyd_client_create();
        if (IS_ERR(s->kcopyd_client)) {
                r = PTR_ERR(s->kcopyd_client);
                ti->error = "Could not create kcopyd client";
                goto bad_kcopyd;
        }

        s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
        if (!s->pending_pool) {
                ti->error = "Could not allocate mempool for pending exceptions";
                r = -ENOMEM;
                goto bad_pending_pool;
        }

        s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
                                                         tracked_chunk_cache);
        if (!s->tracked_chunk_pool) {
                ti->error = "Could not allocate tracked_chunk mempool for "
                            "tracking reads";
                goto bad_tracked_chunk_pool;
        }

        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

        spin_lock_init(&s->tracked_chunk_lock);

        ti->private = s;
        ti->num_flush_requests = num_flush_requests;

        /* Add snapshot to the list of snapshots for this origin */
        /* Exceptions aren't triggered till snapshot_resume() is called */
        r = register_snapshot(s);
        if (r == -ENOMEM) {
                ti->error = "Snapshot origin struct allocation failed";
                goto bad_load_and_register;
        } else if (r < 0) {
                /* invalid handover, register_snapshot has set ti->error */
                goto bad_load_and_register;
        }

        /*
         * Metadata must only be loaded into one table at once, so skip this
         * if metadata will be handed over during resume.
         * Chunk size will be set during the handover - set it to zero to
         * ensure it's ignored.
         */
        if (r > 0) {
                s->store->chunk_size = 0;
                return 0;
        }

        r = s->store->type->read_metadata(s->store, dm_add_exception,
                                          (void *)s);
        if (r < 0) {
                ti->error = "Failed to read snapshot metadata";
                goto bad_read_metadata;
        } else if (r > 0) {
                s->valid = 0;
                DMWARN("Snapshot is marked invalid.");
        }

        if (!s->store->chunk_size) {
                ti->error = "Chunk size not set";
                goto bad_read_metadata;
        }
        ti->split_io = s->store->chunk_size;

        return 0;

bad_read_metadata:
        unregister_snapshot(s);

bad_load_and_register:
        mempool_destroy(s->tracked_chunk_pool);

bad_tracked_chunk_pool:
        mempool_destroy(s->pending_pool);

bad_pending_pool:
        dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
        dm_exception_table_exit(&s->pending, pending_cache);
        dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
        dm_exception_store_destroy(s->store);

bad_store:
        dm_put_device(ti, s->cow);

bad_cow:
        dm_put_device(ti, s->origin);

bad_origin:
        kfree(s);

bad:
        return r;
}
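/*
 * Illustrative userspace usage (not part of this file): a persistent
 * snapshot with 16-sector chunks might be created with
 *
 *   dmsetup create snap1 --table \
 *       "0 2097152 snapshot /dev/vg/base /dev/vg/cow P 16"
 *
 * matching the <origin_dev> <COW-dev> <p/n> <chunk-size> arguments parsed
 * above; device paths and sizes here are made-up examples.
 */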
static void __free_exceptions(struct dm_snapshot *s)
{
        dm_kcopyd_client_destroy(s->kcopyd_client);
        s->kcopyd_client = NULL;

        dm_exception_table_exit(&s->pending, pending_cache);
        dm_exception_table_exit(&s->complete, exception_cache);
}
static void __handover_exceptions(struct dm_snapshot *snap_src,
                                  struct dm_snapshot *snap_dest)
{
        union {
                struct dm_exception_table table_swap;
                struct dm_exception_store *store_swap;
        } u;

        /*
         * Swap all snapshot context information between the two instances.
         */
        u.table_swap = snap_dest->complete;
        snap_dest->complete = snap_src->complete;
        snap_src->complete = u.table_swap;

        u.store_swap = snap_dest->store;
        snap_dest->store = snap_src->store;
        snap_src->store = u.store_swap;

        snap_dest->store->snap = snap_dest;
        snap_src->store->snap = snap_src;

        snap_dest->ti->split_io = snap_dest->store->chunk_size;
        snap_dest->valid = snap_src->valid;

        /*
         * Set source invalid to ensure it receives no further I/O.
         */
        snap_src->valid = 0;
}
static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
        int i;
#endif
        struct dm_snapshot *s = ti->private;
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

        down_read(&_origins_lock);
        /* Check whether exception handover must be cancelled */
        (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
        if (snap_src && snap_dest && (s == snap_src)) {
                down_write(&snap_dest->lock);
                snap_dest->valid = 0;
                up_write(&snap_dest->lock);
                DMERR("Cancelling snapshot handover.");
        }
        up_read(&_origins_lock);

        if (dm_target_is_snapshot_merge(ti))
                stop_merge(s);

        /* Prevent further origin writes from using this snapshot. */
        /* After this returns there can be no new kcopyd jobs. */
        unregister_snapshot(s);

        while (atomic_read(&s->pending_exceptions_count))
                msleep(1);
        /*
         * Ensure instructions in mempool_destroy aren't reordered
         * before atomic_read.
         */
        smp_mb();

#ifdef CONFIG_DM_DEBUG
        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
                BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

        mempool_destroy(s->tracked_chunk_pool);

        __free_exceptions(s);

        mempool_destroy(s->pending_pool);

        dm_exception_store_destroy(s->store);

        dm_put_device(ti, s->cow);

        dm_put_device(ti, s->origin);

        kfree(s);
}
/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                generic_make_request(bio);
                bio = n;
        }
}
static int do_origin(struct dm_dev *origin, struct bio *bio);
/*
 * Flush a list of buffers.
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
        struct bio *n;
        int r;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                r = do_origin(s->origin, bio);
                if (r == DM_MAPIO_REMAPPED)
                        generic_make_request(bio);
                bio = n;
        }
}
/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                bio_io_error(bio);
                bio = n;
        }
}
static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
        if (!s->valid)
                return;

        if (err == -EIO)
                DMERR("Invalidating snapshot: Error reading/writing.");
        else if (err == -ENOMEM)
                DMERR("Invalidating snapshot: Unable to allocate exception.");

        if (s->store->type->drop_snapshot)
                s->store->type->drop_snapshot(s->store);

        s->valid = 0;

        dm_table_event(s->ti->table);
}
static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
        struct dm_exception *e;
        struct dm_snapshot *s = pe->snap;
        struct bio *origin_bios = NULL;
        struct bio *snapshot_bios = NULL;
        struct bio *full_bio = NULL;
        int error = 0;

        if (!success) {
                /* Read/write error - snapshot is unusable */
                down_write(&s->lock);
                __invalidate_snapshot(s, -EIO);
                error = 1;
                goto out;
        }

        e = alloc_completed_exception();
        if (!e) {
                down_write(&s->lock);
                __invalidate_snapshot(s, -ENOMEM);
                error = 1;
                goto out;
        }
        *e = pe->e;

        down_write(&s->lock);
        if (!s->valid) {
                free_completed_exception(e);
                error = 1;
                goto out;
        }

        /* Check for conflicting reads */
        __check_for_conflicting_io(s, pe->e.old_chunk);

        /*
         * Add a proper exception, and remove the
         * in-flight exception from the list.
         */
        dm_insert_exception(&s->complete, e);

out:
        dm_remove_exception(&pe->e);
        snapshot_bios = bio_list_get(&pe->snapshot_bios);
        origin_bios = bio_list_get(&pe->origin_bios);
        full_bio = pe->full_bio;
        if (full_bio) {
                full_bio->bi_end_io = pe->full_bio_end_io;
                full_bio->bi_private = pe->full_bio_private;
        }
        free_pending_exception(pe);

        increment_pending_exceptions_done_count();

        up_write(&s->lock);

        /* Submit any pending write bios */
        if (error) {
                if (full_bio)
                        bio_io_error(full_bio);
                error_bios(snapshot_bios);
        } else {
                if (full_bio)
                        bio_endio(full_bio, 0);
                flush_bios(snapshot_bios);
        }

        retry_origin_bios(s, origin_bios);
}
static void commit_callback(void *context, int success)
{
        struct dm_snap_pending_exception *pe = context;

        pending_complete(pe, success);
}
static void complete_exception(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;

        if (unlikely(pe->copy_error))
                pending_complete(pe, 0);

        else
                /* Update the metadata if we are persistent */
                s->store->type->commit_exception(s->store, &pe->e,
                                                 commit_callback, pe);
}
/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
        struct dm_snap_pending_exception *pe = context;
        struct dm_snapshot *s = pe->snap;

        pe->copy_error = read_err || write_err;

        if (pe->exception_sequence == s->exception_complete_sequence) {
                s->exception_complete_sequence++;
                complete_exception(pe);

                while (!list_empty(&s->out_of_order_list)) {
                        pe = list_entry(s->out_of_order_list.next,
                                        struct dm_snap_pending_exception, out_of_order_entry);
                        if (pe->exception_sequence != s->exception_complete_sequence)
                                break;
                        s->exception_complete_sequence++;
                        list_del(&pe->out_of_order_entry);
                        complete_exception(pe);
                }
        } else {
                struct list_head *lh;
                struct dm_snap_pending_exception *pe2;

                list_for_each_prev(lh, &s->out_of_order_list) {
                        pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry);
                        if (pe2->exception_sequence < pe->exception_sequence)
                                break;
                }
                list_add(&pe->out_of_order_entry, lh);
        }
}
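/*
 * Illustrative sequence: if copies for sequence numbers 5, 7 and 6 finish
 * in that order while exception_complete_sequence is 5, then 5 completes
 * immediately, 7 is parked on out_of_order_list, and when 6 arrives both
 * 6 and 7 are drained in order.
 */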
/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;
        struct dm_io_region src, dest;
        struct block_device *bdev = s->origin->bdev;
        sector_t dev_size;

        dev_size = get_dev_size(bdev);

        src.bdev = bdev;
        src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
        src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

        dest.bdev = s->cow->bdev;
        dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
        dest.count = src.count;

        /* Hand over to kcopyd */
        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}
static void full_bio_end_io(struct bio *bio, int error)
{
        void *callback_data = bio->bi_private;

        dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0);
}
static void start_full_bio(struct dm_snap_pending_exception *pe,
                           struct bio *bio)
{
        struct dm_snapshot *s = pe->snap;
        void *callback_data;

        pe->full_bio = bio;
        pe->full_bio_end_io = bio->bi_end_io;
        pe->full_bio_private = bio->bi_private;

        callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
                                                   copy_callback, pe);

        bio->bi_end_io = full_bio_end_io;
        bio->bi_private = callback_data;

        generic_make_request(bio);
}
static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

        if (!e)
                return NULL;

        return container_of(e, struct dm_snap_pending_exception, e);
}
/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
                         struct dm_snap_pending_exception *pe, chunk_t chunk)
{
        struct dm_snap_pending_exception *pe2;

        pe2 = __lookup_pending_exception(s, chunk);
        if (pe2) {
                free_pending_exception(pe);
                return pe2;
        }

        pe->e.old_chunk = chunk;
        bio_list_init(&pe->origin_bios);
        bio_list_init(&pe->snapshot_bios);
        pe->started = 0;
        pe->full_bio = NULL;

        if (s->store->type->prepare_exception(s->store, &pe->e)) {
                free_pending_exception(pe);
                return NULL;
        }

        pe->exception_sequence = s->exception_start_sequence++;

        dm_insert_exception(&s->pending, &pe->e);

        return pe;
}
static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
                            struct bio *bio, chunk_t chunk)
{
        bio->bi_bdev = s->cow->bdev;
        bio->bi_sector = chunk_to_sector(s->store,
                                         dm_chunk_number(e->new_chunk) +
                                         (chunk - e->old_chunk)) +
                                        (bio->bi_sector &
                                         s->store->chunk_mask);
}
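/*
 * Worked example (illustrative): with 16-sector chunks, an entry mapping
 * old chunk 7 to new chunk 2 and a bio at sector 117 (old chunk 7,
 * in-chunk offset 5) is redirected to the cow device at sector
 * (2 << 4) + 5 = 37.
 */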
static int snapshot_map(struct dm_target *ti, struct bio *bio,
                        union map_info *map_context)
{
        struct dm_exception *e;
        struct dm_snapshot *s = ti->private;
        int r = DM_MAPIO_REMAPPED;
        chunk_t chunk;
        struct dm_snap_pending_exception *pe = NULL;

        if (bio->bi_rw & REQ_FLUSH) {
                bio->bi_bdev = s->cow->bdev;
                return DM_MAPIO_REMAPPED;
        }

        chunk = sector_to_chunk(s->store, bio->bi_sector);

        /* Full snapshots are not usable */
        /* To get here the table must be live so s->active is always set. */
        if (!s->valid)
                return -EIO;

        /* FIXME: should only take write lock if we need
         * to copy an exception */
        down_write(&s->lock);

        if (!s->valid) {
                r = -EIO;
                goto out_unlock;
        }

        /* If the block is already remapped - use that, else remap it */
        e = dm_lookup_exception(&s->complete, chunk);
        if (e) {
                remap_exception(s, e, bio, chunk);
                goto out_unlock;
        }

        /*
         * Write to snapshot - higher level takes care of RW/RO
         * flags so we should only get this if we are
         * writeable.
         */
        if (bio_rw(bio) == WRITE) {
                pe = __lookup_pending_exception(s, chunk);
                if (!pe) {
                        up_write(&s->lock);
                        pe = alloc_pending_exception(s);
                        down_write(&s->lock);

                        if (!s->valid) {
                                free_pending_exception(pe);
                                r = -EIO;
                                goto out_unlock;
                        }

                        e = dm_lookup_exception(&s->complete, chunk);
                        if (e) {
                                free_pending_exception(pe);
                                remap_exception(s, e, bio, chunk);
                                goto out_unlock;
                        }

                        pe = __find_pending_exception(s, pe, chunk);
                        if (!pe) {
                                __invalidate_snapshot(s, -ENOMEM);
                                r = -EIO;
                                goto out_unlock;
                        }
                }

                remap_exception(s, &pe->e, bio, chunk);

                r = DM_MAPIO_SUBMITTED;

                if (!pe->started &&
                    bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) {
                        pe->started = 1;
                        up_write(&s->lock);
                        start_full_bio(pe, bio);
                        goto out;
                }

                bio_list_add(&pe->snapshot_bios, bio);

                if (!pe->started) {
                        /* this is protected by snap->lock */
                        pe->started = 1;
                        up_write(&s->lock);
                        start_copy(pe);
                        goto out;
                }
        } else {
                bio->bi_bdev = s->origin->bdev;
                map_context->ptr = track_chunk(s, chunk);
        }

out_unlock:
        up_write(&s->lock);
out:
        return r;
}
/*
 * A snapshot-merge target behaves like a combination of a snapshot
 * target and a snapshot-origin target.  It only generates new
 * exceptions in other snapshots and not in the one that is being
 * merged.
 *
 * For each chunk, if there is an existing exception, it is used to
 * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
 * which in turn might generate exceptions in other snapshots.
 * If merging is currently taking place on the chunk in question, the
 * I/O is deferred by adding it to s->bios_queued_during_merge.
 */
static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
                              union map_info *map_context)
{
        struct dm_exception *e;
        struct dm_snapshot *s = ti->private;
        int r = DM_MAPIO_REMAPPED;
        chunk_t chunk;

        if (bio->bi_rw & REQ_FLUSH) {
                if (!map_context->target_request_nr)
                        bio->bi_bdev = s->origin->bdev;
                else
                        bio->bi_bdev = s->cow->bdev;
                map_context->ptr = NULL;
                return DM_MAPIO_REMAPPED;
        }

        chunk = sector_to_chunk(s->store, bio->bi_sector);

        down_write(&s->lock);

        /* Full merging snapshots are redirected to the origin */
        if (!s->valid)
                goto redirect_to_origin;

        /* If the block is already remapped - use that */
        e = dm_lookup_exception(&s->complete, chunk);
        if (e) {
                /* Queue writes overlapping with chunks being merged */
                if (bio_rw(bio) == WRITE &&
                    chunk >= s->first_merging_chunk &&
                    chunk < (s->first_merging_chunk +
                             s->num_merging_chunks)) {
                        bio->bi_bdev = s->origin->bdev;
                        bio_list_add(&s->bios_queued_during_merge, bio);
                        r = DM_MAPIO_SUBMITTED;
                        goto out_unlock;
                }

                remap_exception(s, e, bio, chunk);

                if (bio_rw(bio) == WRITE)
                        map_context->ptr = track_chunk(s, chunk);
                goto out_unlock;
        }

redirect_to_origin:
        bio->bi_bdev = s->origin->bdev;

        if (bio_rw(bio) == WRITE) {
                up_write(&s->lock);
                return do_origin(s->origin, bio);
        }

out_unlock:
        up_write(&s->lock);

        return r;
}
static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
                           int error, union map_info *map_context)
{
        struct dm_snapshot *s = ti->private;
        struct dm_snap_tracked_chunk *c = map_context->ptr;

        if (c)
                stop_tracking_chunk(s, c);

        return 0;
}
static void snapshot_merge_presuspend(struct dm_target *ti)
{
        struct dm_snapshot *s = ti->private;

        stop_merge(s);
}
static int snapshot_preresume(struct dm_target *ti)
{
        int r = 0;
        struct dm_snapshot *s = ti->private;
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

        down_read(&_origins_lock);
        (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
        if (snap_src && snap_dest) {
                down_read(&snap_src->lock);
                if (s == snap_src) {
                        DMERR("Unable to resume snapshot source until "
                              "handover completes.");
                        r = -EINVAL;
                } else if (!dm_suspended(snap_src->ti)) {
                        DMERR("Unable to perform snapshot handover until "
                              "source is suspended.");
                        r = -EINVAL;
                }
                up_read(&snap_src->lock);
        }
        up_read(&_origins_lock);

        return r;
}
static void snapshot_resume(struct dm_target *ti)
{
        struct dm_snapshot *s = ti->private;
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

        down_read(&_origins_lock);
        (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
        if (snap_src && snap_dest) {
                down_write(&snap_src->lock);
                down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
                __handover_exceptions(snap_src, snap_dest);
                up_write(&snap_dest->lock);
                up_write(&snap_src->lock);
        }
        up_read(&_origins_lock);

        /* Now we have correct chunk size, reregister */
        reregister_snapshot(s);

        down_write(&s->lock);
        s->active = 1;
        up_write(&s->lock);
}
static sector_t get_origin_minimum_chunksize(struct block_device *bdev)
{
        sector_t min_chunksize;

        down_read(&_origins_lock);
        min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
        up_read(&_origins_lock);

        return min_chunksize;
}
static void snapshot_merge_resume(struct dm_target *ti)
{
        struct dm_snapshot *s = ti->private;

        /*
         * Handover exceptions from existing snapshot.
         */
        snapshot_resume(ti);

        /*
         * snapshot-merge acts as an origin, so set ti->split_io
         */
        ti->split_io = get_origin_minimum_chunksize(s->origin->bdev);

        start_merge(s);
}
static void snapshot_status(struct dm_target *ti, status_type_t type,
                            char *result, unsigned int maxlen)
{
        unsigned sz = 0;
        struct dm_snapshot *snap = ti->private;

        switch (type) {
        case STATUSTYPE_INFO:

                down_write(&snap->lock);

                if (!snap->valid)
                        DMEMIT("Invalid");
                else if (snap->merge_failed)
                        DMEMIT("Merge failed");
                else {
                        if (snap->store->type->usage) {
                                sector_t total_sectors, sectors_allocated,
                                         metadata_sectors;
                                snap->store->type->usage(snap->store,
                                                         &total_sectors,
                                                         &sectors_allocated,
                                                         &metadata_sectors);
                                DMEMIT("%llu/%llu %llu",
                                       (unsigned long long)sectors_allocated,
                                       (unsigned long long)total_sectors,
                                       (unsigned long long)metadata_sectors);
                        }
                        else
                                DMEMIT("Unknown");
                }

                up_write(&snap->lock);

                break;

        case STATUSTYPE_TABLE:
                /*
                 * kdevname returns a static pointer so we need
                 * to make private copies if the output is to
                 * make sense.
                 */
                DMEMIT("%s %s", snap->origin->name, snap->cow->name);
                snap->store->type->status(snap->store, type, result + sz,
                                          maxlen - sz);
                break;
        }
}
static int snapshot_iterate_devices(struct dm_target *ti,
                                    iterate_devices_callout_fn fn, void *data)
{
        struct dm_snapshot *snap = ti->private;
        int r;

        r = fn(ti, snap->origin, 0, ti->len, data);

        if (!r)
                r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);

        return r;
}
/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/

/*
 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 * supplied bio was ignored.  The caller may submit it immediately.
 * (No remapping actually occurs as the origin is always a direct linear
 * map.)
 *
 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
 * and any supplied bio is added to a list to be submitted once all
 * the necessary exceptions exist.
 */
static int __origin_write(struct list_head *snapshots, sector_t sector,
                          struct bio *bio)
{
        int r = DM_MAPIO_REMAPPED;
        struct dm_snapshot *snap;
        struct dm_exception *e;
        struct dm_snap_pending_exception *pe;
        struct dm_snap_pending_exception *pe_to_start_now = NULL;
        struct dm_snap_pending_exception *pe_to_start_last = NULL;
        chunk_t chunk;

        /* Do all the snapshots on this origin */
        list_for_each_entry (snap, snapshots, list) {
                /*
                 * Don't make new exceptions in a merging snapshot
                 * because it has effectively been deleted
                 */
                if (dm_target_is_snapshot_merge(snap->ti))
                        continue;

                down_write(&snap->lock);

                /* Only deal with valid and active snapshots */
                if (!snap->valid || !snap->active)
                        goto next_snapshot;

                /* Nothing to do if writing beyond end of snapshot */
                if (sector >= dm_table_get_size(snap->ti->table))
                        goto next_snapshot;

                /*
                 * Remember, different snapshots can have
                 * different chunk sizes.
                 */
                chunk = sector_to_chunk(snap->store, sector);

                /*
                 * Check exception table to see if block
                 * is already remapped in this snapshot
                 * and trigger an exception if not.
                 */
                e = dm_lookup_exception(&snap->complete, chunk);
                if (e)
                        goto next_snapshot;

                pe = __lookup_pending_exception(snap, chunk);
                if (!pe) {
                        up_write(&snap->lock);
                        pe = alloc_pending_exception(snap);
                        down_write(&snap->lock);

                        if (!snap->valid) {
                                free_pending_exception(pe);
                                goto next_snapshot;
                        }

                        e = dm_lookup_exception(&snap->complete, chunk);
                        if (e) {
                                free_pending_exception(pe);
                                goto next_snapshot;
                        }

                        pe = __find_pending_exception(snap, pe, chunk);
                        if (!pe) {
                                __invalidate_snapshot(snap, -ENOMEM);
                                goto next_snapshot;
                        }
                }

                r = DM_MAPIO_SUBMITTED;

                /*
                 * If an origin bio was supplied, queue it to wait for the
                 * completion of this exception, and start this one last,
                 * at the end of the function.
                 */
                if (bio) {
                        bio_list_add(&pe->origin_bios, bio);
                        bio = NULL;

                        if (!pe->started) {
                                pe->started = 1;
                                pe_to_start_last = pe;
                        }
                }

                if (!pe->started) {
                        pe->started = 1;
                        pe_to_start_now = pe;
                }

next_snapshot:
                up_write(&snap->lock);

                if (pe_to_start_now) {
                        start_copy(pe_to_start_now);
                        pe_to_start_now = NULL;
                }
        }

        /*
         * Submit the exception against which the bio is queued last,
         * to give the other exceptions a head start.
         */
        if (pe_to_start_last)
                start_copy(pe_to_start_last);

        return r;
}
/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
        struct origin *o;
        int r = DM_MAPIO_REMAPPED;

        down_read(&_origins_lock);
        o = __lookup_origin(origin->bdev);
        if (o)
                r = __origin_write(&o->snapshots, bio->bi_sector, bio);
        up_read(&_origins_lock);

        return r;
}
/*
 * Trigger exceptions in all non-merging snapshots.
 *
 * The chunk size of the merging snapshot may be larger than the chunk
 * size of some other snapshot so we may need to reallocate multiple
 * chunks in other snapshots.
 *
 * We scan all the overlapping exceptions in the other snapshots.
 * Returns 1 if anything was reallocated and must be waited for,
 * otherwise returns 0.
 *
 * size must be a multiple of merging_snap's chunk_size.
 */
static int origin_write_extent(struct dm_snapshot *merging_snap,
                               sector_t sector, unsigned size)
{
        int must_wait = 0;
        sector_t n;
        struct origin *o;

        /*
         * The origin's __minimum_chunk_size() got stored in split_io
         * by snapshot_merge_resume().
         */
        down_read(&_origins_lock);
        o = __lookup_origin(merging_snap->origin->bdev);
        for (n = 0; n < size; n += merging_snap->ti->split_io)
                if (__origin_write(&o->snapshots, sector + n, NULL) ==
                    DM_MAPIO_SUBMITTED)
                        must_wait = 1;
        up_read(&_origins_lock);

        return must_wait;
}
/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        int r;
        struct dm_dev *dev;

        if (argc != 1) {
                ti->error = "origin: incorrect number of arguments";
                return -EINVAL;
        }

        r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
        if (r) {
                ti->error = "Cannot get target device";
                return r;
        }

        ti->private = dev;
        ti->num_flush_requests = 1;

        return 0;
}
static void origin_dtr(struct dm_target *ti)
{
        struct dm_dev *dev = ti->private;
        dm_put_device(ti, dev);
}
static int origin_map(struct dm_target *ti, struct bio *bio,
                      union map_info *map_context)
{
        struct dm_dev *dev = ti->private;
        bio->bi_bdev = dev->bdev;

        if (bio->bi_rw & REQ_FLUSH)
                return DM_MAPIO_REMAPPED;

        /* Only tell snapshots if this is a write */
        return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}
/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
        struct dm_dev *dev = ti->private;

        ti->split_io = get_origin_minimum_chunksize(dev->bdev);
}
static void origin_status(struct dm_target *ti, status_type_t type, char *result,
                          unsigned int maxlen)
{
        struct dm_dev *dev = ti->private;

        switch (type) {
        case STATUSTYPE_INFO:
                result[0] = '\0';
                break;

        case STATUSTYPE_TABLE:
                snprintf(result, maxlen, "%s", dev->name);
                break;
        }
}
static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
                        struct bio_vec *biovec, int max_size)
{
        struct dm_dev *dev = ti->private;
        struct request_queue *q = bdev_get_queue(dev->bdev);

        if (!q->merge_bvec_fn)
                return max_size;

        bvm->bi_bdev = dev->bdev;
        bvm->bi_sector = bvm->bi_sector;

        return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
static int origin_iterate_devices(struct dm_target *ti,
                                  iterate_devices_callout_fn fn, void *data)
{
        struct dm_dev *dev = ti->private;

        return fn(ti, dev, 0, ti->len, data);
}
static struct target_type origin_target = {
        .name    = "snapshot-origin",
        .version = {1, 7, 1},
        .module  = THIS_MODULE,
        .ctr     = origin_ctr,
        .dtr     = origin_dtr,
        .map     = origin_map,
        .resume  = origin_resume,
        .status  = origin_status,
        .merge   = origin_merge,
        .iterate_devices = origin_iterate_devices,
};
static struct target_type snapshot_target = {
        .name    = "snapshot",
        .version = {1, 10, 2},
        .module  = THIS_MODULE,
        .ctr     = snapshot_ctr,
        .dtr     = snapshot_dtr,
        .map     = snapshot_map,
        .end_io  = snapshot_end_io,
        .preresume  = snapshot_preresume,
        .resume  = snapshot_resume,
        .status  = snapshot_status,
        .iterate_devices = snapshot_iterate_devices,
};
static struct target_type merge_target = {
        .name    = dm_snapshot_merge_target_name,
        .version = {1, 1, 0},
        .module  = THIS_MODULE,
        .ctr     = snapshot_ctr,
        .dtr     = snapshot_dtr,
        .map     = snapshot_merge_map,
        .end_io  = snapshot_end_io,
        .presuspend = snapshot_merge_presuspend,
        .preresume  = snapshot_preresume,
        .resume  = snapshot_merge_resume,
        .status  = snapshot_status,
        .iterate_devices = snapshot_iterate_devices,
};
static int __init dm_snapshot_init(void)
{
        int r;

        r = dm_exception_store_init();
        if (r) {
                DMERR("Failed to initialize exception stores");
                return r;
        }

        r = dm_register_target(&snapshot_target);
        if (r < 0) {
                DMERR("snapshot target register failed %d", r);
                goto bad_register_snapshot_target;
        }

        r = dm_register_target(&origin_target);
        if (r < 0) {
                DMERR("Origin target register failed %d", r);
                goto bad_register_origin_target;
        }

        r = dm_register_target(&merge_target);
        if (r < 0) {
                DMERR("Merge target register failed %d", r);
                goto bad_register_merge_target;
        }

        r = init_origin_hash();
        if (r) {
                DMERR("init_origin_hash failed.");
                goto bad_origin_hash;
        }

        exception_cache = KMEM_CACHE(dm_exception, 0);
        if (!exception_cache) {
                DMERR("Couldn't create exception cache.");
                r = -ENOMEM;
                goto bad_exception_cache;
        }

        pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
        if (!pending_cache) {
                DMERR("Couldn't create pending cache.");
                r = -ENOMEM;
                goto bad_pending_cache;
        }

        tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
        if (!tracked_chunk_cache) {
                DMERR("Couldn't create cache to track chunks in use.");
                r = -ENOMEM;
                goto bad_tracked_chunk_cache;
        }

        return 0;

bad_tracked_chunk_cache:
        kmem_cache_destroy(pending_cache);
bad_pending_cache:
        kmem_cache_destroy(exception_cache);
bad_exception_cache:
        exit_origin_hash();
bad_origin_hash:
        dm_unregister_target(&merge_target);
bad_register_merge_target:
        dm_unregister_target(&origin_target);
bad_register_origin_target:
        dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
        dm_exception_store_exit();

        return r;
}
static void __exit dm_snapshot_exit(void)
{
        dm_unregister_target(&snapshot_target);
        dm_unregister_target(&origin_target);
        dm_unregister_target(&merge_target);

        exit_origin_hash();
        kmem_cache_destroy(pending_cache);
        kmem_cache_destroy(exception_cache);
        kmem_cache_destroy(tracked_chunk_cache);

        dm_exception_store_exit();
}
/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
MODULE_ALIAS("dm-snapshot-origin");
MODULE_ALIAS("dm-snapshot-merge");