/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
	((ti)->type->name == dm_snapshot_merge_target_name)

/*
 * The size of the mempool used to track chunks in use.
 */

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))
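/*
 * Illustrative note (not part of the driver): DM_TRACKED_CHUNK_HASH() simply
 * keeps the low four bits of the chunk number, so consecutive chunks spread
 * across the 16 buckets and wrap around, e.g.:
 *
 *	DM_TRACKED_CHUNK_HASH(0)  == 0
 *	DM_TRACKED_CHUNK_HASH(5)  == 5
 *	DM_TRACKED_CHUNK_HASH(16) == 0
 *	DM_TRACKED_CHUNK_HASH(21) == 5
 */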
struct dm_exception_table {
	uint32_t hash_mask;
	unsigned hash_shift;
	struct list_head *table;
};

struct dm_snapshot {
	struct rw_semaphore lock;

	struct dm_dev *origin;
	struct dm_dev *cow;
	struct dm_target *ti;

	/* List of snapshots per Origin */
	struct list_head list;

	/*
	 * You can't use a snapshot if this is 0 (e.g. if full).
	 * A snapshot-merge target never clears this.
	 */
	int valid;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	atomic_t pending_exceptions_count;

	mempool_t *pending_pool;

	struct dm_exception_table pending;
	struct dm_exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* Chunks with outstanding reads */
	spinlock_t tracked_chunk_lock;
	mempool_t *tracked_chunk_pool;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

	/* The on disk metadata handler */
	struct dm_exception_store *store;

	struct dm_kcopyd_client *kcopyd_client;

	/* Wait for events based on state_bits */
	unsigned long state_bits;

	/* Range of chunks currently being merged. */
	chunk_t first_merging_chunk;
	int num_merging_chunks;

	/*
	 * The merge operation failed if this flag is set.
	 * Failure modes are handled as follows:
	 * - I/O error reading the header
	 *   	=> don't load the target; abort.
	 * - Header does not have "valid" flag set
	 *   	=> use the origin; forget about the snapshot.
	 * - I/O error when reading exceptions
	 *   	=> don't load the target; abort.
	 *   	(We can't use the intermediate origin state.)
	 * - I/O error while merging
	 *	=> stop merging; set merge_failed; process I/O normally.
	 */
	int merge_failed;

	/*
	 * Incoming bios that overlap with chunks being merged must wait
	 * for them to be committed.
	 */
	struct bio_list bios_queued_during_merge;
};

/*
 * RUNNING_MERGE  - Merge operation is in progress.
 * SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                  cleared afterwards.
 */
#define RUNNING_MERGE          0
#define SHUTDOWN_MERGE         1
struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
	return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
	return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);
static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}
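/*
 * Illustrative example (not part of the driver): with a 64KiB chunk size the
 * store's chunk_shift is 7 (128 sectors of 512 bytes per chunk), so
 * chunk_to_sector(store, 3) == 3 << 7 == 384, i.e. chunk 3 starts at
 * sector 384 of the device.
 */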
static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}
struct dm_snap_pending_exception {
	struct dm_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list.
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;

	/*
	 * For writing a complete chunk, bypassing the copy.
	 */
	struct bio *full_bio;
	bio_end_io_t *full_bio_end_io;
	void *full_bio_private;
};
/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;
static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
						 chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
							GFP_NOIO);
	unsigned long flags;

	c->chunk = chunk;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
				struct dm_snap_tracked_chunk *c)
{
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	mempool_free(c, s->tracked_chunk_pool);
}
static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	struct hlist_node *hn;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c, hn,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}

/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
	while (__chunk_is_tracked(s, chunk))
		msleep(1);
}
/*
 * One of these per registered origin, held in the snapshot_origins hash.
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect.
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;
static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);

	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}
static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];

	list_add_tail(&o->hash_list, sl);
}
/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 * There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
					struct dm_snapshot **snap_src,
					struct dm_snapshot **snap_dest,
					struct dm_snapshot **snap_merge)
{
	struct dm_snapshot *s;
	struct origin *o;
	int count = 0;
	int active;

	o = __lookup_origin(snap->origin->bdev);
	if (!o)
		goto out;

	list_for_each_entry(s, &o->snapshots, list) {
		if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
			*snap_merge = s;
		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
			continue;

		down_read(&s->lock);
		active = s->active;
		up_read(&s->lock);

		if (active) {
			if (snap_src)
				*snap_src = s;
		} else if (snap_dest)
			*snap_dest = s;

		count++;
	}

out:
	return count;
}
/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
	struct dm_snapshot *snap_merge = NULL;

	/* Does snapshot need exceptions handed over to it? */
	if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
					  &snap_merge) == 2) ||
	    snap_dest) {
		snap->ti->error = "Snapshot cow pairing for exception "
				  "table handover failed";
		return -EINVAL;
	}

	/*
	 * If no snap_src was found, snap cannot become a handover
	 * destination.
	 */
	if (!snap_src)
		return 0;

	/*
	 * Non-snapshot-merge handover?
	 */
	if (!dm_target_is_snapshot_merge(snap->ti))
		return 1;

	/*
	 * Do not allow more than one merging snapshot.
	 */
	if (snap_merge) {
		snap->ti->error = "A snapshot is already merging.";
		return -EINVAL;
	}

	if (!snap_src->store->type->prepare_merge ||
	    !snap_src->store->type->commit_merge) {
		snap->ti->error = "Snapshot exception store does not "
				  "support snapshot-merge.";
		return -EINVAL;
	}

	return 1;
}
static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
	struct dm_snapshot *l;

	/* Sort the list according to chunk size, largest-first smallest-last */
	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < s->store->chunk_size)
			break;
	list_add_tail(&s->list, &l->list);
}
/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o = NULL;
	struct block_device *bdev = snap->origin->bdev;
	int r = 0;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);

	r = __validate_exception_handover(snap);
	if (r < 0) {
		kfree(new_o);
		goto out;
	}

	o = __lookup_origin(bdev);
	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	__insert_snapshot(o, snap);

out:
	up_write(&_origins_lock);

	return r;
}
/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
	struct block_device *bdev = s->origin->bdev;

	down_write(&_origins_lock);

	list_del(&s->list);
	__insert_snapshot(__lookup_origin(bdev), s);

	up_write(&_origins_lock);
}
static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (o && list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}
/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
				   uint32_t size, unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}
static void dm_exception_table_exit(struct dm_exception_table *et,
				    struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}
static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}
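/*
 * Illustrative example (not part of the driver): because the low hash_shift
 * bits are dropped before masking, a run of consecutive chunks lands in one
 * bucket.  E.g. with hash_shift == 3 and hash_mask == 0xff, chunks 40..47 all
 * hash to (40 >> 3) & 0xff == 5, so a consecutive group can be found with a
 * single list walk.
 */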
static void dm_remove_exception(struct dm_exception *e)
{
	list_del(&e->hash_list);
}
/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
						chunk_t chunk)
{
	struct list_head *slot;
	struct dm_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}
static struct dm_exception *alloc_completed_exception(void)
{
	struct dm_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_completed_exception(struct dm_exception *e)
{
	kmem_cache_free(exception_cache, e);
}
static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic_dec();
	atomic_dec(&s->pending_exceptions_count);
}
static void dm_insert_exception(struct dm_exception_table *eh,
				struct dm_exception *new_e)
{
	struct list_head *l;
	struct dm_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_completed_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_completed_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}
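/*
 * Illustrative example (not part of the driver), assuming the consecutive-
 * chunk encoding from dm-exception-store.h: an exception with old_chunk == 10,
 * dm_chunk_number(new_chunk) == 100 and dm_consecutive_chunk_count(e) == 2
 * covers old chunks 10..12 mapped to new chunks 100..102.  Inserting a new
 * exception for old chunk 13 -> new chunk 103 therefore matches the
 * "insert after" test above and only bumps the consecutive count instead of
 * adding a second table entry.
 */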
/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_snapshot *s = context;
	struct dm_exception *e;

	e = alloc_completed_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	dm_insert_exception(&s->complete, e);

	return 0;
}
/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static uint32_t __minimum_chunk_size(struct origin *o)
{
	struct dm_snapshot *snap;
	unsigned chunk_size = 0;

	if (o)
		list_for_each_entry(snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);

	return (uint32_t) chunk_size;
}

static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;

	mem /= sizeof(struct list_head);

	return mem;
}
/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	if (hash_size < 64)
		hash_size = 64;
	hash_size = rounddown_pow_of_two(hash_size);
	if (dm_exception_table_init(&s->complete, hash_size,
				    DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (dm_exception_table_init(&s->pending, hash_size, 0)) {
		dm_exception_table_exit(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}
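/*
 * Illustrative sizing example (not part of the driver): for a 4GiB origin,
 * a 1GiB COW device and 8KiB chunks (chunk_shift == 4), the smaller device
 * is 2097152 sectors, giving 2097152 >> 4 == 131072 candidate buckets; this
 * is then capped by calc_max_buckets() (2MB divided by the size of a list
 * head) and rounded down to a power of two before the table is allocated.
 */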
static void merge_shutdown(struct dm_snapshot *s)
{
	clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
	smp_mb__after_clear_bit();
	wake_up_bit(&s->state_bits, RUNNING_MERGE);
}
static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;

	return bio_list_get(&s->bios_queued_during_merge);
}
/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
					   chunk_t old_chunk)
{
	struct dm_exception *e;

	e = dm_lookup_exception(&s->complete, old_chunk);
	if (!e) {
		DMERR("Corruption detected: exception for block %llu is "
		      "on disk but not in memory",
		      (unsigned long long)old_chunk);
		return -EINVAL;
	}

	/*
	 * If this is the only chunk using this exception, remove exception.
	 */
	if (!dm_consecutive_chunk_count(e)) {
		dm_remove_exception(e);
		free_completed_exception(e);
		return 0;
	}

	/*
	 * The chunk may be either at the beginning or the end of a
	 * group of consecutive chunks - never in the middle.  We are
	 * removing chunks in the opposite order to that in which they
	 * were added, so this should always be true.
	 * Decrement the consecutive chunk counter and adjust the
	 * starting point if necessary.
	 */
	if (old_chunk == e->old_chunk) {
		e->old_chunk++;
		e->new_chunk++;
	} else if (old_chunk != e->old_chunk +
		   dm_consecutive_chunk_count(e)) {
		DMERR("Attempt to merge block %llu from the "
		      "middle of a chunk range [%llu - %llu]",
		      (unsigned long long)old_chunk,
		      (unsigned long long)e->old_chunk,
		      (unsigned long long)
		      e->old_chunk + dm_consecutive_chunk_count(e));
		return -EINVAL;
	}

	dm_consecutive_chunk_count_dec(e);

	return 0;
}
static void flush_bios(struct bio *bio);

static int remove_single_exception_chunk(struct dm_snapshot *s)
{
	struct bio *b = NULL;
	int r;
	chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

	down_write(&s->lock);

	/*
	 * Process chunks (and associated exceptions) in reverse order
	 * so that dm_consecutive_chunk_count_dec() accounting works.
	 */
	do {
		r = __remove_single_exception_chunk(s, old_chunk);
		if (r)
			goto out;
	} while (old_chunk-- > s->first_merging_chunk);

	b = __release_queued_bios_after_merge(s);

out:
	up_write(&s->lock);
	if (b)
		flush_bios(b);

	return r;
}
static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
			   void *context);

static uint64_t read_pending_exceptions_done_count(void)
{
	uint64_t pending_exceptions_done;

	spin_lock(&_pending_exceptions_done_spinlock);
	pending_exceptions_done = _pending_exceptions_done_count;
	spin_unlock(&_pending_exceptions_done_spinlock);

	return pending_exceptions_done;
}

static void increment_pending_exceptions_done_count(void)
{
	spin_lock(&_pending_exceptions_done_spinlock);
	_pending_exceptions_done_count++;
	spin_unlock(&_pending_exceptions_done_spinlock);

	wake_up_all(&_pending_exceptions_done);
}
static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
	int i, linear_chunks;
	chunk_t old_chunk, new_chunk;
	struct dm_io_region src, dest;
	sector_t io_size;
	uint64_t previous_count;

	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
	if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
		goto shut;

	/*
	 * valid flag never changes during merge, so no lock required.
	 */
	if (!s->valid) {
		DMERR("Snapshot is invalid: can't merge");
		goto shut;
	}

	linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
						      &new_chunk);
	if (linear_chunks <= 0) {
		if (linear_chunks < 0) {
			DMERR("Read error in exception store: "
			      "shutting down merge");
			down_write(&s->lock);
			s->merge_failed = 1;
			up_write(&s->lock);
		}
		goto shut;
	}

	/* Adjust old_chunk and new_chunk to reflect start of linear region */
	old_chunk = old_chunk + 1 - linear_chunks;
	new_chunk = new_chunk + 1 - linear_chunks;

	/*
	 * Use one (potentially large) I/O to copy all 'linear_chunks'
	 * from the exception store to the origin
	 */
	io_size = linear_chunks * s->store->chunk_size;

	dest.bdev = s->origin->bdev;
	dest.sector = chunk_to_sector(s->store, old_chunk);
	dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

	src.bdev = s->cow->bdev;
	src.sector = chunk_to_sector(s->store, new_chunk);
	src.count = dest.count;

	/*
	 * Reallocate any exceptions needed in other snapshots then
	 * wait for the pending exceptions to complete.
	 * Each time any pending exception (globally on the system)
	 * completes we are woken and repeat the process to find out
	 * if we can proceed.  While this may not seem a particularly
	 * efficient algorithm, it is not expected to have any
	 * significant impact on performance.
	 */
	previous_count = read_pending_exceptions_done_count();
	while (origin_write_extent(s, dest.sector, io_size)) {
		wait_event(_pending_exceptions_done,
			   (read_pending_exceptions_done_count() !=
			    previous_count));
		/* Retry after the wait, until all exceptions are done. */
		previous_count = read_pending_exceptions_done_count();
	}

	down_write(&s->lock);
	s->first_merging_chunk = old_chunk;
	s->num_merging_chunks = linear_chunks;
	up_write(&s->lock);

	/* Wait until writes to all 'linear_chunks' drain */
	for (i = 0; i < linear_chunks; i++)
		__check_for_conflicting_io(s, old_chunk + i);

	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
	return;

shut:
	merge_shutdown(s);
}
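/*
 * Illustrative example (not part of the driver): if prepare_merge() reports
 * linear_chunks == 4 ending at old_chunk == 103 / new_chunk == 211, the code
 * above rewinds both to the start of the run (old_chunk = 103 + 1 - 4 = 100,
 * new_chunk = 211 + 1 - 4 = 208) and issues one kcopyd copy of
 * 4 * chunk_size sectors from the COW device back to the origin.
 */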
static void error_bios(struct bio *bio);

static void merge_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snapshot *s = context;
	struct bio *b = NULL;

	if (read_err || write_err) {
		if (read_err)
			DMERR("Read error: shutting down merge.");
		else
			DMERR("Write error: shutting down merge.");
		goto shut;
	}

	if (s->store->type->commit_merge(s->store,
					 s->num_merging_chunks) < 0) {
		DMERR("Write error in exception store: shutting down merge");
		goto shut;
	}

	if (remove_single_exception_chunk(s) < 0)
		goto shut;

	snapshot_merge_next_chunks(s);

	return;

shut:
	down_write(&s->lock);
	s->merge_failed = 1;
	b = __release_queued_bios_after_merge(s);
	up_write(&s->lock);
	error_bios(b);

	merge_shutdown(s);
}
static void start_merge(struct dm_snapshot *s)
{
	if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
		snapshot_merge_next_chunks(s);
}

static int wait_schedule(void *ptr)
{
	schedule();

	return 0;
}

/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
	set_bit(SHUTDOWN_MERGE, &s->state_bits);
	wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
		    TASK_UNINTERRUPTIBLE);
	clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}
/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char *origin_path, *cow_path;
	unsigned args_used, num_flush_requests = 1;
	fmode_t origin_mode = FMODE_READ;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad;
	}

	if (dm_target_is_snapshot_merge(ti)) {
		num_flush_requests = 2;
		origin_mode = FMODE_WRITE;
	}

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s) {
		ti->error = "Cannot allocate private snapshot structure";
		r = -ENOMEM;
		goto bad;
	}

	origin_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad_origin;
	}

	cow_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
	if (r) {
		ti->error = "Cannot get COW device";
		goto bad_cow;
	}

	r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad_store;
	}

	argv += args_used;
	argc -= args_used;

	s->ti = ti;
	s->valid = 1;
	s->active = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	init_rwsem(&s->lock);
	INIT_LIST_HEAD(&s->list);
	spin_lock_init(&s->pe_lock);
	s->state_bits = 0;
	s->merge_failed = 0;
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;
	bio_list_init(&s->bios_queued_during_merge);

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad_hash_tables;
	}

	s->kcopyd_client = dm_kcopyd_client_create();
	if (IS_ERR(s->kcopyd_client)) {
		r = PTR_ERR(s->kcopyd_client);
		ti->error = "Could not create kcopyd client";
		goto bad_kcopyd;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
							 tracked_chunk_cache);
	if (!s->tracked_chunk_pool) {
		ti->error = "Could not allocate tracked_chunk mempool for "
			    "tracking reads";
		goto bad_tracked_chunk_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	ti->private = s;
	ti->num_flush_requests = num_flush_requests;

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	r = register_snapshot(s);
	if (r == -ENOMEM) {
		ti->error = "Snapshot origin struct allocation failed";
		goto bad_load_and_register;
	} else if (r < 0) {
		/* invalid handover, register_snapshot has set ti->error */
		goto bad_load_and_register;
	}

	/*
	 * Metadata must only be loaded into one table at once, so skip this
	 * if metadata will be handed over during resume.
	 * Chunk size will be set during the handover - set it to zero to
	 * ensure it's ignored.
	 */
	if (r > 0) {
		s->store->chunk_size = 0;
		return 0;
	}

	r = s->store->type->read_metadata(s->store, dm_add_exception,
					  (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_read_metadata;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	if (!s->store->chunk_size) {
		ti->error = "Chunk size not set";
		goto bad_read_metadata;
	}

	r = dm_set_target_max_io_len(ti, s->store->chunk_size);
	if (r)
		goto bad_read_metadata;

	return 0;

bad_read_metadata:
	unregister_snapshot(s);

bad_load_and_register:
	mempool_destroy(s->tracked_chunk_pool);

bad_tracked_chunk_pool:
	mempool_destroy(s->pending_pool);

bad_pending_pool:
	dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
	dm_exception_store_destroy(s->store);

bad_store:
	dm_put_device(ti, s->cow);

bad_cow:
	dm_put_device(ti, s->origin);

bad_origin:
	kfree(s);

bad:
	return r;
}
static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);
}
static void __handover_exceptions(struct dm_snapshot *snap_src,
				  struct dm_snapshot *snap_dest)
{
	union {
		struct dm_exception_table table_swap;
		struct dm_exception_store *store_swap;
	} u;

	/*
	 * Swap all snapshot context information between the two instances.
	 */
	u.table_swap = snap_dest->complete;
	snap_dest->complete = snap_src->complete;
	snap_src->complete = u.table_swap;

	u.store_swap = snap_dest->store;
	snap_dest->store = snap_src->store;
	snap_src->store = u.store_swap;

	snap_dest->store->snap = snap_dest;
	snap_src->store->snap = snap_src;

	snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
	snap_dest->valid = snap_src->valid;

	/*
	 * Set source invalid to ensure it receives no further I/O.
	 */
	snap_src->valid = 0;
}
static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	/* Check whether exception handover must be cancelled */
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest && (s == snap_src)) {
		down_write(&snap_dest->lock);
		snap_dest->valid = 0;
		up_write(&snap_dest->lock);
		DMERR("Cancelling snapshot handover.");
	}
	up_read(&_origins_lock);

	if (dm_target_is_snapshot_merge(ti))
		stop_merge(s);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	mempool_destroy(s->tracked_chunk_pool);

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_exception_store_destroy(s->store);

	dm_put_device(ti, s->cow);

	dm_put_device(ti, s->origin);

	kfree(s);
}
/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static int do_origin(struct dm_dev *origin, struct bio *bio);

/*
 * Flush a list of buffers.
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
	struct bio *n;
	int r;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		r = do_origin(s->origin, bio);
		if (r == DM_MAPIO_REMAPPED)
			generic_make_request(bio);
		bio = n;
	}
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}
static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store->type->drop_snapshot)
		s->store->type->drop_snapshot(s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}
static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	struct bio *full_bio = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_completed_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_completed_exception(e);
		error = 1;
		goto out;
	}

	/* Check for conflicting reads */
	__check_for_conflicting_io(s, pe->e.old_chunk);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	dm_insert_exception(&s->complete, e);

out:
	dm_remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = bio_list_get(&pe->origin_bios);
	full_bio = pe->full_bio;
	if (full_bio) {
		full_bio->bi_end_io = pe->full_bio_end_io;
		full_bio->bi_private = pe->full_bio_private;
	}
	free_pending_exception(pe);

	increment_pending_exceptions_done_count();

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error) {
		if (full_bio)
			bio_io_error(full_bio);
		error_bios(snapshot_bios);
	} else {
		if (full_bio)
			bio_endio(full_bio, 0);
		flush_bios(snapshot_bios);
	}

	retry_origin_bios(s, origin_bios);
}
static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}
/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);
	else
		/* Update the metadata if we are persistent */
		s->store->type->commit_exception(s->store, &pe->e,
						 commit_callback, pe);
}
/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}
static void full_bio_end_io(struct bio *bio, int error)
{
	void *callback_data = bio->bi_private;

	dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0);
}

static void start_full_bio(struct dm_snap_pending_exception *pe,
			   struct bio *bio)
{
	struct dm_snapshot *s = pe->snap;
	void *callback_data;

	pe->full_bio = bio;
	pe->full_bio_end_io = bio->bi_end_io;
	pe->full_bio_private = bio->bi_private;

	callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
						   copy_callback, pe);

	bio->bi_end_io = full_bio_end_io;
	bio->bi_private = callback_data;

	generic_make_request(bio);
}
static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}
/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	struct dm_snap_pending_exception *pe2;

	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		free_pending_exception(pe);
		return pe2;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->started = 0;
	pe->full_bio = NULL;

	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	dm_insert_exception(&s->pending, &pe->e);

	return pe;
}
static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_sector = chunk_to_sector(s->store,
					 dm_chunk_number(e->new_chunk) +
					 (chunk - e->old_chunk)) +
			 (bio->bi_sector &
			  s->store->chunk_mask);
}
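/*
 * Illustrative example (not part of the driver): with chunk_shift == 7
 * (64KiB chunks, chunk_mask == 127), a write to origin sector 390 lies in
 * chunk 3 (390 >> 7).  If that chunk is covered by an exception whose
 * new_chunk number is 42, the bio is redirected to the COW device at sector
 * (42 << 7) + (390 & 127) == 5376 + 6 == 5382, preserving the offset within
 * the chunk.
 */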
static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	if (bio->bi_rw & REQ_FLUSH) {
		bio->bi_bdev = s->cow->bdev;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			up_write(&s->lock);
			pe = alloc_pending_exception(s);
			down_write(&s->lock);

			if (!s->valid) {
				free_pending_exception(pe);
				r = -EIO;
				goto out_unlock;
			}

			e = dm_lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(s, -ENOMEM);
				r = -EIO;
				goto out_unlock;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started &&
		    bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) {
			pe->started = 1;
			up_write(&s->lock);
			start_full_bio(pe, bio);
			goto out;
		}

		bio_list_add(&pe->snapshot_bios, bio);

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		map_context->ptr = track_chunk(s, chunk);
	}

out_unlock:
	up_write(&s->lock);
out:
	return r;
}
/*
 * A snapshot-merge target behaves like a combination of a snapshot
 * target and a snapshot-origin target.  It only generates new
 * exceptions in other snapshots and not in the one that is being
 * merged.
 *
 * For each chunk, if there is an existing exception, it is used to
 * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
 * which in turn might generate exceptions in other snapshots.
 * If merging is currently taking place on the chunk in question, the
 * I/O is deferred by adding it to s->bios_queued_during_merge.
 */
static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
			      union map_info *map_context)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;

	if (bio->bi_rw & REQ_FLUSH) {
		if (!map_context->target_request_nr)
			bio->bi_bdev = s->origin->bdev;
		else
			bio->bi_bdev = s->cow->bdev;
		map_context->ptr = NULL;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_sector);

	down_write(&s->lock);

	/* Full merging snapshots are redirected to the origin */
	if (!s->valid)
		goto redirect_to_origin;

	/* If the block is already remapped - use that */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		/* Queue writes overlapping with chunks being merged */
		if (bio_rw(bio) == WRITE &&
		    chunk >= s->first_merging_chunk &&
		    chunk < (s->first_merging_chunk +
			     s->num_merging_chunks)) {
			bio->bi_bdev = s->origin->bdev;
			bio_list_add(&s->bios_queued_during_merge, bio);
			r = DM_MAPIO_SUBMITTED;
			goto out_unlock;
		}

		remap_exception(s, e, bio, chunk);

		if (bio_rw(bio) == WRITE)
			map_context->ptr = track_chunk(s, chunk);
		goto out_unlock;
	}

redirect_to_origin:
	bio->bi_bdev = s->origin->bdev;

	if (bio_rw(bio) == WRITE) {
		up_write(&s->lock);
		return do_origin(s->origin, bio);
	}

out_unlock:
	up_write(&s->lock);

	return r;
}
static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
			   int error, union map_info *map_context)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snap_tracked_chunk *c = map_context->ptr;

	if (c)
		stop_tracking_chunk(s, c);

	return 0;
}
static void snapshot_merge_presuspend(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	stop_merge(s);
}
static int snapshot_preresume(struct dm_target *ti)
{
	int r = 0;
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_read(&snap_src->lock);
		if (s == snap_src) {
			DMERR("Unable to resume snapshot source until "
			      "handover completes.");
			r = -EINVAL;
		} else if (!dm_suspended(snap_src->ti)) {
			DMERR("Unable to perform snapshot handover until "
			      "source is suspended.");
			r = -EINVAL;
		}
		up_read(&snap_src->lock);
	}
	up_read(&_origins_lock);

	return r;
}
static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_write(&snap_src->lock);
		down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
		__handover_exceptions(snap_src, snap_dest);
		up_write(&snap_dest->lock);
		up_write(&snap_src->lock);
	}
	up_read(&_origins_lock);

	/* Now we have correct chunk size, reregister */
	reregister_snapshot(s);

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}
static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
{
	uint32_t min_chunksize;

	down_read(&_origins_lock);
	min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
	up_read(&_origins_lock);

	return min_chunksize;
}
static void snapshot_merge_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	/*
	 * Handover exceptions from existing snapshot.
	 */
	snapshot_resume(ti);

	/*
	 * snapshot-merge acts as an origin, so set ti->max_io_len
	 */
	ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);

	start_merge(s);
}
static int snapshot_status(struct dm_target *ti, status_type_t type,
			   unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:

		down_write(&snap->lock);

		if (!snap->valid)
			DMEMIT("Invalid");
		else if (snap->merge_failed)
			DMEMIT("Merge failed");
		else {
			if (snap->store->type->usage) {
				sector_t total_sectors, sectors_allocated,
					 metadata_sectors;
				snap->store->type->usage(snap->store,
							 &total_sectors,
							 &sectors_allocated,
							 &metadata_sectors);
				DMEMIT("%llu/%llu %llu",
				       (unsigned long long)sectors_allocated,
				       (unsigned long long)total_sectors,
				       (unsigned long long)metadata_sectors);
			}
			else
				DMEMIT("Unknown");
		}

		up_write(&snap->lock);

		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * be useful.
		 */
		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
		snap->store->type->status(snap->store, type, result + sz,
					  maxlen - sz);
		break;
	}

	return 0;
}
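/*
 * Illustrative example (not part of the driver): for a healthy snapshot the
 * STATUSTYPE_INFO line built above is "sectors_allocated/total_sectors
 * metadata_sectors", e.g. "1056/204800 40"; the strings "Invalid" and
 * "Merge failed" replace the numbers in the corresponding error states.
 */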
static int snapshot_iterate_devices(struct dm_target *ti,
				    iterate_devices_callout_fn fn, void *data)
{
	struct dm_snapshot *snap = ti->private;
	int r;

	r = fn(ti, snap->origin, 0, ti->len, data);

	if (!r)
		r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);

	return r;
}
/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/

/*
 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 * supplied bio was ignored.  The caller may submit it immediately.
 * (No remapping actually occurs as the origin is always a direct linear
 * map.)
 *
 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
 * and any supplied bio is added to a list to be submitted once all
 * the necessary exceptions exist.
 */
static int __origin_write(struct list_head *snapshots, sector_t sector,
			  struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED;
	struct dm_snapshot *snap;
	struct dm_exception *e;
	struct dm_snap_pending_exception *pe;
	struct dm_snap_pending_exception *pe_to_start_now = NULL;
	struct dm_snap_pending_exception *pe_to_start_last = NULL;
	chunk_t chunk;

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {
		/*
		 * Don't make new exceptions in a merging snapshot
		 * because it has effectively been deleted
		 */
		if (dm_target_is_snapshot_merge(snap->ti))
			continue;

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (sector >= dm_table_get_size(snap->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 */
		e = dm_lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			up_write(&snap->lock);
			pe = alloc_pending_exception(snap);
			down_write(&snap->lock);

			if (!snap->valid) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			e = dm_lookup_exception(&snap->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			pe = __find_pending_exception(snap, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(snap, -ENOMEM);
				goto next_snapshot;
			}
		}

		r = DM_MAPIO_SUBMITTED;

		/*
		 * If an origin bio was supplied, queue it to wait for the
		 * completion of this exception, and start this one last,
		 * at the end of the function.
		 */
		if (bio) {
			bio_list_add(&pe->origin_bios, bio);
			bio = NULL;

			if (!pe->started) {
				pe->started = 1;
				pe_to_start_last = pe;
			}
		}

		if (!pe->started) {
			pe->started = 1;
			pe_to_start_now = pe;
		}

next_snapshot:
		up_write(&snap->lock);

		if (pe_to_start_now) {
			start_copy(pe_to_start_now);
			pe_to_start_now = NULL;
		}
	}

	/*
	 * Submit the exception against which the bio is queued last,
	 * to give the other exceptions a head start.
	 */
	if (pe_to_start_last)
		start_copy(pe_to_start_last);

	return r;
}
/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio->bi_sector, bio);
	up_read(&_origins_lock);

	return r;
}
/*
 * Trigger exceptions in all non-merging snapshots.
 *
 * The chunk size of the merging snapshot may be larger than the chunk
 * size of some other snapshot so we may need to reallocate multiple
 * chunks in other snapshots.
 *
 * We scan all the overlapping exceptions in the other snapshots.
 * Returns 1 if anything was reallocated and must be waited for,
 * otherwise returns 0.
 *
 * size must be a multiple of merging_snap's chunk_size.
 */
static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned size)
{
	int must_wait = 0;
	sector_t n;
	struct origin *o;

	/*
	 * The origin's __minimum_chunk_size() got stored in max_io_len
	 * by snapshot_merge_resume().
	 */
	down_read(&_origins_lock);
	o = __lookup_origin(merging_snap->origin->bdev);
	for (n = 0; n < size; n += merging_snap->ti->max_io_len)
		if (__origin_write(&o->snapshots, sector + n, NULL) ==
		    DM_MAPIO_SUBMITTED)
			must_wait = 1;
	up_read(&_origins_lock);

	return must_wait;
}
/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	ti->num_flush_requests = 1;

	return 0;
}
static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;

	dm_put_device(ti, dev);
}
static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = ti->private;

	bio->bi_bdev = dev->bdev;

	if (bio->bi_rw & REQ_FLUSH)
		return DM_MAPIO_REMAPPED;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}
/*
 * Set the target "max_io_len" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;

	ti->max_io_len = get_origin_minimum_chunksize(dev->bdev);
}
static int origin_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}
static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
			struct bio_vec *biovec, int max_size)
{
	struct dm_dev *dev = ti->private;
	struct request_queue *q = bdev_get_queue(dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = dev->bdev;

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
static int origin_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_dev *dev = ti->private;

	return fn(ti, dev, 0, ti->len, data);
}
static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 7, 1},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
	.merge   = origin_merge,
	.iterate_devices = origin_iterate_devices,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 10, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.preresume  = snapshot_preresume,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};

static struct target_type merge_target = {
	.name    = dm_snapshot_merge_target_name,
	.version = {1, 1, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_merge_map,
	.end_io  = snapshot_end_io,
	.presuspend = snapshot_merge_presuspend,
	.preresume  = snapshot_preresume,
	.resume  = snapshot_merge_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};
static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = dm_register_target(&snapshot_target);
	if (r < 0) {
		DMERR("snapshot target register failed %d", r);
		goto bad_register_snapshot_target;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad_register_origin_target;
	}

	r = dm_register_target(&merge_target);
	if (r < 0) {
		DMERR("Merge target register failed %d", r);
		goto bad_register_merge_target;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad_origin_hash;
	}

	exception_cache = KMEM_CACHE(dm_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad_exception_cache;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad_pending_cache;
	}

	tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
	if (!tracked_chunk_cache) {
		DMERR("Couldn't create cache to track chunks in use.");
		r = -ENOMEM;
		goto bad_tracked_chunk_cache;
	}

	return 0;

bad_tracked_chunk_cache:
	kmem_cache_destroy(pending_cache);
bad_pending_cache:
	kmem_cache_destroy(exception_cache);
bad_exception_cache:
	exit_origin_hash();
bad_origin_hash:
	dm_unregister_target(&merge_target);
bad_register_merge_target:
	dm_unregister_target(&origin_target);
bad_register_origin_target:
	dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
	dm_exception_store_exit();

	return r;
}
static void __exit dm_snapshot_exit(void)
{
	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);
	dm_unregister_target(&merge_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
	kmem_cache_destroy(tracked_chunk_cache);

	dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");