/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
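
/*
 * The merge target registers dm_snapshot_merge_target_name as its
 * type->name (see merge_target below), so a target's type can be
 * tested with a pointer comparison instead of strcmp().
 */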
#define dm_target_is_snapshot_merge(ti) \
	((ti)->type->name == dm_snapshot_merge_target_name)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))

struct dm_exception_table {
	uint32_t hash_mask;
	unsigned hash_shift;
	struct list_head *table;
};

struct dm_snapshot {
	struct rw_semaphore lock;

	struct dm_dev *origin;
	struct dm_dev *cow;

	struct dm_target *ti;

	/* List of snapshots per Origin */
	struct list_head list;

	/*
	 * You can't use a snapshot if this is 0 (e.g. if full).
	 * A snapshot-merge target never clears this.
	 */
	int valid;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	atomic_t pending_exceptions_count;

	/* Protected by "lock" */
	sector_t exception_start_sequence;

	/* Protected by kcopyd single-threaded callback */
	sector_t exception_complete_sequence;

	/*
	 * A list of pending exceptions that completed out of order.
	 * Protected by kcopyd single-threaded callback.
	 */
	struct list_head out_of_order_list;

	mempool_t *pending_pool;

	struct dm_exception_table pending;
	struct dm_exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* Chunks with outstanding reads */
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

	/* The on disk metadata handler */
	struct dm_exception_store *store;

	struct dm_kcopyd_client *kcopyd_client;

	/* Wait for events based on state_bits */
	unsigned long state_bits;

	/* Range of chunks currently being merged. */
	chunk_t first_merging_chunk;
	int num_merging_chunks;

	/*
	 * The merge operation failed if this flag is set.
	 * Failure modes are handled as follows:
	 * - I/O error reading the header
	 *   => don't load the target; abort.
	 * - Header does not have "valid" flag set
	 *   => use the origin; forget about the snapshot.
	 * - I/O error when reading exceptions
	 *   => don't load the target; abort.
	 *   (We can't use the intermediate origin state.)
	 * - I/O error while merging
	 *   => stop merging; set merge_failed; process I/O normally.
	 */
	int merge_failed;

	/*
	 * Incoming bios that overlap with chunks being merged must wait
	 * for them to be committed.
	 */
	struct bio_list bios_queued_during_merge;
};

/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE		0
#define SHUTDOWN_MERGE		1

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
	return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
	return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}

struct dm_snap_pending_exception {
	struct dm_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;

	/* There was copying error. */
	int copy_error;

	/* A sequence number, it is used for in-order completion. */
	sector_t exception_sequence;

	struct list_head out_of_order_entry;

	/*
	 * For writing a complete chunk, bypassing the copy.
	 */
	struct bio *full_bio;
	bio_end_io_t *full_bio_end_io;
	void *full_bio_private;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};
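
/*
 * Per-bio accounting of chunks with I/O in flight.  A bio that must not
 * race with a remap is added to a small hash keyed by chunk; the exception
 * and merge completion paths use __check_for_conflicting_io() to wait for
 * tracked bios to drain before a chunk is remapped.
 */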
static void init_tracked_chunk(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	INIT_HLIST_NODE(&c->node);
}

static bool is_bio_tracked(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	return !hlist_unhashed(&c->node);
}

static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	c->chunk = chunk;

	spin_lock_irq(&s->tracked_chunk_lock);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irq(&s->tracked_chunk_lock);
}

static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}

/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
	while (__chunk_is_tracked(s, chunk))
		msleep(1);
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];

	list_add_tail(&o->hash_list, sl);
}

/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 *   There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
					struct dm_snapshot **snap_src,
					struct dm_snapshot **snap_dest,
					struct dm_snapshot **snap_merge)
{
	struct dm_snapshot *s;
	struct origin *o;
	int count = 0;
	int active;

	o = __lookup_origin(snap->origin->bdev);
	if (!o)
		goto out;

	list_for_each_entry(s, &o->snapshots, list) {
		if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
			*snap_merge = s;
		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
			continue;

		down_read(&s->lock);
		active = s->active;
		up_read(&s->lock);

		if (active) {
			if (snap_src)
				*snap_src = s;
		} else if (snap_dest)
			*snap_dest = s;

		count++;
	}

out:
	return count;
}

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
	struct dm_snapshot *snap_merge = NULL;

	/* Does snapshot need exceptions handed over to it? */
	if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
					  &snap_merge) == 2) ||
	    snap_dest) {
		snap->ti->error = "Snapshot cow pairing for exception "
				  "table handover failed";
		return -EINVAL;
	}

	/*
	 * If no snap_src was found, snap cannot become a handover
	 * destination.
	 */
	if (!snap_src)
		return 0;

	/*
	 * Non-snapshot-merge handover?
	 */
	if (!dm_target_is_snapshot_merge(snap->ti))
		return 1;

	/*
	 * Do not allow more than one merging snapshot.
	 */
	if (snap_merge) {
		snap->ti->error = "A snapshot is already merging.";
		return -EINVAL;
	}

	if (!snap_src->store->type->prepare_merge ||
	    !snap_src->store->type->commit_merge) {
		snap->ti->error = "Snapshot exception store does not "
				  "support snapshot-merge.";
		return -EINVAL;
	}

	return 1;
}

static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
	struct dm_snapshot *l;

	/* Sort the list according to chunk size, largest-first smallest-last */
	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < s->store->chunk_size)
			break;
	list_add_tail(&s->list, &l->list);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o = NULL;
	struct block_device *bdev = snap->origin->bdev;
	int r = 0;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);

	r = __validate_exception_handover(snap);
	if (r < 0) {
		kfree(new_o);
		goto out;
	}

	o = __lookup_origin(bdev);
	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	__insert_snapshot(o, snap);

out:
	up_write(&_origins_lock);

	return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
	struct block_device *bdev = s->origin->bdev;

	down_write(&_origins_lock);

	list_del(&s->list);
	__insert_snapshot(__lookup_origin(bdev), s);

	up_write(&_origins_lock);
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (o && list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
				   uint32_t size, unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void dm_exception_table_exit(struct dm_exception_table *et,
				    struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}

static void dm_remove_exception(struct dm_exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
						chunk_t chunk)
{
	struct list_head *slot;
	struct dm_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}
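
/*
 * Completed exceptions on the I/O path are allocated GFP_NOIO; if that
 * fails, fall back to GFP_ATOMIC so the emergency pools can be used
 * rather than failing (and thereby invalidating) the snapshot.
 */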
static struct dm_exception *alloc_completed_exception(gfp_t gfp)
{
	struct dm_exception *e;

	e = kmem_cache_alloc(exception_cache, gfp);
	if (!e && gfp == GFP_NOIO)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_completed_exception(struct dm_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic();
	atomic_dec(&s->pending_exceptions_count);
}
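
/*
 * Insert a completed exception into a hash table whose buckets are kept
 * sorted by old_chunk.  Where the table supports it (hash_shift != 0),
 * an exception adjacent to an existing run, in both old and new chunk
 * numbers, is folded into that run by bumping its consecutive chunk
 * count instead of adding a new entry.
 */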
static void dm_insert_exception(struct dm_exception_table *eh,
				struct dm_exception *new_e)
{
	struct list_head *l;
	struct dm_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_completed_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_completed_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_snapshot *s = context;
	struct dm_exception *e;

	e = alloc_completed_exception(GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	dm_insert_exception(&s->complete, e);

	return 0;
}

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static uint32_t __minimum_chunk_size(struct origin *o)
{
	struct dm_snapshot *snap;
	unsigned chunk_size = 0;

	if (o)
		list_for_each_entry(snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);

	return (uint32_t) chunk_size;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	max_buckets = calc_max_buckets();

	hash_size = cow_dev_size >> s->store->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	if (hash_size < 64)
		hash_size = 64;
	hash_size = rounddown_pow_of_two(hash_size);
	if (dm_exception_table_init(&s->complete, hash_size,
				    DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (dm_exception_table_init(&s->pending, hash_size, 0)) {
		dm_exception_table_exit(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

static void merge_shutdown(struct dm_snapshot *s)
{
	clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
	smp_mb__after_atomic();
	wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;

	return bio_list_get(&s->bios_queued_during_merge);
}

/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
					   chunk_t old_chunk)
{
	struct dm_exception *e;

	e = dm_lookup_exception(&s->complete, old_chunk);
	if (!e) {
		DMERR("Corruption detected: exception for block %llu is "
		      "on disk but not in memory",
		      (unsigned long long)old_chunk);
		return -EINVAL;
	}

	/*
	 * If this is the only chunk using this exception, remove exception.
	 */
	if (!dm_consecutive_chunk_count(e)) {
		dm_remove_exception(e);
		free_completed_exception(e);
		return 0;
	}

	/*
	 * The chunk may be either at the beginning or the end of a
	 * group of consecutive chunks - never in the middle.  We are
	 * removing chunks in the opposite order to that in which they
	 * were added, so this should always be true.
	 * Decrement the consecutive chunk counter and adjust the
	 * starting point if necessary.
	 */
	if (old_chunk == e->old_chunk) {
		e->old_chunk++;
		e->new_chunk++;
	} else if (old_chunk != e->old_chunk +
		   dm_consecutive_chunk_count(e)) {
		DMERR("Attempt to merge block %llu from the "
		      "middle of a chunk range [%llu - %llu]",
		      (unsigned long long)old_chunk,
		      (unsigned long long)e->old_chunk,
		      (unsigned long long)
		      e->old_chunk + dm_consecutive_chunk_count(e));
		return -EINVAL;
	}

	dm_consecutive_chunk_count_dec(e);

	return 0;
}

static void flush_bios(struct bio *bio);

static int remove_single_exception_chunk(struct dm_snapshot *s)
{
	struct bio *b = NULL;
	int r;
	chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

	down_write(&s->lock);

	/*
	 * Process chunks (and associated exceptions) in reverse order
	 * so that dm_consecutive_chunk_count_dec() accounting works.
	 */
	do {
		r = __remove_single_exception_chunk(s, old_chunk);
		if (r)
			goto out;
	} while (old_chunk-- > s->first_merging_chunk);

	b = __release_queued_bios_after_merge(s);

out:
	up_write(&s->lock);
	if (b)
		flush_bios(b);

	return r;
}

static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
			   void *context);

static uint64_t read_pending_exceptions_done_count(void)
{
	uint64_t pending_exceptions_done;

	spin_lock(&_pending_exceptions_done_spinlock);
	pending_exceptions_done = _pending_exceptions_done_count;
	spin_unlock(&_pending_exceptions_done_spinlock);

	return pending_exceptions_done;
}

static void increment_pending_exceptions_done_count(void)
{
	spin_lock(&_pending_exceptions_done_spinlock);
	_pending_exceptions_done_count++;
	spin_unlock(&_pending_exceptions_done_spinlock);

	wake_up_all(&_pending_exceptions_done);
}
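
/*
 * Merge one contiguous run of chunks back into the origin.  Only a single
 * kcopyd job is in flight at a time: merge_callback() commits the result
 * and calls back in here to start the next run, until the exception store
 * is empty or SHUTDOWN_MERGE is set.
 */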
static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
	int i, linear_chunks;
	chunk_t old_chunk, new_chunk;
	struct dm_io_region src, dest;
	sector_t io_size;
	uint64_t previous_count;

	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
	if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
		goto shut;

	/*
	 * valid flag never changes during merge, so no lock required.
	 */
	if (!s->valid) {
		DMERR("Snapshot is invalid: can't merge");
		goto shut;
	}

	linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
						      &new_chunk);
	if (linear_chunks <= 0) {
		if (linear_chunks < 0) {
			DMERR("Read error in exception store: "
			      "shutting down merge");
			down_write(&s->lock);
			s->merge_failed = 1;
			up_write(&s->lock);
		}
		goto shut;
	}

	/* Adjust old_chunk and new_chunk to reflect start of linear region */
	old_chunk = old_chunk + 1 - linear_chunks;
	new_chunk = new_chunk + 1 - linear_chunks;

	/*
	 * Use one (potentially large) I/O to copy all 'linear_chunks'
	 * from the exception store to the origin
	 */
	io_size = linear_chunks * s->store->chunk_size;

	dest.bdev = s->origin->bdev;
	dest.sector = chunk_to_sector(s->store, old_chunk);
	dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

	src.bdev = s->cow->bdev;
	src.sector = chunk_to_sector(s->store, new_chunk);
	src.count = dest.count;

	/*
	 * Reallocate any exceptions needed in other snapshots then
	 * wait for the pending exceptions to complete.
	 * Each time any pending exception (globally on the system)
	 * completes we are woken and repeat the process to find out
	 * if we can proceed.  While this may not seem a particularly
	 * efficient algorithm, it is not expected to have any
	 * significant impact on performance.
	 */
	previous_count = read_pending_exceptions_done_count();
	while (origin_write_extent(s, dest.sector, io_size)) {
		wait_event(_pending_exceptions_done,
			   (read_pending_exceptions_done_count() !=
			    previous_count));
		/* Retry after the wait, until all exceptions are done. */
		previous_count = read_pending_exceptions_done_count();
	}

	down_write(&s->lock);
	s->first_merging_chunk = old_chunk;
	s->num_merging_chunks = linear_chunks;
	up_write(&s->lock);

	/* Wait until writes to all 'linear_chunks' drain */
	for (i = 0; i < linear_chunks; i++)
		__check_for_conflicting_io(s, old_chunk + i);

	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
	return;

shut:
	merge_shutdown(s);
}

static void error_bios(struct bio *bio);

static void merge_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snapshot *s = context;
	struct bio *b = NULL;

	if (read_err || write_err) {
		if (read_err)
			DMERR("Read error: shutting down merge.");
		else
			DMERR("Write error: shutting down merge.");
		goto shut;
	}

	if (s->store->type->commit_merge(s->store,
					 s->num_merging_chunks) < 0) {
		DMERR("Write error in exception store: shutting down merge");
		goto shut;
	}

	if (remove_single_exception_chunk(s) < 0)
		goto shut;

	snapshot_merge_next_chunks(s);

	return;

shut:
	down_write(&s->lock);
	s->merge_failed = 1;
	b = __release_queued_bios_after_merge(s);
	up_write(&s->lock);
	error_bios(b);

	merge_shutdown(s);
}

static void start_merge(struct dm_snapshot *s)
{
	if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
		snapshot_merge_next_chunks(s);
}

/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
	set_bit(SHUTDOWN_MERGE, &s->state_bits);
	wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE);
	clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char *origin_path, *cow_path;
	unsigned args_used, num_flush_bios = 1;
	fmode_t origin_mode = FMODE_READ;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad;
	}

	if (dm_target_is_snapshot_merge(ti)) {
		num_flush_bios = 2;
		origin_mode = FMODE_WRITE;
	}

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s) {
		ti->error = "Cannot allocate private snapshot structure";
		r = -ENOMEM;
		goto bad;
	}

	origin_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad_origin;
	}

	cow_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
	if (r) {
		ti->error = "Cannot get COW device";
		goto bad_cow;
	}

	r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad_store;
	}

	argv += args_used;
	argc -= args_used;

	s->ti = ti;
	s->valid = 1;
	s->active = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	s->exception_start_sequence = 0;
	s->exception_complete_sequence = 0;
	INIT_LIST_HEAD(&s->out_of_order_list);
	init_rwsem(&s->lock);
	INIT_LIST_HEAD(&s->list);
	spin_lock_init(&s->pe_lock);
	s->state_bits = 0;
	s->merge_failed = 0;
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;
	bio_list_init(&s->bios_queued_during_merge);

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad_hash_tables;
	}

	s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(s->kcopyd_client)) {
		r = PTR_ERR(s->kcopyd_client);
		ti->error = "Could not create kcopyd client";
		goto bad_kcopyd;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	ti->private = s;
	ti->num_flush_bios = num_flush_bios;
	ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	r = register_snapshot(s);
	if (r == -ENOMEM) {
		ti->error = "Snapshot origin struct allocation failed";
		goto bad_load_and_register;
	} else if (r < 0) {
		/* invalid handover, register_snapshot has set ti->error */
		goto bad_load_and_register;
	}

	/*
	 * Metadata must only be loaded into one table at once, so skip this
	 * if metadata will be handed over during resume.
	 * Chunk size will be set during the handover - set it to zero to
	 * ensure it's ignored.
	 */
	if (r > 0) {
		s->store->chunk_size = 0;
		return 0;
	}

	r = s->store->type->read_metadata(s->store, dm_add_exception,
					  (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_read_metadata;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	if (!s->store->chunk_size) {
		ti->error = "Chunk size not set";
		goto bad_read_metadata;
	}

	r = dm_set_target_max_io_len(ti, s->store->chunk_size);
	if (r)
		goto bad_read_metadata;

	return 0;

bad_read_metadata:
	unregister_snapshot(s);

bad_load_and_register:
	mempool_destroy(s->pending_pool);

bad_pending_pool:
	dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
	dm_exception_store_destroy(s->store);

bad_store:
	dm_put_device(ti, s->cow);

bad_cow:
	dm_put_device(ti, s->origin);

bad_origin:
	kfree(s);

bad:
	return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);
}
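
/*
 * Swap the exception store and completed exception table between two
 * snapshots of the same origin/COW pair.  Called from snapshot_resume()
 * with both snapshot locks held; the source is invalidated so that it
 * accepts no further I/O once its exceptions have been handed over.
 */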
static void __handover_exceptions(struct dm_snapshot *snap_src,
				  struct dm_snapshot *snap_dest)
{
	union {
		struct dm_exception_table table_swap;
		struct dm_exception_store *store_swap;
	} u;

	/*
	 * Swap all snapshot context information between the two instances.
	 */
	u.table_swap = snap_dest->complete;
	snap_dest->complete = snap_src->complete;
	snap_src->complete = u.table_swap;

	u.store_swap = snap_dest->store;
	snap_dest->store = snap_src->store;
	snap_src->store = u.store_swap;

	snap_dest->store->snap = snap_dest;
	snap_src->store->snap = snap_src;

	snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
	snap_dest->valid = snap_src->valid;

	/*
	 * Set source invalid to ensure it receives no further I/O.
	 */
	snap_src->valid = 0;
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	/* Check whether exception handover must be cancelled */
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest && (s == snap_src)) {
		down_write(&snap_dest->lock);
		snap_dest->valid = 0;
		up_write(&snap_dest->lock);
		DMERR("Cancelling snapshot handover.");
	}
	up_read(&_origins_lock);

	if (dm_target_is_snapshot_merge(ti))
		stop_merge(s);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_exception_store_destroy(s->store);

	dm_put_device(ti, s->cow);

	dm_put_device(ti, s->origin);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static int do_origin(struct dm_dev *origin, struct bio *bio);

/*
 * Retry a list of origin bios: each is passed back through do_origin()
 * and submitted if no further exceptions are needed.
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
	struct bio *n;
	int r;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		r = do_origin(s->origin, bio);
		if (r == DM_MAPIO_REMAPPED)
			generic_make_request(bio);
		bio = n;
	}
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store->type->drop_snapshot)
		s->store->type->drop_snapshot(s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}
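
/*
 * Finish a pending exception: on success move it into the table of
 * completed exceptions (after waiting for conflicting reads to drain);
 * on failure invalidate the whole snapshot.  Either way, release the
 * bios that were queued against it.
 */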
static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	struct bio *full_bio = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_completed_exception(GFP_NOIO);
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_completed_exception(e);
		error = 1;
		goto out;
	}

	/* Check for conflicting reads */
	__check_for_conflicting_io(s, pe->e.old_chunk);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	dm_insert_exception(&s->complete, e);

out:
	dm_remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = bio_list_get(&pe->origin_bios);
	full_bio = pe->full_bio;
	if (full_bio) {
		full_bio->bi_end_io = pe->full_bio_end_io;
		full_bio->bi_private = pe->full_bio_private;
		atomic_inc(&full_bio->bi_remaining);
	}
	increment_pending_exceptions_done_count();

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error) {
		if (full_bio)
			bio_io_error(full_bio);
		error_bios(snapshot_bios);
	} else {
		if (full_bio)
			bio_endio(full_bio, 0);
		flush_bios(snapshot_bios);
	}

	retry_origin_bios(s, origin_bios);

	free_pending_exception(pe);
}

static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}

static void complete_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	if (unlikely(pe->copy_error))
		pending_complete(pe, 0);

	else
		/* Update the metadata if we are persistent */
		s->store->type->commit_exception(s->store, &pe->e,
						 commit_callback, pe);
}
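
/*
 * kcopyd may finish copies out of order, but exceptions must be completed
 * in the order their copies were issued: an early completion is parked on
 * s->out_of_order_list (kept sorted by exception_sequence) until the
 * earlier ones catch up.
 */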
/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	pe->copy_error = read_err || write_err;

	if (pe->exception_sequence == s->exception_complete_sequence) {
		s->exception_complete_sequence++;
		complete_exception(pe);

		while (!list_empty(&s->out_of_order_list)) {
			pe = list_entry(s->out_of_order_list.next,
					struct dm_snap_pending_exception, out_of_order_entry);
			if (pe->exception_sequence != s->exception_complete_sequence)
				break;
			s->exception_complete_sequence++;
			list_del(&pe->out_of_order_entry);
			complete_exception(pe);
		}
	} else {
		struct list_head *lh;
		struct dm_snap_pending_exception *pe2;

		list_for_each_prev(lh, &s->out_of_order_list) {
			pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry);
			if (pe2->exception_sequence < pe->exception_sequence)
				break;
		}
		list_add(&pe->out_of_order_entry, lh);
	}
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}
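
/*
 * If a write completely covers the chunk there is nothing to copy: the
 * bio (already remapped to the COW device) is submitted directly, with
 * its completion routed through dm_kcopyd_prepare_callback() so the
 * pending exception still completes via copy_callback().
 */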
static void full_bio_end_io(struct bio *bio, int error)
{
	void *callback_data = bio->bi_private;

	dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0);
}

static void start_full_bio(struct dm_snap_pending_exception *pe,
			   struct bio *bio)
{
	struct dm_snapshot *s = pe->snap;
	void *callback_data;

	pe->full_bio = bio;
	pe->full_bio_end_io = bio->bi_end_io;
	pe->full_bio_private = bio->bi_private;

	callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
						   copy_callback, pe);

	bio->bi_end_io = full_bio_end_io;
	bio->bi_private = callback_data;

	generic_make_request(bio);
}

static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	struct dm_snap_pending_exception *pe2;

	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		free_pending_exception(pe);
		return pe2;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->started = 0;
	pe->full_bio = NULL;

	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	pe->exception_sequence = s->exception_start_sequence++;

	dm_insert_exception(&s->pending, &pe->e);

	return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_iter.bi_sector =
		chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
				(chunk - e->old_chunk)) +
		(bio->bi_iter.bi_sector & s->store->chunk_mask);
}

static int snapshot_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	init_tracked_chunk(bio);

	if (bio->bi_rw & REQ_FLUSH) {
		bio->bi_bdev = s->cow->bdev;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			up_write(&s->lock);
			pe = alloc_pending_exception(s);
			down_write(&s->lock);

			if (!s->valid) {
				free_pending_exception(pe);
				r = -EIO;
				goto out_unlock;
			}

			e = dm_lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(s, -ENOMEM);
				r = -EIO;
				goto out_unlock;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started &&
		    bio->bi_iter.bi_size ==
		    (s->store->chunk_size << SECTOR_SHIFT)) {
			pe->started = 1;
			up_write(&s->lock);
			start_full_bio(pe, bio);
			goto out;
		}

		bio_list_add(&pe->snapshot_bios, bio);

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		track_chunk(s, bio, chunk);
	}

out_unlock:
	up_write(&s->lock);
out:
	return r;
}

/*
 * A snapshot-merge target behaves like a combination of a snapshot
 * target and a snapshot-origin target.  It only generates new
 * exceptions in other snapshots and not in the one that is being
 * merged.
 *
 * For each chunk, if there is an existing exception, it is used to
 * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
 * which in turn might generate exceptions in other snapshots.
 * If merging is currently taking place on the chunk in question, the
 * I/O is deferred by adding it to s->bios_queued_during_merge.
 */
static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;

	init_tracked_chunk(bio);

	if (bio->bi_rw & REQ_FLUSH) {
		if (!dm_bio_get_target_bio_nr(bio))
			bio->bi_bdev = s->origin->bdev;
		else
			bio->bi_bdev = s->cow->bdev;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);

	down_write(&s->lock);

	/* Full merging snapshots are redirected to the origin */
	if (!s->valid)
		goto redirect_to_origin;

	/* If the block is already remapped - use that */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		/* Queue writes overlapping with chunks being merged */
		if (bio_rw(bio) == WRITE &&
		    chunk >= s->first_merging_chunk &&
		    chunk < (s->first_merging_chunk +
			     s->num_merging_chunks)) {
			bio->bi_bdev = s->origin->bdev;
			bio_list_add(&s->bios_queued_during_merge, bio);
			r = DM_MAPIO_SUBMITTED;
			goto out_unlock;
		}

		remap_exception(s, e, bio, chunk);

		if (bio_rw(bio) == WRITE)
			track_chunk(s, bio, chunk);
		goto out_unlock;
	}

redirect_to_origin:
	bio->bi_bdev = s->origin->bdev;

	if (bio_rw(bio) == WRITE) {
		up_write(&s->lock);
		return do_origin(s->origin, bio);
	}

out_unlock:
	up_write(&s->lock);

	return r;
}

static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	struct dm_snapshot *s = ti->private;

	if (is_bio_tracked(bio))
		stop_tracking_chunk(s, bio);

	return 0;
}

static void snapshot_merge_presuspend(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	stop_merge(s);
}
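
/*
 * Refuse to resume until any pending exception handover can actually
 * happen: the handover source must be suspended, and must not itself be
 * the snapshot being resumed.
 */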
static int snapshot_preresume(struct dm_target *ti)
{
	int r = 0;
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_read(&snap_src->lock);
		if (s == snap_src) {
			DMERR("Unable to resume snapshot source until "
			      "handover completes.");
			r = -EINVAL;
		} else if (!dm_suspended(snap_src->ti)) {
			DMERR("Unable to perform snapshot handover until "
			      "source is suspended.");
			r = -EINVAL;
		}
		up_read(&snap_src->lock);
	}
	up_read(&_origins_lock);

	return r;
}
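
/*
 * Complete any pending exception handover from an old snapshot of the
 * same origin/COW pair, then mark this snapshot active so origin writes
 * start triggering exceptions.
 */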
static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_write(&snap_src->lock);
		down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
		__handover_exceptions(snap_src, snap_dest);
		up_write(&snap_dest->lock);
		up_write(&snap_src->lock);
	}
	up_read(&_origins_lock);

	/* Now we have correct chunk size, reregister */
	reregister_snapshot(s);

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
{
	uint32_t min_chunksize;

	down_read(&_origins_lock);
	min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
	up_read(&_origins_lock);

	return min_chunksize;
}

static void snapshot_merge_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	/*
	 * Handover exceptions from existing snapshot.
	 */
	snapshot_resume(ti);

	/*
	 * snapshot-merge acts as an origin, so set ti->max_io_len
	 */
	ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);

	start_merge(s);
}

static void snapshot_status(struct dm_target *ti, status_type_t type,
			    unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:

		down_write(&snap->lock);

		if (!snap->valid)
			DMEMIT("Invalid");
		else if (snap->merge_failed)
			DMEMIT("Merge failed");
		else {
			if (snap->store->type->usage) {
				sector_t total_sectors, sectors_allocated,
					 metadata_sectors;
				snap->store->type->usage(snap->store,
							 &total_sectors,
							 &sectors_allocated,
							 &metadata_sectors);
				DMEMIT("%llu/%llu %llu",
				       (unsigned long long)sectors_allocated,
				       (unsigned long long)total_sectors,
				       (unsigned long long)metadata_sectors);
			}
			else
				DMEMIT("Unknown");
		}

		up_write(&snap->lock);

		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
		snap->store->type->status(snap->store, type, result + sz,
					  maxlen - sz);
		break;
	}
}

static int snapshot_iterate_devices(struct dm_target *ti,
				    iterate_devices_callout_fn fn, void *data)
{
	struct dm_snapshot *snap = ti->private;
	int r;

	r = fn(ti, snap->origin, 0, ti->len, data);

	if (!r)
		r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);

	return r;
}


/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/

/*
 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 * supplied bio was ignored.  The caller may submit it immediately.
 * (No remapping actually occurs as the origin is always a direct linear
 * map.)
 *
 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
 * and any supplied bio is added to a list to be submitted once all
 * the necessary exceptions exist.
 */
static int __origin_write(struct list_head *snapshots, sector_t sector,
			  struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED;
	struct dm_snapshot *snap;
	struct dm_exception *e;
	struct dm_snap_pending_exception *pe;
	struct dm_snap_pending_exception *pe_to_start_now = NULL;
	struct dm_snap_pending_exception *pe_to_start_last = NULL;
	chunk_t chunk;

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {
		/*
		 * Don't make new exceptions in a merging snapshot
		 * because it has effectively been deleted
		 */
		if (dm_target_is_snapshot_merge(snap->ti))
			continue;

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (sector >= dm_table_get_size(snap->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 */
		e = dm_lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			up_write(&snap->lock);
			pe = alloc_pending_exception(snap);
			down_write(&snap->lock);

			if (!snap->valid) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			e = dm_lookup_exception(&snap->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			pe = __find_pending_exception(snap, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(snap, -ENOMEM);
				goto next_snapshot;
			}
		}

		r = DM_MAPIO_SUBMITTED;

		/*
		 * If an origin bio was supplied, queue it to wait for the
		 * completion of this exception, and start this one last,
		 * at the end of the function.
		 */
		if (bio) {
			bio_list_add(&pe->origin_bios, bio);
			bio = NULL;

			if (!pe->started) {
				pe->started = 1;
				pe_to_start_last = pe;
			}
		}

		if (!pe->started) {
			pe->started = 1;
			pe_to_start_now = pe;
		}

next_snapshot:
		up_write(&snap->lock);

		if (pe_to_start_now) {
			start_copy(pe_to_start_now);
			pe_to_start_now = NULL;
		}
	}

	/*
	 * Submit the exception against which the bio is queued last,
	 * to give the other exceptions a head start.
	 */
	if (pe_to_start_last)
		start_copy(pe_to_start_last);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Trigger exceptions in all non-merging snapshots.
 *
 * The chunk size of the merging snapshot may be larger than the chunk
 * size of some other snapshot so we may need to reallocate multiple
 * chunks in other snapshots.
 *
 * We scan all the overlapping exceptions in the other snapshots.
 * Returns 1 if anything was reallocated and must be waited for,
 * otherwise returns 0.
 *
 * size must be a multiple of merging_snap's chunk_size.
 */
static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned size)
{
	int must_wait = 0;
	sector_t n;
	struct origin *o;

	/*
	 * The origin's __minimum_chunk_size() got stored in max_io_len
	 * by snapshot_merge_resume().
	 */
	down_read(&_origins_lock);
	o = __lookup_origin(merging_snap->origin->bdev);
	for (n = 0; n < size; n += merging_snap->ti->max_io_len)
		if (__origin_write(&o->snapshots, sector + n, NULL) ==
		    DM_MAPIO_SUBMITTED)
			must_wait = 1;
	up_read(&_origins_lock);

	return must_wait;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

struct dm_origin {
	struct dm_dev *dev;
	unsigned split_boundary;
};

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_origin *o;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	o = kmalloc(sizeof(struct dm_origin), GFP_KERNEL);
	if (!o) {
		ti->error = "Cannot allocate private origin structure";
		r = -ENOMEM;
		goto bad_alloc;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev);
	if (r) {
		ti->error = "Cannot get target device";
		goto bad_open;
	}

	ti->private = o;
	ti->num_flush_bios = 1;

	return 0;

bad_open:
	kfree(o);
bad_alloc:
	return r;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;
	dm_put_device(ti, o->dev);
	kfree(o);
}
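
/*
 * Map I/O to the real device.  Writes are split so that none crosses a
 * split_boundary, i.e. the smallest chunk size of any snapshot of this
 * origin, before the snapshots are notified via do_origin().
 */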
static int origin_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_origin *o = ti->private;
	unsigned available_sectors;

	bio->bi_bdev = o->dev->bdev;

	if (unlikely(bio->bi_rw & REQ_FLUSH))
		return DM_MAPIO_REMAPPED;

	if (bio_rw(bio) != WRITE)
		return DM_MAPIO_REMAPPED;

	available_sectors = o->split_boundary -
		((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));

	if (bio_sectors(bio) > available_sectors)
		dm_accept_partial_bio(bio, available_sectors);

	/* Only tell snapshots if this is a write */
	return do_origin(o->dev, bio);
}

/*
 * Set the origin's split_boundary to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);
}

static void origin_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_origin *o = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", o->dev->name);
		break;
	}
}

static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
			struct bio_vec *biovec, int max_size)
{
	struct dm_origin *o = ti->private;
	struct request_queue *q = bdev_get_queue(o->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = o->dev->bdev;

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int origin_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_origin *o = ti->private;

	return fn(ti, o->dev, 0, ti->len, data);
}

static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 8, 1},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
	.merge   = origin_merge,
	.iterate_devices = origin_iterate_devices,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 12, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.preresume = snapshot_preresume,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};

static struct target_type merge_target = {
	.name    = dm_snapshot_merge_target_name,
	.version = {1, 2, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_merge_map,
	.end_io  = snapshot_end_io,
	.presuspend = snapshot_merge_presuspend,
	.preresume = snapshot_preresume,
	.resume  = snapshot_merge_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};

static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = dm_register_target(&snapshot_target);
	if (r < 0) {
		DMERR("snapshot target register failed %d", r);
		goto bad_register_snapshot_target;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad_register_origin_target;
	}

	r = dm_register_target(&merge_target);
	if (r < 0) {
		DMERR("Merge target register failed %d", r);
		goto bad_register_merge_target;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad_origin_hash;
	}

	exception_cache = KMEM_CACHE(dm_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad_exception_cache;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad_pending_cache;
	}

	return 0;

bad_pending_cache:
	kmem_cache_destroy(exception_cache);
bad_exception_cache:
	exit_origin_hash();
bad_origin_hash:
	dm_unregister_target(&merge_target);
bad_register_merge_target:
	dm_unregister_target(&origin_target);
bad_register_origin_target:
	dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
	dm_exception_store_exit();

	return r;
}

static void __exit dm_snapshot_exit(void)
{
	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);
	dm_unregister_target(&merge_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);

	dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
MODULE_ALIAS("dm-snapshot-origin");
MODULE_ALIAS("dm-snapshot-merge");