// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 Arrikto, Inc. All Rights Reserved.
 */

#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/err.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/dm-io.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/blk_types.h>
#include <linux/dm-kcopyd.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/device-mapper.h>

#include "dm.h"
#include "dm-clone-metadata.h"

#define DM_MSG_PREFIX "clone"

/*
 * Minimum and maximum allowed region sizes
 */
#define MIN_REGION_SIZE (1 << 3)  /* 4KB */
#define MAX_REGION_SIZE (1 << 21) /* 1GB */

#define MIN_HYDRATIONS 256 /* Size of hydration mempool */
#define DEFAULT_HYDRATION_THRESHOLD 1 /* 1 region */
#define DEFAULT_HYDRATION_BATCH_SIZE 1 /* Hydrate in batches of 1 region */

#define COMMIT_PERIOD HZ /* 1 sec */

/*
 * Hydration hash table size: 1 << HASH_TABLE_BITS
 */
#define HASH_TABLE_BITS 15
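/*
 * With HASH_TABLE_BITS == 15 the table has 1 << 15 == 32768 buckets, each
 * holding an hlist head plus a per-bucket spinlock (struct hash_table_bucket
 * below); the table is allocated once per target in hash_table_init().
 */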
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(clone_hydration_throttle,
	"A percentage of time allocated for hydrating regions");

/* Slab cache for struct dm_clone_region_hydration */
static struct kmem_cache *_hydration_cache;

/* dm-clone metadata modes */
enum clone_metadata_mode {
	CM_WRITE,		/* metadata may be changed */
	CM_READ_ONLY,		/* metadata may not be changed */
	CM_FAIL,		/* all metadata I/O fails */
};

struct hash_table_bucket;

struct clone {
	struct dm_target *ti;

	struct dm_dev *metadata_dev;
	struct dm_dev *dest_dev;
	struct dm_dev *source_dev;

	unsigned long nr_regions;
	sector_t region_size;
	unsigned int region_shift;

	/*
	 * A metadata commit and the actions taken in case it fails should run
	 * as a single atomic step.
	 */
	struct mutex commit_lock;

	struct dm_clone_metadata *cmd;

	/*
	 * bio used to flush the destination device, before committing the
	 * metadata.
	 */
	struct bio flush_bio;

	/* Region hydration hash table */
	struct hash_table_bucket *ht;

	atomic_t ios_in_flight;

	wait_queue_head_t hydration_stopped;

	mempool_t hydration_pool;

	unsigned long last_commit_jiffies;

	/*
	 * We defer incoming WRITE bios for regions that are not hydrated,
	 * until after these regions have been hydrated.
	 *
	 * Also, we defer REQ_FUA and REQ_PREFLUSH bios, until after the
	 * metadata have been committed.
	 */
	spinlock_t lock;
	struct bio_list deferred_bios;
	struct bio_list deferred_discard_bios;
	struct bio_list deferred_flush_bios;
	struct bio_list deferred_flush_completions;

	/* Maximum number of regions being copied during background hydration. */
	unsigned int hydration_threshold;

	/* Number of regions to batch together during background hydration. */
	unsigned int hydration_batch_size;

	/* Which region to hydrate next */
	unsigned long hydration_offset;

	atomic_t hydrations_in_flight;

	/*
	 * Save a copy of the table line rather than reconstructing it for the
	 * status.
	 */
	unsigned int nr_ctr_args;
	const char **ctr_args;

	struct workqueue_struct *wq;
	struct work_struct worker;
	struct delayed_work waker;

	struct dm_kcopyd_client *kcopyd_client;

	enum clone_metadata_mode mode;
	unsigned long flags;
};

/*
 * dm-clone flags
 */
#define DM_CLONE_DISCARD_PASSDOWN 0
#define DM_CLONE_HYDRATION_ENABLED 1
#define DM_CLONE_HYDRATION_SUSPENDED 2

/*---------------------------------------------------------------------------*/
/*
 * Metadata failure handling.
 */
static enum clone_metadata_mode get_clone_mode(struct clone *clone)
{
	return READ_ONCE(clone->mode);
}

static const char *clone_device_name(struct clone *clone)
{
	return dm_table_device_name(clone->ti->table);
}

static void __set_clone_mode(struct clone *clone, enum clone_metadata_mode new_mode)
{
	const char *descs[] = {
		"read-write",
		"read-only",
		"fail"
	};

	enum clone_metadata_mode old_mode = get_clone_mode(clone);

	/* Never move out of fail mode */
	if (old_mode == CM_FAIL)
		new_mode = CM_FAIL;

	switch (new_mode) {
	case CM_FAIL:
	case CM_READ_ONLY:
		dm_clone_metadata_set_read_only(clone->cmd);
		break;

	case CM_WRITE:
		dm_clone_metadata_set_read_write(clone->cmd);
		break;
	}

	WRITE_ONCE(clone->mode, new_mode);

	if (new_mode != old_mode) {
		dm_table_event(clone->ti->table);
		DMINFO("%s: Switching to %s mode", clone_device_name(clone),
		       descs[(int)new_mode]);
	}
}

static void __abort_transaction(struct clone *clone)
{
	const char *dev_name = clone_device_name(clone);

	if (get_clone_mode(clone) >= CM_READ_ONLY)
		return;

	DMERR("%s: Aborting current metadata transaction", dev_name);
	if (dm_clone_metadata_abort(clone->cmd)) {
		DMERR("%s: Failed to abort metadata transaction", dev_name);
		__set_clone_mode(clone, CM_FAIL);
	}
}

static void __reload_in_core_bitset(struct clone *clone)
{
	const char *dev_name = clone_device_name(clone);

	if (get_clone_mode(clone) == CM_FAIL)
		return;

	/* Reload the on-disk bitset */
	DMINFO("%s: Reloading on-disk bitmap", dev_name);
	if (dm_clone_reload_in_core_bitset(clone->cmd)) {
		DMERR("%s: Failed to reload on-disk bitmap", dev_name);
		__set_clone_mode(clone, CM_FAIL);
	}
}

static void __metadata_operation_failed(struct clone *clone, const char *op, int r)
{
	DMERR("%s: Metadata operation `%s' failed: error = %d",
	      clone_device_name(clone), op, r);

	__abort_transaction(clone);
	__set_clone_mode(clone, CM_READ_ONLY);

	/*
	 * dm_clone_reload_in_core_bitset() may run concurrently with either
	 * dm_clone_set_region_hydrated() or dm_clone_cond_set_range(), but
	 * it's safe as we have already set the metadata to read-only mode.
	 */
	__reload_in_core_bitset(clone);
}

/*---------------------------------------------------------------------------*/
/* Wake up anyone waiting for region hydrations to stop */
static inline void wakeup_hydration_waiters(struct clone *clone)
{
	wake_up_all(&clone->hydration_stopped);
}

static inline void wake_worker(struct clone *clone)
{
	queue_work(clone->wq, &clone->worker);
}

/*---------------------------------------------------------------------------*/

/*
 * bio helper functions.
 */
static inline void remap_to_source(struct clone *clone, struct bio *bio)
{
	bio_set_dev(bio, clone->source_dev->bdev);
}

static inline void remap_to_dest(struct clone *clone, struct bio *bio)
{
	bio_set_dev(bio, clone->dest_dev->bdev);
}

static bool bio_triggers_commit(struct clone *clone, struct bio *bio)
{
	return op_is_flush(bio->bi_opf) &&
		dm_clone_changed_this_transaction(clone->cmd);
}

/* Get the address of the region in sectors */
static inline sector_t region_to_sector(struct clone *clone, unsigned long region_nr)
{
	return ((sector_t)region_nr << clone->region_shift);
}

/* Get the region number of the bio */
static inline unsigned long bio_to_region(struct clone *clone, struct bio *bio)
{
	return (bio->bi_iter.bi_sector >> clone->region_shift);
}

/* Get the region range covered by the bio */
static void bio_region_range(struct clone *clone, struct bio *bio,
			     unsigned long *rs, unsigned long *nr_regions)
{
	unsigned long end;

	*rs = dm_sector_div_up(bio->bi_iter.bi_sector, clone->region_size);
	end = bio_end_sector(bio) >> clone->region_shift;

	if (*rs >= end)
		*nr_regions = 0;
	else
		*nr_regions = end - *rs;
}

/* Check whether a bio overwrites a region */
static inline bool is_overwrite_bio(struct clone *clone, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE && bio_sectors(bio) == clone->region_size);
}
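/*
 * Worked example of the region arithmetic above (numbers are illustrative):
 * with region_size == 8 sectors (4KB), region_shift is 3, so a bio starting
 * at sector 19 belongs to region 19 >> 3 == 2, and region 2 starts at sector
 * 2 << 3 == 16. For bio_region_range(), a discard covering sectors [10, 34)
 * yields *rs == dm_sector_div_up(10, 8) == 2 and end == 34 >> 3 == 4, i.e.
 * only regions 2 and 3 are *fully* covered; the partially covered head and
 * tail regions are excluded.
 */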
static void fail_bios(struct bio_list *bios, blk_status_t status)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bios))) {
		bio->bi_status = status;
		bio_endio(bio);
	}
}

static void submit_bios(struct bio_list *bios)
{
	struct bio *bio;
	struct blk_plug plug;

	blk_start_plug(&plug);

	while ((bio = bio_list_pop(bios)))
		submit_bio_noacct(bio);

	blk_finish_plug(&plug);
}

/*
 * Submit bio to the underlying device.
 *
 * If the bio triggers a commit, delay it, until after the metadata have been
 * committed.
 *
 * NOTE: The bio remapping must be performed by the caller.
 */
static void issue_bio(struct clone *clone, struct bio *bio)
{
	if (!bio_triggers_commit(clone, bio)) {
		submit_bio_noacct(bio);
		return;
	}

	/*
	 * If the metadata mode is RO or FAIL we won't be able to commit the
	 * metadata, so we complete the bio with an error.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a single
	 * commit for them in process_deferred_flush_bios().
	 */
	spin_lock_irq(&clone->lock);
	bio_list_add(&clone->deferred_flush_bios, bio);
	spin_unlock_irq(&clone->lock);

	wake_worker(clone);
}

/*
 * Remap bio to the destination device and submit it.
 *
 * If the bio triggers a commit, delay it, until after the metadata have been
 * committed.
 */
static void remap_and_issue(struct clone *clone, struct bio *bio)
{
	remap_to_dest(clone, bio);
	issue_bio(clone, bio);
}

/*
 * Issue bios that have been deferred until after their region has finished
 * hydrating.
 *
 * We delegate the bio submission to the worker thread, so this is safe to call
 * from interrupt context.
 */
static void issue_deferred_bios(struct clone *clone, struct bio_list *bios)
{
	struct bio *bio;
	unsigned long flags;
	struct bio_list flush_bios = BIO_EMPTY_LIST;
	struct bio_list normal_bios = BIO_EMPTY_LIST;

	if (bio_list_empty(bios))
		return;

	while ((bio = bio_list_pop(bios))) {
		if (bio_triggers_commit(clone, bio))
			bio_list_add(&flush_bios, bio);
		else
			bio_list_add(&normal_bios, bio);
	}

	spin_lock_irqsave(&clone->lock, flags);
	bio_list_merge(&clone->deferred_bios, &normal_bios);
	bio_list_merge(&clone->deferred_flush_bios, &flush_bios);
	spin_unlock_irqrestore(&clone->lock, flags);

	wake_worker(clone);
}
static void complete_overwrite_bio(struct clone *clone, struct bio *bio)
{
	unsigned long flags;

	/*
	 * If the bio has the REQ_FUA flag set we must commit the metadata
	 * before signaling its completion.
	 *
	 * complete_overwrite_bio() is only called by hydration_complete(),
	 * after having successfully updated the metadata. This means we don't
	 * need to call dm_clone_changed_this_transaction() to check if the
	 * metadata has changed and thus we can avoid taking the metadata spin
	 * lock.
	 */
	if (!(bio->bi_opf & REQ_FUA)) {
		bio_endio(bio);
		return;
	}

	/*
	 * If the metadata mode is RO or FAIL we won't be able to commit the
	 * metadata, so we complete the bio with an error.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a single
	 * commit for them in process_deferred_flush_bios().
	 */
	spin_lock_irqsave(&clone->lock, flags);
	bio_list_add(&clone->deferred_flush_completions, bio);
	spin_unlock_irqrestore(&clone->lock, flags);

	wake_worker(clone);
}

static void trim_bio(struct bio *bio, sector_t sector, unsigned int len)
{
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = to_bytes(len);
}

static void complete_discard_bio(struct clone *clone, struct bio *bio, bool success)
{
	unsigned long rs, nr_regions;

	/*
	 * If the destination device supports discards, remap and trim the
	 * discard bio and pass it down. Otherwise complete the bio
	 * immediately.
	 */
	if (test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags) && success) {
		remap_to_dest(clone, bio);
		bio_region_range(clone, bio, &rs, &nr_regions);
		trim_bio(bio, region_to_sector(clone, rs),
			 nr_regions << clone->region_shift);
		submit_bio_noacct(bio);
	} else
		bio_endio(bio);
}

static void process_discard_bio(struct clone *clone, struct bio *bio)
{
	unsigned long rs, nr_regions;

	bio_region_range(clone, bio, &rs, &nr_regions);
	if (!nr_regions) {
		bio_endio(bio);
		return;
	}

	if (WARN_ON(rs >= clone->nr_regions || (rs + nr_regions) < rs ||
		    (rs + nr_regions) > clone->nr_regions)) {
		DMERR("%s: Invalid range (%lu + %lu, total regions %lu) for discard (%llu + %u)",
		      clone_device_name(clone), rs, nr_regions,
		      clone->nr_regions,
		      (unsigned long long)bio->bi_iter.bi_sector,
		      bio_sectors(bio));
		bio_endio(bio);
		return;
	}

	/*
	 * The covered regions are already hydrated so we just need to pass
	 * down the discard.
	 */
	if (dm_clone_is_range_hydrated(clone->cmd, rs, nr_regions)) {
		complete_discard_bio(clone, bio, true);
		return;
	}

	/*
	 * If the metadata mode is RO or FAIL we won't be able to update the
	 * metadata for the regions covered by the discard so we just ignore
	 * it.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		bio_endio(bio);
		return;
	}

	/*
	 * Defer discard processing.
	 */
	spin_lock_irq(&clone->lock);
	bio_list_add(&clone->deferred_discard_bios, bio);
	spin_unlock_irq(&clone->lock);

	wake_worker(clone);
}
/*---------------------------------------------------------------------------*/

/*
 * dm-clone region hydrations.
 */
struct dm_clone_region_hydration {
	struct clone *clone;
	unsigned long region_nr;

	struct bio *overwrite_bio;
	bio_end_io_t *overwrite_bio_end_io;

	struct bio_list deferred_bios;

	blk_status_t status;

	/* Used by hydration batching */
	struct list_head list;

	/* Used by hydration hash table */
	struct hlist_node h;
};

/*
 * Hydration hash table implementation.
 *
 * Ideally we would like to use list_bl, which uses bit spin locks and employs
 * the least significant bit of the list head to lock the corresponding bucket,
 * reducing the memory overhead for the locks. But, currently, list_bl and bit
 * spin locks don't support IRQ safe versions. Since we have to take the lock
 * in both process and interrupt context, we must fall back to using regular
 * spin locks; one per hash table bucket.
 */
struct hash_table_bucket {
	struct hlist_head head;

	/* Spinlock protecting the bucket */
	spinlock_t lock;
};

#define bucket_lock_irqsave(bucket, flags) \
	spin_lock_irqsave(&(bucket)->lock, flags)

#define bucket_unlock_irqrestore(bucket, flags) \
	spin_unlock_irqrestore(&(bucket)->lock, flags)

#define bucket_lock_irq(bucket) \
	spin_lock_irq(&(bucket)->lock)

#define bucket_unlock_irq(bucket) \
	spin_unlock_irq(&(bucket)->lock)

static int hash_table_init(struct clone *clone)
{
	unsigned int i, sz;
	struct hash_table_bucket *bucket;

	sz = 1 << HASH_TABLE_BITS;

	clone->ht = kvmalloc(sz * sizeof(struct hash_table_bucket), GFP_KERNEL);
	if (!clone->ht)
		return -ENOMEM;

	for (i = 0; i < sz; i++) {
		bucket = clone->ht + i;

		INIT_HLIST_HEAD(&bucket->head);
		spin_lock_init(&bucket->lock);
	}

	return 0;
}

static void hash_table_exit(struct clone *clone)
{
	kvfree(clone->ht);
}

static struct hash_table_bucket *get_hash_table_bucket(struct clone *clone,
							unsigned long region_nr)
{
	return &clone->ht[hash_long(region_nr, HASH_TABLE_BITS)];
}

/*
 * Search hash table for a hydration with hd->region_nr == region_nr
 *
 * NOTE: Must be called with the bucket lock held
 */
static struct dm_clone_region_hydration *__hash_find(struct hash_table_bucket *bucket,
						     unsigned long region_nr)
{
	struct dm_clone_region_hydration *hd;

	hlist_for_each_entry(hd, &bucket->head, h) {
		if (hd->region_nr == region_nr)
			return hd;
	}

	return NULL;
}

/*
 * Insert a hydration into the hash table.
 *
 * NOTE: Must be called with the bucket lock held.
 */
static inline void __insert_region_hydration(struct hash_table_bucket *bucket,
					     struct dm_clone_region_hydration *hd)
{
	hlist_add_head(&hd->h, &bucket->head);
}

/*
 * This function inserts a hydration into the hash table, unless someone else
 * managed to insert a hydration for the same region first. In the latter case
 * it returns the existing hydration descriptor for this region.
 *
 * NOTE: Must be called with the hydration hash table lock held.
 */
static struct dm_clone_region_hydration *
__find_or_insert_region_hydration(struct hash_table_bucket *bucket,
				  struct dm_clone_region_hydration *hd)
{
	struct dm_clone_region_hydration *hd2;

	hd2 = __hash_find(bucket, hd->region_nr);
	if (hd2)
		return hd2;

	__insert_region_hydration(bucket, hd);

	return hd;
}
/*---------------------------------------------------------------------------*/

/* Allocate a hydration */
static struct dm_clone_region_hydration *alloc_hydration(struct clone *clone)
{
	struct dm_clone_region_hydration *hd;

	/*
	 * Allocate a hydration from the hydration mempool.
	 * This might block but it can't fail.
	 */
	hd = mempool_alloc(&clone->hydration_pool, GFP_NOIO);
	hd->clone = clone;

	return hd;
}

static inline void free_hydration(struct dm_clone_region_hydration *hd)
{
	mempool_free(hd, &hd->clone->hydration_pool);
}

/* Initialize a hydration */
static void hydration_init(struct dm_clone_region_hydration *hd, unsigned long region_nr)
{
	hd->region_nr = region_nr;
	hd->overwrite_bio = NULL;
	bio_list_init(&hd->deferred_bios);
	hd->status = 0;

	INIT_LIST_HEAD(&hd->list);
	INIT_HLIST_NODE(&hd->h);
}

/*---------------------------------------------------------------------------*/

/*
 * Update dm-clone's metadata after a region has finished hydrating and remove
 * hydration from the hash table.
 */
static int hydration_update_metadata(struct dm_clone_region_hydration *hd)
{
	int r = 0;
	unsigned long flags;
	struct hash_table_bucket *bucket;
	struct clone *clone = hd->clone;

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
		r = -EPERM;

	/* Update the metadata */
	if (likely(!r) && hd->status == BLK_STS_OK)
		r = dm_clone_set_region_hydrated(clone->cmd, hd->region_nr);

	bucket = get_hash_table_bucket(clone, hd->region_nr);

	/* Remove hydration from hash table */
	bucket_lock_irqsave(bucket, flags);
	hlist_del(&hd->h);
	bucket_unlock_irqrestore(bucket, flags);

	return r;
}

/*
 * Complete a region's hydration:
 *
 *	1. Update dm-clone's metadata.
 *	2. Remove hydration from hash table.
 *	3. Complete overwrite bio.
 *	4. Issue deferred bios.
 *	5. If this was the last hydration, wake up anyone waiting for
 *	   hydrations to finish.
 */
static void hydration_complete(struct dm_clone_region_hydration *hd)
{
	int r;
	blk_status_t status;
	struct clone *clone = hd->clone;

	r = hydration_update_metadata(hd);

	if (hd->status == BLK_STS_OK && likely(!r)) {
		if (hd->overwrite_bio)
			complete_overwrite_bio(clone, hd->overwrite_bio);

		issue_deferred_bios(clone, &hd->deferred_bios);
	} else {
		status = r ? BLK_STS_IOERR : hd->status;

		if (hd->overwrite_bio)
			bio_list_add(&hd->deferred_bios, hd->overwrite_bio);

		fail_bios(&hd->deferred_bios, status);
	}

	free_hydration(hd);

	if (atomic_dec_and_test(&clone->hydrations_in_flight))
		wakeup_hydration_waiters(clone);
}
static void hydration_kcopyd_callback(int read_err, unsigned long write_err, void *context)
{
	blk_status_t status;

	struct dm_clone_region_hydration *tmp, *hd = context;
	struct clone *clone = hd->clone;

	LIST_HEAD(batched_hydrations);

	if (read_err || write_err) {
		DMERR_LIMIT("%s: hydration failed", clone_device_name(clone));
		status = BLK_STS_IOERR;
	} else {
		status = BLK_STS_OK;
	}
	list_splice_tail(&hd->list, &batched_hydrations);

	hd->status = status;
	hydration_complete(hd);

	/* Complete batched hydrations */
	list_for_each_entry_safe(hd, tmp, &batched_hydrations, list) {
		hd->status = status;
		hydration_complete(hd);
	}

	/* Continue background hydration, if there is no I/O in-flight */
	if (test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags) &&
	    !atomic_read(&clone->ios_in_flight))
		wake_worker(clone);
}

static void hydration_copy(struct dm_clone_region_hydration *hd, unsigned int nr_regions)
{
	unsigned long region_start, region_end;
	sector_t tail_size, region_size, total_size;
	struct dm_io_region from, to;
	struct clone *clone = hd->clone;

	if (WARN_ON(!nr_regions))
		return;

	region_size = clone->region_size;
	region_start = hd->region_nr;
	region_end = region_start + nr_regions - 1;

	total_size = region_to_sector(clone, nr_regions - 1);

	if (region_end == clone->nr_regions - 1) {
		/*
		 * The last region of the target might be smaller than
		 * region_size.
		 */
		tail_size = clone->ti->len & (region_size - 1);
		if (!tail_size)
			tail_size = region_size;
	} else {
		tail_size = region_size;
	}

	total_size += tail_size;

	from.bdev = clone->source_dev->bdev;
	from.sector = region_to_sector(clone, region_start);
	from.count = total_size;

	to.bdev = clone->dest_dev->bdev;
	to.sector = from.sector;
	to.count = from.count;

	/* Issue copy */
	atomic_add(nr_regions, &clone->hydrations_in_flight);
	dm_kcopyd_copy(clone->kcopyd_client, &from, 1, &to, 0,
		       hydration_kcopyd_callback, hd);
}

static void overwrite_endio(struct bio *bio)
{
	struct dm_clone_region_hydration *hd = bio->bi_private;

	bio->bi_end_io = hd->overwrite_bio_end_io;
	hd->status = bio->bi_status;

	hydration_complete(hd);
}

static void hydration_overwrite(struct dm_clone_region_hydration *hd, struct bio *bio)
{
	/*
	 * We don't need to save and restore bio->bi_private because device
	 * mapper core generates a new bio for us to use, with clean
	 * bi_private.
	 */
	hd->overwrite_bio = bio;
	hd->overwrite_bio_end_io = bio->bi_end_io;

	bio->bi_end_io = overwrite_endio;
	bio->bi_private = hd;

	atomic_inc(&hd->clone->hydrations_in_flight);
	submit_bio_noacct(bio);
}
/*
 * Hydrate bio's region.
 *
 * This function starts the hydration of the bio's region and puts the bio in
 * the list of deferred bios for this region. If, by the time this function is
 * called, the region has finished hydrating, the bio is instead submitted to
 * the destination device.
 *
 * NOTE: The bio remapping must be performed by the caller.
 */
static void hydrate_bio_region(struct clone *clone, struct bio *bio)
{
	unsigned long region_nr;
	struct hash_table_bucket *bucket;
	struct dm_clone_region_hydration *hd, *hd2;

	region_nr = bio_to_region(clone, bio);
	bucket = get_hash_table_bucket(clone, region_nr);

	bucket_lock_irq(bucket);

	hd = __hash_find(bucket, region_nr);
	if (hd) {
		/* Someone else is hydrating the region */
		bio_list_add(&hd->deferred_bios, bio);
		bucket_unlock_irq(bucket);
		return;
	}

	if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
		/* The region has been hydrated */
		bucket_unlock_irq(bucket);
		issue_bio(clone, bio);
		return;
	}

	/*
	 * We must allocate a hydration descriptor and start the hydration of
	 * the corresponding region.
	 */
	bucket_unlock_irq(bucket);

	hd = alloc_hydration(clone);
	hydration_init(hd, region_nr);

	bucket_lock_irq(bucket);

	/* Check if the region has been hydrated in the meantime. */
	if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
		bucket_unlock_irq(bucket);
		free_hydration(hd);
		issue_bio(clone, bio);
		return;
	}

	hd2 = __find_or_insert_region_hydration(bucket, hd);
	if (hd2 != hd) {
		/* Someone else started the region's hydration. */
		bio_list_add(&hd2->deferred_bios, bio);
		bucket_unlock_irq(bucket);
		free_hydration(hd);
		return;
	}

	/*
	 * If the metadata mode is RO or FAIL then there is no point starting a
	 * hydration, since we will not be able to update the metadata when the
	 * hydration finishes.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		hlist_del(&hd->h);
		bucket_unlock_irq(bucket);
		free_hydration(hd);
		bio_io_error(bio);
		return;
	}

	/*
	 * Start region hydration.
	 *
	 * If a bio overwrites a region, i.e., its size is equal to the
	 * region's size, then we don't need to copy the region from the source
	 * to the destination device.
	 */
	if (is_overwrite_bio(clone, bio)) {
		bucket_unlock_irq(bucket);
		hydration_overwrite(hd, bio);
	} else {
		bio_list_add(&hd->deferred_bios, bio);
		bucket_unlock_irq(bucket);
		hydration_copy(hd, 1);
	}
}

/*---------------------------------------------------------------------------*/

/*
 * Background hydrations.
 */

/*
 * Batch region hydrations.
 *
 * To better utilize device bandwidth we batch together the hydration of
 * adjacent regions. This allows us to use small region sizes, e.g., 4KB, which
 * is good for small, random write performance (because of the overwriting of
 * un-hydrated regions) and at the same time issue big copy requests to kcopyd
 * to achieve high hydration bandwidth.
 */
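/*
 * For instance (illustrative numbers): with a 4KB region size and
 * hydration_batch_size set to 32, up to 32 adjacent un-hydrated regions are
 * chained through hd->list and handed to kcopyd as a single 128KB copy,
 * instead of 32 separate 4KB copies.
 */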
struct batch_info {
	struct dm_clone_region_hydration *head;
	unsigned int nr_batched_regions;
};

static void __batch_hydration(struct batch_info *batch,
			      struct dm_clone_region_hydration *hd)
{
	struct clone *clone = hd->clone;
	unsigned int max_batch_size = READ_ONCE(clone->hydration_batch_size);

	if (batch->head) {
		/* Try to extend the current batch */
		if (batch->nr_batched_regions < max_batch_size &&
		    (batch->head->region_nr + batch->nr_batched_regions) == hd->region_nr) {
			list_add_tail(&hd->list, &batch->head->list);
			batch->nr_batched_regions++;
			hd = NULL;
		}

		/* Check if we should issue the current batch */
		if (batch->nr_batched_regions >= max_batch_size || hd) {
			hydration_copy(batch->head, batch->nr_batched_regions);
			batch->head = NULL;
			batch->nr_batched_regions = 0;
		}
	}

	if (!hd)
		return;

	/* We treat max batch sizes of zero and one equivalently */
	if (max_batch_size <= 1) {
		hydration_copy(hd, 1);
		return;
	}

	/* Start a new batch */
	BUG_ON(!list_empty(&hd->list));
	batch->head = hd;
	batch->nr_batched_regions = 1;
}

static unsigned long __start_next_hydration(struct clone *clone,
					    unsigned long offset,
					    struct batch_info *batch)
{
	struct hash_table_bucket *bucket;
	struct dm_clone_region_hydration *hd;
	unsigned long nr_regions = clone->nr_regions;

	hd = alloc_hydration(clone);

	/* Try to find a region to hydrate. */
	do {
		offset = dm_clone_find_next_unhydrated_region(clone->cmd, offset);
		if (offset == nr_regions)
			break;

		bucket = get_hash_table_bucket(clone, offset);
		bucket_lock_irq(bucket);

		if (!dm_clone_is_region_hydrated(clone->cmd, offset) &&
		    !__hash_find(bucket, offset)) {
			hydration_init(hd, offset);
			__insert_region_hydration(bucket, hd);
			bucket_unlock_irq(bucket);

			/* Batch hydration */
			__batch_hydration(batch, hd);

			return (offset + 1);
		}

		bucket_unlock_irq(bucket);
	} while (++offset < nr_regions);

	if (hd)
		free_hydration(hd);

	return offset;
}

/*
 * This function searches for regions that still reside in the source device
 * and starts their hydration.
 */
static void do_hydration(struct clone *clone)
{
	unsigned int current_volume;
	unsigned long offset, nr_regions = clone->nr_regions;

	struct batch_info batch = {
		.head = NULL,
		.nr_batched_regions = 0,
	};

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
		return;

	if (dm_clone_is_hydration_done(clone->cmd))
		return;

	/*
	 * Avoid race with device suspension.
	 */
	atomic_inc(&clone->hydrations_in_flight);

	/*
	 * Make sure atomic_inc() is ordered before test_bit(), otherwise we
	 * might race with clone_postsuspend() and start a region hydration
	 * after the target has been suspended.
	 *
	 * This is paired with the smp_mb__after_atomic() in
	 * clone_postsuspend().
	 */
	smp_mb__after_atomic();

	offset = clone->hydration_offset;
	while (likely(!test_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags)) &&
	       !atomic_read(&clone->ios_in_flight) &&
	       test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags) &&
	       offset < nr_regions) {
		current_volume = atomic_read(&clone->hydrations_in_flight);
		current_volume += batch.nr_batched_regions;

		if (current_volume > READ_ONCE(clone->hydration_threshold))
			break;

		offset = __start_next_hydration(clone, offset, &batch);
	}

	if (batch.head)
		hydration_copy(batch.head, batch.nr_batched_regions);

	if (offset >= nr_regions)
		offset = 0;

	clone->hydration_offset = offset;

	if (atomic_dec_and_test(&clone->hydrations_in_flight))
		wakeup_hydration_waiters(clone);
}
/*---------------------------------------------------------------------------*/

static bool need_commit_due_to_time(struct clone *clone)
{
	return !time_in_range(jiffies, clone->last_commit_jiffies,
			      clone->last_commit_jiffies + COMMIT_PERIOD);
}

/*
 * A non-zero return indicates read-only or fail mode.
 */
static int commit_metadata(struct clone *clone, bool *dest_dev_flushed)
{
	int r = 0;

	if (dest_dev_flushed)
		*dest_dev_flushed = false;

	mutex_lock(&clone->commit_lock);

	if (!dm_clone_changed_this_transaction(clone->cmd))
		goto out;

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		r = -EPERM;
		goto out;
	}

	r = dm_clone_metadata_pre_commit(clone->cmd);
	if (unlikely(r)) {
		__metadata_operation_failed(clone, "dm_clone_metadata_pre_commit", r);
		goto out;
	}

	bio_reset(&clone->flush_bio);
	bio_set_dev(&clone->flush_bio, clone->dest_dev->bdev);
	clone->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	r = submit_bio_wait(&clone->flush_bio);
	if (unlikely(r)) {
		__metadata_operation_failed(clone, "flush destination device", r);
		goto out;
	}

	if (dest_dev_flushed)
		*dest_dev_flushed = true;

	r = dm_clone_metadata_commit(clone->cmd);
	if (unlikely(r)) {
		__metadata_operation_failed(clone, "dm_clone_metadata_commit", r);
		goto out;
	}

	if (dm_clone_is_hydration_done(clone->cmd))
		dm_table_event(clone->ti->table);
out:
	mutex_unlock(&clone->commit_lock);

	return r;
}

static void process_deferred_discards(struct clone *clone)
{
	int r = -EPERM;
	struct bio *bio;
	struct blk_plug plug;
	unsigned long rs, nr_regions;
	struct bio_list discards = BIO_EMPTY_LIST;

	spin_lock_irq(&clone->lock);
	bio_list_merge(&discards, &clone->deferred_discard_bios);
	bio_list_init(&clone->deferred_discard_bios);
	spin_unlock_irq(&clone->lock);

	if (bio_list_empty(&discards))
		return;

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
		goto out;

	/* Update the metadata */
	bio_list_for_each(bio, &discards) {
		bio_region_range(clone, bio, &rs, &nr_regions);
		/*
		 * A discard request might cover regions that have been already
		 * hydrated. There is no need to update the metadata for these
		 * regions.
		 */
		r = dm_clone_cond_set_range(clone->cmd, rs, nr_regions);
		if (unlikely(r))
			break;
	}
out:
	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&discards)))
		complete_discard_bio(clone, bio, r == 0);
	blk_finish_plug(&plug);
}

static void process_deferred_bios(struct clone *clone)
{
	struct bio_list bios = BIO_EMPTY_LIST;

	spin_lock_irq(&clone->lock);
	bio_list_merge(&bios, &clone->deferred_bios);
	bio_list_init(&clone->deferred_bios);
	spin_unlock_irq(&clone->lock);

	if (bio_list_empty(&bios))
		return;

	submit_bios(&bios);
}

static void process_deferred_flush_bios(struct clone *clone)
{
	struct bio *bio;
	bool dest_dev_flushed;
	struct bio_list bios = BIO_EMPTY_LIST;
	struct bio_list bio_completions = BIO_EMPTY_LIST;

	/*
	 * If there are any deferred flush bios, we must commit the metadata
	 * before issuing them or signaling their completion.
	 */
	spin_lock_irq(&clone->lock);
	bio_list_merge(&bios, &clone->deferred_flush_bios);
	bio_list_init(&clone->deferred_flush_bios);

	bio_list_merge(&bio_completions, &clone->deferred_flush_completions);
	bio_list_init(&clone->deferred_flush_completions);
	spin_unlock_irq(&clone->lock);

	if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
	    !(dm_clone_changed_this_transaction(clone->cmd) && need_commit_due_to_time(clone)))
		return;

	if (commit_metadata(clone, &dest_dev_flushed)) {
		bio_list_merge(&bios, &bio_completions);

		while ((bio = bio_list_pop(&bios)))
			bio_io_error(bio);

		return;
	}

	clone->last_commit_jiffies = jiffies;

	while ((bio = bio_list_pop(&bio_completions)))
		bio_endio(bio);

	while ((bio = bio_list_pop(&bios))) {
		if ((bio->bi_opf & REQ_PREFLUSH) && dest_dev_flushed) {
			/* We just flushed the destination device as part of
			 * the metadata commit, so there is no reason to send
			 * another flush.
			 */
			bio_endio(bio);
		} else {
			submit_bio_noacct(bio);
		}
	}
}

static void do_worker(struct work_struct *work)
{
	struct clone *clone = container_of(work, typeof(*clone), worker);

	process_deferred_bios(clone);
	process_deferred_discards(clone);

	/*
	 * process_deferred_flush_bios():
	 *
	 *   - Commit metadata
	 *
	 *   - Process deferred REQ_FUA completions
	 *
	 *   - Process deferred REQ_PREFLUSH bios
	 */
	process_deferred_flush_bios(clone);

	/* Background hydration */
	do_hydration(clone);
}

/*
 * Commit periodically so that not too much unwritten data builds up.
 *
 * Also, restart background hydration, if it has been stopped by in-flight I/O.
 */
static void do_waker(struct work_struct *work)
{
	struct clone *clone = container_of(to_delayed_work(work), struct clone, waker);

	wake_worker(clone);
	queue_delayed_work(clone->wq, &clone->waker, COMMIT_PERIOD);
}
/*---------------------------------------------------------------------------*/

/*
 * Target methods
 */
static int clone_map(struct dm_target *ti, struct bio *bio)
{
	struct clone *clone = ti->private;
	unsigned long region_nr;

	atomic_inc(&clone->ios_in_flight);

	if (unlikely(get_clone_mode(clone) == CM_FAIL))
		return DM_MAPIO_KILL;

	/*
	 * REQ_PREFLUSH bios carry no data:
	 *
	 * - Commit metadata, if changed
	 *
	 * - Pass down to destination device
	 */
	if (bio->bi_opf & REQ_PREFLUSH) {
		remap_and_issue(clone, bio);
		return DM_MAPIO_SUBMITTED;
	}

	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	/*
	 * dm-clone interprets discards and performs a fast hydration of the
	 * discarded regions, i.e., we skip the copy from the source device and
	 * just mark the regions as hydrated.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD) {
		process_discard_bio(clone, bio);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * If the bio's region is hydrated, redirect it to the destination
	 * device.
	 *
	 * If the region is not hydrated and the bio is a READ, redirect it to
	 * the source device.
	 *
	 * Else, defer WRITE bio until after its region has been hydrated and
	 * start the region's hydration immediately.
	 */
	region_nr = bio_to_region(clone, bio);
	if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
		remap_and_issue(clone, bio);
		return DM_MAPIO_SUBMITTED;
	} else if (bio_data_dir(bio) == READ) {
		remap_to_source(clone, bio);
		return DM_MAPIO_REMAPPED;
	}

	remap_to_dest(clone, bio);
	hydrate_bio_region(clone, bio);

	return DM_MAPIO_SUBMITTED;
}

static int clone_endio(struct dm_target *ti, struct bio *bio, blk_status_t *error)
{
	struct clone *clone = ti->private;

	atomic_dec(&clone->ios_in_flight);

	return DM_ENDIO_DONE;
}

static void emit_flags(struct clone *clone, char *result, unsigned int maxlen,
		       ssize_t *sz_ptr)
{
	ssize_t sz = *sz_ptr;
	unsigned int count;

	count = !test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
	count += !test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);

	DMEMIT("%u ", count);

	if (!test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags))
		DMEMIT("no_hydration ");

	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
		DMEMIT("no_discard_passdown ");

	*sz_ptr = sz;
}

static void emit_core_args(struct clone *clone, char *result,
			   unsigned int maxlen, ssize_t *sz_ptr)
{
	ssize_t sz = *sz_ptr;
	unsigned int count = 4;

	DMEMIT("%u hydration_threshold %u hydration_batch_size %u ", count,
	       READ_ONCE(clone->hydration_threshold),
	       READ_ONCE(clone->hydration_batch_size));

	*sz_ptr = sz;
}

/*
 * Status format:
 *
 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
 * <clone region size> <#hydrated regions>/<#total regions> <#hydrating regions>
 * <#features> <features>* <#core args> <core args>* <clone metadata mode>
 */
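/*
 * Example STATUSTYPE_INFO output (all numbers illustrative):
 *
 *   8 72/1048576 8 425392/2097152 5 0 4 hydration_threshold 1 hydration_batch_size 1 rw
 *
 * which reads as: metadata block size 8, 72 of 1048576 metadata blocks used,
 * region size 8 sectors (4KB), 425392 of 2097152 regions hydrated, 5 regions
 * currently hydrating, 0 feature flags, 4 core arguments (the
 * hydration_threshold and hydration_batch_size key/value pairs), and
 * read-write metadata mode.
 */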
static void clone_status(struct dm_target *ti, status_type_t type,
			 unsigned int status_flags, char *result,
			 unsigned int maxlen)
{
	int r;
	unsigned int i;
	ssize_t sz = 0;
	dm_block_t nr_free_metadata_blocks = 0;
	dm_block_t nr_metadata_blocks = 0;
	char buf[BDEVNAME_SIZE];
	struct clone *clone = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (get_clone_mode(clone) == CM_FAIL) {
			DMEMIT("Fail");
			break;
		}

		/* Commit to ensure statistics aren't out-of-date */
		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
			(void) commit_metadata(clone, NULL);

		r = dm_clone_get_free_metadata_block_count(clone->cmd, &nr_free_metadata_blocks);

		if (r) {
			DMERR("%s: dm_clone_get_free_metadata_block_count returned %d",
			      clone_device_name(clone), r);
			goto error;
		}

		r = dm_clone_get_metadata_dev_size(clone->cmd, &nr_metadata_blocks);

		if (r) {
			DMERR("%s: dm_clone_get_metadata_dev_size returned %d",
			      clone_device_name(clone), r);
			goto error;
		}

		DMEMIT("%u %llu/%llu %llu %u/%lu %u ",
		       DM_CLONE_METADATA_BLOCK_SIZE,
		       (unsigned long long)(nr_metadata_blocks - nr_free_metadata_blocks),
		       (unsigned long long)nr_metadata_blocks,
		       (unsigned long long)clone->region_size,
		       dm_clone_nr_of_hydrated_regions(clone->cmd),
		       clone->nr_regions,
		       atomic_read(&clone->hydrations_in_flight));

		emit_flags(clone, result, maxlen, &sz);
		emit_core_args(clone, result, maxlen, &sz);

		switch (get_clone_mode(clone)) {
		case CM_WRITE:
			DMEMIT("rw");
			break;
		case CM_READ_ONLY:
			DMEMIT("ro");
			break;
		case CM_FAIL:
			DMEMIT("Fail");
		}

		break;

	case STATUSTYPE_TABLE:
		format_dev_t(buf, clone->metadata_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);

		format_dev_t(buf, clone->dest_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);

		format_dev_t(buf, clone->source_dev->bdev->bd_dev);
		DMEMIT("%s", buf);

		for (i = 0; i < clone->nr_ctr_args; i++)
			DMEMIT(" %s", clone->ctr_args[i]);
	}

	return;

error:
	DMEMIT("Error");
}

static sector_t get_dev_size(struct dm_dev *dev)
{
	return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
}

/*---------------------------------------------------------------------------*/

/*
 * Construct a clone device mapping:
 *
 * clone <metadata dev> <destination dev> <source dev> <region size>
 *	[<#feature args> [<feature arg>]* [<#core args> [key value]*]]
 *
 * metadata dev: Fast device holding the persistent metadata
 * destination dev: The destination device, which will become a clone of the
 *                  source device
 * source dev: The read-only source device that gets cloned
 * region size: dm-clone unit size in sectors
 *
 * #feature args: Number of feature arguments passed
 * feature args: E.g. no_hydration, no_discard_passdown
 *
 * #core arguments: An even number of core arguments
 * core arguments: Key/value pairs for tuning the core
 *		   E.g. 'hydration_threshold 256'
 */
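/*
 * For example, a table line like the following (device names are
 * placeholders) creates a clone of a ~500GB source using 4KB (8-sector)
 * regions, with background hydration initially disabled:
 *
 *   dmsetup create clone --table \
 *     "0 1048576000 clone $metadata_dev $dest_dev $source_dev 8 1 no_hydration"
 */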
static int parse_feature_args(struct dm_arg_set *as, struct clone *clone)
{
	int r;
	unsigned int argc;
	const char *arg_name;
	struct dm_target *ti = clone->ti;

	const struct dm_arg args = {
		.min = 0,
		.max = 2,
		.error = "Invalid number of feature arguments"
	};

	/* No feature arguments supplied */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(&args, as, &argc, &ti->error);
	if (r)
		return r;

	while (argc) {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "no_hydration")) {
			__clear_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
		} else if (!strcasecmp(arg_name, "no_discard_passdown")) {
			__clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
		} else {
			ti->error = "Invalid feature argument";
			return -EINVAL;
		}
	}

	return 0;
}

static int parse_core_args(struct dm_arg_set *as, struct clone *clone)
{
	int r;
	unsigned int argc;
	unsigned int value;
	const char *arg_name;
	struct dm_target *ti = clone->ti;

	const struct dm_arg args = {
		.min = 0,
		.max = 4,
		.error = "Invalid number of core arguments"
	};

	/* Initialize core arguments */
	clone->hydration_batch_size = DEFAULT_HYDRATION_BATCH_SIZE;
	clone->hydration_threshold = DEFAULT_HYDRATION_THRESHOLD;

	/* No core arguments supplied */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(&args, as, &argc, &ti->error);
	if (r)
		return r;

	if (argc & 1) {
		ti->error = "Number of core arguments must be even";
		return -EINVAL;
	}

	while (argc) {
		arg_name = dm_shift_arg(as);
		argc -= 2;

		if (!strcasecmp(arg_name, "hydration_threshold")) {
			if (kstrtouint(dm_shift_arg(as), 10, &value)) {
				ti->error = "Invalid value for argument `hydration_threshold'";
				return -EINVAL;
			}
			clone->hydration_threshold = value;
		} else if (!strcasecmp(arg_name, "hydration_batch_size")) {
			if (kstrtouint(dm_shift_arg(as), 10, &value)) {
				ti->error = "Invalid value for argument `hydration_batch_size'";
				return -EINVAL;
			}
			clone->hydration_batch_size = value;
		} else {
			ti->error = "Invalid core argument";
			return -EINVAL;
		}
	}

	return 0;
}

static int parse_region_size(struct clone *clone, struct dm_arg_set *as, char **error)
{
	int r;
	unsigned int region_size;
	struct dm_arg arg;

	arg.min = MIN_REGION_SIZE;
	arg.max = MAX_REGION_SIZE;
	arg.error = "Invalid region size";

	r = dm_read_arg(&arg, as, &region_size, error);
	if (r)
		return r;

	/* Check region size is a power of 2 */
	if (!is_power_of_2(region_size)) {
		*error = "Region size is not a power of 2";
		return -EINVAL;
	}

	/* Validate the region size against the device logical block size */
	if (region_size % (bdev_logical_block_size(clone->source_dev->bdev) >> 9) ||
	    region_size % (bdev_logical_block_size(clone->dest_dev->bdev) >> 9)) {
		*error = "Region size is not a multiple of device logical block size";
		return -EINVAL;
	}

	clone->region_size = region_size;

	return 0;
}

static int validate_nr_regions(unsigned long n, char **error)
{
	/*
	 * dm_bitset restricts us to 2^32 regions. test_bit & co. restrict us
	 * further to 2^31 regions.
	 */
	if (n > (1UL << 31)) {
		*error = "Too many regions. Consider increasing the region size";
		return -EINVAL;
	}

	return 0;
}

static int parse_metadata_dev(struct clone *clone, struct dm_arg_set *as, char **error)
{
	int r;
	sector_t metadata_dev_size;
	char b[BDEVNAME_SIZE];

	r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &clone->metadata_dev);
	if (r) {
		*error = "Error opening metadata device";
		return r;
	}

	metadata_dev_size = get_dev_size(clone->metadata_dev);
	if (metadata_dev_size > DM_CLONE_METADATA_MAX_SECTORS_WARNING)
		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
		       bdevname(clone->metadata_dev->bdev, b), DM_CLONE_METADATA_MAX_SECTORS);

	return 0;
}

static int parse_dest_dev(struct clone *clone, struct dm_arg_set *as, char **error)
{
	int r;
	sector_t dest_dev_size;

	r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &clone->dest_dev);
	if (r) {
		*error = "Error opening destination device";
		return r;
	}

	dest_dev_size = get_dev_size(clone->dest_dev);
	if (dest_dev_size < clone->ti->len) {
		dm_put_device(clone->ti, clone->dest_dev);
		*error = "Device size larger than destination device";
		return -EINVAL;
	}

	return 0;
}

static int parse_source_dev(struct clone *clone, struct dm_arg_set *as, char **error)
{
	int r;
	sector_t source_dev_size;

	r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ,
			  &clone->source_dev);
	if (r) {
		*error = "Error opening source device";
		return r;
	}

	source_dev_size = get_dev_size(clone->source_dev);
	if (source_dev_size < clone->ti->len) {
		dm_put_device(clone->ti, clone->source_dev);
		*error = "Device size larger than source device";
		return -EINVAL;
	}

	return 0;
}

static int copy_ctr_args(struct clone *clone, int argc, const char **argv, char **error)
{
	unsigned int i;
	const char **copy;

	copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
	if (!copy)
		goto error;

	for (i = 0; i < argc; i++) {
		copy[i] = kstrdup(argv[i], GFP_KERNEL);

		if (!copy[i]) {
			while (i--)
				kfree(copy[i]);
			kfree(copy);
			goto error;
		}
	}

	clone->nr_ctr_args = argc;
	clone->ctr_args = copy;
	return 0;

error:
	*error = "Failed to allocate memory for table line";
	return -ENOMEM;
}
static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	sector_t nr_regions;
	struct clone *clone;
	struct dm_arg_set as;

	if (argc < 4) {
		ti->error = "Invalid number of arguments";
		return -EINVAL;
	}

	as.argc = argc;
	as.argv = argv;

	clone = kzalloc(sizeof(*clone), GFP_KERNEL);
	if (!clone) {
		ti->error = "Failed to allocate clone structure";
		return -ENOMEM;
	}

	clone->ti = ti;

	/* Initialize dm-clone flags */
	__set_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
	__set_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);
	__set_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);

	r = parse_metadata_dev(clone, &as, &ti->error);
	if (r)
		goto out_with_clone;

	r = parse_dest_dev(clone, &as, &ti->error);
	if (r)
		goto out_with_meta_dev;

	r = parse_source_dev(clone, &as, &ti->error);
	if (r)
		goto out_with_dest_dev;

	r = parse_region_size(clone, &as, &ti->error);
	if (r)
		goto out_with_source_dev;

	clone->region_shift = __ffs(clone->region_size);
	nr_regions = dm_sector_div_up(ti->len, clone->region_size);

	/* Check for overflow */
	if (nr_regions != (unsigned long)nr_regions) {
		ti->error = "Too many regions. Consider increasing the region size";
		r = -EOVERFLOW;
		goto out_with_source_dev;
	}

	clone->nr_regions = nr_regions;

	r = validate_nr_regions(clone->nr_regions, &ti->error);
	if (r)
		goto out_with_source_dev;

	r = dm_set_target_max_io_len(ti, clone->region_size);
	if (r) {
		ti->error = "Failed to set max io len";
		goto out_with_source_dev;
	}

	r = parse_feature_args(&as, clone);
	if (r)
		goto out_with_source_dev;

	r = parse_core_args(&as, clone);
	if (r)
		goto out_with_source_dev;

	/* Load metadata */
	clone->cmd = dm_clone_metadata_open(clone->metadata_dev->bdev, ti->len,
					    clone->region_size);
	if (IS_ERR(clone->cmd)) {
		ti->error = "Failed to load metadata";
		r = PTR_ERR(clone->cmd);
		goto out_with_source_dev;
	}

	__set_clone_mode(clone, CM_WRITE);

	if (get_clone_mode(clone) != CM_WRITE) {
		ti->error = "Unable to get write access to metadata, please check/repair metadata";
		r = -EPERM;
		goto out_with_metadata;
	}

	clone->last_commit_jiffies = jiffies;

	/* Allocate hydration hash table */
	r = hash_table_init(clone);
	if (r) {
		ti->error = "Failed to allocate hydration hash table";
		goto out_with_metadata;
	}

	atomic_set(&clone->ios_in_flight, 0);
	init_waitqueue_head(&clone->hydration_stopped);
	spin_lock_init(&clone->lock);
	bio_list_init(&clone->deferred_bios);
	bio_list_init(&clone->deferred_discard_bios);
	bio_list_init(&clone->deferred_flush_bios);
	bio_list_init(&clone->deferred_flush_completions);
	clone->hydration_offset = 0;
	atomic_set(&clone->hydrations_in_flight, 0);
	bio_init(&clone->flush_bio, NULL, 0);

	clone->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
	if (!clone->wq) {
		ti->error = "Failed to allocate workqueue";
		r = -ENOMEM;
		goto out_with_ht;
	}

	INIT_WORK(&clone->worker, do_worker);
	INIT_DELAYED_WORK(&clone->waker, do_waker);

	clone->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(clone->kcopyd_client)) {
		r = PTR_ERR(clone->kcopyd_client);
		goto out_with_wq;
	}

	r = mempool_init_slab_pool(&clone->hydration_pool, MIN_HYDRATIONS,
				   _hydration_cache);
	if (r) {
		ti->error = "Failed to create dm_clone_region_hydration memory pool";
		goto out_with_kcopyd;
	}

	/* Save a copy of the table line */
	r = copy_ctr_args(clone, argc - 3, (const char **)argv + 3, &ti->error);
	if (r)
		goto out_with_mempool;

	mutex_init(&clone->commit_lock);

	/* Enable flushes */
	ti->num_flush_bios = 1;
	ti->flush_supported = true;

	/* Enable discards */
	ti->discards_supported = true;
	ti->num_discard_bios = 1;

	ti->private = clone;

	return 0;

out_with_mempool:
	mempool_exit(&clone->hydration_pool);
out_with_kcopyd:
	dm_kcopyd_client_destroy(clone->kcopyd_client);
out_with_wq:
	destroy_workqueue(clone->wq);
out_with_ht:
	hash_table_exit(clone);
out_with_metadata:
	dm_clone_metadata_close(clone->cmd);
out_with_source_dev:
	dm_put_device(ti, clone->source_dev);
out_with_dest_dev:
	dm_put_device(ti, clone->dest_dev);
out_with_meta_dev:
	dm_put_device(ti, clone->metadata_dev);
out_with_clone:
	kfree(clone);

	return r;
}

static void clone_dtr(struct dm_target *ti)
{
	unsigned int i;
	struct clone *clone = ti->private;

	mutex_destroy(&clone->commit_lock);
	bio_uninit(&clone->flush_bio);

	for (i = 0; i < clone->nr_ctr_args; i++)
		kfree(clone->ctr_args[i]);
	kfree(clone->ctr_args);

	mempool_exit(&clone->hydration_pool);
	dm_kcopyd_client_destroy(clone->kcopyd_client);
	destroy_workqueue(clone->wq);
	hash_table_exit(clone);
	dm_clone_metadata_close(clone->cmd);
	dm_put_device(ti, clone->source_dev);
	dm_put_device(ti, clone->dest_dev);
	dm_put_device(ti, clone->metadata_dev);

	kfree(clone);
}
/*---------------------------------------------------------------------------*/

static void clone_postsuspend(struct dm_target *ti)
{
	struct clone *clone = ti->private;

	/*
	 * To successfully suspend the device:
	 *
	 *	- We cancel the delayed work for periodic commits and wait for
	 *	  it to finish.
	 *
	 *	- We stop the background hydration, i.e. we prevent new region
	 *	  hydrations from starting.
	 *
	 *	- We wait for any in-flight hydrations to finish.
	 *
	 *	- We flush the workqueue.
	 *
	 *	- We commit the metadata.
	 */
	cancel_delayed_work_sync(&clone->waker);

	set_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);

	/*
	 * Make sure set_bit() is ordered before atomic_read(), otherwise we
	 * might race with do_hydration() and miss some started region
	 * hydrations.
	 *
	 * This is paired with smp_mb__after_atomic() in do_hydration().
	 */
	smp_mb__after_atomic();

	wait_event(clone->hydration_stopped, !atomic_read(&clone->hydrations_in_flight));
	flush_workqueue(clone->wq);

	(void) commit_metadata(clone, NULL);
}

static void clone_resume(struct dm_target *ti)
{
	struct clone *clone = ti->private;

	clear_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);
	do_waker(&clone->waker.work);
}

static bool bdev_supports_discards(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	return (q && blk_queue_discard(q));
}

/*
 * If discard_passdown was enabled verify that the destination device supports
 * discards. Disable discard_passdown if not.
 */
static void disable_passdown_if_not_supported(struct clone *clone)
{
	struct block_device *dest_dev = clone->dest_dev->bdev;
	struct queue_limits *dest_limits = &bdev_get_queue(dest_dev)->limits;
	const char *reason = NULL;
	char buf[BDEVNAME_SIZE];

	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
		return;

	if (!bdev_supports_discards(dest_dev))
		reason = "discard unsupported";
	else if (dest_limits->max_discard_sectors < clone->region_size)
		reason = "max discard sectors smaller than a region";

	if (reason) {
		DMWARN("Destination device (%s) %s: Disabling discard passdown.",
		       bdevname(dest_dev, buf), reason);
		clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
	}
}

static void set_discard_limits(struct clone *clone, struct queue_limits *limits)
{
	struct block_device *dest_bdev = clone->dest_dev->bdev;
	struct queue_limits *dest_limits = &bdev_get_queue(dest_bdev)->limits;

	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags)) {
		/* No passdown is done so we set our own virtual limits */
		limits->discard_granularity = clone->region_size << SECTOR_SHIFT;
		limits->max_discard_sectors = round_down(UINT_MAX >> SECTOR_SHIFT, clone->region_size);
		return;
	}

	/*
	 * clone_iterate_devices() is stacking both the source and destination
	 * device limits but discards aren't passed to the source device, so
	 * inherit destination's limits.
	 */
	limits->max_discard_sectors = dest_limits->max_discard_sectors;
	limits->max_hw_discard_sectors = dest_limits->max_hw_discard_sectors;
	limits->discard_granularity = dest_limits->discard_granularity;
	limits->discard_alignment = dest_limits->discard_alignment;
	limits->discard_misaligned = dest_limits->discard_misaligned;
	limits->max_discard_segments = dest_limits->max_discard_segments;
}

static void clone_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct clone *clone = ti->private;
	u64 io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * If the system-determined stacked limits are compatible with
	 * dm-clone's region size (io_opt is a factor) do not override them.
	 */
	if (io_opt_sectors < clone->region_size ||
	    do_div(io_opt_sectors, clone->region_size)) {
		blk_limits_io_min(limits, clone->region_size << SECTOR_SHIFT);
		blk_limits_io_opt(limits, clone->region_size << SECTOR_SHIFT);
	}

	disable_passdown_if_not_supported(clone);
	set_discard_limits(clone, limits);
}

static int clone_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	int ret;
	struct clone *clone = ti->private;
	struct dm_dev *dest_dev = clone->dest_dev;
	struct dm_dev *source_dev = clone->source_dev;

	ret = fn(ti, source_dev, 0, ti->len, data);
	if (!ret)
		ret = fn(ti, dest_dev, 0, ti->len, data);
	return ret;
}
/*
 * dm-clone message functions.
 */
static void set_hydration_threshold(struct clone *clone, unsigned int nr_regions)
{
	WRITE_ONCE(clone->hydration_threshold, nr_regions);

	/*
	 * If user space sets hydration_threshold to zero then the hydration
	 * will stop. If at a later time the hydration_threshold is increased
	 * we must restart the hydration process by waking up the worker.
	 */
	wake_worker(clone);
}

static void set_hydration_batch_size(struct clone *clone, unsigned int nr_regions)
{
	WRITE_ONCE(clone->hydration_batch_size, nr_regions);
}

static void enable_hydration(struct clone *clone)
{
	if (!test_and_set_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags))
		wake_worker(clone);
}

static void disable_hydration(struct clone *clone)
{
	clear_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
}

static int clone_message(struct dm_target *ti, unsigned int argc, char **argv,
			 char *result, unsigned int maxlen)
{
	struct clone *clone = ti->private;
	unsigned int value;

	if (!argc)
		return -EINVAL;

	if (!strcasecmp(argv[0], "enable_hydration")) {
		enable_hydration(clone);
		return 0;
	}

	if (!strcasecmp(argv[0], "disable_hydration")) {
		disable_hydration(clone);
		return 0;
	}

	if (argc != 2)
		return -EINVAL;

	if (!strcasecmp(argv[0], "hydration_threshold")) {
		if (kstrtouint(argv[1], 10, &value))
			return -EINVAL;

		set_hydration_threshold(clone, value);

		return 0;
	}

	if (!strcasecmp(argv[0], "hydration_batch_size")) {
		if (kstrtouint(argv[1], 10, &value))
			return -EINVAL;

		set_hydration_batch_size(clone, value);

		return 0;
	}

	DMERR("%s: Unsupported message `%s'", clone_device_name(clone), argv[0]);
	return -EINVAL;
}
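/*
 * The messages above can be sent from user space with dmsetup, e.g.
 * (assuming a mapped device named "clone"):
 *
 *   dmsetup message clone 0 hydration_threshold 256
 *   dmsetup message clone 0 hydration_batch_size 64
 *   dmsetup message clone 0 disable_hydration
 *   dmsetup message clone 0 enable_hydration
 */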
static struct target_type clone_target = {
	.name = "clone",
	.version = {1, 0, 0},
	.module = THIS_MODULE,
	.ctr = clone_ctr,
	.dtr = clone_dtr,
	.map = clone_map,
	.end_io = clone_endio,
	.postsuspend = clone_postsuspend,
	.resume = clone_resume,
	.status = clone_status,
	.message = clone_message,
	.io_hints = clone_io_hints,
	.iterate_devices = clone_iterate_devices,
};

/*---------------------------------------------------------------------------*/

/* Module functions */
static int __init dm_clone_init(void)
{
	int r;

	_hydration_cache = KMEM_CACHE(dm_clone_region_hydration, 0);
	if (!_hydration_cache)
		return -ENOMEM;

	r = dm_register_target(&clone_target);
	if (r < 0) {
		DMERR("Failed to register clone target");
		/* Don't leak the slab cache on a failed registration */
		kmem_cache_destroy(_hydration_cache);
		return r;
	}

	return 0;
}

static void __exit dm_clone_exit(void)
{
	dm_unregister_target(&clone_target);

	kmem_cache_destroy(_hydration_cache);
	_hydration_cache = NULL;
}

/* Module hooks */
module_init(dm_clone_init);
module_exit(dm_clone_exit);

MODULE_DESCRIPTION(DM_NAME " clone target");
MODULE_AUTHOR("Nikos Tsironis <ntsironis@arrikto.com>");
MODULE_LICENSE("GPL");