// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 Arrikto, Inc. All Rights Reserved.
 */
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/dm-io.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/blk_types.h>
#include <linux/dm-kcopyd.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/device-mapper.h>
#include "dm-clone-metadata.h"

#define DM_MSG_PREFIX "clone"
/*
 * Minimum and maximum allowed region sizes
 */
#define MIN_REGION_SIZE (1 << 3)  /* 4KB */
#define MAX_REGION_SIZE (1 << 21) /* 1GB */
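/*
 * Note: region sizes are expressed in 512-byte sectors, so MIN_REGION_SIZE
 * is 8 sectors (4KB) and MAX_REGION_SIZE is 2^21 sectors (1GB).
 */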
#define MIN_HYDRATIONS 256 /* Size of hydration mempool */
#define DEFAULT_HYDRATION_THRESHOLD 1 /* 1 region */
#define DEFAULT_HYDRATION_BATCH_SIZE 1 /* Hydrate in batches of 1 region */

#define COMMIT_PERIOD HZ /* 1 sec */
/*
 * Hydration hash table size: 1 << HASH_TABLE_BITS
 */
#define HASH_TABLE_BITS 15
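/* With HASH_TABLE_BITS = 15 the table has 1 << 15 = 32768 buckets. */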
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(clone_hydration_throttle,
	"A percentage of time allocated for hydrating regions");
/* Slab cache for struct dm_clone_region_hydration */
static struct kmem_cache *_hydration_cache;
/* dm-clone metadata modes */
enum clone_metadata_mode {
	CM_WRITE,	/* metadata may be changed */
	CM_READ_ONLY,	/* metadata may not be changed */
	CM_FAIL,	/* all metadata I/O fails */
};
struct hash_table_bucket;
struct clone {
	struct dm_target *ti;

	struct dm_dev *metadata_dev;
	struct dm_dev *dest_dev;
	struct dm_dev *source_dev;

	unsigned long nr_regions;
	sector_t region_size;
	unsigned int region_shift;

	/*
	 * A metadata commit and the actions taken in case it fails should run
	 * as a single atomic step.
	 */
	struct mutex commit_lock;

	struct dm_clone_metadata *cmd;

	/* Region hydration hash table */
	struct hash_table_bucket *ht;

	atomic_t ios_in_flight;

	wait_queue_head_t hydration_stopped;

	mempool_t hydration_pool;

	unsigned long last_commit_jiffies;

	/*
	 * We defer incoming WRITE bios for regions that are not hydrated,
	 * until after these regions have been hydrated.
	 *
	 * Also, we defer REQ_FUA and REQ_PREFLUSH bios, until after the
	 * metadata have been committed.
	 */
	spinlock_t lock;
	struct bio_list deferred_bios;
	struct bio_list deferred_discard_bios;
	struct bio_list deferred_flush_bios;
	struct bio_list deferred_flush_completions;

	/* Maximum number of regions being copied during background hydration. */
	unsigned int hydration_threshold;

	/* Number of regions to batch together during background hydration. */
	unsigned int hydration_batch_size;

	/* Which region to hydrate next */
	unsigned long hydration_offset;

	atomic_t hydrations_in_flight;

	/*
	 * Save a copy of the table line rather than reconstructing it for the
	 * status.
	 */
	unsigned int nr_ctr_args;
	const char **ctr_args;

	struct workqueue_struct *wq;
	struct work_struct worker;
	struct delayed_work waker;

	struct dm_kcopyd_client *kcopyd_client;

	enum clone_metadata_mode mode;
	unsigned long flags;
};
#define DM_CLONE_DISCARD_PASSDOWN 0
#define DM_CLONE_HYDRATION_ENABLED 1
#define DM_CLONE_HYDRATION_SUSPENDED 2
/*---------------------------------------------------------------------------*/

/*
 * Metadata failure handling.
 */
static enum clone_metadata_mode get_clone_mode(struct clone *clone)
{
	return READ_ONCE(clone->mode);
}
static const char *clone_device_name(struct clone *clone)
{
	return dm_table_device_name(clone->ti->table);
}
static void __set_clone_mode(struct clone *clone, enum clone_metadata_mode new_mode)
{
	/* Mode descriptions, indexed by enum clone_metadata_mode */
	static const char * const descs[] = {
		"read-write",
		"read-only",
		"fail"
	};

	enum clone_metadata_mode old_mode = get_clone_mode(clone);

	/* Never move out of fail mode */
	if (old_mode == CM_FAIL)
		new_mode = CM_FAIL;

	switch (new_mode) {
	case CM_FAIL:
	case CM_READ_ONLY:
		dm_clone_metadata_set_read_only(clone->cmd);
		break;

	case CM_WRITE:
		dm_clone_metadata_set_read_write(clone->cmd);
		break;
	}

	WRITE_ONCE(clone->mode, new_mode);

	if (new_mode != old_mode) {
		dm_table_event(clone->ti->table);
		DMINFO("%s: Switching to %s mode", clone_device_name(clone),
		       descs[(int)new_mode]);
	}
}
static void __abort_transaction(struct clone *clone)
{
	const char *dev_name = clone_device_name(clone);

	if (get_clone_mode(clone) >= CM_READ_ONLY)
		return;

	DMERR("%s: Aborting current metadata transaction", dev_name);
	if (dm_clone_metadata_abort(clone->cmd)) {
		DMERR("%s: Failed to abort metadata transaction", dev_name);
		__set_clone_mode(clone, CM_FAIL);
	}
}
static void __reload_in_core_bitset(struct clone *clone)
{
	const char *dev_name = clone_device_name(clone);

	if (get_clone_mode(clone) == CM_FAIL)
		return;

	/* Reload the on-disk bitset */
	DMINFO("%s: Reloading on-disk bitmap", dev_name);
	if (dm_clone_reload_in_core_bitset(clone->cmd)) {
		DMERR("%s: Failed to reload on-disk bitmap", dev_name);
		__set_clone_mode(clone, CM_FAIL);
	}
}
static void __metadata_operation_failed(struct clone *clone, const char *op, int r)
{
	DMERR("%s: Metadata operation `%s' failed: error = %d",
	      clone_device_name(clone), op, r);

	__abort_transaction(clone);
	__set_clone_mode(clone, CM_READ_ONLY);

	/*
	 * dm_clone_reload_in_core_bitset() may run concurrently with either
	 * dm_clone_set_region_hydrated() or dm_clone_cond_set_range(), but
	 * it's safe as we have already set the metadata to read-only mode.
	 */
	__reload_in_core_bitset(clone);
}
/*---------------------------------------------------------------------------*/

/* Wake up anyone waiting for region hydrations to stop */
static inline void wakeup_hydration_waiters(struct clone *clone)
{
	wake_up_all(&clone->hydration_stopped);
}
static inline void wake_worker(struct clone *clone)
{
	queue_work(clone->wq, &clone->worker);
}
/*---------------------------------------------------------------------------*/

/*
 * bio helper functions.
 */
static inline void remap_to_source(struct clone *clone, struct bio *bio)
{
	bio_set_dev(bio, clone->source_dev->bdev);
}
static inline void remap_to_dest(struct clone *clone, struct bio *bio)
{
	bio_set_dev(bio, clone->dest_dev->bdev);
}
static bool bio_triggers_commit(struct clone *clone, struct bio *bio)
{
	return op_is_flush(bio->bi_opf) &&
		dm_clone_changed_this_transaction(clone->cmd);
}
/* Get the address of the region in sectors */
static inline sector_t region_to_sector(struct clone *clone, unsigned long region_nr)
{
	return ((sector_t)region_nr << clone->region_shift);
}
/* Get the region number of the bio */
static inline unsigned long bio_to_region(struct clone *clone, struct bio *bio)
{
	return (bio->bi_iter.bi_sector >> clone->region_shift);
}
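/*
 * For example, with 8-sector (4KB) regions region_shift is 3, so a bio
 * starting at sector 17 maps to region 17 >> 3 = 2, whose first sector is
 * 2 << 3 = 16.
 */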
/* Get the region range covered by the bio */
static void bio_region_range(struct clone *clone, struct bio *bio,
			     unsigned long *rs, unsigned long *nr_regions)
{
	unsigned long end;

	*rs = dm_sector_div_up(bio->bi_iter.bi_sector, clone->region_size);
	end = bio_end_sector(bio) >> clone->region_shift;

	if (*rs >= end)
		*nr_regions = 0;
	else
		*nr_regions = end - *rs;
}
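/*
 * Only regions fully covered by the bio are counted: e.g., with 8-sector
 * regions, a bio spanning sectors [10, 30) yields *rs = 2 (sector 10 rounded
 * up to a region boundary) and end = 30 >> 3 = 3, i.e. a single
 * fully-covered region.
 */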
/* Check whether a bio overwrites a region */
static inline bool is_overwrite_bio(struct clone *clone, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE && bio_sectors(bio) == clone->region_size);
}
static void fail_bios(struct bio_list *bios, blk_status_t status)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bios))) {
		bio->bi_status = status;
		bio_endio(bio);
	}
}
static void submit_bios(struct bio_list *bios)
{
	struct bio *bio;
	struct blk_plug plug;

	blk_start_plug(&plug);

	while ((bio = bio_list_pop(bios)))
		submit_bio_noacct(bio);

	blk_finish_plug(&plug);
}
/*
 * Submit bio to the underlying device.
 *
 * If the bio triggers a commit, delay it, until after the metadata have been
 * committed.
 *
 * NOTE: The bio remapping must be performed by the caller.
 */
static void issue_bio(struct clone *clone, struct bio *bio)
{
	if (!bio_triggers_commit(clone, bio)) {
		submit_bio_noacct(bio);
		return;
	}

	/*
	 * If the metadata mode is RO or FAIL we won't be able to commit the
	 * metadata, so we complete the bio with an error.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a single
	 * commit for them in process_deferred_flush_bios().
	 */
	spin_lock_irq(&clone->lock);
	bio_list_add(&clone->deferred_flush_bios, bio);
	spin_unlock_irq(&clone->lock);

	wake_worker(clone);
}
/*
 * Remap bio to the destination device and submit it.
 *
 * If the bio triggers a commit, delay it, until after the metadata have been
 * committed.
 */
static void remap_and_issue(struct clone *clone, struct bio *bio)
{
	remap_to_dest(clone, bio);
	issue_bio(clone, bio);
}
/*
 * Issue bios that have been deferred until after their region has finished
 * hydrating.
 *
 * We delegate the bio submission to the worker thread, so this is safe to call
 * from interrupt context.
 */
static void issue_deferred_bios(struct clone *clone, struct bio_list *bios)
{
	struct bio *bio;
	unsigned long flags;
	struct bio_list flush_bios = BIO_EMPTY_LIST;
	struct bio_list normal_bios = BIO_EMPTY_LIST;

	if (bio_list_empty(bios))
		return;

	while ((bio = bio_list_pop(bios))) {
		if (bio_triggers_commit(clone, bio))
			bio_list_add(&flush_bios, bio);
		else
			bio_list_add(&normal_bios, bio);
	}

	spin_lock_irqsave(&clone->lock, flags);
	bio_list_merge(&clone->deferred_bios, &normal_bios);
	bio_list_merge(&clone->deferred_flush_bios, &flush_bios);
	spin_unlock_irqrestore(&clone->lock, flags);

	wake_worker(clone);
}
static void complete_overwrite_bio(struct clone *clone, struct bio *bio)
{
	unsigned long flags;

	/*
	 * If the bio has the REQ_FUA flag set we must commit the metadata
	 * before signaling its completion.
	 *
	 * complete_overwrite_bio() is only called by hydration_complete(),
	 * after having successfully updated the metadata. This means we don't
	 * need to call dm_clone_changed_this_transaction() to check if the
	 * metadata has changed and thus we can avoid taking the metadata spin
	 * lock.
	 */
	if (!(bio->bi_opf & REQ_FUA)) {
		bio_endio(bio);
		return;
	}

	/*
	 * If the metadata mode is RO or FAIL we won't be able to commit the
	 * metadata, so we complete the bio with an error.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a single
	 * commit for them in process_deferred_flush_bios().
	 */
	spin_lock_irqsave(&clone->lock, flags);
	bio_list_add(&clone->deferred_flush_completions, bio);
	spin_unlock_irqrestore(&clone->lock, flags);

	wake_worker(clone);
}
static void trim_bio(struct bio *bio, sector_t sector, unsigned int len)
{
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = to_bytes(len);
}
static void complete_discard_bio(struct clone *clone, struct bio *bio, bool success)
{
	unsigned long rs, nr_regions;

	/*
	 * If the destination device supports discards, remap and trim the
	 * discard bio and pass it down. Otherwise complete the bio
	 * immediately.
	 */
	if (test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags) && success) {
		remap_to_dest(clone, bio);
		bio_region_range(clone, bio, &rs, &nr_regions);
		trim_bio(bio, region_to_sector(clone, rs),
			 nr_regions << clone->region_shift);
		submit_bio_noacct(bio);
	} else
		bio_endio(bio);
}
static void process_discard_bio(struct clone *clone, struct bio *bio)
{
	unsigned long rs, nr_regions;

	bio_region_range(clone, bio, &rs, &nr_regions);
	if (!nr_regions) {
		bio_endio(bio);
		return;
	}

	if (WARN_ON(rs >= clone->nr_regions || (rs + nr_regions) < rs ||
		    (rs + nr_regions) > clone->nr_regions)) {
		DMERR("%s: Invalid range (%lu + %lu, total regions %lu) for discard (%llu + %u)",
		      clone_device_name(clone), rs, nr_regions,
		      clone->nr_regions,
		      (unsigned long long)bio->bi_iter.bi_sector,
		      bio_sectors(bio));
		bio_endio(bio);
		return;
	}

	/*
	 * The covered regions are already hydrated so we just need to pass
	 * down the discard.
	 */
	if (dm_clone_is_range_hydrated(clone->cmd, rs, nr_regions)) {
		complete_discard_bio(clone, bio, true);
		return;
	}

	/*
	 * If the metadata mode is RO or FAIL we won't be able to update the
	 * metadata for the regions covered by the discard so we just ignore
	 * it.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		bio_endio(bio);
		return;
	}

	/*
	 * Defer discard processing.
	 */
	spin_lock_irq(&clone->lock);
	bio_list_add(&clone->deferred_discard_bios, bio);
	spin_unlock_irq(&clone->lock);

	wake_worker(clone);
}
/*---------------------------------------------------------------------------*/

/*
 * dm-clone region hydrations.
 */
struct dm_clone_region_hydration {
	struct clone *clone;
	unsigned long region_nr;

	struct bio *overwrite_bio;
	bio_end_io_t *overwrite_bio_end_io;

	struct bio_list deferred_bios;

	blk_status_t status;

	/* Used by hydration batching */
	struct list_head list;

	/* Used by hydration hash table */
	struct hlist_node h;
};
/*
 * Hydration hash table implementation.
 *
 * Ideally we would like to use list_bl, which uses bit spin locks and employs
 * the least significant bit of the list head to lock the corresponding bucket,
 * reducing the memory overhead for the locks. But, currently, list_bl and bit
 * spin locks don't support IRQ safe versions. Since we have to take the lock
 * in both process and interrupt context, we must fall back to using regular
 * spin locks; one per hash table bucket.
 */
struct hash_table_bucket {
	struct hlist_head head;

	/* Spinlock protecting the bucket */
	spinlock_t lock;
};

#define bucket_lock_irqsave(bucket, flags) \
	spin_lock_irqsave(&(bucket)->lock, flags)

#define bucket_unlock_irqrestore(bucket, flags) \
	spin_unlock_irqrestore(&(bucket)->lock, flags)

#define bucket_lock_irq(bucket) \
	spin_lock_irq(&(bucket)->lock)

#define bucket_unlock_irq(bucket) \
	spin_unlock_irq(&(bucket)->lock)
static int hash_table_init(struct clone *clone)
{
	unsigned int i, sz;
	struct hash_table_bucket *bucket;

	sz = 1 << HASH_TABLE_BITS;

	clone->ht = kvmalloc_array(sz, sizeof(struct hash_table_bucket), GFP_KERNEL);
	if (!clone->ht)
		return -ENOMEM;

	for (i = 0; i < sz; i++) {
		bucket = clone->ht + i;

		INIT_HLIST_HEAD(&bucket->head);
		spin_lock_init(&bucket->lock);
	}

	return 0;
}
static void hash_table_exit(struct clone *clone)
{
	kvfree(clone->ht);
}
static struct hash_table_bucket *get_hash_table_bucket(struct clone *clone,
							unsigned long region_nr)
{
	return &clone->ht[hash_long(region_nr, HASH_TABLE_BITS)];
}
/*
 * Search hash table for a hydration with hd->region_nr == region_nr
 *
 * NOTE: Must be called with the bucket lock held
 */
static struct dm_clone_region_hydration *__hash_find(struct hash_table_bucket *bucket,
						     unsigned long region_nr)
{
	struct dm_clone_region_hydration *hd;

	hlist_for_each_entry(hd, &bucket->head, h) {
		if (hd->region_nr == region_nr)
			return hd;
	}

	return NULL;
}
/*
 * Insert a hydration into the hash table.
 *
 * NOTE: Must be called with the bucket lock held.
 */
static inline void __insert_region_hydration(struct hash_table_bucket *bucket,
					     struct dm_clone_region_hydration *hd)
{
	hlist_add_head(&hd->h, &bucket->head);
}
/*
 * This function inserts a hydration into the hash table, unless someone else
 * managed to insert a hydration for the same region first. In the latter case
 * it returns the existing hydration descriptor for this region.
 *
 * NOTE: Must be called with the hydration hash table lock held.
 */
static struct dm_clone_region_hydration *
__find_or_insert_region_hydration(struct hash_table_bucket *bucket,
				  struct dm_clone_region_hydration *hd)
{
	struct dm_clone_region_hydration *hd2;

	hd2 = __hash_find(bucket, hd->region_nr);
	if (hd2)
		return hd2;

	__insert_region_hydration(bucket, hd);

	return hd;
}
/*---------------------------------------------------------------------------*/

/* Allocate a hydration */
static struct dm_clone_region_hydration *alloc_hydration(struct clone *clone)
{
	struct dm_clone_region_hydration *hd;

	/*
	 * Allocate a hydration from the hydration mempool.
	 * This might block but it can't fail.
	 */
	hd = mempool_alloc(&clone->hydration_pool, GFP_NOIO);
	hd->clone = clone;

	return hd;
}
static inline void free_hydration(struct dm_clone_region_hydration *hd)
{
	mempool_free(hd, &hd->clone->hydration_pool);
}
/* Initialize a hydration */
static void hydration_init(struct dm_clone_region_hydration *hd, unsigned long region_nr)
{
	hd->region_nr = region_nr;
	hd->overwrite_bio = NULL;
	bio_list_init(&hd->deferred_bios);
	hd->status = 0;

	INIT_LIST_HEAD(&hd->list);
	INIT_HLIST_NODE(&hd->h);
}
/*---------------------------------------------------------------------------*/

/*
 * Update dm-clone's metadata after a region has finished hydrating and remove
 * hydration from the hash table.
 */
static int hydration_update_metadata(struct dm_clone_region_hydration *hd)
{
	int r = 0;
	unsigned long flags;
	struct hash_table_bucket *bucket;
	struct clone *clone = hd->clone;

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
		r = -EPERM;

	/* Update the metadata */
	if (likely(!r) && hd->status == BLK_STS_OK)
		r = dm_clone_set_region_hydrated(clone->cmd, hd->region_nr);

	bucket = get_hash_table_bucket(clone, hd->region_nr);

	/* Remove hydration from hash table */
	bucket_lock_irqsave(bucket, flags);
	hlist_del(&hd->h);
	bucket_unlock_irqrestore(bucket, flags);

	return r;
}
/*
 * Complete a region's hydration:
 *
 *	1. Update dm-clone's metadata.
 *	2. Remove hydration from hash table.
 *	3. Complete overwrite bio.
 *	4. Issue deferred bios.
 *	5. If this was the last hydration, wake up anyone waiting for
 *	   hydrations to finish.
 */
static void hydration_complete(struct dm_clone_region_hydration *hd)
{
	int r;
	blk_status_t status;
	struct clone *clone = hd->clone;

	r = hydration_update_metadata(hd);

	if (hd->status == BLK_STS_OK && likely(!r)) {
		if (hd->overwrite_bio)
			complete_overwrite_bio(clone, hd->overwrite_bio);

		issue_deferred_bios(clone, &hd->deferred_bios);
	} else {
		status = r ? BLK_STS_IOERR : hd->status;

		if (hd->overwrite_bio)
			bio_list_add(&hd->deferred_bios, hd->overwrite_bio);

		fail_bios(&hd->deferred_bios, status);
	}

	free_hydration(hd);

	if (atomic_dec_and_test(&clone->hydrations_in_flight))
		wakeup_hydration_waiters(clone);
}
static void hydration_kcopyd_callback(int read_err, unsigned long write_err, void *context)
{
	blk_status_t status;
	struct dm_clone_region_hydration *tmp, *hd = context;
	struct clone *clone = hd->clone;

	LIST_HEAD(batched_hydrations);

	if (read_err || write_err) {
		DMERR_LIMIT("%s: hydration failed", clone_device_name(clone));
		status = BLK_STS_IOERR;
	} else
		status = BLK_STS_OK;

	list_splice_tail(&hd->list, &batched_hydrations);

	hd->status = status;
	hydration_complete(hd);

	/* Complete batched hydrations */
	list_for_each_entry_safe(hd, tmp, &batched_hydrations, list) {
		hd->status = status;
		hydration_complete(hd);
	}

	/* Continue background hydration, if there is no I/O in-flight */
	if (test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags) &&
	    !atomic_read(&clone->ios_in_flight))
		wake_worker(clone);
}
static void hydration_copy(struct dm_clone_region_hydration *hd, unsigned int nr_regions)
{
	unsigned long region_start, region_end;
	sector_t tail_size, region_size, total_size;
	struct dm_io_region from, to;
	struct clone *clone = hd->clone;

	if (WARN_ON(!nr_regions))
		return;

	region_size = clone->region_size;
	region_start = hd->region_nr;
	region_end = region_start + nr_regions - 1;

	total_size = region_to_sector(clone, nr_regions - 1);

	if (region_end == clone->nr_regions - 1) {
		/*
		 * The last region of the target might be smaller than
		 * region_size.
		 */
		tail_size = clone->ti->len & (region_size - 1);
		if (!tail_size)
			tail_size = region_size;
	} else {
		tail_size = region_size;
	}

	total_size += tail_size;

	from.bdev = clone->source_dev->bdev;
	from.sector = region_to_sector(clone, region_start);
	from.count = total_size;

	to.bdev = clone->dest_dev->bdev;
	to.sector = from.sector;
	to.count = from.count;

	/* Issue copy */
	atomic_add(nr_regions, &clone->hydrations_in_flight);
	dm_kcopyd_copy(clone->kcopyd_client, &from, 1, &to, 0,
		       hydration_kcopyd_callback, hd);
}
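/*
 * Tail-size example: with ti->len = 100 sectors and an 8-sector region size,
 * the last region holds only 100 & 7 = 4 sectors, so a copy that ends at the
 * last region transfers 4 sectors for it instead of 8.
 */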
static void overwrite_endio(struct bio *bio)
{
	struct dm_clone_region_hydration *hd = bio->bi_private;

	bio->bi_end_io = hd->overwrite_bio_end_io;
	hd->status = bio->bi_status;

	hydration_complete(hd);
}
static void hydration_overwrite(struct dm_clone_region_hydration *hd, struct bio *bio)
{
	/*
	 * We don't need to save and restore bio->bi_private because device
	 * mapper core generates a new bio for us to use, with clean
	 * bi_private.
	 */
	hd->overwrite_bio = bio;
	hd->overwrite_bio_end_io = bio->bi_end_io;

	bio->bi_end_io = overwrite_endio;
	bio->bi_private = hd;

	atomic_inc(&hd->clone->hydrations_in_flight);
	submit_bio_noacct(bio);
}
/*
 * Hydrate bio's region.
 *
 * This function starts the hydration of the bio's region and puts the bio in
 * the list of deferred bios for this region. In case, by the time this
 * function is called, the region has finished hydrating it's submitted to the
 * destination device.
 *
 * NOTE: The bio remapping must be performed by the caller.
 */
static void hydrate_bio_region(struct clone *clone, struct bio *bio)
{
	unsigned long region_nr;
	struct hash_table_bucket *bucket;
	struct dm_clone_region_hydration *hd, *hd2;

	region_nr = bio_to_region(clone, bio);
	bucket = get_hash_table_bucket(clone, region_nr);

	bucket_lock_irq(bucket);

	hd = __hash_find(bucket, region_nr);
	if (hd) {
		/* Someone else is hydrating the region */
		bio_list_add(&hd->deferred_bios, bio);
		bucket_unlock_irq(bucket);
		return;
	}

	if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
		/* The region has been hydrated */
		bucket_unlock_irq(bucket);
		issue_bio(clone, bio);
		return;
	}

	/*
	 * We must allocate a hydration descriptor and start the hydration of
	 * the corresponding region.
	 */
	bucket_unlock_irq(bucket);

	hd = alloc_hydration(clone);
	hydration_init(hd, region_nr);

	bucket_lock_irq(bucket);

	/* Check if the region has been hydrated in the meantime. */
	if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
		bucket_unlock_irq(bucket);
		free_hydration(hd);
		issue_bio(clone, bio);
		return;
	}

	hd2 = __find_or_insert_region_hydration(bucket, hd);
	if (hd2 != hd) {
		/* Someone else started the region's hydration. */
		bio_list_add(&hd2->deferred_bios, bio);
		bucket_unlock_irq(bucket);
		free_hydration(hd);
		return;
	}

	/*
	 * If the metadata mode is RO or FAIL then there is no point starting a
	 * hydration, since we will not be able to update the metadata when the
	 * hydration finishes.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		hlist_del(&hd->h);
		bucket_unlock_irq(bucket);
		free_hydration(hd);
		bio_io_error(bio);
		return;
	}

	/*
	 * Start region hydration.
	 *
	 * If a bio overwrites a region, i.e., its size is equal to the
	 * region's size, then we don't need to copy the region from the source
	 * to the destination device.
	 */
	if (is_overwrite_bio(clone, bio)) {
		bucket_unlock_irq(bucket);
		hydration_overwrite(hd, bio);
	} else {
		bio_list_add(&hd->deferred_bios, bio);
		bucket_unlock_irq(bucket);
		hydration_copy(hd, 1);
	}
}
/*---------------------------------------------------------------------------*/

/*
 * Background hydrations.
 */
/*
 * Batch region hydrations.
 *
 * To better utilize device bandwidth we batch together the hydration of
 * adjacent regions. This allows us to use small region sizes, e.g., 4KB, which
 * is good for small, random write performance (because of the overwriting of
 * un-hydrated regions) and at the same time issue big copy requests to kcopyd
 * to achieve high hydration bandwidth.
 */
struct batch_info {
	struct dm_clone_region_hydration *head;
	unsigned int nr_batched_regions;
};
static void __batch_hydration(struct batch_info *batch,
			      struct dm_clone_region_hydration *hd)
{
	struct clone *clone = hd->clone;
	unsigned int max_batch_size = READ_ONCE(clone->hydration_batch_size);

	if (batch->head) {
		/* Try to extend the current batch */
		if (batch->nr_batched_regions < max_batch_size &&
		    (batch->head->region_nr + batch->nr_batched_regions) == hd->region_nr) {
			list_add_tail(&hd->list, &batch->head->list);
			batch->nr_batched_regions++;
			hd = NULL;
		}

		/* Check if we should issue the current batch */
		if (batch->nr_batched_regions >= max_batch_size || hd) {
			hydration_copy(batch->head, batch->nr_batched_regions);
			batch->head = NULL;
			batch->nr_batched_regions = 0;
		}
	}

	if (!hd)
		return;

	/* We treat max batch sizes of zero and one equivalently */
	if (max_batch_size <= 1) {
		hydration_copy(hd, 1);
		return;
	}

	/* Start a new batch */
	BUG_ON(!list_empty(&hd->list));
	batch->head = hd;
	batch->nr_batched_regions = 1;
}
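/*
 * E.g., with hydration_batch_size = 4, four adjacent unhydrated regions
 * 10..13 are linked into one batch and issued as a single kcopyd copy of
 * four regions, instead of four separate copies.
 */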
static unsigned long __start_next_hydration(struct clone *clone,
					    unsigned long offset,
					    struct batch_info *batch)
{
	struct hash_table_bucket *bucket;
	struct dm_clone_region_hydration *hd;
	unsigned long nr_regions = clone->nr_regions;

	hd = alloc_hydration(clone);

	/* Try to find a region to hydrate. */
	do {
		offset = dm_clone_find_next_unhydrated_region(clone->cmd, offset);
		if (offset == nr_regions)
			break;

		bucket = get_hash_table_bucket(clone, offset);
		bucket_lock_irq(bucket);

		if (!dm_clone_is_region_hydrated(clone->cmd, offset) &&
		    !__hash_find(bucket, offset)) {
			hydration_init(hd, offset);
			__insert_region_hydration(bucket, hd);
			bucket_unlock_irq(bucket);

			/* Batch hydration */
			__batch_hydration(batch, hd);

			return (offset + 1);
		}

		bucket_unlock_irq(bucket);

	} while (++offset < nr_regions);

	if (hd)
		free_hydration(hd);

	return nr_regions;
}
/*
 * This function searches for regions that still reside in the source device
 * and starts their hydration.
 */
static void do_hydration(struct clone *clone)
{
	unsigned int current_volume;
	unsigned long offset, nr_regions = clone->nr_regions;

	struct batch_info batch = {
		.head = NULL,
		.nr_batched_regions = 0,
	};

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
		return;

	if (dm_clone_is_hydration_done(clone->cmd))
		return;

	/*
	 * Avoid race with device suspension.
	 */
	atomic_inc(&clone->hydrations_in_flight);

	/*
	 * Make sure atomic_inc() is ordered before test_bit(), otherwise we
	 * might race with clone_postsuspend() and start a region hydration
	 * after the target has been suspended.
	 *
	 * This is paired with the smp_mb__after_atomic() in
	 * clone_postsuspend().
	 */
	smp_mb__after_atomic();

	offset = clone->hydration_offset;
	while (likely(!test_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags)) &&
	       !atomic_read(&clone->ios_in_flight) &&
	       test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags) &&
	       offset < nr_regions) {
		current_volume = atomic_read(&clone->hydrations_in_flight);
		current_volume += batch.nr_batched_regions;

		if (current_volume > READ_ONCE(clone->hydration_threshold))
			break;

		offset = __start_next_hydration(clone, offset, &batch);
	}

	if (batch.head)
		hydration_copy(batch.head, batch.nr_batched_regions);

	if (offset >= nr_regions)
		offset = 0;

	clone->hydration_offset = offset;

	if (atomic_dec_and_test(&clone->hydrations_in_flight))
		wakeup_hydration_waiters(clone);
}
/*---------------------------------------------------------------------------*/

static bool need_commit_due_to_time(struct clone *clone)
{
	return !time_in_range(jiffies, clone->last_commit_jiffies,
			      clone->last_commit_jiffies + COMMIT_PERIOD);
}
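/* With COMMIT_PERIOD = HZ this amounts to a commit roughly once per second. */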
/*
 * A non-zero return indicates read-only or fail mode.
 */
static int commit_metadata(struct clone *clone, bool *dest_dev_flushed)
{
	int r = 0;

	if (dest_dev_flushed)
		*dest_dev_flushed = false;

	mutex_lock(&clone->commit_lock);

	if (!dm_clone_changed_this_transaction(clone->cmd))
		goto out;

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		r = -EPERM;
		goto out;
	}

	r = dm_clone_metadata_pre_commit(clone->cmd);
	if (unlikely(r)) {
		__metadata_operation_failed(clone, "dm_clone_metadata_pre_commit", r);
		goto out;
	}

	r = blkdev_issue_flush(clone->dest_dev->bdev);
	if (unlikely(r)) {
		__metadata_operation_failed(clone, "flush destination device", r);
		goto out;
	}

	if (dest_dev_flushed)
		*dest_dev_flushed = true;

	r = dm_clone_metadata_commit(clone->cmd);
	if (unlikely(r)) {
		__metadata_operation_failed(clone, "dm_clone_metadata_commit", r);
		goto out;
	}

	if (dm_clone_is_hydration_done(clone->cmd))
		dm_table_event(clone->ti->table);
out:
	mutex_unlock(&clone->commit_lock);

	return r;
}
static void process_deferred_discards(struct clone *clone)
{
	int r = -EPERM;
	struct bio *bio;
	struct blk_plug plug;
	unsigned long rs, nr_regions;
	struct bio_list discards = BIO_EMPTY_LIST;

	spin_lock_irq(&clone->lock);
	bio_list_merge_init(&discards, &clone->deferred_discard_bios);
	spin_unlock_irq(&clone->lock);

	if (bio_list_empty(&discards))
		return;

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
		goto out;

	/* Update the metadata */
	bio_list_for_each(bio, &discards) {
		bio_region_range(clone, bio, &rs, &nr_regions);
		/*
		 * A discard request might cover regions that have been already
		 * hydrated. There is no need to update the metadata for these
		 * regions.
		 */
		r = dm_clone_cond_set_range(clone->cmd, rs, nr_regions);
		if (unlikely(r))
			break;
	}
out:
	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&discards)))
		complete_discard_bio(clone, bio, r == 0);
	blk_finish_plug(&plug);
}
static void process_deferred_bios(struct clone *clone)
{
	struct bio_list bios = BIO_EMPTY_LIST;

	spin_lock_irq(&clone->lock);
	bio_list_merge_init(&bios, &clone->deferred_bios);
	spin_unlock_irq(&clone->lock);

	if (bio_list_empty(&bios))
		return;

	submit_bios(&bios);
}
static void process_deferred_flush_bios(struct clone *clone)
{
	struct bio *bio;
	bool dest_dev_flushed;
	struct bio_list bios = BIO_EMPTY_LIST;
	struct bio_list bio_completions = BIO_EMPTY_LIST;

	/*
	 * If there are any deferred flush bios, we must commit the metadata
	 * before issuing them or signaling their completion.
	 */
	spin_lock_irq(&clone->lock);
	bio_list_merge_init(&bios, &clone->deferred_flush_bios);
	bio_list_merge_init(&bio_completions,
			    &clone->deferred_flush_completions);
	spin_unlock_irq(&clone->lock);

	if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
	    !(dm_clone_changed_this_transaction(clone->cmd) && need_commit_due_to_time(clone)))
		return;

	if (commit_metadata(clone, &dest_dev_flushed)) {
		bio_list_merge(&bios, &bio_completions);

		while ((bio = bio_list_pop(&bios)))
			bio_io_error(bio);

		return;
	}

	clone->last_commit_jiffies = jiffies;

	while ((bio = bio_list_pop(&bio_completions)))
		bio_endio(bio);

	while ((bio = bio_list_pop(&bios))) {
		if ((bio->bi_opf & REQ_PREFLUSH) && dest_dev_flushed) {
			/* We just flushed the destination device as part of
			 * the metadata commit, so there is no reason to send
			 * another flush.
			 */
			bio_endio(bio);
		} else
			submit_bio_noacct(bio);
	}
}
static void do_worker(struct work_struct *work)
{
	struct clone *clone = container_of(work, typeof(*clone), worker);

	process_deferred_bios(clone);
	process_deferred_discards(clone);

	/*
	 * process_deferred_flush_bios():
	 *
	 *   - Commit metadata
	 *
	 *   - Process deferred REQ_FUA completions
	 *
	 *   - Process deferred REQ_PREFLUSH bios
	 */
	process_deferred_flush_bios(clone);

	/* Background hydration */
	do_hydration(clone);
}
/*
 * Commit periodically so that not too much unwritten data builds up.
 *
 * Also, restart background hydration, if it has been stopped by in-flight I/O.
 */
static void do_waker(struct work_struct *work)
{
	struct clone *clone = container_of(to_delayed_work(work), struct clone, waker);

	wake_worker(clone);
	queue_delayed_work(clone->wq, &clone->waker, COMMIT_PERIOD);
}
/*---------------------------------------------------------------------------*/

/*
 * Target methods
 */
static int clone_map(struct dm_target *ti, struct bio *bio)
{
	struct clone *clone = ti->private;
	unsigned long region_nr;

	atomic_inc(&clone->ios_in_flight);

	if (unlikely(get_clone_mode(clone) == CM_FAIL))
		return DM_MAPIO_KILL;

	/*
	 * REQ_PREFLUSH bios carry no data:
	 *
	 * - Commit metadata, if changed
	 *
	 * - Pass down to destination device
	 */
	if (bio->bi_opf & REQ_PREFLUSH) {
		remap_and_issue(clone, bio);
		return DM_MAPIO_SUBMITTED;
	}

	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	/*
	 * dm-clone interprets discards and performs a fast hydration of the
	 * discarded regions, i.e., we skip the copy from the source device and
	 * just mark the regions as hydrated.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD) {
		process_discard_bio(clone, bio);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * If the bio's region is hydrated, redirect it to the destination
	 * device.
	 *
	 * If the region is not hydrated and the bio is a READ, redirect it to
	 * the source device.
	 *
	 * Else, defer WRITE bio until after its region has been hydrated and
	 * start the region's hydration immediately.
	 */
	region_nr = bio_to_region(clone, bio);
	if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
		remap_and_issue(clone, bio);
		return DM_MAPIO_SUBMITTED;
	} else if (bio_data_dir(bio) == READ) {
		remap_to_source(clone, bio);
		return DM_MAPIO_REMAPPED;
	}

	remap_to_dest(clone, bio);
	hydrate_bio_region(clone, bio);

	return DM_MAPIO_SUBMITTED;
}
static int clone_endio(struct dm_target *ti, struct bio *bio, blk_status_t *error)
{
	struct clone *clone = ti->private;

	atomic_dec(&clone->ios_in_flight);

	return DM_ENDIO_DONE;
}
static void emit_flags(struct clone *clone, char *result, unsigned int maxlen,
		       ssize_t *sz_ptr)
{
	ssize_t sz = *sz_ptr;
	unsigned int count;

	count = !test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
	count += !test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);

	DMEMIT("%u ", count);

	if (!test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags))
		DMEMIT("no_hydration ");

	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
		DMEMIT("no_discard_passdown ");

	*sz_ptr = sz;
}
static void emit_core_args(struct clone *clone, char *result,
			   unsigned int maxlen, ssize_t *sz_ptr)
{
	ssize_t sz = *sz_ptr;
	unsigned int count = 4;

	DMEMIT("%u hydration_threshold %u hydration_batch_size %u ", count,
	       READ_ONCE(clone->hydration_threshold),
	       READ_ONCE(clone->hydration_batch_size));

	*sz_ptr = sz;
}
/*
 * Status format:
 *
 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
 * <clone region size> <#hydrated regions>/<#total regions> <#hydrating regions>
 * <#features> <features>* <#core args> <core args>* <clone metadata mode>
 */
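/*
 * Hypothetical example of a resulting STATUSTYPE_INFO line, shown for the
 * field layout only (the values are illustrative):
 *
 *   8 72/4096 8 1024/2048 1 0 4 hydration_threshold 1 hydration_batch_size 1 rw
 */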
static void clone_status(struct dm_target *ti, status_type_t type,
			 unsigned int status_flags, char *result,
			 unsigned int maxlen)
{
	int r;
	unsigned int i;
	ssize_t sz = 0;
	dm_block_t nr_free_metadata_blocks = 0;
	dm_block_t nr_metadata_blocks = 0;
	char buf[BDEVNAME_SIZE];
	struct clone *clone = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (get_clone_mode(clone) == CM_FAIL) {
			DMEMIT("Fail");
			break;
		}

		/* Commit to ensure statistics aren't out-of-date */
		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
			(void) commit_metadata(clone, NULL);

		r = dm_clone_get_free_metadata_block_count(clone->cmd, &nr_free_metadata_blocks);

		if (r) {
			DMERR("%s: dm_clone_get_free_metadata_block_count returned %d",
			      clone_device_name(clone), r);
			goto error;
		}

		r = dm_clone_get_metadata_dev_size(clone->cmd, &nr_metadata_blocks);

		if (r) {
			DMERR("%s: dm_clone_get_metadata_dev_size returned %d",
			      clone_device_name(clone), r);
			goto error;
		}

		DMEMIT("%u %llu/%llu %llu %u/%lu %u ",
		       DM_CLONE_METADATA_BLOCK_SIZE,
		       (unsigned long long)(nr_metadata_blocks - nr_free_metadata_blocks),
		       (unsigned long long)nr_metadata_blocks,
		       (unsigned long long)clone->region_size,
		       dm_clone_nr_of_hydrated_regions(clone->cmd),
		       clone->nr_regions,
		       atomic_read(&clone->hydrations_in_flight));

		emit_flags(clone, result, maxlen, &sz);
		emit_core_args(clone, result, maxlen, &sz);

		switch (get_clone_mode(clone)) {
		case CM_WRITE:
			DMEMIT("rw");
			break;
		default:
			DMEMIT("ro");
		}

		break;

	case STATUSTYPE_TABLE:
		format_dev_t(buf, clone->metadata_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);

		format_dev_t(buf, clone->dest_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);

		format_dev_t(buf, clone->source_dev->bdev->bd_dev);
		DMEMIT("%s", buf);

		for (i = 0; i < clone->nr_ctr_args; i++)
			DMEMIT(" %s", clone->ctr_args[i]);

		break;

	case STATUSTYPE_IMA:
		*result = '\0';
		break;
	}

	return;

error:
	DMEMIT("Error");
}
static sector_t get_dev_size(struct dm_dev *dev)
{
	return bdev_nr_sectors(dev->bdev);
}
/*---------------------------------------------------------------------------*/

/*
 * Construct a clone device mapping:
 *
 * clone <metadata dev> <destination dev> <source dev> <region size>
 *	 [<#feature args> [<feature arg>]* [<#core args> [key value]*]]
 *
 * metadata dev: Fast device holding the persistent metadata
 * destination dev: The destination device, which will become a clone of the
 *                  source device
 * source dev: The read-only source device that gets cloned
 * region size: dm-clone unit size in sectors
 *
 * #feature args: Number of feature arguments passed
 * feature args: E.g. no_hydration, no_discard_passdown
 *
 * #core arguments: An even number of core arguments
 * core arguments: Key/value pairs for tuning the core
 *		   E.g. 'hydration_threshold 256'
 */
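/*
 * Hypothetical example (device names and sizes are illustrative): cloning a
 * 1GiB (2097152-sector) device with 8-sector regions and background
 * hydration disabled:
 *
 *   dmsetup create clone --table \
 *     "0 2097152 clone /dev/sdc /dev/sdb /dev/sda 8 1 no_hydration"
 */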
static int parse_feature_args(struct dm_arg_set *as, struct clone *clone)
{
	int r;
	unsigned int argc;
	const char *arg_name;
	struct dm_target *ti = clone->ti;

	const struct dm_arg args = {
		.min = 0,
		.max = 2,
		.error = "Invalid number of feature arguments"
	};

	/* No feature arguments supplied */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(&args, as, &argc, &ti->error);
	if (r)
		return r;

	while (argc) {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "no_hydration")) {
			__clear_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
		} else if (!strcasecmp(arg_name, "no_discard_passdown")) {
			__clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
		} else {
			ti->error = "Invalid feature argument";
			return -EINVAL;
		}
	}

	return 0;
}
static int parse_core_args(struct dm_arg_set *as, struct clone *clone)
{
	int r;
	unsigned int argc;
	unsigned int value;
	const char *arg_name;
	struct dm_target *ti = clone->ti;

	const struct dm_arg args = {
		.min = 0,
		.max = 4,
		.error = "Invalid number of core arguments"
	};

	/* Initialize core arguments */
	clone->hydration_batch_size = DEFAULT_HYDRATION_BATCH_SIZE;
	clone->hydration_threshold = DEFAULT_HYDRATION_THRESHOLD;

	/* No core arguments supplied */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(&args, as, &argc, &ti->error);
	if (r)
		return r;

	if (argc & 1) {
		ti->error = "Number of core arguments must be even";
		return -EINVAL;
	}

	while (argc) {
		arg_name = dm_shift_arg(as);
		argc -= 2;

		if (!strcasecmp(arg_name, "hydration_threshold")) {
			if (kstrtouint(dm_shift_arg(as), 10, &value)) {
				ti->error = "Invalid value for argument `hydration_threshold'";
				return -EINVAL;
			}
			clone->hydration_threshold = value;
		} else if (!strcasecmp(arg_name, "hydration_batch_size")) {
			if (kstrtouint(dm_shift_arg(as), 10, &value)) {
				ti->error = "Invalid value for argument `hydration_batch_size'";
				return -EINVAL;
			}
			clone->hydration_batch_size = value;
		} else {
			ti->error = "Invalid core argument";
			return -EINVAL;
		}
	}

	return 0;
}
static int parse_region_size(struct clone *clone, struct dm_arg_set *as, char **error)
{
	int r;
	unsigned int region_size;
	struct dm_arg arg;

	arg.min = MIN_REGION_SIZE;
	arg.max = MAX_REGION_SIZE;
	arg.error = "Invalid region size";

	r = dm_read_arg(&arg, as, &region_size, error);
	if (r)
		return r;

	/* Check region size is a power of 2 */
	if (!is_power_of_2(region_size)) {
		*error = "Region size is not a power of 2";
		return -EINVAL;
	}

	/* Validate the region size against the device logical block size */
	if (region_size % (bdev_logical_block_size(clone->source_dev->bdev) >> 9) ||
	    region_size % (bdev_logical_block_size(clone->dest_dev->bdev) >> 9)) {
		*error = "Region size is not a multiple of device logical block size";
		return -EINVAL;
	}

	clone->region_size = region_size;

	return 0;
}
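/*
 * E.g., for devices with a 4096-byte logical block size,
 * bdev_logical_block_size() >> 9 is 8 sectors, so only region sizes that are
 * multiples of 8 sectors are accepted.
 */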
static int validate_nr_regions(unsigned long n, char **error)
{
	/*
	 * dm_bitset restricts us to 2^32 regions. test_bit & co. restrict us
	 * further to 2^31 regions.
	 */
	if (n > (1UL << 31)) {
		*error = "Too many regions. Consider increasing the region size";
		return -EINVAL;
	}

	return 0;
}
static int parse_metadata_dev(struct clone *clone, struct dm_arg_set *as, char **error)
{
	int r;
	sector_t metadata_dev_size;

	r = dm_get_device(clone->ti, dm_shift_arg(as),
			  BLK_OPEN_READ | BLK_OPEN_WRITE, &clone->metadata_dev);
	if (r) {
		*error = "Error opening metadata device";
		return r;
	}

	metadata_dev_size = get_dev_size(clone->metadata_dev);
	if (metadata_dev_size > DM_CLONE_METADATA_MAX_SECTORS_WARNING)
		DMWARN("Metadata device %pg is larger than %u sectors: excess space will not be used.",
		       clone->metadata_dev->bdev, DM_CLONE_METADATA_MAX_SECTORS);

	return 0;
}
static int parse_dest_dev(struct clone *clone, struct dm_arg_set *as, char **error)
{
	int r;
	sector_t dest_dev_size;

	r = dm_get_device(clone->ti, dm_shift_arg(as),
			  BLK_OPEN_READ | BLK_OPEN_WRITE, &clone->dest_dev);
	if (r) {
		*error = "Error opening destination device";
		return r;
	}

	dest_dev_size = get_dev_size(clone->dest_dev);
	if (dest_dev_size < clone->ti->len) {
		dm_put_device(clone->ti, clone->dest_dev);
		*error = "Device size larger than destination device";
		return -EINVAL;
	}

	return 0;
}
static int parse_source_dev(struct clone *clone, struct dm_arg_set *as, char **error)
{
	int r;
	sector_t source_dev_size;

	r = dm_get_device(clone->ti, dm_shift_arg(as), BLK_OPEN_READ,
			  &clone->source_dev);
	if (r) {
		*error = "Error opening source device";
		return r;
	}

	source_dev_size = get_dev_size(clone->source_dev);
	if (source_dev_size < clone->ti->len) {
		dm_put_device(clone->ti, clone->source_dev);
		*error = "Device size larger than source device";
		return -EINVAL;
	}

	return 0;
}
static int copy_ctr_args(struct clone *clone, int argc, const char **argv, char **error)
{
	int i;
	const char **copy;

	copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
	if (!copy)
		goto error;

	for (i = 0; i < argc; i++) {
		copy[i] = kstrdup(argv[i], GFP_KERNEL);

		if (!copy[i]) {
			while (i--)
				kfree(copy[i]);
			kfree(copy);
			goto error;
		}
	}

	clone->nr_ctr_args = argc;
	clone->ctr_args = copy;
	return 0;

error:
	*error = "Failed to allocate memory for table line";
	return -ENOMEM;
}
static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	sector_t nr_regions;
	struct clone *clone;
	struct dm_arg_set as;

	if (argc < 4) {
		ti->error = "Invalid number of arguments";
		return -EINVAL;
	}

	as.argc = argc;
	as.argv = argv;

	clone = kzalloc(sizeof(*clone), GFP_KERNEL);
	if (!clone) {
		ti->error = "Failed to allocate clone structure";
		return -ENOMEM;
	}

	clone->ti = ti;

	/* Initialize dm-clone flags */
	__set_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
	__set_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);
	__set_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);

	r = parse_metadata_dev(clone, &as, &ti->error);
	if (r)
		goto out_with_clone;

	r = parse_dest_dev(clone, &as, &ti->error);
	if (r)
		goto out_with_meta_dev;

	r = parse_source_dev(clone, &as, &ti->error);
	if (r)
		goto out_with_dest_dev;

	r = parse_region_size(clone, &as, &ti->error);
	if (r)
		goto out_with_source_dev;

	clone->region_shift = __ffs(clone->region_size);
	nr_regions = dm_sector_div_up(ti->len, clone->region_size);

	/* Check for overflow */
	if (nr_regions != (unsigned long)nr_regions) {
		ti->error = "Too many regions. Consider increasing the region size";
		r = -EOVERFLOW;
		goto out_with_source_dev;
	}

	clone->nr_regions = nr_regions;

	r = validate_nr_regions(clone->nr_regions, &ti->error);
	if (r)
		goto out_with_source_dev;

	r = dm_set_target_max_io_len(ti, clone->region_size);
	if (r) {
		ti->error = "Failed to set max io len";
		goto out_with_source_dev;
	}

	r = parse_feature_args(&as, clone);
	if (r)
		goto out_with_source_dev;

	r = parse_core_args(&as, clone);
	if (r)
		goto out_with_source_dev;

	/* Load metadata */
	clone->cmd = dm_clone_metadata_open(clone->metadata_dev->bdev, ti->len,
					    clone->region_size);
	if (IS_ERR(clone->cmd)) {
		ti->error = "Failed to load metadata";
		r = PTR_ERR(clone->cmd);
		goto out_with_source_dev;
	}

	__set_clone_mode(clone, CM_WRITE);

	if (get_clone_mode(clone) != CM_WRITE) {
		ti->error = "Unable to get write access to metadata, please check/repair metadata";
		r = -EPERM;
		goto out_with_metadata;
	}

	clone->last_commit_jiffies = jiffies;

	/* Allocate hydration hash table */
	r = hash_table_init(clone);
	if (r) {
		ti->error = "Failed to allocate hydration hash table";
		goto out_with_metadata;
	}

	atomic_set(&clone->ios_in_flight, 0);
	init_waitqueue_head(&clone->hydration_stopped);
	spin_lock_init(&clone->lock);
	bio_list_init(&clone->deferred_bios);
	bio_list_init(&clone->deferred_discard_bios);
	bio_list_init(&clone->deferred_flush_bios);
	bio_list_init(&clone->deferred_flush_completions);
	clone->hydration_offset = 0;
	atomic_set(&clone->hydrations_in_flight, 0);

	clone->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
	if (!clone->wq) {
		ti->error = "Failed to allocate workqueue";
		r = -ENOMEM;
		goto out_with_ht;
	}

	INIT_WORK(&clone->worker, do_worker);
	INIT_DELAYED_WORK(&clone->waker, do_waker);

	clone->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(clone->kcopyd_client)) {
		r = PTR_ERR(clone->kcopyd_client);
		goto out_with_wq;
	}

	r = mempool_init_slab_pool(&clone->hydration_pool, MIN_HYDRATIONS,
				   _hydration_cache);
	if (r) {
		ti->error = "Failed to create dm_clone_region_hydration memory pool";
		goto out_with_kcopyd;
	}

	/* Save a copy of the table line */
	r = copy_ctr_args(clone, argc - 3, (const char **)argv + 3, &ti->error);
	if (r)
		goto out_with_mempool;

	mutex_init(&clone->commit_lock);

	/* Enable flushes */
	ti->num_flush_bios = 1;
	ti->flush_supported = true;

	/* Enable discards */
	ti->discards_supported = true;
	ti->num_discard_bios = 1;

	ti->private = clone;

	return 0;

out_with_mempool:
	mempool_exit(&clone->hydration_pool);
out_with_kcopyd:
	dm_kcopyd_client_destroy(clone->kcopyd_client);
out_with_wq:
	destroy_workqueue(clone->wq);
out_with_ht:
	hash_table_exit(clone);
out_with_metadata:
	dm_clone_metadata_close(clone->cmd);
out_with_source_dev:
	dm_put_device(ti, clone->source_dev);
out_with_dest_dev:
	dm_put_device(ti, clone->dest_dev);
out_with_meta_dev:
	dm_put_device(ti, clone->metadata_dev);
out_with_clone:
	kfree(clone);

	return r;
}
static void clone_dtr(struct dm_target *ti)
{
	unsigned int i;
	struct clone *clone = ti->private;

	mutex_destroy(&clone->commit_lock);

	for (i = 0; i < clone->nr_ctr_args; i++)
		kfree(clone->ctr_args[i]);
	kfree(clone->ctr_args);

	mempool_exit(&clone->hydration_pool);
	dm_kcopyd_client_destroy(clone->kcopyd_client);
	cancel_delayed_work_sync(&clone->waker);
	destroy_workqueue(clone->wq);
	hash_table_exit(clone);
	dm_clone_metadata_close(clone->cmd);
	dm_put_device(ti, clone->source_dev);
	dm_put_device(ti, clone->dest_dev);
	dm_put_device(ti, clone->metadata_dev);

	kfree(clone);
}
/*---------------------------------------------------------------------------*/

static void clone_postsuspend(struct dm_target *ti)
{
	struct clone *clone = ti->private;

	/*
	 * To successfully suspend the device:
	 *
	 *	- We cancel the delayed work for periodic commits and wait for
	 *	  it to finish.
	 *
	 *	- We stop the background hydration, i.e. we prevent new region
	 *	  hydrations from starting.
	 *
	 *	- We wait for any in-flight hydrations to finish.
	 *
	 *	- We flush the workqueue.
	 *
	 *	- We commit the metadata.
	 */
	cancel_delayed_work_sync(&clone->waker);

	set_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);

	/*
	 * Make sure set_bit() is ordered before atomic_read(), otherwise we
	 * might race with do_hydration() and miss some started region
	 * hydrations.
	 *
	 * This is paired with smp_mb__after_atomic() in do_hydration().
	 */
	smp_mb__after_atomic();

	wait_event(clone->hydration_stopped, !atomic_read(&clone->hydrations_in_flight));
	flush_workqueue(clone->wq);

	(void) commit_metadata(clone, NULL);
}
static void clone_resume(struct dm_target *ti)
{
	struct clone *clone = ti->private;

	clear_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);
	do_waker(&clone->waker.work);
}
/*
 * If discard_passdown was enabled verify that the destination device supports
 * discards. Disable discard_passdown if not.
 */
static void disable_passdown_if_not_supported(struct clone *clone)
{
	struct block_device *dest_dev = clone->dest_dev->bdev;
	struct queue_limits *dest_limits = bdev_limits(dest_dev);
	const char *reason = NULL;

	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
		return;

	if (!bdev_max_discard_sectors(dest_dev))
		reason = "discard unsupported";
	else if (dest_limits->max_discard_sectors < clone->region_size)
		reason = "max discard sectors smaller than a region";

	if (reason) {
		DMWARN("Destination device (%pg) %s: Disabling discard passdown.",
		       dest_dev, reason);
		clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
	}
}
static void set_discard_limits(struct clone *clone, struct queue_limits *limits)
{
	struct block_device *dest_bdev = clone->dest_dev->bdev;
	struct queue_limits *dest_limits = bdev_limits(dest_bdev);

	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags)) {
		/* No passdown is done so we set our own virtual limits */
		limits->discard_granularity = clone->region_size << SECTOR_SHIFT;
		limits->max_hw_discard_sectors = round_down(UINT_MAX >> SECTOR_SHIFT,
							    clone->region_size);
		return;
	}

	/*
	 * clone_iterate_devices() is stacking both the source and destination
	 * device limits but discards aren't passed to the source device, so
	 * inherit destination's limits.
	 */
	limits->max_hw_discard_sectors = dest_limits->max_hw_discard_sectors;
	limits->discard_granularity = dest_limits->discard_granularity;
	limits->discard_alignment = dest_limits->discard_alignment;
	limits->max_discard_segments = dest_limits->max_discard_segments;
}
static void clone_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct clone *clone = ti->private;
	u64 io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * If the system-determined stacked limits are compatible with
	 * dm-clone's region size (io_opt is a factor) do not override them.
	 */
	if (io_opt_sectors < clone->region_size ||
	    do_div(io_opt_sectors, clone->region_size)) {
		limits->io_min = clone->region_size << SECTOR_SHIFT;
		limits->io_opt = clone->region_size << SECTOR_SHIFT;
	}

	disable_passdown_if_not_supported(clone);
	set_discard_limits(clone, limits);
}
static int clone_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	int ret;
	struct clone *clone = ti->private;
	struct dm_dev *dest_dev = clone->dest_dev;
	struct dm_dev *source_dev = clone->source_dev;

	ret = fn(ti, source_dev, 0, ti->len, data);
	if (!ret)
		ret = fn(ti, dest_dev, 0, ti->len, data);
	return ret;
}
/*
 * dm-clone message functions.
 */
static void set_hydration_threshold(struct clone *clone, unsigned int nr_regions)
{
	WRITE_ONCE(clone->hydration_threshold, nr_regions);

	/*
	 * If user space sets hydration_threshold to zero then the hydration
	 * will stop. If at a later time the hydration_threshold is increased
	 * we must restart the hydration process by waking up the worker.
	 */
	wake_worker(clone);
}
static void set_hydration_batch_size(struct clone *clone, unsigned int nr_regions)
{
	WRITE_ONCE(clone->hydration_batch_size, nr_regions);
}
static void enable_hydration(struct clone *clone)
{
	if (!test_and_set_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags))
		wake_worker(clone);
}
static void disable_hydration(struct clone *clone)
{
	clear_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
}
static int clone_message(struct dm_target *ti, unsigned int argc, char **argv,
			 char *result, unsigned int maxlen)
{
	unsigned int value;
	struct clone *clone = ti->private;

	if (!argc)
		return -EINVAL;

	if (!strcasecmp(argv[0], "enable_hydration")) {
		enable_hydration(clone);
		return 0;
	}

	if (!strcasecmp(argv[0], "disable_hydration")) {
		disable_hydration(clone);
		return 0;
	}

	if (argc != 2)
		return -EINVAL;

	if (!strcasecmp(argv[0], "hydration_threshold")) {
		if (kstrtouint(argv[1], 10, &value))
			return -EINVAL;

		set_hydration_threshold(clone, value);

		return 0;
	}

	if (!strcasecmp(argv[0], "hydration_batch_size")) {
		if (kstrtouint(argv[1], 10, &value))
			return -EINVAL;

		set_hydration_batch_size(clone, value);

		return 0;
	}

	DMERR("%s: Unsupported message `%s'", clone_device_name(clone), argv[0]);
	return -EINVAL;
}
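/*
 * Messages are sent with dmsetup, e.g. (hypothetical device name):
 *
 *   dmsetup message clone-dev 0 hydration_threshold 256
 *   dmsetup message clone-dev 0 disable_hydration
 */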
static struct target_type clone_target = {
	.name = "clone",
	.version = {1, 0, 0},
	.module = THIS_MODULE,
	.ctr = clone_ctr,
	.dtr = clone_dtr,
	.map = clone_map,
	.end_io = clone_endio,
	.postsuspend = clone_postsuspend,
	.resume = clone_resume,
	.status = clone_status,
	.message = clone_message,
	.io_hints = clone_io_hints,
	.iterate_devices = clone_iterate_devices,
};
/*---------------------------------------------------------------------------*/

/* Module functions */
static int __init dm_clone_init(void)
{
	int r;

	_hydration_cache = KMEM_CACHE(dm_clone_region_hydration, 0);
	if (!_hydration_cache)
		return -ENOMEM;

	r = dm_register_target(&clone_target);
	if (r < 0)
		kmem_cache_destroy(_hydration_cache);

	return r;
}
static void __exit dm_clone_exit(void)
{
	dm_unregister_target(&clone_target);

	kmem_cache_destroy(_hydration_cache);
	_hydration_cache = NULL;
}
module_init(dm_clone_init);
module_exit(dm_clone_exit);

MODULE_DESCRIPTION(DM_NAME " clone target");
MODULE_AUTHOR("Nikos Tsironis <ntsironis@arrikto.com>");
MODULE_LICENSE("GPL");