// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/dax.h>
#include <linux/pfn_t.h>
#include <linux/libnvdimm.h>
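
/*
 * Table line format, matching the constructor below (an illustrative
 * example; the device names are placeholders):
 *
 *	writecache <p|s> <origin dev> <cache dev> <block size> <#opt args> <opt args...>
 *
 *	dmsetup create wc --table "0 `blockdev --getsz /dev/sdb` \
 *		writecache s /dev/sdb /dev/nvme0n1 4096 2 high_watermark 60"
 */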

#define DM_MSG_PREFIX "writecache"

#define HIGH_WATERMARK			50
#define LOW_WATERMARK			45
#define MAX_WRITEBACK_JOBS		0
#define ENDIO_LATENCY			16
#define WRITEBACK_LATENCY		64
#define AUTOCOMMIT_BLOCKS_SSD		65536
#define AUTOCOMMIT_BLOCKS_PMEM		64
#define AUTOCOMMIT_MSEC			1000
#define MAX_AGE_DIV			16
#define MAX_AGE_UNSPECIFIED		-1UL

#define BITMAP_GRANULARITY	65536
#if BITMAP_GRANULARITY < PAGE_SIZE
#undef BITMAP_GRANULARITY
#define BITMAP_GRANULARITY	PAGE_SIZE
#endif

#if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_DAX_DRIVER)
#define DM_WRITECACHE_HAS_PMEM
#endif

#ifdef DM_WRITECACHE_HAS_PMEM
#define pmem_assign(dest, src)					\
do {								\
	typeof(dest) uniq = (src);				\
	memcpy_flushcache(&(dest), &uniq, sizeof(dest));	\
} while (0)
#else
#define pmem_assign(dest, src)	((dest) = (src))
#endif

#if defined(__HAVE_ARCH_MEMCPY_MCSAFE) && defined(DM_WRITECACHE_HAS_PMEM)
#define DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
#endif
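
/*
 * pmem_assign() makes an assignment durable on persistent memory: the value
 * is staged in a local variable and written with memcpy_flushcache() so it
 * does not linger in the volatile CPU cache.  Without pmem support it
 * degenerates to a plain assignment.  When both the architecture's pmem API
 * and mcsafe memcpy are available, reads from persistent memory are also
 * hardened against hardware memory errors
 * (DM_WRITECACHE_HANDLE_HARDWARE_ERRORS).
 */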

#define MEMORY_SUPERBLOCK_MAGIC		0x23489321
#define MEMORY_SUPERBLOCK_VERSION	1

struct wc_memory_entry {
	__le64 original_sector;
	__le64 seq_count;
};

struct wc_memory_superblock {
	union {
		struct {
			__le32 magic;
			__le32 version;
			__le32 block_size;
			__le32 pad;
			__le64 n_blocks;
			__le64 seq_count;
		};
		__le64 padding[32];
	};
	struct wc_memory_entry entries[0];
};

struct wc_entry {
	struct rb_node rb_node;
	struct list_head lru;
	unsigned short wc_list_contiguous;
	bool write_in_progress
#if BITS_PER_LONG == 64
		:1
#endif
	;
	unsigned long index
#if BITS_PER_LONG == 64
		:47
#endif
	;
	unsigned long age;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	uint64_t original_sector;
	uint64_t seq_count;
#endif
};

#ifdef DM_WRITECACHE_HAS_PMEM
#define WC_MODE_PMEM(wc)			((wc)->pmem_mode)
#define WC_MODE_FUA(wc)				((wc)->writeback_fua)
#else
#define WC_MODE_PMEM(wc)			false
#define WC_MODE_FUA(wc)				false
#endif
#define WC_MODE_SORT_FREELIST(wc)		(!WC_MODE_PMEM(wc))
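
/*
 * Only SSD mode sorts the freelist.  The free tree is keyed by entry
 * address, which mirrors cache-sector order, so
 * writecache_pop_from_freelist() can be asked for the entry at an expected
 * cache sector and multi-block writes can land on contiguous cache sectors.
 * In pmem mode there is no seek penalty, so a plain list suffices.
 */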

struct dm_writecache {
	struct mutex lock;
	struct list_head lru;
	union {
		struct list_head freelist;
		struct {
			struct rb_root freetree;
			struct wc_entry *current_free;
		};
	};
	struct rb_root tree;

	size_t freelist_size;
	size_t writeback_size;
	size_t freelist_high_watermark;
	size_t freelist_low_watermark;
	unsigned long max_age;

	unsigned uncommitted_blocks;
	unsigned autocommit_blocks;
	unsigned max_writeback_jobs;

	int error;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	struct wait_queue_head freelist_wait;

	struct timer_list max_age_timer;

	atomic_t bio_in_progress[2];
	struct wait_queue_head bio_in_progress_wait[2];

	struct dm_target *ti;
	struct dm_dev *dev;
	struct dm_dev *ssd_dev;
	sector_t start_sector;
	void *memory_map;
	uint64_t memory_map_size;
	size_t metadata_sectors;
	size_t n_blocks;
	uint64_t seq_count;
	void *block_start;
	struct wc_entry *entries;
	unsigned block_size;
	unsigned char block_size_bits;

	bool pmem_mode:1;
	bool writeback_fua:1;

	bool overwrote_committed:1;
	bool memory_vmapped:1;

	bool high_wm_percent_set:1;
	bool low_wm_percent_set:1;
	bool max_writeback_jobs_set:1;
	bool autocommit_blocks_set:1;
	bool autocommit_time_set:1;
	bool writeback_fua_set:1;
	bool flush_on_suspend:1;
	bool cleaner:1;

	unsigned writeback_all;
	struct workqueue_struct *writeback_wq;
	struct work_struct writeback_work;
	struct work_struct flush_work;

	struct dm_io_client *dm_io;

	raw_spinlock_t endio_list_lock;
	struct list_head endio_list;
	struct task_struct *endio_thread;

	struct task_struct *flush_thread;
	struct bio_list flush_list;

	struct dm_kcopyd_client *dm_kcopyd;
	unsigned long *dirty_bitmap;
	unsigned dirty_bitmap_size;

	struct bio_set bio_set;
	mempool_t copy_pool;
};

#define WB_LIST_INLINE		16

struct writeback_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry **wc_list;
	unsigned wc_list_n;
	struct wc_entry *wc_list_inline[WB_LIST_INLINE];
	struct bio bio;
};

struct copy_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry *e;
	unsigned n_entries;
	int error;
};

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(dm_writecache_throttle,
					    "A percentage of time allocated for data copying");

static void wc_lock(struct dm_writecache *wc)
{
	mutex_lock(&wc->lock);
}

static void wc_unlock(struct dm_writecache *wc)
{
	mutex_unlock(&wc->lock);
}

#ifdef DM_WRITECACHE_HAS_PMEM
static int persistent_memory_claim(struct dm_writecache *wc)
{
	int r;
	loff_t s;
	long p, da;
	pfn_t pfn;
	int id;
	struct page **pages;

	wc->memory_vmapped = false;

	if (!wc->ssd_dev->dax_dev) {
		r = -EOPNOTSUPP;
		goto err1;
	}
	s = wc->memory_map_size;
	p = s >> PAGE_SHIFT;
	if (!p) {
		r = -EINVAL;
		goto err1;
	}
	if (p != s >> PAGE_SHIFT) {
		r = -EOVERFLOW;
		goto err1;
	}

	id = dax_read_lock();

	da = dax_direct_access(wc->ssd_dev->dax_dev, 0, p, &wc->memory_map, &pfn);
	if (da < 0) {
		wc->memory_map = NULL;
		r = da;
		goto err2;
	}
	if (!pfn_t_has_page(pfn)) {
		wc->memory_map = NULL;
		r = -EOPNOTSUPP;
		goto err2;
	}
	if (da != p) {
		long i;
		wc->memory_map = NULL;
		pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
		if (!pages) {
			r = -ENOMEM;
			goto err2;
		}
		i = 0;
		do {
			long daa;
			daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i,
						NULL, &pfn);
			if (daa <= 0) {
				r = daa ? daa : -EINVAL;
				goto err3;
			}
			if (!pfn_t_has_page(pfn)) {
				r = -EOPNOTSUPP;
				goto err3;
			}
			while (daa-- && i < p) {
				pages[i++] = pfn_t_to_page(pfn);
				pfn.val++;
			}
		} while (i < p);
		wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
		if (!wc->memory_map) {
			r = -ENOMEM;
			goto err3;
		}
		kvfree(pages);
		wc->memory_vmapped = true;
	}

	dax_read_unlock(id);

	wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT;
	wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT;

	return 0;
err3:
	kvfree(pages);
err2:
	dax_read_unlock(id);
err1:
	return r;
}
#else
static int persistent_memory_claim(struct dm_writecache *wc)
{
	BUG();
}
#endif

static void persistent_memory_release(struct dm_writecache *wc)
{
	if (wc->memory_vmapped)
		vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT));
}

static struct page *persistent_memory_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static unsigned persistent_memory_page_offset(void *addr)
{
	return (unsigned long)addr & (PAGE_SIZE - 1);
}

static void persistent_memory_flush_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		flush_kernel_vmap_range(ptr, size);
}

static void persistent_memory_invalidate_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		invalidate_kernel_vmap_range(ptr, size);
}
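
/*
 * Metadata layout on the cache device:
 *
 *	[ superblock | wc_memory_entry[n_blocks] | pad to block_size | data ]
 *
 * sb() points at the superblock, memory_entry() at the per-block metadata
 * slot, memory_data() at the cached block contents, and cache_sector()
 * converts an entry index to its sector on the cache device.
 */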
static struct wc_memory_superblock *sb(struct dm_writecache *wc)
{
	return wc->memory_map;
}

static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	return &sb(wc)->entries[e->index];
}

static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
{
	return (char *)wc->block_start + (e->index << wc->block_size_bits);
}

static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
{
	return wc->start_sector + wc->metadata_sectors +
		((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
}

static uint64_t read_original_sector(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->original_sector;
#else
	return le64_to_cpu(memory_entry(wc, e)->original_sector);
#endif
}

static uint64_t read_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->seq_count;
#else
	return le64_to_cpu(memory_entry(wc, e)->seq_count);
#endif
}

static void clear_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->seq_count = -1;
#endif
	pmem_assign(memory_entry(wc, e)->seq_count, cpu_to_le64(-1));
}

static void write_original_sector_seq_count(struct dm_writecache *wc, struct wc_entry *e,
					    uint64_t original_sector, uint64_t seq_count)
{
	struct wc_memory_entry me;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->original_sector = original_sector;
	e->seq_count = seq_count;
#endif
	me.original_sector = cpu_to_le64(original_sector);
	me.seq_count = cpu_to_le64(seq_count);
	pmem_assign(*memory_entry(wc, e), me);
}
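
/*
 * Only the first error is latched (cmpxchg from 0) and logged; later
 * failures keep the original errno.  Freelist waiters are always woken so
 * that writers blocked on a free entry notice the failure.
 */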
#define writecache_error(wc, err, msg, arg...)	\
do {						\
	if (!cmpxchg(&(wc)->error, 0, err))	\
		DMERR(msg, ##arg);		\
	wake_up(&(wc)->freelist_wait);		\
} while (0)

#define writecache_has_error(wc)	(unlikely(READ_ONCE((wc)->error)))

static void writecache_flush_all_metadata(struct dm_writecache *wc)
{
	if (!WC_MODE_PMEM(wc))
		memset(wc->dirty_bitmap, -1, wc->dirty_bitmap_size);
}

static void writecache_flush_region(struct dm_writecache *wc, void *ptr, size_t size)
{
	if (!WC_MODE_PMEM(wc))
		__set_bit(((char *)ptr - (char *)wc->memory_map) / BITMAP_GRANULARITY,
			  wc->dirty_bitmap);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev);

struct io_notify {
	struct dm_writecache *wc;
	struct completion c;
	atomic_t count;
};

static void writecache_notify_io(unsigned long error, void *context)
{
	struct io_notify *endio = context;

	if (unlikely(error != 0))
		writecache_error(endio->wc, -EIO, "error writing metadata");
	BUG_ON(atomic_read(&endio->count) <= 0);
	if (atomic_dec_and_test(&endio->count))
		complete(&endio->c);
}

static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
{
	wait_event(wc->bio_in_progress_wait[direction],
		   !atomic_read(&wc->bio_in_progress[direction]));
}
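
/*
 * In SSD mode, committing metadata means walking the dirty bitmap, writing
 * each run of dirty BITMAP_GRANULARITY-sized regions to the cache device
 * with async dm-io, waiting for all of the writes, and finishing with a
 * FLUSH so the metadata is durable before the caller proceeds.
 */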
static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
	struct dm_io_region region;
	struct dm_io_request req;
	struct io_notify endio = {
		wc,
		COMPLETION_INITIALIZER_ONSTACK(endio.c),
		ATOMIC_INIT(1),
	};
	unsigned bitmap_bits = wc->dirty_bitmap_size * 8;
	unsigned i = 0;

	while (1) {
		unsigned j;
		i = find_next_bit(wc->dirty_bitmap, bitmap_bits, i);
		if (unlikely(i == bitmap_bits))
			break;
		j = find_next_zero_bit(wc->dirty_bitmap, bitmap_bits, i);

		region.bdev = wc->ssd_dev->bdev;
		region.sector = (sector_t)i * (BITMAP_GRANULARITY >> SECTOR_SHIFT);
		region.count = (sector_t)(j - i) * (BITMAP_GRANULARITY >> SECTOR_SHIFT);

		if (unlikely(region.sector >= wc->metadata_sectors))
			break;
		if (unlikely(region.sector + region.count > wc->metadata_sectors))
			region.count = wc->metadata_sectors - region.sector;

		region.sector += wc->start_sector;
		atomic_inc(&endio.count);
		req.bi_op = REQ_OP_WRITE;
		req.bi_op_flags = REQ_SYNC;
		req.mem.type = DM_IO_VMA;
		req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
		req.client = wc->dm_io;
		req.notify.fn = writecache_notify_io;
		req.notify.context = &endio;

		/* writing via async dm-io (implied by notify.fn above) won't return an error */
		(void) dm_io(&req, 1, &region, NULL);
		i = j;
	}

	writecache_notify_io(0, &endio);
	wait_for_completion_io(&endio.c);

	if (wait_for_ios)
		writecache_wait_for_ios(wc, WRITE);

	writecache_disk_flush(wc, wc->ssd_dev);

	memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size);
}

static void ssd_commit_superblock(struct dm_writecache *wc)
{
	int r;
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = wc->ssd_dev->bdev;
	region.sector = 0;
	region.count = PAGE_SIZE;

	if (unlikely(region.sector + region.count > wc->metadata_sectors))
		region.count = wc->metadata_sectors - region.sector;

	region.sector += wc->start_sector;

	req.bi_op = REQ_OP_WRITE;
	req.bi_op_flags = REQ_SYNC | REQ_FUA;
	req.mem.type = DM_IO_VMA;
	req.mem.ptr.vma = (char *)wc->memory_map;
	req.client = wc->dm_io;
	req.notify.fn = NULL;
	req.notify.context = NULL;

	r = dm_io(&req, 1, &region, NULL);
	if (unlikely(r))
		writecache_error(wc, r, "error writing superblock");
}

static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
	if (WC_MODE_PMEM(wc))
		wmb();
	else
		ssd_commit_flushed(wc, wait_for_ios);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
{
	int r;
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = dev->bdev;
	region.sector = 0;
	region.count = 0;
	req.bi_op = REQ_OP_WRITE;
	req.bi_op_flags = REQ_PREFLUSH;
	req.mem.type = DM_IO_KMEM;
	req.mem.ptr.addr = NULL;
	req.client = wc->dm_io;
	req.notify.fn = NULL;

	r = dm_io(&req, 1, &region, NULL);
	if (unlikely(r))
		writecache_error(wc, r, "error flushing metadata: %d", r);
}
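
/*
 * Look up the cache entry for an original-device block.  Without flags the
 * lookup is exact; WFE_RETURN_FOLLOWING returns the next higher entry when
 * there is no exact match (used for range scans such as discard), and
 * WFE_LOWEST_SEQ selects the oldest of several entries caching the same
 * block.
 */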
#define WFE_RETURN_FOLLOWING	1
#define WFE_LOWEST_SEQ		2

static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
					      uint64_t block, int flags)
{
	struct wc_entry *e;
	struct rb_node *node = wc->tree.rb_node;

	if (unlikely(!node))
		return NULL;

	while (1) {
		e = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e) == block)
			break;

		node = (read_original_sector(wc, e) >= block ?
			e->rb_node.rb_left : e->rb_node.rb_right);
		if (unlikely(!node)) {
			if (!(flags & WFE_RETURN_FOLLOWING))
				return NULL;
			if (read_original_sector(wc, e) >= block) {
				return e;
			} else {
				node = rb_next(&e->rb_node);
				if (unlikely(!node))
					return NULL;
				e = container_of(node, struct wc_entry, rb_node);
				return e;
			}
		}
	}

	while (1) {
		struct wc_entry *e2;
		if (flags & WFE_LOWEST_SEQ)
			node = rb_prev(&e->rb_node);
		else
			node = rb_next(&e->rb_node);
		if (unlikely(!node))
			return e;
		e2 = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e2) != block)
			return e;
		e = e2;
	}
}

static void writecache_insert_entry(struct dm_writecache *wc, struct wc_entry *ins)
{
	struct wc_entry *e;
	struct rb_node **node = &wc->tree.rb_node, *parent = NULL;

	while (*node) {
		e = container_of(*node, struct wc_entry, rb_node);
		parent = &e->rb_node;
		if (read_original_sector(wc, e) > read_original_sector(wc, ins))
			node = &parent->rb_left;
		else
			node = &parent->rb_right;
	}
	rb_link_node(&ins->rb_node, parent, node);
	rb_insert_color(&ins->rb_node, &wc->tree);
	list_add(&ins->lru, &wc->lru);
	ins->age = jiffies;
}

static void writecache_unlink(struct dm_writecache *wc, struct wc_entry *e)
{
	list_del(&e->lru);
	rb_erase(&e->rb_node, &wc->tree);
}

static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry *e)
{
	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node **node = &wc->freetree.rb_node, *parent = NULL;
		if (unlikely(!*node))
			wc->current_free = e;
		while (*node) {
			parent = *node;
			if (&e->rb_node < *node)
				node = &parent->rb_left;
			else
				node = &parent->rb_right;
		}
		rb_link_node(&e->rb_node, parent, node);
		rb_insert_color(&e->rb_node, &wc->freetree);
	} else {
		list_add_tail(&e->lru, &wc->freelist);
	}
	wc->freelist_size++;
}

static inline void writecache_verify_watermark(struct dm_writecache *wc)
{
	if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
		queue_work(wc->writeback_wq, &wc->writeback_work);
}

static void writecache_max_age_timer(struct timer_list *t)
{
	struct dm_writecache *wc = from_timer(wc, t, max_age_timer);

	if (!dm_suspended(wc->ti) && !writecache_has_error(wc)) {
		queue_work(wc->writeback_wq, &wc->writeback_work);
		mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);
	}
}

static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, sector_t expected_sector)
{
	struct wc_entry *e;

	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node *next;
		if (unlikely(!wc->current_free))
			return NULL;
		e = wc->current_free;
		if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
			return NULL;
		next = rb_next(&e->rb_node);
		rb_erase(&e->rb_node, &wc->freetree);
		if (unlikely(!next))
			next = rb_first(&wc->freetree);
		wc->current_free = next ? container_of(next, struct wc_entry, rb_node) : NULL;
	} else {
		if (unlikely(list_empty(&wc->freelist)))
			return NULL;
		e = container_of(wc->freelist.next, struct wc_entry, lru);
		if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
			return NULL;
		list_del(&e->lru);
	}
	wc->freelist_size--;

	writecache_verify_watermark(wc);

	return e;
}

static void writecache_free_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_unlink(wc, e);
	writecache_add_to_freelist(wc, e);
	clear_seq_count(wc, e);
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (unlikely(waitqueue_active(&wc->freelist_wait)))
		wake_up(&wc->freelist_wait);
}

static void writecache_wait_on_freelist(struct dm_writecache *wc)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&wc->freelist_wait, &wait, TASK_UNINTERRUPTIBLE);
	wc_unlock(wc);
	io_schedule();
	finish_wait(&wc->freelist_wait, &wait);
	wc_lock(wc);
}

static void writecache_poison_lists(struct dm_writecache *wc)
{
	/*
	 * Catch incorrect access to these values while the device is suspended.
	 */
	memset(&wc->tree, -1, sizeof wc->tree);
	wc->lru.next = LIST_POISON1;
	wc->lru.prev = LIST_POISON2;
	wc->freelist.next = LIST_POISON1;
	wc->freelist.prev = LIST_POISON2;
}

static void writecache_flush_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (WC_MODE_PMEM(wc))
		writecache_flush_region(wc, memory_data(wc, e), wc->block_size);
}

static bool writecache_entry_is_committed(struct dm_writecache *wc, struct wc_entry *e)
{
	return read_seq_count(wc, e) < wc->seq_count;
}
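
/*
 * Commit protocol: an entry is committed iff its seq_count is below the
 * superblock's wc->seq_count.  writecache_flush() flushes all uncommitted
 * entries and their data first, and only then increments and persists the
 * superblock seq_count, so a crash either keeps the old generation or
 * atomically exposes the new one.
 */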
static void writecache_flush(struct dm_writecache *wc)
{
	struct wc_entry *e, *e2;
	bool need_flush_after_free;

	wc->uncommitted_blocks = 0;
	del_timer(&wc->autocommit_timer);

	if (list_empty(&wc->lru))
		return;

	e = container_of(wc->lru.next, struct wc_entry, lru);
	if (writecache_entry_is_committed(wc, e)) {
		if (wc->overwrote_committed) {
			writecache_wait_for_ios(wc, WRITE);
			writecache_disk_flush(wc, wc->ssd_dev);
			wc->overwrote_committed = false;
		}
		return;
	}
	while (1) {
		writecache_flush_entry(wc, e);
		if (unlikely(e->lru.next == &wc->lru))
			break;
		e2 = container_of(e->lru.next, struct wc_entry, lru);
		if (writecache_entry_is_committed(wc, e2))
			break;
		e = e2;
		cond_resched();
	}
	writecache_commit_flushed(wc, true);

	wc->seq_count++;
	pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
	if (WC_MODE_PMEM(wc))
		writecache_commit_flushed(wc, false);
	else
		ssd_commit_superblock(wc);

	wc->overwrote_committed = false;

	need_flush_after_free = false;
	while (1) {
		/* Free another committed entry with lower seq-count */
		struct rb_node *rb_node = rb_prev(&e->rb_node);

		if (rb_node) {
			e2 = container_of(rb_node, struct wc_entry, rb_node);
			if (read_original_sector(wc, e2) == read_original_sector(wc, e) &&
			    likely(!e2->write_in_progress)) {
				writecache_free_entry(wc, e2);
				need_flush_after_free = true;
			}
		}
		if (unlikely(e->lru.prev == &wc->lru))
			break;
		e = container_of(e->lru.prev, struct wc_entry, lru);
		cond_resched();
	}

	if (need_flush_after_free)
		writecache_commit_flushed(wc, false);
}

static void writecache_flush_work(struct work_struct *work)
{
	struct dm_writecache *wc = container_of(work, struct dm_writecache, flush_work);

	wc_lock(wc);
	writecache_flush(wc);
	wc_unlock(wc);
}

static void writecache_autocommit_timer(struct timer_list *t)
{
	struct dm_writecache *wc = from_timer(wc, t, autocommit_timer);
	if (!writecache_has_error(wc))
		queue_work(wc->writeback_wq, &wc->flush_work);
}

static void writecache_schedule_autocommit(struct dm_writecache *wc)
{
	if (!timer_pending(&wc->autocommit_timer))
		mod_timer(&wc->autocommit_timer, jiffies + wc->autocommit_jiffies);
}

static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_t end)
{
	struct wc_entry *e;
	bool discarded_something = false;

	e = writecache_find_entry(wc, start, WFE_RETURN_FOLLOWING | WFE_LOWEST_SEQ);
	if (unlikely(!e))
		return;

	while (read_original_sector(wc, e) < end) {
		struct rb_node *node = rb_next(&e->rb_node);

		if (likely(!e->write_in_progress)) {
			if (!discarded_something) {
				writecache_wait_for_ios(wc, READ);
				writecache_wait_for_ios(wc, WRITE);
				discarded_something = true;
			}
			if (!writecache_entry_is_committed(wc, e))
				wc->uncommitted_blocks--;
			writecache_free_entry(wc, e);
		}

		if (unlikely(!node))
			break;

		e = container_of(node, struct wc_entry, rb_node);
	}

	if (discarded_something)
		writecache_commit_flushed(wc, false);
}

static bool writecache_wait_for_writeback(struct dm_writecache *wc)
{
	if (wc->writeback_size) {
		writecache_wait_on_freelist(wc);
		return true;
	}
	return false;
}

static void writecache_suspend(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;
	bool flush_on_suspend;

	del_timer_sync(&wc->autocommit_timer);
	del_timer_sync(&wc->max_age_timer);

	wc_lock(wc);
	writecache_flush(wc);
	flush_on_suspend = wc->flush_on_suspend;
	if (flush_on_suspend) {
		wc->flush_on_suspend = false;
		wc->writeback_all++;
		queue_work(wc->writeback_wq, &wc->writeback_work);
	}
	wc_unlock(wc);

	drain_workqueue(wc->writeback_wq);

	wc_lock(wc);
	if (flush_on_suspend)
		wc->writeback_all--;
	while (writecache_wait_for_writeback(wc));

	if (WC_MODE_PMEM(wc))
		persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);

	writecache_poison_lists(wc);
	wc_unlock(wc);
}

static int writecache_alloc_entries(struct dm_writecache *wc)
{
	size_t b;

	if (wc->entries)
		return 0;
	wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
	if (!wc->entries)
		return -ENOMEM;
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		e->index = b;
		e->write_in_progress = false;
		cond_resched();
	}

	return 0;
}

static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors)
{
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = wc->ssd_dev->bdev;
	region.sector = wc->start_sector;
	region.count = n_sectors;
	req.bi_op = REQ_OP_READ;
	req.bi_op_flags = REQ_SYNC;
	req.mem.type = DM_IO_VMA;
	req.mem.ptr.vma = (char *)wc->memory_map;
	req.client = wc->dm_io;
	req.notify.fn = NULL;

	return dm_io(&req, 1, &region, NULL);
}
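
/*
 * Resume re-reads (or re-validates) the metadata and rebuilds the in-core
 * rb-tree, LRU and freelist from it.  Entries with a stale seq_count are
 * erased; when two entries claim the same original sector, the newer one
 * (higher seq_count) wins and the stale copy is freed.
 */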
static void writecache_resume(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;
	size_t b;
	bool need_flush = false;
	__le64 sb_seq_count;
	int r;

	wc_lock(wc);

	if (WC_MODE_PMEM(wc)) {
		persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
	} else {
		r = writecache_read_metadata(wc, wc->metadata_sectors);
		if (r) {
			size_t sb_entries_offset;
			writecache_error(wc, r, "unable to read metadata: %d", r);
			sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
			memset((char *)wc->memory_map + sb_entries_offset, -1,
			       (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset);
		}
	}

	wc->tree = RB_ROOT;
	INIT_LIST_HEAD(&wc->lru);
	if (WC_MODE_SORT_FREELIST(wc)) {
		wc->freetree = RB_ROOT;
		wc->current_free = NULL;
	} else {
		INIT_LIST_HEAD(&wc->freelist);
	}
	wc->freelist_size = 0;

	r = memcpy_mcsafe(&sb_seq_count, &sb(wc)->seq_count, sizeof(uint64_t));
	if (r) {
		writecache_error(wc, r, "hardware memory error when reading superblock: %d", r);
		sb_seq_count = cpu_to_le64(0);
	}
	wc->seq_count = le64_to_cpu(sb_seq_count);

#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		struct wc_memory_entry wme;
		if (writecache_has_error(wc)) {
			e->original_sector = -1;
			e->seq_count = -1;
			continue;
		}
		r = memcpy_mcsafe(&wme, memory_entry(wc, e), sizeof(struct wc_memory_entry));
		if (r) {
			writecache_error(wc, r, "hardware memory error when reading metadata entry %lu: %d",
					 (unsigned long)b, r);
			e->original_sector = -1;
			e->seq_count = -1;
		} else {
			e->original_sector = le64_to_cpu(wme.original_sector);
			e->seq_count = le64_to_cpu(wme.seq_count);
		}
		cond_resched();
	}
#endif
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		if (!writecache_entry_is_committed(wc, e)) {
			if (read_seq_count(wc, e) != -1) {
erase_this:
				clear_seq_count(wc, e);
				need_flush = true;
			}
			writecache_add_to_freelist(wc, e);
		} else {
			struct wc_entry *old;

			old = writecache_find_entry(wc, read_original_sector(wc, e), 0);
			if (!old) {
				writecache_insert_entry(wc, e);
			} else {
				if (read_seq_count(wc, old) == read_seq_count(wc, e)) {
					writecache_error(wc, -EINVAL,
						 "two identical entries, position %llu, sector %llu, sequence %llu",
						 (unsigned long long)b, (unsigned long long)read_original_sector(wc, e),
						 (unsigned long long)read_seq_count(wc, e));
				}
				if (read_seq_count(wc, old) > read_seq_count(wc, e)) {
					goto erase_this;
				} else {
					writecache_free_entry(wc, old);
					writecache_insert_entry(wc, e);
					need_flush = true;
				}
			}
		}
		cond_resched();
	}

	if (need_flush) {
		writecache_flush_all_metadata(wc);
		writecache_commit_flushed(wc, false);
	}

	writecache_verify_watermark(wc);

	if (wc->max_age != MAX_AGE_UNSPECIFIED)
		mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);

	wc_unlock(wc);
}

static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	if (dm_suspended(wc->ti)) {
		wc_unlock(wc);
		return -EBUSY;
	}
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return -EIO;
	}

	writecache_flush(wc);
	wc->writeback_all++;
	queue_work(wc->writeback_wq, &wc->writeback_work);
	wc_unlock(wc);

	flush_workqueue(wc->writeback_wq);

	wc_lock(wc);
	wc->writeback_all--;
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return -EIO;
	}
	wc_unlock(wc);

	return 0;
}

static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	wc->flush_on_suspend = true;
	wc_unlock(wc);

	return 0;
}

static void activate_cleaner(struct dm_writecache *wc)
{
	wc->flush_on_suspend = true;
	wc->cleaner = true;
	wc->freelist_high_watermark = wc->n_blocks;
	wc->freelist_low_watermark = wc->n_blocks;
}

static int process_cleaner_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	activate_cleaner(wc);
	if (!dm_suspended(wc->ti))
		writecache_verify_watermark(wc);
	wc_unlock(wc);

	return 0;
}

static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
			      char *result, unsigned maxlen)
{
	int r = -EINVAL;
	struct dm_writecache *wc = ti->private;

	if (!strcasecmp(argv[0], "flush"))
		r = process_flush_mesg(argc, argv, wc);
	else if (!strcasecmp(argv[0], "flush_on_suspend"))
		r = process_flush_on_suspend_mesg(argc, argv, wc);
	else if (!strcasecmp(argv[0], "cleaner"))
		r = process_cleaner_mesg(argc, argv, wc);
	else
		DMERR("unrecognised message received: %s", argv[0]);

	return r;
}

static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
{
	void *buf;
	unsigned long flags;
	unsigned size;
	int rw = bio_data_dir(bio);
	unsigned remaining_size = wc->block_size;

	do {
		struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
		buf = bvec_kmap_irq(&bv, &flags);
		size = bv.bv_len;
		if (unlikely(size > remaining_size))
			size = remaining_size;

		if (rw == READ) {
			int r;
			r = memcpy_mcsafe(buf, data, size);
			flush_dcache_page(bio_page(bio));
			if (unlikely(r)) {
				writecache_error(wc, r, "hardware memory error when reading data: %d", r);
				bio->bi_status = BLK_STS_IOERR;
			}
		} else {
			flush_dcache_page(bio_page(bio));
			memcpy_flushcache(data, buf, size);
		}

		bvec_kunmap_irq(buf, &flags);

		data = (char *)data + size;
		remaining_size -= size;
		bio_advance(bio, size);
	} while (unlikely(remaining_size));
}

static int writecache_flush_thread(void *data)
{
	struct dm_writecache *wc = data;

	while (1) {
		struct bio *bio;

		wc_lock(wc);
		bio = bio_list_pop(&wc->flush_list);
		if (!bio) {
			set_current_state(TASK_INTERRUPTIBLE);
			wc_unlock(wc);

			if (unlikely(kthread_should_stop())) {
				set_current_state(TASK_RUNNING);
				break;
			}

			schedule();
			continue;
		}

		if (bio_op(bio) == REQ_OP_DISCARD) {
			writecache_discard(wc, bio->bi_iter.bi_sector,
					   bio_end_sector(bio));
			wc_unlock(wc);
			bio_set_dev(bio, wc->dev->bdev);
			generic_make_request(bio);
		} else {
			writecache_flush(wc);
			wc_unlock(wc);
			if (writecache_has_error(wc))
				bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		}
	}

	return 0;
}

static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio)
{
	if (bio_list_empty(&wc->flush_list))
		wake_up_process(wc->flush_thread);
	bio_list_add(&wc->flush_list, bio);
}
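
/*
 * Bio routing in writecache_map(), in order: REQ_PREFLUSH commits the cache
 * (offloaded to the flush thread in SSD mode); misaligned I/O is rejected;
 * discards invalidate matching cache entries; reads are served from the
 * cache on a hit and from the origin otherwise; writes allocate (or
 * overwrite) cache entries and fall back to the origin only when the cache
 * is full of committed data.
 */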
static int writecache_map(struct dm_target *ti, struct bio *bio)
{
	struct wc_entry *e;
	struct dm_writecache *wc = ti->private;

	bio->bi_private = NULL;

	wc_lock(wc);

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		if (writecache_has_error(wc))
			goto unlock_error;
		if (WC_MODE_PMEM(wc)) {
			writecache_flush(wc);
			if (writecache_has_error(wc))
				goto unlock_error;
			goto unlock_submit;
		} else {
			writecache_offload_bio(wc, bio);
			goto unlock_return;
		}
	}

	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
				(wc->block_size / 512 - 1)) != 0)) {
		DMERR("I/O is not aligned, sector %llu, size %u, block size %u",
		      (unsigned long long)bio->bi_iter.bi_sector,
		      bio->bi_iter.bi_size, wc->block_size);
		goto unlock_error;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		if (writecache_has_error(wc))
			goto unlock_error;
		if (WC_MODE_PMEM(wc)) {
			writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio));
			goto unlock_remap_origin;
		} else {
			writecache_offload_bio(wc, bio);
			goto unlock_return;
		}
	}

	if (bio_data_dir(bio) == READ) {
read_next_block:
		e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
		if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
			if (WC_MODE_PMEM(wc)) {
				bio_copy_block(wc, bio, memory_data(wc, e));
				if (bio->bi_iter.bi_size)
					goto read_next_block;
				goto unlock_submit;
			} else {
				dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
				bio_set_dev(bio, wc->ssd_dev->bdev);
				bio->bi_iter.bi_sector = cache_sector(wc, e);
				if (!writecache_entry_is_committed(wc, e))
					writecache_wait_for_ios(wc, WRITE);
				goto unlock_remap;
			}
		} else {
			if (e) {
				sector_t next_boundary =
					read_original_sector(wc, e) - bio->bi_iter.bi_sector;
				if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
					dm_accept_partial_bio(bio, next_boundary);
				}
			}
			goto unlock_remap_origin;
		}
	} else {
		do {
			bool found_entry = false;
			if (writecache_has_error(wc))
				goto unlock_error;
			e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
			if (e) {
				if (!writecache_entry_is_committed(wc, e))
					goto bio_copy;
				if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
					wc->overwrote_committed = true;
					goto bio_copy;
				}
				found_entry = true;
			} else {
				if (unlikely(wc->cleaner))
					goto direct_write;
			}
			e = writecache_pop_from_freelist(wc, (sector_t)-1);
			if (unlikely(!e)) {
				if (!found_entry) {
direct_write:
					e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
					if (e) {
						sector_t next_boundary = read_original_sector(wc, e) - bio->bi_iter.bi_sector;
						BUG_ON(!next_boundary);
						if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
							dm_accept_partial_bio(bio, next_boundary);
						}
					}
					goto unlock_remap_origin;
				}
				writecache_wait_on_freelist(wc);
				continue;
			}
			write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
			writecache_insert_entry(wc, e);
			wc->uncommitted_blocks++;
bio_copy:
			if (WC_MODE_PMEM(wc)) {
				bio_copy_block(wc, bio, memory_data(wc, e));
			} else {
				unsigned bio_size = wc->block_size;
				sector_t start_cache_sec = cache_sector(wc, e);
				sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);

				while (bio_size < bio->bi_iter.bi_size) {
					struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
					if (!f)
						break;
					write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
									(bio_size >> SECTOR_SHIFT), wc->seq_count);
					writecache_insert_entry(wc, f);
					wc->uncommitted_blocks++;
					bio_size += wc->block_size;
					current_cache_sec += wc->block_size >> SECTOR_SHIFT;
				}

				bio_set_dev(bio, wc->ssd_dev->bdev);
				bio->bi_iter.bi_sector = start_cache_sec;
				dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);

				if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
					wc->uncommitted_blocks = 0;
					queue_work(wc->writeback_wq, &wc->flush_work);
				} else {
					writecache_schedule_autocommit(wc);
				}
				goto unlock_remap;
			}
		} while (bio->bi_iter.bi_size);

		if (unlikely(bio->bi_opf & REQ_FUA ||
			     wc->uncommitted_blocks >= wc->autocommit_blocks))
			writecache_flush(wc);
		else
			writecache_schedule_autocommit(wc);
		goto unlock_submit;
	}

unlock_remap_origin:
	bio_set_dev(bio, wc->dev->bdev);
	wc_unlock(wc);
	return DM_MAPIO_REMAPPED;

unlock_remap:
	/* make sure that writecache_end_io decrements bio_in_progress: */
	bio->bi_private = (void *)1;
	atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);
	wc_unlock(wc);
	return DM_MAPIO_REMAPPED;

unlock_submit:
	wc_unlock(wc);
	bio_endio(bio);
	return DM_MAPIO_SUBMITTED;

unlock_return:
	wc_unlock(wc);
	return DM_MAPIO_SUBMITTED;

unlock_error:
	wc_unlock(wc);
	bio_io_error(bio);
	return DM_MAPIO_SUBMITTED;
}

static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
{
	struct dm_writecache *wc = ti->private;

	if (bio->bi_private != NULL) {
		int dir = bio_data_dir(bio);
		if (atomic_dec_and_test(&wc->bio_in_progress[dir]))
			if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir])))
				wake_up(&wc->bio_in_progress_wait[dir]);
	}
	return 0;
}

static int writecache_iterate_devices(struct dm_target *ti,
				      iterate_devices_callout_fn fn, void *data)
{
	struct dm_writecache *wc = ti->private;

	return fn(ti, wc->dev, 0, ti->len, data);
}

static void writecache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_writecache *wc = ti->private;

	if (limits->logical_block_size < wc->block_size)
		limits->logical_block_size = wc->block_size;

	if (limits->physical_block_size < wc->block_size)
		limits->physical_block_size = wc->block_size;

	if (limits->io_min < wc->block_size)
		limits->io_min = wc->block_size;
}

static void writecache_writeback_endio(struct bio *bio)
{
	struct writeback_struct *wb = container_of(bio, struct writeback_struct, bio);
	struct dm_writecache *wc = wb->wc;
	unsigned long flags;

	raw_spin_lock_irqsave(&wc->endio_list_lock, flags);
	if (unlikely(list_empty(&wc->endio_list)))
		wake_up_process(wc->endio_thread);
	list_add_tail(&wb->endio_entry, &wc->endio_list);
	raw_spin_unlock_irqrestore(&wc->endio_list_lock, flags);
}

static void writecache_copy_endio(int read_err, unsigned long write_err, void *ptr)
{
	struct copy_struct *c = ptr;
	struct dm_writecache *wc = c->wc;

	c->error = likely(!(read_err | write_err)) ? 0 : -EIO;

	raw_spin_lock_irq(&wc->endio_list_lock);
	if (unlikely(list_empty(&wc->endio_list)))
		wake_up_process(wc->endio_thread);
	list_add_tail(&c->endio_entry, &wc->endio_list);
	raw_spin_unlock_irq(&wc->endio_list_lock);
}

static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *list)
{
	unsigned i;
	struct writeback_struct *wb;
	struct wc_entry *e;
	unsigned long n_walked = 0;

	do {
		wb = list_entry(list->next, struct writeback_struct, endio_entry);
		list_del(&wb->endio_entry);

		if (unlikely(wb->bio.bi_status != BLK_STS_OK))
			writecache_error(wc, blk_status_to_errno(wb->bio.bi_status),
					"write error %d", wb->bio.bi_status);
		i = 0;
		do {
			e = wb->wc_list[i];
			BUG_ON(!e->write_in_progress);
			e->write_in_progress = false;
			INIT_LIST_HEAD(&e->lru);
			if (!writecache_has_error(wc))
				writecache_free_entry(wc, e);
			BUG_ON(!wc->writeback_size);
			wc->writeback_size--;
			n_walked++;
			if (unlikely(n_walked >= ENDIO_LATENCY)) {
				writecache_commit_flushed(wc, false);
				wc_unlock(wc);
				wc_lock(wc);
				n_walked = 0;
			}
		} while (++i < wb->wc_list_n);

		if (wb->wc_list != wb->wc_list_inline)
			kfree(wb->wc_list);
		bio_put(&wb->bio);
	} while (!list_empty(list));
}

static void __writecache_endio_ssd(struct dm_writecache *wc, struct list_head *list)
{
	struct copy_struct *c;
	struct wc_entry *e;

	do {
		c = list_entry(list->next, struct copy_struct, endio_entry);
		list_del(&c->endio_entry);

		if (unlikely(c->error))
			writecache_error(wc, c->error, "copy error");

		e = c->e;
		do {
			BUG_ON(!e->write_in_progress);
			e->write_in_progress = false;
			INIT_LIST_HEAD(&e->lru);
			if (!writecache_has_error(wc))
				writecache_free_entry(wc, e);

			BUG_ON(!wc->writeback_size);
			wc->writeback_size--;
			e++;
		} while (--c->n_entries);
		mempool_free(c, &wc->copy_pool);
	} while (!list_empty(list));
}

static int writecache_endio_thread(void *data)
{
	struct dm_writecache *wc = data;

	while (1) {
		struct list_head list;

		raw_spin_lock_irq(&wc->endio_list_lock);
		if (!list_empty(&wc->endio_list))
			goto pop_from_list;
		set_current_state(TASK_INTERRUPTIBLE);
		raw_spin_unlock_irq(&wc->endio_list_lock);

		if (unlikely(kthread_should_stop())) {
			set_current_state(TASK_RUNNING);
			break;
		}

		schedule();

		continue;

pop_from_list:
		list = wc->endio_list;
		list.next->prev = list.prev->next = &list;
		INIT_LIST_HEAD(&wc->endio_list);
		raw_spin_unlock_irq(&wc->endio_list_lock);

		if (!WC_MODE_FUA(wc))
			writecache_disk_flush(wc, wc->dev);

		wc_lock(wc);

		if (WC_MODE_PMEM(wc)) {
			__writecache_endio_pmem(wc, &list);
		} else {
			__writecache_endio_ssd(wc, &list);
			writecache_wait_for_ios(wc, READ);
		}

		writecache_commit_flushed(wc, false);

		wc_unlock(wc);
	}

	return 0;
}
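
/*
 * Writeback proper: pmem mode builds bios that point straight at the
 * persistent memory pages (wc_add_block() below), while SSD mode copies
 * cache blocks back to the origin with dm-kcopyd.  Runs of entries that are
 * contiguous on both devices were grouped by writecache_writeback() via
 * wc_list_contiguous.
 */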
static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t gfp)
{
	struct dm_writecache *wc = wb->wc;
	unsigned block_size = wc->block_size;
	void *address = memory_data(wc, e);

	persistent_memory_flush_cache(address, block_size);
	return bio_add_page(&wb->bio, persistent_memory_page(address),
			    block_size, persistent_memory_page_offset(address)) != 0;
}

struct writeback_list {
	struct list_head list;
	size_t size;
};

static void __writeback_throttle(struct dm_writecache *wc, struct writeback_list *wbl)
{
	if (unlikely(wc->max_writeback_jobs)) {
		if (READ_ONCE(wc->writeback_size) - wbl->size >= wc->max_writeback_jobs) {
			wc_lock(wc);
			while (wc->writeback_size - wbl->size >= wc->max_writeback_jobs)
				writecache_wait_on_freelist(wc);
			wc_unlock(wc);
		}
	}
	cond_resched();
}

static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeback_list *wbl)
{
	struct wc_entry *e, *f;
	struct bio *bio;
	struct writeback_struct *wb;
	unsigned max_pages;

	while (wbl->size) {
		wbl->size--;
		e = container_of(wbl->list.prev, struct wc_entry, lru);
		list_del(&e->lru);

		max_pages = e->wc_list_contiguous;

		bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
		wb = container_of(bio, struct writeback_struct, bio);
		wb->wc = wc;
		bio->bi_end_io = writecache_writeback_endio;
		bio_set_dev(bio, wc->dev->bdev);
		bio->bi_iter.bi_sector = read_original_sector(wc, e);
		if (max_pages <= WB_LIST_INLINE ||
		    unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
							   GFP_NOIO | __GFP_NORETRY |
							   __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
			wb->wc_list = wb->wc_list_inline;
			max_pages = WB_LIST_INLINE;
		}

		BUG_ON(!wc_add_block(wb, e, GFP_NOIO));

		wb->wc_list[0] = e;
		wb->wc_list_n = 1;

		while (wbl->size && wb->wc_list_n < max_pages) {
			f = container_of(wbl->list.prev, struct wc_entry, lru);
			if (read_original_sector(wc, f) !=
			    read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
				break;
			if (!wc_add_block(wb, f, GFP_NOWAIT | __GFP_NOWARN))
				break;
			wbl->size--;
			list_del(&f->lru);
			wb->wc_list[wb->wc_list_n++] = f;
			e = f;
		}
		bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
		if (writecache_has_error(wc)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		} else {
			submit_bio(bio);
		}

		__writeback_throttle(wc, wbl);
	}
}

static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writeback_list *wbl)
{
	struct wc_entry *e, *f;
	struct dm_io_region from, to;
	struct copy_struct *c;

	while (wbl->size) {
		unsigned n_sectors;

		wbl->size--;
		e = container_of(wbl->list.prev, struct wc_entry, lru);
		list_del(&e->lru);

		n_sectors = e->wc_list_contiguous << (wc->block_size_bits - SECTOR_SHIFT);

		from.bdev = wc->ssd_dev->bdev;
		from.sector = cache_sector(wc, e);
		from.count = n_sectors;
		to.bdev = wc->dev->bdev;
		to.sector = read_original_sector(wc, e);
		to.count = n_sectors;

		c = mempool_alloc(&wc->copy_pool, GFP_NOIO);
		c->wc = wc;
		c->e = e;
		c->n_entries = e->wc_list_contiguous;

		while ((n_sectors -= wc->block_size >> SECTOR_SHIFT)) {
			wbl->size--;
			f = container_of(wbl->list.prev, struct wc_entry, lru);
			BUG_ON(f != e + 1);
			list_del(&f->lru);
			e = f;
		}

		dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);

		__writeback_throttle(wc, wbl);
	}
}

static void writecache_writeback(struct work_struct *work)
{
	struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work);
	struct blk_plug plug;
	struct wc_entry *f, *uninitialized_var(g), *e = NULL;
	struct rb_node *node, *next_node;
	struct list_head skipped;
	struct writeback_list wbl;
	unsigned long n_walked;

	wc_lock(wc);
restart:
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return;
	}

	if (unlikely(wc->writeback_all)) {
		if (writecache_wait_for_writeback(wc))
			goto restart;
	}

	if (wc->overwrote_committed) {
		writecache_wait_for_ios(wc, WRITE);
	}

	n_walked = 0;
	INIT_LIST_HEAD(&skipped);
	INIT_LIST_HEAD(&wbl.list);
	wbl.size = 0;
	while (!list_empty(&wc->lru) &&
	       (wc->writeback_all ||
		wc->freelist_size + wc->writeback_size <= wc->freelist_low_watermark ||
		(jiffies - container_of(wc->lru.prev, struct wc_entry, lru)->age >=
		 wc->max_age - wc->max_age / MAX_AGE_DIV))) {

		n_walked++;
		if (unlikely(n_walked > WRITEBACK_LATENCY) &&
		    likely(!wc->writeback_all) && likely(!dm_suspended(wc->ti))) {
			queue_work(wc->writeback_wq, &wc->writeback_work);
			break;
		}

		if (unlikely(wc->writeback_all)) {
			if (unlikely(!e)) {
				writecache_flush(wc);
				e = container_of(rb_first(&wc->tree), struct wc_entry, rb_node);
			} else
				e = g;
		} else
			e = container_of(wc->lru.prev, struct wc_entry, lru);
		BUG_ON(e->write_in_progress);
		if (unlikely(!writecache_entry_is_committed(wc, e))) {
			writecache_flush(wc);
		}
		node = rb_prev(&e->rb_node);
		if (node) {
			f = container_of(node, struct wc_entry, rb_node);
			if (unlikely(read_original_sector(wc, f) ==
				     read_original_sector(wc, e))) {
				BUG_ON(!f->write_in_progress);
				list_del(&e->lru);
				list_add(&e->lru, &skipped);
				cond_resched();
				continue;
			}
		}
		wc->writeback_size++;
		list_del(&e->lru);
		list_add(&e->lru, &wbl.list);
		wbl.size++;
		e->write_in_progress = true;
		e->wc_list_contiguous = 1;

		f = e;

		while (1) {
			next_node = rb_next(&f->rb_node);
			if (unlikely(!next_node))
				break;
			g = container_of(next_node, struct wc_entry, rb_node);
			if (unlikely(read_original_sector(wc, g) ==
			    read_original_sector(wc, f))) {
				f = g;
				continue;
			}
			if (read_original_sector(wc, g) !=
			    read_original_sector(wc, f) + (wc->block_size >> SECTOR_SHIFT))
				break;
			if (unlikely(g->write_in_progress))
				break;
			if (unlikely(!writecache_entry_is_committed(wc, g)))
				break;

			if (!WC_MODE_PMEM(wc)) {
				if (g != f + 1)
					break;
			}

			n_walked++;
			//if (unlikely(n_walked > WRITEBACK_LATENCY) && likely(!wc->writeback_all))
			//	break;

			wc->writeback_size++;
			list_del(&g->lru);
			list_add(&g->lru, &wbl.list);
			wbl.size++;
			g->write_in_progress = true;
			g->wc_list_contiguous = BIO_MAX_PAGES;
			f = g;
			e->wc_list_contiguous++;
			if (unlikely(e->wc_list_contiguous == BIO_MAX_PAGES)) {
				if (unlikely(wc->writeback_all)) {
					next_node = rb_next(&f->rb_node);
					if (likely(next_node))
						g = container_of(next_node, struct wc_entry, rb_node);
				}
				break;
			}
		}
		cond_resched();
	}

	if (!list_empty(&skipped)) {
		list_splice_tail(&skipped, &wc->lru);
		/*
		 * If we didn't do any progress, we must wait until some
		 * writeback finishes to avoid burning CPU in a loop
		 */
		if (unlikely(!wbl.size))
			writecache_wait_for_writeback(wc);
	}

	wc_unlock(wc);

	blk_start_plug(&plug);

	if (WC_MODE_PMEM(wc))
		__writecache_writeback_pmem(wc, &wbl);
	else
		__writecache_writeback_ssd(wc, &wbl);

	blk_finish_plug(&plug);

	if (unlikely(wc->writeback_all)) {
		wc_lock(wc);
		while (writecache_wait_for_writeback(wc));
		wc_unlock(wc);
	}
}

static int calculate_memory_size(uint64_t device_size, unsigned block_size,
				 size_t *n_blocks_p, size_t *n_metadata_blocks_p)
{
	uint64_t n_blocks, offset;
	struct wc_entry e;

	n_blocks = device_size;
	do_div(n_blocks, block_size + sizeof(struct wc_memory_entry));

	while (1) {
		if (!n_blocks)
			return -ENOSPC;
		/* Verify the following entries[n_blocks] won't overflow */
		if (n_blocks >= ((size_t)-sizeof(struct wc_memory_superblock) /
				 sizeof(struct wc_memory_entry)))
			return -EFBIG;
		offset = offsetof(struct wc_memory_superblock, entries[n_blocks]);
		offset = (offset + block_size - 1) & ~(uint64_t)(block_size - 1);
		if (offset + n_blocks * block_size <= device_size)
			break;
		n_blocks--;
	}

	/* check if the bit field overflows */
	e.index = n_blocks;
	if (e.index != n_blocks)
		return -EFBIG;

	if (n_blocks_p)
		*n_blocks_p = n_blocks;
	if (n_metadata_blocks_p)
		*n_metadata_blocks_p = offset >> __ffs(block_size);
	return 0;
}
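
/*
 * Worked example for calculate_memory_size() (illustrative numbers): with a
 * 1 GiB cache device and 4096-byte blocks, the first estimate is
 * n_blocks = 2^30 / (4096 + 16) = 261123; the loop then shrinks n_blocks
 * until superblock + entry array (rounded up to a block boundary) +
 * n_blocks data blocks all fit on the device.
 */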
static int init_memory(struct dm_writecache *wc)
{
	size_t b;
	int r;

	r = calculate_memory_size(wc->memory_map_size, wc->block_size, &wc->n_blocks, NULL);
	if (r)
		return r;

	r = writecache_alloc_entries(wc);
	if (r)
		return r;

	for (b = 0; b < ARRAY_SIZE(sb(wc)->padding); b++)
		pmem_assign(sb(wc)->padding[b], cpu_to_le64(0));
	pmem_assign(sb(wc)->version, cpu_to_le32(MEMORY_SUPERBLOCK_VERSION));
	pmem_assign(sb(wc)->block_size, cpu_to_le32(wc->block_size));
	pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks));
	pmem_assign(sb(wc)->seq_count, cpu_to_le64(0));

	for (b = 0; b < wc->n_blocks; b++) {
		write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);
		cond_resched();
	}

	writecache_flush_all_metadata(wc);
	writecache_commit_flushed(wc, false);
	pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC));
	writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic);
	writecache_commit_flushed(wc, false);

	return 0;
}

static void writecache_dtr(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;

	if (!wc)
		return;

	if (wc->endio_thread)
		kthread_stop(wc->endio_thread);

	if (wc->flush_thread)
		kthread_stop(wc->flush_thread);

	bioset_exit(&wc->bio_set);

	mempool_exit(&wc->copy_pool);

	if (wc->writeback_wq)
		destroy_workqueue(wc->writeback_wq);

	if (wc->dev)
		dm_put_device(ti, wc->dev);

	if (wc->ssd_dev)
		dm_put_device(ti, wc->ssd_dev);

	if (wc->entries)
		vfree(wc->entries);

	if (wc->memory_map) {
		if (WC_MODE_PMEM(wc))
			persistent_memory_release(wc);
		else
			vfree(wc->memory_map);
	}

	if (wc->dm_kcopyd)
		dm_kcopyd_client_destroy(wc->dm_kcopyd);

	if (wc->dm_io)
		dm_io_client_destroy(wc->dm_io);

	if (wc->dirty_bitmap)
		vfree(wc->dirty_bitmap);

	kfree(wc);
}

static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	struct dm_writecache *wc;
	struct dm_arg_set as;
	const char *string;
	unsigned opt_params;
	size_t offset, data_size;
	int i, r;
	char dummy;
	int high_wm_percent = HIGH_WATERMARK;
	int low_wm_percent = LOW_WATERMARK;
	uint64_t x;
	struct wc_memory_superblock s;

	static struct dm_arg _args[] = {
		{0, 10, "Invalid number of feature args"},
	};

	as.argc = argc;
	as.argv = argv;

	wc = kzalloc(sizeof(struct dm_writecache), GFP_KERNEL);
	if (!wc) {
		ti->error = "Cannot allocate writecache structure";
		r = -ENOMEM;
		goto bad;
	}
	ti->private = wc;
	wc->ti = ti;

	mutex_init(&wc->lock);
	wc->max_age = MAX_AGE_UNSPECIFIED;
	writecache_poison_lists(wc);
	init_waitqueue_head(&wc->freelist_wait);
	timer_setup(&wc->autocommit_timer, writecache_autocommit_timer, 0);
	timer_setup(&wc->max_age_timer, writecache_max_age_timer, 0);

	for (i = 0; i < 2; i++) {
		atomic_set(&wc->bio_in_progress[i], 0);
		init_waitqueue_head(&wc->bio_in_progress_wait[i]);
	}

	wc->dm_io = dm_io_client_create();
	if (IS_ERR(wc->dm_io)) {
		r = PTR_ERR(wc->dm_io);
		ti->error = "Unable to allocate dm-io client";
		wc->dm_io = NULL;
		goto bad;
	}

	wc->writeback_wq = alloc_workqueue("writecache-writeback", WQ_MEM_RECLAIM, 1);
	if (!wc->writeback_wq) {
		r = -ENOMEM;
		ti->error = "Could not allocate writeback workqueue";
		goto bad;
	}
	INIT_WORK(&wc->writeback_work, writecache_writeback);
	INIT_WORK(&wc->flush_work, writecache_flush_work);

	raw_spin_lock_init(&wc->endio_list_lock);
	INIT_LIST_HEAD(&wc->endio_list);
	wc->endio_thread = kthread_create(writecache_endio_thread, wc, "writecache_endio");
	if (IS_ERR(wc->endio_thread)) {
		r = PTR_ERR(wc->endio_thread);
		wc->endio_thread = NULL;
		ti->error = "Couldn't spawn endio thread";
		goto bad;
	}
	wake_up_process(wc->endio_thread);

	/*
	 * Parse the mode (pmem or ssd)
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;

	if (!strcasecmp(string, "s")) {
		wc->pmem_mode = false;
	} else if (!strcasecmp(string, "p")) {
#ifdef DM_WRITECACHE_HAS_PMEM
		wc->pmem_mode = true;
		wc->writeback_fua = true;
#else
		/*
		 * If the architecture doesn't support persistent memory or
		 * the kernel doesn't support any DAX drivers, this driver can
		 * only be used in SSD-only mode.
		 */
		r = -EOPNOTSUPP;
		ti->error = "Persistent memory or DAX not supported on this system";
		goto bad;
#endif
	} else {
		goto bad_arguments;
	}

	if (WC_MODE_PMEM(wc)) {
		r = bioset_init(&wc->bio_set, BIO_POOL_SIZE,
				offsetof(struct writeback_struct, bio),
				BIOSET_NEED_BVECS);
		if (r) {
			ti->error = "Could not allocate bio set";
			goto bad;
		}
	} else {
		r = mempool_init_kmalloc_pool(&wc->copy_pool, 1, sizeof(struct copy_struct));
		if (r) {
			ti->error = "Could not allocate mempool";
			goto bad;
		}
	}

	/*
	 * Parse the origin data device
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;
	r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->dev);
	if (r) {
		ti->error = "Origin data device lookup failed";
		goto bad;
	}

	/*
	 * Parse cache data device (be it pmem or ssd)
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;

	r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->ssd_dev);
	if (r) {
		ti->error = "Cache data device lookup failed";
		goto bad;
	}
	wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);

	/*
	 * Parse the cache block size
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;
	if (sscanf(string, "%u%c", &wc->block_size, &dummy) != 1 ||
	    wc->block_size < 512 || wc->block_size > PAGE_SIZE ||
	    (wc->block_size & (wc->block_size - 1))) {
		r = -EINVAL;
		ti->error = "Invalid block size";
		goto bad;
	}
	if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) ||
	    wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) {
		r = -EINVAL;
		ti->error = "Block size is smaller than device logical block size";
		goto bad;
	}
	wc->block_size_bits = __ffs(wc->block_size);

	wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
	wc->autocommit_blocks = !WC_MODE_PMEM(wc) ? AUTOCOMMIT_BLOCKS_SSD : AUTOCOMMIT_BLOCKS_PMEM;
	wc->autocommit_jiffies = msecs_to_jiffies(AUTOCOMMIT_MSEC);

	/*
	 * Parse optional arguments
	 */
	r = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
	if (r)
		goto bad;

	while (opt_params) {
		string = dm_shift_arg(&as), opt_params--;
		if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
			unsigned long long start_sector;
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
				goto invalid_optional;
			wc->start_sector = start_sector;
			if (wc->start_sector != start_sector ||
			    wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
				goto invalid_optional;
		} else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1)
				goto invalid_optional;
			if (high_wm_percent < 0 || high_wm_percent > 100)
				goto invalid_optional;
			wc->high_wm_percent_set = true;
		} else if (!strcasecmp(string, "low_watermark") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%d%c", &low_wm_percent, &dummy) != 1)
				goto invalid_optional;
			if (low_wm_percent < 0 || low_wm_percent > 100)
				goto invalid_optional;
			wc->low_wm_percent_set = true;
		} else if (!strcasecmp(string, "writeback_jobs") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &wc->max_writeback_jobs, &dummy) != 1)
				goto invalid_optional;
			wc->max_writeback_jobs_set = true;
		} else if (!strcasecmp(string, "autocommit_blocks") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &wc->autocommit_blocks, &dummy) != 1)
				goto invalid_optional;
			wc->autocommit_blocks_set = true;
		} else if (!strcasecmp(string, "autocommit_time") && opt_params >= 1) {
			unsigned autocommit_msecs;
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &autocommit_msecs, &dummy) != 1)
				goto invalid_optional;
			if (autocommit_msecs > 3600000)
				goto invalid_optional;
			wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs);
			wc->autocommit_time_set = true;
		} else if (!strcasecmp(string, "max_age") && opt_params >= 1) {
			unsigned max_age_msecs;
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &max_age_msecs, &dummy) != 1)
				goto invalid_optional;
			if (max_age_msecs > 86400000)
				goto invalid_optional;
			wc->max_age = msecs_to_jiffies(max_age_msecs);
		} else if (!strcasecmp(string, "cleaner")) {
			wc->cleaner = true;
		} else if (!strcasecmp(string, "fua")) {
			if (WC_MODE_PMEM(wc)) {
				wc->writeback_fua = true;
				wc->writeback_fua_set = true;
			} else goto invalid_optional;
		} else if (!strcasecmp(string, "nofua")) {
			if (WC_MODE_PMEM(wc)) {
				wc->writeback_fua = false;
				wc->writeback_fua_set = true;
			} else goto invalid_optional;
		} else {
invalid_optional:
			r = -EINVAL;
			ti->error = "Invalid optional argument";
			goto bad;
		}
	}

	if (high_wm_percent < low_wm_percent) {
		r = -EINVAL;
		ti->error = "High watermark must be greater than or equal to low watermark";
		goto bad;
	}

	if (WC_MODE_PMEM(wc)) {
		r = persistent_memory_claim(wc);
		if (r) {
			ti->error = "Unable to map persistent memory for cache";
			goto bad;
		}
	} else {
		size_t n_blocks, n_metadata_blocks;
		uint64_t n_bitmap_bits;

		wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT;

		bio_list_init(&wc->flush_list);
		wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush");
		if (IS_ERR(wc->flush_thread)) {
			r = PTR_ERR(wc->flush_thread);
			wc->flush_thread = NULL;
			ti->error = "Couldn't spawn flush thread";
			goto bad;
		}
		wake_up_process(wc->flush_thread);

		r = calculate_memory_size(wc->memory_map_size, wc->block_size,
					  &n_blocks, &n_metadata_blocks);
		if (r) {
			ti->error = "Invalid device size";
			goto bad;
		}

		n_bitmap_bits = (((uint64_t)n_metadata_blocks << wc->block_size_bits) +
				 BITMAP_GRANULARITY - 1) / BITMAP_GRANULARITY;
		/* this is limitation of test_bit functions */
		if (n_bitmap_bits > 1U << 31) {
			r = -EFBIG;
			ti->error = "Invalid device size";
			goto bad;
		}

		wc->memory_map = vmalloc(n_metadata_blocks << wc->block_size_bits);
		if (!wc->memory_map) {
			r = -ENOMEM;
			ti->error = "Unable to allocate memory for metadata";
			goto bad;
		}

		wc->dm_kcopyd = dm_kcopyd_client_create(&dm_kcopyd_throttle);
		if (IS_ERR(wc->dm_kcopyd)) {
			r = PTR_ERR(wc->dm_kcopyd);
			ti->error = "Unable to allocate dm-kcopyd client";
			wc->dm_kcopyd = NULL;
			goto bad;
		}

		wc->metadata_sectors = n_metadata_blocks << (wc->block_size_bits - SECTOR_SHIFT);
		wc->dirty_bitmap_size = (n_bitmap_bits + BITS_PER_LONG - 1) /
			BITS_PER_LONG * sizeof(unsigned long);
		wc->dirty_bitmap = vzalloc(wc->dirty_bitmap_size);
		if (!wc->dirty_bitmap) {
			r = -ENOMEM;
			ti->error = "Unable to allocate dirty bitmap";
			goto bad;
		}

		r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT);
		if (r) {
			ti->error = "Unable to read first block of metadata";
			goto bad;
		}
	}

	r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
	if (r) {
		ti->error = "Hardware memory error when reading superblock";
		goto bad;
	}
	if (!le32_to_cpu(s.magic) && !le32_to_cpu(s.version)) {
		r = init_memory(wc);
		if (r) {
			ti->error = "Unable to initialize device";
			goto bad;
		}
		r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
		if (r) {
			ti->error = "Hardware memory error when reading superblock";
			goto bad;
		}
	}

	if (le32_to_cpu(s.magic) != MEMORY_SUPERBLOCK_MAGIC) {
		ti->error = "Invalid magic in the superblock";
		r = -EINVAL;
		goto bad;
	}

	if (le32_to_cpu(s.version) != MEMORY_SUPERBLOCK_VERSION) {
		ti->error = "Invalid version in the superblock";
		r = -EINVAL;
		goto bad;
	}

	if (le32_to_cpu(s.block_size) != wc->block_size) {
		ti->error = "Block size does not match superblock";
		r = -EINVAL;
		goto bad;
	}

	wc->n_blocks = le64_to_cpu(s.n_blocks);

	offset = wc->n_blocks * sizeof(struct wc_memory_entry);
	if (offset / sizeof(struct wc_memory_entry) != le64_to_cpu(sb(wc)->n_blocks)) {
overflow:
		ti->error = "Overflow in size calculation";
		r = -EINVAL;
		goto bad;
	}
	offset += sizeof(struct wc_memory_superblock);
	if (offset < sizeof(struct wc_memory_superblock))
		goto overflow;
	offset = (offset + wc->block_size - 1) & ~(size_t)(wc->block_size - 1);
	data_size = wc->n_blocks * (size_t)wc->block_size;
	if (!offset || (data_size / wc->block_size != wc->n_blocks) ||
	    (offset + data_size < offset))
		goto overflow;
	if (offset + data_size > wc->memory_map_size) {
		ti->error = "Memory area is too small";
		r = -EINVAL;
		goto bad;
	}

	wc->metadata_sectors = offset >> SECTOR_SHIFT;
	wc->block_start = (char *)sb(wc) + offset;

	x = (uint64_t)wc->n_blocks * (100 - high_wm_percent);
	x += 50;
	do_div(x, 100);
	wc->freelist_high_watermark = x;
	x = (uint64_t)wc->n_blocks * (100 - low_wm_percent);
	x += 50;
	do_div(x, 100);
	wc->freelist_low_watermark = x;

	if (wc->cleaner)
		activate_cleaner(wc);

	r = writecache_alloc_entries(wc);
	if (r) {
		ti->error = "Cannot allocate memory";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->flush_supported = true;
	ti->num_discard_bios = 1;

	if (WC_MODE_PMEM(wc))
		persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);

	return 0;

bad_arguments:
	r = -EINVAL;
	ti->error = "Bad arguments";
bad:
	writecache_dtr(ti);
	return r;
}

static void writecache_status(struct dm_target *ti, status_type_t type,
			      unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_writecache *wc = ti->private;
	unsigned extra_args;
	unsigned sz = 0;
	uint64_t x;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%ld %llu %llu %llu", writecache_has_error(wc),
		       (unsigned long long)wc->n_blocks, (unsigned long long)wc->freelist_size,
		       (unsigned long long)wc->writeback_size);
		break;
	case STATUSTYPE_TABLE:
		DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 'p' : 's',
				wc->dev->name, wc->ssd_dev->name, wc->block_size);
		extra_args = 0;
		if (wc->start_sector)
			extra_args += 2;
		if (wc->high_wm_percent_set && !wc->cleaner)
			extra_args += 2;
		if (wc->low_wm_percent_set && !wc->cleaner)
			extra_args += 2;
		if (wc->max_writeback_jobs_set)
			extra_args += 2;
		if (wc->autocommit_blocks_set)
			extra_args += 2;
		if (wc->autocommit_time_set)
			extra_args += 2;
		if (wc->max_age != MAX_AGE_UNSPECIFIED)
			extra_args += 2;
		if (wc->cleaner)
			extra_args++;
		if (wc->writeback_fua_set)
			extra_args++;

		DMEMIT("%u", extra_args);
		if (wc->start_sector)
			DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector);
		if (wc->high_wm_percent_set && !wc->cleaner) {
			x = (uint64_t)wc->freelist_high_watermark * 100;
			x += wc->n_blocks / 2;
			do_div(x, (size_t)wc->n_blocks);
			DMEMIT(" high_watermark %u", 100 - (unsigned)x);
		}
		if (wc->low_wm_percent_set && !wc->cleaner) {
			x = (uint64_t)wc->freelist_low_watermark * 100;
			x += wc->n_blocks / 2;
			do_div(x, (size_t)wc->n_blocks);
			DMEMIT(" low_watermark %u", 100 - (unsigned)x);
		}
		if (wc->max_writeback_jobs_set)
			DMEMIT(" writeback_jobs %u", wc->max_writeback_jobs);
		if (wc->autocommit_blocks_set)
			DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks);
		if (wc->autocommit_time_set)
			DMEMIT(" autocommit_time %u", jiffies_to_msecs(wc->autocommit_jiffies));
		if (wc->max_age != MAX_AGE_UNSPECIFIED)
			DMEMIT(" max_age %u", jiffies_to_msecs(wc->max_age));
		if (wc->cleaner)
			DMEMIT(" cleaner");
		if (wc->writeback_fua_set)
			DMEMIT(" %sfua", wc->writeback_fua ? "" : "no");
		break;
	}
}

static struct target_type writecache_target = {
	.name			= "writecache",
	.version		= {1, 3, 0},
	.module			= THIS_MODULE,
	.ctr			= writecache_ctr,
	.dtr			= writecache_dtr,
	.status			= writecache_status,
	.postsuspend		= writecache_suspend,
	.resume			= writecache_resume,
	.message		= writecache_message,
	.map			= writecache_map,
	.end_io			= writecache_end_io,
	.iterate_devices	= writecache_iterate_devices,
	.io_hints		= writecache_io_hints,
};

static int __init dm_writecache_init(void)
{
	int r;

	r = dm_register_target(&writecache_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		return r;
	}

	return 0;
}

static void __exit dm_writecache_exit(void)
{
	dm_unregister_target(&writecache_target);
}

module_init(dm_writecache_init);
module_exit(dm_writecache_exit);

MODULE_DESCRIPTION(DM_NAME " writecache target");
MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");