// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/dax.h>
#include <linux/pfn_t.h>
#include <linux/libnvdimm.h>

#define DM_MSG_PREFIX "writecache"

#define HIGH_WATERMARK			50
#define LOW_WATERMARK			45
#define MAX_WRITEBACK_JOBS		0
#define ENDIO_LATENCY			16
#define WRITEBACK_LATENCY		64
#define AUTOCOMMIT_BLOCKS_SSD		65536
#define AUTOCOMMIT_BLOCKS_PMEM		64
#define AUTOCOMMIT_MSEC			1000
#define MAX_AGE_DIV			16
#define MAX_AGE_UNSPECIFIED		-1UL

#define BITMAP_GRANULARITY	65536
#if BITMAP_GRANULARITY < PAGE_SIZE
#undef BITMAP_GRANULARITY
#define BITMAP_GRANULARITY	PAGE_SIZE
#endif

#if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_DAX_DRIVER)
#define DM_WRITECACHE_HAS_PMEM
#endif

#ifdef DM_WRITECACHE_HAS_PMEM
#define pmem_assign(dest, src)					\
do {								\
	typeof(dest) uniq = (src);				\
	memcpy_flushcache(&(dest), &uniq, sizeof(dest));	\
} while (0)
#else
#define pmem_assign(dest, src)	((dest) = (src))
#endif

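/*
 * Note on pmem_assign(): in pmem mode the metadata lives in persistent
 * memory, so a plain store is not enough -- the value is staged in a local
 * temporary and copied with memcpy_flushcache() so that it reaches the
 * persistence domain. In SSD mode the macro degenerates to a plain
 * assignment and durability is provided by ssd_commit_flushed() instead.
 */
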
#if defined(__HAVE_ARCH_MEMCPY_MCSAFE) && defined(DM_WRITECACHE_HAS_PMEM)
#define DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
#endif

#define MEMORY_SUPERBLOCK_MAGIC		0x23489321
#define MEMORY_SUPERBLOCK_VERSION	1

struct wc_memory_entry {
	__le64 original_sector;
	__le64 seq_count;
};

struct wc_memory_superblock {
	union {
		struct {
			__le32 magic;
			__le32 version;
			__le32 block_size;
			__le32 pad;
			__le64 n_blocks;
			__le64 seq_count;
		};
		__le64 padding[32];
	};
	struct wc_memory_entry entries[0];
};

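/*
 * On-media layout of the cache device: the superblock above is followed by
 * the entries[] array (one wc_memory_entry per cache block), padded up to a
 * block boundary, and the cache data blocks start right after the metadata
 * -- see cache_sector() and calculate_memory_size() below.
 */
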
struct wc_entry {
	struct rb_node rb_node;
	struct list_head lru;
	unsigned short wc_list_contiguous;
	bool write_in_progress
#if BITS_PER_LONG == 64
		:1
#endif
	;
	unsigned long index
#if BITS_PER_LONG == 64
		:47
#endif
	;
	unsigned long age;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	uint64_t original_sector;
	uint64_t seq_count;
#endif
};

#ifdef DM_WRITECACHE_HAS_PMEM
#define WC_MODE_PMEM(wc)			((wc)->pmem_mode)
#define WC_MODE_FUA(wc)				((wc)->writeback_fua)
#else
#define WC_MODE_PMEM(wc)			false
#define WC_MODE_FUA(wc)				false
#endif
#define WC_MODE_SORT_FREELIST(wc)	(!WC_MODE_PMEM(wc))

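/*
 * The freelist is kept sorted (as an rbtree of free entries) only in SSD
 * mode: handing out the free entry with the lowest cache sector makes
 * consecutive writes land on consecutive SSD sectors, and lets
 * writecache_pop_from_freelist() honour an expected_sector hint when a
 * single bio spans several cache blocks. In pmem mode ordering does not
 * matter, so a plain list is used.
 */
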
struct dm_writecache {
	struct mutex lock;
	struct list_head lru;
	union {
		struct list_head freelist;
		struct {
			struct rb_root freetree;
			struct wc_entry *current_free;
		};
	};
	struct rb_root tree;

	size_t freelist_size;
	size_t writeback_size;
	size_t freelist_high_watermark;
	size_t freelist_low_watermark;
	unsigned long max_age;

	unsigned uncommitted_blocks;
	unsigned autocommit_blocks;
	unsigned max_writeback_jobs;

	int error;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	struct wait_queue_head freelist_wait;

	struct timer_list max_age_timer;

	atomic_t bio_in_progress[2];
	struct wait_queue_head bio_in_progress_wait[2];

	struct dm_target *ti;
	struct dm_dev *dev;
	struct dm_dev *ssd_dev;
	sector_t start_sector;
	void *memory_map;
	uint64_t memory_map_size;
	size_t metadata_sectors;
	size_t n_blocks;
	uint64_t seq_count;
	void *block_start;
	struct wc_entry *entries;
	unsigned block_size;
	unsigned char block_size_bits;

	bool pmem_mode:1;
	bool writeback_fua:1;

	bool overwrote_committed:1;
	bool memory_vmapped:1;

	bool high_wm_percent_set:1;
	bool low_wm_percent_set:1;
	bool max_writeback_jobs_set:1;
	bool autocommit_blocks_set:1;
	bool autocommit_time_set:1;
	bool writeback_fua_set:1;
	bool flush_on_suspend:1;
	bool cleaner:1;

	unsigned writeback_all;
	struct workqueue_struct *writeback_wq;
	struct work_struct writeback_work;
	struct work_struct flush_work;

	struct dm_io_client *dm_io;

	raw_spinlock_t endio_list_lock;
	struct list_head endio_list;
	struct task_struct *endio_thread;

	struct task_struct *flush_thread;
	struct bio_list flush_list;

	struct dm_kcopyd_client *dm_kcopyd;
	unsigned long *dirty_bitmap;
	unsigned dirty_bitmap_size;

	struct bio_set bio_set;
	mempool_t copy_pool;
};

#define WB_LIST_INLINE		16

struct writeback_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry **wc_list;
	unsigned wc_list_n;
	struct wc_entry *wc_list_inline[WB_LIST_INLINE];
	struct bio bio;
};

struct copy_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry *e;
	unsigned n_entries;
	int error;
};

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(dm_writecache_throttle,
					"A percentage of time allocated for data copying");

static void wc_lock(struct dm_writecache *wc)
{
	mutex_lock(&wc->lock);
}

static void wc_unlock(struct dm_writecache *wc)
{
	mutex_unlock(&wc->lock);
}

#ifdef DM_WRITECACHE_HAS_PMEM
static int persistent_memory_claim(struct dm_writecache *wc)
{
	int r;
	loff_t s;
	long p, da;
	pfn_t pfn;
	int id;
	struct page **pages;

	wc->memory_vmapped = false;

	if (!wc->ssd_dev->dax_dev) {
		r = -EOPNOTSUPP;
		goto err1;
	}
	s = wc->memory_map_size;
	p = s >> PAGE_SHIFT;
	if (!p) {
		r = -EINVAL;
		goto err1;
	}
	if (p != s >> PAGE_SHIFT) {
		r = -EOVERFLOW;
		goto err1;
	}

	id = dax_read_lock();

	da = dax_direct_access(wc->ssd_dev->dax_dev, 0, p, &wc->memory_map, &pfn);
	if (da < 0) {
		wc->memory_map = NULL;
		r = da;
		goto err2;
	}
	if (!pfn_t_has_page(pfn)) {
		wc->memory_map = NULL;
		r = -EOPNOTSUPP;
		goto err2;
	}
	if (da != p) {
		long i;
		wc->memory_map = NULL;
		pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
		if (!pages) {
			r = -ENOMEM;
			goto err2;
		}
		i = 0;
		do {
			long daa;
			daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i,
						NULL, &pfn);
			if (daa <= 0) {
				r = daa ? daa : -EINVAL;
				goto err3;
			}
			if (!pfn_t_has_page(pfn)) {
				r = -EOPNOTSUPP;
				goto err3;
			}
			while (daa-- && i < p) {
				pages[i++] = pfn_t_to_page(pfn);
				pfn.val++;
			}
		} while (i < p);
		wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
		if (!wc->memory_map) {
			r = -ENOMEM;
			goto err3;
		}
		kvfree(pages);
		wc->memory_vmapped = true;
	}

	dax_read_unlock(id);

	wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT;
	wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT;

	return 0;
err3:
	kvfree(pages);
err2:
	dax_read_unlock(id);
err1:
	return r;
}
#else
static int persistent_memory_claim(struct dm_writecache *wc)
{
	BUG();
	return -EOPNOTSUPP;
}
#endif

static void persistent_memory_release(struct dm_writecache *wc)
{
	if (wc->memory_vmapped)
		vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT));
}

static struct page *persistent_memory_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static unsigned persistent_memory_page_offset(void *addr)
{
	return (unsigned long)addr & (PAGE_SIZE - 1);
}

static void persistent_memory_flush_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		flush_kernel_vmap_range(ptr, size);
}

static void persistent_memory_invalidate_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		invalidate_kernel_vmap_range(ptr, size);
}

static struct wc_memory_superblock *sb(struct dm_writecache *wc)
{
	return wc->memory_map;
}

static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	return &sb(wc)->entries[e->index];
}

static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
{
	return (char *)wc->block_start + (e->index << wc->block_size_bits);
}

static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
{
	return wc->start_sector + wc->metadata_sectors +
		((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
}

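/*
 * Example of the layout arithmetic above: with a 4096-byte block size
 * (block_size_bits = 12, i.e. 8 sectors per block), start_sector = 0 and
 * metadata_sectors = 2048, the entry with index 3 maps to cache sector
 * 2048 + (3 << 3) = 2072 on the cache device.
 */
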
static uint64_t read_original_sector(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->original_sector;
#else
	return le64_to_cpu(memory_entry(wc, e)->original_sector);
#endif
}

static uint64_t read_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->seq_count;
#else
	return le64_to_cpu(memory_entry(wc, e)->seq_count);
#endif
}

static void clear_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->seq_count = -1;
#endif
	pmem_assign(memory_entry(wc, e)->seq_count, cpu_to_le64(-1));
}

static void write_original_sector_seq_count(struct dm_writecache *wc, struct wc_entry *e,
					    uint64_t original_sector, uint64_t seq_count)
{
	struct wc_memory_entry me;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->original_sector = original_sector;
	e->seq_count = seq_count;
#endif
	me.original_sector = cpu_to_le64(original_sector);
	me.seq_count = cpu_to_le64(seq_count);
	pmem_assign(*memory_entry(wc, e), me);
}

#define writecache_error(wc, err, msg, arg...)				\
do {									\
	if (!cmpxchg(&(wc)->error, 0, err))				\
		DMERR(msg, ##arg);					\
	wake_up(&(wc)->freelist_wait);					\
} while (0)

#define writecache_has_error(wc)	(unlikely(READ_ONCE((wc)->error)))

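/*
 * writecache_error() latches only the first error: the cmpxchg() swaps the
 * error code in only if wc->error is still 0, so later failures cannot
 * overwrite the original cause. Waiters on the freelist are woken so they
 * can observe the error instead of sleeping forever.
 */
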
static void writecache_flush_all_metadata(struct dm_writecache *wc)
{
	if (!WC_MODE_PMEM(wc))
		memset(wc->dirty_bitmap, -1, wc->dirty_bitmap_size);
}

static void writecache_flush_region(struct dm_writecache *wc, void *ptr, size_t size)
{
	if (!WC_MODE_PMEM(wc))
		__set_bit(((char *)ptr - (char *)wc->memory_map) / BITMAP_GRANULARITY,
			  wc->dirty_bitmap);
	else
		persistent_memory_flush_cache(ptr, size);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev);

struct io_notify {
	struct dm_writecache *wc;
	struct completion c;
	atomic_t count;
};

static void writecache_notify_io(unsigned long error, void *context)
{
	struct io_notify *endio = context;

	if (unlikely(error != 0))
		writecache_error(endio->wc, -EIO, "error writing metadata");
	BUG_ON(atomic_read(&endio->count) <= 0);
	if (atomic_dec_and_test(&endio->count))
		complete(&endio->c);
}

static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
{
	wait_event(wc->bio_in_progress_wait[direction],
		   !atomic_read(&wc->bio_in_progress[direction]));
}

static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
	struct dm_io_region region;
	struct dm_io_request req;
	struct io_notify endio = {
		wc,
		COMPLETION_INITIALIZER_ONSTACK(endio.c),
		ATOMIC_INIT(1),
	};
	unsigned bitmap_bits = wc->dirty_bitmap_size * 8;
	unsigned i = 0;

	while (1) {
		unsigned j;
		i = find_next_bit(wc->dirty_bitmap, bitmap_bits, i);
		if (unlikely(i == bitmap_bits))
			break;
		j = find_next_zero_bit(wc->dirty_bitmap, bitmap_bits, i);

		region.bdev = wc->ssd_dev->bdev;
		region.sector = (sector_t)i * (BITMAP_GRANULARITY >> SECTOR_SHIFT);
		region.count = (sector_t)(j - i) * (BITMAP_GRANULARITY >> SECTOR_SHIFT);

		if (unlikely(region.sector >= wc->metadata_sectors))
			break;
		if (unlikely(region.sector + region.count > wc->metadata_sectors))
			region.count = wc->metadata_sectors - region.sector;

		region.sector += wc->start_sector;
		atomic_inc(&endio.count);
		req.bi_op = REQ_OP_WRITE;
		req.bi_op_flags = REQ_SYNC;
		req.mem.type = DM_IO_VMA;
		req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
		req.client = wc->dm_io;
		req.notify.fn = writecache_notify_io;
		req.notify.context = &endio;

		/* writing via async dm-io (implied by notify.fn above) won't return an error */
		(void) dm_io(&req, 1, &region, NULL);
		i = j;
	}

	writecache_notify_io(0, &endio);
	wait_for_completion_io(&endio.c);

	if (wait_for_ios)
		writecache_wait_for_ios(wc, WRITE);

	writecache_disk_flush(wc, wc->ssd_dev);

	memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size);
}

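/*
 * ssd_commit_flushed() walks the dirty bitmap in runs of consecutive set
 * bits (find_next_bit / find_next_zero_bit) and issues one asynchronous
 * dm-io write per run, so a commit that touched a few scattered entries
 * rewrites only the BITMAP_GRANULARITY-sized regions that actually
 * changed, followed by a single disk flush.
 */
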
static void ssd_commit_superblock(struct dm_writecache *wc)
{
	int r;
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = wc->ssd_dev->bdev;
	region.sector = 0;
	region.count = PAGE_SIZE;

	if (unlikely(region.sector + region.count > wc->metadata_sectors))
		region.count = wc->metadata_sectors - region.sector;

	region.sector += wc->start_sector;

	req.bi_op = REQ_OP_WRITE;
	req.bi_op_flags = REQ_SYNC | REQ_FUA;
	req.mem.type = DM_IO_VMA;
	req.mem.ptr.vma = (char *)wc->memory_map;
	req.client = wc->dm_io;
	req.notify.fn = NULL;
	req.notify.context = NULL;

	r = dm_io(&req, 1, &region, NULL);
	if (unlikely(r))
		writecache_error(wc, r, "error writing superblock");
}

static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
	if (WC_MODE_PMEM(wc))
		wmb();
	else
		ssd_commit_flushed(wc, wait_for_ios);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
{
	int r;
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = dev->bdev;
	region.sector = 0;
	region.count = 0;
	req.bi_op = REQ_OP_WRITE;
	req.bi_op_flags = REQ_PREFLUSH;
	req.mem.type = DM_IO_KMEM;
	req.mem.ptr.addr = NULL;
	req.client = wc->dm_io;
	req.notify.fn = NULL;

	r = dm_io(&req, 1, &region, NULL);
	if (unlikely(r))
		writecache_error(wc, r, "error flushing metadata: %d", r);
}

#define WFE_RETURN_FOLLOWING	1
#define WFE_LOWEST_SEQ		2

static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
					      uint64_t block, int flags)
{
	struct wc_entry *e;
	struct rb_node *node = wc->tree.rb_node;

	if (unlikely(!node))
		return NULL;

	while (1) {
		e = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e) == block)
			break;

		node = (read_original_sector(wc, e) >= block ?
			e->rb_node.rb_left : e->rb_node.rb_right);
		if (unlikely(!node)) {
			if (!(flags & WFE_RETURN_FOLLOWING))
				return NULL;
			if (read_original_sector(wc, e) >= block) {
				return e;
			} else {
				node = rb_next(&e->rb_node);
				if (unlikely(!node))
					return NULL;
				e = container_of(node, struct wc_entry, rb_node);
				return e;
			}
		}
	}

	while (1) {
		struct wc_entry *e2;
		if (flags & WFE_LOWEST_SEQ)
			node = rb_prev(&e->rb_node);
		else
			node = rb_next(&e->rb_node);
		if (unlikely(!node))
			return e;
		e2 = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e2) != block)
			return e;
		e = e2;
	}
}

static void writecache_insert_entry(struct dm_writecache *wc, struct wc_entry *ins)
{
	struct wc_entry *e;
	struct rb_node **node = &wc->tree.rb_node, *parent = NULL;

	while (*node) {
		e = container_of(*node, struct wc_entry, rb_node);
		parent = &e->rb_node;
		if (read_original_sector(wc, e) > read_original_sector(wc, ins))
			node = &parent->rb_left;
		else
			node = &parent->rb_right;
	}
	rb_link_node(&ins->rb_node, parent, node);
	rb_insert_color(&ins->rb_node, &wc->tree);
	list_add(&ins->lru, &wc->lru);
	ins->age = jiffies;
}

static void writecache_unlink(struct dm_writecache *wc, struct wc_entry *e)
{
	list_del(&e->lru);
	rb_erase(&e->rb_node, &wc->tree);
}

static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry *e)
{
	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node **node = &wc->freetree.rb_node, *parent = NULL;

		if (unlikely(!*node))
			wc->current_free = e;
		while (*node) {
			parent = *node;
			if (&e->rb_node < *node)
				node = &parent->rb_left;
			else
				node = &parent->rb_right;
		}
		rb_link_node(&e->rb_node, parent, node);
		rb_insert_color(&e->rb_node, &wc->freetree);
	} else {
		list_add_tail(&e->lru, &wc->freelist);
	}
	wc->freelist_size++;
}

static inline void writecache_verify_watermark(struct dm_writecache *wc)
{
	if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
		queue_work(wc->writeback_wq, &wc->writeback_work);
}

static void writecache_max_age_timer(struct timer_list *t)
{
	struct dm_writecache *wc = from_timer(wc, t, max_age_timer);

	if (!dm_suspended(wc->ti) && !writecache_has_error(wc)) {
		queue_work(wc->writeback_wq, &wc->writeback_work);
		mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);
	}
}

static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, sector_t expected_sector)
{
	struct wc_entry *e;

	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node *next;
		if (unlikely(!wc->current_free))
			return NULL;
		e = wc->current_free;
		if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
			return NULL;
		next = rb_next(&e->rb_node);
		rb_erase(&e->rb_node, &wc->freetree);
		if (unlikely(!next))
			next = rb_first(&wc->freetree);
		wc->current_free = next ? container_of(next, struct wc_entry, rb_node) : NULL;
	} else {
		if (unlikely(list_empty(&wc->freelist)))
			return NULL;
		e = container_of(wc->freelist.next, struct wc_entry, lru);
		if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
			return NULL;
		list_del(&e->lru);
	}
	wc->freelist_size--;

	writecache_verify_watermark(wc);

	return e;
}

static void writecache_free_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_unlink(wc, e);
	writecache_add_to_freelist(wc, e);
	clear_seq_count(wc, e);
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (unlikely(waitqueue_active(&wc->freelist_wait)))
		wake_up(&wc->freelist_wait);
}

static void writecache_wait_on_freelist(struct dm_writecache *wc)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&wc->freelist_wait, &wait, TASK_UNINTERRUPTIBLE);
	wc_unlock(wc);
	schedule();
	finish_wait(&wc->freelist_wait, &wait);
	wc_lock(wc);
}

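/*
 * Note that writecache_wait_on_freelist() drops wc->lock around schedule():
 * the waiter is already queued on freelist_wait, so a concurrent
 * writecache_free_entry() can take the lock, return an entry to the
 * freelist and wake us. Callers must revalidate any state derived from the
 * tree or lists after this returns.
 */
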
static void writecache_poison_lists(struct dm_writecache *wc)
{
	/*
	 * Catch incorrect access to these values while the device is suspended.
	 */
	memset(&wc->tree, -1, sizeof wc->tree);
	wc->lru.next = LIST_POISON1;
	wc->lru.prev = LIST_POISON2;
	wc->freelist.next = LIST_POISON1;
	wc->freelist.prev = LIST_POISON2;
}

static void writecache_flush_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (WC_MODE_PMEM(wc))
		writecache_flush_region(wc, memory_data(wc, e), wc->block_size);
}

static bool writecache_entry_is_committed(struct dm_writecache *wc, struct wc_entry *e)
{
	return read_seq_count(wc, e) < wc->seq_count;
}

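/*
 * Commit protocol: every entry records the seq_count current at the time it
 * was written; wc->seq_count itself is only advanced (and persisted in the
 * superblock) by writecache_flush() after all those entries are durable. An
 * entry is therefore committed iff its recorded seq_count is strictly below
 * the superblock's.
 */
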
static void writecache_flush(struct dm_writecache *wc)
{
	struct wc_entry *e, *e2;
	bool need_flush_after_free;

	wc->uncommitted_blocks = 0;
	del_timer(&wc->autocommit_timer);

	if (list_empty(&wc->lru))
		return;

	e = container_of(wc->lru.next, struct wc_entry, lru);
	if (writecache_entry_is_committed(wc, e)) {
		if (wc->overwrote_committed) {
			writecache_wait_for_ios(wc, WRITE);
			writecache_disk_flush(wc, wc->ssd_dev);
			wc->overwrote_committed = false;
		}
		return;
	}
	while (1) {
		writecache_flush_entry(wc, e);
		if (unlikely(e->lru.next == &wc->lru))
			break;
		e2 = container_of(e->lru.next, struct wc_entry, lru);
		if (writecache_entry_is_committed(wc, e2))
			break;
		e = e2;
		cond_resched();
	}
	writecache_commit_flushed(wc, true);

	wc->seq_count++;
	pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
	if (WC_MODE_PMEM(wc))
		writecache_commit_flushed(wc, false);
	else
		ssd_commit_superblock(wc);

	wc->overwrote_committed = false;

	need_flush_after_free = false;
	while (1) {
		/* Free another committed entry with lower seq-count */
		struct rb_node *rb_node = rb_prev(&e->rb_node);

		if (rb_node) {
			e2 = container_of(rb_node, struct wc_entry, rb_node);
			if (read_original_sector(wc, e2) == read_original_sector(wc, e) &&
			    likely(!e2->write_in_progress)) {
				writecache_free_entry(wc, e2);
				need_flush_after_free = true;
			}
		}
		if (unlikely(e->lru.prev == &wc->lru))
			break;
		e = container_of(e->lru.prev, struct wc_entry, lru);
		cond_resched();
	}

	if (need_flush_after_free)
		writecache_commit_flushed(wc, false);
}

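/*
 * Ordering in writecache_flush(): (1) flush the metadata (and, for pmem,
 * the data) of all uncommitted entries, (2) commit that with a barrier,
 * (3) only then bump and persist seq_count, which atomically promotes the
 * whole batch to "committed". Superseded older copies of the same blocks
 * are freed afterwards, walking the LRU list backwards.
 */
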
static void writecache_flush_work(struct work_struct *work)
{
	struct dm_writecache *wc = container_of(work, struct dm_writecache, flush_work);

	wc_lock(wc);
	writecache_flush(wc);
	wc_unlock(wc);
}

static void writecache_autocommit_timer(struct timer_list *t)
{
	struct dm_writecache *wc = from_timer(wc, t, autocommit_timer);
	if (!writecache_has_error(wc))
		queue_work(wc->writeback_wq, &wc->flush_work);
}

static void writecache_schedule_autocommit(struct dm_writecache *wc)
{
	if (!timer_pending(&wc->autocommit_timer))
		mod_timer(&wc->autocommit_timer, jiffies + wc->autocommit_jiffies);
}

static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_t end)
{
	struct wc_entry *e;
	bool discarded_something = false;

	e = writecache_find_entry(wc, start, WFE_RETURN_FOLLOWING | WFE_LOWEST_SEQ);
	if (unlikely(!e))
		return;

	while (read_original_sector(wc, e) < end) {
		struct rb_node *node = rb_next(&e->rb_node);

		if (likely(!e->write_in_progress)) {
			if (!discarded_something) {
				writecache_wait_for_ios(wc, READ);
				writecache_wait_for_ios(wc, WRITE);
				discarded_something = true;
			}
			writecache_free_entry(wc, e);
		}

		if (unlikely(!node))
			break;

		e = container_of(node, struct wc_entry, rb_node);
	}

	if (discarded_something)
		writecache_commit_flushed(wc, false);
}

static bool writecache_wait_for_writeback(struct dm_writecache *wc)
{
	if (wc->writeback_size) {
		writecache_wait_on_freelist(wc);
		return true;
	}
	return false;
}

static void writecache_suspend(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;
	bool flush_on_suspend;

	del_timer_sync(&wc->autocommit_timer);
	del_timer_sync(&wc->max_age_timer);

	wc_lock(wc);
	writecache_flush(wc);
	flush_on_suspend = wc->flush_on_suspend;
	if (flush_on_suspend) {
		wc->flush_on_suspend = false;
		wc->writeback_all++;
		queue_work(wc->writeback_wq, &wc->writeback_work);
	}
	wc_unlock(wc);

	drain_workqueue(wc->writeback_wq);

	wc_lock(wc);
	if (flush_on_suspend)
		wc->writeback_all--;
	while (writecache_wait_for_writeback(wc));

	if (WC_MODE_PMEM(wc))
		persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);

	writecache_poison_lists(wc);
	wc_unlock(wc);
}

static int writecache_alloc_entries(struct dm_writecache *wc)
{
	size_t b;

	if (wc->entries)
		return 0;
	wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
	if (!wc->entries)
		return -ENOMEM;
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		e->index = b;
		e->write_in_progress = false;
		cond_resched();
	}

	return 0;
}

static void writecache_resume(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;
	size_t b;
	bool need_flush = false;
	__le64 sb_seq_count;
	int r;

	wc_lock(wc);

	if (WC_MODE_PMEM(wc))
		persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);

	wc->tree = RB_ROOT;
	INIT_LIST_HEAD(&wc->lru);
	if (WC_MODE_SORT_FREELIST(wc)) {
		wc->freetree = RB_ROOT;
		wc->current_free = NULL;
	} else {
		INIT_LIST_HEAD(&wc->freelist);
	}
	wc->freelist_size = 0;

	r = memcpy_mcsafe(&sb_seq_count, &sb(wc)->seq_count, sizeof(uint64_t));
	if (r) {
		writecache_error(wc, r, "hardware memory error when reading superblock: %d", r);
		sb_seq_count = cpu_to_le64(0);
	}
	wc->seq_count = le64_to_cpu(sb_seq_count);

#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		struct wc_memory_entry wme;
		if (writecache_has_error(wc)) {
			e->original_sector = -1;
			e->seq_count = -1;
			continue;
		}
		r = memcpy_mcsafe(&wme, memory_entry(wc, e), sizeof(struct wc_memory_entry));
		if (r) {
			writecache_error(wc, r, "hardware memory error when reading metadata entry %lu: %d",
					 (unsigned long)b, r);
			e->original_sector = -1;
			e->seq_count = -1;
		} else {
			e->original_sector = le64_to_cpu(wme.original_sector);
			e->seq_count = le64_to_cpu(wme.seq_count);
		}
		cond_resched();
	}
#endif
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		if (!writecache_entry_is_committed(wc, e)) {
			if (read_seq_count(wc, e) != -1) {
erase_this:
				clear_seq_count(wc, e);
				need_flush = true;
			}
			writecache_add_to_freelist(wc, e);
		} else {
			struct wc_entry *old;

			old = writecache_find_entry(wc, read_original_sector(wc, e), 0);
			if (!old) {
				writecache_insert_entry(wc, e);
			} else {
				if (read_seq_count(wc, old) == read_seq_count(wc, e)) {
					writecache_error(wc, -EINVAL,
						 "two identical entries, position %llu, sector %llu, sequence %llu",
						 (unsigned long long)b, (unsigned long long)read_original_sector(wc, e),
						 (unsigned long long)read_seq_count(wc, e));
				}
				if (read_seq_count(wc, old) > read_seq_count(wc, e)) {
					goto erase_this;
				} else {
					writecache_free_entry(wc, old);
					writecache_insert_entry(wc, e);
					need_flush = true;
				}
			}
		}
		cond_resched();
	}

	if (need_flush) {
		writecache_flush_all_metadata(wc);
		writecache_commit_flushed(wc, false);
	}

	writecache_verify_watermark(wc);

	if (wc->max_age != MAX_AGE_UNSPECIFIED)
		mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);

	wc_unlock(wc);
}

static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	if (dm_suspended(wc->ti)) {
		wc_unlock(wc);
		return -EBUSY;
	}
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return -EIO;
	}

	writecache_flush(wc);
	wc->writeback_all++;
	queue_work(wc->writeback_wq, &wc->writeback_work);
	wc_unlock(wc);

	flush_workqueue(wc->writeback_wq);

	wc_lock(wc);
	wc->writeback_all--;
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return -EIO;
	}
	wc_unlock(wc);

	return 0;
}

static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	wc->flush_on_suspend = true;
	wc_unlock(wc);

	return 0;
}

static void activate_cleaner(struct dm_writecache *wc)
{
	wc->flush_on_suspend = true;
	wc->cleaner = true;
	wc->freelist_high_watermark = wc->n_blocks;
	wc->freelist_low_watermark = wc->n_blocks;
}

static int process_cleaner_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	activate_cleaner(wc);
	if (!dm_suspended(wc->ti))
		writecache_verify_watermark(wc);
	wc_unlock(wc);

	return 0;
}

static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
			      char *result, unsigned maxlen)
{
	int r = -EINVAL;
	struct dm_writecache *wc = ti->private;

	if (!strcasecmp(argv[0], "flush"))
		r = process_flush_mesg(argc, argv, wc);
	else if (!strcasecmp(argv[0], "flush_on_suspend"))
		r = process_flush_on_suspend_mesg(argc, argv, wc);
	else if (!strcasecmp(argv[0], "cleaner"))
		r = process_cleaner_mesg(argc, argv, wc);
	else
		DMERR("unrecognised message received: %s", argv[0]);

	return r;
}

static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
{
	void *buf;
	unsigned long flags;
	unsigned size;
	int rw = bio_data_dir(bio);
	unsigned remaining_size = wc->block_size;

	do {
		struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
		buf = bvec_kmap_irq(&bv, &flags);
		size = bv.bv_len;
		if (unlikely(size > remaining_size))
			size = remaining_size;

		if (rw == READ) {
			int r;
			r = memcpy_mcsafe(buf, data, size);
			flush_dcache_page(bio_page(bio));
			if (unlikely(r)) {
				writecache_error(wc, r, "hardware memory error when reading data: %d", r);
				bio->bi_status = BLK_STS_IOERR;
			}
		} else {
			flush_dcache_page(bio_page(bio));
			memcpy_flushcache(data, buf, size);
		}

		bvec_kunmap_irq(buf, &flags);

		data = (char *)data + size;
		remaining_size -= size;
		bio_advance(bio, size);
	} while (unlikely(remaining_size));
}

static int writecache_flush_thread(void *data)
{
	struct dm_writecache *wc = data;

	while (1) {
		struct bio *bio;

		wc_lock(wc);
		bio = bio_list_pop(&wc->flush_list);
		if (!bio) {
			set_current_state(TASK_INTERRUPTIBLE);
			wc_unlock(wc);

			if (unlikely(kthread_should_stop())) {
				set_current_state(TASK_RUNNING);
				break;
			}

			schedule();
			continue;
		}

		if (bio_op(bio) == REQ_OP_DISCARD) {
			writecache_discard(wc, bio->bi_iter.bi_sector,
					   bio_end_sector(bio));
			wc_unlock(wc);
			bio_set_dev(bio, wc->dev->bdev);
			generic_make_request(bio);
		} else {
			writecache_flush(wc);
			wc_unlock(wc);
			if (writecache_has_error(wc))
				bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		}
	}

	return 0;
}

static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio)
{
	if (bio_list_empty(&wc->flush_list))
		wake_up_process(wc->flush_thread);
	bio_list_add(&wc->flush_list, bio);
}

static int writecache_map(struct dm_target *ti, struct bio *bio)
{
	struct wc_entry *e;
	struct dm_writecache *wc = ti->private;

	bio->bi_private = NULL;

	wc_lock(wc);

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		if (writecache_has_error(wc))
			goto unlock_error;
		if (WC_MODE_PMEM(wc)) {
			writecache_flush(wc);
			if (writecache_has_error(wc))
				goto unlock_error;
			goto unlock_submit;
		} else {
			writecache_offload_bio(wc, bio);
			goto unlock_return;
		}
	}

	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
				(wc->block_size / 512 - 1)) != 0)) {
		DMERR("I/O is not aligned, sector %llu, size %u, block size %u",
		      (unsigned long long)bio->bi_iter.bi_sector,
		      bio->bi_iter.bi_size, wc->block_size);
		goto unlock_error;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		if (writecache_has_error(wc))
			goto unlock_error;
		if (WC_MODE_PMEM(wc)) {
			writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio));
			goto unlock_remap_origin;
		} else {
			writecache_offload_bio(wc, bio);
			goto unlock_return;
		}
	}

	if (bio_data_dir(bio) == READ) {
read_next_block:
		e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
		if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
			if (WC_MODE_PMEM(wc)) {
				bio_copy_block(wc, bio, memory_data(wc, e));
				if (bio->bi_iter.bi_size)
					goto read_next_block;
				goto unlock_submit;
			} else {
				dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
				bio_set_dev(bio, wc->ssd_dev->bdev);
				bio->bi_iter.bi_sector = cache_sector(wc, e);
				if (!writecache_entry_is_committed(wc, e))
					writecache_wait_for_ios(wc, WRITE);
				goto unlock_remap;
			}
		} else {
			if (e) {
				sector_t next_boundary =
					read_original_sector(wc, e) - bio->bi_iter.bi_sector;
				if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
					dm_accept_partial_bio(bio, next_boundary);
				}
			}
			goto unlock_remap_origin;
		}
	} else {
		do {
			bool found_entry = false;
			if (writecache_has_error(wc))
				goto unlock_error;
			e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
			if (e) {
				if (!writecache_entry_is_committed(wc, e))
					goto bio_copy;
				if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
					wc->overwrote_committed = true;
					goto bio_copy;
				}
				found_entry = true;
			} else {
				if (unlikely(wc->cleaner))
					goto direct_write;
			}
			e = writecache_pop_from_freelist(wc, (sector_t)-1);
			if (unlikely(!e)) {
				if (!found_entry) {
direct_write:
					e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
					if (e) {
						sector_t next_boundary = read_original_sector(wc, e) - bio->bi_iter.bi_sector;
						BUG_ON(!next_boundary);
						if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
							dm_accept_partial_bio(bio, next_boundary);
						}
					}
					goto unlock_remap_origin;
				}
				writecache_wait_on_freelist(wc);
				continue;
			}
			write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
			writecache_insert_entry(wc, e);
			wc->uncommitted_blocks++;
bio_copy:
			if (WC_MODE_PMEM(wc)) {
				bio_copy_block(wc, bio, memory_data(wc, e));
			} else {
				unsigned bio_size = wc->block_size;
				sector_t start_cache_sec = cache_sector(wc, e);
				sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);

				while (bio_size < bio->bi_iter.bi_size) {
					struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
					if (!f)
						break;
					write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
									(bio_size >> SECTOR_SHIFT), wc->seq_count);
					writecache_insert_entry(wc, f);
					wc->uncommitted_blocks++;
					bio_size += wc->block_size;
					current_cache_sec += wc->block_size >> SECTOR_SHIFT;
				}

				bio_set_dev(bio, wc->ssd_dev->bdev);
				bio->bi_iter.bi_sector = start_cache_sec;
				dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);

				if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
					wc->uncommitted_blocks = 0;
					queue_work(wc->writeback_wq, &wc->flush_work);
				} else {
					writecache_schedule_autocommit(wc);
				}
				goto unlock_remap;
			}
		} while (bio->bi_iter.bi_size);

		if (unlikely(bio->bi_opf & REQ_FUA ||
			     wc->uncommitted_blocks >= wc->autocommit_blocks))
			writecache_flush(wc);
		else
			writecache_schedule_autocommit(wc);
		goto unlock_submit;
	}

unlock_remap_origin:
	bio_set_dev(bio, wc->dev->bdev);
	wc_unlock(wc);
	return DM_MAPIO_REMAPPED;

unlock_remap:
	/* make sure that writecache_end_io decrements bio_in_progress: */
	bio->bi_private = (void *)1;
	atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);
	wc_unlock(wc);
	return DM_MAPIO_REMAPPED;

unlock_submit:
	wc_unlock(wc);
	bio_endio(bio);
	return DM_MAPIO_SUBMITTED;

unlock_return:
	wc_unlock(wc);
	return DM_MAPIO_SUBMITTED;

unlock_error:
	wc_unlock(wc);
	bio_io_error(bio);
	return DM_MAPIO_SUBMITTED;
}

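/*
 * writecache_map() return paths: unlock_remap_origin / unlock_remap hand
 * the bio back to device-mapper as DM_MAPIO_REMAPPED (aimed at the origin
 * or the SSD respectively); unlock_submit completes the bio itself, and
 * unlock_return is used when the bio was queued for the flush thread --
 * both of the latter report DM_MAPIO_SUBMITTED.
 */
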
static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
{
	struct dm_writecache *wc = ti->private;

	if (bio->bi_private != NULL) {
		int dir = bio_data_dir(bio);
		if (atomic_dec_and_test(&wc->bio_in_progress[dir]))
			if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir])))
				wake_up(&wc->bio_in_progress_wait[dir]);
	}
	return 0;
}

static int writecache_iterate_devices(struct dm_target *ti,
				      iterate_devices_callout_fn fn, void *data)
{
	struct dm_writecache *wc = ti->private;

	return fn(ti, wc->dev, 0, ti->len, data);
}

static void writecache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_writecache *wc = ti->private;

	if (limits->logical_block_size < wc->block_size)
		limits->logical_block_size = wc->block_size;

	if (limits->physical_block_size < wc->block_size)
		limits->physical_block_size = wc->block_size;

	if (limits->io_min < wc->block_size)
		limits->io_min = wc->block_size;
}

static void writecache_writeback_endio(struct bio *bio)
{
	struct writeback_struct *wb = container_of(bio, struct writeback_struct, bio);
	struct dm_writecache *wc = wb->wc;
	unsigned long flags;

	raw_spin_lock_irqsave(&wc->endio_list_lock, flags);
	if (unlikely(list_empty(&wc->endio_list)))
		wake_up_process(wc->endio_thread);
	list_add_tail(&wb->endio_entry, &wc->endio_list);
	raw_spin_unlock_irqrestore(&wc->endio_list_lock, flags);
}

static void writecache_copy_endio(int read_err, unsigned long write_err, void *ptr)
{
	struct copy_struct *c = ptr;
	struct dm_writecache *wc = c->wc;

	c->error = likely(!(read_err | write_err)) ? 0 : -EIO;

	raw_spin_lock_irq(&wc->endio_list_lock);
	if (unlikely(list_empty(&wc->endio_list)))
		wake_up_process(wc->endio_thread);
	list_add_tail(&c->endio_entry, &wc->endio_list);
	raw_spin_unlock_irq(&wc->endio_list_lock);
}

static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *list)
{
	unsigned i;
	struct writeback_struct *wb;
	struct wc_entry *e;
	unsigned long n_walked = 0;

	do {
		wb = list_entry(list->next, struct writeback_struct, endio_entry);
		list_del(&wb->endio_entry);

		if (unlikely(wb->bio.bi_status != BLK_STS_OK))
			writecache_error(wc, blk_status_to_errno(wb->bio.bi_status),
					"write error %d", wb->bio.bi_status);
		i = 0;
		do {
			e = wb->wc_list[i];
			BUG_ON(!e->write_in_progress);
			e->write_in_progress = false;
			INIT_LIST_HEAD(&e->lru);
			if (!writecache_has_error(wc))
				writecache_free_entry(wc, e);
			BUG_ON(!wc->writeback_size);
			wc->writeback_size--;
			n_walked++;
			if (unlikely(n_walked >= ENDIO_LATENCY)) {
				writecache_commit_flushed(wc, false);
				wc_unlock(wc);
				wc_lock(wc);
				n_walked = 0;
			}
		} while (++i < wb->wc_list_n);

		if (wb->wc_list != wb->wc_list_inline)
			kfree(wb->wc_list);
		bio_put(&wb->bio);
	} while (!list_empty(list));
}

static void __writecache_endio_ssd(struct dm_writecache *wc, struct list_head *list)
{
	struct copy_struct *c;
	struct wc_entry *e;

	do {
		c = list_entry(list->next, struct copy_struct, endio_entry);
		list_del(&c->endio_entry);

		if (unlikely(c->error))
			writecache_error(wc, c->error, "copy error");

		e = c->e;
		do {
			BUG_ON(!e->write_in_progress);
			e->write_in_progress = false;
			INIT_LIST_HEAD(&e->lru);
			if (!writecache_has_error(wc))
				writecache_free_entry(wc, e);

			BUG_ON(!wc->writeback_size);
			wc->writeback_size--;
			e++;
		} while (--c->n_entries);
		mempool_free(c, &wc->copy_pool);
	} while (!list_empty(list));
}

static int writecache_endio_thread(void *data)
{
	struct dm_writecache *wc = data;

	while (1) {
		struct list_head list;

		raw_spin_lock_irq(&wc->endio_list_lock);
		if (!list_empty(&wc->endio_list))
			goto pop_from_list;
		set_current_state(TASK_INTERRUPTIBLE);
		raw_spin_unlock_irq(&wc->endio_list_lock);

		if (unlikely(kthread_should_stop())) {
			set_current_state(TASK_RUNNING);
			break;
		}

		schedule();

		continue;

pop_from_list:
		list = wc->endio_list;
		list.next->prev = list.prev->next = &list;
		INIT_LIST_HEAD(&wc->endio_list);
		raw_spin_unlock_irq(&wc->endio_list_lock);

		if (!WC_MODE_FUA(wc))
			writecache_disk_flush(wc, wc->dev);

		wc_lock(wc);

		if (WC_MODE_PMEM(wc)) {
			__writecache_endio_pmem(wc, &list);
		} else {
			__writecache_endio_ssd(wc, &list);
			writecache_wait_for_ios(wc, READ);
		}

		writecache_commit_flushed(wc, false);

		wc_unlock(wc);
	}

	return 0;
}

static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t gfp)
{
	struct dm_writecache *wc = wb->wc;
	unsigned block_size = wc->block_size;
	void *address = memory_data(wc, e);

	persistent_memory_flush_cache(address, block_size);
	return bio_add_page(&wb->bio, persistent_memory_page(address),
			    block_size, persistent_memory_page_offset(address)) != 0;
}

struct writeback_list {
	struct list_head list;
	size_t size;
};

static void __writeback_throttle(struct dm_writecache *wc, struct writeback_list *wbl)
{
	if (unlikely(wc->max_writeback_jobs)) {
		if (READ_ONCE(wc->writeback_size) - wbl->size >= wc->max_writeback_jobs) {
			wc_lock(wc);
			while (wc->writeback_size - wbl->size >= wc->max_writeback_jobs)
				writecache_wait_on_freelist(wc);
			wc_unlock(wc);
		}
	}
	cond_resched();
}

static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeback_list *wbl)
{
	struct wc_entry *e, *f;
	struct bio *bio;
	struct writeback_struct *wb;
	unsigned max_pages;

	while (wbl->size) {
		wbl->size--;
		e = container_of(wbl->list.prev, struct wc_entry, lru);
		list_del(&e->lru);

		max_pages = e->wc_list_contiguous;

		bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
		wb = container_of(bio, struct writeback_struct, bio);
		wb->wc = wc;
		bio->bi_end_io = writecache_writeback_endio;
		bio_set_dev(bio, wc->dev->bdev);
		bio->bi_iter.bi_sector = read_original_sector(wc, e);
		if (max_pages <= WB_LIST_INLINE ||
		    unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
							   GFP_NOIO | __GFP_NORETRY |
							   __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
			wb->wc_list = wb->wc_list_inline;
			max_pages = WB_LIST_INLINE;
		}

		BUG_ON(!wc_add_block(wb, e, GFP_NOIO));

		wb->wc_list[0] = e;
		wb->wc_list_n = 1;

		while (wbl->size && wb->wc_list_n < max_pages) {
			f = container_of(wbl->list.prev, struct wc_entry, lru);
			if (read_original_sector(wc, f) !=
			    read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
				break;
			if (!wc_add_block(wb, f, GFP_NOWAIT | __GFP_NOWARN))
				break;
			wbl->size--;
			list_del(&f->lru);
			wb->wc_list[wb->wc_list_n++] = f;
			e = f;
		}
		bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
		if (writecache_has_error(wc)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		} else {
			submit_bio(bio);
		}

		__writeback_throttle(wc, wbl);
	}
}

static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writeback_list *wbl)
{
	struct wc_entry *e, *f;
	struct dm_io_region from, to;
	struct copy_struct *c;

	while (wbl->size) {
		unsigned n_sectors;

		wbl->size--;
		e = container_of(wbl->list.prev, struct wc_entry, lru);
		list_del(&e->lru);

		n_sectors = e->wc_list_contiguous << (wc->block_size_bits - SECTOR_SHIFT);

		from.bdev = wc->ssd_dev->bdev;
		from.sector = cache_sector(wc, e);
		from.count = n_sectors;
		to.bdev = wc->dev->bdev;
		to.sector = read_original_sector(wc, e);
		to.count = n_sectors;

		c = mempool_alloc(&wc->copy_pool, GFP_NOIO);
		c->wc = wc;
		c->e = e;
		c->n_entries = e->wc_list_contiguous;

		while ((n_sectors -= wc->block_size >> SECTOR_SHIFT)) {
			wbl->size--;
			f = container_of(wbl->list.prev, struct wc_entry, lru);
			BUG_ON(f != e + 1);
			list_del(&f->lru);
			e = f;
		}

		dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);

		__writeback_throttle(wc, wbl);
	}
}

static void writecache_writeback(struct work_struct *work)
{
	struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work);
	struct blk_plug plug;
	struct wc_entry *f, *uninitialized_var(g), *e = NULL;
	struct rb_node *node, *next_node;
	struct list_head skipped;
	struct writeback_list wbl;
	unsigned long n_walked;

	wc_lock(wc);
restart:
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return;
	}

	if (unlikely(wc->writeback_all)) {
		if (writecache_wait_for_writeback(wc))
			goto restart;
	}

	if (wc->overwrote_committed) {
		writecache_wait_for_ios(wc, WRITE);
	}

	n_walked = 0;
	INIT_LIST_HEAD(&skipped);
	INIT_LIST_HEAD(&wbl.list);
	wbl.size = 0;
	while (!list_empty(&wc->lru) &&
	       (wc->writeback_all ||
		wc->freelist_size + wc->writeback_size <= wc->freelist_low_watermark ||
		(jiffies - container_of(wc->lru.prev, struct wc_entry, lru)->age >=
		 wc->max_age - wc->max_age / MAX_AGE_DIV))) {

		n_walked++;
		if (unlikely(n_walked > WRITEBACK_LATENCY) &&
		    likely(!wc->writeback_all) && likely(!dm_suspended(wc->ti))) {
			queue_work(wc->writeback_wq, &wc->writeback_work);
			break;
		}

		if (unlikely(wc->writeback_all)) {
			if (unlikely(!e)) {
				writecache_flush(wc);
				e = container_of(rb_first(&wc->tree), struct wc_entry, rb_node);
			} else
				e = g;
		} else
			e = container_of(wc->lru.prev, struct wc_entry, lru);
		BUG_ON(e->write_in_progress);
		if (unlikely(!writecache_entry_is_committed(wc, e))) {
			writecache_flush(wc);
		}
		node = rb_prev(&e->rb_node);
		if (node) {
			f = container_of(node, struct wc_entry, rb_node);
			if (unlikely(read_original_sector(wc, f) ==
				     read_original_sector(wc, e))) {
				BUG_ON(!f->write_in_progress);
				list_del(&e->lru);
				list_add(&e->lru, &skipped);
				cond_resched();
				continue;
			}
		}
		wc->writeback_size++;
		list_del(&e->lru);
		list_add(&e->lru, &wbl.list);
		wbl.size++;
		e->write_in_progress = true;
		e->wc_list_contiguous = 1;

		f = e;

		while (1) {
			next_node = rb_next(&f->rb_node);
			if (unlikely(!next_node))
				break;
			g = container_of(next_node, struct wc_entry, rb_node);
			if (unlikely(read_original_sector(wc, g) ==
			    read_original_sector(wc, f))) {
				f = g;
				continue;
			}
			if (read_original_sector(wc, g) !=
			    read_original_sector(wc, f) + (wc->block_size >> SECTOR_SHIFT))
				break;
			if (unlikely(g->write_in_progress))
				break;
			if (unlikely(!writecache_entry_is_committed(wc, g)))
				break;

			if (!WC_MODE_PMEM(wc)) {
				if (g != f + 1)
					break;
			}

			n_walked++;
			//if (unlikely(n_walked > WRITEBACK_LATENCY) && likely(!wc->writeback_all))
			//	break;

			wc->writeback_size++;
			list_del(&g->lru);
			list_add(&g->lru, &wbl.list);
			wbl.size++;
			g->write_in_progress = true;
			g->wc_list_contiguous = BIO_MAX_PAGES;
			f = g;
			e->wc_list_contiguous++;
			if (unlikely(e->wc_list_contiguous == BIO_MAX_PAGES)) {
				if (unlikely(wc->writeback_all)) {
					next_node = rb_next(&f->rb_node);
					if (likely(next_node))
						g = container_of(next_node, struct wc_entry, rb_node);
				}
				break;
			}
		}
		cond_resched();
	}

	if (!list_empty(&skipped)) {
		list_splice_tail(&skipped, &wc->lru);
		/*
		 * If we didn't do any progress, we must wait until some
		 * writeback finishes to avoid burning CPU in a loop
		 */
		if (unlikely(!wbl.size))
			writecache_wait_for_writeback(wc);
	}

	wc_unlock(wc);

	blk_start_plug(&plug);

	if (WC_MODE_PMEM(wc))
		__writecache_writeback_pmem(wc, &wbl);
	else
		__writecache_writeback_ssd(wc, &wbl);

	blk_finish_plug(&plug);

	if (unlikely(wc->writeback_all)) {
		wc_lock(wc);
		while (writecache_wait_for_writeback(wc));
		wc_unlock(wc);
	}
}

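/*
 * Writeback is triggered from three places: the freelist dropping to the
 * low watermark, the max_age timer (entries older than roughly
 * max_age - max_age/MAX_AGE_DIV are pushed out), and writeback_all, which
 * is set by the "flush" message and by flush-on-suspend to drain the whole
 * cache. Contiguous dirty blocks are coalesced into a single bio (pmem) or
 * a single kcopyd copy (SSD).
 */
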
static int calculate_memory_size(uint64_t device_size, unsigned block_size,
				 size_t *n_blocks_p, size_t *n_metadata_blocks_p)
{
	uint64_t n_blocks, offset;
	struct wc_entry e;

	n_blocks = device_size;
	do_div(n_blocks, block_size + sizeof(struct wc_memory_entry));

	while (1) {
		if (!n_blocks)
			return -ENOSPC;
		/* Verify the following entries[n_blocks] won't overflow */
		if (n_blocks >= ((size_t)-sizeof(struct wc_memory_superblock) /
				 sizeof(struct wc_memory_entry)))
			return -EFBIG;
		offset = offsetof(struct wc_memory_superblock, entries[n_blocks]);
		offset = (offset + block_size - 1) & ~(uint64_t)(block_size - 1);
		if (offset + n_blocks * block_size <= device_size)
			break;
		n_blocks--;
	}

	/* check if the bit field overflows */
	e.index = n_blocks;
	if (e.index != n_blocks)
		return -EFBIG;

	if (n_blocks_p)
		*n_blocks_p = n_blocks;
	if (n_metadata_blocks_p)
		*n_metadata_blocks_p = offset >> __ffs(block_size);
	return 0;
}

static int init_memory(struct dm_writecache *wc)
{
	size_t b;
	int r;

	r = calculate_memory_size(wc->memory_map_size, wc->block_size, &wc->n_blocks, NULL);
	if (r)
		return r;

	r = writecache_alloc_entries(wc);
	if (r)
		return r;

	for (b = 0; b < ARRAY_SIZE(sb(wc)->padding); b++)
		pmem_assign(sb(wc)->padding[b], cpu_to_le64(0));
	pmem_assign(sb(wc)->version, cpu_to_le32(MEMORY_SUPERBLOCK_VERSION));
	pmem_assign(sb(wc)->block_size, cpu_to_le32(wc->block_size));
	pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks));
	pmem_assign(sb(wc)->seq_count, cpu_to_le64(0));

	for (b = 0; b < wc->n_blocks; b++) {
		write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);
		cond_resched();
	}

	writecache_flush_all_metadata(wc);
	writecache_commit_flushed(wc, false);
	pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC));
	writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic);
	writecache_commit_flushed(wc, false);

	return 0;
}

static void writecache_dtr(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;

	if (!wc)
		return;

	if (wc->endio_thread)
		kthread_stop(wc->endio_thread);

	if (wc->flush_thread)
		kthread_stop(wc->flush_thread);

	bioset_exit(&wc->bio_set);

	mempool_exit(&wc->copy_pool);

	if (wc->writeback_wq)
		destroy_workqueue(wc->writeback_wq);

	if (wc->dev)
		dm_put_device(ti, wc->dev);

	if (wc->ssd_dev)
		dm_put_device(ti, wc->ssd_dev);

	if (wc->entries)
		vfree(wc->entries);

	if (wc->memory_map) {
		if (WC_MODE_PMEM(wc))
			persistent_memory_release(wc);
		else
			vfree(wc->memory_map);
	}

	if (wc->dm_kcopyd)
		dm_kcopyd_client_destroy(wc->dm_kcopyd);

	if (wc->dm_io)
		dm_io_client_destroy(wc->dm_io);

	if (wc->dirty_bitmap)
		vfree(wc->dirty_bitmap);

	kfree(wc);
}

static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	struct dm_writecache *wc;
	struct dm_arg_set as;
	const char *string;
	unsigned opt_params;
	size_t offset, data_size;
	int i, r;
	char dummy;
	int high_wm_percent = HIGH_WATERMARK;
	int low_wm_percent = LOW_WATERMARK;
	uint64_t x;
	struct wc_memory_superblock s;

	static struct dm_arg _args[] = {
		{0, 10, "Invalid number of feature args"},
	};

	as.argc = argc;
	as.argv = argv;

	wc = kzalloc(sizeof(struct dm_writecache), GFP_KERNEL);
	if (!wc) {
		ti->error = "Cannot allocate writecache structure";
		r = -ENOMEM;
		goto bad;
	}
	ti->private = wc;
	wc->ti = ti;

	mutex_init(&wc->lock);
	wc->max_age = MAX_AGE_UNSPECIFIED;
	writecache_poison_lists(wc);
	init_waitqueue_head(&wc->freelist_wait);
	timer_setup(&wc->autocommit_timer, writecache_autocommit_timer, 0);
	timer_setup(&wc->max_age_timer, writecache_max_age_timer, 0);

	for (i = 0; i < 2; i++) {
		atomic_set(&wc->bio_in_progress[i], 0);
		init_waitqueue_head(&wc->bio_in_progress_wait[i]);
	}

	wc->dm_io = dm_io_client_create();
	if (IS_ERR(wc->dm_io)) {
		r = PTR_ERR(wc->dm_io);
		ti->error = "Unable to allocate dm-io client";
		wc->dm_io = NULL;
		goto bad;
	}

	wc->writeback_wq = alloc_workqueue("writecache-writeback", WQ_MEM_RECLAIM, 1);
	if (!wc->writeback_wq) {
		r = -ENOMEM;
		ti->error = "Could not allocate writeback workqueue";
		goto bad;
	}
	INIT_WORK(&wc->writeback_work, writecache_writeback);
	INIT_WORK(&wc->flush_work, writecache_flush_work);

	raw_spin_lock_init(&wc->endio_list_lock);
	INIT_LIST_HEAD(&wc->endio_list);
	wc->endio_thread = kthread_create(writecache_endio_thread, wc, "writecache_endio");
	if (IS_ERR(wc->endio_thread)) {
		r = PTR_ERR(wc->endio_thread);
		wc->endio_thread = NULL;
		ti->error = "Couldn't spawn endio thread";
		goto bad;
	}
	wake_up_process(wc->endio_thread);

	/*
	 * Parse the mode (pmem or ssd)
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;

	if (!strcasecmp(string, "s")) {
		wc->pmem_mode = false;
	} else if (!strcasecmp(string, "p")) {
#ifdef DM_WRITECACHE_HAS_PMEM
		wc->pmem_mode = true;
		wc->writeback_fua = true;
#else
		/*
		 * If the architecture doesn't support persistent memory or
		 * the kernel doesn't support any DAX drivers, this driver can
		 * only be used in SSD-only mode.
		 */
		r = -EOPNOTSUPP;
		ti->error = "Persistent memory or DAX not supported on this system";
		goto bad;
#endif
	} else {
		goto bad_arguments;
	}

	if (WC_MODE_PMEM(wc)) {
		r = bioset_init(&wc->bio_set, BIO_POOL_SIZE,
				offsetof(struct writeback_struct, bio),
				BIOSET_NEED_BVECS);
		if (r) {
			ti->error = "Could not allocate bio set";
			goto bad;
		}
	} else {
		r = mempool_init_kmalloc_pool(&wc->copy_pool, 1, sizeof(struct copy_struct));
		if (r) {
			ti->error = "Could not allocate mempool";
			goto bad;
		}
	}

	/*
	 * Parse the origin data device
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;
	r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->dev);
	if (r) {
		ti->error = "Origin data device lookup failed";
		goto bad;
	}

	/*
	 * Parse cache data device (be it pmem or ssd)
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;

	r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->ssd_dev);
	if (r) {
		ti->error = "Cache data device lookup failed";
		goto bad;
	}
	wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);

	/*
	 * Parse the cache block size
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;
	if (sscanf(string, "%u%c", &wc->block_size, &dummy) != 1 ||
	    wc->block_size < 512 || wc->block_size > PAGE_SIZE ||
	    (wc->block_size & (wc->block_size - 1))) {
		r = -EINVAL;
		ti->error = "Invalid block size";
		goto bad;
	}
	wc->block_size_bits = __ffs(wc->block_size);

	wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
	wc->autocommit_blocks = !WC_MODE_PMEM(wc) ? AUTOCOMMIT_BLOCKS_SSD : AUTOCOMMIT_BLOCKS_PMEM;
	wc->autocommit_jiffies = msecs_to_jiffies(AUTOCOMMIT_MSEC);

	/*
	 * Parse optional arguments
	 */
	r = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
	if (r)
		goto bad;

	while (opt_params) {
		string = dm_shift_arg(&as), opt_params--;
		if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
			unsigned long long start_sector;
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
				goto invalid_optional;
			wc->start_sector = start_sector;
			if (wc->start_sector != start_sector ||
			    wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
				goto invalid_optional;
		} else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1)
				goto invalid_optional;
			if (high_wm_percent < 0 || high_wm_percent > 100)
				goto invalid_optional;
			wc->high_wm_percent_set = true;
		} else if (!strcasecmp(string, "low_watermark") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%d%c", &low_wm_percent, &dummy) != 1)
				goto invalid_optional;
			if (low_wm_percent < 0 || low_wm_percent > 100)
				goto invalid_optional;
			wc->low_wm_percent_set = true;
		} else if (!strcasecmp(string, "writeback_jobs") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &wc->max_writeback_jobs, &dummy) != 1)
				goto invalid_optional;
			wc->max_writeback_jobs_set = true;
		} else if (!strcasecmp(string, "autocommit_blocks") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &wc->autocommit_blocks, &dummy) != 1)
				goto invalid_optional;
			wc->autocommit_blocks_set = true;
		} else if (!strcasecmp(string, "autocommit_time") && opt_params >= 1) {
			unsigned autocommit_msecs;
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &autocommit_msecs, &dummy) != 1)
				goto invalid_optional;
			if (autocommit_msecs > 3600000)
				goto invalid_optional;
			wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs);
			wc->autocommit_time_set = true;
		} else if (!strcasecmp(string, "max_age") && opt_params >= 1) {
			unsigned max_age_msecs;
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &max_age_msecs, &dummy) != 1)
				goto invalid_optional;
			if (max_age_msecs > 86400000)
				goto invalid_optional;
			wc->max_age = msecs_to_jiffies(max_age_msecs);
		} else if (!strcasecmp(string, "cleaner")) {
			wc->cleaner = true;
		} else if (!strcasecmp(string, "fua")) {
			if (WC_MODE_PMEM(wc)) {
				wc->writeback_fua = true;
				wc->writeback_fua_set = true;
			} else goto invalid_optional;
		} else if (!strcasecmp(string, "nofua")) {
			if (WC_MODE_PMEM(wc)) {
				wc->writeback_fua = false;
				wc->writeback_fua_set = true;
			} else goto invalid_optional;
		} else {
invalid_optional:
			r = -EINVAL;
			ti->error = "Invalid optional argument";
			goto bad;
		}
	}

	if (high_wm_percent < low_wm_percent) {
		r = -EINVAL;
		ti->error = "High watermark must be greater than or equal to low watermark";
		goto bad;
	}

	if (WC_MODE_PMEM(wc)) {
		r = persistent_memory_claim(wc);
		if (r) {
			ti->error = "Unable to map persistent memory for cache";
			goto bad;
		}
	} else {
		struct dm_io_region region;
		struct dm_io_request req;
		size_t n_blocks, n_metadata_blocks;
		uint64_t n_bitmap_bits;

		wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT;

		bio_list_init(&wc->flush_list);
		wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush");
		if (IS_ERR(wc->flush_thread)) {
			r = PTR_ERR(wc->flush_thread);
			wc->flush_thread = NULL;
			ti->error = "Couldn't spawn flush thread";
			goto bad;
		}
		wake_up_process(wc->flush_thread);

		r = calculate_memory_size(wc->memory_map_size, wc->block_size,
					  &n_blocks, &n_metadata_blocks);
		if (r) {
			ti->error = "Invalid device size";
			goto bad;
		}

		n_bitmap_bits = (((uint64_t)n_metadata_blocks << wc->block_size_bits) +
				 BITMAP_GRANULARITY - 1) / BITMAP_GRANULARITY;
		/* this is limitation of test_bit functions */
		if (n_bitmap_bits > 1U << 31) {
			r = -EFBIG;
			ti->error = "Invalid device size";
			goto bad;
		}

		wc->memory_map = vmalloc(n_metadata_blocks << wc->block_size_bits);
		if (!wc->memory_map) {
			r = -ENOMEM;
			ti->error = "Unable to allocate memory for metadata";
			goto bad;
		}

		wc->dm_kcopyd = dm_kcopyd_client_create(&dm_kcopyd_throttle);
		if (IS_ERR(wc->dm_kcopyd)) {
			r = PTR_ERR(wc->dm_kcopyd);
			ti->error = "Unable to allocate dm-kcopyd client";
			wc->dm_kcopyd = NULL;
			goto bad;
		}

		wc->metadata_sectors = n_metadata_blocks << (wc->block_size_bits - SECTOR_SHIFT);
		wc->dirty_bitmap_size = (n_bitmap_bits + BITS_PER_LONG - 1) /
			BITS_PER_LONG * sizeof(unsigned long);
		wc->dirty_bitmap = vzalloc(wc->dirty_bitmap_size);
		if (!wc->dirty_bitmap) {
			r = -ENOMEM;
			ti->error = "Unable to allocate dirty bitmap";
			goto bad;
		}

		region.bdev = wc->ssd_dev->bdev;
		region.sector = wc->start_sector;
		region.count = wc->metadata_sectors;
		req.bi_op = REQ_OP_READ;
		req.bi_op_flags = REQ_SYNC;
		req.mem.type = DM_IO_VMA;
		req.mem.ptr.vma = (char *)wc->memory_map;
		req.client = wc->dm_io;
		req.notify.fn = NULL;

		r = dm_io(&req, 1, &region, NULL);
		if (r) {
			ti->error = "Unable to read metadata";
			goto bad;
		}
	}

	r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
	if (r) {
		ti->error = "Hardware memory error when reading superblock";
		goto bad;
	}
	if (!le32_to_cpu(s.magic) && !le32_to_cpu(s.version)) {
		r = init_memory(wc);
		if (r) {
			ti->error = "Unable to initialize device";
			goto bad;
		}
		r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
		if (r) {
			ti->error = "Hardware memory error when reading superblock";
			goto bad;
		}
	}

	if (le32_to_cpu(s.magic) != MEMORY_SUPERBLOCK_MAGIC) {
		ti->error = "Invalid magic in the superblock";
		r = -EINVAL;
		goto bad;
	}

	if (le32_to_cpu(s.version) != MEMORY_SUPERBLOCK_VERSION) {
		ti->error = "Invalid version in the superblock";
		r = -EINVAL;
		goto bad;
	}

	if (le32_to_cpu(s.block_size) != wc->block_size) {
		ti->error = "Block size does not match superblock";
		r = -EINVAL;
		goto bad;
	}

	wc->n_blocks = le64_to_cpu(s.n_blocks);

	offset = wc->n_blocks * sizeof(struct wc_memory_entry);
	if (offset / sizeof(struct wc_memory_entry) != le64_to_cpu(sb(wc)->n_blocks)) {
overflow:
		ti->error = "Overflow in size calculation";
		r = -EINVAL;
		goto bad;
	}
	offset += sizeof(struct wc_memory_superblock);
	if (offset < sizeof(struct wc_memory_superblock))
		goto overflow;
	offset = (offset + wc->block_size - 1) & ~(size_t)(wc->block_size - 1);
	data_size = wc->n_blocks * (size_t)wc->block_size;
	if (!offset || (data_size / wc->block_size != wc->n_blocks) ||
	    (offset + data_size < offset))
		goto overflow;
	if (offset + data_size > wc->memory_map_size) {
		ti->error = "Memory area is too small";
		r = -EINVAL;
		goto bad;
	}

	wc->metadata_sectors = offset >> SECTOR_SHIFT;
	wc->block_start = (char *)sb(wc) + offset;

	x = (uint64_t)wc->n_blocks * (100 - high_wm_percent);
	x += 50;
	do_div(x, 100);
	wc->freelist_high_watermark = x;
	x = (uint64_t)wc->n_blocks * (100 - low_wm_percent);
	x += 50;
	do_div(x, 100);
	wc->freelist_low_watermark = x;

	if (wc->cleaner)
		activate_cleaner(wc);

	r = writecache_alloc_entries(wc);
	if (r) {
		ti->error = "Cannot allocate memory";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->flush_supported = true;
	ti->num_discard_bios = 1;

	if (WC_MODE_PMEM(wc))
		persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);

	return 0;

bad_arguments:
	r = -EINVAL;
	ti->error = "Bad arguments";
bad:
	writecache_dtr(ti);
	return r;
}

static void writecache_status(struct dm_target *ti, status_type_t type,
			      unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_writecache *wc = ti->private;
	unsigned extra_args;
	unsigned sz = 0;
	uint64_t x;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%ld %llu %llu %llu", writecache_has_error(wc),
		       (unsigned long long)wc->n_blocks, (unsigned long long)wc->freelist_size,
		       (unsigned long long)wc->writeback_size);
		break;
	case STATUSTYPE_TABLE:
		DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 'p' : 's',
				wc->dev->name, wc->ssd_dev->name, wc->block_size);
		extra_args = 0;
		if (wc->start_sector)
			extra_args += 2;
		if (wc->high_wm_percent_set && !wc->cleaner)
			extra_args += 2;
		if (wc->low_wm_percent_set && !wc->cleaner)
			extra_args += 2;
		if (wc->max_writeback_jobs_set)
			extra_args += 2;
		if (wc->autocommit_blocks_set)
			extra_args += 2;
		if (wc->autocommit_time_set)
			extra_args += 2;
		if (wc->max_age != MAX_AGE_UNSPECIFIED)
			extra_args += 2;
		if (wc->cleaner)
			extra_args++;
		if (wc->writeback_fua_set)
			extra_args++;

		DMEMIT("%u", extra_args);
		if (wc->start_sector)
			DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector);
		if (wc->high_wm_percent_set && !wc->cleaner) {
			x = (uint64_t)wc->freelist_high_watermark * 100;
			x += wc->n_blocks / 2;
			do_div(x, (size_t)wc->n_blocks);
			DMEMIT(" high_watermark %u", 100 - (unsigned)x);
		}
		if (wc->low_wm_percent_set && !wc->cleaner) {
			x = (uint64_t)wc->freelist_low_watermark * 100;
			x += wc->n_blocks / 2;
			do_div(x, (size_t)wc->n_blocks);
			DMEMIT(" low_watermark %u", 100 - (unsigned)x);
		}
		if (wc->max_writeback_jobs_set)
			DMEMIT(" writeback_jobs %u", wc->max_writeback_jobs);
		if (wc->autocommit_blocks_set)
			DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks);
		if (wc->autocommit_time_set)
			DMEMIT(" autocommit_time %u", jiffies_to_msecs(wc->autocommit_jiffies));
		if (wc->max_age != MAX_AGE_UNSPECIFIED)
			DMEMIT(" max_age %u", jiffies_to_msecs(wc->max_age));
		if (wc->cleaner)
			DMEMIT(" cleaner");
		if (wc->writeback_fua_set)
			DMEMIT(" %sfua", wc->writeback_fua ? "" : "no");
		break;
	}
}

static struct target_type writecache_target = {
	.name			= "writecache",
	.version		= {1, 3, 0},
	.module			= THIS_MODULE,
	.ctr			= writecache_ctr,
	.dtr			= writecache_dtr,
	.status			= writecache_status,
	.postsuspend		= writecache_suspend,
	.resume			= writecache_resume,
	.message		= writecache_message,
	.map			= writecache_map,
	.end_io			= writecache_end_io,
	.iterate_devices	= writecache_iterate_devices,
	.io_hints		= writecache_io_hints,
};

static int __init dm_writecache_init(void)
{
	int r;

	r = dm_register_target(&writecache_target);
	if (r) {
		DMERR("register failed %d", r);
		return r;
	}

	return 0;
}

static void __exit dm_writecache_exit(void)
{
	dm_unregister_target(&writecache_target);
}

module_init(dm_writecache_init);
module_exit(dm_writecache_exit);

MODULE_DESCRIPTION(DM_NAME " writecache target");
MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");