// SPDX-License-Identifier: GPL-2.0
/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/sched/clock.h>
#include <trace/events/bcache.h>

static void update_gc_after_writeback(struct cache_set *c)
{
	if (c->gc_after_writeback != (BCH_ENABLE_AUTO_GC) ||
	    c->gc_stats.in_use < BCH_AUTO_GC_DIRTY_THRESHOLD)
		return;

	c->gc_after_writeback |= BCH_DO_AUTO_GC;
}

static uint64_t __calc_target_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;

	/*
	 * This is the size of the cache, minus the amount used for
	 * flash-only devices
	 */
	uint64_t cache_sectors = c->nbuckets * c->cache->sb.bucket_size -
				atomic_long_read(&c->flash_dev_dirty_sectors);

	/*
	 * Unfortunately there is no control of global dirty data. If the
	 * user states that they want 10% dirty data in the cache, and has,
	 * e.g., 5 backing volumes of equal size, we try and ensure each
	 * backing volume uses about 2% of the cache for dirty data.
	 */
	uint32_t bdev_share =
		div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
			  c->cached_dev_sectors);

	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	/* Ensure each backing dev gets at least one dirty share */
	if (bdev_share < 1)
		bdev_share = 1;

	return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
}

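/*
 * Illustrative example (numbers are not taken from the code above): with
 * writeback_percent = 10, 100 GiB of usable cache_sectors and two equally
 * sized backing devices, cache_dirty_target is 10 GiB and each device's
 * bdev_share is one half in fixed point, so each backing device is allowed
 * roughly 5 GiB of dirty data before writeback speeds up.
 */
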
static void __update_writeback_rate(struct cached_dev *dc)
{
	/*
	 * PI controller:
	 * Figures out the amount that should be written per second.
	 *
	 * First, the error (number of sectors that are dirty beyond our
	 * target) is calculated. The error is accumulated (numerically
	 * integrated).
	 *
	 * Then, the proportional value and integral value are scaled
	 * based on configured values. These are stored as inverses to
	 * avoid fixed point math and to make configuration easy-- e.g.
	 * the default value of 40 for writeback_rate_p_term_inverse
	 * attempts to write at a rate that would retire all the dirty
	 * blocks in 40 seconds.
	 *
	 * The writeback_rate_i_inverse value of 10000 means that 1/10000th
	 * of the error is accumulated in the integral term per second.
	 * This acts as a slow, long-term average that is not subject to
	 * variations in usage like the p term.
	 */
	int64_t target = __calc_target_rate(dc);
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t error = dirty - target;
	int64_t proportional_scaled =
		div_s64(error, dc->writeback_rate_p_term_inverse);
	int64_t integral_scaled;
	uint32_t new_rate;

	if ((error < 0 && dc->writeback_rate_integral > 0) ||
	    (error > 0 && time_before64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))) {
		/*
		 * Only decrease the integral term if it's more than
		 * zero. Only increase the integral term if the device
		 * is keeping up. (Don't wind up the integral
		 * ineffectively in either case).
		 *
		 * It's necessary to scale this by
		 * writeback_rate_update_seconds to keep the integral
		 * term dimensioned properly.
		 */
		dc->writeback_rate_integral += error *
			dc->writeback_rate_update_seconds;
	}

	integral_scaled = div_s64(dc->writeback_rate_integral,
			dc->writeback_rate_i_term_inverse);

	new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
			dc->writeback_rate_minimum, NSEC_PER_SEC);

	dc->writeback_rate_proportional = proportional_scaled;
	dc->writeback_rate_integral_scaled = integral_scaled;
	dc->writeback_rate_change = new_rate -
			atomic_long_read(&dc->writeback_rate.rate);
	atomic_long_set(&dc->writeback_rate.rate, new_rate);
	dc->writeback_rate_target = target;
}

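/*
 * Worked example with illustrative numbers: if the device is 4000 sectors
 * over target, the default writeback_rate_p_term_inverse of 40 contributes
 * 4000 / 40 = 100 sectors/s of proportional rate. With the default
 * writeback_rate_i_term_inverse of 10000 and a 5 second update period,
 * each update adds 4000 * 5 = 20000 to the integral, i.e. another
 * 2 sectors/s of steady-state rate per update while the error persists.
 */
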
static bool set_at_max_writeback_rate(struct cache_set *c,
				      struct cached_dev *dc)
{
	/* Don't set max writeback rate if it is disabled */
	if (!c->idle_max_writeback_rate_enabled)
		return false;

	/* Don't set max writeback rate if gc is running */
	if (!c->gc_mark_valid)
		return false;
	/*
	 * Idle_counter is increased every time update_writeback_rate() is
	 * called. If all backing devices attached to the same cache set have
	 * identical dc->writeback_rate_update_seconds values, it is about 6
	 * rounds of update_writeback_rate() on each backing device before
	 * c->at_max_writeback_rate is set to 1, and then the max writeback
	 * rate is set to each dc->writeback_rate.rate.
	 * In order to avoid extra locking cost for counting the exact number
	 * of dirty cached devices, c->attached_dev_nr is used to calculate
	 * the idle threshold. It might be bigger if not all cached devices
	 * are in writeback mode, but it still works well with limited extra
	 * rounds of update_writeback_rate().
	 */
	if (atomic_inc_return(&c->idle_counter) <
	    atomic_read(&c->attached_dev_nr) * 6)
		return false;

	if (atomic_read(&c->at_max_writeback_rate) != 1)
		atomic_set(&c->at_max_writeback_rate, 1);

	atomic_long_set(&dc->writeback_rate.rate, INT_MAX);

	/* keep writeback_rate_target as existing value */
	dc->writeback_rate_proportional = 0;
	dc->writeback_rate_integral_scaled = 0;
	dc->writeback_rate_change = 0;

	/*
	 * Check c->idle_counter and c->at_max_writeback_rate again in case
	 * new I/O arrives just before set_at_max_writeback_rate() returns.
	 * Then the writeback rate is set to 1, and its new value should be
	 * decided via __update_writeback_rate().
	 */
	if ((atomic_read(&c->idle_counter) <
	     atomic_read(&c->attached_dev_nr) * 6) ||
	    !atomic_read(&c->at_max_writeback_rate))
		return false;

	return true;
}

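/*
 * Rough numbers for illustration: with two attached backing devices and the
 * default 5 second update period, the cache set is treated as idle after
 * 2 * 6 = 12 calls to update_writeback_rate(), i.e. roughly 30 seconds of
 * updates while the set stays idle.
 */
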
static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);
	struct cache_set *c = dc->disk.c;

	/*
	 * should check BCACHE_DEV_RATE_DW_RUNNING before calling
	 * cancel_delayed_work_sync().
	 */
	set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
	/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
	smp_mb__after_atomic();

	/*
	 * CACHE_SET_IO_DISABLE might be set via sysfs interface,
	 * check it here too.
	 */
	if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) ||
	    test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
		clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
		/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
		smp_mb__after_atomic();
		return;
	}

	if (atomic_read(&dc->has_dirty) && dc->writeback_percent) {
		/*
		 * If the whole cache set is idle, set_at_max_writeback_rate()
		 * will set writeback rate to a max number. Then it is
		 * unnecessary to update writeback rate for an idle cache set
		 * in maximum writeback rate number(s).
		 */
		if (!set_at_max_writeback_rate(c, dc)) {
			down_read(&dc->writeback_lock);
			__update_writeback_rate(dc);
			update_gc_after_writeback(c);
			up_read(&dc->writeback_lock);
		}
	}

	/*
	 * CACHE_SET_IO_DISABLE might be set via sysfs interface,
	 * check it here too.
	 */
	if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) &&
	    !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
		schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
	}

	/*
	 * should check BCACHE_DEV_RATE_DW_RUNNING before calling
	 * cancel_delayed_work_sync().
	 */
	clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
	/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
	smp_mb__after_atomic();
}

static unsigned int writeback_delay(struct cached_dev *dc,
				    unsigned int sectors)
{
	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    !dc->writeback_percent)
		return 0;

	return bch_next_delay(&dc->writeback_rate, sectors);
}

struct dirty_io {
	struct closure		cl;
	struct cached_dev	*dc;
	uint16_t		sequence;
	struct bio		bio;
};

static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio, bio->bi_inline_vecs,
		 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_iter.bi_size	= KEY_SIZE(&w->key) << 9;
	bio->bi_private		= w;
	bch_bio_map(bio, NULL);
}

static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	kfree(io);
}

static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;

	bio_free_pages(&io->bio);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		int ret;
		unsigned int i;
		struct keylist keys;

		bch_keylist_init(&keys);

		bkey_copy(keys.top, &w->key);
		SET_KEY_DIRTY(keys.top, false);
		bch_keylist_push(&keys);

		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

		if (ret)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(ret
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}

static void dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (bio->bi_status) {
		SET_KEY_DIRTY(&w->key, false);
		bch_count_backing_io_errors(io->dc, bio);
	}

	closure_put(&io->cl);
}

static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;

	uint16_t next_sequence;

	if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
		/* Not our turn to write; wait for a write to complete */
		closure_wait(&dc->writeback_ordering_wait, cl);

		if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
			/*
			 * Edge case-- it happened in indeterminate order
			 * relative to when we were added to wait list..
			 */
			closure_wake_up(&dc->writeback_ordering_wait);
		}

		continue_at(cl, write_dirty, io->dc->writeback_write_wq);
		return;
	}

	next_sequence = io->sequence + 1;

	/*
	 * IO errors are signalled using the dirty bit on the key.
	 * If we failed to read, we should not attempt to write to the
	 * backing device. Instead, immediately go to write_dirty_finish
	 * to clean up.
	 */
	if (KEY_DIRTY(&w->key)) {
		dirty_init(w);
		bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
		io->bio.bi_iter.bi_sector = KEY_START(&w->key);
		bio_set_dev(&io->bio, io->dc->bdev);
		io->bio.bi_end_io	= dirty_endio;

		/* I/O request sent to backing device */
		closure_bio_submit(io->dc->disk.c, &io->bio, cl);
	}

	atomic_set(&dc->writeback_sequence_next, next_sequence);
	closure_wake_up(&dc->writeback_ordering_wait);

	continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}

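/*
 * Note on ordering: each dirty_io carries a sequence number, and
 * write_dirty() above only issues its write when
 * dc->writeback_sequence_next matches that sequence; otherwise it parks
 * itself on writeback_ordering_wait until the preceding write bumps the
 * counter. This keeps writes to the backing device in key order.
 */
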
static void read_dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	/* is_read = 1 */
	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    bio->bi_status, 1,
			    "reading dirty data from cache");

	dirty_endio(bio);
}

static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(io->dc->disk.c, &io->bio, cl);

	continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}

static void read_dirty(struct cached_dev *dc)
{
	unsigned int delay = 0;
	struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
	size_t size;
	int nk, i;
	struct dirty_io *io;
	struct closure cl;
	uint16_t sequence = 0;

	BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
	atomic_set(&dc->writeback_sequence_next, sequence);
	closure_init_stack(&cl);

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */

	next = bch_keybuf_next(&dc->writeback_keys);

	while (!kthread_should_stop() &&
	       !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
	       next) {
		size = 0;
		nk = 0;

		do {
			BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));

			/*
			 * Don't combine too many operations, even if they
			 * are all small.
			 */
			if (nk >= MAX_WRITEBACKS_IN_PASS)
				break;

			/*
			 * If the current operation is very large, don't
			 * further combine operations.
			 */
			if (size >= MAX_WRITESIZE_IN_PASS)
				break;

			/*
			 * Operations are only eligible to be combined
			 * if they are contiguous.
			 *
			 * TODO: add a heuristic willing to fire a
			 * certain amount of non-contiguous IO per pass,
			 * so that we can benefit from backing device
			 * command merging.
			 */
			if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
						&START_KEY(&next->key)))
				break;

			size += KEY_SIZE(&next->key);
			keys[nk++] = next;
		} while ((next = bch_keybuf_next(&dc->writeback_keys)));

		/* Now we have gathered a set of 1..5 keys to write back. */
		for (i = 0; i < nk; i++) {
			w = keys[i];

			io = kzalloc(struct_size(io, bio.bi_inline_vecs,
						 DIV_ROUND_UP(KEY_SIZE(&w->key),
							      PAGE_SECTORS)),
				     GFP_KERNEL);
			if (!io)
				goto err;

			w->private	= io;
			io->dc		= dc;
			io->sequence	= sequence++;

			dirty_init(w);
			bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
			io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
			bio_set_dev(&io->bio,
				    PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
			io->bio.bi_end_io	= read_dirty_endio;

			if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
				goto err_free;

			trace_bcache_writeback(&w->key);

			down(&dc->in_flight);

			/*
			 * We've acquired a semaphore for the maximum
			 * simultaneous number of writebacks; from here
			 * everything happens asynchronously.
			 */
			closure_call(&io->cl, read_dirty_submit, NULL, &cl);
		}

		delay = writeback_delay(dc, size);

		while (!kthread_should_stop() &&
		       !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
		       delay) {
			schedule_timeout_interruptible(delay);
			delay = writeback_delay(dc, 0);
		}
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */
	closure_sync(&cl);
}

/* Scan for dirty data */

void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned int stripe_offset, sectors_dirty;
	int stripe;

	if (!d)
		return;

	stripe = offset_to_stripe(d, offset);
	if (stripe < 0)
		return;

	if (UUID_FLASH_ONLY(&c->uuids[inode]))
		atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);

	stripe_offset = offset & (d->stripe_size - 1);

	while (nr_sectors) {
		int s = min_t(unsigned int, abs(nr_sectors),
			      d->stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		if (stripe >= d->nr_stripes)
			return;

		sectors_dirty = atomic_add_return(s,
					d->stripe_sectors_dirty + stripe);
		if (sectors_dirty == d->stripe_size)
			set_bit(stripe, d->full_dirty_stripes);
		else
			clear_bit(stripe, d->full_dirty_stripes);

		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	struct cached_dev *dc = container_of(buf,
					     struct cached_dev,
					     writeback_keys);

	BUG_ON(KEY_INODE(k) != dc->disk.id);

	return KEY_DIRTY(k);
}

static void refill_full_stripes(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	unsigned int start_stripe, next_stripe;
	int stripe;
	bool wrapped = false;

	stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
	if (stripe < 0)
		stripe = 0;

	start_stripe = stripe;

	while (1) {
		stripe = find_next_bit(dc->disk.full_dirty_stripes,
				       dc->disk.nr_stripes, stripe);

		if (stripe == dc->disk.nr_stripes)
			goto next;

		next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
						 dc->disk.nr_stripes, stripe);

		buf->last_scanned = KEY(dc->disk.id,
					stripe * dc->disk.stripe_size, 0);

		bch_refill_keybuf(dc->disk.c, buf,
				  &KEY(dc->disk.id,
				       next_stripe * dc->disk.stripe_size, 0),
				  dirty_pred);

		if (array_freelist_empty(&buf->freelist))
			return;

		stripe = next_stripe;
next:
		if (wrapped && stripe > start_stripe)
			return;

		if (stripe == dc->disk.nr_stripes) {
			stripe = 0;
			wrapped = true;
		}
	}
}

/*
 * Returns true if we scanned the entire disk
 */
static bool refill_dirty(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	struct bkey start = KEY(dc->disk.id, 0, 0);
	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
	struct bkey start_pos;

	/*
	 * make sure keybuf pos is inside the range for this disk - at bringup
	 * we might not be attached yet so this disk's inode nr isn't
	 * initialized then
	 */
	if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
	    bkey_cmp(&buf->last_scanned, &end) > 0)
		buf->last_scanned = start;

	if (dc->partial_stripes_expensive) {
		refill_full_stripes(dc);
		if (array_freelist_empty(&buf->freelist))
			return false;
	}

	start_pos = buf->last_scanned;
	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

	if (bkey_cmp(&buf->last_scanned, &end) < 0)
		return false;

	/*
	 * If we get to the end start scanning again from the beginning, and
	 * only scan up to where we initially started scanning from:
	 */
	buf->last_scanned = start;
	bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

	return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}

static int bch_writeback_thread(void *arg)
{
	struct cached_dev *dc = arg;
	struct cache_set *c = dc->disk.c;
	bool searched_full_index;

	bch_ratelimit_reset(&dc->writeback_rate);

	while (!kthread_should_stop() &&
	       !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
		down_write(&dc->writeback_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		/*
		 * If the bcache device is detaching, skip here and continue
		 * to perform writeback. Otherwise, if no dirty data on cache,
		 * or there is dirty data on cache but writeback is disabled,
		 * the writeback thread should sleep here and wait for others
		 * to wake it up.
		 */
		if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
		    (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
			up_write(&dc->writeback_lock);

			if (kthread_should_stop() ||
			    test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
				set_current_state(TASK_RUNNING);
				break;
			}

			schedule();
			continue;
		}
		set_current_state(TASK_RUNNING);

		searched_full_index = refill_dirty(dc);

		if (searched_full_index &&
		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
			atomic_set(&dc->has_dirty, 0);
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
			bch_write_bdev_super(dc, NULL);
			/*
			 * If bcache device is detaching via sysfs interface,
			 * writeback thread should stop after there is no dirty
			 * data on cache. BCACHE_DEV_DETACHING flag is set in
			 * bch_cached_dev_detach().
			 */
			if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
				struct closure cl;

				closure_init_stack(&cl);
				memset(&dc->sb.set_uuid, 0, 16);
				SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);

				bch_write_bdev_super(dc, &cl);
				closure_sync(&cl);

				up_write(&dc->writeback_lock);
				break;
			}

			/*
			 * When dirty data rate is high (e.g. 50%+), there might
			 * be heavy buckets fragmentation after writeback
			 * finished, which hurts following write performance.
			 * If users really care about write performance they
			 * may set BCH_ENABLE_AUTO_GC via sysfs, then when
			 * BCH_DO_AUTO_GC is set, the garbage collection thread
			 * will be woken up here. After moving gc, the shrunk
			 * btree and discarded free buckets SSD space may be
			 * helpful for following write requests.
			 */
			if (c->gc_after_writeback ==
			    (BCH_ENABLE_AUTO_GC|BCH_DO_AUTO_GC)) {
				c->gc_after_writeback &= ~BCH_DO_AUTO_GC;
				force_wake_up_gc(c);
			}
		}

		up_write(&dc->writeback_lock);

		read_dirty(dc);

		if (searched_full_index) {
			unsigned int delay = dc->writeback_delay * HZ;

			while (delay &&
			       !kthread_should_stop() &&
			       !test_bit(CACHE_SET_IO_DISABLE, &c->flags) &&
			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
				delay = schedule_timeout_interruptible(delay);

			bch_ratelimit_reset(&dc->writeback_rate);
		}
	}

	if (dc->writeback_write_wq) {
		flush_workqueue(dc->writeback_write_wq);
		destroy_workqueue(dc->writeback_write_wq);
	}
	cached_dev_put(dc);
	wait_for_kthread_stop();

	return 0;
}

#define INIT_KEYS_EACH_TIME	500000
#define INIT_KEYS_SLEEP_MS	100

struct sectors_dirty_init {
	struct btree_op	op;
	unsigned int	inode;
	size_t		count;
	struct bkey	start;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
				 struct bkey *k)
{
	struct sectors_dirty_init *op = container_of(_op,
					struct sectors_dirty_init, op);

	if (KEY_INODE(k) > op->inode)
		return MAP_DONE;

	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
					     KEY_START(k), KEY_SIZE(k));

	op->count++;
	if (atomic_read(&b->c->search_inflight) &&
	    !(op->count % INIT_KEYS_EACH_TIME)) {
		bkey_copy_key(&op->start, k);
		return -EAGAIN;
	}

	return MAP_CONTINUE;
}

static int bch_root_node_dirty_init(struct cache_set *c,
				    struct bcache_device *d,
				    struct bkey *k)
{
	struct sectors_dirty_init op;
	int ret;

	bch_btree_op_init(&op.op, -1);
	op.inode = d->id;
	op.count = 0;
	op.start = KEY(op.inode, 0, 0);

	do {
		ret = bcache_btree(map_keys_recurse,
				   k,
				   c->root,
				   &op.op,
				   &op.start,
				   sectors_dirty_init_fn,
				   0);
		if (ret == -EAGAIN)
			schedule_timeout_interruptible(
				msecs_to_jiffies(INIT_KEYS_SLEEP_MS));
		else if (ret < 0) {
			pr_warn("sectors dirty init failed, ret=%d!\n", ret);
			break;
		}
	} while (ret == -EAGAIN);

	return ret;
}

static int bch_dirty_init_thread(void *arg)
{
	struct dirty_init_thrd_info *info = arg;
	struct bch_dirty_init_state *state = info->state;
	struct cache_set *c = state->c;
	struct btree_iter iter;
	struct bkey *k, *p;
	int cur_idx, prev_idx, skip_nr;

	k = p = NULL;
	cur_idx = prev_idx = 0;

	bch_btree_iter_init(&c->root->keys, &iter, NULL);
	k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
	BUG_ON(!k);

	p = k;

	while (k) {
		spin_lock(&state->idx_lock);
		cur_idx = state->key_idx;
		state->key_idx++;
		spin_unlock(&state->idx_lock);

		skip_nr = cur_idx - prev_idx;

		while (skip_nr) {
			k = bch_btree_iter_next_filter(&iter,
						       &c->root->keys,
						       bch_ptr_bad);
			if (k)
				p = k;
			else {
				atomic_set(&state->enough, 1);
				/* Update state->enough earlier */
				smp_mb__after_atomic();
				goto out;
			}
			skip_nr--;
			cond_resched();
		}

		if (p) {
			if (bch_root_node_dirty_init(c, state->d, p) < 0)
				goto out;
		}

		p = NULL;
		prev_idx = cur_idx;
		cond_resched();
	}

out:
	/* In order to wake up state->wait in time */
	smp_mb__before_atomic();
	if (atomic_dec_and_test(&state->started))
		wake_up(&state->wait);

	return 0;
}

static int bch_btre_dirty_init_thread_nr(void)
{
	int n = num_online_cpus()/2;

	if (n == 0)
		n = 1;
	else if (n > BCH_DIRTY_INIT_THRD_MAX)
		n = BCH_DIRTY_INIT_THRD_MAX;

	return n;
}

void bch_sectors_dirty_init(struct bcache_device *d)
{
	int i;
	struct bkey *k = NULL;
	struct btree_iter iter;
	struct sectors_dirty_init op;
	struct cache_set *c = d->c;
	struct bch_dirty_init_state *state;
	char name[32];

	/* Just count root keys if no leaf node */
	if (c->root->level == 0) {
		bch_btree_op_init(&op.op, -1);
		op.inode = d->id;
		op.count = 0;
		op.start = KEY(op.inode, 0, 0);

		for_each_key_filter(&c->root->keys,
				    k, &iter, bch_ptr_invalid)
			sectors_dirty_init_fn(&op.op, c->root, k);
		return;
	}

	state = kzalloc(sizeof(struct bch_dirty_init_state), GFP_KERNEL);
	if (!state) {
		pr_warn("sectors dirty init failed: cannot allocate memory\n");
		return;
	}

	state->c = c;
	state->d = d;
	state->total_threads = bch_btre_dirty_init_thread_nr();
	state->key_idx = 0;
	spin_lock_init(&state->idx_lock);
	atomic_set(&state->started, 0);
	atomic_set(&state->enough, 0);
	init_waitqueue_head(&state->wait);

	for (i = 0; i < state->total_threads; i++) {
		/* Fetch latest state->enough earlier */
		smp_mb__before_atomic();
		if (atomic_read(&state->enough))
			break;

		state->infos[i].state = state;
		atomic_inc(&state->started);
		snprintf(name, sizeof(name), "bch_dirty_init[%d]", i);

		state->infos[i].thread =
			kthread_run(bch_dirty_init_thread,
				    &state->infos[i],
				    name);
		if (IS_ERR(state->infos[i].thread)) {
			pr_err("fails to run thread bch_dirty_init[%d]\n", i);
			for (--i; i >= 0; i--)
				kthread_stop(state->infos[i].thread);
			goto out;
		}
	}

	wait_event_interruptible(state->wait,
		 atomic_read(&state->started) == 0 ||
		 test_bit(CACHE_SET_IO_DISABLE, &c->flags));

out:
	kfree(state);
}

void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);
	init_rwsem(&dc->writeback_lock);
	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_metadata		= true;
	dc->writeback_running		= false;
	dc->writeback_percent		= 10;
	dc->writeback_delay		= 30;
	atomic_long_set(&dc->writeback_rate.rate, 1024);
	dc->writeback_rate_minimum	= 8;

	dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
	dc->writeback_rate_p_term_inverse = 40;
	dc->writeback_rate_i_term_inverse = 10000;

	WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}

int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
	dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
						 WQ_MEM_RECLAIM, 0);
	if (!dc->writeback_write_wq)
		return -ENOMEM;

	cached_dev_get(dc);
	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
					      "bcache_writeback");
	if (IS_ERR(dc->writeback_thread)) {
		cached_dev_put(dc);
		destroy_workqueue(dc->writeback_write_wq);
		return PTR_ERR(dc->writeback_thread);
	}
	dc->writeback_running = true;

	WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);

	bch_writeback_queue(dc);

	return 0;
}
);