// SPDX-License-Identifier: GPL-2.0
/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/sched/clock.h>
#include <trace/events/bcache.h>
/* Rate limiting */

static uint64_t __calc_target_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;

	/*
	 * This is the size of the cache, minus the amount used for
	 * flash-only devices
	 */
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
				bcache_flash_devs_sectors_dirty(c);

	/*
	 * Unfortunately there is no control of global dirty data.  If the
	 * user states that they want 10% dirty data in the cache, and has,
	 * e.g., 5 backing volumes of equal size, we try and ensure each
	 * backing volume uses about 2% of the cache for dirty data.
	 * (A worked example follows this function.)
	 */
	uint32_t bdev_share =
		div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
				c->cached_dev_sectors);

	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	/* Ensure each backing dev gets at least one dirty share */
	if (bdev_share < 1)
		bdev_share = 1;

	return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
}
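/*
 * Worked example (illustrative numbers only, not taken from this code):
 * with writeback_percent = 10, a 1 TiB cache and 5 equally sized backing
 * volumes, cache_dirty_target is ~100 GiB and each volume's bdev_share is
 * ~1/5 at WRITEBACK_SHARE_SHIFT scale, so __calc_target_rate() returns a
 * per-volume target of ~20 GiB, i.e. ~2% of the cache as the comment
 * above describes.
 */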
static void __update_writeback_rate(struct cached_dev *dc)
{
	/*
	 * PI controller:
	 * Figures out the amount that should be written per second.
	 *
	 * First, the error (number of sectors that are dirty beyond our
	 * target) is calculated.  The error is accumulated (numerically
	 * integrated).
	 *
	 * Then, the proportional value and integral value are scaled
	 * based on configured values.  These are stored as inverses to
	 * avoid fixed point math and to make configuration easy-- e.g.
	 * the default value of 40 for writeback_rate_p_term_inverse
	 * attempts to write at a rate that would retire all the dirty
	 * blocks in 40 seconds.
	 *
	 * The writeback_rate_i_inverse value of 10000 means that 1/10000th
	 * of the error is accumulated in the integral term per second.
	 * This acts as a slow, long-term average that is not subject to
	 * variations in usage like the p term.
	 *
	 * (A worked numerical example follows this function.)
	 */
	int64_t target = __calc_target_rate(dc);
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t error = dirty - target;
	int64_t proportional_scaled =
		div_s64(error, dc->writeback_rate_p_term_inverse);
	int64_t integral_scaled;
	uint32_t new_rate;

	if ((error < 0 && dc->writeback_rate_integral > 0) ||
	    (error > 0 && time_before64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))) {
		/*
		 * Only decrease the integral term if it's more than
		 * zero.  Only increase the integral term if the device
		 * is keeping up.  (Don't wind up the integral
		 * ineffectively in either case).
		 *
		 * It's necessary to scale this by
		 * writeback_rate_update_seconds to keep the integral
		 * term dimensioned properly.
		 */
		dc->writeback_rate_integral += error *
			dc->writeback_rate_update_seconds;
	}

	integral_scaled = div_s64(dc->writeback_rate_integral,
			dc->writeback_rate_i_term_inverse);

	new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
			dc->writeback_rate_minimum, NSEC_PER_SEC);

	dc->writeback_rate_proportional = proportional_scaled;
	dc->writeback_rate_integral_scaled = integral_scaled;
	dc->writeback_rate_change = new_rate - dc->writeback_rate.rate;
	dc->writeback_rate.rate = new_rate;
	dc->writeback_rate_target = target;
}
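/*
 * Worked example for the PI controller above (illustrative numbers only,
 * assuming a 5 second update interval): if the device is 4,000,000 sectors
 * over target, error = 4,000,000 and the proportional term with
 * writeback_rate_p_term_inverse = 40 is 100,000 sectors/s (enough to
 * retire the excess in 40 s).  If the error persists and the device is
 * keeping up, each update adds error * writeback_rate_update_seconds =
 * 20,000,000 to the integral accumulator, which divided by
 * writeback_rate_i_term_inverse = 10000 contributes a further
 * 2,000 sectors/s.  The sum is then clamped to
 * [writeback_rate_minimum, NSEC_PER_SEC].
 */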
static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);

	down_read(&dc->writeback_lock);

	if (atomic_read(&dc->has_dirty) &&
	    dc->writeback_percent)
		__update_writeback_rate(dc);

	up_read(&dc->writeback_lock);

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
}
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    !dc->writeback_percent)
		return 0;

	return bch_next_delay(&dc->writeback_rate, sectors);
}
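/*
 * Note (summary, not original source text): bch_next_delay() converts the
 * number of sectors just issued and the current writeback_rate into a
 * delay in jiffies, so callers sleep long enough that long-run writeback
 * throughput tracks the rate chosen by the PI controller above.
 */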
struct dirty_io {
	struct closure		cl;
	struct cached_dev	*dc;
	uint16_t		sequence;
	struct bio		bio;
};
static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio, bio->bi_inline_vecs,
		 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_iter.bi_size	= KEY_SIZE(&w->key) << 9;
	bio->bi_private		= w;
	bch_bio_map(bio, NULL);
}
static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	kfree(io);
}
static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;

	bio_free_pages(&io->bio);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		int ret;
		unsigned i;
		struct keylist keys;

		bch_keylist_init(&keys);

		bkey_copy(keys.top, &w->key);
		SET_KEY_DIRTY(keys.top, false);
		bch_keylist_push(&keys);

		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

		if (ret)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(ret
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}
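/*
 * Note (summary, not original source text): bch_btree_insert() is passed
 * &w->key as the replace key, so the dirty bit is only cleared in the
 * btree if the key has not been overwritten by a foreground write in the
 * meantime; if it has, the insert fails and is counted as a writeback
 * collision instead of silently discarding the newer data.
 */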
static void dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (bio->bi_status)
		SET_KEY_DIRTY(&w->key, false);

	closure_put(&io->cl);
}
static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;

	uint16_t next_sequence;

	if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
		/* Not our turn to write; wait for a write to complete */
		closure_wait(&dc->writeback_ordering_wait, cl);

		if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
			/*
			 * Edge case-- it happened in indeterminate order
			 * relative to when we were added to wait list..
			 */
			closure_wake_up(&dc->writeback_ordering_wait);
		}

		continue_at(cl, write_dirty, io->dc->writeback_write_wq);
		return;
	}

	next_sequence = io->sequence + 1;

	/*
	 * IO errors are signalled using the dirty bit on the key.
	 * If we failed to read, we should not attempt to write to the
	 * backing device.  Instead, immediately go to write_dirty_finish
	 * to clean up.
	 */
	if (KEY_DIRTY(&w->key)) {
		dirty_init(w);
		bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
		io->bio.bi_iter.bi_sector = KEY_START(&w->key);
		bio_set_dev(&io->bio, io->dc->bdev);
		io->bio.bi_end_io	= dirty_endio;

		closure_bio_submit(&io->bio, cl);
	}

	atomic_set(&dc->writeback_sequence_next, next_sequence);
	closure_wake_up(&dc->writeback_ordering_wait);

	continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}
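/*
 * Note (summary, not original source text): each dirty_io carries a
 * sequence number assigned in read_dirty().  writeback_sequence_next is
 * the sequence whose turn it is to issue its backing-device write, and
 * writeback_ordering_wait parks any dirty_io whose read completed out of
 * order, so the writes reach the backing device in the order the keys
 * were gathered.
 */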
static void read_dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	/* is_read = 1 */
	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    bio->bi_status, 1,
			    "reading dirty data from cache");

	dirty_endio(bio);
}
static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(&io->bio, cl);

	continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}
static void read_dirty(struct cached_dev *dc)
{
	unsigned delay = 0;
	struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
	size_t size;
	int nk, i;
	struct dirty_io *io;
	struct closure cl;
	uint16_t sequence = 0;

	BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
	atomic_set(&dc->writeback_sequence_next, sequence);
	closure_init_stack(&cl);

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */

	next = bch_keybuf_next(&dc->writeback_keys);

	while (!kthread_should_stop() && next) {
		size = 0;
		nk = 0;

		do {
			BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));

			/*
			 * Don't combine too many operations, even if they
			 * are all small.
			 */
			if (nk >= MAX_WRITEBACKS_IN_PASS)
				break;

			/*
			 * If the current operation is very large, don't
			 * further combine operations.
			 */
			if (size >= MAX_WRITESIZE_IN_PASS)
				break;

			/*
			 * Operations are only eligible to be combined
			 * if they are contiguous.
			 *
			 * TODO: add a heuristic willing to fire a
			 * certain amount of non-contiguous IO per pass,
			 * so that we can benefit from backing device
			 * command merging.
			 */
			if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
						&START_KEY(&next->key)))
				break;

			size += KEY_SIZE(&next->key);
			keys[nk++] = next;
		} while ((next = bch_keybuf_next(&dc->writeback_keys)));

		/* Now we have gathered a set of 1..5 keys to write back. */
		for (i = 0; i < nk; i++) {
			w = keys[i];

			io = kzalloc(sizeof(struct dirty_io) +
				     sizeof(struct bio_vec) *
				     DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
				     GFP_KERNEL);
			if (!io)
				goto err;

			w->private	= io;
			io->dc		= dc;
			io->sequence	= sequence++;

			dirty_init(w);
			bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
			io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
			bio_set_dev(&io->bio,
				    PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
			io->bio.bi_end_io	= read_dirty_endio;

			if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
				goto err_free;

			trace_bcache_writeback(&w->key);

			down(&dc->in_flight);

			/* We've acquired a semaphore for the maximum
			 * simultaneous number of writebacks; from here
			 * everything happens asynchronously.
			 */
			closure_call(&io->cl, read_dirty_submit, NULL, &cl);
		}

		delay = writeback_delay(dc, size);

		/* If the control system would wait for at least half a
		 * second, and there's been no reqs hitting the backing disk
		 * for awhile: use an alternate mode where we have at most
		 * one contiguous set of writebacks in flight at a time.  If
		 * someone wants to do IO it will be quick, as it will only
		 * have to contend with one operation in flight, and we'll
		 * be round-tripping data to the backing disk as quickly as
		 * it can accept it.
		 */
		if (delay >= HZ / 2) {
			/* 3 means at least 1.5 seconds, up to 7.5 if we
			 * have slowed way down.
			 */
			if (atomic_inc_return(&dc->backing_idle) >= 3) {
				/* Wait for current I/Os to finish */
				closure_sync(&cl);
				/* And immediately launch a new set. */
				delay = 0;
			}
		}

		while (!kthread_should_stop() && delay) {
			schedule_timeout_interruptible(delay);
			delay = writeback_delay(dc, 0);
		}
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */
	closure_sync(&cl);
}
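/*
 * Note (summary, not original source text): read_dirty() batches up to
 * MAX_WRITEBACKS_IN_PASS contiguous keys (bounded by MAX_WRITESIZE_IN_PASS
 * sectors) per pass so the backing device sees merged, in-order writes,
 * then sleeps according to writeback_delay().  When the computed delay is
 * long and the backing device has stayed idle for several passes, it
 * instead waits for the in-flight set to complete and immediately issues
 * the next one, keeping at most one contiguous set outstanding.
 */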
/* Scan for dirty data */
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned stripe_offset, stripe, sectors_dirty;

	if (!d)
		return;

	stripe = offset_to_stripe(d, offset);
	stripe_offset = offset & (d->stripe_size - 1);

	while (nr_sectors) {
		int s = min_t(unsigned, abs(nr_sectors),
			      d->stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		if (stripe >= d->nr_stripes)
			return;

		sectors_dirty = atomic_add_return(s,
					d->stripe_sectors_dirty + stripe);
		if (sectors_dirty == d->stripe_size)
			set_bit(stripe, d->full_dirty_stripes);
		else
			clear_bit(stripe, d->full_dirty_stripes);

		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}
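/*
 * Worked example (illustrative numbers only): with a stripe_size of 2048
 * sectors, marking 100 sectors dirty at offset 3000 lands in stripe 1 at
 * stripe_offset 952; the per-stripe dirty counter is incremented by 100,
 * and only when it reaches exactly stripe_size (2048) is the stripe's bit
 * set in full_dirty_stripes for refill_full_stripes() to find.
 */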
static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);

	BUG_ON(KEY_INODE(k) != dc->disk.id);

	return KEY_DIRTY(k);
}
static void refill_full_stripes(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	unsigned start_stripe, stripe, next_stripe;
	bool wrapped = false;

	stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));

	if (stripe >= dc->disk.nr_stripes)
		stripe = 0;

	start_stripe = stripe;

	while (1) {
		stripe = find_next_bit(dc->disk.full_dirty_stripes,
				       dc->disk.nr_stripes, stripe);

		if (stripe == dc->disk.nr_stripes)
			goto next;

		next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
						 dc->disk.nr_stripes, stripe);

		buf->last_scanned = KEY(dc->disk.id,
					stripe * dc->disk.stripe_size, 0);

		bch_refill_keybuf(dc->disk.c, buf,
				  &KEY(dc->disk.id,
				       next_stripe * dc->disk.stripe_size, 0),
				  dirty_pred);

		if (array_freelist_empty(&buf->freelist))
			return;

		stripe = next_stripe;
next:
		if (wrapped && stripe > start_stripe)
			return;

		if (stripe == dc->disk.nr_stripes) {
			stripe = 0;
			wrapped = true;
		}
	}
}
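/*
 * Note (summary, not original source text): refill_full_stripes() walks
 * the full_dirty_stripes bitmap starting from the stripe containing
 * last_scanned and refills the keybuf only from runs of completely dirty
 * stripes.  It is used when partial_stripes_expensive is set, so that
 * writeback prefers full-stripe writes on backing devices where partial
 * stripes would force read-modify-write cycles.
 */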
/*
 * Returns true if we scanned the entire disk
 */
static bool refill_dirty(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	struct bkey start = KEY(dc->disk.id, 0, 0);
	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
	struct bkey start_pos;

	/*
	 * make sure keybuf pos is inside the range for this disk - at bringup
	 * we might not be attached yet so this disk's inode nr isn't
	 * initialized then
	 */
	if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
	    bkey_cmp(&buf->last_scanned, &end) > 0)
		buf->last_scanned = start;

	if (dc->partial_stripes_expensive) {
		refill_full_stripes(dc);
		if (array_freelist_empty(&buf->freelist))
			return false;
	}

	start_pos = buf->last_scanned;
	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

	if (bkey_cmp(&buf->last_scanned, &end) < 0)
		return false;

	/*
	 * If we get to the end start scanning again from the beginning, and
	 * only scan up to where we initially started scanning from:
	 */
	buf->last_scanned = start;
	bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

	return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}
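/*
 * Note (summary, not original source text): refill_dirty() scans from
 * last_scanned to the end of this device's keyspace and, if it reaches
 * the end, wraps around and scans from the start up to the original
 * position, so a "true" return means the whole btree range for this
 * backing device has been examined in this pass.
 */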
static int bch_writeback_thread(void *arg)
{
	struct cached_dev *dc = arg;
	bool searched_full_index;

	bch_ratelimit_reset(&dc->writeback_rate);

	while (!kthread_should_stop()) {
		down_write(&dc->writeback_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!atomic_read(&dc->has_dirty) ||
		    (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
		     !dc->writeback_running)) {
			up_write(&dc->writeback_lock);

			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				return 0;
			}

			schedule();
			continue;
		}
		set_current_state(TASK_RUNNING);

		searched_full_index = refill_dirty(dc);

		if (searched_full_index &&
		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
			atomic_set(&dc->has_dirty, 0);
			cached_dev_put(dc);
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
			bch_write_bdev_super(dc, NULL);
		}

		up_write(&dc->writeback_lock);

		read_dirty(dc);

		if (searched_full_index) {
			unsigned delay = dc->writeback_delay * HZ;

			while (delay &&
			       !kthread_should_stop() &&
			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
				delay = schedule_timeout_interruptible(delay);

			bch_ratelimit_reset(&dc->writeback_rate);
		}
	}

	return 0;
}
/* Init */

struct sectors_dirty_init {
	struct btree_op	op;
	unsigned	inode;
};
static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
				 struct bkey *k)
{
	struct sectors_dirty_init *op = container_of(_op,
					struct sectors_dirty_init, op);
	if (KEY_INODE(k) > op->inode)
		return MAP_DONE;

	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
					     KEY_START(k), KEY_SIZE(k));

	return MAP_CONTINUE;
}
void bch_sectors_dirty_init(struct bcache_device *d)
{
	struct sectors_dirty_init op;

	bch_btree_op_init(&op.op, -1);
	op.inode = d->id;

	bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
			   sectors_dirty_init_fn, 0);
}
void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);
	init_rwsem(&dc->writeback_lock);
	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_metadata		= true;
	dc->writeback_running		= true;
	dc->writeback_percent		= 10;
	dc->writeback_delay		= 30;
	dc->writeback_rate.rate		= 1024;
	dc->writeback_rate_minimum	= 8;

	dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
	dc->writeback_rate_p_term_inverse = 40;
	dc->writeback_rate_i_term_inverse = 10000;

	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}
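/*
 * Note (assumed units, for orientation): writeback_rate.rate is expressed
 * in 512-byte sectors per second, so the initial value of 1024 is roughly
 * 512 KiB/s and writeback_rate_minimum = 8 is a 4 KiB/s floor; the PI
 * controller in __update_writeback_rate() adjusts the rate from there
 * every writeback_rate_update_seconds.
 */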
int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
	dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
						WQ_MEM_RECLAIM, 0);
	if (!dc->writeback_write_wq)
		return -ENOMEM;

	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
					      "bcache_writeback");
	if (IS_ERR(dc->writeback_thread))
		return PTR_ERR(dc->writeback_thread);

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);

	bch_writeback_queue(dc);

	return 0;
}