/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <trace/events/bcache.h>

/* Rate limiting */
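/*
 * Writeback rate is driven by a simple PD controller.  The target is the
 * share of the cache this backing device may keep dirty (writeback_percent
 * of the cache, split between cached devices by size); the error
 * (dirty - target) and its smoothed derivative are scaled into a
 * sectors-per-second adjustment of writeback_rate.rate.
 *
 * Illustrative numbers only, using the defaults set in
 * bch_cached_dev_writeback_init() (5 s update interval, p_term_inverse
 * 6000): being 6,000,000 sectors over target contributes
 * 6,000,000 * 5 / 6000 = 5000 sectors/s to the rate on the next update.
 */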
static void __update_writeback_rate(struct cached_dev *dc)
{
        struct cache_set *c = dc->disk.c;
        uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
                                bcache_flash_devs_sectors_dirty(c);
        uint64_t cache_dirty_target =
                div_u64(cache_sectors * dc->writeback_percent, 100);

        int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
                                   c->cached_dev_sectors);

        /* PD controller */

        int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
        int64_t derivative = dirty - dc->disk.sectors_dirty_last;
        int64_t proportional = dirty - target;
        int64_t change;

        dc->disk.sectors_dirty_last = dirty;

        /* Scale to sectors per second */

        proportional *= dc->writeback_rate_update_seconds;
        proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);

        derivative = div_s64(derivative, dc->writeback_rate_update_seconds);

        derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
                              (dc->writeback_rate_d_term /
                               dc->writeback_rate_update_seconds) ?: 1, 0);

        derivative *= dc->writeback_rate_d_term;
        derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);

        change = proportional + derivative;

        /* Don't increase writeback rate if the device isn't keeping up */
        if (change > 0 &&
            time_after64(local_clock(),
                         dc->writeback_rate.next + NSEC_PER_MSEC))
                change = 0;

        dc->writeback_rate.rate =
                clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
                        1, NSEC_PER_MSEC);

        dc->writeback_rate_proportional = proportional;
        dc->writeback_rate_derivative = derivative;
        dc->writeback_rate_change = change;
        dc->writeback_rate_target = target;
}
static void update_writeback_rate(struct work_struct *work)
{
        struct cached_dev *dc = container_of(to_delayed_work(work),
                                             struct cached_dev,
                                             writeback_rate_update);

        down_read(&dc->writeback_lock);

        if (atomic_read(&dc->has_dirty) &&
            dc->writeback_percent)
                __update_writeback_rate(dc);

        up_read(&dc->writeback_lock);

        schedule_delayed_work(&dc->writeback_rate_update,
                              dc->writeback_rate_update_seconds * HZ);
}
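/*
 * How long read_dirty() should sleep, in jiffies, after writing back
 * 'sectors' sectors, so that writeback proceeds at the current
 * writeback_rate.  Zero (no throttling) while detaching or when
 * writeback_percent is 0.
 */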
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
        if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
            !dc->writeback_percent)
                return 0;

        return bch_next_delay(&dc->writeback_rate, sectors);
}
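/*
 * One unit of writeback in flight: the data is read from the cache device
 * (read_dirty_submit/read_dirty_endio), written to the backing device
 * (write_dirty/dirty_endio), and finally the key is marked clean in the
 * btree (write_dirty_finish).  The bio's inline vecs are sized for the key
 * when the dirty_io is allocated in read_dirty().
 */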
struct dirty_io {
        struct closure          cl;
        struct cached_dev       *dc;
        struct bio              bio;
};
static void dirty_init(struct keybuf_key *w)
{
        struct dirty_io *io = w->private;
        struct bio *bio = &io->bio;

        bio_init(bio);
        if (!io->dc->writeback_percent)
                bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

        bio->bi_iter.bi_size    = KEY_SIZE(&w->key) << 9;
        bio->bi_max_vecs        = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
        bio->bi_private         = w;
        bio->bi_io_vec          = bio->bi_inline_vecs;
        bch_bio_map(bio, NULL);
}
static void dirty_io_destructor(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);
        kfree(io);
}
static void write_dirty_finish(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);
        struct keybuf_key *w = io->bio.bi_private;
        struct cached_dev *dc = io->dc;
        struct bio_vec *bv;
        int i;

        bio_for_each_segment_all(bv, &io->bio, i)
                __free_page(bv->bv_page);

        /* This is kind of a dumb way of signalling errors. */
        if (KEY_DIRTY(&w->key)) {
                int ret;
                unsigned i;
                struct keylist keys;

                bch_keylist_init(&keys);

                bkey_copy(keys.top, &w->key);
                SET_KEY_DIRTY(keys.top, false);
                bch_keylist_push(&keys);

                for (i = 0; i < KEY_PTRS(&w->key); i++)
                        atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

                ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

                if (ret)
                        trace_bcache_writeback_collision(&w->key);

                atomic_long_inc(ret
                                ? &dc->disk.c->writeback_keys_failed
                                : &dc->disk.c->writeback_keys_done);
        }

        bch_keybuf_del(&dc->writeback_keys, w);
        up(&dc->in_flight);

        closure_return_with_destructor(cl, dirty_io_destructor);
}
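/*
 * Completion for the write to the backing device.  On error the in-memory
 * key is marked clean here, which makes write_dirty_finish() skip the btree
 * update; the key stays dirty in the btree and will be retried on a later
 * writeback pass.
 */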
static void dirty_endio(struct bio *bio)
{
        struct keybuf_key *w = bio->bi_private;
        struct dirty_io *io = w->private;

        if (bio->bi_error)
                SET_KEY_DIRTY(&w->key, false);

        closure_put(&io->cl);
}
static void write_dirty(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);
        struct keybuf_key *w = io->bio.bi_private;

        dirty_init(w);
        io->bio.bi_rw           = WRITE;
        io->bio.bi_iter.bi_sector = KEY_START(&w->key);
        io->bio.bi_bdev         = io->dc->bdev;
        io->bio.bi_end_io       = dirty_endio;

        closure_bio_submit(&io->bio, cl);

        continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}
static void read_dirty_endio(struct bio *bio)
{
        struct keybuf_key *w = bio->bi_private;
        struct dirty_io *io = w->private;

        bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
                            bio->bi_error, "reading dirty data from cache");

        dirty_endio(bio);
}
static void read_dirty_submit(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);

        closure_bio_submit(&io->bio, cl);

        continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}
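/*
 * Pull dirty keys off the keybuf, read the data from the cache device and
 * kick off the write to the backing device via a closure chain.  Submission
 * is throttled two ways: writeback_delay() paces it at the current
 * writeback rate, and the dc->in_flight semaphore (initialised to 64 in
 * bch_cached_dev_writeback_init()) bounds how many dirty_ios are in flight.
 */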
static void read_dirty(struct cached_dev *dc)
{
        unsigned delay = 0;
        struct keybuf_key *w;
        struct dirty_io *io;
        struct closure cl;

        closure_init_stack(&cl);

        /*
         * XXX: if we error, background writeback just spins. Should use some
         * mempools.
         */

        while (!kthread_should_stop()) {
                try_to_freeze();

                w = bch_keybuf_next(&dc->writeback_keys);
                if (!w)
                        break;

                BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));

                if (KEY_START(&w->key) != dc->last_read ||
                    jiffies_to_msecs(delay) > 50)
                        while (!kthread_should_stop() && delay)
                                delay = schedule_timeout_interruptible(delay);

                dc->last_read   = KEY_OFFSET(&w->key);

                io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
                             * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
                             GFP_KERNEL);
                if (!io)
                        goto err;

                w->private      = io;
                io->dc          = dc;

                dirty_init(w);
                io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
                io->bio.bi_bdev         = PTR_CACHE(dc->disk.c,
                                                    &w->key, 0)->bdev;
                io->bio.bi_rw           = READ;
                io->bio.bi_end_io       = read_dirty_endio;

                if (bio_alloc_pages(&io->bio, GFP_KERNEL))
                        goto err_free;

                trace_bcache_writeback(&w->key);

                down(&dc->in_flight);
                closure_call(&io->cl, read_dirty_submit, NULL, &cl);

                delay = writeback_delay(dc, KEY_SIZE(&w->key));
        }

        if (0) {
err_free:
                kfree(w->private);
err:
                bch_keybuf_del(&dc->writeback_keys, w);
        }

        /*
         * Wait for outstanding writeback IOs to finish (and keybuf slots to be
         * freed) before refilling again
         */
        closure_sync(&cl);
}
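/*
 * Dirty data is tracked per stripe of the backing device:
 * d->stripe_sectors_dirty counts dirty sectors within each stripe, and
 * d->full_dirty_stripes flags stripes that are entirely dirty so that
 * refill_full_stripes() can write them back as whole units.
 */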
/* Scan for dirty data */

void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
                                  uint64_t offset, int nr_sectors)
{
        struct bcache_device *d = c->devices[inode];
        unsigned stripe_offset, stripe, sectors_dirty;

        if (!d)
                return;

        stripe = offset_to_stripe(d, offset);
        stripe_offset = offset & (d->stripe_size - 1);

        while (nr_sectors) {
                int s = min_t(unsigned, abs(nr_sectors),
                              d->stripe_size - stripe_offset);

                if (nr_sectors < 0)
                        s = -s;

                if (stripe >= d->nr_stripes)
                        return;

                sectors_dirty = atomic_add_return(s,
                                        d->stripe_sectors_dirty + stripe);
                if (sectors_dirty == d->stripe_size)
                        set_bit(stripe, d->full_dirty_stripes);
                else
                        clear_bit(stripe, d->full_dirty_stripes);

                nr_sectors -= s;
                stripe_offset = 0;
                stripe++;
        }
}
static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
        struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);

        BUG_ON(KEY_INODE(k) != dc->disk.id);

        return KEY_DIRTY(k);
}
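/*
 * For backing devices where partial stripe writes are expensive
 * (dc->partial_stripes_expensive, typically parity RAID), refill the keybuf
 * only from stripes that are completely dirty, scanning the
 * full_dirty_stripes bitmap from last_scanned and wrapping around at most
 * once.
 */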
static void refill_full_stripes(struct cached_dev *dc)
{
        struct keybuf *buf = &dc->writeback_keys;
        unsigned start_stripe, stripe, next_stripe;
        bool wrapped = false;

        stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));

        if (stripe >= dc->disk.nr_stripes)
                stripe = 0;

        start_stripe = stripe;

        while (1) {
                stripe = find_next_bit(dc->disk.full_dirty_stripes,
                                       dc->disk.nr_stripes, stripe);

                if (stripe == dc->disk.nr_stripes)
                        goto next;

                next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
                                                 dc->disk.nr_stripes, stripe);

                buf->last_scanned = KEY(dc->disk.id,
                                        stripe * dc->disk.stripe_size, 0);

                bch_refill_keybuf(dc->disk.c, buf,
                                  &KEY(dc->disk.id,
                                       next_stripe * dc->disk.stripe_size, 0),
                                  dirty_pred);

                if (array_freelist_empty(&buf->freelist))
                        return;

                stripe = next_stripe;
next:
                if (wrapped && stripe > start_stripe)
                        return;

                if (stripe == dc->disk.nr_stripes) {
                        stripe = 0;
                        wrapped = true;
                }
        }
}
/*
 * Returns true if we scanned the entire disk
 */
static bool refill_dirty(struct cached_dev *dc)
{
        struct keybuf *buf = &dc->writeback_keys;
        struct bkey start = KEY(dc->disk.id, 0, 0);
        struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
        struct bkey start_pos;

        /*
         * make sure keybuf pos is inside the range for this disk - at bringup
         * we might not be attached yet so this disk's inode nr isn't
         * initialized then
         */
        if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
            bkey_cmp(&buf->last_scanned, &end) > 0)
                buf->last_scanned = start;

        if (dc->partial_stripes_expensive) {
                refill_full_stripes(dc);
                if (array_freelist_empty(&buf->freelist))
                        return false;
        }

        start_pos = buf->last_scanned;
        bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

        if (bkey_cmp(&buf->last_scanned, &end) < 0)
                return false;

        /*
         * If we get to the end start scanning again from the beginning, and
         * only scan up to where we initially started scanning from:
         */
        buf->last_scanned = start;
        bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

        return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}
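/*
 * Writeback main loop: sleep while there is no dirty data (or writeback is
 * disabled) and the device isn't detaching, refill the keybuf from the
 * btree, write the keys back via read_dirty(), and, once a full index scan
 * has completed, pause for dc->writeback_delay seconds before scanning
 * again.  When the last dirty key has been written back the backing device
 * superblock is marked clean.
 */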
static int bch_writeback_thread(void *arg)
{
        struct cached_dev *dc = arg;
        bool searched_full_index;

        while (!kthread_should_stop()) {
                down_write(&dc->writeback_lock);
                set_current_state(TASK_INTERRUPTIBLE);
                /*
                 * If the bcache device is detaching, skip here and continue
                 * to perform writeback. Otherwise, if no dirty data on cache,
                 * or there is dirty data on cache but writeback is disabled,
                 * the writeback thread should sleep here and wait for others
                 * to wake it up.
                 */
                if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
                    (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
                        up_write(&dc->writeback_lock);

                        if (kthread_should_stop()) {
                                set_current_state(TASK_RUNNING);
                                return 0;
                        }

                        schedule();
                        continue;
                }
                set_current_state(TASK_RUNNING);

                searched_full_index = refill_dirty(dc);

                if (searched_full_index &&
                    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
                        atomic_set(&dc->has_dirty, 0);
                        cached_dev_put(dc);
                        SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
                        bch_write_bdev_super(dc, NULL);
                        /*
                         * If bcache device is detaching via sysfs interface,
                         * writeback thread should stop after there is no dirty
                         * data on cache. BCACHE_DEV_DETACHING flag is set in
                         * bch_cached_dev_detach().
                         */
                        if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
                                up_write(&dc->writeback_lock);
                                break;
                        }
                }

                up_write(&dc->writeback_lock);

                bch_ratelimit_reset(&dc->writeback_rate);
                read_dirty(dc);

                if (searched_full_index) {
                        unsigned delay = dc->writeback_delay * HZ;

                        while (delay &&
                               !kthread_should_stop() &&
                               !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
                                delay = schedule_timeout_interruptible(delay);
                }
        }

        return 0;
}

/* Init */
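/*
 * Walk this device's keys in the btree to rebuild the per-stripe dirty
 * sector counts; done from bch_sectors_dirty_init() when the device is
 * brought up, before writeback starts using the counters.
 */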
struct sectors_dirty_init {
        struct btree_op op;
        unsigned        inode;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
                                 struct bkey *k)
{
        struct sectors_dirty_init *op = container_of(_op,
                                        struct sectors_dirty_init, op);
        if (KEY_INODE(k) > op->inode)
                return MAP_DONE;

        if (KEY_DIRTY(k))
                bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
                                             KEY_START(k), KEY_SIZE(k));

        return MAP_CONTINUE;
}

void bch_sectors_dirty_init(struct bcache_device *d)
{
        struct sectors_dirty_init op;

        bch_btree_op_init(&op.op, -1);
        op.inode = d->id;

        bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
                           sectors_dirty_init_fn, 0);

        d->sectors_dirty_last = bcache_dev_sectors_dirty(d);
}
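/*
 * Defaults: aim to keep at most 10% of the cache dirty for this device,
 * start at 1024 sectors/s and recompute the rate every 5 seconds.  These
 * fields back the writeback_* tunables exposed through sysfs.
 */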
void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
        sema_init(&dc->in_flight, 64);
        init_rwsem(&dc->writeback_lock);
        bch_keybuf_init(&dc->writeback_keys);

        dc->writeback_metadata          = true;
        dc->writeback_running           = true;
        dc->writeback_percent           = 10;
        dc->writeback_delay             = 30;
        dc->writeback_rate.rate         = 1024;

        dc->writeback_rate_update_seconds = 5;
        dc->writeback_rate_d_term       = 30;
        dc->writeback_rate_p_term_inverse = 6000;

        INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}
int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
        dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
                                                 WQ_MEM_RECLAIM, 0);
        if (!dc->writeback_write_wq)
                return -ENOMEM;

        dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
                                              "bcache_writeback");
        if (IS_ERR(dc->writeback_thread))
                return PTR_ERR(dc->writeback_thread);

        schedule_delayed_work(&dc->writeback_rate_update,
                              dc->writeback_rate_update_seconds * HZ);

        bch_writeback_queue(dc);

        return 0;
}