/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <trace/events/bcache.h>
static struct workqueue_struct *dirty_wq;

static void read_dirty(struct closure *);

struct dirty_io {
	struct closure		cl;
	struct cached_dev	*dc;
	struct bio		bio;
};
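/*
 * Rate limiting: the writeback rate is adjusted by a proportional-differential
 * controller.  The target is the share of the cache that writeback_percent
 * allows this backing device to dirty (scaled by its fraction of
 * cached_dev_sectors); the error term measures how far the current dirty
 * count is from that target, and the derivative term damps oscillation.
 * For example, with writeback_percent = 10 and a single backing device, the
 * controller steers the amount of dirty data towards 10% of the cache's
 * capacity.
 */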
static void __update_writeback_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
				   c->cached_dev_sectors);

	/* PD controller */

	int change = 0;
	int64_t error;
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t derivative = dirty - dc->disk.sectors_dirty_last;

	dc->disk.sectors_dirty_last = dirty;

	derivative *= dc->writeback_rate_d_term;
	derivative = clamp(derivative, -dirty, dirty);

	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
			      dc->writeback_rate_d_smooth, 0);

	/* Avoid divide by zero */
	if (!target)
		goto out;

	error = div64_s64((dirty + derivative - target) << 8, target);

	change = div_s64((dc->writeback_rate.rate * error) >> 8,
			 dc->writeback_rate_p_term_inverse);

	/* Don't increase writeback rate if the device isn't keeping up */
	if (change > 0 &&
	    time_after64(local_clock(),
			 dc->writeback_rate.next + 10 * NSEC_PER_MSEC))
		change = 0;

	dc->writeback_rate.rate =
		clamp_t(int64_t, dc->writeback_rate.rate + change,
			1, NSEC_PER_MSEC);
out:
	dc->writeback_rate_derivative = derivative;
	dc->writeback_rate_change = change;
	dc->writeback_rate_target = target;

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
}
static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);

	down_read(&dc->writeback_lock);

	if (atomic_read(&dc->has_dirty) &&
	    dc->writeback_percent)
		__update_writeback_rate(dc);

	up_read(&dc->writeback_lock);
}
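/*
 * writeback_delay() converts the current writeback rate into a sleep time in
 * jiffies (capped at HZ) for the number of sectors just written; a detaching
 * device, or one with no writeback_percent target, is written back at full
 * speed.
 */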
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
	uint64_t ret;

	if (atomic_read(&dc->disk.detaching) ||
	    !dc->writeback_percent)
		return 0;

	ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);

	return min_t(uint64_t, ret, HZ);
}

/* Background writeback */
static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	return KEY_DIRTY(k);
}
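/*
 * dirty_full_stripe_pred() only accepts a key if every stripe it touches is
 * completely dirty.  It is used when partial stripe writes to the backing
 * device are expensive (e.g. a RAID5/6 array), so writeback prefers to write
 * out whole stripes at a time.
 */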
static bool dirty_full_stripe_pred(struct keybuf *buf, struct bkey *k)
{
	uint64_t stripe;
	unsigned nr_sectors = KEY_SIZE(k);
	struct cached_dev *dc = container_of(buf, struct cached_dev,
					     writeback_keys);
	unsigned stripe_size = 1 << dc->disk.stripe_size_bits;

	if (!KEY_DIRTY(k))
		return false;

	stripe = KEY_START(k) >> dc->disk.stripe_size_bits;
	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe) !=
		    stripe_size)
			return false;

		if (nr_sectors <= stripe_size)
			return true;

		nr_sectors -= stripe_size;
		stripe++;
	}
}
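/*
 * dirty_init() sets up the bio embedded in a struct dirty_io for one key's
 * worth of data: sized from the key, backed by the inline bio_vecs, and
 * issued at idle I/O priority when no writeback_percent target is set.
 */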
static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio);
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_size		= KEY_SIZE(&w->key) << 9;
	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
	bio->bi_private		= w;
	bio->bi_io_vec		= bio->bi_inline_vecs;
	bch_bio_map(bio, NULL);
}
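/*
 * refill_dirty() scans the btree for dirty keys belonging to this backing
 * device and stuffs them into dc->writeback_keys.  If a scan that started
 * from the beginning reaches the end without finding anything, has_dirty is
 * cleared (and the superblock is marked clean on the next pass); it then
 * punts to read_dirty() on the writeback workqueue to issue the actual IO.
 */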
static void refill_dirty(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev,
					     writeback.cl);
	struct keybuf *buf = &dc->writeback_keys;
	bool searched_from_start = false;
	struct bkey end = MAX_KEY;
	SET_KEY_INODE(&end, dc->disk.id);

	if (!atomic_read(&dc->disk.detaching) &&
	    !dc->writeback_running)
		closure_return(cl);

	down_write(&dc->writeback_lock);

	if (!atomic_read(&dc->has_dirty)) {
		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
		bch_write_bdev_super(dc, NULL);

		up_write(&dc->writeback_lock);
		closure_return(cl);
	}

	if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
		buf->last_scanned = KEY(dc->disk.id, 0, 0);
		searched_from_start = true;
	}

	if (dc->partial_stripes_expensive) {
		uint64_t i;

		for (i = 0; i < dc->disk.nr_stripes; i++)
			if (atomic_read(dc->disk.stripe_sectors_dirty + i) ==
			    1 << dc->disk.stripe_size_bits)
				goto full_stripes;

		goto normal_refill;
full_stripes:
		bch_refill_keybuf(dc->disk.c, buf, &end,
				  dirty_full_stripe_pred);
	} else {
normal_refill:
		bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
	}

	if (bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start) {
		/* Searched the entire btree - delay awhile */

		if (RB_EMPTY_ROOT(&buf->keys)) {
			atomic_set(&dc->has_dirty, 0);
			cached_dev_put(dc);
		}

		if (!atomic_read(&dc->disk.detaching))
			closure_delay(&dc->writeback, dc->writeback_delay * HZ);
	}

	up_write(&dc->writeback_lock);

	bch_ratelimit_reset(&dc->writeback_rate);

	/* Punt to workqueue only so we don't recurse and blow the stack */
	continue_at(cl, read_dirty, dirty_wq);
}
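/*
 * bch_writeback_queue() kicks the writeback closure (if it isn't already
 * running) after an optional writeback_delay; bch_writeback_add() is called
 * when a cached write first makes the device dirty, flagging the superblock
 * dirty and arming the rate update worker.
 */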
void bch_writeback_queue(struct cached_dev *dc)
{
	if (closure_trylock(&dc->writeback.cl, &dc->disk.cl)) {
		if (!atomic_read(&dc->disk.detaching))
			closure_delay(&dc->writeback, dc->writeback_delay * HZ);

		continue_at(&dc->writeback.cl, refill_dirty, dirty_wq);
	}
}
void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		atomic_inc(&dc->count);

		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);

		if (dc->writeback_percent)
			schedule_delayed_work(&dc->writeback_rate_update,
				dc->writeback_rate_update_seconds * HZ);
	}
}
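/*
 * bcache_dev_sectors_dirty_add() updates the per-stripe dirty sector counts
 * for a backing device, splitting the range across stripe boundaries;
 * nr_sectors may be negative when sectors are being marked clean.
 */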
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned stripe_size, stripe_offset;
	uint64_t stripe;

	if (!d)
		return;

	stripe_size = 1 << d->stripe_size_bits;
	stripe = offset >> d->stripe_size_bits;
	stripe_offset = offset & (stripe_size - 1);

	while (nr_sectors) {
		int s = min_t(unsigned, abs(nr_sectors),
			      stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		atomic_add(s, d->stripe_sectors_dirty + stripe);
		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}

/* Background writeback - IO loop */
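/*
 * Each key is written back through a small pipeline of closures: read_dirty()
 * allocates a dirty_io and reads the data from the cache device
 * (read_dirty_submit()/read_dirty_endio()), write_dirty() writes it to the
 * backing device, and write_dirty_finish() updates the btree with a
 * BTREE_REPLACE insert so the key is only marked clean if it wasn't
 * overwritten in the meantime.
 */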
static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	kfree(io);
}
static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, &io->bio, i)
		__free_page(bv->bv_page);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		unsigned i;
		struct btree_op op;
		bch_btree_op_init_stack(&op);

		op.type = BTREE_REPLACE;
		bkey_copy(&op.replace, &w->key);

		SET_KEY_DIRTY(&w->key, false);
		bch_keylist_add(&op.keys, &w->key);

		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		bch_btree_insert(&op, dc->disk.c);
		closure_sync(&op.cl);

		if (op.insert_collision)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(op.insert_collision
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}
static void dirty_endio(struct bio *bio, int error)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (error)
		SET_KEY_DIRTY(&w->key, false);

	closure_put(&io->cl);
}
static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;

	dirty_init(w);
	io->bio.bi_rw		= WRITE;
	io->bio.bi_sector	= KEY_START(&w->key);
	io->bio.bi_bdev		= io->dc->bdev;
	io->bio.bi_end_io	= dirty_endio;

	closure_bio_submit(&io->bio, cl, &io->dc->disk);

	continue_at(cl, write_dirty_finish, system_wq);
}
static void read_dirty_endio(struct bio *bio, int error)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    error, "reading dirty data from cache");

	dirty_endio(bio, error);
}
static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(&io->bio, cl, &io->dc->disk);

	continue_at(cl, write_dirty, system_wq);
}
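/*
 * read_dirty() is the producer side of the IO loop: it walks the refilled
 * keybuf, throttles itself according to the writeback rate, and bounds the
 * number of in-flight dirty_io structures with the dc->in_flight semaphore
 * (released in write_dirty_finish()).
 */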
static void read_dirty(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev,
					     writeback.cl);
	unsigned delay = writeback_delay(dc, 0);
	struct keybuf_key *w;
	struct dirty_io *io;

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */

	while (1) {
		w = bch_keybuf_next(&dc->writeback_keys);
		if (!w)
			break;

		BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));

		if (delay > 0 &&
		    (KEY_START(&w->key) != dc->last_read ||
		     jiffies_to_msecs(delay) > 50))
			delay = schedule_timeout_uninterruptible(delay);

		dc->last_read	= KEY_OFFSET(&w->key);

		io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private	= io;
		io->dc		= dc;

		dirty_init(w);
		io->bio.bi_sector	= PTR_OFFSET(&w->key, 0);
		io->bio.bi_bdev		= PTR_CACHE(dc->disk.c,
						    &w->key, 0)->bdev;
		io->bio.bi_rw		= READ;
		io->bio.bi_end_io	= read_dirty_endio;

		if (bio_alloc_pages(&io->bio, GFP_KERNEL))
			goto err_free;

		trace_bcache_writeback(&w->key);

		down(&dc->in_flight);
		closure_call(&io->cl, read_dirty_submit, NULL, cl);

		delay = writeback_delay(dc, KEY_SIZE(&w->key));
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */
	continue_at(cl, refill_dirty, dirty_wq);
}
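/*
 * When a backing device is attached, the per-stripe dirty counters are
 * rebuilt by walking the btree: bch_btree_sectors_dirty_init() recurses
 * through the nodes and feeds every dirty key for this device into
 * bcache_dev_sectors_dirty_add().
 */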
static int bch_btree_sectors_dirty_init(struct btree *b, struct btree_op *op,
					struct cached_dev *dc)
{
	struct bkey *k;
	struct btree_iter iter;

	bch_btree_iter_init(b, &iter, &KEY(dc->disk.id, 0, 0));
	while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad)))
		if (!b->level) {
			if (KEY_INODE(k) > dc->disk.id)
				break;

			if (KEY_DIRTY(k))
				bcache_dev_sectors_dirty_add(b->c, dc->disk.id,
							     KEY_START(k),
							     KEY_SIZE(k));
		} else {
			btree(sectors_dirty_init, k, b, op, dc);
			if (KEY_INODE(k) > dc->disk.id)
				break;

			cond_resched();
		}

	return 0;
}
void bch_sectors_dirty_init(struct cached_dev *dc)
{
	struct btree_op op;

	bch_btree_op_init_stack(&op);
	btree_root(sectors_dirty_init, dc->disk.c, &op, dc);
}
void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);
	closure_init_unlocked(&dc->writeback);
	init_rwsem(&dc->writeback_lock);

	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_metadata		= true;
	dc->writeback_running		= true;
	dc->writeback_percent		= 10;
	dc->writeback_delay		= 30;
	dc->writeback_rate.rate		= 1024;

	dc->writeback_rate_update_seconds = 30;
	dc->writeback_rate_d_term	= 16;
	dc->writeback_rate_p_term_inverse = 64;
	dc->writeback_rate_d_smooth	= 8;

	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
}
void bch_writeback_exit(void)
{
	if (dirty_wq)
		destroy_workqueue(dirty_wq);
}
int __init bch_writeback_init(void)
{
	dirty_wq = create_workqueue("bcache_writeback");
	if (!dirty_wq)
		return -ENOMEM;

	return 0;
}