drivers/md/bcache/writeback.c
/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <trace/events/bcache.h>

/* Rate limiting */

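/*
 * __update_writeback_rate() implements a simple PD (proportional-derivative)
 * controller: it compares the number of dirty sectors against the target
 * implied by writeback_percent and nudges writeback_rate.rate (in sectors
 * per second) toward closing the gap.
 *
 * As a rough worked example, with the defaults from
 * bch_cached_dev_writeback_init() (update interval 5s, p_term_inverse 6000,
 * d_term 30), the proportional term comes out to error * 5 / 6000: about
 * 1/1200th of the dirty-sector excess is added to the rate on each update.
 */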
static void __update_writeback_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
				   c->cached_dev_sectors);

	/* PD controller */

	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t derivative = dirty - dc->disk.sectors_dirty_last;
	int64_t proportional = dirty - target;
	int64_t change;

	dc->disk.sectors_dirty_last = dirty;

	/* Scale to sectors per second */

	proportional *= dc->writeback_rate_update_seconds;
	proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);

	derivative = div_s64(derivative, dc->writeback_rate_update_seconds);

	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
			      (dc->writeback_rate_d_term /
			       dc->writeback_rate_update_seconds) ?: 1, 0);

	derivative *= dc->writeback_rate_d_term;
	derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);

	change = proportional + derivative;

	/* Don't increase writeback rate if the device isn't keeping up */
	if (change > 0 &&
	    time_after64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))
		change = 0;

	dc->writeback_rate.rate =
		clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
			1, NSEC_PER_MSEC);

	dc->writeback_rate_proportional = proportional;
	dc->writeback_rate_derivative = derivative;
	dc->writeback_rate_change = change;
	dc->writeback_rate_target = target;
}
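
/*
 * Periodic worker: recompute the writeback rate while there is dirty data,
 * then re-arm itself to run again in writeback_rate_update_seconds.
 */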
static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);

	down_read(&dc->writeback_lock);

	if (atomic_read(&dc->has_dirty) &&
	    dc->writeback_percent)
		__update_writeback_rate(dc);

	up_read(&dc->writeback_lock);

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
}
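
/*
 * How long to sleep (in jiffies, via bch_next_delay()) before issuing the
 * next writeback I/O; 0 means no throttling - either we're detaching, or
 * writeback_percent is 0 and we flush at full speed.
 */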
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    !dc->writeback_percent)
		return 0;

	return bch_next_delay(&dc->writeback_rate, sectors);
}

struct dirty_io {
	struct closure		cl;
	struct cached_dev	*dc;
	struct bio		bio;
};
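
/*
 * Each dirty_io tracks one key's worth of data through the writeback
 * pipeline: read_dirty() reads it from the cache device, write_dirty()
 * writes it to the backing device, and write_dirty_finish() clears the
 * dirty bit in the btree - the stages are chained together with closures.
 */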
static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio);
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_iter.bi_size	= KEY_SIZE(&w->key) << 9;
	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
	bio->bi_private		= w;
	bio->bi_io_vec		= bio->bi_inline_vecs;
	bch_bio_map(bio, NULL);
}

static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	kfree(io);
}
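
/*
 * Final stage of the pipeline: free the data pages, then insert a clean
 * copy of the key into the btree. The buckets are pinned first so they
 * can't be reclaimed out from under the insert; if the insert loses a race
 * with a foreground write (a collision), the update is dropped and counted,
 * and the newer data will be picked up by a later scan.
 */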
static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, &io->bio, i)
		__free_page(bv->bv_page);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		int ret;
		unsigned i;
		struct keylist keys;

		bch_keylist_init(&keys);

		bkey_copy(keys.top, &w->key);
		SET_KEY_DIRTY(keys.top, false);
		bch_keylist_push(&keys);

		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

		if (ret)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(ret
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}
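
/*
 * Shared completion for both the read and the write. On error the dirty
 * bit is cleared on our private copy of the key, which makes
 * write_dirty_finish() skip the btree update - the key stays dirty in the
 * btree and the sectors get retried by a later scan (the "dumb way of
 * signalling errors" noted above).
 */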
static void dirty_endio(struct bio *bio, int error)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (error)
		SET_KEY_DIRTY(&w->key, false);

	closure_put(&io->cl);
}

static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;

	dirty_init(w);
	io->bio.bi_rw		= WRITE;
	io->bio.bi_iter.bi_sector = KEY_START(&w->key);
	io->bio.bi_bdev		= io->dc->bdev;
	io->bio.bi_end_io	= dirty_endio;

	closure_bio_submit(&io->bio, cl, &io->dc->disk);

	continue_at(cl, write_dirty_finish, system_wq);
}

static void read_dirty_endio(struct bio *bio, int error)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    error, "reading dirty data from cache");

	dirty_endio(bio, error);
}

static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(&io->bio, cl, &io->dc->disk);

	continue_at(cl, write_dirty, system_wq);
}
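
/*
 * Main writeback loop: pop keys off the keybuf, pace ourselves according
 * to writeback_delay(), and kick off one dirty_io pipeline per key. The
 * in_flight semaphore (initialized to 64) bounds how many pipelines can
 * be outstanding at once.
 */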
static void read_dirty(struct cached_dev *dc)
{
	unsigned delay = 0;
	struct keybuf_key *w;
	struct dirty_io *io;
	struct closure cl;

	closure_init_stack(&cl);

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */

	while (!kthread_should_stop()) {
		try_to_freeze();

		w = bch_keybuf_next(&dc->writeback_keys);
		if (!w)
			break;

		BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));

		if (KEY_START(&w->key) != dc->last_read ||
		    jiffies_to_msecs(delay) > 50)
			while (!kthread_should_stop() && delay)
				delay = schedule_timeout_interruptible(delay);

		dc->last_read	= KEY_OFFSET(&w->key);

		io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private	= io;
		io->dc		= dc;

		dirty_init(w);
		io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
		io->bio.bi_bdev		= PTR_CACHE(dc->disk.c,
						    &w->key, 0)->bdev;
		io->bio.bi_rw		= READ;
		io->bio.bi_end_io	= read_dirty_endio;

		if (bio_alloc_pages(&io->bio, GFP_KERNEL))
			goto err_free;

		trace_bcache_writeback(&w->key);

		down(&dc->in_flight);
		closure_call(&io->cl, read_dirty_submit, NULL, &cl);

		delay = writeback_delay(dc, KEY_SIZE(&w->key));
	}
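
	/*
	 * Never reached in the normal flow - only entered via the error
	 * gotos above, to unwind a partially set up dirty_io.
	 */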
	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */
	closure_sync(&cl);
}

/* Scan for dirty data */

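/*
 * Dirty data is tracked per "stripe" of the backing device: a count of
 * dirty sectors in each stripe, plus a bitmap of completely dirty stripes.
 * nr_sectors may be negative when sectors are being marked clean, and the
 * offset masking below relies on stripe_size being a power of two.
 */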
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned stripe_offset, stripe, sectors_dirty;

	if (!d)
		return;

	stripe = offset_to_stripe(d, offset);
	stripe_offset = offset & (d->stripe_size - 1);

	while (nr_sectors) {
		int s = min_t(unsigned, abs(nr_sectors),
			      d->stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		if (stripe >= d->nr_stripes)
			return;

		sectors_dirty = atomic_add_return(s,
					d->stripe_sectors_dirty + stripe);
		if (sectors_dirty == d->stripe_size)
			set_bit(stripe, d->full_dirty_stripes);
		else
			clear_bit(stripe, d->full_dirty_stripes);

		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	struct cached_dev *dc = container_of(buf, struct cached_dev,
					     writeback_keys);

	BUG_ON(KEY_INODE(k) != dc->disk.id);

	return KEY_DIRTY(k);
}
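
/*
 * When partial-stripe writes to the backing device are expensive (as on
 * parity RAID, for example), prefer writing back only stripes that are
 * completely dirty: walk the full_dirty_stripes bitmap, wrapping once
 * around the device, and refill the keybuf from those ranges only.
 */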
static void refill_full_stripes(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	unsigned start_stripe, stripe, next_stripe;
	bool wrapped = false;

	stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));

	if (stripe >= dc->disk.nr_stripes)
		stripe = 0;

	start_stripe = stripe;

	while (1) {
		stripe = find_next_bit(dc->disk.full_dirty_stripes,
				       dc->disk.nr_stripes, stripe);

		if (stripe == dc->disk.nr_stripes)
			goto next;

		next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
						 dc->disk.nr_stripes, stripe);

		buf->last_scanned = KEY(dc->disk.id,
					stripe * dc->disk.stripe_size, 0);

		bch_refill_keybuf(dc->disk.c, buf,
				  &KEY(dc->disk.id,
				       next_stripe * dc->disk.stripe_size, 0),
				  dirty_pred);

		if (array_freelist_empty(&buf->freelist))
			return;

		stripe = next_stripe;
next:
		if (wrapped && stripe > start_stripe)
			return;

		if (stripe == dc->disk.nr_stripes) {
			stripe = 0;
			wrapped = true;
		}
	}
}

/*
 * Returns true if we scanned the entire disk
 */
static bool refill_dirty(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	struct bkey start = KEY(dc->disk.id, 0, 0);
	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
	struct bkey start_pos;

	/*
	 * make sure keybuf pos is inside the range for this disk - at bringup
	 * we might not be attached yet so this disk's inode nr isn't
	 * initialized then
	 */
	if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
	    bkey_cmp(&buf->last_scanned, &end) > 0)
		buf->last_scanned = start;

	if (dc->partial_stripes_expensive) {
		refill_full_stripes(dc);
		if (array_freelist_empty(&buf->freelist))
			return false;
	}

	start_pos = buf->last_scanned;
	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

	if (bkey_cmp(&buf->last_scanned, &end) < 0)
		return false;

	/*
	 * If we get to the end start scanning again from the beginning, and
	 * only scan up to where we initially started scanning from:
	 */
	buf->last_scanned = start;
	bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

	return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}
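
/*
 * The per-device writeback thread: sleeps until there is dirty data and
 * writeback is enabled (or a detach forces a flush), refills the keybuf
 * from the btree, and writes the dirty data out. Once a full scan of the
 * device comes up empty the device is marked clean in its superblock, and
 * between full scans the thread naps for writeback_delay seconds.
 */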
static int bch_writeback_thread(void *arg)
{
	struct cached_dev *dc = arg;
	bool searched_full_index;

	while (!kthread_should_stop()) {
		down_write(&dc->writeback_lock);
		if (!atomic_read(&dc->has_dirty) ||
		    (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
		     !dc->writeback_running)) {
			up_write(&dc->writeback_lock);
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop())
				return 0;

			try_to_freeze();
			schedule();
			continue;
		}

		searched_full_index = refill_dirty(dc);

		if (searched_full_index &&
		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
			atomic_set(&dc->has_dirty, 0);
			cached_dev_put(dc);
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
			bch_write_bdev_super(dc, NULL);
		}

		up_write(&dc->writeback_lock);

		bch_ratelimit_reset(&dc->writeback_rate);
		read_dirty(dc);

		if (searched_full_index) {
			unsigned delay = dc->writeback_delay * HZ;

			while (delay &&
			       !kthread_should_stop() &&
			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
				delay = schedule_timeout_interruptible(delay);
		}
	}

	return 0;
}

/* Init */

struct sectors_dirty_init {
	struct btree_op	op;
	unsigned	inode;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
				 struct bkey *k)
{
	struct sectors_dirty_init *op = container_of(_op,
					struct sectors_dirty_init, op);
	if (KEY_INODE(k) > op->inode)
		return MAP_DONE;

	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
					     KEY_START(k), KEY_SIZE(k));

	return MAP_CONTINUE;
}
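
/*
 * Walk every key belonging to this device at attach time, rebuilding the
 * in-memory per-stripe dirty counters from what the btree says is dirty.
 */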
void bch_sectors_dirty_init(struct cached_dev *dc)
{
	struct sectors_dirty_init op;

	bch_btree_op_init(&op.op, -1);
	op.inode = dc->disk.id;

	bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
			   sectors_dirty_init_fn, 0);

	dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
}
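
/*
 * Defaults: up to 64 writeback I/Os in flight; the PD controller targets
 * dirty data at 10% of the cache; writeback starts at 1024 sectors/s until
 * the controller takes over; and the thread naps 30s between full scans.
 */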
void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);
	init_rwsem(&dc->writeback_lock);
	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_metadata		= true;
	dc->writeback_running		= true;
	dc->writeback_percent		= 10;
	dc->writeback_delay		= 30;
	dc->writeback_rate.rate		= 1024;

	dc->writeback_rate_update_seconds = 5;
	dc->writeback_rate_d_term	= 30;
	dc->writeback_rate_p_term_inverse = 6000;

	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}

int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
					      "bcache_writeback");
	if (IS_ERR(dc->writeback_thread))
		return PTR_ERR(dc->writeback_thread);

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);

	bch_writeback_queue(dc);

	return 0;
}