drivers/md/bcache/request.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/backing-dev.h>

#include <trace/events/bcache.h>
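
/*
 * Cache-occupancy cutoffs, in percent of buckets in use: above
 * CUTOFF_CACHE_ADD new data bypasses the cache entirely, and above
 * CUTOFF_CACHE_READA readahead data is no longer added to the cache.
 */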
#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *cl);

static unsigned int cache_mode(struct cached_dev *dc)
{
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc)
{
	return dc->verify;
}

static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	uint64_t csum = 0;

	bio_for_each_segment(bv, bio, iter) {
		void *d = kmap(bv.bv_page) + bv.bv_offset;

		csum = bch_crc64_update(csum, d, bv.bv_len);
		kunmap(bv.bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}
/* Insert data into cache */

static void bch_data_insert_keys(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;
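
	/*
	 * Only non-replace inserts are journalled; a replace (a cache-miss
	 * fill, see op->replace) goes straight to the btree insert below.
	 */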
	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->status = BLK_STS_RESOURCE;
		op->insert_data_done = true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done) {
		continue_at(cl, bch_data_insert_start, op->wq);
		return;
	}

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}
static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
			       struct cache_set *c)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;

	/*
	 * The journalling code doesn't handle the case where the keys to
	 * insert are bigger than an empty write: If we just return -ENOMEM
	 * here, bch_data_insert_keys() will insert the keys created so far
	 * and finish the rest when the keylist is empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
		return -ENOMEM;

	return __bch_keylist_realloc(l, u64s);
}
static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

	while (bio_sectors(bio)) {
		unsigned int sectors = min(bio_sectors(bio),
					   1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
			goto out;

		bio->bi_iter.bi_sector += sectors;
		bio->bi_iter.bi_size -= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode,
				     bio->bi_iter.bi_sector,
				     sectors));
	}

	op->insert_data_done = true;
	/* get in bch_data_insert() */
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, op->wq);
}
static void bch_data_insert_error(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}
static void bch_data_insert_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (bio->bi_status) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->status = bio->bi_status;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, op->wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
}
static void bch_data_insert_start(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (op->bypass)
		return bch_data_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
		wake_up_gc(op->c);

	/*
	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);

	do {
		unsigned int i;
		struct bkey *k;
		struct bio_set *split = &op->c->bio_split;

		/* 1 for the device pointer and 1 for the chksum */
		if (bch_keylist_realloc(&op->insert_keys,
					3 + (op->csum ? 1 : 0),
					op->c)) {
			continue_at(cl, bch_data_insert_keys, op->wq);
			return;
		}

		k = op->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io = bch_data_insert_endio;
		n->bi_private = cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		bio_set_op_attrs(n, REQ_OP_WRITE, 0);
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, op->wq);
	return;
err:
	/* bch_alloc_sectors() blocks if s->writeback = true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take awhile and
	 * we might be starving btree writes for gc or something.
	 */

	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write while
		 * we wait for buckets to be freed up, so just invalidate the
		 * rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, op->wq);
		else
			closure_return(cl);
	}
}
/**
 * bch_data_insert - stick some data in the cache
 * @cl: closure pointer.
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data had to be fragmented there will be multiple keys); after the
 * data is written it calls bch_journal, and after the keys have been added to
 * the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	trace_bcache_write(op->c, op->inode, op->bio,
			   op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(cl);
}
/*
 * Congested? Return 0 (not congested) or the limit (in sectors)
 * beyond which we should bypass the cache due to congestion.
 */
unsigned int bch_get_congested(const struct cache_set *c)
{
	int i;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	i -= hweight32(get_random_u32());

	return i > 0 ? i : 1;
}

static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned int mode = cache_mode(dc);
	unsigned int sectors, congested;
	struct task_struct *task = current;
	struct io *i;

	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio_op(bio) == REQ_OP_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     op_is_write(bio_op(bio))))
		goto skip;

	/*
	 * Flag for bypass if the IO is for read-ahead or background,
	 * unless the read-ahead request is for metadata
	 * (eg, for gfs2 or xfs).
	 */
	if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
	    !(bio->bi_opf & (REQ_META|REQ_PRIO)))
		goto skip;

	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (bypass_torture_test(dc)) {
		if ((get_random_int() & 3) == 3)
			goto skip;
		else
			goto rescale;
	}

	congested = bch_get_congested(c);
	if (!congested && !dc->sequential_cutoff)
		goto rescale;
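
	/*
	 * Sequential I/O detection: look up the start sector of this bio in a
	 * small hash of recently seen I/Os; if it continues one of them, keep
	 * accumulating that stream's length so large sequential streams can
	 * be made to bypass the cache below.
	 */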
	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
		if (i->last == bio->bi_iter.bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;

	i = list_first_entry(&dc->io_lru, struct io, lru);

	add_sequential(task);
	i->sequential = 0;
found:
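	/* only bump the accumulated stream length if the addition doesn't wrap */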
	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
		i->sequential += bio->bi_iter.bi_size;

	i->last = bio_end_sector(bio);
	i->jiffies = jiffies + msecs_to_jiffies(5000);
	task->sequential_io = i->sequential;

	hlist_del(&i->hash);
	hlist_add_head(&i->hash, iohash(dc, i->last));
	list_move_tail(&i->lru, &dc->io_lru);

	spin_unlock(&dc->io_lock);

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}
/* Cache lookup */

struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;
	struct bcache_device	*d;

	unsigned int		insert_bio_sectors;
	unsigned int		recoverable:1;
	unsigned int		write:1;
	unsigned int		read_dirty_data:1;
	unsigned int		cache_missed:1;

	unsigned long		start_time;

	struct btree_op		op;
	struct data_insert_op	iop;
};
static void bch_cache_read_endio(struct bio *bio)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->iop.status but leave the bio's own
	 * status alone so the error doesn't get counted against the cache
	 * device; we'll still reread the data from the backing device.
	 */

	if (bio->bi_status)
		s->iop.status = bio->bi_status;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.status = BLK_STS_IOERR;
	}

	bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
}
/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned int ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_iter.bi_sector) {
		unsigned int bio_sectors = bio_sectors(bio);
		unsigned int sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_iter.bi_sector)
			: INT_MAX;
		int ret = s->d->cache_miss(b, s, bio, sectors);

		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	if (KEY_DIRTY(k))
		s->read_dirty_data = true;

	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
			   GFP_NOIO, &s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io = bch_cache_read_endio;
	n->bi_private = &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong
	 * data.
	 *
	 * We guard against this by checking (in bch_cache_read_endio()) if
	 * the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}
static void cache_lookup(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;
	struct cached_dev *dc;
	int ret;

	bch_btree_op_init(&s->op, -1);

	ret = bch_btree_map_keys(&s->op, s->iop.c,
				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
				 cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN) {
		continue_at(cl, cache_lookup, bcache_wq);
		return;
	}

	/*
	 * We might encounter an error while searching the btree; if that
	 * happens we get a negative ret, and in that scenario we should not
	 * recover data from the backing device (when the cache device is
	 * dirty), because we don't know whether the bkeys covered by the
	 * read request are all clean.
	 *
	 * After that happens, s->iop.status is still its initial value from
	 * before we submitted s->bio.bio.
	 */
	if (ret < 0) {
		BUG_ON(ret == -EINTR);
		if (s->d && s->d->c &&
		    !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
			dc = container_of(s->d, struct cached_dev, disk);
			if (dc && atomic_read(&dc->has_dirty))
				s->recoverable = false;
		}
		if (!s->iop.status)
			s->iop.status = BLK_STS_IOERR;
	}

	closure_return(cl);
}
/* Common code for the make_request functions */

static void request_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	if (bio->bi_status) {
		struct search *s = container_of(cl, struct search, cl);

		s->iop.status = bio->bi_status;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}
static void backing_request_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	if (bio->bi_status) {
		struct search *s = container_of(cl, struct search, cl);
		struct cached_dev *dc = container_of(s->d,
						     struct cached_dev, disk);
		/*
		 * If a bio has REQ_PREFLUSH for writeback mode, it is
		 * specially assembled in cached_dev_write() for a non-zero
		 * write request which has REQ_PREFLUSH. We don't set
		 * s->iop.status for this failure; the status will be decided
		 * by the result of the bch_data_insert() operation.
		 */
		if (unlikely(s->iop.writeback &&
			     bio->bi_opf & REQ_PREFLUSH)) {
			pr_err("Can't flush %s: returned bi_status %i",
				dc->backing_dev_name, bio->bi_status);
		} else {
			/* set to orig_bio->bi_status in bio_complete() */
			s->iop.status = bio->bi_status;
		}
		s->recoverable = false;
		/* should count I/O error for backing device here */
		bch_count_backing_io_errors(dc, bio);
	}

	bio_put(bio);
	closure_put(cl);
}
static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		generic_end_io_acct(s->d->disk->queue, bio_op(s->orig_bio),
				    &s->d->disk->part0, s->start_time);

		trace_bcache_request_end(s->d, s->orig_bio);
		s->orig_bio->bi_status = s->iop.status;
		bio_endio(s->orig_bio);
		s->orig_bio = NULL;
	}
}

static void do_bio_hook(struct search *s,
			struct bio *orig_bio,
			bio_end_io_t *end_io_fn)
{
	struct bio *bio = &s->bio.bio;

	bio_init(bio, NULL, 0);
	__bio_clone_fast(bio, orig_bio);
	/*
	 * bi_end_io can be set separately somewhere else, e.g. the
	 * variants in,
	 * - cache_bio->bi_end_io from cached_dev_cache_miss()
	 * - n->bi_end_io from cache_lookup_fn()
	 */
	bio->bi_end_io = end_io_fn;
	bio->bi_private = &s->cl;

	bio_cnt_set(bio, 3);
}
static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	atomic_dec(&s->iop.c->search_inflight);

	if (s->iop.bio)
		bio_put(s->iop.bio);

	bio_complete(s);
	closure_debug_destroy(cl);
	mempool_free(s, &s->iop.c->search);
}

static inline struct search *search_alloc(struct bio *bio,
					  struct bcache_device *d)
{
	struct search *s;

	s = mempool_alloc(&d->c->search, GFP_NOIO);

	closure_init(&s->cl, NULL);
	do_bio_hook(s, bio, request_endio);
	atomic_inc(&d->c->search_inflight);

	s->orig_bio		= bio;
	s->cache_miss		= NULL;
	s->cache_missed		= 0;
	s->d			= d;
	s->recoverable		= 1;
	s->write		= op_is_write(bio_op(bio));
	s->read_dirty_data	= 0;
	s->start_time		= jiffies;

	s->iop.c		= d->c;
	s->iop.bio		= NULL;
	s->iop.inode		= d->id;
	s->iop.write_point	= hash_long((unsigned long) current, 16);
	s->iop.write_prio	= 0;
	s->iop.status		= 0;
	s->iop.flags		= 0;
	s->iop.flush_journal	= op_is_flush(bio->bi_opf);
	s->iop.wq		= bcache_wq;

	return s;
}
/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	cached_dev_put(dc);
	search_free(cl);
}

/* Process reads */

static void cached_dev_read_error_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio)
		bio_free_pages(s->iop.bio);

	cached_dev_bio_complete(cl);
}
static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	/*
	 * If the read request hit dirty data (s->read_dirty_data is true),
	 * then recovering a failed read request from the cached device may
	 * return stale data. So read failure recovery is only permitted
	 * when the read request hit clean data in the cache device, or when
	 * a cache read race happened.
	 */
	if (s->recoverable && !s->read_dirty_data) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.status = 0;
		do_bio_hook(s, s->orig_bio, backing_request_endio);

		/* XXX: invalidate cache */

		/* I/O request sent to backing device */
		closure_bio_submit(s->iop.c, bio, cl);
	}

	continue_at(cl, cached_dev_read_error_done, NULL);
}
static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bcache_device *d = s->d;

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio)
		bio_free_pages(s->iop.bio);

	cached_dev_bio_complete(cl);
	closure_put(&d->cl);
}
static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be inserted
	 * into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_iter.bi_sector =
			s->cache_miss->bi_iter.bi_sector;
		bio_copy_dev(s->iop.bio, s->cache_miss);
		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc) && s->recoverable && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	closure_get(&dc->disk.cl);
	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}
static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_missed, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass);

	if (s->iop.status)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned int sectors)
{
	int ret = MAP_CONTINUE;
	unsigned int reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	s->cache_missed = 1;

	if (s->cache_miss || s->iop.bypass) {
		miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_opf & REQ_RAHEAD) &&
	    !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      get_capacity(bio->bi_disk) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			&dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
	bio_copy_dev(cache_bio, miss);
	cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;

	cache_bio->bi_end_io = backing_request_endio;
	cache_bio->bi_private = &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss	= miss;
	s->iop.bio	= cache_bio;
	bio_get(cache_bio);
	/* I/O request sent to backing device */
	closure_bio_submit(s->iop.c, cache_bio, &s->cl);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io		= backing_request_endio;
	miss->bi_private	= &s->cl;
	/* I/O request sent to backing device */
	closure_bio_submit(s->iop.c, miss, &s->cl);
	return ret;
}
static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}
static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			goto insert_data;

		/* I/O request sent to backing device */
		bio->bi_end_io = backing_request_endio;
		closure_bio_submit(s->iop.c, bio, cl);

	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_opf & REQ_PREFLUSH) {
			/*
			 * Also need to send a flush to the backing
			 * device.
			 */
			struct bio *flush;

			flush = bio_alloc_bioset(GFP_NOIO, 0,
						 &dc->disk.bio_split);
			if (!flush) {
				s->iop.status = BLK_STS_RESOURCE;
				goto insert_data;
			}
			bio_copy_dev(flush, bio);
			flush->bi_end_io = backing_request_endio;
			flush->bi_private = cl;
			flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
			/* I/O request sent to backing device */
			closure_bio_submit(s->iop.c, flush, cl);
		}
	} else {
		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
		/* I/O request sent to backing device */
		bio->bi_end_io = backing_request_endio;
		closure_bio_submit(s->iop.c, bio, cl);
	}

insert_data:
	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}
static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	bio->bi_end_io = backing_request_endio;
	closure_bio_submit(s->iop.c, bio, cl);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

struct detached_dev_io_private {
	struct bcache_device	*d;
	unsigned long		start_time;
	bio_end_io_t		*bi_end_io;
	void			*bi_private;
};
static void detached_dev_end_io(struct bio *bio)
{
	struct detached_dev_io_private *ddip;

	ddip = bio->bi_private;
	bio->bi_end_io = ddip->bi_end_io;
	bio->bi_private = ddip->bi_private;

	generic_end_io_acct(ddip->d->disk->queue, bio_op(bio),
			    &ddip->d->disk->part0, ddip->start_time);

	if (bio->bi_status) {
		struct cached_dev *dc = container_of(ddip->d,
						     struct cached_dev, disk);
		/* should count I/O error for backing device here */
		bch_count_backing_io_errors(dc, bio);
	}

	kfree(ddip);
	bio->bi_end_io(bio);
}
static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
{
	struct detached_dev_io_private *ddip;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);

	/*
	 * No need to call closure_get(&dc->disk.cl) here, because the upper
	 * layer has already opened the bcache device, which called
	 * closure_get(&dc->disk.cl).
	 */
	ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
	ddip->d = d;
	ddip->start_time = jiffies;
	ddip->bi_end_io = bio->bi_end_io;
	ddip->bi_private = bio->bi_private;
	bio->bi_end_io = detached_dev_end_io;
	bio->bi_private = ddip;

	if ((bio_op(bio) == REQ_OP_DISCARD) &&
	    !blk_queue_discard(bdev_get_queue(dc->bdev)))
		bio->bi_end_io(bio);
	else
		generic_make_request(bio);
}
static void quit_max_writeback_rate(struct cache_set *c,
				    struct cached_dev *this_dc)
{
	int i;
	struct bcache_device *d;
	struct cached_dev *dc;

	/*
	 * The mutex bch_register_lock may be contended by other parallel
	 * requesters, or by attach/detach operations on other backing
	 * devices. Waiting for the mutex lock may increase I/O request
	 * latency for seconds or more. To avoid such a situation, if
	 * mutex_trylock() fails, only the writeback rate of the current
	 * cached device is set to 1, and __update_writeback_rate() will
	 * decide the writeback rate of the other cached devices (remember
	 * that c->idle_counter is 0 already).
	 */
	if (mutex_trylock(&bch_register_lock)) {
		for (i = 0; i < c->devices_max_used; i++) {
			if (!c->devices[i])
				continue;

			if (UUID_FLASH_ONLY(&c->uuids[i]))
				continue;

			d = c->devices[i];
			dc = container_of(d, struct cached_dev, disk);
			/*
			 * set writeback rate to default minimum value,
			 * then let update_writeback_rate() to decide the
			 * upcoming rate.
			 */
			atomic_long_set(&dc->writeback_rate.rate, 1);
		}
		mutex_unlock(&bch_register_lock);
	} else
		atomic_long_set(&this_dc->writeback_rate.rate, 1);
}
/* Cached devices - read & write stuff */

static blk_qc_t cached_dev_make_request(struct request_queue *q,
					struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int rw = bio_data_dir(bio);

	if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
		     dc->io_disable)) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	if (likely(d->c)) {
		if (atomic_read(&d->c->idle_counter))
			atomic_set(&d->c->idle_counter, 0);
		/*
		 * If at_max_writeback_rate of cache set is true and new I/O
		 * comes, quit max writeback rate of all cached devices
		 * attached to this cache set, and set at_max_writeback_rate
		 * to false.
		 */
		if (unlikely(atomic_read(&d->c->at_max_writeback_rate) == 1)) {
			atomic_set(&d->c->at_max_writeback_rate, 0);
			quit_max_writeback_rate(d->c, dc);
		}
	}

	generic_start_io_acct(q,
			      bio_op(bio),
			      bio_sectors(bio),
			      &d->disk->part0);
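
	/* remap the bio to the backing device; its data starts sb.data_offset sectors in */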
	bio_set_dev(bio, dc->bdev);
	bio->bi_iter.bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_iter.bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else
		/* I/O request sent to backing device */
		detached_dev_do_request(d, bio);

	return BLK_QC_T_NONE;
}
static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);

	if (dc->io_disable)
		return -EIO;

	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned int i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn = cached_dev_make_request;
	g->queue->backing_dev_info->congested_fn = cached_dev_congested;
	dc->disk.cache_miss = cached_dev_cache_miss;
	dc->disk.ioctl = cached_dev_ioctl;
}
/* Flash backed devices */

static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned int sectors)
{
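	/*
	 * There is no backing device for a flash-only volume, so a cache miss
	 * simply means the range was never written: zero-fill just the missed
	 * part of the bio (temporarily shrinking bi_size so zero_fill_bio()
	 * stops at the right place) and then advance past it.
	 */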
	unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;

	swap(bio->bi_iter.bi_size, bytes);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, bytes);

	bio_advance(bio, bytes);

	if (!bio->bi_iter.bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}
static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}

static blk_qc_t flash_dev_make_request(struct request_queue *q,
				       struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_disk->private_data;

	if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0);

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_iter.bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
		return BLK_QC_T_NONE;
	} else if (bio_data_dir(bio)) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_iter.bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));
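
		/*
		 * Flash-only volumes have no backing device: every write goes
		 * straight into the cache as a "writeback" insert, and a
		 * discard is handled by invalidating the range via the bypass
		 * path in bch_data_insert().
		 */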
		s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0;
		s->iop.writeback = true;
		s->iop.bio = bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
	return BLK_QC_T_NONE;
}
static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned int i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn = flash_dev_make_request;
	g->queue->backing_dev_info->congested_fn = flash_dev_congested;
	d->cache_miss = flash_dev_cache_miss;
	d->ioctl = flash_dev_ioctl;
}

void bch_request_exit(void)
{
	kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

	return 0;
}