drivers/md/bcache/request.c

/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/backing-dev.h>

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD 95
#define CUTOFF_CACHE_READA 90

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *);

static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
        return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc, struct bio *bio)
{
        return dc->verify;
}

static void bio_csum(struct bio *bio, struct bkey *k)
{
        struct bio_vec bv;
        struct bvec_iter iter;
        uint64_t csum = 0;

        bio_for_each_segment(bv, bio, iter) {
                void *d = kmap(bv.bv_page) + bv.bv_offset;
                csum = bch_crc64_update(csum, d, bv.bv_len);
                kunmap(bv.bv_page);
        }

        k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}

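/*
 * Note: bio_csum() above stashes the checksum in the key's first unused
 * pointer slot (ptr[KEY_PTRS(k)]) with the top bit masked off - presumably so
 * the checksum can never be mistaken for a valid pointer.
 */
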
/* Insert data into cache */

static void bch_data_insert_keys(struct closure *cl)
{
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
        atomic_t *journal_ref = NULL;
        struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
        int ret;

        /*
         * If we're looping, might already be waiting on
         * another journal write - can't wait on more than one journal write at
         * a time
         *
         * XXX: this looks wrong
         */
#if 0
        while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
                closure_sync(&s->cl);
#endif

        if (!op->replace)
                journal_ref = bch_journal(op->c, &op->insert_keys,
                                          op->flush_journal ? cl : NULL);

        ret = bch_btree_insert(op->c, &op->insert_keys,
                               journal_ref, replace_key);
        if (ret == -ESRCH) {
                op->replace_collision = true;
        } else if (ret) {
                op->status = BLK_STS_RESOURCE;
                op->insert_data_done = true;
        }

        if (journal_ref)
                atomic_dec_bug(journal_ref);

        if (!op->insert_data_done) {
                continue_at(cl, bch_data_insert_start, op->wq);
                return;
        }

        bch_keylist_free(&op->insert_keys);
        closure_return(cl);
}

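/*
 * Ordering note for the function above: the keys go to the journal first
 * (skipped entirely for cache-miss replaces) and only then into the btree;
 * -ESRCH from bch_btree_insert() means the key we were trying to replace
 * changed underneath us, which is recorded as a replace collision rather than
 * treated as an error.
 */
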
static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
                               struct cache_set *c)
{
        size_t oldsize = bch_keylist_nkeys(l);
        size_t newsize = oldsize + u64s;

        /*
         * The journalling code doesn't handle the case where the keys to
         * insert are bigger than an empty write: if we just return -ENOMEM
         * here, bch_data_insert_start() and bch_data_invalidate() will insert
         * the keys created so far and finish the rest when the keylist is
         * empty.
         */
        if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
                return -ENOMEM;

        return __bch_keylist_realloc(l, u64s);
}

static void bch_data_invalidate(struct closure *cl)
{
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
        struct bio *bio = op->bio;

        pr_debug("invalidating %i sectors from %llu",
                 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

        while (bio_sectors(bio)) {
                unsigned sectors = min(bio_sectors(bio),
                                       1U << (KEY_SIZE_BITS - 1));

                if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
                        goto out;

                bio->bi_iter.bi_sector += sectors;
                bio->bi_iter.bi_size -= sectors << 9;

                bch_keylist_add(&op->insert_keys,
                                &KEY(op->inode, bio->bi_iter.bi_sector, sectors));
        }

        op->insert_data_done = true;
        bio_put(bio);
out:
        continue_at(cl, bch_data_insert_keys, op->wq);
}

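/*
 * The keys built above carry no pointers (just inode, offset and size), so
 * inserting them overwrites - i.e. invalidates - whatever the cache held for
 * that range. This is the bypass path: the write itself goes to the backing
 * device via the caller, the cache only has to forget its now-stale copy.
 */
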
static void bch_data_insert_error(struct closure *cl)
{
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

        /*
         * Our data write just errored, which means we've got a bunch of keys to
         * insert that point to data that wasn't successfully written.
         *
         * We don't have to insert those keys but we still have to invalidate
         * that region of the cache - so, if we just strip off all the pointers
         * from the keys we'll accomplish just that.
         */

        struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

        while (src != op->insert_keys.top) {
                struct bkey *n = bkey_next(src);

                SET_KEY_PTRS(src, 0);
                memmove(dst, src, bkey_bytes(src));

                dst = bkey_next(dst);
                src = n;
        }

        op->insert_keys.top = dst;

        bch_data_insert_keys(cl);
}

static void bch_data_insert_endio(struct bio *bio)
{
        struct closure *cl = bio->bi_private;
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

        if (bio->bi_status) {
                /* TODO: We could try to recover from this. */
                if (op->writeback)
                        op->status = bio->bi_status;
                else if (!op->replace)
                        set_closure_fn(cl, bch_data_insert_error, op->wq);
                else
                        set_closure_fn(cl, NULL, NULL);
        }

        bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
}

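/*
 * Three error cases above: a writeback write has to propagate the error, since
 * the cache would have been the only copy of the data; a writethrough write
 * falls back to bch_data_insert_error() so the affected range at least gets
 * invalidated; a cache-miss replace can simply be dropped, because the backing
 * device still holds the data.
 */
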
static void bch_data_insert_start(struct closure *cl)
{
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
        struct bio *bio = op->bio, *n;

        if (op->bypass)
                return bch_data_invalidate(cl);

        if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
                wake_up_gc(op->c);

        /*
         * Journal writes are marked REQ_PREFLUSH; if the original write was a
         * flush, it'll wait on the journal write.
         */
        bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);

        do {
                unsigned i;
                struct bkey *k;
                struct bio_set *split = op->c->bio_split;

                /* 1 for the device pointer and 1 for the checksum */
                if (bch_keylist_realloc(&op->insert_keys,
                                        3 + (op->csum ? 1 : 0),
                                        op->c)) {
                        continue_at(cl, bch_data_insert_keys, op->wq);
                        return;
                }

                k = op->insert_keys.top;
                bkey_init(k);
                SET_KEY_INODE(k, op->inode);
                SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

                if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
                                       op->write_point, op->write_prio,
                                       op->writeback))
                        goto err;

                n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

                n->bi_end_io = bch_data_insert_endio;
                n->bi_private = cl;

                if (op->writeback) {
                        SET_KEY_DIRTY(k, true);

                        for (i = 0; i < KEY_PTRS(k); i++)
                                SET_GC_MARK(PTR_BUCKET(op->c, k, i),
                                            GC_MARK_DIRTY);
                }

                SET_KEY_CSUM(k, op->csum);
                if (KEY_CSUM(k))
                        bio_csum(n, k);

                trace_bcache_cache_insert(k);
                bch_keylist_push(&op->insert_keys);

                bio_set_op_attrs(n, REQ_OP_WRITE, 0);
                bch_submit_bbio(n, op->c, k, 0);
        } while (n != bio);

        op->insert_data_done = true;
        continue_at(cl, bch_data_insert_keys, op->wq);
        return;
err:
        /* bch_alloc_sectors() blocks if s->writeback = true */
        BUG_ON(op->writeback);

        /*
         * But if it's not a writeback write we'd rather just bail out if
         * there aren't any buckets ready to write to - it might take a while
         * and we might be starving btree writes for gc or something.
         */

        if (!op->replace) {
                /*
                 * Writethrough write: We can't complete the write until we've
                 * updated the index. But we don't want to delay the write while
                 * we wait for buckets to be freed up, so just invalidate the
                 * rest of the write.
                 */
                op->bypass = true;
                return bch_data_invalidate(cl);
        } else {
                /*
                 * From a cache miss, we can just insert the keys for the data
                 * we have written or bail out if we didn't do anything.
                 */
                op->insert_data_done = true;
                bio_put(bio);

                if (!bch_keylist_empty(&op->insert_keys))
                        continue_at(cl, bch_data_insert_keys, op->wq);
                else
                        closure_return(cl);
        }
}

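/*
 * The do/while loop above is the heart of the insert path: each pass asks the
 * allocator for as much contiguous space as it will give, builds one key
 * describing that extent, splits the bio to match and submits the fragment,
 * looping until the split bio is the remainder of the original (n == bio) - so
 * a single logical write may end up as several keys.
 */
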
/**
 * bch_data_insert - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, or a writeback write, or a write to a flash
 * only volume - it's also used by the moving garbage collector to compact
 * data in mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be
 * inserted (if the data had to be fragmented there will be multiple keys);
 * after the data is written it calls bch_journal, and after the keys have
 * been added to the next journal write they're inserted into the btree.
 *
 * It inserts the data in s->cache_bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If s->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by s->cache_bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

        trace_bcache_write(op->c, op->inode, op->bio,
                           op->writeback, op->bypass);

        bch_keylist_init(&op->insert_keys);
        bio_get(op->bio);
        bch_data_insert_start(cl);
}

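/*
 * Callers don't invoke bch_data_insert() as a plain function call: they fill
 * in a struct data_insert_op embedded in their own state (cache set, inode,
 * bio, writeback/bypass flags, workqueue) and kick it off as a closure, e.g.
 * as cached_dev_write() below does:
 *
 *        closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
 *
 * Completion is signalled through the parent closure, not a return value.
 */
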
/* Congested? */

unsigned bch_get_congested(struct cache_set *c)
{
        int i;
        long rand;

        if (!c->congested_read_threshold_us &&
            !c->congested_write_threshold_us)
                return 0;

        i = (local_clock_us() - c->congested_last_us) / 1024;
        if (i < 0)
                return 0;

        i += atomic_read(&c->congested);
        if (i >= 0)
                return 0;

        i += CONGESTED_MAX;

        if (i > 0)
                i = fract_exp_two(i, 6);

        rand = get_random_int();
        i -= bitmap_weight(&rand, BITS_PER_LONG);

        return i > 0 ? i : 1;
}

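/*
 * Rough intent of the above, as far as it can be read from this file: the
 * return value is 0 for "not congested", otherwise a sector threshold that
 * check_should_bypass() compares against the size of the current sequential
 * stream - the more congested the cache has recently been, the smaller the
 * threshold and the more I/O gets bypassed. The bitmap_weight() of a random
 * word just adds a little jitter so the cutoff isn't a hard edge.
 */
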
static void add_sequential(struct task_struct *t)
{
        ewma_add(t->sequential_io_avg,
                 t->sequential_io, 8, 0);

        t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
        return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}

static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
        struct cache_set *c = dc->disk.c;
        unsigned mode = cache_mode(dc, bio);
        unsigned sectors, congested = bch_get_congested(c);
        struct task_struct *task = current;
        struct io *i;

        if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
            c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
            (bio_op(bio) == REQ_OP_DISCARD))
                goto skip;

        if (mode == CACHE_MODE_NONE ||
            (mode == CACHE_MODE_WRITEAROUND &&
             op_is_write(bio_op(bio))))
                goto skip;

        if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
            bio_sectors(bio) & (c->sb.block_size - 1)) {
                pr_debug("skipping unaligned io");
                goto skip;
        }

        if (bypass_torture_test(dc)) {
                if ((get_random_int() & 3) == 3)
                        goto skip;
                else
                        goto rescale;
        }

        if (!congested && !dc->sequential_cutoff)
                goto rescale;

        spin_lock(&dc->io_lock);

        hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
                if (i->last == bio->bi_iter.bi_sector &&
                    time_before(jiffies, i->jiffies))
                        goto found;

        i = list_first_entry(&dc->io_lru, struct io, lru);

        add_sequential(task);
        i->sequential = 0;
found:
        if (i->sequential + bio->bi_iter.bi_size > i->sequential)
                i->sequential += bio->bi_iter.bi_size;

        i->last = bio_end_sector(bio);
        i->jiffies = jiffies + msecs_to_jiffies(5000);
        task->sequential_io = i->sequential;

        hlist_del(&i->hash);
        hlist_add_head(&i->hash, iohash(dc, i->last));
        list_move_tail(&i->lru, &dc->io_lru);

        spin_unlock(&dc->io_lock);

        sectors = max(task->sequential_io,
                      task->sequential_io_avg) >> 9;

        if (dc->sequential_cutoff &&
            sectors >= dc->sequential_cutoff >> 9) {
                trace_bcache_bypass_sequential(bio);
                goto skip;
        }

        if (congested && sectors >= congested) {
                trace_bcache_bypass_congested(bio);
                goto skip;
        }

rescale:
        bch_rescale_priorities(c, bio_sectors(bio));
        return false;
skip:
        bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
        return true;
}

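/*
 * Summary of the bypass heuristics above: I/O skips the cache when the device
 * is detaching, the cache is nearly full (CUTOFF_CACHE_ADD), the request is a
 * discard or is unaligned, caching is off (or writearound for writes), the
 * request belongs to a sequential stream longer than sequential_cutoff, or the
 * cache is congested and the stream is large. Everything else gets cached, and
 * bch_rescale_priorities() keeps bucket priorities decaying as I/O flows in.
 */
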
/* Cache lookup */

struct search {
        /* Stack frame for bio_complete */
        struct closure cl;

        struct bbio bio;
        struct bio *orig_bio;
        struct bio *cache_miss;
        struct bcache_device *d;

        unsigned insert_bio_sectors;
        unsigned recoverable:1;
        unsigned write:1;
        unsigned read_dirty_data:1;

        unsigned long start_time;

        struct btree_op op;
        struct data_insert_op iop;
};

static void bch_cache_read_endio(struct bio *bio)
{
        struct bbio *b = container_of(bio, struct bbio, bio);
        struct closure *cl = bio->bi_private;
        struct search *s = container_of(cl, struct search, cl);

        /*
         * If the bucket was reused while our bio was in flight, we might have
         * read the wrong data. Set s->iop.status but not bio->bi_status, so
         * the error doesn't get counted against the cache device, but we'll
         * still reread the data from the backing device.
         */

        if (bio->bi_status)
                s->iop.status = bio->bi_status;
        else if (!KEY_DIRTY(&b->key) &&
                 ptr_stale(s->iop.c, &b->key, 0)) {
                atomic_long_inc(&s->iop.c->cache_read_races);
                s->iop.status = BLK_STS_IOERR;
        }

        bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
}

/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
        struct search *s = container_of(op, struct search, op);
        struct bio *n, *bio = &s->bio.bio;
        struct bkey *bio_key;
        unsigned ptr;

        if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
                return MAP_CONTINUE;

        if (KEY_INODE(k) != s->iop.inode ||
            KEY_START(k) > bio->bi_iter.bi_sector) {
                unsigned bio_sectors = bio_sectors(bio);
                unsigned sectors = KEY_INODE(k) == s->iop.inode
                        ? min_t(uint64_t, INT_MAX,
                                KEY_START(k) - bio->bi_iter.bi_sector)
                        : INT_MAX;

                int ret = s->d->cache_miss(b, s, bio, sectors);
                if (ret != MAP_CONTINUE)
                        return ret;

                /* if this was a complete miss we shouldn't get here */
                BUG_ON(bio_sectors <= sectors);
        }

        if (!KEY_SIZE(k))
                return MAP_CONTINUE;

        /* XXX: figure out best pointer - for multiple cache devices */
        ptr = 0;

        PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

        if (KEY_DIRTY(k))
                s->read_dirty_data = true;

        n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
                                      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
                           GFP_NOIO, s->d->bio_split);

        bio_key = &container_of(n, struct bbio, bio)->key;
        bch_bkey_copy_single_ptr(bio_key, k, ptr);

        bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
        bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

        n->bi_end_io = bch_cache_read_endio;
        n->bi_private = &s->cl;

        /*
         * The bucket we're reading from might be reused while our bio
         * is in flight, and we could then end up reading the wrong
         * data.
         *
         * We guard against this by checking (in cache_read_endio()) if
         * the pointer is stale again; if so, we treat it as an error
         * and reread from the backing device (but we don't pass that
         * error up anywhere).
         */

        __bch_submit_bbio(n, b->c);
        return n == bio ? MAP_DONE : MAP_CONTINUE;
}

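/*
 * cache_lookup_fn() is the per-key callback for bch_btree_map_keys():
 * MAP_CONTINUE asks the btree walk for the next key, MAP_DONE means the whole
 * bio has now been dispatched (to the cache, the backing device, or both), and
 * a negative return - e.g. -EINTR from the cache_miss hook - aborts the walk
 * early.
 */
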
static void cache_lookup(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, iop.cl);
        struct bio *bio = &s->bio.bio;
        int ret;

        bch_btree_op_init(&s->op, -1);

        ret = bch_btree_map_keys(&s->op, s->iop.c,
                                 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
                                 cache_lookup_fn, MAP_END_KEY);
        if (ret == -EAGAIN) {
                continue_at(cl, cache_lookup, bcache_wq);
                return;
        }

        closure_return(cl);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio)
{
        struct closure *cl = bio->bi_private;

        if (bio->bi_status) {
                struct search *s = container_of(cl, struct search, cl);
                s->iop.status = bio->bi_status;
                /* Only cache read errors are recoverable */
                s->recoverable = false;
        }

        bio_put(bio);
        closure_put(cl);
}

static void bio_complete(struct search *s)
{
        if (s->orig_bio) {
                struct request_queue *q = s->orig_bio->bi_disk->queue;
                generic_end_io_acct(q, bio_data_dir(s->orig_bio),
                                    &s->d->disk->part0, s->start_time);

                trace_bcache_request_end(s->d, s->orig_bio);
                s->orig_bio->bi_status = s->iop.status;
                bio_endio(s->orig_bio);
                s->orig_bio = NULL;
        }
}

static void do_bio_hook(struct search *s, struct bio *orig_bio)
{
        struct bio *bio = &s->bio.bio;

        bio_init(bio, NULL, 0);
        __bio_clone_fast(bio, orig_bio);
        bio->bi_end_io = request_endio;
        bio->bi_private = &s->cl;

        bio_cnt_set(bio, 3);
}

static void search_free(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        bio_complete(s);

        if (s->iop.bio)
                bio_put(s->iop.bio);

        closure_debug_destroy(cl);
        mempool_free(s, s->d->c->search);
}

static inline struct search *search_alloc(struct bio *bio,
                                          struct bcache_device *d)
{
        struct search *s;

        s = mempool_alloc(d->c->search, GFP_NOIO);

        closure_init(&s->cl, NULL);
        do_bio_hook(s, bio);

        s->orig_bio = bio;
        s->cache_miss = NULL;
        s->d = d;
        s->recoverable = 1;
        s->write = op_is_write(bio_op(bio));
        s->read_dirty_data = 0;
        s->start_time = jiffies;

        s->iop.c = d->c;
        s->iop.bio = NULL;
        s->iop.inode = d->id;
        s->iop.write_point = hash_long((unsigned long) current, 16);
        s->iop.write_prio = 0;
        s->iop.status = 0;
        s->iop.flags = 0;
        s->iop.flush_journal = op_is_flush(bio->bi_opf);
        s->iop.wq = bcache_wq;

        return s;
}

/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

        search_free(cl);
        cached_dev_put(dc);
}

/* Process reads */

static void cached_dev_cache_miss_done(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);

        if (s->iop.replace_collision)
                bch_mark_cache_miss_collision(s->iop.c, s->d);

        if (s->iop.bio)
                bio_free_pages(s->iop.bio);

        cached_dev_bio_complete(cl);
}

static void cached_dev_read_error(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct bio *bio = &s->bio.bio;

        if (s->recoverable) {
                /* Retry from the backing device: */
                trace_bcache_read_retry(s->orig_bio);

                s->iop.status = 0;
                do_bio_hook(s, s->orig_bio);

                /* XXX: invalidate cache */

                closure_bio_submit(bio, cl);
        }

        continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

        /*
         * We had a cache miss; cache_bio now contains data ready to be inserted
         * into the cache.
         *
         * First, we copy the data we just read from cache_bio's bounce buffers
         * to the buffers the original bio pointed to:
         */

        if (s->iop.bio) {
                bio_reset(s->iop.bio);
                s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
                bio_copy_dev(s->iop.bio, s->cache_miss);
                s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
                bch_bio_map(s->iop.bio, NULL);

                bio_copy_data(s->cache_miss, s->iop.bio);

                bio_put(s->cache_miss);
                s->cache_miss = NULL;
        }

        if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
                bch_data_verify(dc, s->orig_bio);

        bio_complete(s);

        if (s->iop.bio &&
            !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
                BUG_ON(!s->iop.replace);
                closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
        }

        continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done_bh(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

        bch_mark_cache_accounting(s->iop.c, s->d,
                                  !s->cache_miss, s->iop.bypass);
        trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);

        if (s->iop.status)
                continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
        else if (s->iop.bio || verify(dc, &s->bio.bio))
                continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
        else
                continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}

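/*
 * cached_dev_read_done_bh() is the first thing to run when the lookup closure
 * completes (the _bh suffix suggests it may be called from bio completion
 * context), so all it does is pick the right continuation: errors go to the
 * retry path, a cache miss or a read needing verification goes to
 * cached_dev_read_done() on a workqueue where copying and cache insertion can
 * block, and a clean cache hit completes immediately.
 */
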
static int cached_dev_cache_miss(struct btree *b, struct search *s,
                                 struct bio *bio, unsigned sectors)
{
        int ret = MAP_CONTINUE;
        unsigned reada = 0;
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
        struct bio *miss, *cache_bio;

        if (s->cache_miss || s->iop.bypass) {
                miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
                ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
                goto out_submit;
        }

        if (!(bio->bi_opf & REQ_RAHEAD) &&
            !(bio->bi_opf & REQ_META) &&
            s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
                reada = min_t(sector_t, dc->readahead >> 9,
                              get_capacity(bio->bi_disk) - bio_end_sector(bio));

        s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

        s->iop.replace_key = KEY(s->iop.inode,
                                 bio->bi_iter.bi_sector + s->insert_bio_sectors,
                                 s->insert_bio_sectors);

        ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
        if (ret)
                return ret;

        s->iop.replace = true;

        miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);

        /* btree_search_recurse()'s btree iterator is no good anymore */
        ret = miss == bio ? MAP_DONE : -EINTR;

        cache_bio = bio_alloc_bioset(GFP_NOWAIT,
                        DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
                        dc->disk.bio_split);
        if (!cache_bio)
                goto out_submit;

        cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
        bio_copy_dev(cache_bio, miss);
        cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;

        cache_bio->bi_end_io = request_endio;
        cache_bio->bi_private = &s->cl;

        bch_bio_map(cache_bio, NULL);
        if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
                goto out_put;

        if (reada)
                bch_mark_cache_readahead(s->iop.c, s->d);

        s->cache_miss = miss;
        s->iop.bio = cache_bio;
        bio_get(cache_bio);
        closure_bio_submit(cache_bio, &s->cl);

        return ret;
out_put:
        bio_put(cache_bio);
out_submit:
        miss->bi_end_io = request_endio;
        miss->bi_private = &s->cl;
        closure_bio_submit(miss, &s->cl);
        return ret;
}

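/*
 * On a genuine cache miss the function above does three things: it records a
 * pointerless replace key via bch_btree_insert_check_key(), so that a racing
 * write to the same range later shows up as a replace collision instead of
 * being overwritten with stale data; it optionally pads the read out with
 * readahead while the cache isn't too full; and it allocates cache_bio with
 * its own bounce pages so the data read from the backing device can be
 * inserted into the cache once it arrives.
 */
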
static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
        struct closure *cl = &s->cl;

        closure_call(&s->iop.cl, cache_lookup, NULL, cl);
        continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

        up_read_non_owner(&dc->writeback_lock);
        cached_dev_bio_complete(cl);
}

static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
        struct closure *cl = &s->cl;
        struct bio *bio = &s->bio.bio;
        struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
        struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

        bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

        down_read_non_owner(&dc->writeback_lock);
        if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
                /*
                 * We overlap with some dirty data undergoing background
                 * writeback, force this write to writeback
                 */
                s->iop.bypass = false;
                s->iop.writeback = true;
        }

        /*
         * Discards aren't _required_ to do anything, so skipping if
         * check_overlapping returned true is ok
         *
         * But check_overlapping drops dirty keys for which io hasn't started,
         * so we still want to call it.
         */
        if (bio_op(bio) == REQ_OP_DISCARD)
                s->iop.bypass = true;

        if (should_writeback(dc, s->orig_bio,
                             cache_mode(dc, bio),
                             s->iop.bypass)) {
                s->iop.bypass = false;
                s->iop.writeback = true;
        }

        if (s->iop.bypass) {
                s->iop.bio = s->orig_bio;
                bio_get(s->iop.bio);

                if ((bio_op(bio) != REQ_OP_DISCARD) ||
                    blk_queue_discard(bdev_get_queue(dc->bdev)))
                        closure_bio_submit(bio, cl);
        } else if (s->iop.writeback) {
                bch_writeback_add(dc);
                s->iop.bio = bio;

                if (bio->bi_opf & REQ_PREFLUSH) {
                        /* Also need to send a flush to the backing device */
                        struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
                                                             dc->disk.bio_split);

                        bio_copy_dev(flush, bio);
                        flush->bi_end_io = request_endio;
                        flush->bi_private = cl;
                        flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

                        closure_bio_submit(flush, cl);
                }
        } else {
                s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);

                closure_bio_submit(bio, cl);
        }

        closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
        continue_at(cl, cached_dev_write_complete, NULL);
}

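/*
 * The three branches above are the write policies: bypass sends the original
 * bio to the backing device and merely invalidates the cached range; writeback
 * writes the data to the cache only, leaving the background writeback thread
 * to copy it to the backing device later (plus an explicit flush to the
 * backing device if the write was a preflush); writethrough, the default,
 * clones the bio so the backing-device write and the cache insert can proceed
 * in parallel.
 */
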
static void cached_dev_nodata(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct bio *bio = &s->bio.bio;

        if (s->iop.flush_journal)
                bch_journal_meta(s->iop.c, cl);

        /* If it's a flush, we send the flush to the backing device too */
        closure_bio_submit(bio, cl);

        continue_at(cl, cached_dev_bio_complete, NULL);
}

/* Cached devices - read & write stuff */

static blk_qc_t cached_dev_make_request(struct request_queue *q,
                                        struct bio *bio)
{
        struct search *s;
        struct bcache_device *d = bio->bi_disk->private_data;
        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
        int rw = bio_data_dir(bio);

        generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);

        bio_set_dev(bio, dc->bdev);
        bio->bi_iter.bi_sector += dc->sb.data_offset;

        if (cached_dev_get(dc)) {
                s = search_alloc(bio, d);
                trace_bcache_request_start(s->d, bio);

                if (!bio->bi_iter.bi_size) {
                        /*
                         * can't call bch_journal_meta from under
                         * generic_make_request
                         */
                        continue_at_nobarrier(&s->cl,
                                              cached_dev_nodata,
                                              bcache_wq);
                } else {
                        s->iop.bypass = check_should_bypass(dc, bio);

                        if (rw)
                                cached_dev_write(dc, s);
                        else
                                cached_dev_read(dc, s);
                }
        } else {
                if ((bio_op(bio) == REQ_OP_DISCARD) &&
                    !blk_queue_discard(bdev_get_queue(dc->bdev)))
                        bio_endio(bio);
                else
                        generic_make_request(bio);
        }

        return BLK_QC_T_NONE;
}

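/*
 * Note that every bio is retargeted at the backing device (bio_set_dev() plus
 * the data_offset shift past bcache's on-disk superblock) before anything else
 * happens, so when the cached_dev refcount can't be taken - typically while
 * the device is detaching - the fallback is simply to pass the bio straight
 * through with generic_make_request().
 */
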
static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
                            unsigned int cmd, unsigned long arg)
{
        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
        return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
        struct bcache_device *d = data;
        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
        struct request_queue *q = bdev_get_queue(dc->bdev);
        int ret = 0;

        if (bdi_congested(q->backing_dev_info, bits))
                return 1;

        if (cached_dev_get(dc)) {
                unsigned i;
                struct cache *ca;

                for_each_cache(ca, d->c, i) {
                        q = bdev_get_queue(ca->bdev);
                        ret |= bdi_congested(q->backing_dev_info, bits);
                }

                cached_dev_put(dc);
        }

        return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
        struct gendisk *g = dc->disk.disk;

        g->queue->make_request_fn = cached_dev_make_request;
        g->queue->backing_dev_info->congested_fn = cached_dev_congested;
        dc->disk.cache_miss = cached_dev_cache_miss;
        dc->disk.ioctl = cached_dev_ioctl;
}

/* Flash backed devices */

static int flash_dev_cache_miss(struct btree *b, struct search *s,
                                struct bio *bio, unsigned sectors)
{
        unsigned bytes = min(sectors, bio_sectors(bio)) << 9;

        swap(bio->bi_iter.bi_size, bytes);
        zero_fill_bio(bio);
        swap(bio->bi_iter.bi_size, bytes);

        bio_advance(bio, bytes);

        if (!bio->bi_iter.bi_size)
                return MAP_DONE;

        return MAP_CONTINUE;
}

static void flash_dev_nodata(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);

        if (s->iop.flush_journal)
                bch_journal_meta(s->iop.c, cl);

        continue_at(cl, search_free, NULL);
}

static blk_qc_t flash_dev_make_request(struct request_queue *q,
                                       struct bio *bio)
{
        struct search *s;
        struct closure *cl;
        struct bcache_device *d = bio->bi_disk->private_data;
        int rw = bio_data_dir(bio);

        generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);

        s = search_alloc(bio, d);
        cl = &s->cl;
        bio = &s->bio.bio;

        trace_bcache_request_start(s->d, bio);

        if (!bio->bi_iter.bi_size) {
                /*
                 * can't call bch_journal_meta from under
                 * generic_make_request
                 */
                continue_at_nobarrier(&s->cl,
                                      flash_dev_nodata,
                                      bcache_wq);
                return BLK_QC_T_NONE;
        } else if (rw) {
                bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
                                             &KEY(d->id, bio->bi_iter.bi_sector, 0),
                                             &KEY(d->id, bio_end_sector(bio), 0));

                s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0;
                s->iop.writeback = true;
                s->iop.bio = bio;

                closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
        } else {
                closure_call(&s->iop.cl, cache_lookup, NULL, cl);
        }

        continue_at(cl, search_free, NULL);
        return BLK_QC_T_NONE;
}

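/*
 * Flash-only volumes have no backing device, so every write above is treated
 * as a writeback write straight into the cache set (discards just become
 * invalidations via the bypass flag), and a read that finds no key simply
 * returns zeroes - which is all flash_dev_cache_miss() does.
 */
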
static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
                           unsigned int cmd, unsigned long arg)
{
        return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
        struct bcache_device *d = data;
        struct request_queue *q;
        struct cache *ca;
        unsigned i;
        int ret = 0;

        for_each_cache(ca, d->c, i) {
                q = bdev_get_queue(ca->bdev);
                ret |= bdi_congested(q->backing_dev_info, bits);
        }

        return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
        struct gendisk *g = d->disk;

        g->queue->make_request_fn = flash_dev_make_request;
        g->queue->backing_dev_info->congested_fn = flash_dev_congested;
        d->cache_miss = flash_dev_cache_miss;
        d->ioctl = flash_dev_ioctl;
}

void bch_request_exit(void)
{
        if (bch_search_cache)
                kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
        bch_search_cache = KMEM_CACHE(search, 0);
        if (!bch_search_cache)
                return -ENOMEM;

        return 0;
}