// SPDX-License-Identifier: GPL-2.0
/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "writeback.h"

#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/backing-dev.h>

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *cl);
static unsigned int cache_mode(struct cached_dev *dc)
{
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc)
{
	return dc->verify;
}
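/*
 * Compute a 64-bit CRC over the bio's data and stash it in the key: the
 * checksum goes in the u64 slot right after the last pointer
 * (ptr[KEY_PTRS(k)]), masked down to 63 bits.
 */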
static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	uint64_t csum = 0;

	bio_for_each_segment(bv, bio, iter) {
		void *d = kmap(bv.bv_page) + bv.bv_offset;

		csum = bch_crc64_update(csum, d, bv.bv_len);
		kunmap(bv.bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}
/* Insert data into cache */

static void bch_data_insert_keys(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	/*
	 * If we're looping, might already be waiting on
	 * another journal write - can't wait on more than one journal write at
	 * a time
	 *
	 * XXX: this looks wrong
	 */
#if 0
	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
		closure_sync(&s->cl);
#endif

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->status		= BLK_STS_RESOURCE;
		op->insert_data_done	= true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done) {
		continue_at(cl, bch_data_insert_start, op->wq);
		return;
	}

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}
static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
			       struct cache_set *c)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;

	/*
	 * The journalling code doesn't handle the case where the keys to
	 * insert are bigger than an empty write: If we just return -ENOMEM
	 * here, bch_data_insert_keys() will insert the keys created so far
	 * and finish the rest when the keylist is empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
		return -ENOMEM;

	return __bch_keylist_realloc(l, u64s);
}
static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

	while (bio_sectors(bio)) {
		unsigned int sectors = min(bio_sectors(bio),
					   1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
			goto out;

		bio->bi_iter.bi_sector	+= sectors;
		bio->bi_iter.bi_size	-= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode,
				     bio->bi_iter.bi_sector,
				     sectors));
	}

	op->insert_data_done = true;
	/* get in bch_data_insert() */
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, op->wq);
}
static void bch_data_insert_error(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}
static void bch_data_insert_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (bio->bi_status) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->status = bio->bi_status;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, op->wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
}
static void bch_data_insert_start(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (op->bypass)
		return bch_data_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
		wake_up_gc(op->c);

	/*
	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);

	do {
		unsigned int i;
		struct bkey *k;
		struct bio_set *split = &op->c->bio_split;

		/* 1 for the device pointer and 1 for the chksum */
		if (bch_keylist_realloc(&op->insert_keys,
					3 + (op->csum ? 1 : 0),
					op->c)) {
			continue_at(cl, bch_data_insert_keys, op->wq);
			return;
		}

		k = op->insert_keys.top;
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io	= bch_data_insert_endio;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		bio_set_op_attrs(n, REQ_OP_WRITE, 0);
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, op->wq);
	return;
err:
	/* bch_alloc_sectors() blocks if s->writeback = true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take awhile and
	 * we might be starving btree writes for gc or something.
	 */
	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write while
		 * we wait for buckets to be freed up, so just invalidate the
		 * rest of the write.
		 */
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, op->wq);
	}
}
/**
 * bch_data_insert - stick some data in the cache
 * @cl: closure pointer.
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data had to be fragmented there will be multiple keys); after the
 * data is written it calls bch_journal, and after the keys have been added to
 * the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	trace_bcache_write(op->c, op->inode, op->bio,
			   op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(cl);
}
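/*
 * Typical usage (see cached_dev_write() and flash_dev_make_request() below):
 * the caller fills in op->c, op->bio, op->inode and the writeback/bypass/
 * flush_journal flags, then kicks this off with
 * closure_call(&op->cl, bch_data_insert, NULL, parent_closure).
 */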
/*
 * Congested?  Return 0 (not congested) or the limit (in sectors)
 * beyond which we should bypass the cache due to congestion.
 */
unsigned int bch_get_congested(const struct cache_set *c)
{
	int i;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	i -= hweight32(get_random_u32());

	return i > 0 ? i : 1;
}
static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}
static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}
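/*
 * Decide whether a request should skip the cache entirely.  Bypass reasons
 * visible below include: the device is detaching, the cache is nearly full
 * (CUTOFF_CACHE_ADD), discards, writes in none/writearound mode, unaligned
 * I/O, backing-device congestion, and large sequential streams.  Sequential
 * streams are detected with the per-task counters maintained by
 * add_sequential() (an EWMA where each new sample carries roughly 1/8 of the
 * weight) together with the recent-I/O hash/LRU indexed by iohash().
 */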
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned int mode = cache_mode(dc);
	unsigned int sectors, congested;
	struct task_struct *task = current;
	struct io *i;

	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio_op(bio) == REQ_OP_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     op_is_write(bio_op(bio))))
		goto skip;

	/*
	 * If the bio is for read-ahead or background IO, whether to bypass it
	 * depends on the following situations:
	 * - If the IO is for meta data, always cache it and no bypass
	 * - If the IO is not meta data, check dc->cache_readahead_policy,
	 *      BCH_CACHE_READA_ALL: cache it and not bypass
	 *      BCH_CACHE_READA_META_ONLY: not cache it and bypass
	 * That is, read-ahead requests for metadata always get cached
	 * (eg, for gfs2 or xfs).
	 */
	if ((bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND))) {
		if (!(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
		    (dc->cache_readahead_policy != BCH_CACHE_READA_ALL))
			goto skip;
	}

	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (bypass_torture_test(dc)) {
		if ((get_random_int() & 3) == 3)
			goto skip;
		else
			goto rescale;
	}

	congested = bch_get_congested(c);
	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
		if (i->last == bio->bi_iter.bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;

	i = list_first_entry(&dc->io_lru, struct io, lru);

	add_sequential(task);
	i->sequential = 0;
found:
	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
		i->sequential	+= bio->bi_iter.bi_size;

	i->last			 = bio_end_sector(bio);
	i->jiffies		 = jiffies + msecs_to_jiffies(5000);
	task->sequential_io	 = i->sequential;

	hlist_del(&i->hash);
	hlist_add_head(&i->hash, iohash(dc, i->last));
	list_move_tail(&i->lru, &dc->io_lru);

	spin_unlock(&dc->io_lock);

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}
struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;
	struct bcache_device	*d;

	unsigned int		insert_bio_sectors;
	unsigned int		recoverable:1;
	unsigned int		write:1;
	unsigned int		read_dirty_data:1;
	unsigned int		cache_missed:1;

	unsigned long		start_time;

	struct btree_op		op;
	struct data_insert_op	iop;
};
static void bch_cache_read_endio(struct bio *bio)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->error but not error so it doesn't get
	 * counted against the cache device, but we'll still reread the data
	 * from the backing device.
	 */

	if (bio->bi_status)
		s->iop.status = bio->bi_status;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.status = BLK_STS_IOERR;
	}

	bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
}
/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned int ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_iter.bi_sector) {
		unsigned int bio_sectors = bio_sectors(bio);
		unsigned int sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_iter.bi_sector)
			: INT_MAX;
		int ret = s->d->cache_miss(b, s, bio, sectors);

		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	if (KEY_DIRTY(k))
		s->read_dirty_data = true;

	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
			   GFP_NOIO, &s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io	= bch_cache_read_endio;
	n->bi_private	= &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong data.
	 *
	 * We guard against this by checking (in cache_read_endio()) if
	 * the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}
static void cache_lookup(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;
	struct cached_dev *dc;
	int ret;

	bch_btree_op_init(&s->op, -1);

	ret = bch_btree_map_keys(&s->op, s->iop.c,
				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
				 cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN) {
		continue_at(cl, cache_lookup, bcache_wq);
		return;
	}

	/*
	 * We might encounter an error while searching the btree; if that
	 * happens we get a negative ret, and in this scenario we should not
	 * recover data from the backing device (when the cache device is
	 * dirty) because we don't know whether the bkeys the read request
	 * covered are all clean.
	 *
	 * And after that happens, s->iop.status is still its initial value
	 * from before we submitted s->bio.bio
	 */
	if (ret < 0) {
		BUG_ON(ret == -EINTR);
		if (s->d && s->d->c &&
		    !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
			dc = container_of(s->d, struct cached_dev, disk);
			if (dc && atomic_read(&dc->has_dirty))
				s->recoverable = false;
		}
		if (!s->iop.status)
			s->iop.status = BLK_STS_IOERR;
	}

	closure_return(cl);
}
/* Common code for the make_request functions */

static void request_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	if (bio->bi_status) {
		struct search *s = container_of(cl, struct search, cl);

		s->iop.status = bio->bi_status;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}
static void backing_request_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	if (bio->bi_status) {
		struct search *s = container_of(cl, struct search, cl);
		struct cached_dev *dc = container_of(s->d,
						     struct cached_dev, disk);
		/*
		 * If a bio has REQ_PREFLUSH for writeback mode, it is
		 * specially assembled in cached_dev_write() for a non-zero
		 * write request which has REQ_PREFLUSH. We don't set
		 * s->iop.status by this failure; the status will be decided
		 * by the result of the bch_data_insert() operation.
		 */
		if (unlikely(s->iop.writeback &&
			     bio->bi_opf & REQ_PREFLUSH)) {
			pr_err("Can't flush %s: returned bi_status %i",
				dc->backing_dev_name, bio->bi_status);
		} else {
			/* set to orig_bio->bi_status in bio_complete() */
			s->iop.status = bio->bi_status;
		}
		s->recoverable = false;
		/* should count I/O error for backing device here */
		bch_count_backing_io_errors(dc, bio);
	}

	bio_put(bio);
	closure_put(cl);
}
static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		generic_end_io_acct(s->d->disk->queue, bio_op(s->orig_bio),
				    &s->d->disk->part0, s->start_time);

		trace_bcache_request_end(s->d, s->orig_bio);
		s->orig_bio->bi_status = s->iop.status;
		bio_endio(s->orig_bio);
		s->orig_bio = NULL;
	}
}
static void do_bio_hook(struct search *s,
			struct bio *orig_bio,
			bio_end_io_t *end_io_fn)
{
	struct bio *bio = &s->bio.bio;

	bio_init(bio, NULL, 0);
	__bio_clone_fast(bio, orig_bio);
	/*
	 * bi_end_io can be set separately somewhere else, e.g. the
	 * variants in,
	 * - cache_bio->bi_end_io from cached_dev_cache_miss()
	 * - n->bi_end_io from cache_lookup_fn()
	 */
	bio->bi_end_io		= end_io_fn;
	bio->bi_private		= &s->cl;
}
static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	atomic_dec(&s->iop.c->search_inflight);

	closure_debug_destroy(cl);
	mempool_free(s, &s->iop.c->search);
}
static inline struct search *search_alloc(struct bio *bio,
					  struct bcache_device *d)
{
	struct search *s;

	s = mempool_alloc(&d->c->search, GFP_NOIO);

	closure_init(&s->cl, NULL);
	do_bio_hook(s, bio, request_endio);
	atomic_inc(&d->c->search_inflight);

	s->cache_miss		= NULL;
	s->write		= op_is_write(bio_op(bio));
	s->read_dirty_data	= 0;
	s->start_time		= jiffies;

	s->iop.inode		= d->id;
	s->iop.write_point	= hash_long((unsigned long) current, 16);
	s->iop.write_prio	= 0;
	s->iop.flush_journal	= op_is_flush(bio->bi_opf);
	s->iop.wq		= bcache_wq;

	return s;
}
static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	cached_dev_put(dc);
	search_free(cl);
}
static void cached_dev_read_error_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio)
		bio_free_pages(s->iop.bio);

	cached_dev_bio_complete(cl);
}
static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	/*
	 * If a read request hit dirty data (s->read_dirty_data is true),
	 * then recovering the failed read from the cached device may return
	 * stale data. So read failure recovery is only permitted when the
	 * read request hit clean data in the cache device, or when a cache
	 * read race happened.
	 */
	if (s->recoverable && !s->read_dirty_data) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		do_bio_hook(s, s->orig_bio, backing_request_endio);

		/* XXX: invalidate cache */

		/* I/O request sent to backing device */
		closure_bio_submit(s->iop.c, bio, cl);
	}

	continue_at(cl, cached_dev_read_error_done, NULL);
}
static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bcache_device *d = s->d;

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio)
		bio_free_pages(s->iop.bio);

	cached_dev_bio_complete(cl);
	closure_put(&d->cl);
}
static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be
	 * inserted into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_iter.bi_sector =
			s->cache_miss->bi_iter.bi_sector;
		bio_copy_dev(s->iop.bio, s->cache_miss);
		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc) && s->recoverable && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	closure_get(&dc->disk.cl);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}
static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_missed, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass);

	if (s->iop.status)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned int sectors)
{
	int ret = MAP_CONTINUE;
	unsigned int reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	if (s->cache_miss || s->iop.bypass) {
		miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_opf & REQ_RAHEAD) &&
	    !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      get_capacity(bio->bi_disk) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			&dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector	= miss->bi_iter.bi_sector;
	bio_copy_dev(cache_bio, miss);
	cache_bio->bi_iter.bi_size	= s->insert_bio_sectors << 9;

	cache_bio->bi_end_io	= backing_request_endio;
	cache_bio->bi_private	= &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss	= miss;
	s->iop.bio	= cache_bio;
	/* I/O request sent to backing device */
	closure_bio_submit(s->iop.c, cache_bio, &s->cl);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io		= backing_request_endio;
	miss->bi_private	= &s->cl;
	/* I/O request sent to backing device */
	closure_bio_submit(s->iop.c, miss, &s->cl);
	return ret;
}
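/*
 * Read path entry point: run the btree lookup in the nested iop.cl closure;
 * once the lookup and any reads it submitted (from the cache and/or the
 * backing device) complete, the parent closure continues in
 * cached_dev_read_done_bh().
 */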
static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}
static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}
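/*
 * Write path: depending on the flags set up below, the bio is either
 * bypassed (sent straight to the backing device while the matching cache
 * region is invalidated), written back (written only to the cache and
 * marked dirty), or written through (cloned so it reaches both the backing
 * device and the cache).  All variants finish by kicking off
 * bch_data_insert() and completing in cached_dev_write_complete().
 */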
static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			goto insert_data;

		/* I/O request sent to backing device */
		bio->bi_end_io = backing_request_endio;
		closure_bio_submit(s->iop.c, bio, cl);

	} else if (s->iop.writeback) {
		bch_writeback_add(dc);

		if (bio->bi_opf & REQ_PREFLUSH) {
			/*
			 * Also need to send a flush to the backing
			 * device.
			 */
			struct bio *flush;

			flush = bio_alloc_bioset(GFP_NOIO, 0,
						 &dc->disk.bio_split);
			if (!flush) {
				s->iop.status = BLK_STS_RESOURCE;
				goto insert_data;
			}
			bio_copy_dev(flush, bio);
			flush->bi_end_io = backing_request_endio;
			flush->bi_private = cl;
			flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
			/* I/O request sent to backing device */
			closure_bio_submit(s->iop.c, flush, cl);
		}
	} else {
		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
		/* I/O request sent to backing device */
		bio->bi_end_io = backing_request_endio;
		closure_bio_submit(s->iop.c, bio, cl);
	}

insert_data:
	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}
static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	bio->bi_end_io = backing_request_endio;
	closure_bio_submit(s->iop.c, bio, cl);

	continue_at(cl, cached_dev_bio_complete, NULL);
}
struct detached_dev_io_private {
	struct bcache_device	*d;
	unsigned long		start_time;
	bio_end_io_t		*bi_end_io;
	void			*bi_private;
};
static void detached_dev_end_io(struct bio *bio)
{
	struct detached_dev_io_private *ddip;

	ddip = bio->bi_private;
	bio->bi_end_io = ddip->bi_end_io;
	bio->bi_private = ddip->bi_private;

	generic_end_io_acct(ddip->d->disk->queue, bio_op(bio),
			    &ddip->d->disk->part0, ddip->start_time);

	if (bio->bi_status) {
		struct cached_dev *dc = container_of(ddip->d,
						     struct cached_dev, disk);
		/* should count I/O error for backing device here */
		bch_count_backing_io_errors(dc, bio);
	}

	kfree(ddip);
	bio->bi_end_io(bio);
}
static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
{
	struct detached_dev_io_private *ddip;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);

	/*
	 * no need to call closure_get(&dc->disk.cl),
	 * because upper layer had already opened bcache device,
	 * which would call closure_get(&dc->disk.cl)
	 */
	ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
	ddip->start_time = jiffies;
	ddip->bi_end_io = bio->bi_end_io;
	ddip->bi_private = bio->bi_private;
	bio->bi_end_io = detached_dev_end_io;
	bio->bi_private = ddip;

	if ((bio_op(bio) == REQ_OP_DISCARD) &&
	    !blk_queue_discard(bdev_get_queue(dc->bdev)))
		bio->bi_end_io(bio);
	else
		generic_make_request(bio);
}
static void quit_max_writeback_rate(struct cache_set *c,
				    struct cached_dev *this_dc)
{
	int i;
	struct bcache_device *d;
	struct cached_dev *dc;

	/*
	 * The mutex bch_register_lock may compete with other parallel
	 * requesters, or attach/detach operations on other backing devices.
	 * Waiting for the mutex lock may increase I/O request latency for
	 * seconds or more. To avoid such a situation, if mutex_trylock()
	 * fails, only the writeback rate of the current cached device is set
	 * to 1, and __update_writeback_rate() will decide the writeback rate
	 * of the other cached devices (remember now c->idle_counter is 0
	 * already).
	 */
	if (mutex_trylock(&bch_register_lock)) {
		for (i = 0; i < c->devices_max_used; i++) {
			if (UUID_FLASH_ONLY(&c->uuids[i]))
				continue;

			d = c->devices[i];
			dc = container_of(d, struct cached_dev, disk);
			/*
			 * set writeback rate to default minimum value,
			 * then let update_writeback_rate() to decide the
			 * upgrading velocity
			 */
			atomic_long_set(&dc->writeback_rate.rate, 1);
		}
		mutex_unlock(&bch_register_lock);
	} else
		atomic_long_set(&this_dc->writeback_rate.rate, 1);
}
/* Cached devices - read & write stuff */
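/*
 * make_request entry point for a bcache device with a backing device:
 * account the I/O, remap the bio onto the backing device (adding
 * sb.data_offset), then dispatch through the caching paths
 * (cached_dev_read()/cached_dev_write()/cached_dev_nodata()).  If
 * cached_dev_get() fails (device not attached/running), the bio is passed
 * straight through via detached_dev_do_request().
 */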
static blk_qc_t cached_dev_make_request(struct request_queue *q,
					struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int rw = bio_data_dir(bio);

	if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
		     dc->io_disable)) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	if (atomic_read(&d->c->idle_counter))
		atomic_set(&d->c->idle_counter, 0);
	/*
	 * If at_max_writeback_rate of cache set is true and new I/O
	 * comes, quit max writeback rate of all cached devices
	 * attached to this cache set, and set at_max_writeback_rate
	 * to false.
	 */
	if (unlikely(atomic_read(&d->c->at_max_writeback_rate) == 1)) {
		atomic_set(&d->c->at_max_writeback_rate, 0);
		quit_max_writeback_rate(d->c, dc);
	}

	generic_start_io_acct(q,
			      bio_op(bio),
			      bio_sectors(bio),
			      &d->disk->part0);

	bio_set_dev(bio, dc->bdev);
	bio->bi_iter.bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_iter.bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else
		/* I/O request sent to backing device */
		detached_dev_do_request(d, bio);

	return BLK_QC_T_NONE;
}
static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);

	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}
static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned int i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}
void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn		= cached_dev_make_request;
	g->queue->backing_dev_info->congested_fn = cached_dev_congested;
	dc->disk.cache_miss			= cached_dev_cache_miss;
	dc->disk.ioctl				= cached_dev_ioctl;
}
/* Flash backed devices */

static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned int sectors)
{
	unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;

	swap(bio->bi_iter.bi_size, bytes);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, bytes);

	bio_advance(bio, bytes);

	if (!bio->bi_iter.bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}
static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}
static blk_qc_t flash_dev_make_request(struct request_queue *q,
				       struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_disk->private_data;

	if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0);

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_iter.bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
		return BLK_QC_T_NONE;
	} else if (bio_data_dir(bio)) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_iter.bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass		= (bio_op(bio) == REQ_OP_DISCARD) != 0;
		s->iop.writeback	= true;
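		/*
		 * Flash-only volumes have no backing device, so writes always
		 * go through the cache insert path; a discard is handled by
		 * letting bch_data_insert() invalidate the region (bypass).
		 */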
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
	return BLK_QC_T_NONE;
}
static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned int i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(q->backing_dev_info, bits);
	}

	return ret;
}
void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn		= flash_dev_make_request;
	g->queue->backing_dev_info->congested_fn = flash_dev_congested;
	d->cache_miss				= flash_dev_cache_miss;
	d->ioctl				= flash_dev_ioctl;
}
void bch_request_exit(void)
{
	kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

	return 0;
}