/*
 * Main bcache entry point - handle a read or a write request and decide what
 * to do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/backing-dev.h>

#include <trace/events/bcache.h>
#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90
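/*
 * Rough meaning of the cutoffs above, as used later in this file: once the
 * cache is more than CUTOFF_CACHE_ADD percent full, check_should_bypass()
 * stops adding new data to the cache; once it is more than
 * CUTOFF_CACHE_READA percent full, cached_dev_cache_miss() stops allocating
 * readahead.
 */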
struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *);
static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
	return BDEV_CACHE_MODE(&dc->sb);
}
static bool verify(struct cached_dev *dc, struct bio *bio)
{
	return dc->verify;
}
static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	uint64_t csum = 0;

	bio_for_each_segment(bv, bio, iter) {
		void *d = kmap(bv.bv_page) + bv.bv_offset;
		csum = bch_crc64_update(csum, d, bv.bv_len);
		kunmap(bv.bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}
/* Insert data into cache */
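/*
 * In outline, the insert path runs in three closure steps:
 *   bch_data_insert()       - set up the keylist
 *   bch_data_insert_start() - allocate cache space, write the data, build keys
 *   bch_data_insert_keys()  - journal the keys, then insert them in the btree
 * Errors on the data write are handled by bch_data_insert_endio()/_error().
 */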
static void bch_data_insert_keys(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	/*
	 * If we're looping, might already be waiting on
	 * another journal write - can't wait on more than one journal write at
	 * a time
	 *
	 * XXX: this looks wrong
	 */
#if 0
	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
		closure_sync(&s->cl);
#endif

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->error		= -ENOMEM;
		op->insert_data_done	= true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done) {
		continue_at(cl, bch_data_insert_start, op->wq);
		return;
	}

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}
static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
			       struct cache_set *c)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;

	/*
	 * The journalling code doesn't handle the case where the keys to insert
	 * is bigger than an empty write: If we just return -ENOMEM here,
	 * bio_insert() and bio_invalidate() will insert the keys created so far
	 * and finish the rest when the keylist is empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
		return -ENOMEM;

	return __bch_keylist_realloc(l, u64s);
}
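/*
 * Bypassed writes still have to invalidate whatever the cache holds for the
 * region they touch: bch_data_invalidate() below emits keys with no pointers
 * covering the bio, which overwrite (and thereby invalidate) any cached data
 * in that range.
 */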
static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

	while (bio_sectors(bio)) {
		unsigned sectors = min(bio_sectors(bio),
				       1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
			goto out;

		bio->bi_iter.bi_sector	+= sectors;
		bio->bi_iter.bi_size	-= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode, bio->bi_iter.bi_sector, sectors));
	}

	op->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, op->wq);
}
static void bch_data_insert_error(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}
static void bch_data_insert_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (bio->bi_error) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->error = bio->bi_error;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, op->wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, bio->bi_error, "writing data to cache");
}
static void bch_data_insert_start(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
		wake_up_gc(op->c);

	if (op->bypass)
		return bch_data_invalidate(cl);

	/*
	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);

	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = op->c->bio_split;

		/* 1 for the device pointer and 1 for the chksum */
		if (bch_keylist_realloc(&op->insert_keys,
					3 + (op->csum ? 1 : 0),
					op->c)) {
			continue_at(cl, bch_data_insert_keys, op->wq);
			return;
		}

		k = op->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io	= bch_data_insert_endio;
		n->bi_private	= cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		bio_set_op_attrs(n, REQ_OP_WRITE, 0);
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, op->wq);
	return;
err:
	/* bch_alloc_sectors() blocks if s->writeback = true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take awhile and
	 * we might be starving btree writes for gc or something.
	 */

	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write while
		 * we wait for buckets to be freed up, so just invalidate the
		 * rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, op->wq);
		else
			closure_return(cl);
	}
}
/**
 * bch_data_insert - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data had to be fragmented there will be multiple keys); after the
 * data is written it calls bch_journal, and after the keys have been added to
 * the next journal write they're inserted into the btree.
 *
 * It inserts the data in s->cache_bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If s->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by s->cache_bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	trace_bcache_write(op->c, op->inode, op->bio,
			   op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(cl);
}
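/* Congested? */

/*
 * Roughly: bch_get_congested() returns 0 when the cache set isn't considered
 * congested, otherwise a smallish positive threshold that
 * check_should_bypass() compares against the request size - the more
 * congested the cache device looks, the more likely larger requests are to
 * be sent straight to the backing device.
 */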
unsigned bch_get_congested(struct cache_set *c)
{
	int i;
	long rand;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	rand = get_random_int();
	i -= bitmap_weight(&rand, BITS_PER_LONG);

	return i > 0 ? i : 1;
}
static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}
static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}
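/*
 * check_should_bypass() decides whether a request skips the cache entirely.
 * Besides the obvious cases (device detaching, cache nearly full, discards,
 * unaligned I/O, the configured cache mode), it tracks recent I/O in a small
 * hash table plus LRU so that long sequential streams past
 * dc->sequential_cutoff, and I/O while the cache device looks congested, go
 * straight to the backing device.
 */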
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned mode = cache_mode(dc, bio);
	unsigned sectors, congested = bch_get_congested(c);
	struct task_struct *task = current;
	struct io *i;

	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio_op(bio) == REQ_OP_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     op_is_write(bio_op(bio))))
		goto skip;

	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (bypass_torture_test(dc)) {
		if ((get_random_int() & 3) == 3)
			goto skip;
		else
			goto rescale;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	if (!congested &&
	    mode == CACHE_MODE_WRITEBACK &&
	    op_is_write(bio->bi_opf) &&
	    op_is_sync(bio->bi_opf))
		goto rescale;

	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
		if (i->last == bio->bi_iter.bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;

	i = list_first_entry(&dc->io_lru, struct io, lru);

	add_sequential(task);
	i->sequential = 0;
found:
	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
		i->sequential	+= bio->bi_iter.bi_size;

	i->last			 = bio_end_sector(bio);
	i->jiffies		 = jiffies + msecs_to_jiffies(5000);
	task->sequential_io	 = i->sequential;

	hlist_del(&i->hash);
	hlist_add_head(&i->hash, iohash(dc, i->last));
	list_move_tail(&i->lru, &dc->io_lru);

	spin_unlock(&dc->io_lock);

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}
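/* Cache lookup */

/*
 * struct search is the per-request state for the make_request paths: one is
 * allocated in search_alloc() for every bio we handle and torn down in
 * search_free() once the request, and any cache insert it spawned, have
 * completed.
 */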
struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;
	struct bcache_device	*d;

	unsigned		insert_bio_sectors;
	unsigned		recoverable:1;
	unsigned		write:1;
	unsigned		read_dirty_data:1;

	unsigned long		start_time;

	struct btree_op		op;
	struct data_insert_op	iop;
};
static void bch_cache_read_endio(struct bio *bio)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->error but not error so it doesn't get
	 * counted against the cache device, but we'll still reread the data
	 * from the backing device.
	 */

	if (bio->bi_error)
		s->iop.error = bio->bi_error;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.error = -EINTR;
	}

	bch_bbio_endio(s->iop.c, bio, bio->bi_error, "reading from cache");
}
/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_iter.bi_sector) {
		unsigned bio_sectors = bio_sectors(bio);
		unsigned sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_iter.bi_sector)
			: INT_MAX;

		int ret = s->d->cache_miss(b, s, bio, sectors);
		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	if (KEY_DIRTY(k))
		s->read_dirty_data = true;

	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
			   GFP_NOIO, s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io	= bch_cache_read_endio;
	n->bi_private	= &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong
	 * data.
	 *
	 * We guard against this by checking (in cache_read_endio()) if
	 * the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}
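/*
 * cache_lookup() drives cache_lookup_fn() over every key overlapping the
 * request via bch_btree_map_keys(); if the walk couldn't complete it returns
 * -EAGAIN and we simply reschedule ourselves on bcache_wq and retry.
 */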
static void cache_lookup(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;
	int ret;

	bch_btree_op_init(&s->op, -1);

	ret = bch_btree_map_keys(&s->op, s->iop.c,
				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
				 cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN) {
		continue_at(cl, cache_lookup, bcache_wq);
		return;
	}

	closure_return(cl);
}
/* Common code for the make_request functions */

static void request_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	if (bio->bi_error) {
		struct search *s = container_of(cl, struct search, cl);
		s->iop.error = bio->bi_error;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}
static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		generic_end_io_acct(bio_data_dir(s->orig_bio),
				    &s->d->disk->part0, s->start_time);

		trace_bcache_request_end(s->d, s->orig_bio);
		s->orig_bio->bi_error = s->iop.error;
		bio_endio(s->orig_bio);
		s->orig_bio = NULL;
	}
}
static void do_bio_hook(struct search *s, struct bio *orig_bio)
{
	struct bio *bio = &s->bio.bio;

	bio_init(bio, NULL, 0);
	__bio_clone_fast(bio, orig_bio);
	bio->bi_end_io		= request_endio;
	bio->bi_private		= &s->cl;

	bio_cnt_set(bio, 3);
}
static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	bio_complete(s);

	if (s->iop.bio)
		bio_put(s->iop.bio);

	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}
static inline struct search *search_alloc(struct bio *bio,
					  struct bcache_device *d)
{
	struct search *s;

	s = mempool_alloc(d->c->search, GFP_NOIO);

	closure_init(&s->cl, NULL);
	do_bio_hook(s, bio);

	s->orig_bio		= bio;
	s->cache_miss		= NULL;
	s->d			= d;
	s->recoverable		= 1;
	s->write		= op_is_write(bio_op(bio));
	s->read_dirty_data	= 0;
	s->start_time		= jiffies;

	s->iop.c		= d->c;
	s->iop.bio		= NULL;
	s->iop.inode		= d->id;
	s->iop.write_point	= hash_long((unsigned long) current, 16);
	s->iop.write_prio	= 0;
	s->iop.error		= 0;
	s->iop.flags		= 0;
	s->iop.flush_journal	= (bio->bi_opf & (REQ_PREFLUSH|REQ_FUA)) != 0;
	s->iop.wq		= bcache_wq;

	return s;
}
static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}
static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio)
		bio_free_pages(s->iop.bio);

	cached_dev_bio_complete(cl);
}
static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->recoverable) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.error = 0;
		do_bio_hook(s, s->orig_bio);

		/* XXX: invalidate cache */

		closure_bio_submit(bio, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}
static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be inserted
	 * into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
		s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}
static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_miss, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);

	if (s->iop.error)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc, &s->bio.bio))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}
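/*
 * Cache miss path, in outline: unless the request is being bypassed, we
 * reserve the region with a check key (s->iop.replace_key), read from the
 * backing device into a separate cache_bio sized to include optional
 * readahead, and later insert that data with replace semantics so a write
 * that raced with the miss can't be clobbered.
 */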
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = MAP_CONTINUE;
	unsigned reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	if (s->cache_miss || s->iop.bypass) {
		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_opf & REQ_RAHEAD) &&
	    !(bio->bi_opf & REQ_META) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector	= miss->bi_iter.bi_sector;
	cache_bio->bi_bdev		= miss->bi_bdev;
	cache_bio->bi_iter.bi_size	= s->insert_bio_sectors << 9;

	cache_bio->bi_end_io	= request_endio;
	cache_bio->bi_private	= &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss	= miss;
	s->iop.bio	= cache_bio;
	bio_get(cache_bio);
	closure_bio_submit(cache_bio, &s->cl);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io		= request_endio;
	miss->bi_private	= &s->cl;
	closure_bio_submit(miss, &s->cl);
	return ret;
}
static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}
static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}
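/*
 * Writes end up in one of three modes below: bypass (the data only goes to
 * the backing device and the cached range is invalidated), writeback (the
 * data only goes to the cache and is marked dirty), or writethrough (the
 * default: the bio is cloned and written to both). A flush on a writeback
 * write also has to be forwarded to the backing device explicitly.
 */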
static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc, bio),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if ((bio_op(bio) != REQ_OP_DISCARD) ||
		    blk_queue_discard(bdev_get_queue(dc->bdev)))
			closure_bio_submit(bio, cl);
	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_opf & REQ_PREFLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
							     dc->disk.bio_split);

			flush->bi_bdev	= bio->bi_bdev;
			flush->bi_end_io = request_endio;
			flush->bi_private = cl;
			flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

			closure_bio_submit(flush, cl);
		}
	} else {
		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);

		closure_bio_submit(bio, cl);
	}

	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}
static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	closure_bio_submit(bio, cl);

	continue_at(cl, cached_dev_bio_complete, NULL);
}
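/*
 * cached_dev_make_request() is the entry point the block layer calls for a
 * bcache device: it remaps the bio onto the backing device (applying
 * sb.data_offset), allocates a struct search, and hands off to the
 * nodata/read/write paths above. If the cached_dev is going away, the bio is
 * passed straight through to the backing device instead.
 */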
/* Cached devices - read & write stuff */

static blk_qc_t cached_dev_make_request(struct request_queue *q,
					struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int rw = bio_data_dir(bio);

	generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0);

	bio->bi_bdev = dc->bdev;
	bio->bi_iter.bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_iter.bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else {
		if ((bio_op(bio) == REQ_OP_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio);
		else
			generic_make_request(bio);
	}

	return BLK_QC_T_NONE;
}
static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}
static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(&q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(&q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}
void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn		= cached_dev_make_request;
	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
	dc->disk.cache_miss			= cached_dev_cache_miss;
	dc->disk.ioctl				= cached_dev_ioctl;
}
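/*
 * Flash-only volumes have no backing device: reads are served entirely out of
 * the cache set's btree (a miss just zero-fills that part of the bio), and
 * writes go through bch_data_insert() with writeback semantics.
 */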
/* Flash backed devices */

static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	unsigned bytes = min(sectors, bio_sectors(bio)) << 9;

	swap(bio->bi_iter.bi_size, bytes);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, bytes);

	bio_advance(bio, bytes);

	if (!bio->bi_iter.bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}
static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}
static blk_qc_t flash_dev_make_request(struct request_queue *q,
				       struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	int rw = bio_data_dir(bio);

	generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0);

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_iter.bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
		return BLK_QC_T_NONE;
	} else if (rw) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_iter.bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass		= (bio_op(bio) == REQ_OP_DISCARD) != 0;
		s->iop.writeback	= true;
		s->iop.bio		= bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
	return BLK_QC_T_NONE;
}
static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(&q->backing_dev_info, bits);
	}

	return ret;
}
void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn		= flash_dev_make_request;
	g->queue->backing_dev_info.congested_fn	= flash_dev_congested;
	d->cache_miss				= flash_dev_cache_miss;
	d->ioctl				= flash_dev_ioctl;
}
void bch_request_exit(void)
{
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}
int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

	return 0;
}