/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
13 #include "writeback.h"
15 #include <linux/cgroup.h>
16 #include <linux/module.h>
17 #include <linux/hash.h>
18 #include <linux/random.h>
19 #include "blk-cgroup.h"
21 #include <trace/events/bcache.h>
#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90
struct kmem_cache *bch_search_cache;

static void check_should_skip(struct cached_dev *, struct search *);
/* Cgroup interface */

#ifdef CONFIG_CGROUP_BCACHE
static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 };
static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup)
{
	struct cgroup_subsys_state *css;
	return cgroup &&
		(css = cgroup_subsys_state(cgroup, bcache_subsys_id))
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}
struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
{
	struct cgroup_subsys_state *css = bio->bi_css
		? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
		: task_subsys_state(current, bcache_subsys_id);

	return css
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}
static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
			struct file *file,
			char __user *buf, size_t nbytes, loff_t *ppos)
{
	char tmp[1024];
	int len = bch_snprint_string_list(tmp, PAGE_SIZE, bch_cache_modes,
					  cgroup_to_bcache(cgrp)->cache_mode + 1);

	if (len < 0)
		return len;

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}
static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
			    const char *buf)
{
	int v = bch_read_string_list(buf, bch_cache_modes);
	if (v < 0)
		return v;

	cgroup_to_bcache(cgrp)->cache_mode = v - 1;
	return 0;
}
static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft)
{
	return cgroup_to_bcache(cgrp)->verify;
}
static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	cgroup_to_bcache(cgrp)->verify = val;
	return 0;
}
static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_hits);
}
static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_misses);
}
static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp,
				      struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_hits);
}
static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp,
					struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_misses);
}
static struct cftype bch_files[] = {
	{
		.name		= "cache_mode",
		.read		= cache_mode_read,
		.write_string	= cache_mode_write,
	},
	{
		.name		= "verify",
		.read_u64	= bch_verify_read,
		.write_u64	= bch_verify_write,
	},
	{
		.name		= "cache_hits",
		.read_u64	= bch_cache_hits_read,
	},
	{
		.name		= "cache_misses",
		.read_u64	= bch_cache_misses_read,
	},
	{
		.name		= "cache_bypass_hits",
		.read_u64	= bch_cache_bypass_hits_read,
	},
	{
		.name		= "cache_bypass_misses",
		.read_u64	= bch_cache_bypass_misses_read,
	},
	{ }	/* terminate */
};
static void init_bch_cgroup(struct bch_cgroup *cg)
{
	cg->cache_mode = -1;
}
static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
{
	struct bch_cgroup *cg;

	cg = kzalloc(sizeof(*cg), GFP_KERNEL);
	if (!cg)
		return ERR_PTR(-ENOMEM);
	init_bch_cgroup(cg);
	return &cg->css;
}
static void bcachecg_destroy(struct cgroup *cgroup)
{
	struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
	free_css_id(&bcache_subsys, &cg->css);
	kfree(cg);
}
struct cgroup_subsys bcache_subsys = {
	.create		= bcachecg_create,
	.destroy	= bcachecg_destroy,
	.subsys_id	= bcache_subsys_id,
	.name		= "bcache",
	.module		= THIS_MODULE,
};
EXPORT_SYMBOL_GPL(bcache_subsys);
#endif
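/*
 * Per-bio cache mode and verify settings: with CONFIG_CGROUP_BCACHE these
 * helpers consult the bio's bcache cgroup first and fall back to the backing
 * device's defaults otherwise.
 */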
static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	int r = bch_bio_to_cgroup(bio)->cache_mode;
	if (r >= 0)
		return r;
#endif
	return BDEV_CACHE_MODE(&dc->sb);
}
static bool verify(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	if (bch_bio_to_cgroup(bio)->verify)
		return true;
#endif
	return dc->verify;
}
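/*
 * bio_csum() accumulates a 64 bit checksum over the bio's data and stashes it
 * in the bkey slot just past the last pointer (k->ptr[KEY_PTRS(k)]), with the
 * top bit cleared.
 */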
static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec *bv;
	uint64_t csum = 0;
	int i;

	bio_for_each_segment(bv, bio, i) {
		void *d = kmap(bv->bv_page) + bv->bv_offset;
		csum = bch_crc64_update(csum, d, bv->bv_len);
		kunmap(bv->bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}
/* Insert data into cache */
static void bio_invalidate(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct bio *bio = op->cache_bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_sector);

	while (bio_sectors(bio)) {
		unsigned len = min(bio_sectors(bio), 1U << 14);

		if (bch_keylist_realloc(&op->keys, 0, op->c))
			goto out;

		bio->bi_sector	+= len;
		bio->bi_size	-= len << 9;

		bch_keylist_add(&op->keys,
				&KEY(op->inode, bio->bi_sector, len));
	}

	op->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_journal, bcache_wq);
}
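/*
 * An open_bucket tracks a bucket currently open for data writes: the key it
 * has been writing to, the task that last wrote to it, and how many sectors
 * remain free. They live on c->data_buckets (kept in LRU order) under
 * c->data_bucket_lock.
 */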
struct open_bucket {
	struct list_head	list;
	struct task_struct	*last;
	unsigned		sectors_free;
	BKEY_PADDED(key);
};
void bch_open_buckets_free(struct cache_set *c)
{
	struct open_bucket *b;

	while (!list_empty(&c->data_buckets)) {
		b = list_first_entry(&c->data_buckets,
				     struct open_bucket, list);
		list_del(&b->list);
		kfree(b);
	}
}
int bch_open_buckets_alloc(struct cache_set *c)
{
	int i;

	spin_lock_init(&c->data_bucket_lock);

	for (i = 0; i < 6; i++) {
		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
		if (!b)
			return -ENOMEM;

		list_add(&b->list, &c->data_buckets);
	}

	return 0;
}
/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we look for a bucket where
 * the last write to it was sequential with the current write, and failing that
 * we look for a bucket that was last used by the same task.
 *
 * The idea is that if you've got multiple tasks pulling data into the cache at
 * the same time, you'll get better cache utilization if you try to segregate
 * their data and preserve locality.
 *
 * For example, say you're starting Firefox at the same time you're copying a
 * bunch of files. Firefox will likely end up being fairly hot and stay in the
 * cache awhile, but the data you copied might not be; if you wrote all that
 * data to the same buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a sane heuristic.
 */
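/*
 * Concretely, pick_data_bucket() scans the open buckets most-recently-used
 * first and prefers, in order: a bucket whose last key matches @search (a
 * sequential continuation), a bucket last written by @task, and finally the
 * least recently used bucket, refilling it from @alloc if it has no sectors
 * left.
 */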
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    struct task_struct *task,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;

	list_for_each_entry_reverse(ret, &c->data_buckets, list)
		if (!bkey_cmp(&ret->key, search))
			goto found;
		else if (ret->last == task)
			ret_task = ret;

	ret = ret_task ?: list_first_entry(&c->data_buckets,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
		ret->sectors_free = c->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}

	if (!ret->sectors_free)
		ret = NULL;

	return ret;
}
/*
 * Allocates some space in the cache to write to, sets k to point to the newly
 * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
 * end of the newly allocated space).
 *
 * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If s->writeback is true, will not fail.
 */
static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
			      struct search *s)
{
	struct cache_set *c = s->op.c;
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	struct closure cl, *w = NULL;
	unsigned i;

	if (s->writeback) {
		closure_init_stack(&cl);
		w = &cl;
	}

	/*
	 * We might have to allocate a new bucket, which we can't do with a
	 * spinlock held. So if we have to allocate, we drop the lock, allocate
	 * and then retry. KEY_PTRS() indicates whether alloc points to
	 * allocated bucket(s).
	 */

	bkey_init(&alloc.key);
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, s->task, &alloc.key))) {
		unsigned watermark = s->op.write_prio
			? WATERMARK_MOVINGGC
			: WATERMARK_NONE;

		spin_unlock(&c->data_bucket_lock);

		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, w))
			return false;

		spin_lock(&c->data_bucket_lock);
	}

	/*
	 * If we had to allocate, we might race and not need to allocate the
	 * second time we call find_data_bucket(). If we allocated a bucket but
	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
	 */
	if (KEY_PTRS(&alloc.key))
		__bkey_put(c, &alloc.key);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		EBUG_ON(ptr_stale(c, &b->key, i));

	/* Set up the pointer to the space we're allocating: */

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		k->ptr[i] = b->key.ptr[i];

	sectors = min(sectors, b->sectors_free);

	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
	SET_KEY_SIZE(k, sectors);
	SET_KEY_PTRS(k, KEY_PTRS(&b->key));

	/*
	 * Move b to the end of the lru, and keep track of what this bucket was
	 * last used for:
	 */
	list_move_tail(&b->list, &c->data_buckets);
	bkey_copy_key(&b->key, k);
	b->last = s->task;

	b->sectors_free	-= sectors;

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

		atomic_long_add(sectors,
				&PTR_CACHE(c, &b->key, i)->sectors_written);
	}

	if (b->sectors_free < c->sb.block_size)
		b->sectors_free = 0;

	/*
	 * k takes refcounts on the buckets it points to until it's inserted
	 * into the btree, but if we're done with this bucket we just transfer
	 * get_data_bucket()'s refcount.
	 */
	if (b->sectors_free)
		for (i = 0; i < KEY_PTRS(&b->key); i++)
			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

	spin_unlock(&c->data_bucket_lock);
	return true;
}
static void bch_insert_data_error(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->keys.bottom, *dst = op->keys.bottom;

	while (src != op->keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		bkey_copy(dst, src);

		dst = bkey_next(dst);
		src = n;
	}

	op->keys.top = dst;

	bch_journal(cl);
}
static void bch_insert_data_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct search *s = container_of(op, struct search, op);

	if (error) {
		/* TODO: We could try to recover from this. */
		if (s->writeback)
			s->error = error;
		else if (s->write)
			set_closure_fn(cl, bch_insert_data_error, bcache_wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, error, "writing data to cache");
}
static void bch_insert_data_loop(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct search *s = container_of(op, struct search, op);
	struct bio *bio = op->cache_bio, *n;

	if (op->skip)
		return bio_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
		set_gc_sectors(op->c);
		bch_queue_gc(op->c);
	}

	/*
	 * Journal writes are marked REQ_FLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);

	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = s->d
			? s->d->bio_split : op->c->bio_split;

		/* 1 for the device pointer and 1 for the chksum */
		if (bch_keylist_realloc(&op->keys,
					1 + (op->csum ? 1 : 0),
					op->c))
			continue_at(cl, bch_journal, bcache_wq);

		k = op->keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_sector);

		if (!bch_alloc_sectors(k, bio_sectors(bio), s))
			goto err;

		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io	= bch_insert_data_endio;
		n->bi_private	= cl;

		if (s->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->keys);

		n->bi_rw |= REQ_WRITE;
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_journal, bcache_wq);
err:
	/* bch_alloc_sectors() blocks if s->writeback = true */
	BUG_ON(s->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if there
	 * aren't any buckets ready to write to - it might take awhile and we
	 * might be starving btree writes for gc or something.
	 */

	if (s->write) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write
		 * while we wait for buckets to be freed up, so just invalidate
		 * the rest of the write.
		 */
		op->skip = true;
		return bio_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->keys))
			continue_at(cl, bch_journal, bcache_wq);
		else
			closure_return(cl);
	}
}
/**
 * bch_insert_data - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, or a writeback write, or a write to a flash
 * only volume - it's also used by the moving garbage collector to compact
 * data in mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be
 * inserted (if the data had to be fragmented there will be multiple keys);
 * after the data is written it calls bch_journal, and after the keys have
 * been added to the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->cache_bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->skip is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->cache_bio and op->inode.
 */
void bch_insert_data(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);

	bch_keylist_init(&op->keys);
	bio_get(op->cache_bio);
	bch_insert_data_loop(cl);
}
void bch_btree_insert_async(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct search *s = container_of(op, struct search, op);

	if (bch_btree_insert(op, op->c)) {
		s->error		= -ENOMEM;
		op->insert_data_done	= true;
	}

	if (op->insert_data_done) {
		bch_keylist_free(&op->keys);
		closure_return(cl);
	} else
		continue_at(cl, bch_insert_data_loop, bcache_wq);
}
/* Common code for the make_request functions */
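/*
 * Generic endio for bios submitted on behalf of a search: record any error in
 * the search (only cache read errors are recoverable, so recovery is disabled
 * here) and drop the bio's reference on the search closure.
 */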
static void request_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;

	if (error) {
		struct search *s = container_of(cl, struct search, cl);
		s->error = error;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}
void bch_cache_read_endio(struct bio *bio, int error)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->error but not error so it doesn't get
	 * counted against the cache device, but we'll still reread the data
	 * from the backing device.
	 */

	if (error)
		s->error = error;
	else if (ptr_stale(s->op.c, &b->key, 0)) {
		atomic_long_inc(&s->op.c->cache_read_races);
		s->error = -EINTR;
	}

	bch_bbio_endio(s->op.c, bio, error, "reading from cache");
}
static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		int cpu, rw = bio_data_dir(s->orig_bio);
		unsigned long duration = jiffies - s->start_time;

		cpu = part_stat_lock();
		part_round_stats(cpu, &s->d->disk->part0);
		part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
		part_stat_unlock();

		trace_bcache_request_end(s, s->orig_bio);
		bio_endio(s->orig_bio, s->error);
		s->orig_bio = NULL;
	}
}
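/*
 * Set up the bio embedded in the search as a private copy of the original
 * bio, redirect its completion to request_endio(), and give it an elevated
 * reference count so the copy is never freed by the generic bio code.
 */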
static void do_bio_hook(struct search *s)
{
	struct bio *bio = &s->bio.bio;
	memcpy(bio, s->orig_bio, sizeof(struct bio));

	bio->bi_end_io		= request_endio;
	bio->bi_private		= &s->cl;
	atomic_set(&bio->bi_cnt, 3);
}
static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	bio_complete(s);

	if (s->op.cache_bio)
		bio_put(s->op.cache_bio);

	if (s->unaligned_bvec)
		mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);

	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}
static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
{
	struct bio_vec *bv;
	struct search *s = mempool_alloc(d->c->search, GFP_NOIO);
	memset(s, 0, offsetof(struct search, op.keys));

	__closure_init(&s->cl, NULL);

	s->op.inode		= d->id;
	s->op.c			= d->c;
	s->d			= d;
	s->op.lock		= -1;
	s->task			= current;
	s->orig_bio		= bio;
	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
	s->op.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
	s->op.skip		= (bio->bi_rw & REQ_DISCARD) != 0;
	s->recoverable		= 1;
	s->start_time		= jiffies;
	do_bio_hook(s);

	if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
		bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
		memcpy(bv, bio_iovec(bio),
		       sizeof(struct bio_vec) * bio_segments(bio));

		s->bio.bio.bi_io_vec	= bv;
		s->unaligned_bvec	= 1;
	}

	return s;
}
static void btree_read_async(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);

	int ret = btree_root(search_recurse, op->c, op);

	if (ret == -EAGAIN)
		continue_at(cl, btree_read_async, bcache_wq);

	closure_return(cl);
}
/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}
/* Process reads */

static void cached_dev_read_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->op.insert_collision)
		bch_mark_cache_miss_collision(s);

	if (s->op.cache_bio) {
		int i;
		struct bio_vec *bv;

		__bio_for_each_segment(bv, s->op.cache_bio, i, 0)
			__free_page(bv->bv_page);
	}

	cached_dev_bio_complete(cl);
}
static void request_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio_vec *bv;
	int i;

	if (s->recoverable) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->error = 0;
		bv = s->bio.bio.bi_io_vec;
		do_bio_hook(s);
		s->bio.bio.bi_io_vec = bv;

		if (!s->unaligned_bvec)
			bio_for_each_segment(bv, s->orig_bio, i)
				bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
		else
			memcpy(s->bio.bio.bi_io_vec,
			       bio_iovec(s->orig_bio),
			       sizeof(struct bio_vec) *
			       bio_segments(s->orig_bio));

		/* XXX: invalidate cache */

		closure_bio_submit(&s->bio.bio, &s->cl, s->d);
	}

	continue_at(cl, cached_dev_read_complete, NULL);
}
static void request_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * s->cache_bio != NULL implies that we had a cache miss; cache_bio now
	 * contains data ready to be inserted into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->op.cache_bio) {
		bio_reset(s->op.cache_bio);
		s->op.cache_bio->bi_sector	= s->cache_miss->bi_sector;
		s->op.cache_bio->bi_bdev	= s->cache_miss->bi_bdev;
		s->op.cache_bio->bi_size	= s->cache_bio_sectors << 9;
		bch_bio_map(s->op.cache_bio, NULL);

		bio_copy_data(s->cache_miss, s->op.cache_bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc, &s->bio.bio) && s->recoverable)
		bch_data_verify(s);

	bio_complete(s);

	if (s->op.cache_bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->op.c->flags)) {
		s->op.type = BTREE_REPLACE;
		closure_call(&s->op.cl, bch_insert_data, NULL, cl);
	}

	continue_at(cl, cached_dev_read_complete, NULL);
}
static void request_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.skip);

	if (s->error)
		continue_at_nobarrier(cl, request_read_error, bcache_wq);
	else if (s->op.cache_bio || verify(dc, &s->bio.bio))
		continue_at_nobarrier(cl, request_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_read_complete, NULL);
}
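/*
 * Handle the part of a read that missed the cache: split off the missing
 * range, optionally extend it with readahead, and allocate a bounce cache_bio
 * so the data read from the backing device can be inserted into the cache.
 * If the allocations or the check_key insertion fail, fall back to just
 * reading from the backing device.
 */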
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = 0;
	unsigned reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss;

	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
	if (miss == bio)
		s->op.lookup_done = true;

	miss->bi_end_io		= request_endio;
	miss->bi_private	= &s->cl;

	if (s->cache_miss || s->op.skip)
		goto out_submit;

	if (miss != bio ||
	    (bio->bi_rw & REQ_RAHEAD) ||
	    (bio->bi_rw & REQ_META) ||
	    s->op.c->gc_stats.in_use >= CUTOFF_CACHE_READA)
		reada = 0;
	else {
		reada = min(dc->readahead >> 9,
			    sectors - bio_sectors(miss));

		if (bio_end_sector(miss) + reada > bdev_sectors(miss->bi_bdev))
			reada = bdev_sectors(miss->bi_bdev) -
				bio_end_sector(miss);
	}

	s->cache_bio_sectors = bio_sectors(miss) + reada;
	s->op.cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);

	if (!s->op.cache_bio)
		goto out_submit;

	s->op.cache_bio->bi_sector	= miss->bi_sector;
	s->op.cache_bio->bi_bdev	= miss->bi_bdev;
	s->op.cache_bio->bi_size	= s->cache_bio_sectors << 9;

	s->op.cache_bio->bi_end_io	= request_endio;
	s->op.cache_bio->bi_private	= &s->cl;

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = -EINTR;
	if (!bch_btree_insert_check_key(b, &s->op, s->op.cache_bio))
		goto out_put;

	bch_bio_map(s->op.cache_bio, NULL);
	if (bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	s->cache_miss = miss;
	bio_get(s->op.cache_bio);

	closure_bio_submit(s->op.cache_bio, &s->cl, s->d);

	return ret;
out_put:
	bio_put(s->op.cache_bio);
	s->op.cache_bio = NULL;
out_submit:
	closure_bio_submit(miss, &s->cl, s->d);
	return ret;
}
static void request_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	check_should_skip(dc, s);
	closure_call(&s->op.cl, btree_read_async, NULL, cl);

	continue_at(cl, request_read_done_bh, NULL);
}
/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}
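/*
 * request_write() picks one of three paths for a write to a cached device:
 * bypass the cache entirely (op.skip), writeback (the data goes only to the
 * cache and is marked dirty), or writethrough (the bio is cloned and sent to
 * both the cache and the backing device). Overlap with keys currently being
 * written back forces the writeback path so the cache stays coherent.
 */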
static void request_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start, end;
	start = KEY(dc->disk.id, bio->bi_sector, 0);
	end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);

	check_should_skip(dc, s);
	down_read_non_owner(&dc->writeback_lock);

	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		s->op.skip	= false;
		s->writeback	= true;
	}

	if (bio->bi_rw & REQ_DISCARD)
		goto skip;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc, bio),
			     s->op.skip)) {
		s->op.skip = false;
		s->writeback = true;
	}

	if (s->op.skip)
		goto skip;

	trace_bcache_write(s->orig_bio, s->writeback, s->op.skip);

	if (!s->writeback) {
		s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
						   dc->disk.bio_split);

		closure_bio_submit(bio, cl, s->d);
	} else {
		bch_writeback_add(dc);
		s->op.cache_bio = bio;

		if (bio->bi_rw & REQ_FLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
							     dc->disk.bio_split);

			flush->bi_rw	= WRITE_FLUSH;
			flush->bi_bdev	= bio->bi_bdev;
			flush->bi_end_io = request_endio;
			flush->bi_private = cl;

			closure_bio_submit(flush, cl, s->d);
		}
	}
out:
	closure_call(&s->op.cl, bch_insert_data, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
skip:
	s->op.skip = true;
	s->op.cache_bio = s->orig_bio;
	bio_get(s->op.cache_bio);

	if ((bio->bi_rw & REQ_DISCARD) &&
	    !blk_queue_discard(bdev_get_queue(dc->bdev)))
		goto out;

	closure_bio_submit(bio, cl, s->d);
	goto out;
}
static void request_nodata(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;

	if (bio->bi_rw & REQ_DISCARD) {
		request_write(dc, s);
		return;
	}

	if (s->op.flush_journal)
		bch_journal_meta(s->op.c, cl);

	closure_bio_submit(bio, cl, s->d);

	continue_at(cl, cached_dev_bio_complete, NULL);
}
1048 /* Cached devices - read & write stuff */
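/*
 * bch_get_congested() converts the cache set's recent congestion tracking
 * into a bypass threshold in sectors; check_should_skip() sends IO straight
 * to the backing device once a stream exceeds it. A small random component is
 * subtracted so the cutoff isn't perfectly sharp.
 */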
unsigned bch_get_congested(struct cache_set *c)
{
	int i;
	long rand;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	rand = get_random_int();
	i -= bitmap_weight(&rand, BITS_PER_LONG);

	return i > 0 ? i : 1;
}
static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}
static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}
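/*
 * Decide whether this IO should bypass the cache: discards, misaligned IO, a
 * detaching or nearly full cache, and cache modes that exclude the IO skip it
 * outright; beyond that, per-task sequential IO tracking bypasses large
 * sequential streams, and the threshold from bch_get_congested() bypasses
 * more aggressively while the cache devices are congested.
 */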
static void check_should_skip(struct cached_dev *dc, struct search *s)
{
	struct cache_set *c = s->op.c;
	struct bio *bio = &s->bio.bio;
	unsigned mode = cache_mode(dc, bio);
	unsigned sectors, congested = bch_get_congested(c);

	if (atomic_read(&dc->disk.detaching) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio->bi_rw & REQ_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     (bio->bi_rw & REQ_WRITE)))
		goto skip;

	if (bio->bi_sector   & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	if (!congested &&
	    mode == CACHE_MODE_WRITEBACK &&
	    (bio->bi_rw & REQ_WRITE) &&
	    (bio->bi_rw & REQ_SYNC))
		goto rescale;

	if (dc->sequential_merge) {
		struct io *i;

		spin_lock(&dc->io_lock);

		hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
			if (i->last == bio->bi_sector &&
			    time_before(jiffies, i->jiffies))
				goto found;

		i = list_first_entry(&dc->io_lru, struct io, lru);

		add_sequential(s->task);
		i->sequential = 0;
found:
		if (i->sequential + bio->bi_size > i->sequential)
			i->sequential	+= bio->bi_size;

		i->last			 = bio_end_sector(bio);
		i->jiffies		 = jiffies + msecs_to_jiffies(5000);
		s->task->sequential_io	 = i->sequential;

		hlist_del(&i->hash);
		hlist_add_head(&i->hash, iohash(dc, i->last));
		list_move_tail(&i->lru, &dc->io_lru);

		spin_unlock(&dc->io_lock);
	} else {
		s->task->sequential_io = bio->bi_size;

		add_sequential(s->task);
	}

	sectors = max(s->task->sequential_io,
		      s->task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(s->orig_bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(s->orig_bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return;
skip:
	bch_mark_sectors_bypassed(s, bio_sectors(bio));
	s->op.skip = true;
}
static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	bio->bi_bdev	= dc->bdev;
	bio->bi_sector	+= dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s, bio);

		if (!bio_has_data(bio))
			request_nodata(dc, s);
		else if (rw)
			request_write(dc, s);
		else
			request_read(dc, s);
	} else {
		if ((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio, 0);
		else
			bch_generic_make_request(bio, &d->bio_split_hook);
	}
}
static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}
static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(&q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(&q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}
void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn		= cached_dev_make_request;
	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
	dc->disk.cache_miss			= cached_dev_cache_miss;
	dc->disk.ioctl				= cached_dev_ioctl;
}
/* Flash backed devices */
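/*
 * Flash-only volumes have no backing device, so a cache miss just means the
 * sectors were never written: zero-fill that part of the bio instead of
 * reading anything.
 */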
static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	struct bio_vec *bv;
	int i;

	/* Zero fill bio */

	bio_for_each_segment(bv, bio, i) {
		unsigned j = min(bv->bv_len >> 9, sectors);

		void *p = kmap(bv->bv_page);
		memset(p + bv->bv_offset, 0, j << 9);
		kunmap(bv->bv_page);

		sectors	-= j;
	}

	bio_advance(bio, min(sectors << 9, bio->bi_size));

	if (!bio->bi_size)
		s->op.lookup_done = true;

	return 0;
}
static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s, bio);

	if (bio_has_data(bio) && !rw) {
		closure_call(&s->op.cl, btree_read_async, NULL, cl);
	} else if (bio_has_data(bio) || s->op.skip) {
		bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
					&KEY(d->id, bio->bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->writeback	= true;
		s->op.cache_bio	= bio;

		closure_call(&s->op.cl, bch_insert_data, NULL, cl);
	} else {
		/* No data - probably a cache flush */
		if (s->op.flush_journal)
			bch_journal_meta(s->op.c, cl);
	}

	continue_at(cl, search_free, NULL);
}
static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(&q->backing_dev_info, bits);
	}

	return ret;
}
void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn		= flash_dev_make_request;
	g->queue->backing_dev_info.congested_fn = flash_dev_congested;
	d->cache_miss				= flash_dev_cache_miss;
	d->ioctl				= flash_dev_ioctl;
}
void bch_request_exit(void)
{
#ifdef CONFIG_CGROUP_BCACHE
	cgroup_unload_subsys(&bcache_subsys);
#endif
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}
int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

#ifdef CONFIG_CGROUP_BCACHE
	cgroup_load_subsys(&bcache_subsys);
	init_bch_cgroup(&bcache_default_cgroup);

	cgroup_add_cftypes(&bcache_subsys, bch_files);
#endif

	return 0;
}