/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>
static unsigned bch_bio_max_sectors(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned ret = 0, seg = 0;

	if (bio->bi_rw & REQ_DISCARD)
		return min(bio_sectors(bio), q->limits.max_discard_sectors);

	bio_for_each_segment(bv, bio, iter) {
		struct bvec_merge_data bvm = {
			.bi_bdev	= bio->bi_bdev,
			.bi_sector	= bio->bi_iter.bi_sector,
			.bi_size	= ret << 9,
			.bi_rw		= bio->bi_rw,
		};

		if (seg == min_t(unsigned, BIO_MAX_PAGES,
				 queue_max_segments(q)))
			break;

		if (q->merge_bvec_fn &&
		    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
			break;

		seg++;
		ret += bv.bv_len >> 9;
	}

	ret = min(ret, queue_max_sectors(q));

	WARN_ON(!ret);
	ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);

	return ret;
}
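/*
 * Splitting machinery, roughly: when a bio is too big for the
 * underlying device, bch_generic_make_request() below carves it up
 * with bio_next_split() and tracks the pieces with a closure embedded
 * in a bio_split_hook.  Each piece takes a ref on the closure; when
 * the last piece completes, bch_bio_submit_split_done() restores the
 * original bi_end_io/bi_private and completes the parent bio.
 */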
static void bch_bio_submit_split_done(struct closure *cl)
{
	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

	s->bio->bi_end_io	= s->bi_end_io;
	s->bio->bi_private	= s->bi_private;
	bio_endio_nodec(s->bio, 0);

	closure_debug_destroy(&s->cl);
	mempool_free(s, s->p->bio_split_hook);
}
static void bch_bio_submit_split_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

	if (error)
		clear_bit(BIO_UPTODATE, &s->bio->bi_flags);

	bio_put(bio);
	closure_put(cl);
}
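/*
 * bch_generic_make_request() is a front end to generic_make_request()
 * that splits @bio if it exceeds bch_bio_max_sectors().  Bios with no
 * data that aren't discards (e.g. cache flushes) never need splitting
 * and are submitted directly.
 */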
void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
{
	struct bio_split_hook *s;
	struct bio *n;

	if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
		goto submit;

	if (bio_sectors(bio) <= bch_bio_max_sectors(bio))
		goto submit;

	s = mempool_alloc(p->bio_split_hook, GFP_NOIO);
	closure_init(&s->cl, NULL);

	s->bio		= bio;
	s->p		= p;
	s->bi_end_io	= bio->bi_end_io;
	s->bi_private	= bio->bi_private;
	bio_get(bio);

	do {
		n = bio_next_split(bio, bch_bio_max_sectors(bio),
				   GFP_NOIO, s->p->bio_split);

		n->bi_end_io	= bch_bio_submit_split_endio;
		n->bi_private	= &s->cl;

		closure_get(&s->cl);
		generic_make_request(n);
	} while (n != bio);

	continue_at(&s->cl, bch_bio_submit_split_done, NULL);
	return;
submit:
	generic_make_request(bio);
}
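/*
 * Usage sketch (not from this file; assumes a per-device
 * struct bio_split_pool field named bio_split_hook, as elsewhere in
 * bcache of this era): callers submit device IO via
 *
 *	bch_generic_make_request(bio, &d->bio_split_hook);
 *
 * rather than generic_make_request(), so oversized bios are split
 * transparently.
 */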
/* Bios with headers */
void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	mempool_free(b, c->bio_meta);
}
struct bio *bch_bbio_alloc(struct cache_set *c)
{
	struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

	bio_init(bio);
	bio->bi_flags		|= BIO_POOL_NONE << BIO_POOL_OFFSET;
	bio->bi_max_vecs	 = bucket_pages(c);
	bio->bi_io_vec		 = bio->bi_inline_vecs;

	return bio;
}
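/*
 * A bbio wraps a bio together with the bkey it was generated from; the
 * submit path below pulls the target device and starting sector
 * straight out of the key's first pointer.
 */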
void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio->bi_bdev		= PTR_CACHE(c, &b->key, 0)->bdev;

	b->submit_time_us = local_clock_us();
	closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
}
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned ptr)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	bch_bkey_copy_single_ptr(&b->key, k, ptr);
	__bch_submit_bbio(bio, c);
}
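/* IO errors */

/*
 * Error accounting, in brief: errors are kept in a fixed point counter
 * (shifted left by IO_ERROR_SHIFT) and decayed geometrically, by a
 * factor of 127/128 once per error_decay IOs, so that old errors
 * eventually stop counting against the device.
 */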
void bch_count_io_errors(struct cache *ca, int error, const char *m)
{
	/*
	 * The halflife of an error is:
	 * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
	 */

	if (ca->set->error_decay) {
		unsigned count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned errors;
			unsigned old = count;
			unsigned new = count - ca->set->error_decay;

			/*
			 * First we subtract refresh from count; each time we
			 * successfully do so, we rescale the errors once:
			 */

			count = atomic_cmpxchg(&ca->io_count, old, new);

			if (count == old) {
				count = new;

				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}

	if (error) {
		char buf[BDEVNAME_SIZE];
		unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
						    &ca->io_errors);
		errors >>= IO_ERROR_SHIFT;

		if (errors < ca->set->error_limit)
			pr_err("%s: IO error on %s, recovering",
			       bdevname(ca->bdev, buf), m);
		else
			bch_cache_set_error(ca->set,
					    "%s: too many IO errors %s",
					    bdevname(ca->bdev, buf), m);
	}
}
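/*
 * Sanity check on the halflife figure above (not in the original
 * comment): each decay step multiplies the error count by 127/128, so
 * after n steps it is scaled by (127/128)^n.  Solving (127/128)^n = 1/2
 * gives n = log(1/2)/log(127/128) ~= 88.4, i.e. errors halve roughly
 * every 88 refresh periods, as the comment says.
 */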
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      int error, const char *m)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct cache *ca = PTR_CACHE(c, &b->key, 0);

	unsigned threshold = bio->bi_rw & REQ_WRITE
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;

	if (threshold) {
		unsigned t = local_clock_us();

		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

		if (us > (int) threshold) {
			int ms = us / 1024;
			c->congested_last_us = t;

			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}

	bch_count_io_errors(ca, error, m);
}
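/*
 * bch_bbio_endio() is the common completion path for bbios: account
 * latency and errors, complete the bio, then drop the ref on the
 * closure that submitted it.
 */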
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    int error, const char *m)
{
	struct closure *cl = bio->bi_private;

	bch_bbio_count_io_errors(c, bio, error, m);
	bio_endio(bio, error);
	closure_put(cl);
}