/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>

/* Bios with headers */
void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	mempool_free(b, c->bio_meta);
}
struct bio *bch_bbio_alloc(struct cache_set *c)
{
	struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

	bio_init(bio);
	bio->bi_flags		|= BIO_POOL_NONE << BIO_POOL_OFFSET;
	bio->bi_max_vecs	 = bucket_pages(c);
	bio->bi_io_vec		 = bio->bi_inline_vecs;

	return bio;
}
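/*
 * Note on the allocation above: a bbio is a bio with a struct bkey header
 * prepended, allocated from the cache set's bio_meta mempool. Setting
 * BIO_POOL_NONE in bi_flags marks the biovec as not coming from any bvec
 * pool, so bio_put() won't try to return it to one; the inline vecs are
 * sized to cover one bucket.
 */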
void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio->bi_bdev		= PTR_CACHE(c, &b->key, 0)->bdev;

	b->submit_time_us = local_clock_us();
	closure_bio_submit(bio, bio->bi_private);
}
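/*
 * Note: the bbio's embedded key names the device and offset for the IO:
 * PTR_OFFSET(&b->key, 0) is the starting sector for pointer 0, and
 * PTR_CACHE(c, &b->key, 0) resolves that pointer to its struct cache.
 * submit_time_us is stamped here so bch_bbio_count_io_errors() below can
 * measure the IO's latency for the congestion accounting.
 */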
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned ptr)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	bch_bkey_copy_single_ptr(&b->key, k, ptr);
	__bch_submit_bbio(bio, c);
}
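/*
 * Illustrative sketch, not part of the original file: how a caller might
 * drive the helpers above. example_read_extent() is a hypothetical name;
 * only bch_bbio_alloc() and bch_submit_bbio() come from this file.
 * Wrapped in #if 0 so it is never built.
 */
#if 0
static void example_read_extent(struct cache_set *c, struct bkey *k,
				struct closure *cl)
{
	struct bio *bio = bch_bbio_alloc(c);

	bio->bi_rw		= READ;
	bio->bi_iter.bi_size	= KEY_SIZE(k) << 9;
	bio->bi_private		= cl;	/* __bch_submit_bbio() submits under this closure */
	/* bio->bi_end_io would point at a handler that calls bch_bbio_endio() */
	/* (attaching data pages to the biovec omitted here) */

	/* Copies pointer 0 of k into the bbio's key, then submits */
	bch_submit_bbio(bio, c, k, 0);
}
#endif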
/* IO errors */

void bch_count_io_errors(struct cache *ca, int error, const char *m)
{
	/*
	 * The halflife of an error is:
	 * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
	 */

	if (ca->set->error_decay) {
		unsigned count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned errors;
			unsigned old = count;
			unsigned new = count - ca->set->error_decay;

			/*
			 * First we subtract refresh from count; each time we
			 * successfully do so, we rescale the errors once:
			 */

			count = atomic_cmpxchg(&ca->io_count, old, new);

			if (count == old) {
				count = new;

				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}

	if (error) {
		char buf[BDEVNAME_SIZE];
		unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
						    &ca->io_errors);
		errors >>= IO_ERROR_SHIFT;

		if (errors < ca->set->error_limit)
			pr_err("%s: IO error on %s, recovering",
			       bdevname(ca->bdev, buf), m);
		else
			bch_cache_set_error(ca->set,
					    "%s: too many IO errors %s",
					    bdevname(ca->bdev, buf), m);
	}
}
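/*
 * Where the ~88 above comes from: each decay period multiplies the error
 * count by 127/128, so after n periods it is scaled by (127/128)^n.
 * Solving (127/128)^n = 1/2 gives n = log(1/2)/log(127/128) ~= 88.4
 * periods, each period being error_decay ("refresh") IOs. A standalone
 * userspace sketch of the same integer arithmetic, not part of this file:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t errors = 1 << 20;	/* pretend starting error count */
	unsigned periods = 0;

	while (errors > (1 << 19)) {	/* loop until the count has halved */
		errors = errors * 127 / 128;	/* one decay period */
		periods++;
	}
	printf("halved after %u periods\n", periods);	/* prints 89, i.e. ~88.4 rounded up */
	return 0;
}
#endif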
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      int error, const char *m)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct cache *ca = PTR_CACHE(c, &b->key, 0);

	unsigned threshold = bio->bi_rw & REQ_WRITE
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;

	if (threshold) {
		unsigned t = local_clock_us();

		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

		if (us > (int) threshold) {
			int ms = us / 1024;
			c->congested_last_us = t;

			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}

	bch_count_io_errors(ca, error, m);
}
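/*
 * Note on the accounting above: an IO that took longer than the relevant
 * read/write threshold subtracts roughly its latency in ms from
 * c->congested, clamped so it can't drop below -CONGESTED_MAX, while each
 * IO that completes under the threshold while c->congested is negative
 * adds one back. Elsewhere in bcache, a negative c->congested is used to
 * decide when to bypass the cache device.
 */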
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    int error, const char *m)
{
	struct closure *cl = bio->bi_private;

	bch_bbio_count_io_errors(c, bio, error, m);
	bio_put(bio);
	closure_put(cl);
}
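/*
 * Note: this is the common completion path for bbios: account errors and
 * latency, drop the bio's reference, then release the closure the IO was
 * submitted under (stashed in bi_private by the submitter).
 */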