/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>
/* Bios with headers */
void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	mempool_free(b, c->bio_meta);
}
struct bio *bch_bbio_alloc(struct cache_set *c)
{
	struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

	bio_init(bio, bio->bi_inline_vecs, bucket_pages(c));

	return bio;
}
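/*
 * Note on the two helpers above: a struct bbio embeds the struct bio as
 * its last member alongside a copy of the bkey being read or written, so
 * container_of() can recover the header from the bio again on free and
 * on completion.  The bio is initialized with its inline bvecs and room
 * for one bucket's worth of pages, since these bios carry at most a
 * bucket of data.
 */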
void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev);

	b->submit_time_us = local_clock_us();
	closure_bio_submit(bio, bio->bi_private);
}
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned ptr)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bch_bkey_copy_single_ptr(&b->key, k, ptr);
	__bch_submit_bbio(bio, c);
}
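/*
 * A minimal usage sketch (assumed, not code from this file): a caller
 * typically allocates a bbio-backed bio, fills in size and completion
 * state, and submits it against one pointer of a key.  "k", "cl" and
 * "my_endio" are hypothetical names for illustration:
 *
 *	struct bio *bio = bch_bbio_alloc(c);
 *
 *	bio->bi_iter.bi_size	= KEY_SIZE(k) << 9;
 *	bio->bi_end_io		= my_endio;
 *	bio->bi_private		= cl;	// closure_bio_submit() uses this
 *
 *	bch_submit_bbio(bio, c, k, 0);
 */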
/* IO errors */

void bch_count_io_errors(struct cache *ca, blk_status_t error, const char *m)
{
	/*
	 * The halflife of an error is:
	 * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
	 */

	if (ca->set->error_decay) {
		unsigned count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned errors;
			unsigned old = count;
			unsigned new = count - ca->set->error_decay;

			/*
			 * First we subtract refresh from count; each time we
			 * successfully do so, we rescale the errors once:
			 */

			count = atomic_cmpxchg(&ca->io_count, old, new);

			if (count == old) {
				count = new;

				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}

	if (error) {
		char buf[BDEVNAME_SIZE];
		unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
						    &ca->io_errors);
		errors >>= IO_ERROR_SHIFT;

		if (errors < ca->set->error_limit)
			pr_err("%s: IO error on %s, recovering",
			       bdevname(ca->bdev, buf), m);
		else
			bch_cache_set_error(ca->set,
					    "%s: too many IO errors %s",
					    bdevname(ca->bdev, buf), m);
	}
}
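/*
 * Worked example for the halflife figure above: each decay pass scales
 * the error count by 127/128, so halving it takes
 * log(1/2) / log(127/128) ~= 88.3 passes, and one pass happens per
 * error_decay ("refresh") IOs, hence roughly 88 * refresh.
 */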
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      blk_status_t error, const char *m)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct cache *ca = PTR_CACHE(c, &b->key, 0);

	unsigned threshold = op_is_write(bio_op(bio))
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;

	if (threshold) {
		unsigned t = local_clock_us();

		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

		if (us > (int) threshold) {
			int ms = us / 1024;

			c->congested_last_us = t;

			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}

	bch_count_io_errors(ca, error, m);
}
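/*
 * Congestion accounting note for the function above: an IO slower than
 * the threshold pushes c->congested down by its latency in ~ms (the
 * /1024), clamped so the counter never drops below -CONGESTED_MAX, while
 * each IO completing under the threshold while the counter is negative
 * ticks it back up toward zero.  A negative value therefore means the
 * device has recently been slow.
 */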
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    blk_status_t error, const char *m)
{
	struct closure *cl = bio->bi_private;

	bch_bbio_count_io_errors(c, bio, error, m);
	bio_put(bio);
	closure_put(cl);
}
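/*
 * A hedged sketch (not code from this file) of how bch_bbio_endio()
 * might be wired up as a completion hook; "struct my_op", its "cl"/"c"
 * members and the message string are illustrative assumptions:
 *
 *	static void my_read_endio(struct bio *bio)
 *	{
 *		struct my_op *op = container_of(bio->bi_private,
 *						struct my_op, cl);
 *
 *		bch_bbio_endio(op->c, bio, bio->bi_status, "reading data");
 *	}
 */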