/*
 * Imported from Linux 6.13-rc4: fs/bcachefs/io_read.h
 * (upstream blob d9c18bb7d4035aee9884de2dc2019e4a97f7249d)
 */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_IO_READ_H
3 #define _BCACHEFS_IO_READ_H
5 #include "bkey_buf.h"
7 struct bch_read_bio {
8 struct bch_fs *c;
9 u64 start_time;
10 u64 submit_time;
13 * Reads will often have to be split, and if the extent being read from
14 * was checksummed or compressed we'll also have to allocate bounce
15 * buffers and copy the data back into the original bio.
17 * If we didn't have to split, we have to save and restore the original
18 * bi_end_io - @split below indicates which:
20 union {
21 struct bch_read_bio *parent;
22 bio_end_io_t *end_io;
26 * Saved copy of bio->bi_iter, from submission time - allows us to
27 * resubmit on IO error, and also to copy data back to the original bio
28 * when we're bouncing:
30 struct bvec_iter bvec_iter;
32 unsigned offset_into_extent;
34 u16 flags;
35 union {
36 struct {
37 u16 bounce:1,
38 split:1,
39 kmalloc:1,
40 have_ioref:1,
41 narrow_crcs:1,
42 hole:1,
43 retry:2,
44 context:2;
46 u16 _state;
49 struct bch_devs_list devs_have;
51 struct extent_ptr_decoded pick;
54 * pos we read from - different from data_pos for indirect extents:
56 u32 subvol;
57 struct bpos read_pos;
60 * start pos of data we read (may not be pos of data we want) - for
61 * promote, narrow extents paths:
63 enum btree_id data_btree;
64 struct bpos data_pos;
65 struct bversion version;
67 struct promote_op *promote;
69 struct bch_io_opts opts;
71 struct work_struct work;
73 struct bio bio;
/* Recover the enclosing bch_read_bio from its embedded struct bio. */
#define to_rbio(_bio)	container_of((_bio), struct bch_read_bio, bio)

struct bch_devs_mask;
struct cache_promote_op;
struct extent_ptr_decoded;

int __bch2_read_indirect_extent(struct btree_trans *, unsigned *,
				struct bkey_buf *);
85 static inline int bch2_read_indirect_extent(struct btree_trans *trans,
86 enum btree_id *data_btree,
87 unsigned *offset_into_extent,
88 struct bkey_buf *k)
90 if (k->k->k.type != KEY_TYPE_reflink_p)
91 return 0;
93 *data_btree = BTREE_ID_reflink;
94 return __bch2_read_indirect_extent(trans, offset_into_extent, k);
/* Flag bits accepted by the read paths (__bch2_read(), bch2_read_extent()): */
enum bch_read_flags {
	BCH_READ_RETRY_IF_STALE		= 1 << 0,
	BCH_READ_MAY_PROMOTE		= 1 << 1,
	BCH_READ_USER_MAPPED		= 1 << 2,
	BCH_READ_NODECODE		= 1 << 3,
	BCH_READ_LAST_FRAGMENT		= 1 << 4,

	/* internal: */
	BCH_READ_MUST_BOUNCE		= 1 << 5,
	BCH_READ_MUST_CLONE		= 1 << 6,
	BCH_READ_IN_RETRY		= 1 << 7,
};
int __bch2_read_extent(struct btree_trans *, struct bch_read_bio *,
		       struct bvec_iter, struct bpos, enum btree_id,
		       struct bkey_s_c, unsigned,
		       struct bch_io_failures *, unsigned);
115 static inline void bch2_read_extent(struct btree_trans *trans,
116 struct bch_read_bio *rbio, struct bpos read_pos,
117 enum btree_id data_btree, struct bkey_s_c k,
118 unsigned offset_into_extent, unsigned flags)
120 __bch2_read_extent(trans, rbio, rbio->bio.bi_iter, read_pos,
121 data_btree, k, offset_into_extent, NULL, flags);
124 void __bch2_read(struct bch_fs *, struct bch_read_bio *, struct bvec_iter,
125 subvol_inum, struct bch_io_failures *, unsigned flags);
127 static inline void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
128 subvol_inum inum)
130 struct bch_io_failures failed = { .nr = 0 };
132 BUG_ON(rbio->_state);
134 rbio->c = c;
135 rbio->start_time = local_clock();
136 rbio->subvol = inum.subvol;
138 __bch2_read(c, rbio, rbio->bio.bi_iter, inum, &failed,
139 BCH_READ_RETRY_IF_STALE|
140 BCH_READ_MAY_PROMOTE|
141 BCH_READ_USER_MAPPED);
144 static inline struct bch_read_bio *rbio_init(struct bio *bio,
145 struct bch_io_opts opts)
147 struct bch_read_bio *rbio = to_rbio(bio);
149 rbio->_state = 0;
150 rbio->promote = NULL;
151 rbio->opts = opts;
152 return rbio;
155 void bch2_fs_io_read_exit(struct bch_fs *);
156 int bch2_fs_io_read_init(struct bch_fs *);
158 #endif /* _BCACHEFS_IO_READ_H */