/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_H
#define BTRFS_EXTENT_IO_H

#include <linux/rbtree.h>
#include <linux/refcount.h>
#include "ulist.h"
/*
 * flags for bio submission. The high bits indicate the compression
 * type for this bio.
 */
#define EXTENT_BIO_COMPRESSED 1
#define EXTENT_BIO_FLAG_SHIFT 16
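/*
 * Example (illustrative only): with the layout above, the low 16 bits carry
 * flag bits such as EXTENT_BIO_COMPRESSED and the bits from
 * EXTENT_BIO_FLAG_SHIFT upwards carry the compression type, so a compressed
 * bio using compression type 1 would have
 * bio_flags == EXTENT_BIO_COMPRESSED | (1 << EXTENT_BIO_FLAG_SHIFT) == 0x10001.
 */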
enum {
	EXTENT_BUFFER_UPTODATE,
	EXTENT_BUFFER_CORRUPT,
	/* this got triggered by readahead */
	EXTENT_BUFFER_READAHEAD,
	EXTENT_BUFFER_TREE_REF,
	EXTENT_BUFFER_WRITEBACK,
	EXTENT_BUFFER_READ_ERR,
	EXTENT_BUFFER_UNMAPPED,
	EXTENT_BUFFER_IN_TREE,
	EXTENT_BUFFER_WRITE_ERR,
};
/* these are flags for __process_pages_contig */
#define PAGE_UNLOCK		(1 << 0)
#define PAGE_CLEAR_DIRTY	(1 << 1)
#define PAGE_SET_WRITEBACK	(1 << 2)
#define PAGE_END_WRITEBACK	(1 << 3)
#define PAGE_SET_PRIVATE2	(1 << 4)
#define PAGE_SET_ERROR		(1 << 5)
#define PAGE_LOCK		(1 << 6)
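/*
 * Example (illustrative only): these are combined into a single bitmask, so
 * an error path cleaning up a delalloc range might pass something like
 *	PAGE_UNLOCK | PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
 *	PAGE_END_WRITEBACK | PAGE_SET_ERROR
 * as the page_ops argument of extent_clear_unlock_delalloc() declared below.
 */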
/*
 * page->private values. Every page that is controlled by the extent
 * map has page->private set to one.
 */
#define EXTENT_PAGE_PRIVATE 1
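/*
 * Example (illustrative only): set_page_extent_mapped(), declared below, is
 * the helper that tags a page this way, conceptually:
 *
 *	if (!PagePrivate(page))
 *		set_page_private(page, EXTENT_PAGE_PRIVATE);
 */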
/*
 * The extent buffer bitmap operations are done with byte granularity instead of
 * word granularity for two reasons:
 * 1. The bitmaps must be little-endian on disk.
 * 2. Bitmap items are not guaranteed to be aligned to a word and therefore a
 *    single word in a bitmap may straddle two pages in the extent buffer.
 */
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
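/*
 * Worked example (illustrative only), with BITS_PER_BYTE == 8:
 *	BIT_BYTE(13)              == 1    (bit 13 lives in byte 1)
 *	BITMAP_FIRST_BYTE_MASK(3) == 0xf8 (keeps bits 3..7 of the first byte)
 *	BITMAP_LAST_BYTE_MASK(3)  == 0x07 (keeps bits 0..2 of the last byte)
 */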
struct io_failure_record;
struct extent_io_tree;
typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
		struct bio *bio, u64 bio_offset);
struct extent_io_ops {
	/*
	 * The following callbacks must always be defined; the function
	 * pointer will be called unconditionally.
	 */
	blk_status_t (*submit_bio_hook)(struct inode *inode, struct bio *bio,
					int mirror_num, unsigned long bio_flags);
	int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset,
				    struct page *page, u64 start, u64 end,
				    int mirror);
};
#define INLINE_EXTENT_BUFFER_PAGES 16
#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
struct extent_buffer {
	u64 start;
	unsigned long len;
	unsigned long bflags;
	struct btrfs_fs_info *fs_info;
	struct rcu_head rcu_head;

	atomic_t blocking_readers;
	/* >= 0 if eb belongs to a log tree, -1 otherwise */
	short log_index;

	/* protects write locks */
	rwlock_t lock;

	/* readers use lock_wq while they wait for the write
	 * lock holders to unlock
	 */
	wait_queue_head_t write_lock_wq;

	/* writers use read_lock_wq while they wait for readers
	 * to unlock
	 */
	wait_queue_head_t read_lock_wq;
	struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
#ifdef CONFIG_BTRFS_DEBUG
	int spinning_writers;
	atomic_t spinning_readers;
	struct list_head leak_list;
#endif
};
/*
 * Structure to record how many bytes and which ranges are set/cleared
 */
struct extent_changeset {
	/* How many bytes are set/cleared in this operation */
	unsigned int bytes_changed;

	struct ulist range_changed;
};
static inline void extent_changeset_init(struct extent_changeset *changeset)
{
	changeset->bytes_changed = 0;
	ulist_init(&changeset->range_changed);
}
static inline struct extent_changeset *extent_changeset_alloc(void)
{
	struct extent_changeset *ret;

	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	extent_changeset_init(ret);
	return ret;
}
static inline void extent_changeset_release(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	changeset->bytes_changed = 0;
	ulist_release(&changeset->range_changed);
}
static inline void extent_changeset_free(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	extent_changeset_release(changeset);
	kfree(changeset);
}
static inline void extent_set_compress_type(unsigned long *bio_flags,
					    int compress_type)
{
	*bio_flags |= compress_type << EXTENT_BIO_FLAG_SHIFT;
}

static inline int extent_compress_type(unsigned long bio_flags)
{
	return bio_flags >> EXTENT_BIO_FLAG_SHIFT;
}
struct extent_map_tree;

typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
					   struct page *page, size_t pg_offset,
					   u64 start, u64 len);
int try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page);
int extent_read_full_page(struct page *page, get_extent_t *get_extent,
			  int mirror_num);
int extent_write_full_page(struct page *page, struct writeback_control *wbc);
int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
			      int mode);
int extent_writepages(struct address_space *mapping,
		      struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
			    struct writeback_control *wbc);
int extent_readpages(struct address_space *mapping, struct list_head *pages,
		     unsigned nr_pages);
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		  __u64 start, __u64 len);
void set_page_extent_mapped(struct page *page);
struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 start);
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						  u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 start);
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
					 u64 start);
void free_extent_buffer(struct extent_buffer *eb);
void free_extent_buffer_stale(struct extent_buffer *eb);
#define WAIT_COMPLETE 1
#define WAIT_PAGE_LOCK 2
int read_extent_buffer_pages(struct extent_buffer *eb, int wait,
			     int mirror_num);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
static inline int num_extent_pages(const struct extent_buffer *eb)
{
	return (round_up(eb->start + eb->len, PAGE_SIZE) >> PAGE_SHIFT) -
	       (eb->start >> PAGE_SHIFT);
}
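/*
 * Example (illustrative only), assuming 4KiB pages: an extent buffer with
 * start == 12288 and len == 16384 covers bytes 12288..28671, i.e. pages
 * 3..6, so num_extent_pages() returns (28672 >> 12) - (12288 >> 12) == 4.
 */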
static inline int extent_buffer_uptodate(struct extent_buffer *eb)
{
	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
}
int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
			 unsigned long start, unsigned long len);
void read_extent_buffer(const struct extent_buffer *eb, void *dst,
			unsigned long start, unsigned long len);
int read_extent_buffer_to_user(const struct extent_buffer *eb,
			       void __user *dst, unsigned long start,
			       unsigned long len);
void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src);
void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
					 const void *chunk_tree_uuid);
void write_extent_buffer(struct extent_buffer *eb, const void *src,
			 unsigned long start, unsigned long len);
void copy_extent_buffer_full(struct extent_buffer *dst,
			     struct extent_buffer *src);
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len);
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len);
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len);
void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
			   unsigned long len);
int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
			   unsigned long nr);
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len);
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
				unsigned long pos, unsigned long len);
void clear_extent_buffer_dirty(struct extent_buffer *eb);
bool set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_under_io(struct extent_buffer *eb);
int map_private_extent_buffer(const struct extent_buffer *eb,
			      unsigned long offset, unsigned long min_len,
			      char **map, unsigned long *map_start,
			      unsigned long *map_len);
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
				  struct page *locked_page,
				  unsigned bits_to_clear,
				  unsigned long page_ops);
struct bio *btrfs_bio_alloc(u64 first_byte);
struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs);
struct bio *btrfs_bio_clone(struct bio *bio);
struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size);

struct btrfs_fs_info;
int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
		      u64 length, u64 logical, struct page *page,
		      unsigned int pg_offset, int mirror_num);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num);
/*
 * When IO fails, either with EIO or a csum verification failure, we try
 * other mirrors that might have a good copy of the data. This
 * io_failure_record is used to record state as we go through all the
 * mirrors. If another mirror has good data, the page is set up to date
 * and things continue. If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	unsigned long bio_flags;
};
bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
			    struct io_failure_record *failrec, int fail_mirror);
struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
				    struct io_failure_record *failrec,
				    struct page *page, int pg_offset, int icsum,
				    bio_end_io_t *endio_func, void *data);
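/*
 * Sketch of how a read-repair path is expected to use the two helpers above
 * (illustrative only): once a failed read has an io_failure_record, check
 * whether another mirror is worth trying and, if so, build and submit a
 * repair bio aimed at that mirror:
 *
 *	if (!btrfs_check_repairable(inode, failed_bio_pages, failrec,
 *				    fail_mirror))
 *		return -EIO;
 *	bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
 *				      pg_offset, icsum, endio_func, data);
 */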
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode,
			      struct page *locked_page, u64 *start,
			      u64 *end);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
					       u64 start);
#ifdef CONFIG_BTRFS_DEBUG
void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info);
#else
#define btrfs_extent_buffer_leak_debug_check(fs_info)	do {} while (0)
#endif

#endif