/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_TREE_H
#define BTRFS_EXTENT_IO_TREE_H

#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/refcount.h>
#include <linux/list.h>
#include <linux/wait.h>
#include "misc.h"

struct extent_changeset;
struct btrfs_fs_info;
struct btrfs_inode;

/* Bits for the extent state */
enum {
	ENUM_BIT(EXTENT_DIRTY),
	ENUM_BIT(EXTENT_UPTODATE),
	ENUM_BIT(EXTENT_LOCKED),
	ENUM_BIT(EXTENT_DIO_LOCKED),
	ENUM_BIT(EXTENT_DELALLOC),
	ENUM_BIT(EXTENT_DEFRAG),
	ENUM_BIT(EXTENT_BOUNDARY),
	ENUM_BIT(EXTENT_NODATASUM),
	ENUM_BIT(EXTENT_CLEAR_META_RESV),
	ENUM_BIT(EXTENT_NEED_WAIT),
	ENUM_BIT(EXTENT_NORESERVE),
	ENUM_BIT(EXTENT_QGROUP_RESERVED),
	ENUM_BIT(EXTENT_CLEAR_DATA_RESV),
	/*
	 * Must be cleared only during ordered extent completion or on error
	 * paths if we did not manage to submit bios and create the ordered
	 * extents for the range. Should not be cleared during page release
	 * and page invalidation (if there is an ordered extent in flight),
	 * that is left for the ordered extent completion.
	 */
	ENUM_BIT(EXTENT_DELALLOC_NEW),
	/*
	 * When an ordered extent successfully completes for a region marked as
	 * a new delalloc range, use this flag when clearing a new delalloc
	 * range to indicate that the VFS' inode number of bytes should be
	 * incremented and the inode's new delalloc bytes decremented, in an
	 * atomic way to prevent races with stat(2).
	 */
	ENUM_BIT(EXTENT_ADD_INODE_BYTES),
	/*
	 * Set during truncate when we're clearing an entire range and we just
	 * want the extent states to go away.
	 */
	ENUM_BIT(EXTENT_CLEAR_ALL_BITS),

	/*
	 * Bit not representing a state but a request for NOWAIT semantics,
	 * e.g. when allocating memory, and must be masked out from the other
	 * bits.
	 */
	ENUM_BIT(EXTENT_NOWAIT)
};

#define EXTENT_DO_ACCOUNTING	(EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING | \
				 EXTENT_ADD_INODE_BYTES | \
				 EXTENT_CLEAR_ALL_BITS)

#define EXTENT_LOCK_BITS	(EXTENT_LOCKED | EXTENT_DIO_LOCKED)

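/*
 * Rough usage sketch (not taken from any particular caller): EXTENT_NOWAIT is
 * not stored in the tree, it is OR'ed into the bits passed to the set/clear
 * helpers below to request non-blocking behavior, e.g.:
 *
 *	ret = set_extent_bit(tree, start, end,
 *			     EXTENT_DELALLOC | EXTENT_NOWAIT, &cached_state);
 */
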
/*
 * Redefined bits above which are used only in the device allocation tree,
 * shouldn't be using EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV
 * / EXTENT_CLEAR_DATA_RESV because they have special meaning to the bit
 * manipulation functions.
 */
#define CHUNK_ALLOCATED		EXTENT_DIRTY
#define CHUNK_TRIMMED		EXTENT_DEFRAG
#define CHUNK_STATE_MASK	(CHUNK_ALLOCATED | \
				 CHUNK_TRIMMED)

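/*
 * Illustrative sketch of how the CHUNK_* aliases are meant to be used on a
 * device allocation state tree ('alloc_state' is only a placeholder name for
 * such a tree):
 *
 *	set_extent_bit(alloc_state, start, end, CHUNK_ALLOCATED, NULL);
 *	...
 *	if (test_range_bit(alloc_state, start, end, CHUNK_ALLOCATED, NULL)) {
 *		// the whole [start, end] range is marked as allocated
 *	}
 */
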
enum {
	IO_TREE_FS_PINNED_EXTENTS,
	IO_TREE_FS_EXCLUDED_EXTENTS,
	IO_TREE_BTREE_INODE_IO,
	IO_TREE_INODE_IO,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_INODE_FILE_EXTENT,
	IO_TREE_LOG_CSUM_RANGE,
	IO_TREE_DEVICE_ALLOC_STATE,
};

struct extent_io_tree {
	struct rb_root state;
	/*
	 * The fs_info is needed for trace points, a tree attached to an inode
	 * needs the inode.
	 *
	 * owner == IO_TREE_INODE_IO - then inode is valid and fs_info can be
	 *                             accessed as inode->root->fs_info
	 */
	union {
		struct btrfs_fs_info *fs_info;
		struct btrfs_inode *inode;
	};

	/* Who owns this io tree, should be one of IO_TREE_* */
	u8 owner;

	spinlock_t lock;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	wait_queue_head_t wq;
	refcount_t refs;
	u32 state;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

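/*
 * Note on the cached state pointers used throughout this header: extent
 * states are reference counted, and the "cached_state" / "cached" arguments
 * let a caller remember the state that was last looked up so that a
 * follow-up operation on the same range can skip the rbtree search.  Such a
 * cached reference is eventually dropped, typically via free_extent_state().
 */
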
struct btrfs_inode *extent_io_tree_to_inode(struct extent_io_tree *tree);
const struct btrfs_inode *extent_io_tree_to_inode_const(const struct extent_io_tree *tree);
const struct btrfs_fs_info *extent_io_tree_to_fs_info(const struct extent_io_tree *tree);

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner);
void extent_io_tree_release(struct extent_io_tree *tree);

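/*
 * Rough initialization/teardown sketch (the device allocation state owner is
 * used here only as an example):
 *
 *	struct extent_io_tree alloc_state;
 *
 *	extent_io_tree_init(fs_info, &alloc_state, IO_TREE_DEVICE_ALLOC_STATE);
 *	...
 *	extent_io_tree_release(&alloc_state);
 */
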
int __lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
		  struct extent_state **cached);
bool __try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
		       struct extent_state **cached);

static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
			      struct extent_state **cached)
{
	return __lock_extent(tree, start, end, EXTENT_LOCKED, cached);
}

static inline bool try_lock_extent(struct extent_io_tree *tree, u64 start,
				   u64 end, struct extent_state **cached)
{
	return __try_lock_extent(tree, start, end, EXTENT_LOCKED, cached);
}

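/*
 * Typical lock/unlock pairing (simplified sketch; unlock_extent() is defined
 * further below):
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent(io_tree, start, end, &cached);
 *	// operate on [start, end] while it is locked in the io tree
 *	unlock_extent(io_tree, start, end, &cached);
 */
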
int __init extent_state_init_cachep(void);
void __cold extent_state_free_cachep(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, u32 bits, int contig,
		     struct extent_state **cached_state);

void free_extent_state(struct extent_state *state);
bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
		    struct extent_state *cached_state);
bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     u32 bits, struct extent_changeset *changeset);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, struct extent_state **cached,
		       struct extent_changeset *changeset);

static inline int clear_extent_bit(struct extent_io_tree *tree, u64 start,
				   u64 end, u32 bits,
				   struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, bits, cached, NULL);
}

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
				struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, cached, NULL);
}

static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
				    u64 end, u32 bits)
{
	return clear_extent_bit(tree, start, end, bits, NULL);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   u32 bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   u32 bits, struct extent_state **cached_state);

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
					u64 end, struct extent_state **cached_state)
{
	return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
				  cached_state, NULL);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
				     u64 end, struct extent_state **cached)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, u32 clear_bits,
		       struct extent_state **cached_state);

bool find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			   u64 *start_ret, u64 *end_ret, u32 bits,
			   struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, u32 bits);
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
			       u64 *start_ret, u64 *end_ret, u32 bits);
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
			       u64 *end, u64 max_bytes,
			       struct extent_state **cached_state);

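/*
 * The find_* helpers return one matching range at a time; a rough sketch of
 * walking every range that has a given bit set:
 *
 *	u64 cur = 0;
 *	u64 found_start, found_end;
 *
 *	while (find_first_extent_bit(tree, cur, &found_start, &found_end,
 *				     EXTENT_DIRTY, NULL)) {
 *		// [found_start, found_end] (inclusive) has EXTENT_DIRTY set
 *		cur = found_end + 1;
 *	}
 */
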
static inline int lock_dio_extent(struct extent_io_tree *tree, u64 start,
				  u64 end, struct extent_state **cached)
{
	return __lock_extent(tree, start, end, EXTENT_DIO_LOCKED, cached);
}

static inline bool try_lock_dio_extent(struct extent_io_tree *tree, u64 start,
				       u64 end, struct extent_state **cached)
{
	return __try_lock_extent(tree, start, end, EXTENT_DIO_LOCKED, cached);
}

static inline int unlock_dio_extent(struct extent_io_tree *tree, u64 start,
				    u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_DIO_LOCKED, cached, NULL);
}

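/*
 * The *_dio_extent() helpers above mirror lock_extent()/try_lock_extent()/
 * unlock_extent() but use EXTENT_DIO_LOCKED as the lock bit.
 */
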
#endif /* BTRFS_EXTENT_IO_TREE_H */