/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_BLOCK_GROUP_H
#define BTRFS_BLOCK_GROUP_H

#include "free-space-cache.h"

enum btrfs_disk_cache_state {
	BTRFS_DC_WRITTEN,
	BTRFS_DC_ERROR,
	BTRFS_DC_CLEAR,
	BTRFS_DC_SETUP,
};

/*
 * This describes the state of the block_group for async discard. This is due
 * to the two-pass nature of it where extent discarding is prioritized over
 * bitmap discarding. BTRFS_DISCARD_RESET_CURSOR is set when we are resetting
 * between lists to prevent contention for discard state variables
 * (e.g. discard_cursor).
 */
enum btrfs_discard_state {
	BTRFS_DISCARD_EXTENTS,
	BTRFS_DISCARD_BITMAPS,
	BTRFS_DISCARD_RESET_CURSOR,
};
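
/*
 * Illustrative sketch of the two-pass progression (assumed shape only; the
 * real state machine lives in discard.c, and trim_extents()/trim_bitmaps()
 * below are hypothetical helpers that return true once their pass is done):
 *
 *	switch (block_group->discard_state) {
 *	case BTRFS_DISCARD_EXTENTS:
 *		if (trim_extents(block_group))
 *			block_group->discard_state = BTRFS_DISCARD_BITMAPS;
 *		break;
 *	case BTRFS_DISCARD_BITMAPS:
 *		if (trim_bitmaps(block_group))
 *			block_group->discard_state = BTRFS_DISCARD_RESET_CURSOR;
 *		break;
 *	case BTRFS_DISCARD_RESET_CURSOR:
 *		block_group->discard_state = BTRFS_DISCARD_EXTENTS;
 *		break;
 *	}
 */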

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one if we have very few
 * chunks already allocated. This is used as part of the clustering code to
 * help make sure we have a good pool of storage to cluster in, without filling
 * the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum btrfs_chunk_alloc_enum {
	CHUNK_ALLOC_NO_FORCE,
	CHUNK_ALLOC_LIMITED,
	CHUNK_ALLOC_FORCE,
};
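
/*
 * Usage sketch (illustrative only): a data write path that merely wants a
 * chunk to exist passes CHUNK_ALLOC_NO_FORCE, while a caller that knows it
 * needs a fresh chunk forces the allocation:
 *
 *	ret = btrfs_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA,
 *				CHUNK_ALLOC_NO_FORCE);
 *	if (ret < 0)
 *		return ret;
 */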

struct btrfs_caching_control {
	struct list_head list;
	wait_queue_head_t wait;
	struct btrfs_work work;
	struct btrfs_block_group *block_group;
};

/* Once caching_thread() finds this much free space, it will wake up waiters. */
#define CACHING_CTL_WAKE_UP SZ_2M
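
/*
 * Sketch of how caching_thread() is expected to use this threshold (assumed
 * shape; the real loop lives in block-group.c):
 *
 *	total_found += add_new_free_space(block_group, start, end);
 *	if (total_found > CACHING_CTL_WAKE_UP) {
 *		total_found = 0;
 *		wake_up(&caching_ctl->wait);
 *	}
 */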

struct btrfs_block_group {
	struct btrfs_fs_info *fs_info;

	/*
	 * If the free space extent count exceeds this number, convert the block
	 * group to bitmaps.
	 */
	u32 bitmap_high_thresh;

	/*
	 * If the free space extent count drops below this number, convert the
	 * block group back to extents.
	 */
	u32 bitmap_low_thresh;

	/*
	 * It is only used for the delayed data space allocation because only
	 * the data space allocation and the related metadata update can be
	 * done across transactions.
	 */
	struct rw_semaphore data_rwsem;

	/* For raid56, this is a full stripe, without parity */
	unsigned long full_stripe_len;

	unsigned int has_caching_ctl:1;
	unsigned int removed:1;

	/* Cache tracking stuff */
	struct btrfs_caching_control *caching_ctl;
	u64 last_byte_to_unpin;

	struct btrfs_space_info *space_info;

	/* Free space cache stuff */
	struct btrfs_free_space_ctl *free_space_ctl;

	/* Block group cache stuff */
	struct rb_node cache_node;

	/* For block groups in the same raid type */
	struct list_head list;

	/*
	 * List of struct btrfs_free_clusters for this block group.
	 * Today it will only have one thing on it, but that may change.
	 */
	struct list_head cluster_list;

	/* For delayed block group creation or deletion of empty block groups */
	struct list_head bg_list;

	/* For read-only block groups */
	struct list_head ro_list;

	/*
	 * When non-zero it means the block group's logical address and its
	 * device extents cannot be reused for future block group allocations
	 * until the counter goes down to 0. This is to prevent them from being
	 * reused while some task is still using the block group after it was
	 * deleted - we want to make sure they can only be reused for new block
	 * groups after that task is done with the deleted block group.
	 */
	atomic_t frozen;

	/* For discard operations */
	struct list_head discard_list;
	u64 discard_eligible_time;
	enum btrfs_discard_state discard_state;

	/* For dirty block groups */
	struct list_head dirty_list;
	struct list_head io_list;

	struct btrfs_io_ctl io_ctl;

	/*
	 * Incremented when doing extent allocations and holding a read lock
	 * on the space_info's groups_sem semaphore.
	 * Decremented when an ordered extent that represents an IO against this
	 * block group's range is created (after it's added to its inode's
	 * root's list of ordered extents) or immediately after the allocation
	 * if it's a metadata extent or fallocate extent (for these cases we
	 * don't create ordered extents).
	 */
	atomic_t reservations;

	/*
	 * Incremented while holding the spinlock *lock* by a task checking if
	 * it can perform a nocow write (incremented if the value for the *ro*
	 * field is 0). Decremented by such tasks once they create an ordered
	 * extent or before that if some error happens before reaching that step.
	 * This is to prevent races between block group relocation and nocow
	 * writes through direct IO.
	 */
	atomic_t nocow_writers;

	/* Lock for free space tree operations. */
	struct mutex free_space_lock;

	/*
	 * Does the block group need to be added to the free space tree?
	 * Protected by free_space_lock.
	 */
	int needs_free_space;

	/* Record locked full stripes for RAID5/6 block group */
	struct btrfs_full_stripe_locks_tree full_stripe_locks_root;
};
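
/*
 * Usage sketch for the nocow_writers counter above (assumed shape of a nocow
 * write path, not a verbatim copy of the real one):
 *
 *	if (btrfs_inc_nocow_writers(fs_info, bytenr)) {
 *		... do the nocow write and create the ordered extent ...
 *		btrfs_dec_nocow_writers(fs_info, bytenr);
 *	} else {
 *		... fall back to a COW write ...
 *	}
 *
 * Relocation can then use btrfs_wait_nocow_writers() to make sure no such
 * writer is still in flight before moving the block group.
 */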

static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
{
	return (block_group->start + block_group->length);
}

static inline bool btrfs_is_block_group_data_only(
					struct btrfs_block_group *block_group)
{
	/*
	 * In mixed mode the fragmentation is expected to be high, lowering the
	 * efficiency, so only proper data block groups are considered.
	 */
	return (block_group->flags & BTRFS_BLOCK_GROUP_DATA) &&
	       !(block_group->flags & BTRFS_BLOCK_GROUP_METADATA);
}
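
/*
 * Example predicate use (illustrative; add_to_discard_list() is a
 * hypothetical helper): async discard only considers proper data block
 * groups, so a caller might gate on:
 *
 *	if (btrfs_is_block_group_data_only(block_group))
 *		add_to_discard_list(discard_ctl, block_group);
 */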

#ifdef CONFIG_BTRFS_DEBUG
static inline int btrfs_should_fragment_free_space(
					struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;

	return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
	       (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_DATA);
}
#endif

struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache);
void btrfs_get_block_group(struct btrfs_block_group *cache);
void btrfs_put_block_group(struct btrfs_block_group *cache);
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg);
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes);
int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache);
int btrfs_cache_block_group(struct btrfs_block_group *cache,
			    int load_cache_only);
void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache);
u64 add_new_free_space(struct btrfs_block_group *block_group,
		       u64 start, u64 end);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
				struct btrfs_fs_info *fs_info,
				const u64 chunk_offset);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     u64 group_start, struct extent_map *em);
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_unused(struct btrfs_block_group *bg);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
			   u64 type, u64 chunk_offset, u64 size);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);
int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
			     bool do_chunk_alloc);
void btrfs_dec_block_group_ro(struct btrfs_block_group *cache);
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans);
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, int alloc);
int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
			     u64 ram_bytes, u64 num_bytes, int delalloc);
void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
			       u64 num_bytes, int delalloc);
int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
		      enum btrfs_chunk_alloc_enum force);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type);
void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
int btrfs_free_block_groups(struct btrfs_fs_info *info);
void btrfs_wait_space_cache_v1_finished(struct btrfs_block_group *cache,
				struct btrfs_caching_control *caching_ctl);

static inline u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
}

static inline u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
}

static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
}
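
/*
 * These helpers map a block group type to the allocation profile flags the
 * allocator should use. A sketch of a caller that wants to force a new
 * metadata chunk (illustrative only):
 *
 *	u64 flags = btrfs_metadata_alloc_profile(fs_info);
 *
 *	ret = btrfs_chunk_alloc(trans, flags, CHUNK_ALLOC_FORCE);
 */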

static inline int btrfs_block_group_done(struct btrfs_block_group *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
		cache->cached == BTRFS_CACHE_ERROR;
}
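
/*
 * Sketch of the usual "cache, then wait for progress" pattern around the
 * helper above (assumed shape; the allocator's real version differs):
 *
 *	if (!btrfs_block_group_done(cache)) {
 *		ret = btrfs_cache_block_group(cache, 0);
 *		if (ret)
 *			return ret;
 *		btrfs_wait_block_group_cache_progress(cache, num_bytes);
 *	}
 */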

void btrfs_freeze_block_group(struct btrfs_block_group *cache);
void btrfs_unfreeze_block_group(struct btrfs_block_group *cache);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     u64 physical, u64 **logical, int *naddrs, int *stripe_len);
#endif

#endif /* BTRFS_BLOCK_GROUP_H */