/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */
6 #ifndef BTRFS_TRANSACTION_H
7 #define BTRFS_TRANSACTION_H
9 #include <linux/refcount.h>
10 #include "btrfs_inode.h"
11 #include "delayed-ref.h"
/*
 * Lifecycle states of a running transaction; stored in
 * btrfs_transaction::state and guarded by fs_info->trans_lock (see the
 * comment on the state member below).
 */
enum btrfs_trans_state {
	TRANS_STATE_RUNNING	 = 0,
	TRANS_STATE_BLOCKED	 = 1,
	TRANS_STATE_COMMIT_START = 2,
	TRANS_STATE_COMMIT_DOING = 3,
	TRANS_STATE_UNBLOCKED	 = 4,
	TRANS_STATE_COMPLETED	 = 5,
};
/*
 * Per-transaction flag bit numbers.
 * NOTE(review): presumably used with set_bit()/test_bit() on a transaction
 * flags word that is not visible in this extract (BTRFS_TRANS_DIRTY_BG_RUN
 * is referenced from the io_bgs comment below) -- confirm against the .c file.
 */
#define BTRFS_TRANS_HAVE_FREE_BGS 0
#define BTRFS_TRANS_DIRTY_BG_RUN 1
#define BTRFS_TRANS_CACHE_ENOSPC 2
28 struct btrfs_transaction
{
31 * total external writers(USERSPACE/START/ATTACH) in this
32 * transaction, it must be zero before the transaction is
35 atomic_t num_extwriters
;
37 * total writers in this transaction, it must be zero before the
42 atomic_t pending_ordered
;
46 /* Be protected by fs_info->trans_lock when we want to change it. */
47 enum btrfs_trans_state state
;
49 struct list_head list
;
50 struct extent_io_tree dirty_pages
;
52 wait_queue_head_t writer_wait
;
53 wait_queue_head_t commit_wait
;
54 wait_queue_head_t pending_wait
;
55 struct list_head pending_snapshots
;
56 struct list_head pending_chunks
;
57 struct list_head switch_commits
;
58 struct list_head dirty_bgs
;
61 * There is no explicit lock which protects io_bgs, rather its
62 * consistency is implied by the fact that all the sites which modify
63 * it do so under some form of transaction critical section, namely:
65 * - btrfs_start_dirty_block_groups - This function can only ever be
66 * run by one of the transaction committers. Refer to
67 * BTRFS_TRANS_DIRTY_BG_RUN usage in btrfs_commit_transaction
69 * - btrfs_write_dirty_blockgroups - this is called by
70 * commit_cowonly_roots from transaction critical section
71 * (TRANS_STATE_COMMIT_DOING)
73 * - btrfs_cleanup_dirty_bgs - called on transaction abort
75 struct list_head io_bgs
;
76 struct list_head dropped_roots
;
79 * we need to make sure block group deletion doesn't race with
80 * free space cache writeout. This mutex keeps them from stomping
83 struct mutex cache_write_mutex
;
84 spinlock_t dirty_bgs_lock
;
85 unsigned int num_dirty_bgs
;
86 /* Protected by spin lock fs_info->unused_bgs_lock. */
87 struct list_head deleted_bgs
;
88 spinlock_t dropped_roots_lock
;
89 struct btrfs_delayed_ref_root delayed_refs
;
90 struct btrfs_fs_info
*fs_info
;
/* Set on handle types that must wait out a filesystem freeze. */
#define __TRANS_FREEZABLE (1U << 0)

/* Internal handle-type bits; callers use the TRANS_* combinations below. */
#define __TRANS_START (1U << 9)
#define __TRANS_ATTACH (1U << 10)
#define __TRANS_JOIN (1U << 11)
#define __TRANS_JOIN_NOLOCK (1U << 12)
#define __TRANS_DUMMY (1U << 13)
#define __TRANS_JOIN_NOSTART (1U << 14)

/* Public handle types; START and JOIN are blocked while the fs is frozen. */
#define TRANS_START (__TRANS_START | __TRANS_FREEZABLE)
#define TRANS_ATTACH (__TRANS_ATTACH)
#define TRANS_JOIN (__TRANS_JOIN | __TRANS_FREEZABLE)
#define TRANS_JOIN_NOLOCK (__TRANS_JOIN_NOLOCK)
#define TRANS_JOIN_NOSTART (__TRANS_JOIN_NOSTART)

/*
 * Handle types accounted as "external writers" (presumably counted in
 * btrfs_transaction::num_extwriters, whose comment mentions START/ATTACH).
 */
#define TRANS_EXTWRITERS (__TRANS_START | __TRANS_ATTACH)

/* Sentinel, non-dereferenceable handle value (used by send -- confirm). */
#define BTRFS_SEND_TRANS_STUB ((void *)1)
112 struct btrfs_trans_handle
{
115 u64 chunk_bytes_reserved
;
116 unsigned long delayed_ref_updates
;
117 struct btrfs_transaction
*transaction
;
118 struct btrfs_block_rsv
*block_rsv
;
119 struct btrfs_block_rsv
*orig_rsv
;
120 refcount_t use_count
;
124 bool allocating_chunk
;
125 bool can_flush_pending_bgs
;
129 struct btrfs_root
*root
;
130 struct btrfs_fs_info
*fs_info
;
131 struct list_head new_bgs
;
134 struct btrfs_pending_snapshot
{
135 struct dentry
*dentry
;
137 struct btrfs_root
*root
;
138 struct btrfs_root_item
*root_item
;
139 struct btrfs_root
*snap
;
140 struct btrfs_qgroup_inherit
*inherit
;
141 struct btrfs_path
*path
;
142 /* block reservation for the operation */
143 struct btrfs_block_rsv block_rsv
;
144 /* extra metadata reservation for relocation */
147 struct list_head list
;
150 static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle
*trans
,
153 spin_lock(&BTRFS_I(inode
)->lock
);
154 BTRFS_I(inode
)->last_trans
= trans
->transaction
->transid
;
155 BTRFS_I(inode
)->last_sub_trans
= BTRFS_I(inode
)->root
->log_transid
;
156 BTRFS_I(inode
)->last_log_commit
= BTRFS_I(inode
)->root
->last_log_commit
;
157 spin_unlock(&BTRFS_I(inode
)->lock
);
161 * Make qgroup codes to skip given qgroupid, means the old/new_roots for
162 * qgroup won't contain the qgroupid in it.
164 static inline void btrfs_set_skip_qgroup(struct btrfs_trans_handle
*trans
,
167 struct btrfs_delayed_ref_root
*delayed_refs
;
169 delayed_refs
= &trans
->transaction
->delayed_refs
;
170 WARN_ON(delayed_refs
->qgroup_to_skip
);
171 delayed_refs
->qgroup_to_skip
= qgroupid
;
174 static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle
*trans
)
176 struct btrfs_delayed_ref_root
*delayed_refs
;
178 delayed_refs
= &trans
->transaction
->delayed_refs
;
179 WARN_ON(!delayed_refs
->qgroup_to_skip
);
180 delayed_refs
->qgroup_to_skip
= 0;
183 int btrfs_end_transaction(struct btrfs_trans_handle
*trans
);
184 struct btrfs_trans_handle
*btrfs_start_transaction(struct btrfs_root
*root
,
185 unsigned int num_items
);
186 struct btrfs_trans_handle
*btrfs_start_transaction_fallback_global_rsv(
187 struct btrfs_root
*root
,
188 unsigned int num_items
,
190 struct btrfs_trans_handle
*btrfs_join_transaction(struct btrfs_root
*root
);
191 struct btrfs_trans_handle
*btrfs_join_transaction_nolock(struct btrfs_root
*root
);
192 struct btrfs_trans_handle
*btrfs_join_transaction_nostart(struct btrfs_root
*root
);
193 struct btrfs_trans_handle
*btrfs_attach_transaction(struct btrfs_root
*root
);
194 struct btrfs_trans_handle
*btrfs_attach_transaction_barrier(
195 struct btrfs_root
*root
);
196 int btrfs_wait_for_commit(struct btrfs_fs_info
*fs_info
, u64 transid
);
198 void btrfs_add_dead_root(struct btrfs_root
*root
);
199 int btrfs_defrag_root(struct btrfs_root
*root
);
200 int btrfs_clean_one_deleted_snapshot(struct btrfs_root
*root
);
201 int btrfs_commit_transaction(struct btrfs_trans_handle
*trans
);
202 int btrfs_commit_transaction_async(struct btrfs_trans_handle
*trans
,
203 int wait_for_unblock
);
206 * Try to commit transaction asynchronously, so this is safe to call
207 * even holding a spinlock.
209 * It's done by informing transaction_kthread to commit transaction without
210 * waiting for commit interval.
212 static inline void btrfs_commit_transaction_locksafe(
213 struct btrfs_fs_info
*fs_info
)
215 set_bit(BTRFS_FS_NEED_ASYNC_COMMIT
, &fs_info
->flags
);
216 wake_up_process(fs_info
->transaction_kthread
);
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
int btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
void btrfs_throttle(struct btrfs_fs_info *fs_info);
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root);
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
			       struct extent_io_tree *dirty_pages, int mark);
int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
		       struct extent_io_tree *dirty_pages);
int btrfs_wait_tree_log_extents(struct btrfs_root *root, int mark);
int btrfs_transaction_blocked(struct btrfs_fs_info *info);
int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
void btrfs_put_transaction(struct btrfs_transaction *transaction);
void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info);
void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root);