/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"

#define BTRFS_ROOT_TRANS_TAG 0
static noinline void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(transaction->use_count == 0);
	transaction->use_count--;
	if (transaction->use_count == 0) {
		list_del_init(&transaction->list);
		memset(transaction, 0, sizeof(*transaction));
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}
static noinline void switch_commit_root(struct btrfs_root *root)
{
	free_extent_buffer(root->commit_root);
	root->commit_root = btrfs_root_node(root);
}
/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	cur_trans = root->fs_info->running_transaction;
	if (!cur_trans) {
		cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
					     GFP_NOFS);
		BUG_ON(!cur_trans);
		root->fs_info->generation++;
		cur_trans->num_writers = 1;
		cur_trans->num_joined = 0;
		cur_trans->transid = root->fs_info->generation;
		init_waitqueue_head(&cur_trans->writer_wait);
		init_waitqueue_head(&cur_trans->commit_wait);
		cur_trans->in_commit = 0;
		cur_trans->blocked = 0;
		cur_trans->use_count = 1;
		cur_trans->commit_done = 0;
		cur_trans->start_time = get_seconds();

		cur_trans->delayed_refs.root = RB_ROOT;
		cur_trans->delayed_refs.num_entries = 0;
		cur_trans->delayed_refs.num_heads_ready = 0;
		cur_trans->delayed_refs.num_heads = 0;
		cur_trans->delayed_refs.flushing = 0;
		cur_trans->delayed_refs.run_delayed_start = 0;
		spin_lock_init(&cur_trans->delayed_refs.lock);

		INIT_LIST_HEAD(&cur_trans->pending_snapshots);
		list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
		extent_io_tree_init(&cur_trans->dirty_pages,
				    root->fs_info->btree_inode->i_mapping,
				    GFP_NOFS);
		spin_lock(&root->fs_info->new_trans_lock);
		root->fs_info->running_transaction = cur_trans;
		spin_unlock(&root->fs_info->new_trans_lock);
	} else {
		cur_trans->num_writers++;
		cur_trans->num_joined++;
	}

	return 0;
}
/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static noinline int record_root_in_trans(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	if (root->ref_cows && root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
			   (unsigned long)root->root_key.objectid,
			   BTRFS_ROOT_TRANS_TAG);
		root->last_trans = trans->transid;
		btrfs_init_reloc_root(trans, root);
	}
	return 0;
}
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!root->ref_cows)
		return 0;

	mutex_lock(&root->fs_info->trans_mutex);
	if (root->last_trans == trans->transid) {
		mutex_unlock(&root->fs_info->trans_mutex);
		return 0;
	}

	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->trans_mutex);
	return 0;
}
/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		DEFINE_WAIT(wait);
		cur_trans->use_count++;
		while (1) {
			prepare_to_wait(&root->fs_info->transaction_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (!cur_trans->blocked)
				break;
			mutex_unlock(&root->fs_info->trans_mutex);
			schedule();
			mutex_lock(&root->fs_info->trans_mutex);
		}
		finish_wait(&root->fs_info->transaction_wait, &wait);
		put_transaction(cur_trans);
	}
}
enum btrfs_trans_type {
	TRANS_START,
	TRANS_JOIN,
	TRANS_USERSPACE,
	TRANS_JOIN_NOLOCK,
};

static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (!root->fs_info->log_root_recovering &&
	    ((type == TRANS_START && !root->fs_info->open_ioctl_trans) ||
	     type == TRANS_USERSPACE))
		return 1;
	return 0;
}
static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
						    u64 num_items, int type)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	int ret;

	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
		return ERR_PTR(-EROFS);
again:
	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h)
		return ERR_PTR(-ENOMEM);

	if (type != TRANS_JOIN_NOLOCK)
		mutex_lock(&root->fs_info->trans_mutex);
	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	ret = join_transaction(root);
	BUG_ON(ret);

	cur_trans = root->fs_info->running_transaction;
	cur_trans->use_count++;
	if (type != TRANS_JOIN_NOLOCK)
		mutex_unlock(&root->fs_info->trans_mutex);

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->blocks_used = 0;
	h->block_group = 0;
	h->bytes_reserved = 0;
	h->delayed_ref_updates = 0;
	h->block_rsv = NULL;

	smp_mb();
	if (cur_trans->blocked && may_wait_transaction(root, type)) {
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_items > 0) {
		ret = btrfs_trans_reserve_metadata(h, root, num_items);
		if (ret == -EAGAIN) {
			btrfs_commit_transaction(h, root);
			goto again;
		}
		if (ret < 0) {
			btrfs_end_transaction(h, root);
			return ERR_PTR(ret);
		}
	}

	if (type != TRANS_JOIN_NOLOCK)
		mutex_lock(&root->fs_info->trans_mutex);
	record_root_in_trans(h, root);
	if (type != TRANS_JOIN_NOLOCK)
		mutex_unlock(&root->fs_info->trans_mutex);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;
}
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_items)
{
	return start_transaction(root, num_items, TRANS_START);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
						  int num_blocks)
{
	return start_transaction(root, 0, TRANS_JOIN);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root,
							 int num_blocks)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
							 int num_blocks)
{
	return start_transaction(r, 0, TRANS_USERSPACE);
}
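
/*
 * Illustrative sketch (not part of the original file): the typical caller
 * pattern for the handles returned above.  example_modify_fs() is a
 * hypothetical helper; everything it calls is defined or used in this file.
 * Kept under #if 0 so it stays documentation only.
 */
#if 0
static int example_modify_fs(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	/* reserve room for one metadata item worth of changes */
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/* ... make btree modifications under this handle ... */

	/* drop our writer count; the commit itself happens later */
	return btrfs_end_transaction(trans, root);
}
#endif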
/* wait for a transaction commit to be fully complete */
static noinline int wait_for_commit(struct btrfs_root *root,
				    struct btrfs_transaction *commit)
{
	DEFINE_WAIT(wait);
	mutex_lock(&root->fs_info->trans_mutex);
	while (!commit->commit_done) {
		prepare_to_wait(&commit->commit_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (commit->commit_done)
			break;
		mutex_unlock(&root->fs_info->trans_mutex);
		schedule();
		mutex_lock(&root->fs_info->trans_mutex);
	}
	mutex_unlock(&root->fs_info->trans_mutex);
	finish_wait(&commit->commit_wait, &wait);
	return 0;
}
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret;

	mutex_lock(&root->fs_info->trans_mutex);

	ret = 0;
	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out_unlock;

		/* find specified transaction */
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				break;
			}
			if (t->transid > transid)
				break;
		}
		ret = -EINVAL;
		if (!cur_trans)
			goto out_unlock;  /* bad transid */
	} else {
		/* find newest transaction that is committing | committed */
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->in_commit) {
				if (t->commit_done)
					goto out_unlock;
				cur_trans = t;
				break;
			}
		}
		if (!cur_trans)
			goto out_unlock;  /* nothing committing|committed */
	}

	cur_trans->use_count++;
	mutex_unlock(&root->fs_info->trans_mutex);

	wait_for_commit(root, cur_trans);

	mutex_lock(&root->fs_info->trans_mutex);
	put_transaction(cur_trans);
	ret = 0;
out_unlock:
	mutex_unlock(&root->fs_info->trans_mutex);
	return ret;
}
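
/*
 * Illustrative sketch (not part of the original file): how a caller such as
 * the START_SYNC/WAIT_SYNC ioctl pair might use btrfs_wait_for_commit().
 * example_start_and_wait_sync() is hypothetical; passing transid == 0 would
 * instead wait on the newest committing transaction.  Kept under #if 0.
 */
#if 0
static int example_start_and_wait_sync(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	u64 transid;

	trans = btrfs_join_transaction(root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	transid = trans->transid;

	/* kick off the commit without waiting for it to finish */
	btrfs_commit_transaction_async(trans, root, 0);

	/* later: block until that exact transaction is fully on disk */
	return btrfs_wait_for_commit(root, transid);
}
#endif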
/*
 * rate limit against the drop_snapshot code.  This helps to slow down new
 * operations if the drop_snapshot code isn't able to keep up.
 */
static void throttle_on_drops(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	int harder_count = 0;

harder:
	if (atomic_read(&info->throttles)) {
		DEFINE_WAIT(wait);
		int thr;
		thr = atomic_read(&info->throttle_gen);

		do {
			prepare_to_wait(&info->transaction_throttle,
					&wait, TASK_UNINTERRUPTIBLE);
			if (!atomic_read(&info->throttles)) {
				finish_wait(&info->transaction_throttle, &wait);
				break;
			}
			schedule();
			finish_wait(&info->transaction_throttle, &wait);
		} while (thr == atomic_read(&info->throttle_gen));
		harder_count++;

		if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
		    harder_count < 2)
			goto harder;

		if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
		    harder_count < 10)
			goto harder;

		if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
		    harder_count < 20)
			goto harder;
	}
}
void btrfs_throttle(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->trans_mutex);
	if (!root->fs_info->open_ioctl_trans)
		wait_current_trans(root);
	mutex_unlock(&root->fs_info->trans_mutex);
}
static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	int ret;
	ret = btrfs_block_rsv_check(trans, root,
				    &root->fs_info->global_block_rsv, 0, 5);
	return ret ? 1 : 0;
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	int updates;

	if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
		return 1;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates)
		btrfs_run_delayed_refs(trans, root, updates);

	return should_end_transaction(trans, root);
}
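
/*
 * Illustrative sketch (not part of the original file): a long-running
 * operation can poll btrfs_should_end_transaction() and restart its handle
 * so no single transaction grows without bound.  example_long_operation()
 * and do_one_unit_of_work() are hypothetical names.  Kept under #if 0.
 */
#if 0
static int example_long_operation(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	while (do_one_unit_of_work(trans, root)) {
		if (btrfs_should_end_transaction(trans, root)) {
			/* let the current transaction commit, then rejoin */
			btrfs_end_transaction_throttle(trans, root);
			trans = btrfs_start_transaction(root, 1);
			if (IS_ERR(trans))
				return PTR_ERR(trans);
		}
	}
	return btrfs_end_transaction(trans, root);
}
#endif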
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, int throttle, int lock)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	int count = 0;

	while (count < 4) {
		unsigned long cur = trans->delayed_ref_updates;
		trans->delayed_ref_updates = 0;
		if (cur &&
		    trans->transaction->delayed_refs.num_heads_ready > 64) {
			trans->delayed_ref_updates = 0;

			/*
			 * do a full flush if the transaction is trying
			 * to close
			 */
			if (trans->transaction->delayed_refs.flushing)
				cur = 0;
			btrfs_run_delayed_refs(trans, root, cur);
		} else {
			break;
		}
		count++;
	}

	btrfs_trans_release_metadata(trans, root);

	if (lock && !root->fs_info->open_ioctl_trans &&
	    should_end_transaction(trans, root))
		trans->transaction->blocked = 1;

	if (lock && cur_trans->blocked && !cur_trans->in_commit) {
		if (throttle)
			return btrfs_commit_transaction(trans, root);
		else
			wake_up_process(info->transaction_kthread);
	}

	if (lock)
		mutex_lock(&info->trans_mutex);
	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(cur_trans->num_writers < 1);
	cur_trans->num_writers--;

	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);
	if (lock)
		mutex_unlock(&info->trans_mutex);

	if (current->journal_info == trans)
		current->journal_info = NULL;
	memset(trans, 0, sizeof(*trans));
	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (throttle)
		btrfs_run_delayed_iputs(root);

	return 0;
}
int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 0, 1);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1, 1);
}

int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 0, 0);
}
/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int err = 0;
	int werr = 0;
	struct page *page;
	struct inode *btree_inode = root->fs_info->btree_inode;
	u64 start = 0;
	u64 end;
	unsigned long index;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark);
		if (ret)
			break;
		while (start <= end) {
			cond_resched();

			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_get_page(btree_inode->i_mapping, index);
			if (!page)
				continue;

			btree_lock_page_hook(page);
			if (!page->mapping) {
				unlock_page(page);
				page_cache_release(page);
				continue;
			}

			if (PageWriteback(page)) {
				if (PageDirty(page))
					wait_on_page_writeback(page);
				else {
					unlock_page(page);
					page_cache_release(page);
					continue;
				}
			}
			err = write_one_page(page, 0);
			if (err)
				werr = err;
			page_cache_release(page);
		}
	}
	if (err)
		werr = err;
	return werr;
}
/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int err = 0;
	int werr = 0;
	struct page *page;
	struct inode *btree_inode = root->fs_info->btree_inode;
	u64 start = 0;
	u64 end;
	unsigned long index;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark);
		if (ret)
			break;

		clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
		while (start <= end) {
			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_get_page(btree_inode->i_mapping, index);
			if (!page)
				continue;
			if (PageDirty(page)) {
				btree_lock_page_hook(page);
				wait_on_page_writeback(page);
				err = write_one_page(page, 0);
				if (err)
					werr = err;
			}
			wait_on_page_writeback(page);
			page_cache_release(page);
			cond_resched();
		}
	}
	if (err)
		werr = err;
	return werr;
}
/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
				struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int ret2;

	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
	return ret || ret2;
}

int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	if (!trans || !trans->transaction) {
		struct inode *btree_inode;
		btree_inode = root->fs_info->btree_inode;
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	return btrfs_write_and_wait_marked_extents(root,
					   &trans->transaction->dirty_pages,
					   EXTENT_DIRTY);
}
/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);
	btrfs_write_dirty_block_groups(trans, root);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		BUG_ON(ret);

		old_root_used = btrfs_root_used(&root->root_item);
		ret = btrfs_write_dirty_block_groups(trans, root);
		BUG_ON(ret);
	}

	if (root != root->fs_info->extent_root)
		switch_commit_root(root);

	return 0;
}
/*
 * update all the cowonly tree roots on disk
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	eb = btrfs_lock_root_node(fs_info->tree_root);
	btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		update_cowonly_root(trans, root);
	}

	down_write(&fs_info->extent_commit_sem);
	switch_commit_root(fs_info->extent_root);
	up_write(&fs_info->extent_commit_sem);

	return 0;
}
/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->trans_mutex);
	list_add(&root->root_list, &root->fs_info->dead_roots);
	mutex_unlock(&root->fs_info->trans_mutex);
	return 0;
}
/*
 * update all the fs/subvolume tree roots that were modified in this
 * transaction, clearing their tags in the radix tree so each root is
 * visited only once
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			if (root->commit_root != root->node) {
				switch_commit_root(root);
				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			if (err)
				break;
		}
	}
	return err;
}
/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;
	unsigned long nr;

	if (xchg(&root->defrag_running, 1))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root, cacheonly);

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root, nr);
		cond_resched();

		if (root->fs_info->closing || ret != -EAGAIN)
			break;
	}
	root->defrag_running = 0;
	return ret;
}
/*
 * when dropping snapshots, we generate a ton of delayed refs, and it makes
 * sense not to join the transaction while it is trying to flush the current
 * queue of delayed refs out.
 *
 * This is used by the drop snapshot code only
 */
static noinline int wait_transaction_pre_flush(struct btrfs_fs_info *info)
{
	DEFINE_WAIT(wait);

	mutex_lock(&info->trans_mutex);
	while (info->running_transaction &&
	       info->running_transaction->delayed_refs.flushing) {
		prepare_to_wait(&info->transaction_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		mutex_unlock(&info->trans_mutex);

		schedule();

		mutex_lock(&info->trans_mutex);
		finish_wait(&info->transaction_wait, &wait);
	}
	mutex_unlock(&info->trans_mutex);
	return 0;
}
/*
 * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on
 * all of them
 */
int btrfs_drop_dead_root(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	unsigned long nr;
	int ret;

	while (1) {
		/*
		 * we don't want to jump in and create a bunch of
		 * delayed refs if the transaction is starting to close
		 */
		wait_transaction_pre_flush(tree_root->fs_info);
		trans = btrfs_start_transaction(tree_root, 1);

		/*
		 * we've joined a transaction, make sure it isn't
		 * closing right now
		 */
		if (trans->transaction->delayed_refs.flushing) {
			btrfs_end_transaction(trans, tree_root);
			continue;
		}

		ret = btrfs_drop_snapshot(trans, root);
		if (ret != -EAGAIN)
			break;

		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			break;

		nr = trans->blocks_used;
		ret = btrfs_end_transaction(trans, tree_root);
		BUG_ON(ret);

		btrfs_btree_balance_dirty(tree_root, nr);
		cond_resched();
	}
	BUG_ON(ret);

	ret = btrfs_del_root(trans, tree_root, &root->root_key);
	BUG_ON(ret);

	nr = trans->blocks_used;
	ret = btrfs_end_transaction(trans, tree_root);
	BUG_ON(ret);

	free_extent_buffer(root->node);
	free_extent_buffer(root->commit_root);
	kfree(root);

	btrfs_btree_balance_dirty(tree_root, nr);
	return ret;
}
/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct inode *parent_inode;
	struct dentry *parent;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	int ret;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		pending->error = -ENOMEM;
		goto fail;
	}

	ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
	if (ret) {
		pending->error = ret;
		goto fail;
	}

	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
	btrfs_orphan_pre_snapshot(trans, pending, &to_reserve);

	if (to_reserve > 0) {
		ret = btrfs_block_rsv_add(trans, root, &pending->block_rsv,
					  to_reserve);
		if (ret) {
			pending->error = ret;
			goto fail;
		}
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	trans->block_rsv = &pending->block_rsv;

	dentry = pending->dentry;
	parent = dget_parent(dentry);
	parent_inode = parent->d_inode;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret);
	ret = btrfs_insert_dir_item(trans, parent_root,
				dentry->d_name.name, dentry->d_name.len,
				parent_inode->i_ino, &key,
				BTRFS_FT_DIR, index);
	BUG_ON(ret);

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	ret = btrfs_update_inode(trans, parent_root, parent_inode);
	BUG_ON(ret);

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	old = btrfs_lock_root_node(root);
	btrfs_cow_block(trans, root, old, NULL, 0, &old);
	btrfs_set_lock_blocking(old);

	btrfs_copy_root(trans, root, old, &tmp, objectid);
	btrfs_tree_unlock(old);
	free_extent_buffer(old);

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	BUG_ON(ret);

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 parent_inode->i_ino, index,
				 dentry->d_name.name, dentry->d_name.len);
	BUG_ON(ret);
	dput(parent);

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	BUG_ON(IS_ERR(pending->snap));

	btrfs_reloc_post_snapshot(trans, pending);
	btrfs_orphan_post_snapshot(trans, pending);
fail:
	kfree(new_root_item);
	btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
	return 0;
}
/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret;

	list_for_each_entry(pending, head, list) {
		ret = create_pending_snapshot(trans, fs_info, pending);
		BUG_ON(ret);
	}
	return 0;
}
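
/*
 * Illustrative sketch (not part of the original file): how snapshot creation
 * is driven from outside this file.  A caller (the snapshot ioctl, in
 * practice) queues a btrfs_pending_snapshot on the running transaction, and
 * the list is drained by create_pending_snapshots() during commit.
 * example_queue_snapshot() is hypothetical and heavily simplified; the real
 * caller also initializes pending->block_rsv and handles errors.  #if 0.
 */
#if 0
static int example_queue_snapshot(struct btrfs_root *root,
				  struct dentry *dentry,
				  struct btrfs_pending_snapshot *pending)
{
	struct btrfs_trans_handle *trans;

	pending->root = root;
	pending->dentry = dentry;

	trans = btrfs_start_transaction(root->fs_info->extent_root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	list_add(&pending->list, &trans->transaction->pending_snapshots);

	/* the snapshot is actually created inside this commit */
	return btrfs_commit_transaction(trans, root->fs_info->extent_root);
}
#endif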
static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = &root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (super->cache_generation != 0 || btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
}
int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->new_trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->in_commit;
	spin_unlock(&info->new_trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->new_trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->blocked;
	spin_unlock(&info->new_trans_lock);
	return ret;
}
/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	DEFINE_WAIT(wait);

	if (trans->in_commit)
		return;

	while (1) {
		prepare_to_wait(&root->fs_info->transaction_blocked_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (trans->in_commit) {
			finish_wait(&root->fs_info->transaction_blocked_wait,
				    &wait);
			break;
		}
		mutex_unlock(&root->fs_info->trans_mutex);
		schedule();
		mutex_lock(&root->fs_info->trans_mutex);
		finish_wait(&root->fs_info->transaction_blocked_wait, &wait);
	}
}
/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					 struct btrfs_transaction *trans)
{
	DEFINE_WAIT(wait);

	if (trans->commit_done || (trans->in_commit && !trans->blocked))
		return;

	while (1) {
		prepare_to_wait(&root->fs_info->transaction_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (trans->commit_done ||
		    (trans->in_commit && !trans->blocked)) {
			finish_wait(&root->fs_info->transaction_wait,
				    &wait);
			break;
		}
		mutex_unlock(&root->fs_info->trans_mutex);
		schedule();
		mutex_lock(&root->fs_info->trans_mutex);
		finish_wait(&root->fs_info->transaction_wait,
			    &wait);
	}
}
/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct btrfs_root *root;
	struct delayed_work work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work.work);

	btrfs_commit_transaction(ac->newtrans, ac->root);
	kfree(ac);
}
int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_DELAYED_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root, 0);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	mutex_lock(&root->fs_info->trans_mutex);
	cur_trans = trans->transaction;
	cur_trans->use_count++;
	mutex_unlock(&root->fs_info->trans_mutex);

	btrfs_end_transaction(trans, root);
	schedule_delayed_work(&ac->work, 0);

	/* wait for transaction to start and unblock */
	mutex_lock(&root->fs_info->trans_mutex);
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);
	put_transaction(cur_trans);
	mutex_unlock(&root->fs_info->trans_mutex);

	return 0;
}
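
/*
 * Illustrative sketch (not part of the original file): asynchronous commit
 * as a caller sees it.  With wait_for_unblock == 0 this returns as soon as
 * the commit has started (new joiners are already fenced off); with 1 it
 * also waits until the transaction is unblocked.  example_async_sync() is
 * a hypothetical helper.  Kept under #if 0.
 */
#if 0
static int example_async_sync(struct btrfs_root *root, int wait_unblock)
{
	struct btrfs_trans_handle *trans;

	trans = btrfs_join_transaction(root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/* hands the handle off to a worker; do not touch trans afterwards */
	return btrfs_commit_transaction_async(trans, root, wait_unblock);
}
#endif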
/*
 * btrfs_transaction state sequence:
 *
 *	in_commit = 0, blocked = 0  (initial)
 *	in_commit = 1, blocked = 1
 *	blocked = 0
 *	commit_done = 1
 */
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	struct btrfs_transaction *cur_trans;
	struct btrfs_transaction *prev_trans = NULL;
	DEFINE_WAIT(wait);
	int ret;
	int should_grow = 0;
	unsigned long now = get_seconds();
	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);

	btrfs_run_ordered_operations(root, 0);

	/* make a pass through all the delayed refs we have so far
	 * any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	btrfs_trans_release_metadata(trans, root);

	cur_trans = trans->transaction;
	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;

	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	mutex_lock(&root->fs_info->trans_mutex);
	if (cur_trans->in_commit) {
		cur_trans->use_count++;
		mutex_unlock(&root->fs_info->trans_mutex);
		btrfs_end_transaction(trans, root);

		ret = wait_for_commit(root, cur_trans);
		BUG_ON(ret);

		mutex_lock(&root->fs_info->trans_mutex);
		put_transaction(cur_trans);
		mutex_unlock(&root->fs_info->trans_mutex);

		return 0;
	}

	trans->transaction->in_commit = 1;
	trans->transaction->blocked = 1;
	wake_up(&root->fs_info->transaction_blocked_wait);

	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			prev_trans->use_count++;
			mutex_unlock(&root->fs_info->trans_mutex);

			wait_for_commit(root, prev_trans);

			mutex_lock(&root->fs_info->trans_mutex);
			put_transaction(prev_trans);
		}
	}

	if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
		should_grow = 1;

	do {
		int snap_pending = 0;
		joined = cur_trans->num_joined;
		if (!list_empty(&trans->transaction->pending_snapshots))
			snap_pending = 1;

		WARN_ON(cur_trans != trans->transaction);
		mutex_unlock(&root->fs_info->trans_mutex);

		if (flush_on_commit || snap_pending) {
			btrfs_start_delalloc_inodes(root, 1);
			ret = btrfs_wait_ordered_extents(root, 0, 1);
			BUG_ON(ret);
		}

		/*
		 * rename doesn't use btrfs_join_transaction, so, once we
		 * set the transaction to blocked above, we aren't going
		 * to get any new ordered operations.  We can safely run
		 * it here and know for sure that nothing new will be added
		 * to the list
		 */
		btrfs_run_ordered_operations(root, 1);

		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		smp_mb();
		if (cur_trans->num_writers > 1)
			schedule_timeout(MAX_SCHEDULE_TIMEOUT);
		else if (should_grow)
			schedule_timeout(1);

		mutex_lock(&root->fs_info->trans_mutex);
		finish_wait(&cur_trans->writer_wait, &wait);
	} while (cur_trans->num_writers > 1 ||
		 (should_grow && cur_trans->num_joined != joined));

	ret = create_pending_snapshots(trans, root->fs_info);
	BUG_ON(ret);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	WARN_ON(cur_trans != trans->transaction);

	/* btrfs_commit_tree_roots is responsible for getting the
	 * various roots consistent with each other.  Every pointer
	 * in the tree of tree roots has to point to the most up to date
	 * root for every subvolume and other tree.  So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	BUG_ON(ret);

	/* commit_fs_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	BUG_ON(ret);

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;
	spin_lock(&root->fs_info->new_trans_lock);
	root->fs_info->running_transaction = NULL;
	spin_unlock(&root->fs_info->new_trans_lock);

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	switch_commit_root(root->fs_info->tree_root);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	switch_commit_root(root->fs_info->chunk_root);

	update_super_roots(root);

	if (!root->fs_info->log_root_recovering) {
		btrfs_set_super_log_root(&root->fs_info->super_copy, 0);
		btrfs_set_super_log_root_level(&root->fs_info->super_copy, 0);
	}

	memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
	       sizeof(root->fs_info->super_copy));

	trans->transaction->blocked = 0;

	wake_up(&root->fs_info->transaction_wait);

	mutex_unlock(&root->fs_info->trans_mutex);
	ret = btrfs_write_and_wait_transaction(trans, root);
	BUG_ON(ret);
	write_ctree_super(trans, root, 0);

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	mutex_lock(&root->fs_info->trans_mutex);

	cur_trans->commit_done = 1;

	root->fs_info->last_trans_committed = cur_trans->transid;

	wake_up(&cur_trans->commit_wait);

	put_transaction(cur_trans);
	put_transaction(cur_trans);

	mutex_unlock(&root->fs_info->trans_mutex);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;
}
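
/*
 * Illustrative sketch (not part of the original file): the simplest
 * synchronous-commit caller, roughly what a sync(2) path does.  Join
 * whatever transaction is running (starting one if needed) and commit it,
 * walking the in_commit/blocked state sequence documented above.
 * example_force_commit() is a hypothetical name.  Kept under #if 0.
 */
#if 0
static int example_force_commit(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = btrfs_join_transaction(root, 1);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/* consumes the handle; returns once everything is on disk */
	return btrfs_commit_transaction(trans, root);
}
#endif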
/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->trans_mutex);
	list_splice_init(&fs_info->dead_roots, &list);
	mutex_unlock(&fs_info->trans_mutex);

	while (!list_empty(&list)) {
		root = list_entry(list.next, struct btrfs_root, root_list);
		list_del(&root->root_list);

		if (btrfs_header_backref_rev(root->node) <
		    BTRFS_MIXED_BACKREF_REV)
			btrfs_drop_snapshot(root, NULL, 0);
		else
			btrfs_drop_snapshot(root, NULL, 1);
	}
	return 0;
}