/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"

#define BTRFS_ROOT_TRANS_TAG 0

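/*
 * drop one reference on a transaction.  When the last reference goes away
 * the transaction is removed from the fs_info trans_list and freed back to
 * the btrfs_transaction_cachep slab.
 */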
static noinline void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(transaction->use_count == 0);
	transaction->use_count--;
	if (transaction->use_count == 0) {
		list_del_init(&transaction->list);
		memset(transaction, 0, sizeof(*transaction));
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	cur_trans = root->fs_info->running_transaction;
	if (!cur_trans) {
		cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
					     GFP_NOFS);
		BUG_ON(!cur_trans);
		root->fs_info->generation++;
		cur_trans->num_writers = 1;
		cur_trans->num_joined = 0;
		cur_trans->transid = root->fs_info->generation;
		init_waitqueue_head(&cur_trans->writer_wait);
		init_waitqueue_head(&cur_trans->commit_wait);
		cur_trans->in_commit = 0;
		cur_trans->blocked = 0;
		cur_trans->use_count = 1;
		cur_trans->commit_done = 0;
		cur_trans->start_time = get_seconds();

		cur_trans->delayed_refs.root.rb_node = NULL;
		cur_trans->delayed_refs.num_entries = 0;
		cur_trans->delayed_refs.num_heads_ready = 0;
		cur_trans->delayed_refs.num_heads = 0;
		cur_trans->delayed_refs.flushing = 0;
		cur_trans->delayed_refs.run_delayed_start = 0;
		spin_lock_init(&cur_trans->delayed_refs.lock);

		INIT_LIST_HEAD(&cur_trans->pending_snapshots);
		list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
		extent_io_tree_init(&cur_trans->dirty_pages,
				     root->fs_info->btree_inode->i_mapping,
				     GFP_NOFS);
		spin_lock(&root->fs_info->new_trans_lock);
		root->fs_info->running_transaction = cur_trans;
		spin_unlock(&root->fs_info->new_trans_lock);
	} else {
		cur_trans->num_writers++;
		cur_trans->num_joined++;
	}

	return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static noinline int record_root_in_trans(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	if (root->ref_cows && root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->root_item.refs == 0);
		WARN_ON(root->commit_root != root->node);

		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		root->last_trans = trans->transid;
		btrfs_init_reloc_root(trans, root);
	}
	return 0;
}

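/*
 * exported wrapper around record_root_in_trans().  It takes the
 * fs_info trans_mutex and returns early if this root was already
 * recorded in the running transaction.
 */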
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!root->ref_cows)
		return 0;

	mutex_lock(&root->fs_info->trans_mutex);
	if (root->last_trans == trans->transid) {
		mutex_unlock(&root->fs_info->trans_mutex);
		return 0;
	}

	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->trans_mutex);
	return 0;
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		DEFINE_WAIT(wait);
		cur_trans->use_count++;
		while (1) {
			prepare_to_wait(&root->fs_info->transaction_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (cur_trans->blocked) {
				mutex_unlock(&root->fs_info->trans_mutex);
				schedule();
				mutex_lock(&root->fs_info->trans_mutex);
				finish_wait(&root->fs_info->transaction_wait,
					    &wait);
			} else {
				finish_wait(&root->fs_info->transaction_wait,
					    &wait);
				break;
			}
		}
		put_transaction(cur_trans);
	}
}

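/*
 * common helper behind btrfs_start_transaction, btrfs_join_transaction and
 * btrfs_start_ioctl_transaction.  The wait argument controls whether we
 * block on a committing transaction before joining: 0 never waits, 1 waits
 * unless an ioctl transaction is open, and 2 always waits.  Log replay
 * skips the wait entirely.
 */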
static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
					     int num_blocks, int wait)
{
	struct btrfs_trans_handle *h =
		kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	int ret;

	mutex_lock(&root->fs_info->trans_mutex);
	if (!root->fs_info->log_root_recovering &&
	    ((wait == 1 && !root->fs_info->open_ioctl_trans) || wait == 2))
		wait_current_trans(root);
	ret = join_transaction(root);
	BUG_ON(ret);

	h->transid = root->fs_info->running_transaction->transid;
	h->transaction = root->fs_info->running_transaction;
	h->blocks_reserved = num_blocks;
	h->blocks_used = 0;
	h->block_group = 0;
	h->alloc_exclude_nr = 0;
	h->alloc_exclude_start = 0;
	h->delayed_ref_updates = 0;

	root->fs_info->running_transaction->use_count++;
	record_root_in_trans(h, root);
	mutex_unlock(&root->fs_info->trans_mutex);
	return h;
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_blocks)
{
	return start_transaction(root, num_blocks, 1);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
						  int num_blocks)
{
	return start_transaction(root, num_blocks, 0);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
							  int num_blocks)
{
	return start_transaction(r, num_blocks, 2);
}

/* wait for a transaction commit to be fully complete */
static noinline int wait_for_commit(struct btrfs_root *root,
				    struct btrfs_transaction *commit)
{
	DEFINE_WAIT(wait);

	mutex_lock(&root->fs_info->trans_mutex);
	while (!commit->commit_done) {
		prepare_to_wait(&commit->commit_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (commit->commit_done)
			break;
		mutex_unlock(&root->fs_info->trans_mutex);
		schedule();
		mutex_lock(&root->fs_info->trans_mutex);
	}
	mutex_unlock(&root->fs_info->trans_mutex);
	finish_wait(&commit->commit_wait, &wait);
	return 0;
}

/*
 * rate limit against the drop_snapshot code.  This helps to slow down new
 * operations if the drop_snapshot code isn't able to keep up.
 */
static void throttle_on_drops(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	int harder_count = 0;

harder:
	if (atomic_read(&info->throttles)) {
		DEFINE_WAIT(wait);
		int thr;
		thr = atomic_read(&info->throttle_gen);

		do {
			prepare_to_wait(&info->transaction_throttle,
					&wait, TASK_UNINTERRUPTIBLE);
			if (!atomic_read(&info->throttles)) {
				finish_wait(&info->transaction_throttle, &wait);
				break;
			}
			schedule();
			finish_wait(&info->transaction_throttle, &wait);
		} while (thr == atomic_read(&info->throttle_gen));
		harder_count++;

		if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
		    harder_count < 5)
			goto harder;

		if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
		    harder_count < 10)
			goto harder;

		if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
		    harder_count < 20)
			goto harder;
	}
}

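/*
 * wait for the currently committing transaction to unblock, unless a long
 * running ioctl transaction is open.
 */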
void btrfs_throttle(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->trans_mutex);
	if (!root->fs_info->open_ioctl_trans)
		wait_current_trans(root);
	mutex_unlock(&root->fs_info->trans_mutex);
}

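/*
 * common helper for ending a transaction handle.  It pushes batches of
 * queued delayed ref updates down if enough have accumulated, drops this
 * writer's count on the transaction and frees the handle.  When throttle
 * is set the caller is also rate limited against snapshot deletion.
 */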
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, int throttle)
{
	struct btrfs_transaction *cur_trans;
	struct btrfs_fs_info *info = root->fs_info;
	int count = 0;

	while (count < 4) {
		unsigned long cur = trans->delayed_ref_updates;
		trans->delayed_ref_updates = 0;
		if (cur &&
		    trans->transaction->delayed_refs.num_heads_ready > 64) {
			trans->delayed_ref_updates = 0;

			/*
			 * do a full flush if the transaction is trying
			 * to close
			 */
			if (trans->transaction->delayed_refs.flushing)
				cur = 0;
			btrfs_run_delayed_refs(trans, root, cur);
		} else {
			break;
		}
		count++;
	}

	mutex_lock(&info->trans_mutex);
	cur_trans = info->running_transaction;
	WARN_ON(cur_trans != trans->transaction);
	WARN_ON(cur_trans->num_writers < 1);
	cur_trans->num_writers--;

	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);
	mutex_unlock(&info->trans_mutex);
	memset(trans, 0, sizeof(*trans));
	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (throttle)
		throttle_on_drops(root);

	return 0;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages)
{
	int ret;
	int err = 0;
	int werr = 0;
	struct page *page;
	struct inode *btree_inode = root->fs_info->btree_inode;
	u64 start = 0;
	u64 end;
	unsigned long index;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    EXTENT_DIRTY);
		if (ret)
			break;
		while (start <= end) {
			cond_resched();

			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_get_page(btree_inode->i_mapping, index);
			if (!page)
				continue;

			btree_lock_page_hook(page);
			if (!page->mapping) {
				unlock_page(page);
				page_cache_release(page);
				continue;
			}

			if (PageWriteback(page)) {
				if (PageDirty(page))
					wait_on_page_writeback(page);
				else {
					unlock_page(page);
					page_cache_release(page);
					continue;
				}
			}
			err = write_one_page(page, 0);
			if (err)
				werr = err;
			page_cache_release(page);
		}
	}
	while (1) {
		ret = find_first_extent_bit(dirty_pages, 0, &start, &end,
					    EXTENT_DIRTY);
		if (ret)
			break;

		clear_extent_dirty(dirty_pages, start, end, GFP_NOFS);
		while (start <= end) {
			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_get_page(btree_inode->i_mapping, index);
			if (!page)
				continue;
			if (PageDirty(page)) {
				btree_lock_page_hook(page);
				wait_on_page_writeback(page);
				err = write_one_page(page, 0);
				if (err)
					werr = err;
			}
			wait_on_page_writeback(page);
			page_cache_release(page);
			cond_resched();
		}
	}
	if (err)
		werr = err;
	return werr;
}

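/*
 * write and wait on all the dirty btree blocks that belong to this
 * transaction.  With no transaction handle this falls back to flushing
 * the whole btree inode.
 */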
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	if (!trans || !trans->transaction) {
		struct inode *btree_inode;
		btree_inode = root->fs_info->btree_inode;
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	return btrfs_write_and_wait_marked_extents(root,
					   &trans->transaction->dirty_pages);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	btrfs_write_dirty_block_groups(trans, root);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start)
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		BUG_ON(ret);

		btrfs_write_dirty_block_groups(trans, root);

		ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
		BUG_ON(ret);
	}
	free_extent_buffer(root->commit_root);
	root->commit_root = btrfs_root_node(root);
	return 0;
}

/*
 * update all the cowonly tree roots on disk
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	eb = btrfs_lock_root_node(fs_info->tree_root);
	btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		update_cowonly_root(trans, root);

		ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
		BUG_ON(ret);
	}
	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->trans_mutex);
	list_add(&root->root_list, &root->fs_info->dead_roots);
	mutex_unlock(&root->fs_info->trans_mutex);
	return 0;
}

/*
 * update all the fs tree roots that were tagged as dirty in this transaction
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
				     (unsigned long)root->root_key.objectid,
				     BTRFS_ROOT_TRANS_TAG);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);

			if (root->commit_root == root->node)
				continue;

			free_extent_buffer(root->commit_root);
			root->commit_root = btrfs_root_node(root);

			btrfs_set_root_node(&root->root_item, root->node);
			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			if (err)
				break;
		}
	}
	return err;
}

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
	struct btrfs_fs_info *info = root->fs_info;
	int ret;
	struct btrfs_trans_handle *trans;
	unsigned long nr;

	smp_mb();
	if (root->defrag_running)
		return 0;
	trans = btrfs_start_transaction(root, 1);
	while (1) {
		root->defrag_running = 1;
		ret = btrfs_defrag_leaves(trans, root, cacheonly);
		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root, nr);
		cond_resched();

		trans = btrfs_start_transaction(root, 1);
		if (root->fs_info->closing || ret != -EAGAIN)
			break;
	}
	root->defrag_running = 0;
	smp_mb();
	btrfs_end_transaction(trans, root);
	return 0;
}

/*
 * when dropping snapshots, we generate a ton of delayed refs, and it makes
 * sense not to join the transaction while it is trying to flush the current
 * queue of delayed refs out.
 *
 * This is used by the drop snapshot code only
 */
static noinline int wait_transaction_pre_flush(struct btrfs_fs_info *info)
{
	DEFINE_WAIT(wait);

	mutex_lock(&info->trans_mutex);
	while (info->running_transaction &&
	       info->running_transaction->delayed_refs.flushing) {
		prepare_to_wait(&info->transaction_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		mutex_unlock(&info->trans_mutex);

		schedule();

		mutex_lock(&info->trans_mutex);
		finish_wait(&info->transaction_wait, &wait);
	}
	mutex_unlock(&info->trans_mutex);
	return 0;
}

/*
 * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on
 * all of them
 */
int btrfs_drop_dead_root(struct btrfs_root *root)
{
	int ret;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	unsigned long nr;

	while (1) {
		/*
		 * we don't want to jump in and create a bunch of
		 * delayed refs if the transaction is starting to close
		 */
		wait_transaction_pre_flush(tree_root->fs_info);
		trans = btrfs_start_transaction(tree_root, 1);

		/*
		 * we've joined a transaction, make sure it isn't
		 * closing right now
		 */
		if (trans->transaction->delayed_refs.flushing) {
			btrfs_end_transaction(trans, tree_root);
			continue;
		}

		ret = btrfs_drop_snapshot(trans, root);
		if (ret != -EAGAIN)
			break;

		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			break;

		nr = trans->blocks_used;
		ret = btrfs_end_transaction(trans, tree_root);
		BUG_ON(ret);

		btrfs_btree_balance_dirty(tree_root, nr);
		cond_resched();
	}
	BUG_ON(ret);

	ret = btrfs_del_root(trans, tree_root, &root->root_key);
	BUG_ON(ret);

	nr = trans->blocks_used;
	ret = btrfs_end_transaction(trans, tree_root);
	BUG_ON(ret);

	free_extent_buffer(root->node);
	free_extent_buffer(root->commit_root);
	kfree(root);

	btrfs_btree_balance_dirty(tree_root, nr);
	return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	int ret;
	u64 objectid;

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		ret = -ENOMEM;
		goto fail;
	}
	ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
	if (ret)
		goto fail;

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));

	key.objectid = objectid;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);

	old = btrfs_lock_root_node(root);
	btrfs_cow_block(trans, root, old, NULL, 0, &old);
	btrfs_set_lock_blocking(old);

	btrfs_copy_root(trans, root, old, &tmp, objectid);
	btrfs_tree_unlock(old);
	free_extent_buffer(old);

	btrfs_set_root_node(new_root_item, tmp);
	ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
				new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret)
		goto fail;

	key.offset = (u64)-1;
	memcpy(&pending->root_key, &key, sizeof(key));
fail:
	kfree(new_root_item);
	return ret;
}

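/*
 * finish_pending_snapshot does the directory updates for a snapshot after
 * the commit: it links the new root into the parent directory and adds the
 * root backref and forward ref items in the tree of tree roots.
 */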
static noinline int finish_pending_snapshot(struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	int ret;
	int namelen;
	u64 index = 0;
	struct btrfs_trans_handle *trans;
	struct inode *parent_inode;
	struct inode *inode;
	struct btrfs_root *parent_root;

	parent_inode = pending->dentry->d_parent->d_inode;
	parent_root = BTRFS_I(parent_inode)->root;
	trans = btrfs_join_transaction(parent_root, 1);

	/*
	 * insert the directory item
	 */
	namelen = strlen(pending->name);
	ret = btrfs_set_inode_index(parent_inode, &index);
	ret = btrfs_insert_dir_item(trans, parent_root,
			    pending->name, namelen,
			    parent_inode->i_ino,
			    &pending->root_key, BTRFS_FT_DIR, index);

	if (ret)
		goto fail;

	btrfs_i_size_write(parent_inode, parent_inode->i_size + namelen * 2);
	ret = btrfs_update_inode(trans, parent_root, parent_inode);
	BUG_ON(ret);

	/* add the backref first */
	ret = btrfs_add_root_ref(trans, parent_root->fs_info->tree_root,
				 pending->root_key.objectid,
				 BTRFS_ROOT_BACKREF_KEY,
				 parent_root->root_key.objectid,
				 parent_inode->i_ino, index, pending->name,
				 namelen);

	BUG_ON(ret);

	/* now add the forward ref */
	ret = btrfs_add_root_ref(trans, parent_root->fs_info->tree_root,
				 parent_root->root_key.objectid,
				 BTRFS_ROOT_REF_KEY,
				 pending->root_key.objectid,
				 parent_inode->i_ino, index, pending->name,
				 namelen);

	inode = btrfs_lookup_dentry(parent_inode, pending->dentry);
	d_instantiate(pending->dentry, inode);
fail:
	btrfs_end_transaction(trans, fs_info->fs_root);
	return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret;

	list_for_each_entry(pending, head, list) {
		ret = create_pending_snapshot(trans, fs_info, pending);
		BUG_ON(ret);
	}
	return 0;
}

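/*
 * walk the list of pending snapshots and do the directory inserts for each
 * one, freeing the pending entries as we go.
 */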
static noinline int finish_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret;

	while (!list_empty(head)) {
		pending = list_entry(head->next,
				     struct btrfs_pending_snapshot, list);
		ret = finish_pending_snapshot(fs_info, pending);
		BUG_ON(ret);
		list_del(&pending->list);
		kfree(pending->name);
		kfree(pending);
	}
	return 0;
}

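/*
 * copy the current tree root and chunk root pointers from their root items
 * into the in-memory copy of the super block, ready to be written at the
 * end of the commit.
 */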
static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = &root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
}

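/*
 * commit the running transaction: flush delayed refs, wait for the other
 * writers to finish, create any pending snapshots, write the dirty tree
 * blocks and finally the super block.  Returns after the commit is fully
 * done and the commit_done waiters have been woken.
 */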
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	unsigned long timeout = 1;
	struct btrfs_transaction *cur_trans;
	struct btrfs_transaction *prev_trans = NULL;
	struct extent_io_tree *pinned_copy;
	DEFINE_WAIT(wait);
	int ret;
	int should_grow = 0;
	unsigned long now = get_seconds();
	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);

	btrfs_run_ordered_operations(root, 0);

	/* make a pass through all the delayed refs we have so far
	 * any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	cur_trans = trans->transaction;
	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;

	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	mutex_lock(&root->fs_info->trans_mutex);
	if (cur_trans->in_commit) {
		cur_trans->use_count++;
		mutex_unlock(&root->fs_info->trans_mutex);
		btrfs_end_transaction(trans, root);

		ret = wait_for_commit(root, cur_trans);
		BUG_ON(ret);

		mutex_lock(&root->fs_info->trans_mutex);
		put_transaction(cur_trans);
		mutex_unlock(&root->fs_info->trans_mutex);

		return 0;
	}

	pinned_copy = kmalloc(sizeof(*pinned_copy), GFP_NOFS);
	if (!pinned_copy)
		return -ENOMEM;

	extent_io_tree_init(pinned_copy,
			     root->fs_info->btree_inode->i_mapping, GFP_NOFS);

	trans->transaction->in_commit = 1;
	trans->transaction->blocked = 1;
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			prev_trans->use_count++;
			mutex_unlock(&root->fs_info->trans_mutex);

			wait_for_commit(root, prev_trans);

			mutex_lock(&root->fs_info->trans_mutex);
			put_transaction(prev_trans);
		}
	}

	if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
		should_grow = 1;

	do {
		int snap_pending = 0;
		joined = cur_trans->num_joined;
		if (!list_empty(&trans->transaction->pending_snapshots))
			snap_pending = 1;

		WARN_ON(cur_trans != trans->transaction);
		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		if (cur_trans->num_writers > 1)
			timeout = MAX_SCHEDULE_TIMEOUT;
		else if (should_grow)
			timeout = 1;

		mutex_unlock(&root->fs_info->trans_mutex);

		if (flush_on_commit || snap_pending) {
			if (snap_pending)
				btrfs_start_delalloc_inodes(root);
			ret = btrfs_wait_ordered_extents(root, 1);
			BUG_ON(ret);
		}

		/*
		 * rename doesn't use btrfs_join_transaction, so, once we
		 * set the transaction to blocked above, we aren't going
		 * to get any new ordered operations.  We can safely run
		 * it here and know for sure that nothing new will be added
		 * to the list
		 */
		btrfs_run_ordered_operations(root, 1);

		smp_mb();
		if (cur_trans->num_writers > 1 || should_grow)
			schedule_timeout(timeout);

		mutex_lock(&root->fs_info->trans_mutex);
		finish_wait(&cur_trans->writer_wait, &wait);
	} while (cur_trans->num_writers > 1 ||
		 (should_grow && cur_trans->num_joined != joined));

	ret = create_pending_snapshots(trans, root->fs_info);
	BUG_ON(ret);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	WARN_ON(cur_trans != trans->transaction);

	/* btrfs_commit_tree_roots is responsible for getting the
	 * various roots consistent with each other.  Every pointer
	 * in the tree of tree roots has to point to the most up to date
	 * root for every subvolume and other tree.  So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	BUG_ON(ret);

	/* commit_fs_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	BUG_ON(ret);

	cur_trans = root->fs_info->running_transaction;
	spin_lock(&root->fs_info->new_trans_lock);
	root->fs_info->running_transaction = NULL;
	spin_unlock(&root->fs_info->new_trans_lock);

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	free_extent_buffer(root->fs_info->tree_root->commit_root);
	root->fs_info->tree_root->commit_root =
				btrfs_root_node(root->fs_info->tree_root);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	free_extent_buffer(root->fs_info->chunk_root->commit_root);
	root->fs_info->chunk_root->commit_root =
				btrfs_root_node(root->fs_info->chunk_root);

	update_super_roots(root);

	if (!root->fs_info->log_root_recovering) {
		btrfs_set_super_log_root(&root->fs_info->super_copy, 0);
		btrfs_set_super_log_root_level(&root->fs_info->super_copy, 0);
	}

	memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
	       sizeof(root->fs_info->super_copy));

	btrfs_copy_pinned(root, pinned_copy);

	trans->transaction->blocked = 0;

	wake_up(&root->fs_info->transaction_wait);

	mutex_unlock(&root->fs_info->trans_mutex);
	ret = btrfs_write_and_wait_transaction(trans, root);
	BUG_ON(ret);
	write_ctree_super(trans, root, 0);

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root, pinned_copy);
	kfree(pinned_copy);

	/* do the directory inserts of any pending snapshot creations */
	finish_pending_snapshots(trans, root->fs_info);

	mutex_lock(&root->fs_info->trans_mutex);

	cur_trans->commit_done = 1;

	root->fs_info->last_trans_committed = cur_trans->transid;
	wake_up(&cur_trans->commit_wait);

	put_transaction(cur_trans);
	put_transaction(cur_trans);

	mutex_unlock(&root->fs_info->trans_mutex);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->trans_mutex);
	list_splice_init(&fs_info->dead_roots, &list);
	mutex_unlock(&fs_info->trans_mutex);

	while (!list_empty(&list)) {
		root = list_entry(list.next, struct btrfs_root, root_list);
		list_del_init(&root->root_list);
		btrfs_drop_dead_root(root);
	}
	return 0;
}