/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/slab.h>
#include <linux/iversion.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"
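/*
 * Thresholds for the delayed-item machinery: background flushing starts once
 * more than BTRFS_DELAYED_BACKGROUND items are pending, callers begin to be
 * throttled at BTRFS_DELAYED_WRITEBACK, and completions are accounted in
 * units of BTRFS_DELAYED_BATCH.
 */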
#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;
int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}
static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	refcount_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}
static struct btrfs_delayed_node *btrfs_get_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	struct btrfs_delayed_node *node;

	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		refcount_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

	if (node) {
		if (btrfs_inode->delayed_node) {
			refcount_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}

		/*
		 * It's possible that we're racing into the middle of removing
		 * this node from the radix tree.  In this case, the refcount
		 * was zero and it should never go back to one.  Just return
		 * NULL like it was never in the radix at all; our release
		 * function is in the process of removing it.
		 *
		 * Some implementations of refcount_inc refuse to bump the
		 * refcount once it has hit zero.  If we don't do this dance
		 * here, refcount_inc() may decide to just WARN_ONCE() instead
		 * of actually bumping the refcount.
		 *
		 * If this node is properly in the radix, we want to bump the
		 * refcount twice, once for the inode and once for this get
		 * operation.
		 */
		if (refcount_inc_not_zero(&node->refs)) {
			refcount_inc(&node->refs);
			btrfs_inode->delayed_node = node;
		} else {
			node = NULL;
		}

		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}
/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	refcount_set(&node->refs, 2);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}
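/*
 * node_list tracks every delayed node with pending work; prepare_list tracks
 * the subset handed to the background flush worker. Both lists are protected
 * by delayed_root->lock.
 */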
/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}
static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (refcount_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;

		spin_lock(&root->inode_lock);
		/*
		 * Once our refcount goes to zero, nobody is allowed to bump it
		 * back up.  We can delete it now.
		 */
		ASSERT(refcount_read(&delayed_node->refs) == 0);
		radix_tree_delete(&root->delayed_nodes_tree,
				  delayed_node->inode_id);
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}
static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;

	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		refcount_set(&item->refs, 1);
	}
	return item;
}
/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @root:  the rb-root of the delayed node to look up in
 * @key:   the key to look up
 * @prev:  used to store the prev item if the right item isn't found
 * @next:  used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
}
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}
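/*
 * Every queued item bumps delayed_root->items; finish_one_item() below drops
 * the counter and advances items_seq, waking throttled callers either when
 * the backlog falls under BTRFS_DELAYED_BACKGROUND or once per
 * BTRFS_DELAYED_BATCH completions.
 */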
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/*
	 * atomic_dec_return implies a barrier for waitqueue_active
	 */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}
static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}
static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_fs_info *fs_info,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv,
				item->bytes_reserved);
}
static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed.  This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we always reserve enough to update the inode item.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	}

	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						 struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}
/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(fs_info, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of the continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need allocate some memory space, but it might cause the task
	 * to sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the patch to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(fs_info, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}
/*
 * This helper can just do simple insertion that needn't extend item for new
 * data, such as directory name index insertion, inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(fs_info, delayed_item);
	return 0;
}
/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(fs_info, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}
static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}
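/*
 * The two helpers below drop the INODE_DIRTY / DEL_IREF accounting on a
 * delayed node once the corresponding update has been applied (or discarded)
 * and credit one finished item back to the delayed root.
 */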
static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for the inode who has only one link,
	 * so there is only one iref. The case that several irefs are
	 * in the same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node);
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}
static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}
/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info)
{
	return __btrfs_run_delayed_items(trans, fs_info, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info, int nr)
{
	return __btrfs_run_delayed_items(trans, fs_info, nr);
}
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}
int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}
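/*
 * Detach the cached delayed node from the btrfs inode and drop the reference
 * that the inode held on it.
 */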
void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	do {
		if (atomic_read(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND / 2)
			break;

		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
		if (!delayed_node)
			break;

		path->leave_spinning = 1;
		root = delayed_node->root;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			btrfs_release_path(path);
			btrfs_release_prepared_delayed_node(delayed_node);
			total_done++;
			continue;
		}

		block_rsv = trans->block_rsv;
		trans->block_rsv = &root->fs_info->delayed_block_rsv;

		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

		trans->block_rsv = block_rsv;
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty_nodelay(root->fs_info);

		btrfs_release_path(path);
		btrfs_release_prepared_delayed_node(delayed_node);
		total_done++;

	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
		 || total_done < async_work->nr);

	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}
static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
			btrfs_async_run_delayed_root, NULL, NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}
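/*
 * A throttled caller may stop waiting once items_seq has advanced by a full
 * batch since it started waiting (or has wrapped), or once the backlog has
 * dropped below the background threshold again.
 */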
static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}

void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

	if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
		btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}
/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   const char *name, int name_len,
				   struct btrfs_inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, delayed_item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(fs_info,
			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
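/*
 * If the dir index being deleted is still pending in the insertion tree (it
 * never reached the btree), the insertion and deletion cancel out: drop the
 * queued insertion item instead of queueing a deletion item.
 */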
static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(fs_info, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_inode *dir, u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	item_key.type = BTRFS_DIR_INDEX_KEY;
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(fs_info,
			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->objectid, node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}
int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we have held i_mutex of this directory, it is impossible that
	 * a new directory index is added into the delayed node and index_cnt
	 * is updated now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	inode->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
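/*
 * Snapshot the pending insertion and deletion items onto the caller's lists
 * so readdir can merge them with the directory entries already on disk.
 */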
bool btrfs_readdir_get_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return false;

	/*
	 * We can only do one readdir with delayed items at a time because of
	 * item->readdir_list.
	 */
	inode_unlock_shared(inode);
	inode_lock(inode);

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check it is going to be freed
	 * or not.
	 *
	 * Besides that, this function is used to read dir, we do not
	 * insert/delete delayed items in this period. So we also needn't
	 * requeue or dequeue this delayed node.
	 */
	refcount_dec(&delayed_node->refs);

	return true;
}
void btrfs_readdir_put_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	/*
	 * The VFS is going to do up_read(), so we need to downgrade back to a
	 * read lock.
	 */
	downgrade_write(&inode->i_rwsem);
}
int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr;
	int ret = 0;

	list_for_each_entry(curr, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;
		if (curr->key.offset == index) {
			ret = 1;
			break;
		}
	}
	return ret;
}
/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 *
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of the delayed item is impossible. So
	 * we needn't lock them. And we have held i_mutex of the
	 * directory, nobody can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (refcount_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
				 location.objectid, d_type);

		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
		ctx->pos++;
	}
	return 0;
}
static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item,
				       inode_peek_iversion(inode));
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(&inode_item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);
}
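/*
 * Populate the VFS inode from a delayed inode item that has not reached the
 * btree yet; returns -ENOENT if no dirty delayed inode item is cached.
 */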
int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

	inode_set_iversion_queried(inode,
				   btrfs_stack_inode_sequence(inode_item));
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_stack_timespec_sec(&inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_stack_timespec_nsec(&inode_item->otime);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_delayed_node *delayed_node;

	/*
	 * we don't do delayed inode updates during log recovery because it
	 * leads to enospc problems.  This means we also can't do
	 * delayed inode refs
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for inode ref deletion because:
	 * - We ONLY do async inode ref deletion for the inode who has only
	 *   one link(i_nlink == 1), it means there is only one inode ref.
	 *   And in most case, the inode ref and the inode item are in the
	 *   same leaf, and we will deal with them at the same time.
	 *   Since we are sure we will reserve the space for the inode item,
	 *   it is unnecessary to reserve space for inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   We also needn't worry about enospc problem, because we reserve
	 *   much more space for the inode update than it needs.
	 * - At the worst, we can steal some space from the global reservation.
	 *   It is very rare.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(fs_info, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(fs_info, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(fs_info, delayed_node);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}
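/*
 * Walk the radix tree in gang-lookup batches (eight nodes at a time) and
 * kill every delayed node belonging to this root.
 */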
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;

		for (i = 0; i < n; i++)
			refcount_inc(&delayed_nodes[i]->refs);
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}

void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_node *curr_node, *prev_node;

	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}