// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 */

#include <linux/slab.h>
#include <linux/iversion.h>
#include <linux/sched/mm.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"
#include "qgroup.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	refcount_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}
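
/*
 * A sketch of what "continuous" means here, with illustrative keys that are
 * not from the original source: a dir index item with key
 * (257 DIR_INDEX 5) and one with key (257 DIR_INDEX 6) are continuous,
 * because they share the objectid and type and the second offset is exactly
 * the first offset + 1.  The batched insert/delete helpers below rely on
 * this to handle adjacent dir index items in a single leaf operation.
 */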

static struct btrfs_delayed_node *btrfs_get_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	struct btrfs_delayed_node *node;

	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		refcount_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

	if (node) {
		if (btrfs_inode->delayed_node) {
			refcount_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}

		/*
		 * It's possible that we're racing into the middle of removing
		 * this node from the radix tree.  In this case, the refcount
		 * was zero and it should never go back to one.  Just return
		 * NULL like it was never in the radix at all; our release
		 * function is in the process of removing it.
		 *
		 * Some implementations of refcount_inc refuse to bump the
		 * refcount once it has hit zero.  If we don't do this dance
		 * here, refcount_inc() may decide to just WARN_ONCE() instead
		 * of actually bumping the refcount.
		 *
		 * If this node is properly in the radix, we want to bump the
		 * refcount twice, once for the inode and once for this get
		 * operation.
		 */
		if (refcount_inc_not_zero(&node->refs)) {
			refcount_inc(&node->refs);
			btrfs_inode->delayed_node = node;
		} else {
			node = NULL;
		}

		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}
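
/*
 * Note on the reference counts taken above (a descriptive summary, not part
 * of the original source): a delayed node reachable from the btrfs inode
 * always holds one reference for the inode cache itself, and every
 * btrfs_get_delayed_node() caller holds one more.  That is why the radix
 * tree hit path bumps the refcount twice when it also caches the node in
 * btrfs_inode->delayed_node, but only once when the node was cached already.
 */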

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	refcount_set(&node->refs, 2);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}
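
/*
 * A minimal usage sketch for the create path above, assuming a single
 * caller (illustrative only, not from the original source):
 *
 *	node = btrfs_get_or_create_delayed_node(inode);
 *	if (IS_ERR(node))
 *		return PTR_ERR(node);
 *	... queue delayed items on node ...
 *	btrfs_release_delayed_node(node);
 *
 * The -EEXIST retry via the "again" label handles the race where another
 * task inserted a node for the same inode between our lookup and insert.
 */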

/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (refcount_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;

		spin_lock(&root->inode_lock);
		/*
		 * Once our refcount goes to zero, nobody is allowed to bump it
		 * back up.  We can delete it now.
		 */
		ASSERT(refcount_read(&delayed_node->refs) == 0);
		radix_tree_delete(&root->delayed_nodes_tree,
				  delayed_node->inode_id);
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;

	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		refcount_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @root:	the rbtree root to search (a delayed node's ins_root or del_root)
 * @key:	the key to look up
 * @prev:	used to store the prev item if the right item isn't found
 * @next:	used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}
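
/*
 * Usage note for __btrfs_lookup_delayed_item() (descriptive, not from the
 * original source): on an exact hit the item is returned and *prev/*next
 * are not meaningful; on a miss it returns NULL and, when the caller passed
 * non-NULL pointers, *prev and *next are set to the items that would
 * immediately precede and follow the key in the rbtree (or NULL at either
 * end).  This mirrors the usual rbtree search-with-neighbors pattern.
 */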

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
}

static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/* atomic_dec_return implies a barrier */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
		cond_wake_up_nomb(&delayed_root->wait);
}
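
/*
 * Why the wakeup above is batched (descriptive summary): waiters in
 * btrfs_balance_delayed_items() sleep until either the item count drops
 * below BTRFS_DELAYED_BACKGROUND (128) or BTRFS_DELAYED_BATCH (16) more
 * items have been finished since they sampled items_seq, so finishing a
 * single item only needs to wake them on those boundaries instead of on
 * every completion.
 */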

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	/*
	 * Here we migrate space rsv from the transaction rsv, since we have
	 * already reserved space when starting a transaction.  So no need to
	 * reserve qgroup space here.
	 */
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}
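
/*
 * Reservation flow in short (descriptive, assuming the usual transaction
 * life cycle): a delayed item does not reserve fresh space.  It migrates one
 * metadata unit that btrfs_start_transaction() already reserved from
 * trans->block_rsv into fs_info->delayed_block_rsv, where it sits until the
 * item is inserted or dropped and btrfs_delayed_item_release_metadata()
 * gives it back.  When the transaction carried no reservation
 * (trans->bytes_reserved == 0), the item carries none either.
 */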

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	/*
	 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
	 * to release/reserve qgroup space.
	 */
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv,
				item->bytes_reserved);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed.  This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we always reserve enough to update the inode item.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_qgroup_reserve_meta_prealloc(root,
				fs_info->nodesize, true);
		if (ret < 0)
			return ret;
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN) {
			ret = -ENOSPC;
			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
		}
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		} else {
			btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize);
		}
		return ret;
	}

	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						 struct btrfs_delayed_node *node,
						 bool qgroup_free)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv,
				node->bytes_reserved);
	if (qgroup_free)
		btrfs_qgroup_free_meta_prealloc(node->root,
				node->bytes_reserved);
	else
		btrfs_qgroup_convert_reserved_meta(node->root,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(fs_info, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of the continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory space, but it might cause the task
	 * to sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}
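
/*
 * Leaf space accounting in the loop above (a worked example with
 * illustrative numbers, not from the original source): each batched entry
 * costs data_len plus sizeof(struct btrfs_item) for its item header, so ten
 * dir index items of 70 bytes each consume
 * 10 * (70 + sizeof(struct btrfs_item)) bytes of the leaf.  The while
 * condition stops the batch before that running sum would exceed the free
 * space reported by btrfs_leaf_free_space().
 */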

/*
 * This helper can just do simple insertion that needn't extend item for new
 * data, such as directory name index insertion, inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	unsigned int nofs_flag;
	char *ptr;
	int ret;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}

/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	unsigned int nofs_flag;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}

static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	unsigned int nofs_flag;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	memalloc_nofs_restore(nofs_flag);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for the inode who has only one link,
	 * so there is only one iref. The case that several irefs are
	 * in the same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}
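
/*
 * Semantics of the nr argument (descriptive summary): nr < 0 means run
 * every delayed node, as btrfs_run_delayed_items() below does with -1;
 * nr > 0 processes at most nr nodes, which btrfs_run_delayed_items_nr()
 * uses to bound the work done from a single caller.
 */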

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
{
	return __btrfs_run_delayed_items(trans, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
{
	return __btrfs_run_delayed_items(trans, nr);
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	do {
		if (atomic_read(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND / 2)
			break;

		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
		if (!delayed_node)
			break;

		path->leave_spinning = 1;
		root = delayed_node->root;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			btrfs_release_path(path);
			btrfs_release_prepared_delayed_node(delayed_node);
			total_done++;
			continue;
		}

		block_rsv = trans->block_rsv;
		trans->block_rsv = &root->fs_info->delayed_block_rsv;

		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

		trans->block_rsv = block_rsv;
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty_nodelay(root->fs_info);

		btrfs_release_path(path);
		btrfs_release_prepared_delayed_node(delayed_node);
		total_done++;

	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
		 || total_done < async_work->nr);

	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
			btrfs_async_run_delayed_root, NULL, NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}

void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

	if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
		btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}
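
/*
 * Throttling thresholds used above (descriptive summary): below
 * BTRFS_DELAYED_BACKGROUND (128) queued items nothing happens; from 128 up
 * to BTRFS_DELAYED_WRITEBACK (512) a background worker is kicked to flush
 * BTRFS_DELAYED_BATCH (16) nodes asynchronously; at 512 and above the
 * caller itself blocks in wait_event_interruptible() until could_end_wait()
 * reports enough progress.
 */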

/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   const char *name, int name_len,
				   struct btrfs_inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(trans->fs_info,
			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(node->root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_inode *dir, u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	item_key.type = BTRFS_DIR_INDEX_KEY;
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
						  &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(trans->fs_info,
			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->objectid, node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}

int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we have held i_mutex of this directory, it is impossible that
	 * a new directory index is added into the delayed node and index_cnt
	 * is updated now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	inode->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

bool btrfs_readdir_get_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return false;

	/*
	 * We can only do one readdir with delayed items at a time because of
	 * item->readdir_list.
	 */
	inode_unlock_shared(inode);
	inode_lock(inode);

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check it is going to be freed
	 * or not.
	 *
	 * Besides that, this function is used to read dir, we do not
	 * insert/delete delayed items in this period. So we also needn't
	 * requeue or dequeue this delayed node.
	 */
	refcount_dec(&delayed_node->refs);

	return true;
}

void btrfs_readdir_put_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	/*
	 * The VFS is going to do up_read(), so we need to downgrade back to a
	 * read lock.
	 */
	downgrade_write(&inode->i_rwsem);
}

int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr;
	int ret = 0;

	list_for_each_entry(curr, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;
		if (curr->key.offset == index) {
			ret = 1;
			break;
		}
	}
	return ret;
}
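
/*
 * A sketch of how readdir consumes this (illustrative, simplified from the
 * callers in inode.c): for each index found in the on-disk directory,
 *
 *	if (btrfs_should_delete_dir_index(&del_list, index))
 *		continue;	(deleted in memory, skip it)
 *
 * so entries whose deletion is still only recorded as a delayed item are
 * hidden from the listing, before btrfs_readdir_delayed_dir_index() then
 * emits the entries whose insertion is still delayed.
 */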

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 *
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of the delayed item is impossible. So
	 * we needn't lock them. And we have held i_mutex of the
	 * directory, nobody can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (refcount_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
				 location.objectid, d_type);

		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
		ctx->pos++;
	}
	return 0;
}

static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item,
				       inode_peek_iversion(inode));
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(&inode_item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);
}

int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

	inode_set_iversion_queried(inode,
				   btrfs_stack_inode_sequence(inode_item));
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_stack_timespec_sec(&inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_stack_timespec_nsec(&inode_item->otime);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_delayed_node *delayed_node;

	/*
	 * we don't do delayed inode updates during log recovery because it
	 * leads to enospc problems.  This means we also can't do
	 * delayed inode refs
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for inode ref deletion because:
	 * - We ONLY do async inode ref deletion for the inode who has only
	 *   one link(i_nlink == 1), it means there is only one inode ref.
	 *   And in most case, the inode ref and the inode item are in the
	 *   same leaf, and we will deal with them at the same time.
	 *   Since we are sure we will reserve the space for the inode item,
	 *   it is unnecessary to reserve space for inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   we also needn't worry about enospc problem, because we reserve
	 *   much more space for the inode update than it needs.
	 * - At the worst, we can steal some space from the global reservation.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}

void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;
		for (i = 0; i < n; i++) {
			/*
			 * Don't increase refs in case the node is dead and
			 * about to be removed from the tree in the loop below
			 */
			if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
				delayed_nodes[i] = NULL;
		}
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			if (!delayed_nodes[i])
				continue;
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}

void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_node *curr_node, *prev_node;

	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}