// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 */

#include <linux/slab.h>
#include <linux/iversion.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"
#include "qgroup.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16
static struct kmem_cache *delayed_node_cache;
int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}
static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	refcount_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT_CACHED;
	delayed_node->del_root = RB_ROOT_CACHED;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}
static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}
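
/*
 * Note: only DIR_INDEX keys qualify.  E.g. for a directory with objectid
 * 256, delayed items keyed (256 DIR_INDEX 100) and (256 DIR_INDEX 101) are
 * continuous; the batch insertion/deletion helpers below use this test to
 * group adjacent dir index items into a single leaf operation.
 */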
static struct btrfs_delayed_node *btrfs_get_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	struct btrfs_delayed_node *node;

	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		refcount_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

	if (node) {
		if (btrfs_inode->delayed_node) {
			refcount_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}

		/*
		 * It's possible that we're racing into the middle of removing
		 * this node from the radix tree.  In this case, the refcount
		 * was zero and it should never go back to one.  Just return
		 * NULL like it was never in the radix at all; our release
		 * function is in the process of removing it.
		 *
		 * Some implementations of refcount_inc refuse to bump the
		 * refcount once it has hit zero.  If we don't do this dance
		 * here, refcount_inc() may decide to just WARN_ONCE() instead
		 * of actually bumping the refcount.
		 *
		 * If this node is properly in the radix, we want to bump the
		 * refcount twice, once for the inode and once for this get
		 * operation.
		 */
		if (refcount_inc_not_zero(&node->refs)) {
			refcount_inc(&node->refs);
			btrfs_inode->delayed_node = node;
		} else {
			node = NULL;
		}

		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}
/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	refcount_set(&node->refs, 2);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}
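
/*
 * Note on the create race: two tasks may both miss the lookup and allocate
 * a new node.  Whoever loses radix_tree_insert() gets -EEXIST, frees its
 * copy and jumps back to "again", where btrfs_get_delayed_node() then finds
 * and takes a reference on the winner's node.
 */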
/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}
/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}
static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}
static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (refcount_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;

		spin_lock(&root->inode_lock);
		/*
		 * Once our refcount goes to zero, nobody is allowed to bump it
		 * back up.  We can delete it now.
		 */
		ASSERT(refcount_read(&delayed_node->refs) == 0);
		radix_tree_delete(&root->delayed_nodes_tree,
				  delayed_node->inode_id);
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, delayed_node);
	}
}
static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}
static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		refcount_set(&item->refs, 1);
	}
	return item;
}
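
/*
 * Note: the item and its payload are allocated as one chunk; item->data is
 * a flexible array at the end of struct btrfs_delayed_item, so the payload
 * lives in the data_len bytes directly behind the struct.
 */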
/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @root:	the rb-tree root of the delayed node to search in
 * @key:	the key to look up
 * @prev:	used to store the prev item if the right item isn't found
 * @next:	used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}
static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
					   NULL, NULL);
}

static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root_cached *root;
	struct btrfs_delayed_item *item;
	int cmp;
	bool leftmost = true;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_root.rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else if (cmp > 0) {
			p = &(*p)->rb_left;
		} else {
			return -EEXIST;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}
static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/* atomic_dec_return implies a barrier */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
		cond_wake_up_nomb(&delayed_root->wait);
}
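
/*
 * Note: waiters are only woken when the backlog drops below
 * BTRFS_DELAYED_BACKGROUND (128) items or once every BTRFS_DELAYED_BATCH
 * (16) completed items (tracked by items_seq), which bounds how often a
 * large backlog generates wakeups.
 */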
static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root_cached *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase_cached(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}
static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	/*
	 * Here we migrate space rsv from the transaction rsv, since we have
	 * already reserved space when starting a transaction.  So there is no
	 * need to reserve qgroup space here.
	 */
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}
static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	/*
	 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
	 * to release/reserve qgroup space.
	 */
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv,
				item->bytes_reserved);
}
static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed.  This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we always reserve enough to update the inode item.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_qgroup_reserve_meta_prealloc(root,
				fs_info->nodesize, true);
		if (ret < 0)
			return ret;
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN) {
			ret = -ENOSPC;
			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
		}
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		} else {
			btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize);
		}
		return ret;
	}

	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	return ret;
}
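
/*
 * Summary of the two reservation paths above: if the transaction handle
 * brought no reservation (e.g. it came from btrfs_join_transaction()) and is
 * not the delalloc rsv, we reserve fresh bytes with BTRFS_RESERVE_NO_FLUSH
 * plus a qgroup prealloc; otherwise we just migrate already-reserved bytes
 * from the source rsv, and no extra qgroup accounting is needed.
 */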
static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						 struct btrfs_delayed_node *node,
						 bool qgroup_free)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv,
				node->bytes_reserved);
	if (qgroup_free)
		btrfs_qgroup_free_meta_prealloc(node->root,
				node->bytes_reserved);
	else
		btrfs_qgroup_convert_reserved_meta(node->root,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}
/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(fs_info, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of continuous items that we can insert in a batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory, but that might cause the task
	 * to sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}
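
/*
 * Sizing note for the batch loop above: each item consumes its data_len of
 * payload plus sizeof(struct btrfs_item) for the item header in the leaf,
 * so the loop stops before total_size would exceed the leaf's free space.
 * For a dir index item, data_len is sizeof(struct btrfs_dir_item) plus the
 * name length.
 */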
/*
 * This helper can just do simple insertion that needn't extend item for new
 * data, such as directory name index insertion, inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}
/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of dir index items that we can delete in a batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}
static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item that the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}
static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for an inode that has only one link,
	 * so there is only one iref. The case that several irefs are
	 * in the same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}
static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}
/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
{
	return __btrfs_run_delayed_items(trans, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
{
	return __btrfs_run_delayed_items(trans, nr);
}
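
/*
 * Note: "nr" is a cap on the number of delayed nodes to process; any
 * nr <= 0 (the -1 above) makes __btrfs_run_delayed_items() run the items
 * of every delayed node.
 */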
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}
int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}
void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	do {
		if (atomic_read(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND / 2)
			break;

		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
		if (!delayed_node)
			break;

		path->leave_spinning = 1;
		root = delayed_node->root;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			btrfs_release_path(path);
			btrfs_release_prepared_delayed_node(delayed_node);
			total_done++;
			continue;
		}

		block_rsv = trans->block_rsv;
		trans->block_rsv = &root->fs_info->delayed_block_rsv;

		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

		trans->block_rsv = block_rsv;
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty_nodelay(root->fs_info);

		btrfs_release_path(path);
		btrfs_release_prepared_delayed_node(delayed_node);
		total_done++;

	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
		 || total_done < async_work->nr);

	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}
static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
			btrfs_async_run_delayed_root, NULL, NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}
void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}
static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}
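
/*
 * Note: the waiter sampled "seq" from items_seq before sleeping.  It may
 * stop waiting once a full batch of items has been finished since then
 * (val >= seq + BTRFS_DELAYED_BATCH; val < seq catches the counter
 * wrapping) or once the backlog has fallen below BTRFS_DELAYED_BACKGROUND.
 */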
void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

	if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
		btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}
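
/*
 * Throttling summary: below BTRFS_DELAYED_BACKGROUND (128) items nothing is
 * done.  At or above BTRFS_DELAYED_WRITEBACK (512) the caller kicks an async
 * flush (nr == 0 lets the worker process up to BTRFS_DELAYED_WRITEBACK
 * nodes) and sleeps until could_end_wait() says a batch has completed.  In
 * between, it just queues background work for BTRFS_DELAYED_BATCH nodes.
 */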
/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   const char *name, int name_len,
				   struct btrfs_inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
	/*
	 * we have reserved enough space when we started a new transaction,
	 * so a metadata reservation failure here is impossible
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(trans->fs_info,
			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->root_key.objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(node->root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_inode *dir, u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	item_key.type = BTRFS_DIR_INDEX_KEY;
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
						  &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
	/*
	 * we have reserved enough space when we started a new transaction,
	 * so a metadata reservation failure here is impossible.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(trans->fs_info,
			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->root_key.objectid,
			  node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}
int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we hold the i_mutex of this directory, no new directory index
	 * can be added into the delayed node and index_cnt cannot be updated
	 * while we are here. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	inode->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
bool btrfs_readdir_get_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return false;

	/*
	 * We can only do one readdir with delayed items at a time because of
	 * item->readdir_list.
	 */
	inode_unlock_shared(inode);
	inode_lock(inode);

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check whether it is going to be
	 * freed or not.
	 *
	 * Besides that, this function is used to read dir; we do not
	 * insert/delete delayed items in this period. So we also needn't
	 * requeue or dequeue this delayed node.
	 */
	refcount_dec(&delayed_node->refs);

	return true;
}
void btrfs_readdir_put_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	/*
	 * The VFS is going to do up_read(), so we need to downgrade back to a
	 * read lock.
	 */
	downgrade_write(&inode->i_rwsem);
}
int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr;
	int ret = 0;

	list_for_each_entry(curr, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;
		if (curr->key.offset == index) {
			ret = 1;
			break;
		}
	}
	return ret;
}
/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 *
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of the delayed item is impossible. So
	 * we needn't lock them. And we have held i_mutex of the
	 * directory, nobody can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (refcount_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
				 location.objectid, d_type);

		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
		ctx->pos++;
	}
	return 0;
}
static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item,
				       inode_peek_iversion(inode));
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(&inode_item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);
}
int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

	inode_set_iversion_queried(inode,
				   btrfs_stack_inode_sequence(inode_item));
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_stack_timespec_sec(&inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_stack_timespec_nsec(&inode_item->otime);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_delayed_node *delayed_node;

	/*
	 * we don't do delayed inode updates during log recovery because it
	 * leads to enospc problems.  This means we also can't do
	 * delayed inode refs
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for the inode ref deletion because:
	 * - We ONLY do async inode ref deletion for an inode that has only
	 *   one link (i_nlink == 1), which means there is only one inode ref.
	 *   And in most cases, the inode ref and the inode item are in the
	 *   same leaf, and we will deal with them at the same time.
	 *   Since we are sure we will reserve the space for the inode item,
	 *   it is unnecessary to reserve space for the inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   we also needn't worry about an enospc problem, because we reserve
	 *   much more space for the inode update than it needs.
	 * - At the worst, we can steal some space from the global reservation.
	 *   It is very rare.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}
void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;

		for (i = 0; i < n; i++)
			refcount_inc(&delayed_nodes[i]->refs);
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}
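
/*
 * Note: the gang lookup above grabs at most ARRAY_SIZE(delayed_nodes) (8)
 * nodes per pass, advances inode_id past the last node found and loops, so
 * the radix tree is walked in chunks.  The refcounts are taken under
 * inode_lock, which keeps the nodes alive until they are killed and
 * released.
 */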
void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_node *curr_node, *prev_node;

	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}