/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16
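/*
 * How these thresholds are used (see btrfs_balance_delayed_items and
 * finish_one_item below): once the number of delayed items passes
 * BTRFS_DELAYED_BACKGROUND an async flush is kicked off, at
 * BTRFS_DELAYED_WRITEBACK the caller blocks until the count drops, and
 * waiters are woken up in batches of BTRFS_DELAYED_BATCH completed items.
 */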
static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}
static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	atomic_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}
static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}
static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
					struct btrfs_root *root)
{
	return root->fs_info->delayed_root;
}
static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
{
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	struct btrfs_delayed_node *node;

	node = ACCESS_ONCE(btrfs_inode->delayed_node);
	if (node) {
		atomic_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node) {
		if (btrfs_inode->delayed_node) {
			atomic_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}
		btrfs_inode->delayed_node = node;
		/* can be accessed and cached in the inode */
		atomic_add(2, &node->refs);
		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}
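/*
 * Note on reference counting: the atomic_add(2) above accounts for two new
 * references at once, one for the pointer cached in btrfs_inode and one for
 * the caller.  Queueing a node on the node_list takes another reference.
 * Every lookup helper returns the node with a reference held; callers drop
 * it with btrfs_release_delayed_node().
 */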
/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
							struct inode *inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	int ret;

again:
	node = btrfs_get_delayed_node(inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	atomic_add(2, &node->refs);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}
/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		atomic_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}
/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		atomic_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}
static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}
static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}
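/*
 * Drop one reference on the delayed node.  If the node still has pending
 * items it is (re)queued for further processing, otherwise it is dequeued.
 * The last reference also removes the node from the radix tree; the refcount
 * is re-read under root->inode_lock so a concurrent lookup that just revived
 * the node cannot race with the free.
 */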
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (atomic_dec_and_test(&delayed_node->refs)) {
		bool free = false;
		struct btrfs_root *root = delayed_node->root;
		spin_lock(&root->inode_lock);
		if (atomic_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			free = true;
		}
		spin_unlock(&root->inode_lock);
		if (free)
			kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}
static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}
static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		atomic_set(&item->refs, 1);
	}
	return item;
}
/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @delayed_node: pointer to the delayed node
 * @key:	  the key to look up
 * @prev:	  used to store the prev item if the right item isn't found
 * @next:	  used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}
static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
}
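/*
 * Link the new item into the insertion or deletion rb-tree of the delayed
 * node (depending on @action), keyed and ordered by btrfs_comp_cpu_keys().
 * Returns -EEXIST if an item with the same key is already queued.
 */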
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}
static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/*
	 * atomic_dec_return implies a barrier for waitqueue_active
	 */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}
static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (atomic_dec_and_test(&item->refs))
			kfree(item);
	}
}
static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}
static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}
static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(root->fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(root, rsv,
				item->bytes_reserved);
}
static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;
	bool release = false;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	/*
	 * If our block_rsv is the delalloc block reserve then check and see if
	 * we have our extra reservation for updating the inode.  If not fall
	 * through and try to reserve space quickly.
	 *
	 * We used to try and steal from the delalloc block rsv or the global
	 * reserve, but we'd steal a full reservation, which isn't kind.  We are
	 * here through delalloc which means we've likely just cowed down close
	 * to the leaf that contains the inode, so we would steal less just
	 * doing the fallback inode update, so if we do end up having to steal
	 * from the global block rsv we hopefully only steal one or two blocks
	 * worth which is less likely to hurt us.
	 */
	if (src_rsv && src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
		spin_lock(&BTRFS_I(inode)->lock);
		if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
				       &BTRFS_I(inode)->runtime_flags))
			release = true;
		else
			src_rsv = NULL;
		spin_unlock(&BTRFS_I(inode)->lock);
	}

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed.  This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we're accounted for.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(root->fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	}

	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);

	/*
	 * Migrate only takes a reservation, it doesn't touch the size of the
	 * block_rsv.  This is to simplify people who don't normally have things
	 * migrated from their block rsv.  If they go to release their
	 * reservation, that will decrease the size as well, so if migrate
	 * reduced size we'd end up with a negative size.  But for the
	 * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
	 * but we could in fact do this reserve/migrate dance several times
	 * between the time we did the original reservation and we'd clean it
	 * up.  So to take care of this, release the space for the meta
	 * reservation here.  I think it may be time for a documentation page on
	 * how block rsvs work.
	 */
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	if (release) {
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 0);
		btrfs_block_rsv_release(root, src_rsv, num_bytes);
	}

	return ret;
}
static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
						 struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(root, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}
/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(root, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of the continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory space, but it might cause the task
	 * to sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}
/*
 * This helper can just do simple insertion that needn't extend item for new
 * data, such as directory name index insertion, inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}
/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}
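/*
 * Delete the leaf items that match the delayed deletion items following
 * @item, batching all consecutive dir index keys found in the same leaf
 * into a single btrfs_del_items() call.
 */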
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}
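/*
 * Process the deletion rb-tree of the node: look up each queued key in the
 * fs tree and delete the matching items in batches.  Deletion items whose
 * keys no longer exist on disk are simply dropped.
 */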
static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}
static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}
static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for an inode with only one link, so there
	 * is only one iref.  The case that several irefs are in the same
	 * item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(root, node);
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}
static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}
static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}
/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root, int nr)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	return __btrfs_run_delayed_items(trans, root, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, int nr)
{
	return __btrfs_run_delayed_items(trans, root, nr);
}
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}
int btrfs_commit_inode_delayed_inode(struct inode *inode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans, delayed_node->root);
	btrfs_btree_balance_dirty(delayed_node->root);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}
void btrfs_remove_delayed_node(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
	if (!delayed_node)
		return;

	BTRFS_I(inode)->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}
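/*
 * Background flushing of delayed nodes: a btrfs_async_delayed_work item is
 * queued on fs_info->delayed_workers and commits prepared delayed nodes
 * until either async_work->nr of them are done (nr == 0 means no limit) or
 * the total item count falls below BTRFS_DELAYED_BACKGROUND / 2.
 */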
struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

again:
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
		goto free_path;

	delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
	if (!delayed_node)
		goto free_path;

	path->leave_spinning = 1;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto release_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	trans->block_rsv = block_rsv;
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty_nodelay(root);

release_path:
	btrfs_release_path(path);
	total_done++;

	btrfs_release_prepared_delayed_node(delayed_node);
	if (async_work->nr == 0 || total_done < async_work->nr)
		goto again;

free_path:
	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}
static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 0;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
			btrfs_async_run_delayed_root, NULL, NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}
void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	delayed_root = btrfs_get_delayed_root(root);
	WARN_ON(btrfs_first_delayed_node(delayed_root));
}
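/*
 * A waiter in btrfs_balance_delayed_items may stop waiting once at least
 * BTRFS_DELAYED_BATCH items have completed since it sampled items_seq (the
 * sequence counter bumped by finish_one_item), the counter wrapped, or the
 * item count has fallen back below the background threshold.
 */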
static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}
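/*
 * Throttle callers that generate delayed items: below
 * BTRFS_DELAYED_BACKGROUND nothing is done, above it an async flush is
 * scheduled, and above BTRFS_DELAYED_WRITEBACK the caller blocks until
 * could_end_wait() is satisfied.
 */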
void btrfs_balance_delayed_items(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_fs_info *fs_info = root->fs_info;

	delayed_root = btrfs_get_delayed_root(root);

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}
/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, const char *name,
				   int name_len, struct inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(root->fs_info,
			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, struct inode *dir,
				   u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	item_key.type = BTRFS_DIR_INDEX_KEY;
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(root->fs_info,
			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->objectid, node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}
int btrfs_inode_delayed_dir_index_count(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we have held i_mutex of this directory, it is impossible that
	 * a new directory index is added into the delayed node and index_cnt
	 * is updated now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
bool btrfs_readdir_get_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return false;

	/*
	 * We can only do one readdir with delayed items at a time because of
	 * item->readdir_list.
	 */
	inode_unlock_shared(inode);
	inode_lock(inode);

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check it is going to be freed
	 * or not.
	 *
	 * Besides that, this function is used to read dir, we do not
	 * insert/delete delayed items in this period. So we also needn't
	 * requeue or dequeue this delayed node.
	 */
	atomic_dec(&delayed_node->refs);

	return true;
}
void btrfs_readdir_put_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}

	/*
	 * The VFS is going to do up_read(), so we need to downgrade back to a
	 * read lock.
	 */
	downgrade_write(&inode->i_rwsem);
}
int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr, *next;
	int ret;

	if (list_empty(del_list))
		return 0;

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;

		list_del(&curr->readdir_list);
		ret = (curr->key.offset == index);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (ret)
			return 1;
		else
			continue;
	}
	return 0;
}
/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 *
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of the delayed item is impossible. So
	 * we needn't lock them. And we have held i_mutex of the
	 * directory, nobody can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (atomic_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
				 location.objectid, d_type);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
	}
	return 0;
}
static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(&inode_item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);
}
int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

	inode->i_version = btrfs_stack_inode_sequence(inode_item);
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_stack_timespec_sec(&inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_stack_timespec_nsec(&inode_item->otime);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
int btrfs_delayed_delete_inode_ref(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	/*
	 * we don't do delayed inode updates during log recovery because it
	 * leads to enospc problems.  This means we also can't do
	 * delayed inode refs
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING,
		     &BTRFS_I(inode)->root->fs_info->flags))
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for inode ref deletion because:
	 * - We ONLY do async inode ref deletion for an inode that has only
	 *   one link (i_nlink == 1), which means there is only one inode ref.
	 *   And in most cases, the inode ref and the inode item are in the
	 *   same leaf, and we will deal with them at the same time.
	 *   Since we are sure we will reserve the space for the inode item,
	 *   it is unnecessary to reserve space for inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   we also needn't worry about the enospc problem, because we reserve
	 *   much more space for the inode update than it needs.
	 * - At the worst, we can steal some space from the global reservation.
	 *   It is very rare.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&BTRFS_I(inode)->root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(root, delayed_node);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}
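/*
 * Drop every delayed node of the root, walking the radix tree in chunks of
 * eight nodes at a time.  The pending delayed items are released rather
 * than run, so this is only for roots that are going away.
 */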
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;

		for (i = 0; i < n; i++)
			atomic_inc(&delayed_nodes[i]->refs);
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}
void btrfs_destroy_delayed_inodes(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}
);