/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	if (delayed_node_cache)
		kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	atomic_set(&delayed_node->refs, 0);
	delayed_node->count = 0;
	delayed_node->in_list = 0;
	delayed_node->inode_dirty = 0;
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	delayed_node->index_cnt = 0;
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
	delayed_node->bytes_reserved = 0;
	memset(&delayed_node->inode_item, 0, sizeof(delayed_node->inode_item));
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
							struct btrfs_root *root)
{
	return root->fs_info->delayed_root;
}

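/*
 * Look up the delayed node cached in the btrfs inode, or in the per-root
 * radix tree, and take a reference on it.  The first check is lockless;
 * the radix tree path re-checks under root->inode_lock and caches the
 * node in the inode so later lookups can take the fast path.
 */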
static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
{
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	struct btrfs_delayed_node *node;

	node = ACCESS_ONCE(btrfs_inode->delayed_node);
	if (node) {
		atomic_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node) {
		if (btrfs_inode->delayed_node) {
			atomic_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}
		btrfs_inode->delayed_node = node;
		atomic_inc(&node->refs);	/* can be accessed */
		atomic_inc(&node->refs);	/* cached in the inode */
		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
							struct inode *inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	int ret;

again:
	node = btrfs_get_delayed_node(inode);
	if (node)
		return node;

	node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	atomic_inc(&node->refs);	/* cached in the btrfs inode */
	atomic_inc(&node->refs);	/* can be accessed */

	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		kmem_cache_free(delayed_node_cache, node);
		spin_unlock(&root->inode_lock);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

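/*
 * A delayed node sits on two lists: n_list links it into the root's
 * node_list of all nodes with pending items, and p_list links it into
 * prepare_list, the queue the async worker pulls from.
 */
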
/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (node->in_list) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		atomic_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		node->in_list = 1;
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (node->in_list) {
		root->nodes--;
		atomic_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		node->in_list = 0;
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!node->in_list) {	/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

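/*
 * Drop a reference on the delayed node.  While the node still has pending
 * items it is requeued; once the last reference is gone it is removed from
 * the radix tree and freed, with a re-check of the refcount under
 * root->inode_lock to avoid racing with a concurrent lookup.
 */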
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (atomic_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;
		spin_lock(&root->inode_lock);
		if (atomic_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			kmem_cache_free(delayed_node_cache, delayed_node);
		}
		spin_unlock(&root->inode_lock);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

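/*
 * A delayed item is allocated with its data buffer appended in the same
 * allocation; item->data (data_len bytes) holds the item body that will
 * later be copied into the leaf.
 */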
static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		atomic_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @root:	the rb-root (insertion or deletion tree) to search
 * @key:	the key to look up
 * @prev:	used to store the prev item if the right item isn't found
 * @next:	used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
	return item;
}

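/*
 * Insert a delayed item into the insertion or deletion rb-tree of the
 * delayed node, keyed by btrfs_comp_cpu_keys().  Duplicate keys are
 * rejected with -EEXIST.
 */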
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

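/*
 * Account one finished item and wake up any waiters in
 * btrfs_balance_delayed_items() once the backlog drops below the
 * background threshold or a batch boundary is crossed.
 */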
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (atomic_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
					struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

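/*
 * Metadata for a delayed item is reserved by migrating space from the
 * transaction's block reservation into the global delayed_block_rsv, so
 * the item can be flushed later, outside of the original transaction
 * context.
 */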
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(root->fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(root, rsv,
				item->bytes_reserved);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;
	bool release = false;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed.  This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we're accounted for.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(root->fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	} else if (src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
		spin_lock(&BTRFS_I(inode)->lock);
		if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
				       &BTRFS_I(inode)->runtime_flags)) {
			spin_unlock(&BTRFS_I(inode)->lock);
			release = true;
			goto migrate;
		}
		spin_unlock(&BTRFS_I(inode)->lock);

		/*
		 * Ok we didn't have space pre-reserved.  This shouldn't happen
		 * too often but it can happen if we do delalloc to an existing
		 * inode which gets dirtied because of the time update, and
		 * then isn't touched again until after the transaction commits
		 * and then we try to write out the data.  First try to be nice
		 * and reserve something strictly for us.  If not be a pain and
		 * try to steal from the delalloc block rsv.
		 */
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		if (!ret)
			goto out;

		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
		if (!ret)
			goto out;

		/*
		 * Ok this is a problem, let's just steal from the global rsv
		 * since this really shouldn't happen that often.
		 */
		WARN_ON(1);
		ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
					      dst_rsv, num_bytes);
		goto out;
	}

migrate:
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);

out:
	/*
	 * Migrate only takes a reservation, it doesn't touch the size of the
	 * block_rsv.  This is to simplify people who don't normally have
	 * things migrated from their block rsv.  If they go to release their
	 * reservation, that will decrease the size as well, so if migrate
	 * reduced size we'd end up with a negative size.  But for the
	 * delalloc_meta_reserved stuff we will only know to drop 1
	 * reservation, but we could in fact do this reserve/migrate dance
	 * several times between the time we did the original reservation and
	 * we'd clean it up.  So to take care of this, release the space for
	 * the meta reservation here.  I think it may be time for a
	 * documentation page on how block rsvs work.
	 */
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	if (release) {
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 0);
		btrfs_block_rsv_release(root, src_rsv, num_bytes);
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
						 struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(root, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(root, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of the continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory space, but it might cause the task
	 * to sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

/*
 * This helper handles the simple insertions that don't need to extend the
 * item for new data, such as directory name index insertion and inode
 * insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}

/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

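/*
 * Delete the batch of continuous dir index items whose keys match the
 * delayed deletion items starting at path->slots[0], releasing the
 * metadata reservation of each item as it goes.
 */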
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node && delayed_node->inode_dirty) {
		BUG_ON(!delayed_node->root);
		delayed_node->inode_dirty = 0;
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = node->inode_id;
	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	key.offset = 0;

	ret = btrfs_lookup_inode(trans, root, path, &key, 1);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	btrfs_unlock_up_safe(path, 1);
	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	btrfs_delayed_inode_release_metadata(root, node);
	btrfs_release_delayed_inode(node);

	return 0;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!node->inode_dirty) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

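/*
 * Flush one delayed node: run the pending insertions first, then the
 * pending deletions, and finally write back the delayed inode item.  Any
 * failure aborts the whole sequence.
 */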
static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root, int nr)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, root, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	return __btrfs_run_delayed_items(trans, root, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, int nr)
{
	return __btrfs_run_delayed_items(trans, root, nr);
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct inode *inode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->inode_dirty) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->inode_dirty)
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans, delayed_node->root);
	btrfs_btree_balance_dirty(delayed_node->root);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
	if (!delayed_node)
		return;

	BTRFS_I(inode)->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

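/*
 * Background flushing: btrfs_wq_run_delayed_node() packages the delayed
 * root into a btrfs_async_delayed_work, and the worker below commits
 * prepared delayed nodes until async_work->nr nodes are done (or all of
 * them, when nr == 0).
 */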
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

again:
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
		goto free_path;

	delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
	if (!delayed_node)
		goto free_path;

	path->leave_spinning = 1;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto release_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);
	/*
	 * Maybe new delayed items have been inserted, so we need to requeue
	 * the work. Besides that, we must dequeue the empty delayed nodes
	 * to avoid the race between the delayed items balance and the worker.
	 * The race is like this:
	 *	Task1				Worker thread
	 *					count == 0, needn't requeue
	 *					  also needn't insert the
	 *					  delayed node into the prepare
	 *					  list again.
	 *	add lots of delayed items
	 *	queue the delayed node
	 *	  already in the list,
	 *	  and not in the prepare
	 *	  list, it means the delayed
	 *	  node is being dealt with
	 *	  by the worker.
	 *	do delayed items balance
	 *	  the delayed node is being
	 *	  dealt with by the worker
	 *	  now, just wait.
	 *	the worker goes to idle.
	 * Task1 will sleep until the transaction is committed.
	 */
	mutex_lock(&delayed_node->mutex);
	btrfs_dequeue_delayed_node(root->fs_info->delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	trans->block_rsv = block_rsv;
	btrfs_end_transaction_dmeta(trans, root);
	btrfs_btree_balance_dirty_nodelay(root);

release_path:
	btrfs_release_path(path);
	total_done++;

	btrfs_release_prepared_delayed_node(delayed_node);
	if (async_work->nr == 0 || total_done < async_work->nr)
		goto again;

free_path:
	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_root *root, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 0;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	async_work->work.func = btrfs_async_run_delayed_root;
	async_work->work.flags = 0;
	async_work->nr = nr;

	btrfs_queue_worker(&root->fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	delayed_root = btrfs_get_delayed_root(root);
	WARN_ON(btrfs_first_delayed_node(delayed_root));
}

static int refs_newer(struct btrfs_delayed_root *delayed_root,
		      int seq, int count)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + count)
		return 1;
	return 0;
}

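/*
 * Throttle delayed item producers.  Once the backlog reaches
 * BTRFS_DELAYED_BACKGROUND an async flush is kicked off, and above
 * BTRFS_DELAYED_WRITEBACK the caller sleeps until the worker has made
 * progress (see refs_newer()) or the backlog shrinks.
 */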
void btrfs_balance_delayed_items(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	int seq;

	delayed_root = btrfs_get_delayed_root(root);

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	seq = atomic_read(&delayed_root->items_seq);

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int ret;
		DEFINE_WAIT(__wait);

		ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
		if (ret)
			return;

		while (1) {
			prepare_to_wait(&delayed_root->wait, &__wait,
					TASK_INTERRUPTIBLE);

			if (refs_newer(delayed_root, seq,
				       BTRFS_DELAYED_BATCH) ||
			    atomic_read(&delayed_root->items) <
			    BTRFS_DELAYED_BACKGROUND) {
				break;
			}
			if (!signal_pending(current))
				schedule();
			else
				break;
		}
		finish_wait(&delayed_root->wait, &__wait);
	}

	btrfs_wq_run_delayed_node(delayed_root, root, BTRFS_DELAYED_BATCH);
}

/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, const char *name,
				   int name_len, struct inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		printk(KERN_ERR "err add delayed dir index item(name: %.*s) "
				"into the insertion tree of the delayed node "
				"(root id: %llu, inode id: %llu, errno: %d)\n",
				name_len, name, delayed_node->root->objectid,
				delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, struct inode *dir,
				   u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		printk(KERN_ERR "err add delayed dir index item(index: %llu) "
				"into the deletion tree of the delayed node "
				"(root id: %llu, inode id: %llu, errno: %d)\n",
				index, node->root->objectid, node->inode_id,
				ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}

int btrfs_inode_delayed_dir_index_count(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we have held i_mutex of this directory, it is impossible that
	 * a new directory index is added into the delayed node and index_cnt
	 * is updated now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
			     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check it is going to be freed
	 * or not.
	 *
	 * Besides that, this function is used to read dir, we do not
	 * insert/delete delayed items in this period. So we also needn't
	 * requeue or dequeue this delayed node.
	 */
	atomic_dec(&delayed_node->refs);
}

void btrfs_put_delayed_items(struct list_head *ins_list,
			     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}
}

int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr, *next;
	int ret;

	if (list_empty(del_list))
		return 0;

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;

		list_del(&curr->readdir_list);
		ret = (curr->key.offset == index);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (ret)
			return 1;
	}
	return 0;
}

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of the delayed item is impossible. So
	 * we needn't lock them. And we have held i_mutex of the
	 * directory, nobody can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (atomic_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
				 location.objectid, d_type);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
	}
	return 0;
}

*trans
,
1717 struct btrfs_inode_item
*inode_item
,
1718 struct inode
*inode
)
1720 btrfs_set_stack_inode_uid(inode_item
, i_uid_read(inode
));
1721 btrfs_set_stack_inode_gid(inode_item
, i_gid_read(inode
));
1722 btrfs_set_stack_inode_size(inode_item
, BTRFS_I(inode
)->disk_i_size
);
1723 btrfs_set_stack_inode_mode(inode_item
, inode
->i_mode
);
1724 btrfs_set_stack_inode_nlink(inode_item
, inode
->i_nlink
);
1725 btrfs_set_stack_inode_nbytes(inode_item
, inode_get_bytes(inode
));
1726 btrfs_set_stack_inode_generation(inode_item
,
1727 BTRFS_I(inode
)->generation
);
1728 btrfs_set_stack_inode_sequence(inode_item
, inode
->i_version
);
1729 btrfs_set_stack_inode_transid(inode_item
, trans
->transid
);
1730 btrfs_set_stack_inode_rdev(inode_item
, inode
->i_rdev
);
1731 btrfs_set_stack_inode_flags(inode_item
, BTRFS_I(inode
)->flags
);
1732 btrfs_set_stack_inode_block_group(inode_item
, 0);
1734 btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item
),
1735 inode
->i_atime
.tv_sec
);
1736 btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item
),
1737 inode
->i_atime
.tv_nsec
);
1739 btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item
),
1740 inode
->i_mtime
.tv_sec
);
1741 btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item
),
1742 inode
->i_mtime
.tv_nsec
);
1744 btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item
),
1745 inode
->i_ctime
.tv_sec
);
1746 btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item
),
1747 inode
->i_ctime
.tv_nsec
);
int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;
	struct btrfs_timespec *tspec;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->inode_dirty) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	inode->i_version = btrfs_stack_inode_sequence(inode_item);
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	tspec = btrfs_inode_atime(inode_item);
	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	tspec = btrfs_inode_mtime(inode_item);
	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	tspec = btrfs_inode_ctime(inode_item);
	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

*trans
,
1802 struct btrfs_root
*root
, struct inode
*inode
)
1804 struct btrfs_delayed_node
*delayed_node
;
1807 delayed_node
= btrfs_get_or_create_delayed_node(inode
);
1808 if (IS_ERR(delayed_node
))
1809 return PTR_ERR(delayed_node
);
1811 mutex_lock(&delayed_node
->mutex
);
1812 if (delayed_node
->inode_dirty
) {
1813 fill_stack_inode_item(trans
, &delayed_node
->inode_item
, inode
);
1817 ret
= btrfs_delayed_inode_reserve_metadata(trans
, root
, inode
,
1822 fill_stack_inode_item(trans
, &delayed_node
->inode_item
, inode
);
1823 delayed_node
->inode_dirty
= 1;
1824 delayed_node
->count
++;
1825 atomic_inc(&root
->fs_info
->delayed_root
->items
);
1827 mutex_unlock(&delayed_node
->mutex
);
1828 btrfs_release_delayed_node(delayed_node
);
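/*
 * Teardown helpers: drop every pending insertion/deletion item and the
 * dirty inode item of a delayed node, releasing their metadata
 * reservations, without writing anything back.
 */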
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (delayed_node->inode_dirty) {
		btrfs_delayed_inode_release_metadata(root, delayed_node);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}

void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;

		for (i = 0; i < n; i++)
			atomic_inc(&delayed_nodes[i]->refs);
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}

void btrfs_destroy_delayed_inodes(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}