/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

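/*
 * The bookkeeping is two levels of rb trees: delayed_refs->href_root
 * holds one btrfs_delayed_ref_head per extent, keyed by bytenr, and
 * each head collects the individual modifications for that extent in
 * head->ref_tree, ordered by comp_refs() below.  Running delayed refs
 * means selecting a head, merging whatever cancels out, and applying
 * the net result to the extent allocation tree.
 */
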
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
			  struct btrfs_delayed_tree_ref *ref2)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
			  struct btrfs_delayed_data_ref *ref2)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

static int comp_refs(struct btrfs_delayed_ref_node *ref1,
		     struct btrfs_delayed_ref_node *ref2,
		     bool check_seq)
{
	int ret = 0;

	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
				     btrfs_delayed_node_to_tree_ref(ref2));
	else
		ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
				     btrfs_delayed_node_to_data_ref(ref2));
	if (ret)
		return ret;
	if (check_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	return 0;
}

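/*
 * comp_refs() is the total order used as the key for head->ref_tree.
 * With check_seq set (the insertion path in tree_insert()) refs that
 * differ only by seq sort apart and are both kept; with it clear (the
 * merge path in merge_ref()) they compare equal and become candidates
 * for merging.
 */
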
/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
		struct btrfs_delayed_ref_node *ins)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *node = &ins->ref_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;

	while (*p) {
		int comp;

		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 ref_node);
		comp = comp_refs(ins, entry, true);
		if (comp < 0)
			p = &(*p)->rb_left;
		else if (comp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}

int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	refcount_inc(&head->refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (RB_EMPTY_NODE(&head->href_node)) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref_head(head);
	return 0;
}

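/*
 * Callers walk href_root under delayed_refs->lock; on -EAGAIN the head
 * was run and unlinked while we slept on its mutex, so the head must
 * be looked up again rather than used.
 */
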
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	assert_spin_locked(&head->lock);
	rb_erase(&ref->ref_node, &head->ref_tree);
	RB_CLEAR_NODE(&ref->ref_node);
	if (!list_empty(&ref->add_list))
		list_del(&ref->add_list);
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}

static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	struct rb_node *node = rb_next(&ref->ref_node);
	bool done = false;

	while (!done && node) {
		int mod;

		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_refs(ref, next, false))
			break;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}

	return done;
}

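/*
 * Worked example of the arithmetic above: an ADD ref (ref_mod == 1)
 * followed by a matching DROP (ref_mod == 1) takes the different-action
 * branch with mod == -1; the DROP node is dropped, ref->ref_mod becomes
 * 0, and the ADD is dropped too, so the pair cancels out completely.
 */
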
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	u64 seq = 0;

	assert_spin_locked(&head->lock);

	if (RB_EMPTY_ROOT(&head->ref_tree))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

again:
	for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		if (seq && ref->seq >= seq)
			continue;
		if (merge_ref(trans, delayed_refs, head, ref, seq))
			goto again;
	}
}

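/*
 * The seq cutoff above keeps any ref at or beyond the oldest active
 * tree mod log sequence number: backref walkers may still need to see
 * those refs individually, so they must not be merged away yet.
 */
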
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			btrfs_debug(fs_info,
				"holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)",
				(u32)(seq >> 32), (u32)seq,
				(u32)(elem->seq >> 32), (u32)elem->seq,
				delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

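/*
 * Nonzero means processing of refs tagged with @seq must be deferred:
 * some tree mod log user still holds an older sequence number.
 */
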
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->bytenr +
		head->num_bytes;
	return head;
}

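/*
 * Selection starts at run_delayed_start and wraps around to the low
 * end of the keyspace at most once ('loop' guards the second pass), so
 * heads are processed in rough bytenr order without rescanning forever.
 */
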
/*
 * Helper to insert the ref_node to the tail or merge with tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int insert_delayed_ref(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *root,
			      struct btrfs_delayed_ref_head *href,
			      struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	exist = tree_insert(&href->ref_tree, ref);
	if (!exist)
		goto inserted;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else {
				ASSERT(0);
			}
		} else {
			mod = -ref->ref_mod;
		}
	}
	exist->ref_mod += mod;

	/* remove existing tail if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;
inserted:
	if (ref->action == BTRFS_ADD_DELAYED_REF)
		list_add_tail(&ref->add_list, &href->ref_add_list);
	atomic_inc(&root->num_entries);
	trans->delayed_ref_updates++;
	spin_unlock(&href->lock);
	return ret;
}

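/*
 * The return value tells the caller who owns the node's memory: 0
 * means @ref is now linked into href->ref_tree, >0 means it was merged
 * into an existing node and the caller must free its own copy, which
 * is what add_delayed_tree_ref() and add_delayed_data_ref() do below.
 */
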
/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			 struct btrfs_delayed_ref_head *existing,
			 struct btrfs_delayed_ref_head *update,
			 int *old_ref_mod_ret)
{
	int old_ref_mod;

	BUG_ON(existing->is_data != update->is_data);

	spin_lock(&existing->lock);
	if (update->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing->must_insert_reserved = update->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;
	}

	if (update->extent_op) {
		if (!existing->extent_op) {
			existing->extent_op = update->extent_op;
		} else {
			if (update->extent_op->update_key) {
				memcpy(&existing->extent_op->key,
				       &update->extent_op->key,
				       sizeof(update->extent_op->key));
				existing->extent_op->update_key = true;
			}
			if (update->extent_op->update_flags) {
				existing->extent_op->flags_to_set |=
					update->extent_op->flags_to_set;
				existing->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(update->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation,
	 * only need the lock for this case cause we could be processing it
	 * currently, for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing->total_ref_mod;
	if (old_ref_mod_ret)
		*old_ref_mod_ret = old_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing->is_data) {
		if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;
		if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;
	}
	spin_unlock(&existing->lock);
}

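/*
 * Example for the pending_csums adjustment: a data head whose
 * total_ref_mod goes from >= 0 to < 0 will end with the extent freed,
 * so its checksum items will have to be deleted and num_bytes is added
 * to pending_csums; the opposite transition takes the bytes back out.
 */
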
/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
		     int action, int is_data, int *qrecord_inserted_ret,
		     int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;
	int qrecord_inserted = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	refcount_set(&head_ref->refs, 1);
	head_ref->bytenr = bytenr;
	head_ref->num_bytes = num_bytes;
	head_ref->ref_mod = count_mod;
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	head_ref->ref_tree = RB_ROOT;
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	RB_CLEAR_NODE(&head_ref->href_node);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	head_ref->qgroup_reserved = 0;
	head_ref->qgroup_ref_root = 0;
	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (ref_root && reserved) {
			head_ref->qgroup_ref_root = ref_root;
			head_ref->qgroup_reserved = reserved;
		}

		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;

		if (btrfs_qgroup_trace_extent_nolock(fs_info,
						     delayed_refs, qrecord))
			kfree(qrecord);
		else
			qrecord_inserted = 1;
	}

	trace_add_delayed_ref_head(fs_info, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		WARN_ON(ref_root && reserved && existing->qgroup_ref_root
			&& existing->qgroup_reserved);
		update_existing_head_ref(delayed_refs, existing, head_ref,
					 old_ref_mod);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (old_ref_mod)
			*old_ref_mod = 0;
		if (is_data && count_mod < 0)
			delayed_refs->pending_csums += num_bytes;
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	if (qrecord_inserted_ret)
		*qrecord_inserted_ret = qrecord_inserted;
	if (new_ref_mod)
		*new_ref_mod = head_ref->total_ref_mod;
	return head_ref;
}

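/*
 * Note the qgroup record handoff above: on success the record belongs
 * to the dirty extent tree of @delayed_refs; if a record for the same
 * bytenr already existed, btrfs_qgroup_trace_extent_nolock() says so
 * and the duplicate is freed here instead.
 */
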
/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action)
{
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);
	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	RB_CLEAR_NODE(&ref->ref_node);
	INIT_LIST_HEAD(&ref->add_list);

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);

	/*
	 * XXX: memory should be freed at the same level it was allocated;
	 * follow the existing (bad) practice for now. Needs cleanup.
	 */
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action)
{
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	/* first set the basic ref node struct up */
	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	RB_CLEAR_NODE(&ref->ref_node);
	INIT_LIST_HEAD(&ref->add_list);

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(fs_info, ref, full_ref, action);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		goto free_ref;

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record)
			goto free_head_ref;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
					bytenr, num_bytes, 0, 0, action, 0,
					&qrecord_inserted, old_ref_mod,
					new_ref_mod);

	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action);
	spin_unlock(&delayed_refs->lock);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(fs_info, record);
	return 0;

free_head_ref:
	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
	kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	return -ENOMEM;
}

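/*
 * Usage sketch (illustrative only; extent-tree.c is the real caller):
 * when a new tree block at eb->start is allocated,
 *
 *	btrfs_add_delayed_tree_ref(fs_info, trans, eb->start, eb->len,
 *				   parent, root_objectid, level,
 *				   BTRFS_ADD_DELAYED_EXTENT, extent_op,
 *				   &old_ref_mod, &new_ref_mod);
 *
 * queues the backref insertion instead of modifying the extent tree in
 * the middle of the allocation path.
 */
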
/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;

	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	head_ref->extent_op = NULL;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
					bytenr, num_bytes, ref_root, reserved,
					action, 1, &qrecord_inserted,
					old_ref_mod, new_ref_mod);

	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action);
	spin_unlock(&delayed_refs->lock);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(fs_info, record);
	return 0;
}

int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr,
			     num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data, NULL, NULL, NULL);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if any was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

void btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}