/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;

/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

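/*
 * Concretely: each extent with pending modifications gets one
 * btrfs_delayed_ref_head, kept in delayed_refs->href_root (an rbtree
 * indexed by bytenr), and every individual add/drop against that
 * extent is queued as a btrfs_delayed_ref_node on the head's ref_list.
 * The helpers below maintain those two structures.
 */
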
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1, int type)
{
	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->node.bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->node.bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->node.bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find an head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
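/*
 * For example, with heads queued at bytenrs 4096 and 8192, a lookup of
 * 5000 returns NULL normally, but returns the 8192 head when
 * return_bigger is set (wrapping around to the first head when the
 * lookup lands past the last one).
 */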
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->node.bytenr)
			n = n->rb_left;
		else if (bytenr > entry->node.bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->node.bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}

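/*
 * The lock ordering here is delayed_refs->lock, then head->mutex.  When
 * the trylock fails we must not sleep on the mutex while holding the
 * spinlock, so the head is pinned with a refcount, the spinlock is
 * dropped for the blocking mutex_lock(), and after retaking the
 * spinlock we recheck that the head is still in the tree.
 */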
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}

static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	if (btrfs_delayed_ref_is_head(ref)) {
		head = btrfs_delayed_node_to_head(ref);
		rb_erase(&head->href_node, &delayed_refs->href_root);
	} else {
		assert_spin_locked(&head->lock);
		list_del(&ref->list);
	}
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}

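/*
 * Scan the refs queued on @head and try to merge @ref with them: refs
 * for the same target with opposing actions cancel against each other.
 * Returns true once @ref itself has been consumed or replaced.
 */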
static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	bool done = false;

	next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
				list);
	while (!done && &next->list != &head->ref_list) {
		int mod;
		struct btrfs_delayed_ref_node *next2;

		next2 = list_next_entry(next, list);

		if (next == ref)
			goto next;

		if (seq && next->seq >= seq)
			goto next;

		if (next->type != ref->type)
			goto next;

		if ((ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
		     ref->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
		    comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref),
				   btrfs_delayed_node_to_tree_ref(next),
				   ref->type))
			goto next;
		if ((ref->type == BTRFS_EXTENT_DATA_REF_KEY ||
		     ref->type == BTRFS_SHARED_DATA_REF_KEY) &&
		    comp_data_refs(btrfs_delayed_node_to_data_ref(ref),
				   btrfs_delayed_node_to_data_ref(next)))
			goto next;

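		/*
		 * Same target: fold the pair into a single ref_mod.  For
		 * example, an ADD with ref_mod 2 absorbing a DROP with
		 * ref_mod 1 leaves an ADD with ref_mod 1; equal and
		 * opposite mods cancel to zero and both nodes go away.
		 */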
		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
next:
		next = next2;
	}

	return done;
}

void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;
	u64 seq = 0;

	assert_spin_locked(&head->lock);

	if (list_empty(&head->ref_list))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
			       list);
	while (&ref->list != &head->ref_list) {
		if (seq && ref->seq >= seq)
			goto next;

		if (merge_ref(trans, delayed_refs, head, ref, seq)) {
			if (list_empty(&head->ref_list))
				break;
			ref = list_first_entry(&head->ref_list,
					       struct btrfs_delayed_ref_node,
					       list);
			continue;
		}
next:
		ref = list_next_entry(ref, list);
	}
}

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
				 (u32)(seq >> 32), (u32)seq,
				 (u32)(elem->seq >> 32), (u32)elem->seq,
				 delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->node.bytenr +
		head->node.num_bytes;
	return head;
}

/*
 * Helper to insert the ref_node to the tail or merge with tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
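/*
 * Only the current tail of the list is a merge candidate: it must match
 * the new ref in type, seq and comparator result, so refs queued under
 * different tree_mod_seq values are never collapsed here.
 */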
static int
add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_root *root,
			   struct btrfs_delayed_ref_head *href,
			   struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	/* Check whether we can merge the tail node with ref */
	if (list_empty(&href->ref_list))
		goto add_tail;
	exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
			   list);
	/* No need to compare bytenr nor is_head */
	if (exist->type != ref->type || exist->seq != ref->seq)
		goto add_tail;

	if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
	     exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
	    comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
			   btrfs_delayed_node_to_tree_ref(ref),
			   ref->type))
		goto add_tail;
	if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
	     exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
	    comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
			   btrfs_delayed_node_to_data_ref(ref)))
		goto add_tail;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove existing tail if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;

add_tail:
	list_add_tail(&ref->list, &href->ref_list);
	atomic_inc(&root->num_entries);
	trans->delayed_ref_updates++;
	spin_unlock(&href->lock);
	return ret;
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			 struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;
	int old_ref_mod;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	spin_lock(&existing_ref->lock);
	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;
	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = true;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation,
	 * only need the lock for this case cause we could be processing it
	 * currently, for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing_ref->total_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing_ref->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
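	/*
	 * For example, a data head sitting at total_ref_mod +1 that
	 * absorbs a drop of 2 ends at -1, so its num_bytes worth of
	 * checksum items becomes pending deletion and is accounted here.
	 */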
	if (existing_ref->is_data) {
		if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;
		if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;
	}
	spin_unlock(&existing_ref->lock);
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_node *ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
		     int action, int is_data)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
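	/*
	 * For example, two queued adds and one queued drop against the
	 * same extent leave the head with a total_ref_mod of +1.
	 */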
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	INIT_LIST_HEAD(&head_ref->ref_list);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	head_ref->qgroup_reserved = 0;
	head_ref->qgroup_ref_root = 0;

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (ref_root && reserved) {
			head_ref->qgroup_ref_root = ref_root;
			head_ref->qgroup_reserved = reserved;
		}

		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;

		if (btrfs_qgroup_insert_dirty_extent_nolock(fs_info,
					delayed_refs, qrecord))
			kfree(qrecord);
	}

	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	trace_add_delayed_ref_head(fs_info, ref, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		WARN_ON(ref_root && reserved && existing->qgroup_ref_root
			&& existing->qgroup_reserved);
		update_existing_head_ref(delayed_refs, &existing->node, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (is_data && count_mod < 0)
			delayed_refs->pending_csums += num_bytes;
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	return head_ref;
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action)
{
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);
	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	/*
	 * XXX: memory should be freed at the same level it was allocated,
	 * but the existing practice here is the opposite. Needs cleanup.
	 */
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action)
{
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(fs_info, ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
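/*
 * A minimal usage sketch (values are illustrative, not taken from this
 * file): a caller cow'ing a tree block @eb into subvolume @root would
 * queue the new ref roughly like
 *
 *	btrfs_add_delayed_tree_ref(fs_info, trans, eb->start, eb->len,
 *				   0, root->root_key.objectid,
 *				   btrfs_header_level(eb),
 *				   BTRFS_ADD_DELAYED_REF, NULL);
 *
 * and the drop of the old ref goes through the same path with
 * BTRFS_DROP_DELAYED_REF.
 */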
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		goto free_ref;

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record)
			goto free_head_ref;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
					bytenr, num_bytes, 0, 0, action, 0);

	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action);
	spin_unlock(&delayed_refs->lock);

	return 0;

free_head_ref:
	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
	kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	return -ENOMEM;
}

/*
 * add a delayed data ref.  it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
					bytenr, num_bytes, ref_root, reserved,
					action, 1);

	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action);
	spin_unlock(&delayed_refs->lock);

	return 0;
}

int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
			     num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

void btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

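/*
 * kmem_cache_destroy() is NULL-safe, so btrfs_delayed_ref_exit() also
 * serves as the unwind path when only some of the caches below were
 * created before a failure.
 */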
int btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}