Linux 3.12.5: fs/btrfs/delayed-ref.c
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
                          struct btrfs_delayed_tree_ref *ref1, int type)
{
        if (type == BTRFS_TREE_BLOCK_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}
/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
                          struct btrfs_delayed_data_ref *ref1)
{
        if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
                if (ref1->objectid < ref2->objectid)
                        return -1;
                if (ref1->objectid > ref2->objectid)
                        return 1;
                if (ref1->offset < ref2->offset)
                        return -1;
                if (ref1->offset > ref2->offset)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}
/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * type of the delayed backrefs and content of delayed backrefs.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
                      struct btrfs_delayed_ref_node *ref1,
                      bool compare_seq)
{
        if (ref1->bytenr < ref2->bytenr)
                return -1;
        if (ref1->bytenr > ref2->bytenr)
                return 1;
        if (ref1->is_head && ref2->is_head)
                return 0;
        if (ref2->is_head)
                return -1;
        if (ref1->is_head)
                return 1;
        if (ref1->type < ref2->type)
                return -1;
        if (ref1->type > ref2->type)
                return 1;
        /* merging of sequenced refs is not allowed */
        if (compare_seq) {
                if (ref1->seq < ref2->seq)
                        return -1;
                if (ref1->seq > ref2->seq)
                        return 1;
        }
        if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
            ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
                return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
                                      btrfs_delayed_node_to_tree_ref(ref1),
                                      ref1->type);
        } else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
                   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
                return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
                                      btrfs_delayed_node_to_data_ref(ref1));
        }
        BUG();
        return 0;
}
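/*
 * Ordering example (hypothetical contents, for illustration): among
 * entries with the same bytenr, the head node always sorts last (both
 * is_head checks above return before the type compare), so walking
 * rb_prev() from a head visits every ref queued against that extent.
 * Non-head refs sort by type first, then by seq when compare_seq is
 * set, and finally by the per-type comparators
 * comp_tree_refs()/comp_data_refs().
 */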
/*
 * insert a new ref into the rbtree.  This returns any existing refs
 * for the same (bytenr,parent) tuple, or NULL if the new node was properly
 * inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
                                                  struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_node *entry;
        struct btrfs_delayed_ref_node *ins;
        int cmp;

        ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
                                 rb_node);

                cmp = comp_entry(entry, ins, 1);
                if (cmp < 0)
                        p = &(*p)->rb_left;
                else if (cmp > 0)
                        p = &(*p)->rb_right;
                else
                        return entry;
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color(node, root);
        return NULL;
}
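/*
 * Usage sketch (hypothetical helper, mirroring the real call sites
 * later in this file): tree_insert() either links the node or hands
 * back the entry that compared equal, in which case the caller merges
 * into the existing entry and frees the new node instead.  Must be
 * called with delayed_refs->lock held.
 */
static inline int tree_insert_or_merge_sketch(struct btrfs_delayed_ref_root *delayed_refs,
                                              struct btrfs_delayed_ref_node *ref)
{
        struct btrfs_delayed_ref_node *existing;

        existing = tree_insert(&delayed_refs->root, &ref->rb_node);
        if (existing)
                return 1;       /* caller updates 'existing', frees 'ref' */
        delayed_refs->num_entries++;
        return 0;
}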
/*
 * find a head entry based on bytenr.  This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
                                   u64 bytenr,
                                   struct btrfs_delayed_ref_node **last,
                                   int return_bigger)
{
        struct rb_node *n;
        struct btrfs_delayed_ref_node *entry;
        int cmp = 0;

again:
        n = root->rb_node;
        entry = NULL;
        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
                WARN_ON(!entry->in_tree);
                if (last)
                        *last = entry;

                if (bytenr < entry->bytenr)
                        cmp = -1;
                else if (bytenr > entry->bytenr)
                        cmp = 1;
                else if (!btrfs_delayed_ref_is_head(entry))
                        cmp = 1;
                else
                        cmp = 0;

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return entry;
        }
        if (entry && return_bigger) {
                if (cmp > 0) {
                        n = rb_next(&entry->rb_node);
                        if (!n)
                                n = rb_first(root);
                        entry = rb_entry(n, struct btrfs_delayed_ref_node,
                                         rb_node);
                        bytenr = entry->bytenr;
                        return_bigger = 0;
                        goto again;
                }
                return entry;
        }
        return NULL;
}
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        assert_spin_locked(&delayed_refs->lock);
        if (mutex_trylock(&head->mutex))
                return 0;

        atomic_inc(&head->node.refs);
        spin_unlock(&delayed_refs->lock);

        mutex_lock(&head->mutex);
        spin_lock(&delayed_refs->lock);
        if (!head->node.in_tree) {
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref(&head->node);
                return -EAGAIN;
        }
        btrfs_put_delayed_ref(&head->node);
        return 0;
}
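/*
 * Calling-pattern sketch (hypothetical helper; the real callers live in
 * extent-tree.c): btrfs_delayed_ref_lock() may drop and retake the
 * delayed_refs spinlock while sleeping on the head mutex, so on
 * -EAGAIN the head is gone from the tree and must be looked up again
 * from scratch.
 */
static inline int lock_head_sketch(struct btrfs_trans_handle *trans,
                                   u64 bytenr)
{
        struct btrfs_delayed_ref_root *delayed_refs =
                &trans->transaction->delayed_refs;
        struct btrfs_delayed_ref_head *head;
        int ret;

again:
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (!head) {
                spin_unlock(&delayed_refs->lock);
                return -ENOENT;
        }
        ret = btrfs_delayed_ref_lock(trans, head);
        spin_unlock(&delayed_refs->lock);
        if (ret == -EAGAIN)
                goto again;
        return ret;
}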
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
                                    struct btrfs_delayed_ref_root *delayed_refs,
                                    struct btrfs_delayed_ref_node *ref)
{
        rb_erase(&ref->rb_node, &delayed_refs->root);
        ref->in_tree = 0;
        btrfs_put_delayed_ref(ref);
        delayed_refs->num_entries--;
        if (trans->delayed_ref_updates)
                trans->delayed_ref_updates--;
}
static int merge_ref(struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_root *delayed_refs,
                     struct btrfs_delayed_ref_node *ref, u64 seq)
{
        struct rb_node *node;
        int merged = 0;
        int mod = 0;
        int done = 0;

        node = rb_prev(&ref->rb_node);
        while (node) {
                struct btrfs_delayed_ref_node *next;

                next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
                node = rb_prev(node);
                if (next->bytenr != ref->bytenr)
                        break;
                if (seq && next->seq >= seq)
                        break;
                if (comp_entry(ref, next, 0))
                        continue;

                if (ref->action == next->action) {
                        mod = next->ref_mod;
                } else {
                        if (ref->ref_mod < next->ref_mod) {
                                struct btrfs_delayed_ref_node *tmp;

                                tmp = ref;
                                ref = next;
                                next = tmp;
                                done = 1;
                        }
                        mod = -next->ref_mod;
                }

                merged++;
                drop_delayed_ref(trans, delayed_refs, next);
                ref->ref_mod += mod;
                if (ref->ref_mod == 0) {
                        drop_delayed_ref(trans, delayed_refs, ref);
                        break;
                } else {
                        /*
                         * You can't have multiples of the same ref on a tree
                         * block.
                         */
                        WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
                                ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
                }

                if (done)
                        break;
                node = rb_prev(&ref->rb_node);
        }

        return merged;
}
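/*
 * Worked example (hypothetical values): if 'ref' is a DROP with
 * ref_mod 1 and the neighbouring 'next' is an ADD with ref_mod 2, the
 * actions differ and 1 < 2, so the two are swapped, mod becomes -1,
 * the DROP node is dropped and the surviving ADD is left with
 * ref_mod 1.  Two ADDs with ref_mod 1 each instead take the
 * actions-match branch and fold into a single ADD with ref_mod 2.
 */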
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info,
                              struct btrfs_delayed_ref_root *delayed_refs,
                              struct btrfs_delayed_ref_head *head)
{
        struct rb_node *node;
        u64 seq = 0;

        spin_lock(&fs_info->tree_mod_seq_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                struct seq_list *elem;

                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                seq = elem->seq;
        }
        spin_unlock(&fs_info->tree_mod_seq_lock);

        node = rb_prev(&head->node.rb_node);
        while (node) {
                struct btrfs_delayed_ref_node *ref;

                ref = rb_entry(node, struct btrfs_delayed_ref_node,
                               rb_node);
                if (ref->bytenr != head->node.bytenr)
                        break;

                /* We can't merge refs that are outside of our seq count */
                if (seq && ref->seq >= seq)
                        break;
                if (merge_ref(trans, delayed_refs, ref, seq))
                        node = rb_prev(&head->node.rb_node);
                else
                        node = rb_prev(node);
        }
}
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
                            struct btrfs_delayed_ref_root *delayed_refs,
                            u64 seq)
{
        struct seq_list *elem;
        int ret = 0;

        spin_lock(&fs_info->tree_mod_seq_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                if (seq >= elem->seq) {
                        pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
                                 (u32)(seq >> 32), (u32)seq,
                                 (u32)(elem->seq >> 32), (u32)elem->seq,
                                 delayed_refs);
                        ret = 1;
                }
        }

        spin_unlock(&fs_info->tree_mod_seq_lock);
        return ret;
}
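/*
 * Sketch of the intended use (hypothetical helper; the real check sits
 * in the delayed-ref processing loop in extent-tree.c): a ref whose seq
 * is still covered by an outstanding tree_mod_seq holder must not be
 * run yet, because backref walkers may still need the pre-ref state.
 */
static inline int ref_is_blocked_sketch(struct btrfs_fs_info *fs_info,
                                        struct btrfs_delayed_ref_root *delayed_refs,
                                        struct btrfs_delayed_ref_node *ref)
{
        return ref->seq &&
               btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq);
}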
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
                           struct list_head *cluster, u64 start)
{
        int count = 0;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct rb_node *node;
        struct btrfs_delayed_ref_node *ref;
        struct btrfs_delayed_ref_head *head;

        delayed_refs = &trans->transaction->delayed_refs;
        if (start == 0) {
                node = rb_first(&delayed_refs->root);
        } else {
                ref = NULL;
                find_ref_head(&delayed_refs->root, start + 1, &ref, 1);
                if (ref) {
                        node = &ref->rb_node;
                } else
                        node = rb_first(&delayed_refs->root);
        }
again:
        while (node && count < 32) {
                ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
                if (btrfs_delayed_ref_is_head(ref)) {
                        head = btrfs_delayed_node_to_head(ref);
                        if (list_empty(&head->cluster)) {
                                list_add_tail(&head->cluster, cluster);
                                delayed_refs->run_delayed_start =
                                        head->node.bytenr;
                                count++;

                                WARN_ON(delayed_refs->num_heads_ready == 0);
                                delayed_refs->num_heads_ready--;
                        } else if (count) {
                                /* the goal of the clustering is to find extents
                                 * that are likely to end up in the same extent
                                 * leaf on disk.  So, we don't want them spread
                                 * all over the tree.  Stop now if we've hit
                                 * a head that was already in use
                                 */
                                break;
                        }
                }
                node = rb_next(node);
        }
        if (count) {
                return 0;
        } else if (start) {
                /*
                 * we've gone to the end of the rbtree without finding any
                 * clusters.  start from the beginning and try again
                 */
                start = 0;
                node = rb_first(&delayed_refs->root);
                goto again;
        }
        return 1;
}
void btrfs_release_ref_cluster(struct list_head *cluster)
{
        struct list_head *pos, *q;

        list_for_each_safe(pos, q, cluster)
                list_del_init(pos);
}
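/*
 * Drain-loop sketch (hypothetical; the real loop is
 * btrfs_run_delayed_refs() in extent-tree.c): gather clusters of up to
 * 32 heads at a time until btrfs_find_ref_cluster() reports an empty
 * tree, and put any unprocessed heads back with
 * btrfs_release_ref_cluster().
 */
static inline void run_clusters_sketch(struct btrfs_trans_handle *trans)
{
        struct btrfs_delayed_ref_root *delayed_refs =
                &trans->transaction->delayed_refs;
        LIST_HEAD(cluster);

        spin_lock(&delayed_refs->lock);
        while (!btrfs_find_ref_cluster(trans, &cluster,
                                       delayed_refs->run_delayed_start)) {
                /* ... process the heads on @cluster here ... */
                btrfs_release_ref_cluster(&cluster);
        }
        spin_unlock(&delayed_refs->lock);
}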
/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
                    struct btrfs_delayed_ref_root *delayed_refs,
                    struct btrfs_delayed_ref_node *existing,
                    struct btrfs_delayed_ref_node *update)
{
        if (update->action != existing->action) {
                /*
                 * this is effectively undoing either an add or a
                 * drop.  We decrement the ref_mod, and if it goes
                 * down to zero we just delete the entry without
                 * ever changing the extent allocation tree.
                 */
                existing->ref_mod--;
                if (existing->ref_mod == 0)
                        drop_delayed_ref(trans, delayed_refs, existing);
                else
                        WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
                                existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
        } else {
                WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
                        existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
                /*
                 * the action on the existing ref matches
                 * the action on the ref we're trying to add.
                 * Bump the ref_mod by one so the backref that
                 * is eventually added/removed has the correct
                 * reference count
                 */
                existing->ref_mod += update->ref_mod;
        }
}
/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
                         struct btrfs_delayed_ref_node *update)
{
        struct btrfs_delayed_ref_head *existing_ref;
        struct btrfs_delayed_ref_head *ref;

        existing_ref = btrfs_delayed_node_to_head(existing);
        ref = btrfs_delayed_node_to_head(update);
        BUG_ON(existing_ref->is_data != ref->is_data);

        if (ref->must_insert_reserved) {
                /* if the extent was freed and then
                 * reallocated before the delayed ref
                 * entries were processed, we can end up
                 * with an existing head ref without
                 * the must_insert_reserved flag set.
                 * Set it again here
                 */
                existing_ref->must_insert_reserved = ref->must_insert_reserved;

                /*
                 * update the num_bytes so we make sure the accounting
                 * is done correctly
                 */
                existing->num_bytes = update->num_bytes;

        }

        if (ref->extent_op) {
                if (!existing_ref->extent_op) {
                        existing_ref->extent_op = ref->extent_op;
                } else {
                        if (ref->extent_op->update_key) {
                                memcpy(&existing_ref->extent_op->key,
                                       &ref->extent_op->key,
                                       sizeof(ref->extent_op->key));
                                existing_ref->extent_op->update_key = 1;
                        }
                        if (ref->extent_op->update_flags) {
                                existing_ref->extent_op->flags_to_set |=
                                        ref->extent_op->flags_to_set;
                                existing_ref->extent_op->update_flags = 1;
                        }
                        btrfs_free_delayed_extent_op(ref->extent_op);
                }
        }
        /*
         * update the reference mod on the head to reflect this new operation
         */
        existing->ref_mod += update->ref_mod;
}
/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
                                        struct btrfs_trans_handle *trans,
                                        struct btrfs_delayed_ref_node *ref,
                                        u64 bytenr, u64 num_bytes,
                                        int action, int is_data)
{
        struct btrfs_delayed_ref_node *existing;
        struct btrfs_delayed_ref_head *head_ref = NULL;
        struct btrfs_delayed_ref_root *delayed_refs;
        int count_mod = 1;
        int must_insert_reserved = 0;

        /*
         * the head node stores the sum of all the mods, so dropping a ref
         * should drop the sum in the head node by one.
         */
        if (action == BTRFS_UPDATE_DELAYED_HEAD)
                count_mod = 0;
        else if (action == BTRFS_DROP_DELAYED_REF)
                count_mod = -1;

        /*
         * BTRFS_ADD_DELAYED_EXTENT means that we need to update
         * the reserved accounting when the extent is finally added, or
         * if a later modification deletes the delayed ref without ever
         * inserting the extent into the extent allocation tree.
         * ref->must_insert_reserved is the flag used to record
         * that accounting mods are required.
         *
         * Once we record must_insert_reserved, switch the action to
         * BTRFS_ADD_DELAYED_REF because other special casing is not required.
         */
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                must_insert_reserved = 1;
        else
                must_insert_reserved = 0;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = count_mod;
        ref->type = 0;
        ref->action = 0;
        ref->is_head = 1;
        ref->in_tree = 1;
        ref->seq = 0;

        head_ref = btrfs_delayed_node_to_head(ref);
        head_ref->must_insert_reserved = must_insert_reserved;
        head_ref->is_data = is_data;

        INIT_LIST_HEAD(&head_ref->cluster);
        mutex_init(&head_ref->mutex);

        trace_add_delayed_ref_head(ref, head_ref, action);

        existing = tree_insert(&delayed_refs->root, &ref->rb_node);

        if (existing) {
                update_existing_head_ref(existing, ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
        } else {
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
                delayed_refs->num_entries++;
                trans->delayed_ref_updates++;
        }
}
/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                                         struct btrfs_trans_handle *trans,
                                         struct btrfs_delayed_ref_node *ref,
                                         u64 bytenr, u64 num_bytes, u64 parent,
                                         u64 ref_root, int level, int action,
                                         int for_cow)
{
        struct btrfs_delayed_ref_node *existing;
        struct btrfs_delayed_tree_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;

        if (need_ref_seq(for_cow, ref_root))
                seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
        ref->seq = seq;

        full_ref = btrfs_delayed_node_to_tree_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
        else
                ref->type = BTRFS_TREE_BLOCK_REF_KEY;
        full_ref->level = level;

        trace_add_delayed_tree_ref(ref, full_ref, action);

        existing = tree_insert(&delayed_refs->root, &ref->rb_node);

        if (existing) {
                update_existing_ref(trans, delayed_refs, existing, ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
        } else {
                delayed_refs->num_entries++;
                trans->delayed_ref_updates++;
        }
}
/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                                         struct btrfs_trans_handle *trans,
                                         struct btrfs_delayed_ref_node *ref,
                                         u64 bytenr, u64 num_bytes, u64 parent,
                                         u64 ref_root, u64 owner, u64 offset,
                                         int action, int for_cow)
{
        struct btrfs_delayed_ref_node *existing;
        struct btrfs_delayed_data_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;

        if (need_ref_seq(for_cow, ref_root))
                seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
        ref->seq = seq;

        full_ref = btrfs_delayed_node_to_data_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_DATA_REF_KEY;
        else
                ref->type = BTRFS_EXTENT_DATA_REF_KEY;

        full_ref->objectid = owner;
        full_ref->offset = offset;

        trace_add_delayed_data_ref(ref, full_ref, action);

        existing = tree_insert(&delayed_refs->root, &ref->rb_node);

        if (existing) {
                update_existing_ref(trans, delayed_refs, existing, ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
        } else {
                delayed_refs->num_entries++;
                trans->delayed_ref_updates++;
        }
}
/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 ref_root, int level, int action,
                               struct btrfs_delayed_extent_op *extent_op,
                               int for_cow)
{
        struct btrfs_delayed_tree_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        BUG_ON(extent_op && extent_op->is_data);
        ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
                return -ENOMEM;
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
                             num_bytes, action, 0);

        add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
                             num_bytes, parent, ref_root, level, action,
                             for_cow);
        spin_unlock(&delayed_refs->lock);
        if (need_ref_seq(for_cow, ref_root))
                btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

        return 0;
}
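/*
 * Usage sketch (hypothetical call site; the real ones are in
 * extent-tree.c): queue the backref for a freshly allocated,
 * non-shared tree block.  All values are illustrative.
 */
static inline int queue_new_tree_block_sketch(struct btrfs_fs_info *fs_info,
                                              struct btrfs_trans_handle *trans,
                                              u64 bytenr, u64 blocksize,
                                              u64 root_objectid, int level)
{
        return btrfs_add_delayed_tree_ref(fs_info, trans, bytenr, blocksize,
                                          0 /* parent: not shared */,
                                          root_objectid, level,
                                          BTRFS_ADD_DELAYED_EXTENT,
                                          NULL /* no extent_op */,
                                          0 /* for_cow */);
}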
/*
 * add a delayed data ref.  it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, int action,
                               struct btrfs_delayed_extent_op *extent_op,
                               int for_cow)
{
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        BUG_ON(extent_op && !extent_op->is_data);
        ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                return -ENOMEM;
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
                             num_bytes, action, 1);

        add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
                             num_bytes, parent, ref_root, owner, offset,
                             action, for_cow);
        spin_unlock(&delayed_refs->lock);
        if (need_ref_seq(for_cow, ref_root))
                btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

        return 0;
}
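/*
 * Usage sketch (hypothetical): drop one reference to a file extent,
 * identified by owning root, inode number and file offset rather than
 * by a parent block.
 */
static inline int drop_file_extent_sketch(struct btrfs_fs_info *fs_info,
                                          struct btrfs_trans_handle *trans,
                                          u64 bytenr, u64 num_bytes,
                                          u64 root, u64 ino, u64 file_offset)
{
        return btrfs_add_delayed_data_ref(fs_info, trans, bytenr, num_bytes,
                                          0 /* parent */, root, ino,
                                          file_offset, BTRFS_DROP_DELAYED_REF,
                                          NULL, 0 /* for_cow */);
}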
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
                                struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                return -ENOMEM;

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
                             num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
                             extent_op->is_data);

        spin_unlock(&delayed_refs->lock);
        return 0;
}
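/*
 * Usage sketch (hypothetical; loosely modelled on
 * btrfs_set_disk_extent_flags() in extent-tree.c): queue a flags-only
 * update for a metadata extent.  btrfs_alloc_delayed_extent_op() is the
 * allocator counterpart from delayed-ref.h; the flag value is the
 * caller's choice.
 */
static inline int set_extent_flags_sketch(struct btrfs_fs_info *fs_info,
                                          struct btrfs_trans_handle *trans,
                                          u64 bytenr, u64 num_bytes, u64 flags)
{
        struct btrfs_delayed_extent_op *extent_op;

        extent_op = btrfs_alloc_delayed_extent_op();
        if (!extent_op)
                return -ENOMEM;

        extent_op->flags_to_set = flags;
        extent_op->update_flags = 1;
        extent_op->update_key = 0;
        extent_op->is_data = 0;

        return btrfs_add_delayed_extent_op(fs_info, trans, bytenr, num_bytes,
                                           extent_op);
}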
/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
        struct btrfs_delayed_ref_node *ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        ref = find_ref_head(&delayed_refs->root, bytenr, NULL, 0);
        if (ref)
                return btrfs_delayed_node_to_head(ref);
        return NULL;
}
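/*
 * Lookup sketch (hypothetical): the spinlock requirement means a caller
 * that wants to use the head after unlocking must grab a reference
 * first.
 */
static inline struct btrfs_delayed_ref_head *
find_head_sketch(struct btrfs_trans_handle *trans, u64 bytenr)
{
        struct btrfs_delayed_ref_root *delayed_refs =
                &trans->transaction->delayed_refs;
        struct btrfs_delayed_ref_head *head;

        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head)
                atomic_inc(&head->node.refs);
        spin_unlock(&delayed_refs->lock);
        return head;    /* drop with btrfs_put_delayed_ref(&head->node) */
}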
void btrfs_delayed_ref_exit(void)
{
        if (btrfs_delayed_ref_head_cachep)
                kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
        if (btrfs_delayed_tree_ref_cachep)
                kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
        if (btrfs_delayed_data_ref_cachep)
                kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
        if (btrfs_delayed_extent_op_cachep)
                kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}
int btrfs_delayed_ref_init(void)
{
        btrfs_delayed_ref_head_cachep = kmem_cache_create(
                                "btrfs_delayed_ref_head",
                                sizeof(struct btrfs_delayed_ref_head), 0,
                                SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_ref_head_cachep)
                goto fail;

        btrfs_delayed_tree_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_tree_ref",
                                sizeof(struct btrfs_delayed_tree_ref), 0,
                                SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_tree_ref_cachep)
                goto fail;

        btrfs_delayed_data_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_data_ref",
                                sizeof(struct btrfs_delayed_data_ref), 0,
                                SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_data_ref_cachep)
                goto fail;

        btrfs_delayed_extent_op_cachep = kmem_cache_create(
                                "btrfs_delayed_extent_op",
                                sizeof(struct btrfs_delayed_extent_op), 0,
                                SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_extent_op_cachep)
                goto fail;

        return 0;
fail:
        btrfs_delayed_ref_exit();
        return -ENOMEM;
}
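/*
 * Lifecycle sketch (hypothetical caller; the real one is
 * init_btrfs_fs() in super.c): the caches must exist before any
 * delayed ref is allocated, and any later init failure unwinds with
 * btrfs_delayed_ref_exit(), which also runs on module exit.
 */
static int __init delayed_ref_caches_sketch(void)
{
        int ret;

        ret = btrfs_delayed_ref_init();
        if (ret)
                return ret;
        /*
         * ... set up the rest of the module here; on any later failure,
         * unwind with btrfs_delayed_ref_exit() ...
         */
        return 0;
}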