Linux 4.18.10
fs/btrfs/delayed-ref.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
			  struct btrfs_delayed_tree_ref *ref2)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}
/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
			  struct btrfs_delayed_data_ref *ref2)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}
static int comp_refs(struct btrfs_delayed_ref_node *ref1,
		     struct btrfs_delayed_ref_node *ref2,
		     bool check_seq)
{
	int ret = 0;

	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
				     btrfs_delayed_node_to_tree_ref(ref2));
	else
		ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
				     btrfs_delayed_node_to_data_ref(ref2));
	if (ret)
		return ret;
	if (check_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	return 0;
}
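
/*
 * Illustration (not in the original source): the effective sort key within
 * one head's ref_tree is (type, root-or-parent[, objectid, offset][, seq]).
 * With check_seq == false, two refs differing only in seq compare equal,
 * which is exactly the property merge_ref() relies on to combine them.
 */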
/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}
static struct btrfs_delayed_ref_node* tree_insert(struct rb_root *root,
		struct btrfs_delayed_ref_node *ins)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *node = &ins->ref_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;

	while (*p) {
		int comp;

		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 ref_node);
		comp = comp_refs(ins, entry, true);
		if (comp < 0)
			p = &(*p)->rb_left;
		else if (comp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}
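
/*
 * Note (editorial, derived from the code above): on a key collision both
 * htree_insert() and tree_insert() return the pre-existing node instead of
 * inserting.  Deduplication is left to the callers -- add_delayed_ref_head()
 * folds the new head into the existing one, and insert_delayed_ref() merges
 * the colliding ref nodes.
 */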
/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}
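
/*
 * Illustration (not in the original source): with return_bigger set, a
 * bytenr past the last head wraps around to rb_first(), which is what lets
 * btrfs_select_ref_head() resume a scan from run_delayed_start and still
 * reach heads at lower bytenrs.
 */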
/*
 * Try to take the head's mutex without giving up the delayed_refs spinlock
 * for long.  If the trylock fails we pin the head, drop the spinlock and
 * block on the mutex; by the time we acquire it the head may already have
 * been run and removed from the rbtree, in which case we return -EAGAIN so
 * the caller redoes its lookup.
 */
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	lockdep_assert_held(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	refcount_inc(&head->refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (RB_EMPTY_NODE(&head->href_node)) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref_head(head);
	return 0;
}
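
/*
 * Sketch of the caller-side retry protocol (illustrative, condensed from
 * __btrfs_run_delayed_refs; not verbatim kernel code):
 *
 *	spin_lock(&delayed_refs->lock);
 *	head = btrfs_select_ref_head(trans);
 *	if (head && btrfs_delayed_ref_lock(trans, head) == -EAGAIN)
 *		head = NULL;	-- pick another head on the next iteration
 *	spin_unlock(&delayed_refs->lock);
 */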
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	lockdep_assert_held(&head->lock);
	rb_erase(&ref->ref_node, &head->ref_tree);
	RB_CLEAR_NODE(&ref->ref_node);
	if (!list_empty(&ref->add_list))
		list_del(&ref->add_list);
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}
static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	struct rb_node *node = rb_next(&ref->ref_node);
	bool done = false;

	while (!done && node) {
		int mod;

		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_refs(ref, next, false))
			break;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}

	return done;
}
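
/*
 * Worked example (illustrative, not from the original source): an ADD with
 * ref_mod 1 followed by a matching DROP with ref_mod 1 cancels completely.
 * The actions differ and ref->ref_mod is not smaller than next->ref_mod, so
 * mod = -1, next is dropped, ref->ref_mod reaches 0 and ref is dropped too.
 */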
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	u64 seq = 0;

	lockdep_assert_held(&head->lock);

	if (RB_EMPTY_ROOT(&head->ref_tree))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

again:
	for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		if (seq && ref->seq >= seq)
			continue;
		if (merge_ref(trans, delayed_refs, head, ref, seq))
			goto again;
	}
}
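
/*
 * Note (editorial): the seq cutoff above keeps any ref younger than the
 * oldest entry on tree_mod_seq_list out of the merge -- backref walkers
 * holding that sequence number still need to see those refs individually
 * (see btrfs_check_delayed_seq() below).
 */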
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			btrfs_debug(fs_info,
				"holding back delayed_ref %#x.%x, lowest is %#x.%x",
				(u32)(seq >> 32), (u32)seq,
				(u32)(elem->seq >> 32), (u32)elem->seq);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}
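
/*
 * Illustration (not in the original source): seq is a single u64 counter;
 * the debug message merely prints its high and low 32-bit halves in a
 * "major.minor" style for readability.  A ref is held back whenever its seq
 * is not older than the lowest live tree mod log sequence number.
 */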
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->bytenr +
		head->num_bytes;
	return head;
}
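
/*
 * Illustration (not in the original source): the selection scan starts at
 * run_delayed_start, skips heads already marked processing, and wraps
 * around to bytenr 0 at most once (the loop flag), so the scan terminates
 * even when every head is currently being processed by another thread.
 */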
/*
 * Helper to insert the ref_node to the tail or merge with tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int insert_delayed_ref(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *root,
			      struct btrfs_delayed_ref_head *href,
			      struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	exist = tree_insert(&href->ref_tree, ref);
	if (!exist)
		goto inserted;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else {
				ASSERT(0);
			}
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove existing tail if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;
inserted:
	if (ref->action == BTRFS_ADD_DELAYED_REF)
		list_add_tail(&ref->add_list, &href->ref_add_list);
	atomic_inc(&root->num_entries);
	trans->delayed_ref_updates++;
	spin_unlock(&href->lock);
	return ret;
}
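
/*
 * Worked example (illustrative, not from the original source): if a DROP
 * with ref_mod 2 already sits in the tree and an ADD with ref_mod 3 comes
 * in, the existing node flips to ADD (mod = -2, ref_mod = 3), joins
 * ref_add_list, and ends up with ref_mod 3 - 2 = 1 -- the expected net
 * effect of three adds against two drops.
 */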
/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			 struct btrfs_delayed_ref_head *existing,
			 struct btrfs_delayed_ref_head *update,
			 int *old_ref_mod_ret)
{
	int old_ref_mod;

	BUG_ON(existing->is_data != update->is_data);

	spin_lock(&existing->lock);
	if (update->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing->must_insert_reserved = update->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (update->extent_op) {
		if (!existing->extent_op) {
			existing->extent_op = update->extent_op;
		} else {
			if (update->extent_op->update_key) {
				memcpy(&existing->extent_op->key,
				       &update->extent_op->key,
				       sizeof(update->extent_op->key));
				existing->extent_op->update_key = true;
			}
			if (update->extent_op->update_flags) {
				existing->extent_op->flags_to_set |=
					update->extent_op->flags_to_set;
				existing->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(update->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation.
	 * we only need the lock for this case because the head could be
	 * processed concurrently; for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing->total_ref_mod;
	if (old_ref_mod_ret)
		*old_ref_mod_ret = old_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing->is_data) {
		if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;
		if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;
	}

	spin_unlock(&existing->lock);
}
static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
				  struct btrfs_qgroup_extent_record *qrecord,
				  u64 bytenr, u64 num_bytes, u64 ref_root,
				  u64 reserved, int action, bool is_data,
				  bool is_system)
{
	int count_mod = 1;
	int must_insert_reserved = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * The head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
	 * accounting when the extent is finally added, or if a later
	 * modification deletes the delayed ref without ever inserting the
	 * extent into the extent allocation tree.  ref->must_insert_reserved
	 * is the flag used to record that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	refcount_set(&head_ref->refs, 1);
	head_ref->bytenr = bytenr;
	head_ref->num_bytes = num_bytes;
	head_ref->ref_mod = count_mod;
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	head_ref->is_system = is_system;
	head_ref->ref_tree = RB_ROOT;
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	RB_CLEAR_NODE(&head_ref->href_node);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	head_ref->qgroup_reserved = 0;
	head_ref->qgroup_ref_root = 0;
	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	if (qrecord) {
		if (ref_root && reserved) {
			head_ref->qgroup_ref_root = ref_root;
			head_ref->qgroup_reserved = reserved;
		}

		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;
	}
}
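
/*
 * Illustration (not in the original source): a head created for
 * BTRFS_DROP_DELAYED_REF starts with ref_mod == total_ref_mod == -1, an ADD
 * or ADD_EXTENT with +1, and BTRFS_UPDATE_DELAYED_HEAD with 0, so the head
 * always carries the net effect of the operations queued behind it.
 */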
/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     int action, int *qrecord_inserted_ret,
		     int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_root *delayed_refs;
	int qrecord_inserted = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
					delayed_refs, qrecord))
			kfree(qrecord);
		else
			qrecord_inserted = 1;
	}

	trace_add_delayed_ref_head(trans->fs_info, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		WARN_ON(qrecord && head_ref->qgroup_ref_root
			&& head_ref->qgroup_reserved
			&& existing->qgroup_ref_root
			&& existing->qgroup_reserved);
		update_existing_head_ref(delayed_refs, existing, head_ref,
					 old_ref_mod);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (old_ref_mod)
			*old_ref_mod = 0;
		if (head_ref->is_data && head_ref->ref_mod < 0)
			delayed_refs->pending_csums += head_ref->num_bytes;
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	if (qrecord_inserted_ret)
		*qrecord_inserted_ret = qrecord_inserted;
	if (new_ref_mod)
		*new_ref_mod = head_ref->total_ref_mod;

	return head_ref;
}
/*
 * init_delayed_ref_common - Initialize the structure which represents a
 *			     modification to an extent.
 *
 * @fs_info:    Internal to the mounted filesystem mount structure.
 *
 * @ref:	The structure which is going to be initialized.
 *
 * @bytenr:	The logical address of the extent for which a modification is
 *		going to be recorded.
 *
 * @num_bytes:  Size of the extent whose modification is being recorded.
 *
 * @ref_root:	The id of the root where this modification has originated, this
 *		can be either one of the well-known metadata trees or the
 *		subvolume id which references this extent.
 *
 * @action:	Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
 *		BTRFS_ADD_DELAYED_EXTENT
 *
 * @ref_type:	Holds the type of the extent which is being recorded, can be
 *		one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
 *		when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
 *		BTRFS_EXTENT_DATA_REF_KEY when recording a data extent
 */
static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
				    struct btrfs_delayed_ref_node *ref,
				    u64 bytenr, u64 num_bytes, u64 ref_root,
				    int action, u8 ref_type)
{
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	ref->type = ref_type;
	RB_CLEAR_NODE(&ref->ref_node);
	INIT_LIST_HEAD(&ref->add_list);
}
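
/*
 * Illustration (not in the original source): only refs originating in a
 * subvolume tree (is_fstree()) get a tree mod log sequence number; a ref
 * from, say, the extent tree keeps seq == 0 and therefore always passes
 * the merge cutoff in btrfs_merge_delayed_refs().
 */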
/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	bool is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
	int ret;
	u8 ref_type;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	if (parent)
		ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref_type = BTRFS_TREE_BLOCK_REF_KEY;
	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				ref_root, action, ref_type);
	ref->root = ref_root;
	ref->parent = parent;
	ref->level = level;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		goto free_ref;

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record)
			goto free_head_ref;
	}

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
			      ref_root, 0, action, false, is_system);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted,
					old_ref_mod, new_ref_mod);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	if (qrecord_inserted)
		btrfs_qgroup_trace_extent_post(fs_info, record);

	return 0;

free_head_ref:
	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
	kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	return -ENOMEM;
}
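
/*
 * Illustrative call (a sketch, not verbatim kernel code): when the
 * allocator hands out a new tree block, a caller would queue the backref
 * insertion roughly like
 *
 *	btrfs_add_delayed_tree_ref(fs_info, trans, ins.objectid, ins.offset,
 *				   parent, root_objectid, level,
 *				   BTRFS_ADD_DELAYED_EXTENT, extent_op,
 *				   NULL, NULL);
 *
 * BTRFS_ADD_DELAYED_EXTENT marks the head must_insert_reserved so the
 * reserved-space accounting is settled when the ref is finally run.
 */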
/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	int ret;
	u8 ref_type;

	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	if (parent)
		ref_type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref_type = BTRFS_EXTENT_DATA_REF_KEY;
	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				ref_root, action, ref_type);
	ref->root = ref_root;
	ref->parent = parent;
	ref->objectid = owner;
	ref->offset = offset;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
			      reserved, action, true, false);
	head_ref->extent_op = NULL;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted,
					old_ref_mod, new_ref_mod);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(fs_info, record);
	return 0;
}
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
			      BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data,
			      false);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
			     NULL, NULL, NULL);

	spin_unlock(&delayed_refs->lock);
	return 0;
}
/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}
void __cold btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}
int __init btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}