// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>
#include <linux/sizes.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"
/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - copy also limits on subvol creation
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */
/*
 * Helpers to access qgroup reservation
 *
 * Callers should ensure the lock context and type are valid
 */
static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
{
	u64 ret = 0;
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		ret += qgroup->rsv.values[i];

	return ret;
}
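/*
 * Illustrative example (not upstream text): with
 * values[BTRFS_QGROUP_RSV_DATA] == 1M,
 * values[BTRFS_QGROUP_RSV_META_PERTRANS] == 16K and
 * values[BTRFS_QGROUP_RSV_META_PREALLOC] == 32K,
 * qgroup_rsv_total() returns 1M + 16K + 32K, so limit checks always see
 * the sum over all reservation types rather than any single one.
 */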
#ifdef CONFIG_BTRFS_DEBUG
static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
{
	if (type == BTRFS_QGROUP_RSV_DATA)
		return "data";
	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
		return "meta_pertrans";
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
		return "meta_prealloc";
	return NULL;
}
#endif
static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
			   struct btrfs_qgroup *qgroup, u64 num_bytes,
			   enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
	qgroup->rsv.values[type] += num_bytes;
}
static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
			       struct btrfs_qgroup *qgroup, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
	if (qgroup->rsv.values[type] >= num_bytes) {
		qgroup->rsv.values[type] -= num_bytes;
		return;
	}
#ifdef CONFIG_BTRFS_DEBUG
	WARN_RATELIMIT(1,
		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
		qgroup->qgroupid, qgroup_rsv_type_str(type),
		qgroup->rsv.values[type], num_bytes);
#endif
	qgroup->rsv.values[type] = 0;
}
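/*
 * Illustrative example: releasing 8K of BTRFS_QGROUP_RSV_DATA when only
 * 4K is recorded does not wrap the u64 below zero; the branch above
 * clamps values[type] to 0 (warning under CONFIG_BTRFS_DEBUG), so a
 * double release shows up as an underflow report instead of a huge
 * bogus reservation.
 */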
static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
				     struct btrfs_qgroup *dest,
				     struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
}
static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
					 struct btrfs_qgroup *dest,
					 struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
}
static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->old_refcnt < seq)
		qg->old_refcnt = seq;
	qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->new_refcnt < seq)
		qg->new_refcnt = seq;
	qg->new_refcnt += mod;
}
static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->old_refcnt < seq)
		return 0;
	return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->new_refcnt < seq)
		return 0;
	return qg->new_refcnt - seq;
}
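/*
 * Worked example of the seq-based refcnt trick (illustrative numbers):
 * fs_info->qgroup_seq only advances between accounting rounds, so a
 * refcnt smaller than @seq means "not touched in this round" and counts
 * as zero.  With seq == 100, the first
 * btrfs_qgroup_update_old_refcnt(qg, 100, 1) lifts old_refcnt to 100
 * and adds 1 -> 101; a second call makes it 102, and
 * btrfs_qgroup_get_old_refcnt(qg, 100) then returns 2.  A qgroup whose
 * refcnt still lags behind 100 yields 0 without any per-round reset of
 * every qgroup in the tree.
 */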
/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};
static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
{
	return (u64)(uintptr_t)qg;
}

static inline struct btrfs_qgroup *unode_aux_to_qgroup(struct ulist_node *n)
{
	return (struct btrfs_qgroup *)(uintptr_t)n->aux;
}
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}
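/*
 * Note on the rbtree key (editorial sketch): a qgroupid encodes the
 * qgroup level in its high 16 bits (see btrfs_qgroup_level() and
 * BTRFS_QGROUP_LEVEL_SHIFT), so level-0 qgroups like 0/257 and higher
 * level ones like 1/100 sort apart in the one tree.  A lookup for
 * "1/100" therefore searches for (1ULL << BTRFS_QGROUP_LEVEL_SHIFT) | 100.
 */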
/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid)
			p = &(*p)->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			p = &(*p)->rb_right;
		else
			return qgroup;
	}

	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
	if (!qgroup)
		return ERR_PTR(-ENOMEM);

	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}
static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *list;

	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
	kfree(qgroup);
}
/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(qgroup);
	return 0;
}
/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);

	return 0;
}
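/*
 * Illustrative linkage (editorial sketch): after
 * add_relation_rb(fs_info, 257, (1ULL << BTRFS_QGROUP_LEVEL_SHIFT) | 100)
 * ("0/257 is a member of 1/100"), the new btrfs_qgroup_list hangs off
 * member->groups via next_group and off parent->members via
 * next_member, so both "which parents do I have" and "which members do
 * I have" walks cost O(number of edges) with a single allocation per
 * relation.
 */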
/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl)
{
	struct btrfs_qgroup *qgroup;

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup)
		return -EINVAL;
	if (qgroup->rfer != rfer || qgroup->excl != excl)
		return -EINVAL;
	return 0;
}
#endif
/*
 * The full config is read in one go, only called from open_ctree()
 * It doesn't use any locking, as at this point we're still single-threaded
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;
	u64 rescan_progress = 0;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				btrfs_err(fs_info,
					  "old qgroup version, quota disabled");
				goto out;
			}
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
				btrfs_err(fs_info,
					  "qgroup generation mismatch, marked as inconsistent");
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
									  ptr);
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			btrfs_err(fs_info, "inconsistent qgroup config");
			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		}
		if (!qgroup) {
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out;
			}
		}
		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		ret = add_relation_rb(fs_info, found_key.objectid,
				      found_key.offset);
		if (ret == -ENOENT) {
			btrfs_warn(fs_info,
				   "orphan qgroup relation 0x%llx->0x%llx",
				   found_key.objectid, found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
		clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
		 ret >= 0)
		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
	btrfs_free_path(path);

	if (ret < 0) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	return ret < 0 ? ret : 0;
}
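/*
 * On-disk layout sketch for the two passes above (quota tree keys as
 * (objectid, type, offset); illustrative summary, see the btrfs on-disk
 * format for the authoritative definition):
 *
 *   (0,   BTRFS_QGROUP_STATUS_KEY,   0)        global status item
 *   (0,   BTRFS_QGROUP_INFO_KEY,     qgroupid) usage counters
 *   (0,   BTRFS_QGROUP_LIMIT_KEY,    qgroupid) limits
 *   (src, BTRFS_QGROUP_RELATION_KEY, dst)      one item per direction
 *
 * Pass 1 scans from key (0, 0, 0) and thus sees status/info/limit items
 * first; pass 2 restarts at BTRFS_QGROUP_RELATION_KEY and only needs
 * the member->parent direction to rebuild the in-memory relations.
 */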
/*
 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
 * first two are in single-threaded paths. And for the third one, we have set
 * quota_root to be null with qgroup_lock held before, so it is safe to clean
 * up the in-memory structures without qgroup_lock held.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(qgroup);
	}
	/*
	 * We call btrfs_free_qgroup_config() when unmounting the
	 * filesystem and when disabling quota, so we set qgroup_ulist
	 * to be null here to avoid double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
}
static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}
static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}
static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	if (btrfs_is_testing(quota_root->fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	/*
	 * Avoid a transaction abort by catching -EEXIST here. In that
	 * case, we proceed by re-initializing the existing structure
	 * on disk.
	 */

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);

out:
	btrfs_free_path(path);
	return ret;
}
static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_qgroup *qgroup)
{
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}
static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	if (btrfs_is_testing(fs_info))
		return 0;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}
static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				       fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}
/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * delete the leaf one by one
		 * since the whole tree is going
		 * to be deleted.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	struct btrfs_trans_handle *trans = NULL;
	int ret = 0;
	int slot;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root)
		goto out;

	/*
	 * 1 for quota root item
	 * 1 for BTRFS_QGROUP_STATUS item
	 *
	 * Yet we also need 2*n items for a QGROUP_INFO/QGROUP_LIMIT items
	 * per subvolume. However those are not currently reserved since it
	 * would be a lot of overkill.
	 */
	trans = btrfs_start_transaction(tree_root, 2);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, fs_info,
				       BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		btrfs_abort_transaction(trans, ret);
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(leaf);

	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {
			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}

			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out_free_path;
		}
		if (ret)
			break;
	}

out_add_root:
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	spin_unlock(&fs_info->qgroup_lock);

	ret = btrfs_commit_transaction(trans);
	trans = NULL;
	if (ret)
		goto out_free_path;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (!ret) {
		qgroup_rescan_zero_tracking(fs_info);
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
	}

out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret) {
		free_extent_buffer(quota_root->node);
		free_extent_buffer(quota_root->commit_root);
		kfree(quota_root);
	}
out:
	if (ret) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		if (trans)
			btrfs_end_transaction(trans);
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_trans_handle *trans = NULL;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;

	/*
	 * 1 For the root item
	 *
	 * We should also reserve enough items for the quota tree deletion in
	 * btrfs_clean_quota_tree but this is not done.
	 */
	trans = btrfs_start_transaction(fs_info->tree_root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	btrfs_qgroup_wait_for_completion(fs_info, false);
	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto end_trans;
	}

	ret = btrfs_del_root(trans, &quota_root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto end_trans;
	}

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	clean_tree_block(fs_info, quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

	free_extent_buffer(quota_root->node);
	free_extent_buffer(quota_root->commit_root);
	kfree(quota_root);

end_trans:
	ret = btrfs_end_transaction(trans);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}
/*
 * The easy accounting: we're updating a qgroup relationship whose child
 * qgroup only has exclusive extents.
 *
 * In this case, all exclusive extents will also be exclusive for the parent,
 * so excl/rfer just get added/removed.
 *
 * The same applies to qgroup reservation space, which must also be added to
 * or removed from the parent; otherwise, when the child later releases its
 * reservation, the parent would underflow its own reservation (for the
 * relationship-adding case).
 *
 * Caller should hold fs_info->qgroup_lock.
 */
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
				    struct ulist *tmp, u64 ref_root,
				    struct btrfs_qgroup *src, int sign)
{
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *glist;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 num_bytes = src->excl;
	int ret = 0;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	qgroup->rfer += sign * num_bytes;
	qgroup->rfer_cmpr += sign * num_bytes;

	WARN_ON(sign < 0 && qgroup->excl < num_bytes);
	qgroup->excl += sign * num_bytes;
	qgroup->excl_cmpr += sign * num_bytes;

	if (sign > 0)
		qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
	else
		qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);

	qgroup_dirty(fs_info, qgroup);

	/* Get all of the parent groups that contain this qgroup */
	list_for_each_entry(glist, &qgroup->groups, next_group) {
		ret = ulist_add(tmp, glist->group->qgroupid,
				qgroup_to_aux(glist->group), GFP_ATOMIC);
		if (ret < 0)
			goto out;
	}

	/* Iterate all of the parents and adjust their reference counts */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qgroup = unode_aux_to_qgroup(unode);
		qgroup->rfer += sign * num_bytes;
		qgroup->rfer_cmpr += sign * num_bytes;
		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
		qgroup->excl += sign * num_bytes;
		if (sign > 0)
			qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
		else
			qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
		qgroup->excl_cmpr += sign * num_bytes;
		qgroup_dirty(fs_info, qgroup);

		/* Add any parents of the parents */
		list_for_each_entry(glist, &qgroup->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
out:
	return ret;
}
/*
 * Quick path for updating qgroup with only excl refs.
 *
 * In that case, just updating all parents is enough.
 * Otherwise we need to do a full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for quick update, return > 0 when a full rescan is needed
 * (and the INCONSISTENT flag has been set).
 * Return < 0 for other errors.
 */
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
				   struct ulist *tmp, u64 src, u64 dst,
				   int sign)
{
	struct btrfs_qgroup *qgroup;
	int ret = 1;
	int err = 0;

	qgroup = find_qgroup_rb(fs_info, src);
	if (!qgroup)
		goto out;
	if (qgroup->excl == qgroup->rfer) {
		ret = 0;
		err = __qgroup_excl_accounting(fs_info, tmp, dst,
					       qgroup, sign);
		if (err < 0) {
			ret = err;
			goto out;
		}
	}
out:
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	int ret = 0;

	/* Check the level of src and dst first */
	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
		return -EINVAL;

	tmp = ulist_alloc(GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* check if such qgroup relation exists first */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = add_qgroup_relation_item(trans, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = add_relation_rb(fs_info, src, dst);
	if (ret < 0) {
		spin_unlock(&fs_info->qgroup_lock);
		goto out;
	}
	ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	ulist_free(tmp);
	return ret;
}
static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
				 u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	int ret = 0;
	int err;

	tmp = ulist_alloc(GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* check if such qgroup relation exists first */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent)
			goto exist;
	}
	ret = -ENOENT;
	goto out;
exist:
	ret = del_qgroup_relation_item(trans, src, dst);
	err = del_qgroup_relation_item(trans, dst, src);
	if (err && !ret)
		ret = err;

	spin_lock(&fs_info->qgroup_lock);
	del_relation_rb(fs_info, src, dst);
	ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
	spin_unlock(&fs_info->qgroup_lock);
out:
	ulist_free(tmp);
	return ret;
}
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	ret = __del_qgroup_relation(trans, src, dst);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	return ret;
}
int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (qgroup) {
		ret = -EEXIST;
		goto out;
	}

	ret = add_qgroup_item(trans, quota_root, qgroupid);
	if (ret)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	if (IS_ERR(qgroup))
		ret = PTR_ERR(qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}
	/* Check if there are no children of this qgroup */
	if (!list_empty(&qgroup->members)) {
		ret = -EBUSY;
		goto out;
	}
	ret = del_qgroup_item(trans, qgroupid);
	if (ret && ret != -ENOENT)
		goto out;

	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		ret = __del_qgroup_relation(trans, qgroupid,
					    list->group->qgroupid);
		if (ret)
			goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;
	/* Sometimes we would want to clear the limit on this qgroup.
	 * To meet this requirement, we treat the -1 as a special value
	 * which tells the kernel to clear the limit on this qgroup.
	 */
	const u64 CLEAR_VALUE = -1;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
		if (limit->max_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			qgroup->max_rfer = 0;
		} else {
			qgroup->max_rfer = limit->max_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
		if (limit->max_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			qgroup->max_excl = 0;
		} else {
			qgroup->max_excl = limit->max_excl;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
		if (limit->rsv_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			qgroup->rsv_rfer = 0;
		} else {
			qgroup->rsv_rfer = limit->rsv_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
		if (limit->rsv_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			qgroup->rsv_excl = 0;
		} else {
			qgroup->rsv_excl = limit->rsv_excl;
		}
	}
	qgroup->lim_flags |= limit->flags;

	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_limit_item(trans, qgroup);
	if (ret) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_info(fs_info, "unable to update quota limit for %llu",
			   qgroupid);
	}

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
				struct btrfs_delayed_ref_root *delayed_refs,
				struct btrfs_qgroup_extent_record *record)
{
	struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_qgroup_extent_record *entry;
	u64 bytenr = record->bytenr;

	lockdep_assert_held(&delayed_refs->lock);
	trace_btrfs_qgroup_trace_extent(fs_info, record);

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
				 node);
		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return 1;
	}

	rb_link_node(&record->node, parent_node, p);
	rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
	return 0;
}
int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
				   struct btrfs_qgroup_extent_record *qrecord)
{
	struct ulist *old_root;
	u64 bytenr = qrecord->bytenr;
	int ret;

	ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false);
	if (ret < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_warn(fs_info,
"error accounting new delayed refs extent (err code: %d), quota inconsistent",
			   ret);
		return 0;
	}

	/*
	 * Here we don't need to get the lock of
	 * trans->transaction->delayed_refs, since inserted qrecord won't
	 * be deleted, only qrecord->node may be modified (new qrecord insert)
	 *
	 * So modifying qrecord->old_roots is safe here
	 */
	qrecord->old_roots = old_root;
	return 0;
}
int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
			      u64 num_bytes, gfp_t gfp_flag)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup_extent_record *record;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
	    || bytenr == 0 || num_bytes == 0)
		return 0;
	record = kmalloc(sizeof(*record), gfp_flag);
	if (!record)
		return -ENOMEM;

	delayed_refs = &trans->transaction->delayed_refs;
	record->bytenr = bytenr;
	record->num_bytes = num_bytes;
	record->old_roots = NULL;

	spin_lock(&delayed_refs->lock);
	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
	spin_unlock(&delayed_refs->lock);
	if (ret > 0) {
		kfree(record);
		return 0;
	}
	return btrfs_qgroup_trace_extent_post(fs_info, record);
}
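/*
 * Usage note: tracing is the cheap half of qgroup accounting.  A caller
 * such as btrfs_qgroup_trace_leaf_items() only records (bytenr,
 * num_bytes) plus the pre-change root set here; the expensive diff
 * against the post-change root set happens once per transaction in
 * btrfs_qgroup_account_extents() at commit time.  Duplicate traces of
 * the same bytenr collapse in the dirty_extent_root rbtree (the > 0
 * return handled above), so callers may over-trace safely.
 */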
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
				  struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int nr = btrfs_header_nritems(eb);
	int i, extent_type, ret;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	u64 bytenr, num_bytes;

	/* We can be called directly from walk_up_proc() */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	for (i = 0; i < nr; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;

		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
		/* filter out non qgroup-accountable extents  */
		extent_type = btrfs_file_extent_type(eb, fi);

		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;

		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
		if (!bytenr)
			continue;

		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);

		ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes,
						GFP_NOFS);
		if (ret)
			return ret;
	}
	cond_resched();
	return 0;
}
/*
 * Walk up the tree from the bottom, freeing leaves and any interior
 * nodes which have had all slots visited. If a node (leaf or
 * interior) is freed, the node above it will have its slot
 * incremented. The root node will never be freed.
 *
 * At the end of this function, we should have a path which has all
 * slots incremented to the next position for a search. If we need to
 * read a new node it will be NULL and the node above it will have the
 * correct slot selected for a later read.
 *
 * If we increment the root nodes slot counter past the number of
 * elements, 1 is returned to signal completion of the search.
 */
static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
{
	int level = 0;
	int nr, slot;
	struct extent_buffer *eb;

	if (root_level == 0)
		return 1;

	while (level <= root_level) {
		eb = path->nodes[level];
		nr = btrfs_header_nritems(eb);
		path->slots[level]++;
		slot = path->slots[level];
		if (slot >= nr || level == 0) {
			/*
			 * Don't free the root -  we will detect this
			 * condition after our loop and return a
			 * positive value for caller to stop walking the tree.
			 */
			if (level != root_level) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;

				free_extent_buffer(eb);
				path->nodes[level] = NULL;
				path->slots[level] = 0;
			}
		} else {
			/*
			 * We have a valid slot to walk back down
			 * from. Stop here so caller can process these
			 * new nodes.
			 */
			break;
		}

		level++;
	}

	eb = path->nodes[root_level];
	if (path->slots[root_level] >= btrfs_header_nritems(eb))
		return 1;

	return 0;
}
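/*
 * Illustrative walk (editorial sketch): with root_level == 1 and a
 * 2-item root node, a path sitting at (nodes[1], slot 0) over leaf A
 * advances to slot 1 once the leaf is exhausted, frees the leaf and
 * leaves nodes[0] == NULL so the caller re-reads the next child; after
 * slot 1 is also consumed, the root slot reaches nritems and the
 * function returns 1 to end the scan.
 */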
/*
 * Helper function to trace a subtree tree block swap.
 *
 * The swap will happen in highest tree block, but there may be a lot of
 * tree blocks involved.
 *
 * For example:
 *  OO = Old tree blocks
 *  NN = New tree blocks allocated during balance
 *
 *           File tree (257)                  Reloc tree for 257
 * L2              OO                                NN
 *               /    \                            /    \
 * L1          OO      OO (a)                    OO      NN (a)
 *            / \     / \                       / \     / \
 * L0       OO   OO OO   OO                   OO   OO NN   NN
 *                  (b)  (c)                          (b)  (c)
 *
 * When calling qgroup_trace_extent_swap(), we will pass:
 * @src_eb = OO(a)
 * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
 * @dst_level = 0
 * @root_level = 1
 *
 * In that case, qgroup_trace_extent_swap() will search from OO(a) to
 * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
 *
 * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
 *
 * 1) Tree search from @src_eb
 *    It should act as a simplified btrfs_search_slot().
 *    The key for the search can be extracted from @dst_path->nodes[dst_level]
 *    (first key).
 *
 * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
 *    NOTE: In the above case, OO(a) and NN(a) won't be marked qgroup dirty.
 *    They should be marked during the previous (@dst_level = 1) iteration.
 *
 * 3) Mark file extents in leaves dirty
 *    We don't have a good way to pick out new file extents only.
 *    So we still follow the old method by scanning all file extents in
 *    the leaf.
 *
 * This function can free us from keeping two paths, thus later we only need
 * to care about how to iterate all new tree blocks in reloc tree.
 */
static int qgroup_trace_extent_swap(struct btrfs_trans_handle *trans,
				    struct extent_buffer *src_eb,
				    struct btrfs_path *dst_path,
				    int dst_level, int root_level,
				    bool trace_leaf)
{
	struct btrfs_key key;
	struct btrfs_path *src_path;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	u32 nodesize = fs_info->nodesize;
	int cur_level = root_level;
	int ret;

	BUG_ON(dst_level > root_level);
	/* Level mismatch */
	if (btrfs_header_level(src_eb) != root_level)
		return -EINVAL;

	src_path = btrfs_alloc_path();
	if (!src_path) {
		ret = -ENOMEM;
		goto out;
	}

	if (dst_level)
		btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
	else
		btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);

	/* For src_path */
	extent_buffer_get(src_eb);
	src_path->nodes[root_level] = src_eb;
	src_path->slots[root_level] = dst_path->slots[root_level];
	src_path->locks[root_level] = 0;

	/* A simplified version of btrfs_search_slot() */
	while (cur_level >= dst_level) {
		struct btrfs_key src_key;
		struct btrfs_key dst_key;

		if (src_path->nodes[cur_level] == NULL) {
			struct btrfs_key first_key;
			struct extent_buffer *eb;
			int parent_slot;
			u64 child_gen;
			u64 child_bytenr;

			eb = src_path->nodes[cur_level + 1];
			parent_slot = src_path->slots[cur_level + 1];
			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
			child_gen = btrfs_node_ptr_generation(eb, parent_slot);
			btrfs_node_key_to_cpu(eb, &first_key, parent_slot);

			eb = read_tree_block(fs_info, child_bytenr, child_gen,
					     cur_level, &first_key);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				goto out;
			} else if (!extent_buffer_uptodate(eb)) {
				free_extent_buffer(eb);
				ret = -EIO;
				goto out;
			}

			src_path->nodes[cur_level] = eb;

			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
			src_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
		}

		src_path->slots[cur_level] = dst_path->slots[cur_level];
		if (cur_level) {
			btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
					&dst_key, dst_path->slots[cur_level]);
			btrfs_node_key_to_cpu(src_path->nodes[cur_level],
					&src_key, src_path->slots[cur_level]);
		} else {
			btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
					&dst_key, dst_path->slots[cur_level]);
			btrfs_item_key_to_cpu(src_path->nodes[cur_level],
					&src_key, src_path->slots[cur_level]);
		}
		/* Content mismatch, something went wrong */
		if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
			ret = -ENOENT;
			goto out;
		}
		cur_level--;
	}

	/*
	 * Now both @dst_path and @src_path have been populated, record the tree
	 * blocks for qgroup accounting.
	 */
	ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
					nodesize, GFP_NOFS);
	if (ret < 0)
		goto out;
	ret = btrfs_qgroup_trace_extent(trans,
					dst_path->nodes[dst_level]->start,
					nodesize, GFP_NOFS);
	if (ret < 0)
		goto out;

	/* Record leaf file extents */
	if (dst_level == 0 && trace_leaf) {
		ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
		if (ret < 0)
			goto out;
		ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
	}
out:
	btrfs_free_path(src_path);
	return ret;
}
/*
 * Helper function to do recursive generation-aware depth-first search, to
 * locate all new tree blocks in a subtree of reloc tree.
 *
 * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
 *         reloc tree
 * L2         NN (a)
 *          /     \
 * L1    OO        NN (b)
 *      /  \      /  \
 *    OO  OO    OO  NN
 *                  (c)
 *
 * When called with:
 * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
 * we will iterate through tree blocks NN(b), NN(c) and inform qgroup to trace
 * the above tree blocks along with their counterparts in the file tree.
 * During the search, old tree blocks (OO) will be skipped, as the tree block
 * swap won't affect them.
 */
static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle *trans,
					   struct extent_buffer *src_eb,
					   struct btrfs_path *dst_path,
					   int cur_level, int root_level,
					   u64 last_snapshot, bool trace_leaf)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct extent_buffer *eb;
	bool need_cleanup = false;
	int ret = 0;
	int i;

	/* Level sanity check */
	if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL ||
	    root_level < 0 || root_level >= BTRFS_MAX_LEVEL ||
	    root_level < cur_level) {
		btrfs_err_rl(fs_info,
			     "%s: bad levels, cur_level=%d root_level=%d",
			     __func__, cur_level, root_level);
		return -EUCLEAN;
	}

	/* Read the tree block if needed */
	if (dst_path->nodes[cur_level] == NULL) {
		struct btrfs_key first_key;
		int parent_slot;
		u64 child_gen;
		u64 child_bytenr;

		/*
		 * dst_path->nodes[root_level] must be initialized before
		 * calling this function.
		 */
		if (cur_level == root_level) {
			btrfs_err_rl(fs_info,
	"%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
				     __func__, root_level, root_level,
				     cur_level);
			return -EUCLEAN;
		}

		/*
		 * We need to get child blockptr/gen from parent before we can
		 * read it.
		 */
		eb = dst_path->nodes[cur_level + 1];
		parent_slot = dst_path->slots[cur_level + 1];
		child_bytenr = btrfs_node_blockptr(eb, parent_slot);
		child_gen = btrfs_node_ptr_generation(eb, parent_slot);
		btrfs_node_key_to_cpu(eb, &first_key, parent_slot);

		/* This node is old, no need to trace */
		if (child_gen < last_snapshot)
			goto out;

		eb = read_tree_block(fs_info, child_bytenr, child_gen,
				     cur_level, &first_key);
		if (IS_ERR(eb)) {
			ret = PTR_ERR(eb);
			goto out;
		} else if (!extent_buffer_uptodate(eb)) {
			free_extent_buffer(eb);
			ret = -EIO;
			goto out;
		}

		dst_path->nodes[cur_level] = eb;
		dst_path->slots[cur_level] = 0;

		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		dst_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
		need_cleanup = true;
	}

	/* Now record this tree block and its counter part for qgroups */
	ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
				       root_level, trace_leaf);
	if (ret < 0)
		goto cleanup;

	eb = dst_path->nodes[cur_level];

	if (cur_level > 0) {
		/* Iterate all child tree blocks */
		for (i = 0; i < btrfs_header_nritems(eb); i++) {
			/* Skip old tree blocks as they won't be swapped */
			if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
				continue;
			dst_path->slots[cur_level] = i;

			/* Recursive call (at most 7 times) */
			ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
					dst_path, cur_level - 1, root_level,
					last_snapshot, trace_leaf);
			if (ret < 0)
				goto cleanup;
		}
	}

cleanup:
	if (need_cleanup) {
		/* Clean up */
		btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
				     dst_path->locks[cur_level]);
		free_extent_buffer(dst_path->nodes[cur_level]);
		dst_path->nodes[cur_level] = NULL;
		dst_path->slots[cur_level] = 0;
		dst_path->locks[cur_level] = 0;
	}
out:
	return ret;
}
/*
 * Inform qgroup to trace subtree swap used in balance.
 *
 * Unlike btrfs_qgroup_trace_subtree(), this function will only trace
 * new tree blocks whose generation is equal to (or larger than) @last_snapshot.
 *
 * Will go down the tree block pointed by @dst_eb (pointed by @dst_parent and
 * @dst_slot), and find any tree blocks whose generation is at @last_snapshot,
 * and then go down @src_eb (pointed by @src_parent and @src_slot) to find
 * the counterpart of the tree block, then mark both tree blocks as qgroup
 * dirty, and skip all tree blocks whose generation is smaller than
 * last_snapshot.
 *
 * This would skip tons of tree blocks of original btrfs_qgroup_trace_subtree(),
 * which could be the cause of very slow balance if the file tree is large.
 *
 * @src_parent, @src_slot: pointer to src (file tree) eb.
 * @dst_parent, @dst_slot: pointer to dst (reloc tree) eb.
 */
int btrfs_qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
				struct btrfs_block_group_cache *bg_cache,
				struct extent_buffer *src_parent, int src_slot,
				struct extent_buffer *dst_parent, int dst_slot,
				u64 last_snapshot)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *dst_path = NULL;
	struct btrfs_key first_key;
	struct extent_buffer *src_eb = NULL;
	struct extent_buffer *dst_eb = NULL;
	bool trace_leaf = false;
	u64 child_gen;
	u64 child_bytenr;
	int level;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	/* Check parameter order */
	if (btrfs_node_ptr_generation(src_parent, src_slot) >
	    btrfs_node_ptr_generation(dst_parent, dst_slot)) {
		btrfs_err_rl(fs_info,
		"%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
			     btrfs_node_ptr_generation(src_parent, src_slot),
			     btrfs_node_ptr_generation(dst_parent, dst_slot));
		return -EUCLEAN;
	}

	/*
	 * Only trace leaf if we're relocating data block groups, this could
	 * reduce tons of data extents tracing for meta/sys bg relocation.
	 */
	if (bg_cache->flags & BTRFS_BLOCK_GROUP_DATA)
		trace_leaf = true;
	/* Read out real @src_eb, pointed by @src_parent and @src_slot */
	child_bytenr = btrfs_node_blockptr(src_parent, src_slot);
	child_gen = btrfs_node_ptr_generation(src_parent, src_slot);
	btrfs_node_key_to_cpu(src_parent, &first_key, src_slot);

	src_eb = read_tree_block(fs_info, child_bytenr, child_gen,
			btrfs_header_level(src_parent) - 1, &first_key);
	if (IS_ERR(src_eb)) {
		ret = PTR_ERR(src_eb);
		goto out;
	}

	/* Read out real @dst_eb, pointed by @dst_parent and @dst_slot */
	child_bytenr = btrfs_node_blockptr(dst_parent, dst_slot);
	child_gen = btrfs_node_ptr_generation(dst_parent, dst_slot);
	btrfs_node_key_to_cpu(dst_parent, &first_key, dst_slot);

	dst_eb = read_tree_block(fs_info, child_bytenr, child_gen,
			btrfs_header_level(dst_parent) - 1, &first_key);
	if (IS_ERR(dst_eb)) {
		ret = PTR_ERR(dst_eb);
		goto out;
	}

	if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
		ret = -EINVAL;
		goto out;
	}

	level = btrfs_header_level(dst_eb);
	dst_path = btrfs_alloc_path();
	if (!dst_path) {
		ret = -ENOMEM;
		goto out;
	}

	/* For dst_path */
	extent_buffer_get(dst_eb);
	dst_path->nodes[level] = dst_eb;
	dst_path->slots[level] = 0;
	dst_path->locks[level] = 0;

	/* Do the generation-aware depth-first search */
	ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
					      level, last_snapshot, trace_leaf);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	free_extent_buffer(src_eb);
	free_extent_buffer(dst_eb);
	btrfs_free_path(dst_path);
	if (ret < 0)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
			       struct extent_buffer *root_eb,
			       u64 root_gen, int root_level)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret = 0;
	int level;
	struct extent_buffer *eb = root_eb;
	struct btrfs_path *path = NULL;

	BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
	BUG_ON(root_eb == NULL);

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	if (!extent_buffer_uptodate(root_eb)) {
		ret = btrfs_read_buffer(root_eb, root_gen, root_level, NULL);
		if (ret)
			goto out;
	}

	if (root_level == 0) {
		ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Walk down the tree.  Missing extent blocks are filled in as
	 * we go. Metadata is accounted every time we read a new
	 * extent block.
	 *
	 * When we reach a leaf, we account for file extent items in it,
	 * walk back up the tree (adjusting slot pointers as we go)
	 * and restart the search process.
	 */
	extent_buffer_get(root_eb); /* For path */
	path->nodes[root_level] = root_eb;
	path->slots[root_level] = 0;
	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
walk_down:
	level = root_level;
	while (level >= 0) {
		if (path->nodes[level] == NULL) {
			struct btrfs_key first_key;
			int parent_slot;
			u64 child_gen;
			u64 child_bytenr;

			/*
			 * We need to get child blockptr/gen from parent before
			 * we can read it.
			 */
			eb = path->nodes[level + 1];
			parent_slot = path->slots[level + 1];
			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
			child_gen = btrfs_node_ptr_generation(eb, parent_slot);
			btrfs_node_key_to_cpu(eb, &first_key, parent_slot);

			eb = read_tree_block(fs_info, child_bytenr, child_gen,
					     level, &first_key);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				goto out;
			} else if (!extent_buffer_uptodate(eb)) {
				free_extent_buffer(eb);
				ret = -EIO;
				goto out;
			}

			path->nodes[level] = eb;
			path->slots[level] = 0;

			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
			path->locks[level] = BTRFS_READ_LOCK_BLOCKING;

			ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
							fs_info->nodesize,
							GFP_NOFS);
			if (ret)
				goto out;
		}

		if (level == 0) {
			ret = btrfs_qgroup_trace_leaf_items(trans,
							    path->nodes[level]);
			if (ret)
				goto out;

			/* Nonzero return here means we completed our search */
			ret = adjust_slots_upwards(path, root_level);
			if (ret)
				break;

			/* Restart search with new slots */
			goto walk_down;
		}

		level--;
	}

	ret = 0;
out:
	btrfs_free_path(path);

	return ret;
}
#define UPDATE_NEW	0
#define UPDATE_OLD	1
/*
 * Walk all of the roots that point to the bytenr and adjust their refcnts.
 */
static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
				struct ulist *roots, struct ulist *tmp,
				struct ulist *qgroups, u64 seq, int update_old)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct ulist_node *tmp_unode;
	struct ulist_iterator tmp_uiter;
	struct btrfs_qgroup *qg;
	int ret = 0;

	if (!roots)
		return 0;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		qg = find_qgroup_rb(fs_info, unode->val);
		if (!qg)
			continue;

		ulist_reinit(tmp);
		ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
				GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;

			qg = unode_aux_to_qgroup(tmp_unode);
			if (update_old)
				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
			else
				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(qgroups, glist->group->qgroupid,
						qgroup_to_aux(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
				ret = ulist_add(tmp, glist->group->qgroupid,
						qgroup_to_aux(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
			}
		}
	}
	return 0;
}
/*
 * Update qgroup rfer/excl counters.
 * Rfer update is easy, the code can explain itself.
 *
 * Excl update is tricky, the update is split into 2 parts.
 * Part 1: Possible exclusive <-> sharing detect:
 *	|	A	|	!A	|
 *  -------------------------------------
 *  B	|	*	|	-	|
 *  -------------------------------------
 *  !B	|	+	|	**	|
 *  -------------------------------------
 *
 * Conditions:
 * A:	cur_old_roots < nr_old_roots	(not exclusive before)
 * !A:	cur_old_roots == nr_old_roots	(possible exclusive before)
 * B:	cur_new_roots < nr_new_roots	(not exclusive now)
 * !B:	cur_new_roots == nr_new_roots	(possible exclusive now)
 *
 * Results:
 * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
 * *: Definitely not changed.		**: Possibly unchanged.
 *
 * For !A and !B conditions, the exception is the cur_old/new_roots == 0 case.
 *
 * To make the logic clear, we first use conditions A and B to split the
 * combinations into 4 results.
 *
 * Then, for results "+" and "-", check the old/new_roots == 0 case, as in
 * those only one variant may be 0.
 *
 * Lastly, check result **, since there are 2 variants that may be 0, split
 * them again (2x2).
 * But this time we don't need to consider other things; the code and logic
 * are easy to understand now.
 */
static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
				  struct ulist *qgroups,
				  u64 nr_old_roots,
				  u64 nr_new_roots,
				  u64 num_bytes, u64 seq)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	u64 cur_new_count, cur_old_count;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(qgroups, &uiter))) {
		bool dirty = false;

		qg = unode_aux_to_qgroup(unode);
		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);

		trace_qgroup_update_counters(fs_info, qg, cur_old_count,
					     cur_new_count);

		/* Rfer update part */
		if (cur_old_count == 0 && cur_new_count > 0) {
			qg->rfer += num_bytes;
			qg->rfer_cmpr += num_bytes;
			dirty = true;
		}
		if (cur_old_count > 0 && cur_new_count == 0) {
			qg->rfer -= num_bytes;
			qg->rfer_cmpr -= num_bytes;
			dirty = true;
		}

		/* Excl update part */
		/* Exclusive/none -> shared case */
		if (cur_old_count == nr_old_roots &&
		    cur_new_count < nr_new_roots) {
			/* Exclusive -> shared */
			if (cur_old_count != 0) {
				qg->excl -= num_bytes;
				qg->excl_cmpr -= num_bytes;
				dirty = true;
			}
		}

		/* Shared -> exclusive/none case */
		if (cur_old_count < nr_old_roots &&
		    cur_new_count == nr_new_roots) {
			/* Shared->exclusive */
			if (cur_new_count != 0) {
				qg->excl += num_bytes;
				qg->excl_cmpr += num_bytes;
				dirty = true;
			}
		}

		/* Exclusive/none -> exclusive/none case */
		if (cur_old_count == nr_old_roots &&
		    cur_new_count == nr_new_roots) {
			if (cur_old_count == 0) {
				/* None -> exclusive/none */

				if (cur_new_count != 0) {
					/* None -> exclusive */
					qg->excl += num_bytes;
					qg->excl_cmpr += num_bytes;
					dirty = true;
				}
				/* None -> none, nothing changed */
			} else {
				/* Exclusive -> exclusive/none */

				if (cur_new_count == 0) {
					/* Exclusive -> none */
					qg->excl -= num_bytes;
					qg->excl_cmpr -= num_bytes;
					dirty = true;
				}
				/* Exclusive -> exclusive, nothing changed */
			}
		}

		if (dirty)
			qgroup_dirty(fs_info, qg);
	}
	return 0;
}
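/*
 * Worked example (illustrative numbers): a 64K extent owned by roots
 * {A} becomes owned by {A, B}, so nr_old_roots == 1, nr_new_roots == 2.
 * For qgroup A: cur_old_count == 1 == nr_old_roots and cur_new_count ==
 * 1 < nr_new_roots, the "-" cell above, so excl drops by 64K while rfer
 * is unchanged.  For qgroup B: cur_old_count == 0 so rfer grows by 64K,
 * and cur_new_count (1) < nr_new_roots (2) keeps the extent shared, so
 * excl stays put.
 */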
/*
 * Check if @roots could be a list of fs tree roots
 *
 * Return 0 for definitely not a fs/subvol tree roots ulist
 * Return 1 for possible fs/subvol tree roots in the list (considering an empty
 * one is valid)
 */
static int maybe_fs_roots(struct ulist *roots)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;

	/* Empty one, still possible for fs roots */
	if (!roots || roots->nnodes == 0)
		return 1;

	ULIST_ITER_INIT(&uiter);
	unode = ulist_next(roots, &uiter);
	if (!unode)
		return 1;

	/*
	 * If it contains fs tree roots, then it must belong to fs/subvol
	 * trees.
	 * If it contains a non-fs tree, it won't be shared with fs/subvol
	 * trees.
	 */
	return is_fstree(unode->val);
}
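/*
 * For example (root IDs invented for illustration): a roots ulist like
 * {5, 257} comes from fs/subvol trees (the top level fs tree and a
 * subvolume), so the extent affects qgroups; a ulist whose first entry
 * is 2 (the extent tree) does not.  Since a tree block is never shared
 * between fs trees and non-fs trees, checking the first entry is enough.
 */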
int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
				u64 num_bytes, struct ulist *old_roots,
				struct ulist *new_roots)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct ulist *qgroups = NULL;
	struct ulist *tmp = NULL;
	u64 seq;
	u64 nr_new_roots = 0;
	u64 nr_old_roots = 0;
	int ret = 0;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	if (new_roots) {
		if (!maybe_fs_roots(new_roots))
			goto out_free;
		nr_new_roots = new_roots->nnodes;
	}
	if (old_roots) {
		if (!maybe_fs_roots(old_roots))
			goto out_free;
		nr_old_roots = old_roots->nnodes;
	}

	/* Quick exit, either not fs tree roots, or won't affect any qgroup */
	if (nr_old_roots == 0 && nr_new_roots == 0)
		goto out_free;

	BUG_ON(!fs_info->quota_root);

	trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
					  num_bytes, nr_old_roots,
					  nr_new_roots);

	qgroups = ulist_alloc(GFP_NOFS);
	if (!qgroups) {
		ret = -ENOMEM;
		goto out_free;
	}
	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp) {
		ret = -ENOMEM;
		goto out_free;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			ret = 0;
			goto out_free;
		}
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	spin_lock(&fs_info->qgroup_lock);
	seq = fs_info->qgroup_seq;

	/* Update old refcnts using old_roots */
	ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
				   UPDATE_OLD);
	if (ret < 0)
		goto out;

	/* Update new refcnts using new_roots */
	ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
				   UPDATE_NEW);
	if (ret < 0)
		goto out;

	qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
			       num_bytes, seq);

	/*
	 * Bump qgroup_seq to avoid seq overlap
	 */
	fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
out:
	spin_unlock(&fs_info->qgroup_lock);
out_free:
	ulist_free(tmp);
	ulist_free(qgroups);
	ulist_free(old_roots);
	ulist_free(new_roots);
	return ret;
}
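/*
 * A sketch of the seq window with invented numbers: if qgroup_seq is 100
 * and an extent is accounted with nr_old_roots == 2 and nr_new_roots == 3,
 * refcnts are tagged within [100, 103], so qgroup_seq is bumped by
 * max(2, 3) + 1 to 104 and the next accounting run cannot mistake stale
 * refcnts for its own.
 */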
int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup_extent_record *record;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct ulist *new_roots = NULL;
	struct rb_node *node;
	u64 num_dirty_extents = 0;
	u64 qgroup_to_skip;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	qgroup_to_skip = delayed_refs->qgroup_to_skip;
	while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
		record = rb_entry(node, struct btrfs_qgroup_extent_record,
				  node);

		num_dirty_extents++;
		trace_btrfs_qgroup_account_extents(fs_info, record);

		if (!ret) {
			/*
			 * Old roots should be searched when inserting qgroup
			 * extent record
			 */
			if (WARN_ON(!record->old_roots)) {
				/* Search commit root to find old_roots */
				ret = btrfs_find_all_roots(NULL, fs_info,
						record->bytenr, 0,
						&record->old_roots, false);
				if (ret < 0)
					goto cleanup;
			}

			/*
			 * Use SEQ_LAST as time_seq to do special search, which
			 * doesn't lock the tree or delayed_refs and searches
			 * the current root. It's safe inside
			 * commit_transaction().
			 */
			ret = btrfs_find_all_roots(trans, fs_info,
					record->bytenr, SEQ_LAST, &new_roots,
					false);
			if (ret < 0)
				goto cleanup;
			if (qgroup_to_skip) {
				ulist_del(new_roots, qgroup_to_skip, 0);
				ulist_del(record->old_roots, qgroup_to_skip,
					  0);
			}
			ret = btrfs_qgroup_account_extent(trans,
							  record->bytenr,
							  record->num_bytes,
							  record->old_roots,
							  new_roots);
			record->old_roots = NULL;
			new_roots = NULL;
		}
cleanup:
		ulist_free(record->old_roots);
		ulist_free(new_roots);
		new_roots = NULL;
		rb_erase(node, &delayed_refs->dirty_extent_root);
		kfree(record);
	}
	trace_qgroup_num_dirty_extents(fs_info, trans->transid,
				       num_dirty_extents);
	return ret;
}
/*
 * Called from commit_transaction. Writes all changed qgroups to disk.
 */
int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	int ret = 0;

	if (!quota_root)
		return ret;

	spin_lock(&fs_info->qgroup_lock);
	while (!list_empty(&fs_info->dirty_qgroups)) {
		struct btrfs_qgroup *qgroup;
		qgroup = list_first_entry(&fs_info->dirty_qgroups,
					  struct btrfs_qgroup, dirty);
		list_del_init(&qgroup->dirty);
		spin_unlock(&fs_info->qgroup_lock);
		ret = update_qgroup_info_item(trans, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		ret = update_qgroup_limit_item(trans, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		spin_lock(&fs_info->qgroup_lock);
	}
	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
	else
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_status_item(trans);
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;

	return ret;
}
/*
 * Copy the accounting information between qgroups. This is necessary
 * when a snapshot or a subvolume is created. Throwing an error will
 * cause a transaction abort so we take extra care here to only error
 * when a readonly fs is a reasonable outcome.
 */
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
			 u64 objectid, struct btrfs_qgroup_inherit *inherit)
{
	int ret = 0;
	int i;
	u64 *i_qgroups;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *srcgroup;
	struct btrfs_qgroup *dstgroup;
	u32 level_size = 0;
	u64 nums;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		goto out;

	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
		       2 * inherit->num_excl_copies;
		for (i = 0; i < nums; ++i) {
			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);

			/*
			 * Zero out invalid groups so we can ignore
			 * them later.
			 */
			if (!srcgroup ||
			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
				*i_qgroups = 0ULL;

			++i_qgroups;
		}
	}

	/*
	 * create a tracking group for the subvol itself
	 */
	ret = add_qgroup_item(trans, quota_root, objectid);
	if (ret)
		goto out;

	/*
	 * add qgroup to all inherited groups
	 */
	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
			if (*i_qgroups == 0)
				continue;
			ret = add_qgroup_relation_item(trans, objectid,
						       *i_qgroups);
			if (ret && ret != -EEXIST)
				goto out;
			ret = add_qgroup_relation_item(trans, *i_qgroups,
						       objectid);
			if (ret && ret != -EEXIST)
				goto out;
		}
		ret = 0;
	}

	spin_lock(&fs_info->qgroup_lock);

	dstgroup = add_qgroup_rb(fs_info, objectid);
	if (IS_ERR(dstgroup)) {
		ret = PTR_ERR(dstgroup);
		goto unlock;
	}

	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
		dstgroup->lim_flags = inherit->lim.flags;
		dstgroup->max_rfer = inherit->lim.max_rfer;
		dstgroup->max_excl = inherit->lim.max_excl;
		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
		dstgroup->rsv_excl = inherit->lim.rsv_excl;

		ret = update_qgroup_limit_item(trans, dstgroup);
		if (ret) {
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
			btrfs_info(fs_info,
				   "unable to update quota limit for %llu",
				   dstgroup->qgroupid);
			goto unlock;
		}
	}

	if (srcid) {
		srcgroup = find_qgroup_rb(fs_info, srcid);
		if (!srcgroup)
			goto unlock;

		/*
		 * We call inherit after we clone the root in order to make
		 * sure our counts don't go crazy, so at this point the only
		 * difference between the two roots should be the root node.
		 */
		level_size = fs_info->nodesize;
		dstgroup->rfer = srcgroup->rfer;
		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
		dstgroup->excl = level_size;
		dstgroup->excl_cmpr = level_size;
		srcgroup->excl = level_size;
		srcgroup->excl_cmpr = level_size;

		/* inherit the limit info */
		dstgroup->lim_flags = srcgroup->lim_flags;
		dstgroup->max_rfer = srcgroup->max_rfer;
		dstgroup->max_excl = srcgroup->max_excl;
		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
		dstgroup->rsv_excl = srcgroup->rsv_excl;

		qgroup_dirty(fs_info, dstgroup);
		qgroup_dirty(fs_info, srcgroup);
	}
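	/*
	 * A concrete sketch with invented values: on a 16K nodesize fs,
	 * snapshotting a subvolume whose qgroup shows rfer == 1G leaves
	 * rfer == 1G on both qgroups, while excl drops to 16K on each
	 * side: all data and all but the root node of the metadata are
	 * now shared.
	 */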
	if (!inherit)
		goto unlock;

	i_qgroups = (u64 *)(inherit + 1);
	for (i = 0; i < inherit->num_qgroups; ++i) {
		if (*i_qgroups) {
			ret = add_relation_rb(fs_info, objectid, *i_qgroups);
			if (ret)
				goto unlock;
		}
		++i_qgroups;
	}

	for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		if (!i_qgroups[0] || !i_qgroups[1])
			continue;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->rfer = src->rfer - level_size;
		dst->rfer_cmpr = src->rfer_cmpr - level_size;
	}
	for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		if (!i_qgroups[0] || !i_qgroups[1])
			continue;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->excl = src->excl + level_size;
		dst->excl_cmpr = src->excl_cmpr + level_size;
	}

unlock:
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
/*
 * Two limits to commit transaction in advance.
 *
 * For RATIO, it will be 1/RATIO of the remaining limit
 * (excluding data and prealloc meta) as threshold.
 * For SIZE, it will be in byte unit as threshold.
 */
#define QGROUP_PERTRANS_RATIO		32
#define QGROUP_PERTRANS_SIZE		SZ_32M
static bool qgroup_check_limits(struct btrfs_fs_info *fs_info,
				const struct btrfs_qgroup *qg, u64 num_bytes)
{
	u64 limit;
	u64 threshold;

	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
	    qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
		return false;

	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
	    qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
		return false;

	/*
	 * Even if we passed the check, it's better to check if reservation
	 * for meta_pertrans is pushing us near the limit.
	 * If there is too much pertrans reservation or it's near the limit,
	 * let's try to commit the transaction to free some, using
	 * transaction_kthread.
	 */
	if ((qg->lim_flags & (BTRFS_QGROUP_LIMIT_MAX_RFER |
			      BTRFS_QGROUP_LIMIT_MAX_EXCL))) {
		if (qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL)
			limit = qg->max_excl;
		else
			limit = qg->max_rfer;
		threshold = (limit - qg->rsv.values[BTRFS_QGROUP_RSV_DATA] -
			     qg->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC]) /
			    QGROUP_PERTRANS_RATIO;
		threshold = min_t(u64, threshold, QGROUP_PERTRANS_SIZE);

		/*
		 * Use transaction_kthread to commit the transaction, so we no
		 * longer need to worry about nested transactions or lock
		 * context.
		 */
		if (qg->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] > threshold)
			btrfs_commit_transaction_locksafe(fs_info);
	}

	return true;
}
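/*
 * A worked example with invented numbers: with max_excl == 1G, 100M of
 * data reservation and 28M of prealloc meta reservation, the threshold is
 * (1G - 100M - 28M) / QGROUP_PERTRANS_RATIO == 28M, which is already below
 * the QGROUP_PERTRANS_SIZE cap of 32M.  Once pertrans meta reservations
 * exceed 28M, an early commit is triggered to reclaim them before the
 * qgroup would hit -EDQUOT.
 */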
static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
			  enum btrfs_qgroup_rsv_type type)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;
	struct ulist_node *unode;
	struct ulist_iterator uiter;

	if (!is_fstree(ref_root))
		return 0;

	if (num_bytes == 0)
		return 0;

	if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
	    capable(CAP_SYS_RESOURCE))
		enforce = false;

	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	/*
	 * In a first step, we check all affected qgroups if any limits would
	 * be exceeded.
	 */
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			qgroup_to_aux(qgroup), GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = unode_aux_to_qgroup(unode);

		if (enforce && !qgroup_check_limits(fs_info, qg, num_bytes)) {
			ret = -EDQUOT;
			goto out;
		}

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					qgroup_to_aux(glist->group),
					GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
	/*
	 * No limits exceeded, now record the reservation into all qgroups.
	 */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;

		qg = unode_aux_to_qgroup(unode);

		trace_qgroup_update_reserve(fs_info, qg, num_bytes, type);
		qgroup_rsv_add(fs_info, qg, num_bytes, type);
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
	return ret;
}
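/*
 * A note on propagation, with invented IDs: if qgroup 0/257 is a member
 * of 1/100, reserving 1M against subvolume 257 first checks the limits of
 * both 0/257 and 1/100, and only then adds 1M of rsv to both, so a parent
 * qgroup's limit constrains all of its members.
 */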
/*
 * Free @num_bytes of reserved space with @type for qgroup.  (Normally level 0
 * qgroup).
 *
 * Will handle all higher level qgroups too.
 *
 * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
 * This special case is only used for META_PERTRANS type.
 */
void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
			       u64 ref_root, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int ret = 0;

	if (!is_fstree(ref_root))
		return;

	if (num_bytes == 0)
		return;

	if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
		WARN(1, "%s: Invalid type to free", __func__);
		return;
	}
	spin_lock(&fs_info->qgroup_lock);

	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	if (num_bytes == (u64)-1)
		/*
		 * We're freeing all pertrans rsv, get reserved value from
		 * level 0 qgroup as real num_bytes to free.
		 */
		num_bytes = qgroup->rsv.values[type];

	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			qgroup_to_aux(qgroup), GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = unode_aux_to_qgroup(unode);

		trace_qgroup_update_reserve(fs_info, qg, -(s64)num_bytes,
					    type);
		qgroup_rsv_release(fs_info, qg, num_bytes, type);

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					qgroup_to_aux(glist->group),
					GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
}
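/*
 * E.g. btrfs_qgroup_free_meta_all_pertrans() below passes
 * num_bytes == (u64)-1: the real amount to free is then read back from the
 * level 0 qgroup's own META_PERTRANS rsv value, and that same amount is
 * released from every ancestor qgroup as well.
 */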
/*
 * Check if the leaf is the last leaf, i.e. all node pointers are at their
 * last position.
 */
static bool is_last_leaf(struct btrfs_path *path)
{
	int i;

	for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
		if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
			return false;
	}
	return true;
}
/*
 * returns < 0 on error, 0 when more leaves are to be scanned.
 * returns 1 when done.
 */
static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
			      struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_key found;
	struct extent_buffer *scratch_leaf = NULL;
	struct ulist *roots = NULL;
	u64 num_bytes;
	bool done;
	int slot;
	int ret;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	ret = btrfs_search_slot_for_read(fs_info->extent_root,
					 &fs_info->qgroup_rescan_progress,
					 path, 1, 0);

	btrfs_debug(fs_info,
		"current progress key (%llu %u %llu), search_slot ret %d",
		fs_info->qgroup_rescan_progress.objectid,
		fs_info->qgroup_rescan_progress.type,
		fs_info->qgroup_rescan_progress.offset, ret);

	if (ret) {
		/*
		 * The rescan is about to end, we will not be scanning any
		 * further blocks. We cannot unset the RESCAN flag here,
		 * because we want to commit the transaction if everything
		 * went well. To make the live accounting work in this phase,
		 * we set our scan progress pointer such that every real
		 * extent objectid will be smaller.
		 */
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
		btrfs_release_path(path);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		return ret;
	}
	done = is_last_leaf(path);

	btrfs_item_key_to_cpu(path->nodes[0], &found,
			      btrfs_header_nritems(path->nodes[0]) - 1);
	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;

	scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
	if (!scratch_leaf) {
		ret = -ENOMEM;
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		goto out;
	}
	extent_buffer_get(scratch_leaf);
	btrfs_tree_read_lock(scratch_leaf);
	btrfs_set_lock_blocking_rw(scratch_leaf, BTRFS_READ_LOCK);
	slot = path->slots[0];
	btrfs_release_path(path);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
		    found.type != BTRFS_METADATA_ITEM_KEY)
			continue;
		if (found.type == BTRFS_METADATA_ITEM_KEY)
			num_bytes = fs_info->nodesize;
		else
			num_bytes = found.offset;

		ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
					   &roots, false);
		if (ret < 0)
			goto out;
		/* For rescan, just pass old_roots as NULL */
		ret = btrfs_qgroup_account_extent(trans, found.objectid,
						  num_bytes, NULL, roots);
		if (ret < 0)
			goto out;
	}
out:
	if (scratch_leaf) {
		btrfs_tree_read_unlock_blocking(scratch_leaf);
		free_extent_buffer(scratch_leaf);
	}

	if (done && !ret) {
		ret = 1;
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
	}
	return ret;
}
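/*
 * The progress pointer above cooperates with btrfs_qgroup_account_extent():
 * while the RESCAN flag is set, live accounting skips any extent with
 * bytenr >= qgroup_rescan_progress.objectid, as the rescan will account it
 * anyway.  Setting the progress to (u64)-1 when the scan is about to end
 * therefore re-enables live accounting for every extent.
 */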
static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
						     qgroup_rescan_work);
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans = NULL;
	int err = -ENOMEM;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	/*
	 * Rescan should only search for commit root, and any later difference
	 * should be recorded by qgroup
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	err = 0;
	while (!err && !btrfs_fs_closing(fs_info)) {
		trans = btrfs_start_transaction(fs_info->fs_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			break;
		}
		if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
			err = -EINTR;
		} else {
			err = qgroup_rescan_leaf(trans, path);
		}
		if (err > 0)
			btrfs_commit_transaction(trans);
		else
			btrfs_end_transaction(trans);
	}

out:
	btrfs_free_path(path);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (!btrfs_fs_closing(fs_info))
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;

	if (err > 0 &&
	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	} else if (err < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	/*
	 * only update status, since the previous part has already updated the
	 * qgroup info.
	 */
	trans = btrfs_start_transaction(fs_info->quota_root, 1);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		btrfs_err(fs_info,
			  "fail to start transaction for status update: %d",
			  err);
		goto done;
	}
	ret = update_qgroup_status_item(trans);
	if (ret < 0) {
		err = ret;
		btrfs_err(fs_info, "fail to update qgroup status: %d", err);
	}
	btrfs_end_transaction(trans);

	if (btrfs_fs_closing(fs_info)) {
		btrfs_info(fs_info, "qgroup scan paused");
	} else if (err >= 0) {
		btrfs_info(fs_info, "qgroup scan completed%s",
			err > 0 ? " (inconsistency flag cleared)" : "");
	} else {
		btrfs_err(fs_info, "qgroup scan failed with %d", err);
	}

done:
	mutex_lock(&fs_info->qgroup_rescan_lock);
	fs_info->qgroup_rescan_running = false;
	mutex_unlock(&fs_info->qgroup_rescan_lock);
	complete_all(&fs_info->qgroup_rescan_completion);
}
/*
 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
 * memory required for the rescan context.
 */
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags)
{
	int ret = 0;

	if (!init_flags) {
		/* we're resuming qgroup rescan at mount time */
		if (!(fs_info->qgroup_flags &
		      BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
			btrfs_warn(fs_info,
			"qgroup rescan init failed, qgroup is not enabled");
			ret = -EINVAL;
		} else if (!(fs_info->qgroup_flags &
			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
			btrfs_warn(fs_info,
			"qgroup rescan init failed, qgroup rescan is not queued");
			ret = -EINVAL;
		}

		if (ret)
			return ret;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);

	if (init_flags) {
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
			btrfs_warn(fs_info,
				   "qgroup rescan is already in progress");
			ret = -EINPROGRESS;
		} else if (!(fs_info->qgroup_flags &
			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
			btrfs_warn(fs_info,
			"qgroup rescan init failed, qgroup is not enabled");
			ret = -EINVAL;
		}

		if (ret) {
			spin_unlock(&fs_info->qgroup_lock);
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			return ret;
		}
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	memset(&fs_info->qgroup_rescan_progress, 0,
		sizeof(fs_info->qgroup_rescan_progress));
	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
	init_completion(&fs_info->qgroup_rescan_completion);
	fs_info->qgroup_rescan_running = true;

	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	memset(&fs_info->qgroup_rescan_work, 0,
	       sizeof(fs_info->qgroup_rescan_work));
	btrfs_init_work(&fs_info->qgroup_rescan_work,
			btrfs_qgroup_rescan_helper,
			btrfs_qgroup_rescan_worker, NULL, NULL);
	return 0;
}
static void
qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	spin_lock(&fs_info->qgroup_lock);
	/* clear all current qgroup tracking information */
	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		qgroup->rfer = 0;
		qgroup->rfer_cmpr = 0;
		qgroup->excl = 0;
		qgroup->excl_cmpr = 0;
		qgroup_dirty(fs_info, qgroup);
	}
	spin_unlock(&fs_info->qgroup_lock);
}
int
btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
{
	int ret = 0;
	struct btrfs_trans_handle *trans;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (ret)
		return ret;

	/*
	 * We have set the rescan_progress to 0, which means no more
	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
	 * However, btrfs_qgroup_account_ref may be right after its call
	 * to btrfs_find_all_roots, in which case it would still do the
	 * accounting.
	 * To solve this, we're committing the transaction, which will
	 * ensure we run all delayed refs and only after that, we are
	 * going to clear all tracking information for a clean start.
	 */

	trans = btrfs_join_transaction(fs_info->fs_root);
	if (IS_ERR(trans)) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return PTR_ERR(trans);
	}
	ret = btrfs_commit_transaction(trans);
	if (ret) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return ret;
	}

	qgroup_rescan_zero_tracking(fs_info);

	btrfs_queue_work(fs_info->qgroup_rescan_workers,
			 &fs_info->qgroup_rescan_work);

	return 0;
}
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
				     bool interruptible)
{
	int running;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);
	running = fs_info->qgroup_rescan_running;
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (!running)
		return 0;

	if (interruptible)
		ret = wait_for_completion_interruptible(
			&fs_info->qgroup_rescan_completion);
	else
		wait_for_completion(&fs_info->qgroup_rescan_completion);

	return ret;
}
/*
 * this is only called from open_ctree where we're still single threaded, thus
 * locking is omitted here.
 */
void
btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
{
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
}
/*
 * Reserve qgroup space for range [start, start + len).
 *
 * This function will either reserve space from related qgroups or do
 * nothing if the range is already reserved.
 *
 * Return 0 for successful reserve
 * Return <0 for error (including -EDQUOT)
 *
 * NOTE: this function may sleep for memory allocation.
 *       if btrfs_qgroup_reserve_data() is called multiple times with
 *       same @reserved, caller must ensure when error happens it's OK
 *       to free *ALL* reserved space.
 */
int btrfs_qgroup_reserve_data(struct inode *inode,
			struct extent_changeset **reserved_ret, u64 start,
			u64 len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct extent_changeset *reserved;
	u64 orig_reserved;
	u64 to_reserve;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
	    !is_fstree(root->root_key.objectid) || len == 0)
		return 0;

	/* @reserved parameter is mandatory for qgroup */
	if (WARN_ON(!reserved_ret))
		return -EINVAL;
	if (!*reserved_ret) {
		*reserved_ret = extent_changeset_alloc();
		if (!*reserved_ret)
			return -ENOMEM;
	}
	reserved = *reserved_ret;
	/* Record already reserved space */
	orig_reserved = reserved->bytes_changed;
	ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
			start + len - 1, EXTENT_QGROUP_RESERVED, reserved);

	/* Newly reserved space */
	to_reserve = reserved->bytes_changed - orig_reserved;
	trace_btrfs_qgroup_reserve_data(inode, start, len,
					to_reserve, QGROUP_RESERVE);
	if (ret < 0)
		goto cleanup;
	ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
	if (ret < 0)
		goto cleanup;

	return ret;

cleanup:
	/* cleanup *ALL* already reserved ranges */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(&reserved->range_changed, &uiter)))
		clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
				 unode->aux, EXTENT_QGROUP_RESERVED, 0, 0,
				 NULL);
	extent_changeset_release(reserved);
	return ret;
}
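/*
 * A sketch of typical usage in the buffered write path: reserve before
 * dirtying pages, then free on error or release once the data is on disk:
 *
 *	struct extent_changeset *reserved = NULL;
 *
 *	ret = btrfs_qgroup_reserve_data(inode, &reserved, pos, count);
 *	if (ret < 0)
 *		return ret;
 *	...dirty the pages...
 *	on error:     btrfs_qgroup_free_data(inode, reserved, pos, count);
 *	on writeback: btrfs_qgroup_release_data(inode, pos, count);
 */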
/* Free ranges specified by @reserved, normally in error path */
static int qgroup_free_reserved_data(struct inode *inode,
			struct extent_changeset *reserved, u64 start, u64 len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct extent_changeset changeset;
	int freed = 0;
	int ret;

	extent_changeset_init(&changeset);
	len = round_up(start + len, root->fs_info->sectorsize);
	start = round_down(start, root->fs_info->sectorsize);

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
		u64 range_start = unode->val;
		/* unode->aux is the inclusive end */
		u64 range_len = unode->aux - range_start + 1;
		u64 free_start;
		u64 free_len;

		extent_changeset_release(&changeset);

		/* Only free range in range [start, start + len) */
		if (range_start >= start + len ||
		    range_start + range_len <= start)
			continue;
		free_start = max(range_start, start);
		free_len = min(start + len, range_start + range_len) -
			   free_start;
		/*
		 * TODO: To also modify reserved->ranges_reserved to reflect
		 * the modification.
		 *
		 * However as long as we free qgroup reserved according to
		 * EXTENT_QGROUP_RESERVED, we won't double free.
		 * So there is no need to rush.
		 */
		/*
		 * The EXTENT_QGROUP_RESERVED bits were set on the data
		 * io_tree by btrfs_qgroup_reserve_data(), so clear them
		 * from that same tree.
		 */
		ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree,
				free_start, free_start + free_len - 1,
				EXTENT_QGROUP_RESERVED, &changeset);
		if (ret < 0)
			goto out;
		freed += changeset.bytes_changed;
	}
	btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid,
				  freed, BTRFS_QGROUP_RSV_DATA);
	ret = freed;
out:
	extent_changeset_release(&changeset);
	return ret;
}
static int __btrfs_qgroup_release_data(struct inode *inode,
			struct extent_changeset *reserved, u64 start, u64 len,
			int free)
{
	struct extent_changeset changeset;
	int trace_op = QGROUP_RELEASE;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED,
		      &BTRFS_I(inode)->root->fs_info->flags))
		return 0;

	/* In release case, we shouldn't have @reserved */
	WARN_ON(!free && reserved);
	if (free && reserved)
		return qgroup_free_reserved_data(inode, reserved, start, len);
	extent_changeset_init(&changeset);
	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
			start + len - 1, EXTENT_QGROUP_RESERVED, &changeset);
	if (ret < 0)
		goto out;

	if (free)
		trace_op = QGROUP_FREE;
	trace_btrfs_qgroup_release_data(inode, start, len,
					changeset.bytes_changed, trace_op);
	if (free)
		btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
				BTRFS_I(inode)->root->root_key.objectid,
				changeset.bytes_changed,
				BTRFS_QGROUP_RSV_DATA);
	ret = changeset.bytes_changed;
out:
	extent_changeset_release(&changeset);
	return ret;
}
/*
 * Free a reserved space range from io_tree and related qgroups
 *
 * Should be called when a range of pages get invalidated before reaching
 * disk, or for the error cleanup case.
 * If @reserved is given, only the reserved range in [@start, @start + @len)
 * will be freed.
 *
 * For data written to disk, use btrfs_qgroup_release_data().
 *
 * NOTE: This function may sleep for memory allocation.
 */
int btrfs_qgroup_free_data(struct inode *inode,
			struct extent_changeset *reserved, u64 start, u64 len)
{
	return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
}

/*
 * Release a reserved space range from io_tree only.
 *
 * Should be called when a range of pages get written to disk and the
 * corresponding FILE_EXTENT is inserted into the corresponding root.
 *
 * Since the new qgroup accounting framework only updates qgroup numbers at
 * commit_transaction() time, its reserved space shouldn't be freed from
 * related qgroups.
 *
 * But we should release the range from io_tree, to allow further writes to
 * be COWed.
 *
 * NOTE: This function may sleep for memory allocation.
 */
int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len)
{
	return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
}
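/*
 * In short: for a range that made it to disk, release keeps the qgroup
 * usage (the extent is accounted at commit time) and only clears the
 * EXTENT_QGROUP_RESERVED bits; for a range that never will (invalidated
 * pages, error cleanup), free also returns the bytes to the qgroups.
 */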
static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type)
{
	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
		return;
	if (num_bytes == 0)
		return;

	spin_lock(&root->qgroup_meta_rsv_lock);
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
		root->qgroup_meta_rsv_prealloc += num_bytes;
	else
		root->qgroup_meta_rsv_pertrans += num_bytes;
	spin_unlock(&root->qgroup_meta_rsv_lock);
}

static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
			     enum btrfs_qgroup_rsv_type type)
{
	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
		return 0;
	if (num_bytes == 0)
		return 0;

	spin_lock(&root->qgroup_meta_rsv_lock);
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
		num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
				  num_bytes);
		root->qgroup_meta_rsv_prealloc -= num_bytes;
	} else {
		num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
				  num_bytes);
		root->qgroup_meta_rsv_pertrans -= num_bytes;
	}
	spin_unlock(&root->qgroup_meta_rsv_lock);
	return num_bytes;
}
int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
				enum btrfs_qgroup_rsv_type type, bool enforce)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid) || num_bytes == 0)
		return 0;

	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
	trace_qgroup_meta_reserve(root, type, (s64)num_bytes);
	ret = qgroup_reserve(root, num_bytes, enforce, type);
	if (ret < 0)
		return ret;
	/*
	 * Record what we have reserved into root.
	 *
	 * To avoid quota disabled->enabled underflow.
	 * In that case, we may try to free space we haven't reserved
	 * (since quota was disabled), so record what we reserved into root.
	 * And ensure later release won't underflow this number.
	 */
	add_root_meta_rsv(root, num_bytes, type);
	return ret;
}
void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid))
		return;

	/* TODO: Update trace point to handle such free */
	trace_qgroup_meta_free_all_pertrans(root);
	/* Special value -1 means to free all reserved space */
	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, (u64)-1,
				  BTRFS_QGROUP_RSV_META_PERTRANS);
}
void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid))
		return;

	/*
	 * reservation for META_PREALLOC can happen before quota is enabled,
	 * which can lead to underflow.
	 * Here ensure we will only free what we really have reserved.
	 */
	num_bytes = sub_root_meta_rsv(root, num_bytes, type);
	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
	trace_qgroup_meta_reserve(root, type, -(s64)num_bytes);
	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
				  num_bytes, type);
}
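/*
 * The clamping in sub_root_meta_rsv() is what makes the free above safe,
 * e.g. with invented numbers: if 1M of prealloc meta was reserved while
 * quota was disabled (so never recorded) and 2M after it was enabled, a
 * later free of 3M is clamped to the recorded 2M, so only space actually
 * tracked by the qgroups is released and nothing underflows.
 */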
static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
				int num_bytes)
{
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_qgroup *qgroup;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int ret = 0;

	if (num_bytes == 0)
		return;
	if (!quota_root)
		return;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			qgroup_to_aux(qgroup), GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = unode_aux_to_qgroup(unode);

		qgroup_rsv_release(fs_info, qg, num_bytes,
				   BTRFS_QGROUP_RSV_META_PREALLOC);
		qgroup_rsv_add(fs_info, qg, num_bytes,
			       BTRFS_QGROUP_RSV_META_PERTRANS);
		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					qgroup_to_aux(glist->group),
					GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
out:
	spin_unlock(&fs_info->qgroup_lock);
}
void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid))
		return;
	/* Same as btrfs_qgroup_free_meta_prealloc() */
	num_bytes = sub_root_meta_rsv(root, num_bytes,
				      BTRFS_QGROUP_RSV_META_PREALLOC);
	trace_qgroup_meta_convert(root, num_bytes);
	qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
}
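/*
 * E.g. when a preallocated metadata reservation ends up being used by the
 * current transaction, the bytes move from META_PREALLOC to META_PERTRANS
 * in one step: released from one bucket and added to the other for the
 * qgroup and all of its ancestors, with the root's own prealloc counter
 * reduced first (the same clamping as a plain free).
 */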
/*
 * Check for leaked qgroup reserved space, normally at inode destruction
 * time.
 */
void btrfs_qgroup_check_reserved_leak(struct inode *inode)
{
	struct extent_changeset changeset;
	struct ulist_node *unode;
	struct ulist_iterator iter;
	int ret;

	extent_changeset_init(&changeset);
	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
				       EXTENT_QGROUP_RESERVED, &changeset);

	WARN_ON(ret < 0);
	if (WARN_ON(changeset.bytes_changed)) {
		ULIST_ITER_INIT(&iter);
		while ((unode = ulist_next(&changeset.range_changed, &iter))) {
			btrfs_warn(BTRFS_I(inode)->root->fs_info,
				"leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu",
				inode->i_ino, unode->val, unode->aux);
		}
		btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
				BTRFS_I(inode)->root->root_key.objectid,
				changeset.bytes_changed,
				BTRFS_QGROUP_RSV_DATA);
	}
	extent_changeset_release(&changeset);
}