// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>
#include <linux/sched/mm.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"
#include "block-group.h"
#include "sysfs.h"

/*
 * TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - copy also limits on subvol creation
 *  - performance benchmarks
 *  - check all ioctl parameters
 */

/*
 * Helpers to access qgroup reservation
 *
 * Callers should ensure the lock context and type are valid
 */

static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
{
	u64 ret = 0;
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		ret += qgroup->rsv.values[i];

	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
{
	if (type == BTRFS_QGROUP_RSV_DATA)
		return "data";
	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
		return "meta_pertrans";
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
		return "meta_prealloc";
	return NULL;
}
#endif

static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
			   struct btrfs_qgroup *qgroup, u64 num_bytes,
			   enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
	qgroup->rsv.values[type] += num_bytes;
}

static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
			       struct btrfs_qgroup *qgroup, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
	if (qgroup->rsv.values[type] >= num_bytes) {
		qgroup->rsv.values[type] -= num_bytes;
		return;
	}
#ifdef CONFIG_BTRFS_DEBUG
	WARN_RATELIMIT(1,
		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
		qgroup->qgroupid, qgroup_rsv_type_str(type),
		qgroup->rsv.values[type], num_bytes);
#endif
	qgroup->rsv.values[type] = 0;
}

static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
				     struct btrfs_qgroup *dest,
				     struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
}

static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
					 struct btrfs_qgroup *dest,
					 struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
}

static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->old_refcnt < seq)
		qg->old_refcnt = seq;
	qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->new_refcnt < seq)
		qg->new_refcnt = seq;
	qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->old_refcnt < seq)
		return 0;
	return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->new_refcnt < seq)
		return 0;
	return qg->new_refcnt - seq;
}

/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};

static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
{
	return (u64)(uintptr_t)qg;
}

static inline struct btrfs_qgroup* unode_aux_to_qgroup(struct ulist_node *n)
{
	return (struct btrfs_qgroup *)(uintptr_t)n->aux;
}

static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}

/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid)
			p = &(*p)->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			p = &(*p)->rb_right;
		else
			return qgroup;
	}

	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
	if (!qgroup)
		return ERR_PTR(-ENOMEM);

	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}

static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
			    struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *list;

	btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
	kfree(qgroup);
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(fs_info, qgroup);
	return 0;
}

/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);

	return 0;
}

/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl)
{
	struct btrfs_qgroup *qgroup;

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup)
		return -EINVAL;
	if (qgroup->rfer != rfer || qgroup->excl != excl)
		return -EINVAL;
	return 0;
}
#endif

/*
 * The full config is read in one go, only called from open_ctree()
 * It doesn't use any locking, as at this point we're still single-threaded
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;
	u64 rescan_progress = 0;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_sysfs_add_qgroups(fs_info);
	if (ret < 0)
		goto out;
	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				btrfs_err(fs_info,
				 "old qgroup version, quota disabled");
				goto out;
			}
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
				btrfs_err(fs_info,
					"qgroup generation mismatch, marked as inconsistent");
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
									  ptr);
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			btrfs_err(fs_info, "inconsistent qgroup config");
			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		}
		if (!qgroup) {
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out;
			}
		}
		ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
		if (ret < 0)
			goto out;

		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		ret = add_relation_rb(fs_info, found_key.objectid,
				      found_key.offset);
		if (ret == -ENOENT) {
			btrfs_warn(fs_info,
				"orphan qgroup relation 0x%llx->0x%llx",
				found_key.objectid, found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	btrfs_free_path(path);
	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
		clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
		 ret >= 0)
		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);

	if (ret < 0) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		btrfs_sysfs_del_qgroups(fs_info);
	}

	return ret < 0 ? ret : 0;
}

/*
 * Called in close_ctree() when quota is still enabled.  This verifies we don't
 * leak some reserved space.
 *
 * Return false if no reserved space is left.
 * Return true if some reserved space is leaked.
 */
bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	bool ret = false;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return ret;
	/*
	 * Since we're unmounting, there is no race and no need to grab qgroup
	 * lock.  And here we don't go post-order to provide a more user
	 * friendly sorted result.
	 */
	for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
		struct btrfs_qgroup *qgroup;
		int i;

		qgroup = rb_entry(node, struct btrfs_qgroup, node);
		for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
			if (qgroup->rsv.values[i]) {
				ret = true;
				btrfs_warn(fs_info,
		"qgroup %hu/%llu has unreleased space, type %d rsv %llu",
				   btrfs_qgroup_level(qgroup->qgroupid),
				   btrfs_qgroup_subvolid(qgroup->qgroupid),
				   i, qgroup->rsv.values[i]);
			}
		}
	}
	return ret;
}

/*
 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
 * first two are in single-threaded paths. And for the third one, we have set
 * quota_root to be null with qgroup_lock held before, so it is safe to clean
 * up the in-memory structures without qgroup_lock held.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(fs_info, qgroup);
	}
	/*
	 * We call btrfs_free_qgroup_config() when unmounting
	 * filesystem and disabling quota, so we set qgroup_ulist
	 * to be null here to avoid double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
	btrfs_sysfs_del_qgroups(fs_info);
}

static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}

static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	if (btrfs_is_testing(quota_root->fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	/*
	 * Avoid a transaction abort by catching -EEXIST here. In that
	 * case, we proceed by re-initializing the existing structure
	 * on disk.
	 */

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				 struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				  struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_qgroup *qgroup)
{
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	if (btrfs_is_testing(fs_info))
		return 0;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * delete the leaf one by one
		 * since the whole tree is going
		 * to be deleted.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *ulist = NULL;
	int ret = 0;
	int slot;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root)
		goto out;

	ulist = ulist_alloc(GFP_KERNEL);
	if (!ulist) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_sysfs_add_qgroups(fs_info);
	if (ret < 0)
		goto out;

	/*
	 * Unlock qgroup_ioctl_lock before starting the transaction. This is to
	 * avoid lock acquisition inversion problems (reported by lockdep) between
	 * qgroup_ioctl_lock and the vfs freeze semaphores, acquired when we
	 * start a transaction.
	 * After we started the transaction lock qgroup_ioctl_lock again and
	 * check if someone else created the quota root in the meanwhile. If so,
	 * just return success and release the transaction handle.
	 *
	 * Also we don't need to worry about someone else calling
	 * btrfs_sysfs_add_qgroups() after we unlock and getting an error because
	 * that function returns 0 (success) when the sysfs entries already exist.
	 */
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	/*
	 * 1 for quota root item
	 * 1 for BTRFS_QGROUP_STATUS item
	 *
	 * Yet we also need 2*n items for a QGROUP_INFO/QGROUP_LIMIT items
	 * per subvolume. However those are not currently reserved since it
	 * would be a lot of overkill.
	 */
	trans = btrfs_start_transaction(tree_root, 2);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	if (fs_info->quota_root)
		goto out;

	fs_info->qgroup_ulist = ulist;
	ulist = NULL;

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		btrfs_abort_transaction(trans, ret);
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
				 struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(leaf);

	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {

			/* Release locks on tree_root before we access quota_root */
			btrfs_release_path(path);

			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}

			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			ret = btrfs_search_slot_for_read(tree_root, &found_key,
							 path, 1, 0);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			if (ret > 0) {
				/*
				 * Shouldn't happen, but in case it does we
				 * don't need to do the btrfs_next_item, just
				 * continue.
				 */
				continue;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out_free_path;
		}
		if (ret)
			break;
	}

out_add_root:
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}
	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	ret = btrfs_commit_transaction(trans);
	trans = NULL;
	if (ret)
		goto out_free_path;

	/*
	 * Set quota enabled flag after committing the transaction, to avoid
	 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
	 * creation.
	 */
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	spin_unlock(&fs_info->qgroup_lock);

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (!ret) {
		qgroup_rescan_zero_tracking(fs_info);
		fs_info->qgroup_rescan_running = true;
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
	}

out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret)
		btrfs_put_root(quota_root);
out:
	if (ret) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		btrfs_sysfs_del_qgroups(fs_info);
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (ret && trans)
		btrfs_end_transaction(trans);
	else if (trans)
		ret = btrfs_end_transaction(trans);
	ulist_free(ulist);
	return ret;
}

int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_trans_handle *trans = NULL;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	/*
	 * 1 For the root item
	 *
	 * We should also reserve enough items for the quota tree deletion in
	 * btrfs_clean_quota_tree but this is not done.
	 *
	 * Also, we must always start a transaction without holding the mutex
	 * qgroup_ioctl_lock, see btrfs_quota_enable().
	 */
	trans = btrfs_start_transaction(fs_info->tree_root, 1);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	if (!fs_info->quota_root)
		goto out;

	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	btrfs_qgroup_wait_for_completion(fs_info, false);
	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = btrfs_del_root(trans, &quota_root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	btrfs_clean_tree_block(quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

	btrfs_put_root(quota_root);

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (ret && trans)
		btrfs_end_transaction(trans);
	else if (trans)
		ret = btrfs_end_transaction(trans);

	return ret;
}

static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}

/*
 * The easy accounting, we're updating qgroup relationship whose child qgroup
 * only has exclusive extents.
 *
 * In this case, all exclusive extents will also be exclusive for parent, so
 * excl/rfer just get added/removed.
 *
 * So is qgroup reservation space, which should also be added/removed to
 * parent.
 * Or when child tries to release reservation space, parent will underflow its
 * reservation (for relationship adding case).
 *
 * Caller should hold fs_info->qgroup_lock.
 */
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
				    struct ulist *tmp, u64 ref_root,
				    struct btrfs_qgroup *src, int sign)
{
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *glist;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 num_bytes = src->excl;
	int ret = 0;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	qgroup->rfer += sign * num_bytes;
	qgroup->rfer_cmpr += sign * num_bytes;

	WARN_ON(sign < 0 && qgroup->excl < num_bytes);
	qgroup->excl += sign * num_bytes;
	qgroup->excl_cmpr += sign * num_bytes;

	if (sign > 0)
		qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
	else
		qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);

	qgroup_dirty(fs_info, qgroup);

	/* Get all of the parent groups that contain this qgroup */
	list_for_each_entry(glist, &qgroup->groups, next_group) {
		ret = ulist_add(tmp, glist->group->qgroupid,
				qgroup_to_aux(glist->group), GFP_ATOMIC);
		if (ret < 0)
			goto out;
	}

	/* Iterate all of the parents and adjust their reference counts */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qgroup = unode_aux_to_qgroup(unode);
		qgroup->rfer += sign * num_bytes;
		qgroup->rfer_cmpr += sign * num_bytes;
		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
		qgroup->excl += sign * num_bytes;
		if (sign > 0)
			qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
		else
			qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
		qgroup->excl_cmpr += sign * num_bytes;
		qgroup_dirty(fs_info, qgroup);

		/* Add any parents of the parents */
		list_for_each_entry(glist, &qgroup->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
out:
	return ret;
}

/*
 * Quick path for updating qgroup with only excl refs.
 *
 * In that case, just update all parent will be enough.
 * Or we need to do a full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for quick update, return >0 for need to full rescan
 * and mark INCONSISTENT flag.
 * Return < 0 for other error.
 */
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
				   struct ulist *tmp, u64 src, u64 dst,
				   int sign)
{
	struct btrfs_qgroup *qgroup;
	int ret = 1;
	int err = 0;

	qgroup = find_qgroup_rb(fs_info, src);
	if (!qgroup)
		goto out;
	if (qgroup->excl == qgroup->rfer) {
		ret = 0;
		err = __qgroup_excl_accounting(fs_info, tmp, dst,
					       qgroup, sign);
		if (err < 0) {
			ret = err;
			goto out;
		}
	}
out:
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}

int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	unsigned int nofs_flag;
	int ret = 0;

	/* Check the level of src and dst first */
	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
		return -EINVAL;

	/* We hold a transaction handle open, must do a NOFS allocation. */
	nofs_flag = memalloc_nofs_save();
	tmp = ulist_alloc(GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* check if such qgroup relation exist firstly */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = add_qgroup_relation_item(trans, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = add_relation_rb(fs_info, src, dst);
	if (ret < 0) {
		spin_unlock(&fs_info->qgroup_lock);
		goto out;
	}
	ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	ulist_free(tmp);
	return ret;
}

static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
				 u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	bool found = false;
	unsigned int nofs_flag;
	int ret = 0;
	int ret2;

	/* We hold a transaction handle open, must do a NOFS allocation. */
	nofs_flag = memalloc_nofs_save();
	tmp = ulist_alloc(GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!tmp)
		return -ENOMEM;

	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	/*
	 * The parent/member pair doesn't exist, then try to delete the dead
	 * relation items only.
	 */
	if (!member || !parent)
		goto delete_item;

	/* check if such qgroup relation exist firstly */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			found = true;
			break;
		}
	}

delete_item:
	ret = del_qgroup_relation_item(trans, src, dst);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	ret2 = del_qgroup_relation_item(trans, dst, src);
	if (ret2 < 0 && ret2 != -ENOENT)
		goto out;

	/* At least one deletion succeeded, return 0 */
	if (!ret || !ret2)
		ret = 0;

	if (found) {
		spin_lock(&fs_info->qgroup_lock);
		del_relation_rb(fs_info, src, dst);
		ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
		spin_unlock(&fs_info->qgroup_lock);
	}
out:
	ulist_free(tmp);
	return ret;
}

int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	ret = __del_qgroup_relation(trans, src, dst);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	return ret;
}

int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}
	quota_root = fs_info->quota_root;
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (qgroup) {
		ret = -EEXIST;
		goto out;
	}

	ret = add_qgroup_item(trans, quota_root, qgroupid);
	if (ret)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		goto out;
	}
	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	/* Check if there are no children of this qgroup */
	if (!list_empty(&qgroup->members)) {
		ret = -EBUSY;
		goto out;
	}

	ret = del_qgroup_item(trans, qgroupid);
	if (ret && ret != -ENOENT)
		goto out;

	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		ret = __del_qgroup_relation(trans, qgroupid,
					    list->group->qgroupid);
		if (ret)
			goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *qgroup;
	int ret = 0;
	/* Sometimes we would want to clear the limit on this qgroup.
	 * To meet this requirement, we treat the -1 as a special value
	 * which tells the kernel to clear the limit on this qgroup.
	 */
	const u64 CLEAR_VALUE = -1;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
		if (limit->max_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			qgroup->max_rfer = 0;
		} else {
			qgroup->max_rfer = limit->max_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
		if (limit->max_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			qgroup->max_excl = 0;
		} else {
			qgroup->max_excl = limit->max_excl;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
		if (limit->rsv_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			qgroup->rsv_rfer = 0;
		} else {
			qgroup->rsv_rfer = limit->rsv_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
		if (limit->rsv_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			qgroup->rsv_excl = 0;
		} else {
			qgroup->rsv_excl = limit->rsv_excl;
		}
	}
	qgroup->lim_flags |= limit->flags;

	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_limit_item(trans, qgroup);
	if (ret) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_info(fs_info, "unable to update quota limit for %llu",
			   qgroupid);
	}

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
				struct btrfs_delayed_ref_root *delayed_refs,
				struct btrfs_qgroup_extent_record *record)
{
	struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_qgroup_extent_record *entry;
	u64 bytenr = record->bytenr;

	lockdep_assert_held(&delayed_refs->lock);
	trace_btrfs_qgroup_trace_extent(fs_info, record);

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
				 node);
		if (bytenr < entry->bytenr) {
			p = &(*p)->rb_left;
		} else if (bytenr > entry->bytenr) {
			p = &(*p)->rb_right;
		} else {
			if (record->data_rsv && !entry->data_rsv) {
				entry->data_rsv = record->data_rsv;
				entry->data_rsv_refroot =
					record->data_rsv_refroot;
			}
			return 1;
		}
	}

	rb_link_node(&record->node, parent_node, p);
	rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
	return 0;
}

int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
				   struct btrfs_qgroup_extent_record *qrecord)
{
	struct ulist *old_root;
	u64 bytenr = qrecord->bytenr;
	int ret;

	ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false);
	if (ret < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_warn(fs_info,
"error accounting new delayed refs extent (err code: %d), quota inconsistent",
			ret);
		return 0;
	}

	/*
	 * Here we don't need to get the lock of
	 * trans->transaction->delayed_refs, since inserted qrecord won't
	 * be deleted, only qrecord->node may be modified (new qrecord insert)
	 *
	 * So modifying qrecord->old_roots is safe here
	 */
	qrecord->old_roots = old_root;
	return 0;
}

int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
			      u64 num_bytes, gfp_t gfp_flag)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup_extent_record *record;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
	    || bytenr == 0 || num_bytes == 0)
		return 0;
	record = kzalloc(sizeof(*record), gfp_flag);
	if (!record)
		return -ENOMEM;

	delayed_refs = &trans->transaction->delayed_refs;
	record->bytenr = bytenr;
	record->num_bytes = num_bytes;
	record->old_roots = NULL;

	spin_lock(&delayed_refs->lock);
	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
	spin_unlock(&delayed_refs->lock);
	if (ret > 0) {
		kfree(record);
		return 0;
	}
	return btrfs_qgroup_trace_extent_post(fs_info, record);
}

int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
				  struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int nr = btrfs_header_nritems(eb);
	int i, extent_type, ret;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	u64 bytenr, num_bytes;

	/* We can be called directly from walk_up_proc() */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	for (i = 0; i < nr; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;

		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
		/* filter out non qgroup-accountable extents  */
		extent_type = btrfs_file_extent_type(eb, fi);

		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;

		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
		if (!bytenr)
			continue;

		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);

		ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes,
						GFP_NOFS);
		if (ret)
			return ret;
	}
	cond_resched();
	return 0;
}

/*
 * Walk up the tree from the bottom, freeing leaves and any interior
 * nodes which have had all slots visited. If a node (leaf or
 * interior) is freed, the node above it will have its slot
 * incremented. The root node will never be freed.
 *
 * At the end of this function, we should have a path which has all
 * slots incremented to the next position for a search. If we need to
 * read a new node it will be NULL and the node above it will have the
 * correct slot selected for a later read.
 *
 * If we increment the root nodes slot counter past the number of
 * elements, 1 is returned to signal completion of the search.
 */
static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
{
	int level = 0;
	int nr, slot;
	struct extent_buffer *eb;

	if (root_level == 0)
		return 1;

	while (level <= root_level) {
		eb = path->nodes[level];
		nr = btrfs_header_nritems(eb);
		path->slots[level]++;
		slot = path->slots[level];
		if (slot >= nr || level == 0) {
			/*
			 * Don't free the root -  we will detect this
			 * condition after our loop and return a
			 * positive value for caller to stop walking the tree.
			 */
			if (level != root_level) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;

				free_extent_buffer(eb);
				path->nodes[level] = NULL;
				path->slots[level] = 0;
			}
		} else {
			/*
			 * We have a valid slot to walk back down
			 * from. Stop here so caller can process these
			 * new nodes.
			 */
			break;
		}

		level++;
	}

	eb = path->nodes[root_level];
	if (path->slots[root_level] >= btrfs_header_nritems(eb))
		return 1;

	return 0;
}

/*
 * Helper function to trace a subtree tree block swap.
 *
 * The swap will happen in highest tree block, but there may be a lot of
 * tree blocks involved.
 *
 * For example:
 *  OO = Old tree blocks
 *  NN = New tree blocks allocated during balance
 *
 *           File tree (257)                  Reloc tree for 257
 * L2              OO                                NN
 *               /    \                            /    \
 * L1          OO    OO (a)                      OO    NN (a)
 *            / \   / \                         / \   / \
 * L0       OO OO OO OO                       OO OO NN NN
 *                  (b)  (c)                          (b)  (c)
 *
 * When calling qgroup_trace_extent_swap(), we will pass:
 * @src_eb = OO(a)
 * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
 * @dst_level = 0
 * @root_level = 1
 *
 * In that case, qgroup_trace_extent_swap() will search from OO(a) to
 * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
 *
 * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
 *
 * 1) Tree search from @src_eb
 *    It should act as a simplified btrfs_search_slot().
 *    The key for search can be extracted from @dst_path->nodes[dst_level]
 *    (first key).
 *
 * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
 *    NOTE: In above case, OO(a) and NN(a) won't be marked qgroup dirty.
 *    They should be marked during previous (@dst_level = 1) iteration.
 *
 * 3) Mark file extents in leaves dirty
 *    We don't have good way to pick out new file extents only.
 *    So we still follow the old method by scanning all file extents in
 *    the leaf.
 *
 * This function can free us from keeping two paths, thus later we only need
 * to care about how to iterate all new tree blocks in reloc tree.
 */
static int qgroup_trace_extent_swap(struct btrfs_trans_handle *trans,
				    struct extent_buffer *src_eb,
				    struct btrfs_path *dst_path,
				    int dst_level, int root_level,
				    bool trace_leaf)
{
	struct btrfs_key key;
	struct btrfs_path *src_path;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	u32 nodesize = fs_info->nodesize;
	int cur_level = root_level;
	int ret;

	BUG_ON(dst_level > root_level);
	/* Level mismatch */
	if (btrfs_header_level(src_eb) != root_level)
		return -EINVAL;

	src_path = btrfs_alloc_path();
	if (!src_path) {
		ret = -ENOMEM;
		goto out;
	}

	if (dst_level)
		btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
	else
		btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);

	/* For src_path */
	atomic_inc(&src_eb->refs);
	src_path->nodes[root_level] = src_eb;
	src_path->slots[root_level] = dst_path->slots[root_level];
	src_path->locks[root_level] = 0;

	/* A simplified version of btrfs_search_slot() */
	while (cur_level >= dst_level) {
		struct btrfs_key src_key;
		struct btrfs_key dst_key;

		if (src_path->nodes[cur_level] == NULL) {
			struct extent_buffer *eb;
			int parent_slot;

			eb = src_path->nodes[cur_level + 1];
			parent_slot = src_path->slots[cur_level + 1];

			eb = btrfs_read_node_slot(eb, parent_slot);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				goto out;
			}

			src_path->nodes[cur_level] = eb;

			btrfs_tree_read_lock(eb);
			src_path->locks[cur_level] = BTRFS_READ_LOCK;
		}

		src_path->slots[cur_level] = dst_path->slots[cur_level];
		if (cur_level) {
			btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
					&dst_key, dst_path->slots[cur_level]);
			btrfs_node_key_to_cpu(src_path->nodes[cur_level],
					&src_key, src_path->slots[cur_level]);
		} else {
			btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
					&dst_key, dst_path->slots[cur_level]);
			btrfs_item_key_to_cpu(src_path->nodes[cur_level],
					&src_key, src_path->slots[cur_level]);
		}
		/* Content mismatch, something went wrong */
		if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
			ret = -ENOENT;
			goto out;
		}
		cur_level--;
	}

	/*
	 * Now both @dst_path and @src_path have been populated, record the tree
	 * blocks for qgroup accounting.
	 */
	ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
					nodesize, GFP_NOFS);
	if (ret < 0)
		goto out;
	ret = btrfs_qgroup_trace_extent(trans,
					dst_path->nodes[dst_level]->start,
					nodesize, GFP_NOFS);
	if (ret < 0)
		goto out;

	/* Record leaf file extents */
	if (dst_level == 0 && trace_leaf) {
		ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
		if (ret < 0)
			goto out;
		ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
	}
out:
	btrfs_free_path(src_path);
	return ret;
}

/*
 * Helper function to do recursive generation-aware depth-first search, to
 * locate all new tree blocks in a subtree of reloc tree.
 *
 * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
 *         reloc tree
 * L2         NN (a)
 *          /     \
 * L1    OO        NN (b)
 *      /  \      /  \
 * L0  OO  OO    OO  NN
 *               (c) (d)
 * If we pass:
 * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
 * @cur_level = 1
 * @root_level = 1
 *
 * We will iterate through tree blocks NN(b), NN(d) and info qgroup to trace
 * above tree blocks along with their counter parts in file tree.
 * While during search, old tree blocks OO(c) will be skipped as tree block swap
 * won't affect OO(c).
 */
static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle *trans,
					   struct extent_buffer *src_eb,
					   struct btrfs_path *dst_path,
					   int cur_level, int root_level,
					   u64 last_snapshot, bool trace_leaf)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct extent_buffer *eb;
	bool need_cleanup = false;
	int ret = 0;
	int i;

	/* Level sanity check */
	if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
	    root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
	    root_level < cur_level) {
		btrfs_err_rl(fs_info,
			"%s: bad levels, cur_level=%d root_level=%d",
			__func__, cur_level, root_level);
		return -EUCLEAN;
	}

	/* Read the tree block if needed */
	if (dst_path->nodes[cur_level] == NULL) {
		int parent_slot;
		u64 child_gen;

		/*
		 * dst_path->nodes[root_level] must be initialized before
		 * calling this function.
		 */
		if (cur_level == root_level) {
			btrfs_err_rl(fs_info,
	"%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
				__func__, root_level, root_level, cur_level);
			return -EUCLEAN;
		}

		/*
		 * We need to get child blockptr/gen from parent before we can
		 * read it.
		 */
		eb = dst_path->nodes[cur_level + 1];
		parent_slot = dst_path->slots[cur_level + 1];
		child_gen = btrfs_node_ptr_generation(eb, parent_slot);

		/* This node is old, no need to trace */
		if (child_gen < last_snapshot)
			goto out;

		eb = btrfs_read_node_slot(eb, parent_slot);
		if (IS_ERR(eb)) {
			ret = PTR_ERR(eb);
			goto out;
		}

		dst_path->nodes[cur_level] = eb;
		dst_path->slots[cur_level] = 0;

		btrfs_tree_read_lock(eb);
		dst_path->locks[cur_level] = BTRFS_READ_LOCK;
		need_cleanup = true;
	}

	/* Now record this tree block and its counter part for qgroups */
	ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
				       root_level, trace_leaf);
	if (ret < 0)
		goto cleanup;

	eb = dst_path->nodes[cur_level];

	if (cur_level > 0) {
		/* Iterate all child tree blocks */
		for (i = 0; i < btrfs_header_nritems(eb); i++) {
			/* Skip old tree blocks as they won't be swapped */
			if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
				continue;
			dst_path->slots[cur_level] = i;

			/* Recursive call (at most 7 times) */
			ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
					dst_path, cur_level - 1, root_level,
					last_snapshot, trace_leaf);
			if (ret < 0)
				goto cleanup;
		}
	}

cleanup:
	if (need_cleanup) {
		/* Clean up */
		btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
				     dst_path->locks[cur_level]);
		free_extent_buffer(dst_path->nodes[cur_level]);
		dst_path->nodes[cur_level] = NULL;
		dst_path->slots[cur_level] = 0;
		dst_path->locks[cur_level] = 0;
	}
out:
	return ret;
}

static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
				struct extent_buffer *src_eb,
				struct extent_buffer *dst_eb,
				u64 last_snapshot, bool trace_leaf)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *dst_path = NULL;
	int level;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	/* Wrong parameter order */
	if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) {
		btrfs_err_rl(fs_info,
		"%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
			     btrfs_header_generation(src_eb),
			     btrfs_header_generation(dst_eb));
		return -EUCLEAN;
	}

	if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
		ret = -EIO;
		goto out;
	}

	level = btrfs_header_level(dst_eb);
	dst_path = btrfs_alloc_path();
	if (!dst_path) {
		ret = -ENOMEM;
		goto out;
	}
	/* For dst_path */
	atomic_inc(&dst_eb->refs);
	dst_path->nodes[level] = dst_eb;
	dst_path->slots[level] = 0;
	dst_path->locks[level] = 0;

	/* Do the generation aware breadth-first search */
	ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
					      level, last_snapshot, trace_leaf);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	btrfs_free_path(dst_path);
	if (ret < 0)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}

int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
			       struct extent_buffer *root_eb,
			       u64 root_gen, int root_level)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret = 0;
	int level;
	struct extent_buffer *eb = root_eb;
	struct btrfs_path *path = NULL;

	BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
	BUG_ON(root_eb == NULL);

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	if (!extent_buffer_uptodate(root_eb)) {
		ret = btrfs_read_buffer(root_eb, root_gen, root_level, NULL);
		if (ret)
			goto out;
	}

	if (root_level == 0) {
		ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Walk down the tree.  Missing extent blocks are filled in as
	 * we go. Metadata is accounted every time we read a new
	 * extent block.
	 *
	 * When we reach a leaf, we account for file extent items in it,
	 * walk back up the tree (adjusting slot pointers as we go)
	 * and restart the search process.
	 */
	atomic_inc(&root_eb->refs);	/* For path */
	path->nodes[root_level] = root_eb;
	path->slots[root_level] = 0;
	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
walk_down:
	level = root_level;
	while (level >= 0) {
		if (path->nodes[level] == NULL) {
			int parent_slot;
			u64 child_bytenr;

			/*
			 * We need to get child blockptr from parent before we
			 * can read it.
			 */
			eb = path->nodes[level + 1];
			parent_slot = path->slots[level + 1];
			child_bytenr = btrfs_node_blockptr(eb, parent_slot);

			eb = btrfs_read_node_slot(eb, parent_slot);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				goto out;
			}

			path->nodes[level] = eb;
			path->slots[level] = 0;

			btrfs_tree_read_lock(eb);
			path->locks[level] = BTRFS_READ_LOCK;

			ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
							fs_info->nodesize,
							GFP_NOFS);
			if (ret)
				goto out;
		}

		if (level == 0) {
			ret = btrfs_qgroup_trace_leaf_items(trans,
							    path->nodes[level]);
			if (ret)
				goto out;

			/* Nonzero return here means we completed our search */
			ret = adjust_slots_upwards(path, root_level);
			if (ret)
				break;

			/* Restart search with new slots */
			goto walk_down;
		}

		level--;
	}

	ret = 0;
out:
	btrfs_free_path(path);

	return ret;
}

#define UPDATE_NEW	0
#define UPDATE_OLD	1
/*
 * Walk all of the roots that points to the bytenr and adjust their refcnts.
 */
static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
				struct ulist *roots, struct ulist *tmp,
				struct ulist *qgroups, u64 seq, int update_old)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct ulist_node *tmp_unode;
	struct ulist_iterator tmp_uiter;
	struct btrfs_qgroup *qg;
	int ret = 0;

	if (!roots)
		return 0;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		qg = find_qgroup_rb(fs_info, unode->val);
		if (!qg)
			continue;

		ulist_reinit(tmp);
		ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
				GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;

			qg = unode_aux_to_qgroup(tmp_unode);
			if (update_old)
				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
			else
				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(qgroups, glist->group->qgroupid,
						qgroup_to_aux(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
				ret = ulist_add(tmp, glist->group->qgroupid,
						qgroup_to_aux(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
			}
		}
	}
	return 0;
}

2349 * Update qgroup rfer/excl counters.
2350 * Rfer update is easy, codes can explain themselves.
2352 * Excl update is tricky, the update is split into 2 parts.
2353 * Part 1: Possible exclusive <-> sharing detect:
2355 * -------------------------------------
2357 * -------------------------------------
2359 * -------------------------------------
2362 * A: cur_old_roots < nr_old_roots (not exclusive before)
2363 * !A: cur_old_roots == nr_old_roots (possible exclusive before)
2364 * B: cur_new_roots < nr_new_roots (not exclusive now)
2365 * !B: cur_new_roots == nr_new_roots (possible exclusive now)
2368 * +: Possible sharing -> exclusive -: Possible exclusive -> sharing
2369 * *: Definitely not changed. **: Possible unchanged.
2371 * For !A and !B condition, the exception is cur_old/new_roots == 0 case.
2373 * To make the logic clear, we first use condition A and B to split
2374 * combination into 4 results.
2376 * Then, for result "+" and "-", check old/new_roots == 0 case, as in them
2377 * only on variant maybe 0.
2379 * Lastly, check result **, since there are 2 variants maybe 0, split them
2381 * But this time we don't need to consider other things, the codes and logic
2382 * is easy to understand now.
static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
				  struct ulist *qgroups, u64 nr_old_roots,
				  u64 nr_new_roots, u64 num_bytes, u64 seq)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	u64 cur_new_count, cur_old_count;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(qgroups, &uiter))) {
		bool dirty = false;

		qg = unode_aux_to_qgroup(unode);
		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);

		trace_qgroup_update_counters(fs_info, qg, cur_old_count,
					     cur_new_count);

		/* Rfer update part */
		if (cur_old_count == 0 && cur_new_count > 0) {
			qg->rfer += num_bytes;
			qg->rfer_cmpr += num_bytes;
			dirty = true;
		}
		if (cur_old_count > 0 && cur_new_count == 0) {
			qg->rfer -= num_bytes;
			qg->rfer_cmpr -= num_bytes;
			dirty = true;
		}

		/* Excl update part */
		/* Exclusive/none -> shared case */
		if (cur_old_count == nr_old_roots &&
		    cur_new_count < nr_new_roots) {
			/* Exclusive -> shared */
			if (cur_old_count != 0) {
				qg->excl -= num_bytes;
				qg->excl_cmpr -= num_bytes;
				dirty = true;
			}
		}

		/* Shared -> exclusive/none case */
		if (cur_old_count < nr_old_roots &&
		    cur_new_count == nr_new_roots) {
			/* Shared -> exclusive */
			if (cur_new_count != 0) {
				qg->excl += num_bytes;
				qg->excl_cmpr += num_bytes;
				dirty = true;
			}
		}

		/* Exclusive/none -> exclusive/none case */
		if (cur_old_count == nr_old_roots &&
		    cur_new_count == nr_new_roots) {
			if (cur_old_count == 0) {
				/* None -> exclusive/none */

				if (cur_new_count != 0) {
					/* None -> exclusive */
					qg->excl += num_bytes;
					qg->excl_cmpr += num_bytes;
					dirty = true;
				}
				/* None -> none, nothing changed */
			} else {
				/* Exclusive -> exclusive/none */

				if (cur_new_count == 0) {
					/* Exclusive -> none */
					qg->excl -= num_bytes;
					qg->excl_cmpr -= num_bytes;
					dirty = true;
				}
				/* Exclusive -> exclusive, nothing changed */
			}
		}

		if (dirty)
			qgroup_dirty(fs_info, qg);
	}
	return 0;
}
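/*
 * Worked example for the truth table above (added illustration, not part of
 * the original source): an extent referenced only by subvolume A has
 * cur_old_count == nr_old_roots == 1 for A's qgroup.  If snapshot B gains a
 * second reference, nr_new_roots becomes 2 while A's qgroup still only
 * counts itself (cur_new_count == 1 < nr_new_roots).  That is the
 * "Exclusive -> shared" branch: rfer stays untouched and excl drops by
 * num_bytes.
 */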
/*
 * Check if @roots could potentially be a list of fs tree roots.
 *
 * Return 0 for a ulist that is definitely not fs/subvol tree roots.
 * Return 1 if the list may contain fs/subvol tree roots (an empty list
 * counts as possible).
 */
static int maybe_fs_roots(struct ulist *roots)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;

	/* Empty one, still possible for fs roots */
	if (!roots || roots->nnodes == 0)
		return 1;

	ULIST_ITER_INIT(&uiter);
	unode = ulist_next(roots, &uiter);

	/*
	 * If it contains fs tree roots, then it must belong to fs/subvol
	 * trees.
	 * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
	 */
	return is_fstree(unode->val);
}
int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
				u64 num_bytes, struct ulist *old_roots,
				struct ulist *new_roots)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct ulist *qgroups = NULL;
	struct ulist *tmp = NULL;
	u64 seq;
	u64 nr_new_roots = 0;
	u64 nr_old_roots = 0;
	int ret = 0;

	/*
	 * If quotas get disabled meanwhile, the resources need to be freed and
	 * we can't just exit here.
	 */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		goto out_free;

	if (new_roots) {
		if (!maybe_fs_roots(new_roots))
			goto out_free;
		nr_new_roots = new_roots->nnodes;
	}
	if (old_roots) {
		if (!maybe_fs_roots(old_roots))
			goto out_free;
		nr_old_roots = old_roots->nnodes;
	}

	/* Quick exit, either not fs tree roots, or won't affect any qgroup */
	if (nr_old_roots == 0 && nr_new_roots == 0)
		goto out_free;

	BUG_ON(!fs_info->quota_root);

	trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
					  num_bytes, nr_old_roots,
					  nr_new_roots);

	qgroups = ulist_alloc(GFP_NOFS);
	if (!qgroups) {
		ret = -ENOMEM;
		goto out_free;
	}
	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp) {
		ret = -ENOMEM;
		goto out_free;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			ret = 0;
			goto out_free;
		}
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	spin_lock(&fs_info->qgroup_lock);
	seq = fs_info->qgroup_seq;

	/* Update old refcnts using old_roots */
	ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
				   UPDATE_OLD);
	if (ret < 0)
		goto out;

	/* Update new refcnts using new_roots */
	ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
				   UPDATE_NEW);
	if (ret < 0)
		goto out;

	qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
			       num_bytes, seq);

	/*
	 * Bump qgroup_seq to avoid seq overlap
	 */
	fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
out:
	spin_unlock(&fs_info->qgroup_lock);
out_free:
	ulist_free(tmp);
	ulist_free(qgroups);
	ulist_free(old_roots);
	ulist_free(new_roots);
	return ret;
}
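/*
 * Note for callers: btrfs_qgroup_account_extent() consumes @old_roots and
 * @new_roots.  Both ulists are freed at out_free on every path, success or
 * error, so the caller must not reference them after this returns.
 */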
int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup_extent_record *record;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct ulist *new_roots = NULL;
	struct rb_node *node;
	u64 num_dirty_extents = 0;
	u64 qgroup_to_skip;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	qgroup_to_skip = delayed_refs->qgroup_to_skip;
	while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
		record = rb_entry(node, struct btrfs_qgroup_extent_record,
				  node);

		num_dirty_extents++;
		trace_btrfs_qgroup_account_extents(fs_info, record);

		if (!ret) {
			/*
			 * Old roots should be searched when inserting qgroup
			 * extent record
			 */
			if (WARN_ON(!record->old_roots)) {
				/* Search commit root to find old_roots */
				ret = btrfs_find_all_roots(NULL, fs_info,
						record->bytenr, 0,
						&record->old_roots, false);
				if (ret < 0)
					goto cleanup;
			}

			/* Free the reserved data space */
			btrfs_qgroup_free_refroot(fs_info,
					record->data_rsv_refroot,
					record->data_rsv,
					BTRFS_QGROUP_RSV_DATA);
			/*
			 * Use SEQ_LAST as time_seq to do special search, which
			 * doesn't lock tree or delayed_refs and search current
			 * root. It's safe inside commit_transaction().
			 */
			ret = btrfs_find_all_roots(trans, fs_info,
				record->bytenr, SEQ_LAST, &new_roots, false);
			if (ret < 0)
				goto cleanup;
			if (qgroup_to_skip) {
				ulist_del(new_roots, qgroup_to_skip, 0);
				ulist_del(record->old_roots, qgroup_to_skip,
					  0);
			}
			ret = btrfs_qgroup_account_extent(trans, record->bytenr,
							  record->num_bytes,
							  record->old_roots,
							  new_roots);
			record->old_roots = NULL;
			new_roots = NULL;
		}
cleanup:
		ulist_free(record->old_roots);
		ulist_free(new_roots);
		new_roots = NULL;
		rb_erase(node, &delayed_refs->dirty_extent_root);
		kfree(record);

	}
	trace_qgroup_num_dirty_extents(fs_info, trans->transid,
				       num_dirty_extents);
	return ret;
}
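/*
 * Note: once a record fails to be accounted above, @ret stays negative and
 * the remaining records are only dequeued and freed, never accounted.  The
 * loop still walks the whole dirty extent tree so no record or old_roots
 * ulist is leaked.
 */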
/*
 * called from commit_transaction. Writes all changed qgroups to disk.
 */
int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret = 0;

	if (!fs_info->quota_root)
		return ret;

	spin_lock(&fs_info->qgroup_lock);
	while (!list_empty(&fs_info->dirty_qgroups)) {
		struct btrfs_qgroup *qgroup;

		qgroup = list_first_entry(&fs_info->dirty_qgroups,
					  struct btrfs_qgroup, dirty);
		list_del_init(&qgroup->dirty);
		spin_unlock(&fs_info->qgroup_lock);
		ret = update_qgroup_info_item(trans, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		ret = update_qgroup_limit_item(trans, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		spin_lock(&fs_info->qgroup_lock);
	}
	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
	else
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_status_item(trans);
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;

	return ret;
}
/*
 * Copy the accounting information between qgroups. This is necessary
 * when a snapshot or a subvolume is created. Throwing an error will
 * cause a transaction abort so we take extra care here to only error
 * when a readonly fs is a reasonable outcome.
 */
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
			 u64 objectid, struct btrfs_qgroup_inherit *inherit)
{
	int ret = 0;
	int i;
	u64 *i_qgroups;
	bool committing = false;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *srcgroup;
	struct btrfs_qgroup *dstgroup;
	bool need_rescan = false;
	u32 level_size = 0;
	u64 nums;

	/*
	 * There are only two callers of this function.
	 *
	 * One in create_subvol() in the ioctl context, which needs to hold
	 * the qgroup_ioctl_lock.
	 *
	 * The other one in create_pending_snapshot() where no other qgroup
	 * code can modify the fs as they all need to either start a new trans
	 * or hold a trans handle, thus we don't need to hold
	 * qgroup_ioctl_lock.
	 * This would avoid long and complex lock chain and make lockdep happy.
	 */
	spin_lock(&fs_info->trans_lock);
	if (trans->transaction->state == TRANS_STATE_COMMIT_DOING)
		committing = true;
	spin_unlock(&fs_info->trans_lock);

	if (!committing)
		mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		goto out;

	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
		       2 * inherit->num_excl_copies;
		for (i = 0; i < nums; ++i) {
			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);

			/*
			 * Zero out invalid groups so we can ignore
			 * them later.
			 */
			if (!srcgroup ||
			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
				*i_qgroups = 0ULL;

			++i_qgroups;
		}
	}

	/*
	 * create a tracking group for the subvol itself
	 */
	ret = add_qgroup_item(trans, quota_root, objectid);
	if (ret)
		goto out;

	/*
	 * add qgroup to all inherited groups
	 */
	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
			if (*i_qgroups == 0)
				continue;
			ret = add_qgroup_relation_item(trans, objectid,
						       *i_qgroups);
			if (ret && ret != -EEXIST)
				goto out;
			ret = add_qgroup_relation_item(trans, *i_qgroups,
						       objectid);
			if (ret && ret != -EEXIST)
				goto out;
		}
		ret = 0;
	}

	spin_lock(&fs_info->qgroup_lock);

	dstgroup = add_qgroup_rb(fs_info, objectid);
	if (IS_ERR(dstgroup)) {
		ret = PTR_ERR(dstgroup);
		goto unlock;
	}

	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
		dstgroup->lim_flags = inherit->lim.flags;
		dstgroup->max_rfer = inherit->lim.max_rfer;
		dstgroup->max_excl = inherit->lim.max_excl;
		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
		dstgroup->rsv_excl = inherit->lim.rsv_excl;

		ret = update_qgroup_limit_item(trans, dstgroup);
		if (ret) {
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
			btrfs_info(fs_info,
				   "unable to update quota limit for %llu",
				   dstgroup->qgroupid);
			goto unlock;
		}
	}

	if (srcid) {
		srcgroup = find_qgroup_rb(fs_info, srcid);
		if (!srcgroup)
			goto unlock;

		/*
		 * We call inherit after we clone the root in order to make sure
		 * our counts don't go crazy, so at this point the only
		 * difference between the two roots should be the root node.
		 */
		level_size = fs_info->nodesize;
		dstgroup->rfer = srcgroup->rfer;
		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
		dstgroup->excl = level_size;
		dstgroup->excl_cmpr = level_size;
		srcgroup->excl = level_size;
		srcgroup->excl_cmpr = level_size;

		/* inherit the limit info */
		dstgroup->lim_flags = srcgroup->lim_flags;
		dstgroup->max_rfer = srcgroup->max_rfer;
		dstgroup->max_excl = srcgroup->max_excl;
		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
		dstgroup->rsv_excl = srcgroup->rsv_excl;

		qgroup_dirty(fs_info, dstgroup);
		qgroup_dirty(fs_info, srcgroup);
	}

	if (!inherit)
		goto unlock;

	i_qgroups = (u64 *)(inherit + 1);
	for (i = 0; i < inherit->num_qgroups; ++i) {
		if (*i_qgroups) {
			ret = add_relation_rb(fs_info, objectid, *i_qgroups);
			if (ret)
				goto unlock;
		}
		++i_qgroups;

		/*
		 * If we're doing a snapshot, and adding the snapshot to a new
		 * qgroup, the numbers are guaranteed to be incorrect.
		 */
		if (srcid)
			need_rescan = true;
	}

	for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		if (!i_qgroups[0] || !i_qgroups[1])
			continue;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->rfer = src->rfer - level_size;
		dst->rfer_cmpr = src->rfer_cmpr - level_size;

		/* Manually tweaking numbers certainly needs a rescan */
		need_rescan = true;
	}
	for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		if (!i_qgroups[0] || !i_qgroups[1])
			continue;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->excl = src->excl + level_size;
		dst->excl_cmpr = src->excl_cmpr + level_size;
		need_rescan = true;
	}

unlock:
	spin_unlock(&fs_info->qgroup_lock);
	if (!ret)
		ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup);
out:
	if (!committing)
		mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (need_rescan)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}
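/*
 * Note on need_rescan above: copying rfer/excl numbers between unrelated
 * qgroups (the num_ref_copies/num_excl_copies loops) produces numbers that
 * are only estimates, so the function marks the status inconsistent and a
 * later rescan is expected to fix them up.
 */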
static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
{
	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
	    qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
		return false;

	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
	    qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
		return false;

	return true;
}
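/*
 * Note: qgroup_rsv_total() is included in both checks above, so outstanding
 * reservations count against max_rfer/max_excl as if they were already
 * committed.  This is what makes -EDQUOT fire at reservation time instead
 * of at accounting time.
 */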
static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
			  enum btrfs_qgroup_rsv_type type)
{
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;
	struct ulist_node *unode;
	struct ulist_iterator uiter;

	if (!is_fstree(ref_root))
		return 0;

	if (num_bytes == 0)
		return 0;

	if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
	    capable(CAP_SYS_RESOURCE))
		enforce = false;

	spin_lock(&fs_info->qgroup_lock);
	if (!fs_info->quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	/*
	 * in a first step, we check all affected qgroups if any limits would
	 * be exceeded
	 */
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			qgroup_to_aux(qgroup), GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = unode_aux_to_qgroup(unode);

		if (enforce && !qgroup_check_limits(qg, num_bytes)) {
			ret = -EDQUOT;
			goto out;
		}

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
	/*
	 * no limits exceeded, now record the reservation into all qgroups
	 */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;

		qg = unode_aux_to_qgroup(unode);

		qgroup_rsv_add(fs_info, qg, num_bytes, type);
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
	return ret;
}
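/*
 * Note: the two loops above form a check-then-commit pattern.  The first
 * walk may fail half way with -EDQUOT without having modified any counter,
 * so nothing needs to be rolled back; only the second walk touches rsv
 * values.  Everything runs under fs_info->qgroup_lock, which is why the
 * ulist additions must use GFP_ATOMIC.
 */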
/*
 * Free @num_bytes of reserved space with @type for qgroup.  (Normally level 0
 * qgroup).
 *
 * Will handle all higher level qgroup too.
 *
 * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
 * This special case is only used for META_PERTRANS type.
 */
void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
			       u64 ref_root, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	struct btrfs_qgroup *qgroup;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int ret = 0;

	if (!is_fstree(ref_root))
		return;

	if (num_bytes == 0)
		return;

	if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
		WARN(1, "%s: Invalid type to free", __func__);
		return;
	}
	spin_lock(&fs_info->qgroup_lock);

	if (!fs_info->quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	if (num_bytes == (u64)-1)
		/*
		 * We're freeing all pertrans rsv, get reserved value from
		 * level 0 qgroup as real num_bytes to free.
		 */
		num_bytes = qgroup->rsv.values[type];

	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			qgroup_to_aux(qgroup), GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = unode_aux_to_qgroup(unode);

		qgroup_rsv_release(fs_info, qg, num_bytes, type);

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
}
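/*
 * Note: like qgroup_reserve(), the walk above starts from the level 0
 * qgroup and follows the next_group lists, so a release is propagated to
 * every ancestor qgroup that the original reservation was charged to.
 */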
/*
 * Check if the leaf is the last leaf. Which means all node pointers
 * are at their last position.
 */
static bool is_last_leaf(struct btrfs_path *path)
{
	int i;

	for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
		if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
			return false;
	}
	return true;
}
/*
 * returns < 0 on error, 0 when more leaves are to be scanned.
 * returns 1 when done.
 */
static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
			      struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_key found;
	struct extent_buffer *scratch_leaf = NULL;
	struct ulist *roots = NULL;
	u64 num_bytes;
	bool done;
	int slot;
	int ret;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	ret = btrfs_search_slot_for_read(fs_info->extent_root,
					 &fs_info->qgroup_rescan_progress,
					 path, 1, 0);

	btrfs_debug(fs_info,
		"current progress key (%llu %u %llu), search_slot ret %d",
		fs_info->qgroup_rescan_progress.objectid,
		fs_info->qgroup_rescan_progress.type,
		fs_info->qgroup_rescan_progress.offset, ret);

	if (ret) {
		/*
		 * The rescan is about to end, we will not be scanning any
		 * further blocks. We cannot unset the RESCAN flag here, because
		 * we want to commit the transaction if everything went well.
		 * To make the live accounting work in this phase, we set our
		 * scan progress pointer such that every real extent objectid
		 * will be smaller.
		 */
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
		btrfs_release_path(path);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		return ret;
	}
	done = is_last_leaf(path);

	btrfs_item_key_to_cpu(path->nodes[0], &found,
			      btrfs_header_nritems(path->nodes[0]) - 1);
	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;

	scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
	if (!scratch_leaf) {
		ret = -ENOMEM;
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		goto out;
	}
	slot = path->slots[0];
	btrfs_release_path(path);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
		    found.type != BTRFS_METADATA_ITEM_KEY)
			continue;
		if (found.type == BTRFS_METADATA_ITEM_KEY)
			num_bytes = fs_info->nodesize;
		else
			num_bytes = found.offset;

		ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
					   &roots, false);
		if (ret < 0)
			goto out;
		/* For rescan, just pass old_roots as NULL */
		ret = btrfs_qgroup_account_extent(trans, found.objectid,
						  num_bytes, NULL, roots);
		if (ret < 0)
			goto out;
	}
out:
	if (scratch_leaf)
		free_extent_buffer(scratch_leaf);

	if (done && !ret) {
		ret = 1;
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
	}
	return ret;
}
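/*
 * Note: qgroup_rescan_progress.objectid is advanced (under
 * qgroup_rescan_lock) before the cloned leaf is processed.  This is the
 * handoff point with btrfs_qgroup_account_extent(), which skips live
 * accounting only for extents still ahead of the rescan cursor.
 */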
static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
{
	return btrfs_fs_closing(fs_info) ||
		test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
}

static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
						     qgroup_rescan_work);
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans = NULL;
	int err = -ENOMEM;
	int ret = 0;
	bool stopped = false;

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	/*
	 * Rescan should only search for commit root, and any later difference
	 * should be recorded by qgroup
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	err = 0;
	while (!err && !(stopped = rescan_should_stop(fs_info))) {
		trans = btrfs_start_transaction(fs_info->fs_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			break;
		}
		if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
			err = -EINTR;
		} else {
			err = qgroup_rescan_leaf(trans, path);
		}
		if (err > 0)
			btrfs_commit_transaction(trans);
		else
			btrfs_end_transaction(trans);
	}

out:
	btrfs_free_path(path);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (err > 0 &&
	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	} else if (err < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	/*
	 * only update status, since the previous part has already updated the
	 * qgroup info.
	 */
	trans = btrfs_start_transaction(fs_info->quota_root, 1);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		trans = NULL;
		btrfs_err(fs_info,
			  "fail to start transaction for status update: %d",
			  err);
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (!stopped)
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	if (trans) {
		ret = update_qgroup_status_item(trans);
		if (ret < 0) {
			err = ret;
			btrfs_err(fs_info, "fail to update qgroup status: %d",
				  err);
		}
	}
	fs_info->qgroup_rescan_running = false;
	complete_all(&fs_info->qgroup_rescan_completion);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (!trans)
		return;

	btrfs_end_transaction(trans);

	if (stopped) {
		btrfs_info(fs_info, "qgroup scan paused");
	} else if (err >= 0) {
		btrfs_info(fs_info, "qgroup scan completed%s",
			err > 0 ? " (inconsistency flag cleared)" : "");
	} else {
		btrfs_err(fs_info, "qgroup scan failed with %d", err);
	}
}
/*
 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
 * memory required for the rescan context.
 */
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags)
{
	int ret = 0;

	if (!init_flags) {
		/* we're resuming qgroup rescan at mount time */
		if (!(fs_info->qgroup_flags &
		      BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
			btrfs_warn(fs_info,
			"qgroup rescan init failed, qgroup rescan is not queued");
			ret = -EINVAL;
		} else if (!(fs_info->qgroup_flags &
			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
			btrfs_warn(fs_info,
			"qgroup rescan init failed, qgroup is not enabled");
			ret = -EINVAL;
		}

		if (ret)
			return ret;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);

	if (init_flags) {
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
			btrfs_warn(fs_info,
				   "qgroup rescan is already in progress");
			ret = -EINPROGRESS;
		} else if (!(fs_info->qgroup_flags &
			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
			btrfs_warn(fs_info,
			"qgroup rescan init failed, qgroup is not enabled");
			ret = -EINVAL;
		}

		if (ret) {
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			return ret;
		}
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	memset(&fs_info->qgroup_rescan_progress, 0,
		sizeof(fs_info->qgroup_rescan_progress));
	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
	init_completion(&fs_info->qgroup_rescan_completion);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	btrfs_init_work(&fs_info->qgroup_rescan_work,
			btrfs_qgroup_rescan_worker, NULL, NULL);
	return 0;
}
static void
qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	spin_lock(&fs_info->qgroup_lock);
	/* clear all current qgroup tracking information */
	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		qgroup->rfer = 0;
		qgroup->rfer_cmpr = 0;
		qgroup->excl = 0;
		qgroup->excl_cmpr = 0;
		qgroup_dirty(fs_info, qgroup);
	}
	spin_unlock(&fs_info->qgroup_lock);
}
int
btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
{
	int ret = 0;
	struct btrfs_trans_handle *trans;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (ret)
		return ret;

	/*
	 * We have set the rescan_progress to 0, which means no more
	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
	 * However, btrfs_qgroup_account_ref may be right after its call
	 * to btrfs_find_all_roots, in which case it would still do the
	 * accounting.
	 * To solve this, we're committing the transaction, which will
	 * ensure we run all delayed refs and only after that, we are
	 * going to clear all tracking information for a clean start.
	 */

	trans = btrfs_join_transaction(fs_info->fs_root);
	if (IS_ERR(trans)) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return PTR_ERR(trans);
	}
	ret = btrfs_commit_transaction(trans);
	if (ret) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return ret;
	}

	qgroup_rescan_zero_tracking(fs_info);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	fs_info->qgroup_rescan_running = true;
	btrfs_queue_work(fs_info->qgroup_rescan_workers,
			 &fs_info->qgroup_rescan_work);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	return 0;
}
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
				     bool interruptible)
{
	int running;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	running = fs_info->qgroup_rescan_running;
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (!running)
		return 0;

	if (interruptible)
		ret = wait_for_completion_interruptible(
			&fs_info->qgroup_rescan_completion);
	else
		wait_for_completion(&fs_info->qgroup_rescan_completion);

	return ret;
}
/*
 * this is only called from open_ctree where we're still single threaded, thus
 * locking is omitted here.
 */
void
btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
{
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		mutex_lock(&fs_info->qgroup_rescan_lock);
		fs_info->qgroup_rescan_running = true;
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
	}
}
#define rbtree_iterate_from_safe(node, next, start)				\
	for (node = start; node && ({ next = rb_next(node); 1;}); node = next)

static int qgroup_unreserve_range(struct btrfs_inode *inode,
				  struct extent_changeset *reserved, u64 start,
				  u64 len)
{
	struct rb_node *node;
	struct rb_node *next;
	struct ulist_node *entry;
	int ret = 0;

	node = reserved->range_changed.root.rb_node;
	if (!node)
		return 0;
	while (node) {
		entry = rb_entry(node, struct ulist_node, rb_node);
		if (entry->val < start)
			node = node->rb_right;
		else
			node = node->rb_left;
	}

	if (entry->val > start && rb_prev(&entry->rb_node))
		entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node,
				 rb_node);

	rbtree_iterate_from_safe(node, next, &entry->rb_node) {
		u64 entry_start;
		u64 entry_end;
		u64 entry_len;
		int clear_ret;

		entry = rb_entry(node, struct ulist_node, rb_node);
		entry_start = entry->val;
		entry_end = entry->aux;
		entry_len = entry_end - entry_start + 1;

		if (entry_start >= start + len)
			break;
		if (entry_start + entry_len <= start)
			continue;
		/*
		 * Now the entry is in [start, start + len), revert the
		 * EXTENT_QGROUP_RESERVED bit.
		 */
		clear_ret = clear_extent_bits(&inode->io_tree, entry_start,
					      entry_end, EXTENT_QGROUP_RESERVED);
		if (!ret && clear_ret < 0)
			ret = clear_ret;

		ulist_del(&reserved->range_changed, entry->val, entry->aux);
		if (likely(reserved->bytes_changed >= entry_len)) {
			reserved->bytes_changed -= entry_len;
		} else {
			WARN_ON(1);
			reserved->bytes_changed = 0;
		}
	}

	return ret;
}
/*
 * Try to free some space for qgroup.
 *
 * For qgroup, there are only 3 ways to free qgroup space:
 * - Flush nodatacow write
 *   Any nodatacow write will free its reserved data space at run_delalloc_range().
 *   In theory, we should only flush nodatacow inodes, but it's not yet
 *   possible, so we need to flush the whole root.
 *
 * - Wait for ordered extents
 *   When ordered extents are finished, their reserved metadata is finally
 *   converted to per_trans status, which can be freed by a later commit
 *   transaction.
 *
 * - Commit transaction
 *   This would free the meta_per_trans space.
 *   In theory this shouldn't provide much space, but every bit of freed
 *   qgroup space helps when we're near the limit.
 */
static int try_flush_qgroup(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	int ret;
	bool can_commit = true;

	/*
	 * If current process holds a transaction, we shouldn't flush, as we
	 * assume all space reservation happens before a transaction handle is
	 * held.
	 *
	 * But there are cases like btrfs_delayed_item_reserve_metadata() where
	 * we try to reserve space with one transaction handle already held.
	 * In that case we can't commit the transaction, but at least try to
	 * end it and hope the started data writes can free some space.
	 */
	if (current->journal_info &&
	    current->journal_info != BTRFS_SEND_TRANS_STUB)
		can_commit = false;

	/*
	 * We don't want to run flush again and again, so if there is a running
	 * one, we won't try to start a new flush, but exit directly.
	 */
	if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
		/*
		 * We are already holding a transaction, thus we can block other
		 * threads from flushing.  So exit right now. This increases
		 * the chance of EDQUOT for heavy load and near limit cases.
		 * But we can argue that if we're already near limit, EDQUOT is
		 * unavoidable anyway.
		 */
		if (!can_commit)
			return 0;

		wait_event(root->qgroup_flush_wait,
			!test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
		return 0;
	}

	ret = btrfs_start_delalloc_snapshot(root);
	if (ret < 0)
		goto out;
	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	if (can_commit)
		ret = btrfs_commit_transaction(trans);
	else
		ret = btrfs_end_transaction(trans);
out:
	clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
	wake_up(&root->qgroup_flush_wait);
	return ret;
}
static int qgroup_reserve_data(struct btrfs_inode *inode,
			struct extent_changeset **reserved_ret, u64 start,
			u64 len)
{
	struct btrfs_root *root = inode->root;
	struct extent_changeset *reserved;
	bool new_reserved = false;
	u64 orig_reserved;
	u64 to_reserve;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
	    !is_fstree(root->root_key.objectid) || len == 0)
		return 0;

	/* @reserved parameter is mandatory for qgroup */
	if (WARN_ON(!reserved_ret))
		return -EINVAL;
	if (!*reserved_ret) {
		new_reserved = true;
		*reserved_ret = extent_changeset_alloc();
		if (!*reserved_ret)
			return -ENOMEM;
	}
	reserved = *reserved_ret;
	/* Record already reserved space */
	orig_reserved = reserved->bytes_changed;
	ret = set_record_extent_bits(&inode->io_tree, start,
			start + len - 1, EXTENT_QGROUP_RESERVED, reserved);

	/* Newly reserved space */
	to_reserve = reserved->bytes_changed - orig_reserved;
	trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len,
					to_reserve, QGROUP_RESERVE);
	if (ret < 0)
		goto out;
	ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
	if (ret < 0)
		goto cleanup;

	return ret;

cleanup:
	qgroup_unreserve_range(inode, reserved, start, len);
out:
	if (new_reserved) {
		extent_changeset_release(reserved);
		kfree(reserved);
		*reserved_ret = NULL;
	}
	return ret;
}

/*
 * Reserve qgroup space for range [start, start + len).
 *
 * This function will either reserve space from related qgroups or do nothing
 * if the range is already reserved.
 *
 * Return 0 for successful reservation
 * Return <0 for error (including -EDQUOT)
 *
 * NOTE: This function may sleep for memory allocation, dirty page flushing and
 * commit transaction.  So caller should not hold any dirty page locked.
 */
int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
			struct extent_changeset **reserved_ret, u64 start,
			u64 len)
{
	int ret;

	ret = qgroup_reserve_data(inode, reserved_ret, start, len);
	if (ret <= 0 && ret != -EDQUOT)
		return ret;

	ret = try_flush_qgroup(inode->root);
	if (ret < 0)
		return ret;

	return qgroup_reserve_data(inode, reserved_ret, start, len);
}
/* Free ranges specified by @reserved, normally in error path */
static int qgroup_free_reserved_data(struct btrfs_inode *inode,
			struct extent_changeset *reserved, u64 start, u64 len)
{
	struct btrfs_root *root = inode->root;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct extent_changeset changeset;
	int freed = 0;
	int ret;

	extent_changeset_init(&changeset);
	len = round_up(start + len, root->fs_info->sectorsize);
	start = round_down(start, root->fs_info->sectorsize);

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
		u64 range_start = unode->val;
		/* unode->aux is the inclusive end */
		u64 range_len = unode->aux - range_start + 1;
		u64 free_start;
		u64 free_len;

		extent_changeset_release(&changeset);

		/* Only free range in range [start, start + len) */
		if (range_start >= start + len ||
		    range_start + range_len <= start)
			continue;
		free_start = max(range_start, start);
		free_len = min(start + len, range_start + range_len) -
			   free_start;
		/*
		 * TODO: Also modify reserved->ranges_reserved to reflect
		 * the modification.
		 *
		 * However as long as we free qgroup reserved according to
		 * EXTENT_QGROUP_RESERVED, we won't double free.
		 * So no need to rush.
		 */
		ret = clear_record_extent_bits(&inode->io_tree, free_start,
					       free_start + free_len - 1,
					       EXTENT_QGROUP_RESERVED, &changeset);
		if (ret < 0)
			goto out;
		freed += changeset.bytes_changed;
	}
	btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed,
				  BTRFS_QGROUP_RSV_DATA);
	ret = freed;
out:
	extent_changeset_release(&changeset);
	return ret;
}
static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
			struct extent_changeset *reserved, u64 start, u64 len,
			int free)
{
	struct extent_changeset changeset;
	int trace_op = QGROUP_RELEASE;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &inode->root->fs_info->flags))
		return 0;

	/* In release case, we shouldn't have @reserved */
	WARN_ON(!free && reserved);
	if (free && reserved)
		return qgroup_free_reserved_data(inode, reserved, start, len);
	extent_changeset_init(&changeset);
	ret = clear_record_extent_bits(&inode->io_tree, start, start + len - 1,
				       EXTENT_QGROUP_RESERVED, &changeset);
	if (ret < 0)
		goto out;

	if (free)
		trace_op = QGROUP_FREE;
	trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len,
					changeset.bytes_changed, trace_op);
	if (free)
		btrfs_qgroup_free_refroot(inode->root->fs_info,
				inode->root->root_key.objectid,
				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
	ret = changeset.bytes_changed;
out:
	extent_changeset_release(&changeset);
	return ret;
}
/*
 * Free a reserved space range from io_tree and related qgroups
 *
 * Should be called when a range of pages get invalidated before reaching disk.
 * Or for error cleanup case.
 * if @reserved is given, only reserved range in [@start, @start + @len) will
 * be freed.
 *
 * For data written to disk, use btrfs_qgroup_release_data().
 *
 * NOTE: This function may sleep for memory allocation.
 */
int btrfs_qgroup_free_data(struct btrfs_inode *inode,
			struct extent_changeset *reserved, u64 start, u64 len)
{
	return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
}

/*
 * Release a reserved space range from io_tree only.
 *
 * Should be called when a range of pages get written to disk and corresponding
 * FILE_EXTENT is inserted into corresponding root.
 *
 * Since new qgroup accounting framework will only update qgroup numbers at
 * commit_transaction() time, its reserved space shouldn't be freed from
 * related qgroups.
 *
 * But we should release the range from io_tree, to allow further write to be
 * COWed.
 *
 * NOTE: This function may sleep for memory allocation.
 */
int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len)
{
	return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
}
static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type)
{
	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
		return;
	if (num_bytes == 0)
		return;

	spin_lock(&root->qgroup_meta_rsv_lock);
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
		root->qgroup_meta_rsv_prealloc += num_bytes;
	else
		root->qgroup_meta_rsv_pertrans += num_bytes;
	spin_unlock(&root->qgroup_meta_rsv_lock);
}

static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
			     enum btrfs_qgroup_rsv_type type)
{
	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
		return 0;
	if (num_bytes == 0)
		return 0;

	spin_lock(&root->qgroup_meta_rsv_lock);
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
		num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
				  num_bytes);
		root->qgroup_meta_rsv_prealloc -= num_bytes;
	} else {
		num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
				  num_bytes);
		root->qgroup_meta_rsv_pertrans -= num_bytes;
	}
	spin_unlock(&root->qgroup_meta_rsv_lock);
	return num_bytes;
}
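/*
 * Note: sub_root_meta_rsv() clamps to what this root actually recorded, and
 * its return value replaces the caller's num_bytes.  This is the guard that
 * keeps a free issued after a quota disable -> enable cycle from releasing
 * more qgroup reservation than was ever added.
 */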
static int qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
			       enum btrfs_qgroup_rsv_type type, bool enforce)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid) || num_bytes == 0)
		return 0;

	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
	trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
	ret = qgroup_reserve(root, num_bytes, enforce, type);
	if (ret < 0)
		return ret;
	/*
	 * Record what we have reserved into root.
	 *
	 * To avoid quota disabled->enabled underflow.
	 * In that case, we may try to free space we haven't reserved
	 * (since quota was disabled), so record what we reserved into root.
	 * And ensure later release won't underflow this number.
	 */
	add_root_meta_rsv(root, num_bytes, type);
	return ret;
}

int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
				enum btrfs_qgroup_rsv_type type, bool enforce)
{
	int ret;

	ret = qgroup_reserve_meta(root, num_bytes, type, enforce);
	if (ret <= 0 && ret != -EDQUOT)
		return ret;

	ret = try_flush_qgroup(root);
	if (ret < 0)
		return ret;
	return qgroup_reserve_meta(root, num_bytes, type, enforce);
}
void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid))
		return;

	/* TODO: Update trace point to handle such free */
	trace_qgroup_meta_free_all_pertrans(root);
	/* Special value -1 means to free all reserved space */
	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, (u64)-1,
				  BTRFS_QGROUP_RSV_META_PERTRANS);
}

void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid))
		return;

	/*
	 * reservation for META_PREALLOC can happen before quota is enabled,
	 * which can lead to underflow.
	 * Here ensure we will only free what we really have reserved.
	 */
	num_bytes = sub_root_meta_rsv(root, num_bytes, type);
	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
	trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
				  num_bytes, type);
}
static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
				int num_bytes)
{
	struct btrfs_qgroup *qgroup;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int ret = 0;

	if (num_bytes == 0)
		return;
	if (!fs_info->quota_root)
		return;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
		       qgroup_to_aux(qgroup), GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = unode_aux_to_qgroup(unode);

		qgroup_rsv_release(fs_info, qg, num_bytes,
				BTRFS_QGROUP_RSV_META_PREALLOC);
		qgroup_rsv_add(fs_info, qg, num_bytes,
				BTRFS_QGROUP_RSV_META_PERTRANS);
		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
}

void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid))
		return;
	/* Same as btrfs_qgroup_free_meta_prealloc() */
	num_bytes = sub_root_meta_rsv(root, num_bytes,
				      BTRFS_QGROUP_RSV_META_PREALLOC);
	trace_qgroup_meta_convert(root, num_bytes);
	qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
}
/*
 * Check qgroup reserved space leaking, normally at destroy inode
 * time
 */
void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
{
	struct extent_changeset changeset;
	struct ulist_node *unode;
	struct ulist_iterator iter;
	int ret;

	extent_changeset_init(&changeset);
	ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
				       EXTENT_QGROUP_RESERVED, &changeset);

	WARN_ON(ret < 0);
	if (WARN_ON(changeset.bytes_changed)) {
		ULIST_ITER_INIT(&iter);
		while ((unode = ulist_next(&changeset.range_changed, &iter))) {
			btrfs_warn(inode->root->fs_info,
		"leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu",
				btrfs_ino(inode), unode->val, unode->aux);
		}
		btrfs_qgroup_free_refroot(inode->root->fs_info,
				inode->root->root_key.objectid,
				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);

	}
	extent_changeset_release(&changeset);
}
void btrfs_qgroup_init_swapped_blocks(
	struct btrfs_qgroup_swapped_blocks *swapped_blocks)
{
	int i;

	spin_lock_init(&swapped_blocks->lock);
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		swapped_blocks->blocks[i] = RB_ROOT;
	swapped_blocks->swapped = false;
}

/*
 * Delete all swapped blocks record of @root.
 * Every record here means we skipped a full subtree scan for qgroup.
 *
 * Gets called when committing one transaction.
 */
void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root)
{
	struct btrfs_qgroup_swapped_blocks *swapped_blocks;
	int i;

	swapped_blocks = &root->swapped_blocks;

	spin_lock(&swapped_blocks->lock);
	if (!swapped_blocks->swapped)
		goto out;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		struct rb_root *cur_root = &swapped_blocks->blocks[i];
		struct btrfs_qgroup_swapped_block *entry;
		struct btrfs_qgroup_swapped_block *next;

		rbtree_postorder_for_each_entry_safe(entry, next, cur_root,
						     node)
			kfree(entry);
		swapped_blocks->blocks[i] = RB_ROOT;
	}
	swapped_blocks->swapped = false;
out:
	spin_unlock(&swapped_blocks->lock);
}
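/*
 * Note: every record dropped here stands for a subtree scan that was
 * deferred at swap time and never triggered by a COW during this
 * transaction; see btrfs_qgroup_trace_subtree_after_cow() below for the
 * path that consumes the records before commit.
 */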
/*
 * Add subtree roots record into @subvol_root.
 *
 * @subvol_root:	tree root of the subvolume tree get swapped
 * @bg:			block group under balance
 * @subvol_parent/slot:	pointer to the subtree root in subvolume tree
 * @reloc_parent/slot:	pointer to the subtree root in reloc tree
 *			BOTH POINTERS ARE BEFORE TREE SWAP
 * @last_snapshot:	last snapshot generation of the subvolume tree
 */
int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
		struct btrfs_root *subvol_root,
		struct btrfs_block_group *bg,
		struct extent_buffer *subvol_parent, int subvol_slot,
		struct extent_buffer *reloc_parent, int reloc_slot,
		u64 last_snapshot)
{
	struct btrfs_fs_info *fs_info = subvol_root->fs_info;
	struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
	struct btrfs_qgroup_swapped_block *block;
	struct rb_node **cur;
	struct rb_node *parent = NULL;
	int level = btrfs_header_level(subvol_parent) - 1;
	int ret = 0;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
	    btrfs_node_ptr_generation(reloc_parent, reloc_slot)) {
		btrfs_err_rl(fs_info,
		"%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
			__func__,
			btrfs_node_ptr_generation(subvol_parent, subvol_slot),
			btrfs_node_ptr_generation(reloc_parent, reloc_slot));
		return -EUCLEAN;
	}

	block = kmalloc(sizeof(*block), GFP_NOFS);
	if (!block) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * @reloc_parent/slot is still before swap, while @block is going to
	 * record the bytenr after swap, so we do the swap here.
	 */
	block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
	block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
							     reloc_slot);
	block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
	block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
							    subvol_slot);
	block->last_snapshot = last_snapshot;
	block->level = level;

	/*
	 * If we have bg == NULL, we're called from btrfs_recover_relocation(),
	 * no one else can modify tree blocks thus the qgroup will not change
	 * no matter the value of trace_leaf.
	 */
	if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA)
		block->trace_leaf = true;
	else
		block->trace_leaf = false;
	btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);

	/* Insert @block into @blocks */
	spin_lock(&blocks->lock);
	cur = &blocks->blocks[level].rb_node;
	while (*cur) {
		struct btrfs_qgroup_swapped_block *entry;

		parent = *cur;
		entry = rb_entry(parent, struct btrfs_qgroup_swapped_block,
				 node);

		if (entry->subvol_bytenr < block->subvol_bytenr) {
			cur = &(*cur)->rb_left;
		} else if (entry->subvol_bytenr > block->subvol_bytenr) {
			cur = &(*cur)->rb_right;
		} else {
			if (entry->subvol_generation !=
					block->subvol_generation ||
			    entry->reloc_bytenr != block->reloc_bytenr ||
			    entry->reloc_generation !=
					block->reloc_generation) {
				/*
				 * Duplicated but mismatch entry found.
				 * Shouldn't happen.
				 *
				 * Marking qgroup inconsistent should be enough
				 * for this case.
				 */
				WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
				ret = -EEXIST;
			}
			kfree(block);
			goto out_unlock;
		}
	}
	rb_link_node(&block->node, parent, cur);
	rb_insert_color(&block->node, &blocks->blocks[level]);
	blocks->swapped = true;
out_unlock:
	spin_unlock(&blocks->lock);
out:
	if (ret < 0)
		fs_info->qgroup_flags |=
			BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}
/*
 * Check if the tree block is a subtree root, and if so do the needed
 * delayed subtree trace for qgroup.
 *
 * This is called during btrfs_cow_block().
 */
int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct extent_buffer *subvol_eb)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks;
	struct btrfs_qgroup_swapped_block *block;
	struct extent_buffer *reloc_eb = NULL;
	struct rb_node *node;
	bool found = false;
	bool swapped = false;
	int level = btrfs_header_level(subvol_eb);
	int ret = 0;
	int i;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;
	if (!is_fstree(root->root_key.objectid) || !root->reloc_root)
		return 0;

	spin_lock(&blocks->lock);
	if (!blocks->swapped) {
		spin_unlock(&blocks->lock);
		return 0;
	}
	node = blocks->blocks[level].rb_node;

	while (node) {
		block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
		if (block->subvol_bytenr < subvol_eb->start) {
			node = node->rb_left;
		} else if (block->subvol_bytenr > subvol_eb->start) {
			node = node->rb_right;
		} else {
			found = true;
			break;
		}
	}
	if (!found) {
		spin_unlock(&blocks->lock);
		goto out;
	}
	/* Found one, remove it from @blocks first and update blocks->swapped */
	rb_erase(&block->node, &blocks->blocks[level]);
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (RB_EMPTY_ROOT(&blocks->blocks[i])) {
			swapped = true;
			break;
		}
	}
	blocks->swapped = swapped;
	spin_unlock(&blocks->lock);

	/* Read out reloc subtree root */
	reloc_eb = read_tree_block(fs_info, block->reloc_bytenr, 0,
				   block->reloc_generation, block->level,
				   &block->first_key);
	if (IS_ERR(reloc_eb)) {
		ret = PTR_ERR(reloc_eb);
		reloc_eb = NULL;
		goto free_out;
	}
	if (!extent_buffer_uptodate(reloc_eb)) {
		ret = -EIO;
		goto free_out;
	}

	ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb,
			block->last_snapshot, block->trace_leaf);
free_out:
	kfree(block);
	free_extent_buffer(reloc_eb);
out:
	if (ret < 0) {
		btrfs_err_rl(fs_info,
			     "failed to account subtree at bytenr %llu: %d",
			     subvol_eb->start, ret);
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	return ret;
}
void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
{
	struct btrfs_qgroup_extent_record *entry;
	struct btrfs_qgroup_extent_record *next;
	struct rb_root *root;

	root = &trans->delayed_refs.dirty_extent_root;
	rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
		ulist_free(entry->old_roots);